summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore2
-rw-r--r--.mailmap3
-rw-r--r--CREDITS11
-rw-r--r--Documentation/ABI/stable/sysfs-bus-mhi4
-rw-r--r--Documentation/ABI/stable/sysfs-driver-dma-idxd2
-rw-r--r--Documentation/ABI/stable/sysfs-driver-mlxreg-io53
-rw-r--r--Documentation/ABI/testing/configfs-usb-gadget-midi254
-rw-r--r--Documentation/ABI/testing/debugfs-tpmi31
-rw-r--r--Documentation/ABI/testing/sysfs-bus-counter8
-rw-r--r--Documentation/ABI/testing/sysfs-bus-fsi-devices-sbefifo2
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio16
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-filter-admv88182
-rw-r--r--Documentation/ABI/testing/sysfs-bus-thunderbolt38
-rw-r--r--Documentation/ABI/testing/sysfs-bus-umc28
-rw-r--r--Documentation/ABI/testing/sysfs-bus-usb34
-rw-r--r--Documentation/ABI/testing/sysfs-class-firmware-attributes101
-rw-r--r--Documentation/ABI/testing/sysfs-class-led9
-rw-r--r--Documentation/ABI/testing/sysfs-class-uwb_rc156
-rw-r--r--Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc57
-rw-r--r--Documentation/ABI/testing/sysfs-driver-ufs247
-rw-r--r--Documentation/ABI/testing/sysfs-fs-f2fs6
-rw-r--r--Documentation/ABI/testing/sysfs-platform-asus-wmi88
-rw-r--r--Documentation/ABI/testing/sysfs-platform-mellanox-bootctl66
-rw-r--r--Documentation/ABI/testing/sysfs-wusb_cbaf101
-rw-r--r--Documentation/admin-guide/cgroup-v1/memory.rst6
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst2
-rw-r--r--Documentation/admin-guide/devices.txt16
-rw-r--r--Documentation/admin-guide/dynamic-debug-howto.rst5
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt52
-rw-r--r--Documentation/admin-guide/media/qcom_camss.rst6
-rw-r--r--Documentation/admin-guide/sysctl/kernel.rst56
-rw-r--r--Documentation/bpf/btf.rst31
-rw-r--r--Documentation/bpf/index.rst1
-rw-r--r--Documentation/bpf/linux-notes.rst (renamed from Documentation/bpf/standardization/linux-notes.rst)0
-rw-r--r--Documentation/bpf/llvm_reloc.rst304
-rw-r--r--Documentation/bpf/standardization/abi.rst25
-rw-r--r--Documentation/bpf/standardization/index.rst2
-rw-r--r--Documentation/bpf/standardization/instruction-set.rst44
-rw-r--r--Documentation/core-api/printk-formats.rst9
-rw-r--r--Documentation/core-api/workqueue.rst356
-rw-r--r--Documentation/dev-tools/kasan.rst4
-rw-r--r--Documentation/devicetree/bindings/dma/atmel-xdma.txt3
-rw-r--r--Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt83
-rw-r--r--Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.yaml102
-rw-r--r--Documentation/devicetree/bindings/dma/fsl,edma.yaml106
-rw-r--r--Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml31
-rw-r--r--Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt6
-rw-r--r--Documentation/devicetree/bindings/extcon/maxim,max77843.yaml1
-rw-r--r--Documentation/devicetree/bindings/extcon/siliconmitus,sm5502-muic.yaml4
-rw-r--r--Documentation/devicetree/bindings/fsi/ibm,i2cr-fsi-master.yaml41
-rw-r--r--Documentation/devicetree/bindings/i2c/cdns,i2c-r1p10.yaml3
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt82
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.yaml135
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-arb.txt35
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-atr.yaml34
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.yaml46
-rw-r--r--Documentation/devicetree/bindings/i2c/nxp,pca9541.txt29
-rw-r--r--Documentation/devicetree/bindings/i2c/nxp,pca9541.yaml56
-rw-r--r--Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml1
-rw-r--r--Documentation/devicetree/bindings/i3c/i3c.yaml15
-rw-r--r--Documentation/devicetree/bindings/iio/adc/allwinner,sun20i-d1-gpadc.yaml91
-rw-r--r--Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml15
-rw-r--r--Documentation/devicetree/bindings/iio/dac/microchip,mcp4728.yaml49
-rw-r--r--Documentation/devicetree/bindings/iio/frequency/adi,admv1013.yaml60
-rw-r--r--Documentation/devicetree/bindings/iio/frequency/adi,admv1014.yaml8
-rw-r--r--Documentation/devicetree/bindings/iio/light/rohm,bu27010.yaml49
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/murata,irsd200.yaml60
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/semtech,sx9310.yaml5
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/semtech,sx9324.yaml5
-rw-r--r--Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml248
-rw-r--r--Documentation/devicetree/bindings/input/ilitek,ili9882t.yaml67
-rw-r--r--Documentation/devicetree/bindings/input/stmpe-keypad.txt41
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml769
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml6
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/eeti,exc3000.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/melfas,mms114.yaml5
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/stmpe.txt108
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml6
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml1
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml18
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/st,sti-irq-syscfg.txt30
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/st,stih407-irq-syscfg.yaml65
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu.yaml41
-rw-r--r--Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml12
-rw-r--r--Documentation/devicetree/bindings/iommu/qcom,iommu.yaml22
-rw-r--r--Documentation/devicetree/bindings/leds/common.yaml15
-rw-r--r--Documentation/devicetree/bindings/leds/leds-an30259a.txt55
-rw-r--r--Documentation/devicetree/bindings/leds/leds-aw2013.yaml13
-rw-r--r--Documentation/devicetree/bindings/leds/leds-group-multicolor.yaml64
-rw-r--r--Documentation/devicetree/bindings/leds/nxp,pca953x.yaml4
-rw-r--r--Documentation/devicetree/bindings/leds/nxp,pca995x.yaml81
-rw-r--r--Documentation/devicetree/bindings/leds/panasonic,an30259a.yaml84
-rw-r--r--Documentation/devicetree/bindings/leds/rohm,bd2606mvv.yaml4
-rw-r--r--Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/amphion,vpu.yaml8
-rw-r--r--Documentation/devicetree/bindings/media/cdns,csi2rx.txt100
-rw-r--r--Documentation/devicetree/bindings/media/cdns,csi2rx.yaml201
-rw-r--r--Documentation/devicetree/bindings/media/cec/nvidia,tegra114-cec.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ov5695.txt41
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ov7251.txt52
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov5693.yaml31
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ovti,ov7251.yaml109
-rw-r--r--Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml1
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ti,ds90ub913.yaml133
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ti,ds90ub953.yaml134
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ti,ds90ub960.yaml427
-rw-r--r--Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml67
-rw-r--r--Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml5
-rw-r--r--Documentation/devicetree/bindings/media/rockchip-isp1.yaml1
-rw-r--r--Documentation/devicetree/bindings/mfd/allwinner,sun6i-a31-prcm.yaml43
-rw-r--r--Documentation/devicetree/bindings/mfd/allwinner,sun8i-a23-prcm.yaml42
-rw-r--r--Documentation/devicetree/bindings/mfd/atmel-flexcom.txt1
-rw-r--r--Documentation/devicetree/bindings/mfd/atmel-gpbr.txt1
-rw-r--r--Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt1
-rw-r--r--Documentation/devicetree/bindings/mfd/atmel-matrix.txt1
-rw-r--r--Documentation/devicetree/bindings/mfd/atmel-smc.txt1
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,bcm6318-gpio-sysctl.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,bcm63268-gpio-sysctl.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,bcm6328-gpio-sysctl.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,bcm6358-gpio-sysctl.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,bcm6362-gpio-sysctl.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/brcm,bcm6368-gpio-sysctl.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/maxim,max77693.yaml52
-rw-r--r--Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml6
-rw-r--r--Documentation/devicetree/bindings/mfd/rohm,bd71847-pmic.yaml1
-rw-r--r--Documentation/devicetree/bindings/mfd/st,stmpe.yaml297
-rw-r--r--Documentation/devicetree/bindings/mfd/st,stpmic1.yaml12
-rw-r--r--Documentation/devicetree/bindings/mfd/stericsson,db8500-prcmu.yaml20
-rw-r--r--Documentation/devicetree/bindings/mfd/stmpe.txt42
-rw-r--r--Documentation/devicetree/bindings/mtd/amlogic,meson-nand.yaml6
-rw-r--r--Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml21
-rw-r--r--Documentation/devicetree/bindings/mtd/marvell,nand-controller.yaml1
-rw-r--r--Documentation/devicetree/bindings/mtd/nand-controller.yaml2
-rw-r--r--Documentation/devicetree/bindings/mtd/oxnas-nand.txt41
-rw-r--r--Documentation/devicetree/bindings/mtd/partitions/seama.yaml44
-rw-r--r--Documentation/devicetree/bindings/nvmem/fsl,t1023-sfp.yaml37
-rw-r--r--Documentation/devicetree/bindings/nvmem/layouts/fixed-cell.yaml26
-rw-r--r--Documentation/devicetree/bindings/nvmem/layouts/fixed-layout.yaml12
-rw-r--r--Documentation/devicetree/bindings/nvmem/nvmem.yaml5
-rw-r--r--Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml2
-rw-r--r--Documentation/devicetree/bindings/nvmem/qcom,sec-qfprom.yaml55
-rw-r--r--Documentation/devicetree/bindings/peci/nuvoton,npcm-peci.yaml56
-rw-r--r--Documentation/devicetree/bindings/phy/mediatek,tphy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,ipq5332-usb-hsphy.yaml59
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml278
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-ufs-phy.yaml228
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml80
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,msm8998-qmp-pcie-phy.yaml97
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc7180-qmp-usb3-dp-phy.yaml282
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml55
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml48
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml46
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml7
-rw-r--r--Documentation/devicetree/bindings/phy/realtek,usb2phy.yaml175
-rw-r--r--Documentation/devicetree/bindings/phy/realtek,usb3phy.yaml107
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip,inno-usb2phy.yaml21
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip,pcie3-phy.yaml33
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/starfive,jh7110-dphy-rx.yaml71
-rw-r--r--Documentation/devicetree/bindings/phy/starfive,jh7110-pcie-phy.yaml58
-rw-r--r--Documentation/devicetree/bindings/phy/starfive,jh7110-usb-phy.yaml50
-rw-r--r--Documentation/devicetree/bindings/pwm/brcm,kona-pwm.txt21
-rw-r--r--Documentation/devicetree/bindings/pwm/brcm,kona-pwm.yaml51
-rw-r--r--Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml20
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml24
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,msm8996-mss-pil.yaml12
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,pas-common.yaml1
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,qcs404-pas.yaml3
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sc7180-pas.yaml3
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sc8180x-pas.yaml3
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sc8280xp-pas.yaml3
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sdx55-pas.yaml3
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sm6115-pas.yaml39
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sm6350-pas.yaml3
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sm8150-pas.yaml3
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sm8350-pas.yaml3
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml1
-rw-r--r--Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.yaml18
-rw-r--r--Documentation/devicetree/bindings/rtc/intersil,isl12022.yaml64
-rw-r--r--Documentation/devicetree/bindings/rtc/maxim,ds3231.txt38
-rw-r--r--Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml1
-rw-r--r--Documentation/devicetree/bindings/rtc/st,m48t86.yaml38
-rw-r--r--Documentation/devicetree/bindings/rtc/trivial-rtc.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml6
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-lpuart.yaml6
-rw-r--r--Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt46
-rw-r--r--Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,easrc.yaml8
-rw-r--r--Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml44
-rw-r--r--Documentation/devicetree/bindings/timer/oxsemi,rps-timer.txt17
-rw-r--r--Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml17
-rw-r--r--Documentation/devicetree/bindings/usb/cypress,hx3.yaml77
-rw-r--r--Documentation/devicetree/bindings/usb/generic-ehci.yaml1
-rw-r--r--Documentation/devicetree/bindings/usb/genesys,gl850g.yaml1
-rw-r--r--Documentation/devicetree/bindings/usb/qcom,dwc3.yaml36
-rw-r--r--Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml20
-rw-r--r--Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml1
-rw-r--r--Documentation/devicetree/bindings/watchdog/marvell,cn10624-wdt.yaml83
-rw-r--r--Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml1
-rw-r--r--Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml28
-rw-r--r--Documentation/driver-api/interconnect.rst25
-rw-r--r--Documentation/driver-api/libata.rst38
-rw-r--r--Documentation/driver-api/media/cec-core.rst44
-rw-r--r--Documentation/driver-api/media/v4l2-cci.rst5
-rw-r--r--Documentation/driver-api/media/v4l2-core.rst1
-rw-r--r--Documentation/driver-api/media/v4l2-subdev.rst110
-rw-r--r--Documentation/driver-api/tty/tty_buffer.rst7
-rw-r--r--Documentation/driver-api/usb/usb.rst9
-rw-r--r--Documentation/features/debug/KASAN/arch-support.txt2
-rw-r--r--Documentation/features/debug/kcov/arch-support.txt2
-rw-r--r--Documentation/features/debug/kgdb/arch-support.txt2
-rw-r--r--Documentation/filesystems/ceph.rst10
-rw-r--r--Documentation/filesystems/gfs2-glocks.rst3
-rw-r--r--Documentation/filesystems/nfs/exporting.rst26
-rw-r--r--Documentation/filesystems/proc.rst4
-rw-r--r--Documentation/gpu/amdgpu/driver-misc.rst8
-rw-r--r--Documentation/hid/hidintro.rst524
-rw-r--r--Documentation/hid/hidreport-parsing.rst49
-rw-r--r--Documentation/hid/index.rst1
-rw-r--r--Documentation/i2c/i2c-address-translators.rst96
-rw-r--r--Documentation/i2c/index.rst1
-rw-r--r--Documentation/kbuild/kconfig.rst17
-rw-r--r--Documentation/kbuild/llvm.rst124
-rw-r--r--Documentation/process/maintainer-netdev.rst36
-rw-r--r--Documentation/process/stable-kernel-rules.rst195
-rw-r--r--Documentation/riscv/vm-layout.rst22
-rw-r--r--Documentation/scsi/scsi_mid_low_api.rst4
-rw-r--r--Documentation/trace/events.rst14
-rw-r--r--Documentation/trace/fprobetrace.rst64
-rw-r--r--Documentation/translations/zh_CN/dev-tools/kasan.rst2
-rw-r--r--Documentation/usb/authorization.rst9
-rw-r--r--Documentation/usb/gadget-testing.rst154
-rw-r--r--Documentation/userspace-api/media/v4l/dev-decoder.rst16
-rw-r--r--Documentation/userspace-api/media/v4l/dev-encoder.rst24
-rw-r--r--Documentation/userspace-api/media/v4l/dev-stateless-decoder.rst4
-rw-r--r--Documentation/userspace-api/media/v4l/pixfmt-reserved.rst13
-rw-r--r--Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst7
-rw-r--r--Documentation/userspace-api/netlink/intro.rst2
-rw-r--r--Documentation/virt/kvm/api.rst4
-rw-r--r--MAINTAINERS188
-rw-r--r--Makefile172
-rw-r--r--arch/alpha/include/asm/Kbuild1
-rw-r--r--arch/alpha/kernel/setup.c2
-rw-r--r--arch/alpha/kernel/srmcons.c5
-rw-r--r--arch/alpha/lib/callback_srm.S2
-rw-r--r--arch/alpha/lib/clear_page.S2
-rw-r--r--arch/alpha/lib/clear_user.S2
-rw-r--r--arch/alpha/lib/copy_page.S2
-rw-r--r--arch/alpha/lib/copy_user.S2
-rw-r--r--arch/alpha/lib/csum_ipv6_magic.S2
-rw-r--r--arch/alpha/lib/divide.S2
-rw-r--r--arch/alpha/lib/ev6-clear_page.S2
-rw-r--r--arch/alpha/lib/ev6-clear_user.S2
-rw-r--r--arch/alpha/lib/ev6-copy_page.S2
-rw-r--r--arch/alpha/lib/ev6-copy_user.S2
-rw-r--r--arch/alpha/lib/ev6-csum_ipv6_magic.S2
-rw-r--r--arch/alpha/lib/ev6-divide.S2
-rw-r--r--arch/alpha/lib/ev6-memchr.S2
-rw-r--r--arch/alpha/lib/ev6-memcpy.S2
-rw-r--r--arch/alpha/lib/ev6-memset.S2
-rw-r--r--arch/alpha/lib/ev67-strcat.S2
-rw-r--r--arch/alpha/lib/ev67-strchr.S2
-rw-r--r--arch/alpha/lib/ev67-strlen.S2
-rw-r--r--arch/alpha/lib/ev67-strncat.S2
-rw-r--r--arch/alpha/lib/ev67-strrchr.S2
-rw-r--r--arch/alpha/lib/memchr.S2
-rw-r--r--arch/alpha/lib/memmove.S2
-rw-r--r--arch/alpha/lib/memset.S2
-rw-r--r--arch/alpha/lib/strcat.S2
-rw-r--r--arch/alpha/lib/strchr.S2
-rw-r--r--arch/alpha/lib/strcpy.S2
-rw-r--r--arch/alpha/lib/strlen.S2
-rw-r--r--arch/alpha/lib/strncat.S2
-rw-r--r--arch/alpha/lib/strncpy.S2
-rw-r--r--arch/alpha/lib/strrchr.S2
-rw-r--r--arch/alpha/lib/udiv-qrnnd.S2
-rw-r--r--arch/arc/Kconfig8
-rw-r--r--arch/arc/Makefile6
-rw-r--r--arch/arc/include/asm/arcregs.h99
-rw-r--r--arch/arc/include/asm/atomic-llsc.h6
-rw-r--r--arch/arc/include/asm/atomic64-arcv2.h6
-rw-r--r--arch/arc/include/asm/current.h2
-rw-r--r--arch/arc/include/asm/dwarf.h32
-rw-r--r--arch/arc/include/asm/entry-arcv2.h66
-rw-r--r--arch/arc/include/asm/entry-compact.h50
-rw-r--r--arch/arc/include/asm/entry.h128
-rw-r--r--arch/arc/include/asm/irq.h1
-rw-r--r--arch/arc/include/asm/mmu.h2
-rw-r--r--arch/arc/include/asm/processor.h7
-rw-r--r--arch/arc/include/asm/ptrace.h65
-rw-r--r--arch/arc/include/asm/setup.h8
-rw-r--r--arch/arc/include/asm/smp.h2
-rw-r--r--arch/arc/include/asm/thread_info.h10
-rw-r--r--arch/arc/include/asm/uaccess.h21
-rw-r--r--arch/arc/kernel/Makefile9
-rw-r--r--arch/arc/kernel/asm-offsets.c14
-rw-r--r--arch/arc/kernel/ctx_sw.c112
-rw-r--r--arch/arc/kernel/ctx_sw_asm.S76
-rw-r--r--arch/arc/kernel/devtree.c1
-rw-r--r--arch/arc/kernel/entry-arcv2.S15
-rw-r--r--arch/arc/kernel/entry-compact.S19
-rw-r--r--arch/arc/kernel/entry.S70
-rw-r--r--arch/arc/kernel/intc-arcv2.c2
-rw-r--r--arch/arc/kernel/kgdb.c2
-rw-r--r--arch/arc/kernel/mcip.c2
-rw-r--r--arch/arc/kernel/process.c17
-rw-r--r--arch/arc/kernel/ptrace.c8
-rw-r--r--arch/arc/kernel/setup.c563
-rw-r--r--arch/arc/kernel/signal.c1
-rw-r--r--arch/arc/kernel/smp.c7
-rw-r--r--arch/arc/kernel/stacktrace.c1
-rw-r--r--arch/arc/kernel/traps.c5
-rw-r--r--arch/arc/kernel/troubleshoot.c13
-rw-r--r--arch/arc/lib/memset-archs.S3
-rw-r--r--arch/arc/mm/cache.c179
-rw-r--r--arch/arc/mm/extable.c11
-rw-r--r--arch/arc/mm/fault.c7
-rw-r--r--arch/arc/mm/init.c1
-rw-r--r--arch/arc/mm/tlb.c99
-rw-r--r--arch/arc/plat-axs10x/axs10x.c1
-rw-r--r--arch/arm/boot/dts/nuvoton/nuvoton-common-npcm7xx.dtsi9
-rw-r--r--arch/arm/configs/dram_0x00000000.config1
-rw-r--r--arch/arm/configs/dram_0xc0000000.config1
-rw-r--r--arch/arm/configs/dram_0xd0000000.config1
-rw-r--r--arch/arm/configs/lpae.config1
-rw-r--r--arch/arm/include/asm/arm_pmuv3.h2
-rw-r--r--arch/arm/include/asm/ide.h24
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-a1.dtsi4
-rw-r--r--arch/arm64/boot/dts/nuvoton/nuvoton-common-npcm8xx.dtsi9
-rw-r--r--arch/arm64/configs/defconfig1
-rw-r--r--arch/arm64/configs/virt.config1
-rw-r--r--arch/arm64/include/asm/kvm_arm.h51
-rw-r--r--arch/arm64/include/asm/kvm_asm.h3
-rw-r--r--arch/arm64/include/asm/kvm_host.h24
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h1
-rw-r--r--arch/arm64/include/asm/kvm_nested.h2
-rw-r--r--arch/arm64/include/asm/kvm_pgtable.h10
-rw-r--r--arch/arm64/include/asm/sysreg.h268
-rw-r--r--arch/arm64/include/asm/tlbflush.h124
-rw-r--r--arch/arm64/kernel/cpufeature.c7
-rw-r--r--arch/arm64/kernel/idreg-override.c6
-rw-r--r--arch/arm64/kvm/Kconfig2
-rw-r--r--arch/arm64/kvm/arm.c65
-rw-r--r--arch/arm64/kvm/emulate-nested.c1852
-rw-r--r--arch/arm64/kvm/guest.c15
-rw-r--r--arch/arm64/kvm/handle_exit.c29
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h127
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/mm.h1
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-main.c11
-rw-r--r--arch/arm64/kvm/hyp/nvhe/mm.c83
-rw-r--r--arch/arm64/kvm/hyp/nvhe/setup.c27
-rw-r--r--arch/arm64/kvm/hyp/nvhe/switch.c2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/tlb.c30
-rw-r--r--arch/arm64/kvm/hyp/pgtable.c63
-rw-r--r--arch/arm64/kvm/hyp/vhe/tlb.c28
-rw-r--r--arch/arm64/kvm/mmu.c104
-rw-r--r--arch/arm64/kvm/nested.c11
-rw-r--r--arch/arm64/kvm/pmu-emul.c37
-rw-r--r--arch/arm64/kvm/pmu.c18
-rw-r--r--arch/arm64/kvm/reset.c25
-rw-r--r--arch/arm64/kvm/sys_regs.c15
-rw-r--r--arch/arm64/kvm/trace_arm.h26
-rw-r--r--arch/arm64/kvm/vgic/vgic.h2
-rw-r--r--arch/arm64/lib/csum.c2
-rw-r--r--arch/arm64/tools/cpucaps1
-rw-r--r--arch/arm64/tools/sysreg129
-rw-r--r--arch/csky/include/asm/traps.h2
-rw-r--r--arch/ia64/include/asm/Kbuild1
-rw-r--r--arch/ia64/kernel/entry.S3
-rw-r--r--arch/ia64/kernel/esi_stub.S2
-rw-r--r--arch/ia64/kernel/head.S3
-rw-r--r--arch/ia64/kernel/ivt.S3
-rw-r--r--arch/ia64/kernel/pal.S2
-rw-r--r--arch/ia64/lib/clear_page.S2
-rw-r--r--arch/ia64/lib/clear_user.S2
-rw-r--r--arch/ia64/lib/copy_page.S2
-rw-r--r--arch/ia64/lib/copy_page_mck.S2
-rw-r--r--arch/ia64/lib/copy_user.S2
-rw-r--r--arch/ia64/lib/flush.S3
-rw-r--r--arch/ia64/lib/idiv32.S2
-rw-r--r--arch/ia64/lib/idiv64.S2
-rw-r--r--arch/ia64/lib/ip_fast_csum.S2
-rw-r--r--arch/ia64/lib/memcpy.S2
-rw-r--r--arch/ia64/lib/memcpy_mck.S2
-rw-r--r--arch/ia64/lib/memset.S2
-rw-r--r--arch/ia64/lib/strlen.S2
-rw-r--r--arch/ia64/lib/strncpy_from_user.S2
-rw-r--r--arch/ia64/lib/strnlen_user.S2
-rw-r--r--arch/ia64/lib/xor.S2
-rw-r--r--arch/loongarch/Kconfig26
-rw-r--r--arch/loongarch/Makefile3
-rw-r--r--arch/loongarch/configs/loongson3_defconfig74
-rw-r--r--arch/loongarch/include/asm/asm-prototypes.h1
-rw-r--r--arch/loongarch/include/asm/asmmacro.h158
-rw-r--r--arch/loongarch/include/asm/kasan.h126
-rw-r--r--arch/loongarch/include/asm/kfence.h61
-rw-r--r--arch/loongarch/include/asm/kgdb.h97
-rw-r--r--arch/loongarch/include/asm/lbt.h109
-rw-r--r--arch/loongarch/include/asm/loongarch.h47
-rw-r--r--arch/loongarch/include/asm/mmzone.h2
-rw-r--r--arch/loongarch/include/asm/page.h7
-rw-r--r--arch/loongarch/include/asm/pgalloc.h1
-rw-r--r--arch/loongarch/include/asm/pgtable.h31
-rw-r--r--arch/loongarch/include/asm/processor.h26
-rw-r--r--arch/loongarch/include/asm/setup.h8
-rw-r--r--arch/loongarch/include/asm/stackframe.h4
-rw-r--r--arch/loongarch/include/asm/string.h20
-rw-r--r--arch/loongarch/include/asm/switch_to.h2
-rw-r--r--arch/loongarch/include/asm/thread_info.h4
-rw-r--r--arch/loongarch/include/asm/xor.h68
-rw-r--r--arch/loongarch/include/asm/xor_simd.h34
-rw-r--r--arch/loongarch/include/uapi/asm/ptrace.h6
-rw-r--r--arch/loongarch/include/uapi/asm/sigcontext.h10
-rw-r--r--arch/loongarch/kernel/Makefile9
-rw-r--r--arch/loongarch/kernel/asm-offsets.c18
-rw-r--r--arch/loongarch/kernel/cpu-probe.c14
-rw-r--r--arch/loongarch/kernel/entry.S5
-rw-r--r--arch/loongarch/kernel/fpu.S14
-rw-r--r--arch/loongarch/kernel/head.S13
-rw-r--r--arch/loongarch/kernel/kfpu.c55
-rw-r--r--arch/loongarch/kernel/kgdb.c727
-rw-r--r--arch/loongarch/kernel/lbt.S155
-rw-r--r--arch/loongarch/kernel/numa.c35
-rw-r--r--arch/loongarch/kernel/process.c15
-rw-r--r--arch/loongarch/kernel/ptrace.c54
-rw-r--r--arch/loongarch/kernel/relocate.c8
-rw-r--r--arch/loongarch/kernel/setup.c4
-rw-r--r--arch/loongarch/kernel/signal.c188
-rw-r--r--arch/loongarch/kernel/stacktrace.c18
-rw-r--r--arch/loongarch/kernel/sysrq.c2
-rw-r--r--arch/loongarch/kernel/traps.c50
-rw-r--r--arch/loongarch/lib/Makefile2
-rw-r--r--arch/loongarch/lib/clear_user.S87
-rw-r--r--arch/loongarch/lib/copy_user.S161
-rw-r--r--arch/loongarch/lib/memcpy.S8
-rw-r--r--arch/loongarch/lib/memmove.S20
-rw-r--r--arch/loongarch/lib/memset.S8
-rw-r--r--arch/loongarch/lib/xor_simd.c93
-rw-r--r--arch/loongarch/lib/xor_simd.h38
-rw-r--r--arch/loongarch/lib/xor_simd_glue.c72
-rw-r--r--arch/loongarch/lib/xor_template.c110
-rw-r--r--arch/loongarch/mm/Makefile3
-rw-r--r--arch/loongarch/mm/cache.c1
-rw-r--r--arch/loongarch/mm/fault.c22
-rw-r--r--arch/loongarch/mm/init.c71
-rw-r--r--arch/loongarch/mm/kasan_init.c243
-rw-r--r--arch/loongarch/mm/mmap.c13
-rw-r--r--arch/loongarch/mm/pgtable.c12
-rw-r--r--arch/loongarch/vdso/Makefile3
-rw-r--r--arch/m68k/coldfire/dma_timer.c2
-rw-r--r--arch/m68k/emu/nfcon.c8
-rw-r--r--arch/m68k/include/asm/ide.h67
-rw-r--r--arch/m68k/kernel/pcibios.c6
-rw-r--r--arch/microblaze/include/asm/page.h27
-rw-r--r--arch/microblaze/include/asm/setup.h2
-rw-r--r--arch/microblaze/kernel/reset.c1
-rw-r--r--arch/microblaze/mm/init.c16
-rw-r--r--arch/mips/Makefile16
-rw-r--r--arch/mips/bmips/setup.c1
-rw-r--r--arch/mips/cavium-octeon/Makefile1
-rw-r--r--arch/mips/cavium-octeon/flash_setup.c3
-rw-r--r--arch/mips/cavium-octeon/octeon-memcpy.S2
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c3
-rw-r--r--arch/mips/configs/ip22_defconfig1
-rw-r--r--arch/mips/configs/loongson3_defconfig1
-rw-r--r--arch/mips/configs/malta_defconfig1
-rw-r--r--arch/mips/configs/malta_kvm_defconfig1
-rw-r--r--arch/mips/configs/maltaup_xpa_defconfig1
-rw-r--r--arch/mips/configs/rm200_defconfig1
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/kvm_host.h3
-rw-r--r--arch/mips/include/asm/mach-loongson32/loongson1.h2
-rw-r--r--arch/mips/include/asm/mach-loongson32/regs-clk.h81
-rw-r--r--arch/mips/include/asm/mach-loongson32/regs-rtc.h19
-rw-r--r--arch/mips/kernel/mcount.S2
-rw-r--r--arch/mips/kernel/octeon_switch.S1
-rw-r--r--arch/mips/kernel/r2300_fpu.S2
-rw-r--r--arch/mips/kernel/r2300_switch.S1
-rw-r--r--arch/mips/kernel/r4k_fpu.S2
-rw-r--r--arch/mips/kernel/sysrq.c2
-rw-r--r--arch/mips/kvm/mips.c12
-rw-r--r--arch/mips/kvm/mmu.c2
-rw-r--r--arch/mips/lantiq/irq.c2
-rw-r--r--arch/mips/lantiq/xway/dcdc.c3
-rw-r--r--arch/mips/lantiq/xway/gptu.c3
-rw-r--r--arch/mips/lantiq/xway/sysctrl.c1
-rw-r--r--arch/mips/lantiq/xway/vmmc.c3
-rw-r--r--arch/mips/lib/csum_partial.S2
-rw-r--r--arch/mips/lib/memcpy.S2
-rw-r--r--arch/mips/lib/memset.S2
-rw-r--r--arch/mips/lib/strncpy_user.S2
-rw-r--r--arch/mips/lib/strnlen_user.S2
-rw-r--r--arch/mips/loongson32/common/platform.c8
-rw-r--r--arch/mips/loongson64/smp.c160
-rw-r--r--arch/mips/mm/page-funcs.S2
-rw-r--r--arch/mips/mm/tlb-funcs.S2
-rw-r--r--arch/mips/pci/pci-lantiq.c4
-rw-r--r--arch/mips/pci/pci-rt2880.c5
-rw-r--r--arch/mips/pic32/pic32mzda/config.c2
-rw-r--r--arch/mips/ralink/ill_acc.c2
-rw-r--r--arch/mips/ralink/irq.c2
-rw-r--r--arch/mips/ralink/of.c2
-rw-r--r--arch/mips/ralink/prom.c2
-rw-r--r--arch/mips/txx9/generic/pci.c43
-rw-r--r--arch/mips/vdso/vdso.lds.S2
-rw-r--r--arch/openrisc/include/asm/bug.h11
-rw-r--r--arch/openrisc/include/asm/page.h11
-rw-r--r--arch/openrisc/include/asm/processor.h1
-rw-r--r--arch/openrisc/kernel/process.c4
-rw-r--r--arch/openrisc/kernel/ptrace.c4
-rw-r--r--arch/openrisc/kernel/signal.c7
-rw-r--r--arch/openrisc/kernel/smp.c2
-rw-r--r--arch/openrisc/kernel/time.c2
-rw-r--r--arch/openrisc/kernel/traps.c85
-rw-r--r--arch/openrisc/mm/fault.c4
-rw-r--r--arch/openrisc/mm/init.c2
-rw-r--r--arch/openrisc/mm/ioremap.c2
-rw-r--r--arch/openrisc/mm/tlb.c9
-rw-r--r--arch/parisc/include/asm/ide.h54
-rw-r--r--arch/powerpc/configs/disable-werror.config1
-rw-r--r--arch/powerpc/configs/security.config4
-rw-r--r--arch/powerpc/crypto/Kconfig4
-rw-r--r--arch/powerpc/include/asm/fs_pd.h27
-rw-r--r--arch/powerpc/include/asm/ide.h18
-rw-r--r--arch/powerpc/platforms/8xx/mpc885ads_setup.c1
-rw-r--r--arch/powerpc/platforms/8xx/tqm8xx_setup.c1
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c2
-rw-r--r--arch/powerpc/xmon/xmon.c2
-rw-r--r--arch/riscv/Kconfig24
-rw-r--r--arch/riscv/Makefile3
-rw-r--r--arch/riscv/configs/32-bit.config1
-rw-r--r--arch/riscv/configs/64-bit.config1
-rw-r--r--arch/riscv/include/asm/alternative-macros.h2
-rw-r--r--arch/riscv/include/asm/cache.h14
-rw-r--r--arch/riscv/include/asm/cacheflush.h2
-rw-r--r--arch/riscv/include/asm/cfi.h22
-rw-r--r--arch/riscv/include/asm/csr.h2
-rw-r--r--arch/riscv/include/asm/elf.h13
-rw-r--r--arch/riscv/include/asm/hwcap.h17
-rw-r--r--arch/riscv/include/asm/insn.h10
-rw-r--r--arch/riscv/include/asm/kvm_host.h9
-rw-r--r--arch/riscv/include/asm/kvm_vcpu_vector.h6
-rw-r--r--arch/riscv/include/asm/mmu.h4
-rw-r--r--arch/riscv/include/asm/pgtable.h33
-rw-r--r--arch/riscv/include/asm/processor.h52
-rw-r--r--arch/riscv/include/asm/syscall.h5
-rw-r--r--arch/riscv/include/asm/syscall_wrapper.h87
-rw-r--r--arch/riscv/include/uapi/asm/kvm.h16
-rw-r--r--arch/riscv/include/uapi/asm/ptrace.h5
-rw-r--r--arch/riscv/include/uapi/asm/sigcontext.h2
-rw-r--r--arch/riscv/kernel/Makefile2
-rw-r--r--arch/riscv/kernel/cfi.c77
-rw-r--r--arch/riscv/kernel/compat_syscall_table.c8
-rw-r--r--arch/riscv/kernel/cpu.c181
-rw-r--r--arch/riscv/kernel/cpufeature.c521
-rw-r--r--arch/riscv/kernel/head.S6
-rw-r--r--arch/riscv/kernel/mcount.S9
-rw-r--r--arch/riscv/kernel/probes/decode-insn.c11
-rw-r--r--arch/riscv/kernel/probes/simulate-insn.c105
-rw-r--r--arch/riscv/kernel/probes/simulate-insn.h5
-rw-r--r--arch/riscv/kernel/setup.c6
-rw-r--r--arch/riscv/kernel/suspend_entry.S5
-rw-r--r--arch/riscv/kernel/sys_riscv.c6
-rw-r--r--arch/riscv/kernel/syscall_table.c8
-rw-r--r--arch/riscv/kernel/traps.c4
-rw-r--r--arch/riscv/kvm/Makefile1
-rw-r--r--arch/riscv/kvm/aia.c4
-rw-r--r--arch/riscv/kvm/mmu.c8
-rw-r--r--arch/riscv/kvm/vcpu.c547
-rw-r--r--arch/riscv/kvm/vcpu_fp.c12
-rw-r--r--arch/riscv/kvm/vcpu_onereg.c1051
-rw-r--r--arch/riscv/kvm/vcpu_sbi.c16
-rw-r--r--arch/riscv/kvm/vcpu_timer.c11
-rw-r--r--arch/riscv/kvm/vcpu_vector.c72
-rw-r--r--arch/riscv/mm/context.c2
-rw-r--r--arch/riscv/mm/dma-noncoherent.c8
-rw-r--r--arch/riscv/mm/init.c93
-rw-r--r--arch/riscv/mm/kasan_init.c8
-rw-r--r--arch/riscv/purgatory/Makefile4
-rw-r--r--arch/s390/boot/ipl_parm.c7
-rw-r--r--arch/s390/boot/startup.c4
-rw-r--r--arch/s390/boot/vmem.c12
-rw-r--r--arch/s390/configs/btf.config1
-rw-r--r--arch/s390/configs/kasan.config1
-rw-r--r--arch/s390/include/asm/airq.h1
-rw-r--r--arch/s390/include/asm/dma.h2
-rw-r--r--arch/s390/include/asm/kvm_host.h2
-rw-r--r--arch/s390/include/asm/sections.h4
-rw-r--r--arch/s390/include/asm/set_memory.h62
-rw-r--r--arch/s390/include/asm/setup.h1
-rw-r--r--arch/s390/include/asm/uv.h19
-rw-r--r--arch/s390/include/uapi/asm/kvm.h16
-rw-r--r--arch/s390/kernel/early.c5
-rw-r--r--arch/s390/kernel/machine_kexec.c4
-rw-r--r--arch/s390/kernel/setup.c17
-rw-r--r--arch/s390/kernel/uv.c2
-rw-r--r--arch/s390/kvm/intercept.c38
-rw-r--r--arch/s390/kvm/interrupt.c17
-rw-r--r--arch/s390/kvm/kvm-s390.c102
-rw-r--r--arch/s390/kvm/pv.c9
-rw-r--r--arch/s390/mm/dump_pagetables.c4
-rw-r--r--arch/s390/mm/fault.c2
-rw-r--r--arch/s390/mm/init.c4
-rw-r--r--arch/s390/mm/pageattr.c2
-rw-r--r--arch/s390/mm/vmem.c148
-rw-r--r--arch/s390/net/bpf_jit_comp.c10
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/ide.h97
-rw-r--r--arch/sparc/include/asm/vio.h2
-rw-r--r--arch/sparc/kernel/entry.S2
-rw-r--r--arch/sparc/kernel/head_32.S2
-rw-r--r--arch/sparc/kernel/head_64.S2
-rw-r--r--arch/sparc/kernel/process_64.c4
-rw-r--r--arch/sparc/lib/U1memcpy.S2
-rw-r--r--arch/sparc/lib/VISsave.S2
-rw-r--r--arch/sparc/lib/ashldi3.S2
-rw-r--r--arch/sparc/lib/ashrdi3.S2
-rw-r--r--arch/sparc/lib/atomic_64.S2
-rw-r--r--arch/sparc/lib/bitops.S2
-rw-r--r--arch/sparc/lib/blockops.S2
-rw-r--r--arch/sparc/lib/bzero.S2
-rw-r--r--arch/sparc/lib/checksum_32.S2
-rw-r--r--arch/sparc/lib/checksum_64.S2
-rw-r--r--arch/sparc/lib/clear_page.S2
-rw-r--r--arch/sparc/lib/copy_in_user.S2
-rw-r--r--arch/sparc/lib/copy_page.S2
-rw-r--r--arch/sparc/lib/copy_user.S2
-rw-r--r--arch/sparc/lib/csum_copy.S2
-rw-r--r--arch/sparc/lib/divdi3.S2
-rw-r--r--arch/sparc/lib/ffs.S2
-rw-r--r--arch/sparc/lib/fls.S2
-rw-r--r--arch/sparc/lib/fls64.S2
-rw-r--r--arch/sparc/lib/hweight.S2
-rw-r--r--arch/sparc/lib/ipcsum.S2
-rw-r--r--arch/sparc/lib/locks.S2
-rw-r--r--arch/sparc/lib/lshrdi3.S2
-rw-r--r--arch/sparc/lib/mcount.S2
-rw-r--r--arch/sparc/lib/memcmp.S2
-rw-r--r--arch/sparc/lib/memcpy.S3
-rw-r--r--arch/sparc/lib/memmove.S2
-rw-r--r--arch/sparc/lib/memscan_32.S2
-rw-r--r--arch/sparc/lib/memscan_64.S2
-rw-r--r--arch/sparc/lib/memset.S2
-rw-r--r--arch/sparc/lib/muldi3.S2
-rw-r--r--arch/sparc/lib/multi3.S2
-rw-r--r--arch/sparc/lib/strlen.S2
-rw-r--r--arch/sparc/lib/strncmp_32.S2
-rw-r--r--arch/sparc/lib/strncmp_64.S2
-rw-r--r--arch/sparc/lib/xor.S2
-rw-r--r--arch/sparc/mm/tlb.c1
-rw-r--r--arch/um/Kbuild2
-rw-r--r--arch/um/Makefile9
-rw-r--r--arch/um/configs/i386_defconfig1
-rw-r--r--arch/um/configs/x86_64_defconfig1
-rw-r--r--arch/um/drivers/Kconfig16
-rw-r--r--arch/um/drivers/Makefile2
-rw-r--r--arch/um/drivers/hostaudio_kern.c2
-rw-r--r--arch/um/drivers/line.c2
-rw-r--r--arch/um/drivers/line.h3
-rw-r--r--arch/um/drivers/port_kern.c2
-rw-r--r--arch/um/drivers/slirp_kern.c2
-rw-r--r--arch/um/drivers/virt-pci.c2
-rw-r--r--arch/um/drivers/xterm_kern.c1
-rw-r--r--arch/um/include/shared/irq_kern.h1
-rw-r--r--arch/um/kernel/irq.c2
-rw-r--r--arch/x86/Makefile4
-rw-r--r--arch/x86/hyperv/hv_apic.c15
-rw-r--r--arch/x86/hyperv/hv_init.c105
-rw-r--r--arch/x86/hyperv/ivm.c263
-rw-r--r--arch/x86/include/asm/audit.h7
-rw-r--r--arch/x86/include/asm/cpufeatures.h1
-rw-r--r--arch/x86/include/asm/hyperv-tlfs.h10
-rw-r--r--arch/x86/include/asm/kexec.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h46
-rw-r--r--arch/x86/include/asm/kvm_page_track.h67
-rw-r--r--arch/x86/include/asm/mshyperv.h71
-rw-r--r--arch/x86/include/asm/reboot.h7
-rw-r--r--arch/x86/include/asm/svm.h5
-rw-r--r--arch/x86/include/asm/virtext.h154
-rw-r--r--arch/x86/include/asm/vmx.h2
-rw-r--r--arch/x86/kernel/apm_32.c6
-rw-r--r--arch/x86/kernel/audit_64.c5
-rw-r--r--arch/x86/kernel/cpu/common.c8
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c91
-rw-r--r--arch/x86/kernel/cpu/resctrl/pseudo_lock.c41
-rw-r--r--arch/x86/kernel/cpuid.c31
-rw-r--r--arch/x86/kernel/crash.c31
-rw-r--r--arch/x86/kernel/fpu/xstate.c2
-rw-r--r--arch/x86/kernel/i8259.c4
-rw-r--r--arch/x86/kernel/msr.c31
-rw-r--r--arch/x86/kernel/reboot.c66
-rw-r--r--arch/x86/kvm/Kconfig15
-rw-r--r--arch/x86/kvm/cpuid.c40
-rw-r--r--arch/x86/kvm/cpuid.h46
-rw-r--r--arch/x86/kvm/emulate.c2
-rw-r--r--arch/x86/kvm/governed_features.h21
-rw-r--r--arch/x86/kvm/hyperv.c1
-rw-r--r--arch/x86/kvm/kvm_emulate.h1
-rw-r--r--arch/x86/kvm/lapic.c29
-rw-r--r--arch/x86/kvm/mmu.h2
-rw-r--r--arch/x86/kvm/mmu/mmu.c371
-rw-r--r--arch/x86/kvm/mmu/mmu_internal.h27
-rw-r--r--arch/x86/kvm/mmu/page_track.c252
-rw-r--r--arch/x86/kvm/mmu/page_track.h58
-rw-r--r--arch/x86/kvm/mmu/paging_tmpl.h41
-rw-r--r--arch/x86/kvm/mmu/spte.c6
-rw-r--r--arch/x86/kvm/mmu/spte.h21
-rw-r--r--arch/x86/kvm/mmu/tdp_iter.c11
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c37
-rw-r--r--arch/x86/kvm/pmu.c4
-rw-r--r--arch/x86/kvm/reverse_cpuid.h1
-rw-r--r--arch/x86/kvm/svm/avic.c59
-rw-r--r--arch/x86/kvm/svm/nested.c57
-rw-r--r--arch/x86/kvm/svm/sev.c100
-rw-r--r--arch/x86/kvm/svm/svm.c327
-rw-r--r--arch/x86/kvm/svm/svm.h61
-rw-r--r--arch/x86/kvm/vmx/capabilities.h2
-rw-r--r--arch/x86/kvm/vmx/hyperv.c2
-rw-r--r--arch/x86/kvm/vmx/nested.c13
-rw-r--r--arch/x86/kvm/vmx/nested.h2
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c81
-rw-r--r--arch/x86/kvm/vmx/vmx.c228
-rw-r--r--arch/x86/kvm/vmx/vmx.h3
-rw-r--r--arch/x86/kvm/x86.c83
-rw-r--r--arch/x86/kvm/x86.h1
-rw-r--r--arch/x86/um/Makefile5
-rw-r--r--arch/x86/um/asm/mm_context.h2
-rw-r--r--arch/x86/um/checksum_32.S214
-rw-r--r--arch/xtensa/Kconfig3
-rw-r--r--arch/xtensa/include/asm/core.h9
-rw-r--r--arch/xtensa/include/asm/mtd-xip.h14
-rw-r--r--arch/xtensa/include/asm/sections.h4
-rw-r--r--arch/xtensa/kernel/perf_event.c17
-rw-r--r--arch/xtensa/kernel/setup.c3
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S11
-rw-r--r--arch/xtensa/platforms/iss/console.c27
-rw-r--r--block/blk-map.c7
-rw-r--r--block/blk-throttle.c112
-rw-r--r--block/blk-throttle.h4
-rw-r--r--block/fops.c4
-rw-r--r--block/ioctl.c2
-rw-r--r--drivers/accel/ivpu/ivpu_jsm_msg.c3
-rw-r--r--drivers/accessibility/speakup/spk_ttyio.c7
-rw-r--r--drivers/acpi/arm64/amba.c1
-rw-r--r--drivers/acpi/scan.c2
-rw-r--r--drivers/acpi/thermal.c41
-rw-r--r--drivers/amba/bus.c1
-rw-r--r--drivers/android/binder.c20
-rw-r--r--drivers/android/binderfs.c1
-rw-r--r--drivers/ata/ahci.c4
-rw-r--r--drivers/ata/ahci_ceva.c2
-rw-r--r--drivers/ata/ahci_dwc.c2
-rw-r--r--drivers/ata/ahci_mtk.c1
-rw-r--r--drivers/ata/ahci_mvebu.c2
-rw-r--r--drivers/ata/ahci_octeon.c4
-rw-r--r--drivers/ata/ahci_qoriq.c4
-rw-r--r--drivers/ata/ahci_seattle.c4
-rw-r--r--drivers/ata/ahci_sunxi.c2
-rw-r--r--drivers/ata/ahci_tegra.c5
-rw-r--r--drivers/ata/ahci_xgene.c16
-rw-r--r--drivers/ata/libahci.c7
-rw-r--r--drivers/ata/libahci_platform.c1
-rw-r--r--drivers/ata/libata-core.c367
-rw-r--r--drivers/ata/libata-eh.c158
-rw-r--r--drivers/ata/libata-sata.c112
-rw-r--r--drivers/ata/libata-scsi.c165
-rw-r--r--drivers/ata/libata-sff.c32
-rw-r--r--drivers/ata/libata.h3
-rw-r--r--drivers/ata/pata_arasan_cf.c6
-rw-r--r--drivers/ata/pata_buddha.c1
-rw-r--r--drivers/ata/pata_ep93xx.c25
-rw-r--r--drivers/ata/pata_falcon.c63
-rw-r--r--drivers/ata/pata_ftide010.c16
-rw-r--r--drivers/ata/pata_gayle.c1
-rw-r--r--drivers/ata/pata_imx.c37
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c24
-rw-r--r--drivers/ata/pata_mpc52xx.c10
-rw-r--r--drivers/ata/pata_pxa.c6
-rw-r--r--drivers/ata/pata_rb532_cf.c6
-rw-r--r--drivers/ata/pata_sl82c105.c3
-rw-r--r--drivers/ata/sata_dwc_460ex.c8
-rw-r--r--drivers/ata/sata_fsl.c12
-rw-r--r--drivers/ata/sata_gemini.c10
-rw-r--r--drivers/ata/sata_highbank.c4
-rw-r--r--drivers/ata/sata_inic162x.c2
-rw-r--r--drivers/ata/sata_mv.c7
-rw-r--r--drivers/ata/sata_nv.c2
-rw-r--r--drivers/ata/sata_rcar.c17
-rw-r--r--drivers/ata/sata_sil24.c4
-rw-r--r--drivers/ata/sata_sx4.c1
-rw-r--r--drivers/base/core.c30
-rw-r--r--drivers/base/cpu.c19
-rw-r--r--drivers/base/dd.c2
-rw-r--r--drivers/base/node.c1
-rw-r--r--drivers/base/test/.kunitconfig2
-rw-r--r--drivers/base/test/Kconfig4
-rw-r--r--drivers/base/test/Makefile3
-rw-r--r--drivers/base/test/platform-device-test.c224
-rw-r--r--drivers/base/test/root-device-test.c112
-rw-r--r--drivers/base/test/test_async_driver_probe.c2
-rw-r--r--drivers/block/drbd/drbd_main.c2
-rw-r--r--drivers/block/null_blk/main.c12
-rw-r--r--drivers/block/rbd.c4
-rw-r--r--drivers/bluetooth/hci_ldisc.c15
-rw-r--r--drivers/bus/mhi/host/boot.c34
-rw-r--r--drivers/bus/mhi/host/init.c2
-rw-r--r--drivers/bus/mhi/host/main.c1
-rw-r--r--drivers/bus/mhi/host/pci_generic.c53
-rw-r--r--drivers/bus/mhi/host/pm.c5
-rw-r--r--drivers/cdx/controller/cdx_controller.c3
-rw-r--r--drivers/cdx/controller/cdx_rpmsg.c3
-rw-r--r--drivers/char/agp/uninorth-agp.c1
-rw-r--r--drivers/char/bsr.c3
-rw-r--r--drivers/char/tpm/tpm-chip.c11
-rw-r--r--drivers/char/tpm/tpm-interface.c21
-rw-r--r--drivers/char/tpm/tpm.h4
-rw-r--r--drivers/char/tpm/tpm2-space.c2
-rw-r--r--drivers/char/tpm/tpm_crb.c38
-rw-r--r--drivers/char/ttyprintk.c5
-rw-r--r--drivers/char/xillybus/Kconfig2
-rw-r--r--drivers/clocksource/Kconfig7
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/arm_arch_timer.c7
-rw-r--r--drivers/clocksource/hyperv_timer.c2
-rw-r--r--drivers/clocksource/timer-loongson1-pwm.c2
-rw-r--r--drivers/clocksource/timer-oxnas-rps.c288
-rw-r--r--drivers/clocksource/timer-sun5i.c288
-rw-r--r--drivers/counter/Kconfig2
-rw-r--r--drivers/counter/microchip-tcb-capture.c1
-rw-r--r--drivers/counter/rz-mtu3-cnt.c4
-rw-r--r--drivers/cpufreq/cpufreq.c53
-rw-r--r--drivers/cpufreq/cpufreq_governor.c4
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c2
-rw-r--r--drivers/dma/Kconfig21
-rw-r--r--drivers/dma/Makefile7
-rw-r--r--drivers/dma/apple-admac.c3
-rw-r--r--drivers/dma/at_hdmac.c2
-rw-r--r--drivers/dma/bcm-sba-raid.c4
-rw-r--r--drivers/dma/bestcomm/bestcomm.c3
-rw-r--r--drivers/dma/dma-jz4780.c1
-rw-r--r--drivers/dma/dmaengine.c82
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c1
-rw-r--r--drivers/dma/dw/rzn1-dmamux.c4
-rw-r--r--drivers/dma/ep93xx_dma.c4
-rw-r--r--drivers/dma/fsl-edma-common.c307
-rw-r--r--drivers/dma/fsl-edma-common.h127
-rw-r--r--drivers/dma/fsl-edma-main.c (renamed from drivers/dma/fsl-edma.c)313
-rw-r--r--drivers/dma/fsl-qdma.c4
-rw-r--r--drivers/dma/fsl_raid.c3
-rw-r--r--drivers/dma/fsldma.c3
-rw-r--r--drivers/dma/idxd/device.c41
-rw-r--r--drivers/dma/idxd/dma.c5
-rw-r--r--drivers/dma/idxd/idxd.h14
-rw-r--r--drivers/dma/idxd/init.c54
-rw-r--r--drivers/dma/idxd/perfmon.c7
-rw-r--r--drivers/dma/idxd/sysfs.c40
-rw-r--r--drivers/dma/img-mdc-dma.c1
-rw-r--r--drivers/dma/imx-dma.c2
-rw-r--r--drivers/dma/imx-sdma.c1
-rw-r--r--drivers/dma/ioat/dca.c2
-rw-r--r--drivers/dma/ioat/dma.h1
-rw-r--r--drivers/dma/ioat/init.c19
-rw-r--r--drivers/dma/ipu/Makefile2
-rw-r--r--drivers/dma/ipu/ipu_idmac.c1801
-rw-r--r--drivers/dma/ipu/ipu_intern.h173
-rw-r--r--drivers/dma/ipu/ipu_irq.c367
-rw-r--r--drivers/dma/lgm/lgm-dma.c7
-rw-r--r--drivers/dma/lpc18xx-dmamux.c4
-rw-r--r--drivers/dma/mcf-edma-main.c (renamed from drivers/dma/mcf-edma.c)43
-rw-r--r--drivers/dma/mediatek/mtk-cqdma.c1
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c1
-rw-r--r--drivers/dma/mediatek/mtk-uart-apdma.c2
-rw-r--r--drivers/dma/mpc512x_dma.c4
-rw-r--r--drivers/dma/mxs-dma.c1
-rw-r--r--drivers/dma/nbpfaxi.c1
-rw-r--r--drivers/dma/owl-dma.c5
-rw-r--r--drivers/dma/ppc4xx/adma.c2
-rw-r--r--drivers/dma/qcom/gpi.c3
-rw-r--r--drivers/dma/qcom/hidma.c14
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c5
-rw-r--r--drivers/dma/sh/rz-dmac.c17
-rw-r--r--drivers/dma/sh/shdmac.c8
-rw-r--r--drivers/dma/sprd-dma.c2
-rw-r--r--drivers/dma/ste_dma40.c4
-rw-r--r--drivers/dma/stm32-dma.c3
-rw-r--r--drivers/dma/stm32-dmamux.c4
-rw-r--r--drivers/dma/stm32-mdma.c1
-rw-r--r--drivers/dma/sun6i-dma.c2
-rw-r--r--drivers/dma/tegra186-gpc-dma.c2
-rw-r--r--drivers/dma/tegra20-apb-dma.c1
-rw-r--r--drivers/dma/tegra210-adma.c3
-rw-r--r--drivers/dma/ti/dma-crossbar.c5
-rw-r--r--drivers/dma/ti/edma.c1
-rw-r--r--drivers/dma/ti/k3-udma-private.c2
-rw-r--r--drivers/dma/ti/k3-udma.c1
-rw-r--r--drivers/dma/ti/omap-dma.c2
-rw-r--r--drivers/dma/xgene-dma.c3
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c74
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c3
-rw-r--r--drivers/extcon/Kconfig1
-rw-r--r--drivers/firmware/google/Kconfig2
-rw-r--r--drivers/firmware/stratix10-rsu.c100
-rw-r--r--drivers/firmware/stratix10-svc.c20
-rw-r--r--drivers/fpga/Kconfig2
-rw-r--r--drivers/fpga/Makefile3
-rw-r--r--drivers/fpga/altera-fpga2sdram.c2
-rw-r--r--drivers/fpga/altera-freeze-bridge.c11
-rw-r--r--drivers/fpga/altera-pr-ip-core-plat.c9
-rw-r--r--drivers/fpga/dfl-fme-main.c17
-rw-r--r--drivers/fpga/dfl-fme-mgr.c4
-rw-r--r--drivers/fpga/dfl-pci.c15
-rw-r--r--drivers/fpga/fpga-bridge.c48
-rw-r--r--drivers/fpga/fpga-mgr.c25
-rw-r--r--drivers/fpga/fpga-region.c37
-rw-r--r--drivers/fpga/microchip-spi.c2
-rw-r--r--drivers/fpga/of-fpga-region.c2
-rw-r--r--drivers/fpga/socfpga-a10.c7
-rw-r--r--drivers/fpga/socfpga.c7
-rw-r--r--drivers/fpga/stratix10-soc.c1
-rw-r--r--drivers/fpga/tests/.kunitconfig5
-rw-r--r--drivers/fpga/tests/Kconfig11
-rw-r--r--drivers/fpga/tests/Makefile6
-rw-r--r--drivers/fpga/tests/fpga-bridge-test.c175
-rw-r--r--drivers/fpga/tests/fpga-mgr-test.c327
-rw-r--r--drivers/fpga/tests/fpga-region-test.c211
-rw-r--r--drivers/fpga/ts73xx-fpga.c4
-rw-r--r--drivers/fpga/xilinx-pr-decoupler.c4
-rw-r--r--drivers/fpga/zynq-fpga.c4
-rw-r--r--drivers/fsi/Kconfig17
-rw-r--r--drivers/fsi/Makefile2
-rw-r--r--drivers/fsi/fsi-core.c154
-rw-r--r--drivers/fsi/fsi-master-aspeed.c4
-rw-r--r--drivers/fsi/fsi-master-ast-cf.c5
-rw-r--r--drivers/fsi/fsi-master-gpio.c2
-rw-r--r--drivers/fsi/fsi-master-hub.c2
-rw-r--r--drivers/fsi/fsi-master-i2cr.c316
-rw-r--r--drivers/fsi/fsi-master-i2cr.h33
-rw-r--r--drivers/fsi/fsi-master.h2
-rw-r--r--drivers/fsi/fsi-occ.c2
-rw-r--r--drivers/fsi/fsi-sbefifo.c53
-rw-r--r--drivers/fsi/fsi-scom.c8
-rw-r--r--drivers/fsi/fsi-slave.h28
-rw-r--r--drivers/fsi/i2cr-scom.c154
-rw-r--r--drivers/gpio/gpio-zynq.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c24
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c68
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c21
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c9
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h2
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h18
-rw-r--r--drivers/gpu/drm/amd/include/discovery.h38
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h16
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c54
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c102
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c120
-rw-r--r--drivers/gpu/drm/i915/gvt/page_track.c10
-rw-r--r--drivers/gpu/drm/i915/i915_request.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c5
-rw-r--r--drivers/hid/Kconfig13
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/hid-apple.c3
-rw-r--r--drivers/hid/hid-cp2112.c169
-rw-r--r--drivers/hid/hid-google-stadiaff.c158
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hid/hid-input.c21
-rw-r--r--drivers/hid/hid-logitech-dj.c16
-rw-r--r--drivers/hid/hid-logitech-hidpp.c121
-rw-r--r--drivers/hid/hid-multitouch.c13
-rw-r--r--drivers/hid/hid-nvidia-shield.c428
-rw-r--r--drivers/hid/hid-roccat-arvo.c20
-rw-r--r--drivers/hid/hid-roccat-isku.c21
-rw-r--r--drivers/hid/hid-roccat-kone.c24
-rw-r--r--drivers/hid/hid-roccat-koneplus.c22
-rw-r--r--drivers/hid/hid-roccat-konepure.c22
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c22
-rw-r--r--drivers/hid/hid-roccat-pyra.c22
-rw-r--r--drivers/hid/hid-roccat-ryos.c20
-rw-r--r--drivers/hid/hid-roccat-savu.c20
-rw-r--r--drivers/hid/hid-roccat.c2
-rw-r--r--drivers/hid/hid-sensor-hub.c2
-rw-r--r--drivers/hid/hid-steelseries.c311
-rw-r--r--drivers/hid/hid-uclogic-core.c13
-rw-r--r--drivers/hid/hid-wiimote-debug.c10
-rw-r--r--drivers/hid/hidraw.c18
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-elan.c50
-rw-r--r--drivers/hid/wacom.h1
-rw-r--r--drivers/hid/wacom_sys.c44
-rw-r--r--drivers/hid/wacom_wac.c7
-rw-r--r--drivers/hid/wacom_wac.h4
-rw-r--r--drivers/hv/connection.c16
-rw-r--r--drivers/hv/hv.c131
-rw-r--r--drivers/hv/hv_balloon.c82
-rw-r--r--drivers/hv/hv_common.c48
-rw-r--r--drivers/hv/hyperv_vmbus.h11
-rw-r--r--drivers/hv/vmbus_drv.c3
-rw-r--r--drivers/hwmon/peci/cputemp.c18
-rw-r--r--drivers/hwmon/peci/dimmtemp.c50
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c8
-rw-r--r--drivers/hwspinlock/qcom_hwspinlock.c11
-rw-r--r--drivers/hwspinlock/u8500_hsem.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-core.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-dummy.c12
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-cfg.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c118
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h4
-rw-r--r--drivers/hwtracing/coresight/coresight-platform.c46
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c5
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.c43
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.h2
-rw-r--r--drivers/hwtracing/ptt/hisi_ptt.c8
-rw-r--r--drivers/i2c/Kconfig9
-rw-r--r--drivers/i2c/Makefile1
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c11
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c18
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c7
-rw-r--r--drivers/i2c/busses/i2c-au1550.c15
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c20
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c16
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c27
-rw-r--r--drivers/i2c/busses/i2c-cpm.c4
-rw-r--r--drivers/i2c/busses/i2c-davinci.c16
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c12
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c22
-rw-r--r--drivers/i2c/busses/i2c-dln2.c6
-rw-r--r--drivers/i2c/busses/i2c-emev2.c2
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c12
-rw-r--r--drivers/i2c/busses/i2c-gxp.c3
-rw-r--r--drivers/i2c/busses/i2c-hisi.c12
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c10
-rw-r--r--drivers/i2c/busses/i2c-i801.c6
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c3
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c10
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c23
-rw-r--r--drivers/i2c/busses/i2c-imx.c10
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c2
-rw-r--r--drivers/i2c/busses/i2c-kempld.c19
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c9
-rw-r--r--drivers/i2c/busses/i2c-meson.c1
-rw-r--r--drivers/i2c/busses/i2c-microchip-corei2c.c5
-rw-r--r--drivers/i2c/busses/i2c-mlxbf.c60
-rw-r--r--drivers/i2c/busses/i2c-mlxcpld.c10
-rw-r--r--drivers/i2c/busses/i2c-mpc.c3
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c12
-rw-r--r--drivers/i2c/busses/i2c-mt7621.c3
-rw-r--r--drivers/i2c/busses/i2c-mxs.c1
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c4
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c12
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c1
-rw-r--r--drivers/i2c/busses/i2c-ocores.c10
-rw-r--r--drivers/i2c/busses/i2c-owl.c3
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c1
-rw-r--r--drivers/i2c/busses/i2c-pnx.c15
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c1
-rw-r--r--drivers/i2c/busses/i2c-pxa.c20
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c8
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c4
-rw-r--r--drivers/i2c/busses/i2c-qup.c16
-rw-r--r--drivers/i2c/busses/i2c-rcar.c12
-rw-r--r--drivers/i2c/busses/i2c-riic.c1
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c20
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c22
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c20
-rw-r--r--drivers/i2c/busses/i2c-sprd.c1
-rw-r--r--drivers/i2c/busses/i2c-st.c3
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c3
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c8
-rw-r--r--drivers/i2c/busses/i2c-synquacer.c28
-rw-r--r--drivers/i2c/busses/i2c-tegra-bpmp.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c2
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c4
-rw-r--r--drivers/i2c/busses/i2c-virtio.c8
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c6
-rw-r--r--drivers/i2c/i2c-atr.c710
-rw-r--r--drivers/i2c/muxes/Kconfig6
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpmux.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-ltc4306.c3
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c101
-rw-r--r--drivers/i3c/master.c6
-rw-r--r--drivers/i3c/master/ast2600-i3c-master.c1
-rw-r--r--drivers/i3c/master/i3c-master-cdns.c1
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v1.c2
-rw-r--r--drivers/i3c/master/svc-i3c-master.c19
-rw-r--r--drivers/iio/accel/adxl313_i2c.c8
-rw-r--r--drivers/iio/accel/adxl355_i2c.c15
-rw-r--r--drivers/iio/accel/adxl372_spi.c1
-rw-r--r--drivers/iio/accel/bma180.c1
-rw-r--r--drivers/iio/accel/da280.c11
-rw-r--r--drivers/iio/accel/kxsd9-spi.c1
-rw-r--r--drivers/iio/accel/mma8452.c2
-rw-r--r--drivers/iio/adc/Kconfig12
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/ab8500-gpadc.c6
-rw-r--r--drivers/iio/adc/ad7124.c2
-rw-r--r--drivers/iio/adc/ad7192.c31
-rw-r--r--drivers/iio/adc/ad9467.c2
-rw-r--r--drivers/iio/adc/adi-axi-adc.c3
-rw-r--r--drivers/iio/adc/at91_adc.c1
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c4
-rw-r--r--drivers/iio/adc/cc10001_adc.c1
-rw-r--r--drivers/iio/adc/imx7d_adc.c2
-rw-r--r--drivers/iio/adc/ina2xx-adc.c2
-rw-r--r--drivers/iio/adc/lpc32xx_adc.c4
-rw-r--r--drivers/iio/adc/men_z188_adc.c1
-rw-r--r--drivers/iio/adc/meson_saradc.c243
-rw-r--r--drivers/iio/adc/npcm_adc.c4
-rw-r--r--drivers/iio/adc/palmas_gpadc.c7
-rw-r--r--drivers/iio/adc/qcom-spmi-adc5.c2
-rw-r--r--drivers/iio/adc/qcom-spmi-iadc.c1
-rw-r--r--drivers/iio/adc/rockchip_saradc.c3
-rw-r--r--drivers/iio/adc/sc27xx_adc.c1
-rw-r--r--drivers/iio/adc/spear_adc.c4
-rw-r--r--drivers/iio/adc/stm32-adc-core.c5
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c3
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c4
-rw-r--r--drivers/iio/adc/stmpe-adc.c2
-rw-r--r--drivers/iio/adc/sun20i-gpadc-iio.c276
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c1
-rw-r--r--drivers/iio/adc/ti-lmp92064.c53
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c1
-rw-r--r--drivers/iio/amplifiers/Kconfig1
-rw-r--r--drivers/iio/amplifiers/ad8366.c15
-rw-r--r--drivers/iio/cdc/ad7150.c10
-rw-r--r--drivers/iio/chemical/scd4x.c79
-rw-r--r--drivers/iio/common/Kconfig1
-rw-r--r--drivers/iio/common/Makefile1
-rw-r--r--drivers/iio/common/inv_sensors/Kconfig7
-rw-r--r--drivers/iio/common/inv_sensors/Makefile6
-rw-r--r--drivers/iio/common/inv_sensors/inv_sensors_timestamp.c (renamed from drivers/iio/imu/inv_icm42600/inv_icm42600_timestamp.c)100
-rw-r--r--drivers/iio/dac/Kconfig11
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/mcp4728.c618
-rw-r--r--drivers/iio/filter/admv8818.c65
-rw-r--r--drivers/iio/frequency/admv1013.c14
-rw-r--r--drivers/iio/imu/adis16475.c5
-rw-r--r--drivers/iio/imu/inv_icm42600/Kconfig1
-rw-r--r--drivers/iio/imu/inv_icm42600/Makefile1
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c35
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c33
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_core.c14
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c35
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_timestamp.h85
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig1
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c33
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h22
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c102
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c6
-rw-r--r--drivers/iio/industrialio-core.c161
-rw-r--r--drivers/iio/industrialio-event.c2
-rw-r--r--drivers/iio/industrialio-trigger.c2
-rw-r--r--drivers/iio/light/cm3605.c2
-rw-r--r--drivers/iio/light/rohm-bu27008.c630
-rw-r--r--drivers/iio/light/vcnl4000.c710
-rw-r--r--drivers/iio/potentiometer/mcp4018.c35
-rw-r--r--drivers/iio/potentiometer/mcp4531.c139
-rw-r--r--drivers/iio/proximity/Kconfig12
-rw-r--r--drivers/iio/proximity/Makefile1
-rw-r--r--drivers/iio/proximity/irsd200.c958
-rw-r--r--drivers/iio/proximity/isl29501.c2
-rw-r--r--drivers/iio/proximity/mb1232.c7
-rw-r--r--drivers/iio/temperature/mlx90614.c2
-rw-r--r--drivers/iio/trigger/stm32-lptimer-trigger.c9
-rw-r--r--drivers/infiniband/core/cache.c11
-rw-r--r--drivers/infiniband/core/cma.c32
-rw-r--r--drivers/infiniband/core/iwpm_util.c2
-rw-r--r--drivers/infiniband/core/netlink.c2
-rw-r--r--drivers/infiniband/core/uverbs_main.c35
-rw-r--r--drivers/infiniband/core/uverbs_std_types_counters.c2
-rw-r--r--drivers/infiniband/core/verbs.c109
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h35
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c84
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h55
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c255
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h6
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c277
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c47
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c66
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c38
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h23
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c85
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c2
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h13
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c8
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h10
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c24
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h18
-rw-r--r--drivers/infiniband/hw/erdma/erdma_qp.c2
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c434
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h36
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile1
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c8
-rw-r--r--drivers/infiniband/hw/hfi1/device.c72
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h4
-rw-r--r--drivers/infiniband/hw/hfi1/pin_system.c474
-rw-r--r--drivers/infiniband/hw/hfi1/pinning.h20
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c9
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c441
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h17
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h35
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c151
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h14
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c86
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c28
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c75
-rw-r--r--drivers/infiniband/hw/irdma/cm.c90
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c23
-rw-r--r--drivers/infiniband/hw/irdma/hw.c63
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_hw.c1
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_hw.h2
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_hw.c1
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_hw.h1
-rw-r--r--drivers/infiniband/hw/irdma/irdma.h1
-rw-r--r--drivers/infiniband/hw/irdma/main.h8
-rw-r--r--drivers/infiniband/hw/irdma/type.h3
-rw-r--r--drivers/infiniband/hw/irdma/uk.c218
-rw-r--r--drivers/infiniband/hw/irdma/user.h19
-rw-r--r--drivers/infiniband/hw/irdma/utils.c25
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c259
-rw-r--r--drivers/infiniband/hw/irdma/verbs.h5
-rw-r--r--drivers/infiniband/hw/mlx4/main.c47
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c40
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c8
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c20
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.c1
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c17
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c159
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c45
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c60
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c1
-rw-r--r--drivers/infiniband/sw/siw/siw.h4
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c1
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c62
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c4
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c52
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c12
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c19
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c15
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h2
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c4
-rw-r--r--drivers/input/gameport/Kconfig4
-rw-r--r--drivers/input/gameport/gameport.c26
-rw-r--r--drivers/input/joystick/xpad.c25
-rw-r--r--drivers/input/keyboard/adp5588-keys.c17
-rw-r--r--drivers/input/keyboard/amikbd.c25
-rw-r--r--drivers/input/keyboard/bcm-keypad.c24
-rw-r--r--drivers/input/keyboard/gpio_keys.c21
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c8
-rw-r--r--drivers/input/keyboard/lm8323.c95
-rw-r--r--drivers/input/keyboard/lm8333.c44
-rw-r--r--drivers/input/keyboard/lpc32xx-keys.c9
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c65
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c127
-rw-r--r--drivers/input/keyboard/nspire-keypad.c3
-rw-r--r--drivers/input/keyboard/omap4-keypad.c9
-rw-r--r--drivers/input/keyboard/opencores-kbd.c9
-rw-r--r--drivers/input/keyboard/pinephone-keyboard.c20
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c9
-rw-r--r--drivers/input/keyboard/qt1070.c46
-rw-r--r--drivers/input/keyboard/qt2160.c130
-rw-r--r--drivers/input/keyboard/sun4i-lradc-keys.c6
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c141
-rw-r--r--drivers/input/keyboard/tegra-kbc.c2
-rw-r--r--drivers/input/keyboard/tm2-touchkey.c1
-rw-r--r--drivers/input/misc/Kconfig4
-rw-r--r--drivers/input/misc/cpcap-pwrbutton.c12
-rw-r--r--drivers/input/misc/da9063_onkey.c9
-rw-r--r--drivers/input/misc/gpio-vibra.c22
-rw-r--r--drivers/input/misc/iqs269a.c2
-rw-r--r--drivers/input/misc/iqs626a.c2
-rw-r--r--drivers/input/misc/iqs7222.c478
-rw-r--r--drivers/input/misc/mma8450.c2
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c1
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c1
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c1
-rw-r--r--drivers/input/misc/pwm-beeper.c19
-rw-r--r--drivers/input/misc/pwm-vibra.c32
-rw-r--r--drivers/input/misc/rotary_encoder.c9
-rw-r--r--drivers/input/misc/sparcspkr.c3
-rw-r--r--drivers/input/mouse/elan_i2c_core.c9
-rw-r--r--drivers/input/mouse/psmouse-smbus.c19
-rw-r--r--drivers/input/serio/apbps2.c2
-rw-r--r--drivers/input/serio/i8042-acpipnpio.h7
-rw-r--r--drivers/input/serio/i8042-sparcio.h4
-rw-r--r--drivers/input/serio/rpckbd.c8
-rw-r--r--drivers/input/serio/serport.c18
-rw-r--r--drivers/input/serio/xilinx_ps2.c4
-rw-r--r--drivers/input/touchscreen/Kconfig14
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c72
-rw-r--r--drivers/input/touchscreen/bu21029_ts.c51
-rw-r--r--drivers/input/touchscreen/chipone_icn8318.c8
-rw-r--r--drivers/input/touchscreen/cy8ctma140.c8
-rw-r--r--drivers/input/touchscreen/cyttsp5.c2
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c10
-rw-r--r--drivers/input/touchscreen/ektf2127.c8
-rw-r--r--drivers/input/touchscreen/elants_i2c.c22
-rw-r--r--drivers/input/touchscreen/exc3000.c17
-rw-r--r--drivers/input/touchscreen/goodix.c41
-rw-r--r--drivers/input/touchscreen/ili210x.c2
-rw-r--r--drivers/input/touchscreen/iqs5xx.c2
-rw-r--r--drivers/input/touchscreen/iqs7211.c2557
-rw-r--r--drivers/input/touchscreen/lpc32xx_ts.c98
-rw-r--r--drivers/input/touchscreen/melfas_mip4.c9
-rw-r--r--drivers/input/touchscreen/mms114.c89
-rw-r--r--drivers/input/touchscreen/novatek-nvt-ts.c10
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c40
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c30
-rw-r--r--drivers/input/touchscreen/resistive-adc-touch.c8
-rw-r--r--drivers/input/touchscreen/silead.c8
-rw-r--r--drivers/input/touchscreen/sis_i2c.c20
-rw-r--r--drivers/input/touchscreen/surface3_spi.c13
-rw-r--r--drivers/input/touchscreen/sx8654.c10
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c1
-rw-r--r--drivers/interconnect/Makefile2
-rw-r--r--drivers/interconnect/core.c84
-rw-r--r--drivers/interconnect/debugfs-client.c168
-rw-r--r--drivers/interconnect/icc-clk.c2
-rw-r--r--drivers/interconnect/imx/imx8mp.c1
-rw-r--r--drivers/interconnect/internal.h5
-rw-r--r--drivers/interconnect/qcom/bcm-voter.c43
-rw-r--r--drivers/interconnect/qcom/bcm-voter.h8
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c4
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c5
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.h10
-rw-r--r--drivers/interconnect/qcom/msm8916.c2
-rw-r--r--drivers/interconnect/qcom/msm8939.c2
-rw-r--r--drivers/interconnect/qcom/msm8974.c5
-rw-r--r--drivers/interconnect/qcom/msm8996.c3
-rw-r--r--drivers/interconnect/qcom/osm-l3.c4
-rw-r--r--drivers/interconnect/qcom/qcm2290.c10
-rw-r--r--drivers/interconnect/qcom/qcs404.c2
-rw-r--r--drivers/interconnect/qcom/qdu1000.c3
-rw-r--r--drivers/interconnect/qcom/sa8775p.c3
-rw-r--r--drivers/interconnect/qcom/sc7180.c1616
-rw-r--r--drivers/interconnect/qcom/sc7280.c3
-rw-r--r--drivers/interconnect/qcom/sc8180x.c3
-rw-r--r--drivers/interconnect/qcom/sc8280xp.c3
-rw-r--r--drivers/interconnect/qcom/sdm660.c3
-rw-r--r--drivers/interconnect/qcom/sdm670.c1389
-rw-r--r--drivers/interconnect/qcom/sdm845.c1658
-rw-r--r--drivers/interconnect/qcom/sdx55.c846
-rw-r--r--drivers/interconnect/qcom/sdx65.c833
-rw-r--r--drivers/interconnect/qcom/sm6350.c1529
-rw-r--r--drivers/interconnect/qcom/sm8150.c1689
-rw-r--r--drivers/interconnect/qcom/sm8250.c1822
-rw-r--r--drivers/interconnect/qcom/sm8250.h6
-rw-r--r--drivers/interconnect/qcom/sm8350.c1801
-rw-r--r--drivers/interconnect/qcom/sm8450.c5
-rw-r--r--drivers/interconnect/qcom/sm8550.c4
-rw-r--r--drivers/interconnect/qcom/smd-rpm.c2
-rw-r--r--drivers/iommu/amd/amd_iommu.h7
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h22
-rw-r--r--drivers/iommu/amd/init.c131
-rw-r--r--drivers/iommu/amd/iommu.c86
-rw-r--r--drivers/iommu/amd/iommu_v2.c7
-rw-r--r--drivers/iommu/apple-dart.c2
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c2
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c45
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h2
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c2
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c7
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c1
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c71
-rw-r--r--drivers/iommu/dma-iommu.c26
-rw-r--r--drivers/iommu/dma-iommu.h8
-rw-r--r--drivers/iommu/intel/iommu.c244
-rw-r--r--drivers/iommu/intel/iommu.h9
-rw-r--r--drivers/iommu/intel/pasid.c4
-rw-r--r--drivers/iommu/intel/pasid.h2
-rw-r--r--drivers/iommu/intel/svm.c62
-rw-r--r--drivers/iommu/iommu-sva.c29
-rw-r--r--drivers/iommu/iommu-sysfs.c8
-rw-r--r--drivers/iommu/iommu.c483
-rw-r--r--drivers/iommu/ipmmu-vmsa.c21
-rw-r--r--drivers/iommu/mtk_iommu.c151
-rw-r--r--drivers/iommu/of_iommu.c2
-rw-r--r--drivers/iommu/rockchip-iommu.c50
-rw-r--r--drivers/iommu/sprd-iommu.c2
-rw-r--r--drivers/iommu/tegra-smmu.c2
-rw-r--r--drivers/iommu/virtio-iommu.c2
-rw-r--r--drivers/ipack/devices/ipoctal.c7
-rw-r--r--drivers/isdn/capi/capi.c8
-rw-r--r--drivers/leds/Kconfig9
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/blink/Kconfig4
-rw-r--r--drivers/leds/flash/Kconfig2
-rw-r--r--drivers/leds/flash/leds-qcom-flash.c5
-rw-r--r--drivers/leds/led-class-multicolor.c8
-rw-r--r--drivers/leds/led-class.c76
-rw-r--r--drivers/leds/led-core.c8
-rw-r--r--drivers/leds/leds-an30259a.c2
-rw-r--r--drivers/leds/leds-ariel.c2
-rw-r--r--drivers/leds/leds-aw200xx.c4
-rw-r--r--drivers/leds/leds-aw2013.c38
-rw-r--r--drivers/leds/leds-cpcap.c2
-rw-r--r--drivers/leds/leds-cr0014114.c2
-rw-r--r--drivers/leds/leds-ip30.c8
-rw-r--r--drivers/leds/leds-is31fl32xx.c1
-rw-r--r--drivers/leds/leds-lp5521.c5
-rw-r--r--drivers/leds/leds-lp5523.c4
-rw-r--r--drivers/leds/leds-lp5562.c4
-rw-r--r--drivers/leds/leds-lp8501.c4
-rw-r--r--drivers/leds/leds-mlxreg.c1
-rw-r--r--drivers/leds/leds-ns2.c2
-rw-r--r--drivers/leds/leds-pca9532.c1
-rw-r--r--drivers/leds/leds-pca995x.c204
-rw-r--r--drivers/leds/leds-pm8058.c1
-rw-r--r--drivers/leds/leds-pwm.c4
-rw-r--r--drivers/leds/leds-spi-byte.c2
-rw-r--r--drivers/leds/leds-syscon.c3
-rw-r--r--drivers/leds/leds-ti-lmu-common.c2
-rw-r--r--drivers/leds/leds-tlc591xx.c1
-rw-r--r--drivers/leds/leds-turris-omnia.c13
-rw-r--r--drivers/leds/rgb/Kconfig12
-rw-r--r--drivers/leds/rgb/Makefile1
-rw-r--r--drivers/leds/rgb/leds-group-multicolor.c169
-rw-r--r--drivers/leds/rgb/leds-qcom-lpg.c2
-rw-r--r--drivers/leds/simple/Kconfig15
-rw-r--r--drivers/leds/simple/Makefile1
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio-core.c4
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c57
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio.h2
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c13
-rw-r--r--drivers/leds/trigger/ledtrig-tty.c12
-rw-r--r--drivers/leds/uleds.c12
-rw-r--r--drivers/mailbox/arm_mhu.c1
-rw-r--r--drivers/mailbox/arm_mhu_db.c1
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c10
-rw-r--r--drivers/mailbox/bcm-pdc-mailbox.c18
-rw-r--r--drivers/mailbox/hi3660-mailbox.c1
-rw-r--r--drivers/mailbox/hi6220-mailbox.c1
-rw-r--r--drivers/mailbox/imx-mailbox.c3
-rw-r--r--drivers/mailbox/mailbox-mpfs.c1
-rw-r--r--drivers/mailbox/mailbox-test.c8
-rw-r--r--drivers/mailbox/mailbox.c1
-rw-r--r--drivers/mailbox/mtk-adsp-mailbox.c3
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c2
-rw-r--r--drivers/mailbox/omap-mailbox.c2
-rw-r--r--drivers/mailbox/platform_mhu.c5
-rw-r--r--drivers/mailbox/qcom-ipcc.c4
-rw-r--r--drivers/mailbox/rockchip-mailbox.c8
-rw-r--r--drivers/mailbox/sprd-mailbox.c2
-rw-r--r--drivers/mailbox/stm32-ipcc.c1
-rw-r--r--drivers/mailbox/tegra-hsp.c5
-rw-r--r--drivers/mailbox/ti-msgmgr.c16
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c2
-rw-r--r--drivers/media/cec/core/cec-adap.c8
-rw-r--r--drivers/media/cec/core/cec-notifier.c1
-rw-r--r--drivers/media/cec/core/cec-pin-priv.h1
-rw-r--r--drivers/media/cec/core/cec-pin.c32
-rw-r--r--drivers/media/cec/i2c/ch7322.c2
-rw-r--r--drivers/media/cec/platform/cec-gpio/cec-gpio.c10
-rw-r--r--drivers/media/cec/platform/meson/ao-cec.c2
-rw-r--r--drivers/media/cec/platform/stm32/stm32-cec.c1
-rw-r--r--drivers/media/cec/platform/tegra/tegra_cec.c6
-rw-r--r--drivers/media/common/siano/smsdvb-debugfs.c334
-rw-r--r--drivers/media/common/siano/smsendian.c2
-rw-r--r--drivers/media/dvb-frontends/cx24120.c4
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c29
-rw-r--r--drivers/media/dvb-frontends/mb86a16.c10
-rw-r--r--drivers/media/dvb-frontends/mn88443x.c2
-rw-r--r--drivers/media/firewire/firedtv-avc.c4
-rw-r--r--drivers/media/i2c/Kconfig298
-rw-r--r--drivers/media/i2c/Makefile4
-rw-r--r--drivers/media/i2c/ad5820.c2
-rw-r--r--drivers/media/i2c/adv748x/adv748x-csi2.c13
-rw-r--r--drivers/media/i2c/ccs-pll.c2
-rw-r--r--drivers/media/i2c/ccs/Kconfig5
-rw-r--r--drivers/media/i2c/ccs/ccs-data.c101
-rw-r--r--drivers/media/i2c/ds90ub913.c903
-rw-r--r--drivers/media/i2c/ds90ub953.c1430
-rw-r--r--drivers/media/i2c/ds90ub960.c4059
-rw-r--r--drivers/media/i2c/dw9719.c350
-rw-r--r--drivers/media/i2c/et8ek8/Kconfig4
-rw-r--r--drivers/media/i2c/hi556.c2
-rw-r--r--drivers/media/i2c/hi847.c2
-rw-r--r--drivers/media/i2c/imx208.c2
-rw-r--r--drivers/media/i2c/imx219.c292
-rw-r--r--drivers/media/i2c/imx290.c369
-rw-r--r--drivers/media/i2c/imx296.c2
-rw-r--r--drivers/media/i2c/imx319.c2
-rw-r--r--drivers/media/i2c/imx355.c2
-rw-r--r--drivers/media/i2c/imx415.c2
-rw-r--r--drivers/media/i2c/isl7998x.c2
-rw-r--r--drivers/media/i2c/max9286.c29
-rw-r--r--drivers/media/i2c/mt9m111.c2
-rw-r--r--drivers/media/i2c/og01a1b.c2
-rw-r--r--drivers/media/i2c/ov01a10.c2
-rw-r--r--drivers/media/i2c/ov08x40.c16
-rw-r--r--drivers/media/i2c/ov13858.c2
-rw-r--r--drivers/media/i2c/ov13b10.c129
-rw-r--r--drivers/media/i2c/ov2640.c2
-rw-r--r--drivers/media/i2c/ov2680.c1290
-rw-r--r--drivers/media/i2c/ov2740.c2
-rw-r--r--drivers/media/i2c/ov5640.c24
-rw-r--r--drivers/media/i2c/ov5670.c2
-rw-r--r--drivers/media/i2c/ov5675.c2
-rw-r--r--drivers/media/i2c/ov5693.c587
-rw-r--r--drivers/media/i2c/ov7740.c2
-rw-r--r--drivers/media/i2c/rdacm20.c16
-rw-r--r--drivers/media/i2c/rdacm21.c17
-rw-r--r--drivers/media/i2c/st-mipid02.c25
-rw-r--r--drivers/media/i2c/tc358743.c8
-rw-r--r--drivers/media/i2c/tc358746.c15
-rw-r--r--drivers/media/i2c/tvp5150.c4
-rw-r--r--drivers/media/i2c/video-i2c.c2
-rw-r--r--drivers/media/pci/Kconfig2
-rw-r--r--drivers/media/pci/bt8xx/Kconfig2
-rw-r--r--drivers/media/pci/bt8xx/bt848.h8
-rw-r--r--drivers/media/pci/bt8xx/bttv-audio-hook.c10
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c1014
-rw-r--r--drivers/media/pci/bt8xx/bttv-risc.c415
-rw-r--r--drivers/media/pci/bt8xx/bttv-vbi.c281
-rw-r--r--drivers/media/pci/bt8xx/bttvp.h79
-rw-r--r--drivers/media/pci/cx18/cx18-gpio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-irq.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c6
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c2
-rw-r--r--drivers/media/pci/intel/Kconfig11
-rw-r--r--drivers/media/pci/intel/Makefile5
-rw-r--r--drivers/media/pci/intel/ipu-bridge.c814
-rw-r--r--drivers/media/pci/intel/ipu3/Kconfig1
-rw-r--r--drivers/media/pci/intel/ipu3/Makefile3
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.c494
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c (renamed from drivers/media/pci/intel/ipu3/ipu3-cio2-main.c)26
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.h6
-rw-r--r--drivers/media/pci/intel/ivsc/Kconfig12
-rw-r--r--drivers/media/pci/intel/ivsc/Makefile9
-rw-r--r--drivers/media/pci/intel/ivsc/mei_ace.c579
-rw-r--r--drivers/media/pci/intel/ivsc/mei_csi.c825
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-fw.c1
-rw-r--r--drivers/media/pci/ttpci/budget-av.c34
-rw-r--r--drivers/media/pci/ttpci/budget-ci.c30
-rw-r--r--drivers/media/pci/ttpci/budget-core.c6
-rw-r--r--drivers/media/pci/ttpci/budget.c20
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-core.c3
-rw-r--r--drivers/media/platform/amphion/vdec.c41
-rw-r--r--drivers/media/platform/amphion/venc.c6
-rw-r--r--drivers/media/platform/amphion/vpu.h3
-rw-r--r--drivers/media/platform/amphion/vpu_cmds.c18
-rw-r--r--drivers/media/platform/amphion/vpu_core.c4
-rw-r--r--drivers/media/platform/amphion/vpu_dbg.c17
-rw-r--r--drivers/media/platform/amphion/vpu_drv.c4
-rw-r--r--drivers/media/platform/amphion/vpu_helpers.c61
-rw-r--r--drivers/media/platform/amphion/vpu_malone.c2
-rw-r--r--drivers/media/platform/amphion/vpu_mbox.c2
-rw-r--r--drivers/media/platform/amphion/vpu_msgs.c14
-rw-r--r--drivers/media/platform/amphion/vpu_rpc.c2
-rw-r--r--drivers/media/platform/amphion/vpu_v4l2.c7
-rw-r--r--drivers/media/platform/amphion/vpu_windsor.c2
-rw-r--r--drivers/media/platform/aspeed/aspeed-video.c5
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c16
-rw-r--r--drivers/media/platform/atmel/atmel-isi.h2
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c117
-rw-r--r--drivers/media/platform/chips-media/coda-common.c4
-rw-r--r--drivers/media/platform/intel/pxa_camera.c82
-rw-r--r--drivers/media/platform/marvell/cafe-driver.c18
-rw-r--r--drivers/media/platform/marvell/mcam-core.c15
-rw-r--r--drivers/media/platform/marvell/mmp-driver.c11
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c1
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c2
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c4
-rw-r--r--drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c2
-rw-r--r--drivers/media/platform/mediatek/mdp/mtk_mdp_core.c2
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c4
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/Makefile55
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/Makefile21
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_cmn_drv.h147
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_dbgfs.c (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_dbgfs.c)68
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_dbgfs.h (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_dbgfs.h)24
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw.c (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw.c)21
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw.h (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw.h)8
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_priv.h (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_priv.h)14
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c)26
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c)64
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_intr.c68
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_intr.h (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_intr.h)6
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.c)83
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.h75
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/Makefile25
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c)204
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.h (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.h)10
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c)165
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h324
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_hw.c (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c)23
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_hw.h (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.h)9
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_pm.c (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.c)41
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_pm.h (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.h)6
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c)176
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c)235
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c (renamed from drivers/media/platform/mediatek/vcodec/vdec/vdec_av1_req_lat_if.c)170
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_if.c (renamed from drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_if.c)79
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_common.c (renamed from drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_common.c)4
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_common.h (renamed from drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_common.h)6
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c (renamed from drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_if.c)75
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c (renamed from drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c)157
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c (renamed from drivers/media/platform/mediatek/vcodec/vdec/vdec_hevc_req_multi_if.c)129
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c (renamed from drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_if.c)80
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c (renamed from drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_req_if.c)81
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c (renamed from drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_if.c)137
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c (renamed from drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c)129
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_drv_base.h (renamed from drivers/media/platform/mediatek/vcodec/vdec_drv_base.h)2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_drv_if.c (renamed from drivers/media/platform/mediatek/vcodec/vdec_drv_if.c)12
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_drv_if.h (renamed from drivers/media/platform/mediatek/vcodec/vdec_drv_if.h)10
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_ipi_msg.h (renamed from drivers/media/platform/mediatek/vcodec/vdec_ipi_msg.h)0
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_msg_queue.c (renamed from drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c)70
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_msg_queue.h (renamed from drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h)14
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c (renamed from drivers/media/platform/mediatek/vcodec/vdec_vpu_if.c)128
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.h (renamed from drivers/media/platform/mediatek/vcodec/vdec_vpu_if.h)6
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/Makefile11
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c)296
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.h (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.h)12
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c)75
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h248
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_pm.c)15
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h (renamed from drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_pm.h)4
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c (renamed from drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c)110
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc/venc_vp8_if.c (renamed from drivers/media/platform/mediatek/vcodec/venc/venc_vp8_if.c)69
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_drv_base.h (renamed from drivers/media/platform/mediatek/vcodec/venc_drv_base.h)4
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c (renamed from drivers/media/platform/mediatek/vcodec/venc_drv_if.c)10
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.h (renamed from drivers/media/platform/mediatek/vcodec/venc_drv_if.h)11
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_ipi_msg.h (renamed from drivers/media/platform/mediatek/vcodec/venc_ipi_msg.h)0
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c (renamed from drivers/media/platform/mediatek/vcodec/venc_vpu_if.c)110
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.h (renamed from drivers/media/platform/mediatek/vcodec/venc_vpu_if.h)3
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h548
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_intr.c43
-rw-r--r--drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.h83
-rw-r--r--drivers/media/platform/mediatek/vpu/mtk_vpu.c4
-rw-r--r--drivers/media/platform/microchip/microchip-csi2dc.c11
-rw-r--r--drivers/media/platform/microchip/microchip-isc-base.c4
-rw-r--r--drivers/media/platform/microchip/microchip-isc.h2
-rw-r--r--drivers/media/platform/microchip/microchip-sama5d2-isc.c13
-rw-r--r--drivers/media/platform/microchip/microchip-sama7g5-isc.c13
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/vde.c3
-rw-r--r--drivers/media/platform/nxp/Kconfig11
-rw-r--r--drivers/media/platform/nxp/Makefile1
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c1
-rw-r--r--drivers/media/platform/nxp/imx-mipi-csis.c18
-rw-r--r--drivers/media/platform/nxp/imx-pxp.c1
-rw-r--r--drivers/media/platform/nxp/imx7-media-csi.c26
-rw-r--r--drivers/media/platform/nxp/imx8-isi/Makefile4
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c35
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h14
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c38
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-gasket.c85
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c1
-rw-r--r--drivers/media/platform/nxp/imx8mq-mipi-csi2.c (renamed from drivers/staging/media/imx/imx8mq-mipi-csi2.c)11
-rw-r--r--drivers/media/platform/qcom/camss/camss.c26
-rw-r--r--drivers/media/platform/qcom/camss/camss.h2
-rw-r--r--drivers/media/platform/qcom/venus/core.c16
-rw-r--r--drivers/media/platform/qcom/venus/core.h19
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c42
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c7
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h61
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c22
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.c4
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c42
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c10
-rw-r--r--drivers/media/platform/qcom/venus/vdec_ctrls.c2
-rw-r--r--drivers/media/platform/qcom/venus/venc.c4
-rw-r--r--drivers/media/platform/qcom/venus/venc_ctrls.c2
-rw-r--r--drivers/media/platform/renesas/rcar-isp.c14
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-core.c53
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-csi2.c312
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-vin.h10
-rw-r--r--drivers/media/platform/renesas/rcar_drif.c15
-rw-r--r--drivers/media/platform/renesas/rcar_fdp1.c1
-rw-r--r--drivers/media/platform/renesas/rcar_jpu.c5
-rw-r--r--drivers/media/platform/renesas/renesas-ceu.c11
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c15
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h2
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c13
-rw-r--r--drivers/media/platform/renesas/sh_vou.c12
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drv.c1
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h2
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c7
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c14
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c8
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-core.c1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-core.c5
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-lite.c4
-rw-r--r--drivers/media/platform/samsung/exynos4-is/media-dev.c12
-rw-r--r--drivers/media/platform/samsung/exynos4-is/media-dev.h2
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-core.c4
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c6
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmi.c92
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c11
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c3
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_bridge.c12
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_bridge.h2
-rw-r--r--drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c13
-rw-r--r--drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c13
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c4
-rw-r--r--drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c4
-rw-r--r--drivers/media/platform/ti/am437x/am437x-vpfe.c42
-rw-r--r--drivers/media/platform/ti/am437x/am437x-vpfe.h2
-rw-r--r--drivers/media/platform/ti/cal/cal-camerarx.c206
-rw-r--r--drivers/media/platform/ti/cal/cal-video.c23
-rw-r--r--drivers/media/platform/ti/cal/cal.c78
-rw-r--r--drivers/media/platform/ti/cal/cal.h10
-rw-r--r--drivers/media/platform/ti/davinci/vpif_capture.c36
-rw-r--r--drivers/media/platform/ti/omap3isp/isp.c62
-rw-r--r--drivers/media/platform/ti/omap3isp/isp.h15
-rw-r--r--drivers/media/platform/ti/omap3isp/ispccdc.c13
-rw-r--r--drivers/media/platform/ti/omap3isp/ispccp2.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/ispcsi2.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/ispcsiphy.c15
-rw-r--r--drivers/media/platform/verisilicon/hantro_drv.c15
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c9
-rw-r--r--drivers/media/platform/video-mux.c10
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c55
-rw-r--r--drivers/media/radio/wl128x/fmdrv_common.c3
-rw-r--r--drivers/media/rc/Kconfig1
-rw-r--r--drivers/media/rc/gpio-ir-recv.c2
-rw-r--r--drivers/media/rc/gpio-ir-tx.c2
-rw-r--r--drivers/media/rc/ir-rx51.c2
-rw-r--r--drivers/media/rc/meson-ir.c122
-rw-r--r--drivers/media/rc/mtk-cir.c3
-rw-r--r--drivers/media/rc/sunxi-cir.c3
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c2
-rw-r--r--drivers/media/tuners/qt1010.c11
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c9
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c8
-rw-r--r--drivers/media/usb/dvb-usb-v2/gl861.c2
-rw-r--r--drivers/media/usb/dvb-usb/af9005.c5
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c24
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c5
-rw-r--r--drivers/media/usb/dvb-usb/opera1.c9
-rw-r--r--drivers/media/usb/go7007/go7007-i2c.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c6
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-sysfs.c20
-rw-r--r--drivers/media/usb/siano/smsusb.c21
-rw-r--r--drivers/media/usb/ttusb-dec/ttusbdecfe.c12
-rw-r--r--drivers/media/v4l2-core/Kconfig9
-rw-r--r--drivers/media/v4l2-core/Makefile1
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c701
-rw-r--r--drivers/media/v4l2-core/v4l2-cci.c166
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c127
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c44
-rw-r--r--drivers/mfd/Kconfig5
-rw-r--r--drivers/mfd/ab8500-core.c1
-rw-r--r--drivers/mfd/acer-ec-a500.c2
-rw-r--r--drivers/mfd/act8945a.c4
-rw-r--r--drivers/mfd/altera-a10sr.c2
-rw-r--r--drivers/mfd/altera-sysmgr.c3
-rw-r--r--drivers/mfd/arizona-core.c1
-rw-r--r--drivers/mfd/atc260x-core.c1
-rw-r--r--drivers/mfd/atmel-hlcdc.c4
-rw-r--r--drivers/mfd/axp20x.c12
-rw-r--r--drivers/mfd/bcm590xx.c1
-rw-r--r--drivers/mfd/cros_ec_dev.c2
-rw-r--r--drivers/mfd/cs47l15-tables.c8
-rw-r--r--drivers/mfd/cs47l24-tables.c2
-rw-r--r--drivers/mfd/cs47l35-tables.c8
-rw-r--r--drivers/mfd/cs47l85-tables.c8
-rw-r--r--drivers/mfd/cs47l90-tables.c8
-rw-r--r--drivers/mfd/cs47l92-tables.c8
-rw-r--r--drivers/mfd/da9052-i2c.c5
-rw-r--r--drivers/mfd/da9055-i2c.c1
-rw-r--r--drivers/mfd/da9062-core.c2
-rw-r--r--drivers/mfd/exynos-lpass.c4
-rw-r--r--drivers/mfd/hi6421-pmic-core.c2
-rw-r--r--drivers/mfd/hi655x-pmic.c7
-rw-r--r--drivers/mfd/ipaq-micro.c7
-rw-r--r--drivers/mfd/iqs62x.c2
-rw-r--r--drivers/mfd/lochnagar-i2c.c2
-rw-r--r--drivers/mfd/lp873x.c2
-rw-r--r--drivers/mfd/lp87565.c2
-rw-r--r--drivers/mfd/madera-i2c.c1
-rw-r--r--drivers/mfd/madera-spi.c1
-rw-r--r--drivers/mfd/max14577.c3
-rw-r--r--drivers/mfd/max77541.c2
-rw-r--r--drivers/mfd/max77620.c1
-rw-r--r--drivers/mfd/max77686.c1
-rw-r--r--drivers/mfd/max77843.c2
-rw-r--r--drivers/mfd/max8907.c1
-rw-r--r--drivers/mfd/max8925-core.c1
-rw-r--r--drivers/mfd/max8997.c1
-rw-r--r--drivers/mfd/max8998.c1
-rw-r--r--drivers/mfd/mc13xxx-i2c.c9
-rw-r--r--drivers/mfd/mt6358-irq.c5
-rw-r--r--drivers/mfd/mt6397-core.c5
-rw-r--r--drivers/mfd/mt6397-irq.c5
-rw-r--r--drivers/mfd/mxs-lradc.c2
-rw-r--r--drivers/mfd/omap-usb-host.c4
-rw-r--r--drivers/mfd/omap-usb-tll.c4
-rw-r--r--drivers/mfd/palmas.c3
-rw-r--r--drivers/mfd/qcom-pm8008.c2
-rw-r--r--drivers/mfd/qcom-pm8xxx.c10
-rw-r--r--drivers/mfd/rave-sp.c2
-rw-r--r--drivers/mfd/rk8xx-core.c2
-rw-r--r--drivers/mfd/rn5t618.c2
-rw-r--r--drivers/mfd/rohm-bd71828.c2
-rw-r--r--drivers/mfd/rohm-bd718x7.c2
-rw-r--r--drivers/mfd/rohm-bd9576.c2
-rw-r--r--drivers/mfd/rsmu_i2c.c2
-rw-r--r--drivers/mfd/rsmu_spi.c2
-rw-r--r--drivers/mfd/rt5033.c2
-rw-r--r--drivers/mfd/rz-mtu3.c34
-rw-r--r--drivers/mfd/sec-core.c2
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c2
-rw-r--r--drivers/mfd/ssbi.c6
-rw-r--r--drivers/mfd/stm32-lptimer.c1
-rw-r--r--drivers/mfd/stm32-timers.c4
-rw-r--r--drivers/mfd/stmpe-i2c.c2
-rw-r--r--drivers/mfd/stpmic1.c2
-rw-r--r--drivers/mfd/sun4i-gpadc.c4
-rw-r--r--drivers/mfd/tc3589x.c4
-rw-r--r--drivers/mfd/ti-lmu.c1
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c5
-rw-r--r--drivers/mfd/tps6507x.c1
-rw-r--r--drivers/mfd/tps65090.c1
-rw-r--r--drivers/mfd/tps65217.c1
-rw-r--r--drivers/mfd/tps65218.c1
-rw-r--r--drivers/mfd/tps6594-core.c2
-rw-r--r--drivers/mfd/twl6040.c2
-rw-r--r--drivers/mfd/wm5102-tables.c2
-rw-r--r--drivers/mfd/wm5110-tables.c2
-rw-r--r--drivers/mfd/wm831x-core.c3
-rw-r--r--drivers/mfd/wm831x-i2c.c2
-rw-r--r--drivers/mfd/wm831x-spi.c2
-rw-r--r--drivers/mfd/wm8994-core.c2
-rw-r--r--drivers/mfd/wm8994-regmap.c6
-rw-r--r--drivers/mfd/wm8997-tables.c2
-rw-r--r--drivers/mfd/wm8998-tables.c2
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/atmel-ssc.c3
-rw-r--r--drivers/misc/bcm-vk/bcm_vk.h2
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_msg.c14
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_tty.c5
-rw-r--r--drivers/misc/cxl/base.c1
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c23
-rw-r--r--drivers/misc/fastrpc.c36
-rw-r--r--drivers/misc/genwqe/card_base.c49
-rw-r--r--drivers/misc/genwqe/card_base.h2
-rw-r--r--drivers/misc/hi6421v600-irq.c4
-rw-r--r--drivers/misc/hpilo.c18
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c2
-rw-r--r--drivers/misc/mchp_pci1xxxx/Kconfig1
-rw-r--r--drivers/misc/mchp_pci1xxxx/Makefile2
-rw-r--r--drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c443
-rw-r--r--drivers/misc/mei/bus-fixup.c10
-rw-r--r--drivers/misc/mei/bus.c1
-rw-r--r--drivers/misc/mei/gsc-me.c1
-rw-r--r--drivers/misc/mei/init.c8
-rw-r--r--drivers/misc/mei/main.c25
-rw-r--r--drivers/misc/mei/mei_dev.h2
-rw-r--r--drivers/misc/mei/pxp/mei_pxp.c3
-rw-r--r--drivers/misc/qcom-coincell.c1
-rw-r--r--drivers/misc/sram.c2
-rw-r--r--drivers/misc/ti-st/st_core.c18
-rw-r--r--drivers/misc/ti-st/st_kim.c13
-rw-r--r--drivers/misc/tps6594-esm.c9
-rw-r--r--drivers/misc/tps6594-pfsm.c9
-rw-r--r--drivers/misc/vcpu_stall_detector.c1
-rw-r--r--drivers/misc/xilinx_sdfec.c7
-rw-r--r--drivers/misc/xilinx_tmr_inject.c3
-rw-r--r--drivers/misc/xilinx_tmr_manager.c6
-rw-r--r--drivers/mmc/core/sdio_uart.c4
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c3
-rw-r--r--drivers/mtd/devices/docg3.c8
-rw-r--r--drivers/mtd/devices/mchp23k256.c2
-rw-r--r--drivers/mtd/devices/mchp48l640.c2
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c1
-rw-r--r--drivers/mtd/devices/spear_smi.c20
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c14
-rw-r--r--drivers/mtd/lpddr/lpddr2_nvm.c4
-rw-r--r--drivers/mtd/maps/lantiq-flash.c11
-rw-r--r--drivers/mtd/maps/physmap-bt1-rom.c1
-rw-r--r--drivers/mtd/maps/physmap-core.c3
-rw-r--r--drivers/mtd/maps/physmap-gemini.c2
-rw-r--r--drivers/mtd/maps/physmap-ixp4xx.c2
-rw-r--r--drivers/mtd/maps/physmap-ixp4xx.h1
-rw-r--r--drivers/mtd/maps/physmap-versatile.c4
-rw-r--r--drivers/mtd/maps/plat-ram.c3
-rw-r--r--drivers/mtd/maps/sun_uflash.c2
-rw-r--r--drivers/mtd/mtdblock.c2
-rw-r--r--drivers/mtd/mtdblock_ro.c2
-rw-r--r--drivers/mtd/mtdcore.c97
-rw-r--r--drivers/mtd/mtdcore.h1
-rw-r--r--drivers/mtd/mtdpart.c14
-rw-r--r--drivers/mtd/nand/ecc-mxic.c2
-rw-r--r--drivers/mtd/nand/ecc.c2
-rw-r--r--drivers/mtd/nand/onenand/onenand_omap2.c12
-rw-r--r--drivers/mtd/nand/onenand/onenand_samsung.c9
-rw-r--r--drivers/mtd/nand/raw/Kconfig9
-rw-r--r--drivers/mtd/nand/raw/Makefile1
-rw-r--r--drivers/mtd/nand/raw/ams-delta.c2
-rw-r--r--drivers/mtd/nand/raw/arasan-nand-controller.c29
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c3
-rw-r--r--drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c4
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c130
-rw-r--r--drivers/mtd/nand/raw/brcmnand/iproc_nand.c7
-rw-r--r--drivers/mtd/nand/raw/davinci_nand.c1
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c1
-rw-r--r--drivers/mtd/nand/raw/fsl_ifc_nand.c1
-rw-r--r--drivers/mtd/nand/raw/fsl_upm.c6
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c19
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c2
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_ecc.c1
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c1
-rw-r--r--drivers/mtd/nand/raw/intel-nand-controller.c15
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_mlc.c3
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c15
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c19
-rw-r--r--drivers/mtd/nand/raw/meson_nand.c72
-rw-r--r--drivers/mtd/nand/raw/mpc5121_nfc.c15
-rw-r--r--drivers/mtd/nand/raw/mtk_nand.c63
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c10
-rw-r--r--drivers/mtd/nand/raw/nand_base.c1
-rw-r--r--drivers/mtd/nand/raw/ndfc.c3
-rw-r--r--drivers/mtd/nand/raw/omap2.c5
-rw-r--r--drivers/mtd/nand/raw/orion_nand.c22
-rw-r--r--drivers/mtd/nand/raw/oxnas_nand.c209
-rw-r--r--drivers/mtd/nand/raw/pl35x-nand-controller.c4
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c1003
-rw-r--r--drivers/mtd/nand/raw/rockchip-nand-controller.c1
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c1
-rw-r--r--drivers/mtd/nand/raw/sh_flctl.c4
-rw-r--r--drivers/mtd/nand/raw/socrates_nand.c3
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c21
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c33
-rw-r--r--drivers/mtd/nand/raw/vf610_nfc.c35
-rw-r--r--drivers/mtd/nand/raw/xway_nand.c3
-rw-r--r--drivers/mtd/nand/spi/esmt.c9
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c20
-rw-r--r--drivers/mtd/nand/spi/toshiba.c33
-rw-r--r--drivers/mtd/spi-nor/atmel.c8
-rw-r--r--drivers/mtd/spi-nor/controllers/nxp-spifi.c34
-rw-r--r--drivers/mtd/spi-nor/core.c57
-rw-r--r--drivers/mtd/spi-nor/core.h9
-rw-r--r--drivers/mtd/spi-nor/debugfs.c1
-rw-r--r--drivers/mtd/spi-nor/issi.c4
-rw-r--r--drivers/mtd/spi-nor/macronix.c4
-rw-r--r--drivers/mtd/spi-nor/micron-st.c8
-rw-r--r--drivers/mtd/spi-nor/spansion.c318
-rw-r--r--drivers/mtd/spi-nor/sst.c12
-rw-r--r--drivers/mtd/spi-nor/swp.c9
-rw-r--r--drivers/mtd/spi-nor/winbond.c9
-rw-r--r--drivers/mtd/spi-nor/xilinx.c4
-rw-r--r--drivers/net/caif/caif_serial.c2
-rw-r--r--drivers/net/can/can327.c47
-rw-r--r--drivers/net/can/sja1000/ems_pci.c7
-rw-r--r--drivers/net/can/slcan/slcan-core.c13
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c16
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c51
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h54
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h1
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c5
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c49
-rw-r--r--drivers/net/ethernet/sfc/rx.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c5
-rw-r--r--drivers/net/hamradio/6pack.c4
-rw-r--r--drivers/net/hamradio/mkiss.c4
-rw-r--r--drivers/net/macsec.c3
-rw-r--r--drivers/net/mctp/mctp-serial.c5
-rw-r--r--drivers/net/phy/micrel.c9
-rw-r--r--drivers/net/ppp/ppp_async.c26
-rw-r--r--drivers/net/ppp/ppp_synctty.c26
-rw-r--r--drivers/net/slip/slip.c4
-rw-r--r--drivers/net/usb/hso.c20
-rw-r--r--drivers/net/veth.c4
-rw-r--r--drivers/net/virtio_net.c228
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c3
-rw-r--r--drivers/nfc/nxp-nci/i2c.c1
-rw-r--r--drivers/nvmem/Kconfig31
-rw-r--r--drivers/nvmem/Makefile4
-rw-r--r--drivers/nvmem/bcm-ocotp.c1
-rw-r--r--drivers/nvmem/brcm_nvram.c3
-rw-r--r--drivers/nvmem/core.c19
-rw-r--r--drivers/nvmem/imx-iim.c1
-rw-r--r--drivers/nvmem/imx-ocotp-ele.c2
-rw-r--r--drivers/nvmem/imx-ocotp-scu.c2
-rw-r--r--drivers/nvmem/imx-ocotp.c1
-rw-r--r--drivers/nvmem/lpc18xx_otp.c5
-rw-r--r--drivers/nvmem/meson-mx-efuse.c5
-rw-r--r--drivers/nvmem/qcom-spmi-sdam.c2
-rw-r--r--drivers/nvmem/qfprom.c17
-rw-r--r--drivers/nvmem/qoriq-efuse.c78
-rw-r--r--drivers/nvmem/rave-sp-eeprom.c2
-rw-r--r--drivers/nvmem/rockchip-efuse.c3
-rw-r--r--drivers/nvmem/sc27xx-efuse.c1
-rw-r--r--drivers/nvmem/sec-qfprom.c96
-rw-r--r--drivers/nvmem/snvs_lpgpr.c3
-rw-r--r--drivers/nvmem/sprd-efuse.c2
-rw-r--r--drivers/nvmem/stm32-romem.c3
-rw-r--r--drivers/nvmem/sunplus-ocotp.c2
-rw-r--r--drivers/nvmem/sunxi_sid.c5
-rw-r--r--drivers/nvmem/u-boot-env.c4
-rw-r--r--drivers/nvmem/uniphier-efuse.c3
-rw-r--r--drivers/parport/parport_pc.c5
-rw-r--r--drivers/pcmcia/Kconfig1
-rw-r--r--drivers/peci/controller/Kconfig16
-rw-r--r--drivers/peci/controller/Makefile1
-rw-r--r--drivers/peci/controller/peci-aspeed.c2
-rw-r--r--drivers/peci/controller/peci-npcm.c298
-rw-r--r--drivers/peci/core.c4
-rw-r--r--drivers/peci/cpu.c5
-rw-r--r--drivers/perf/arm_pmuv3.c2
-rw-r--r--drivers/perf/cxl_pmu.c2
-rw-r--r--drivers/perf/riscv_pmu.c113
-rw-r--r--drivers/perf/riscv_pmu_legacy.c28
-rw-r--r--drivers/perf/riscv_pmu_sbi.c196
-rw-r--r--drivers/phy/Kconfig2
-rw-r--r--drivers/phy/Makefile2
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c2
-rw-r--r--drivers/phy/allwinner/phy-sun50i-usb3.c1
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c2
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c1
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-pcie.c1
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c1
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb2.c4
-rw-r--r--drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c2
-rw-r--r--drivers/phy/amlogic/phy-meson-gxl-usb2.c2
-rw-r--r--drivers/phy/amlogic/phy-meson8-hdmi-tx.c2
-rw-r--r--drivers/phy/amlogic/phy-meson8b-usb2.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns-usb3.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-sr-usb.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm63xx-usbh.c1
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c2
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c1
-rw-r--r--drivers/phy/cadence/cdns-dphy-rx.c1
-rw-r--r--drivers/phy/cadence/cdns-dphy.c3
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c98
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c1734
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8m-pcie.c2
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8mq-usb.c4
-rw-r--r--drivers/phy/freescale/phy-fsl-lynx-28g.c1
-rw-r--r--drivers/phy/hisilicon/phy-hi3660-usb3.c1
-rw-r--r--drivers/phy/hisilicon/phy-hi3670-usb3.c1
-rw-r--r--drivers/phy/hisilicon/phy-hi6220-usb.c1
-rw-r--r--drivers/phy/hisilicon/phy-hisi-inno-usb2.c3
-rw-r--r--drivers/phy/hisilicon/phy-histb-combphy.c3
-rw-r--r--drivers/phy/hisilicon/phy-hix5hd2-sata.c1
-rw-r--r--drivers/phy/ingenic/phy-ingenic-usb.c1
-rw-r--r--drivers/phy/lantiq/phy-lantiq-rcu-usb2.c1
-rw-r--r--drivers/phy/marvell/phy-armada38x-comphy.c1
-rw-r--r--drivers/phy/marvell/phy-berlin-sata.c1
-rw-r--r--drivers/phy/marvell/phy-mmp3-hsic.c1
-rw-r--r--drivers/phy/marvell/phy-mmp3-usb.c1
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-comphy.c1
-rw-r--r--drivers/phy/marvell/phy-mvebu-a3700-utmi.c2
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c4
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-utmi.c2
-rw-r--r--drivers/phy/marvell/phy-mvebu-sata.c1
-rw-r--r--drivers/phy/marvell/phy-pxa-28nm-usb2.c1
-rw-r--r--drivers/phy/marvell/phy-pxa-usb.c2
-rw-r--r--drivers/phy/mediatek/phy-mtk-hdmi.h1
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi.c30
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi.h2
-rw-r--r--drivers/phy/mediatek/phy-mtk-pcie.c2
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c2
-rw-r--r--drivers/phy/mediatek/phy-mtk-ufs.c1
-rw-r--r--drivers/phy/phy-can-transceiver.c1
-rw-r--r--drivers/phy/phy-xgene.c1
-rw-r--r--drivers/phy/qualcomm/Kconfig21
-rw-r--r--drivers/phy/qualcomm/Makefile2
-rw-r--r--drivers/phy/qualcomm/phy-ath79-usb.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-edp.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31.c294
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c492
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c483
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c13
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c1407
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c557
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-eusb2.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs.c2
-rw-r--r--drivers/phy/ralink/phy-mt7621-pci.c3
-rw-r--r--drivers/phy/realtek/Kconfig27
-rw-r--r--drivers/phy/realtek/Makefile3
-rw-r--r--drivers/phy/realtek/phy-rtk-usb2.c1331
-rw-r--r--drivers/phy/realtek/phy-rtk-usb3.c767
-rw-r--r--drivers/phy/renesas/phy-rcar-gen2.c1
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-pcie.c1
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c2
-rw-r--r--drivers/phy/renesas/r8a779f0-ether-serdes.c10
-rw-r--r--drivers/phy/rockchip/phy-rockchip-dphy-rx0.c1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c5
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-hdmi.c235
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c357
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c3
-rw-r--r--drivers/phy/rockchip/phy-rockchip-snps-pcie3.c3
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c3
-rw-r--r--drivers/phy/samsung/phy-exynos-dp-video.c2
-rw-r--r--drivers/phy/samsung/phy-exynos-mipi-video.c3
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c184
-rw-r--r--drivers/phy/samsung/phy-samsung-usb2.c2
-rw-r--r--drivers/phy/socionext/phy-uniphier-pcie.c2
-rw-r--r--drivers/phy/st/phy-spear1310-miphy.c3
-rw-r--r--drivers/phy/st/phy-spear1340-miphy.c3
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c3
-rw-r--r--drivers/phy/starfive/Kconfig38
-rw-r--r--drivers/phy/starfive/Makefile4
-rw-r--r--drivers/phy/starfive/phy-jh7110-dphy-rx.c232
-rw-r--r--drivers/phy/starfive/phy-jh7110-pcie.c204
-rw-r--r--drivers/phy/starfive/phy-jh7110-usb.c152
-rw-r--r--drivers/phy/sunplus/phy-sunplus-usb2.c2
-rw-r--r--drivers/phy/tegra/phy-tegra194-p2u.c2
-rw-r--r--drivers/phy/tegra/xusb.c2
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c9
-rw-r--r--drivers/phy/ti/phy-tusb1210.c1
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c1
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c95
-rw-r--r--drivers/platform/chrome/cros_ec.c1
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c28
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.c505
-rw-r--r--drivers/platform/mellanox/mlxbf-bootctl.h27
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c3
-rw-r--r--drivers/platform/mellanox/mlxreg-io.c1
-rw-r--r--drivers/platform/mellanox/nvsw-sn2201.c12
-rw-r--r--drivers/platform/x86/Kconfig31
-rw-r--r--drivers/platform/x86/Makefile5
-rw-r--r--drivers/platform/x86/amd/Kconfig16
-rw-r--r--drivers/platform/x86/amd/Makefile3
-rw-r--r--drivers/platform/x86/amd/pmc/Kconfig20
-rw-r--r--drivers/platform/x86/amd/pmc/Makefile8
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c (renamed from drivers/platform/x86/amd/pmc-quirks.c)0
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c (renamed from drivers/platform/x86/amd/pmc.c)0
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.h (renamed from drivers/platform/x86/amd/pmc.h)0
-rw-r--r--drivers/platform/x86/amd/pmf/cnqf.c5
-rw-r--r--drivers/platform/x86/amd/pmf/core.c3
-rw-r--r--drivers/platform/x86/asus-wmi.c646
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c9
-rw-r--r--drivers/platform/x86/hp/Kconfig16
-rw-r--r--drivers/platform/x86/hp/Makefile1
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/Makefile11
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/biosattr-interface.c312
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/bioscfg.c1063
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/bioscfg.h487
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c457
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/int-attributes.c418
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c441
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c556
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/spmobj-attributes.c381
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/string-attributes.c395
-rw-r--r--drivers/platform/x86/hp/hp-bioscfg/surestart-attributes.c132
-rw-r--r--drivers/platform/x86/ideapad-laptop.c118
-rw-r--r--drivers/platform/x86/intel/tpmi.c399
-rw-r--r--drivers/platform/x86/mlx-platform.c376
-rw-r--r--drivers/platform/x86/sel3350-platform.c251
-rw-r--r--drivers/platform/x86/siemens/Kconfig64
-rw-r--r--drivers/platform/x86/siemens/Makefile11
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c51
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c51
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c87
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt.c253
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc-batt.h20
-rw-r--r--drivers/platform/x86/siemens/simatic-ipc.c236
-rw-r--r--drivers/platform/x86/simatic-ipc.c151
-rw-r--r--drivers/platform/x86/system76_acpi.c74
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c58
-rw-r--r--drivers/platform/x86/wmi-bmof.c26
-rw-r--r--drivers/power/supply/power_supply_core.c4
-rw-r--r--drivers/powercap/intel_rapl_common.c4
-rw-r--r--drivers/pwm/Kconfig2
-rw-r--r--drivers/pwm/core.c41
-rw-r--r--drivers/pwm/pwm-apple.c1
-rw-r--r--drivers/pwm/pwm-atmel-hlcdc.c66
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c123
-rw-r--r--drivers/pwm/pwm-atmel.c77
-rw-r--r--drivers/pwm/pwm-bcm-kona.c4
-rw-r--r--drivers/pwm/pwm-berlin.c1
-rw-r--r--drivers/pwm/pwm-crc.c4
-rw-r--r--drivers/pwm/pwm-cros-ec.c11
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c3
-rw-r--r--drivers/pwm/pwm-hibvt.c2
-rw-r--r--drivers/pwm/pwm-imx1.c1
-rw-r--r--drivers/pwm/pwm-jz4740.c2
-rw-r--r--drivers/pwm/pwm-lp3943.c5
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c34
-rw-r--r--drivers/pwm/pwm-lpc32xx.c16
-rw-r--r--drivers/pwm/pwm-mediatek.c1
-rw-r--r--drivers/pwm/pwm-meson.c1
-rw-r--r--drivers/pwm/pwm-microchip-core.c2
-rw-r--r--drivers/pwm/pwm-mtk-disp.c1
-rw-r--r--drivers/pwm/pwm-ntxec.c4
-rw-r--r--drivers/pwm/pwm-pxa.c10
-rw-r--r--drivers/pwm/pwm-rockchip.c4
-rw-r--r--drivers/pwm/pwm-rz-mtu3.c2
-rw-r--r--drivers/pwm/pwm-sifive.c5
-rw-r--r--drivers/pwm/pwm-sl28cpld.c14
-rw-r--r--drivers/pwm/pwm-sprd.c1
-rw-r--r--drivers/pwm/pwm-stm32.c14
-rw-r--r--drivers/pwm/pwm-stmpe.c17
-rw-r--r--drivers/pwm/pwm-sun4i.c1
-rw-r--r--drivers/pwm/pwm-sunplus.c1
-rw-r--r--drivers/pwm/pwm-tegra.c1
-rw-r--r--drivers/pwm/pwm-tiecap.c2
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c2
-rw-r--r--drivers/pwm/pwm-visconti.c2
-rw-r--r--drivers/pwm/pwm-vt8500.c5
-rw-r--r--drivers/regulator/tps6287x-regulator.c2
-rw-r--r--drivers/regulator/tps6594-regulator.c31
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c3
-rw-r--r--drivers/remoteproc/imx_rproc.c73
-rw-r--r--drivers/remoteproc/imx_rproc.h2
-rw-r--r--drivers/remoteproc/omap_remoteproc.c3
-rw-r--r--drivers/remoteproc/pru_rproc.c25
-rw-r--r--drivers/remoteproc/qcom_common.c21
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c26
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c87
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c102
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c3
-rw-r--r--drivers/remoteproc/qcom_sysmon.c1
-rw-r--r--drivers/remoteproc/qcom_wcnss.c30
-rw-r--r--drivers/remoteproc/qcom_wcnss_iris.c1
-rw-r--r--drivers/remoteproc/rcar_rproc.c3
-rw-r--r--drivers/remoteproc/remoteproc_coredump.c2
-rw-r--r--drivers/remoteproc/remoteproc_internal.h4
-rw-r--r--drivers/remoteproc/st_slim_rproc.c1
-rw-r--r--drivers/remoteproc/stm32_rproc.c16
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c2
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c3
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c2
-rw-r--r--drivers/rpmsg/qcom_glink_native.c61
-rw-r--r--drivers/rpmsg/rpmsg_char.c54
-rw-r--r--drivers/rpmsg/rpmsg_core.c21
-rw-r--r--drivers/rpmsg/rpmsg_internal.h2
-rw-r--r--drivers/rtc/Kconfig5
-rw-r--r--drivers/rtc/interface.c2
-rw-r--r--drivers/rtc/rtc-abx80x.c2
-rw-r--r--drivers/rtc/rtc-armada38x.c5
-rw-r--r--drivers/rtc/rtc-aspeed.c2
-rw-r--r--drivers/rtc/rtc-at91rm9200.c3
-rw-r--r--drivers/rtc/rtc-at91sam9.c2
-rw-r--r--drivers/rtc/rtc-cmos.c11
-rw-r--r--drivers/rtc/rtc-cros-ec.c38
-rw-r--r--drivers/rtc/rtc-da9063.c7
-rw-r--r--drivers/rtc/rtc-ds1305.c5
-rw-r--r--drivers/rtc/rtc-ds1307.c2
-rw-r--r--drivers/rtc/rtc-ds1742.c1
-rw-r--r--drivers/rtc/rtc-ds2404.c169
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c5
-rw-r--r--drivers/rtc/rtc-isl12022.c126
-rw-r--r--drivers/rtc/rtc-isl12026.c5
-rw-r--r--drivers/rtc/rtc-isl1208.c19
-rw-r--r--drivers/rtc/rtc-jz4740.c4
-rw-r--r--drivers/rtc/rtc-lpc24xx.c3
-rw-r--r--drivers/rtc/rtc-m41t80.c2
-rw-r--r--drivers/rtc/rtc-m48t86.c8
-rw-r--r--drivers/rtc/rtc-mpc5121.c4
-rw-r--r--drivers/rtc/rtc-mt6397.c2
-rw-r--r--drivers/rtc/rtc-mt7622.c4
-rw-r--r--drivers/rtc/rtc-mxc.c1
-rw-r--r--drivers/rtc/rtc-nct3018y.c2
-rw-r--r--drivers/rtc/rtc-omap.c8
-rw-r--r--drivers/rtc/rtc-pcf2127.c872
-rw-r--r--drivers/rtc/rtc-pcf85063.c101
-rw-r--r--drivers/rtc/rtc-pcf85363.c31
-rw-r--r--drivers/rtc/rtc-pxa.c1
-rw-r--r--drivers/rtc/rtc-rs5c372.c5
-rw-r--r--drivers/rtc/rtc-rv3028.c82
-rw-r--r--drivers/rtc/rtc-rv3032.c2
-rw-r--r--drivers/rtc/rtc-rv8803.c5
-rw-r--r--drivers/rtc/rtc-rx6110.c1
-rw-r--r--drivers/rtc/rtc-rx8581.c1
-rw-r--r--drivers/rtc/rtc-rzn1.c5
-rw-r--r--drivers/rtc/rtc-s3c.c1
-rw-r--r--drivers/rtc/rtc-stm32.c143
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c1
-rw-r--r--drivers/rtc/rtc-sun6i.c3
-rw-r--r--drivers/rtc/rtc-sunplus.c2
-rw-r--r--drivers/rtc/rtc-sunxi.c2
-rw-r--r--drivers/rtc/rtc-ti-k3.c2
-rw-r--r--drivers/rtc/rtc-tps6586x.c1
-rw-r--r--drivers/rtc/rtc-tps65910.c7
-rw-r--r--drivers/rtc/rtc-twl.c43
-rw-r--r--drivers/rtc/rtc-wm8350.c9
-rw-r--r--drivers/s390/block/dasd_devmap.c6
-rw-r--r--drivers/s390/block/dasd_eckd.c10
-rw-r--r--drivers/s390/block/dasd_int.h4
-rw-r--r--drivers/s390/block/dcssblk.c13
-rw-r--r--drivers/s390/char/con3215.c6
-rw-r--r--drivers/s390/char/con3270.c6
-rw-r--r--drivers/s390/char/monreader.c12
-rw-r--r--drivers/s390/char/sclp_tty.c10
-rw-r--r--drivers/s390/char/sclp_vt220.c6
-rw-r--r--drivers/s390/cio/airq.c4
-rw-r--r--drivers/s390/crypto/zcrypt_api.c11
-rw-r--r--drivers/s390/virtio/virtio_ccw.c1
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/aic7xxx/aicasm/Makefile18
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c1
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c7
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c12
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c4
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c4
-rw-r--r--drivers/scsi/bfa/bfa_fc.h2
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c2
-rw-r--r--drivers/scsi/elx/libefc_sli/sli4.c8
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c20
-rw-r--r--drivers/scsi/gvp11.c5
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c16
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c9
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c14
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c27
-rw-r--r--drivers/scsi/hosts.c4
-rw-r--r--drivers/scsi/isci/host.h2
-rw-r--r--drivers/scsi/isci/init.c9
-rw-r--r--drivers/scsi/isci/phy.c2
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/libsas/sas_ata.c21
-rw-r--r--drivers/scsi/libsas/sas_discover.c10
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/libsas/sas_host_smp.c4
-rw-r--r--drivers/scsi/libsas/sas_init.c16
-rw-r--r--drivers/scsi/libsas/sas_internal.h7
-rw-r--r--drivers/scsi/libsas/sas_phy.c8
-rw-r--r--drivers/scsi/libsas/sas_port.c8
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c15
-rw-r--r--drivers/scsi/lpfc/lpfc.h23
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c136
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c70
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c77
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c53
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c94
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c16
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h23
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c37
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c155
-rw-r--r--drivers/scsi/mvsas/mv_init.c7
-rw-r--r--drivers/scsi/mvsas/mv_sas.c9
-rw-r--r--drivers/scsi/mvumi.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c12
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h2
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c5
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h2
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c27
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h2
-rw-r--r--drivers/scsi/pmcraid.c3
-rw-r--r--drivers/scsi/ppa.c84
-rw-r--r--drivers/scsi/ppa.h4
-rw-r--r--drivers/scsi/qedf/qedf_dbg.h2
-rw-r--r--drivers/scsi/qedf/qedf_debugfs.c35
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h11
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c217
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c15
-rw-r--r--drivers/scsi/qlogicpti.c4
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c80
-rw-r--r--drivers/scsi/storvsc_drv.c7
-rw-r--r--drivers/scsi/sun_esp.c3
-rw-r--r--drivers/soundwire/bus.c18
-rw-r--r--drivers/soundwire/intel_ace2x.c283
-rw-r--r--drivers/soundwire/intel_auxdevice.c112
-rw-r--r--drivers/soundwire/qcom.c1
-rw-r--r--drivers/spi/spi-sun6i.c31
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c18
-rw-r--r--drivers/staging/fieldbus/anybuss/arcx-anybus.c6
-rw-r--r--drivers/staging/gdm724x/gdm_tty.c17
-rw-r--r--drivers/staging/greybus/arche-platform.c1
-rw-r--r--drivers/staging/greybus/fw-core.c4
-rw-r--r--drivers/staging/greybus/pwm.c13
-rw-r--r--drivers/staging/greybus/uart.c3
-rw-r--r--drivers/staging/media/atomisp/Kconfig3
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig12
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile1
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2680.c849
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.h249
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c24
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.h67
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c414
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c3
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_mipi.c16
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_param_dvs.c3
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css_sp.c14
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-isc-base.c4
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-isc.h2
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c9
-rw-r--r--drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c9
-rw-r--r--drivers/staging/media/imx/Kconfig27
-rw-r--r--drivers/staging/media/imx/Makefile9
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c1
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c10
-rw-r--r--drivers/staging/media/imx/imx-media-dev-common.c8
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c2
-rw-r--r--drivers/staging/media/imx/imx-media-of.c4
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c12
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c14
-rw-r--r--drivers/staging/media/meson/vdec/esparser.c1
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c2
-rw-r--r--drivers/staging/media/meson/vdec/vdec_platform.c9
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-vp9.c1
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c2
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c3
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.c6
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.h2
-rw-r--r--drivers/staging/media/tegra-video/csi.c1
-rw-r--r--drivers/staging/media/tegra-video/vi.c23
-rw-r--r--drivers/staging/media/tegra-video/vip.c2
-rw-r--r--drivers/staging/most/dim2/dim2.c2
-rw-r--r--drivers/staging/pi433/pi433_if.c1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c11
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c17
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.h3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c25
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c38
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c6
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c12
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c60
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c3
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TS.h2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c65
-rw-r--r--drivers/staging/rtl8192e/rtllib.h32
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c3
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c84
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c348
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c17
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c5
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c10
-rw-r--r--drivers/staging/rtl8712/os_intfs.c1
-rw-r--r--drivers/staging/rtl8712/usb_intf.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c3
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c5
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c36
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c8
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c12
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_intf.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c2
-rw-r--r--drivers/staging/rtl8723bs/include/wifi.h15
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c167
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c161
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c2
-rw-r--r--drivers/staging/rts5208/sd.c3
-rw-r--r--drivers/staging/sm750fb/ddk750_dvi.c2
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.c22
-rw-r--r--drivers/staging/sm750fb/ddk750_sii164.h20
-rw-r--r--drivers/staging/vme_user/vme.c92
-rw-r--r--drivers/staging/vme_user/vme_bridge.h46
-rw-r--r--drivers/staging/vt6655/baseband.c6
-rw-r--r--drivers/staging/vt6655/srom.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c74
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c26
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.h1
-rw-r--r--drivers/target/target_core_iblock.c7
-rw-r--r--drivers/thermal/Kconfig12
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/armada_thermal.c5
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c1
-rw-r--r--drivers/thermal/broadcom/sr-thermal.c1
-rw-r--r--drivers/thermal/db8500_thermal.c2
-rw-r--r--drivers/thermal/dove_thermal.c4
-rw-r--r--drivers/thermal/imx8mm_thermal.c6
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c6
-rw-r--r--drivers/thermal/k3_bandgap.c1
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c2
-rw-r--r--drivers/thermal/kirkwood_thermal.c4
-rw-r--r--drivers/thermal/loongson2_thermal.c169
-rw-r--r--drivers/thermal/max77620_thermal.c2
-rw-r--r--drivers/thermal/mediatek/auxadc_thermal.c2
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c175
-rw-r--r--drivers/thermal/qcom/tsens-v0_1.c6
-rw-r--r--drivers/thermal/qcom/tsens-v1.c2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c2
-rw-r--r--drivers/thermal/spear_thermal.c4
-rw-r--r--drivers/thermal/sun8i_thermal.c8
-rw-r--r--drivers/thermal/tegra/tegra-bpmp-thermal.c52
-rw-r--r--drivers/thermal/thermal-generic-adc.c1
-rw-r--r--drivers/thermal/thermal_core.c18
-rw-r--r--drivers/thermal/thermal_core.h2
-rw-r--r--drivers/thermal/thermal_helpers.c3
-rw-r--r--drivers/thermal/thermal_trip.c24
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c2
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c3
-rw-r--r--drivers/thunderbolt/acpi.c18
-rw-r--r--drivers/thunderbolt/switch.c75
-rw-r--r--drivers/thunderbolt/tb.c24
-rw-r--r--drivers/thunderbolt/tb.h4
-rw-r--r--drivers/thunderbolt/tmu.c2
-rw-r--r--drivers/tty/Kconfig1
-rw-r--r--drivers/tty/amiserial.c4
-rw-r--r--drivers/tty/ehv_bytechan.c4
-rw-r--r--drivers/tty/goldfish.c7
-rw-r--r--drivers/tty/hvc/hvc_console.c4
-rw-r--r--drivers/tty/hvc/hvc_opal.c2
-rw-r--r--drivers/tty/hvc/hvcs.c10
-rw-r--r--drivers/tty/hvc/hvsi.c15
-rw-r--r--drivers/tty/ipwireless/hardware.c2
-rw-r--r--drivers/tty/ipwireless/tty.c4
-rw-r--r--drivers/tty/mips_ejtag_fdc.c6
-rw-r--r--drivers/tty/moxa.c8
-rw-r--r--drivers/tty/mxser.c4
-rw-r--r--drivers/tty/n_gsm.c89
-rw-r--r--drivers/tty/n_hdlc.c12
-rw-r--r--drivers/tty/n_null.c25
-rw-r--r--drivers/tty/n_tty.c613
-rw-r--r--drivers/tty/nozomi.c6
-rw-r--r--drivers/tty/pty.c2
-rw-r--r--drivers/tty/rpmsg_tty.c5
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c4
-rw-r--r--drivers/tty/serial/21285.c3
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c4
-rw-r--r--drivers/tty/serial/8250/8250_dw.c5
-rw-r--r--drivers/tty/serial/8250/8250_early.c1
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c1
-rw-r--r--drivers/tty/serial/8250/8250_men_mcb.c212
-rw-r--r--drivers/tty/serial/8250/8250_omap.c1
-rw-r--r--drivers/tty/serial/8250/8250_pci.c10
-rw-r--r--drivers/tty/serial/8250/8250_port.c11
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c1
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/altera_jtaguart.c4
-rw-r--r--drivers/tty/serial/altera_uart.c2
-rw-r--r--drivers/tty/serial/amba-pl010.c3
-rw-r--r--drivers/tty/serial/amba-pl011.c5
-rw-r--r--drivers/tty/serial/apbuart.c6
-rw-r--r--drivers/tty/serial/ar933x_uart.c3
-rw-r--r--drivers/tty/serial/arc_uart.c29
-rw-r--r--drivers/tty/serial/atmel_serial.c3
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c8
-rw-r--r--drivers/tty/serial/clps711x.c6
-rw-r--r--drivers/tty/serial/cpm_uart.c (renamed from drivers/tty/serial/cpm_uart/cpm_uart_core.c)157
-rw-r--r--drivers/tty/serial/cpm_uart.h (renamed from drivers/tty/serial/cpm_uart/cpm_uart.h)38
-rw-r--r--drivers/tty/serial/cpm_uart/Makefile12
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c122
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h33
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c156
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h33
-rw-r--r--drivers/tty/serial/digicolor-usart.c3
-rw-r--r--drivers/tty/serial/dz.c2
-rw-r--r--drivers/tty/serial/fsl_linflexuart.c10
-rw-r--r--drivers/tty/serial/fsl_lpuart.c88
-rw-r--r--drivers/tty/serial/imx.c4
-rw-r--r--drivers/tty/serial/ip22zilog.c2
-rw-r--r--drivers/tty/serial/kgdb_nmi.c3
-rw-r--r--drivers/tty/serial/lantiq.c3
-rw-r--r--drivers/tty/serial/liteuart.c3
-rw-r--r--drivers/tty/serial/ma35d1_serial.c3
-rw-r--r--drivers/tty/serial/max3100.c3
-rw-r--r--drivers/tty/serial/max310x.c10
-rw-r--r--drivers/tty/serial/mcf.c2
-rw-r--r--drivers/tty/serial/meson_uart.c138
-rw-r--r--drivers/tty/serial/milbeaut_usio.c3
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c2
-rw-r--r--drivers/tty/serial/mps2-uart.c4
-rw-r--r--drivers/tty/serial/mvebu-uart.c11
-rw-r--r--drivers/tty/serial/mxs-auart.c5
-rw-r--r--drivers/tty/serial/omap-serial.c11
-rw-r--r--drivers/tty/serial/pic32_uart.c1
-rw-r--r--drivers/tty/serial/pxa.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c17
-rw-r--r--drivers/tty/serial/rp2.c4
-rw-r--r--drivers/tty/serial/sa1100.c3
-rw-r--r--drivers/tty/serial/samsung_tty.c8
-rw-r--r--drivers/tty/serial/sb1250-duart.c3
-rw-r--r--drivers/tty/serial/sc16is7xx.c170
-rw-r--r--drivers/tty/serial/sccnxp.c7
-rw-r--r--drivers/tty/serial/serial-tegra.c29
-rw-r--r--drivers/tty/serial/serial_core.c28
-rw-r--r--drivers/tty/serial/serial_txx9.c3
-rw-r--r--drivers/tty/serial/sh-sci.c1
-rw-r--r--drivers/tty/serial/sifive.c29
-rw-r--r--drivers/tty/serial/sprd_serial.c38
-rw-r--r--drivers/tty/serial/st-asc.c9
-rw-r--r--drivers/tty/serial/stm32-usart.c312
-rw-r--r--drivers/tty/serial/stm32-usart.h1
-rw-r--r--drivers/tty/serial/sunhv.c4
-rw-r--r--drivers/tty/serial/sunplus-uart.c2
-rw-r--r--drivers/tty/serial/sunsab.c3
-rw-r--r--drivers/tty/serial/sunsu.c4
-rw-r--r--drivers/tty/serial/sunzilog.c4
-rw-r--r--drivers/tty/serial/tegra-tcu.c1
-rw-r--r--drivers/tty/serial/uartlite.c3
-rw-r--r--drivers/tty/serial/ucc_uart.c4
-rw-r--r--drivers/tty/serial/vt8500_serial.c8
-rw-r--r--drivers/tty/serial/xilinx_uartps.c4
-rw-r--r--drivers/tty/serial/zs.c3
-rw-r--r--drivers/tty/synclink_gt.c83
-rw-r--r--drivers/tty/sysrq.c84
-rw-r--r--drivers/tty/tty.h8
-rw-r--r--drivers/tty/tty_audit.c6
-rw-r--r--drivers/tty/tty_buffer.c192
-rw-r--r--drivers/tty/tty_io.c50
-rw-r--r--drivers/tty/tty_ioctl.c18
-rw-r--r--drivers/tty/tty_port.c34
-rw-r--r--drivers/tty/ttynull.c4
-rw-r--r--drivers/tty/vcc.c18
-rw-r--r--drivers/tty/vt/selection.c2
-rw-r--r--drivers/tty/vt/vt.c22
-rw-r--r--drivers/ufs/core/Kconfig8
-rw-r--r--drivers/ufs/core/Makefile1
-rw-r--r--drivers/ufs/core/ufs-hwmon.c3
-rw-r--r--drivers/ufs/core/ufs-mcq.c34
-rw-r--r--drivers/ufs/core/ufs-sysfs.c22
-rw-r--r--drivers/ufs/core/ufs_bsg.c2
-rw-r--r--drivers/ufs/core/ufshcd-crypto.h20
-rw-r--r--drivers/ufs/core/ufshcd-priv.h4
-rw-r--r--drivers/ufs/core/ufshcd.c627
-rw-r--r--drivers/ufs/core/ufshpb.c2668
-rw-r--r--drivers/ufs/core/ufshpb.h318
-rw-r--r--drivers/ufs/host/cdns-pltfrm.c27
-rw-r--r--drivers/ufs/host/tc-dwc-g210-pci.c2
-rw-r--r--drivers/ufs/host/tc-dwc-g210.c32
-rw-r--r--drivers/ufs/host/ti-j721e-ufs.c2
-rw-r--r--drivers/ufs/host/ufs-mediatek.c180
-rw-r--r--drivers/ufs/host/ufs-mediatek.h33
-rw-r--r--drivers/ufs/host/ufs-qcom.c174
-rw-r--r--drivers/ufs/host/ufs-qcom.h4
-rw-r--r--drivers/ufs/host/ufs-renesas.c2
-rw-r--r--drivers/ufs/host/ufshcd-dwc.c22
-rw-r--r--drivers/ufs/host/ufshcd-pci.c3
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c13
-rw-r--r--drivers/uio/uio_pruss.c7
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c1
-rw-r--r--drivers/usb/cdns3/cdns3-plat.c4
-rw-r--r--drivers/usb/cdns3/cdns3-starfive.c6
-rw-r--r--drivers/usb/cdns3/cdns3-ti.c1
-rw-r--r--drivers/usb/cdns3/cdnsp-pci.c3
-rw-r--r--drivers/usb/cdns3/core.c16
-rw-r--r--drivers/usb/cdns3/core.h7
-rw-r--r--drivers/usb/cdns3/drd.c4
-rw-r--r--drivers/usb/chipidea/ci.h19
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c18
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c3
-rw-r--r--drivers/usb/chipidea/core.c5
-rw-r--r--drivers/usb/chipidea/host.c1
-rw-r--r--drivers/usb/chipidea/udc.c12
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c18
-rw-r--r--drivers/usb/class/cdc-acm.c33
-rw-r--r--drivers/usb/common/common.c1
-rw-r--r--drivers/usb/core/config.c3
-rw-r--r--drivers/usb/core/devices.c1
-rw-r--r--drivers/usb/core/file.c68
-rw-r--r--drivers/usb/core/hcd.c50
-rw-r--r--drivers/usb/core/hub.c505
-rw-r--r--drivers/usb/core/ledtrig-usbport.c13
-rw-r--r--drivers/usb/core/message.c30
-rw-r--r--drivers/usb/core/of.c1
-rw-r--r--drivers/usb/core/sysfs.c3
-rw-r--r--drivers/usb/core/urb.c27
-rw-r--r--drivers/usb/core/usb.c20
-rw-r--r--drivers/usb/core/usb.h5
-rw-r--r--drivers/usb/dwc2/core.h1
-rw-r--r--drivers/usb/dwc2/gadget.c1
-rw-r--r--drivers/usb/dwc2/hcd_intr.c4
-rw-r--r--drivers/usb/dwc2/params.c39
-rw-r--r--drivers/usb/dwc2/pci.c14
-rw-r--r--drivers/usb/dwc2/platform.c2
-rw-r--r--drivers/usb/dwc3/Kconfig10
-rw-r--r--drivers/usb/dwc3/Makefile1
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c96
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c9
-rw-r--r--drivers/usb/dwc3/dwc3-imx8mp.c2
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c3
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c6
-rw-r--r--drivers/usb/dwc3/dwc3-octeon.c (renamed from arch/mips/cavium-octeon/octeon-usb.c)438
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c1
-rw-r--r--drivers/usb/gadget/Kconfig18
-rw-r--r--drivers/usb/gadget/composite.c34
-rw-r--r--drivers/usb/gadget/config.c8
-rw-r--r--drivers/usb/gadget/function/Makefile2
-rw-r--r--drivers/usb/gadget/function/f_acm.c4
-rw-r--r--drivers/usb/gadget/function/f_ecm.c19
-rw-r--r--drivers/usb/gadget/function/f_eem.c4
-rw-r--r--drivers/usb/gadget/function/f_loopback.c4
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c2
-rw-r--r--drivers/usb/gadget/function/f_midi.c56
-rw-r--r--drivers/usb/gadget/function/f_midi2.c2871
-rw-r--r--drivers/usb/gadget/function/f_ncm.c23
-rw-r--r--drivers/usb/gadget/function/f_obex.c3
-rw-r--r--drivers/usb/gadget/function/f_rndis.c19
-rw-r--r--drivers/usb/gadget/function/f_serial.c4
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c4
-rw-r--r--drivers/usb/gadget/function/f_subset.c4
-rw-r--r--drivers/usb/gadget/function/f_uvc.c36
-rw-r--r--drivers/usb/gadget/function/u_ether.c5
-rw-r--r--drivers/usb/gadget/function/u_ether.h13
-rw-r--r--drivers/usb/gadget/function/u_midi2.h81
-rw-r--r--drivers/usb/gadget/function/u_phonet.h1
-rw-r--r--drivers/usb/gadget/function/u_serial.c6
-rw-r--r--drivers/usb/gadget/function/u_serial.h4
-rw-r--r--drivers/usb/gadget/function/uvc.h2
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/core.c3
-rw-r--r--drivers/usb/gadget/udc/aspeed_udc.c4
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c6
-rw-r--r--drivers/usb/gadget/udc/core.c1
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c3
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c13
-rw-r--r--drivers/usb/gadget/udc/max3420_udc.c4
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c4
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usbf.c6
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c7
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c1
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c35
-rw-r--r--drivers/usb/host/ehci-atmel.c7
-rw-r--r--drivers/usb/host/ehci-brcm.c4
-rw-r--r--drivers/usb/host/ehci-exynos.c3
-rw-r--r--drivers/usb/host/ehci-fsl.c5
-rw-r--r--drivers/usb/host/ehci-hcd.c8
-rw-r--r--drivers/usb/host/ehci-hub.c10
-rw-r--r--drivers/usb/host/ehci-mv.c3
-rw-r--r--drivers/usb/host/ehci-npcm7xx.c5
-rw-r--r--drivers/usb/host/ehci-omap.c3
-rw-r--r--drivers/usb/host/ehci-orion.c9
-rw-r--r--drivers/usb/host/ehci-platform.c3
-rw-r--r--drivers/usb/host/ehci-sched.c3
-rw-r--r--drivers/usb/host/ehci-sh.c7
-rw-r--r--drivers/usb/host/ehci-spear.c3
-rw-r--r--drivers/usb/host/ehci-st.c12
-rw-r--r--drivers/usb/host/ehci.h10
-rw-r--r--drivers/usb/host/fhci-hcd.c3
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c3
-rw-r--r--drivers/usb/host/isp1362-hcd.c3
-rw-r--r--drivers/usb/host/ohci-at91.c9
-rw-r--r--drivers/usb/host/ohci-da8xx.c4
-rw-r--r--drivers/usb/host/ohci-exynos.c3
-rw-r--r--drivers/usb/host/ohci-nxp.c3
-rw-r--r--drivers/usb/host/ohci-platform.c3
-rw-r--r--drivers/usb/host/ohci-ppc-of.c3
-rw-r--r--drivers/usb/host/ohci-pxa27x.c3
-rw-r--r--drivers/usb/host/ohci-sm501.c3
-rw-r--r--drivers/usb/host/ohci-spear.c3
-rw-r--r--drivers/usb/host/ohci-st.c14
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c3
-rw-r--r--drivers/usb/host/uhci-platform.c3
-rw-r--r--drivers/usb/host/xhci-dbgtty.c7
-rw-r--r--drivers/usb/host/xhci-mem.c3
-rw-r--r--drivers/usb/host/xhci-plat.c1
-rw-r--r--drivers/usb/host/xhci-rcar.c1
-rw-r--r--drivers/usb/host/xhci-tegra.c28
-rw-r--r--drivers/usb/host/xhci.c11
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c2
-rw-r--r--drivers/usb/misc/cytherm.c12
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c41
-rw-r--r--drivers/usb/misc/onboard_usb_hub.h15
-rw-r--r--drivers/usb/misc/usb251xb.c2
-rw-r--r--drivers/usb/misc/usb_u132.h97
-rw-r--r--drivers/usb/misc/usbsevseg.c2
-rw-r--r--drivers/usb/mtu3/mtu3.h1
-rw-r--r--drivers/usb/mtu3/mtu3_host.c1
-rw-r--r--drivers/usb/musb/cppi_dma.h3
-rw-r--r--drivers/usb/musb/jz4740.c2
-rw-r--r--drivers/usb/musb/mediatek.c1
-rw-r--r--drivers/usb/musb/mpfs.c1
-rw-r--r--drivers/usb/musb/musb_core.c4
-rw-r--r--drivers/usb/musb/musb_dma.h4
-rw-r--r--drivers/usb/musb/musb_dsps.c2
-rw-r--r--drivers/usb/musb/musb_gadget.c2
-rw-r--r--drivers/usb/musb/sunxi.c1
-rw-r--r--drivers/usb/musb/tusb6010.c17
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c16
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c2
-rw-r--r--drivers/usb/renesas_usbhs/common.c2
-rw-r--r--drivers/usb/renesas_usbhs/rza.c2
-rw-r--r--drivers/usb/renesas_usbhs/rza2.c1
-rw-r--r--drivers/usb/serial/option.c7
-rw-r--r--drivers/usb/serial/usb-serial.c5
-rw-r--r--drivers/usb/serial/xr_serial.c89
-rw-r--r--drivers/usb/typec/altmodes/displayport.c5
-rw-r--r--drivers/usb/typec/bus.c12
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c53
-rw-r--r--drivers/usb/typec/mux/nb7vpq904m.c2
-rw-r--r--drivers/usb/typec/tcpm/Kconfig1
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c2
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c39
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c2
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c6
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c4
-rw-r--r--drivers/usb/typec/tcpm/tcpci_mt6370.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c177
-rw-r--r--drivers/usb/typec/ucsi/Kconfig1
-rw-r--r--drivers/usb/typec/ucsi/Makefile2
-rw-r--r--drivers/usb/typec/ucsi/debugfs.c99
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c15
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h24
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c1
-rw-r--r--drivers/usb/usbip/vudc_dev.c5
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h3
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c8
-rw-r--r--drivers/vhost/vdpa.c15
-rw-r--r--drivers/video/backlight/gpio_backlight.c3
-rw-r--r--drivers/video/backlight/led_bl.c2
-rw-r--r--drivers/video/backlight/lp855x_bl.c33
-rw-r--r--drivers/video/backlight/qcom-wled.c2
-rw-r--r--drivers/video/fbdev/Kconfig11
-rw-r--r--drivers/video/fbdev/Makefile1
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c8
-rw-r--r--drivers/video/fbdev/core/modedb.c22
-rw-r--r--drivers/video/fbdev/g364fb.c2
-rw-r--r--drivers/video/fbdev/mx3fb.c1695
-rw-r--r--drivers/video/fbdev/neofb.c27
-rw-r--r--drivers/video/fbdev/ssd1307fb.c4
-rw-r--r--drivers/virtio/virtio_ring.c412
-rw-r--r--drivers/virtio/virtio_vdpa.c17
-rw-r--r--drivers/watchdog/Kconfig100
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/armada_37xx_wdt.c1
-rw-r--r--drivers/watchdog/at91rm9200_wdt.c3
-rw-r--r--drivers/watchdog/cpwd.c2
-rw-r--r--drivers/watchdog/ftwdt010_wdt.c6
-rw-r--r--drivers/watchdog/gef_wdt.c2
-rw-r--r--drivers/watchdog/imx2_wdt.c5
-rw-r--r--drivers/watchdog/imx7ulp_wdt.c1
-rw-r--r--drivers/watchdog/intel-mid_wdt.c1
-rw-r--r--drivers/watchdog/lantiq_wdt.c3
-rw-r--r--drivers/watchdog/loongson1_wdt.c1
-rw-r--r--drivers/watchdog/marvell_gti_wdt.c340
-rw-r--r--drivers/watchdog/menz69_wdt.c1
-rw-r--r--drivers/watchdog/meson_gxbb_wdt.c21
-rw-r--r--drivers/watchdog/meson_wdt.c4
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c4
-rw-r--r--drivers/watchdog/mtk_wdt.c1
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c3
-rw-r--r--drivers/watchdog/pic32-dmt.c1
-rw-r--r--drivers/watchdog/pic32-wdt.c1
-rw-r--r--drivers/watchdog/pika_wdt.c2
-rw-r--r--drivers/watchdog/pm8916_wdt.c2
-rw-r--r--drivers/watchdog/qcom-wdt.c1
-rw-r--r--drivers/watchdog/rave-sp-wdt.c2
-rw-r--r--drivers/watchdog/riowd.c2
-rw-r--r--drivers/watchdog/rti_wdt.c48
-rw-r--r--drivers/watchdog/rza_wdt.c4
-rw-r--r--drivers/watchdog/rzg2l_wdt.c2
-rw-r--r--drivers/watchdog/s3c2410_wdt.c16
-rw-r--r--drivers/watchdog/sama5d4_wdt.c8
-rw-r--r--drivers/watchdog/sbsa_gwdt.c3
-rw-r--r--drivers/watchdog/simatic-ipc-wdt.c9
-rw-r--r--drivers/watchdog/starfive-wdt.c13
-rw-r--r--drivers/watchdog/stm32_iwdg.c3
-rw-r--r--drivers/watchdog/sunxi_wdt.c1
-rw-r--r--drivers/watchdog/watchdog_core.c2
-rw-r--r--drivers/watchdog/xilinx_wwdt.c7
-rw-r--r--fs/Kconfig.binfmt2
-rw-r--r--fs/binfmt_elf_fdpic.c38
-rw-r--r--fs/ceph/Makefile1
-rw-r--r--fs/ceph/acl.c4
-rw-r--r--fs/ceph/addr.c196
-rw-r--r--fs/ceph/caps.c235
-rw-r--r--fs/ceph/crypto.c673
-rw-r--r--fs/ceph/crypto.h288
-rw-r--r--fs/ceph/dir.c194
-rw-r--r--fs/ceph/export.c44
-rw-r--r--fs/ceph/file.c602
-rw-r--r--fs/ceph/inode.c625
-rw-r--r--fs/ceph/ioctl.c127
-rw-r--r--fs/ceph/mds_client.c676
-rw-r--r--fs/ceph/mds_client.h35
-rw-r--r--fs/ceph/quota.c14
-rw-r--r--fs/ceph/snap.c10
-rw-r--r--fs/ceph/super.c191
-rw-r--r--fs/ceph/super.h49
-rw-r--r--fs/ceph/xattr.c30
-rw-r--r--fs/debugfs/file.c48
-rw-r--r--fs/exportfs/expfs.c1
-rw-r--r--fs/f2fs/checkpoint.c2
-rw-r--r--fs/f2fs/compress.c14
-rw-r--r--fs/f2fs/data.c14
-rw-r--r--fs/f2fs/debug.c33
-rw-r--r--fs/f2fs/f2fs.h96
-rw-r--r--fs/f2fs/file.c60
-rw-r--r--fs/f2fs/gc.c18
-rw-r--r--fs/f2fs/inline.c3
-rw-r--r--fs/f2fs/inode.c35
-rw-r--r--fs/f2fs/recovery.c1
-rw-r--r--fs/f2fs/segment.c89
-rw-r--r--fs/f2fs/super.c40
-rw-r--r--fs/f2fs/sysfs.c18
-rw-r--r--fs/f2fs/xattr.c6
-rw-r--r--fs/fuse/dir.c159
-rw-r--r--fs/fuse/file.c115
-rw-r--r--fs/fuse/fuse_i.h18
-rw-r--r--fs/fuse/inode.c34
-rw-r--r--fs/fuse/readdir.c16
-rw-r--r--fs/gfs2/aops.c7
-rw-r--r--fs/gfs2/bmap.c2
-rw-r--r--fs/gfs2/glock.c47
-rw-r--r--fs/gfs2/glock.h9
-rw-r--r--fs/gfs2/glops.c2
-rw-r--r--fs/gfs2/incore.h7
-rw-r--r--fs/gfs2/inode.c14
-rw-r--r--fs/gfs2/lock_dlm.c5
-rw-r--r--fs/gfs2/log.c69
-rw-r--r--fs/gfs2/lops.c7
-rw-r--r--fs/gfs2/main.c10
-rw-r--r--fs/gfs2/ops_fstype.c42
-rw-r--r--fs/gfs2/quota.c368
-rw-r--r--fs/gfs2/recovery.c4
-rw-r--r--fs/gfs2/recovery.h2
-rw-r--r--fs/gfs2/super.c28
-rw-r--r--fs/gfs2/super.h1
-rw-r--r--fs/gfs2/sys.c12
-rw-r--r--fs/gfs2/util.c34
-rw-r--r--fs/kernfs/dir.c4
-rw-r--r--fs/kernfs/mount.c13
-rw-r--r--fs/lockd/mon.c3
-rw-r--r--fs/lockd/svc.c52
-rw-r--r--fs/lockd/svclock.c18
-rw-r--r--fs/locks.c7
-rw-r--r--fs/nfs/Kconfig6
-rw-r--r--fs/nfs/blocklayout/dev.c4
-rw-r--r--fs/nfs/callback.c23
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/dir.c15
-rw-r--r--fs/nfs/direct.c20
-rw-r--r--fs/nfs/dns_resolve.c12
-rw-r--r--fs/nfs/file.c2
-rw-r--r--fs/nfs/internal.h3
-rw-r--r--fs/nfs/nfs2xdr.c2
-rw-r--r--fs/nfs/nfs3client.c3
-rw-r--r--fs/nfs/nfs3xdr.c2
-rw-r--r--fs/nfs/nfs42.h1
-rw-r--r--fs/nfs/nfs42proc.c5
-rw-r--r--fs/nfs/nfs42xdr.c17
-rw-r--r--fs/nfs/nfs4client.c3
-rw-r--r--fs/nfs/nfs4proc.c17
-rw-r--r--fs/nfs/pnfs_dev.c2
-rw-r--r--fs/nfs/pnfs_nfs.c5
-rw-r--r--fs/nfs/read.c10
-rw-r--r--fs/nfsd/blocklayoutxdr.c9
-rw-r--r--fs/nfsd/cache.h8
-rw-r--r--fs/nfsd/flexfilelayoutxdr.c9
-rw-r--r--fs/nfsd/nfs3proc.c4
-rw-r--r--fs/nfsd/nfs4acl.c34
-rw-r--r--fs/nfsd/nfs4proc.c51
-rw-r--r--fs/nfsd/nfs4state.c162
-rw-r--r--fs/nfsd/nfs4xdr.c39
-rw-r--r--fs/nfsd/nfscache.c204
-rw-r--r--fs/nfsd/nfsctl.c1
-rw-r--r--fs/nfsd/nfsd.h7
-rw-r--r--fs/nfsd/nfsfh.c26
-rw-r--r--fs/nfsd/nfsfh.h6
-rw-r--r--fs/nfsd/nfssvc.c111
-rw-r--r--fs/nfsd/state.h3
-rw-r--r--fs/nfsd/stats.c2
-rw-r--r--fs/nfsd/stats.h7
-rw-r--r--fs/nfsd/trace.h27
-rw-r--r--fs/nfsd/vfs.c52
-rw-r--r--fs/nfsd/xdr4.h11
-rw-r--r--fs/ntfs3/super.c18
-rw-r--r--fs/proc/task_mmu.c6
-rw-r--r--fs/pstore/platform.c34
-rw-r--r--fs/stat.c17
-rw-r--r--fs/tracefs/Makefile1
-rw-r--r--fs/tracefs/event_inode.c807
-rw-r--r--fs/tracefs/inode.c157
-rw-r--r--fs/tracefs/internal.h29
-rw-r--r--include/asm-generic/current.h2
-rw-r--r--include/asm-generic/hyperv-tlfs.h1
-rw-r--r--include/asm-generic/ide_iops.h39
-rw-r--r--include/asm-generic/mshyperv.h17
-rw-r--r--include/asm-generic/preempt.h14
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h88
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h124
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h50
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h22
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h22
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8250.h7
-rw-r--r--include/dt-bindings/memory/mediatek,mt8188-memory-port.h489
-rw-r--r--include/kvm/arm_pmu.h4
-rw-r--r--include/linux/amd-iommu.h1
-rw-r--r--include/linux/audit.h2
-rw-r--r--include/linux/bpf.h2
-rw-r--r--include/linux/btf.h1
-rw-r--r--include/linux/ceph/ceph_fs.h68
-rw-r--r--include/linux/ceph/messenger.h40
-rw-r--r--include/linux/ceph/osd_client.h93
-rw-r--r--include/linux/ceph/rados.h4
-rw-r--r--include/linux/cgroup-defs.h14
-rw-r--r--include/linux/console.h3
-rw-r--r--include/linux/coresight.h59
-rw-r--r--include/linux/counter.h2
-rw-r--r--include/linux/cpufreq.h3
-rw-r--r--include/linux/dev_printk.h2
-rw-r--r--include/linux/device.h2
-rw-r--r--include/linux/dmar.h2
-rw-r--r--include/linux/dynamic_debug.h4
-rw-r--r--include/linux/elf-fdpic.h14
-rw-r--r--include/linux/export.h4
-rw-r--r--include/linux/extcon.h12
-rw-r--r--include/linux/fb.h10
-rw-r--r--include/linux/firmware/intel/stratix10-smc.h25
-rw-r--r--include/linux/firmware/intel/stratix10-svc-client.h5
-rw-r--r--include/linux/fs_uart_pd.h71
-rw-r--r--include/linux/ftrace.h5
-rw-r--r--include/linux/gameport.h2
-rw-r--r--include/linux/greybus/svc.h3
-rw-r--r--include/linux/hid-roccat.h2
-rw-r--r--include/linux/hid.h26
-rw-r--r--include/linux/hyperv.h6
-rw-r--r--include/linux/i2c-atr.h116
-rw-r--r--include/linux/iio/common/inv_sensors_timestamp.h95
-rw-r--r--include/linux/iio/types.h2
-rw-r--r--include/linux/intel_tpmi.h2
-rw-r--r--include/linux/interconnect-provider.h2
-rw-r--r--include/linux/iommu.h15
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/kasan.h2
-rw-r--r--include/linux/kernfs.h4
-rw-r--r--include/linux/kobject.h8
-rw-r--r--include/linux/kvm_host.h53
-rw-r--r--include/linux/leds.h3
-rw-r--r--include/linux/libata.h40
-rw-r--r--include/linux/lockd/lockd.h4
-rw-r--r--include/linux/mfd/88pm860x.h6
-rw-r--r--include/linux/mfd/abx500/ab8500.h4
-rw-r--r--include/linux/mfd/dbx500-prcmu.h21
-rw-r--r--include/linux/mfd/hi655x-pmic.h1
-rw-r--r--include/linux/mfd/max77686-private.h4
-rw-r--r--include/linux/mfd/rz-mtu3.h66
-rw-r--r--include/linux/mhi.h6
-rw-r--r--include/linux/micrel_phy.h7
-rw-r--r--include/linux/misc_cgroup.h28
-rw-r--r--include/linux/mtd/mtd.h2
-rw-r--r--include/linux/mtd/rawnand.h1
-rw-r--r--include/linux/nvmem-consumer.h4
-rw-r--r--include/linux/of.h4
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/peci.h4
-rw-r--r--include/linux/percpu_counter.h41
-rw-r--r--include/linux/perf/riscv_pmu.h12
-rw-r--r--include/linux/perf_event.h3
-rw-r--r--include/linux/phylink.h4
-rw-r--r--include/linux/platform_data/rtc-ds2404.h20
-rw-r--r--include/linux/platform_data/video-mx3fb.h50
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h19
-rw-r--r--include/linux/platform_data/x86/simatic-ipc-base.h5
-rw-r--r--include/linux/platform_data/x86/simatic-ipc.h6
-rw-r--r--include/linux/pwm.h6
-rw-r--r--include/linux/raid/pq.h4
-rw-r--r--include/linux/remoteproc.h4
-rw-r--r--include/linux/rmap.h1
-rw-r--r--include/linux/rpmsg.h15
-rw-r--r--include/linux/rtc.h1
-rw-r--r--include/linux/sched.h14
-rw-r--r--include/linux/serial_core.h18
-rw-r--r--include/linux/soundwire/sdw.h12
-rw-r--r--include/linux/soundwire/sdw_intel.h7
-rw-r--r--include/linux/string_choices.h1
-rw-r--r--include/linux/sunrpc/cache.h12
-rw-r--r--include/linux/sunrpc/clnt.h2
-rw-r--r--include/linux/sunrpc/stats.h23
-rw-r--r--include/linux/sunrpc/svc.h52
-rw-r--r--include/linux/sunrpc/svc_xprt.h38
-rw-r--r--include/linux/sunrpc/svcauth.h53
-rw-r--r--include/linux/sunrpc/svcsock.h9
-rw-r--r--include/linux/sunrpc/xdr.h8
-rw-r--r--include/linux/sunrpc/xprt.h2
-rw-r--r--include/linux/sysrq.h18
-rw-r--r--include/linux/tca6416_keypad.h1
-rw-r--r--include/linux/thermal.h85
-rw-r--r--include/linux/ti_wilink_st.h2
-rw-r--r--include/linux/trace_events.h2
-rw-r--r--include/linux/tracefs.h23
-rw-r--r--include/linux/tty.h18
-rw-r--r--include/linux/tty_buffer.h20
-rw-r--r--include/linux/tty_driver.h9
-rw-r--r--include/linux/tty_flip.h70
-rw-r--r--include/linux/tty_ldisc.h67
-rw-r--r--include/linux/tty_port.h7
-rw-r--r--include/linux/usb.h12
-rw-r--r--include/linux/usb/ch9.h5
-rw-r--r--include/linux/usb/chipidea.h1
-rw-r--r--include/linux/usb/composite.h23
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/linux/usb/phy.h13
-rw-r--r--include/linux/usb/tcpci.h1
-rw-r--r--include/linux/usb/typec_altmode.h2
-rw-r--r--include/linux/vdpa.h4
-rw-r--r--include/linux/virtio.h24
-rw-r--r--include/linux/workqueue.h115
-rw-r--r--include/linux/xarray.h18
-rw-r--r--include/media/cec.h11
-rw-r--r--include/media/davinci/vpif_types.h2
-rw-r--r--include/media/i2c/ds90ub9xx.h22
-rw-r--r--include/media/ipu-bridge.h (renamed from drivers/media/pci/intel/ipu3/cio2-bridge.h)97
-rw-r--r--include/media/ov_16bit_addr_reg_helpers.h92
-rw-r--r--include/media/v4l2-async.h238
-rw-r--r--include/media/v4l2-cci.h125
-rw-r--r--include/media/v4l2-fwnode.h70
-rw-r--r--include/media/v4l2-subdev.h21
-rw-r--r--include/net/ip.h3
-rw-r--r--include/net/ip6_fib.h5
-rw-r--r--include/net/ip_fib.h5
-rw-r--r--include/net/ip_tunnels.h15
-rw-r--r--include/net/scm.h14
-rw-r--r--include/net/sock.h29
-rw-r--r--include/rdma/ib_verbs.h2
-rw-r--r--include/rdma/iw_cm.h21
-rw-r--r--include/rv/da_monitor.h2
-rw-r--r--include/scsi/libsas.h32
-rw-r--r--include/scsi/scsi_host.h2
-rw-r--r--include/scsi/scsi_transport_iscsi.h1
-rw-r--r--include/soc/mediatek/smi.h1
-rw-r--r--include/sound/dmaengine_pcm.h2
-rw-r--r--include/sound/soc-component.h4
-rw-r--r--include/target/iscsi/iscsi_target_core.h4
-rw-r--r--include/trace/events/fsi.h31
-rw-r--r--include/trace/events/fsi_master_i2cr.h107
-rw-r--r--include/trace/events/sunrpc.h80
-rw-r--r--include/trace/events/task.h2
-rw-r--r--include/uapi/linux/cgroupstats.h2
-rw-r--r--include/uapi/linux/elf-fdpic.h15
-rw-r--r--include/uapi/linux/fsi.h10
-rw-r--r--include/uapi/linux/fuse.h60
-rw-r--r--include/uapi/linux/gsmmux.h118
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h1
-rw-r--r--include/uapi/linux/rpmsg.h10
-rw-r--r--include/uapi/linux/serial_core.h44
-rw-r--r--include/uapi/linux/sync_file.h2
-rw-r--r--include/uapi/linux/usb/ch11.h6
-rw-r--r--include/uapi/linux/usb/ch9.h5
-rw-r--r--include/uapi/linux/vhost_types.h4
-rw-r--r--include/uapi/linux/videodev2.h2
-rw-r--r--include/uapi/rdma/bnxt_re-abi.h7
-rw-r--r--include/uapi/rdma/irdma-abi.h9
-rw-r--r--include/uapi/scsi/scsi_bsg_ufs.h52
-rw-r--r--include/ufs/ufs.h90
-rw-r--r--include/ufs/ufs_quirks.h6
-rw-r--r--include/ufs/ufshcd.h60
-rw-r--r--include/ufs/ufshci.h53
-rw-r--r--include/ufs/unipro.h6
-rw-r--r--init/main.c1
-rw-r--r--io_uring/fdinfo.c2
-rw-r--r--io_uring/io-wq.c10
-rw-r--r--io_uring/io-wq.h1
-rw-r--r--io_uring/io_uring.c56
-rw-r--r--io_uring/sqpoll.c4
-rw-r--r--kernel/auditsc.c2
-rw-r--r--kernel/bpf/bpf_local_storage.c49
-rw-r--r--kernel/bpf/btf.c2
-rw-r--r--kernel/bpf/syscall.c2
-rw-r--r--kernel/bpf/trampoline.c5
-rw-r--r--kernel/cgroup/cgroup-v1.c2
-rw-r--r--kernel/cgroup/cgroup.c109
-rw-r--r--kernel/cgroup/cpuset.c264
-rw-r--r--kernel/cgroup/misc.c55
-rw-r--r--kernel/cgroup/namespace.c6
-rw-r--r--kernel/cgroup/rstat.c12
-rw-r--r--kernel/configs/debug.config2
-rw-r--r--kernel/configs/kvm_guest.config1
-rw-r--r--kernel/configs/nopm.config2
-rw-r--r--kernel/configs/rust.config1
-rw-r--r--kernel/configs/x86_debug.config1
-rw-r--r--kernel/configs/xen.config2
-rw-r--r--kernel/cpu.c24
-rw-r--r--kernel/debug/debug_core.c2
-rw-r--r--kernel/debug/kdb/kdb_io.c2
-rw-r--r--kernel/fork.c15
-rw-r--r--kernel/kprobes.c6
-rw-r--r--kernel/power/poweroff.c2
-rw-r--r--kernel/printk/internal.h2
-rw-r--r--kernel/printk/printk.c213
-rw-r--r--kernel/printk/printk_ringbuffer.c2
-rw-r--r--kernel/printk/printk_safe.c9
-rw-r--r--kernel/rcu/tree_stall.h2
-rw-r--r--kernel/sched/fair.c2
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/trace/Makefile1
-rw-r--r--kernel/trace/ftrace.c10
-rw-r--r--kernel/trace/ring_buffer.c20
-rw-r--r--kernel/trace/trace.c123
-rw-r--r--kernel/trace/trace.h14
-rw-r--r--kernel/trace/trace_btf.c122
-rw-r--r--kernel/trace/trace_btf.h11
-rw-r--r--kernel/trace/trace_entries.h2
-rw-r--r--kernel/trace/trace_eprobe.c22
-rw-r--r--kernel/trace/trace_events.c76
-rw-r--r--kernel/trace/trace_events_filter.c315
-rw-r--r--kernel/trace/trace_events_user.c15
-rw-r--r--kernel/trace/trace_export.c9
-rw-r--r--kernel/trace/trace_fprobe.c59
-rw-r--r--kernel/trace/trace_hwlat.c2
-rw-r--r--kernel/trace/trace_kprobe.c1
-rw-r--r--kernel/trace/trace_probe.c499
-rw-r--r--kernel/trace/trace_probe.h27
-rw-r--r--kernel/trace/trace_uprobe.c1
-rw-r--r--kernel/workqueue.c1616
-rw-r--r--kernel/workqueue_internal.h2
-rw-r--r--lib/Kconfig.debug7
-rw-r--r--lib/dynamic_debug.c8
-rw-r--r--lib/idr.c2
-rw-r--r--lib/kobject.c62
-rw-r--r--lib/percpu_counter.c62
-rw-r--r--lib/raid6/Makefile1
-rw-r--r--lib/raid6/algos.c16
-rw-r--r--lib/raid6/loongarch.h38
-rw-r--r--lib/raid6/loongarch_simd.c422
-rw-r--r--lib/raid6/recov_loongarch_simd.c513
-rw-r--r--lib/raid6/test/Makefile12
-rw-r--r--lib/test_scanf.c2
-rw-r--r--lib/xarray.c8
-rw-r--r--mm/filemap.c3
-rw-r--r--mm/kasan/init.c18
-rw-r--r--mm/kasan/kasan.h6
-rw-r--r--mm/kfence/core.c5
-rw-r--r--mm/kmemleak.c5
-rw-r--r--mm/ksm.c4
-rw-r--r--mm/memcontrol.c22
-rw-r--r--mm/memfd.c2
-rw-r--r--mm/memory-failure.c37
-rw-r--r--mm/page_alloc.c21
-rw-r--r--mm/pagewalk.c2
-rw-r--r--mm/percpu.c69
-rw-r--r--mm/util.c4
-rw-r--r--mm/vmalloc.c26
-rw-r--r--net/bluetooth/rfcomm/tty.c31
-rw-r--r--net/bpf/test_run.c1
-rw-r--r--net/can/j1939/socket.c10
-rw-r--r--net/ceph/messenger.c78
-rw-r--r--net/ceph/messenger_v1.c98
-rw-r--r--net/ceph/messenger_v2.c289
-rw-r--r--net/ceph/osd_client.c334
-rw-r--r--net/core/flow_dissector.c3
-rw-r--r--net/core/skbuff.c54
-rw-r--r--net/core/skmsg.c12
-rw-r--r--net/core/sock.c27
-rw-r--r--net/core/sock_map.c36
-rw-r--r--net/handshake/netlink.c18
-rw-r--r--net/ipv4/fib_semantics.c5
-rw-r--r--net/ipv4/fib_trie.c3
-rw-r--r--net/ipv4/igmp.c3
-rw-r--r--net/ipv4/ip_forward.c1
-rw-r--r--net/ipv4/ip_input.c3
-rw-r--r--net/ipv4/ip_output.c9
-rw-r--r--net/ipv4/ip_sockglue.c2
-rw-r--r--net/ipv4/ipmr.c1
-rw-r--r--net/ipv4/route.c1
-rw-r--r--net/ipv4/tcp.c4
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv4/udp.c6
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/ip6_input.c3
-rw-r--r--net/ipv6/ip6_output.c3
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/ping.c2
-rw-r--r--net/ipv6/raw.c2
-rw-r--r--net/ipv6/route.c3
-rw-r--r--net/ipv6/udp.c2
-rw-r--r--net/kcm/kcmsock.c2
-rw-r--r--net/mptcp/protocol.c23
-rw-r--r--net/netfilter/ipset/ip_set_hash_netportnet.c1
-rw-r--r--net/netfilter/nf_tables_api.c54
-rw-r--r--net/netfilter/nfnetlink_osf.c8
-rw-r--r--net/netfilter/nft_exthdr.c42
-rw-r--r--net/netfilter/nft_set_rbtree.c8
-rw-r--r--net/netfilter/xt_sctp.c2
-rw-r--r--net/netfilter/xt_u32.c21
-rw-r--r--net/nfc/nci/uart.c23
-rw-r--r--net/sched/sch_fq_pie.c27
-rw-r--r--net/sched/sch_plug.c2
-rw-r--r--net/sched/sch_qfq.c22
-rw-r--r--net/sctp/proc.c2
-rw-r--r--net/sctp/socket.c10
-rw-r--r--net/socket.c15
-rw-r--r--net/sunrpc/.kunitconfig1
-rw-r--r--net/sunrpc/Kconfig35
-rw-r--r--net/sunrpc/auth_gss/Makefile2
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_internal.h23
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_keys.c84
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c257
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seal.c69
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seqnum.c106
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_test.c196
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_unseal.c77
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c287
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c7
-rw-r--r--net/sunrpc/clnt.c8
-rw-r--r--net/sunrpc/svc.c99
-rw-r--r--net/sunrpc/svc_xprt.c126
-rw-r--r--net/sunrpc/svcauth.c35
-rw-r--r--net/sunrpc/svcauth_unix.c9
-rw-r--r--net/sunrpc/svcsock.c131
-rw-r--r--net/sunrpc/xdr.c77
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h1
-rw-r--r--net/sunrpc/xprtsock.c55
-rw-r--r--net/unix/af_unix.c2
-rw-r--r--net/unix/scm.c6
-rw-r--r--net/xdp/xsk.c22
-rw-r--r--net/xdp/xsk_diag.c3
-rw-r--r--scripts/Makefile.extrawarn115
-rw-r--r--scripts/Makefile.modinst55
-rw-r--r--scripts/Makefile.modpost1
-rw-r--r--scripts/Makefile.package57
-rwxr-xr-xscripts/bpf_doc.py56
-rwxr-xr-xscripts/depmod.sh39
-rwxr-xr-xscripts/dummy-tools/gcc3
-rw-r--r--scripts/kconfig/Makefile15
-rw-r--r--scripts/kconfig/confdata.c21
-rw-r--r--scripts/kconfig/expr.h1
-rw-r--r--scripts/kconfig/lkc.h1
-rw-r--r--scripts/kconfig/lxdialog/dialog.h10
-rw-r--r--scripts/kconfig/lxdialog/textbox.c68
-rw-r--r--scripts/kconfig/mconf.c89
-rw-r--r--scripts/kconfig/menu.c30
-rw-r--r--scripts/kconfig/nconf.c113
-rw-r--r--scripts/kconfig/nconf.gui.c37
-rw-r--r--scripts/kconfig/nconf.h5
-rw-r--r--scripts/kconfig/preprocess.c3
-rwxr-xr-xscripts/kconfig/qconf-cfg.sh25
-rw-r--r--scripts/kconfig/qconf.cc40
-rw-r--r--scripts/mod/modpost.c173
-rw-r--r--scripts/mod/modpost.h22
-rwxr-xr-xscripts/package/builddeb29
-rwxr-xr-xscripts/package/debian/rules33
-rwxr-xr-xscripts/package/install-extmod-build39
-rw-r--r--scripts/package/kernel.spec117
-rwxr-xr-xscripts/package/mkdebian33
-rwxr-xr-xscripts/package/mkspec148
-rwxr-xr-xscripts/remove-stale-files2
-rwxr-xr-xscripts/setlocalversion38
-rw-r--r--security/landlock/ruleset.h2
-rw-r--r--security/tomoyo/common.c1
-rw-r--r--security/tomoyo/common.h6
-rw-r--r--security/tomoyo/domain.c5
-rw-r--r--sound/Kconfig2
-rw-r--r--sound/core/pcm_lib.c8
-rw-r--r--sound/core/seq/seq_memory.c9
-rw-r--r--sound/isa/sb/emu8000_pcm.c2
-rw-r--r--sound/pci/hda/patch_cs8409.c2
-rw-r--r--sound/pci/hda/patch_cs8409.h1
-rw-r--r--sound/pci/hda/patch_realtek.c30
-rw-r--r--sound/pci/hda/tas2781_hda_i2c.c16
-rw-r--r--sound/soc/amd/yc/acp6x-mach.c14
-rw-r--r--sound/soc/atmel/mchp-pdmc.c2
-rw-r--r--sound/soc/codecs/Kconfig6
-rw-r--r--sound/soc/codecs/Makefile8
-rw-r--r--sound/soc/codecs/cs35l45.c11
-rw-r--r--sound/soc/codecs/cs35l56-shared.c29
-rw-r--r--sound/soc/codecs/cs42l43.c3
-rw-r--r--sound/soc/codecs/cx20442.c4
-rw-r--r--sound/soc/codecs/rt5645.c16
-rw-r--r--sound/soc/codecs/wcd-clsh-v2.c8
-rw-r--r--sound/soc/intel/avs/pcm.c22
-rw-r--r--sound/soc/soc-component.c4
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c10
-rw-r--r--sound/soc/stm/stm32_sai_sub.c2
-rw-r--r--sound/soc/ti/ams-delta.c2
-rw-r--r--sound/usb/midi2.c7
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h1
-rw-r--r--tools/bpf/bpftool/link.c2
-rw-r--r--tools/iio/iio_generic_buffer.c17
-rw-r--r--tools/lib/perf/mmap.c66
-rw-r--r--tools/mm/Makefile4
-rw-r--r--tools/perf/tests/mmap-basic.c6
-rw-r--r--tools/power/cpupower/Makefile2
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c51
-rw-r--r--tools/power/x86/intel-speed-select/isst-display.c2
-rw-r--r--tools/power/x86/intel-speed-select/isst.h2
-rw-r--r--tools/testing/radix-tree/multiorder.c68
-rw-r--r--tools/testing/selftests/bpf/Makefile12
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c5
-rw-r--r--tools/testing/selftests/bpf/prog_tests/d_path.c19
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_storage_omem_uncharge.c56
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h26
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_listen.c7
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_tracing_net.h1
-rw-r--r--tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c61
-rw-r--r--tools/testing/selftests/cgroup/.gitignore1
-rw-r--r--tools/testing/selftests/cgroup/Makefile2
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.c2
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.h2
-rw-r--r--tools/testing/selftests/cgroup/test_core.c2
-rw-r--r--tools/testing/selftests/cgroup/test_cpuset.c275
-rwxr-xr-xtools/testing/selftests/cgroup/test_cpuset_prs.sh2
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc20
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/fprobe_syntax_errors.tc10
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc9
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc9
-rw-r--r--tools/testing/selftests/kvm/Makefile20
-rw-r--r--tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c8
-rw-r--r--tools/testing/selftests/kvm/aarch64/arch_timer.c22
-rw-r--r--tools/testing/selftests/kvm/aarch64/debug-exceptions.c8
-rw-r--r--tools/testing/selftests/kvm/aarch64/get-reg-list.c554
-rw-r--r--tools/testing/selftests/kvm/aarch64/hypercalls.c20
-rw-r--r--tools/testing/selftests/kvm/aarch64/page_fault_test.c17
-rw-r--r--tools/testing/selftests/kvm/aarch64/vgic_irq.c3
-rw-r--r--tools/testing/selftests/kvm/get-reg-list.c401
-rw-r--r--tools/testing/selftests/kvm/guest_print_test.c219
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/arch_timer.h12
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/ucall.h20
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util_base.h21
-rw-r--r--tools/testing/selftests/kvm/include/riscv/processor.h3
-rw-r--r--tools/testing/selftests/kvm/include/riscv/ucall.h20
-rw-r--r--tools/testing/selftests/kvm/include/s390x/ucall.h19
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h20
-rw-r--r--tools/testing/selftests/kvm/include/ucall_common.h98
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h5
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/ucall.h13
-rw-r--r--tools/testing/selftests/kvm/kvm_page_table_test.c8
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/ucall.c11
-rw-r--r--tools/testing/selftests/kvm/lib/guest_sprintf.c307
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c6
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/ucall.c11
-rw-r--r--tools/testing/selftests/kvm/lib/s390x/ucall.c10
-rw-r--r--tools/testing/selftests/kvm/lib/sparsebit.c1
-rw-r--r--tools/testing/selftests/kvm/lib/string_override.c9
-rw-r--r--tools/testing/selftests/kvm/lib/test_util.c15
-rw-r--r--tools/testing/selftests/kvm/lib/ucall_common.c44
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c18
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/ucall.c36
-rw-r--r--tools/testing/selftests/kvm/max_guest_memory_test.c2
-rw-r--r--tools/testing/selftests/kvm/memslot_perf_test.c4
-rw-r--r--tools/testing/selftests/kvm/riscv/get-reg-list.c872
-rw-r--r--tools/testing/selftests/kvm/s390x/cmma_test.c62
-rw-r--r--tools/testing/selftests/kvm/s390x/debug_test.c160
-rw-r--r--tools/testing/selftests/kvm/s390x/memop.c13
-rw-r--r--tools/testing/selftests/kvm/s390x/tprot.c11
-rw-r--r--tools/testing/selftests/kvm/set_memory_region_test.c21
-rw-r--r--tools/testing/selftests/kvm/steal_time.c20
-rw-r--r--tools/testing/selftests/kvm/x86_64/cpuid_test.c12
-rw-r--r--tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c18
-rw-r--r--tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c3
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_features.c29
-rw-r--r--tools/testing/selftests/kvm/x86_64/kvm_pv_test.c8
-rw-r--r--tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c35
-rw-r--r--tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c16
-rw-r--r--tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c317
-rw-r--r--tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c6
-rw-r--r--tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c6
-rw-r--r--tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c22
-rw-r--r--tools/testing/selftests/kvm/x86_64/sync_regs_test.c132
-rw-r--r--tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c34
-rw-r--r--tools/testing/selftests/kvm/x86_64/userspace_io_test.c10
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c31
-rw-r--r--tools/testing/selftests/kvm/x86_64/xapic_state_test.c8
-rw-r--r--tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c29
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c20
-rw-r--r--tools/testing/selftests/landlock/fs_test.c8
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh155
-rw-r--r--tools/testing/selftests/riscv/Makefile2
-rw-r--r--tools/testing/selftests/riscv/mm/.gitignore2
-rw-r--r--tools/testing/selftests/riscv/mm/Makefile15
-rw-r--r--tools/testing/selftests/riscv/mm/testcases/mmap_bottomup.c35
-rw-r--r--tools/testing/selftests/riscv/mm/testcases/mmap_default.c35
-rw-r--r--tools/testing/selftests/riscv/mm/testcases/mmap_test.h64
-rwxr-xr-xtools/testing/selftests/riscv/mm/testcases/run_mmap.sh12
-rw-r--r--tools/testing/selftests/x86/test_shadow_stack.c2
-rw-r--r--tools/workqueue/wq_dump.py177
-rw-r--r--tools/workqueue/wq_monitor.py21
-rw-r--r--virt/kvm/Kconfig3
-rw-r--r--virt/kvm/kvm_main.c54
3574 files changed, 116857 insertions, 52382 deletions
diff --git a/.gitignore b/.gitignore
index 9fd4c9533b3d..0bbae167bf93 100644
--- a/.gitignore
+++ b/.gitignore
@@ -74,7 +74,7 @@ modules.order
#
# RPM spec file (make rpm-pkg)
#
-/*.spec
+/kernel.spec
/rpmbuild/
#
diff --git a/.mailmap b/.mailmap
index e50662536c48..a0a6efe87186 100644
--- a/.mailmap
+++ b/.mailmap
@@ -139,6 +139,9 @@ Daniel Borkmann <daniel@iogearbox.net> <dborkman@redhat.com>
Daniel Borkmann <daniel@iogearbox.net> <dxchgb@gmail.com>
David Brownell <david-b@pacbell.net>
David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org>
+David Rheinsberg <david@readahead.eu> <dh.herrmann@gmail.com>
+David Rheinsberg <david@readahead.eu> <dh.herrmann@googlemail.com>
+David Rheinsberg <david@readahead.eu> <david.rheinsberg@gmail.com>
David Woodhouse <dwmw2@shinybook.infradead.org>
Dedy Lansky <quic_dlansky@quicinc.com> <dlansky@codeaurora.org>
Deepak Kumar Singh <quic_deesin@quicinc.com> <deesin@codeaurora.org>
diff --git a/CREDITS b/CREDITS
index 8b4882024635..f33a33fd2371 100644
--- a/CREDITS
+++ b/CREDITS
@@ -666,11 +666,6 @@ S: Tamsui town, Taipei county,
S: Taiwan 251
S: Republic of China
-N: Reinette Chatre
-E: reinette.chatre@intel.com
-D: WiMedia Link Protocol implementation
-D: UWB stack bits and pieces
-
N: Michael Elizabeth Chastain
E: mec@shout.net
D: Configure, Menuconfig, xconfig
@@ -3023,12 +3018,6 @@ S: Demonstratsii 8-382
S: Tula 300000
S: Russia
-N: Inaky Perez-Gonzalez
-E: inaky.perez-gonzalez@intel.com
-D: UWB stack, HWA-RC driver and HWA-HC drivers
-D: Wireless USB additions to the USB stack
-D: WiMedia Link Protocol bits and pieces
-
N: Gordon Peters
E: GordPeters@smarttech.com
D: Isochronous receive for IEEE 1394 driver (OHCI module).
diff --git a/Documentation/ABI/stable/sysfs-bus-mhi b/Documentation/ABI/stable/sysfs-bus-mhi
index 96ccc3385a2b..1a47f9e0cc84 100644
--- a/Documentation/ABI/stable/sysfs-bus-mhi
+++ b/Documentation/ABI/stable/sysfs-bus-mhi
@@ -1,7 +1,7 @@
What: /sys/bus/mhi/devices/.../serialnumber
Date: Sept 2020
KernelVersion: 5.10
-Contact: Bhaumik Bhatt <bbhatt@codeaurora.org>
+Contact: mhi@lists.linux.dev
Description: The file holds the serial number of the client device obtained
using a BHI (Boot Host Interface) register read after at least
one attempt to power up the device has been done. If read
@@ -12,7 +12,7 @@ Users: Any userspace application or clients interested in device info.
What: /sys/bus/mhi/devices/.../oem_pk_hash
Date: Sept 2020
KernelVersion: 5.10
-Contact: Bhaumik Bhatt <bbhatt@codeaurora.org>
+Contact: mhi@lists.linux.dev
Description: The file holds the OEM PK Hash value of the endpoint device
obtained using a BHI (Boot Host Interface) register read after
at least one attempt to power up the device has been done. If
diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd
index 534b7a3d59fc..825e619250bf 100644
--- a/Documentation/ABI/stable/sysfs-driver-dma-idxd
+++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd
@@ -84,7 +84,7 @@ What: /sys/bus/dsa/devices/dsa<m>/pasid_enabled
Date: Oct 27, 2020
KernelVersion: 5.11.0
Contact: dmaengine@vger.kernel.org
-Description: To indicate if PASID (process address space identifier) is
+Description: To indicate if user PASID (process address space identifier) is
enabled or not for this device.
What: /sys/bus/dsa/devices/dsa<m>/state
diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
index 60953903d007..2cdfd09123da 100644
--- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io
+++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
@@ -662,3 +662,56 @@ Description: This file shows the system reset cause due to AC power failure.
Value 1 in file means this is reset cause, 0 - otherwise.
The file is read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld5_pn
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld5_version
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld5_version_min
+Date: August 2023
+KernelVersion: 6.6
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description:	These files show the part number, version and minor version
+		with which the fifth CPLD device on the system has been
+		burned.
+
+ The files are read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/jtag_cap
+Date: August 2023
+KernelVersion: 6.6
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description:	This file indicates which method is available for CPLD/FPGA
+		device field update through the JTAG chain:
+
+ b00 - field update through LPC bus register memory space.
+ b01 - Reserved.
+ b10 - Reserved.
+ b11 - field update through CPU GPIOs bit-banging.
+
+ The file is read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/lid_open
+Date: August 2023
+KernelVersion: 6.6
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description:	1 - indicates that the system lid is open, 0 - otherwise.
+
+ The file is read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_long_pwr_pb
+Date: August 2023
+KernelVersion: 6.6
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description:	This file, if set to 1, indicates that the system has been
+		reset by a long press of the power button.
+
+ The file is read only.
+
+What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/reset_swb_dc_dc_pwr_fail
+Date: August 2023
+KernelVersion: 6.6
+Contact: Vadim Pasternak <vadimp@nvidia.com>
+Description:	This file shows 1 if the system reset was caused by a failure
+		of any of the DC-DC power converter devices on the switch
+		board.
+
+ The file is read only.
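A minimal Python sketch of how the attributes documented above might be read and
decoded from user space; the hwmon path follows the layout described here, while
everything else (values, error handling) is an illustrative assumption::

    import glob

    # Decode table mirroring the documented jtag_cap values; b01/b10 are reserved.
    JTAG_CAP = {
        0b00: "field update through LPC bus register memory space",
        0b11: "field update through CPU GPIOs bit-banging",
    }

    base = glob.glob("/sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*")[0]

    def read_attr(name):
        with open(f"{base}/{name}") as f:
            return f.read().strip()

    cap = int(read_attr("jtag_cap"), 0)
    print("JTAG update method:", JTAG_CAP.get(cap, "reserved"))
    print("Lid open:", read_attr("lid_open") == "1")
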
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-midi2 b/Documentation/ABI/testing/configfs-usb-gadget-midi2
new file mode 100644
index 000000000000..0eac3aaba137
--- /dev/null
+++ b/Documentation/ABI/testing/configfs-usb-gadget-midi2
@@ -0,0 +1,54 @@
+What: /config/usb-gadget/gadget/functions/midi2.name
+Date: Jul 2023
+KernelVersion: 6.6
+Description:
+ The attributes:
+
+ ============ ===============================================
+ process_ump Flag to process UMP Stream messages (0 or 1)
+ static_block Flag for static blocks (0 or 1)
+ iface_name MIDI interface name string
+ ============ ===============================================
+
+What: /config/usb-gadget/gadget/functions/midi2.name/ep.number
+Date: Jul 2023
+KernelVersion: 6.6
+Description:
+ This group contains a UMP Endpoint configuration.
+		Endpoint numbers start at 0 and may go up to 3.
+
+ The attributes:
+
+ ============= ===============================================
+ protocol_caps MIDI protocol capabilities (1, 2 or 3 for both)
+ protocol Default MIDI protocol (1 or 2)
+ ep_name UMP Endpoint name string
+ product_id Product ID string
+		manufacturer  Manufacturer ID (24 bit)
+ family Device family ID (16 bit)
+ model Device model ID (16 bit)
+ sw_revision Software Revision (32 bit)
+ ============= ===============================================
+
+What: /config/usb-gadget/gadget/functions/midi2.name/ep.number/block.number
+Date: Jul 2023
+KernelVersion: 6.6
+Description:
+ This group contains a UMP Function Block configuration.
+		Block numbers start at 0 and may go up to 31.
+
+ The attributes:
+
+ ================= ==============================================
+ name Function Block name string
+ direction 1: input, 2: output, 3: bidirectional
+ first_group The first UMP Group number (0-15)
+ num_groups The number of groups in this FB (1-16)
+ midi1_first_group The first UMP Group number for MIDI 1.0 (0-15)
+ midi1_num_groups The number of groups for MIDI 1.0 (0-16)
+ ui_hint 0: unknown, 1: receiver, 2: sender, 3: both
+		midi_ci_version   Supported MIDI-CI version number (8 bit)
+ is_midi1 Legacy MIDI 1.0 device (0, 1 or 2)
+ sysex8_streams Max number of SysEx8 streams (8 bit)
+ active Active FB flag (0 or 1)
+ ================= ==============================================
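A minimal Python sketch of driving this configfs layout from user space, assuming
configfs is mounted at /sys/kernel/config and using a hypothetical gadget "g1" with
a "midi2.usb0" function; all attribute values are illustrative::

    import os

    FN = "/sys/kernel/config/usb_gadget/g1/functions/midi2.usb0"

    def set_attr(path, value):
        with open(path, "w") as f:
            f.write(str(value))

    # ep.0 and block.0 follow the numbering described above; further
    # endpoints/blocks would be ep.1 .. ep.3 and block.1 .. block.31.
    os.makedirs(f"{FN}/ep.0/block.0", exist_ok=True)

    set_attr(f"{FN}/iface_name", "MIDI 2.0 Gadget")
    set_attr(f"{FN}/ep.0/protocol", 2)           # default to the MIDI 2.0 protocol
    set_attr(f"{FN}/ep.0/block.0/direction", 3)  # bidirectional
    set_attr(f"{FN}/ep.0/block.0/num_groups", 1)
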
diff --git a/Documentation/ABI/testing/debugfs-tpmi b/Documentation/ABI/testing/debugfs-tpmi
new file mode 100644
index 000000000000..597f0475fe6e
--- /dev/null
+++ b/Documentation/ABI/testing/debugfs-tpmi
@@ -0,0 +1,31 @@
+What: /sys/kernel/debug/tpmi-<n>/pfs_dump
+Date: November 2023
+KernelVersion: 6.6
+Contact: srinivas.pandruvada@linux.intel.com
+Description:
+The PFS (PM Feature Structure) table shows details of each power
+management feature. This includes: tpmi_id, number of entries, entry
+size, offset, vsec offset, lock status and disabled status.
+Users: Debugging, any user space test suite
+
+What: /sys/kernel/debug/tpmi-<n>/tpmi-id-<n>/mem_dump
+Date: November 2023
+KernelVersion: 6.6
+Contact: srinivas.pandruvada@linux.intel.com
+Description:
+Shows the memory dump of the MMIO region for a TPMI ID.
+Users: Debugging, any user space test suite
+
+What: /sys/kernel/debug/tpmi-<n>/tpmi-id-<n>/mem_write
+Date: November 2023
+KernelVersion: 6.6
+Contact: srinivas.pandruvada@linux.intel.com
+Description:
+Allows writing at any offset. It doesn't check for read/write access,
+as the hardware will not allow writes to read-only memory. Writes are
+done at offsets that are multiples of 4. The format is instance,offset,contents.
+Example:
+echo 0,0x20,0xff > mem_write
+echo 1,64,64 > mem_write
+Users: Debugging, any user space test suite
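A minimal Python sketch that builds the instance,offset,contents string described
above; the tpmi-<n> directory name is a hypothetical example and the offset check
simply enforces the documented multiple-of-4 rule::

    TPMI_DIR = "/sys/kernel/debug/tpmi-0000:00:03.1/tpmi-id-02"  # hypothetical

    def tpmi_mem_write(instance, offset, value):
        # Documented format: instance,offset,contents, offset a multiple of 4.
        if offset % 4:
            raise ValueError("offset must be a multiple of 4")
        with open(f"{TPMI_DIR}/mem_write", "w") as f:
            f.write(f"{instance},{offset:#x},{value:#x}")

    tpmi_mem_write(0, 0x20, 0xff)
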
diff --git a/Documentation/ABI/testing/sysfs-bus-counter b/Documentation/ABI/testing/sysfs-bus-counter
index dc3b3a5c876b..73ac84c0bca7 100644
--- a/Documentation/ABI/testing/sysfs-bus-counter
+++ b/Documentation/ABI/testing/sysfs-bus-counter
@@ -22,11 +22,11 @@ Description:
phase clock.
What: /sys/bus/counter/devices/counterX/external_input_phase_clock_select_available
-KernelVersion: 6.4
-Contact: linux-iio@vger.kernel.org
+KernelVersion: 6.4
+Contact: linux-iio@vger.kernel.org
Description:
- Discrete set of available values for the respective device
- configuration are listed in this file.
+ Discrete set of available values for the respective device
+ configuration are listed in this file.
What: /sys/bus/counter/devices/counterX/countY/count
KernelVersion: 5.2
diff --git a/Documentation/ABI/testing/sysfs-bus-fsi-devices-sbefifo b/Documentation/ABI/testing/sysfs-bus-fsi-devices-sbefifo
index 1329b0bb2882..c7393b4dd2d8 100644
--- a/Documentation/ABI/testing/sysfs-bus-fsi-devices-sbefifo
+++ b/Documentation/ABI/testing/sysfs-bus-fsi-devices-sbefifo
@@ -7,4 +7,4 @@ Description:
by the driver. A value of 1 indicates that a timeout has
occurred and no transfers have completed since the timeout. A
value of 0 indicates that no timeout has occurred, or if one
- has, more recent transfers have completed successful.
+ has, more recent transfers have completed successfully.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 7140e8e7313f..a2854dc9a839 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -2163,3 +2163,19 @@ Contact: linux-iio@vger.kernel.org
Description:
An example format is 16-bytes, 2-digits-per-byte, HEX-string
representing the sensor unique ID number.
+
+What: /sys/.../events/in_proximity_thresh_either_runningperiod
+KernelVersion: 6.6
+Contact: linux-iio@vger.kernel.org
+Description:
+		A running period of time (in seconds) during which
+		in_proximity_thresh_either_runningcount conditions must
+		occur before an event is generated. If direction is not
+		specified then this period applies to both directions.
+
+What: /sys/.../events/in_proximity_thresh_either_runningcount
+KernelVersion: 6.6
+Contact: linux-iio@vger.kernel.org
+Description:
+ Number of conditions that must occur, during a running
+ period, before an event is generated.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818
index f6c035752639..31dbb390573f 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818
+++ b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818
@@ -7,6 +7,8 @@ Description:
- auto -> Adjust bandpass filter to track changes in input clock rate.
- manual -> disable/unregister the clock rate notifier / input clock tracking.
+ - bypass -> bypass low pass filter, high pass filter and disable/unregister
+ the clock rate notifier
What: /sys/bus/iio/devices/iio:deviceX/filter_mode
KernelVersion:
diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt
index 76ab3e1fe374..221b6c75ed93 100644
--- a/Documentation/ABI/testing/sysfs-bus-thunderbolt
+++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt
@@ -1,7 +1,7 @@
What: /sys/bus/thunderbolt/devices/.../domainX/boot_acl
Date: Jun 2018
KernelVersion: 4.17
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: Holds a comma separated list of device unique_ids that
are allowed to be connected automatically during system
startup (e.g boot devices). The list always contains
@@ -33,7 +33,7 @@ Description: This attribute tells whether the system supports
What: /sys/bus/thunderbolt/devices/.../domainX/iommu_dma_protection
Date: Mar 2019
KernelVersion: 4.21
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: This attribute tells whether the system uses IOMMU
for DMA protection. Value of 1 means IOMMU is used 0 means
it is not (DMA protection is solely based on Thunderbolt
@@ -42,7 +42,7 @@ Description: This attribute tells whether the system uses IOMMU
What: /sys/bus/thunderbolt/devices/.../domainX/security
Date: Sep 2017
KernelVersion: 4.13
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: This attribute holds current Thunderbolt security level
set by the system BIOS. Possible values are:
@@ -64,7 +64,7 @@ Description: This attribute holds current Thunderbolt security level
What: /sys/bus/thunderbolt/devices/.../authorized
Date: Sep 2017
KernelVersion: 4.13
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: This attribute is used to authorize Thunderbolt devices
after they have been connected. If the device is not
authorized, no PCIe devices are available to the system.
@@ -98,7 +98,7 @@ Description: This attribute is used to authorize Thunderbolt devices
What: /sys/bus/thunderbolt/devices/.../boot
Date: Jun 2018
KernelVersion: 4.17
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: This attribute contains 1 if Thunderbolt device was already
authorized on boot and 0 otherwise.
@@ -113,7 +113,7 @@ Description: This attribute contains the generation of the Thunderbolt
What: /sys/bus/thunderbolt/devices/.../key
Date: Sep 2017
KernelVersion: 4.13
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: When a devices supports Thunderbolt secure connect it will
have this attribute. Writing 32 byte hex string changes
authorization to use the secure connection method instead.
@@ -123,14 +123,14 @@ Description: When a devices supports Thunderbolt secure connect it will
What: /sys/bus/thunderbolt/devices/.../device
Date: Sep 2017
KernelVersion: 4.13
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: This attribute contains id of this device extracted from
the device DROM.
What: /sys/bus/thunderbolt/devices/.../device_name
Date: Sep 2017
KernelVersion: 4.13
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: This attribute contains name of this device extracted from
the device DROM.
@@ -172,21 +172,21 @@ Description: This attribute reports number of TX lanes the device is
What: /sys/bus/thunderbolt/devices/.../vendor
Date: Sep 2017
KernelVersion: 4.13
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: This attribute contains vendor id of this device extracted
from the device DROM.
What: /sys/bus/thunderbolt/devices/.../vendor_name
Date: Sep 2017
KernelVersion: 4.13
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: This attribute contains vendor name of this device extracted
from the device DROM.
What: /sys/bus/thunderbolt/devices/.../unique_id
Date: Sep 2017
KernelVersion: 4.13
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: This attribute contains unique_id string of this device.
This is either read from hardware registers (UUID on
newer hardware) or based on UID from the device DROM.
@@ -195,7 +195,7 @@ Description: This attribute contains unique_id string of this device.
What: /sys/bus/thunderbolt/devices/.../nvm_version
Date: Sep 2017
KernelVersion: 4.13
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: If the device has upgradeable firmware the version
number is available here. Format: %x.%x, major.minor.
If the device is in safe mode reading the file returns
@@ -204,7 +204,7 @@ Description: If the device has upgradeable firmware the version
What: /sys/bus/thunderbolt/devices/.../nvm_authenticate
Date: Sep 2017
KernelVersion: 4.13
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: When new NVM image is written to the non-active NVM
area (through non_activeX NVMem device), the
authentication procedure is started by writing to
@@ -246,7 +246,7 @@ Description: For supported devices, automatically authenticate the new Thunderbo
What: /sys/bus/thunderbolt/devices/<xdomain>.<service>/key
Date: Jan 2018
KernelVersion: 4.15
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: This contains name of the property directory the XDomain
service exposes. This entry describes the protocol in
question. Following directories are already reserved by
@@ -261,35 +261,35 @@ Description: This contains name of the property directory the XDomain
What: /sys/bus/thunderbolt/devices/<xdomain>.<service>/modalias
Date: Jan 2018
KernelVersion: 4.15
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: Stores the same MODALIAS value emitted by uevent for
the XDomain service. Format: tbtsvc:kSpNvNrN
What: /sys/bus/thunderbolt/devices/<xdomain>.<service>/prtcid
Date: Jan 2018
KernelVersion: 4.15
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: This contains XDomain protocol identifier the XDomain
service supports.
What: /sys/bus/thunderbolt/devices/<xdomain>.<service>/prtcvers
Date: Jan 2018
KernelVersion: 4.15
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: This contains XDomain protocol version the XDomain
service supports.
What: /sys/bus/thunderbolt/devices/<xdomain>.<service>/prtcrevs
Date: Jan 2018
KernelVersion: 4.15
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: This contains XDomain software version the XDomain
service supports.
What: /sys/bus/thunderbolt/devices/<xdomain>.<service>/prtcstns
Date: Jan 2018
KernelVersion: 4.15
-Contact: thunderbolt-software@lists.01.org
+Contact: Mika Westerberg <mika.westerberg@linux.intel.com>
Description: This contains XDomain service specific settings as
bitmask. Format: %x
diff --git a/Documentation/ABI/testing/sysfs-bus-umc b/Documentation/ABI/testing/sysfs-bus-umc
deleted file mode 100644
index 80ef88bc5097..000000000000
--- a/Documentation/ABI/testing/sysfs-bus-umc
+++ /dev/null
@@ -1,28 +0,0 @@
-What: /sys/bus/umc/
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: David Vrabel <david.vrabel@csr.com>
-Description:
- The Wireless Host Controller Interface (WHCI)
- specification describes a PCI-based device with
- multiple capabilities; the UWB Multi-interface
- Controller (UMC).
-
- The umc bus presents each of the individual
- capabilities as a device.
-
-What: /sys/bus/umc/devices/.../capability_id
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: David Vrabel <david.vrabel@csr.com>
-Description:
- The ID of this capability, with 0 being the radio
- controller capability.
-
-What: /sys/bus/umc/devices/.../version
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: David Vrabel <david.vrabel@csr.com>
-Description:
- The specification version this capability's hardware
- interface complies with.
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index be663258b9b7..a44bfe020061 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -28,40 +28,6 @@ Description:
drivers, non-authorized one are not. By default, wired
USB devices are authorized.
- Certified Wireless USB devices are not authorized
- initially and should be (by writing 1) after the
- device has been authenticated.
-
-What: /sys/bus/usb/device/.../wusb_cdid
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: David Vrabel <david.vrabel@csr.com>
-Description:
- For Certified Wireless USB devices only.
-
- A devices's CDID, as 16 space-separated hex octets.
-
-What: /sys/bus/usb/device/.../wusb_ck
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: David Vrabel <david.vrabel@csr.com>
-Description:
- For Certified Wireless USB devices only.
-
- Write the device's connection key (CK) to start the
- authentication of the device. The CK is 16
- space-separated hex octets.
-
-What: /sys/bus/usb/device/.../wusb_disconnect
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: David Vrabel <david.vrabel@csr.com>
-Description:
- For Certified Wireless USB devices only.
-
- Write a 1 to force the device to disconnect
- (equivalent to unplugging a wired USB device).
-
What: /sys/bus/usb/drivers/.../new_id
Date: October 2011
Contact: linux-usb@vger.kernel.org
diff --git a/Documentation/ABI/testing/sysfs-class-firmware-attributes b/Documentation/ABI/testing/sysfs-class-firmware-attributes
index 1b3ecae80b3d..f205d39409a3 100644
--- a/Documentation/ABI/testing/sysfs-class-firmware-attributes
+++ b/Documentation/ABI/testing/sysfs-class-firmware-attributes
@@ -22,6 +22,11 @@ Description:
- integer: a range of numerical values
- string
+ HP specific types
+ -----------------
+	- ordered-list - an ordered list of valid values
+
+
All attribute types support the following values:
current_value:
@@ -126,6 +131,21 @@ Description:
value will not be effective through sysfs until this rule is
met.
+ HP specific class extensions
+ ------------------------------
+
+ On HP systems the following additional attributes are available:
+
+ "ordered-list"-type specific properties:
+
+ elements:
+			A file that can be read to obtain the possible
+			list of values of the <attr>. Values are separated by a
+			semicolon (``;``) and listed according to their priority.
+			An element listed first has the highest priority. Writing
+			the list in a different order to current_value changes
+			the priority order for the particular attribute.
+
What: /sys/class/firmware-attributes/*/authentication/
Date: February 2021
KernelVersion: 5.11
@@ -206,7 +226,7 @@ Description:
Drivers may emit a CHANGE uevent when a password is set or unset
userspace may check it again.
- On Dell and Lenovo systems, if Admin password is set, then all BIOS attributes
+ On Dell, Lenovo and HP systems, if Admin password is set, then all BIOS attributes
require password validation.
On Lenovo systems if you change the Admin password the new password is not active until
the next boot.
@@ -296,6 +316,15 @@ Description:
echo "signature" > authentication/Admin/signature
echo "password" > authentication/Admin/certificate_to_password
+ HP specific class extensions
+ --------------------------------
+
+ On HP systems the following additional settings are available:
+
+ role: enhanced-bios-auth:
+		This role is specific to the Secure Platform Management (SPM) attribute.
+		It requires configuring an endorsement key (kek) and a signing key (sk).
+
What: /sys/class/firmware-attributes/*/attributes/pending_reboot
Date: February 2021
@@ -311,7 +340,7 @@ Description:
== =========================================
0 All BIOS attributes setting are current
1 A reboot is necessary to get pending BIOS
- attribute changes applied
+ attribute changes applied
== =========================================
Note, userspace applications need to follow below steps for efficient
@@ -364,3 +393,71 @@ Description:
use it to enable extra debug attributes or BIOS features for testing purposes.
Note that any changes to this attribute requires a reboot for changes to take effect.
+
+
+	HP specific class extensions - Secure Platform Management (SPM)
+	----------------------------------------------------------------
+
+What: /sys/class/firmware-attributes/*/authentication/SPM/kek
+Date: March 2023
+KernelVersion: 5.18
+Contact: "Jorge Lopez" <jorge.lopez2@hp.com>
+Description:
+ 'kek' Key-Encryption-Key is a write-only file that can be used to configure the
+ RSA public key that will be used by the BIOS to verify
+ signatures when setting the signing key. When written,
+ the bytes should correspond to the KEK certificate
+ (x509 .DER format containing an OU). The size of the
+ certificate must be less than or equal to 4095 bytes.
+
+What: /sys/class/firmware-attributes/*/authentication/SPM/sk
+Date: March 2023
+KernelVersion: 5.18
+Contact: "Jorge Lopez" <jorge.lopez2@hp.com>
+Description:
+ 'sk' Signature Key is a write-only file that can be used to configure the RSA
+ public key that will be used by the BIOS to verify signatures
+ when configuring BIOS settings and security features. When
+ written, the bytes should correspond to the modulus of the
+ public key. The exponent is assumed to be 0x10001.
+
+What: /sys/class/firmware-attributes/*/authentication/SPM/status
+Date: March 2023
+KernelVersion: 5.18
+Contact: "Jorge Lopez" <jorge.lopez2@hp.com>
+Description:
+ 'status' is a read-only file that returns ASCII text in JSON format reporting
+ the status information.
+
+ "State": "not provisioned | provisioned | provisioning in progress",
+ "Version": "Major.Minor",
+		"Nonce": <16-bit unsigned number displayed in base 10>,
+		"FeaturesInUse": <16-bit unsigned number displayed in base 10>,
+ "EndorsementKeyMod": "<256 bytes in base64>",
+ "SigningKeyMod": "<256 bytes in base64>"
+
+What: /sys/class/firmware-attributes/*/attributes/Sure_Start/audit_log_entries
+Date: March 2023
+KernelVersion: 5.18
+Contact: "Jorge Lopez" <jorge.lopez2@hp.com>
+Description:
+ 'audit_log_entries' is a read-only file that returns the events in the log.
+
+ Audit log entry format
+
+ Byte 0-15: Requested Audit Log entry (Each Audit log is 16 bytes)
+ Byte 16-127: Unused
+
+What: /sys/class/firmware-attributes/*/attributes/Sure_Start/audit_log_entry_count
+Date: March 2023
+KernelVersion: 5.18
+Contact: "Jorge Lopez" <jorge.lopez2@hp.com>
+Description:
+		'audit_log_entry_count' is a read-only file that returns the number of existing
+		audit log events available to be read. Values are separated by a comma (``,``).
+
+ [No of entries],[log entry size],[Max number of entries supported]
+
+		log entry size identifies the audit log entry size for the current BIOS
+		version. The current size is 16 bytes, but it may be up to 128 bytes in
+		future BIOS versions.
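A minimal Python sketch of provisioning SPM through the files documented above,
assuming local kek.der and sk_modulus.bin input files; the wildcard path is
resolved at run time and the size check follows the documented 4095-byte limit::

    import glob

    auth = glob.glob("/sys/class/firmware-attributes/*/authentication/SPM")[0]

    with open("kek.der", "rb") as f:
        kek = f.read()
    if len(kek) > 4095:
        raise ValueError("KEK certificate must be at most 4095 bytes")
    with open(f"{auth}/kek", "wb") as f:
        f.write(kek)

    with open("sk_modulus.bin", "rb") as f:
        sk = f.read()
    with open(f"{auth}/sk", "wb") as f:
        f.write(sk)          # modulus only; the exponent is assumed to be 0x10001

    print(open(f"{auth}/status").read())   # JSON status described above
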
diff --git a/Documentation/ABI/testing/sysfs-class-led b/Documentation/ABI/testing/sysfs-class-led
index 2e24ac3bd7ef..b2ff0012c0f2 100644
--- a/Documentation/ABI/testing/sysfs-class-led
+++ b/Documentation/ABI/testing/sysfs-class-led
@@ -59,6 +59,15 @@ Description:
brightness. Reading this file when no hw brightness change
event has happened will return an ENODATA error.
+What: /sys/class/leds/<led>/color
+Date: June 2023
+KernelVersion: 6.5
+Description:
+ Color of the LED.
+
+		This is a read-only file. Reading this file returns the color
+		of the LED as a string (e.g.: "red", "green", "multicolor").
+
What: /sys/class/leds/<led>/trigger
Date: March 2006
KernelVersion: 2.6.17
diff --git a/Documentation/ABI/testing/sysfs-class-uwb_rc b/Documentation/ABI/testing/sysfs-class-uwb_rc
deleted file mode 100644
index a7ea169dc4eb..000000000000
--- a/Documentation/ABI/testing/sysfs-class-uwb_rc
+++ /dev/null
@@ -1,156 +0,0 @@
-What: /sys/class/uwb_rc
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: linux-usb@vger.kernel.org
-Description:
- Interfaces for WiMedia Ultra Wideband Common Radio
- Platform (UWB) radio controllers.
-
- Familiarity with the ECMA-368 'High Rate Ultra
- Wideband MAC and PHY Specification' is assumed.
-
-What: /sys/class/uwb_rc/beacon_timeout_ms
-Date: July 2008
-KernelVersion: 2.6.27
-Description:
- If no beacons are received from a device for at least
- this time, the device will be considered to have gone
- and it will be removed. The default is 3 superframes
- (~197 ms) as required by the specification.
-
-What: /sys/class/uwb_rc/uwb<N>/
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: linux-usb@vger.kernel.org
-Description:
- An individual UWB radio controller.
-
-What: /sys/class/uwb_rc/uwb<N>/beacon
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: linux-usb@vger.kernel.org
-Description:
- Write:
-
- <channel>
-
- to force a specific channel to be used when beaconing,
- or, if <channel> is -1, to prohibit beaconing. If
- <channel> is 0, then the default channel selection
- algorithm will be used. Valid channels depends on the
- radio controller's supported band groups.
-
- Reading returns the currently active channel, or -1 if
- the radio controller is not beaconing.
-
-What: /sys/class/uwb_rc/uwb<N>/ASIE
-Date: August 2014
-KernelVersion: 3.18
-Contact: linux-usb@vger.kernel.org
-Description:
-
- The application-specific information element (ASIE)
- included in this device's beacon, in space separated
- hex octets.
-
- Reading returns the current ASIE. Writing replaces
- the current ASIE with the one written.
-
-What: /sys/class/uwb_rc/uwb<N>/scan
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: linux-usb@vger.kernel.org
-Description:
- Write:
-
- <channel> <type> [<bpst offset>]
-
- to start (or stop) scanning on a channel. <type> is one of:
-
- == =======================================
- 0 scan
- 1 scan outside BP
- 2 scan while inactive
- 3 scanning disabled
- 4 scan (with start time of <bpst offset>)
- == =======================================
-
-What: /sys/class/uwb_rc/uwb<N>/mac_address
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: linux-usb@vger.kernel.org
-Description:
- The EUI-48, in colon-separated hex octets, for this
- radio controller. A write will change the radio
- controller's EUI-48 but only do so while the device is
- not beaconing or scanning.
-
-What: /sys/class/uwb_rc/uwb<N>/wusbhc
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: linux-usb@vger.kernel.org
-Description:
- A symlink to the device (if any) of the WUSB Host
- Controller PAL using this radio controller.
-
-What: /sys/class/uwb_rc/uwb<N>/<EUI-48>/
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: linux-usb@vger.kernel.org
-Description:
- A neighbour UWB device that has either been detected
- as part of a scan or is a member of the radio
- controllers beacon group.
-
-What: /sys/class/uwb_rc/uwb<N>/<EUI-48>/BPST
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: linux-usb@vger.kernel.org
-Description:
- The time (using the radio controllers internal 1 ms
- interval superframe timer) of the last beacon from
- this device was received.
-
-What: /sys/class/uwb_rc/uwb<N>/<EUI-48>/DevAddr
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: linux-usb@vger.kernel.org
-Description:
- The current DevAddr of this device in colon separated
- hex octets.
-
-What: /sys/class/uwb_rc/uwb<N>/<EUI-48>/EUI_48
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: linux-usb@vger.kernel.org
-Description:
-
- The EUI-48 of this device in colon separated hex
- octets.
-
-What: /sys/class/uwb_rc/uwb<N>/<EUI-48>/IEs
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: linux-usb@vger.kernel.org
-Description:
- The latest IEs included in this device's beacon, in
- space separated hex octets with one IE per line.
-
-What: /sys/class/uwb_rc/uwb<N>/<EUI-48>/LQE
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: linux-usb@vger.kernel.org
-Description:
- Link Quality Estimate - the Signal to Noise Ratio
- (SNR) of all packets received from this device in dB.
- This gives an estimate on a suitable PHY rate. Refer
- to [ECMA-368] section 13.3 for more details.
-
-What: /sys/class/uwb_rc/uwb<N>/<EUI-48>/RSSI
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: linux-usb@vger.kernel.org
-Description:
- Received Signal Strength Indication - the strength of
- the received signal in dB. LQE is a more useful
- measure of the radio link quality.
diff --git a/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc b/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc
deleted file mode 100644
index 130ea5189b02..000000000000
--- a/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc
+++ /dev/null
@@ -1,57 +0,0 @@
-What: /sys/class/uwb_rc/uwb<N>/wusbhc/wusb_chid
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: David Vrabel <david.vrabel@csr.com>
-Description:
- Write the CHID (16 space-separated hex octets) for this host controller.
- This starts the host controller, allowing it to accept connection from
- WUSB devices.
-
- Set an all zero CHID to stop the host controller.
-
-What: /sys/class/uwb_rc/uwb<N>/wusbhc/wusb_trust_timeout
-Date: July 2008
-KernelVersion: 2.6.27
-Contact: David Vrabel <david.vrabel@csr.com>
-Description:
- Devices that haven't sent a WUSB packet to the host
- within 'wusb_trust_timeout' ms are considered to have
- disconnected and are removed. The default value of
- 4000 ms is the value required by the WUSB
- specification.
-
- Since this relates to security (specifically, the
- lifetime of PTKs and GTKs) it should not be changed
- from the default.
-
-What: /sys/class/uwb_rc/uwb<N>/wusbhc/wusb_phy_rate
-Date: August 2009
-KernelVersion: 2.6.32
-Contact: David Vrabel <david.vrabel@csr.com>
-Description:
- The maximum PHY rate to use for all connected devices.
- This is only of limited use for testing and
- development as the hardware's automatic rate
- adaptation is better then this simple control.
-
- Refer to [ECMA-368] section 10.3.1.1 for the value to
- use.
-
-What: /sys/class/uwb_rc/uwb<N>/wusbhc/wusb_dnts
-Date: June 2013
-KernelVersion: 3.11
-Contact: Thomas Pugliese <thomas.pugliese@gmail.com>
-Description:
- The device notification time slot (DNTS) count and interval in
- milliseconds that the WUSB host should use. This controls how
- often the devices will have the opportunity to send
- notifications to the host.
-
-What: /sys/class/uwb_rc/uwb<N>/wusbhc/wusb_retry_count
-Date: June 2013
-KernelVersion: 3.11
-Contact: Thomas Pugliese <thomas.pugliese@gmail.com>
-Description:
- The number of retries that the WUSB host should attempt
- before reporting an error for a bus transaction. The range of
- valid values is [0..15], where 0 indicates infinite retries.
diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index e487f969a15e..0c7efaf62de0 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -1437,180 +1437,6 @@ Description:
If avail_wb_buff < wb_flush_threshold, it indicates that WriteBooster buffer needs to
be flushed, otherwise it is not necessary.
-What: /sys/bus/platform/drivers/ufshcd/*/device_descriptor/hpb_version
-What: /sys/bus/platform/devices/*.ufs/device_descriptor/hpb_version
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: This entry shows the HPB specification version.
- The full information about the descriptor can be found in the UFS
- HPB (Host Performance Booster) Extension specifications.
- Example: version 1.2.3 = 0123h
-
- The file is read only.
-
-What: /sys/bus/platform/drivers/ufshcd/*/device_descriptor/hpb_control
-What: /sys/bus/platform/devices/*.ufs/device_descriptor/hpb_control
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: This entry shows an indication of the HPB control mode.
- 00h: Host control mode
- 01h: Device control mode
-
- The file is read only.
-
-What: /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_region_size
-What: /sys/bus/platform/devices/*.ufs/geometry_descriptor/hpb_region_size
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: This entry shows the bHPBRegionSize which can be calculated
- as in the following (in bytes):
- HPB Region size = 512B * 2^bHPBRegionSize
-
- The file is read only.
-
-What: /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_number_lu
-What: /sys/bus/platform/devices/*.ufs/geometry_descriptor/hpb_number_lu
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: This entry shows the maximum number of HPB LU supported by
- the device.
- 00h: HPB is not supported by the device.
- 01h ~ 20h: Maximum number of HPB LU supported by the device
-
- The file is read only.
-
-What: /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_subregion_size
-What: /sys/bus/platform/devices/*.ufs/geometry_descriptor/hpb_subregion_size
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: This entry shows the bHPBSubRegionSize, which can be
- calculated as in the following (in bytes) and shall be a multiple of
- logical block size:
- HPB Sub-Region size = 512B x 2^bHPBSubRegionSize
- bHPBSubRegionSize shall not exceed bHPBRegionSize.
-
- The file is read only.
-
-What: /sys/bus/platform/drivers/ufshcd/*/geometry_descriptor/hpb_max_active_regions
-What: /sys/bus/platform/devices/*.ufs/geometry_descriptor/hpb_max_active_regions
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: This entry shows the maximum number of active HPB regions that
- is supported by the device.
-
- The file is read only.
-
-What: /sys/class/scsi_device/*/device/unit_descriptor/hpb_lu_max_active_regions
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: This entry shows the maximum number of HPB regions assigned to
- the HPB logical unit.
-
- The file is read only.
-
-What: /sys/class/scsi_device/*/device/unit_descriptor/hpb_pinned_region_start_offset
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: This entry shows the start offset of HPB pinned region.
-
- The file is read only.
-
-What: /sys/class/scsi_device/*/device/unit_descriptor/hpb_number_pinned_regions
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: This entry shows the number of HPB pinned regions assigned to
- the HPB logical unit.
-
- The file is read only.
-
-What: /sys/class/scsi_device/*/device/hpb_stats/hit_cnt
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: This entry shows the number of reads that changed to HPB read.
-
- The file is read only.
-
-What: /sys/class/scsi_device/*/device/hpb_stats/miss_cnt
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: This entry shows the number of reads that cannot be changed to
- HPB read.
-
- The file is read only.
-
-What: /sys/class/scsi_device/*/device/hpb_stats/rcmd_noti_cnt
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: This entry shows the number of response UPIUs that has
- recommendations for activating sub-regions and/or inactivating region.
-
- The file is read only.
-
-What: /sys/class/scsi_device/*/device/hpb_stats/rcmd_active_cnt
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: For the HPB device control mode, this entry shows the number of
- active sub-regions recommended by response UPIUs. For the HPB host control
- mode, this entry shows the number of active sub-regions recommended by the
- HPB host control mode heuristic algorithm.
-
- The file is read only.
-
-What: /sys/class/scsi_device/*/device/hpb_stats/rcmd_inactive_cnt
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: For the HPB device control mode, this entry shows the number of
- inactive regions recommended by response UPIUs. For the HPB host control
- mode, this entry shows the number of inactive regions recommended by the
- HPB host control mode heuristic algorithm.
-
- The file is read only.
-
-What: /sys/class/scsi_device/*/device/hpb_stats/map_req_cnt
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: This entry shows the number of read buffer commands for
- activating sub-regions recommended by response UPIUs.
-
- The file is read only.
-
-What: /sys/class/scsi_device/*/device/hpb_params/requeue_timeout_ms
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: This entry shows the requeue timeout threshold for write buffer
- command in ms. The value can be changed by writing an integer to
- this entry.
-
-What: /sys/bus/platform/drivers/ufshcd/*/attributes/max_data_size_hpb_single_cmd
-What: /sys/bus/platform/devices/*.ufs/attributes/max_data_size_hpb_single_cmd
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: This entry shows the maximum HPB data size for using a single HPB
- command.
-
- === ========
- 00h 4KB
- 01h 8KB
- 02h 12KB
- ...
- FFh 1024KB
- === ========
-
- The file is read only.
-
-What: /sys/bus/platform/drivers/ufshcd/*/flags/hpb_enable
-What: /sys/bus/platform/devices/*.ufs/flags/hpb_enable
-Date: June 2021
-Contact: Daejun Park <daejun7.park@samsung.com>
-Description: This entry shows the status of HPB.
-
- == ============================
- 0 HPB is not enabled.
- 1 HPB is enabled
- == ============================
-
- The file is read only.
-
Contact: Daniil Lunev <dlunev@chromium.org>
What: /sys/bus/platform/drivers/ufshcd/*/capabilities/
What: /sys/bus/platform/devices/*.ufs/capabilities/
@@ -1648,76 +1474,3 @@ Description: Indicates status of Write Booster.
The file is read only.
-What: /sys/class/scsi_device/*/device/hpb_param_sysfs/activation_thld
-Date: February 2021
-Contact: Avri Altman <avri.altman@wdc.com>
-Description: In host control mode, reads are the major source of activation
- trials. Once this threshold hs met, the region is added to the
- "to-be-activated" list. Since we reset the read counter upon
- write, this include sending a rb command updating the region
- ppn as well.
-
-What: /sys/class/scsi_device/*/device/hpb_param_sysfs/normalization_factor
-Date: February 2021
-Contact: Avri Altman <avri.altman@wdc.com>
-Description: In host control mode, we think of the regions as "buckets".
- Those buckets are being filled with reads, and emptied on write.
- We use entries_per_srgn - the amount of blocks in a subregion as
- our bucket size. This applies because HPB1.0 only handles
- single-block reads. Once the bucket size is crossed, we trigger
- a normalization work - not only to avoid overflow, but mainly
- because we want to keep those counters normalized, as we are
- using those reads as a comparative score, to make various decisions.
- The normalization is dividing (shift right) the read counter by
- the normalization_factor. If during consecutive normalizations
- an active region has exhausted its reads - inactivate it.
-
-What: /sys/class/scsi_device/*/device/hpb_param_sysfs/eviction_thld_enter
-Date: February 2021
-Contact: Avri Altman <avri.altman@wdc.com>
-Description: Region deactivation is often due to the fact that eviction took
- place: A region becomes active at the expense of another. This is
- happening when the max-active-regions limit has been crossed.
- In host mode, eviction is considered an extreme measure. We
- want to verify that the entering region has enough reads, and
- the exiting region has much fewer reads. eviction_thld_enter is
- the min reads that a region must have in order to be considered
- a candidate for evicting another region.
-
-What: /sys/class/scsi_device/*/device/hpb_param_sysfs/eviction_thld_exit
-Date: February 2021
-Contact: Avri Altman <avri.altman@wdc.com>
-Description: Same as above for the exiting region. A region is considered to
- be a candidate for eviction only if it has fewer reads than
- eviction_thld_exit.
-
-What: /sys/class/scsi_device/*/device/hpb_param_sysfs/read_timeout_ms
-Date: February 2021
-Contact: Avri Altman <avri.altman@wdc.com>
-Description: In order not to hang on to "cold" regions, we inactivate
- a region that has no READ access for a predefined amount of
- time - read_timeout_ms. If read_timeout_ms has expired, and the
- region is dirty, it is less likely that we can make any use of
- HPB reading it so we inactivate it. Still, deactivation has
- its overhead, and we may still benefit from HPB reading this
- region if it is clean - see read_timeout_expiries.
-
-What: /sys/class/scsi_device/*/device/hpb_param_sysfs/read_timeout_expiries
-Date: February 2021
-Contact: Avri Altman <avri.altman@wdc.com>
-Description: If the region read timeout has expired, but the region is clean,
- just re-wind its timer for another spin. Do that as long as it
- is clean and did not exhaust its read_timeout_expiries threshold.
-
-What: /sys/class/scsi_device/*/device/hpb_param_sysfs/timeout_polling_interval_ms
-Date: February 2021
-Contact: Avri Altman <avri.altman@wdc.com>
-Description: The frequency with which the delayed worker that checks the
- read_timeouts is awakened.
-
-What: /sys/class/scsi_device/*/device/hpb_param_sysfs/inflight_map_req
-Date: February 2021
-Contact: Avri Altman <avri.altman@wdc.com>
-Description: In host control mode the host is the originator of map requests.
- To avoid flooding the device with map requests, use a simple throttling
- mechanism that limits the number of inflight map requests.
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index ad3d76d37c8b..36c3cb547901 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -102,9 +102,9 @@ What: /sys/fs/f2fs/<disk>/max_small_discards
Date: November 2013
Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
Description: Controls the issue rate of discard commands that consist of small
- blocks less than 2MB. The candidates to be discarded are cached until
- checkpoint is triggered, and issued during the checkpoint.
- By default, it is disabled with 0.
+ blocks less than 2MB. The candidates to be discarded are cached during
+		checkpoint, and issued by the issue_discard thread after checkpoint.
+ It is enabled by default.
What: /sys/fs/f2fs/<disk>/max_ordered_discard
Date: October 2022
diff --git a/Documentation/ABI/testing/sysfs-platform-asus-wmi b/Documentation/ABI/testing/sysfs-platform-asus-wmi
index a77a004a1baa..8a7e25bde085 100644
--- a/Documentation/ABI/testing/sysfs-platform-asus-wmi
+++ b/Documentation/ABI/testing/sysfs-platform-asus-wmi
@@ -98,3 +98,91 @@ Description:
Enable an LCD response-time boost to reduce or remove ghosting:
* 0 - Disable,
* 1 - Enable
+
+What: /sys/devices/platform/<platform>/charge_mode
+Date: Jun 2023
+KernelVersion: 6.5
+Contact: "Luke Jones" <luke@ljones.dev>
+Description:
+ Get the current charging mode being used:
+ * 1 - Barrel connected charger,
+ * 2 - USB-C charging
+ * 3 - Both connected, barrel used for charging
+
+What: /sys/devices/platform/<platform>/egpu_connected
+Date: Jun 2023
+KernelVersion: 6.5
+Contact: "Luke Jones" <luke@ljones.dev>
+Description:
+ Show if the egpu (XG Mobile) is correctly connected:
+ * 0 - False,
+ * 1 - True
+
+What: /sys/devices/platform/<platform>/mini_led_mode
+Date: Jun 2023
+KernelVersion: 6.5
+Contact: "Luke Jones" <luke@ljones.dev>
+Description:
+ Change the mini-LED mode:
+ * 0 - Single-zone,
+ * 1 - Multi-zone
+
+What: /sys/devices/platform/<platform>/ppt_pl1_spl
+Date: Jun 2023
+KernelVersion: 6.5
+Contact: "Luke Jones" <luke@ljones.dev>
+Description:
+ Set the Package Power Target total of CPU: PL1 on Intel, SPL on AMD.
+ Shown on Intel+Nvidia or AMD+Nvidia based systems:
+
+ * min=5, max=250
+
+What: /sys/devices/platform/<platform>/ppt_pl2_sppt
+Date: Jun 2023
+KernelVersion: 6.5
+Contact: "Luke Jones" <luke@ljones.dev>
+Description:
+		Set the Slow Package Power Tracking Limit of CPU: PL2 on Intel,
+		SPPT on AMD. Shown on Intel+Nvidia or AMD+Nvidia based systems:
+
+ * min=5, max=250
+
+What: /sys/devices/platform/<platform>/ppt_fppt
+Date: Jun 2023
+KernelVersion: 6.5
+Contact: "Luke Jones" <luke@ljones.dev>
+Description:
+ Set the Fast Package Power Tracking Limit of CPU. AMD+Nvidia only:
+ * min=5, max=250
+
+What: /sys/devices/platform/<platform>/ppt_apu_sppt
+Date: Jun 2023
+KernelVersion: 6.5
+Contact: "Luke Jones" <luke@ljones.dev>
+Description:
+ Set the APU SPPT limit. Shown on full AMD systems only:
+ * min=5, max=130
+
+What: /sys/devices/platform/<platform>/ppt_platform_sppt
+Date: Jun 2023
+KernelVersion: 6.5
+Contact: "Luke Jones" <luke@ljones.dev>
+Description:
+ Set the platform SPPT limit. Shown on full AMD systems only:
+ * min=5, max=130
+
+What: /sys/devices/platform/<platform>/nv_dynamic_boost
+Date: Jun 2023
+KernelVersion: 6.5
+Contact: "Luke Jones" <luke@ljones.dev>
+Description:
+ Set the dynamic boost limit of the Nvidia dGPU:
+ * min=5, max=25
+
+What: /sys/devices/platform/<platform>/nv_temp_target
+Date: Jun 2023
+KernelVersion: 6.5
+Contact: "Luke Jones" <luke@ljones.dev>
+Description:
+ Set the target temperature limit of the Nvidia dGPU:
+ * min=75, max=87
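A minimal Python sketch that clamps a value to the documented ranges before writing
it; the platform directory name varies per machine and is assumed here, and the
attribute values are purely illustrative::

    PLAT = "/sys/devices/platform/asus-nb-wmi"   # assumed platform name

    def write_limit(attr, value, lo, hi):
        value = max(lo, min(hi, value))          # stay within the documented range
        with open(f"{PLAT}/{attr}", "w") as f:
            f.write(str(value))

    write_limit("ppt_pl1_spl", 45, 5, 250)
    write_limit("nv_temp_target", 80, 75, 87)
    print("charge_mode:", open(f"{PLAT}/charge_mode").read().strip())
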
diff --git a/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl b/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl
index 4c5c02d8f870..65ed3865da62 100644
--- a/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl
+++ b/Documentation/ABI/testing/sysfs-platform-mellanox-bootctl
@@ -84,3 +84,69 @@ Description:
The file used to write BlueField boot log with the format
"[INFO|WARN|ERR|ASSERT ]<msg>". Log level 'INFO' is used by
default if not specified.
+
+What: /sys/bus/platform/devices/MLNXBF04:00/oob_mac
+Date: August 2023
+KernelVersion: 6.5
+Contact: "David Thompson <davthompson@nvidia.com>"
+Description:
+ The "oob_mac" sysfs attribute holds the MAC address for
+ the out-of-band 1Gbps Ethernet port. This MAC address is
+ provided on a board-level label.
+
+What: /sys/bus/platform/devices/MLNXBF04:00/opn
+Date: August 2023
+KernelVersion: 6.5
+Contact: "David Thompson <davthompson@nvidia.com>"
+Description:
+ The "opn" sysfs attribute holds the board's part number.
+ This value is provided on a board-level label.
+
+What: /sys/bus/platform/devices/MLNXBF04:00/sku
+Date: August 2023
+KernelVersion: 6.5
+Contact: "David Thompson <davthompson@nvidia.com>"
+Description:
+ The "sku" sysfs attribute holds the board's SKU number.
+ This value is provided on a board-level label.
+
+What: /sys/bus/platform/devices/MLNXBF04:00/modl
+Date: August 2023
+KernelVersion: 6.5
+Contact: "David Thompson <davthompson@nvidia.com>"
+Description:
+ The "modl" sysfs attribute holds the board's model number.
+ This value is provided on a board-level label.
+
+What: /sys/bus/platform/devices/MLNXBF04:00/sn
+Date: August 2023
+KernelVersion: 6.5
+Contact: "David Thompson <davthompson@nvidia.com>"
+Description:
+ The "sn" sysfs attribute holds the board's serial number.
+ This value is provided on a board-level label.
+
+What: /sys/bus/platform/devices/MLNXBF04:00/uuid
+Date: August 2023
+KernelVersion: 6.5
+Contact: "David Thompson <davthompson@nvidia.com>"
+Description:
+ The "uuid" sysfs attribute holds the board's UUID.
+ This value is provided by the manufacturing team.
+
+What: /sys/bus/platform/devices/MLNXBF04:00/rev
+Date: August 2023
+KernelVersion: 6.5
+Contact: "David Thompson <davthompson@nvidia.com>"
+Description:
+ The "rev" sysfs attribute holds the board's revision.
+ This value is provided on a board-level label.
+
+What: /sys/bus/platform/devices/MLNXBF04:00/mfg_lock
+Date: August 2023
+KernelVersion: 6.5
+Contact: "David Thompson <davthompson@nvidia.com>"
+Description:
+ The "mfg_lock" sysfs attribute is write-only.
+ A successful write to this attribute will latch the
+ board-level attributes into EEPROM, making them read-only.
diff --git a/Documentation/ABI/testing/sysfs-wusb_cbaf b/Documentation/ABI/testing/sysfs-wusb_cbaf
deleted file mode 100644
index 2969d3694ec0..000000000000
--- a/Documentation/ABI/testing/sysfs-wusb_cbaf
+++ /dev/null
@@ -1,101 +0,0 @@
-What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_*
-Date: August 2008
-KernelVersion: 2.6.27
-Contact: David Vrabel <david.vrabel@csr.com>
-Description:
- Various files for managing Cable Based Association of
- (wireless) USB devices.
-
- The sequence of operations should be:
-
- 1. Device is plugged in.
-
- 2. The connection manager (CM) sees a device with CBA capability.
- (the wusb_chid etc. files in /sys/devices/blah/OURDEVICE).
-
- 3. The CM writes the host name, supported band groups,
- and the CHID (host ID) into the wusb_host_name,
- wusb_host_band_groups and wusb_chid files. These
- get sent to the device and the CDID (if any) for
- this host is requested.
-
- 4. The CM can verify that the device's supported band
- groups (wusb_device_band_groups) are compatible
- with the host.
-
- 5. The CM reads the wusb_cdid file.
-
- 6. The CM looks it up its database.
-
- - If it has a matching CHID,CDID entry, the device
- has been authorized before and nothing further
- needs to be done.
-
- - If the CDID is zero (or the CM doesn't find a
- matching CDID in its database), the device is
- assumed to be not known. The CM may associate
- the host with device by: writing a randomly
- generated CDID to wusb_cdid and then a random CK
- to wusb_ck (this uploads the new CC to the
- device).
-
- CMD may choose to prompt the user before
- associating with a new device.
-
- 7. Device is unplugged.
-
- References:
- [WUSB-AM]
- Association Models Supplement to the
- Certified Wireless Universal Serial Bus
- Specification, version 1.0.
-
-What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_chid
-Date: August 2008
-KernelVersion: 2.6.27
-Contact: David Vrabel <david.vrabel@csr.com>
-Description:
- The CHID of the host formatted as 16 space-separated
- hex octets.
-
- Writes fetches device's supported band groups and the
- the CDID for any existing association with this host.
-
-What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_host_name
-Date: August 2008
-KernelVersion: 2.6.27
-Contact: David Vrabel <david.vrabel@csr.com>
-Description:
- A friendly name for the host as a UTF-8 encoded string.
-
-What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_host_band_groups
-Date: August 2008
-KernelVersion: 2.6.27
-Contact: David Vrabel <david.vrabel@csr.com>
-Description:
- The band groups supported by the host, in the format
- defined in [WUSB-AM].
-
-What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_device_band_groups
-Date: August 2008
-KernelVersion: 2.6.27
-Contact: David Vrabel <david.vrabel@csr.com>
-Description:
- The band groups supported by the device, in the format
- defined in [WUSB-AM].
-
-What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_cdid
-Date: August 2008
-KernelVersion: 2.6.27
-Contact: David Vrabel <david.vrabel@csr.com>
-Description:
- The device's CDID formatted as 16 space-separated hex
- octets.
-
-What: /sys/bus/usb/drivers/wusb_cbaf/.../wusb_ck
-Date: August 2008
-KernelVersion: 2.6.27
-Contact: David Vrabel <david.vrabel@csr.com>
-Description:
- Write 16 space-separated random, hex octets to
- associate with the device.
diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
index 8d3afeede10e..5f502bf68fbc 100644
--- a/Documentation/admin-guide/cgroup-v1/memory.rst
+++ b/Documentation/admin-guide/cgroup-v1/memory.rst
@@ -195,11 +195,11 @@ are not accounted. We just account pages under usual VM management.
RSS pages are accounted at page_fault unless they've already been accounted
for earlier. A file page will be accounted for as Page Cache when it's
-inserted into inode (radix-tree). While it's mapped into the page tables of
+inserted into inode (xarray). While it's mapped into the page tables of
processes, duplicate accounting is carefully avoided.
An RSS page is unaccounted when it's fully unmapped. A PageCache page is
-unaccounted when it's removed from radix-tree. Even if RSS pages are fully
+unaccounted when it's removed from xarray. Even if RSS pages are fully
unmapped (by kswapd), they may exist as SwapCache in the system until they
are really freed. Such SwapCaches are also accounted.
A swapped-in page is accounted after adding into swapcache.
@@ -907,7 +907,7 @@ experiences some pressure. In this situation, only group C will receive the
notification, i.e. groups A and B will not receive it. This is done to avoid
excessive "broadcasting" of messages, which disturbs the system and which is
especially bad if we are low on memory or thrashing. Group B, will receive
-notification only if there are no event listers for group C.
+notification only if there are no event listeners for group C.
There are three optional modes that specify different propagation behavior:
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 4ef890191196..b26b5274eaaf 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1045,7 +1045,7 @@ All time durations are in microseconds.
- user_usec
- system_usec
- and the following three when the controller is enabled:
+ and the following five when the controller is enabled:
- nr_periods
- nr_throttled
diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt
index b1b57f638b94..839054923530 100644
--- a/Documentation/admin-guide/devices.txt
+++ b/Documentation/admin-guide/devices.txt
@@ -2691,18 +2691,9 @@
45 = /dev/ttyMM1 Marvell MPSC - port 1 (obsolete unused)
46 = /dev/ttyCPM0 PPC CPM (SCC or SMC) - port 0
...
- 49 = /dev/ttyCPM5 PPC CPM (SCC or SMC) - port 3
- 50 = /dev/ttyIOC0 Altix serial card
- ...
- 81 = /dev/ttyIOC31 Altix serial card
+ 51 = /dev/ttyCPM5 PPC CPM (SCC or SMC) - port 5
82 = /dev/ttyVR0 NEC VR4100 series SIU
83 = /dev/ttyVR1 NEC VR4100 series DSIU
- 84 = /dev/ttyIOC84 Altix ioc4 serial card
- ...
- 115 = /dev/ttyIOC115 Altix ioc4 serial card
- 116 = /dev/ttySIOC0 Altix ioc3 serial card
- ...
- 147 = /dev/ttySIOC31 Altix ioc3 serial card
148 = /dev/ttyPSC0 PPC PSC - port 0
...
153 = /dev/ttyPSC5 PPC PSC - port 5
@@ -2761,10 +2752,7 @@
43 = /dev/ttycusmx2 Callout device for ttySMX2
46 = /dev/cucpm0 Callout device for ttyCPM0
...
- 49 = /dev/cucpm5 Callout device for ttyCPM5
- 50 = /dev/cuioc40 Callout device for ttyIOC40
- ...
- 81 = /dev/cuioc431 Callout device for ttyIOC431
+ 51 = /dev/cucpm5 Callout device for ttyCPM5
82 = /dev/cuvr0 Callout device for ttyVR0
83 = /dev/cuvr1 Callout device for ttyVR1
diff --git a/Documentation/admin-guide/dynamic-debug-howto.rst b/Documentation/admin-guide/dynamic-debug-howto.rst
index 8dc668cc1216..0b3d39c610d9 100644
--- a/Documentation/admin-guide/dynamic-debug-howto.rst
+++ b/Documentation/admin-guide/dynamic-debug-howto.rst
@@ -216,13 +216,14 @@ The flags are::
t Include thread ID, or <intr>
m Include module name
f Include the function name
+ s Include the source file name
l Include line number
For ``print_hex_dump_debug()`` and ``print_hex_dump_bytes()``, only
the ``p`` flag has meaning, other flags are ignored.
-Note the regexp ``^[-+=][flmpt_]+$`` matches a flags specification.
-To clear all flags at once, use ``=_`` or ``-flmpt``.
+Note the regexp ``^[-+=][fslmpt_]+$`` matches a flags specification.
+To clear all flags at once, use ``=_`` or ``-fslmpt``. For example, a minimal
+Python sketch of toggling these flags through the dynamic debug control file
+(assuming debugfs is mounted and using usbcore as an arbitrary module) is shown
+below.
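
    CTRL = "/sys/kernel/debug/dynamic_debug/control"

    def ddebug(query):
        with open(CTRL, "w") as f:
            f.write(query + "\n")

    ddebug("module usbcore +pfsl")   # p: enable, f: function, s: source file, l: line
    ddebug("module usbcore =_")      # clear all flags again
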
Debug messages during Boot Process
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index fcf79acea475..0a1731a0f0ef 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -873,7 +873,7 @@
memory region [offset, offset + size] for that kernel
image. If '@offset' is omitted, then a suitable offset
is selected automatically.
- [KNL, X86-64, ARM64] Select a region under 4G first, and
+ [KNL, X86-64, ARM64, RISCV] Select a region under 4G first, and
fall back to reserve region above 4G when '@offset'
hasn't been specified.
See Documentation/admin-guide/kdump/kdump.rst for further details.
@@ -886,14 +886,14 @@
Documentation/admin-guide/kdump/kdump.rst for an example.
crashkernel=size[KMG],high
- [KNL, X86-64, ARM64] range could be above 4G. Allow kernel
- to allocate physical memory region from top, so could
- be above 4G if system have more than 4G ram installed.
- Otherwise memory region will be allocated below 4G, if
- available.
+ [KNL, X86-64, ARM64, RISCV] range could be above 4G.
+ Allow kernel to allocate physical memory region from top,
+ so could be above 4G if system have more than 4G ram
+ installed. Otherwise memory region will be allocated
+ below 4G, if available.
It will be ignored if crashkernel=X is specified.
crashkernel=size[KMG],low
- [KNL, X86-64, ARM64] range under 4G. When crashkernel=X,high
+ [KNL, X86-64, ARM64, RISCV] range under 4G. When crashkernel=X,high
is passed, kernel could allocate physical memory region
above 4G, that cause second kernel crash on system
that require some amount of low memory, e.g. swiotlb
@@ -904,6 +904,7 @@
size is platform dependent.
--> x86: max(swiotlb_size_or_default() + 8MiB, 256MiB)
--> arm64: 128MiB
+ --> riscv: 128MiB
This one lets the user specify own low range under 4G
for second kernel instead.
0: to disable low allocation.
@@ -5554,6 +5555,13 @@
[KNL] Disable ring 3 MONITOR/MWAIT feature on supported
CPUs.
+ riscv_isa_fallback [RISCV]
+ When CONFIG_RISCV_ISA_FALLBACK is not enabled, permit
+ falling back to detecting extension support by parsing
+ "riscv,isa" property on devicetree systems when the
+ replacement properties are not found. See the Kconfig
+ entry for RISCV_ISA_FALLBACK.
+
ro [KNL] Mount root device read-only on boot
rodata= [KNL]
@@ -6711,7 +6719,7 @@
usbcore.authorized_default=
[USB] Default USB device authorization:
- (default -1 = authorized except for wireless USB,
+ (default -1 = authorized (same as 1),
0 = not authorized, 1 = authorized, 2 = authorized
if device connected to internal port)
@@ -7068,6 +7076,13 @@
disables both lockup detectors. Default is 10
seconds.
+ workqueue.unbound_cpus=
+ [KNL,SMP] Specify to constrain one or some CPUs
+ to use in unbound workqueues.
+ Format: <cpu-list>
+ By default, all online CPUs are available for
+ unbound workqueues.
+
workqueue.watchdog_thresh=
If CONFIG_WQ_WATCHDOG is configured, workqueue can
warn stall conditions and dump internal state to
@@ -7089,15 +7104,6 @@
threshold repeatedly. They are likely good
candidates for using WQ_UNBOUND workqueues instead.
- workqueue.disable_numa
- By default, all work items queued to unbound
- workqueues are affine to the NUMA nodes they're
- issued on, which results in better behavior in
- general. If NUMA affinity needs to be disabled for
- whatever reason, this option can be used. Note
- that this also can be controlled per-workqueue for
- workqueues visible under /sys/bus/workqueue/.
-
workqueue.power_efficient
Per-cpu workqueues are generally preferred because
they show better performance thanks to cache
@@ -7113,6 +7119,18 @@
The default value of this parameter is determined by
the config option CONFIG_WQ_POWER_EFFICIENT_DEFAULT.
+ workqueue.default_affinity_scope=
+ Select the default affinity scope to use for unbound
+ workqueues. Can be one of "cpu", "smt", "cache",
+ "numa" and "system". Default is "cache". For more
+ information, see the Affinity Scopes section in
+ Documentation/core-api/workqueue.rst.
+
+ This can be changed after boot by writing to the
+ matching /sys/module/workqueue/parameters file. All
+ workqueues with the "default" affinity scope will be
+ updated accordingly.
+
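As a hedged userspace sketch of the runtime change described above (the exact
parameter file path is an assumption derived from the parameter name, not taken
from this document):

.. code-block:: c

   #include <stdio.h>

   /* Hypothetical sketch: switch the default affinity scope at runtime.
    * The path below is assumed from the module parameter name; adjust it
    * if the file is named differently on your kernel. */
   int main(void)
   {
       const char *path =
           "/sys/module/workqueue/parameters/default_affinity_scope";
       FILE *f = fopen(path, "w");

       if (!f) {
           perror("fopen");
           return 1;
       }
       fputs("numa", f);
       fclose(f);
       return 0;
   }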
workqueue.debug_force_rr_cpu
Workqueue used to implicitly guarantee that work
items queued without explicit CPU specified are put
diff --git a/Documentation/admin-guide/media/qcom_camss.rst b/Documentation/admin-guide/media/qcom_camss.rst
index a72e17d09cb7..8a8f3ff40105 100644
--- a/Documentation/admin-guide/media/qcom_camss.rst
+++ b/Documentation/admin-guide/media/qcom_camss.rst
@@ -18,7 +18,7 @@ The driver implements V4L2, Media controller and V4L2 subdev interfaces.
Camera sensor using V4L2 subdev interface in the kernel is supported.
The driver is implemented using as a reference the Qualcomm Camera Subsystem
-driver for Android as found in Code Aurora [#f1]_ [#f2]_.
+driver for Android as found in Code Linaro [#f1]_ [#f2]_.
Qualcomm Camera Subsystem hardware
@@ -181,5 +181,5 @@ Referenced 2018-06-22.
References
----------
-.. [#f1] https://source.codeaurora.org/quic/la/kernel/msm-3.10/
-.. [#f2] https://source.codeaurora.org/quic/la/kernel/msm-3.18/
+.. [#f1] https://git.codelinaro.org/clo/la/kernel/msm-3.10/
+.. [#f2] https://git.codelinaro.org/clo/la/kernel/msm-3.18/
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index 3800fab1619b..cf33de56da27 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -450,6 +450,35 @@ this allows system administrators to override the
``IA64_THREAD_UAC_NOPRINT`` ``prctl`` and avoid logs being flooded.
+io_uring_disabled
+=================
+
+Prevents all processes from creating new io_uring instances. Enabling this
+shrinks the kernel's attack surface.
+
+= ======================================================================
+0 All processes can create io_uring instances as normal. This is the
+ default setting.
+1 io_uring creation is disabled (io_uring_setup() will fail with
+ -EPERM) for unprivileged processes not in the io_uring_group group.
+ Existing io_uring instances can still be used. See the
+ documentation for io_uring_group for more information.
+2 io_uring creation is disabled for all processes. io_uring_setup()
+ always fails with -EPERM. Existing io_uring instances can still be
+ used.
+= ======================================================================
+
+
+io_uring_group
+==============
+
+When io_uring_disabled is set to 1, a process must either be
+privileged (CAP_SYS_ADMIN) or be in the io_uring_group group in order
+to create an io_uring instance. If io_uring_group is set to -1 (the
+default), only processes with the CAP_SYS_ADMIN capability may create
+io_uring instances.
+
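A hedged userspace sketch showing how the policy above is observable: with
``io_uring_disabled`` set to 2 (or set to 1 for an unprivileged process outside
``io_uring_group``), the setup call below is expected to fail with EPERM.

.. code-block:: c

   #include <errno.h>
   #include <stdio.h>
   #include <string.h>
   #include <unistd.h>
   #include <sys/syscall.h>
   #include <linux/io_uring.h>

   int main(void)
   {
       struct io_uring_params p;
       long fd;

       memset(&p, 0, sizeof(p));
       fd = syscall(__NR_io_uring_setup, 4, &p);
       if (fd < 0) {
           /* EPERM here indicates io_uring creation is disabled */
           printf("io_uring_setup: %s\n", strerror(errno));
           return 1;
       }
       printf("io_uring instance created (fd %ld)\n", fd);
       close(fd);
       return 0;
   }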
+
kexec_load_disabled
===================
@@ -941,16 +970,35 @@ enabled, otherwise writing to this file will return ``-EBUSY``.
The default value is 8.
-perf_user_access (arm64 only)
-=================================
+perf_user_access (arm64 and riscv only)
+=======================================
+
+Controls user space access for reading perf event counters.
-Controls user space access for reading perf event counters. When set to 1,
-user space can read performance monitor counter registers directly.
+arm64
+=====
The default value is 0 (access disabled).
+When set to 1, user space can read performance monitor counter registers
+directly.
+
See Documentation/arch/arm64/perf.rst for more information.
+riscv
+=====
+
+When set to 0, user space access is disabled.
+
+The default value is 1: user space can read performance monitor counter
+registers through perf; any direct access without perf intervention will
+trigger an illegal instruction exception.
+
+When set to 2, legacy mode is enabled (user space has direct access to the
+cycle and instret CSRs only). Note that this legacy value is deprecated and
+will be removed once all user space applications are fixed.
+
+Note that the time CSR is always directly accessible to all modes.
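A small userspace sketch (an illustration, not part of the original text) that
reads the current mode and, on RISC-V, shows that the time CSR stays readable
regardless of the setting:

.. code-block:: c

   #include <stdio.h>

   int main(void)
   {
       int mode = -1;
       FILE *f = fopen("/proc/sys/kernel/perf_user_access", "r");

       if (f) {
           if (fscanf(f, "%d", &mode) != 1)
               mode = -1;
           fclose(f);
       }
       printf("perf_user_access = %d\n", mode);

   #if defined(__riscv)
       {
           unsigned long t;

           /* The time CSR is documented above as always accessible. */
           asm volatile("rdtime %0" : "=r"(t));
           printf("time CSR = %lu\n", t);
       }
   #endif
       return 0;
   }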
pid_max
=======
diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst
index f32db1f44ae9..e43c2fdafcd7 100644
--- a/Documentation/bpf/btf.rst
+++ b/Documentation/bpf/btf.rst
@@ -726,8 +726,8 @@ same as the one describe in :ref:`BTF_Type_String`.
4.2 .BTF.ext section
--------------------
-The .BTF.ext section encodes func_info and line_info which needs loader
-manipulation before loading into the kernel.
+The .BTF.ext section encodes func_info, line_info and CO-RE relocations
+which need loader manipulation before loading into the kernel.
The specification for .BTF.ext section is defined at ``tools/lib/bpf/btf.h``
and ``tools/lib/bpf/btf.c``.
@@ -745,15 +745,20 @@ The current header of .BTF.ext section::
__u32 func_info_len;
__u32 line_info_off;
__u32 line_info_len;
+
+ /* optional part of .BTF.ext header */
+ __u32 core_relo_off;
+ __u32 core_relo_len;
};
It is very similar to .BTF section. Instead of type/string section, it
-contains func_info and line_info section. See :ref:`BPF_Prog_Load` for details
-about func_info and line_info record format.
+contains func_info, line_info and core_relo sub-sections.
+See :ref:`BPF_Prog_Load` for details about func_info and line_info
+record format.
The func_info is organized as below.::
- func_info_rec_size
+ func_info_rec_size /* __u32 value */
btf_ext_info_sec for section #1 /* func_info for section #1 */
btf_ext_info_sec for section #2 /* func_info for section #2 */
...
@@ -773,7 +778,7 @@ Here, num_info must be greater than 0.
The line_info is organized as below.::
- line_info_rec_size
+ line_info_rec_size /* __u32 value */
btf_ext_info_sec for section #1 /* line_info for section #1 */
btf_ext_info_sec for section #2 /* line_info for section #2 */
...
@@ -787,6 +792,20 @@ kernel API, the ``insn_off`` is the instruction offset in the unit of ``struct
bpf_insn``. For ELF API, the ``insn_off`` is the byte offset from the
beginning of section (``btf_ext_info_sec->sec_name_off``).
+The core_relo is organized as below.::
+
+ core_relo_rec_size /* __u32 value */
+ btf_ext_info_sec for section #1 /* core_relo for section #1 */
+ btf_ext_info_sec for section #2 /* core_relo for section #2 */
+
+``core_relo_rec_size`` specifies the size of ``bpf_core_relo``
+structure when .BTF.ext is generated. All ``bpf_core_relo`` structures
+within a single ``btf_ext_info_sec`` describe relocations applied to
+section named by ``btf_ext_info_sec->sec_name_off``.
+
+See :ref:`Documentation/bpf/llvm_reloc.rst <btf-co-re-relocations>`
+for more information on CO-RE relocations.
+
4.2 .BTF_ids section
--------------------
diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst
index 1ff177b89d66..aeaeb35e6d4a 100644
--- a/Documentation/bpf/index.rst
+++ b/Documentation/bpf/index.rst
@@ -29,6 +29,7 @@ that goes into great technical depth about the BPF Architecture.
bpf_licensing
test_debug
clang-notes
+ linux-notes
other
redirect
diff --git a/Documentation/bpf/standardization/linux-notes.rst b/Documentation/bpf/linux-notes.rst
index 00d2693de025..00d2693de025 100644
--- a/Documentation/bpf/standardization/linux-notes.rst
+++ b/Documentation/bpf/linux-notes.rst
diff --git a/Documentation/bpf/llvm_reloc.rst b/Documentation/bpf/llvm_reloc.rst
index 450e6403fe3d..44188e219d32 100644
--- a/Documentation/bpf/llvm_reloc.rst
+++ b/Documentation/bpf/llvm_reloc.rst
@@ -240,3 +240,307 @@ The .BTF/.BTF.ext sections has R_BPF_64_NODYLD32 relocations::
Offset Info Type Symbol's Value Symbol's Name
000000000000002c 0000000200000004 R_BPF_64_NODYLD32 0000000000000000 .text
0000000000000040 0000000200000004 R_BPF_64_NODYLD32 0000000000000000 .text
+
+.. _btf-co-re-relocations:
+
+=================
+CO-RE Relocations
+=================
+
+From the object file's point of view, the CO-RE mechanism is implemented
+as a set of CO-RE-specific relocation records. These relocation records are
+not related to ELF relocations and are encoded in the .BTF.ext section.
+See :ref:`Documentation/bpf/btf.rst <BTF_Ext_Section>` for more
+information on .BTF.ext structure.
+
+CO-RE relocations are applied to BPF instructions to update their immediate
+or offset fields at load time with information relevant to the target
+kernel.
+
+The field to patch is selected based on the instruction class (see the
+sketch after this list):
+
+* For BPF_ALU, BPF_ALU64 and BPF_LD, the `immediate` field is patched;
+* For BPF_LDX, BPF_STX and BPF_ST, the `offset` field is patched;
+* BPF_JMP, BPF_JMP32 instructions **should not** be patched.
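A minimal sketch of this selection, assuming only the UAPI definitions from
``<linux/bpf.h>`` (the helper name is illustrative, not part of any loader):

.. code-block:: c

   #include <linux/bpf.h>

   /* Illustrative only: rewrite the field selected by the rules above. */
   static void apply_core_value(struct bpf_insn *insn, __u32 new_value)
   {
       switch (BPF_CLASS(insn->code)) {
       case BPF_ALU:
       case BPF_ALU64:
       case BPF_LD:
           insn->imm = (__s32)new_value;   /* immediate field */
           break;
       case BPF_LDX:
       case BPF_STX:
       case BPF_ST:
           insn->off = (__s16)new_value;   /* offset field */
           break;
       default:
           break;                          /* BPF_JMP/BPF_JMP32 must not be patched */
       }
   }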
+
+Relocation kinds
+================
+
+There are several kinds of CO-RE relocations that can be split into
+three groups:
+
+* Field-based - patch instruction with field related information, e.g.
+ change offset field of the BPF_LDX instruction to reflect offset
+ of a specific structure field in the target kernel.
+
+* Type-based - patch instruction with type related information, e.g.
+ change immediate field of the BPF_ALU move instruction to 0 or 1 to
+ reflect if specific type is present in the target kernel.
+
+* Enum-based - patch instruction with enum related information, e.g.
+ change immediate field of the BPF_LD_IMM64 instruction to reflect
+ value of a specific enum literal in the target kernel.
+
+The complete list of relocation kinds is represented by the following enum:
+
+.. code-block:: c
+
+ enum bpf_core_relo_kind {
+ BPF_CORE_FIELD_BYTE_OFFSET = 0, /* field byte offset */
+ BPF_CORE_FIELD_BYTE_SIZE = 1, /* field size in bytes */
+ BPF_CORE_FIELD_EXISTS = 2, /* field existence in target kernel */
+ BPF_CORE_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */
+ BPF_CORE_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */
+ BPF_CORE_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */
+ BPF_CORE_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */
+ BPF_CORE_TYPE_ID_TARGET = 7, /* type ID in target kernel */
+ BPF_CORE_TYPE_EXISTS = 8, /* type existence in target kernel */
+ BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */
+ BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */
+ BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */
+ BPF_CORE_TYPE_MATCHES = 12, /* type match in target kernel */
+ };
+
+Notes:
+
+* ``BPF_CORE_FIELD_LSHIFT_U64`` and ``BPF_CORE_FIELD_RSHIFT_U64`` are
+ supposed to be used to read bitfield values using the following
+ algorithm:
+
+ .. code-block:: c
+
+ // To read bitfield ``f`` from ``struct s``
+ is_signed = relo(s->f, BPF_CORE_FIELD_SIGNED)
+ off = relo(s->f, BPF_CORE_FIELD_BYTE_OFFSET)
+ sz = relo(s->f, BPF_CORE_FIELD_BYTE_SIZE)
+ l = relo(s->f, BPF_CORE_FIELD_LSHIFT_U64)
+ r = relo(s->f, BPF_CORE_FIELD_RSHIFT_U64)
+ // define ``v`` as signed or unsigned integer of size ``sz``
+ v = *({s|u}<sz> *)((void *)s + off)
+ v <<= l
+ v >>= r
+
+* The ``BPF_CORE_TYPE_MATCHES`` queries matching relation, defined as
+ follows:
+
+ * for integers: types match if size and signedness match;
+ * for arrays & pointers: target types are recursively matched;
+ * for structs & unions:
+
+ * local members need to exist in target with the same name;
+
+ * for each member we recursively check match unless it is already behind a
+ pointer, in which case we only check matching names and compatible kind;
+
+ * for enums:
+
+ * local variants have to have a match in target by symbolic name (but not
+ numeric value);
+
+ * size has to match (but enum may match enum64 and vice versa);
+
+ * for function pointers:
+
+ * number and position of arguments in local type has to match target;
+ * for each argument and the return value we recursively check match.
+
+CO-RE Relocation Record
+=======================
+
+Relocation record is encoded as the following structure:
+
+.. code-block:: c
+
+ struct bpf_core_relo {
+ __u32 insn_off;
+ __u32 type_id;
+ __u32 access_str_off;
+ enum bpf_core_relo_kind kind;
+ };
+
+* ``insn_off`` - instruction offset (in bytes) within a code section
+ associated with this relocation;
+
+* ``type_id`` - BTF type ID of the "root" (containing) entity of a
+ relocatable type or field;
+
+* ``access_str_off`` - offset into corresponding .BTF string section.
+ String interpretation depends on specific relocation kind:
+
+ * for field-based relocations, string encodes an accessed field using
+ a sequence of field and array indices, separated by colon (:). It's
+ conceptually very close to LLVM's `getelementptr <GEP_>`_ instruction's
+ arguments for identifying offset to a field. For example, consider the
+ following C code:
+
+ .. code-block:: c
+
+ struct sample {
+ int a;
+ int b;
+ struct { int c[10]; };
+ } __attribute__((preserve_access_index));
+ struct sample *s;
+
+ * Access to ``s[0].a`` would be encoded as ``0:0``:
+
+ * ``0``: first element of ``s`` (as if ``s`` is an array);
+ * ``0``: index of field ``a`` in ``struct sample``.
+
+ * Access to ``s->a`` would be encoded as ``0:0`` as well.
+ * Access to ``s->b`` would be encoded as ``0:1``:
+
+ * ``0``: first element of ``s``;
+ * ``1``: index of field ``b`` in ``struct sample``.
+
+ * Access to ``s[1].c[5]`` would be encoded as ``1:2:0:5``:
+
+ * ``1``: second element of ``s``;
+ * ``2``: index of anonymous structure field in ``struct sample``;
+ * ``0``: index of field ``c`` in anonymous structure;
+ * ``5``: access to array element #5.
+
+ * for type-based relocations, string is expected to be just "0";
+
+ * for enum value-based relocations, string contains an index of enum
+ value within its enum type;
+
+* ``kind`` - one of ``enum bpf_core_relo_kind``.
+
+.. _GEP: https://llvm.org/docs/LangRef.html#getelementptr-instruction
+
+.. _btf_co_re_relocation_examples:
+
+CO-RE Relocation Examples
+=========================
+
+For the following C code:
+
+.. code-block:: c
+
+ struct foo {
+ int a;
+ int b;
+ unsigned c:15;
+ } __attribute__((preserve_access_index));
+
+ enum bar { U, V };
+
+With the following BTF definitions:
+
+.. code-block::
+
+ ...
+ [2] STRUCT 'foo' size=8 vlen=2
+ 'a' type_id=3 bits_offset=0
+ 'b' type_id=3 bits_offset=32
+ 'c' type_id=4 bits_offset=64 bitfield_size=15
+ [3] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
+ [4] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)
+ ...
+ [16] ENUM 'bar' encoding=UNSIGNED size=4 vlen=2
+ 'U' val=0
+ 'V' val=1
+
+Field offset relocations are generated automatically when
+``__attribute__((preserve_access_index))`` is used, for example:
+
+.. code-block:: c
+
+ void alpha(struct foo *s, volatile unsigned long *g) {
+ *g = s->a;
+ s->a = 1;
+ }
+
+ 00 <alpha>:
+ 0: r3 = *(s32 *)(r1 + 0x0)
+ 00: CO-RE <byte_off> [2] struct foo::a (0:0)
+ 1: *(u64 *)(r2 + 0x0) = r3
+ 2: *(u32 *)(r1 + 0x0) = 0x1
+ 10: CO-RE <byte_off> [2] struct foo::a (0:0)
+ 3: exit
+
+
+All relocation kinds could be requested via built-in functions.
+E.g. field-based relocations:
+
+.. code-block:: c
+
+ void bravo(struct foo *s, volatile unsigned long *g) {
+ *g = __builtin_preserve_field_info(s->b, 0 /* field byte offset */);
+ *g = __builtin_preserve_field_info(s->b, 1 /* field byte size */);
+ *g = __builtin_preserve_field_info(s->b, 2 /* field existence */);
+ *g = __builtin_preserve_field_info(s->b, 3 /* field signedness */);
+ *g = __builtin_preserve_field_info(s->c, 4 /* bitfield left shift */);
+ *g = __builtin_preserve_field_info(s->c, 5 /* bitfield right shift */);
+ }
+
+ 20 <bravo>:
+ 4: r1 = 0x4
+ 20: CO-RE <byte_off> [2] struct foo::b (0:1)
+ 5: *(u64 *)(r2 + 0x0) = r1
+ 6: r1 = 0x4
+ 30: CO-RE <byte_sz> [2] struct foo::b (0:1)
+ 7: *(u64 *)(r2 + 0x0) = r1
+ 8: r1 = 0x1
+ 40: CO-RE <field_exists> [2] struct foo::b (0:1)
+ 9: *(u64 *)(r2 + 0x0) = r1
+ 10: r1 = 0x1
+ 50: CO-RE <signed> [2] struct foo::b (0:1)
+ 11: *(u64 *)(r2 + 0x0) = r1
+ 12: r1 = 0x31
+ 60: CO-RE <lshift_u64> [2] struct foo::c (0:2)
+ 13: *(u64 *)(r2 + 0x0) = r1
+ 14: r1 = 0x31
+ 70: CO-RE <rshift_u64> [2] struct foo::c (0:2)
+ 15: *(u64 *)(r2 + 0x0) = r1
+ 16: exit
+
+
+Type-based relocations:
+
+.. code-block:: c
+
+ void charlie(struct foo *s, volatile unsigned long *g) {
+ *g = __builtin_preserve_type_info(*s, 0 /* type existence */);
+ *g = __builtin_preserve_type_info(*s, 1 /* type size */);
+ *g = __builtin_preserve_type_info(*s, 2 /* type matches */);
+ *g = __builtin_btf_type_id(*s, 0 /* type id in this object file */);
+ *g = __builtin_btf_type_id(*s, 1 /* type id in target kernel */);
+ }
+
+ 88 <charlie>:
+ 17: r1 = 0x1
+ 88: CO-RE <type_exists> [2] struct foo
+ 18: *(u64 *)(r2 + 0x0) = r1
+ 19: r1 = 0xc
+ 98: CO-RE <type_size> [2] struct foo
+ 20: *(u64 *)(r2 + 0x0) = r1
+ 21: r1 = 0x1
+ a8: CO-RE <type_matches> [2] struct foo
+ 22: *(u64 *)(r2 + 0x0) = r1
+ 23: r1 = 0x2 ll
+ b8: CO-RE <local_type_id> [2] struct foo
+ 25: *(u64 *)(r2 + 0x0) = r1
+ 26: r1 = 0x2 ll
+ d0: CO-RE <target_type_id> [2] struct foo
+ 28: *(u64 *)(r2 + 0x0) = r1
+ 29: exit
+
+Enum-based relocations:
+
+.. code-block:: c
+
+ void delta(struct foo *s, volatile unsigned long *g) {
+ *g = __builtin_preserve_enum_value(*(enum bar *)U, 0 /* enum literal existence */);
+ *g = __builtin_preserve_enum_value(*(enum bar *)V, 1 /* enum literal value */);
+ }
+
+ f0 <delta>:
+ 30: r1 = 0x1 ll
+ f0: CO-RE <enumval_exists> [16] enum bar::U = 0
+ 32: *(u64 *)(r2 + 0x0) = r1
+ 33: r1 = 0x1 ll
+ 108: CO-RE <enumval_value> [16] enum bar::V = 1
+ 35: *(u64 *)(r2 + 0x0) = r1
+ 36: exit
diff --git a/Documentation/bpf/standardization/abi.rst b/Documentation/bpf/standardization/abi.rst
new file mode 100644
index 000000000000..0c2e10eeb89a
--- /dev/null
+++ b/Documentation/bpf/standardization/abi.rst
@@ -0,0 +1,25 @@
+.. contents::
+.. sectnum::
+
+===================================================
+BPF ABI Recommended Conventions and Guidelines v1.0
+===================================================
+
+This is version 1.0 of an informational document containing recommended
+conventions and guidelines for producing portable BPF program binaries.
+
+Registers and calling convention
+================================
+
+BPF has 10 general purpose registers and a read-only frame pointer register,
+all of which are 64-bits wide.
+
+The BPF calling convention is defined as:
+
+* R0: return value from function calls, and exit value for BPF programs
+* R1 - R5: arguments for function calls
+* R6 - R9: callee saved registers that function calls will preserve
+* R10: read-only frame pointer to access stack
+
+R0 - R5 are scratch registers and BPF programs need to spill/fill them if
+necessary across calls.
diff --git a/Documentation/bpf/standardization/index.rst b/Documentation/bpf/standardization/index.rst
index 09c6ba055fd7..a50c3baf6345 100644
--- a/Documentation/bpf/standardization/index.rst
+++ b/Documentation/bpf/standardization/index.rst
@@ -12,7 +12,7 @@ for the working group charter, documents, and more.
:maxdepth: 1
instruction-set
- linux-notes
+ abi
.. Links:
.. _IETF BPF Working Group: https://datatracker.ietf.org/wg/bpf/about/
diff --git a/Documentation/bpf/standardization/instruction-set.rst b/Documentation/bpf/standardization/instruction-set.rst
index 4f73e9dc8d9e..c5d53a6e8c79 100644
--- a/Documentation/bpf/standardization/instruction-set.rst
+++ b/Documentation/bpf/standardization/instruction-set.rst
@@ -1,11 +1,11 @@
.. contents::
.. sectnum::
-========================================
-eBPF Instruction Set Specification, v1.0
-========================================
+=======================================
+BPF Instruction Set Specification, v1.0
+=======================================
-This document specifies version 1.0 of the eBPF instruction set.
+This document specifies version 1.0 of the BPF instruction set.
Documentation conventions
=========================
@@ -97,26 +97,10 @@ Definitions
A: 10000110
B: 11111111 10000110
-Registers and calling convention
-================================
-
-eBPF has 10 general purpose registers and a read-only frame pointer register,
-all of which are 64-bits wide.
-
-The eBPF calling convention is defined as:
-
-* R0: return value from function calls, and exit value for eBPF programs
-* R1 - R5: arguments for function calls
-* R6 - R9: callee saved registers that function calls will preserve
-* R10: read-only frame pointer to access stack
-
-R0 - R5 are scratch registers and eBPF programs needs to spill/fill them if
-necessary across calls.
-
Instruction encoding
====================
-eBPF has two instruction encodings:
+BPF has two instruction encodings:
* the basic instruction encoding, which uses 64 bits to encode an instruction
* the wide instruction encoding, which appends a second 64-bit immediate (i.e.,
@@ -260,7 +244,7 @@ BPF_END 0xd0 0 byte swap operations (see `Byte swap instructions`_ b
========= ===== ======= ==========================================================
Underflow and overflow are allowed during arithmetic operations, meaning
-the 64-bit or 32-bit value will wrap. If eBPF program execution would
+the 64-bit or 32-bit value will wrap. If BPF program execution would
result in division by zero, the destination register is instead set to zero.
If execution would result in modulo by zero, for ``BPF_ALU64`` the value of
the destination register is unchanged whereas for ``BPF_ALU`` the upper
@@ -373,7 +357,7 @@ BPF_JNE 0x5 any PC += offset if dst != src
BPF_JSGT 0x6 any PC += offset if dst > src signed
BPF_JSGE 0x7 any PC += offset if dst >= src signed
BPF_CALL 0x8 0x0 call helper function by address see `Helper functions`_
-BPF_CALL 0x8 0x1 call PC += offset see `Program-local functions`_
+BPF_CALL 0x8 0x1 call PC += imm see `Program-local functions`_
BPF_CALL 0x8 0x2 call helper function by BTF ID see `Helper functions`_
BPF_EXIT 0x9 0x0 return BPF_JMP only
BPF_JLT 0xa any PC += offset if dst < src unsigned
@@ -382,7 +366,7 @@ BPF_JSLT 0xc any PC += offset if dst < src signed
BPF_JSLE 0xd any PC += offset if dst <= src signed
======== ===== === =========================================== =========================================
-The eBPF program needs to store the return value into register R0 before doing a
+The BPF program needs to store the return value into register R0 before doing a
``BPF_EXIT``.
Example:
@@ -424,8 +408,8 @@ Program-local functions
~~~~~~~~~~~~~~~~~~~~~~~
Program-local functions are functions exposed by the same BPF program as the
caller, and are referenced by offset from the call instruction, similar to
-``BPF_JA``. A ``BPF_EXIT`` within the program-local function will return to
-the caller.
+``BPF_JA``. The offset is encoded in the imm field of the call instruction.
+A ``BPF_EXIT`` within the program-local function will return to the caller.
Load and store instructions
===========================
@@ -502,9 +486,9 @@ Atomic operations
Atomic operations are operations that operate on memory and can not be
interrupted or corrupted by other access to the same memory region
-by other eBPF programs or means outside of this specification.
+by other BPF programs or means outside of this specification.
-All atomic operations supported by eBPF are encoded as store operations
+All atomic operations supported by BPF are encoded as store operations
that use the ``BPF_ATOMIC`` mode modifier as follows:
* ``BPF_ATOMIC | BPF_W | BPF_STX`` for 32-bit operations
@@ -594,7 +578,7 @@ where
Maps
~~~~
-Maps are shared memory regions accessible by eBPF programs on some platforms.
+Maps are shared memory regions accessible by BPF programs on some platforms.
A map can have various semantics as defined in a separate document, and may or
may not have a single contiguous memory region, but the 'map_val(map)' is
currently only defined for maps that do have a single contiguous memory region.
@@ -616,6 +600,6 @@ identified by the given id.
Legacy BPF Packet access instructions
-------------------------------------
-eBPF previously introduced special instructions for access to packet data that were
+BPF previously introduced special instructions for access to packet data that were
carried over from classic BPF. However, these instructions are
deprecated and should no longer be used.
diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index dfe7e75a71de..4451ef501936 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -15,9 +15,10 @@ Integer types
If variable is of Type, use printk format specifier:
------------------------------------------------------------
- char %d or %x
+ signed char %d or %hhx
unsigned char %u or %x
- short int %d or %x
+ char %u or %x
+ short int %d or %hx
unsigned short int %u or %x
int %d or %x
unsigned int %u or %x
@@ -27,9 +28,9 @@ Integer types
unsigned long long %llu or %llx
size_t %zu or %zx
ssize_t %zd or %zx
- s8 %d or %x
+ s8 %d or %hhx
u8 %u or %x
- s16 %d or %x
+ s16 %d or %hx
u16 %u or %x
s32 %d or %x
 u32				%u or %x
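A brief in-kernel sketch using the specifiers from the updated table (the
function and variable names are illustrative only):

.. code-block:: c

   #include <linux/printk.h>
   #include <linux/types.h>

   static void show_int_formats(void)
   {
       signed char sc = -5;
       char c = 'A';        /* plain char: print as unsigned */
       s8  v8  = -8;
       s16 v16 = -300;

       pr_info("signed char: %d (0x%hhx)\n", sc, sc);
       pr_info("char:        %u (0x%x)\n", c, c);
       pr_info("s8:          %d (0x%hhx)\n", v8, v8);
       pr_info("s16:         %d (0x%hx)\n", v16, v16);
   }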
diff --git a/Documentation/core-api/workqueue.rst b/Documentation/core-api/workqueue.rst
index a4c9b9d1905f..5d7b01aed1fe 100644
--- a/Documentation/core-api/workqueue.rst
+++ b/Documentation/core-api/workqueue.rst
@@ -1,6 +1,6 @@
-====================================
-Concurrency Managed Workqueue (cmwq)
-====================================
+=========
+Workqueue
+=========
:Date: September, 2010
:Author: Tejun Heo <tj@kernel.org>
@@ -25,8 +25,8 @@ there is no work item left on the workqueue the worker becomes idle.
When a new work item gets queued, the worker begins executing again.
-Why cmwq?
-=========
+Why Concurrency Managed Workqueue?
+==================================
In the original wq implementation, a multi threaded (MT) wq had one
worker thread per CPU and a single threaded (ST) wq had one worker
@@ -220,17 +220,16 @@ resources, scheduled and executed.
``max_active``
--------------
-``@max_active`` determines the maximum number of execution contexts
-per CPU which can be assigned to the work items of a wq. For example,
-with ``@max_active`` of 16, at most 16 work items of the wq can be
-executing at the same time per CPU.
+``@max_active`` determines the maximum number of execution contexts per
+CPU which can be assigned to the work items of a wq. For example, with
+``@max_active`` of 16, at most 16 work items of the wq can be executing
+at the same time per CPU. This is always a per-CPU attribute, even for
+unbound workqueues.
-Currently, for a bound wq, the maximum limit for ``@max_active`` is
-512 and the default value used when 0 is specified is 256. For an
-unbound wq, the limit is higher of 512 and 4 *
-``num_possible_cpus()``. These values are chosen sufficiently high
-such that they are not the limiting factor while providing protection
-in runaway cases.
+The maximum limit for ``@max_active`` is 512 and the default value used
+when 0 is specified is 256. These values are chosen sufficiently high
+such that they are not the limiting factor while providing protection in
+runaway cases.
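As a hedged sketch of the ``@max_active`` semantics described above (the
workqueue name is hypothetical):

.. code-block:: c

   #include <linux/errno.h>
   #include <linux/workqueue.h>

   /* Hypothetical example: an unbound workqueue whose work items may run
    * on any CPU, but with at most 16 of them executing concurrently per
    * CPU, as described above. Passing 0 would select the default of 256. */
   static struct workqueue_struct *example_wq;

   static int example_wq_init(void)
   {
       example_wq = alloc_workqueue("example_wq", WQ_UNBOUND, 16);
       return example_wq ? 0 : -ENOMEM;
   }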
The number of active work items of a wq is usually regulated by the
users of the wq, more specifically, by how many work items the users
@@ -348,27 +347,346 @@ Guidelines
level of locality in wq operations and work item execution.
+Affinity Scopes
+===============
+
+An unbound workqueue groups CPUs according to its affinity scope to improve
+cache locality. For example, if a workqueue is using the default affinity
+scope of "cache", it will group CPUs according to last level cache
+boundaries. A work item queued on the workqueue will be assigned to a worker
+on one of the CPUs which share the last level cache with the issuing CPU.
+Once started, the worker may or may not be allowed to move outside the scope
+depending on the ``affinity_strict`` setting of the scope.
+
+Workqueue currently supports the following affinity scopes.
+
+``default``
+ Use the scope in module parameter ``workqueue.default_affinity_scope``
+ which is always set to one of the scopes below.
+
+``cpu``
+ CPUs are not grouped. A work item issued on one CPU is processed by a
+ worker on the same CPU. This makes unbound workqueues behave as per-cpu
+ workqueues without concurrency management.
+
+``smt``
+ CPUs are grouped according to SMT boundaries. This usually means that the
+ logical threads of each physical CPU core are grouped together.
+
+``cache``
+ CPUs are grouped according to cache boundaries. Which specific cache
+ boundary is used is determined by the arch code. L3 is used in a lot of
+ cases. This is the default affinity scope.
+
+``numa``
+ CPUs are grouped according to NUMA boundaries.
+
+``system``
+ All CPUs are put in the same group. Workqueue makes no effort to process a
+ work item on a CPU close to the issuing CPU.
+
+The default affinity scope can be changed with the module parameter
+``workqueue.default_affinity_scope`` and a specific workqueue's affinity
+scope can be changed using ``apply_workqueue_attrs()``.
+
+If ``WQ_SYSFS`` is set, the workqueue will have the following affinity scope
+related interface files under its ``/sys/devices/virtual/WQ_NAME/``
+directory.
+
+``affinity_scope``
+ Read to see the current affinity scope. Write to change.
+
+ When default is the current scope, reading this file will also show the
+ current effective scope in parentheses, for example, ``default (cache)``.
+
+``affinity_strict``
+ 0 by default indicating that affinity scopes are not strict. When a work
+ item starts execution, workqueue makes a best-effort attempt to ensure
+ that the worker is inside its affinity scope, which is called
+ repatriation. Once execution starts, the scheduler is free to move the worker
+ anywhere in the system as it sees fit. This enables benefiting from scope
+ locality while still being able to utilize other CPUs if necessary and
+ available.
+
+ If set to 1, all workers of the scope are guaranteed always to be in the
+ scope. This may be useful when crossing affinity scopes has other
+ implications, for example, in terms of power consumption or workload
+ isolation. Strict NUMA scope can also be used to match the workqueue
+ behavior of older kernels.
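A minimal in-kernel sketch of changing a workqueue's affinity scope with
``apply_workqueue_attrs()`` as mentioned above. The attribute field names
(``affn_scope``, ``affn_strict``) and the scope constant (``WQ_AFFN_NUMA``) are
assumptions based on the workqueue code this document describes, not
definitions given in the document itself:

.. code-block:: c

   #include <linux/errno.h>
   #include <linux/workqueue.h>

   /* Hedged sketch: emulate the strict NUMA behavior of older kernels for
    * one unbound workqueue. Field and constant names are assumptions. */
   static struct workqueue_struct *my_wq;

   static int my_wq_setup(void)
   {
       struct workqueue_attrs *attrs;
       int ret;

       my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_SYSFS, 0);
       if (!my_wq)
           return -ENOMEM;

       attrs = alloc_workqueue_attrs();
       if (!attrs)
           return -ENOMEM;

       attrs->affn_scope = WQ_AFFN_NUMA;   /* assumed enum value */
       attrs->affn_strict = true;          /* assumed field name */
       ret = apply_workqueue_attrs(my_wq, attrs);
       free_workqueue_attrs(attrs);
       return ret;
   }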
+
+
+Affinity Scopes and Performance
+===============================
+
+It'd be ideal if an unbound workqueue's behavior were optimal for the vast
+majority of use cases without further tuning. Unfortunately, in the current
+kernel, there exists a pronounced trade-off between locality and utilization
+necessitating explicit configurations when workqueues are heavily used.
+
+Higher locality leads to higher efficiency where more work is performed for
+the same number of consumed CPU cycles. However, higher locality may also
+cause lower overall system utilization if the work items are not spread
+enough across the affinity scopes by the issuers. The following performance
+testing with dm-crypt clearly illustrates this trade-off.
+
+The tests are run on a CPU with 12-cores/24-threads split across four L3
+caches (AMD Ryzen 9 3900x). CPU clock boost is turned off for consistency.
+``/dev/dm-0`` is a dm-crypt device created on NVME SSD (Samsung 990 PRO) and
+opened with ``cryptsetup`` with default settings.
+
+
+Scenario 1: Enough issuers and work spread across the machine
+-------------------------------------------------------------
+
+The command used: ::
+
+ $ fio --filename=/dev/dm-0 --direct=1 --rw=randrw --bs=32k --ioengine=libaio \
+ --iodepth=64 --runtime=60 --numjobs=24 --time_based --group_reporting \
+ --name=iops-test-job --verify=sha512
+
+There are 24 issuers, each issuing 64 IOs concurrently. ``--verify=sha512``
+makes ``fio`` generate and read back the content each time which makes
+execution locality matter between the issuer and ``kcryptd``. The following
+are the read bandwidths and CPU utilizations depending on different affinity
+scope settings on ``kcryptd``, measured over five runs. Bandwidths are in
+MiBps, and CPU utilization in percent.
+
+.. list-table::
+ :widths: 16 20 20
+ :header-rows: 1
+
+ * - Affinity
+ - Bandwidth (MiBps)
+ - CPU util (%)
+
+ * - system
+ - 1159.40 ±1.34
+ - 99.31 ±0.02
+
+ * - cache
+ - 1166.40 ±0.89
+ - 99.34 ±0.01
+
+ * - cache (strict)
+ - 1166.00 ±0.71
+ - 99.35 ±0.01
+
+With enough issuers spread across the system, there is no downside to
+"cache", strict or otherwise. All three configurations saturate the whole
+machine but the cache-affine ones outperform by 0.6% thanks to improved
+locality.
+
+
+Scenario 2: Fewer issuers, enough work for saturation
+-----------------------------------------------------
+
+The command used: ::
+
+ $ fio --filename=/dev/dm-0 --direct=1 --rw=randrw --bs=32k \
+ --ioengine=libaio --iodepth=64 --runtime=60 --numjobs=8 \
+ --time_based --group_reporting --name=iops-test-job --verify=sha512
+
+The only difference from the previous scenario is ``--numjobs=8``. There are
+a third as many issuers but still enough total work to saturate the
+system.
+
+.. list-table::
+ :widths: 16 20 20
+ :header-rows: 1
+
+ * - Affinity
+ - Bandwidth (MiBps)
+ - CPU util (%)
+
+ * - system
+ - 1155.40 ±0.89
+ - 97.41 ±0.05
+
+ * - cache
+ - 1154.40 ±1.14
+ - 96.15 ±0.09
+
+ * - cache (strict)
+ - 1112.00 ±4.64
+ - 93.26 ±0.35
+
+This is more than enough work to saturate the system. Both "system" and
+"cache" are nearly saturating the machine but not fully. "cache" is using
+less CPU but the better efficiency puts it at the same bandwidth as
+"system".
+
+Eight issuers moving around over four L3 cache scopes still allow "cache
+(strict)" to mostly saturate the machine, but the loss of work conservation
+is now starting to hurt with a 3.7% bandwidth loss.
+
+
+Scenario 3: Even fewer issuers, not enough work to saturate
+-----------------------------------------------------------
+
+The command used: ::
+
+ $ fio --filename=/dev/dm-0 --direct=1 --rw=randrw --bs=32k \
+ --ioengine=libaio --iodepth=64 --runtime=60 --numjobs=4 \
+ --time_based --group_reporting --name=iops-test-job --verify=sha512
+
+Again, the only difference is ``--numjobs=4``. With the number of issuers
+reduced to four, there now isn't enough work to saturate the whole system
+and the bandwidth becomes dependent on completion latencies.
+
+.. list-table::
+ :widths: 16 20 20
+ :header-rows: 1
+
+ * - Affinity
+ - Bandwidth (MiBps)
+ - CPU util (%)
+
+ * - system
+ - 993.60 ±1.82
+ - 75.49 ±0.06
+
+ * - cache
+ - 973.40 ±1.52
+ - 74.90 ±0.07
+
+ * - cache (strict)
+ - 828.20 ±4.49
+ - 66.84 ±0.29
+
+Now, the tradeoff between locality and utilization is clearer. "cache" shows
+a 2% bandwidth loss compared to "system", and "cache (strict)" a whopping 20%.
+
+
+Conclusion and Recommendations
+------------------------------
+
+In the above experiments, the efficiency advantage of the "cache" affinity
+scope over "system" is, while consistent and noticeable, small. However, the
+impact is dependent on the distances between the scopes and may be more
+pronounced in processors with more complex topologies.
+
+While the loss of work-conservation in certain scenarios hurts, it is a lot
+better than "cache (strict)" and maximizing workqueue utilization is
+unlikely to be the common case anyway. As such, "cache" is the default
+affinity scope for unbound pools.
+
+* As there is no one option which is great for most cases, workqueue usages
+ that may consume a significant amount of CPU are recommended to configure
+ the workqueues using ``apply_workqueue_attrs()`` and/or enable
+ ``WQ_SYSFS``.
+
+* An unbound workqueue with strict "cpu" affinity scope behaves the same as
+ a ``WQ_CPU_INTENSIVE`` per-cpu workqueue. There is no real advantage to the
+ latter and an unbound workqueue provides a lot more flexibility.
+
+* Affinity scopes were introduced in Linux v6.5. To emulate the previous
+ behavior, use strict "numa" affinity scope.
+
+* The loss of work-conservation in non-strict affinity scopes is likely
+ originating from the scheduler. There is no theoretical reason why the
+ kernel wouldn't be able to do the right thing and maintain
+ work-conservation in most cases. As such, it is possible that future
+ scheduler improvements may make most of these tunables unnecessary.
+
+
+Examining Configuration
+=======================
+
+Use tools/workqueue/wq_dump.py to examine unbound CPU affinity
+configuration, worker pools and how workqueues map to the pools: ::
+
+ $ tools/workqueue/wq_dump.py
+ Affinity Scopes
+ ===============
+ wq_unbound_cpumask=0000000f
+
+ CPU
+ nr_pods 4
+ pod_cpus [0]=00000001 [1]=00000002 [2]=00000004 [3]=00000008
+ pod_node [0]=0 [1]=0 [2]=1 [3]=1
+ cpu_pod [0]=0 [1]=1 [2]=2 [3]=3
+
+ SMT
+ nr_pods 4
+ pod_cpus [0]=00000001 [1]=00000002 [2]=00000004 [3]=00000008
+ pod_node [0]=0 [1]=0 [2]=1 [3]=1
+ cpu_pod [0]=0 [1]=1 [2]=2 [3]=3
+
+ CACHE (default)
+ nr_pods 2
+ pod_cpus [0]=00000003 [1]=0000000c
+ pod_node [0]=0 [1]=1
+ cpu_pod [0]=0 [1]=0 [2]=1 [3]=1
+
+ NUMA
+ nr_pods 2
+ pod_cpus [0]=00000003 [1]=0000000c
+ pod_node [0]=0 [1]=1
+ cpu_pod [0]=0 [1]=0 [2]=1 [3]=1
+
+ SYSTEM
+ nr_pods 1
+ pod_cpus [0]=0000000f
+ pod_node [0]=-1
+ cpu_pod [0]=0 [1]=0 [2]=0 [3]=0
+
+ Worker Pools
+ ============
+ pool[00] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 0
+ pool[01] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 0
+ pool[02] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 1
+ pool[03] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 1
+ pool[04] ref= 1 nice= 0 idle/workers= 4/ 4 cpu= 2
+ pool[05] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 2
+ pool[06] ref= 1 nice= 0 idle/workers= 3/ 3 cpu= 3
+ pool[07] ref= 1 nice=-20 idle/workers= 2/ 2 cpu= 3
+ pool[08] ref=42 nice= 0 idle/workers= 6/ 6 cpus=0000000f
+ pool[09] ref=28 nice= 0 idle/workers= 3/ 3 cpus=00000003
+ pool[10] ref=28 nice= 0 idle/workers= 17/ 17 cpus=0000000c
+ pool[11] ref= 1 nice=-20 idle/workers= 1/ 1 cpus=0000000f
+ pool[12] ref= 2 nice=-20 idle/workers= 1/ 1 cpus=00000003
+ pool[13] ref= 2 nice=-20 idle/workers= 1/ 1 cpus=0000000c
+
+ Workqueue CPU -> pool
+ =====================
+ [ workqueue \ CPU 0 1 2 3 dfl]
+ events percpu 0 2 4 6
+ events_highpri percpu 1 3 5 7
+ events_long percpu 0 2 4 6
+ events_unbound unbound 9 9 10 10 8
+ events_freezable percpu 0 2 4 6
+ events_power_efficient percpu 0 2 4 6
+ events_freezable_power_ percpu 0 2 4 6
+ rcu_gp percpu 0 2 4 6
+ rcu_par_gp percpu 0 2 4 6
+ slub_flushwq percpu 0 2 4 6
+ netns ordered 8 8 8 8 8
+ ...
+
+See the command's help message for more info.
+
+
Monitoring
==========
Use tools/workqueue/wq_monitor.py to monitor workqueue operations: ::
$ tools/workqueue/wq_monitor.py events
- total infl CPUtime CPUhog CMwake mayday rescued
+ total infl CPUtime CPUhog CMW/RPR mayday rescued
events 18545 0 6.1 0 5 - -
events_highpri 8 0 0.0 0 0 - -
events_long 3 0 0.0 0 0 - -
- events_unbound 38306 0 0.1 - - - -
+ events_unbound 38306 0 0.1 - 7 - -
events_freezable 0 0 0.0 0 0 - -
events_power_efficient 29598 0 0.2 0 0 - -
events_freezable_power_ 10 0 0.0 0 0 - -
sock_diag_events 0 0 0.0 0 0 - -
- total infl CPUtime CPUhog CMwake mayday rescued
+ total infl CPUtime CPUhog CMW/RPR mayday rescued
events 18548 0 6.1 0 5 - -
events_highpri 8 0 0.0 0 0 - -
events_long 3 0 0.0 0 0 - -
- events_unbound 38322 0 0.1 - - - -
+ events_unbound 38322 0 0.1 - 7 - -
events_freezable 0 0 0.0 0 0 - -
events_power_efficient 29603 0 0.2 0 0 - -
events_freezable_power_ 10 0 0.0 0 0 - -
diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
index f4acf9c2e90f..382818a7197a 100644
--- a/Documentation/dev-tools/kasan.rst
+++ b/Documentation/dev-tools/kasan.rst
@@ -41,8 +41,8 @@ Support
Architectures
~~~~~~~~~~~~~
-Generic KASAN is supported on x86_64, arm, arm64, powerpc, riscv, s390, and
-xtensa, and the tag-based KASAN modes are supported only on arm64.
+Generic KASAN is supported on x86_64, arm, arm64, powerpc, riscv, s390, xtensa,
+and loongarch, and the tag-based KASAN modes are supported only on arm64.
Compilers
~~~~~~~~~
diff --git a/Documentation/devicetree/bindings/dma/atmel-xdma.txt b/Documentation/devicetree/bindings/dma/atmel-xdma.txt
index 510b7f25ba24..76d649b3a25d 100644
--- a/Documentation/devicetree/bindings/dma/atmel-xdma.txt
+++ b/Documentation/devicetree/bindings/dma/atmel-xdma.txt
@@ -3,7 +3,8 @@
* XDMA Controller
Required properties:
- compatible: Should be "atmel,sama5d4-dma", "microchip,sam9x60-dma" or
- "microchip,sama7g5-dma".
+ "microchip,sama7g5-dma" or
+ "microchip,sam9x7-dma", "atmel,sama5d4-dma".
- reg: Should contain DMA registers location and length.
- interrupts: Should contain DMA interrupt.
- #dma-cells: Must be <1>, used to represent the number of integer cells in
diff --git a/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt b/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt
deleted file mode 100644
index b6a8cc0978cd..000000000000
--- a/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-* BCM2835 DMA controller
-
-The BCM2835 DMA controller has 16 channels in total.
-Only the lower 13 channels have an associated IRQ.
-Some arbitrary channels are used by the firmware
-(1,3,6,7 in the current firmware version).
-The channels 0,2 and 3 have special functionality
-and should not be used by the driver.
-
-Required properties:
-- compatible: Should be "brcm,bcm2835-dma".
-- reg: Should contain DMA registers location and length.
-- interrupts: Should contain the DMA interrupts associated
- to the DMA channels in ascending order.
-- interrupt-names: Should contain the names of the interrupt
- in the form "dmaXX".
- Use "dma-shared-all" for the common interrupt line
- that is shared by all dma channels.
-- #dma-cells: Must be <1>, the cell in the dmas property of the
- client device represents the DREQ number.
-- brcm,dma-channel-mask: Bit mask representing the channels
- not used by the firmware in ascending order,
- i.e. first channel corresponds to LSB.
-
-Example:
-
-dma: dma@7e007000 {
- compatible = "brcm,bcm2835-dma";
- reg = <0x7e007000 0xf00>;
- interrupts = <1 16>,
- <1 17>,
- <1 18>,
- <1 19>,
- <1 20>,
- <1 21>,
- <1 22>,
- <1 23>,
- <1 24>,
- <1 25>,
- <1 26>,
- /* dma channel 11-14 share one irq */
- <1 27>,
- <1 27>,
- <1 27>,
- <1 27>,
- /* unused shared irq for all channels */
- <1 28>;
- interrupt-names = "dma0",
- "dma1",
- "dma2",
- "dma3",
- "dma4",
- "dma5",
- "dma6",
- "dma7",
- "dma8",
- "dma9",
- "dma10",
- "dma11",
- "dma12",
- "dma13",
- "dma14",
- "dma-shared-all";
-
- #dma-cells = <1>;
- brcm,dma-channel-mask = <0x7f35>;
-};
-
-
-DMA clients connected to the BCM2835 DMA controller must use the format
-described in the dma.txt file, using a two-cell specifier for each channel.
-
-Example:
-
-bcm2835_i2s: i2s@7e203000 {
- compatible = "brcm,bcm2835-i2s";
- reg = < 0x7e203000 0x24>;
- clocks = <&clocks BCM2835_CLOCK_PCM>;
-
- dmas = <&dma 2>,
- <&dma 3>;
- dma-names = "tx", "rx";
-};
diff --git a/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.yaml b/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.yaml
new file mode 100644
index 000000000000..c9b9a5490826
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/brcm,bcm2835-dma.yaml
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/brcm,bcm2835-dma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BCM2835 DMA controller
+
+maintainers:
+ - Nicolas Saenz Julienne <nsaenz@kernel.org>
+
+description:
+ The BCM2835 DMA controller has 16 channels in total. Only the lower
+ 13 channels have an associated IRQ. Some arbitrary channels are used by the
+ VideoCore firmware (1,3,6,7 in the current firmware version). The channels
+ 0, 2 and 3 have special functionality and should not be used by the driver.
+
+allOf:
+ - $ref: dma-controller.yaml#
+
+properties:
+ compatible:
+ const: brcm,bcm2835-dma
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description:
+ Should contain the DMA interrupts associated to the DMA channels in
+ ascending order.
+ minItems: 1
+ maxItems: 16
+
+ interrupt-names:
+ minItems: 1
+ maxItems: 16
+
+ '#dma-cells':
+ description: The single cell represents the DREQ number.
+ const: 1
+
+ brcm,dma-channel-mask:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Bitmask of available DMA channels in ascending order that are
+ not reserved by firmware and are available to the
+ kernel. i.e. first channel corresponds to LSB.
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#dma-cells"
+ - brcm,dma-channel-mask
+
+examples:
+ - |
+ dma-controller@7e007000 {
+ compatible = "brcm,bcm2835-dma";
+ reg = <0x7e007000 0xf00>;
+ interrupts = <1 16>,
+ <1 17>,
+ <1 18>,
+ <1 19>,
+ <1 20>,
+ <1 21>,
+ <1 22>,
+ <1 23>,
+ <1 24>,
+ <1 25>,
+ <1 26>,
+ /* dma channel 11-14 share one irq */
+ <1 27>,
+ <1 27>,
+ <1 27>,
+ <1 27>,
+ /* unused shared irq for all channels */
+ <1 28>;
+ interrupt-names = "dma0",
+ "dma1",
+ "dma2",
+ "dma3",
+ "dma4",
+ "dma5",
+ "dma6",
+ "dma7",
+ "dma8",
+ "dma9",
+ "dma10",
+ "dma11",
+ "dma12",
+ "dma13",
+ "dma14",
+ "dma-shared-all";
+ #dma-cells = <1>;
+ brcm,dma-channel-mask = <0x7f35>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/dma/fsl,edma.yaml b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
index 5fd8fc604261..437db0c62339 100644
--- a/Documentation/devicetree/bindings/dma/fsl,edma.yaml
+++ b/Documentation/devicetree/bindings/dma/fsl,edma.yaml
@@ -21,32 +21,41 @@ properties:
- enum:
- fsl,vf610-edma
- fsl,imx7ulp-edma
+ - fsl,imx8qm-adma
+ - fsl,imx8qm-edma
+ - fsl,imx93-edma3
+ - fsl,imx93-edma4
- items:
- const: fsl,ls1028a-edma
- const: fsl,vf610-edma
reg:
- minItems: 2
+ minItems: 1
maxItems: 3
interrupts:
- minItems: 2
- maxItems: 17
+ minItems: 1
+ maxItems: 64
interrupt-names:
- minItems: 2
- maxItems: 17
+ minItems: 1
+ maxItems: 64
"#dma-cells":
- const: 2
+ enum:
+ - 2
+ - 3
dma-channels:
- const: 32
+ minItems: 1
+ maxItems: 64
clocks:
+ minItems: 1
maxItems: 2
clock-names:
+ minItems: 1
maxItems: 2
big-endian:
@@ -69,21 +78,52 @@ allOf:
properties:
compatible:
contains:
+ enum:
+ - fsl,imx8qm-adma
+ - fsl,imx8qm-edma
+ - fsl,imx93-edma3
+ - fsl,imx93-edma4
+ then:
+ properties:
+ "#dma-cells":
+ const: 3
+ # It is not necessary to write the interrupt name for each channel.
+ # instead, you can simply maintain the sequential IRQ numbers as
+ # defined for the DMA channels.
+ interrupt-names: false
+ clock-names:
+ items:
+ - const: dma
+ clocks:
+ maxItems: 1
+
+ - if:
+ properties:
+ compatible:
+ contains:
const: fsl,vf610-edma
then:
properties:
+ clocks:
+ minItems: 2
clock-names:
items:
- const: dmamux0
- const: dmamux1
interrupts:
+ minItems: 2
maxItems: 2
interrupt-names:
items:
- const: edma-tx
- const: edma-err
reg:
+ minItems: 2
maxItems: 3
+ "#dma-cells":
+ const: 2
+ dma-channels:
+ const: 32
- if:
properties:
@@ -92,14 +132,22 @@ allOf:
const: fsl,imx7ulp-edma
then:
properties:
+ clock:
+ minItems: 2
clock-names:
items:
- const: dma
- const: dmamux0
interrupts:
+ minItems: 2
maxItems: 17
reg:
+ minItems: 2
maxItems: 2
+ "#dma-cells":
+ const: 2
+ dma-channels:
+ const: 32
unevaluatedProperties: false
@@ -153,3 +201,47 @@ examples:
clock-names = "dma", "dmamux0";
clocks = <&pcc2 IMX7ULP_CLK_DMA1>, <&pcc2 IMX7ULP_CLK_DMA_MUX1>;
};
+
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/clock/imx93-clock.h>
+
+ dma-controller@44000000 {
+ compatible = "fsl,imx93-edma3";
+ reg = <0x44000000 0x200000>;
+ #dma-cells = <3>;
+ dma-channels = <31>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX93_CLK_EDMA1_GATE>;
+ clock-names = "dma";
+ };
diff --git a/Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml b/Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml
index 4f5510b6fa02..3ad0d9b1fbc5 100644
--- a/Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml
+++ b/Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml
@@ -15,13 +15,19 @@ allOf:
properties:
compatible:
- enum:
- # APQ8064, IPQ8064 and MSM8960
- - qcom,bam-v1.3.0
- # MSM8974, APQ8074 and APQ8084
- - qcom,bam-v1.4.0
- # MSM8916 and SDM845
- - qcom,bam-v1.7.0
+ oneOf:
+ - enum:
+ # APQ8064, IPQ8064 and MSM8960
+ - qcom,bam-v1.3.0
+ # MSM8974, APQ8074 and APQ8084
+ - qcom,bam-v1.4.0
+ # MSM8916, SDM630
+ - qcom,bam-v1.7.0
+ - items:
+ - enum:
+ # SDM845, SM6115, SM8150, SM8250 and QCM2290
+ - qcom,bam-v1.7.4
+ - const: qcom,bam-v1.7.0
clocks:
maxItems: 1
@@ -38,7 +44,7 @@ properties:
iommus:
minItems: 1
- maxItems: 4
+ maxItems: 6
num-channels:
$ref: /schemas/types.yaml#/definitions/uint32
@@ -81,6 +87,15 @@ required:
- qcom,ee
- reg
+anyOf:
+ - required:
+ - qcom,powered-remotely
+ - required:
+ - qcom,controlled-remotely
+ - required:
+ - clocks
+ - clock-names
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
index d1700a5c36bf..590d1948f202 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
@@ -49,6 +49,12 @@ Optional properties for AXI DMA and MCDMA:
register as configured in h/w. Takes values {8...26}. If the property
is missing or invalid then the default value 23 is used. This is the
maximum value that is supported by all IP versions.
+
+Optional properties for AXI DMA:
+- xlnx,axistream-connected: Tells whether DMA is connected to AXI stream IP.
+- xlnx,irq-delay: Tells the interrupt delay timeout value. Valid range is from
+ 0-255. Setting this value to zero disables the delay timer interrupt.
+ 1 timeout interval = 125 * clock period of SG clock.
Optional properties for VDMA:
- xlnx,flush-fsync: Tells which channel to Flush on Frame sync.
It takes following values:
diff --git a/Documentation/devicetree/bindings/extcon/maxim,max77843.yaml b/Documentation/devicetree/bindings/extcon/maxim,max77843.yaml
index 128960545640..55800fb0221d 100644
--- a/Documentation/devicetree/bindings/extcon/maxim,max77843.yaml
+++ b/Documentation/devicetree/bindings/extcon/maxim,max77843.yaml
@@ -23,6 +23,7 @@ properties:
connector:
$ref: /schemas/connector/usb-connector.yaml#
+ unevaluatedProperties: false
ports:
$ref: /schemas/graph.yaml#/properties/ports
diff --git a/Documentation/devicetree/bindings/extcon/siliconmitus,sm5502-muic.yaml b/Documentation/devicetree/bindings/extcon/siliconmitus,sm5502-muic.yaml
index 7a224b2f0977..7ef2d9bef72d 100644
--- a/Documentation/devicetree/bindings/extcon/siliconmitus,sm5502-muic.yaml
+++ b/Documentation/devicetree/bindings/extcon/siliconmitus,sm5502-muic.yaml
@@ -27,6 +27,10 @@ properties:
description: I2C slave address of the device. Usually 0x25 for SM5502
and SM5703, 0x14 for SM5504.
+ connector:
+ $ref: /schemas/connector/usb-connector.yaml#
+ unevaluatedProperties: false
+
interrupts:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/fsi/ibm,i2cr-fsi-master.yaml b/Documentation/devicetree/bindings/fsi/ibm,i2cr-fsi-master.yaml
new file mode 100644
index 000000000000..442cecdc57cb
--- /dev/null
+++ b/Documentation/devicetree/bindings/fsi/ibm,i2cr-fsi-master.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/fsi/ibm,i2cr-fsi-master.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: IBM I2C Responder virtual FSI master
+
+maintainers:
+ - Eddie James <eajames@linux.ibm.com>
+
+description: |
+ The I2C Responder (I2CR) is an I2C device that's connected to an FSI CFAM
+ (see fsi.txt). The I2CR translates I2C bus operations to FSI CFAM reads and
+ writes or SCOM operations, thereby acting as an FSI master.
+
+properties:
+ compatible:
+ enum:
+ - ibm,i2cr-fsi-master
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2cr@20 {
+ compatible = "ibm,i2cr-fsi-master";
+ reg = <0x20>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/cdns,i2c-r1p10.yaml b/Documentation/devicetree/bindings/i2c/cdns,i2c-r1p10.yaml
index ff57c5416ebc..9f1d35ce1fe8 100644
--- a/Documentation/devicetree/bindings/i2c/cdns,i2c-r1p10.yaml
+++ b/Documentation/devicetree/bindings/i2c/cdns,i2c-r1p10.yaml
@@ -48,6 +48,9 @@ properties:
default: 16
enum: [2, 4, 8, 16, 32, 64, 128, 256]
+ power-domains:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt b/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt
deleted file mode 100644
index 548a73cde796..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt
+++ /dev/null
@@ -1,82 +0,0 @@
-GPIO-based I2C Arbitration Using a Challenge & Response Mechanism
-=================================================================
-This uses GPIO lines and a challenge & response mechanism to arbitrate who is
-the master of an I2C bus in a multimaster situation.
-
-In many cases using GPIOs to arbitrate is not needed and a design can use
-the standard I2C multi-master rules. Using GPIOs is generally useful in
-the case where there is a device on the bus that has errata and/or bugs
-that makes standard multimaster mode not feasible.
-
-Note that this scheme works well enough but has some downsides:
-* It is nonstandard (not using standard I2C multimaster)
-* Having two masters on a bus in general makes it relatively hard to debug
- problems (hard to tell if i2c issues were caused by one master, another, or
- some device on the bus).
-
-
-Algorithm:
-
-All masters on the bus have a 'bus claim' line which is an output that the
-others can see. These are all active low with pull-ups enabled. We'll
-describe these lines as:
-
-- OUR_CLAIM: output from us signaling to other hosts that we want the bus
-- THEIR_CLAIMS: output from others signaling that they want the bus
-
-The basic algorithm is to assert your line when you want the bus, then make
-sure that the other side doesn't want it also. A detailed explanation is best
-done with an example.
-
-Let's say we want to claim the bus. We:
-1. Assert OUR_CLAIM.
-2. Waits a little bit for the other sides to notice (slew time, say 10
- microseconds).
-3. Check THEIR_CLAIMS. If none are asserted then the we have the bus and we are
- done.
-4. Otherwise, wait for a few milliseconds and see if THEIR_CLAIMS are released.
-5. If not, back off, release the claim and wait for a few more milliseconds.
-6. Go back to 1 (until retry time has expired).
-
-
-Required properties:
-- compatible: i2c-arb-gpio-challenge
-- our-claim-gpio: The GPIO that we use to claim the bus.
-- their-claim-gpios: The GPIOs that the other sides use to claim the bus.
- Note that some implementations may only support a single other master.
-- I2C arbitration bus node. See i2c-arb.txt in this directory.
-
-Optional properties:
-- slew-delay-us: microseconds to wait for a GPIO to go high. Default is 10 us.
-- wait-retry-us: we'll attempt another claim after this many microseconds.
- Default is 3000 us.
-- wait-free-us: we'll give up after this many microseconds. Default is 50000 us.
-
-
-Example:
- i2c@12ca0000 {
- compatible = "acme,some-i2c-device";
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- i2c-arbitrator {
- compatible = "i2c-arb-gpio-challenge";
-
- i2c-parent = <&{/i2c@12CA0000}>;
-
- our-claim-gpio = <&gpf0 3 1>;
- their-claim-gpios = <&gpe0 4 1>;
- slew-delay-us = <10>;
- wait-retry-us = <3000>;
- wait-free-us = <50000>;
-
- i2c-arb {
- #address-cells = <1>;
- #size-cells = <0>;
-
- i2c@52 {
- // Normal I2C device
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.yaml b/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.yaml
new file mode 100644
index 000000000000..b618b5a3433a
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.yaml
@@ -0,0 +1,135 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/i2c-arb-gpio-challenge.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: GPIO-based I2C Arbitration Using a Challenge & Response Mechanism
+
+maintainers:
+ - Doug Anderson <dianders@chromium.org>
+ - Peter Rosin <peda@axentia.se>
+
+description: |
+ This uses GPIO lines and a challenge & response mechanism to arbitrate who is
+ the master of an I2C bus in a multimaster situation.
+
+ In many cases using GPIOs to arbitrate is not needed and a design can use the
+ standard I2C multi-master rules. Using GPIOs is generally useful in the case
+ where there is a device on the bus that has errata and/or bugs that make
+ standard multimaster mode not feasible.
+
+ Note that this scheme works well enough but has some downsides:
+ * It is nonstandard (not using standard I2C multimaster)
+ * Having two masters on a bus in general makes it relatively hard to debug
+ problems (hard to tell if i2c issues were caused by one master, another,
+ or some device on the bus).
+
+ Algorithm:
+ All masters on the bus have a 'bus claim' line which is an output that the
+ others can see. These are all active low with pull-ups enabled. We'll
+ describe these lines as:
+ * OUR_CLAIM: output from us signaling to other hosts that we want the bus
+ * THEIR_CLAIMS: output from others signaling that they want the bus
+
+ The basic algorithm is to assert your line when you want the bus, then make
+ sure that the other side doesn't want it also. A detailed explanation is
+ best done with an example.
+
+ Let's say we want to claim the bus. We:
+ 1. Assert OUR_CLAIM.
+ 2. Wait a little bit for the other sides to notice (slew time, say 10
+ microseconds).
+ 3. Check THEIR_CLAIMS. If none are asserted then we have the bus and we
+ are done.
+ 4. Otherwise, wait for a few milliseconds and see if THEIR_CLAIMS are released.
+ 5. If not, back off, release the claim and wait for a few more milliseconds.
+ 6. Go back to 1 (until retry time has expired).
+
+properties:
+ compatible:
+ const: i2c-arb-gpio-challenge
+
+ i2c-parent:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ The I2C bus that this multiplexer's master-side port is connected to.
+
+ our-claim-gpios:
+ maxItems: 1
+ description:
+ The GPIO that we use to claim the bus.
+
+ slew-delay-us:
+ default: 10
+ description:
+ Time to wait for a GPIO to go high.
+
+ their-claim-gpios:
+ minItems: 1
+ maxItems: 8
+ description:
+ The GPIOs that the other sides use to claim the bus. Note that some
+ implementations may only support a single other master.
+
+ wait-free-us:
+ default: 50000
+ description:
+ We'll give up after this many microseconds.
+
+ wait-retry-us:
+ default: 3000
+ description:
+ We'll attempt another claim after this many microseconds.
+
+ i2c-arb:
+ type: object
+ $ref: /schemas/i2c/i2c-controller.yaml
+ unevaluatedProperties: false
+ description:
+ I2C arbitration bus node.
+
+required:
+ - compatible
+ - i2c-arb
+ - our-claim-gpios
+ - their-claim-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c-arbitrator {
+ compatible = "i2c-arb-gpio-challenge";
+ i2c-parent = <&i2c_4>;
+
+ our-claim-gpios = <&gpf0 3 GPIO_ACTIVE_LOW>;
+ their-claim-gpios = <&gpe0 4 GPIO_ACTIVE_LOW>;
+ slew-delay-us = <10>;
+ wait-retry-us = <3000>;
+ wait-free-us = <50000>;
+
+ i2c-arb {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sbs-battery@b {
+ compatible = "sbs,sbs-battery";
+ reg = <0xb>;
+ sbs,poll-retry-count = <1>;
+ };
+
+ embedded-controller@1e {
+ compatible = "google,cros-ec-i2c";
+ reg = <0x1e>;
+ interrupts = <6 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gpx1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ec_irq>;
+ wakeup-source;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-arb.txt b/Documentation/devicetree/bindings/i2c/i2c-arb.txt
deleted file mode 100644
index 59abf9277bdc..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-arb.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-Common i2c arbitration bus properties.
-
-- i2c-arb child node
-
-Required properties for the i2c-arb child node:
-- #address-cells = <1>;
-- #size-cells = <0>;
-
-Optional properties for i2c-arb child node:
-- Child nodes conforming to i2c bus binding
-
-
-Example :
-
- /*
- An NXP pca9541 I2C bus master selector at address 0x74
- with a NXP pca8574 GPIO expander attached.
- */
-
- arb@74 {
- compatible = "nxp,pca9541";
- reg = <0x74>;
-
- i2c-arb {
- #address-cells = <1>;
- #size-cells = <0>;
-
- gpio@38 {
- compatible = "nxp,pca8574";
- reg = <0x38>;
- #gpio-cells = <2>;
- gpio-controller;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-atr.yaml b/Documentation/devicetree/bindings/i2c/i2c-atr.yaml
new file mode 100644
index 000000000000..1939ab339bfc
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-atr.yaml
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/i2c-atr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Common i2c address translator properties
+
+maintainers:
+ - Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+description:
+ An I2C Address Translator (ATR) is a device with an I2C slave parent
+ ("upstream") port and N I2C master child ("downstream") ports, and
+ forwards transactions from upstream to the appropriate downstream port
+ with a modified slave address. The address used on the parent bus is
+ called the "alias" and is (potentially) different from the physical
+ slave address of the child bus. Address translation is done by the
+ hardware.
+
+properties:
+ i2c-alias-pool:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ I2C alias pool is a pool of I2C addresses on the main I2C bus that can be
+ used to access the remote peripherals on the serializer's I2C bus. The
+ addresses must be available, not used by any other peripheral. Each
+ remote peripheral is assigned an alias from the pool, and transactions to
+ that address will be forwarded to the remote peripheral, with the address
+ translated to the remote peripheral's real address. This property is not
+ needed if there are no I2C addressable remote peripherals.
+
+additionalProperties: true
+...
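As a hedged illustration of the alias pool described above (the specific alias values and placement are assumptions, not defined by this common schema), an ATR device node could reserve four unused addresses on its parent bus like this:

	/* four spare addresses on the parent bus, usable as aliases */
	i2c-alias-pool = <0x4a 0x4b 0x4c 0x4d>;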
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.yaml b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.yaml
index 9f1726d0356b..2d7bb998b0e9 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.yaml
@@ -4,21 +4,29 @@
$id: http://devicetree.org/schemas/i2c/i2c-mux-pca954x.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: NXP PCA954x I2C bus switch
+title: NXP PCA954x I2C and compatible bus switches
maintainers:
- Laurent Pinchart <laurent.pinchart@ideasonboard.com>
description:
- The binding supports NXP PCA954x and PCA984x I2C mux/switch devices.
-
-allOf:
- - $ref: /schemas/i2c/i2c-mux.yaml#
+ The NXP PCA954x and compatible devices are I2C bus
+ multiplexers/switches that share the same functionality
+ and register layout.
+ The devices usually have 4 or 8 child buses, which are
+ attached to the parent bus by using the SMBus "Send Byte"
+ command.
properties:
compatible:
oneOf:
- enum:
+ - maxim,max7356
+ - maxim,max7357
+ - maxim,max7358
+ - maxim,max7367
+ - maxim,max7368
+ - maxim,max7369
- nxp,pca9540
- nxp,pca9542
- nxp,pca9543
@@ -59,10 +67,34 @@ properties:
description: if present, overrides i2c-mux-idle-disconnect
$ref: /schemas/mux/mux-controller.yaml#/properties/idle-state
+ vdd-supply:
+ description: A voltage regulator supplying power to the chip. On PCA9846
+ the regulator supplies power to VDD2 (core logic) and optionally to VDD1.
+
required:
- compatible
- reg
+allOf:
+ - $ref: /schemas/i2c/i2c-mux.yaml#
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - maxim,max7367
+ - maxim,max7369
+ - nxp,pca9542
+ - nxp,pca9543
+ - nxp,pca9544
+ - nxp,pca9545
+ then:
+ properties:
+ interrupts: false
+ "#interrupt-cells": false
+ interrupt-controller: false
+
unevaluatedProperties: false
examples:
@@ -74,11 +106,13 @@ examples:
#size-cells = <0>;
i2c-mux@74 {
- compatible = "nxp,pca9548";
+ compatible = "nxp,pca9545";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x74>;
+ vdd-supply = <&p3v3>;
+
interrupt-parent = <&ipic>;
interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
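A minimal sketch of a switch covered by the new interrupt restriction above, assuming a Maxim MAX7356 at address 0x70 (address and wiring are illustrative only); interrupt-related properties are intentionally absent since the schema now disallows them for this compatible:

	i2c-mux@70 {
		compatible = "maxim,max7356";
		reg = <0x70>;
		#address-cells = <1>;
		#size-cells = <0>;
	};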
diff --git a/Documentation/devicetree/bindings/i2c/nxp,pca9541.txt b/Documentation/devicetree/bindings/i2c/nxp,pca9541.txt
deleted file mode 100644
index 42bfc09c8918..000000000000
--- a/Documentation/devicetree/bindings/i2c/nxp,pca9541.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-* NXP PCA9541 I2C bus master selector
-
-Required Properties:
-
- - compatible: Must be "nxp,pca9541"
-
- - reg: The I2C address of the device.
-
- The following required properties are defined externally:
-
- - I2C arbitration bus node. See i2c-arb.txt in this directory.
-
-
-Example:
-
- i2c-arbitrator@74 {
- compatible = "nxp,pca9541";
- reg = <0x74>;
-
- i2c-arb {
- #address-cells = <1>;
- #size-cells = <0>;
-
- eeprom@54 {
- compatible = "atmel,24c08";
- reg = <0x54>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/i2c/nxp,pca9541.yaml b/Documentation/devicetree/bindings/i2c/nxp,pca9541.yaml
new file mode 100644
index 000000000000..b65c25c1a435
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/nxp,pca9541.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/nxp,pca9541.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP PCA9541 I2C bus master selector
+
+maintainers:
+ - Peter Rosin <peda@axentia.se>
+
+properties:
+ compatible:
+ const: nxp,pca9541
+
+ reg:
+ maxItems: 1
+
+ i2c-arb:
+ type: object
+ $ref: /schemas/i2c/i2c-controller.yaml
+ unevaluatedProperties: false
+ description:
+ I2C arbitration bus node.
+
+required:
+ - compatible
+ - reg
+ - i2c-arb
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c-arbitrator@74 {
+ compatible = "nxp,pca9541";
+ reg = <0x74>;
+
+ i2c-arb {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@54 {
+ compatible = "atmel,24c08";
+ reg = <0x54>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
index ec79b7270437..042d4dc636ee 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
@@ -269,6 +269,7 @@ examples:
port {
ov7251_ep: endpoint {
data-lanes = <0 1>;
+ link-frequencies = /bits/ 64 <240000000 319200000>;
remote-endpoint = <&csiphy3_ep>;
};
};
diff --git a/Documentation/devicetree/bindings/i3c/i3c.yaml b/Documentation/devicetree/bindings/i3c/i3c.yaml
index fdb4212149e7..ab69f4115de4 100644
--- a/Documentation/devicetree/bindings/i3c/i3c.yaml
+++ b/Documentation/devicetree/bindings/i3c/i3c.yaml
@@ -135,9 +135,10 @@ patternProperties:
minimum: 0x1
maximum: 0xff
description: |
- Dynamic address to be assigned to this device. This property is only
- valid if the I3C device has a static address (first cell of the reg
- property != 0).
+ Dynamic address to be assigned to this device. If a static address is
+ present (first cell of the reg property != 0), this address is assigned
+ through SETDASA. If no static address is present, this address is assigned
+ through SETNEWDA after a temporary address has been assigned via ENTDAA.
required:
- reg
@@ -163,12 +164,18 @@ examples:
pagesize = <0x8>;
};
- /* I3C device with a static I2C address. */
+ /* I3C device with a static I2C address and assigned address. */
thermal_sensor: sensor@68,39200144004 {
reg = <0x68 0x392 0x144004>;
assigned-address = <0xa>;
};
+ /* I3C device with only assigned address. */
+ pressure_sensor: sensor@0,39200124004 {
+ reg = <0x0 0x392 0x124004>;
+ assigned-address = <0xc>;
+ };
+
/*
* I3C device without a static I2C address but requiring
* resources described in the DT.
diff --git a/Documentation/devicetree/bindings/iio/adc/allwinner,sun20i-d1-gpadc.yaml b/Documentation/devicetree/bindings/iio/adc/allwinner,sun20i-d1-gpadc.yaml
new file mode 100644
index 000000000000..7ef46c90ebc8
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/allwinner,sun20i-d1-gpadc.yaml
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/allwinner,sun20i-d1-gpadc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Allwinner D1 General Purpose ADC
+
+maintainers:
+ - Maksim Kiselev <bigunclemax@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - allwinner,sun20i-d1-gpadc
+
+ "#io-channel-cells":
+ const: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+patternProperties:
+ "^channel@[0-9a-f]+$":
+ $ref: adc.yaml
+ type: object
+ description:
+ Represents the internal channels of the ADC.
+
+ properties:
+ reg:
+ items:
+ minimum: 0
+ maximum: 15
+
+ required:
+ - reg
+
+ unevaluatedProperties: false
+
+required:
+ - "#io-channel-cells"
+ - clocks
+ - compatible
+ - interrupts
+ - reg
+ - resets
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/sun20i-d1-ccu.h>
+ #include <dt-bindings/reset/sun20i-d1-ccu.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ gpadc: adc@2009000 {
+ compatible = "allwinner,sun20i-d1-gpadc";
+ reg = <0x2009000 0x400>;
+ clocks = <&ccu CLK_BUS_GPADC>;
+ resets = <&ccu RST_BUS_GPADC>;
+ interrupts = <73 IRQ_TYPE_LEVEL_HIGH>;
+ #io-channel-cells = <1>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ channel@0 {
+ reg = <0>;
+ };
+
+ channel@1 {
+ reg = <1>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml b/Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml
index 2127d639a768..e004659099c1 100644
--- a/Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/ti,ads1015.yaml
@@ -78,9 +78,9 @@ patternProperties:
ti,datarate:
$ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
- maximum: 6
+ maximum: 7
description: |
- Data acquisition rate in samples per second
+ Data acquisition rate in samples per second for ADS1015, TLA2024
0: 128
1: 250
2: 490
@@ -88,6 +88,17 @@ patternProperties:
4: 1600 (default)
5: 2400
6: 3300
+ 7: 3300
+
+ Data acquisition rate in samples per second for ADS1115
+ 0: 8
+ 1: 16
+ 2: 32
+ 3: 64
+ 4: 128 (default)
+ 5: 250
+ 6: 475
+ 7: 860
required:
- reg
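A short hedged example (channel number and placement are assumed) showing how one ti,datarate value maps to different sample rates per chip, as documented above:

	channel@4 {
		reg = <4>;
		ti,datarate = <4>;	/* 1600 SPS on ADS1015/TLA2024, 128 SPS on ADS1115 */
	};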
diff --git a/Documentation/devicetree/bindings/iio/dac/microchip,mcp4728.yaml b/Documentation/devicetree/bindings/iio/dac/microchip,mcp4728.yaml
new file mode 100644
index 000000000000..99831d7f1c16
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/microchip,mcp4728.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+
+$id: http://devicetree.org/schemas/iio/dac/microchip,mcp4728.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip MCP4728 DAC
+
+maintainers:
+ - Andrea Collamati <andrea.collamati@gmail.com>
+
+description: |
+ MCP4728 is a quad channel, 12-bit voltage output
+ Digital-to-Analog Converter with non-volatile
+ memory and I2C compatible Serial Interface.
+ https://www.microchip.com/en-us/product/mcp4728
+
+properties:
+ compatible:
+ const: microchip,mcp4728
+
+ reg:
+ maxItems: 1
+
+ vdd-supply:
+ description: |
+ Provides both power and acts as the reference supply on the MCP4728
+ when Internal Vref is not selected.
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dac@60 {
+ compatible = "microchip,mcp4728";
+ reg = <0x60>;
+ vdd-supply = <&vdac_vdd>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/frequency/adi,admv1013.yaml b/Documentation/devicetree/bindings/iio/frequency/adi,admv1013.yaml
index fc813bcb6532..f2eb2287ed9e 100644
--- a/Documentation/devicetree/bindings/iio/frequency/adi,admv1013.yaml
+++ b/Documentation/devicetree/bindings/iio/frequency/adi,admv1013.yaml
@@ -39,6 +39,46 @@ properties:
description:
Analog voltage regulator.
+ vcc-drv-supply:
+ description:
+ RF Driver voltage regulator.
+
+ vcc2-drv-supply:
+ description:
+ RF predriver voltage regulator.
+
+ vcc-vva-supply:
+ description:
+ VVA Control Circuit voltage regulator.
+
+ vcc-amp1-supply:
+ description:
+ RF Amplifier 1 voltage regulator.
+
+ vcc-amp2-supply:
+ description:
+ RF Amplifier 2 voltage regulator.
+
+ vcc-env-supply:
+ description:
+ Envelope Detector voltage regulator.
+
+ vcc-bg-supply:
+ description:
+ Mixer Chip Band Gap Circuit voltage regulator.
+
+ vcc-bg2-supply:
+ description:
+ VGA Chip Band Gap Circuit voltage regulator.
+
+ vcc-mixer-supply:
+ description:
+ Mixer voltage regulator.
+
+ vcc-quad-supply:
+ description:
+ Quadrupler voltage regulator.
+
adi,detector-enable:
description:
Enable the Envelope Detector available at output pins VENV_P and
@@ -69,6 +109,16 @@ required:
- clocks
- clock-names
- vcm-supply
+ - vcc-drv-supply
+ - vcc2-drv-supply
+ - vcc-vva-supply
+ - vcc-amp1-supply
+ - vcc-amp2-supply
+ - vcc-env-supply
+ - vcc-bg-supply
+ - vcc-bg2-supply
+ - vcc-mixer-supply
+ - vcc-quad-supply
allOf:
- $ref: /schemas/spi/spi-peripheral-props.yaml#
@@ -87,6 +137,16 @@ examples:
clocks = <&admv1013_lo>;
clock-names = "lo_in";
vcm-supply = <&vcm>;
+ vcc-drv-supply = <&vcc_drv>;
+ vcc2-drv-supply = <&vcc2_drv>;
+ vcc-vva-supply = <&vcc_vva>;
+ vcc-amp1-supply = <&vcc_amp1>;
+ vcc-amp2-supply = <&vcc_amp2>;
+ vcc-env-supply = <&vcc_env>;
+ vcc-bg-supply = <&vcc_bg>;
+ vcc-bg2-supply = <&vcc_bg2>;
+ vcc-mixer-supply = <&vcc_mixer>;
+ vcc-quad-supply = <&vcc_quad>;
adi,quad-se-mode = "diff";
adi,detector-enable;
};
diff --git a/Documentation/devicetree/bindings/iio/frequency/adi,admv1014.yaml b/Documentation/devicetree/bindings/iio/frequency/adi,admv1014.yaml
index d17601dbc498..39cc63a11762 100644
--- a/Documentation/devicetree/bindings/iio/frequency/adi,admv1014.yaml
+++ b/Documentation/devicetree/bindings/iio/frequency/adi,admv1014.yaml
@@ -103,6 +103,14 @@ required:
- clocks
- clock-names
- vcm-supply
+ - vcc-if-bb-supply
+ - vcc-vga-supply
+ - vcc-vva-supply
+ - vcc-lna-3p3-supply
+ - vcc-lna-1p5-supply
+ - vcc-bg-supply
+ - vcc-quad-supply
+ - vcc-mixer-supply
allOf:
- $ref: /schemas/spi/spi-peripheral-props.yaml#
diff --git a/Documentation/devicetree/bindings/iio/light/rohm,bu27010.yaml b/Documentation/devicetree/bindings/iio/light/rohm,bu27010.yaml
new file mode 100644
index 000000000000..8376d64a641a
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/rohm,bu27010.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/light/rohm,bu27010.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ROHM BU27010 color sensor
+
+maintainers:
+ - Matti Vaittinen <mazziesaccount@gmail.com>
+
+description: |
+ The ROHM BU27010 is a sensor with 6 photodiodes (red, green, blue, clear,
+ IR and flickering detection) with five configurable channels. Red, green
+ and flickering detection being always available and two out of the rest
+ three (blue, clear, IR) can be selected to be simultaneously measured.
+ Typical application is adjusting LCD/OLED backlight of TVs, mobile phones
+ and tablet PCs.
+
+properties:
+ compatible:
+ const: rohm,bu27010
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vdd-supply: true
+
+required:
+ - compatible
+ - reg
+ - vdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ light-sensor@38 {
+ compatible = "rohm,bu27010";
+ reg = <0x38>;
+ vdd-supply = <&vdd>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/proximity/murata,irsd200.yaml b/Documentation/devicetree/bindings/iio/proximity/murata,irsd200.yaml
new file mode 100644
index 000000000000..67f5389ece67
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/proximity/murata,irsd200.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/proximity/murata,irsd200.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Murata IRS-D200 PIR sensor
+
+maintainers:
+ - Waqar Hameed <waqar.hameed@axis.com>
+
+description:
+ PIR sensor for human detection.
+
+properties:
+ compatible:
+ const: murata,irsd200
+
+ reg:
+ items:
+ - enum:
+ - 0x48
+ - 0x49
+ description: |
+ When the AD pin is connected to GND, the slave address is 0x48.
+ When the AD pin is connected to VDD, the slave address is 0x49.
+
+ interrupts:
+ maxItems: 1
+ description:
+ Type should be IRQ_TYPE_EDGE_RISING.
+
+ vdd-supply:
+ description:
+ 3.3 V supply voltage.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - vdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ proximity@48 {
+ compatible = "murata,irsd200";
+ reg = <0x48>;
+ interrupts = <24 IRQ_TYPE_EDGE_RISING>;
+ vdd-supply = <&regulator_3v3>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/proximity/semtech,sx9310.yaml b/Documentation/devicetree/bindings/iio/proximity/semtech,sx9310.yaml
index 5de0bb2180e6..775555d147bf 100644
--- a/Documentation/devicetree/bindings/iio/proximity/semtech,sx9310.yaml
+++ b/Documentation/devicetree/bindings/iio/proximity/semtech,sx9310.yaml
@@ -15,6 +15,9 @@ description: |
Specifications about the devices can be found at:
https://www.semtech.com/products/smart-sensing/sar-sensors/sx9310
+allOf:
+ - $ref: /schemas/iio/iio.yaml#
+
properties:
compatible:
enum:
@@ -102,7 +105,7 @@ required:
- reg
- "#io-channel-cells"
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/iio/proximity/semtech,sx9324.yaml b/Documentation/devicetree/bindings/iio/proximity/semtech,sx9324.yaml
index b3aa2ebf9661..48f221463166 100644
--- a/Documentation/devicetree/bindings/iio/proximity/semtech,sx9324.yaml
+++ b/Documentation/devicetree/bindings/iio/proximity/semtech,sx9324.yaml
@@ -13,6 +13,9 @@ maintainers:
description: |
Semtech's SX9324 proximity sensor.
+allOf:
+ - $ref: /schemas/iio/iio.yaml#
+
properties:
compatible:
const: semtech,sx9324
@@ -167,7 +170,7 @@ required:
- reg
- "#io-channel-cells"
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml b/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
index 9ddba7f2e7aa..5b1769c19b17 100644
--- a/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
+++ b/Documentation/devicetree/bindings/input/azoteq,iqs7222.yaml
@@ -4,14 +4,14 @@
$id: http://devicetree.org/schemas/input/azoteq,iqs7222.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Azoteq IQS7222A/B/C Capacitive Touch Controller
+title: Azoteq IQS7222A/B/C/D Capacitive Touch Controller
maintainers:
- Jeff LaBundy <jeff@labundy.com>
description: |
- The Azoteq IQS7222A, IQS7222B and IQS7222C are multichannel capacitive touch
- controllers that feature additional sensing capabilities.
+ The Azoteq IQS7222A, IQS7222B, IQS7222C and IQS7222D are multichannel
+ capacitive touch controllers that feature additional sensing capabilities.
Link to datasheets: https://www.azoteq.com/
@@ -21,6 +21,7 @@ properties:
- azoteq,iqs7222a
- azoteq,iqs7222b
- azoteq,iqs7222c
+ - azoteq,iqs7222d
reg:
maxItems: 1
@@ -173,6 +174,152 @@ properties:
maximum: 3000
description: Specifies the report rate (in ms) during ultra-low-power mode.
+ touchscreen-size-x: true
+ touchscreen-size-y: true
+ touchscreen-inverted-x: true
+ touchscreen-inverted-y: true
+ touchscreen-swapped-x-y: true
+
+ trackpad:
+ type: object
+ description: Represents all channels associated with the trackpad.
+
+ properties:
+ azoteq,channel-select:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 12
+ items:
+ minimum: 0
+ maximum: 13
+ description:
+ Specifies the order of the channels that participate in the trackpad.
+ Specify 255 to omit a given channel for the purpose of mapping a non-
+ rectangular trackpad.
+
+ azoteq,num-rows:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 12
+ description: Specifies the number of rows that comprise the trackpad.
+
+ azoteq,num-cols:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 12
+ description: Specifies the number of columns that comprise the trackpad.
+
+ azoteq,top-speed:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ multipleOf: 4
+ minimum: 0
+ maximum: 1020
+ description:
+ Specifies the speed (in coordinates traveled per conversion) after
+ which coordinate filtering is no longer applied.
+
+ azoteq,bottom-speed:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description:
+ Specifies the speed (in coordinates traveled per conversion) after
+ which coordinate filtering is linearly reduced.
+
+ azoteq,use-prox:
+ type: boolean
+ description:
+ Directs the trackpad to respond to the proximity states of the
+ selected channels instead of their corresponding touch states.
+ Note the trackpad cannot report granular coordinates during a
+ state of proximity.
+
+ patternProperties:
+ "^azoteq,lower-cal-(x|y)$":
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the trackpad's lower starting points.
+
+ "^azoteq,upper-cal-(x|y)$":
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the trackpad's upper starting points.
+
+ "^event-(press|tap|(swipe|flick)-(x|y)-(pos|neg))$":
+ type: object
+ $ref: input.yaml#
+ description:
+ Represents a press or gesture event reported by the trackpad. Specify
+ 'linux,code' under the press event to report absolute coordinates.
+
+ properties:
+ linux,code: true
+
+ azoteq,gesture-angle-tighten:
+ type: boolean
+ description:
+ Limits the tangent of the gesture angle to 0.5 (axial gestures
+ only). If specified in one direction, the effect is applied in
+ either direction.
+
+ azoteq,gesture-max-ms:
+ multipleOf: 16
+ minimum: 0
+ maximum: 4080
+ description:
+ Specifies the length of time (in ms) within which a tap, swipe
+ or flick gesture must be completed in order to be acknowledged
+ by the device. The number specified for any one swipe or flick
+ gesture applies to all other swipe or flick gestures.
+
+ azoteq,gesture-min-ms:
+ multipleOf: 16
+ minimum: 0
+ maximum: 4080
+ description:
+ Specifies the length of time (in ms) for which a tap gesture must
+ be held in order to be acknowledged by the device.
+
+ azoteq,gesture-dist:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 65535
+ description:
+ Specifies the distance (in coordinates) across which a swipe or
+ flick gesture must travel in order to be acknowledged by the
+ device. The number specified for any one swipe or flick gesture
+ applies to all remaining swipe or flick gestures.
+
+ For tap gestures, this property specifies the distance from the
+ original point of contact across which the contact is permitted
+ to travel before the gesture is rejected by the device.
+
+ azoteq,gpio-select:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 3
+ items:
+ minimum: 0
+ maximum: 2
+ description: |
+ Specifies one or more GPIO mapped to the event as follows:
+ 0: GPIO0
+ 1: GPIO3
+ 2: GPIO4
+
+ Note that although multiple events can be mapped to a single
+ GPIO, they must all be of the same type (proximity, touch or
+ trackpad gesture).
+
+ additionalProperties: false
+
+ required:
+ - azoteq,channel-select
+
+ additionalProperties: false
+
patternProperties:
"^cycle-[0-9]$":
type: object
@@ -288,6 +435,10 @@ patternProperties:
Activates the reference channel in response to proximity events
instead of touch events.
+ azoteq,counts-filt-enable:
+ type: boolean
+ description: Applies counts filtering to the channel.
+
azoteq,ati-band:
$ref: /schemas/types.yaml#/definitions/uint32
enum: [0, 1, 2, 3]
@@ -432,12 +583,12 @@ patternProperties:
description: |
Specifies one or more GPIO mapped to the event as follows:
0: GPIO0
- 1: GPIO3 (IQS7222C only)
- 2: GPIO4 (IQS7222C only)
+ 1: GPIO3
+ 2: GPIO4
Note that although multiple events can be mapped to a single
GPIO, they must all be of the same type (proximity, touch or
- slider gesture).
+ slider/trackpad gesture).
azoteq,thresh:
$ref: /schemas/types.yaml#/definitions/uint32
@@ -521,16 +672,16 @@ patternProperties:
minimum: 0
maximum: 65535
description:
- Specifies the speed of movement after which coordinate filtering is
- no longer applied.
+ Specifies the speed (in coordinates traveled per conversion) after
+ which coordinate filtering is no longer applied.
azoteq,bottom-speed:
$ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
maximum: 255
description:
- Specifies the speed of movement after which coordinate filtering is
- linearly reduced.
+ Specifies the speed (in coordinates traveled per conversion) after
+ which coordinate filtering is linearly reduced.
azoteq,bottom-beta:
$ref: /schemas/types.yaml#/definitions/uint32
@@ -595,10 +746,10 @@ patternProperties:
minimum: 0
maximum: 4080
description:
- Specifies the distance across which a swipe or flick gesture must
- travel in order to be acknowledged by the device. The number spec-
- ified for any one swipe or flick gesture applies to all remaining
- swipe or flick gestures.
+ Specifies the distance (in coordinates) across which a swipe or
+ flick gesture must travel in order to be acknowledged by the
+ device. The number specified for any one swipe or flick gesture
+ applies to all remaining swipe or flick gestures.
azoteq,gpio-select:
$ref: /schemas/types.yaml#/definitions/uint32-array
@@ -610,8 +761,8 @@ patternProperties:
description: |
Specifies one or more GPIO mapped to the event as follows:
0: GPIO0
- 1: GPIO3 (IQS7222C only)
- 2: GPIO4 (IQS7222C only)
+ 1: GPIO3
+ 2: GPIO4
Note that although multiple events can be mapped to a single
GPIO, they must all be of the same type (proximity, touch or
@@ -629,8 +780,8 @@ patternProperties:
description: |
Represents a GPIO mapped to one or more events as follows:
gpio-0: GPIO0
- gpio-1: GPIO3 (IQS7222C only)
- gpio-2: GPIO4 (IQS7222C only)
+ gpio-1: GPIO3
+ gpio-2: GPIO4
allOf:
- $ref: ../pinctrl/pincfg-node.yaml#
@@ -641,11 +792,53 @@ patternProperties:
additionalProperties: false
allOf:
+ - $ref: touchscreen/touchscreen.yaml#
+
- if:
properties:
compatible:
contains:
- const: azoteq,iqs7222b
+ enum:
+ - azoteq,iqs7222a
+ - azoteq,iqs7222b
+ - azoteq,iqs7222c
+
+ then:
+ properties:
+ touchscreen-size-x: false
+ touchscreen-size-y: false
+ touchscreen-inverted-x: false
+ touchscreen-inverted-y: false
+ touchscreen-swapped-x-y: false
+
+ trackpad: false
+
+ patternProperties:
+ "^channel-([0-9]|1[0-9])$":
+ properties:
+ azoteq,counts-filt-enable: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - azoteq,iqs7222b
+ - azoteq,iqs7222c
+
+ then:
+ patternProperties:
+ "^channel-([0-9]|1[0-9])$":
+ properties:
+ azoteq,ulp-allow: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - azoteq,iqs7222b
+ - azoteq,iqs7222d
then:
patternProperties:
@@ -657,13 +850,22 @@ allOf:
properties:
azoteq,ref-select: false
+ "^slider-[0-1]$": false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: azoteq,iqs7222b
+
+ then:
+ patternProperties:
+ "^channel-([0-9]|1[0-9])$":
patternProperties:
"^event-(prox|touch)$":
properties:
azoteq,gpio-select: false
- "^slider-[0-1]$": false
-
"^gpio-[0-2]$": false
- if:
@@ -704,10 +906,6 @@ allOf:
else:
patternProperties:
- "^channel-([0-9]|1[0-9])$":
- properties:
- azoteq,ulp-allow: false
-
"^slider-[0-1]$":
patternProperties:
"^event-(press|tap|(swipe|flick)-(pos|neg))$":
diff --git a/Documentation/devicetree/bindings/input/ilitek,ili9882t.yaml b/Documentation/devicetree/bindings/input/ilitek,ili9882t.yaml
new file mode 100644
index 000000000000..c5d9e0e919f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/ilitek,ili9882t.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/ilitek,ili9882t.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ilitek ili9882t touchscreen controller
+
+maintainers:
+ - Cong Yang <yangcong5@huaqin.corp-partner.google.com>
+
+description:
+ Supports the Ilitek ili9882t touchscreen controller.
+ This touchscreen controller uses the i2c-hid protocol with a reset GPIO.
+
+allOf:
+ - $ref: /schemas/input/touchscreen/touchscreen.yaml#
+
+properties:
+ compatible:
+ const: ilitek,ili9882t
+
+ reg:
+ const: 0x41
+
+ interrupts:
+ maxItems: 1
+
+ panel: true
+
+ reset-gpios:
+ maxItems: 1
+ description: Reset GPIO.
+
+ vccio-supply:
+ description: The 1.8V supply to the touchscreen.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - panel
+ - vccio-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchscreen: touchscreen@41 {
+ compatible = "ilitek,ili9882t";
+ reg = <0x41>;
+
+ interrupt-parent = <&pio>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+
+ panel = <&panel>;
+ reset-gpios = <&pio 60 GPIO_ACTIVE_LOW>;
+ vccio-supply = <&mt6366_vio18_reg>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/stmpe-keypad.txt b/Documentation/devicetree/bindings/input/stmpe-keypad.txt
deleted file mode 100644
index 12bb771d66d4..000000000000
--- a/Documentation/devicetree/bindings/input/stmpe-keypad.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-* STMPE Keypad
-
-Required properties:
- - compatible : "st,stmpe-keypad"
- - linux,keymap : See ./matrix-keymap.txt
-
-Optional properties:
- - debounce-interval : Debouncing interval time in milliseconds
- - st,scan-count : Scanning cycles elapsed before key data is updated
- - st,no-autorepeat : If specified device will not autorepeat
- - keypad,num-rows : See ./matrix-keymap.txt
- - keypad,num-columns : See ./matrix-keymap.txt
-
-Example:
-
- stmpe_keypad {
- compatible = "st,stmpe-keypad";
-
- debounce-interval = <64>;
- st,scan-count = <8>;
- st,no-autorepeat;
-
- linux,keymap = <0x205006b
- 0x4010074
- 0x3050072
- 0x1030004
- 0x502006a
- 0x500000a
- 0x5008b
- 0x706001c
- 0x405000b
- 0x6070003
- 0x3040067
- 0x303006c
- 0x60400e7
- 0x602009e
- 0x4020073
- 0x5050002
- 0x4030069
- 0x3020008>;
- };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml b/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml
new file mode 100644
index 000000000000..8cf371b99f19
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs7211.yaml
@@ -0,0 +1,769 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/azoteq,iqs7211.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Azoteq IQS7210A/7211A/E Trackpad/Touchscreen Controller
+
+maintainers:
+ - Jeff LaBundy <jeff@labundy.com>
+
+description: |
+ The Azoteq IQS7210A, IQS7211A and IQS7211E trackpad and touchscreen
+ controllers employ projected-capacitance sensing and can track two contacts.
+
+ Link to datasheets: https://www.azoteq.com/
+
+properties:
+ compatible:
+ enum:
+ - azoteq,iqs7210a
+ - azoteq,iqs7211a
+ - azoteq,iqs7211e
+
+ reg:
+ maxItems: 1
+
+ irq-gpios:
+ maxItems: 1
+ description:
+ Specifies the GPIO connected to the device's active-low RDY output. The
+ pin doubles as the IQS7211E's active-low MCLR input, in which case this
+ GPIO must be configured as open-drain.
+
+ reset-gpios:
+ maxItems: 1
+ description:
+ Specifies the GPIO connected to the device's active-low MCLR input. The
+ device is temporarily held in hardware reset prior to initialization if
+ this property is present.
+
+ azoteq,forced-comms:
+ type: boolean
+ description:
+ Enables forced communication; to be used with host adapters that cannot
+ tolerate clock stretching.
+
+ azoteq,forced-comms-default:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
+ description:
+ Indicates if the device's OTP memory enables (1) or disables (0) forced
+ communication by default. Specifying this property can expedite startup
+ time if the default value is known.
+
+ If this property is not specified, communication is not initiated until
+ the device asserts its RDY pin shortly after exiting hardware reset. At
+ that point, forced communication is either enabled or disabled based on
+ the presence or absence of the 'azoteq,forced-comms' property.
+
+ azoteq,rate-active-ms:
+ minimum: 0
+ maximum: 65535
+ description: Specifies the report rate (in ms) during active mode.
+
+ azoteq,rate-touch-ms:
+ minimum: 0
+ maximum: 65535
+ description: Specifies the report rate (in ms) during idle-touch mode.
+
+ azoteq,rate-idle-ms:
+ minimum: 0
+ maximum: 65535
+ description: Specifies the report rate (in ms) during idle mode.
+
+ azoteq,rate-lp1-ms:
+ minimum: 0
+ maximum: 65535
+ description: Specifies the report rate (in ms) during low-power mode 1.
+
+ azoteq,rate-lp2-ms:
+ minimum: 0
+ maximum: 65535
+ description: Specifies the report rate (in ms) during low-power mode 2.
+
+ azoteq,timeout-active-ms:
+ multipleOf: 1000
+ minimum: 0
+ maximum: 65535000
+ description:
+ Specifies the length of time (in ms) to wait for an event before moving
+ from active mode to idle or idle-touch modes.
+
+ azoteq,timeout-touch-ms:
+ multipleOf: 1000
+ minimum: 0
+ maximum: 65535000
+ description:
+ Specifies the length of time (in ms) to wait for an event before moving
+ from idle-touch mode to idle mode.
+
+ azoteq,timeout-idle-ms:
+ multipleOf: 1000
+ minimum: 0
+ maximum: 65535000
+ description:
+ Specifies the length of time (in ms) to wait for an event before moving
+ from idle mode to low-power mode 1.
+
+ azoteq,timeout-lp1-ms:
+ multipleOf: 1000
+ minimum: 0
+ maximum: 65535000
+ description:
+ Specifies the length of time (in ms) to wait for an event before moving
+ from low-power mode 1 to low-power mode 2.
+
+ azoteq,timeout-lp2-ms:
+ multipleOf: 1000
+ minimum: 0
+ maximum: 60000
+ description:
+ Specifies the rate (in ms) at which the trackpad reference values
+ are updated during low-power modes 1 and 2.
+
+ azoteq,timeout-ati-ms:
+ multipleOf: 1000
+ minimum: 0
+ maximum: 60000
+ description:
+ Specifies the delay (in ms) before the automatic tuning implementation
+ (ATI) is retried in the event it fails to complete.
+
+ azoteq,timeout-comms-ms:
+ minimum: 0
+ maximum: 65535
+ description:
+ Specifies the delay (in ms) before a communication window is closed.
+
+ azoteq,timeout-press-ms:
+ multipleOf: 1000
+ minimum: 0
+ maximum: 60000
+ description:
+ Specifies the length of time (in ms) to wait before automatically
+ releasing a press event. Specify zero to allow the press state to
+ persist indefinitely.
+
+ azoteq,fosc-freq:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
+ description: |
+ Specifies the device's core clock frequency as follows:
+ 0: 14 MHz
+ 1: 18 MHz
+
+ azoteq,fosc-trim:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description: Specifies the device's core clock frequency trim.
+
+ azoteq,num-contacts:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 2
+ default: 0
+ description: Specifies the number of contacts reported by the device.
+
+ azoteq,contact-split:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the contact (finger) split factor.
+
+ azoteq,trim-x:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the horizontal trim width.
+
+ azoteq,trim-y:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the vertical trim height.
+
+ trackpad:
+ type: object
+ description: Represents all channels associated with the trackpad.
+
+ properties:
+ azoteq,rx-enable:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 8
+ items:
+ minimum: 0
+ maximum: 7
+ description:
+ Specifies the order of the CRx pin(s) associated with the trackpad.
+
+ azoteq,tx-enable:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 12
+ items:
+ minimum: 0
+ maximum: 11
+ description:
+ Specifies the order of the CTx pin(s) associated with the trackpad.
+
+ azoteq,channel-select:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 36
+ items:
+ minimum: 0
+ maximum: 255
+ description: |
+ Specifies the channels mapped to each cycle in the following order:
+ Cycle 0, slot 0
+ Cycle 0, slot 1
+ Cycle 1, slot 0
+ Cycle 1, slot 1
+ ...and so on. Specify 255 to disable a given slot.
+
+ azoteq,ati-frac-div-fine:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
+ description: Specifies the trackpad's ATI fine fractional divider.
+
+ azoteq,ati-frac-mult-coarse:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description: Specifies the trackpad's ATI coarse fractional multiplier.
+
+ azoteq,ati-frac-div-coarse:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
+ description: Specifies the trackpad's ATI coarse fractional divider.
+
+ azoteq,ati-comp-div:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
+ description: Specifies the trackpad's ATI compensation divider.
+
+ azoteq,ati-target:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 65535
+ description: Specifies the trackpad's ATI target.
+
+ azoteq,touch-enter:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the trackpad's touch entrance factor.
+
+ azoteq,touch-exit:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the trackpad's touch exit factor.
+
+ azoteq,thresh:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the trackpad's stationary touch threshold.
+
+ azoteq,conv-period:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the trackpad's conversion period.
+
+ azoteq,conv-frac:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the trackpad's conversion frequency fraction.
+
+ patternProperties:
+ "^event-(tap(-double|-triple)?|hold|palm|swipe-(x|y)-(pos|neg)(-hold)?)$":
+ type: object
+ $ref: ../input.yaml#
+ description:
+ Represents a gesture event reported by the trackpad. In the case of
+ axial gestures, the duration or distance specified in one direction
+ applies to both directions along the same axis.
+
+ properties:
+ linux,code: true
+
+ azoteq,gesture-max-ms:
+ minimum: 0
+ maximum: 65535
+ description: Specifies the maximum duration of tap/swipe gestures.
+
+ azoteq,gesture-mid-ms:
+ minimum: 0
+ maximum: 65535
+ description:
+ Specifies the maximum duration between subsequent tap gestures
+ (IQS7211E only).
+
+ azoteq,gesture-min-ms:
+ minimum: 0
+ maximum: 65535
+ description: Specifies the minimum duration of hold gestures.
+
+ azoteq,gesture-dist:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 65535
+ description:
+ Specifies the minimum (swipe) or maximum (tap and hold) distance
+ a finger may travel to be considered a gesture.
+
+ azoteq,gesture-dist-rep:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 65535
+ description:
+ Specifies the minimum distance a finger must travel to elicit a
+ repeated swipe gesture (IQS7211E only).
+
+ azoteq,gesture-angle:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 75
+ description:
+ Specifies the maximum angle (in degrees) a finger may travel to
+ be considered a swipe gesture.
+
+ azoteq,thresh:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 42
+ description: Specifies the palm gesture threshold (IQS7211E only).
+
+ additionalProperties: false
+
+ dependencies:
+ azoteq,rx-enable: ["azoteq,tx-enable"]
+ azoteq,tx-enable: ["azoteq,rx-enable"]
+ azoteq,channel-select: ["azoteq,rx-enable"]
+
+ additionalProperties: false
+
+ alp:
+ type: object
+ $ref: ../input.yaml#
+ description: Represents the alternate low-power channel (ALP).
+
+ properties:
+ azoteq,rx-enable:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 8
+ items:
+ minimum: 0
+ maximum: 7
+ description:
+ Specifies the CRx pin(s) associated with the ALP in no particular
+ order.
+
+ azoteq,tx-enable:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 12
+ items:
+ minimum: 0
+ maximum: 11
+ description:
+ Specifies the CTx pin(s) associated with the ALP in no particular
+ order.
+
+ azoteq,ati-frac-div-fine:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
+ description: Specifies the ALP's ATI fine fractional divider.
+
+ azoteq,ati-frac-mult-coarse:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description: Specifies the ALP's ATI coarse fractional multiplier.
+
+ azoteq,ati-frac-div-coarse:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
+ description: Specifies the ALP's ATI coarse fractional divider.
+
+ azoteq,ati-comp-div:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
+ description: Specifies the ALP's ATI compensation divider.
+
+ azoteq,ati-target:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 65535
+ description: Specifies the ALP's ATI target.
+
+ azoteq,ati-base:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ multipleOf: 8
+ minimum: 0
+ maximum: 255
+ description: Specifies the ALP's ATI base.
+
+ azoteq,ati-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
+ description: |
+ Specifies the ALP's ATI mode as follows:
+ 0: Partial
+ 1: Full
+
+ azoteq,sense-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
+ description: |
+ Specifies the ALP's sensing mode as follows:
+ 0: Self capacitive
+ 1: Mutual capacitive
+
+ azoteq,debounce-enter:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the ALP's debounce entrance factor.
+
+ azoteq,debounce-exit:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the ALP's debounce exit factor.
+
+ azoteq,thresh:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 65535
+ description: Specifies the ALP's proximity or touch threshold.
+
+ azoteq,conv-period:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the ALP's conversion period.
+
+ azoteq,conv-frac:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the ALP's conversion frequency fraction.
+
+ linux,code: true
+
+ additionalProperties: false
+
+ button:
+ type: object
+ description: Represents the inductive or capacitive button.
+
+ properties:
+ azoteq,ati-frac-div-fine:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
+ description: Specifies the button's ATI fine fractional divider.
+
+ azoteq,ati-frac-mult-coarse:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 15
+ description: Specifies the button's ATI coarse fractional multiplier.
+
+ azoteq,ati-frac-div-coarse:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
+ description: Specifies the button's ATI coarse fractional divider.
+
+ azoteq,ati-comp-div:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 31
+ description: Specifies the button's ATI compensation divider.
+
+ azoteq,ati-target:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 65535
+ description: Specifies the button's ATI target.
+
+ azoteq,ati-base:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ multipleOf: 8
+ minimum: 0
+ maximum: 255
+ description: Specifies the button's ATI base.
+
+ azoteq,ati-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1]
+ description: |
+ Specifies the button's ATI mode as follows:
+ 0: Partial
+ 1: Full
+
+ azoteq,sense-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2]
+ description: |
+ Specifies the button's sensing mode as follows:
+ 0: Self capacitive
+ 1: Mutual capacitive
+ 2: Inductive
+
+ azoteq,touch-enter:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the button's touch entrance factor.
+
+ azoteq,touch-exit:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the button's touch exit factor.
+
+ azoteq,debounce-enter:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the button's debounce entrance factor.
+
+ azoteq,debounce-exit:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the button's debounce exit factor.
+
+ azoteq,thresh:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 65535
+ description: Specifies the button's proximity threshold.
+
+ azoteq,conv-period:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the button's conversion period.
+
+ azoteq,conv-frac:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+ description: Specifies the button's conversion frequency fraction.
+
+ patternProperties:
+ "^event-(prox|touch)$":
+ type: object
+ $ref: ../input.yaml#
+ description:
+ Represents a proximity or touch event reported by the button.
+
+ properties:
+ linux,code: true
+
+ additionalProperties: false
+
+ additionalProperties: false
+
+ wakeup-source: true
+
+ touchscreen-size-x: true
+ touchscreen-size-y: true
+ touchscreen-inverted-x: true
+ touchscreen-inverted-y: true
+ touchscreen-swapped-x-y: true
+
+dependencies:
+ touchscreen-size-x: ["azoteq,num-contacts"]
+ touchscreen-size-y: ["azoteq,num-contacts"]
+ touchscreen-inverted-x: ["azoteq,num-contacts"]
+ touchscreen-inverted-y: ["azoteq,num-contacts"]
+ touchscreen-swapped-x-y: ["azoteq,num-contacts"]
+
+required:
+ - compatible
+ - reg
+ - irq-gpios
+
+additionalProperties: false
+
+allOf:
+ - $ref: touchscreen.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: azoteq,iqs7210a
+
+ then:
+ properties:
+ alp:
+ properties:
+ azoteq,rx-enable:
+ maxItems: 4
+ items:
+ minimum: 4
+
+ else:
+ properties:
+ azoteq,timeout-press-ms: false
+
+ alp:
+ properties:
+ azoteq,ati-mode: false
+
+ button: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: azoteq,iqs7211e
+
+ then:
+ properties:
+ reset-gpios: false
+
+ trackpad:
+ properties:
+ azoteq,tx-enable:
+ maxItems: 13
+ items:
+ maximum: 12
+
+ alp:
+ properties:
+ azoteq,tx-enable:
+ maxItems: 13
+ items:
+ maximum: 12
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/input/input.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touch@56 {
+ compatible = "azoteq,iqs7210a";
+ reg = <0x56>;
+ irq-gpios = <&gpio 4 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&gpio 17 (GPIO_ACTIVE_LOW |
+ GPIO_PUSH_PULL)>;
+ azoteq,num-contacts = <2>;
+
+ trackpad {
+ azoteq,rx-enable = <6>, <5>, <4>, <3>, <2>;
+ azoteq,tx-enable = <1>, <7>, <8>, <9>, <10>;
+ };
+
+ button {
+ azoteq,sense-mode = <2>;
+ azoteq,touch-enter = <40>;
+ azoteq,touch-exit = <36>;
+
+ event-touch {
+ linux,code = <KEY_HOME>;
+ };
+ };
+
+ alp {
+ azoteq,sense-mode = <1>;
+ linux,code = <KEY_POWER>;
+ };
+ };
+ };
+
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/input/input.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touch@56 {
+ compatible = "azoteq,iqs7211e";
+ reg = <0x56>;
+ irq-gpios = <&gpio 4 (GPIO_ACTIVE_LOW |
+ GPIO_OPEN_DRAIN)>;
+
+ trackpad {
+ event-tap {
+ linux,code = <KEY_PLAYPAUSE>;
+ };
+
+ event-tap-double {
+ linux,code = <KEY_SHUFFLE>;
+ };
+
+ event-tap-triple {
+ linux,code = <KEY_AGAIN>;
+ };
+
+ event-hold {
+ linux,code = <KEY_STOP>;
+ };
+
+ event-palm {
+ linux,code = <KEY_EXIT>;
+ };
+
+ event-swipe-x-pos {
+ linux,code = <KEY_REWIND>;
+ };
+
+ event-swipe-x-pos-hold {
+ linux,code = <KEY_PREVIOUS>;
+ };
+
+ event-swipe-x-neg {
+ linux,code = <KEY_FASTFORWARD>;
+ };
+
+ event-swipe-x-neg-hold {
+ linux,code = <KEY_NEXT>;
+ };
+
+ event-swipe-y-pos {
+ linux,code = <KEY_VOLUMEUP>;
+ };
+
+ event-swipe-y-pos-hold {
+ linux,code = <KEY_MUTE>;
+ };
+
+ event-swipe-y-neg {
+ linux,code = <KEY_VOLUMEDOWN>;
+ };
+
+ event-swipe-y-neg-hold {
+ linux,code = <KEY_MUTE>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
index ef4c841387bd..f2808cb4d99d 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.yaml
@@ -93,6 +93,12 @@ properties:
minimum: 1
maximum: 255
+ threshold:
+ description: Allows setting the "click"-threshold in the range from 0 to 255.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 255
+
touchscreen-size-x: true
touchscreen-size-y: true
touchscreen-fuzz-x: true
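For illustration, a hypothetical node using the new threshold property might look as follows; the FT5406 variant, I2C address, interrupt wiring and the value 50 are assumptions rather than values taken from a real board, and the node would normally sit under the relevant I2C bus:

    #include <dt-bindings/interrupt-controller/irq.h>

    touchscreen@38 {
        compatible = "edt,edt-ft5406";
        reg = <0x38>;
        interrupt-parent = <&gpio2>;
        interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
        /* Assumed "click" threshold; valid values are 0 to 255 */
        threshold = <50>;
    };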
diff --git a/Documentation/devicetree/bindings/input/touchscreen/eeti,exc3000.yaml b/Documentation/devicetree/bindings/input/touchscreen/eeti,exc3000.yaml
index 007adbc89c14..9dc25d30a0a8 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/eeti,exc3000.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/eeti,exc3000.yaml
@@ -24,6 +24,8 @@ properties:
maxItems: 1
reset-gpios:
maxItems: 1
+ vdd-supply:
+ description: Power supply regulator for the chip
touchscreen-size-x: true
touchscreen-size-y: true
touchscreen-inverted-x: true
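A minimal sketch of the new vdd-supply property in use; the regulator label, interrupt wiring and touchscreen size are assumptions, and the node would normally sit under its I2C bus:

    #include <dt-bindings/interrupt-controller/irq.h>

    touchscreen@2a {
        compatible = "eeti,exc3000";
        reg = <0x2a>;
        interrupt-parent = <&gpio1>;
        interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
        /* Assumed regulator powering the chip */
        vdd-supply = <&reg_3v3>;
        touchscreen-size-x = <4096>;
        touchscreen-size-y = <4096>;
    };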
diff --git a/Documentation/devicetree/bindings/input/touchscreen/melfas,mms114.yaml b/Documentation/devicetree/bindings/input/touchscreen/melfas,mms114.yaml
index fdd02898e249..07f9dd6b1c9c 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/melfas,mms114.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/melfas,mms114.yaml
@@ -52,6 +52,11 @@ properties:
touchscreen-swapped-x-y: true
touchscreen-max-pressure: true
+ linux,keycodes:
+ description: Keycodes for the touch keys
+ minItems: 1
+ maxItems: 15
+
additionalProperties: false
required:
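As a sketch, the new linux,keycodes property could describe two touch keys like this; the MMS152 variant, address, regulators and key mapping are assumptions:

    #include <dt-bindings/input/input.h>
    #include <dt-bindings/interrupt-controller/irq.h>

    touchscreen@48 {
        compatible = "melfas,mms152";
        reg = <0x48>;
        interrupt-parent = <&gpio>;
        interrupts = <39 IRQ_TYPE_EDGE_FALLING>;
        avdd-supply = <&ldo1_reg>;   /* assumed regulators */
        vdd-supply = <&ldo2_reg>;
        touchscreen-size-x = <720>;
        touchscreen-size-y = <1280>;
        /* Assumed mapping for the panel's touch keys (up to 15 entries) */
        linux,keycodes = <KEY_MENU KEY_BACK>;
    };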
diff --git a/Documentation/devicetree/bindings/input/touchscreen/stmpe.txt b/Documentation/devicetree/bindings/input/touchscreen/stmpe.txt
deleted file mode 100644
index 238b51555c04..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/stmpe.txt
+++ /dev/null
@@ -1,108 +0,0 @@
-STMPE Touchscreen
-----------------
-
-Required properties:
- - compatible: "st,stmpe-ts"
-
-Optional properties:
-- st,ave-ctrl : Sample average control
- 0 -> 1 sample
- 1 -> 2 samples
- 2 -> 4 samples
- 3 -> 8 samples
-- st,touch-det-delay : Touch detect interrupt delay (recommended is 3)
- 0 -> 10 us
- 1 -> 50 us
- 2 -> 100 us
- 3 -> 500 us
- 4 -> 1 ms
- 5 -> 5 ms
- 6 -> 10 ms
- 7 -> 50 ms
-- st,settling : Panel driver settling time (recommended is 2)
- 0 -> 10 us
- 1 -> 100 us
- 2 -> 500 us
- 3 -> 1 ms
- 4 -> 5 ms
- 5 -> 10 ms
- 6 -> 50 ms
- 7 -> 100 ms
-- st,fraction-z : Length of the fractional part in z (recommended is 7)
- (fraction-z ([0..7]) = Count of the fractional part)
-- st,i-drive : current limit value of the touchscreen drivers
- 0 -> 20 mA (typical 35mA max)
- 1 -> 50 mA (typical 80 mA max)
-
-Optional properties common with MFD (deprecated):
- - st,sample-time : ADC conversion time in number of clock.
- 0 -> 36 clocks
- 1 -> 44 clocks
- 2 -> 56 clocks
- 3 -> 64 clocks
- 4 -> 80 clocks (recommended)
- 5 -> 96 clocks
- 6 -> 124 clocks
- - st,mod-12b : ADC Bit mode
- 0 -> 10bit ADC
- 1 -> 12bit ADC
- - st,ref-sel : ADC reference source
- 0 -> internal
- 1 -> external
- - st,adc-freq : ADC Clock speed
- 0 -> 1.625 MHz
- 1 -> 3.25 MHz
- 2 || 3 -> 6.5 MHz
-
-Node should be child node of stmpe node to which it belongs.
-
-Note that common ADC settings of stmpe_touchscreen (child) will take precedence
-over the settings done in MFD.
-
-Example:
-
-stmpe811@41 {
- compatible = "st,stmpe811";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_touch_int>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x41>;
- interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
- interrupt-parent = <&gpio4>;
- interrupt-controller;
- id = <0>;
- blocks = <0x5>;
- irq-trigger = <0x1>;
- /* Common ADC settings */
- /* 3.25 MHz ADC clock speed */
- st,adc-freq = <1>;
- /* 12-bit ADC */
- st,mod-12b = <1>;
- /* internal ADC reference */
- st,ref-sel = <0>;
- /* ADC converstion time: 80 clocks */
- st,sample-time = <4>;
-
- stmpe_touchscreen {
- compatible = "st,stmpe-ts";
- reg = <0>;
- /* 8 sample average control */
- st,ave-ctrl = <3>;
- /* 5 ms touch detect interrupt delay */
- st,touch-det-delay = <5>;
- /* 1 ms panel driver settling time */
- st,settling = <3>;
- /* 7 length fractional part in z */
- st,fraction-z = <7>;
- /*
- * 50 mA typical 80 mA max touchscreen drivers
- * current limit value
- */
- st,i-drive = <1>;
- };
- stmpe_adc {
- compatible = "st,stmpe-adc";
- st,norequest-mask = <0x0F>;
- };
-};
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
index 5d17bdcfdf70..73f809cdb783 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml
@@ -25,14 +25,20 @@ properties:
- const: qcom,msm8998-bwmon # BWMON v4
- items:
- enum:
+ - qcom,sc7180-cpu-bwmon
- qcom,sc7280-cpu-bwmon
- qcom,sc8280xp-cpu-bwmon
- qcom,sdm845-cpu-bwmon
+ - qcom,sm6350-llcc-bwmon
+ - qcom,sm8250-cpu-bwmon
- qcom,sm8550-cpu-bwmon
- const: qcom,sdm845-bwmon # BWMON v4, unified register space
- items:
- enum:
+ - qcom,sc7180-llcc-bwmon
- qcom,sc8280xp-llcc-bwmon
+ - qcom,sm6350-cpu-bwmon
+ - qcom,sm8250-llcc-bwmon
- qcom,sm8550-llcc-bwmon
- const: qcom,sc7280-llcc-bwmon
- const: qcom,sc7280-llcc-bwmon # BWMON v5
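As a hedged sketch, one of the newly added compatibles pairs with the existing qcom,sdm845-bwmon fallback roughly as below; the register range, interrupt number, interconnect endpoints and OPP table phandle are placeholders, not taken from a real SM8250 device tree:

    #include <dt-bindings/interrupt-controller/arm-gic.h>

    pmu@9091000 {
        compatible = "qcom,sm8250-cpu-bwmon", "qcom,sdm845-bwmon";
        reg = <0x9091000 0x1000>;                   /* placeholder */
        interrupts = <GIC_SPI 581 IRQ_TYPE_LEVEL_HIGH>;
        interconnects = <&gem_noc MASTER_APPSS_PROC 3 &gem_noc SLAVE_LLCC 3>;
        operating-points-v2 = <&cpu_bwmon_opp_table>;
    };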
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml b/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
index 9d0a98d77ae9..21dae0b92819 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
@@ -21,6 +21,7 @@ properties:
- enum:
- qcom,sc7180-osm-l3
- qcom,sc8180x-osm-l3
+ - qcom,sdm670-osm-l3
- qcom,sdm845-osm-l3
- qcom,sm6350-osm-l3
- qcom,sm8150-osm-l3
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
index 4d93ad415e0b..a46497af1fd8 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
@@ -18,9 +18,6 @@ description: |
least one RPMh device child node pertaining to their RSC and each provider
can map to multiple RPMh resources.
-allOf:
- - $ref: qcom,rpmh-common.yaml#
-
properties:
reg:
maxItems: 1
@@ -91,6 +88,7 @@ properties:
- qcom,sm8250-mc-virt
- qcom,sm8250-mmss-noc
- qcom,sm8250-npu-noc
+ - qcom,sm8250-qup-virt
- qcom,sm8250-system-noc
- qcom,sm8350-aggre1-noc
- qcom,sm8350-aggre2-noc
@@ -107,7 +105,19 @@ properties:
required:
- compatible
- - reg
+
+allOf:
+ - $ref: qcom,rpmh-common.yaml#
+ - if:
+ not:
+ properties:
+ compatible:
+ enum:
+ - qcom,sm8250-qup-virt
+ then:
+ required:
+ - reg
+
unevaluatedProperties: false
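The reg requirement is relaxed above because the QUP virtual interconnect provider has no register space of its own. A hypothetical provider node could therefore be as small as this, with the common provider properties coming from qcom,rpmh-common.yaml and the BCM voter phandle assumed:

    qup_virt: interconnect-qup-virt {
        compatible = "qcom,sm8250-qup-virt";
        #interconnect-cells = <2>;
        qcom,bcm-voters = <&apps_bcm_voter>;
    };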
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,sti-irq-syscfg.txt b/Documentation/devicetree/bindings/interrupt-controller/st,sti-irq-syscfg.txt
deleted file mode 100644
index 977d7ed3670e..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/st,sti-irq-syscfg.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-STMicroelectronics STi System Configuration Controlled IRQs
------------------------------------------------------------
-
-On STi based systems; External, CTI (Core Sight), PMU (Performance Management),
-and PL310 L2 Cache IRQs are controlled using System Configuration registers.
-This driver is used to unmask them prior to use.
-
-Required properties:
-- compatible : Should be "st,stih407-irq-syscfg"
-- st,syscfg : Phandle to Cortex-A9 IRQ system config registers
-- st,irq-device : Array of IRQs to enable - should be 2 in length
-- st,fiq-device : Array of FIQs to enable - should be 2 in length
-
-Optional properties:
-- st,invert-ext : External IRQs can be inverted at will. This property inverts
- these IRQs using bitwise logic. A number of defines have been
- provided for convenience:
- ST_IRQ_SYSCFG_EXT_1_INV
- ST_IRQ_SYSCFG_EXT_2_INV
- ST_IRQ_SYSCFG_EXT_3_INV
-Example:
-
-irq-syscfg {
- compatible = "st,stih407-irq-syscfg";
- st,syscfg = <&syscfg_cpu>;
- st,irq-device = <ST_IRQ_SYSCFG_PMU_0>,
- <ST_IRQ_SYSCFG_PMU_1>;
- st,fiq-device = <ST_IRQ_SYSCFG_DISABLED>,
- <ST_IRQ_SYSCFG_DISABLED>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,stih407-irq-syscfg.yaml b/Documentation/devicetree/bindings/interrupt-controller/st,stih407-irq-syscfg.yaml
new file mode 100644
index 000000000000..2b153d7c5421
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/st,stih407-irq-syscfg.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/st,stih407-irq-syscfg.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STi System Configuration Controlled IRQs
+
+maintainers:
+ - Patrice Chotard <patrice.chotard@foss.st.com>
+
+description:
+  On STi based systems, external, CTI (CoreSight), PMU (Performance
+ Management), and PL310 L2 Cache IRQs are controlled using System
+ Configuration registers. This device is used to unmask them prior to use.
+
+properties:
+ compatible:
+ const: st,stih407-irq-syscfg
+
+ st,syscfg:
+ description: Phandle to Cortex-A9 IRQ system config registers
+ $ref: /schemas/types.yaml#/definitions/phandle
+
+ st,irq-device:
+ description: Array of IRQs to enable.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+      - description: Enable the IRQ of channel one.
+      - description: Enable the IRQ of channel two.
+
+ st,fiq-device:
+ description: Array of FIQs to enable.
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ items:
+      - description: Enable the FIQ of channel one.
+      - description: Enable the FIQ of channel two.
+
+ st,invert-ext:
+ description: External IRQs can be inverted at will. This property inverts
+ these three IRQs using bitwise logic, each one being encoded respectively
+ on the first, second and fourth bit.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 1, 2, 3, 4, 5, 6 ]
+
+required:
+ - compatible
+ - st,syscfg
+ - st,irq-device
+ - st,fiq-device
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq-st.h>
+ irq-syscfg {
+ compatible = "st,stih407-irq-syscfg";
+ st,syscfg = <&syscfg_cpu>;
+ st,irq-device = <ST_IRQ_SYSCFG_PMU_0>,
+ <ST_IRQ_SYSCFG_PMU_1>;
+ st,fiq-device = <ST_IRQ_SYSCFG_DISABLED>,
+ <ST_IRQ_SYSCFG_DISABLED>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
index 3a31a979709b..cf29ab10501c 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -270,6 +270,47 @@ allOf:
contains:
enum:
- qcom,msm8998-smmu-v2
+ then:
+ anyOf:
+ - properties:
+ clock-names:
+ items:
+ - const: bus
+ clocks:
+ items:
+ - description: bus clock required for downstream bus access and for
+ the smmu ptw
+ - properties:
+ clock-names:
+ items:
+ - const: iface
+ - const: mem
+ - const: mem_iface
+ clocks:
+ items:
+ - description: interface clock required to access smmu's registers
+ through the TCU's programming interface.
+ - description: bus clock required for memory access
+ - description: bus clock required for GPU memory access
+ - properties:
+ clock-names:
+ items:
+ - const: iface-mm
+ - const: iface-smmu
+ - const: bus-smmu
+ clocks:
+ items:
+ - description: interface clock required to access mnoc's registers
+ through the TCU's programming interface.
+ - description: interface clock required to access smmu's registers
+ through the TCU's programming interface.
+ - description: bus clock required for the smmu ptw
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,sdm630-smmu-v2
- qcom,sm6375-smmu-v2
then:
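To illustrate the second of the three clock layouts allowed above (iface/mem/mem_iface), a hedged MSM8998 GPU SMMU node might look like this; the unit address, interrupt numbers and clock specifiers are placeholders:

    #include <dt-bindings/interrupt-controller/arm-gic.h>

    iommu@5040000 {
        compatible = "qcom,msm8998-smmu-v2", "qcom,smmu-v2";
        reg = <0x05040000 0x10000>;
        #global-interrupts = <1>;
        #iommu-cells = <1>;
        interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&gcc 10>, <&gcc 11>, <&gcc 12>;   /* placeholder specifiers */
        clock-names = "iface", "mem", "mem_iface";
    };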
diff --git a/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml b/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
index 5b6395bc10e0..ea6b0f5f24de 100644
--- a/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
+++ b/Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
@@ -78,6 +78,9 @@ properties:
- mediatek,mt8173-m4u # generation two
- mediatek,mt8183-m4u # generation two
- mediatek,mt8186-iommu-mm # generation two
+ - mediatek,mt8188-iommu-vdo # generation two
+ - mediatek,mt8188-iommu-vpp # generation two
+ - mediatek,mt8188-iommu-infra # generation two
- mediatek,mt8192-m4u # generation two
- mediatek,mt8195-iommu-vdo # generation two
- mediatek,mt8195-iommu-vpp # generation two
@@ -123,6 +126,7 @@ properties:
description: |
This is the mtk_m4u_id according to the HW. Specifies the mtk_m4u_id as
defined in
+ dt-binding/memory/mediatek,mt8188-memory-port.h for mt8188,
dt-binding/memory/mt2701-larb-port.h for mt2701 and mt7623,
dt-binding/memory/mt2712-larb-port.h for mt2712,
dt-binding/memory/mt6779-larb-port.h for mt6779,
@@ -155,6 +159,8 @@ allOf:
- mediatek,mt6795-m4u
- mediatek,mt8173-m4u
- mediatek,mt8186-iommu-mm
+ - mediatek,mt8188-iommu-vdo
+ - mediatek,mt8188-iommu-vpp
- mediatek,mt8192-m4u
- mediatek,mt8195-iommu-vdo
- mediatek,mt8195-iommu-vpp
@@ -168,6 +174,8 @@ allOf:
compatible:
enum:
- mediatek,mt8186-iommu-mm
+ - mediatek,mt8188-iommu-vdo
+ - mediatek,mt8188-iommu-vpp
- mediatek,mt8192-m4u
- mediatek,mt8195-iommu-vdo
- mediatek,mt8195-iommu-vpp
@@ -194,7 +202,9 @@ allOf:
properties:
compatible:
contains:
- const: mediatek,mt8195-iommu-infra
+ enum:
+ - mediatek,mt8188-iommu-infra
+ - mediatek,mt8195-iommu-infra
then:
required:
diff --git a/Documentation/devicetree/bindings/iommu/qcom,iommu.yaml b/Documentation/devicetree/bindings/iommu/qcom,iommu.yaml
index d9fabdf930d9..a74eb899c381 100644
--- a/Documentation/devicetree/bindings/iommu/qcom,iommu.yaml
+++ b/Documentation/devicetree/bindings/iommu/qcom,iommu.yaml
@@ -17,11 +17,16 @@ description: |
properties:
compatible:
- items:
- - enum:
- - qcom,msm8916-iommu
- - qcom,msm8953-iommu
- - const: qcom,msm-iommu-v1
+ oneOf:
+ - items:
+ - enum:
+ - qcom,msm8916-iommu
+ - qcom,msm8953-iommu
+ - const: qcom,msm-iommu-v1
+ - items:
+ - enum:
+ - qcom,msm8976-iommu
+ - const: qcom,msm-iommu-v2
clocks:
items:
@@ -64,6 +69,8 @@ patternProperties:
enum:
- qcom,msm-iommu-v1-ns
- qcom,msm-iommu-v1-sec
+ - qcom,msm-iommu-v2-ns
+ - qcom,msm-iommu-v2-sec
interrupts:
maxItems: 1
@@ -71,6 +78,11 @@ patternProperties:
reg:
maxItems: 1
+ qcom,ctx-asid:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+        The ASID number associated with the context bank.
+
required:
- compatible
- interrupts
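A hedged sketch of a context bank using the new qcom,ctx-asid property on the newly added MSM8976 IOMMU; addresses, clock specifiers, interrupt number and the ASID value are assumptions:

    #include <dt-bindings/interrupt-controller/arm-gic.h>

    apps_iommu: iommu@1e20000 {
        compatible = "qcom,msm8976-iommu", "qcom,msm-iommu-v2";
        reg = <0x01e20000 0x40000>;
        ranges = <0 0x01e20000 0x40000>;
        #address-cells = <1>;
        #size-cells = <1>;
        #iommu-cells = <1>;
        clocks = <&gcc 20>, <&gcc 21>;              /* placeholder iface/bus clocks */
        clock-names = "iface", "bus";

        /* Context bank with an explicitly assigned ASID (value assumed) */
        iommu-ctx@3000 {
            compatible = "qcom,msm-iommu-v2-ns";
            reg = <0x3000 0x1000>;
            interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
            qcom,ctx-asid = <3>;
        };
    };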
diff --git a/Documentation/devicetree/bindings/leds/common.yaml b/Documentation/devicetree/bindings/leds/common.yaml
index 58b492d00246..5fb7007f3618 100644
--- a/Documentation/devicetree/bindings/leds/common.yaml
+++ b/Documentation/devicetree/bindings/leds/common.yaml
@@ -83,8 +83,7 @@ properties:
- enum:
# LED will act as a back-light, controlled by the framebuffer system
- backlight
- # LED will turn on (but for leds-gpio see "default-state" property in
- # Documentation/devicetree/bindings/leds/leds-gpio.yaml)
+ # LED will turn on (see also "default-state" property)
- default-on
# LED "double" flashes at a load average based rate
- heartbeat
@@ -158,6 +157,18 @@ properties:
For flash LED controllers with configurable current this property is
mandatory for the LEDs in the non-flash modes (e.g. torch or indicator).
+ max-brightness:
+ description:
+ Normally, the maximum brightness is determined by the hardware, and this
+ property is not required. This property is used to set a software limit.
+      It could otherwise happen that an LED is driven so bright that it gets
+      damaged or causes damage, for example because of restrictions in a
+      specific system such as mounting conditions.
+      Note that this property is mainly used for PWM LEDs, where it is not
+      possible to map brightness to current. Drivers for other controllers
+      should use led-max-microamp.
+    $ref: /schemas/types.yaml#/definitions/uint32
+
panic-indicator:
description:
This property specifies that the LED should be used, if at all possible,
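Returning to the max-brightness property introduced above, a minimal pwm-leds sketch could cap the software brightness range like this; the PWM provider and the chosen limit are assumptions:

    #include <dt-bindings/leds/common.h>

    led-controller {
        compatible = "pwm-leds";

        led-1 {
            function = LED_FUNCTION_STATUS;
            color = <LED_COLOR_ID_GREEN>;
            pwms = <&pwm 0 1000000>;    /* assumed PWM provider */
            /* Software limit below the hardware-defined maximum */
            max-brightness = <127>;
        };
    };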
diff --git a/Documentation/devicetree/bindings/leds/leds-an30259a.txt b/Documentation/devicetree/bindings/leds/leds-an30259a.txt
deleted file mode 100644
index cbd833906b2b..000000000000
--- a/Documentation/devicetree/bindings/leds/leds-an30259a.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-* Panasonic AN30259A 3-channel LED driver
-
-The AN30259A is a LED controller capable of driving three LEDs independently. It supports
-constant current output and sloping current output modes. The chip is connected over I2C.
-
-Required properties:
- - compatible: Must be "panasonic,an30259a".
- - reg: I2C slave address.
- - #address-cells: Must be 1.
- - #size-cells: Must be 0.
-
-Each LED is represented as a sub-node of the panasonic,an30259a node.
-
-Required sub-node properties:
- - reg: Pin that the LED is connected to. Must be 1, 2, or 3.
-
-Optional sub-node properties:
- - function :
- see Documentation/devicetree/bindings/leds/common.txt
- - color :
- see Documentation/devicetree/bindings/leds/common.txt
- - label :
- see Documentation/devicetree/bindings/leds/common.txt (deprecated)
- - linux,default-trigger :
- see Documentation/devicetree/bindings/leds/common.txt
-
-Example:
-
-#include <dt-bindings/leds/common.h>
-
-led-controller@30 {
- compatible = "panasonic,an30259a";
- reg = <0x30>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- led@1 {
- reg = <1>;
- linux,default-trigger = "heartbeat";
- function = LED_FUNCTION_INDICATOR;
- color = <LED_COLOR_ID_RED>;
- };
-
- led@2 {
- reg = <2>;
- function = LED_FUNCTION_INDICATOR;
- color = <LED_COLOR_ID_GREEN>;
- };
-
- led@3 {
- reg = <3>;
- function = LED_FUNCTION_INDICATOR;
- color = <LED_COLOR_ID_BLUE>;
- };
-};
diff --git a/Documentation/devicetree/bindings/leds/leds-aw2013.yaml b/Documentation/devicetree/bindings/leds/leds-aw2013.yaml
index 08f3e1cfc1b1..26238446f2bd 100644
--- a/Documentation/devicetree/bindings/leds/leds-aw2013.yaml
+++ b/Documentation/devicetree/bindings/leds/leds-aw2013.yaml
@@ -20,9 +20,20 @@ properties:
reg:
maxItems: 1
+ interrupts:
+ maxItems: 1
+ description: Open-drain, low active interrupt pin "INTN".
+ Used to report completion of operations (power up, LED breath effects).
+
vcc-supply:
description: Regulator providing power to the "VCC" pin.
+ vio-supply:
+ description: Regulator providing power for pull-up of the I/O lines.
+ "VIO1" in the typical application circuit example of the datasheet.
+ Note that this regulator does not directly connect to AW2013, but is
+ needed for the correct operation of the interrupt and I2C lines.
+
"#address-cells":
const: 1
@@ -52,6 +63,7 @@ additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/leds/common.h>
i2c {
@@ -61,6 +73,7 @@ examples:
led-controller@45 {
compatible = "awinic,aw2013";
reg = <0x45>;
+ interrupts = <42 IRQ_TYPE_LEVEL_LOW>;
#address-cells = <1>;
#size-cells = <0>;
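The example hunk above only gains the interrupts line; a fuller hedged fragment with both new supply properties could look like this, with regulator labels assumed:

    #include <dt-bindings/interrupt-controller/irq.h>

    led-controller@45 {
        compatible = "awinic,aw2013";
        reg = <0x45>;
        interrupt-parent = <&gpio1>;
        interrupts = <42 IRQ_TYPE_LEVEL_LOW>;
        vcc-supply = <&reg_vcc3v3>;
        /* Rail pulling up the INTN and I2C lines ("VIO1" in the datasheet) */
        vio-supply = <&reg_vio1v8>;
        #address-cells = <1>;
        #size-cells = <0>;
    };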
diff --git a/Documentation/devicetree/bindings/leds/leds-group-multicolor.yaml b/Documentation/devicetree/bindings/leds/leds-group-multicolor.yaml
new file mode 100644
index 000000000000..8ed059a5a724
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-group-multicolor.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/leds-group-multicolor.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Multi-color LED built with monochromatic LEDs
+
+maintainers:
+ - Jean-Jacques Hiblot <jjhiblot@traphandler.com>
+
+description: |
+ This driver combines several monochromatic LEDs into one multi-color
+ LED using the multicolor LED class.
+
+properties:
+ compatible:
+ const: leds-group-multicolor
+
+ leds:
+ description:
+      An array of monochromatic LEDs.
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+
+required:
+ - leds
+
+allOf:
+ - $ref: leds-class-multicolor.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/leds/common.h>
+
+ monochromatic-leds {
+ compatible = "gpio-leds";
+
+ led0: led-0 {
+ gpios = <&mcu_pio 0 GPIO_ACTIVE_LOW>;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led1: led-1 {
+ gpios = <&mcu_pio 1 GPIO_ACTIVE_HIGH>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led2: led-2 {
+ gpios = <&mcu_pio 2 GPIO_ACTIVE_HIGH>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+ };
+
+ multi-led {
+ compatible = "leds-group-multicolor";
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_INDICATOR;
+ leds = <&led0>, <&led1>, <&led2>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/leds/nxp,pca953x.yaml b/Documentation/devicetree/bindings/leds/nxp,pca953x.yaml
index edf6f55df685..9610bca57dd5 100644
--- a/Documentation/devicetree/bindings/leds/nxp,pca953x.yaml
+++ b/Documentation/devicetree/bindings/leds/nxp,pca953x.yaml
@@ -29,6 +29,10 @@ properties:
gpio-controller: true
+ gpio-line-names:
+ minItems: 1
+ maxItems: 16
+
'#gpio-cells':
const: 2
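For illustration, the new gpio-line-names property could name the pins used as GPIOs; the PCA9532 variant, address and line names here are assumptions (check the binding's compatible list):

    led-controller@62 {
        compatible = "nxp,pca9532";         /* assumed variant */
        reg = <0x62>;
        gpio-controller;
        #gpio-cells = <2>;
        /* One entry per pin that is used as a GPIO (up to 16) */
        gpio-line-names = "fan-enable", "mux-select";
    };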
diff --git a/Documentation/devicetree/bindings/leds/nxp,pca995x.yaml b/Documentation/devicetree/bindings/leds/nxp,pca995x.yaml
new file mode 100644
index 000000000000..654915c1f687
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/nxp,pca995x.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/nxp,pca995x.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP PCA995x LED controllers
+
+maintainers:
+ - Isai Gaspar <isaiezequiel.gaspar@nxp.com>
+ - Marek Vasut <marex@denx.de>
+
+description:
+ The NXP PCA9952/PCA9955B are programmable LED controllers connected via I2C
+ that can drive 16 separate lines. Each of them can be individually switched
+ on and off, and brightness can be controlled via individual PWM.
+
+ Datasheets are available at
+ https://www.nxp.com/docs/en/data-sheet/PCA9952_PCA9955.pdf
+ https://www.nxp.com/docs/en/data-sheet/PCA9955B.pdf
+
+properties:
+ compatible:
+ enum:
+ - nxp,pca9952
+ - nxp,pca9955b
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^led@[0-9a-f]+$":
+ type: object
+ $ref: common.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ minimum: 0
+ maximum: 15
+
+ required:
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/common.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led-controller@1 {
+ compatible = "nxp,pca9955b";
+ reg = <0x01>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0x0>;
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_POWER;
+ };
+
+ led@2 {
+ reg = <0x2>;
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_STATUS;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/leds/panasonic,an30259a.yaml b/Documentation/devicetree/bindings/leds/panasonic,an30259a.yaml
new file mode 100644
index 000000000000..e918dceea082
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/panasonic,an30259a.yaml
@@ -0,0 +1,84 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/panasonic,an30259a.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Panasonic AN30259A 3-channel LED controller
+
+maintainers:
+ - Iskren Chernev <me@iskren.info>
+
+description:
+ The AN30259A is a LED controller capable of driving three LEDs independently.
+ It supports constant current output and sloping current output modes. The chip
+ is connected over I2C.
+
+properties:
+ compatible:
+ const: panasonic,an30259a
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^led@[1-3]$":
+ $ref: common.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ reg:
+ enum: [ 1, 2, 3 ]
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/common.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led-controller@30 {
+ compatible = "panasonic,an30259a";
+ reg = <0x30>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@1 {
+ reg = <1>;
+ linux,default-trigger = "heartbeat";
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_RED>;
+ };
+
+ led@2 {
+ reg = <2>;
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@3 {
+ reg = <3>;
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/leds/rohm,bd2606mvv.yaml b/Documentation/devicetree/bindings/leds/rohm,bd2606mvv.yaml
index 14700a2e5fea..44dd91aa239d 100644
--- a/Documentation/devicetree/bindings/leds/rohm,bd2606mvv.yaml
+++ b/Documentation/devicetree/bindings/leds/rohm,bd2606mvv.yaml
@@ -35,7 +35,7 @@ properties:
description: GPIO pin to enable/disable the device.
patternProperties:
- "^led@[0-6]$":
+ "^led@[0-5]$":
type: object
$ref: common.yaml#
unevaluatedProperties: false
@@ -43,7 +43,7 @@ patternProperties:
properties:
reg:
minimum: 0
- maximum: 6
+ maximum: 5
required:
- reg
diff --git a/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml b/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml
index 58f0d94c6d71..b7a3ef76cbf4 100644
--- a/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml
+++ b/Documentation/devicetree/bindings/leds/rohm,bd71828-leds.yaml
@@ -18,8 +18,6 @@ description: |
The device has two LED outputs referred as GRNLED and AMBLED in data-sheet.
-select: false
-
properties:
compatible:
const: rohm,bd71828-leds
diff --git a/Documentation/devicetree/bindings/media/amphion,vpu.yaml b/Documentation/devicetree/bindings/media/amphion,vpu.yaml
index a9d80eaeeeb6..c0d83d755239 100644
--- a/Documentation/devicetree/bindings/media/amphion,vpu.yaml
+++ b/Documentation/devicetree/bindings/media/amphion,vpu.yaml
@@ -47,7 +47,7 @@ patternProperties:
$ref: ../mailbox/fsl,mu.yaml#
- "^vpu_core@[0-9a-f]+$":
+ "^vpu-core@[0-9a-f]+$":
description:
Each core correspond a decoder or encoder, need to configure them
separately. NXP i.MX8QM SoC has one decoder and two encoder, i.MX8QXP SoC
@@ -143,7 +143,7 @@ examples:
power-domains = <&pd IMX_SC_R_VPU_MU_2>;
};
- vpu_core0: vpu_core@2d080000 {
+ vpu_core0: vpu-core@2d080000 {
compatible = "nxp,imx8q-vpu-decoder";
reg = <0x2d080000 0x10000>;
power-domains = <&pd IMX_SC_R_VPU_DEC_0>;
@@ -154,7 +154,7 @@ examples:
memory-region = <&decoder_boot>, <&decoder_rpc>;
};
- vpu_core1: vpu_core@2d090000 {
+ vpu_core1: vpu-core@2d090000 {
compatible = "nxp,imx8q-vpu-encoder";
reg = <0x2d090000 0x10000>;
power-domains = <&pd IMX_SC_R_VPU_ENC_0>;
@@ -165,7 +165,7 @@ examples:
memory-region = <&encoder1_boot>, <&encoder1_rpc>;
};
- vpu_core2: vpu_core@2d0a0000 {
+ vpu_core2: vpu-core@2d0a0000 {
reg = <0x2d0a0000 0x10000>;
compatible = "nxp,imx8q-vpu-encoder";
power-domains = <&pd IMX_SC_R_VPU_ENC_1>;
diff --git a/Documentation/devicetree/bindings/media/cdns,csi2rx.txt b/Documentation/devicetree/bindings/media/cdns,csi2rx.txt
deleted file mode 100644
index 6b02a0657ad9..000000000000
--- a/Documentation/devicetree/bindings/media/cdns,csi2rx.txt
+++ /dev/null
@@ -1,100 +0,0 @@
-Cadence MIPI-CSI2 RX controller
-===============================
-
-The Cadence MIPI-CSI2 RX controller is a CSI-2 bridge supporting up to 4 CSI
-lanes in input, and 4 different pixel streams in output.
-
-Required properties:
- - compatible: must be set to "cdns,csi2rx" and an SoC-specific compatible
- - reg: base address and size of the memory mapped region
- - clocks: phandles to the clocks driving the controller
- - clock-names: must contain:
- * sys_clk: main clock
- * p_clk: register bank clock
- * pixel_if[0-3]_clk: pixel stream output clock, one for each stream
- implemented in hardware, between 0 and 3
-
-Optional properties:
- - phys: phandle to the external D-PHY, phy-names must be provided
- - phy-names: must contain "dphy", if the implementation uses an
- external D-PHY
-
-Required subnodes:
- - ports: A ports node with one port child node per device input and output
- port, in accordance with the video interface bindings defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt. The
- port nodes are numbered as follows:
-
- Port Description
- -----------------------------
- 0 CSI-2 input
- 1 Stream 0 output
- 2 Stream 1 output
- 3 Stream 2 output
- 4 Stream 3 output
-
- The stream output port nodes are optional if they are not
- connected to anything at the hardware level or implemented
- in the design.Since there is only one endpoint per port,
- the endpoints are not numbered.
-
-
-Example:
-
-csi2rx: csi-bridge@0d060000 {
- compatible = "cdns,csi2rx";
- reg = <0x0d060000 0x1000>;
- clocks = <&byteclock>, <&byteclock>
- <&coreclock>, <&coreclock>,
- <&coreclock>, <&coreclock>;
- clock-names = "sys_clk", "p_clk",
- "pixel_if0_clk", "pixel_if1_clk",
- "pixel_if2_clk", "pixel_if3_clk";
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
-
- csi2rx_in_sensor: endpoint {
- remote-endpoint = <&sensor_out_csi2rx>;
- clock-lanes = <0>;
- data-lanes = <1 2>;
- };
- };
-
- port@1 {
- reg = <1>;
-
- csi2rx_out_grabber0: endpoint {
- remote-endpoint = <&grabber0_in_csi2rx>;
- };
- };
-
- port@2 {
- reg = <2>;
-
- csi2rx_out_grabber1: endpoint {
- remote-endpoint = <&grabber1_in_csi2rx>;
- };
- };
-
- port@3 {
- reg = <3>;
-
- csi2rx_out_grabber2: endpoint {
- remote-endpoint = <&grabber2_in_csi2rx>;
- };
- };
-
- port@4 {
- reg = <4>;
-
- csi2rx_out_grabber3: endpoint {
- remote-endpoint = <&grabber3_in_csi2rx>;
- };
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/media/cdns,csi2rx.yaml b/Documentation/devicetree/bindings/media/cdns,csi2rx.yaml
new file mode 100644
index 000000000000..30a335b10762
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/cdns,csi2rx.yaml
@@ -0,0 +1,201 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/cdns,csi2rx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cadence MIPI-CSI2 RX controller
+
+maintainers:
+ - Maxime Ripard <mripard@kernel.org>
+
+description:
+ The Cadence MIPI-CSI2 RX controller is a CSI-2 bridge supporting up to 4 CSI
+  lanes on its input and up to 4 different pixel streams on its output.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - starfive,jh7110-csi2rx
+ - const: cdns,csi2rx
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: CSI2Rx system clock
+ - description: Gated Register bank clock for APB interface
+ - description: pixel Clock for Stream interface 0
+ - description: pixel Clock for Stream interface 1
+ - description: pixel Clock for Stream interface 2
+ - description: pixel Clock for Stream interface 3
+
+ clock-names:
+ items:
+ - const: sys_clk
+ - const: p_clk
+ - const: pixel_if0_clk
+ - const: pixel_if1_clk
+ - const: pixel_if2_clk
+ - const: pixel_if3_clk
+
+ resets:
+ items:
+ - description: CSI2Rx system reset
+ - description: Gated Register bank reset for APB interface
+ - description: pixel reset for Stream interface 0
+ - description: pixel reset for Stream interface 1
+ - description: pixel reset for Stream interface 2
+ - description: pixel reset for Stream interface 3
+
+ reset-names:
+ items:
+ - const: sys
+ - const: reg_bank
+ - const: pixel_if0
+ - const: pixel_if1
+ - const: pixel_if2
+ - const: pixel_if3
+
+ phys:
+ maxItems: 1
+ description: MIPI D-PHY
+
+ phy-names:
+ items:
+ - const: dphy
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description:
+ Input port node, single endpoint describing the CSI-2 transmitter.
+
+ properties:
+ endpoint:
+ $ref: video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ bus-type:
+ const: 4
+
+ clock-lanes:
+ const: 0
+
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+ items:
+ maximum: 4
+
+ required:
+ - data-lanes
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Stream 0 Output port node
+
+ port@2:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Stream 1 Output port node
+
+ port@3:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Stream 2 Output port node
+
+ port@4:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Stream 3 Output port node
+
+ required:
+ - port@0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ csi@d060000 {
+ compatible = "starfive,jh7110-csi2rx", "cdns,csi2rx";
+ reg = <0x0d060000 0x1000>;
+ clocks = <&byteclock 7>, <&byteclock 6>,
+ <&coreclock 8>, <&coreclock 9>,
+ <&coreclock 10>, <&coreclock 11>;
+ clock-names = "sys_clk", "p_clk",
+ "pixel_if0_clk", "pixel_if1_clk",
+ "pixel_if2_clk", "pixel_if3_clk";
+ resets = <&bytereset 9>, <&bytereset 4>,
+ <&corereset 5>, <&corereset 6>,
+ <&corereset 7>, <&corereset 8>;
+ reset-names = "sys", "reg_bank",
+ "pixel_if0", "pixel_if1",
+ "pixel_if2", "pixel_if3";
+ phys = <&csi_phy>;
+ phy-names = "dphy";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ csi2rx_in_sensor: endpoint {
+ remote-endpoint = <&sensor_out_csi2rx>;
+ clock-lanes = <0>;
+ data-lanes = <1 2>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ csi2rx_out_grabber0: endpoint {
+ remote-endpoint = <&grabber0_in_csi2rx>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+
+ csi2rx_out_grabber1: endpoint {
+ remote-endpoint = <&grabber1_in_csi2rx>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+
+ csi2rx_out_grabber2: endpoint {
+ remote-endpoint = <&grabber2_in_csi2rx>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+
+ csi2rx_out_grabber3: endpoint {
+ remote-endpoint = <&grabber3_in_csi2rx>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/media/cec/nvidia,tegra114-cec.yaml b/Documentation/devicetree/bindings/media/cec/nvidia,tegra114-cec.yaml
index 369c48fd9bf9..a6b73498bc21 100644
--- a/Documentation/devicetree/bindings/media/cec/nvidia,tegra114-cec.yaml
+++ b/Documentation/devicetree/bindings/media/cec/nvidia,tegra114-cec.yaml
@@ -53,6 +53,5 @@ examples:
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&tegra_car TEGRA124_CLK_CEC>;
clock-names = "cec";
- status = "disabled";
hdmi-phandle = <&hdmi>;
};
diff --git a/Documentation/devicetree/bindings/media/i2c/ov5695.txt b/Documentation/devicetree/bindings/media/i2c/ov5695.txt
deleted file mode 100644
index 640a63717d96..000000000000
--- a/Documentation/devicetree/bindings/media/i2c/ov5695.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-* Omnivision OV5695 MIPI CSI-2 sensor
-
-Required Properties:
-- compatible: shall be "ovti,ov5695"
-- clocks: reference to the xvclk input clock
-- clock-names: shall be "xvclk"
-- avdd-supply: Analog voltage supply, 2.8 volts
-- dovdd-supply: Digital I/O voltage supply, 1.8 volts
-- dvdd-supply: Digital core voltage supply, 1.2 volts
-- reset-gpios: Low active reset gpio
-
-The device node shall contain one 'port' child node with an
-'endpoint' subnode for its digital output video port,
-in accordance with the video interface bindings defined in
-Documentation/devicetree/bindings/media/video-interfaces.txt.
-The endpoint optional property 'data-lanes' shall be "<1 2>".
-
-Example:
-&i2c7 {
- ov5695: camera-sensor@36 {
- compatible = "ovti,ov5695";
- reg = <0x36>;
- pinctrl-names = "default";
- pinctrl-0 = <&clk_24m_cam>;
-
- clocks = <&cru SCLK_TESTCLKOUT1>;
- clock-names = "xvclk";
-
- avdd-supply = <&pp2800_cam>;
- dovdd-supply = <&pp1800>;
- dvdd-supply = <&pp1250_cam>;
- reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
-
- port {
- wcam_out: endpoint {
- remote-endpoint = <&mipi_in_wcam>;
- data-lanes = <1 2>;
- };
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/media/i2c/ov7251.txt b/Documentation/devicetree/bindings/media/i2c/ov7251.txt
deleted file mode 100644
index 8281151f7493..000000000000
--- a/Documentation/devicetree/bindings/media/i2c/ov7251.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-* Omnivision 1/7.5-Inch B&W VGA CMOS Digital Image Sensor
-
-The Omnivision OV7251 is a 1/7.5-Inch CMOS active pixel digital image sensor
-with an active array size of 640H x 480V. It is programmable through a serial
-I2C interface.
-
-Required Properties:
-- compatible: Value should be "ovti,ov7251".
-- clocks: Reference to the xclk clock.
-- clock-names: Should be "xclk".
-- clock-frequency: Frequency of the xclk clock.
-- enable-gpios: Chip enable GPIO. Polarity is GPIO_ACTIVE_HIGH. This corresponds
- to the hardware pin XSHUTDOWN which is physically active low.
-- vdddo-supply: Chip digital IO regulator.
-- vdda-supply: Chip analog regulator.
-- vddd-supply: Chip digital core regulator.
-
-The device node shall contain one 'port' child node with a single 'endpoint'
-subnode for its digital output video port, in accordance with the video
-interface bindings defined in
-Documentation/devicetree/bindings/media/video-interfaces.txt.
-
-Example:
-
- &i2c1 {
- ...
-
- ov7251: camera-sensor@60 {
- compatible = "ovti,ov7251";
- reg = <0x60>;
-
- enable-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&camera_bw_default>;
-
- clocks = <&clks 200>;
- clock-names = "xclk";
- clock-frequency = <24000000>;
-
- vdddo-supply = <&camera_dovdd_1v8>;
- vdda-supply = <&camera_avdd_2v8>;
- vddd-supply = <&camera_dvdd_1v2>;
-
- port {
- ov7251_ep: endpoint {
- clock-lanes = <1>;
- data-lanes = <0>;
- remote-endpoint = <&csi0_ep>;
- };
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov5693.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov5693.yaml
index 359dc08440a8..6829a4aadd22 100644
--- a/Documentation/devicetree/bindings/media/i2c/ovti,ov5693.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov5693.yaml
@@ -5,26 +5,41 @@
$id: http://devicetree.org/schemas/media/i2c/ovti,ov5693.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Omnivision OV5693 CMOS Sensor
+title: Omnivision OV5693/OV5695 CMOS Sensors
maintainers:
- Tommaso Merciai <tommaso.merciai@amarulasolutions.com>
description: |
- The Omnivision OV5693 is a high performance, 1/4-inch, 5 megapixel, CMOS
- image sensor that delivers 2592x1944 at 30fps. It provides full-frame,
+ The Omnivision OV5693/OV5695 are high performance, 1/4-inch, 5 megapixel, CMOS
+  image sensors that deliver 2592x1944 at 30fps. They provide full-frame,
sub-sampled, and windowed 10-bit MIPI images in various formats via the
Serial Camera Control Bus (SCCB) interface.
- OV5693 is controlled via I2C and two-wire Serial Camera Control Bus (SCCB).
- The sensor output is available via CSI-2 serial data output (up to 2-lane).
+ OV5693/OV5695 are controlled via I2C and two-wire Serial Camera Control Bus
+ (SCCB). The sensor output is available via CSI-2 serial data output (up to
+ 2-lane).
allOf:
- $ref: /schemas/media/video-interface-devices.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: ovti,ov5693
+ then:
+ properties:
+ port:
+ properties:
+ endpoint:
+ required:
+ - link-frequencies
properties:
compatible:
- const: ovti,ov5693
+ enum:
+ - ovti,ov5693
+ - ovti,ov5695
reg:
maxItems: 1
@@ -34,6 +49,9 @@ properties:
System input clock (aka XVCLK). From 6 to 27 MHz.
maxItems: 1
+ clock-names:
+ const: xvclk
+
dovdd-supply:
description:
Digital I/O voltage supply, 1.8V.
@@ -72,7 +90,6 @@ properties:
required:
- data-lanes
- - link-frequencies
required:
- compatible
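Putting the relaxed requirement together with the new clock-names property, an OV5695 node based on the example from the removed ov5695.txt might look like this (pinctrl omitted); note that link-frequencies stays mandatory only for the OV5693:

    #include <dt-bindings/gpio/gpio.h>

    camera-sensor@36 {
        compatible = "ovti,ov5695";
        reg = <0x36>;
        clocks = <&cru SCLK_TESTCLKOUT1>;
        clock-names = "xvclk";
        avdd-supply = <&pp2800_cam>;
        dovdd-supply = <&pp1800>;
        dvdd-supply = <&pp1250_cam>;
        reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;

        port {
            wcam_out: endpoint {
                remote-endpoint = <&mipi_in_wcam>;
                data-lanes = <1 2>;
                /* link-frequencies is only required for ov5693 */
            };
        };
    };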
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov7251.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov7251.yaml
new file mode 100644
index 000000000000..2e5187acbbb8
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ovti,ov7251.yaml
@@ -0,0 +1,109 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/ovti,ov7251.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: OmniVision OV7251 Image Sensor
+
+description:
+ The Omnivision OV7251 is a 1/7.5-Inch CMOS active pixel digital image sensor
+ with an active array size of 640H x 480V. It is programmable through a serial
+ I2C interface.
+
+maintainers:
+ - Todor Tomov <todor.too@gmail.com>
+
+properties:
+ compatible:
+ const: ovti,ov7251
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ description: XCLK Input Clock
+
+ clock-names:
+ const: xclk
+
+ clock-frequency:
+ description: Frequency of the xclk clock in Hz.
+
+ vdda-supply:
+ description: Analog voltage supply, 2.8 volts
+
+ vddd-supply:
+ description: Digital core voltage supply, 1.2 volts
+
+ vdddo-supply:
+ description: Digital I/O voltage supply, 1.8 volts
+
+ enable-gpios:
+ maxItems: 1
+ description:
+ Reference to the GPIO connected to the XSHUTDOWN pin, if any. Polarity
+ is GPIO_ACTIVE_HIGH.
+
+ port:
+ description: Digital Output Port
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ additionalProperties: false
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ clock-lanes:
+ maximum: 1
+
+ data-lanes:
+ maxItems: 1
+
+ link-frequencies: true
+
+ required:
+ - data-lanes
+ - link-frequencies
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - vdddo-supply
+ - vdda-supply
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ camera@3c {
+ compatible = "ovti,ov7251";
+ reg = <0x3c>;
+ clocks = <&clks 1>;
+ clock-frequency = <24000000>;
+ vdddo-supply = <&ov7251_vdddo_1v8>;
+ vdda-supply = <&ov7251_vdda_2v8>;
+ vddd-supply = <&ov7251_vddd_1v5>;
+ enable-gpios = <&gpio1 19 GPIO_ACTIVE_HIGH>;
+
+ port {
+ ov7251_ep: endpoint {
+ remote-endpoint = <&csi0_ep>;
+ clock-lanes = <1>;
+ data-lanes = <0>;
+ link-frequencies = /bits/ 64 <240000000 319200000>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml b/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml
index 19a39d753aad..b68141264c0e 100644
--- a/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml
+++ b/Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml
@@ -143,7 +143,6 @@ examples:
mipid02: csi2rx@14 {
compatible = "st,st-mipid02";
reg = <0x14>;
- status = "okay";
clocks = <&clk_ext_camera_12>;
clock-names = "xclk";
VDDE-supply = <&vdd>;
diff --git a/Documentation/devicetree/bindings/media/i2c/ti,ds90ub913.yaml b/Documentation/devicetree/bindings/media/i2c/ti,ds90ub913.yaml
new file mode 100644
index 000000000000..f6612bb0f667
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ti,ds90ub913.yaml
@@ -0,0 +1,133 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/ti,ds90ub913.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments DS90UB913 FPD-Link III Serializer
+
+maintainers:
+ - Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+description:
+ The TI DS90UB913 is an FPD-Link III video serializer for parallel video.
+
+properties:
+ compatible:
+ enum:
+ - ti,ds90ub913a-q1
+
+ '#gpio-cells':
+ const: 2
+ description:
+ First cell is the GPO pin number, second cell is the flags. The GPO pin
+ number must be in range of [0, 3]. Note that GPOs 2 and 3 are not
+ available in external oscillator mode.
+
+ gpio-controller: true
+
+ clocks:
+ maxItems: 1
+ description:
+ Reference clock connected to the CLKIN pin.
+
+ clock-names:
+ items:
+ - const: clkin
+
+ '#clock-cells':
+ const: 0
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: Parallel input port
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ required:
+ - pclk-sample
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ unevaluatedProperties: false
+ description: FPD-Link III output port
+
+ required:
+ - port@0
+ - port@1
+
+ i2c:
+ $ref: /schemas/i2c/i2c-controller.yaml#
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - '#gpio-cells'
+ - gpio-controller
+ - '#clock-cells'
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ serializer {
+ compatible = "ti,ds90ub913a-q1";
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ clocks = <&clk_cam_48M>;
+ clock-names = "clkin";
+
+ #clock-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ ub913_in: endpoint {
+ remote-endpoint = <&sensor_out>;
+ pclk-sample = <1>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ endpoint {
+ remote-endpoint = <&deser_fpd_in>;
+ };
+ };
+ };
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sensor@48 {
+ compatible = "aptina,mt9v111";
+ reg = <0x48>;
+
+ clocks = <&fixed_clock>;
+
+ port {
+ sensor_out: endpoint {
+ remote-endpoint = <&ub913_in>;
+ };
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/media/i2c/ti,ds90ub953.yaml b/Documentation/devicetree/bindings/media/i2c/ti,ds90ub953.yaml
new file mode 100644
index 000000000000..2030366994d1
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ti,ds90ub953.yaml
@@ -0,0 +1,134 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/ti,ds90ub953.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments DS90UB953 FPD-Link III Serializer
+
+maintainers:
+ - Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+description:
+ The TI DS90UB953 is an FPD-Link III video serializer for MIPI CSI-2.
+
+properties:
+ compatible:
+ enum:
+ - ti,ds90ub953-q1
+ - ti,ds90ub971-q1
+
+ '#gpio-cells':
+ const: 2
+ description:
+ First cell is the GPIO pin number, second cell is the flags. The GPIO pin
+    number must be in the range [0, 3].
+
+ gpio-controller: true
+
+ clocks:
+ maxItems: 1
+ description:
+ Reference clock connected to the CLKIN pin.
+
+ clock-names:
+ items:
+ - const: clkin
+
+ '#clock-cells':
+ const: 0
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: CSI-2 input port
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ required:
+ - data-lanes
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ unevaluatedProperties: false
+ description: FPD-Link III output port
+
+ required:
+ - port@0
+ - port@1
+
+ i2c:
+ $ref: /schemas/i2c/i2c-controller.yaml#
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - '#gpio-cells'
+ - gpio-controller
+ - '#clock-cells'
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ serializer {
+ compatible = "ti,ds90ub953-q1";
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ #clock-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ ub953_in: endpoint {
+ clock-lanes = <0>;
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&sensor_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ endpoint {
+ remote-endpoint = <&deser_fpd_in>;
+ };
+ };
+ };
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sensor@1a {
+ compatible = "sony,imx274";
+ reg = <0x1a>;
+
+ reset-gpios = <&serializer 0 GPIO_ACTIVE_LOW>;
+
+ clocks = <&serializer>;
+ clock-names = "inck";
+
+ port {
+ sensor_out: endpoint {
+ remote-endpoint = <&ub953_in>;
+ };
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/media/i2c/ti,ds90ub960.yaml b/Documentation/devicetree/bindings/media/i2c/ti,ds90ub960.yaml
new file mode 100644
index 000000000000..289737721c2c
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ti,ds90ub960.yaml
@@ -0,0 +1,427 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/ti,ds90ub960.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments DS90UB9XX Family FPD-Link Deserializer Hubs
+
+maintainers:
+ - Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+description:
+ The TI DS90UB9XX devices are FPD-Link video deserializers with I2C and GPIO
+ forwarding.
+
+allOf:
+ - $ref: /schemas/i2c/i2c-atr.yaml#
+
+properties:
+ compatible:
+ enum:
+ - ti,ds90ub960-q1
+ - ti,ds90ub9702-q1
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+ description:
+ Reference clock connected to the REFCLK pin.
+
+ clock-names:
+ items:
+ - const: refclk
+
+ powerdown-gpios:
+ maxItems: 1
+ description:
+ Specifier for the GPIO connected to the PDB pin.
+
+ i2c-alias-pool:
+ minItems: 1
+ maxItems: 32
+
+ links:
+ type: object
+ additionalProperties: false
+
+ properties:
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ ti,manual-strobe:
+ type: boolean
+ description:
+ Enable manual strobe position and EQ level
+
+ patternProperties:
+ '^link@[0-3]$':
+ type: object
+ additionalProperties: false
+ properties:
+ reg:
+ description: The link number
+ maxItems: 1
+
+ i2c-alias:
+ description:
+ The I2C address used for the serializer. Transactions to this
+ address on the I2C bus where the deserializer resides are
+ forwarded to the serializer.
+
+ ti,rx-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 0 # RAW10
+ - 1 # RAW12 HF
+ - 2 # RAW12 LF
+ - 3 # CSI2 SYNC
+ - 4 # CSI2 NON-SYNC
+ description:
+ FPD-Link Input Mode. This should reflect the hardware and the
+ default mode of the connected device.
+
+ ti,cdr-mode:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 0 # FPD-Link III
+ - 1 # FPD-Link IV
+ description:
+ FPD-Link CDR Mode. This should reflect the hardware and the
+ default mode of the connected device.
+
+ ti,strobe-pos:
+ $ref: /schemas/types.yaml#/definitions/int32
+ minimum: -13
+ maximum: 13
+ description: Manual strobe position
+
+ ti,eq-level:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ maximum: 14
+ description: Manual EQ level
+
+ serializer:
+ type: object
+ description: FPD-Link Serializer node
+
+ required:
+ - reg
+ - i2c-alias
+ - ti,rx-mode
+ - serializer
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: FPD-Link input 0
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+ description:
+ Endpoint for FPD-Link port. If the RX mode for this port is RAW,
+ hsync-active and vsync-active must be defined.
+
+ port@1:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: FPD-Link input 1
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+ description:
+ Endpoint for FPD-Link port. If the RX mode for this port is RAW,
+ hsync-active and vsync-active must be defined.
+
+ port@2:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: FPD-Link input 2
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+ description:
+ Endpoint for FPD-Link port. If the RX mode for this port is RAW,
+ hsync-active and vsync-active must be defined.
+
+ port@3:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: FPD-Link input 3
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+ description:
+ Endpoint for FPD-Link port. If the RX mode for this port is RAW,
+ hsync-active and vsync-active must be defined.
+
+ port@4:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: CSI-2 Output 0
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+ link-frequencies:
+ maxItems: 1
+
+ required:
+ - data-lanes
+ - link-frequencies
+
+ port@5:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: CSI-2 Output 1
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes:
+ minItems: 1
+ maxItems: 4
+ link-frequencies:
+ maxItems: 1
+
+ required:
+ - data-lanes
+ - link-frequencies
+
+ required:
+ - port@0
+ - port@1
+ - port@2
+ - port@3
+ - port@4
+ - port@5
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - ports
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c {
+ clock-frequency = <400000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ deser@3d {
+ compatible = "ti,ds90ub960-q1";
+ reg = <0x3d>;
+
+ clock-names = "refclk";
+ clocks = <&fixed_clock>;
+
+ powerdown-gpios = <&pca9555 7 GPIO_ACTIVE_LOW>;
+
+ i2c-alias-pool = <0x4a 0x4b 0x4c 0x4d 0x4e 0x4f>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Port 0, Camera 0 */
+ port@0 {
+ reg = <0>;
+
+ ub960_fpd3_1_in: endpoint {
+ remote-endpoint = <&ub953_1_out>;
+ };
+ };
+
+ /* Port 1, Camera 1 */
+ port@1 {
+ reg = <1>;
+
+ ub960_fpd3_2_in: endpoint {
+ remote-endpoint = <&ub913_2_out>;
+ hsync-active = <0>;
+ vsync-active = <1>;
+ };
+ };
+
+ /* Port 2, unconnected */
+ port@2 {
+ reg = <2>;
+ };
+
+ /* Port 3, unconnected */
+ port@3 {
+ reg = <3>;
+ };
+
+ /* Port 4, CSI-2 TX */
+ port@4 {
+ reg = <4>;
+ ds90ub960_0_csi_out: endpoint {
+ data-lanes = <1 2 3 4>;
+ link-frequencies = /bits/ 64 <800000000>;
+ remote-endpoint = <&csi2_phy0>;
+ };
+ };
+
+ /* Port 5, unconnected */
+ port@5 {
+ reg = <5>;
+ };
+ };
+
+ links {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* Link 0 has DS90UB953 serializer and IMX274 sensor */
+
+ link@0 {
+ reg = <0>;
+ i2c-alias = <0x44>;
+
+ ti,rx-mode = <3>;
+
+ serializer1: serializer {
+ compatible = "ti,ds90ub953-q1";
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ #clock-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ ub953_1_in: endpoint {
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&sensor_1_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ ub953_1_out: endpoint {
+ remote-endpoint = <&ub960_fpd3_1_in>;
+ };
+ };
+ };
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sensor@1a {
+ compatible = "sony,imx274";
+ reg = <0x1a>;
+
+ reset-gpios = <&serializer1 0 GPIO_ACTIVE_LOW>;
+
+ port {
+ sensor_1_out: endpoint {
+ remote-endpoint = <&ub953_1_in>;
+ };
+ };
+ };
+ };
+ };
+ }; /* End of link@0 */
+
+ /* Link 1 has DS90UB913 serializer and MT9V111 sensor */
+
+ link@1 {
+ reg = <1>;
+ i2c-alias = <0x45>;
+
+ ti,rx-mode = <0>;
+
+ serializer2: serializer {
+ compatible = "ti,ds90ub913a-q1";
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ clocks = <&clk_cam_48M>;
+ clock-names = "clkin";
+
+ #clock-cells = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ ub913_2_in: endpoint {
+ remote-endpoint = <&sensor_2_out>;
+ pclk-sample = <1>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ ub913_2_out: endpoint {
+ remote-endpoint = <&ub960_fpd3_2_in>;
+ };
+ };
+ };
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sensor@48 {
+ compatible = "aptina,mt9v111";
+ reg = <0x48>;
+
+ clocks = <&serializer2>;
+
+ port {
+ sensor_2_out: endpoint {
+ remote-endpoint = <&ub913_2_in>;
+ };
+ };
+ };
+ };
+ };
+ }; /* End of link@1 */
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml b/Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml
index fad59b486d5d..b401c67e3ba0 100644
--- a/Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml
+++ b/Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml
@@ -21,24 +21,33 @@ properties:
- mediatek,mt8183-vcodec-dec
reg:
- maxItems: 12
+ minItems: 11
+ maxItems: 11
+
+ reg-names:
+ items:
+ - const: misc
+ - const: ld
+ - const: top
+ - const: cm
+ - const: ad
+ - const: av
+ - const: pp
+ - const: hwd
+ - const: hwq
+ - const: hwb
+ - const: hwg
interrupts:
maxItems: 1
clocks:
+ minItems: 1
maxItems: 8
clock-names:
- items:
- - const: vcodecpll
- - const: univpll_d2
- - const: clk_cci400_sel
- - const: vdec_sel
- - const: vdecpll
- - const: vencpll
- - const: venc_lt_sel
- - const: vdec_bus_clk_src
+ minItems: 1
+ maxItems: 8
assigned-clocks: true
@@ -66,6 +75,10 @@ properties:
description:
Describes point to scp.
+ mediatek,vdecsys:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Phandle to the vdecsys syscon node.
+
required:
- compatible
- reg
@@ -73,8 +86,7 @@ required:
- clocks
- clock-names
- iommus
- - assigned-clocks
- - assigned-clock-parents
+ - mediatek,vdecsys
allOf:
- if:
@@ -88,6 +100,15 @@ allOf:
required:
- mediatek,scp
+ properties:
+ clocks:
+ minItems: 1
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: vdec
+
- if:
properties:
compatible:
@@ -99,6 +120,22 @@ allOf:
required:
- mediatek,vpu
+ properties:
+ clocks:
+ minItems: 8
+ maxItems: 8
+
+ clock-names:
+ items:
+ - const: vcodecpll
+ - const: univpll_d2
+ - const: clk_cci400_sel
+ - const: vdec_sel
+ - const: vdecpll
+ - const: vencpll
+ - const: venc_lt_sel
+ - const: vdec_bus_clk_src
+
additionalProperties: false
examples:
@@ -109,10 +146,9 @@ examples:
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/power/mt8173-power.h>
- vcodec_dec: vcodec@16000000 {
+ vcodec_dec: vcodec@16020000 {
compatible = "mediatek,mt8173-vcodec-dec";
- reg = <0x16000000 0x100>, /*VDEC_SYS*/
- <0x16020000 0x1000>, /*VDEC_MISC*/
+ reg = <0x16020000 0x1000>, /*VDEC_MISC*/
<0x16021000 0x800>, /*VDEC_LD*/
<0x16021800 0x800>, /*VDEC_TOP*/
<0x16022000 0x1000>, /*VDEC_CM*/
@@ -133,6 +169,7 @@ examples:
<&iommu M4U_PORT_HW_VDEC_VLD_EXT>,
<&iommu M4U_PORT_HW_VDEC_VLD2_EXT>;
mediatek,vpu = <&vpu>;
+ mediatek,vdecsys = <&vdecsys>;
power-domains = <&scpsys MT8173_POWER_DOMAIN_VDEC>;
clocks = <&apmixedsys CLK_APMIXED_VCODECPLL>,
<&topckgen CLK_TOP_UNIVPLL_D2>,
diff --git a/Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml b/Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml
index 6038b9b5ab36..e4665469a86c 100644
--- a/Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml
+++ b/Documentation/devicetree/bindings/media/nxp,imx8-isi.yaml
@@ -21,6 +21,7 @@ properties:
enum:
- fsl,imx8mn-isi
- fsl,imx8mp-isi
+ - fsl,imx93-isi
reg:
maxItems: 1
@@ -72,7 +73,9 @@ allOf:
properties:
compatible:
contains:
- const: fsl,imx8mn-isi
+ enum:
+ - fsl,imx8mn-isi
+ - fsl,imx93-isi
then:
properties:
interrupts:
diff --git a/Documentation/devicetree/bindings/media/rockchip-isp1.yaml b/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
index 0bad7e640148..e466dff8286d 100644
--- a/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
+++ b/Documentation/devicetree/bindings/media/rockchip-isp1.yaml
@@ -199,6 +199,7 @@ examples:
wcam: camera@36 {
compatible = "ovti,ov5695";
reg = <0x36>;
+ clocks = <&cru SCLK_TESTCLKOUT1>;
port {
wcam_out: endpoint {
diff --git a/Documentation/devicetree/bindings/mfd/allwinner,sun6i-a31-prcm.yaml b/Documentation/devicetree/bindings/mfd/allwinner,sun6i-a31-prcm.yaml
index cf94176fe1eb..8789e3639ff7 100644
--- a/Documentation/devicetree/bindings/mfd/allwinner,sun6i-a31-prcm.yaml
+++ b/Documentation/devicetree/bindings/mfd/allwinner,sun6i-a31-prcm.yaml
@@ -34,6 +34,9 @@ patternProperties:
- allwinner,sun6i-a31-clock-reset
- fixed-factor-clock
+ required:
+ - compatible
+
allOf:
- if:
properties:
@@ -55,25 +58,17 @@ patternProperties:
"#clock-cells":
const: 0
- # Already checked in the main schema
- compatible: true
-
clocks:
maxItems: 2
clock-output-names:
maxItems: 1
- phandle: true
-
required:
- "#clock-cells"
- - compatible
- clocks
- clock-output-names
- additionalProperties: false
-
- if:
properties:
compatible:
@@ -85,25 +80,17 @@ patternProperties:
"#clock-cells":
const: 0
- # Already checked in the main schema
- compatible: true
-
clocks:
maxItems: 1
clock-output-names:
maxItems: 1
- phandle: true
-
required:
- "#clock-cells"
- - compatible
- clocks
- clock-output-names
- additionalProperties: false
-
- if:
properties:
compatible:
@@ -119,9 +106,6 @@ patternProperties:
offset of the bit controlling this particular gate in
the register.
- # Already checked in the main schema
- compatible: true
-
clocks:
maxItems: 1
@@ -129,16 +113,11 @@ patternProperties:
minItems: 1
maxItems: 32
- phandle: true
-
required:
- "#clock-cells"
- - compatible
- clocks
- clock-output-names
- additionalProperties: false
-
- if:
properties:
compatible:
@@ -150,9 +129,6 @@ patternProperties:
"#clock-cells":
const: 0
- # Already checked in the main schema
- compatible: true
-
clocks:
maxItems: 4
description: >
@@ -162,16 +138,11 @@ patternProperties:
clock-output-names:
maxItems: 1
- phandle: true
-
required:
- "#clock-cells"
- - compatible
- clocks
- clock-output-names
- additionalProperties: false
-
- if:
properties:
compatible:
@@ -183,16 +154,8 @@ patternProperties:
"#reset-cells":
const: 1
- # Already checked in the main schema
- compatible: true
-
- phandle: true
-
required:
- "#reset-cells"
- - compatible
-
- additionalProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/mfd/allwinner,sun8i-a23-prcm.yaml b/Documentation/devicetree/bindings/mfd/allwinner,sun8i-a23-prcm.yaml
index 16c80a7eec49..e51f85519911 100644
--- a/Documentation/devicetree/bindings/mfd/allwinner,sun8i-a23-prcm.yaml
+++ b/Documentation/devicetree/bindings/mfd/allwinner,sun8i-a23-prcm.yaml
@@ -57,25 +57,17 @@ patternProperties:
"#clock-cells":
const: 0
- # Already checked in the main schema
- compatible: true
-
clocks:
maxItems: 1
clock-output-names:
maxItems: 1
- phandle: true
-
required:
- "#clock-cells"
- - compatible
- clocks
- clock-output-names
- additionalProperties: false
-
- if:
properties:
compatible:
@@ -91,9 +83,6 @@ patternProperties:
offset of the bit controlling this particular gate in
the register.
- # Already checked in the main schema
- compatible: true
-
clocks:
maxItems: 1
@@ -101,16 +90,11 @@ patternProperties:
minItems: 1
maxItems: 32
- phandle: true
-
required:
- "#clock-cells"
- - compatible
- clocks
- clock-output-names
- additionalProperties: false
-
- if:
properties:
compatible:
@@ -122,34 +106,8 @@ patternProperties:
"#reset-cells":
const: 1
- # Already checked in the main schema
- compatible: true
-
- phandle: true
-
required:
- "#reset-cells"
- - compatible
-
- additionalProperties: false
-
- - if:
- properties:
- compatible:
- contains:
- const: allwinner,sun8i-a23-codec-analog
-
- then:
- properties:
- # Already checked in the main schema
- compatible: true
-
- phandle: true
-
- required:
- - compatible
-
- additionalProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/mfd/atmel-flexcom.txt b/Documentation/devicetree/bindings/mfd/atmel-flexcom.txt
index 9d837535637b..af692e8833a5 100644
--- a/Documentation/devicetree/bindings/mfd/atmel-flexcom.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-flexcom.txt
@@ -6,6 +6,7 @@ at boot time according to the device tree.
Required properties:
- compatible: Should be "atmel,sama5d2-flexcom"
+ or "microchip,sam9x7-flexcom", "atmel,sama5d2-flexcom"
- reg: Should be the offset/length value for Flexcom dedicated
I/O registers (without USART, TWI or SPI registers).
- clocks: Should be the Flexcom peripheral clock from PMC.
diff --git a/Documentation/devicetree/bindings/mfd/atmel-gpbr.txt b/Documentation/devicetree/bindings/mfd/atmel-gpbr.txt
index e8c525569f10..3c989d1760a2 100644
--- a/Documentation/devicetree/bindings/mfd/atmel-gpbr.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-gpbr.txt
@@ -6,6 +6,7 @@ Required properties:
- compatible: Should be one of the following:
"atmel,at91sam9260-gpbr", "syscon"
"microchip,sam9x60-gpbr", "syscon"
+ "microchip,sam9x7-gpbr", "microchip,sam9x60-gpbr", "syscon"
- reg: contains offset/length value of the GPBR memory
region.
diff --git a/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt b/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
index 5f8880cc757e..7de696eefaed 100644
--- a/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-hlcdc.txt
@@ -8,6 +8,7 @@ Required properties:
"atmel,sama5d3-hlcdc"
"atmel,sama5d4-hlcdc"
"microchip,sam9x60-hlcdc"
+ "microchip,sam9x75-xlcdc"
- reg: base address and size of the HLCDC device registers.
- clock-names: the name of the 3 clocks requested by the HLCDC device.
Should contain "periph_clk", "sys_clk" and "slow_clk".
diff --git a/Documentation/devicetree/bindings/mfd/atmel-matrix.txt b/Documentation/devicetree/bindings/mfd/atmel-matrix.txt
index 89d05c64fb01..6e5f83614e83 100644
--- a/Documentation/devicetree/bindings/mfd/atmel-matrix.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-matrix.txt
@@ -14,6 +14,7 @@ Required properties:
"atmel,at91sam9x5-matrix", "syscon"
"atmel,sama5d3-matrix", "syscon"
"microchip,sam9x60-matrix", "syscon"
+ "microchip,sam9x7-matrix", "atmel,at91sam9x5-matrix", "syscon"
- reg: Contains offset/length value of the Bus Matrix
memory region.
diff --git a/Documentation/devicetree/bindings/mfd/atmel-smc.txt b/Documentation/devicetree/bindings/mfd/atmel-smc.txt
index 5696d9fcb5dc..fd62add38a79 100644
--- a/Documentation/devicetree/bindings/mfd/atmel-smc.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-smc.txt
@@ -10,6 +10,7 @@ Required properties:
"atmel,sama5d3-smc", "syscon"
"atmel,sama5d2-smc", "syscon"
"microchip,sam9x60-smc", "syscon"
+ "microchip,sam9x7-smc", "atmel,at91sam9260-smc", "syscon"
- reg: Contains offset/length value of the SMC memory
region.
diff --git a/Documentation/devicetree/bindings/mfd/brcm,bcm6318-gpio-sysctl.yaml b/Documentation/devicetree/bindings/mfd/brcm,bcm6318-gpio-sysctl.yaml
index 9f9a14af875e..cb480162f967 100644
--- a/Documentation/devicetree/bindings/mfd/brcm,bcm6318-gpio-sysctl.yaml
+++ b/Documentation/devicetree/bindings/mfd/brcm,bcm6318-gpio-sysctl.yaml
@@ -35,7 +35,7 @@ patternProperties:
"^gpio@[0-9a-f]+$":
# Child node
type: object
- $ref: "../gpio/brcm,bcm63xx-gpio.yaml"
+ $ref: /schemas/gpio/brcm,bcm63xx-gpio.yaml
description:
GPIO controller for the SoC GPIOs. This child node definition
should follow the bindings specified in
@@ -44,7 +44,7 @@ patternProperties:
"^pinctrl@[0-9a-f]+$":
# Child node
type: object
- $ref: "../pinctrl/brcm,bcm6318-pinctrl.yaml"
+ $ref: /schemas/pinctrl/brcm,bcm6318-pinctrl.yaml
description:
Pin controller for the SoC pins. This child node definition
should follow the bindings specified in
diff --git a/Documentation/devicetree/bindings/mfd/brcm,bcm63268-gpio-sysctl.yaml b/Documentation/devicetree/bindings/mfd/brcm,bcm63268-gpio-sysctl.yaml
index 803277dd2725..c14def1b2ad2 100644
--- a/Documentation/devicetree/bindings/mfd/brcm,bcm63268-gpio-sysctl.yaml
+++ b/Documentation/devicetree/bindings/mfd/brcm,bcm63268-gpio-sysctl.yaml
@@ -35,7 +35,7 @@ patternProperties:
"^gpio@[0-9a-f]+$":
# Child node
type: object
- $ref: "../gpio/brcm,bcm63xx-gpio.yaml"
+ $ref: /schemas/gpio/brcm,bcm63xx-gpio.yaml
description:
GPIO controller for the SoC GPIOs. This child node definition
should follow the bindings specified in
@@ -44,7 +44,7 @@ patternProperties:
"^pinctrl@[0-9a-f]+$":
# Child node
type: object
- $ref: "../pinctrl/brcm,bcm63268-pinctrl.yaml"
+ $ref: /schemas/pinctrl/brcm,bcm63268-pinctrl.yaml
description:
Pin controller for the SoC pins. This child node definition
should follow the bindings specified in
diff --git a/Documentation/devicetree/bindings/mfd/brcm,bcm6328-gpio-sysctl.yaml b/Documentation/devicetree/bindings/mfd/brcm,bcm6328-gpio-sysctl.yaml
index b9a6856ce970..5f48209ed40f 100644
--- a/Documentation/devicetree/bindings/mfd/brcm,bcm6328-gpio-sysctl.yaml
+++ b/Documentation/devicetree/bindings/mfd/brcm,bcm6328-gpio-sysctl.yaml
@@ -35,7 +35,7 @@ patternProperties:
"^gpio@[0-9a-f]+$":
# Child node
type: object
- $ref: "../gpio/brcm,bcm63xx-gpio.yaml"
+ $ref: /schemas/gpio/brcm,bcm63xx-gpio.yaml
description:
GPIO controller for the SoC GPIOs. This child node definition
should follow the bindings specified in
@@ -44,7 +44,7 @@ patternProperties:
"^pinctrl@[0-9a-f]+$":
# Child node
type: object
- $ref: "../pinctrl/brcm,bcm6328-pinctrl.yaml"
+ $ref: /schemas/pinctrl/brcm,bcm6328-pinctrl.yaml
description:
Pin controller for the SoC pins. This child node definition
should follow the bindings specified in
diff --git a/Documentation/devicetree/bindings/mfd/brcm,bcm6358-gpio-sysctl.yaml b/Documentation/devicetree/bindings/mfd/brcm,bcm6358-gpio-sysctl.yaml
index 4651fe4dde07..f1f4629565d9 100644
--- a/Documentation/devicetree/bindings/mfd/brcm,bcm6358-gpio-sysctl.yaml
+++ b/Documentation/devicetree/bindings/mfd/brcm,bcm6358-gpio-sysctl.yaml
@@ -35,7 +35,7 @@ patternProperties:
"^gpio@[0-9a-f]+$":
# Child node
type: object
- $ref: "../gpio/brcm,bcm63xx-gpio.yaml"
+ $ref: /schemas/gpio/brcm,bcm63xx-gpio.yaml
description:
GPIO controller for the SoC GPIOs. This child node definition
should follow the bindings specified in
@@ -44,7 +44,7 @@ patternProperties:
"^pinctrl@[0-9a-f]+$":
# Child node
type: object
- $ref: "../pinctrl/brcm,bcm6358-pinctrl.yaml"
+ $ref: /schemas/pinctrl/brcm,bcm6358-pinctrl.yaml
description:
Pin controller for the SoC pins. This child node definition
should follow the bindings specified in
diff --git a/Documentation/devicetree/bindings/mfd/brcm,bcm6362-gpio-sysctl.yaml b/Documentation/devicetree/bindings/mfd/brcm,bcm6362-gpio-sysctl.yaml
index 0330b621fd38..4d594739b382 100644
--- a/Documentation/devicetree/bindings/mfd/brcm,bcm6362-gpio-sysctl.yaml
+++ b/Documentation/devicetree/bindings/mfd/brcm,bcm6362-gpio-sysctl.yaml
@@ -35,7 +35,7 @@ patternProperties:
"^gpio@[0-9a-f]+$":
# Child node
type: object
- $ref: "../gpio/brcm,bcm63xx-gpio.yaml"
+ $ref: /schemas/gpio/brcm,bcm63xx-gpio.yaml
description:
GPIO controller for the SoC GPIOs. This child node definition
should follow the bindings specified in
@@ -44,7 +44,7 @@ patternProperties:
"^pinctrl@[0-9a-f]+$":
# Child node
type: object
- $ref: "../pinctrl/brcm,bcm6362-pinctrl.yaml"
+ $ref: /schemas/pinctrl/brcm,bcm6362-pinctrl.yaml
description:
Pin controller for the SoC pins. This child node definition
should follow the bindings specified in
diff --git a/Documentation/devicetree/bindings/mfd/brcm,bcm6368-gpio-sysctl.yaml b/Documentation/devicetree/bindings/mfd/brcm,bcm6368-gpio-sysctl.yaml
index 82d3e4415bda..aae83d432880 100644
--- a/Documentation/devicetree/bindings/mfd/brcm,bcm6368-gpio-sysctl.yaml
+++ b/Documentation/devicetree/bindings/mfd/brcm,bcm6368-gpio-sysctl.yaml
@@ -35,7 +35,7 @@ patternProperties:
"^gpio@[0-9a-f]+$":
# Child node
type: object
- $ref: "../gpio/brcm,bcm63xx-gpio.yaml"
+ $ref: /schemas/gpio/brcm,bcm63xx-gpio.yaml
description:
GPIO controller for the SoC GPIOs. This child node definition
should follow the bindings specified in
@@ -44,7 +44,7 @@ patternProperties:
"^pinctrl@[0-9a-f]+$":
# Child node
type: object
- $ref: "../pinctrl/brcm,bcm6368-pinctrl.yaml"
+ $ref: /schemas/pinctrl/brcm,bcm6368-pinctrl.yaml
description:
Pin controller for the SoC pins. This child node definition
should follow the bindings specified in
diff --git a/Documentation/devicetree/bindings/mfd/maxim,max77693.yaml b/Documentation/devicetree/bindings/mfd/maxim,max77693.yaml
index 1b06a77ec798..9804d13de648 100644
--- a/Documentation/devicetree/bindings/mfd/maxim,max77693.yaml
+++ b/Documentation/devicetree/bindings/mfd/maxim,max77693.yaml
@@ -31,12 +31,17 @@ properties:
charger:
$ref: /schemas/power/supply/maxim,max77693.yaml
+ connector:
+ $ref: /schemas/connector/usb-connector.yaml#
+ unevaluatedProperties: false
+
led:
$ref: /schemas/leds/maxim,max77693.yaml
max77693-muic:
type: object
additionalProperties: false
+ deprecated: true
properties:
compatible:
@@ -45,6 +50,21 @@ properties:
required:
- compatible
+ muic:
+ type: object
+ additionalProperties: false
+
+ properties:
+ compatible:
+ const: maxim,max77693-muic
+
+ connector:
+ $ref: /schemas/connector/usb-connector.yaml#
+ unevaluatedProperties: false
+
+ required:
+ - compatible
+
motor-driver:
type: object
additionalProperties: false
@@ -107,6 +127,38 @@ examples:
};
};
+ muic {
+ compatible = "maxim,max77693-muic";
+
+ connector {
+ compatible = "samsung,usb-connector-11pin",
+ "usb-b-connector";
+ label = "micro-USB";
+ type = "micro";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ muic_to_usb: endpoint {
+ remote-endpoint = <&usb_to_muic>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+
+ muic_to_mhl: endpoint {
+ remote-endpoint = <&mhl_to_muic>;
+ };
+ };
+ };
+ };
+ };
+
motor-driver {
compatible = "maxim,max77693-haptic";
haptic-supply = <&ldo26_reg>;
diff --git a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
index 8b9a2008a354..9f03436b1cdc 100644
--- a/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
+++ b/Documentation/devicetree/bindings/mfd/qcom,spmi-pmic.yaml
@@ -41,6 +41,7 @@ properties:
- qcom,pm660
- qcom,pm660l
- qcom,pm7250b
+ - qcom,pm7550ba
- qcom,pm7325
- qcom,pm8004
- qcom,pm8005
@@ -70,6 +71,8 @@ properties:
- qcom,pm8994
- qcom,pm8998
- qcom,pma8084
+ - qcom,pmc8180
+ - qcom,pmc8180c
- qcom,pmd9635
- qcom,pmi632
- qcom,pmi8950
@@ -88,6 +91,7 @@ properties:
- qcom,pms405
- qcom,pmx55
- qcom,pmx65
+ - qcom,pmx75
- qcom,smb2351
- const: qcom,spmi-pmic
@@ -127,7 +131,7 @@ patternProperties:
"^audio-codec@[0-9a-f]+$":
type: object
- additionalProperties: true # FIXME qcom,pm8916-wcd-analog-codec binding not converted yet
+ $ref: /schemas/sound/qcom,pm8916-wcd-analog-codec.yaml#
"^charger@[0-9a-f]+$":
type: object
diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71847-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd71847-pmic.yaml
index 7ab7b2c7f3e6..d783cc4e4e86 100644
--- a/Documentation/devicetree/bindings/mfd/rohm,bd71847-pmic.yaml
+++ b/Documentation/devicetree/bindings/mfd/rohm,bd71847-pmic.yaml
@@ -130,7 +130,6 @@ dependencies:
examples:
- |
#include <dt-bindings/interrupt-controller/irq.h>
- #include <dt-bindings/leds/common.h>
i2c {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/mfd/st,stmpe.yaml b/Documentation/devicetree/bindings/mfd/st,stmpe.yaml
new file mode 100644
index 000000000000..b77cc3f3075d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/st,stmpe.yaml
@@ -0,0 +1,297 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/st,stmpe.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics Port Expander (STMPE)
+
+description: STMicroelectronics Port Expander (STMPE) is a series of slow
+ bus controllers for various expanded peripherals such as GPIO, keypad,
+ touchscreen, ADC, PWM or rotator. It can contain one or several different
+ peripherals connected to SPI or I2C.
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+properties:
+ compatible:
+ enum:
+ - st,stmpe601
+ - st,stmpe801
+ - st,stmpe811
+ - st,stmpe1600
+ - st,stmpe1601
+ - st,stmpe2401
+ - st,stmpe2403
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ vcc-supply: true
+
+ vio-supply: true
+
+ reset-gpios:
+ maxItems: 1
+
+ wakeup-source: true
+
+ st,autosleep-timeout:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 4, 16, 32, 64, 128, 256, 512, 1024 ]
+ description: Idle time in milliseconds before entering automatic sleep to save power
+
+ st,sample-time:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1, 2, 3, 4, 5, 6 ]
+ description: |
+ Sample time per iteration
+ 0 = 36 clock ticks
+ 1 = 44 clock ticks
+ 2 = 56 clock ticks
+ 3 = 64 clock ticks
+ 4 = 80 clock ticks - recommended
+ 5 = 96 clock ticks
+ 6 = 124 clock ticks
+
+ st,mod-12b:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1 ]
+ description: ADC bit mode 0 = 10bit ADC, 1 = 12bit ADC
+
+ st,ref-sel:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1 ]
+ description: ADC reference source 0 = internal, 1 = external
+
+ st,adc-freq:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1, 2, 3 ]
+ description: |
+ ADC clock speed
+ 0 = 1.625 MHz
+ 1 = 3.25 MHz
+ 2, 3 = 6.5 MHz
+
+ adc:
+ type: object
+ $ref: /schemas/iio/adc/st,stmpe-adc.yaml#
+
+ gpio:
+ type: object
+ $ref: /schemas/gpio/st,stmpe-gpio.yaml#
+
+ keyboard-controller:
+ type: object
+ $ref: /schemas/input/matrix-keymap.yaml#
+
+ unevaluatedProperties: false
+
+ properties:
+ compatible:
+ const: st,stmpe-keypad
+
+ debounce-interval:
+ description: Debouncing interval in milliseconds
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ st,no-autorepeat:
+ description: If present, the keys will not autorepeat when pressed
+ $ref: /schemas/types.yaml#/definitions/flag
+
+ st,scan-count:
+ description: Scanning cycles elapsed before key data is updated
+ $ref: /schemas/types.yaml#/definitions/uint32
+
+ required:
+ - compatible
+ - linux,keymap
+
+ pwm:
+ type: object
+ $ref: /schemas/pwm/pwm.yaml#
+
+ unevaluatedProperties: false
+
+ properties:
+ compatible:
+ const: st,stmpe-pwm
+
+ "#pwm-cells":
+ const: 2
+
+ touchscreen:
+ type: object
+ $ref: /schemas/input/touchscreen/touchscreen.yaml#
+
+ unevaluatedProperties: false
+
+ properties:
+ compatible:
+ const: st,stmpe-ts
+
+ st,ave-ctrl:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1, 2, 3 ]
+ description: |
+ Sample average control
+ 0 = 1 sample
+ 1 = 2 samples
+ 2 = 4 samples
+ 3 = 8 samples
+
+ st,touch-det-delay:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1, 2, 3, 4, 5, 6, 7 ]
+ description: |
+ Touch detection delay
+ 0 = 10 us
+ 1 = 50 us
+ 2 = 100 us
+ 3 = 500 us - recommended
+ 4 = 1 ms
+ 5 = 5 ms
+ 6 = 10 ms
+ 7 = 50 ms
+
+ st,settling:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1, 2, 3, 4, 5, 6, 7 ]
+ description: |
+ Panel driver settling time
+ 0 = 10 us
+ 1 = 100 us
+ 2 = 500 us - recommended
+ 3 = 1 ms
+ 4 = 5 ms
+ 5 = 10 ms
+ 6 = 50 ms
+ 7 = 100 ms
+
+ st,fraction-z:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1, 2, 3, 4, 5, 6, 7 ]
+ description: Length of the fractional part in z, i.e. the number of
+ fractional bits ([0..7]); the recommended value is 7
+
+ st,i-drive:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1 ]
+ description: |
+ Current limit value of the touchscreen drivers
+ 0 = 20 mA (typical 35 mA max)
+ 1 = 50 mA (typical 80 mA max)
+
+ required:
+ - compatible
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/input/input.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port-expander@43 {
+ compatible = "st,stmpe2401";
+ reg = <0x43>;
+ reset-gpios = <&gpio 13 GPIO_ACTIVE_LOW>;
+ interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
+ interrupt-parent = <&gpio>;
+ vcc-supply = <&db8500_vsmps2_reg>;
+ vio-supply = <&db8500_vsmps2_reg>;
+ wakeup-source;
+ st,autosleep-timeout = <1024>;
+
+ gpio {
+ compatible = "st,stmpe-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ st,norequest-mask = <0xf0f002>;
+ };
+
+ keyboard-controller {
+ compatible = "st,stmpe-keypad";
+ debounce-interval = <64>;
+ st,scan-count = <8>;
+ st,no-autorepeat;
+ keypad,num-rows = <8>;
+ keypad,num-columns = <8>;
+ linux,keymap = <
+ MATRIX_KEY(0x00, 0x00, KEY_1)
+ MATRIX_KEY(0x00, 0x01, KEY_2)
+ MATRIX_KEY(0x00, 0x02, KEY_3)
+ MATRIX_KEY(0x00, 0x03, KEY_4)
+ MATRIX_KEY(0x00, 0x04, KEY_5)
+ MATRIX_KEY(0x00, 0x05, KEY_6)
+ MATRIX_KEY(0x00, 0x06, KEY_7)
+ MATRIX_KEY(0x00, 0x07, KEY_8)
+ MATRIX_KEY(0x00, 0x08, KEY_9)
+ MATRIX_KEY(0x00, 0x09, KEY_0)
+ >;
+ };
+
+ pwm {
+ compatible = "st,stmpe-pwm";
+ #pwm-cells = <2>;
+ };
+ };
+
+ port-expander@41 {
+ compatible = "st,stmpe811";
+ reg = <0x41>;
+ interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&gpio>;
+ st,adc-freq = <1>;
+ st,mod-12b = <1>;
+ st,ref-sel = <0>;
+ st,sample-time = <4>;
+
+ adc {
+ compatible = "st,stmpe-adc";
+ st,norequest-mask = <0x0f>;
+ #io-channel-cells = <1>;
+ };
+
+ gpio {
+ compatible = "st,stmpe-gpio";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ pwm {
+ compatible = "st,stmpe-pwm";
+ #pwm-cells = <2>;
+ };
+
+ touchscreen {
+ compatible = "st,stmpe-ts";
+ st,ave-ctrl = <3>;
+ st,touch-det-delay = <5>;
+ st,settling = <3>;
+ st,fraction-z = <7>;
+ st,i-drive = <1>;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml b/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
index 97c61097f9e2..b17ebeb0a42f 100644
--- a/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
+++ b/Documentation/devicetree/bindings/mfd/st,stpmic1.yaml
@@ -106,6 +106,7 @@ properties:
const: st,stpmic1-regulators
ldo3:
+ $ref: /schemas/regulator/regulator.yaml
type: object
properties:
@@ -128,6 +129,7 @@ properties:
additionalProperties: false
ldo4:
+ $ref: /schemas/regulator/regulator.yaml
type: object
properties:
@@ -142,11 +144,14 @@ properties:
regulator-name: true
regulator-boot-on: true
regulator-always-on: true
+ regulator-min-microvolt: true
+ regulator-max-microvolt: true
regulator-over-current-protection: true
additionalProperties: false
vref_ddr:
+ $ref: /schemas/regulator/regulator.yaml
type: object
properties:
@@ -165,6 +170,7 @@ properties:
additionalProperties: false
boost:
+ $ref: /schemas/regulator/regulator.yaml
type: object
properties:
@@ -187,10 +193,8 @@ properties:
"^(buck[1-4]|ldo[1-6]|vref_ddr|boost|pwr_sw[1-2])-supply$":
description: STPMIC1 voltage regulators supplies
- "^(buck[1-4]|ldo[1-6]|boost|vref_ddr|pwr_sw[1-2])$":
- $ref: ../regulator/regulator.yaml
-
"^ldo[1-2,5-6]$":
+ $ref: /schemas/regulator/regulator.yaml
type: object
properties:
@@ -213,6 +217,7 @@ properties:
additionalProperties: false
"^buck[1-4]$":
+ $ref: /schemas/regulator/regulator.yaml
type: object
properties:
@@ -237,6 +242,7 @@ properties:
additionalProperties: false
"^pwr_sw[1-2]$":
+ $ref: /schemas/regulator/regulator.yaml
type: object
properties:
diff --git a/Documentation/devicetree/bindings/mfd/stericsson,db8500-prcmu.yaml b/Documentation/devicetree/bindings/mfd/stericsson,db8500-prcmu.yaml
index a66d58b4d1f2..5e0002f099e4 100644
--- a/Documentation/devicetree/bindings/mfd/stericsson,db8500-prcmu.yaml
+++ b/Documentation/devicetree/bindings/mfd/stericsson,db8500-prcmu.yaml
@@ -72,44 +72,52 @@ properties:
main voltage domain for the chip.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_varm:
description: The voltage for the ARM Cortex A-9 CPU.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_vmodem:
description: The voltage for the modem subsystem.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_vpll:
description: The voltage for the phase locked loop clocks.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_vsmps1:
description: Also known as VIO12, is a step-down voltage regulator
for 1.2V I/O. SMPS means System Management Power Source.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_vsmps2:
description: Also known as VIO18, is a step-down voltage regulator
for 1.8V I/O. SMPS means System Management Power Source.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_vsmps3:
description: This is a step-down voltage regulator
for 0.87 thru 1.875V I/O. SMPS means System Management Power Source.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_vrf1:
description: RF transceiver voltage regulator.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_sva_mmdsp:
description: Smart Video Accelerator (SVA) multimedia DSP (MMDSP)
@@ -117,18 +125,21 @@ properties:
for video encoding and decoding.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_sva_mmdsp_ret:
description: Smart Video Accelerator (SVA) multimedia DSP (MMDSP)
voltage regulator for retention mode.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_sva_pipe:
description: Smart Video Accelerator (SVA) multimedia DSP (MMDSP)
voltage regulator for the data pipe.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_sia_mmdsp:
description: Smart Image Accelerator (SIA) multimedia DSP (MMDSP)
@@ -136,18 +147,21 @@ properties:
for image encoding and decoding.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_sia_mmdsp_ret:
description: Smart Image Accelerator (SIA) multimedia DSP (MMDSP)
voltage regulator for retention mode.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_sia_pipe:
description: Smart Image Accelerator (SIA) multimedia DSP (MMDSP)
voltage regulator for the data pipe.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_sga:
description: Smart Graphics Accelerator (SGA) voltage regulator.
@@ -155,6 +169,7 @@ properties:
accelerator block.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_b2r2_mcde:
description: Blit Blend Rotate and Rescale (B2R2), and Multi-Channel
@@ -162,28 +177,33 @@ properties:
blocks.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_esram12:
description: Embedded Static RAM (ESRAM) 1 and 2 voltage regulator.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_esram12_ret:
description: Embedded Static RAM (ESRAM) 1 and 2 voltage regulator for
retention mode.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_esram34:
description: Embedded Static RAM (ESRAM) 3 and 4 voltage regulator.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
db8500_esram34_ret:
description: Embedded Static RAM (ESRAM) 3 and 4 voltage regulator for
retention mode.
type: object
$ref: ../regulator/regulator.yaml#
+ unevaluatedProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/mfd/stmpe.txt b/Documentation/devicetree/bindings/mfd/stmpe.txt
deleted file mode 100644
index d4408a417193..000000000000
--- a/Documentation/devicetree/bindings/mfd/stmpe.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-* ST Microelectronics STMPE Multi-Functional Device
-
-STMPE is an MFD device which may expose the following inbuilt devices: gpio,
-keypad, touchscreen, adc, pwm, rotator.
-
-Required properties:
- - compatible : "st,stmpe[610|801|811|1600|1601|2401|2403]"
- - reg : I2C/SPI address of the device
-
-Optional properties:
- - interrupts : The interrupt outputs from the controller
- - interrupt-controller : Marks the device node as an interrupt controller
- - wakeup-source : Marks the input device as wakable
- - st,autosleep-timeout : Valid entries (ms); 4, 16, 32, 64, 128, 256, 512 and 1024
- - irq-gpio : If present, which GPIO to use for event IRQ
-
-Optional properties for devices with touch and ADC (STMPE811|STMPE610):
- - st,sample-time : ADC conversion time in number of clock.
- 0 -> 36 clocks 4 -> 80 clocks (recommended)
- 1 -> 44 clocks 5 -> 96 clocks
- 2 -> 56 clocks 6 -> 124 clocks
- 3 -> 64 clocks
- - st,mod-12b : ADC Bit mode
- 0 -> 10bit ADC 1 -> 12bit ADC
- - st,ref-sel : ADC reference source
- 0 -> internal 1 -> external
- - st,adc-freq : ADC Clock speed
- 0 -> 1.625 MHz 2 || 3 -> 6.5 MHz
- 1 -> 3.25 MHz
-
-Example:
-
- stmpe1601: stmpe1601@40 {
- compatible = "st,stmpe1601";
- reg = <0x40>;
- interrupts = <26 0x4>;
- interrupt-parent = <&gpio6>;
- interrupt-controller;
-
- wakeup-source;
- st,autosleep-timeout = <1024>;
- };
diff --git a/Documentation/devicetree/bindings/mtd/amlogic,meson-nand.yaml b/Documentation/devicetree/bindings/mtd/amlogic,meson-nand.yaml
index 787ef488dd5b..57b6957c8415 100644
--- a/Documentation/devicetree/bindings/mtd/amlogic,meson-nand.yaml
+++ b/Documentation/devicetree/bindings/mtd/amlogic,meson-nand.yaml
@@ -50,7 +50,7 @@ patternProperties:
const: hw
nand-ecc-step-size:
- const: 1024
+ enum: [512, 1024]
nand-ecc-strength:
enum: [8, 16, 24, 30, 40, 50, 60]
@@ -66,6 +66,10 @@ patternProperties:
unevaluatedProperties: false
+ dependencies:
+ nand-ecc-strength: [nand-ecc-step-size]
+ nand-ecc-step-size: [nand-ecc-strength]
+
required:
- compatible
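
For the dependencies added above, a minimal sketch of a NAND chip node that satisfies the new pairing rule (the controller node, chip select and the particular ECC values are illustrative assumptions, not taken from the patch):

    nand-controller {
        #address-cells = <1>;
        #size-cells = <0>;

        nand@0 {
            reg = <0>;
            nand-ecc-mode = "hw";
            /* step size and strength must now be given as a pair */
            nand-ecc-step-size = <1024>;
            nand-ecc-strength = <8>;
        };
    };
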
diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
index 89959e5c47ba..58f0cea160ef 100644
--- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
+++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml
@@ -43,8 +43,10 @@ properties:
- const: jedec,spi-nor
- const: jedec,spi-nor
description:
- Must also include "jedec,spi-nor" for any SPI NOR flash that can be
- identified by the JEDEC READ ID opcode (0x9F).
+ SPI NOR flashes compatible with the JEDEC SFDP standard or which may be
+ identified with the READ ID opcode (0x9F) do not deserve a specific
+ compatible. They should instead only be matched against the generic
+ "jedec,spi-nor" compatible.
reg:
minItems: 1
@@ -70,6 +72,21 @@ properties:
be used on such systems, to denote the absence of a reliable reset
mechanism.
+ no-wp:
+ type: boolean
+ description:
+ The status register write disable (SRWD) bit in status register, combined
+ with the WP# signal, provides hardware data protection for the device. When
+ the SRWD bit is set to 1, and the WP# signal is either driven LOW or hard
+ strapped to LOW, the status register nonvolatile bits become read-only and
+ the WRITE STATUS REGISTER operation will not execute. The only way to exit
+ this hardware-protected mode is to drive WP# HIGH. If the WP# signal of the
+ flash device is not connected or is wrongly tied to GND (that includes internal
+ pull-downs) then status register permanently becomes read-only as the SRWD bit
+ cannot be reset. This boolean flag can be used on such systems to avoid setting
+ the SRWD bit while writing the status register. WP# signal hard strapped to GND
+ can be a valid use case.
+
reset-gpios:
description:
A GPIO line connected to the RESET (active low) signal of the device.
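
As a hedged sketch of how the new no-wp flag would be used (the SPI controller, address and frequency are assumptions), a board that hard straps WP# to GND marks the flash so the SRWD bit is never written:

    spi {
        #address-cells = <1>;
        #size-cells = <0>;

        flash@0 {
            compatible = "jedec,spi-nor";
            reg = <0>;
            spi-max-frequency = <50000000>;
            /* WP# is tied to GND on this board; never set SRWD */
            no-wp;
        };
    };
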
diff --git a/Documentation/devicetree/bindings/mtd/marvell,nand-controller.yaml b/Documentation/devicetree/bindings/mtd/marvell,nand-controller.yaml
index a10729bb1840..1ecea848e8b9 100644
--- a/Documentation/devicetree/bindings/mtd/marvell,nand-controller.yaml
+++ b/Documentation/devicetree/bindings/mtd/marvell,nand-controller.yaml
@@ -16,6 +16,7 @@ properties:
- const: marvell,armada-8k-nand-controller
- const: marvell,armada370-nand-controller
- enum:
+ - marvell,ac5-nand-controller
- marvell,armada370-nand-controller
- marvell,pxa3xx-nand-controller
- description: legacy bindings
diff --git a/Documentation/devicetree/bindings/mtd/nand-controller.yaml b/Documentation/devicetree/bindings/mtd/nand-controller.yaml
index 83a4fe4cc29d..28167c0cf271 100644
--- a/Documentation/devicetree/bindings/mtd/nand-controller.yaml
+++ b/Documentation/devicetree/bindings/mtd/nand-controller.yaml
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/mtd/nand-controller.yaml#
diff --git a/Documentation/devicetree/bindings/mtd/oxnas-nand.txt b/Documentation/devicetree/bindings/mtd/oxnas-nand.txt
deleted file mode 100644
index 2ba07fc8b79c..000000000000
--- a/Documentation/devicetree/bindings/mtd/oxnas-nand.txt
+++ /dev/null
@@ -1,41 +0,0 @@
-* Oxford Semiconductor OXNAS NAND Controller
-
-Please refer to nand-controller.yaml for generic information regarding MTD NAND bindings.
-
-Required properties:
- - compatible: "oxsemi,ox820-nand"
- - reg: Base address and length for NAND mapped memory.
-
-Optional Properties:
- - clocks: phandle to the NAND gate clock if needed.
- - resets: phandle to the NAND reset control if needed.
-
-Example:
-
-nandc: nand-controller@41000000 {
- compatible = "oxsemi,ox820-nand";
- reg = <0x41000000 0x100000>;
- clocks = <&stdclk CLK_820_NAND>;
- resets = <&reset RESET_NAND>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- nand@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <1>;
- nand-ecc-mode = "soft";
- nand-ecc-algo = "hamming";
-
- partition@0 {
- label = "boot";
- reg = <0x00000000 0x00e00000>;
- read-only;
- };
-
- partition@e00000 {
- label = "ubi";
- reg = <0x00e00000 0x07200000>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/mtd/partitions/seama.yaml b/Documentation/devicetree/bindings/mtd/partitions/seama.yaml
new file mode 100644
index 000000000000..4c1cbf43e81a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/partitions/seama.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mtd/partitions/seama.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Seattle Image Partitions
+
+description: The SEAttle iMAge (SEAMA) partition is a type of partition
+ used for NAND flash devices. This type of flash image is found in some
+ D-Link routers such as DIR-645, DIR-842, DIR-859, DIR-860L, DIR-885L,
+ DIR-890L and DCH-M225, as well as in WD and NEC routers on the ath79
+ (MIPS), Broadcom BCM53xx, and RAMIPS platforms. This partition type
+ does not have children defined in the device tree, they need to be
+ does not have children defined in the device tree; they need to be
+
+allOf:
+ - $ref: partition.yaml#
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+properties:
+ compatible:
+ const: seama
+
+required:
+ - compatible
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ compatible = "seama";
+ reg = <0x0 0x800000>;
+ label = "firmware";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/fsl,t1023-sfp.yaml b/Documentation/devicetree/bindings/nvmem/fsl,t1023-sfp.yaml
new file mode 100644
index 000000000000..df826b40d8ca
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/fsl,t1023-sfp.yaml
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/fsl,t1023-sfp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP QorIQ eFuse support
+
+maintainers:
+ - Richard Alpe <richard@bit42.se>
+
+description:
+ Read support for the eFuses (SFP) on NXP QorIQ series SoCs.
+
+allOf:
+ - $ref: nvmem.yaml#
+
+properties:
+ compatible:
+ const: fsl,t1023-sfp
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ efuse@e8000 {
+ compatible = "fsl,t1023-sfp";
+ reg = <0xe8000 0x1000>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/nvmem/layouts/fixed-cell.yaml b/Documentation/devicetree/bindings/nvmem/layouts/fixed-cell.yaml
index e698098450e1..ac2381e66027 100644
--- a/Documentation/devicetree/bindings/nvmem/layouts/fixed-cell.yaml
+++ b/Documentation/devicetree/bindings/nvmem/layouts/fixed-cell.yaml
@@ -11,6 +11,15 @@ maintainers:
- Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
properties:
+ compatible:
+ oneOf:
+ - const: mac-base
+ description: >
+ Cell with base MAC address to be used for calculating extra relative
+ addresses.
+ It can be stored in a plain binary format (cell length 6) or as
+ ASCII text like "00:11:22:33:44:55" (cell length 17).
+
reg:
maxItems: 1
@@ -25,6 +34,23 @@ properties:
description:
Size in bit within the address range specified by reg.
+allOf:
+ - if:
+ required: [ compatible ]
+ then:
+ if:
+ properties:
+ compatible:
+ contains:
+ const: mac-base
+ then:
+ properties:
+ "#nvmem-cell-cells":
+ description: The first argument is a MAC address offset.
+ const: 1
+ required:
+ - "#nvmem-cell-cells"
+
required:
- reg
diff --git a/Documentation/devicetree/bindings/nvmem/layouts/fixed-layout.yaml b/Documentation/devicetree/bindings/nvmem/layouts/fixed-layout.yaml
index c271537d0714..9bd34bd5af30 100644
--- a/Documentation/devicetree/bindings/nvmem/layouts/fixed-layout.yaml
+++ b/Documentation/devicetree/bindings/nvmem/layouts/fixed-layout.yaml
@@ -44,6 +44,18 @@ examples:
#address-cells = <1>;
#size-cells = <1>;
+ mac@100 {
+ compatible = "mac-base";
+ reg = <0x100 0x6>;
+ #nvmem-cell-cells = <1>;
+ };
+
+ mac@110 {
+ compatible = "mac-base";
+ reg = <0x110 0x11>;
+ #nvmem-cell-cells = <1>;
+ };
+
calibration@4000 {
reg = <0x4000 0x100>;
};
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.yaml b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
index 980244100690..9f921d940142 100644
--- a/Documentation/devicetree/bindings/nvmem/nvmem.yaml
+++ b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
@@ -49,7 +49,10 @@ properties:
patternProperties:
"@[0-9a-f]+(,[0-7])?$":
type: object
- $ref: layouts/fixed-cell.yaml
+ allOf:
+ - $ref: layouts/fixed-cell.yaml
+ - properties:
+ compatible: false
deprecated: true
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
index 6cd4682a167d..8740938c32eb 100644
--- a/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
+++ b/Documentation/devicetree/bindings/nvmem/qcom,qfprom.yaml
@@ -23,11 +23,13 @@ properties:
- qcom,ipq8064-qfprom
- qcom,ipq8074-qfprom
- qcom,ipq9574-qfprom
+ - qcom,msm8226-qfprom
- qcom,msm8916-qfprom
- qcom,msm8974-qfprom
- qcom,msm8976-qfprom
- qcom,msm8996-qfprom
- qcom,msm8998-qfprom
+ - qcom,qcm2290-qfprom
- qcom,qcs404-qfprom
- qcom,sc7180-qfprom
- qcom,sc7280-qfprom
diff --git a/Documentation/devicetree/bindings/nvmem/qcom,sec-qfprom.yaml b/Documentation/devicetree/bindings/nvmem/qcom,sec-qfprom.yaml
new file mode 100644
index 000000000000..9b133f783d29
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/qcom,sec-qfprom.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/qcom,sec-qfprom.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies Inc, Secure QFPROM Efuse
+
+maintainers:
+ - Komal Bajaj <quic_kbajaj@quicinc.com>
+
+description:
+ For some Qualcomm SoCs, the qfprom region may be protected from
+ non-secure access. In such situations, the OS has to use secure calls
+ to read the region.
+
+allOf:
+ - $ref: nvmem.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - qcom,qdu1000-sec-qfprom
+ - const: qcom,sec-qfprom
+
+ reg:
+ items:
+ - description: The secure qfprom corrected region.
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ efuse@221c8000 {
+ compatible = "qcom,qdu1000-sec-qfprom", "qcom,sec-qfprom";
+ reg = <0 0x221c8000 0 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ multi_chan_ddr: multi-chan-ddr@12b {
+ reg = <0x12b 0x1>;
+ bits = <0 2>;
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/peci/nuvoton,npcm-peci.yaml b/Documentation/devicetree/bindings/peci/nuvoton,npcm-peci.yaml
new file mode 100644
index 000000000000..087e02a9ade3
--- /dev/null
+++ b/Documentation/devicetree/bindings/peci/nuvoton,npcm-peci.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/peci/nuvoton,npcm-peci.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton PECI Bus
+
+maintainers:
+ - Tomer Maimon <tmaimon77@gmail.com>
+
+allOf:
+ - $ref: peci-controller.yaml#
+
+properties:
+ compatible:
+ enum:
+ - nuvoton,npcm750-peci
+ - nuvoton,npcm845-peci
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ description:
+ Clock source for PECI controller. Should reference the APB clock.
+ maxItems: 1
+
+ cmd-timeout-ms:
+ minimum: 1
+ maximum: 1000
+ default: 1000
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/nuvoton,npcm7xx-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ peci-controller@f0100000 {
+ compatible = "nuvoton,npcm750-peci";
+ reg = <0xf0100000 0x200>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_APB3>;
+ cmd-timeout-ms = <1000>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml b/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml
index 230a17f24966..2bb91542e984 100644
--- a/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml
+++ b/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml
@@ -64,7 +64,7 @@ description: |
properties:
$nodename:
- pattern: "^t-phy@[0-9a-f]+$"
+ pattern: "^t-phy(@[0-9a-f]+)?$"
compatible:
oneOf:
diff --git a/Documentation/devicetree/bindings/phy/qcom,ipq5332-usb-hsphy.yaml b/Documentation/devicetree/bindings/phy/qcom,ipq5332-usb-hsphy.yaml
new file mode 100644
index 000000000000..2671a048c926
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/qcom,ipq5332-usb-hsphy.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/qcom,ipq5332-usb-hsphy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: M31 USB PHY
+
+maintainers:
+ - Sricharan Ramabadhran <quic_srichara@quicinc.com>
+ - Varadarajan Narayanan <quic_varada@quicinc.com>
+
+description:
+ USB M31 PHY (https://www.m31tech.com) found in Qualcomm
+ IPQ5018, IPQ5332 SoCs.
+
+properties:
+ compatible:
+ items:
+ - const: qcom,ipq5332-usb-hsphy
+
+ "#phy-cells":
+ const: 0
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: cfg_ahb
+
+ resets:
+ maxItems: 1
+
+ vdd-supply:
+ description:
+ Phandle to the 5V regulator that supplies the PHY digital circuit.
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,ipq5332-gcc.h>
+ usb-phy@7b000 {
+ compatible = "qcom,ipq5332-usb-hsphy";
+ reg = <0x0007b000 0x12c>;
+
+ clocks = <&gcc GCC_USB0_PHY_CFG_AHB_CLK>;
+ clock-names = "cfg_ahb";
+
+ #phy-cells = <0>;
+
+ resets = <&gcc GCC_QUSB2_0_PHY_BCR>;
+
+ vdd-supply = <&regulator_fixed_5p0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
index 3d42ee3901a1..5073007267ad 100644
--- a/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
@@ -13,287 +13,79 @@ description:
QMP PHY controller supports physical layer functionality for a number of
controllers on Qualcomm chipsets, such as, PCIe, UFS, and USB.
- Note that these bindings are for SoCs up to SC8180X. For newer SoCs, see
- qcom,sc8280xp-qmp-pcie-phy.yaml.
-
properties:
compatible:
enum:
- qcom,ipq6018-qmp-pcie-phy
- qcom,ipq8074-qmp-gen3-pcie-phy
- qcom,ipq8074-qmp-pcie-phy
- - qcom,msm8998-qmp-pcie-phy
- - qcom,sc8180x-qmp-pcie-phy
- - qcom,sdm845-qhp-pcie-phy
- - qcom,sdm845-qmp-pcie-phy
- - qcom,sdx55-qmp-pcie-phy
- - qcom,sm8250-qmp-gen3x1-pcie-phy
- - qcom,sm8250-qmp-gen3x2-pcie-phy
- - qcom,sm8250-qmp-modem-pcie-phy
- - qcom,sm8450-qmp-gen3x1-pcie-phy
- - qcom,sm8450-qmp-gen4x2-pcie-phy
reg:
items:
- description: serdes
- "#address-cells":
- enum: [ 1, 2 ]
-
- "#size-cells":
- enum: [ 1, 2 ]
-
- ranges: true
-
clocks:
- minItems: 2
- maxItems: 4
+ maxItems: 3
clock-names:
- minItems: 2
- maxItems: 4
+ items:
+ - const: aux
+ - const: cfg_ahb
+ - const: pipe
resets:
- minItems: 1
maxItems: 2
reset-names:
- minItems: 1
- maxItems: 2
-
- vdda-phy-supply: true
-
- vdda-pll-supply: true
-
- vddp-ref-clk-supply: true
-
-patternProperties:
- "^phy@[0-9a-f]+$":
- type: object
- description: single PHY-provider child node
- properties:
- reg:
- minItems: 3
- maxItems: 6
-
- clocks:
- items:
- - description: PIPE clock
-
- clock-names:
- deprecated: true
- items:
- - const: pipe0
-
- "#clock-cells":
- const: 0
-
- clock-output-names:
- maxItems: 1
+ items:
+ - const: phy
+ - const: common
- "#phy-cells":
- const: 0
+ "#clock-cells":
+ const: 0
- required:
- - reg
- - clocks
- - "#clock-cells"
- - clock-output-names
- - "#phy-cells"
+ clock-output-names:
+ maxItems: 1
- additionalProperties: false
+ "#phy-cells":
+ const: 0
required:
- compatible
- reg
- - "#address-cells"
- - "#size-cells"
- - ranges
- clocks
- clock-names
- resets
- reset-names
+ - "#clock-cells"
+ - clock-output-names
+ - "#phy-cells"
additionalProperties: false
-allOf:
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,msm8998-qmp-pcie-phy
- then:
- properties:
- clocks:
- maxItems: 3
- clock-names:
- items:
- - const: aux
- - const: cfg_ahb
- - const: ref
- resets:
- maxItems: 2
- reset-names:
- items:
- - const: phy
- - const: common
- required:
- - vdda-phy-supply
- - vdda-pll-supply
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,ipq6018-qmp-pcie-phy
- - qcom,ipq8074-qmp-gen3-pcie-phy
- - qcom,ipq8074-qmp-pcie-phy
- then:
- properties:
- clocks:
- maxItems: 2
- clock-names:
- items:
- - const: aux
- - const: cfg_ahb
- resets:
- maxItems: 2
- reset-names:
- items:
- - const: phy
- - const: common
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,sc8180x-qmp-pcie-phy
- - qcom,sdm845-qhp-pcie-phy
- - qcom,sdm845-qmp-pcie-phy
- - qcom,sdx55-qmp-pcie-phy
- - qcom,sm8250-qmp-gen3x1-pcie-phy
- - qcom,sm8250-qmp-gen3x2-pcie-phy
- - qcom,sm8250-qmp-modem-pcie-phy
- - qcom,sm8450-qmp-gen3x1-pcie-phy
- - qcom,sm8450-qmp-gen4x2-pcie-phy
- then:
- properties:
- clocks:
- maxItems: 4
- clock-names:
- items:
- - const: aux
- - const: cfg_ahb
- - const: ref
- - const: refgen
- resets:
- maxItems: 1
- reset-names:
- items:
- - const: phy
- required:
- - vdda-phy-supply
- - vdda-pll-supply
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,sc8180x-qmp-pcie-phy
- - qcom,sm8250-qmp-gen3x2-pcie-phy
- - qcom,sm8250-qmp-modem-pcie-phy
- - qcom,sm8450-qmp-gen4x2-pcie-phy
- then:
- patternProperties:
- "^phy@[0-9a-f]+$":
- properties:
- reg:
- items:
- - description: TX lane 1
- - description: RX lane 1
- - description: PCS
- - description: TX lane 2
- - description: RX lane 2
- - description: PCS_MISC
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,sdm845-qmp-pcie-phy
- - qcom,sdx55-qmp-pcie-phy
- - qcom,sm8250-qmp-gen3x1-pcie-phy
- - qcom,sm8450-qmp-gen3x1-pcie-phy
- then:
- patternProperties:
- "^phy@[0-9a-f]+$":
- properties:
- reg:
- items:
- - description: TX
- - description: RX
- - description: PCS
- - description: PCS_MISC
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,ipq6018-qmp-pcie-phy
- - qcom,ipq8074-qmp-pcie-phy
- - qcom,msm8998-qmp-pcie-phy
- - qcom,sdm845-qhp-pcie-phy
- then:
- patternProperties:
- "^phy@[0-9a-f]+$":
- properties:
- reg:
- items:
- - description: TX
- - description: RX
- - description: PCS
-
examples:
- |
- #include <dt-bindings/clock/qcom,gcc-sm8250.h>
- phy-wrapper@1c0e000 {
- compatible = "qcom,sm8250-qmp-gen3x2-pcie-phy";
- reg = <0x01c0e000 0x1c0>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x01c0e000 0x1000>;
-
- clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
- <&gcc GCC_PCIE_1_CFG_AHB_CLK>,
- <&gcc GCC_PCIE_WIGIG_CLKREF_EN>,
- <&gcc GCC_PCIE1_PHY_REFGEN_CLK>;
- clock-names = "aux", "cfg_ahb", "ref", "refgen";
-
- resets = <&gcc GCC_PCIE_1_PHY_BCR>;
- reset-names = "phy";
+ #include <dt-bindings/clock/qcom,gcc-ipq6018.h>
+ #include <dt-bindings/reset/qcom,gcc-ipq6018.h>
- vdda-phy-supply = <&vreg_l10c_0p88>;
- vdda-pll-supply = <&vreg_l6b_1p2>;
+ phy@84000 {
+ compatible = "qcom,ipq6018-qmp-pcie-phy";
+ reg = <0x0 0x00084000 0x0 0x1000>;
- phy@200 {
- reg = <0x200 0x170>,
- <0x400 0x200>,
- <0xa00 0x1f0>,
- <0x600 0x170>,
- <0x800 0x200>,
- <0xe00 0xf4>;
+ clocks = <&gcc GCC_PCIE0_AUX_CLK>,
+ <&gcc GCC_PCIE0_AHB_CLK>,
+ <&gcc GCC_PCIE0_PIPE_CLK>;
+ clock-names = "aux",
+ "cfg_ahb",
+ "pipe";
- clocks = <&gcc GCC_PCIE_1_PIPE_CLK>;
+ clock-output-names = "gcc_pcie0_pipe_clk_src";
+ #clock-cells = <0>;
- #clock-cells = <0>;
- clock-output-names = "pcie_1_pipe_clk";
+ #phy-cells = <0>;
- #phy-cells = <0>;
- };
+ resets = <&gcc GCC_PCIE0_PHY_BCR>,
+ <&gcc GCC_PCIE0PHY_PHY_BCR>;
+ reset-names = "phy",
+ "common";
};
diff --git a/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-ufs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-ufs-phy.yaml
deleted file mode 100644
index 881ba543fd46..000000000000
--- a/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-ufs-phy.yaml
+++ /dev/null
@@ -1,228 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/phy/qcom,msm8996-qmp-ufs-phy.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Qualcomm QMP PHY controller (UFS, MSM8996)
-
-maintainers:
- - Vinod Koul <vkoul@kernel.org>
-
-description:
- QMP PHY controller supports physical layer functionality for a number of
- controllers on Qualcomm chipsets, such as, PCIe, UFS, and USB.
-
- Note that these bindings are for SoCs up to SC8180X. For newer SoCs, see
- qcom,sc8280xp-qmp-ufs-phy.yaml.
-
-properties:
- compatible:
- enum:
- - qcom,msm8996-qmp-ufs-phy
- - qcom,msm8998-qmp-ufs-phy
- - qcom,sc8180x-qmp-ufs-phy
- - qcom,sdm845-qmp-ufs-phy
- - qcom,sm6115-qmp-ufs-phy
- - qcom,sm6350-qmp-ufs-phy
- - qcom,sm8150-qmp-ufs-phy
- - qcom,sm8250-qmp-ufs-phy
- - qcom,sm8350-qmp-ufs-phy
- - qcom,sm8450-qmp-ufs-phy
-
- reg:
- items:
- - description: serdes
-
- "#address-cells":
- enum: [ 1, 2 ]
-
- "#size-cells":
- enum: [ 1, 2 ]
-
- ranges: true
-
- clocks:
- minItems: 1
- maxItems: 3
-
- clock-names:
- minItems: 1
- maxItems: 3
-
- power-domains:
- maxItems: 1
-
- resets:
- maxItems: 1
-
- reset-names:
- items:
- - const: ufsphy
-
- vdda-phy-supply: true
-
- vdda-pll-supply: true
-
- vddp-ref-clk-supply: true
-
-patternProperties:
- "^phy@[0-9a-f]+$":
- type: object
- description: single PHY-provider child node
- properties:
- reg:
- minItems: 3
- maxItems: 6
-
- "#clock-cells":
- const: 1
-
- "#phy-cells":
- const: 0
-
- required:
- - reg
- - "#phy-cells"
-
- additionalProperties: false
-
-required:
- - compatible
- - reg
- - "#address-cells"
- - "#size-cells"
- - ranges
- - clocks
- - clock-names
- - resets
- - reset-names
- - vdda-phy-supply
- - vdda-pll-supply
-
-additionalProperties: false
-
-allOf:
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,msm8996-qmp-ufs-phy
- then:
- properties:
- clocks:
- maxItems: 1
- clock-names:
- items:
- - const: ref
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,msm8998-qmp-ufs-phy
- - qcom,sc8180x-qmp-ufs-phy
- - qcom,sdm845-qmp-ufs-phy
- - qcom,sm6115-qmp-ufs-phy
- - qcom,sm6350-qmp-ufs-phy
- - qcom,sm8150-qmp-ufs-phy
- - qcom,sm8250-qmp-ufs-phy
- then:
- properties:
- clocks:
- maxItems: 2
- clock-names:
- items:
- - const: ref
- - const: ref_aux
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,sm8450-qmp-ufs-phy
- then:
- properties:
- clocks:
- maxItems: 3
- clock-names:
- items:
- - const: ref
- - const: ref_aux
- - const: qref
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,msm8998-qmp-ufs-phy
- - qcom,sc8180x-qmp-ufs-phy
- - qcom,sdm845-qmp-ufs-phy
- - qcom,sm6350-qmp-ufs-phy
- - qcom,sm8150-qmp-ufs-phy
- - qcom,sm8250-qmp-ufs-phy
- - qcom,sm8350-qmp-ufs-phy
- - qcom,sm8450-qmp-ufs-phy
- then:
- patternProperties:
- "^phy@[0-9a-f]+$":
- properties:
- reg:
- items:
- - description: TX lane 1
- - description: RX lane 1
- - description: PCS
- - description: TX lane 2
- - description: RX lane 2
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,msm8996-qmp-ufs-phy
- - qcom,sm6115-qmp-ufs-phy
- then:
- patternProperties:
- "^phy@[0-9a-f]+$":
- properties:
- reg:
- items:
- - description: TX
- - description: RX
- - description: PCS
-
-examples:
- - |
- #include <dt-bindings/clock/qcom,gcc-sm8250.h>
- #include <dt-bindings/clock/qcom,rpmh.h>
-
- phy-wrapper@1d87000 {
- compatible = "qcom,sm8250-qmp-ufs-phy";
- reg = <0x01d87000 0x1c0>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x01d87000 0x1000>;
-
- clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
- clock-names = "ref", "ref_aux";
-
- resets = <&ufs_mem_hc 0>;
- reset-names = "ufsphy";
-
- vdda-phy-supply = <&vreg_l6b>;
- vdda-pll-supply = <&vreg_l3b>;
-
- phy@400 {
- reg = <0x400 0x108>,
- <0x600 0x1e0>,
- <0xc00 0x1dc>,
- <0x800 0x108>,
- <0xa00 0x1e0>;
- #phy-cells = <0>;
- };
- };
diff --git a/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml
index 4c96dab5b9e3..827109d37041 100644
--- a/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,msm8996-qmp-usb3-phy.yaml
@@ -23,25 +23,16 @@ properties:
- qcom,ipq8074-qmp-usb3-phy
- qcom,msm8996-qmp-usb3-phy
- qcom,msm8998-qmp-usb3-phy
- - qcom,sc7180-qmp-usb3-phy
- - qcom,sc8180x-qmp-usb3-phy
- - qcom,sdm845-qmp-usb3-phy
- qcom,sdm845-qmp-usb3-uni-phy
- qcom,sdx55-qmp-usb3-uni-phy
- qcom,sdx65-qmp-usb3-uni-phy
- - qcom,sm8150-qmp-usb3-phy
- qcom,sm8150-qmp-usb3-uni-phy
- - qcom,sm8250-qmp-usb3-phy
- qcom,sm8250-qmp-usb3-uni-phy
- - qcom,sm8350-qmp-usb3-phy
- qcom,sm8350-qmp-usb3-uni-phy
- - qcom,sm8450-qmp-usb3-phy
reg:
- minItems: 1
items:
- description: serdes
- - description: DP_COM
"#address-cells":
enum: [ 1, 2 ]
@@ -131,28 +122,6 @@ allOf:
compatible:
contains:
enum:
- - qcom,sc7180-qmp-usb3-phy
- then:
- properties:
- clocks:
- maxItems: 4
- clock-names:
- items:
- - const: aux
- - const: cfg_ahb
- - const: ref
- - const: com_aux
- resets:
- maxItems: 1
- reset-names:
- items:
- - const: phy
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- qcom,sdm845-qmp-usb3-uni-phy
then:
properties:
@@ -202,7 +171,6 @@ allOf:
compatible:
contains:
enum:
- - qcom,sm8150-qmp-usb3-phy
- qcom,sm8150-qmp-usb3-uni-phy
- qcom,sm8250-qmp-usb3-uni-phy
- qcom,sm8350-qmp-usb3-uni-phy
@@ -228,51 +196,6 @@ allOf:
compatible:
contains:
enum:
- - qcom,sm8250-qmp-usb3-phy
- - qcom,sm8350-qmp-usb3-phy
- then:
- properties:
- clocks:
- maxItems: 3
- clock-names:
- items:
- - const: aux
- - const: ref_clk_src
- - const: com_aux
- resets:
- maxItems: 2
- reset-names:
- items:
- - const: phy
- - const: common
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,sdm845-qmp-usb3-phy
- - qcom,sm8150-qmp-usb3-phy
- - qcom,sm8350-qmp-usb3-phy
- - qcom,sm8450-qmp-usb3-phy
- then:
- patternProperties:
- "^phy@[0-9a-f]+$":
- properties:
- reg:
- items:
- - description: TX lane 1
- - description: RX lane 1
- - description: PCS
- - description: TX lane 2
- - description: RX lane 2
- - description: PCS_MISC
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- qcom,msm8998-qmp-usb3-phy
then:
patternProperties:
@@ -293,12 +216,9 @@ allOf:
enum:
- qcom,ipq6018-qmp-usb3-phy
- qcom,ipq8074-qmp-usb3-phy
- - qcom,sc7180-qmp-usb3-phy
- - qcom,sc8180x-qmp-usb3-phy
- qcom,sdx55-qmp-usb3-uni-phy
- qcom,sdx65-qmp-usb3-uni-phy
- qcom,sm8150-qmp-usb3-uni-phy
- - qcom,sm8250-qmp-usb3-phy
then:
patternProperties:
"^phy@[0-9a-f]+$":
diff --git a/Documentation/devicetree/bindings/phy/qcom,msm8998-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,msm8998-qmp-pcie-phy.yaml
new file mode 100644
index 000000000000..d05eef0e1ccd
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/qcom,msm8998-qmp-pcie-phy.yaml
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/qcom,msm8998-qmp-pcie-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm QMP PHY controller (PCIe, MSM8998)
+
+maintainers:
+ - Vinod Koul <vkoul@kernel.org>
+
+description:
+ The QMP PHY controller supports physical layer functionality for a number of
+ controllers on Qualcomm chipsets, such as PCIe, UFS, and USB.
+
+properties:
+ compatible:
+ const: qcom,msm8998-qmp-pcie-phy
+
+ reg:
+ items:
+ - description: serdes
+
+ clocks:
+ maxItems: 4
+
+ clock-names:
+ items:
+ - const: aux
+ - const: cfg_ahb
+ - const: ref
+ - const: pipe
+
+ resets:
+ maxItems: 2
+
+ reset-names:
+ items:
+ - const: phy
+ - const: common
+
+ vdda-phy-supply: true
+
+ vdda-pll-supply: true
+
+ "#clock-cells":
+ const: 0
+
+ clock-output-names:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+ - vdda-phy-supply
+ - vdda-pll-supply
+ - "#clock-cells"
+ - clock-output-names
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-msm8998.h>
+
+ phy@1c18000 {
+ compatible = "qcom,msm8998-qmp-pcie-phy";
+ reg = <0x01c18000 0x1000>;
+
+ clocks = <&gcc GCC_PCIE_PHY_AUX_CLK>,
+ <&gcc GCC_PCIE_0_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_CLKREF_CLK>,
+ <&gcc GCC_PCIE_0_PIPE_CLK>;
+ clock-names = "aux",
+ "cfg_ahb",
+ "ref",
+ "pipe";
+
+ clock-output-names = "pcie_0_pipe_clk_src";
+ #clock-cells = <0>;
+
+ #phy-cells = <0>;
+
+ resets = <&gcc GCC_PCIE_0_PHY_BCR>, <&gcc GCC_PCIE_PHY_BCR>;
+ reset-names = "phy", "common";
+
+ vdda-phy-supply = <&vreg_l1a_0p875>;
+ vdda-pll-supply = <&vreg_l2a_1p2>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc7180-qmp-usb3-dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc7180-qmp-usb3-dp-phy.yaml
deleted file mode 100644
index d30734338888..000000000000
--- a/Documentation/devicetree/bindings/phy/qcom,sc7180-qmp-usb3-dp-phy.yaml
+++ /dev/null
@@ -1,282 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/phy/qcom,sc7180-qmp-usb3-dp-phy.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Qualcomm QMP USB3 DP PHY controller (SC7180)
-
-description:
- The QMP PHY controller supports physical layer functionality for a number of
- controllers on Qualcomm chipsets, such as, PCIe, UFS and USB.
-
- Note that these bindings are for SoCs up to SC8180X. For newer SoCs, see
- qcom,sc8280xp-qmp-usb43dp-phy.yaml.
-
-maintainers:
- - Wesley Cheng <quic_wcheng@quicinc.com>
-
-properties:
- compatible:
- oneOf:
- - enum:
- - qcom,sc7180-qmp-usb3-dp-phy
- - qcom,sc8180x-qmp-usb3-dp-phy
- - qcom,sdm845-qmp-usb3-dp-phy
- - qcom,sm8250-qmp-usb3-dp-phy
- - items:
- - enum:
- - qcom,sc7280-qmp-usb3-dp-phy
- - const: qcom,sm8250-qmp-usb3-dp-phy
-
- reg:
- items:
- - description: Address and length of PHY's USB serdes block.
- - description: Address and length of the DP_COM control block.
- - description: Address and length of PHY's DP serdes block.
-
- reg-names:
- items:
- - const: usb
- - const: dp_com
- - const: dp
-
- "#address-cells":
- enum: [ 1, 2 ]
-
- "#size-cells":
- enum: [ 1, 2 ]
-
- ranges: true
-
- clocks:
- minItems: 3
- maxItems: 4
-
- clock-names:
- minItems: 3
- maxItems: 4
-
- power-domains:
- maxItems: 1
-
- orientation-switch:
- description: Flag the port as possible handler of orientation switching
- type: boolean
-
- resets:
- items:
- - description: reset of phy block.
- - description: phy common block reset.
-
- reset-names:
- items:
- - const: phy
- - const: common
-
- vdda-phy-supply:
- description:
- Phandle to a regulator supply to PHY core block.
-
- vdda-pll-supply:
- description:
- Phandle to 1.8V regulator supply to PHY refclk pll block.
-
- vddp-ref-clk-supply:
- description:
- Phandle to a regulator supply to any specific refclk pll block.
-
-# Required nodes:
-patternProperties:
- "^usb3-phy@[0-9a-f]+$":
- type: object
- additionalProperties: false
- description:
- The USB3 PHY.
-
- properties:
- reg:
- items:
- - description: Address and length of TX.
- - description: Address and length of RX.
- - description: Address and length of PCS.
- - description: Address and length of TX2.
- - description: Address and length of RX2.
- - description: Address and length of pcs_misc.
-
- clocks:
- items:
- - description: pipe clock
-
- clock-names:
- deprecated: true
- items:
- - const: pipe0
-
- clock-output-names:
- items:
- - const: usb3_phy_pipe_clk_src
-
- '#clock-cells':
- const: 0
-
- '#phy-cells':
- const: 0
-
- required:
- - reg
- - clocks
- - '#clock-cells'
- - '#phy-cells'
-
- "^dp-phy@[0-9a-f]+$":
- type: object
- additionalProperties: false
- description:
- The DP PHY.
-
- properties:
- reg:
- items:
- - description: Address and length of TX.
- - description: Address and length of RX.
- - description: Address and length of PCS.
- - description: Address and length of TX2.
- - description: Address and length of RX2.
-
- '#clock-cells':
- const: 1
-
- '#phy-cells':
- const: 0
-
- required:
- - reg
- - '#clock-cells'
- - '#phy-cells'
-
-required:
- - compatible
- - reg
- - "#address-cells"
- - "#size-cells"
- - ranges
- - clocks
- - clock-names
- - resets
- - reset-names
- - vdda-phy-supply
- - vdda-pll-supply
-
-allOf:
- - if:
- properties:
- compatible:
- enum:
- - qcom,sc7180-qmp-usb3-dp-phy
- - qcom,sdm845-qmp-usb3-dp-phy
- then:
- properties:
- clocks:
- items:
- - description: Phy aux clock
- - description: Phy config clock
- - description: 19.2 MHz ref clk
- - description: Phy common block aux clock
- clock-names:
- items:
- - const: aux
- - const: cfg_ahb
- - const: ref
- - const: com_aux
-
- - if:
- properties:
- compatible:
- enum:
- - qcom,sc8180x-qmp-usb3-dp-phy
- then:
- properties:
- clocks:
- items:
- - description: Phy aux clock
- - description: 19.2 MHz ref clk
- - description: Phy common block aux clock
- clock-names:
- items:
- - const: aux
- - const: ref
- - const: com_aux
-
- - if:
- properties:
- compatible:
- enum:
- - qcom,sm8250-qmp-usb3-dp-phy
- then:
- properties:
- clocks:
- items:
- - description: Phy aux clock
- - description: Board XO source
- - description: Phy common block aux clock
- clock-names:
- items:
- - const: aux
- - const: ref_clk_src
- - const: com_aux
-
-additionalProperties: false
-
-examples:
- - |
- #include <dt-bindings/clock/qcom,gcc-sdm845.h>
- usb_1_qmpphy: phy-wrapper@88e9000 {
- compatible = "qcom,sdm845-qmp-usb3-dp-phy";
- reg = <0x088e9000 0x18c>,
- <0x088e8000 0x10>,
- <0x088ea000 0x40>;
- reg-names = "usb", "dp_com", "dp";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x088e9000 0x2000>;
-
- clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
- <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
- <&gcc GCC_USB3_PRIM_CLKREF_CLK>,
- <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
- clock-names = "aux", "cfg_ahb", "ref", "com_aux";
-
- resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
- <&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
- reset-names = "phy", "common";
-
- vdda-phy-supply = <&vdda_usb2_ss_1p2>;
- vdda-pll-supply = <&vdda_usb2_ss_core>;
-
- orientation-switch;
-
- usb3-phy@200 {
- reg = <0x200 0x128>,
- <0x400 0x200>,
- <0xc00 0x218>,
- <0x600 0x128>,
- <0x800 0x200>,
- <0xa00 0x100>;
- #clock-cells = <0>;
- #phy-cells = <0>;
- clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
- clock-output-names = "usb3_phy_pipe_clk_src";
- };
-
- dp-phy@88ea200 {
- reg = <0xa200 0x200>,
- <0xa400 0x200>,
- <0xaa00 0x200>,
- <0xa600 0x200>,
- <0xa800 0x200>;
- #clock-cells = <1>;
- #phy-cells = <0>;
- };
- };
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
index a0407fc79563..2c3d6553a7ba 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
@@ -16,11 +16,24 @@ description:
properties:
compatible:
enum:
+ - qcom,sa8775p-qmp-gen4x2-pcie-phy
+ - qcom,sa8775p-qmp-gen4x4-pcie-phy
+ - qcom,sc8180x-qmp-pcie-phy
- qcom,sc8280xp-qmp-gen3x1-pcie-phy
- qcom,sc8280xp-qmp-gen3x2-pcie-phy
- qcom,sc8280xp-qmp-gen3x4-pcie-phy
+ - qcom,sdm845-qhp-pcie-phy
+ - qcom,sdm845-qmp-pcie-phy
+ - qcom,sdx55-qmp-pcie-phy
- qcom,sdx65-qmp-gen4x2-pcie-phy
+ - qcom,sm8150-qmp-gen3x1-pcie-phy
+ - qcom,sm8150-qmp-gen3x2-pcie-phy
+ - qcom,sm8250-qmp-gen3x1-pcie-phy
+ - qcom,sm8250-qmp-gen3x2-pcie-phy
+ - qcom,sm8250-qmp-modem-pcie-phy
- qcom,sm8350-qmp-gen3x1-pcie-phy
+ - qcom,sm8450-qmp-gen3x1-pcie-phy
+ - qcom,sm8450-qmp-gen4x2-pcie-phy
- qcom,sm8550-qmp-gen3x2-pcie-phy
- qcom,sm8550-qmp-gen4x2-pcie-phy
@@ -30,7 +43,7 @@ properties:
clocks:
minItems: 5
- maxItems: 6
+ maxItems: 7
clock-names:
minItems: 5
@@ -38,9 +51,10 @@ properties:
- const: aux
- const: cfg_ahb
- const: ref
- - const: rchng
+ - enum: [rchng, refgen]
- const: pipe
- const: pipediv2
+ - const: phy_aux
power-domains:
maxItems: 1
@@ -84,7 +98,6 @@ required:
- reg
- clocks
- clock-names
- - power-domains
- resets
- reset-names
- vdda-phy-supply
@@ -120,7 +133,18 @@ allOf:
compatible:
contains:
enum:
+ - qcom,sc8180x-qmp-pcie-phy
+ - qcom,sdm845-qhp-pcie-phy
+ - qcom,sdm845-qmp-pcie-phy
+ - qcom,sdx55-qmp-pcie-phy
+ - qcom,sm8150-qmp-gen3x1-pcie-phy
+ - qcom,sm8150-qmp-gen3x2-pcie-phy
+ - qcom,sm8250-qmp-gen3x1-pcie-phy
+ - qcom,sm8250-qmp-gen3x2-pcie-phy
+ - qcom,sm8250-qmp-modem-pcie-phy
- qcom,sm8350-qmp-gen3x1-pcie-phy
+ - qcom,sm8450-qmp-gen3x1-pcie-phy
+ - qcom,sm8450-qmp-gen3x2-pcie-phy
- qcom,sm8550-qmp-gen3x2-pcie-phy
- qcom,sm8550-qmp-gen4x2-pcie-phy
then:
@@ -129,7 +153,16 @@ allOf:
maxItems: 5
clock-names:
maxItems: 5
- else:
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sc8280xp-qmp-gen3x1-pcie-phy
+ - qcom,sc8280xp-qmp-gen3x2-pcie-phy
+ - qcom,sc8280xp-qmp-gen3x4-pcie-phy
+ then:
properties:
clocks:
minItems: 6
@@ -141,6 +174,20 @@ allOf:
compatible:
contains:
enum:
+ - qcom,sa8775p-qmp-gen4x2-pcie-phy
+ - qcom,sa8775p-qmp-gen4x4-pcie-phy
+ then:
+ properties:
+ clocks:
+ minItems: 7
+ clock-names:
+ minItems: 7
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,sm8550-qmp-gen4x2-pcie-phy
then:
properties:
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
index a1897a7606df..d981d77e82e4 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-ufs-phy.yaml
@@ -16,21 +16,31 @@ description:
properties:
compatible:
enum:
+ - qcom,msm8996-qmp-ufs-phy
+ - qcom,msm8998-qmp-ufs-phy
- qcom,sa8775p-qmp-ufs-phy
+ - qcom,sc8180x-qmp-ufs-phy
- qcom,sc8280xp-qmp-ufs-phy
+ - qcom,sdm845-qmp-ufs-phy
+ - qcom,sm6115-qmp-ufs-phy
- qcom,sm6125-qmp-ufs-phy
+ - qcom,sm6350-qmp-ufs-phy
- qcom,sm7150-qmp-ufs-phy
+ - qcom,sm8150-qmp-ufs-phy
+ - qcom,sm8250-qmp-ufs-phy
+ - qcom,sm8350-qmp-ufs-phy
+ - qcom,sm8450-qmp-ufs-phy
- qcom,sm8550-qmp-ufs-phy
reg:
maxItems: 1
clocks:
- minItems: 2
+ minItems: 1
maxItems: 3
clock-names:
- minItems: 2
+ minItems: 1
items:
- const: ref
- const: ref_aux
@@ -75,19 +85,51 @@ allOf:
contains:
enum:
- qcom,sa8775p-qmp-ufs-phy
+ - qcom,sm8450-qmp-ufs-phy
then:
properties:
clocks:
minItems: 3
clock-names:
minItems: 3
- else:
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8998-qmp-ufs-phy
+ - qcom,sc8180x-qmp-ufs-phy
+ - qcom,sc8280xp-qmp-ufs-phy
+ - qcom,sdm845-qmp-ufs-phy
+ - qcom,sm6115-qmp-ufs-phy
+ - qcom,sm6125-qmp-ufs-phy
+ - qcom,sm6350-qmp-ufs-phy
+ - qcom,sm7150-qmp-ufs-phy
+ - qcom,sm8150-qmp-ufs-phy
+ - qcom,sm8250-qmp-ufs-phy
+ - qcom,sm8350-qmp-ufs-phy
+ - qcom,sm8550-qmp-ufs-phy
+ then:
properties:
clocks:
maxItems: 2
clock-names:
maxItems: 2
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8996-qmp-ufs-phy
+ then:
+ properties:
+ clocks:
+ maxItems: 1
+ clock-names:
+ maxItems: 1
+
additionalProperties: false
examples:
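
For the compatibles folded into this schema the clock rules now vary per SoC; for instance qcom,sm8450-qmp-ufs-phy requires all three clocks. A rough sketch of such a node under the new single-reg style, reusing the register address and reset/supply phandles from the removed SM8250 example (the third clock specifier is an assumption):

    ufs_mem_phy: phy@1d87000 {
        compatible = "qcom,sm8450-qmp-ufs-phy";
        reg = <0x01d87000 0x1000>;

        clocks = <&rpmhcc RPMH_CXO_CLK>,
                 <&gcc GCC_UFS_PHY_PHY_AUX_CLK>,
                 <&gcc GCC_UFS_0_CLKREF_EN>;    /* "qref" source: assumed specifier */
        clock-names = "ref", "ref_aux", "qref";

        resets = <&ufs_mem_hc 0>;
        reset-names = "ufsphy";

        vdda-phy-supply = <&vreg_l6b>;
        vdda-pll-supply = <&vreg_l3b>;

        #phy-cells = <0>;
    };
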
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
index ef1c02d8ac88..9af203dc8793 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
@@ -16,8 +16,14 @@ description:
properties:
compatible:
enum:
+ - qcom,sc7180-qmp-usb3-dp-phy
+ - qcom,sc7280-qmp-usb3-dp-phy
+ - qcom,sc8180x-qmp-usb3-dp-phy
- qcom,sc8280xp-qmp-usb43dp-phy
+ - qcom,sdm845-qmp-usb3-dp-phy
- qcom,sm6350-qmp-usb3-dp-phy
+ - qcom,sm8150-qmp-usb3-dp-phy
+ - qcom,sm8250-qmp-usb3-dp-phy
- qcom,sm8350-qmp-usb3-dp-phy
- qcom,sm8450-qmp-usb3-dp-phy
- qcom,sm8550-qmp-usb3-dp-phy
@@ -26,14 +32,17 @@ properties:
maxItems: 1
clocks:
- maxItems: 4
+ minItems: 4
+ maxItems: 5
clock-names:
+ minItems: 4
items:
- const: aux
- const: ref
- const: com_aux
- const: usb3_pipe
+ - const: cfg_ahb
power-domains:
maxItems: 1
@@ -85,7 +94,6 @@ required:
- reg
- clocks
- clock-names
- - power-domains
- resets
- reset-names
- vdda-phy-supply
@@ -93,6 +101,40 @@ required:
- "#clock-cells"
- "#phy-cells"
+allOf:
+ - if:
+ properties:
+ compatible:
+ enum:
+ - qcom,sc7180-qmp-usb3-dp-phy
+ - qcom,sdm845-qmp-usb3-dp-phy
+ then:
+ properties:
+ clocks:
+ maxItems: 5
+ clock-names:
+ maxItems: 5
+ else:
+ properties:
+ clocks:
+ maxItems: 4
+ clock-names:
+ maxItems: 4
+
+ - if:
+ properties:
+ compatible:
+ enum:
+ - qcom,sc8280xp-qmp-usb43dp-phy
+ - qcom,sm6350-qmp-usb3-dp-phy
+ - qcom,sm8550-qmp-usb3-dp-phy
+ then:
+ required:
+ - power-domains
+ else:
+ properties:
+ power-domains: false
+
additionalProperties: false
examples:
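
With these entries moved over, an SDM845 node supplies the fifth "cfg_ahb" clock and omits power-domains. A sketch reusing the clock and reset specifiers from the removed qcom,sc7180-qmp-usb3-dp-phy.yaml example; the register size and the cell counts are assumptions about the consolidated schema:

    usb_1_qmpphy: phy@88e9000 {
        compatible = "qcom,sdm845-qmp-usb3-dp-phy";
        reg = <0x088e9000 0x3000>;

        clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
                 <&gcc GCC_USB3_PRIM_CLKREF_CLK>,
                 <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
                 <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
                 <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
        clock-names = "aux", "ref", "com_aux", "usb3_pipe", "cfg_ahb";

        resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
                 <&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
        reset-names = "phy", "common";

        vdda-phy-supply = <&vdda_usb2_ss_1p2>;
        vdda-pll-supply = <&vdda_usb2_ss_core>;

        #clock-cells = <1>;
        #phy-cells = <1>;
    };
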
diff --git a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
index 083fda530b48..029569d5fcf3 100644
--- a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
@@ -15,7 +15,12 @@ description:
properties:
compatible:
- const: qcom,pm8550b-eusb2-repeater
+ oneOf:
+ - items:
+ - enum:
+ - qcom,pm7550ba-eusb2-repeater
+ - const: qcom,pm8550b-eusb2-repeater
+ - const: qcom,pm8550b-eusb2-repeater
reg:
maxItems: 1
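
A PM7550BA repeater then lists the new string with the PM8550B fallback; a minimal sketch (the SPMI register address is an assumption):

    pm7550ba_eusb2_repeater: phy@fd00 {
        compatible = "qcom,pm7550ba-eusb2-repeater",
                     "qcom,pm8550b-eusb2-repeater";
        reg = <0xfd00>;
        #phy-cells = <0>;    /* assumed, as in existing PM8550B nodes */
    };
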
diff --git a/Documentation/devicetree/bindings/phy/realtek,usb2phy.yaml b/Documentation/devicetree/bindings/phy/realtek,usb2phy.yaml
new file mode 100644
index 000000000000..9911ada39ee7
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/realtek,usb2phy.yaml
@@ -0,0 +1,175 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2023 Realtek Semiconductor Corporation
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/realtek,usb2phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek DHC SoCs USB 2.0 PHY
+
+maintainers:
+ - Stanley Chang <stanley_chang@realtek.com>
+
+description: |
+ The Realtek USB 2.0 PHY supports the digital home center (DHC) RTD series
+ SoCs. The USB 2.0 PHY driver is designed to support the XHCI controller.
+ The SoCs support multiple XHCI controllers, and one PHY device node maps to
+ one XHCI controller.
+
+ RTD1295/RTD1619 SoCs USB
+ The USB architecture includes three XHCI controllers.
+ Each XHCI maps to one USB 2.0 PHY, and on some controllers also to one
+ USB 3.0 PHY.
+ XHCI controller#0 -- usb2phy -- phy#0
+ |- usb3phy -- phy#0
+ XHCI controller#1 -- usb2phy -- phy#0
+ XHCI controller#2 -- usb2phy -- phy#0
+ |- usb3phy -- phy#0
+
+ RTD1395 SoCs USB
+ The USB architecture includes two XHCI controllers.
+ Controller#0 has one USB 2.0 PHY. Controller#1 includes two USB 2.0
+ PHYs.
+ XHCI controller#0 -- usb2phy -- phy#0
+ XHCI controller#1 -- usb2phy -- phy#0
+ |- phy#1
+
+ RTD1319/RTD1619b SoCs USB
+ The USB architecture includes three XHCI controllers.
+ Each XHCI maps to one USB 2.0 PHY; controller#2 also maps to one USB 3.0 PHY.
+ XHCI controller#0 -- usb2phy -- phy#0
+ XHCI controller#1 -- usb2phy -- phy#0
+ XHCI controller#2 -- usb2phy -- phy#0
+ |- usb3phy -- phy#0
+
+ RTD1319d SoCs USB
+ The USB architecture includes three XHCI controllers.
+ Each XHCI maps to one USB 2.0 PHY; controller#0 also maps to one USB 3.0 PHY.
+ XHCI controller#0 -- usb2phy -- phy#0
+ |- usb3phy -- phy#0
+ XHCI controller#1 -- usb2phy -- phy#0
+ XHCI controller#2 -- usb2phy -- phy#0
+
+ RTD1312c/RTD1315e SoCs USB
+ The USB architecture includes three XHCI controllers.
+ Each XHCI maps to one USB 2.0 PHY.
+ XHCI controller#0 -- usb2phy -- phy#0
+ XHCI controller#1 -- usb2phy -- phy#0
+ XHCI controller#2 -- usb2phy -- phy#0
+
+properties:
+ compatible:
+ enum:
+ - realtek,rtd1295-usb2phy
+ - realtek,rtd1312c-usb2phy
+ - realtek,rtd1315e-usb2phy
+ - realtek,rtd1319-usb2phy
+ - realtek,rtd1319d-usb2phy
+ - realtek,rtd1395-usb2phy
+ - realtek,rtd1395-usb2phy-2port
+ - realtek,rtd1619-usb2phy
+ - realtek,rtd1619b-usb2phy
+
+ reg:
+ items:
+ - description: PHY data registers
+ - description: PHY control registers
+
+ "#phy-cells":
+ const: 0
+
+ nvmem-cells:
+ maxItems: 2
+ description:
+ Phandles to the nvmem cells that contain the trimming data.
+ If unspecified, default values are used.
+
+ nvmem-cell-names:
+ items:
+ - const: usb-dc-cal
+ - const: usb-dc-dis
+ description:
+ The names correspond to the nvmem-cells above.
+ usb-dc-cal is the driving level for each PHY, specified via efuse.
+ usb-dc-dis is the disconnection level for each PHY, specified via efuse.
+
+ realtek,inverse-hstx-sync-clock:
+ description:
+ For one of the PHYs on the RTD1619b SoC, the synchronous clock of the
+ high-speed TX must be inverted.
+ type: boolean
+
+ realtek,driving-level:
+ description:
+ Controls the magnitude of the high-speed Dp/Dm output swing (mV).
+ For a different board or port, the default magnitude may not meet the
+ specification; in that case this value can be adjusted to meet the
+ specification.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 8
+ minimum: 0
+ maximum: 31
+
+ realtek,driving-level-compensate:
+ description:
+ For the RTD1315e SoC, the driving level can be adjusted by reading the
+ efuse table. This property provides driving compensation.
+ If the magnitude of the high-speed Dp/Dm output swing still does not
+ meet the specification, this value can be set to compensate.
+ $ref: /schemas/types.yaml#/definitions/int32
+ default: 0
+ minimum: -8
+ maximum: 8
+
+ realtek,disconnection-compensate:
+ description:
+ This adjusts the disconnection level compensation for boards with
+ different disconnection levels.
+ $ref: /schemas/types.yaml#/definitions/int32
+ default: 0
+ minimum: -8
+ maximum: 8
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+
+allOf:
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - realtek,rtd1619b-usb2phy
+ then:
+ properties:
+ realtek,inverse-hstx-sync-clock: false
+
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - realtek,rtd1315e-usb2phy
+ then:
+ properties:
+ realtek,driving-level-compensate: false
+
+additionalProperties: false
+
+examples:
+ - |
+ usb-phy@13214 {
+ compatible = "realtek,rtd1619b-usb2phy";
+ reg = <0x13214 0x4>, <0x28280 0x4>;
+ #phy-cells = <0>;
+ nvmem-cells = <&otp_usb_port0_dc_cal>, <&otp_usb_port0_dc_dis>;
+ nvmem-cell-names = "usb-dc-cal", "usb-dc-dis";
+
+ realtek,inverse-hstx-sync-clock;
+ realtek,driving-level = <0xa>;
+ realtek,disconnection-compensate = <(-1)>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/realtek,usb3phy.yaml b/Documentation/devicetree/bindings/phy/realtek,usb3phy.yaml
new file mode 100644
index 000000000000..dfe2bb4e59e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/realtek,usb3phy.yaml
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2023 Realtek Semiconductor Corporation
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/realtek,usb3phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek DHC SoCs USB 3.0 PHY
+
+maintainers:
+ - Stanley Chang <stanley_chang@realtek.com>
+
+description: |
+ The Realtek USB 3.0 PHY supports the digital home center (DHC) RTD series
+ SoCs. The USB 3.0 PHY driver is designed to support the XHCI controller.
+ The SoCs support multiple XHCI controllers, and one PHY device node maps to
+ one XHCI controller.
+
+ RTD1295/RTD1619 SoCs USB
+ The USB architecture includes three XHCI controllers.
+ Each XHCI maps to one USB 2.0 PHY, and on some controllers also to one
+ USB 3.0 PHY.
+ XHCI controller#0 -- usb2phy -- phy#0
+ |- usb3phy -- phy#0
+ XHCI controller#1 -- usb2phy -- phy#0
+ XHCI controller#2 -- usb2phy -- phy#0
+ |- usb3phy -- phy#0
+
+ RTD1319/RTD1619b SoCs USB
+ The USB architecture includes three XHCI controllers.
+ Each XHCI maps to one USB 2.0 PHY; controller#2 also maps to one USB 3.0 PHY.
+ XHCI controller#0 -- usb2phy -- phy#0
+ XHCI controller#1 -- usb2phy -- phy#0
+ XHCI controller#2 -- usb2phy -- phy#0
+ |- usb3phy -- phy#0
+
+ RTD1319d SoCs USB
+ The USB architecture includes three XHCI controllers.
+ Each XHCI maps to one USB 2.0 PHY; controller#0 also maps to one USB 3.0 PHY.
+ XHCI controller#0 -- usb2phy -- phy#0
+ |- usb3phy -- phy#0
+ XHCI controller#1 -- usb2phy -- phy#0
+ XHCI controller#2 -- usb2phy -- phy#0
+
+properties:
+ compatible:
+ enum:
+ - realtek,rtd1295-usb3phy
+ - realtek,rtd1319-usb3phy
+ - realtek,rtd1319d-usb3phy
+ - realtek,rtd1619-usb3phy
+ - realtek,rtd1619b-usb3phy
+
+ reg:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+ nvmem-cells:
+ maxItems: 1
+ description: A phandle to the TX LFPS swing trim data provided by
+ an nvmem device. If unspecified, default values are used.
+
+ nvmem-cell-names:
+ items:
+ - const: usb_u3_tx_lfps_swing_trim
+
+ realtek,amplitude-control-coarse-tuning:
+ description:
+ This adjusts the signal amplitude for normal operation and beacon LFPS.
+ This value is a parameter for coarse tuning.
+ For different boards, if the default value is inappropriate, this
+ property can be set to adjust it.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 255
+ minimum: 0
+ maximum: 255
+
+ realtek,amplitude-control-fine-tuning:
+ description:
+ This adjusts the signal amplitude for normal operation and beacon LFPS.
+ This value is a parameter for fine tuning.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 65535
+ minimum: 0
+ maximum: 65535
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ usb-phy@13e10 {
+ compatible = "realtek,rtd1319d-usb3phy";
+ reg = <0x13e10 0x4>;
+ #phy-cells = <0>;
+
+ nvmem-cells = <&otp_usb_u3_tx_lfps_swing_trim>;
+ nvmem-cell-names = "usb_u3_tx_lfps_swing_trim";
+
+ realtek,amplitude-control-coarse-tuning = <0x77>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/rockchip,inno-usb2phy.yaml b/Documentation/devicetree/bindings/phy/rockchip,inno-usb2phy.yaml
index 0d6b8c28be07..5254413137c6 100644
--- a/Documentation/devicetree/bindings/phy/rockchip,inno-usb2phy.yaml
+++ b/Documentation/devicetree/bindings/phy/rockchip,inno-usb2phy.yaml
@@ -20,6 +20,7 @@ properties:
- rockchip,rk3366-usb2phy
- rockchip,rk3399-usb2phy
- rockchip,rk3568-usb2phy
+ - rockchip,rk3588-usb2phy
- rockchip,rv1108-usb2phy
reg:
@@ -56,6 +57,14 @@ properties:
description: Muxed interrupt for both ports
maxItems: 1
+ resets:
+ maxItems: 2
+
+ reset-names:
+ items:
+ - const: phy
+ - const: apb
+
rockchip,usbgrf:
$ref: /schemas/types.yaml#/definitions/phandle
description:
@@ -120,15 +129,21 @@ required:
- reg
- clock-output-names
- "#clock-cells"
- - host-port
- - otg-port
+
+anyOf:
+ - required:
+ - otg-port
+ - required:
+ - host-port
allOf:
- if:
properties:
compatible:
contains:
- const: rockchip,rk3568-usb2phy
+ enum:
+ - rockchip,rk3568-usb2phy
+ - rockchip,rk3588-usb2phy
then:
properties:
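
An RK3588 node can then describe both resets; a sketch with placeholder register address, clock and reset specifiers (all CRU identifiers here are assumptions):

    u2phy0: usb2phy@fd5d0000 {
        compatible = "rockchip,rk3588-usb2phy";
        reg = <0xfd5d0000 0x4000>;
        clocks = <&cru CLK_USB2PHY_REF>;          /* placeholder */
        clock-names = "phyclk";
        clock-output-names = "usb480m_phy0";
        #clock-cells = <0>;
        resets = <&cru SRST_OTGPHY_U3_0>,         /* placeholders */
                 <&cru SRST_P_U2PHY_GRF0>;
        reset-names = "phy", "apb";

        u2phy0_otg: otg-port {
            #phy-cells = <0>;
        };
    };
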
diff --git a/Documentation/devicetree/bindings/phy/rockchip,pcie3-phy.yaml b/Documentation/devicetree/bindings/phy/rockchip,pcie3-phy.yaml
index 9f2d8d2cc7a5..c4fbffcde6e4 100644
--- a/Documentation/devicetree/bindings/phy/rockchip,pcie3-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/rockchip,pcie3-phy.yaml
@@ -13,19 +13,18 @@ properties:
compatible:
enum:
- rockchip,rk3568-pcie3-phy
+ - rockchip,rk3588-pcie3-phy
reg:
maxItems: 1
clocks:
- minItems: 3
+ minItems: 1
maxItems: 3
clock-names:
- items:
- - const: refclk_m
- - const: refclk_n
- - const: pclk
+ minItems: 1
+ maxItems: 3
data-lanes:
description: which lanes (by position) should be mapped to which
@@ -61,6 +60,30 @@ required:
- rockchip,phy-grf
- "#phy-cells"
+allOf:
+ - if:
+ properties:
+ compatible:
+ enum:
+ - rockchip,rk3588-pcie3-phy
+ then:
+ properties:
+ clocks:
+ maxItems: 1
+ clock-names:
+ items:
+ - const: pclk
+ else:
+ properties:
+ clocks:
+ minItems: 3
+
+ clock-names:
+ items:
+ - const: refclk_m
+ - const: refclk_n
+ - const: pclk
+
additionalProperties: false
examples:
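
For RK3588 this reduces to a single "pclk"; a sketch with placeholder address and clock/GRF phandles (assumptions):

    pcie30phy: phy@fee80000 {
        compatible = "rockchip,rk3588-pcie3-phy";
        reg = <0xfee80000 0x20000>;
        clocks = <&cru PCLK_PCIE_COMBO_PIPE_PHY>;   /* placeholder specifier */
        clock-names = "pclk";
        rockchip,phy-grf = <&pcie30_phy_grf>;
        #phy-cells = <0>;
    };
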
diff --git a/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml b/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml
index 5c35e5ceec0b..46e64fa293d5 100644
--- a/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml
+++ b/Documentation/devicetree/bindings/phy/rockchip,px30-dsi-dphy.yaml
@@ -19,6 +19,7 @@ properties:
- rockchip,rk3128-dsi-dphy
- rockchip,rk3368-dsi-dphy
- rockchip,rk3568-dsi-dphy
+ - rockchip,rv1126-dsi-dphy
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
index 5ba55f9f20cc..452e584d9812 100644
--- a/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
@@ -29,6 +29,7 @@ properties:
- samsung,exynos5420-usbdrd-phy
- samsung,exynos5433-usbdrd-phy
- samsung,exynos7-usbdrd-phy
+ - samsung,exynos850-usbdrd-phy
clocks:
minItems: 2
diff --git a/Documentation/devicetree/bindings/phy/starfive,jh7110-dphy-rx.yaml b/Documentation/devicetree/bindings/phy/starfive,jh7110-dphy-rx.yaml
new file mode 100644
index 000000000000..7224cde6fce0
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/starfive,jh7110-dphy-rx.yaml
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/starfive,jh7110-dphy-rx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: StarFive SoC JH7110 MIPI D-PHY Rx Controller
+
+maintainers:
+ - Jack Zhu <jack.zhu@starfivetech.com>
+ - Changhuang Liang <changhuang.liang@starfivetech.com>
+
+description:
+ StarFive SoCs contain a MIPI CSI D-PHY based on M31 IP, used to
+ transfer CSI camera data.
+
+properties:
+ compatible:
+ const: starfive,jh7110-dphy-rx
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: config clock
+ - description: reference clock
+ - description: escape mode transmit clock
+
+ clock-names:
+ items:
+ - const: cfg
+ - const: ref
+ - const: tx
+
+ resets:
+ items:
+ - description: DPHY_HW reset
+ - description: DPHY_B09_ALWAYS_ON reset
+
+ power-domains:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - resets
+ - power-domains
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ phy@19820000 {
+ compatible = "starfive,jh7110-dphy-rx";
+ reg = <0x19820000 0x10000>;
+ clocks = <&ispcrg 3>,
+ <&ispcrg 4>,
+ <&ispcrg 5>;
+ clock-names = "cfg", "ref", "tx";
+ resets = <&ispcrg 2>,
+ <&ispcrg 3>;
+ power-domains = <&aon_syscon 1>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/starfive,jh7110-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/starfive,jh7110-pcie-phy.yaml
new file mode 100644
index 000000000000..2e83a6164cd1
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/starfive,jh7110-pcie-phy.yaml
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/starfive,jh7110-pcie-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: StarFive JH7110 PCIe 2.0 PHY
+
+maintainers:
+ - Minda Chen <minda.chen@starfivetech.com>
+
+properties:
+ compatible:
+ const: starfive,jh7110-pcie-phy
+
+ reg:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+ starfive,sys-syscon:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: phandle to System Register Controller sys_syscon node.
+ - description: PHY connect offset of SYS_SYSCONSAIF__SYSCFG register for USB PHY.
+ description:
+ The phandle to System Register Controller syscon node and the PHY connect offset
+ of SYS_SYSCONSAIF__SYSCFG register. This connects the PHY to the USB 3.0 controller.
+
+ starfive,stg-syscon:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: phandle to System Register Controller stg_syscon node.
+ - description: PHY mode offset of STG_SYSCONSAIF__SYSCFG register.
+ - description: PHY enable for USB offset of STG_SYSCONSAIF__SYSCFG register.
+ description:
+ The phandle to System Register Controller syscon node and the offset
+ of STG_SYSCONSAIF__SYSCFG register for the PCIe PHY. Two register offsets in total.
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ phy@10210000 {
+ compatible = "starfive,jh7110-pcie-phy";
+ reg = <0x10210000 0x10000>;
+ #phy-cells = <0>;
+ starfive,sys-syscon = <&sys_syscon 0x18>;
+ starfive,stg-syscon = <&stg_syscon 0x148 0x1f4>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/starfive,jh7110-usb-phy.yaml b/Documentation/devicetree/bindings/phy/starfive,jh7110-usb-phy.yaml
new file mode 100644
index 000000000000..269e9f9f12b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/starfive,jh7110-usb-phy.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/starfive,jh7110-usb-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: StarFive JH7110 USB 2.0 PHY
+
+maintainers:
+ - Minda Chen <minda.chen@starfivetech.com>
+
+properties:
+ compatible:
+ const: starfive,jh7110-usb-phy
+
+ reg:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+ clocks:
+ items:
+ - description: PHY 125m
+ - description: app 125m
+
+ clock-names:
+ items:
+ - const: 125m
+ - const: app_125m
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ phy@10200000 {
+ compatible = "starfive,jh7110-usb-phy";
+ reg = <0x10200000 0x10000>;
+ clocks = <&syscrg 95>,
+ <&stgcrg 6>;
+ clock-names = "125m", "app_125m";
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.txt b/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.txt
deleted file mode 100644
index c42eecfc81ed..000000000000
--- a/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Broadcom Kona PWM controller device tree bindings
-
-This controller has 6 channels.
-
-Required Properties :
-- compatible: should contain "brcm,kona-pwm"
-- reg: physical base address and length of the controller's registers
-- clocks: phandle + clock specifier pair for the external clock
-- #pwm-cells: Should be 3. See pwm.yaml in this directory for a
- description of the cells format.
-
-Refer to clocks/clock-bindings.txt for generic clock consumer properties.
-
-Example:
-
-pwm: pwm@3e01a000 {
- compatible = "brcm,bcm11351-pwm", "brcm,kona-pwm";
- reg = <0x3e01a000 0xc4>;
- clocks = <&pwm_clk>;
- #pwm-cells = <3>;
-};
diff --git a/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.yaml b/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.yaml
new file mode 100644
index 000000000000..e86c8053b366
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/brcm,kona-pwm.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/brcm,kona-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Kona family PWM controller
+
+description:
+ This controller has 6 channels.
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+allOf:
+ - $ref: pwm.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - brcm,bcm11351-pwm
+ - const: brcm,kona-pwm
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ '#pwm-cells':
+ const: 3
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/bcm281xx.h>
+
+ pwm@3e01a000 {
+ compatible = "brcm,bcm11351-pwm", "brcm,kona-pwm";
+ reg = <0x3e01a000 0xcc>;
+ clocks = <&slave_ccu BCM281XX_SLAVE_CCU_PWM>;
+ #pwm-cells = <3>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml
index 0c3910f152d1..30632efdad8b 100644
--- a/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/fsl,imx-rproc.yaml
@@ -20,7 +20,9 @@ properties:
- fsl,imx7ulp-cm4
- fsl,imx8mm-cm4
- fsl,imx8mn-cm7
+ - fsl,imx8mn-cm7-mmio
- fsl,imx8mp-cm7
+ - fsl,imx8mp-cm7-mmio
- fsl,imx8mq-cm4
- fsl,imx8qm-cm4
- fsl,imx8qxp-cm4
@@ -70,6 +72,11 @@ properties:
description:
Specify CPU entry address for SCU enabled processor.
+ fsl,iomuxc-gpr:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the IOMUXC GPR block which provides access to the CM7 CPUWAIT bit.
+
fsl,resource-id:
$ref: /schemas/types.yaml#/definitions/uint32
description:
@@ -79,6 +86,19 @@ properties:
required:
- compatible
+allOf:
+ - if:
+ properties:
+ compatible:
+ not:
+ contains:
+ enum:
+ - fsl,imx8mn-cm7-mmio
+ - fsl,imx8mp-cm7-mmio
+ then:
+ properties:
+ fsl,iomuxc-gpr: false
+
additionalProperties: false
examples:
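
The new -mmio variants expose the CM7 CPUWAIT control through the IOMUXC GPR; a sketch of an i.MX8MP node (the mailbox, memory-region and syscon phandles are assumptions carried over from the non-MMIO flavour):

    cm7: imx8mp-cm7 {
        compatible = "fsl,imx8mp-cm7-mmio";
        clocks = <&clk IMX8MP_CLK_M7_CORE>;
        mbox-names = "tx", "rx", "rxdb";
        mboxes = <&mu 0 1>, <&mu 1 1>, <&mu 3 1>;
        memory-region = <&vdevbuffer>, <&vdev0vring0>, <&vdev0vring1>, <&rsc_table>;
        syscon = <&src>;
        fsl,iomuxc-gpr = <&iomuxc_gpr>;
    };
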
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
index 643ee787a81f..a2b0079de039 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
@@ -26,6 +26,7 @@ properties:
- qcom,sdm660-adsp-pas
- qcom,sdm845-adsp-pas
- qcom,sdm845-cdsp-pas
+ - qcom,sdm845-slpi-pas
reg:
maxItems: 1
@@ -44,8 +45,13 @@ properties:
maxItems: 1
description: Reference to the reserved-memory for the Hexagon core
+ firmware-name:
+ maxItems: 1
+ description: Firmware name for the Hexagon core
+
required:
- compatible
+ - memory-region
unevaluatedProperties: false
@@ -63,6 +69,7 @@ allOf:
- qcom,msm8998-adsp-pas
- qcom,sdm845-adsp-pas
- qcom,sdm845-cdsp-pas
+ - qcom,sdm845-slpi-pas
then:
properties:
clocks:
@@ -104,6 +111,7 @@ allOf:
- qcom,msm8998-slpi-pas
- qcom,sdm845-adsp-pas
- qcom,sdm845-cdsp-pas
+ - qcom,sdm845-slpi-pas
then:
properties:
interrupts:
@@ -160,6 +168,22 @@ allOf:
- if:
properties:
compatible:
+ enum:
+ - qcom,sdm845-slpi-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: LCX power domain
+ - description: LMX power domain
+ power-domain-names:
+ items:
+ - const: lcx
+ - const: lmx
+
+ - if:
+ properties:
+ compatible:
contains:
enum:
- qcom,msm8226-adsp-pil
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,msm8996-mss-pil.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,msm8996-mss-pil.yaml
index c1ac6ca1e759..0643faae2c39 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,msm8996-mss-pil.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,msm8996-mss-pil.yaml
@@ -19,6 +19,7 @@ properties:
enum:
- qcom,msm8996-mss-pil
- qcom,msm8998-mss-pil
+ - qcom,sdm660-mss-pil
- qcom,sdm845-mss-pil
reg:
@@ -215,13 +216,12 @@ allOf:
- description: GCC MSS IFACE clock
- description: GCC MSS BUS clock
- description: GCC MSS MEM clock
- - description: RPMH XO clock
+ - description: RPM XO clock
- description: GCC MSS GPLL0 clock
- description: GCC MSS SNOC_AXI clock
- description: GCC MSS MNOC_AXI clock
- - description: RPMH PNOC clock
- - description: GCC MSS PRNG clock
- - description: RPMH QDSS clock
+ - description: RPM PNOC clock
+ - description: RPM QDSS clock
clock-names:
items:
- const: iface
@@ -245,7 +245,9 @@ allOf:
- if:
properties:
compatible:
- const: qcom,msm8998-mss-pil
+ enum:
+ - qcom,msm8998-mss-pil
+ - qcom,sdm660-mss-pil
then:
properties:
clocks:
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,pas-common.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,pas-common.yaml
index 171ef85de193..63a82e7a8bf8 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,pas-common.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,pas-common.yaml
@@ -82,7 +82,6 @@ required:
- clock-names
- interrupts
- interrupt-names
- - memory-region
- qcom,smem-states
- qcom,smem-state-names
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,qcs404-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,qcs404-pas.yaml
index 5efa0e5c0439..eb868a7ff4cd 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,qcs404-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,qcs404-pas.yaml
@@ -42,7 +42,7 @@ properties:
smd-edge: false
memory-region:
- minItems: 1
+ maxItems: 1
description: Reference to the reserved-memory for the Hexagon core
firmware-name:
@@ -52,6 +52,7 @@ properties:
required:
- compatible
- reg
+ - memory-region
allOf:
- $ref: /schemas/remoteproc/qcom,pas-common.yaml#
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sc7180-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sc7180-pas.yaml
index 5cefd2c58593..689d5d535331 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sc7180-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sc7180-pas.yaml
@@ -51,7 +51,7 @@ properties:
- const: mss
memory-region:
- minItems: 1
+ maxItems: 1
description: Reference to the reserved-memory for the Hexagon core
qcom,qmp:
@@ -67,6 +67,7 @@ properties:
required:
- compatible
- reg
+ - memory-region
allOf:
- $ref: /schemas/remoteproc/qcom,pas-common.yaml#
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sc8180x-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sc8180x-pas.yaml
index c1f8dd8d0e4c..4744a37b2b5d 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sc8180x-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sc8180x-pas.yaml
@@ -38,7 +38,7 @@ properties:
smd-edge: false
memory-region:
- minItems: 1
+ maxItems: 1
description: Reference to the reserved-memory for the Hexagon core
firmware-name:
@@ -48,6 +48,7 @@ properties:
required:
- compatible
- reg
+ - memory-region
allOf:
- $ref: /schemas/remoteproc/qcom,pas-common.yaml#
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sc8280xp-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sc8280xp-pas.yaml
index f6fbc531dc28..96d53baf6e00 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sc8280xp-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sc8280xp-pas.yaml
@@ -38,7 +38,7 @@ properties:
smd-edge: false
memory-region:
- minItems: 1
+ maxItems: 1
description: Reference to the reserved-memory for the Hexagon core
firmware-name:
@@ -48,6 +48,7 @@ properties:
required:
- compatible
- reg
+ - memory-region
allOf:
- $ref: /schemas/remoteproc/qcom,pas-common.yaml#
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sdx55-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sdx55-pas.yaml
index c66e298462c7..5d463272165f 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sdx55-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sdx55-pas.yaml
@@ -46,7 +46,7 @@ properties:
- const: mss
memory-region:
- minItems: 1
+ maxItems: 1
description: Reference to the reserved-memory for the Hexagon core
qcom,qmp:
@@ -62,6 +62,7 @@ properties:
required:
- compatible
- reg
+ - memory-region
allOf:
- $ref: /schemas/remoteproc/qcom,pas-common.yaml#
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sm6115-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sm6115-pas.yaml
index f5d1fa9f45f1..028287235912 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sm6115-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sm6115-pas.yaml
@@ -15,10 +15,19 @@ description:
properties:
compatible:
- enum:
- - qcom,sm6115-adsp-pas
- - qcom,sm6115-cdsp-pas
- - qcom,sm6115-mpss-pas
+ oneOf:
+ - enum:
+ - qcom,sm6115-adsp-pas
+ - qcom,sm6115-cdsp-pas
+ - qcom,sm6115-mpss-pas
+
+ - items:
+ - const: qcom,qcm2290-adsp-pas
+ - const: qcom,sm6115-adsp-pas
+
+ - items:
+ - const: qcom,qcm2290-mpss-pas
+ - const: qcom,sm6115-mpss-pas
reg:
maxItems: 1
@@ -32,7 +41,7 @@ properties:
- const: xo
memory-region:
- minItems: 1
+ maxItems: 1
description: Reference to the reserved-memory for the Hexagon core
smd-edge: false
@@ -44,15 +53,17 @@ properties:
required:
- compatible
- reg
+ - memory-region
allOf:
- $ref: /schemas/remoteproc/qcom,pas-common.yaml#
- if:
properties:
compatible:
- enum:
- - qcom,sm6115-adsp-pas
- - qcom,sm6115-cdsp-pas
+ contains:
+ enum:
+ - qcom,sm6115-adsp-pas
+ - qcom,sm6115-cdsp-pas
then:
properties:
interrupts:
@@ -69,9 +80,10 @@ allOf:
- if:
properties:
compatible:
- enum:
- - qcom,sm6115-cdsp-pas
- - qcom,sm6115-mpss-pas
+ contains:
+ enum:
+ - qcom,sm6115-cdsp-pas
+ - qcom,sm6115-mpss-pas
then:
properties:
power-domains:
@@ -84,8 +96,9 @@ allOf:
- if:
properties:
compatible:
- enum:
- - qcom,sm6115-adsp-pas
+ contains:
+ enum:
+ - qcom,sm6115-adsp-pas
then:
properties:
power-domains:
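
A QCM2290 ADSP remoteproc then pairs the new string with the SM6115 fallback and, per the tightened schema, must carry memory-region; a sketch with placeholder address, clock specifier, reserved-memory name and firmware path (all assumptions):

    remoteproc@ab00000 {
        compatible = "qcom,qcm2290-adsp-pas", "qcom,sm6115-adsp-pas";
        reg = <0x0ab00000 0x100>;

        clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>;
        clock-names = "xo";

        memory-region = <&pil_adsp_mem>;
        firmware-name = "qcom/qcm2290/adsp.mbn";

        /* interrupts, power-domains, qcom,smem-states and the glink-edge
         * required by the shared pas-common schema are omitted here */
    };
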
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sm6350-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sm6350-pas.yaml
index fee02fa800b5..f7e40fb166da 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sm6350-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sm6350-pas.yaml
@@ -36,7 +36,7 @@ properties:
description: Reference to the AOSS side-channel message RAM.
memory-region:
- minItems: 1
+ maxItems: 1
description: Reference to the reserved-memory for the Hexagon core
smd-edge: false
@@ -48,6 +48,7 @@ properties:
required:
- compatible
- reg
+ - memory-region
allOf:
- $ref: /schemas/remoteproc/qcom,pas-common.yaml#
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sm8150-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sm8150-pas.yaml
index 2c085ac2c3fb..238c6e5e67c5 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sm8150-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sm8150-pas.yaml
@@ -40,7 +40,7 @@ properties:
description: Reference to the AOSS side-channel message RAM.
memory-region:
- minItems: 1
+ maxItems: 1
description: Reference to the reserved-memory for the Hexagon core
smd-edge: false
@@ -52,6 +52,7 @@ properties:
required:
- compatible
- reg
+ - memory-region
allOf:
- $ref: /schemas/remoteproc/qcom,pas-common.yaml#
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sm8350-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sm8350-pas.yaml
index 9a04d192e7e4..53cea8e53a31 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sm8350-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sm8350-pas.yaml
@@ -43,7 +43,7 @@ properties:
smd-edge: false
memory-region:
- minItems: 1
+ maxItems: 1
description: Reference to the reserved-memory for the Hexagon core
firmware-name:
@@ -53,6 +53,7 @@ properties:
required:
- compatible
- reg
+ - memory-region
allOf:
- $ref: /schemas/remoteproc/qcom,pas-common.yaml#
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml
index fe216aa531ed..58120829fb06 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,sm8550-pas.yaml
@@ -53,6 +53,7 @@ properties:
required:
- compatible
- reg
+ - memory-region
allOf:
- $ref: /schemas/remoteproc/qcom,pas-common.yaml#
diff --git a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.yaml b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.yaml
index 4d2bef15fb7a..c8bb2eef442d 100644
--- a/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/atmel,at91rm9200-rtc.yaml
@@ -14,13 +14,17 @@ maintainers:
properties:
compatible:
- enum:
- - atmel,at91rm9200-rtc
- - atmel,at91sam9x5-rtc
- - atmel,sama5d4-rtc
- - atmel,sama5d2-rtc
- - microchip,sam9x60-rtc
- - microchip,sama7g5-rtc
+ oneOf:
+ - enum:
+ - atmel,at91rm9200-rtc
+ - atmel,at91sam9x5-rtc
+ - atmel,sama5d4-rtc
+ - atmel,sama5d2-rtc
+ - microchip,sam9x60-rtc
+ - microchip,sama7g5-rtc
+ - items:
+ - const: microchip,sam9x7-rtc
+ - const: microchip,sam9x60-rtc
reg:
maxItems: 1
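
A SAM9X7 RTC then falls back to the SAM9X60 entry; a minimal sketch (the address, interrupt specifier and clock phandle are assumptions):

    rtc@e001d0a8 {
        compatible = "microchip,sam9x7-rtc", "microchip,sam9x60-rtc";
        reg = <0xe001d0a8 0x30>;
        interrupts = <22 IRQ_TYPE_LEVEL_HIGH 7>;   /* placeholder specifier */
        clocks = <&clk32k 1>;
    };
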
diff --git a/Documentation/devicetree/bindings/rtc/intersil,isl12022.yaml b/Documentation/devicetree/bindings/rtc/intersil,isl12022.yaml
new file mode 100644
index 000000000000..c2d1441ef273
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/intersil,isl12022.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/intersil,isl12022.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intersil ISL12022 Real-time Clock
+
+maintainers:
+ - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+properties:
+ compatible:
+ const: isil,isl12022
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 0
+
+ isil,battery-trip-levels-microvolt:
+ description:
+ The battery voltages at which the first alarm and second alarm
+ should trigger (normally ~85% and ~75% of nominal V_BAT).
+ items:
+ - enum: [2125000, 2295000, 2550000, 2805000, 3060000, 4250000, 4675000]
+ - enum: [1875000, 2025000, 2250000, 2475000, 2700000, 3750000, 4125000]
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: rtc.yaml#
+ # If #clock-cells is present, interrupts must not be present
+ - if:
+ required:
+ - '#clock-cells'
+ then:
+ properties:
+ interrupts: false
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ rtc@6f {
+ compatible = "isil,isl12022";
+ reg = <0x6f>;
+ interrupts-extended = <&gpio1 5 IRQ_TYPE_LEVEL_LOW>;
+ isil,battery-trip-levels-microvolt = <2550000>, <2250000>;
+ };
+ };
+
+...
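
Because the schema above forbids the interrupt line once #clock-cells is present, a board wiring the chip as a 32 kHz clock provider would instead look roughly like this:

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        rtc@6f {
            compatible = "isil,isl12022";
            reg = <0x6f>;
            #clock-cells = <0>;
        };
    };
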
diff --git a/Documentation/devicetree/bindings/rtc/maxim,ds3231.txt b/Documentation/devicetree/bindings/rtc/maxim,ds3231.txt
deleted file mode 100644
index 85be53a42180..000000000000
--- a/Documentation/devicetree/bindings/rtc/maxim,ds3231.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-* Maxim DS3231 Real Time Clock
-
-Required properties:
-- compatible: Should contain "maxim,ds3231".
-- reg: I2C address for chip.
-
-Optional property:
-- #clock-cells: Should be 1.
-- clock-output-names:
- overwrite the default clock names "ds3231_clk_sqw" and "ds3231_clk_32khz".
-
-Each clock is assigned an identifier and client nodes can use this identifier
-to specify the clock which they consume. Following indices are allowed:
- - 0: square-wave output on the SQW pin
- - 1: square-wave output on the 32kHz pin
-
-- interrupts: rtc alarm/event interrupt. When this property is selected,
- clock on the SQW pin cannot be used.
-
-Example:
-
-ds3231: ds3231@51 {
- compatible = "maxim,ds3231";
- reg = <0x68>;
- #clock-cells = <1>;
-};
-
-device1 {
-...
- clocks = <&ds3231 0>;
-...
-};
-
-device2 {
-...
- clocks = <&ds3231 1>;
-...
-};
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml b/Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml
index bcb230027622..2d9fe5a75b06 100644
--- a/Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml
+++ b/Documentation/devicetree/bindings/rtc/nxp,pcf2127.yaml
@@ -18,6 +18,7 @@ properties:
- nxp,pca2129
- nxp,pcf2127
- nxp,pcf2129
+ - nxp,pcf2131
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/rtc/st,m48t86.yaml b/Documentation/devicetree/bindings/rtc/st,m48t86.yaml
new file mode 100644
index 000000000000..e3e12fa23380
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/st,m48t86.yaml
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/st,m48t86.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ST M48T86 / Dallas DS12887 RTC with SRAM
+
+maintainers:
+ - Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+allOf:
+ - $ref: rtc.yaml
+
+properties:
+ compatible:
+ enum:
+ - st,m48t86
+
+ reg:
+ items:
+ - description: index register
+ - description: data register
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ rtc@10800000 {
+ compatible = "st,m48t86";
+ reg = <0x10800000 0x1>, <0x11700000 0x1>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
index 9af77f21bb7f..2a65f31ac5a0 100644
--- a/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
@@ -45,8 +45,6 @@ properties:
- isil,isl1208
# Intersil ISL1218 Low Power RTC with Battery Backed SRAM
- isil,isl1218
- # Intersil ISL12022 Real-time Clock
- - isil,isl12022
# Real Time Clock Module with I2C-Bus
- microcrystal,rv3029
# Real Time Clock
diff --git a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
index 01ec45b3b406..2e189e548327 100644
--- a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
@@ -33,6 +33,7 @@ properties:
- amlogic,meson8b-uart
- amlogic,meson-gx-uart
- amlogic,meson-s4-uart
+ - amlogic,meson-a1-uart
- const: amlogic,meson-ao-uart
- description: Always-on power domain UART controller on G12A SoCs
items:
@@ -46,10 +47,15 @@ properties:
- amlogic,meson8b-uart
- amlogic,meson-gx-uart
- amlogic,meson-s4-uart
+ - amlogic,meson-a1-uart
- description: Everything-Else power domain UART controller on G12A SoCs
items:
- const: amlogic,meson-g12a-uart
- const: amlogic,meson-gx-uart
+ - description: UART controller on S4 compatible SoCs
+ items:
+ - const: amlogic,t7-uart
+ - const: amlogic,meson-s4-uart
reg:
maxItems: 1
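
A T7 UART then chains to the S4 entry; a sketch with placeholder address, interrupt and clock specifiers (assumptions):

    uart_a: serial@78000 {
        compatible = "amlogic,t7-uart", "amlogic,meson-s4-uart";
        reg = <0x78000 0x18>;
        interrupts = <GIC_SPI 193 IRQ_TYPE_EDGE_RISING>;
        clocks = <&xtal>, <&clkc_periphs 120>, <&xtal>;   /* placeholder specifiers */
        clock-names = "xtal", "pclk", "baud";
    };
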
diff --git a/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml b/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
index 93062403276b..3a5b59f5d3e3 100644
--- a/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
+++ b/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
@@ -25,11 +25,15 @@ properties:
- fsl,imxrt1050-lpuart
- items:
- enum:
- - fsl,imx93-lpuart
- fsl,imx8ulp-lpuart
- const: fsl,imx7ulp-lpuart
- items:
- enum:
+ - fsl,imx93-lpuart
+ - const: fsl,imx8ulp-lpuart
+ - const: fsl,imx7ulp-lpuart
+ - items:
+ - enum:
- fsl,imx8qm-lpuart
- fsl,imx8dxl-lpuart
- const: fsl,imx8qxp-lpuart
diff --git a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
index 0fa8e3e43bf8..1a7e4bff0456 100644
--- a/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
+++ b/Documentation/devicetree/bindings/serial/nxp,sc16is7xx.txt
@@ -23,6 +23,9 @@ Optional properties:
1 = active low.
- irda-mode-ports: An array that lists the indices of the port that
should operate in IrDA mode.
+- nxp,modem-control-line-ports: An array that lists the indices of the port that
+ should have shared GPIO lines configured as
+ modem control lines.
Example:
sc16is750: sc16is750@51 {
@@ -35,6 +38,26 @@ Example:
#gpio-cells = <2>;
};
+ sc16is752: sc16is752@53 {
+ compatible = "nxp,sc16is752";
+ reg = <0x53>;
+ clocks = <&clk20m>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ nxp,modem-control-line-ports = <1>; /* Port 1 as modem control lines */
+ gpio-controller; /* Port 0 as GPIOs */
+ #gpio-cells = <2>;
+ };
+
+ sc16is752: sc16is752@54 {
+ compatible = "nxp,sc16is752";
+ reg = <0x54>;
+ clocks = <&clk20m>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ nxp,modem-control-line-ports = <0 1>; /* Ports 0 and 1 as modem control lines */
+ };
+
* spi as bus
Required properties:
@@ -59,6 +82,9 @@ Optional properties:
1 = active low.
- irda-mode-ports: An array that lists the indices of the port that
should operate in IrDA mode.
+- nxp,modem-control-line-ports: An array that lists the indices of the port that
+ should have shared GPIO lines configured as
+ modem control lines.
Example:
sc16is750: sc16is750@0 {
@@ -70,3 +96,23 @@ Example:
gpio-controller;
#gpio-cells = <2>;
};
+
+ sc16is752: sc16is752@1 {
+ compatible = "nxp,sc16is752";
+ reg = <1>;
+ clocks = <&clk20m>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ nxp,modem-control-line-ports = <1>; /* Port 1 as modem control lines */
+ gpio-controller; /* Port 0 as GPIOs */
+ #gpio-cells = <2>;
+ };
+
+ sc16is752: sc16is752@2 {
+ compatible = "nxp,sc16is752";
+ reg = <2>;
+ clocks = <&clk20m>;
+ interrupt-parent = <&gpio3>;
+ interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
+ nxp,modem-control-line-ports = <0 1>; /* Ports 0 and 1 as modem control lines */
+ };
diff --git a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
index 3862411c77b5..17c553123f96 100644
--- a/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.yaml
@@ -117,7 +117,6 @@ properties:
required:
- compatible
- reg
- - interrupts
unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/sound/fsl,easrc.yaml b/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
index bdde68a1059c..a680d7aff237 100644
--- a/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
+++ b/Documentation/devicetree/bindings/sound/fsl,easrc.yaml
@@ -14,7 +14,13 @@ properties:
pattern: "^easrc@.*"
compatible:
- const: fsl,imx8mn-easrc
+ oneOf:
+ - enum:
+ - fsl,imx8mn-easrc
+ - items:
+ - enum:
+ - fsl,imx8mp-easrc
+ - const: fsl,imx8mn-easrc
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml b/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
new file mode 100644
index 000000000000..7538469997f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/loongson,ls2k-thermal.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Thermal sensors on Loongson-2 SoCs
+
+maintainers:
+ - zhanghongchen <zhanghongchen@loongson.cn>
+ - Yinbo Zhu <zhuyinbo@loongson.cn>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - loongson,ls2k1000-thermal
+ - items:
+ - enum:
+ - loongson,ls2k2000-thermal
+ - const: loongson,ls2k1000-thermal
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ thermal: thermal-sensor@1fe01500 {
+ compatible = "loongson,ls2k1000-thermal";
+ reg = <0x1fe01500 0x30>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ };
diff --git a/Documentation/devicetree/bindings/timer/oxsemi,rps-timer.txt b/Documentation/devicetree/bindings/timer/oxsemi,rps-timer.txt
deleted file mode 100644
index d191612539e8..000000000000
--- a/Documentation/devicetree/bindings/timer/oxsemi,rps-timer.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Oxford Semiconductor OXNAS SoCs Family RPS Timer
-================================================
-
-Required properties:
-- compatible: Should be "oxsemi,ox810se-rps-timer" or "oxsemi,ox820-rps-timer"
-- reg : Specifies base physical address and size of the registers.
-- interrupts : The interrupts of the two timers
-- clocks : The phandle of the timer clock source
-
-example:
-
-timer0: timer@200 {
- compatible = "oxsemi,ox810se-rps-timer";
- reg = <0x200 0x40>;
- clocks = <&rpsclk>;
- interrupts = <4 5>;
-};
diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml
index 91f135c3495b..1394557517b1 100644
--- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml
+++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.yaml
@@ -34,6 +34,7 @@ properties:
- fsl,imx23-usb
- fsl,imx25-usb
- fsl,imx28-usb
+ - fsl,imx35-usb
- fsl,imx50-usb
- fsl,imx51-usb
- fsl,imx53-usb
@@ -76,11 +77,11 @@ properties:
clocks:
minItems: 1
- maxItems: 2
+ maxItems: 3
clock-names:
minItems: 1
- maxItems: 2
+ maxItems: 3
dr_mode: true
@@ -292,6 +293,18 @@ properties:
minimum: 0x0
maximum: 0xf
+ fsl,picophy-rise-fall-time-adjust:
+ description:
+ HS Transmitter Rise/Fall Time Adjustment. Adjust the rise/fall times
+ of the high-speed transmitter waveform. It has no unit. The rise/fall
+ time will be increased or decreased by a certain percentage relative
+ to the design default time (0: -10%; 1: design default; 2: +15%; 3: +20%).
+ For details, refer to the TXRISETUNE0 bit of USBNC_n_PHY_CFG1.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 3
+ default: 1
+
usb-phy:
description: phandle for the PHY device. Use "phys" instead.
$ref: /schemas/types.yaml#/definitions/phandle
diff --git a/Documentation/devicetree/bindings/usb/cypress,hx3.yaml b/Documentation/devicetree/bindings/usb/cypress,hx3.yaml
new file mode 100644
index 000000000000..47add0d85fb8
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/cypress,hx3.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/cypress,hx3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cypress HX3 USB 3.0 hub controller family
+
+maintainers:
+ - Benjamin Bara <benjamin.bara@skidata.com>
+
+allOf:
+ - $ref: usb-device.yaml#
+
+properties:
+ compatible:
+ enum:
+ - usb4b4,6504
+ - usb4b4,6506
+
+ reg: true
+
+ reset-gpios:
+ items:
+ - description: GPIO specifier for RESETN pin.
+
+ vdd-supply:
+ description:
+ 1V2 power supply (VDD_EFUSE, AVDD12, DVDD12).
+
+ vdd2-supply:
+ description:
+ 3V3 power supply (AVDD33, VDD_IO).
+
+ peer-hub:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ phandle to the peer hub on the controller.
+
+required:
+ - compatible
+ - reg
+ - peer-hub
+ - vdd-supply
+ - vdd2-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ usb {
+ dr_mode = "host";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* 2.0 hub on port 1 */
+ hub_2_0: hub@1 {
+ compatible = "usb4b4,6504";
+ reg = <1>;
+ peer-hub = <&hub_3_0>;
+ reset-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg_1v2_usb>;
+ vdd2-supply = <&reg_3v3_usb>;
+ };
+
+ /* 3.0 hub on port 2 */
+ hub_3_0: hub@2 {
+ compatible = "usb4b4,6506";
+ reg = <2>;
+ peer-hub = <&hub_2_0>;
+ reset-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg_1v2_usb>;
+ vdd2-supply = <&reg_3v3_usb>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/generic-ehci.yaml b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
index f37191f21501..87986c45be88 100644
--- a/Documentation/devicetree/bindings/usb/generic-ehci.yaml
+++ b/Documentation/devicetree/bindings/usb/generic-ehci.yaml
@@ -68,6 +68,7 @@ properties:
- const: generic-ehci
- items:
- enum:
+ - atmel,at91sam9g45-ehci
- cavium,octeon-6335-ehci
- ibm,usb-ehci-440epx
- ibm,usb-ehci-460ex
diff --git a/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml b/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml
index 6378356727e1..d0927f6768a4 100644
--- a/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml
+++ b/Documentation/devicetree/bindings/usb/genesys,gl850g.yaml
@@ -17,6 +17,7 @@ properties:
enum:
- usb5e3,608
- usb5e3,610
+ - usb5e3,620
reg: true
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
index ae24dac78d9a..67591057f234 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -14,6 +14,7 @@ properties:
items:
- enum:
- qcom,ipq4019-dwc3
+ - qcom,ipq5332-dwc3
- qcom,ipq6018-dwc3
- qcom,ipq8064-dwc3
- qcom,ipq8074-dwc3
@@ -82,15 +83,6 @@ properties:
minItems: 1
maxItems: 9
- assigned-clocks:
- items:
- - description: Phandle and clock specifier of MOCK_UTMI_CLK.
- - description: Phandle and clock specifoer of MASTER_CLK.
-
- assigned-clock-rates:
- items:
- - description: Must be 19.2MHz (19200000).
- - description: Must be >= 60 MHz in HS mode, >= 125 MHz in SS mode.
resets:
maxItems: 1
@@ -246,6 +238,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,ipq5332-dwc3
- qcom,msm8994-dwc3
- qcom,qcs404-dwc3
then:
@@ -290,15 +283,23 @@ allOf:
then:
properties:
clocks:
- minItems: 6
+ minItems: 5
+ maxItems: 6
clock-names:
- items:
- - const: cfg_noc
- - const: core
- - const: iface
- - const: sleep
- - const: mock_utmi
- - const: bus
+ oneOf:
+ - items:
+ - const: cfg_noc
+ - const: core
+ - const: iface
+ - const: sleep
+ - const: mock_utmi
+ - const: bus
+ - items:
+ - const: cfg_noc
+ - const: core
+ - const: sleep
+ - const: mock_utmi
+ - const: bus
- if:
properties:
@@ -410,6 +411,7 @@ allOf:
compatible:
contains:
enum:
+ - qcom,ipq5332-dwc3
- qcom,sdm660-dwc3
then:
properties:
diff --git a/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml b/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
index 42ceaf13cd5d..1ade99e85ba8 100644
--- a/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
@@ -15,6 +15,7 @@ properties:
- samsung,exynos5250-dwusb3
- samsung,exynos5433-dwusb3
- samsung,exynos7-dwusb3
+ - samsung,exynos850-dwusb3
'#address-cells':
const: 1
@@ -72,7 +73,7 @@ allOf:
properties:
compatible:
contains:
- const: samsung,exynos54333-dwusb3
+ const: samsung,exynos5433-dwusb3
then:
properties:
clocks:
@@ -82,8 +83,8 @@ allOf:
items:
- const: aclk
- const: susp_clk
- - const: pipe_pclk
- const: phyclk
+ - const: pipe_pclk
- if:
properties:
@@ -101,6 +102,21 @@ allOf:
- const: usbdrd30_susp_clk
- const: usbdrd30_axius_clk
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos850-dwusb3
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: bus_early
+ - const: ref
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml b/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
index f5cc7aa1b93b..443e2e7ab467 100644
--- a/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
@@ -17,6 +17,7 @@ properties:
compatible:
enum:
- amlogic,meson-gxbb-wdt
+ - amlogic,t7-wdt
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/watchdog/marvell,cn10624-wdt.yaml b/Documentation/devicetree/bindings/watchdog/marvell,cn10624-wdt.yaml
new file mode 100644
index 000000000000..1b583f232e53
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/marvell,cn10624-wdt.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/marvell,cn10624-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Marvell Global Timer (GTI) system watchdog
+
+maintainers:
+ - Bharat Bhushan <bbhushan2@marvell.com>
+
+allOf:
+ - $ref: watchdog.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - marvell,cn9670-wdt
+ - marvell,cn10624-wdt
+
+ - items:
+ - enum:
+ - marvell,cn9880-wdt
+ - marvell,cnf9535-wdt
+ - const: marvell,cn9670-wdt
+
+ - items:
+ - enum:
+ - marvell,cn10308-wdt
+ - marvell,cnf10518-wdt
+ - const: marvell,cn10624-wdt
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: refclk
+
+ marvell,wdt-timer-index:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 0
+ maximum: 63
+ description:
+ An SoC has many timers (up to 64). Firmware can reserve one or more timers
+ for other use cases and configures one of the global timers as the watchdog
+ timer. Firmware will update this field with the timer number configured
+ as the watchdog timer.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ watchdog@802000040000 {
+ compatible = "marvell,cn9670-wdt";
+ reg = <0x00008020 0x00040000 0x00000000 0x00020000>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_EDGE_RISING>;
+ clocks = <&sclk>;
+ clock-names = "refclk";
+ marvell,wdt-timer-index = <63>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
index 6d0fe6abd06a..5046dfa55f13 100644
--- a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
@@ -18,6 +18,7 @@ properties:
- items:
- enum:
- qcom,kpss-wdt-ipq4019
+ - qcom,apss-wdt-ipq5018
- qcom,apss-wdt-ipq5332
- qcom,apss-wdt-ipq9574
- qcom,apss-wdt-msm8994
diff --git a/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml b/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml
index fc553211e42d..62ddc284a524 100644
--- a/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml
@@ -34,6 +34,20 @@ properties:
power-domains:
maxItems: 1
+ memory-region:
+ maxItems: 1
+ description:
+ Optional reserved memory region for the watchdog. The reserved memory
+ is pre-loaded with the values PON_REASON_SOF_NUM (0xBBBBCCCC),
+ PON_REASON_MAGIC_NUM (0xDDDDDDDD) and PON_REASON_EOF_NUM (0xCCCCBBBB)
+ in its first 3 * 4 bytes to indicate that the last boot was caused by
+ a watchdog reset. Once the PON reason has been captured by the driver
+ (rti_wdt.c), the driver wipes the whole memory region. If this
+ property is set, at least 12 bytes of reserved memory starting at the
+ chosen address (0xa2200000 in the example below) must be provided.
+
required:
- compatible
- reg
@@ -47,7 +61,18 @@ examples:
/*
* RTI WDT in main domain on J721e SoC. Assigned clocks are used to
* select the source clock for the watchdog, forcing it to tick with
- * a 32kHz clock in this case.
+ * a 32kHz clock in this case. An optional reserved memory region keeps
+ * the watchdog reset cause persistent; it is written as 12 bytes
+ * starting from 0xa2200000 by the RTI Watchdog Firmware, which makes it
+ * possible for the driver to report the watchdog reset cause.
+ *
+ * Reserved memory should be defined as follows:
+ * reserved-memory {
+ * wdt_reset_memory_region: wdt-memory@a2200000 {
+ * reg = <0x00 0xa2200000 0x00 0x1000>;
+ * no-map;
+ * };
+ * }
*/
#include <dt-bindings/soc/ti,sci_pm_domain.h>
@@ -58,4 +83,5 @@ examples:
power-domains = <&k3_pds 252 TI_SCI_PD_EXCLUSIVE>;
assigned-clocks = <&k3_clks 252 1>;
assigned-clock-parents = <&k3_clks 252 5>;
+ memory-region = <&wdt_reset_memory_region>;
};
diff --git a/Documentation/driver-api/interconnect.rst b/Documentation/driver-api/interconnect.rst
index 5ed4f57a6bac..a92d0f277a1f 100644
--- a/Documentation/driver-api/interconnect.rst
+++ b/Documentation/driver-api/interconnect.rst
@@ -113,3 +113,28 @@ through dot to generate diagrams in many graphical formats::
$ cat /sys/kernel/debug/interconnect/interconnect_graph | \
dot -Tsvg > interconnect_graph.svg
+
+The ``test-client`` directory provides interfaces for issuing BW requests to
+any arbitrary path. Note that for safety reasons, this feature is disabled by
+default without a Kconfig to enable it. Enabling it requires code changes to
+``#define INTERCONNECT_ALLOW_WRITE_DEBUGFS``. Example usage::
+
+ cd /sys/kernel/debug/interconnect/test-client/
+
+ # Configure node endpoints for the path from CPU to DDR on
+ # qcom/sm8550.
+ echo chm_apps > src_node
+ echo ebi > dst_node
+
+ # Get path between src_node and dst_node. This is only
+ # necessary after updating the node endpoints.
+ echo 1 > get
+
+ # Set desired BW to 1GBps avg and 2GBps peak.
+ echo 1000000 > avg_bw
+ echo 2000000 > peak_bw
+
+ # Vote for avg_bw and peak_bw on the latest path from "get".
+ # Voting for multiple paths is possible by repeating this
+ # process for different node endpoints.
+ echo 1 > commit
diff --git a/Documentation/driver-api/libata.rst b/Documentation/driver-api/libata.rst
index 311af516a3fd..5da27a749246 100644
--- a/Documentation/driver-api/libata.rst
+++ b/Documentation/driver-api/libata.rst
@@ -32,22 +32,6 @@ register blocks.
:c:type:`struct ata_port_operations <ata_port_operations>`
----------------------------------------------------------
-Disable ATA port
-~~~~~~~~~~~~~~~~
-
-::
-
- void (*port_disable) (struct ata_port *);
-
-
-Called from :c:func:`ata_bus_probe` error path, as well as when unregistering
-from the SCSI module (rmmod, hot unplug). This function should do
-whatever needs to be done to take the port out of use. In most cases,
-:c:func:`ata_port_disable` can be used as this hook.
-
-Called from :c:func:`ata_bus_probe` on a failed probe. Called from
-:c:func:`ata_scsi_release`.
-
Post-IDENTIFY device configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -274,14 +258,6 @@ Exception and probe handling (EH)
::
- void (*eng_timeout) (struct ata_port *ap);
- void (*phy_reset) (struct ata_port *ap);
-
-
-Deprecated. Use ``->error_handler()`` instead.
-
-::
-
void (*freeze) (struct ata_port *ap);
void (*thaw) (struct ata_port *ap);
@@ -364,8 +340,7 @@ SATA phy read/write
u32 val);
-Read and write standard SATA phy registers. Currently only used if
-``->phy_reset`` hook called the :c:func:`sata_phy_reset` helper function.
+Read and write standard SATA phy registers.
sc_reg is one of SCR_STATUS, SCR_CONTROL, SCR_ERROR, or SCR_ACTIVE.
Init and shutdown
@@ -536,13 +511,12 @@ to return without deallocating the qc. This leads us to
:c:func:`ata_scsi_error` is the current ``transportt->eh_strategy_handler()``
for libata. As discussed above, this will be entered in two cases -
-timeout and ATAPI error completion. This function calls low level libata
-driver's :c:func:`eng_timeout` callback, the standard callback for which is
-:c:func:`ata_eng_timeout`. It checks if a qc is active and calls
-:c:func:`ata_qc_timeout` on the qc if so. Actual error handling occurs in
-:c:func:`ata_qc_timeout`.
+timeout and ATAPI error completion. This function will check if a qc is active
+and has not failed yet. Such a qc will be marked with AC_ERR_TIMEOUT such that
+EH will know to handle it later. Then it calls low level libata driver's
+:c:func:`error_handler` callback.
-If EH is invoked for timeout, :c:func:`ata_qc_timeout` stops BMDMA and
+When the :c:func:`error_handler` callback is invoked it stops BMDMA and
completes the qc. Note that as we're currently in EH, we cannot call
scsi_done. As described in SCSI EH doc, a recovered scmd should be
either retried with :c:func:`scsi_queue_insert` or finished with
diff --git a/Documentation/driver-api/media/cec-core.rst b/Documentation/driver-api/media/cec-core.rst
index ae0d20798edc..f1ffdec388f3 100644
--- a/Documentation/driver-api/media/cec-core.rst
+++ b/Documentation/driver-api/media/cec-core.rst
@@ -109,9 +109,11 @@ your driver:
int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable);
int (*adap_monitor_pin_enable)(struct cec_adapter *adap, bool enable);
int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr);
- void (*adap_configured)(struct cec_adapter *adap, bool configured);
+ void (*adap_unconfigured)(struct cec_adapter *adap);
int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg);
+ void (*adap_nb_transmit_canceled)(struct cec_adapter *adap,
+ const struct cec_msg *msg);
void (*adap_status)(struct cec_adapter *adap, struct seq_file *file);
void (*adap_free)(struct cec_adapter *adap);
@@ -122,8 +124,8 @@ your driver:
...
};
-The seven low-level ops deal with various aspects of controlling the CEC adapter
-hardware:
+These low-level ops deal with various aspects of controlling the CEC adapter
+hardware. They are all called with the mutex adap->lock held.
To enable/disable the hardware::
@@ -179,14 +181,12 @@ can receive directed messages to that address.
Note that adap_log_addr must return 0 if logical_addr is CEC_LOG_ADDR_INVALID.
-Called when the adapter is fully configured or unconfigured::
+Called when the adapter is unconfigured::
- void (*adap_configured)(struct cec_adapter *adap, bool configured);
+ void (*adap_unconfigured)(struct cec_adapter *adap);
-If configured == true, then the adapter is fully configured, i.e. all logical
-addresses have been successfully claimed. If configured == false, then the
-adapter is unconfigured. If the driver has to take specific actions after
-(un)configuration, then that can be done through this optional callback.
+The adapter is unconfigured. If the driver has to take specific actions after
+unconfiguration, then that can be done through this optional callback.
To transmit a new message::
@@ -207,6 +207,19 @@ The CEC_FREE_TIME_TO_USEC macro can be used to convert signal_free_time to
microseconds (one data bit period is 2.4 ms).
+To pass on the result of a canceled non-blocking transmit::
+
+ void (*adap_nb_transmit_canceled)(struct cec_adapter *adap,
+ const struct cec_msg *msg);
+
+This optional callback can be used to obtain the result of a canceled
+non-blocking transmit with sequence number msg->sequence. This is
+called if the transmit was aborted, the transmit timed out (i.e. the
+hardware never signaled that the transmit finished), or the transmit
+was successful, but the wait for the expected reply was either aborted
+or it timed out.
+
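+A minimal driver-side sketch of such a callback (``struct my_cec_drv`` and
+``my_cec_drv_drop_pending()`` are hypothetical helpers; only the callback
+signature above is prescribed by the framework)::
+
+ static void my_cec_nb_transmit_canceled(struct cec_adapter *adap,
+                                         const struct cec_msg *msg)
+ {
+     struct my_cec_drv *drv = cec_get_drvdata(adap);
+
+     /* forget the bookkeeping for the transmit with this sequence number */
+     my_cec_drv_drop_pending(drv, msg->sequence);
+ }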
+
To log the current CEC hardware status::
void (*adap_status)(struct cec_adapter *adap, struct seq_file *file);
@@ -372,7 +385,8 @@ Implementing the High-Level CEC Adapter
---------------------------------------
The low-level operations drive the hardware, the high-level operations are
-CEC protocol driven. The following high-level callbacks are available:
+CEC protocol driven. The high-level callbacks are called without the adap->lock
+mutex being held. The following high-level callbacks are available:
.. code-block:: none
@@ -384,9 +398,19 @@ CEC protocol driven. The following high-level callbacks are available:
...
/* High-level CEC message callback */
+ void (*configured)(struct cec_adapter *adap);
int (*received)(struct cec_adapter *adap, struct cec_msg *msg);
};
+Called when the adapter is configured::
+
+ void (*configured)(struct cec_adapter *adap);
+
+The adapter is fully configured, i.e. all logical addresses have been
+successfully claimed. If the driver has to take specific actions after
+configuration, then that can be done through this optional callback.
+
+
The received() callback allows the driver to optionally handle a newly
received CEC message::
diff --git a/Documentation/driver-api/media/v4l2-cci.rst b/Documentation/driver-api/media/v4l2-cci.rst
new file mode 100644
index 000000000000..dd297a40ed20
--- /dev/null
+++ b/Documentation/driver-api/media/v4l2-cci.rst
@@ -0,0 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+V4L2 CCI kAPI
+^^^^^^^^^^^^^
+.. kernel-doc:: include/media/v4l2-cci.h
diff --git a/Documentation/driver-api/media/v4l2-core.rst b/Documentation/driver-api/media/v4l2-core.rst
index 1a8c4a5f256b..239045ecc8f4 100644
--- a/Documentation/driver-api/media/v4l2-core.rst
+++ b/Documentation/driver-api/media/v4l2-core.rst
@@ -22,6 +22,7 @@ Video4Linux devices
v4l2-mem2mem
v4l2-async
v4l2-fwnode
+ v4l2-cci
v4l2-rect
v4l2-tuner
v4l2-common
diff --git a/Documentation/driver-api/media/v4l2-subdev.rst b/Documentation/driver-api/media/v4l2-subdev.rst
index 602dadaa81d8..e56b50b3f203 100644
--- a/Documentation/driver-api/media/v4l2-subdev.rst
+++ b/Documentation/driver-api/media/v4l2-subdev.rst
@@ -157,6 +157,9 @@ below.
Using one or the other registration method only affects the probing process, the
run-time bridge-subdevice interaction is in both cases the same.
+Registering synchronous sub-devices
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
In the **synchronous** case a device (bridge) driver needs to register the
:c:type:`v4l2_subdev` with the v4l2_device:
@@ -175,10 +178,12 @@ You can unregister a sub-device using:
:c:func:`v4l2_device_unregister_subdev <v4l2_device_unregister_subdev>`
(:c:type:`sd <v4l2_subdev>`).
-
Afterwards the subdev module can be unloaded and
:c:type:`sd <v4l2_subdev>`->dev == ``NULL``.
+Registering asynchronous sub-devices
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
In the **asynchronous** case subdevice probing can be invoked independently of
the bridge driver availability. The subdevice driver then has to verify whether
all the requirements for a successful probing are satisfied. This can include a
@@ -190,64 +195,89 @@ performed using the :c:func:`v4l2_async_unregister_subdev` call. Subdevices
registered this way are stored in a global list of subdevices, ready to be
picked up by bridge drivers.
-Bridge drivers in turn have to register a notifier object. This is
-performed using the :c:func:`v4l2_async_nf_register` call. To
-unregister the notifier the driver has to call
-:c:func:`v4l2_async_nf_unregister`. The former of the two functions
-takes two arguments: a pointer to struct :c:type:`v4l2_device` and a
-pointer to struct :c:type:`v4l2_async_notifier`.
+Asynchronous sub-device notifiers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Bridge drivers in turn have to register a notifier object. This is performed
+using the :c:func:`v4l2_async_nf_register` call. To unregister the notifier the
+driver has to call :c:func:`v4l2_async_nf_unregister`. Before the memory of an
+unregistered notifier is released, it must be cleaned up by calling
+:c:func:`v4l2_async_nf_cleanup`.
Before registering the notifier, bridge drivers must do two things: first, the
-notifier must be initialized using the :c:func:`v4l2_async_nf_init`.
-Second, bridge drivers can then begin to form a list of subdevice descriptors
-that the bridge device needs for its operation. Several functions are available
-to add subdevice descriptors to a notifier, depending on the type of device and
-the needs of the driver.
-
-:c:func:`v4l2_async_nf_add_fwnode_remote` and
-:c:func:`v4l2_async_nf_add_i2c` are for bridge and ISP drivers for
-registering their async sub-devices with the notifier.
-
-:c:func:`v4l2_async_register_subdev_sensor` is a helper function for
-sensor drivers registering their own async sub-device, but it also registers a
-notifier and further registers async sub-devices for lens and flash devices
-found in firmware. The notifier for the sub-device is unregistered with the
-async sub-device.
-
-These functions allocate an async sub-device descriptor which is of type struct
-:c:type:`v4l2_async_subdev` embedded in a driver-specific struct. The &struct
-:c:type:`v4l2_async_subdev` shall be the first member of this struct:
+notifier must be initialized using the :c:func:`v4l2_async_nf_init`. Second,
+bridge drivers can then begin to form a list of async connection descriptors
+that the bridge device needs for its operation, using
+:c:func:`v4l2_async_nf_add_fwnode`, :c:func:`v4l2_async_nf_add_fwnode_remote`
+and :c:func:`v4l2_async_nf_add_i2c`.
+
+Async connection descriptors describe connections to external sub-devices the
+drivers for which are not yet probed. Based on an async connection, a media data
+or ancillary link may be created when the related sub-device becomes
+available. There may be one or more async connections to a given sub-device but
+this is not known at the time of adding the connections to the notifier. Async
+connections are bound as matching async sub-devices are found, one by one.
+
+Asynchronous sub-device notifier for sub-devices
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A driver that registers an asynchronous sub-device may also register an
+asynchronous notifier. This is called an asynchronous sub-device notifier and
+the process is similar to that of a bridge driver, apart from that the notifier
+is initialised using :c:func:`v4l2_async_subdev_nf_init` instead. A sub-device
+notifier may complete only after the V4L2 device becomes available, i.e. there's
+a path via async sub-devices and notifiers to a notifier that is not an
+asynchronous sub-device notifier.
+
+Asynchronous sub-device registration helper for camera sensor drivers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:c:func:`v4l2_async_register_subdev_sensor` is a helper function for sensor
+drivers registering their own async connection, but it also registers a notifier
+and further registers async connections for lens and flash devices found in
+firmware. The notifier for the sub-device is unregistered and cleaned up with
+the async sub-device, using :c:func:`v4l2_async_unregister_subdev`.
+
+Asynchronous sub-device notifier example
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These functions allocate an async connection descriptor which is of type struct
+:c:type:`v4l2_async_connection` embedded in a driver-specific struct. The &struct
+:c:type:`v4l2_async_connection` shall be the first member of this struct:
.. code-block:: c
- struct my_async_subdev {
- struct v4l2_async_subdev asd;
+ struct my_async_connection {
+ struct v4l2_async_connection asc;
...
};
- struct my_async_subdev *my_asd;
+ struct my_async_connection *my_asc;
struct fwnode_handle *ep;
...
- my_asd = v4l2_async_nf_add_fwnode_remote(&notifier, ep,
- struct my_async_subdev);
+ my_asc = v4l2_async_nf_add_fwnode_remote(&notifier, ep,
+ struct my_async_connection);
fwnode_handle_put(ep);
- if (IS_ERR(asd))
- return PTR_ERR(asd);
+ if (IS_ERR(my_asc))
+ return PTR_ERR(my_asc);
+
+Asynchronous sub-device notifier callbacks
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-The V4L2 core will then use these descriptors to match asynchronously
-registered subdevices to them. If a match is detected the ``.bound()``
-notifier callback is called. After all subdevices have been located the
-.complete() callback is called. When a subdevice is removed from the
-system the .unbind() method is called. All three callbacks are optional.
+The V4L2 core will then use these connection descriptors to match asynchronously
+registered subdevices to them. If a match is detected the ``.bound()`` notifier
+callback is called. After all connections have been bound the .complete()
+callback is called. When a connection is removed from the system the
+``.unbind()`` method is called. All three callbacks are optional.
Drivers can store any type of custom data in their driver-specific
-:c:type:`v4l2_async_subdev` wrapper. If any of that data requires special
+:c:type:`v4l2_async_connection` wrapper. If any of that data requires special
handling when the structure is freed, drivers must implement the ``.destroy()``
notifier callback. The framework will call it right before freeing the
-:c:type:`v4l2_async_subdev`.
+:c:type:`v4l2_async_connection`.
Calling subdev operations
~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/driver-api/tty/tty_buffer.rst b/Documentation/driver-api/tty/tty_buffer.rst
index a39d4781e0d2..4b5ca1776d4f 100644
--- a/Documentation/driver-api/tty/tty_buffer.rst
+++ b/Documentation/driver-api/tty/tty_buffer.rst
@@ -15,10 +15,13 @@ Flip Buffer Management
======================
.. kernel-doc:: drivers/tty/tty_buffer.c
- :identifiers: tty_prepare_flip_string tty_insert_flip_string_fixed_flag
- tty_insert_flip_string_flags __tty_insert_flip_char
+ :identifiers: tty_prepare_flip_string
tty_flip_buffer_push tty_ldisc_receive_buf
+.. kernel-doc:: include/linux/tty_flip.h
+ :identifiers: tty_insert_flip_string_fixed_flag tty_insert_flip_string_flags
+ tty_insert_flip_char
+
----
Other Functions
diff --git a/Documentation/driver-api/usb/usb.rst b/Documentation/driver-api/usb/usb.rst
index 2c94ff2f4385..fb41768696ec 100644
--- a/Documentation/driver-api/usb/usb.rst
+++ b/Documentation/driver-api/usb/usb.rst
@@ -420,6 +420,12 @@ USBDEVFS_CONNECTINFO
know the devnum value already, it's the DDD value of the device file
name.
+USBDEVFS_GET_SPEED
+ Returns the speed of the device. The speed is returned as a
+ numerical value in accordance with enum usb_device_speed.
+
+ File modification time is not updated by this request.
+
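+ A hedged user-space sketch, assuming (as described above) that the ioctl
+ reports the speed as its return value; the device path is just an
+ example::
+
+     #include <fcntl.h>
+     #include <stdio.h>
+     #include <sys/ioctl.h>
+     #include <linux/usbdevice_fs.h>
+
+     int main(void)
+     {
+         int fd = open("/dev/bus/usb/001/002", O_RDWR);
+         int speed = ioctl(fd, USBDEVFS_GET_SPEED, NULL);
+
+         printf("usb_device_speed = %d\n", speed);
+         return 0;
+     }
+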
USBDEVFS_GETDRIVER
Returns the name of the kernel driver bound to a given interface (a
string). Parameter is a pointer to this structure, which is
@@ -771,8 +777,7 @@ Speed may be:
======= ======================================================
1.5 Mbit/s for low speed USB
12 Mbit/s for full speed USB
- 480 Mbit/s for high speed USB (added for USB 2.0);
- also used for Wireless USB, which has no fixed speed
+ 480 Mbit/s for high speed USB (added for USB 2.0)
5000 Mbit/s for SuperSpeed USB (added for USB 3.0)
======= ======================================================
diff --git a/Documentation/features/debug/KASAN/arch-support.txt b/Documentation/features/debug/KASAN/arch-support.txt
index bf0124fae643..c4581c2edb28 100644
--- a/Documentation/features/debug/KASAN/arch-support.txt
+++ b/Documentation/features/debug/KASAN/arch-support.txt
@@ -13,7 +13,7 @@
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
- | loongarch: | TODO |
+ | loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |
| mips: | TODO |
diff --git a/Documentation/features/debug/kcov/arch-support.txt b/Documentation/features/debug/kcov/arch-support.txt
index ffcc9f2b1d74..de84cefbcdd3 100644
--- a/Documentation/features/debug/kcov/arch-support.txt
+++ b/Documentation/features/debug/kcov/arch-support.txt
@@ -13,7 +13,7 @@
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
- | loongarch: | TODO |
+ | loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |
| mips: | ok |
diff --git a/Documentation/features/debug/kgdb/arch-support.txt b/Documentation/features/debug/kgdb/arch-support.txt
index 958498f9f2a4..5e91ec78c80b 100644
--- a/Documentation/features/debug/kgdb/arch-support.txt
+++ b/Documentation/features/debug/kgdb/arch-support.txt
@@ -13,7 +13,7 @@
| csky: | TODO |
| hexagon: | ok |
| ia64: | TODO |
- | loongarch: | TODO |
+ | loongarch: | ok |
| m68k: | TODO |
| microblaze: | ok |
| mips: | ok |
diff --git a/Documentation/filesystems/ceph.rst b/Documentation/filesystems/ceph.rst
index 76ce938e7024..085f309ece60 100644
--- a/Documentation/filesystems/ceph.rst
+++ b/Documentation/filesystems/ceph.rst
@@ -57,6 +57,16 @@ a snapshot on any subdirectory (and its nested contents) in the
system. Snapshot creation and deletion are as simple as 'mkdir
.snap/foo' and 'rmdir .snap/foo'.
+Snapshot names have two limitations:
+
+* They cannot start with an underscore ('_'), as these names are reserved
+ for internal usage by the MDS.
+* They cannot exceed 240 characters in size. This is because the MDS makes
+ use of long snapshot names internally, which follow the format:
+ `_<SNAPSHOT-NAME>_<INODE-NUMBER>`. Since filenames in general can't have
+ more than 255 characters, and `<INODE-NUMBER>` takes 13 characters, the
+ long snapshot names can be at most 255 - 1 - 1 - 13 = 240 characters.
+
Ceph also provides some recursive accounting on directories for nested
files and bytes. That is, a 'getfattr -d foo' on any directory in the
system will reveal the total number of nested regular files and
diff --git a/Documentation/filesystems/gfs2-glocks.rst b/Documentation/filesystems/gfs2-glocks.rst
index bec25c8b3e4b..8a5842929b60 100644
--- a/Documentation/filesystems/gfs2-glocks.rst
+++ b/Documentation/filesystems/gfs2-glocks.rst
@@ -20,8 +20,7 @@ The gl_holders list contains all the queued lock requests (not
just the holders) associated with the glock. If there are any
held locks, then they will be contiguous entries at the head
of the list. Locks are granted in strictly the order that they
-are queued, except for those marked LM_FLAG_PRIORITY which are
-used only during recovery, and even then only for journal locks.
+are queued.
There are three lock states that users of the glock layer can request,
namely shared (SH), deferred (DF) and exclusive (EX). Those translate
diff --git a/Documentation/filesystems/nfs/exporting.rst b/Documentation/filesystems/nfs/exporting.rst
index 3d97b8d8f735..4b30daee399a 100644
--- a/Documentation/filesystems/nfs/exporting.rst
+++ b/Documentation/filesystems/nfs/exporting.rst
@@ -215,3 +215,29 @@ following flags are defined:
This flag causes nfsd to close any open files for this inode _before_
calling into the vfs to do an unlink or a rename that would replace
an existing file.
+
+ EXPORT_OP_REMOTE_FS - Backing storage for this filesystem is remote
+ PF_LOCAL_THROTTLE exists for loopback NFSD, where a thread needs to
+ write to one bdi (the final bdi) in order to free up writes queued
+ to another bdi (the client bdi). Such threads get a private balance
+ of dirty pages so that dirty pages for the client bdi do not impact
+ the daemon writing to the final bdi. For filesystems whose durable
+ storage is not local (such as exported NFS filesystems), this
+ constraint has negative consequences. EXPORT_OP_REMOTE_FS enables
+ an export to disable writeback throttling.
+
+ EXPORT_OP_NOATOMIC_ATTR - Filesystem does not update attributes atomically
+ EXPORT_OP_NOATOMIC_ATTR indicates that the exported filesystem
+ cannot provide the semantics required by the "atomic" boolean in
+ NFSv4's change_info4. This boolean indicates to a client whether the
+ returned before and after change attributes were obtained atomically
+ with respect to the requested metadata operation (UNLINK,
+ OPEN/CREATE, MKDIR, etc).
+
+ EXPORT_OP_FLUSH_ON_CLOSE - Filesystem flushes file data on close(2)
+ On most filesystems, inodes can remain under writeback after the
+ file is closed. NFSD relies on client activity or local flusher
+ threads to handle writeback. Certain filesystems, such as NFS, flush
+ all of an inode's dirty data on last close. Exports that behave this
+ way should set EXPORT_OP_FLUSH_ON_CLOSE so that NFSD knows to skip
+ waiting for writeback when closing such files.
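+
+A filesystem advertises these capabilities through the ``flags`` field of its
+struct export_operations. A minimal, illustrative sketch (the ``myfs_*``
+helpers are hypothetical placeholders)::
+
+    static const struct export_operations myfs_export_ops = {
+        .encode_fh    = myfs_encode_fh,
+        .fh_to_dentry = myfs_fh_to_dentry,
+        .flags        = EXPORT_OP_REMOTE_FS | EXPORT_OP_FLUSH_ON_CLOSE,
+    };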
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index 1f9615553b82..2b59cff8be17 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -461,6 +461,7 @@ Memory Area, or VMA) there is a series of lines such as the following::
Private_Dirty: 0 kB
Referenced: 892 kB
Anonymous: 0 kB
+ KSM: 0 kB
LazyFree: 0 kB
AnonHugePages: 0 kB
ShmemPmdMapped: 0 kB
@@ -501,6 +502,9 @@ accessed.
a mapping associated with a file may contain anonymous pages: when MAP_PRIVATE
and a page is modified, the file page is replaced by a private anonymous copy.
+"KSM" reports how many of the pages are KSM pages. Note that KSM-placed zeropages
+are not included, only actual KSM pages.
+
"LazyFree" shows the amount of memory which is marked by madvise(MADV_FREE).
The memory isn't freed immediately with madvise(). It's freed in memory
pressure if the memory is clean. Please note that the printed value might
diff --git a/Documentation/gpu/amdgpu/driver-misc.rst b/Documentation/gpu/amdgpu/driver-misc.rst
index be131e963d87..4321c38fef21 100644
--- a/Documentation/gpu/amdgpu/driver-misc.rst
+++ b/Documentation/gpu/amdgpu/driver-misc.rst
@@ -11,19 +11,19 @@ via sysfs
product_name
------------
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
:doc: product_name
product_number
--------------
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
- :doc: product_name
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+ :doc: product_number
serial_number
-------------
-.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
:doc: serial_number
unique_id
diff --git a/Documentation/hid/hidintro.rst b/Documentation/hid/hidintro.rst
new file mode 100644
index 000000000000..73523e315ebd
--- /dev/null
+++ b/Documentation/hid/hidintro.rst
@@ -0,0 +1,524 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================================
+Introduction to HID report descriptors
+======================================
+
+This chapter is meant to give a broad overview of what HID report
+descriptors are, and of how a casual (non-kernel) programmer can deal
+with HID devices that are not working well with Linux.
+
+.. contents::
+ :local:
+ :depth: 2
+
+.. toctree::
+ :maxdepth: 2
+
+ hidreport-parsing
+
+
+Introduction
+============
+
+HID stands for Human Interface Device, and can be whatever device you
+are using to interact with a computer, be it a mouse, a touchpad, a
+tablet, or a microphone.
+
+Many HID devices work out of the box, even if their hardware is different.
+For example, mice can have any number of buttons; they may have a
+wheel; movement sensitivity differs between different models, and so
+on. Nonetheless, most of the time everything just works, without the
+need to have specialized code in the kernel for every mouse model
+developed since 1970.
+
+This is because modern HID devices do advertise their capabilities
+through the *HID report descriptor*, a fixed set of bytes describing
+exactly what *HID reports* may be sent between the device and the host
+and the meaning of each individual bit in those reports. For example,
+a HID Report Descriptor may specify that "in a report with ID 3 the
+bits from 8 to 15 are the delta x coordinate of a mouse".
+
+The HID report itself then merely carries the actual data values
+without any extra meta information. Note that HID reports may be sent
+from the device ("Input Reports", i.e. input events), to the device
+("Output Reports" to e.g. change LEDs) or used for device configuration
+("Feature reports"). A device may support one or more HID reports.
+
+The HID subsystem is in charge of parsing the HID report descriptors,
+and converts HID events into normal input device interfaces (see
+Documentation/hid/hid-transport.rst). Devices may misbehave because the
+HID report descriptor provided by the device is wrong, or because it
+needs to be dealt with in a special way, or because some special
+device or interaction mode is not handled by the default code.
+
+The format of HID report descriptors is described by two documents,
+available from the `USB Implementers Forum <https://www.usb.org/>`_
+`HID web page <https://www.usb.org/hid>`_ address:
+
+ * the `HID USB Device Class Definition
+ <https://www.usb.org/document-library/device-class-definition-hid-111>`_ (HID Spec from now on)
+ * the `HID Usage Tables <https://usb.org/document-library/hid-usage-tables-14>`_ (HUT from now on)
+
+The HID subsystem can deal with different transport drivers
+(USB, I2C, Bluetooth, etc.). See Documentation/hid/hid-transport.rst.
+
+Parsing HID report descriptors
+==============================
+
+The current list of HID devices can be found at ``/sys/bus/hid/devices/``.
+For each device, say ``/sys/bus/hid/devices/0003\:093A\:2510.0002/``,
+one can read the corresponding report descriptor::
+
+ $ hexdump -C /sys/bus/hid/devices/0003\:093A\:2510.0002/report_descriptor
+ 00000000 05 01 09 02 a1 01 09 01 a1 00 05 09 19 01 29 03 |..............).|
+ 00000010 15 00 25 01 75 01 95 03 81 02 75 05 95 01 81 01 |..%.u.....u.....|
+ 00000020 05 01 09 30 09 31 09 38 15 81 25 7f 75 08 95 03 |...0.1.8..%.u...|
+ 00000030 81 06 c0 c0 |....|
+ 00000034
+
+Optional: the HID report descriptor can also be read by
+directly accessing the hidraw driver [#hidraw]_.
+
+The basic structure of HID report descriptors is defined in the HID
+spec, while HUT "defines constants that can be interpreted by an
+application to identify the purpose and meaning of a data field in a
+HID report". Each entry is defined by at least two bytes, where the
+first one defines what type of value is following and is described in
+the HID spec, while the second one carries the actual value and is
+described in the HUT.
+
+HID report descriptors can, in principle, be painstakingly parsed by
+hand, byte by byte.
+
+A short introduction on how to do this is sketched in
+Documentation/hid/hidreport-parsing.rst; you only need to understand it
+if you need to patch HID report descriptors.
+
+In practice you should not parse HID report descriptors by hand; rather,
+you should use an existing parser. Among all the available ones:
+
+ * the online `USB Descriptor and Request Parser
+ <http://eleccelerator.com/usbdescreqparser/>`_;
+ * `hidrdd <https://github.com/abend0c1/hidrdd>`_,
+ that provides very detailed and somewhat verbose descriptions
+ (verbosity can be useful if you are not familiar with HID report
+ descriptors);
+ * `hid-tools <https://gitlab.freedesktop.org/libevdev/hid-tools>`_,
+ a complete utility set that allows, among other things,
+ to record and replay the raw HID reports and to debug
+ and replay HID devices.
+ It is being actively developed by the Linux HID subsystem maintainers.
+
+Parsing the mouse HID report descriptor with `hid-tools
+<https://gitlab.freedesktop.org/libevdev/hid-tools>`_ leads to
+(explanations interposed)::
+
+ $ ./hid-decode /sys/bus/hid/devices/0003\:093A\:2510.0002/report_descriptor
+ # device 0:0
+ # 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ # 0x09, 0x02, // Usage (Mouse) 2
+ # 0xa1, 0x01, // Collection (Application) 4
+ # 0x09, 0x01, // Usage (Pointer) 6
+ # 0xa1, 0x00, // Collection (Physical) 8
+ # 0x05, 0x09, // Usage Page (Button) 10
+
+what follows is a button ::
+
+ # 0x19, 0x01, // Usage Minimum (1) 12
+ # 0x29, 0x03, // Usage Maximum (3) 14
+
+first button is button number 1, last button is button number 3 ::
+
+ # 0x15, 0x00, // Logical Minimum (0) 16
+ # 0x25, 0x01, // Logical Maximum (1) 18
+
+each button can send values from 0 up to including 1
+(i.e. they are binary buttons) ::
+
+ # 0x75, 0x01, // Report Size (1) 20
+
+each button is sent as exactly one bit ::
+
+ # 0x95, 0x03, // Report Count (3) 22
+
+and there are three of those bits (matching the three buttons) ::
+
+ # 0x81, 0x02, // Input (Data,Var,Abs) 24
+
+it's actual Data (not constant padding), they represent
+a single variable (Var) and their values are Absolute (not relative);
+See HID spec Sec. 6.2.2.5 "Input, Output, and Feature Items" ::
+
+ # 0x75, 0x05, // Report Size (5) 26
+
+five additional padding bits, needed to reach a byte ::
+
+ # 0x95, 0x01, // Report Count (1) 28
+
+those five bits are repeated only once ::
+
+ # 0x81, 0x01, // Input (Cnst,Arr,Abs) 30
+
+and take Constant (Cnst) values i.e. they can be ignored. ::
+
+ # 0x05, 0x01, // Usage Page (Generic Desktop) 32
+ # 0x09, 0x30, // Usage (X) 34
+ # 0x09, 0x31, // Usage (Y) 36
+ # 0x09, 0x38, // Usage (Wheel) 38
+
+The mouse has also two physical positions (Usage (X), Usage (Y))
+and a wheel (Usage (Wheel)) ::
+
+ # 0x15, 0x81, // Logical Minimum (-127) 40
+ # 0x25, 0x7f, // Logical Maximum (127) 42
+
+each of them can send values ranging from -127 up to including 127 ::
+
+ # 0x75, 0x08, // Report Size (8) 44
+
+which is represented by eight bits ::
+
+ # 0x95, 0x03, // Report Count (3) 46
+
+and there are three of those eight bits, matching X, Y and Wheel. ::
+
+ # 0x81, 0x06, // Input (Data,Var,Rel) 48
+
+This time the data values are Relative (Rel), i.e. they represent
+the change from the previously sent report (event) ::
+
+ # 0xc0, // End Collection 50
+ # 0xc0, // End Collection 51
+ #
+ R: 52 05 01 09 02 a1 01 09 01 a1 00 05 09 19 01 29 03 15 00 25 01 75 01 95 03 81 02 75 05 95 01 81 01 05 01 09 30 09 31 09 38 15 81 25 7f 75 08 95 03 81 06 c0 c0
+ N: device 0:0
+ I: 3 0001 0001
+
+
+This Report Descriptor tells us that the mouse input will be
+transmitted using four bytes: the first one for the buttons (three
+bits used, five for padding), the last three for the mouse X, Y and
+wheel changes, respectively.
+
+Indeed, for any event, the mouse will send a *report* of four bytes.
+We can check the values sent by resorting e.g. to the `hid-recorder`
+tool, from `hid-tools <https://gitlab.freedesktop.org/libevdev/hid-tools>`_:
+The sequence of bytes sent by clicking and releasing button 1, then button 2, then button 3 is::
+
+ $ sudo ./hid-recorder /dev/hidraw1
+
+ ....
+ output of hid-decode
+ ....
+
+ # Button: 1 0 0 | # | X: 0 | Y: 0 | Wheel: 0
+ E: 000000.000000 4 01 00 00 00
+ # Button: 0 0 0 | # | X: 0 | Y: 0 | Wheel: 0
+ E: 000000.183949 4 00 00 00 00
+ # Button: 0 1 0 | # | X: 0 | Y: 0 | Wheel: 0
+ E: 000001.959698 4 02 00 00 00
+ # Button: 0 0 0 | # | X: 0 | Y: 0 | Wheel: 0
+ E: 000002.103899 4 00 00 00 00
+ # Button: 0 0 1 | # | X: 0 | Y: 0 | Wheel: 0
+ E: 000004.855799 4 04 00 00 00
+ # Button: 0 0 0 | # | X: 0 | Y: 0 | Wheel: 0
+ E: 000005.103864 4 00 00 00 00
+
+This example shows that when button 2 is clicked,
+the bytes ``02 00 00 00`` are sent, and the immediately subsequent
+event (``00 00 00 00``) is the release of button 2 (no buttons are
+pressed, remember that the data values are *absolute*).
+
+If instead one clicks and holds button 1, then clicks and holds button
+2, releases button 1, and finally releases button 2, the reports are::
+
+ # Button: 1 0 0 | # | X: 0 | Y: 0 | Wheel: 0
+ E: 000044.175830 4 01 00 00 00
+ # Button: 1 1 0 | # | X: 0 | Y: 0 | Wheel: 0
+ E: 000045.975997 4 03 00 00 00
+ # Button: 0 1 0 | # | X: 0 | Y: 0 | Wheel: 0
+ E: 000047.407930 4 02 00 00 00
+ # Button: 0 0 0 | # | X: 0 | Y: 0 | Wheel: 0
+ E: 000049.199919 4 00 00 00 00
+
+where with ``03 00 00 00`` both buttons are pressed, and with the
+subsequent ``02 00 00 00`` button 1 is released while button 2 is still
+active.
+
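+Putting the descriptor and the recorded reports together, a small
+user-space sketch can decode one such 4-byte report. The struct and the
+``decode_report()`` helper below are purely illustrative, not kernel API::
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    struct mouse_report {
+        int left, right, middle;   /* bits 0..2 of byte 0 */
+        int8_t dx, dy, wheel;      /* bytes 1..3, signed, -127..127 */
+    };
+
+    static struct mouse_report decode_report(const uint8_t data[4])
+    {
+        struct mouse_report r = {
+            .left   = data[0] & 0x01,
+            .right  = (data[0] >> 1) & 0x01,
+            .middle = (data[0] >> 2) & 0x01,
+            .dx     = (int8_t)data[1],
+            .dy     = (int8_t)data[2],
+            .wheel  = (int8_t)data[3],
+        };
+        return r;
+    }
+
+    int main(void)
+    {
+        const uint8_t ev[4] = { 0x03, 0x00, 0x00, 0x00 }; /* buttons 1 and 2 */
+        struct mouse_report r = decode_report(ev);
+
+        printf("L=%d R=%d M=%d dx=%d dy=%d wheel=%d\n",
+               r.left, r.right, r.middle, r.dx, r.dy, r.wheel);
+        return 0;
+    }
+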
+Output, Input and Feature Reports
+---------------------------------
+
+HID devices can have Input Reports, like in the mouse example, Output
+Reports, and Feature Reports. "Output" means that the information is
+sent to the device. For example, a joystick with force feedback will
+have some output; the LED of a keyboard would need an output as well.
+"Input" means that data come from the device.
+
+"Feature"s are not meant to be consumed by the end user and define
+configuration options for the device. They can be queried from the host;
+when declared as *Volatile* they should be changed by the host.
+
+
+Collections, Report IDs and Evdev events
+========================================
+
+A single device can logically group data into different independent
+sets, called a *Collection*. Collections can be nested and there are
+different types of collections (see the HID spec 6.2.2.6
+"Collection, End Collection Items" for details).
+
+Different reports are identified by means of different *Report ID*
+fields, i.e. a number identifying the structure of the immediately
+following report.
+Whenever a Report ID is needed it is transmitted as the first byte of
+any report. A device with only one supported HID report (like the mouse
+example above) may omit the report ID.
+
+Consider the following HID report descriptor::
+
+ 05 01 09 02 A1 01 85 01 05 09 19 01 29 05 15 00
+ 25 01 95 05 75 01 81 02 95 01 75 03 81 01 05 01
+ 09 30 09 31 16 00 F8 26 FF 07 75 0C 95 02 81 06
+ 09 38 15 80 25 7F 75 08 95 01 81 06 05 0C 0A 38
+ 02 15 80 25 7F 75 08 95 01 81 06 C0 05 01 09 02
+ A1 01 85 02 05 09 19 01 29 05 15 00 25 01 95 05
+ 75 01 81 02 95 01 75 03 81 01 05 01 09 30 09 31
+ 16 00 F8 26 FF 07 75 0C 95 02 81 06 09 38 15 80
+ 25 7F 75 08 95 01 81 06 05 0C 0A 38 02 15 80 25
+ 7F 75 08 95 01 81 06 C0 05 01 09 07 A1 01 85 05
+ 05 07 15 00 25 01 09 29 09 3E 09 4B 09 4E 09 E3
+ 09 E8 09 E8 09 E8 75 01 95 08 81 02 95 00 81 01
+ C0 05 0C 09 01 A1 01 85 06 15 00 25 01 75 01 95
+ 01 09 3F 81 06 09 3F 81 06 09 3F 81 06 09 3F 81
+ 06 09 3F 81 06 09 3F 81 06 09 3F 81 06 09 3F 81
+ 06 C0 05 0C 09 01 A1 01 85 03 09 05 15 00 26 FF
+ 00 75 08 95 02 B1 02 C0
+
+After parsing it (try to parse it on your own using the suggested
+tools!) one can see that the device presents two ``Mouse`` Application
+Collections (with reports identified by Reports IDs 1 and 2,
+respectively), a ``Keypad`` Application Collection (whose report is
+identified by the Report ID 5) and two ``Consumer Controls`` Application
+Collections, (with Report IDs 6 and 3, respectively). Note, however,
+that a device can have different Report IDs for the same Application
+Collection.
+
+The data sent will begin with the Report ID byte, and will be followed
+by the corresponding information. For example, the data transmitted for
+the last consumer control::
+
+ 0x05, 0x0C, // Usage Page (Consumer)
+ 0x09, 0x01, // Usage (Consumer Control)
+ 0xA1, 0x01, // Collection (Application)
+ 0x85, 0x03, // Report ID (3)
+ 0x09, 0x05, // Usage (Headphone)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0xFF, 0x00, // Logical Maximum (255)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x02, // Report Count (2)
+ 0xB1, 0x02, // Feature (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile)
+ 0xC0, // End Collection
+
+will be of three bytes: the first for the Report ID (3), the next two
+for the headphone, with two (``Report Count (2)``) bytes
+(``Report Size (8)``), each ranging from 0 (``Logical Minimum (0)``)
+to 255 (``Logical Maximum (255)``).
+
+All the Input data sent by the device should be translated into
+corresponding Evdev events, so that the remaining part of the stack can
+know what is going on, e.g. the bit for the first button translates into
+the ``EV_KEY/BTN_LEFT`` evdev event and relative X movement translates
+into the ``EV_REL/REL_X`` evdev event.
+
+Events
+======
+
+In Linux, one ``/dev/input/event*`` is created for each ``Application
+Collection``. Going back to the mouse example, and repeating the
+sequence where one clicks and holds button 1, then clicks and holds
+button 2, releases button 1, and finally releases button 2, one gets::
+
+ $ sudo libinput record /dev/input/event1
+ # libinput record
+ version: 1
+ ndevices: 1
+ libinput:
+ version: "1.23.0"
+ git: "unknown"
+ system:
+ os: "opensuse-tumbleweed:20230619"
+ kernel: "6.3.7-1-default"
+ dmi: "dmi:bvnHP:bvrU77Ver.01.05.00:bd03/24/2022:br5.0:efr20.29:svnHP:pnHPEliteBook64514inchG9NotebookPC:pvr:rvnHP:rn89D2:rvrKBCVersion14.1D.00:cvnHP:ct10:cvr:sku5Y3J1EA#ABZ:"
+ devices:
+ - node: /dev/input/event1
+ evdev:
+ # Name: PixArt HP USB Optical Mouse
+ # ID: bus 0x3 vendor 0x3f0 product 0x94a version 0x111
+ # Supported Events:
+ # Event type 0 (EV_SYN)
+ # Event type 1 (EV_KEY)
+ # Event code 272 (BTN_LEFT)
+ # Event code 273 (BTN_RIGHT)
+ # Event code 274 (BTN_MIDDLE)
+ # Event type 2 (EV_REL)
+ # Event code 0 (REL_X)
+ # Event code 1 (REL_Y)
+ # Event code 8 (REL_WHEEL)
+ # Event code 11 (REL_WHEEL_HI_RES)
+ # Event type 4 (EV_MSC)
+ # Event code 4 (MSC_SCAN)
+ # Properties:
+ name: "PixArt HP USB Optical Mouse"
+ id: [3, 1008, 2378, 273]
+ codes:
+ 0: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] # EV_SYN
+ 1: [272, 273, 274] # EV_KEY
+ 2: [0, 1, 8, 11] # EV_REL
+ 4: [4] # EV_MSC
+ properties: []
+ hid: [
+ 0x05, 0x01, 0x09, 0x02, 0xa1, 0x01, 0x09, 0x01, 0xa1, 0x00, 0x05, 0x09, 0x19, 0x01, 0x29, 0x03,
+ 0x15, 0x00, 0x25, 0x01, 0x95, 0x08, 0x75, 0x01, 0x81, 0x02, 0x05, 0x01, 0x09, 0x30, 0x09, 0x31,
+ 0x09, 0x38, 0x15, 0x81, 0x25, 0x7f, 0x75, 0x08, 0x95, 0x03, 0x81, 0x06, 0xc0, 0xc0
+ ]
+ udev:
+ properties:
+ - ID_INPUT=1
+ - ID_INPUT_MOUSE=1
+ - LIBINPUT_DEVICE_GROUP=3/3f0/94a:usb-0000:05:00.3-2
+ quirks:
+ events:
+ # Current time is 12:31:56
+ - evdev:
+ - [ 0, 0, 4, 4, 30] # EV_MSC / MSC_SCAN 30 (obfuscated)
+ - [ 0, 0, 1, 272, 1] # EV_KEY / BTN_LEFT 1
+ - [ 0, 0, 0, 0, 0] # ------------ SYN_REPORT (0) ---------- +0ms
+ - evdev:
+ - [ 1, 207892, 4, 4, 30] # EV_MSC / MSC_SCAN 30 (obfuscated)
+ - [ 1, 207892, 1, 273, 1] # EV_KEY / BTN_RIGHT 1
+ - [ 1, 207892, 0, 0, 0] # ------------ SYN_REPORT (0) ---------- +1207ms
+ - evdev:
+ - [ 2, 367823, 4, 4, 30] # EV_MSC / MSC_SCAN 30 (obfuscated)
+ - [ 2, 367823, 1, 272, 0] # EV_KEY / BTN_LEFT 0
+ - [ 2, 367823, 0, 0, 0] # ------------ SYN_REPORT (0) ---------- +1160ms
+ # Current time is 12:32:00
+ - evdev:
+ - [ 3, 247617, 4, 4, 30] # EV_MSC / MSC_SCAN 30 (obfuscated)
+ - [ 3, 247617, 1, 273, 0] # EV_KEY / BTN_RIGHT 0
+ - [ 3, 247617, 0, 0, 0] # ------------ SYN_REPORT (0) ---------- +880ms
+
+Note: if ``libinput record`` is not available on your system, try using
+``evemu-record``.
+
+When something does not work
+============================
+
+There can be a number of reasons why a device does not behave
+correctly. For example
+
+* The HID report descriptor provided by the HID device may be wrong
+ because e.g.
+
+ * it does not follow the standard, so that the kernel
+ will not be able to make sense of the HID report descriptor;
+ * the HID report descriptor *does not match* what is actually
+ sent by the device (this can be verified by reading the raw HID
+ data);
+* the HID report descriptor may need some "quirks" (see later on).
+
+As a consequence, a ``/dev/input/event*`` may not be created
+for each Application Collection, and/or the events
+there may not match what you would expect.
+
+
+Quirks
+------
+
+There are some known peculiarities of HID devices that the kernel
+knows how to fix - these are called the HID quirks and a list of those
+is available in `include/linux/hid.h`.
+
+Should this be the case, it should be enough to add the required quirk
+in the kernel, for the HID device at hand. This can be done in the file
+`drivers/hid/hid-quirks.c`. How to do it should be relatively
+straightforward after looking into the file.
+
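+For example, an entry in the quirks table of `drivers/hid/hid-quirks.c` has
+roughly the following shape (the vendor and product ID macros below are
+placeholders for the ones matching the device at hand)::
+
+    { HID_USB_DEVICE(USB_VENDOR_ID_MYVENDOR, USB_DEVICE_ID_MYMOUSE),
+      HID_QUIRK_NOGET },
+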
+The list of currently defined quirks, from `include/linux/hid.h`, is
+
+.. kernel-doc:: include/linux/hid.h
+ :doc: HID quirks
+
+Quirks for USB devices can be specified while loading the usbhid module,
+see ``modinfo usbhid``, although the proper fix should go into
+hid-quirks.c and **be submitted upstream**.
+See Documentation/process/submitting-patches.rst for guidelines on how
+to submit a patch. Quirks for other busses need to go into hid-quirks.c.
+
+Fixing HID report descriptors
+-----------------------------
+
+Should you need to patch HID report descriptors, the easiest way is to
+resort to eBPF, as described in Documentation/hid/hid-bpf.rst.
+
+Basically, you can change any byte of the original HID report
+descriptor. The examples in samples/hid should be a good starting point
+for your code, see e.g. `samples/hid/hid_mouse.bpf.c`::
+
+ SEC("fmod_ret/hid_bpf_rdesc_fixup")
+ int BPF_PROG(hid_rdesc_fixup, struct hid_bpf_ctx *hctx)
+ {
+ ....
+ data[39] = 0x31;
+ data[41] = 0x30;
+ return 0;
+ }
+
+Of course this can also be done within the kernel source code, see e.g.
+`drivers/hid/hid-aureal.c` or `drivers/hid/hid-samsung.c` for a slightly
+more complex example.
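+
+For reference, the kernel-side equivalent is a ``report_fixup`` callback in a
+device-specific ``hid_driver``. A minimal sketch, mirroring the byte changes of
+the eBPF example above (the driver name and the checked offsets are
+illustrative only and must match the descriptor of your device)::
+
+  static __u8 *my_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+                                     unsigned int *rsize)
+  {
+          /* swap the X and Y usages, as the eBPF example above does */
+          if (*rsize > 41 && rdesc[39] == 0x30 && rdesc[41] == 0x31) {
+                  hid_info(hdev, "fixing up report descriptor\n");
+                  rdesc[39] = 0x31;
+                  rdesc[41] = 0x30;
+          }
+          return rdesc;
+  }
+
+  static struct hid_driver my_mouse_driver = {
+          .name = "my-mouse-fixup",
+          .report_fixup = my_mouse_report_fixup,
+          /* .id_table, .probe and module boilerplate omitted */
+  };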
+
+Check Documentation/hid/hidreport-parsing.rst if you need any help
+navigating the HID manuals and understanding the exact meaning of
+the HID report descriptor hex numbers.
+
+Whatever solution you come up with, please remember to **submit the
+fix to the HID maintainers**, so that it can be directly integrated in
+the kernel and that particular HID device will start working for
+everyone else. See Documentation/process/submitting-patches.rst for
+guidelines on how to do this.
+
+
+Modifying the transmitted data on the fly
+-----------------------------------------
+
+Using eBPF it is also possible to modify the data exchanged with the
+device. See again the examples in `samples/hid`.
+
+Again, **please post your fix**, so that it can be integrated in the
+kernel!
+
+Writing a specialized driver
+----------------------------
+
+This should really be your last resort.
+
+
+.. rubric:: Footnotes
+
+.. [#hidraw] read hidraw: see Documentation/hid/hidraw.rst and
+ file `samples/hidraw/hid-example.c` for an example.
+ The output of ``hid-example`` would be, for the same mouse::
+
+ $ sudo ./hid-example
+ Report Descriptor Size: 52
+ Report Descriptor:
+ 5 1 9 2 a1 1 9 1 a1 0 5 9 19 1 29 3 15 0 25 1 75 1 95 3 81 2 75 5 95 1 81 1 5 1 9 30 9 31 9 38 15 81 25 7f 75 8 95 3 81 6 c0 c0
+
+ Raw Name: PixArt USB Optical Mouse
+ Raw Phys: usb-0000:05:00.4-2.3/input0
+ Raw Info:
+ bustype: 3 (USB)
+ vendor: 0x093a
+ product: 0x2510
+ ...
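+
+ The fields above come from the hidraw ioctls. A minimal sketch of that
+ part of the program (the ``/dev/hidraw0`` node is an assumption, and
+ error checking of the ioctls is omitted for brevity)::
+
+  #include <stdio.h>
+  #include <fcntl.h>
+  #include <unistd.h>
+  #include <sys/ioctl.h>
+  #include <linux/hidraw.h>
+
+  int main(void)
+  {
+          struct hidraw_report_descriptor rpt_desc;
+          struct hidraw_devinfo info;
+          char name[256] = "";
+          int fd, desc_size = 0, i;
+
+          fd = open("/dev/hidraw0", O_RDONLY);  /* adjust to your device */
+          if (fd < 0)
+                  return 1;
+
+          ioctl(fd, HIDIOCGRDESCSIZE, &desc_size);
+          rpt_desc.size = desc_size;
+          ioctl(fd, HIDIOCGRDESC, &rpt_desc);
+          ioctl(fd, HIDIOCGRAWNAME(sizeof(name)), name);
+          ioctl(fd, HIDIOCGRAWINFO, &info);
+
+          printf("Report Descriptor Size: %d\n", desc_size);
+          for (i = 0; i < desc_size; i++)
+                  printf("%hhx ", rpt_desc.value[i]);
+          printf("\nRaw Name: %s\n", name);
+          printf("Raw Info:\n\tbustype: %u\n\tvendor: 0x%04hx\n\tproduct: 0x%04hx\n",
+                 info.bustype, info.vendor, info.product);
+
+          close(fd);
+          return 0;
+  }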
diff --git a/Documentation/hid/hidreport-parsing.rst b/Documentation/hid/hidreport-parsing.rst
new file mode 100644
index 000000000000..1d3c17f29f2b
--- /dev/null
+++ b/Documentation/hid/hidreport-parsing.rst
@@ -0,0 +1,49 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+========================================
+Manual parsing of HID report descriptors
+========================================
+
+Consider again the mouse HID report descriptor
+introduced in Documentation/hid/hidintro.rst::
+
+ $ hexdump -C /sys/bus/hid/devices/0003\:093A\:2510.0002/report_descriptor
+ 00000000 05 01 09 02 a1 01 09 01 a1 00 05 09 19 01 29 03 |..............).|
+ 00000010 15 00 25 01 75 01 95 03 81 02 75 05 95 01 81 01 |..%.u.....u.....|
+ 00000020 05 01 09 30 09 31 09 38 15 81 25 7f 75 08 95 03 |...0.1.8..%.u...|
+ 00000030 81 06 c0 c0 |....|
+ 00000034
+
+and try to parse it by hand.
+
+Start with the first number, 0x05: it carries 2 bits for the
+length of the item, 2 bits for the type of the item and 4 bits for the
+function::
+
+  +----------+
+  | 00000101 |
+  +----------+
+          ^^
+          ---- Length of data (see HID spec 6.2.2.2)
+        ^^
+        ------ Type of the item (see HID spec 6.2.2.2, then jump to 6.2.2.7)
+    ^^^^
+    --------- Function of the item (see HID spec 6.2.2.7, then HUT Sec 3)
+
+In our case, the length is 1 byte, the type is ``Global`` and the
+function is ``Usage Page``, thus for parsing the value 0x01 in the second byte
+we need to refer to HUT Sec 3.
+
+The second number is the actual data, and its meaning can be found in
+the HUT. We have a ``Usage Page``, thus we need to refer to HUT
+Sec. 3, "Usage Pages"; from there, one sees that ``0x01`` stands for
+``Generic Desktop Page``.
+
+Moving on to the next two bytes, and following the same scheme,
+``0x09`` (i.e. ``00001001``) will be followed by one byte (``01``)
+and is a ``Local`` item (``10``). Thus, the meaning of the remaining four bits
+(``0000``) is given in the HID spec Sec. 6.2.2.8 "Local Items", so that
+we have a ``Usage``. From HUT, Sec. 4, "Generic Desktop Page", we see that
+0x02 stands for ``Mouse``.
+
+The following numbers can be parsed in the same way.
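+
+If you want to script this exercise, the prefix byte of every short item
+can be split with a few shifts and masks. A small user-space sketch (it
+ignores long items, which start with the prefix 0xfe, and does not
+interpret the tags further) could look like::
+
+  #include <stdio.h>
+
+  /* Short-item prefix byte: bits 1:0 = size, bits 3:2 = type, bits 7:4 = tag */
+  static void dump_items(const unsigned char *d, unsigned int len)
+  {
+          static const char *types[] = { "Main", "Global", "Local", "Reserved" };
+          unsigned int i = 0;
+
+          while (i < len) {
+                  unsigned char prefix = d[i];
+                  unsigned int size = prefix & 0x3;       /* 3 encodes 4 bytes */
+                  unsigned int type = (prefix >> 2) & 0x3;
+                  unsigned int tag = (prefix >> 4) & 0xf;
+                  unsigned long data = 0;
+                  unsigned int j;
+
+                  if (size == 3)
+                          size = 4;
+                  for (j = 0; j < size && i + 1 + j < len; j++)
+                          data |= (unsigned long)d[i + 1 + j] << (8 * j);
+
+                  printf("%02x: %-8s tag 0x%x, %u data byte(s), data 0x%lx\n",
+                         prefix, types[type], tag, size, data);
+                  i += 1 + size;
+          }
+  }
+
+  int main(void)
+  {
+          /* first items of the mouse descriptor discussed above */
+          static const unsigned char rdesc[] = {
+                  0x05, 0x01,   /* Usage Page (Generic Desktop) */
+                  0x09, 0x02,   /* Usage (Mouse) */
+                  0xa1, 0x01,   /* Collection (Application) */
+          };
+
+          dump_items(rdesc, sizeof(rdesc));
+          return 0;
+  }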
diff --git a/Documentation/hid/index.rst b/Documentation/hid/index.rst
index b2028f382f11..af02cf7cfa82 100644
--- a/Documentation/hid/index.rst
+++ b/Documentation/hid/index.rst
@@ -7,6 +7,7 @@ Human Interface Devices (HID)
.. toctree::
:maxdepth: 1
+ hidintro
hiddev
hidraw
hid-sensor
diff --git a/Documentation/i2c/i2c-address-translators.rst b/Documentation/i2c/i2c-address-translators.rst
new file mode 100644
index 000000000000..b22ce9f41ecf
--- /dev/null
+++ b/Documentation/i2c/i2c-address-translators.rst
@@ -0,0 +1,96 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================
+I2C Address Translators
+=======================
+
+Author: Luca Ceresoli <luca@lucaceresoli.net>
+Author: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+
+Description
+-----------
+
+An I2C Address Translator (ATR) is a device with an I2C slave parent
+("upstream") port and N I2C master child ("downstream") ports. It
+forwards transactions from upstream to the appropriate downstream port
+with a modified slave address. The address used on the parent bus is
+called the "alias" and is (potentially) different from the physical
+slave address of the child bus. Address translation is done by the
+hardware.
+
+An ATR looks similar to an i2c-mux except:
+ - the address on the parent and child busses can be different
+ - there is normally no need to select the child port; the alias used on the
+ parent bus implies it
+
+The ATR functionality can be provided by a chip with many other features.
+The kernel i2c-atr provides a helper to implement an ATR within a driver.
+
+The ATR creates a new I2C "child" adapter on each child bus. Adding
+devices on a child bus results in the driver code being invoked to select
+an available alias. Maintaining an appropriate pool of available aliases
+and picking one for each new device is up to the driver implementer. The
+ATR maintains a table of currently assigned aliases and uses it to modify
+all I2C transactions directed to devices on the child buses.
+
+A typical example follows.
+
+Topology::
+
+                      Slave X @ 0x10
+              .-----.   |
+  .-----.     |     |---+---- B
+  | CPU |--A--| ATR |
+  `-----'     |     |---+---- C
+              `-----'   |
+                      Slave Y @ 0x10
+
+A, B and C are three physical I2C busses, electrically independent from
+each other. The ATR receives the transactions initiated on bus A and
+propagates them on bus B or bus C or none depending on the device address
+in the transaction and based on the alias table.
+
+Alias table:
+
+.. table::
+
+ =============== =====
+ Client Alias
+ =============== =====
+ X (bus B, 0x10) 0x20
+ Y (bus C, 0x10) 0x30
+ =============== =====
+
+Transaction:
+
+ - Slave X driver requests a transaction (on adapter B), slave address 0x10
+ - ATR driver finds slave X is on bus B and has alias 0x20, rewrites
+ messages with address 0x20, forwards to adapter A
+ - Physical I2C transaction on bus A, slave address 0x20
+ - ATR chip detects transaction on address 0x20, finds it in table,
+ propagates transaction on bus B with address translated to 0x10,
+ keeps the clock stretched on bus A while waiting for the reply
+ - Slave X chip (on bus B) detects transaction at its own physical
+ address 0x10 and replies normally
+ - ATR chip stops clock stretching and forwards reply on bus A,
+ with address translated back to 0x20
+ - ATR driver receives the reply, rewrites messages with address 0x10
+ as they were initially
+ - Slave X driver gets back the msgs[], with reply and address 0x10
+
+Usage:
+
+ 1. In the driver (typically in the probe function) add an ATR by
+ calling i2c_atr_new() passing attach/detach callbacks
+ 2. When the attach callback is called pick an appropriate alias,
+ configure it in the chip and return the chosen alias in the
+ alias_id parameter
+ 3. When the detach callback is called, deconfigure the alias from
+ the chip and put the alias back in the pool for later usage
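+
+As a rough sketch of that flow in a hypothetical driver (the helper
+functions and the exact callback prototypes are illustrative only; the
+real interface is in include/linux/i2c-atr.h)::
+
+  static int my_attach(struct i2c_atr *atr, u32 chan_id,
+                       const struct i2c_client *client, u16 *alias_id)
+  {
+          /* pick a free alias from a driver-managed pool (hypothetical helper) */
+          *alias_id = my_alias_pool_get();
+          /* program the chip to translate alias_id <-> client->addr */
+          return my_chip_set_translation(chan_id, client->addr, *alias_id);
+  }
+
+  static void my_detach(struct i2c_atr *atr, u32 chan_id,
+                        const struct i2c_client *client)
+  {
+          /* deconfigure the alias in the chip and return it to the pool */
+  }
+
+  static const struct i2c_atr_ops my_atr_ops = {
+          .attach_client = my_attach,
+          .detach_client = my_detach,
+  };
+
+  /* typically in probe(): one child adapter per downstream port */
+  atr = i2c_atr_new(parent_adapter, dev, &my_atr_ops, NUM_PORTS);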
+
+I2C ATR functions and data structures
+-------------------------------------
+
+.. kernel-doc:: include/linux/i2c-atr.h
diff --git a/Documentation/i2c/index.rst b/Documentation/i2c/index.rst
index 6270f1fd7d4e..2b213d4ce89c 100644
--- a/Documentation/i2c/index.rst
+++ b/Documentation/i2c/index.rst
@@ -18,6 +18,7 @@ Introduction
i2c-topology
muxes/i2c-mux-gpio
i2c-sysfs
+ i2c-address-translators
Writing device drivers
======================
diff --git a/Documentation/kbuild/kconfig.rst b/Documentation/kbuild/kconfig.rst
index 3ee89dfe4697..c946eb44bd13 100644
--- a/Documentation/kbuild/kconfig.rst
+++ b/Documentation/kbuild/kconfig.rst
@@ -56,6 +56,15 @@ KCONFIG_OVERWRITECONFIG
If you set KCONFIG_OVERWRITECONFIG in the environment, Kconfig will not
break symlinks when .config is a symlink to somewhere else.
+KCONFIG_WARN_UNKNOWN_SYMBOLS
+----------------------------
+This environment variable makes Kconfig warn about all unrecognized
+symbols in the config input.
+
+KCONFIG_WERROR
+--------------
+If set, Kconfig treats warnings as errors.
+
`CONFIG_`
---------
If you set `CONFIG_` in the environment, Kconfig will prefix all symbols
@@ -212,6 +221,10 @@ Searching in menuconfig:
first (and in alphabetical order), then come all other symbols,
sorted in alphabetical order.
+ In this menu, pressing the key in the (#) prefix will jump
+ directly to that location. You will be returned to the current
+ search results after exiting this new menu.
+
----------------------------------------------------------------------
User interface options for 'menuconfig'
@@ -264,6 +277,10 @@ Searching in nconfig:
F8 (SymSearch) searches the configuration symbols for the
given string or regular expression (regex).
+ In the SymSearch, pressing the key in the (#) prefix will
+ jump directly to that location. You will be returned to the
+ current search results after exiting this new menu.
+
NCONFIG_MODE
------------
This mode shows all sub-menus in one large tree.
diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst
index c3851fe1900d..b1d97fafddcf 100644
--- a/Documentation/kbuild/llvm.rst
+++ b/Documentation/kbuild/llvm.rst
@@ -25,50 +25,38 @@ objects <https://www.aosabook.org/en/llvm.html>`_. Clang is a front-end to LLVM
that supports C and the GNU C extensions required by the kernel, and is
pronounced "klang," not "see-lang."
-Clang
------
-
-The compiler used can be swapped out via ``CC=`` command line argument to ``make``.
-``CC=`` should be set when selecting a config and during a build. ::
-
- make CC=clang defconfig
-
- make CC=clang
+Building with LLVM
+------------------
-Cross Compiling
----------------
+Invoke ``make`` via::
-A single Clang compiler binary will typically contain all supported backends,
-which can help simplify cross compiling. ::
-
- make ARCH=arm64 CC=clang CROSS_COMPILE=aarch64-linux-gnu-
+ make LLVM=1
-``CROSS_COMPILE`` is not used to prefix the Clang compiler binary, instead
-``CROSS_COMPILE`` is used to set a command line flag: ``--target=<triple>``. For
-example: ::
+to compile for the host target. For cross compiling::
- clang --target=aarch64-linux-gnu foo.c
+ make LLVM=1 ARCH=arm64
-LLVM Utilities
---------------
+The LLVM= argument
+------------------
-LLVM has substitutes for GNU binutils utilities. They can be enabled individually.
-The full list of supported make variables::
+LLVM has substitutes for GNU binutils utilities. They can be enabled
+individually. The full list of supported make variables::
make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \
OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump READELF=llvm-readelf \
HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar HOSTLD=ld.lld
-To simplify the above command, Kbuild supports the ``LLVM`` variable::
-
- make LLVM=1
+``LLVM=1`` expands to the above.
If your LLVM tools are not available in your PATH, you can supply their
location using the LLVM variable with a trailing slash::
make LLVM=/path/to/llvm/
-which will use ``/path/to/llvm/clang``, ``/path/to/llvm/ld.lld``, etc.
+which will use ``/path/to/llvm/clang``, ``/path/to/llvm/ld.lld``, etc. The
+following may also be used::
+
+ PATH=/path/to/llvm:$PATH make LLVM=1
If your LLVM tools have a version suffix and you want to test with that
explicit version rather than the unsuffixed executables like ``LLVM=1``, you
@@ -78,31 +66,72 @@ can pass the suffix using the ``LLVM`` variable::
which will use ``clang-14``, ``ld.lld-14``, etc.
+To support combinations of out of tree paths with version suffixes, we
+recommend::
+
+ PATH=/path/to/llvm/:$PATH make LLVM=-14
+
``LLVM=0`` is not the same as omitting ``LLVM`` altogether, it will behave like
-``LLVM=1``. If you only wish to use certain LLVM utilities, use their respective
-make variables.
+``LLVM=1``. If you only wish to use certain LLVM utilities, use their
+respective make variables.
+
+The same value used for ``LLVM=`` should be set for each invocation of ``make``
+if configuring and building via distinct commands. ``LLVM=`` should also be set
+as an environment variable when running scripts that will eventually run
+``make``.
-The integrated assembler is enabled by default. You can pass ``LLVM_IAS=0`` to
-disable it.
+Cross Compiling
+---------------
-Omitting CROSS_COMPILE
+A single Clang compiler binary (and corresponding LLVM utilities) will
+typically contain all supported back ends, which can help simplify cross
+compiling, especially when ``LLVM=1`` is used. If you use only LLVM tools,
+``CROSS_COMPILE`` or target triple prefixes become unnecessary. Example::
+
+ make LLVM=1 ARCH=arm64
+
+As an example of mixing LLVM and GNU utilities, for a target like ``ARCH=s390``
+which does not yet have ``ld.lld`` or ``llvm-objcopy`` support, you could
+invoke ``make`` via::
+
+ make LLVM=1 ARCH=s390 LD=s390x-linux-gnu-ld.bfd \
+ OBJCOPY=s390x-linux-gnu-objcopy
+
+This example will invoke ``s390x-linux-gnu-ld.bfd`` as the linker and
+``s390x-linux-gnu-objcopy``, so ensure those are reachable in your ``$PATH``.
+
+``CROSS_COMPILE`` is not used to prefix the Clang compiler binary (or
+corresponding LLVM utilities) as is the case for GNU utilities when ``LLVM=1``
+is not set.
+
+The LLVM_IAS= argument
----------------------
-As explained above, ``CROSS_COMPILE`` is used to set ``--target=<triple>``.
+Clang can assemble assembler code. You can pass ``LLVM_IAS=0`` to disable this
+behavior and have Clang invoke the corresponding non-integrated assembler
+instead. Example::
+
+ make LLVM=1 LLVM_IAS=0
+
+``CROSS_COMPILE`` is necessary when cross compiling and ``LLVM_IAS=0``
+is used in order to set ``--prefix=`` for the compiler to find the
+corresponding non-integrated assembler (typically, you don't want to use the
+system assembler when targeting another architecture). Example::
-If ``CROSS_COMPILE`` is not specified, the ``--target=<triple>`` is inferred
-from ``ARCH``.
+ make LLVM=1 ARCH=arm LLVM_IAS=0 CROSS_COMPILE=arm-linux-gnueabi-
-That means if you use only LLVM tools, ``CROSS_COMPILE`` becomes unnecessary.
-For example, to cross-compile the arm64 kernel::
+Ccache
+------
- make ARCH=arm64 LLVM=1
+``ccache`` can be used with ``clang`` to improve subsequent builds (though
+KBUILD_BUILD_TIMESTAMP_ should be set to a deterministic value between builds
+in order to avoid 100% cache misses, see Reproducible_builds_ for more info)::
-If ``LLVM_IAS=0`` is specified, ``CROSS_COMPILE`` is also used to derive
-``--prefix=<path>`` to search for the GNU assembler and linker. ::
+ KBUILD_BUILD_TIMESTAMP='' make LLVM=1 CC="ccache clang"
- make ARCH=arm64 LLVM=1 LLVM_IAS=0 CROSS_COMPILE=aarch64-linux-gnu-
+.. _KBUILD_BUILD_TIMESTAMP: kbuild.html#kbuild-build-timestamp
+.. _Reproducible_builds: reproducible-builds.html#timestamps
Supported Architectures
-----------------------
@@ -135,14 +164,17 @@ yet. Bug reports are always welcome at the issue tracker below!
* - hexagon
- Maintained
- ``LLVM=1``
+ * - loongarch
+ - Maintained
+ - ``LLVM=1``
* - mips
- Maintained
- ``LLVM=1``
* - powerpc
- Maintained
- - ``CC=clang``
+ - ``LLVM=1``
* - riscv
- - Maintained
+ - Supported
- ``LLVM=1``
* - s390
- Maintained
@@ -171,7 +203,11 @@ Getting Help
Getting LLVM
-------------
-We provide prebuilt stable versions of LLVM on `kernel.org <https://kernel.org/pub/tools/llvm/>`_.
+We provide prebuilt stable versions of LLVM on `kernel.org
+<https://kernel.org/pub/tools/llvm/>`_. These have been optimized with profile
+data for building Linux kernels, which should improve kernel build times
+relative to other distributions of LLVM.
+
Below are links that may be useful for building LLVM from source or procuring
it through a distribution's package manager.
diff --git a/Documentation/process/maintainer-netdev.rst b/Documentation/process/maintainer-netdev.rst
index c1c732e9748b..09dcf6377c27 100644
--- a/Documentation/process/maintainer-netdev.rst
+++ b/Documentation/process/maintainer-netdev.rst
@@ -98,7 +98,7 @@ If you aren't subscribed to netdev and/or are simply unsure if
repository link above for any new networking-related commits. You may
also check the following website for the current status:
- https://patchwork.hopto.org/net-next.html
+ https://netdev.bots.linux.dev/net-next.html
The ``net`` tree continues to collect fixes for the vX.Y content, and is
fed back to Linus at regular (~weekly) intervals. Meaning that the
@@ -120,7 +120,37 @@ queue for netdev:
https://patchwork.kernel.org/project/netdevbpf/list/
The "State" field will tell you exactly where things are at with your
-patch. Patches are indexed by the ``Message-ID`` header of the emails
+patch:
+
+================== =============================================================
+Patch state Description
+================== =============================================================
+New, Under review pending review, patch is in the maintainer’s queue for
+ review; the two states are used interchangeably (depending on
+ the exact co-maintainer handling patchwork at the time)
+Accepted patch was applied to the appropriate networking tree, this is
+ usually set automatically by the pw-bot
+Needs ACK waiting for an ack from an area expert or testing
+Changes requested patch has not passed the review, new revision is expected
+ with appropriate code and commit message changes
+Rejected patch has been rejected and new revision is not expected
+Not applicable patch is expected to be applied outside of the networking
+ subsystem
+Awaiting upstream patch should be reviewed and handled by appropriate
+ sub-maintainer, who will send it on to the networking trees;
+ patches set to ``Awaiting upstream`` in netdev's patchwork
+ will usually remain in this state, whether the sub-maintainer
+ requested changes, accepted or rejected the patch
+Deferred patch needs to be reposted later, usually due to dependency
+ or because it was posted for a closed tree
+Superseded new version of the patch was posted, usually set by the
+ pw-bot
+RFC not to be applied, usually not in maintainer’s review queue,
+ pw-bot can automatically set patches to this state based
+ on subject tags
+================== =============================================================
+
+Patches are indexed by the ``Message-ID`` header of the emails
which carried them so if you have trouble finding your patch append
the value of ``Message-ID`` to the URL above.
@@ -155,7 +185,7 @@ must match the MAINTAINERS entry) and a handful of senior reviewers.
Bot records its activity here:
- https://patchwork.hopto.org/pw-bot.html
+ https://netdev.bots.linux.dev/pw-bot.html
Review timelines
~~~~~~~~~~~~~~~~
diff --git a/Documentation/process/stable-kernel-rules.rst b/Documentation/process/stable-kernel-rules.rst
index 51df1197d5ab..41f1e07abfdf 100644
--- a/Documentation/process/stable-kernel-rules.rst
+++ b/Documentation/process/stable-kernel-rules.rst
@@ -6,30 +6,29 @@ Everything you ever wanted to know about Linux -stable releases
Rules on what kind of patches are accepted, and which ones are not, into the
"-stable" tree:
+ - It or an equivalent fix must already exist in Linus' tree (upstream).
- It must be obviously correct and tested.
- It cannot be bigger than 100 lines, with context.
- - It must fix only one thing.
- - It must fix a real bug that bothers people (not a, "This could be a
- problem..." type thing).
- - It must fix a problem that causes a build error (but not for things
- marked CONFIG_BROKEN), an oops, a hang, data corruption, a real
- security issue, or some "oh, that's not good" issue. In short, something
- critical.
- - Serious issues as reported by a user of a distribution kernel may also
- be considered if they fix a notable performance or interactivity issue.
- As these fixes are not as obvious and have a higher risk of a subtle
- regression they should only be submitted by a distribution kernel
- maintainer and include an addendum linking to a bugzilla entry if it
- exists and additional information on the user-visible impact.
- - New device IDs and quirks are also accepted.
- - No "theoretical race condition" issues, unless an explanation of how the
- race can be exploited is also provided.
- - It cannot contain any "trivial" fixes in it (spelling changes,
- whitespace cleanups, etc).
- It must follow the
:ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
rules.
- - It or an equivalent fix must already exist in Linus' tree (upstream).
+ - It must either fix a real bug that bothers people or just add a device ID.
+ To elaborate on the former:
+
+ - It fixes a problem like an oops, a hang, data corruption, a real security
+ issue, a hardware quirk, a build error (but not for things marked
+ CONFIG_BROKEN), or some "oh, that's not good" issue.
+ - Serious issues as reported by a user of a distribution kernel may also
+ be considered if they fix a notable performance or interactivity issue.
+ As these fixes are not as obvious and have a higher risk of a subtle
+ regression they should only be submitted by a distribution kernel
+ maintainer and include an addendum linking to a bugzilla entry if it
+ exists and additional information on the user-visible impact.
+ - No "This could be a problem..." type of things like a "theoretical race
+ condition", unless an explanation of how the bug can be exploited is also
+ provided.
+ - No "trivial" fixes without benefit for users (spelling changes, whitespace
+ cleanups, etc).
Procedure for submitting patches to the -stable tree
@@ -41,111 +40,142 @@ Procedure for submitting patches to the -stable tree
process but should follow the procedures in
:ref:`Documentation/process/security-bugs.rst <securitybugs>`.
-For all other submissions, choose one of the following procedures
------------------------------------------------------------------
+There are three options to submit a change to -stable trees:
+
+ 1. Add a 'stable tag' to the description of a patch you then submit for
+ mainline inclusion.
+ 2. Ask the stable team to pick up a patch already mainlined.
+ 3. Submit a patch to the stable team that is equivalent to a change already
+ mainlined.
+
+The sections below describe each of the options in more detail.
+
+:ref:`option_1` is **strongly** preferred; it is the easiest and most common.
+:ref:`option_2` is mainly meant for changes where backporting was not considered
+at the time of submission. :ref:`option_3` is an alternative to the two earlier
+options for cases where a mainlined patch needs adjustments to apply in older
+series (for example due to API changes).
+
+When using option 2 or 3 you can ask for your change to be included in specific
+stable series. When doing so, ensure the fix or an equivalent is applicable,
+submitted, or already present in all newer stable trees still supported. This is
+meant to prevent regressions that users might later encounter on updating, if
+e.g. a fix merged for 5.19-rc1 would be backported to 5.10.y, but not to 5.15.y.
.. _option_1:
Option 1
********
-To have the patch automatically included in the stable tree, add the tag
+To have a patch you submit for mainline inclusion later automatically picked up
+for stable trees, add the tag
.. code-block:: none
Cc: stable@vger.kernel.org
-in the sign-off area. Once the patch is merged it will be applied to
-the stable tree without anything else needing to be done by the author
-or subsystem maintainer.
+in the sign-off area. Once the patch is mainlined it will be applied to the
+stable tree without anything else needing to be done by the author or
+subsystem maintainer.
-.. _option_2:
+To send additional instructions to the stable team, use a shell-style inline
+comment:
-Option 2
-********
+ * To specify any additional patch prerequisites for cherry picking use the
+ following format in the sign-off area:
-After the patch has been merged to Linus' tree, send an email to
-stable@vger.kernel.org containing the subject of the patch, the commit ID,
-why you think it should be applied, and what kernel version you wish it to
-be applied to.
+ .. code-block:: none
-.. _option_3:
+ Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
+ Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
+ Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
+ Cc: <stable@vger.kernel.org> # 3.3.x
+ Signed-off-by: Ingo Molnar <mingo@elte.hu>
-Option 3
-********
+ The tag sequence has the meaning of:
-Send the patch, after verifying that it follows the above rules, to
-stable@vger.kernel.org. You must note the upstream commit ID in the
-changelog of your submission, as well as the kernel version you wish
-it to be applied to.
+ .. code-block:: none
-:ref:`option_1` is **strongly** preferred, is the easiest and most common.
-:ref:`option_2` and :ref:`option_3` are more useful if the patch isn't deemed
-worthy at the time it is applied to a public git tree (for instance, because
-it deserves more regression testing first). :ref:`option_3` is especially
-useful if the original upstream patch needs to be backported (for example
-the backport needs some special handling due to e.g. API changes).
+ git cherry-pick a1f84a3
+ git cherry-pick 1b9508f
+ git cherry-pick fd21073
+ git cherry-pick <this commit>
-Note that for :ref:`option_3`, if the patch deviates from the original
-upstream patch (for example because it had to be backported) this must be very
-clearly documented and justified in the patch description.
+ * For patches that may have kernel version prerequisites, specify them using
+ the following format in the sign-off area:
-The upstream commit ID must be specified with a separate line above the commit
-text, like this:
+ .. code-block:: none
-.. code-block:: none
+ Cc: <stable@vger.kernel.org> # 3.3.x
- commit <sha1> upstream.
+ The tag has the meaning of:
-or alternatively:
+ .. code-block:: none
-.. code-block:: none
+ git cherry-pick <this commit>
- [ Upstream commit <sha1> ]
+ For each "-stable" tree starting with the specified version.
-Additionally, some patches submitted via :ref:`option_1` may have additional
-patch prerequisites which can be cherry-picked. This can be specified in the
-following format in the sign-off area:
+ Note, such tagging is unnecessary if the stable team can derive the
+ appropriate versions from Fixes: tags.
-.. code-block:: none
+ * To delay pick up of patches, use the following format:
- Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
- Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
- Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
- Cc: <stable@vger.kernel.org> # 3.3.x
- Signed-off-by: Ingo Molnar <mingo@elte.hu>
+ .. code-block:: none
-The tag sequence has the meaning of:
+ Cc: <stable@vger.kernel.org> # after 4 weeks in mainline
-.. code-block:: none
+ * For any other requests, just add a note to the stable tag. This can, for
+ example, be used to point out known problems:
- git cherry-pick a1f84a3
- git cherry-pick 1b9508f
- git cherry-pick fd21073
- git cherry-pick <this commit>
+ .. code-block:: none
+
+ Cc: <stable@vger.kernel.org> # see patch description, needs adjustments for <= 6.3
+
+.. _option_2:
+
+Option 2
+********
+
+If the patch has already been merged to mainline, send an email to
+stable@vger.kernel.org containing the subject of the patch, the commit ID,
+why you think it should be applied, and what kernel versions you wish it to
+be applied to.
+
+.. _option_3:
-Also, some patches may have kernel version prerequisites. This can be
-specified in the following format in the sign-off area:
+Option 3
+********
+
+Send the patch, after verifying that it follows the above rules, to
+stable@vger.kernel.org and mention the kernel versions you wish it to be applied
+to. When doing so, you must note the upstream commit ID in the changelog of your
+submission with a separate line above the commit text, like this:
.. code-block:: none
- Cc: <stable@vger.kernel.org> # 3.3.x
+ commit <sha1> upstream.
-The tag has the meaning of:
+or alternatively:
.. code-block:: none
- git cherry-pick <this commit>
+ [ Upstream commit <sha1> ]
+
+If the submitted patch deviates from the original upstream patch (for example
+because it had to be adjusted for the older API), this must be very clearly
+documented and justified in the patch description.
-For each "-stable" tree starting with the specified version.
-Following the submission:
+Following the submission
+------------------------
- - The sender will receive an ACK when the patch has been accepted into the
- queue, or a NAK if the patch is rejected. This response might take a few
- days, according to the developer's schedules.
- - If accepted, the patch will be added to the -stable queue, for review by
- other developers and by the relevant subsystem maintainer.
+The sender will receive an ACK when the patch has been accepted into the
+queue, or a NAK if the patch is rejected. This response might take a few
+days, according to the schedules of the stable team members.
+
+If accepted, the patch will be added to the -stable queue, for review by other
+developers and by the relevant subsystem maintainer.
Review cycle
@@ -174,6 +204,7 @@ Review cycle
security kernel team, and not go through the normal review cycle.
Contact the kernel security team for more details on this procedure.
+
Trees
-----
diff --git a/Documentation/riscv/vm-layout.rst b/Documentation/riscv/vm-layout.rst
index 5462c84f4723..69ff6da1dbf8 100644
--- a/Documentation/riscv/vm-layout.rst
+++ b/Documentation/riscv/vm-layout.rst
@@ -133,3 +133,25 @@ RISC-V Linux Kernel SV57
ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | modules, BPF
ffffffff80000000 | -2 GB | ffffffffffffffff | 2 GB | kernel
__________________|____________|__________________|_________|____________________________________________________________
+
+
+Userspace VAs
+--------------------
+To maintain compatibility with software that relies on the VA space with a
+maximum of 48 bits, the kernel will, by default, return virtual addresses to
+userspace from a 48-bit range (sv48). This default behavior is achieved by
+passing 0 into the hint address parameter of mmap. On CPUs with an address space
+smaller than sv48, the CPU maximum supported address space will be the default.
+
+Software can "opt-in" to receiving VAs from another VA space by providing
+a hint address to mmap. A hint address passed to mmap will cause the largest
+address space that fits entirely into the hint to be used, unless there is no
+space left in the address space. If there is no space available in the requested
+address space, an address in the next smallest available address space will be
+returned.
+
+For example, in order to obtain 48-bit VA space, a hint address greater than
+:code:`1 << 47` must be provided. Note that this is 47 due to sv48 userspace
+ending at :code:`1 << 47` and the addresses beyond this are reserved for the
+kernel. Similarly, to obtain 57-bit VA space addresses, a hint address greater
+than or equal to :code:`1 << 56` must be provided.
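+
+As a user-space illustration of the opt-in (a sketch only, assuming a
+riscv64 CPU that supports sv57)::
+
+  #include <stdio.h>
+  #include <sys/mman.h>
+
+  int main(void)
+  {
+          size_t len = 4096;
+          int prot = PROT_READ | PROT_WRITE;
+          int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+
+          /* NULL hint: the default, addresses are limited to the sv48 range */
+          void *def = mmap(NULL, len, prot, flags, -1, 0);
+
+          /* hint at 1 << 56: opt in to 57-bit (sv57) addresses */
+          void *high = mmap((void *)(1UL << 56), len, prot, flags, -1, 0);
+
+          printf("default: %p\nsv57 opt-in: %p\n", def, high);
+          return 0;
+  }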
diff --git a/Documentation/scsi/scsi_mid_low_api.rst b/Documentation/scsi/scsi_mid_low_api.rst
index 6fa3a6279501..022198c51350 100644
--- a/Documentation/scsi/scsi_mid_low_api.rst
+++ b/Documentation/scsi/scsi_mid_low_api.rst
@@ -1190,11 +1190,11 @@ Members of interest:
- pointer to scsi_device object that this command is
associated with.
resid
- - an LLD should set this signed integer to the requested
+ - an LLD should set this unsigned integer to the requested
transfer length (i.e. 'request_bufflen') less the number
of bytes that are actually transferred. 'resid' is
preset to 0 so an LLD can ignore it if it cannot detect
- underruns (overruns should be rare). If possible an LLD
+ underruns (overruns should not be reported). An LLD
should set 'resid' prior to invoking 'done'. The most
interesting case is data transfers from a SCSI target
device (e.g. READs) that underrun.
diff --git a/Documentation/trace/events.rst b/Documentation/trace/events.rst
index 15f78e772384..759907c20e75 100644
--- a/Documentation/trace/events.rst
+++ b/Documentation/trace/events.rst
@@ -219,6 +219,20 @@ the function "security_prepare_creds" and less than the end of that function.
The ".function" postfix can only be attached to values of size long, and can only
be compared with "==" or "!=".
+Cpumask fields or scalar fields that encode a CPU number can be filtered using
+a user-provided cpumask in cpulist format. The format is as follows::
+
+ CPUS{$cpulist}
+
+Operators available to cpumask filtering are:
+
+& (intersection), ==, !=
+
+For example, this will filter events that have their .target_cpu field present
+in the given cpumask::
+
+ target_cpu & CPUS{17-42}
+
5.2 Setting filters
-------------------
diff --git a/Documentation/trace/fprobetrace.rst b/Documentation/trace/fprobetrace.rst
index 7297f9478459..8e9bebcf0a2e 100644
--- a/Documentation/trace/fprobetrace.rst
+++ b/Documentation/trace/fprobetrace.rst
@@ -79,9 +79,9 @@ automatically set by the given name. ::
f:fprobes/myprobe vfs_read count=count pos=pos
It also chooses the fetch type from BTF information. For example, in the above
-example, the ``count`` is unsigned long, and the ``pos`` is a pointer. Thus, both
-are converted to 64bit unsigned long, but only ``pos`` has "%Lx" print-format as
-below ::
+example, the ``count`` is unsigned long, and the ``pos`` is a pointer. Thus,
+both are converted to 64bit unsigned long, but only ``pos`` has "%Lx"
+print-format as below ::
# cat events/fprobes/myprobe/format
name: myprobe
@@ -105,9 +105,47 @@ is expanded to all function arguments of the function or the tracepoint. ::
# cat dynamic_events
f:fprobes/myprobe vfs_read file=file buf=buf count=count pos=pos
-BTF also affects the ``$retval``. If user doesn't set any type, the retval type is
-automatically picked from the BTF. If the function returns ``void``, ``$retval``
-is rejected.
+BTF also affects the ``$retval``. If user doesn't set any type, the retval
+type is automatically picked from the BTF. If the function returns ``void``,
+``$retval`` is rejected.
+
+You can access the data fields of a data structure using the arrow operator
+``->`` (for pointer type) and the dot operator ``.`` (for data structure type)::
+
+# echo 't sched_switch preempt prev_pid=prev->pid next_pid=next->pid' >> dynamic_events
+
+The field access operators ``->`` and ``.`` can be combined to access deeper
+members and other structure members pointed to by a member, e.g. ``foo->bar.baz->qux``.
+If there is an anonymous union member, you can access it directly, as C code does.
+For example::
+
+ struct {
+ union {
+ int a;
+ int b;
+ };
+ } *foo;
+
+To access ``a`` and ``b``, use ``foo->a`` and ``foo->b`` in this case.
+
+This data field access is available for the return value via ``$retval``,
+e.g. ``$retval->name``.
+
+For these BTF arguments and fields, ``:string`` and ``:ustring`` change the
+behavior. If these are used for a BTF argument or field, the kernel checks
+whether the BTF type of the argument or data field is ``char *`` or
+``char []``. If not, it rejects applying the string types. Also, with BTF
+support, you don't need a memory dereference operator (``+0(PTR)``) for
+accessing the string pointed to by a ``PTR``; the memory dereference
+operator is added automatically according to the BTF type. e.g. ::
+
+# echo 't sched_switch prev->comm:string' >> dynamic_events
+# echo 'f getname_flags%return $retval->name:string' >> dynamic_events
+
+``prev->comm`` is an embedded char array in the data structure, and
+``$retval->name`` is a char pointer in the data structure. In both
+cases, the ``:string`` type gets you the string.
+
Usage examples
--------------
@@ -161,10 +199,10 @@ parameters. This means you can access any field values in the task
structure pointed by the ``prev`` and ``next`` arguments.
For example, usually ``task_struct::start_time`` is not traced, but with this
-traceprobe event, you can trace it as below.
+traceprobe event, you can trace that field as below.
::
- # echo 't sched_switch comm=+1896(next):string start_time=+1728(next):u64' > dynamic_events
+ # echo 't sched_switch comm=next->comm:string next->start_time' > dynamic_events
# head -n 20 trace | tail
# TASK-PID CPU# ||||| TIMESTAMP FUNCTION
# | | | ||||| | |
@@ -176,13 +214,3 @@ traceprobe event, you can trace it as below.
<idle>-0 [000] d..3. 5606.690317: sched_switch: (__probestub_sched_switch+0x4/0x10) comm="kworker/0:1" usage=1 start_time=137000000
kworker/0:1-14 [000] d..3. 5606.690339: sched_switch: (__probestub_sched_switch+0x4/0x10) comm="swapper/0" usage=2 start_time=0
<idle>-0 [000] d..3. 5606.692368: sched_switch: (__probestub_sched_switch+0x4/0x10) comm="kworker/0:1" usage=1 start_time=137000000
-
-Currently, to find the offset of a specific field in the data structure,
-you need to build kernel with debuginfo and run `perf probe` command with
-`-D` option. e.g.
-::
-
- # perf probe -D "__probestub_sched_switch next->comm:string next->start_time"
- p:probe/__probestub_sched_switch __probestub_sched_switch+0 comm=+1896(%cx):string start_time=+1728(%cx):u64
-
-And replace the ``%cx`` with the ``next``.
diff --git a/Documentation/translations/zh_CN/dev-tools/kasan.rst b/Documentation/translations/zh_CN/dev-tools/kasan.rst
index 05ef904dbcfb..8fdb20c9665b 100644
--- a/Documentation/translations/zh_CN/dev-tools/kasan.rst
+++ b/Documentation/translations/zh_CN/dev-tools/kasan.rst
@@ -42,7 +42,7 @@ KASAN有三种模式:
体系架构
~~~~~~~~
-在x86_64、arm、arm64、powerpc、riscv、s390和xtensa上支持通用KASAN,
+在x86_64、arm、arm64、powerpc、riscv、s390、xtensa和loongarch上支持通用KASAN,
而基于标签的KASAN模式只在arm64上支持。
编译器
diff --git a/Documentation/usb/authorization.rst b/Documentation/usb/authorization.rst
index 9e53909d04c2..150a14970e95 100644
--- a/Documentation/usb/authorization.rst
+++ b/Documentation/usb/authorization.rst
@@ -33,12 +33,9 @@ Remove the lock down::
$ echo 1 > /sys/bus/usb/devices/usbX/authorized_default
-By default, Wired USB devices are authorized by default to
-connect. Wireless USB hosts deauthorize by default all new connected
-devices (this is so because we need to do an authentication phase
-before authorizing). Writing "2" to the authorized_default attribute
-causes kernel to only authorize by default devices connected to internal
-USB ports.
+By default, all USB devices are authorized. Writing "2" to the
+authorized_default attribute causes the kernel to authorize by default
+only devices connected to internal USB ports.
Example system lockdown (lame)
diff --git a/Documentation/usb/gadget-testing.rst b/Documentation/usb/gadget-testing.rst
index 2fca40443dc9..394cd226bfae 100644
--- a/Documentation/usb/gadget-testing.rst
+++ b/Documentation/usb/gadget-testing.rst
@@ -27,6 +27,7 @@ provided by gadgets.
18. UVC function
19. PRINTER function
20. UAC1 function (new API)
+ 21. MIDI2 function
1. ACM function
@@ -965,3 +966,156 @@ e.g.::
$ arecord -f dat -t wav -D hw:CARD=UAC1Gadget,DEV=0 | \
aplay -D default:CARD=OdroidU3
+
+
+21. MIDI2 function
+==================
+
+The function is provided by usb_f_midi2.ko module.
+It will create a virtual ALSA card containing a UMP rawmidi device
+where the UMP packet is looped back. In addition, a legacy rawmidi
+device is created. The UMP rawmidi is bound with ALSA sequencer
+clients, too.
+
+Function-specific configfs interface
+------------------------------------
+
+The function name to use when creating the function directory is "midi2".
+The midi2 function provides these attributes in its function directory
+as the card top-level information:
+
+ ============= =================================================
+ process_ump Bool flag to process UMP Stream messages (0 or 1)
+ static_block Bool flag for static blocks (0 or 1)
+ iface_name Optional interface name string
+ ============= =================================================
+
+The directory contains a subdirectory "ep.0", and this provides the
+attributes for a UMP Endpoint (which is a pair of USB MIDI Endpoints):
+
+ ============= =================================================
+ protocol_caps MIDI protocol capabilities;
+ 1: MIDI 1.0, 2: MIDI 2.0, or 3: both protocols
+ protocol Default MIDI protocol (either 1 or 2)
+ ep_name UMP Endpoint name string
+ product_id Product ID string
+ manufacturer Manufacturer ID number (24 bit)
+ family Device family ID number (16 bit)
+ model Device model ID number (16 bit)
+ sw_revision Software revision (32 bit)
+ ============= =================================================
+
+Each Endpoint subdirectory contains a subdirectory "block.0", which
+represents the Function Block for Block 0 information.
+Its attributes are:
+
+ ================= ===============================================
+ name Function Block name string
+ direction Direction of this FB
+ 1: input, 2: output, or 3: bidirectional
+ first_group The first UMP Group number (0-15)
+ num_groups The number of groups in this FB (1-16)
+ midi1_first_group The first UMP Group number for MIDI 1.0 (0-15)
+ midi1_num_groups The number of groups for MIDI 1.0 (0-16)
+ ui_hint UI-hint of this FB
+ 0: unknown, 1: receiver, 2: sender, 3: both
+ midi_ci_version Supported MIDI-CI version number (8 bit)
+ is_midi1 Legacy MIDI 1.0 device (0-2)
+ 0: MIDI 2.0 device,
+ 1: MIDI 1.0 without restriction, or
+ 2: MIDI 1.0 with low speed
+ sysex8_streams Max number of SysEx8 streams (8 bit)
+ active Bool flag for FB activity (0 or 1)
+ ================= ===============================================
+
+If multiple Function Blocks are required, you can add more Function
+Blocks by creating subdirectories "block.<num>" with the corresponding
+Function Block number (1, 2, ....). The FB subdirectories can be
+dynamically removed, too. Note that the Function Block numbers must be
+continuous.
+
+Similarly, if multiple UMP Endpoints are required, you can add
+more Endpoints by creating subdirectories "ep.<num>". The number must
+be continuous.
+
+To emulate an old MIDI 2.0 device without UMP v1.1 support, pass 0
+to the `process_ump` flag. Then all UMP v1.1 requests are ignored.
+
+Testing the MIDI2 function
+--------------------------
+
+On the device, once the gadget is running, the command::
+
+ $ cat /proc/asound/cards
+
+will show a new sound card containing a MIDI2 device.
+
+OTOH, on the host::
+
+ $ cat /proc/asound/cards
+
+will show a new sound card containing either MIDI1 or MIDI2 device,
+depending on the USB audio driver configuration.
+
+On both, when ALSA sequencer is enabled on the host, you can find the
+UMP MIDI client such as "MIDI 2.0 Gadget".
+
+As the driver simply loops back the data, there is no need for a real
+device just for testing.
+
+For testing a MIDI input from the gadget to the host (e.g. emulating a
+MIDI keyboard), you can send a MIDI stream like the following.
+
+On the gadget::
+
+ $ aconnect -o
+ ....
+ client 20: 'MIDI 2.0 Gadget' [type=kernel,card=1]
+ 0 'MIDI 2.0 '
+ 1 'Group 1 (MIDI 2.0 Gadget I/O)'
+ $ aplaymidi -p 20:1 to_host.mid
+
+On the host::
+
+ $ aconnect -i
+ ....
+ client 24: 'MIDI 2.0 Gadget' [type=kernel,card=2]
+ 0 'MIDI 2.0 '
+ 1 'Group 1 (MIDI 2.0 Gadget I/O)'
+ $ arecordmidi -p 24:1 from_gadget.mid
+
+If you have a UMP-capable application, you can use the UMP port to
+send/receive raw UMP packets, too. For example, the aseqdump program
+with UMP support can receive from the UMP port. On the host::
+
+ $ aseqdump -u 2 -p 24:1
+ Waiting for data. Press Ctrl+C to end.
+ Source Group Event Ch Data
+ 24:1 Group 0, Program change 0, program 0, Bank select 0:0
+ 24:1 Group 0, Channel pressure 0, value 0x80000000
+
+For testing MIDI output from the host to the gadget (e.g. emulating a
+MIDI synth), it is just the other way round.
+
+On the gadget::
+
+ $ arecordmidi -p 20:1 from_host.mid
+
+On the host::
+
+ $ aplaymidi -p 24:1 to_gadget.mid
+
+Access to MIDI 1.0 on altset 0 on the host is supported, and it is
+translated from/to UMP packets on the gadget. It is bound to
+Function Block 0 only.
+
+The current operation mode can be observed in ALSA control element
+"Operation Mode" for SND_CTL_IFACE_RAWMIDI. For example::
+
+ $ amixer -c1 contents
+ numid=1,iface=RAWMIDI,name='Operation Mode'
+ ; type=INTEGER,access=r--v----,values=1,min=0,max=2,step=0
+ : values=2
+
+where 0 = unused, 1 = MIDI 1.0 (altset 0), 2 = MIDI 2.0 (altset 1).
+The example above shows it's running in 2, i.e. MIDI 2.0.
diff --git a/Documentation/userspace-api/media/v4l/dev-decoder.rst b/Documentation/userspace-api/media/v4l/dev-decoder.rst
index 675bc2c3c6b8..ef8e8cf31f90 100644
--- a/Documentation/userspace-api/media/v4l/dev-decoder.rst
+++ b/Documentation/userspace-api/media/v4l/dev-decoder.rst
@@ -277,7 +277,7 @@ Initialization
other fields
follow standard semantics.
- * **Return fields:**
+ * **Returned fields:**
``sizeimage``
adjusted size of ``OUTPUT`` buffers.
@@ -311,7 +311,7 @@ Initialization
``memory``
follows standard semantics.
- * **Return fields:**
+ * **Returned fields:**
``count``
the actual number of buffers allocated.
@@ -339,7 +339,7 @@ Initialization
``format``
follows standard semantics.
- * **Return fields:**
+ * **Returned fields:**
``count``
adjusted to the number of allocated buffers.
@@ -410,7 +410,7 @@ Capture Setup
``type``
a ``V4L2_BUF_TYPE_*`` enum appropriate for ``CAPTURE``.
- * **Return fields:**
+ * **Returned fields:**
``width``, ``height``
frame buffer resolution for the decoded frames.
@@ -443,7 +443,7 @@ Capture Setup
``target``
set to ``V4L2_SEL_TGT_COMPOSE``.
- * **Return fields:**
+ * **Returned fields:**
``r.left``, ``r.top``, ``r.width``, ``r.height``
the visible rectangle; it must fit within the frame buffer resolution
@@ -552,7 +552,7 @@ Capture Setup
frame is written; defaults to ``V4L2_SEL_TGT_COMPOSE_DEFAULT``;
read-only on hardware without additional compose/scaling capabilities.
- * **Return fields:**
+ * **Returned fields:**
``r.left``, ``r.top``, ``r.width``, ``r.height``
the visible rectangle; it must fit within the frame buffer resolution
@@ -629,7 +629,7 @@ Capture Setup
``memory``
follows standard semantics.
- * **Return fields:**
+ * **Returned fields:**
``count``
actual number of buffers allocated.
@@ -668,7 +668,7 @@ Capture Setup
a format representing the maximum framebuffer resolution to be
accommodated by newly allocated buffers.
- * **Return fields:**
+ * **Returned fields:**
``count``
adjusted to the number of allocated buffers.
diff --git a/Documentation/userspace-api/media/v4l/dev-encoder.rst b/Documentation/userspace-api/media/v4l/dev-encoder.rst
index aa338b9624b0..6c523c69bdce 100644
--- a/Documentation/userspace-api/media/v4l/dev-encoder.rst
+++ b/Documentation/userspace-api/media/v4l/dev-encoder.rst
@@ -115,8 +115,8 @@ Querying Capabilities
4. The client may use :c:func:`VIDIOC_ENUM_FRAMEINTERVALS` to detect supported
frame intervals for a given format and resolution, passing the desired pixel
- format in :c:type:`v4l2_frmsizeenum` ``pixel_format`` and the resolution
- in :c:type:`v4l2_frmsizeenum` ``width`` and :c:type:`v4l2_frmsizeenum`
+ format in :c:type:`v4l2_frmivalenum` ``pixel_format`` and the resolution
+ in :c:type:`v4l2_frmivalenum` ``width`` and :c:type:`v4l2_frmivalenum`
``height``.
* Values returned by :c:func:`VIDIOC_ENUM_FRAMEINTERVALS` for a coded pixel
@@ -163,7 +163,7 @@ Initialization
other fields
follow standard semantics.
- * **Return fields:**
+ * **Returned fields:**
``sizeimage``
adjusted size of ``CAPTURE`` buffers.
@@ -189,7 +189,7 @@ Initialization
other fields
follow standard semantics.
- * **Return fields:**
+ * **Returned fields:**
``pixelformat``
raw format supported for the coded format currently selected on
@@ -215,7 +215,7 @@ Initialization
other fields
follow standard semantics.
- * **Return fields:**
+ * **Returned fields:**
``width``, ``height``
may be adjusted to match encoder minimums, maximums and alignment
@@ -233,7 +233,7 @@ Initialization
:c:func:`VIDIOC_S_PARM`. This also sets the coded frame interval on the
``CAPTURE`` queue to the same value.
- * ** Required fields:**
+ * **Required fields:**
``type``
a ``V4L2_BUF_TYPE_*`` enum appropriate for ``OUTPUT``.
@@ -245,7 +245,7 @@ Initialization
the desired frame interval; the encoder may adjust it to
match hardware requirements.
- * **Return fields:**
+ * **Returned fields:**
``parm.output.timeperframe``
the adjusted frame interval.
@@ -284,7 +284,7 @@ Initialization
the case for off-line encoding. Support for this feature is signalled
by the :ref:`V4L2_FMT_FLAG_ENC_CAP_FRAME_INTERVAL <fmtdesc-flags>` format flag.
- * ** Required fields:**
+ * **Required fields:**
``type``
a ``V4L2_BUF_TYPE_*`` enum appropriate for ``CAPTURE``.
@@ -296,7 +296,7 @@ Initialization
the desired coded frame interval; the encoder may adjust it to
match hardware requirements.
- * **Return fields:**
+ * **Returned fields:**
``parm.capture.timeperframe``
the adjusted frame interval.
@@ -339,7 +339,7 @@ Initialization
rectangle and may be subject to adjustment to match codec and
hardware constraints.
- * **Return fields:**
+ * **Returned fields:**
``r.left``, ``r.top``, ``r.width``, ``r.height``
visible rectangle adjusted by the encoder.
@@ -387,7 +387,7 @@ Initialization
other fields
follow standard semantics.
- * **Return fields:**
+ * **Returned fields:**
``count``
actual number of buffers allocated.
@@ -420,7 +420,7 @@ Initialization
other fields
follow standard semantics.
- * **Return fields:**
+ * **Returned fields:**
``count``
adjusted to the number of allocated buffers.
diff --git a/Documentation/userspace-api/media/v4l/dev-stateless-decoder.rst b/Documentation/userspace-api/media/v4l/dev-stateless-decoder.rst
index 4a26646eeec5..35ed05f2695e 100644
--- a/Documentation/userspace-api/media/v4l/dev-stateless-decoder.rst
+++ b/Documentation/userspace-api/media/v4l/dev-stateless-decoder.rst
@@ -180,7 +180,7 @@ Initialization
``memory``
follows standard semantics.
- * **Return fields:**
+ * **Returned fields:**
``count``
actual number of buffers allocated.
@@ -208,7 +208,7 @@ Initialization
follows standard semantics. ``V4L2_MEMORY_USERPTR`` is not supported
for ``CAPTURE`` buffers.
- * **Return fields:**
+ * **Returned fields:**
``count``
adjusted to allocated number of buffers, in case the codec requires
diff --git a/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst b/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst
index 58f6ae25b2e7..296ad2025e8d 100644
--- a/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst
+++ b/Documentation/userspace-api/media/v4l/pixfmt-reserved.rst
@@ -275,6 +275,19 @@ please make a proposal on the linux-media mailing list.
Decoder's implementation can be found here,
`aspeed_codec <https://github.com/AspeedTech-BMC/aspeed_codec/>`__
+ * .. _V4L2-PIX-FMT-MT2110T:
+
+ - ``V4L2_PIX_FMT_MT2110T``
+ - 'MT2110T'
+ - This is a two-planar 10-bit tile-mode format, similar to
+ ``V4L2_PIX_FMT_MM21`` in terms of alignment and tiling. Used for VP9, AV1
+ and HEVC.
+ * .. _V4L2-PIX-FMT-MT2110R:
+
+ - ``V4L2_PIX_FMT_MT2110R``
+ - 'MT2110R'
+ - This is a two-planar 10-bit raster-mode format, similar to
+ ``V4L2_PIX_FMT_MM21`` in terms of alignment and tiling. Used for AVC.
.. raw:: latex
\normalsize
diff --git a/Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst b/Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst
index 2d6e3bbdd040..72677a280cd6 100644
--- a/Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst
+++ b/Documentation/userspace-api/media/v4l/vidioc-subdev-g-routing.rst
@@ -58,6 +58,9 @@ the subdevice exposes, drivers return the ENOSPC error code and adjust the
value of the ``num_routes`` field. Application should then reserve enough memory
for all the route entries and call ``VIDIOC_SUBDEV_G_ROUTING`` again.
+On a successful ``VIDIOC_SUBDEV_G_ROUTING`` call the driver updates the
+``num_routes`` field to reflect the actual number of routes returned.
+
.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
.. c:type:: v4l2_subdev_routing
@@ -138,9 +141,7 @@ ENOSPC
EINVAL
The sink or source pad identifiers reference a non-existing pad, or reference
- pads of different types (ie. the sink_pad identifiers refers to a source pad)
- or the sink or source stream identifiers reference a non-existing stream on
- the sink or source pad.
+ pads of different types (i.e. the sink_pad identifier refers to a source pad).
E2BIG
The application provided ``num_routes`` for ``VIDIOC_SUBDEV_S_ROUTING`` is
diff --git a/Documentation/userspace-api/netlink/intro.rst b/Documentation/userspace-api/netlink/intro.rst
index af94e71761ec..7b1d401210ef 100644
--- a/Documentation/userspace-api/netlink/intro.rst
+++ b/Documentation/userspace-api/netlink/intro.rst
@@ -528,6 +528,8 @@ families may, however, require a larger buffer. 32kB buffer is recommended
for most efficient handling of dumps (larger buffer fits more dumped
objects and therefore fewer recvmsg() calls are needed).
+.. _classic_netlink:
+
Classic Netlink
===============
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 73db30cb60fb..21a7578142a1 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -2259,6 +2259,8 @@ Errors:
EINVAL invalid register ID, or no such register or used with VMs in
protected virtualization mode on s390
EPERM (arm64) register access not allowed before vcpu finalization
+ EBUSY (riscv) changing register value not allowed after the vcpu
+ has run at least once
====== ============================================================
(These error codes are indicative only: do not rely on a specific error
@@ -3499,7 +3501,7 @@ VCPU matching underlying host.
---------------------
:Capability: basic
-:Architectures: arm64, mips
+:Architectures: arm64, mips, riscv
:Type: vcpu ioctl
:Parameters: struct kvm_reg_list (in/out)
:Returns: 0 on success; -1 on error
diff --git a/MAINTAINERS b/MAINTAINERS
index 4e3419c04f27..a62f5a2a1c9e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1018,7 +1018,7 @@ AMD PMC DRIVER
M: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
-F: drivers/platform/x86/amd/pmc.c
+F: drivers/platform/x86/amd/pmc/
AMD PMF DRIVER
M: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
@@ -1086,7 +1086,6 @@ F: include/soc/amlogic/
AMPHION VPU CODEC V4L2 DRIVER
M: Ming Qian <ming.qian@nxp.com>
-M: Shijie Qin <shijie.qin@nxp.com>
M: Zhou Peng <eagle.zhou@nxp.com>
L: linux-media@vger.kernel.org
S: Maintained
@@ -4469,6 +4468,7 @@ M: Maxime Ripard <mripard@kernel.org>
L: linux-media@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/media/cdns,*.txt
+F: Documentation/devicetree/bindings/media/cdns,csi2rx.yaml
F: drivers/media/platform/cadence/cdns-csi2*
CADENCE NAND DRIVER
@@ -5255,6 +5255,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
F: Documentation/admin-guide/cgroup-v1/cpusets.rst
F: include/linux/cpuset.h
F: kernel/cgroup/cpuset.c
+F: tools/testing/selftests/cgroup/test_cpuset.c
+F: tools/testing/selftests/cgroup/test_cpuset_prs.sh
CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
@@ -6116,7 +6118,7 @@ F: include/video/udlfb.h
DISTRIBUTED LOCK MANAGER (DLM)
M: Christine Caulfield <ccaulfie@redhat.com>
M: David Teigland <teigland@redhat.com>
-L: cluster-devel@redhat.com
+L: gfs2@lists.linux.dev
S: Supported
W: http://sources.redhat.com/cluster/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm.git
@@ -6275,11 +6277,17 @@ T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9714.yaml
F: drivers/media/i2c/dw9714.c
-DONGWOON DW9768 LENS VOICE COIL DRIVER
-M: Dongchun Zhu <dongchun.zhu@mediatek.com>
+DONGWOON DW9719 LENS VOICE COIL DRIVER
+M: Daniel Scally <djrscally@gmail.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
+F: drivers/media/i2c/dw9719.c
+
+DONGWOON DW9768 LENS VOICE COIL DRIVER
+L: linux-media@vger.kernel.org
+S: Orphan
+T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/i2c/dongwoon,dw9768.yaml
F: drivers/media/i2c/dw9768.c
@@ -8766,7 +8774,7 @@ F: scripts/get_maintainer.pl
GFS2 FILE SYSTEM
M: Bob Peterson <rpeterso@redhat.com>
M: Andreas Gruenbacher <agruenba@redhat.com>
-L: cluster-devel@redhat.com
+L: gfs2@lists.linux.dev
S: Supported
B: https://bugzilla.kernel.org/enter_bug.cgi?product=File%20System&component=gfs2
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2.git
@@ -9522,6 +9530,12 @@ S: Obsolete
W: http://w1.fi/hostap-driver.html
F: drivers/net/wireless/intersil/hostap/
+HP BIOSCFG DRIVER
+M: Jorge Lopez <jorge.lopez2@hp.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/hp/hp-bioscfg/
+
HP COMPAQ TC1100 TABLET WMI EXTRAS DRIVER
L: platform-driver-x86@vger.kernel.org
S: Orphan
@@ -9662,7 +9676,7 @@ S: Maintained
F: arch/x86/kernel/cpu/hygon.c
HYNIX HI556 SENSOR DRIVER
-M: Shawn Tu <shawnx.tu@intel.com>
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
@@ -9675,7 +9689,7 @@ S: Maintained
F: drivers/media/i2c/hi846.c
HYNIX HI847 SENSOR DRIVER
-M: Shawn Tu <shawnx.tu@intel.com>
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/i2c/hi847.c
@@ -9746,6 +9760,14 @@ L: linux-acpi@vger.kernel.org
S: Maintained
F: drivers/i2c/i2c-core-acpi.c
+I2C ADDRESS TRANSLATOR (ATR)
+M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+R: Luca Ceresoli <luca.ceresoli@bootlin.com>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: drivers/i2c/i2c-atr.c
+F: include/linux/i2c-atr.h
+
I2C CONTROLLER DRIVER FOR NVIDIA GPU
M: Ajay Gupta <ajayg@nvidia.com>
L: linux-i2c@vger.kernel.org
@@ -10802,6 +10824,7 @@ INTEL TPMI DRIVER
M: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
+F: Documentation/ABI/testing/debugfs-tpmi
F: drivers/platform/x86/intel/tpmi.c
F: include/linux/intel_tpmi.h
@@ -11359,6 +11382,7 @@ F: scripts/dummy-tools/
F: scripts/mk*
F: scripts/mod/
F: scripts/package/
+F: usr/
KERNEL HARDENING (not covered by other areas)
M: Kees Cook <keescook@chromium.org>
@@ -11565,6 +11589,8 @@ F: arch/x86/include/uapi/asm/svm.h
F: arch/x86/include/uapi/asm/vmx.h
F: arch/x86/kvm/
F: arch/x86/kvm/*/
+F: tools/testing/selftests/kvm/*/x86_64/
+F: tools/testing/selftests/kvm/x86_64/
KERNFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -12367,6 +12393,14 @@ S: Maintained
F: Documentation/devicetree/bindings/pinctrl/loongson,ls2k-pinctrl.yaml
F: drivers/pinctrl/pinctrl-loongson2.c
+LOONGSON-2 SOC SERIES THERMAL DRIVER
+M: zhanghongchen <zhanghongchen@loongson.cn>
+M: Yinbo Zhu <zhuyinbo@loongson.cn>
+L: linux-pm@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/thermal/loongson,ls2k-thermal.yaml
+F: drivers/thermal/loongson2_thermal.c
+
LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
M: Sathya Prakash <sathya.prakash@broadcom.com>
M: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
@@ -13087,17 +13121,21 @@ F: drivers/staging/media/imx/
F: include/linux/imx-media.h
F: include/media/imx.h
-MEDIA DRIVERS FOR FREESCALE IMX7
+MEDIA DRIVERS FOR FREESCALE IMX7/8
M: Rui Miguel Silva <rmfrfs@gmail.com>
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+M: Martin Kepplinger <martin.kepplinger@puri.sm>
+R: Purism Kernel Team <kernel@puri.sm>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
F: Documentation/admin-guide/media/imx7.rst
F: Documentation/devicetree/bindings/media/nxp,imx-mipi-csi2.yaml
F: Documentation/devicetree/bindings/media/nxp,imx7-csi.yaml
+F: Documentation/devicetree/bindings/media/nxp,imx8mq-mipi-csi2.yaml
F: drivers/media/platform/nxp/imx-mipi-csis.c
F: drivers/media/platform/nxp/imx7-media-csi.c
+F: drivers/media/platform/nxp/imx8mq-mipi-csi2.c
MEDIA DRIVERS FOR HELENE
M: Abylay Ospan <aospan@netup.ru>
@@ -13329,6 +13367,7 @@ L: linux-mediatek@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: Documentation/devicetree/bindings/iommu/mediatek*
F: drivers/iommu/mtk_iommu*
+F: include/dt-bindings/memory/mediatek,mt*-port.h
F: include/dt-bindings/memory/mt*-port.h
MEDIATEK JPEG DRIVER
@@ -13706,6 +13745,7 @@ F: include/linux/memory_hotplug.h
F: include/linux/mm.h
F: include/linux/mmzone.h
F: include/linux/pagewalk.h
+F: include/linux/rmap.h
F: include/trace/events/ksm.h
F: mm/
F: tools/mm/
@@ -14000,12 +14040,14 @@ F: drivers/nvmem/microchip-otpc.c
F: include/dt-bindings/nvmem/microchip,sama7g5-otpc.h
MICROCHIP PCI1XXXX GP DRIVER
+M: Vaibhaav Ram T.L <vaibhaavram.tl@microchip.com>
M: Kumaravel Thiagarajan <kumaravel.thiagarajan@microchip.com>
L: linux-gpio@vger.kernel.org
S: Supported
F: drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.c
F: drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gp.h
F: drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c
+F: drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
MICROCHIP PCI1XXXX I2C DRIVER
M: Tharun Kumar P <tharunkumar.pasumarthi@microchip.com>
@@ -15643,7 +15685,7 @@ F: Documentation/filesystems/omfs.rst
F: fs/omfs/
OMNIVISION OG01A1B SENSOR DRIVER
-M: Shawn Tu <shawnx.tu@intel.com>
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/i2c/og01a1b.c
@@ -15656,9 +15698,8 @@ T: git git://linuxtv.org/media_tree.git
F: drivers/media/i2c/ov01a10.c
OMNIVISION OV02A10 SENSOR DRIVER
-M: Dongchun Zhu <dongchun.zhu@mediatek.com>
L: linux-media@vger.kernel.org
-S: Maintained
+S: Orphan
T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml
F: drivers/media/i2c/ov02a10.c
@@ -15693,6 +15734,7 @@ F: drivers/media/i2c/ov13b10.c
OMNIVISION OV2680 SENSOR DRIVER
M: Rui Miguel Silva <rmfrfs@gmail.com>
+M: Hans de Goede <hansg@kernel.org>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
@@ -15709,7 +15751,7 @@ F: drivers/media/i2c/ov2685.c
OMNIVISION OV2740 SENSOR DRIVER
M: Tianshu Qiu <tian.shu.qiu@intel.com>
-R: Shawn Tu <shawnx.tu@intel.com>
+R: Sakari Ailus <sakari.ailus@linux.intel.com>
R: Bingbu Cao <bingbu.cao@intel.com>
L: linux-media@vger.kernel.org
S: Maintained
@@ -15741,7 +15783,7 @@ F: Documentation/devicetree/bindings/media/i2c/ovti,ov5647.yaml
F: drivers/media/i2c/ov5647.c
OMNIVISION OV5670 SENSOR DRIVER
-M: Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
@@ -15749,7 +15791,7 @@ F: Documentation/devicetree/bindings/media/i2c/ovti,ov5670.yaml
F: drivers/media/i2c/ov5670.c
OMNIVISION OV5675 SENSOR DRIVER
-M: Shawn Tu <shawnx.tu@intel.com>
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
L: linux-media@vger.kernel.org
S: Maintained
T: git git://linuxtv.org/media_tree.git
@@ -15788,9 +15830,8 @@ F: drivers/media/i2c/ov772x.c
F: include/media/i2c/ov772x.h
OMNIVISION OV7740 SENSOR DRIVER
-M: Wenyou Yang <wenyou.yang@microchip.com>
L: linux-media@vger.kernel.org
-S: Maintained
+S: Orphan
T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/i2c/ov7740.txt
F: drivers/media/i2c/ov7740.c
@@ -17147,9 +17188,9 @@ F: kernel/sched/psi.c
PRINTK
M: Petr Mladek <pmladek@suse.com>
-M: Sergey Senozhatsky <senozhatsky@chromium.org>
R: Steven Rostedt <rostedt@goodmis.org>
R: John Ogness <john.ogness@linutronix.de>
+R: Sergey Senozhatsky <senozhatsky@chromium.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux.git
F: include/linux/printk.h
@@ -18043,7 +18084,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git
F: Documentation/admin-guide/rtc.rst
F: Documentation/devicetree/bindings/rtc/
F: drivers/rtc/
-F: include/linux/platform_data/rtc-*
F: include/linux/rtc.h
F: include/linux/rtc/
F: include/uapi/linux/rtc.h
@@ -19496,6 +19536,32 @@ F: drivers/media/mmc/siano/
F: drivers/media/usb/siano/
F: drivers/media/usb/siano/
+SIEMENS IPC LED DRIVERS
+M: Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+M: Xing Tong Wu <xingtong.wu@siemens.com>
+M: Tobias Schaffner <tobias.schaffner@siemens.com>
+L: linux-leds@vger.kernel.org
+S: Maintained
+F: drivers/leds/simple/
+
+SIEMENS IPC PLATFORM DRIVERS
+M: Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+M: Xing Tong Wu <xingtong.wu@siemens.com>
+M: Tobias Schaffner <tobias.schaffner@siemens.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: drivers/platform/x86/siemens/
+F: include/linux/platform_data/x86/simatic-ipc-base.h
+F: include/linux/platform_data/x86/simatic-ipc.h
+
+SIEMENS IPC WATCHDOG DRIVERS
+M: Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+M: Xing Tong Wu <xingtong.wu@siemens.com>
+M: Tobias Schaffner <tobias.schaffner@siemens.com>
+L: linux-watchdog@vger.kernel.org
+S: Maintained
+F: drivers/watchdog/simatic-ipc-wdt.c
+
SIFIVE DRIVERS
M: Palmer Dabbelt <palmer@dabbelt.com>
M: Paul Walmsley <paul.walmsley@sifive.com>
@@ -20364,6 +20430,13 @@ S: Maintained
F: Documentation/devicetree/bindings/net/starfive,jh7110-dwmac.yaml
F: drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+STARFIVE JH7110 DPHY RX DRIVER
+M: Jack Zhu <jack.zhu@starfivetech.com>
+M: Changhuang Liang <changhuang.liang@starfivetech.com>
+S: Supported
+F: Documentation/devicetree/bindings/phy/starfive,jh7110-dphy-rx.yaml
+F: drivers/phy/starfive/phy-jh7110-dphy-rx.c
+
STARFIVE JH7110 MMC/SD/SDIO DRIVER
M: William Qiu <william.qiu@starfivetech.com>
S: Supported
@@ -20447,6 +20520,14 @@ S: Supported
F: Documentation/devicetree/bindings/watchdog/starfive*
F: drivers/watchdog/starfive-wdt.c
+STARFIVE JH71X0 PCIE AND USB PHY DRIVER
+M: Minda Chen <minda.chen@starfivetech.com>
+S: Supported
+F: Documentation/devicetree/bindings/phy/starfive,jh7110-pcie-phy.yaml
+F: Documentation/devicetree/bindings/phy/starfive,jh7110-usb-phy.yaml
+F: drivers/phy/starfive/phy-jh7110-pcie.c
+F: drivers/phy/starfive/phy-jh7110-usb.c
+
STATIC BRANCH/CALL
M: Peter Zijlstra <peterz@infradead.org>
M: Josh Poimboeuf <jpoimboe@kernel.org>
@@ -21163,7 +21244,7 @@ F: sound/soc/ti/
TEXAS INSTRUMENTS AUDIO (ASoC/HDA) DRIVERS
M: Shenghao Ding <shenghao-ding@ti.com>
M: Kevin Lu <kevin-lu@ti.com>
-M: Baojun Xu <x1077012@ti.com>
+M: Baojun Xu <baojun.xu@ti.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
F: Documentation/devicetree/bindings/sound/tas2552.txt
@@ -21468,6 +21549,14 @@ F: drivers/misc/tifm*
F: drivers/mmc/host/tifm_sd.c
F: include/linux/tifm.h
+TI FPD-LINK DRIVERS
+M: Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/media/i2c/ti,ds90*
+F: drivers/media/i2c/ds90*
+F: include/media/i2c/ds90*
+
TI KEYSTONE MULTICORE NAVIGATOR DRIVERS
M: Nishanth Menon <nm@ti.com>
M: Santosh Shilimkar <ssantosh@kernel.org>
@@ -21718,6 +21807,20 @@ S: Maintained
F: Documentation/hwmon/tps546d24.rst
F: drivers/hwmon/pmbus/tps546d24.c
+TQ SYSTEMS BOARD & DRIVER SUPPORT
+L: linux@ew.tq-group.com
+S: Supported
+W: https://www.tq-group.com/en/products/tq-embedded/
+F: arch/arm/boot/dts/imx*mba*.dts*
+F: arch/arm/boot/dts/imx*tqma*.dts*
+F: arch/arm/boot/dts/mba*.dtsi
+F: arch/arm64/boot/dts/freescale/imx*mba*.dts*
+F: arch/arm64/boot/dts/freescale/imx*tqma*.dts*
+F: arch/arm64/boot/dts/freescale/mba*.dtsi
+F: drivers/gpio/gpio-tqmx86.c
+F: drivers/mfd/tqmx86.c
+F: drivers/watchdog/tqmx86_wdt.c
+
TRACING
M: Steven Rostedt <rostedt@goodmis.org>
M: Masami Hiramatsu <mhiramat@kernel.org>
@@ -21927,7 +22030,7 @@ F: Documentation/admin-guide/ufs.rst
F: fs/ufs/
UHID USERSPACE HID IO DRIVER
-M: David Rheinsberg <david.rheinsberg@gmail.com>
+M: David Rheinsberg <david@readahead.eu>
L: linux-input@vger.kernel.org
S: Maintained
F: drivers/hid/uhid.c
@@ -22412,6 +22515,39 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/clk/ux500/
+V4L2 ASYNC AND FWNODE FRAMEWORKS
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+T: git git://linuxtv.org/media_tree.git
+F: drivers/media/v4l2-core/v4l2-async.c
+F: drivers/media/v4l2-core/v4l2-fwnode.c
+F: include/media/v4l2-async.h
+F: include/media/v4l2-fwnode.h
+
+V4L2 LENS DRIVERS
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/i2c/ak*
+F: drivers/media/i2c/dw*
+F: drivers/media/i2c/lm*
+
+V4L2 CAMERA SENSOR DRIVERS
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: Documentation/driver-api/media/camera-sensor.rst
+F: Documentation/driver-api/media/tx-rx.rst
+F: drivers/media/i2c/ar*
+F: drivers/media/i2c/hi*
+F: drivers/media/i2c/imx*
+F: drivers/media/i2c/mt*
+F: drivers/media/i2c/og*
+F: drivers/media/i2c/ov*
+F: drivers/media/i2c/s5*
+F: drivers/media/i2c/st-vgxy61.c
+
VF610 NAND DRIVER
M: Stefan Agner <stefan@agner.ch>
L: linux-mtd@lists.infradead.org
@@ -22961,9 +23097,9 @@ F: drivers/net/vrf.c
VSPRINTF
M: Petr Mladek <pmladek@suse.com>
M: Steven Rostedt <rostedt@goodmis.org>
-M: Sergey Senozhatsky <senozhatsky@chromium.org>
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
R: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+R: Sergey Senozhatsky <senozhatsky@chromium.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/printk/linux.git
F: Documentation/core-api/printk-formats.rst
@@ -23066,7 +23202,7 @@ S: Maintained
F: drivers/rtc/rtc-sd3078.c
WIIMOTE HID DRIVER
-M: David Rheinsberg <david.rheinsberg@gmail.com>
+M: David Rheinsberg <david@readahead.eu>
L: linux-input@vger.kernel.org
S: Maintained
F: drivers/hid/hid-wiimote*
@@ -23112,8 +23248,10 @@ S: Orphan
F: drivers/net/wireless/legacy/wl3501*
WMI BINARY MOF DRIVER
-L: platform-drivers-x86@vger.kernel.org
-S: Orphan
+M: Armin Wolf <W_Armin@gmx.de>
+R: Thomas Weißschuh <linux@weissschuh.net>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
F: Documentation/ABI/stable/sysfs-platform-wmi-bmof
F: Documentation/wmi/devices/wmi-bmof.rst
F: drivers/platform/x86/wmi-bmof.c
diff --git a/Makefile b/Makefile
index 4f283d915e54..73f23fa0677a 100644
--- a/Makefile
+++ b/Makefile
@@ -280,8 +280,8 @@ no-dot-config-targets := $(clean-targets) \
# Installation targets should not require compiler. Unfortunately, vdso_install
# is an exception where build artifacts may be updated. This must be fixed.
no-compiler-targets := $(no-dot-config-targets) install dtbs_install \
- headers_install modules_install kernelrelease image_name
-no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease \
+ headers_install modules_install modules_sign kernelrelease image_name
+no-sync-config-targets := $(no-dot-config-targets) %install modules_sign kernelrelease \
image_name
single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.rsi %.s %.symtypes %/
@@ -510,7 +510,6 @@ LEX = flex
YACC = bison
AWK = awk
INSTALLKERNEL := installkernel
-DEPMOD = depmod
PERL = perl
PYTHON3 = python3
CHECK = sparse
@@ -564,14 +563,6 @@ KBUILD_CFLAGS += -funsigned-char
KBUILD_CFLAGS += -fno-common
KBUILD_CFLAGS += -fno-PIE
KBUILD_CFLAGS += -fno-strict-aliasing
-KBUILD_CFLAGS += -Wall
-KBUILD_CFLAGS += -Wundef
-KBUILD_CFLAGS += -Werror=implicit-function-declaration
-KBUILD_CFLAGS += -Werror=implicit-int
-KBUILD_CFLAGS += -Werror=return-type
-KBUILD_CFLAGS += -Werror=strict-prototypes
-KBUILD_CFLAGS += -Wno-format-security
-KBUILD_CFLAGS += -Wno-trigraphs
KBUILD_CPPFLAGS := -D__KERNEL__
KBUILD_RUSTFLAGS := $(rust_common_flags) \
@@ -824,10 +815,6 @@ endif # may-sync-config
endif # need-config
KBUILD_CFLAGS += -fno-delete-null-pointer-checks
-KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
-KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
-KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
-KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
KBUILD_CFLAGS += -O2
@@ -858,40 +845,15 @@ ifdef CONFIG_READABLE_ASM
KBUILD_CFLAGS += -fno-reorder-blocks -fno-ipa-cp-clone -fno-partial-inlining
endif
-ifneq ($(CONFIG_FRAME_WARN),0)
-KBUILD_CFLAGS += -Wframe-larger-than=$(CONFIG_FRAME_WARN)
-endif
-
stackp-flags-y := -fno-stack-protector
stackp-flags-$(CONFIG_STACKPROTECTOR) := -fstack-protector
stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong
KBUILD_CFLAGS += $(stackp-flags-y)
-KBUILD_CPPFLAGS-$(CONFIG_WERROR) += -Werror
-KBUILD_CPPFLAGS += $(KBUILD_CPPFLAGS-y)
-KBUILD_CFLAGS-$(CONFIG_CC_NO_ARRAY_BOUNDS) += -Wno-array-bounds
-
KBUILD_RUSTFLAGS-$(CONFIG_WERROR) += -Dwarnings
KBUILD_RUSTFLAGS += $(KBUILD_RUSTFLAGS-y)
-ifdef CONFIG_CC_IS_CLANG
-# The kernel builds with '-std=gnu11' so use of GNU extensions is acceptable.
-KBUILD_CFLAGS += -Wno-gnu
-else
-
-# gcc inanely warns about local variables called 'main'
-KBUILD_CFLAGS += -Wno-main
-endif
-
-# These warnings generated too much noise in a regular build.
-# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
-
-# These result in bogus false positives
-KBUILD_CFLAGS += $(call cc-disable-warning, dangling-pointer)
-
ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
KBUILD_RUSTFLAGS += -Cforce-frame-pointers=y
@@ -1026,51 +988,12 @@ endif
# arch Makefile may override CC so keep this after arch Makefile is included
NOSTDINC_FLAGS += -nostdinc
-# Variable Length Arrays (VLAs) should not be used anywhere in the kernel
-KBUILD_CFLAGS += -Wvla
-
-# disable pointer signed / unsigned warnings in gcc 4.0
-KBUILD_CFLAGS += -Wno-pointer-sign
-
-# In order to make sure new function cast mismatches are not introduced
-# in the kernel (to avoid tripping CFI checking), the kernel should be
-# globally built with -Wcast-function-type.
-KBUILD_CFLAGS += $(call cc-option, -Wcast-function-type)
-
# To gain proper coverage for CONFIG_UBSAN_BOUNDS and CONFIG_FORTIFY_SOURCE,
# the kernel uses only C99 flexible arrays for dynamically sized trailing
# arrays. Enforce this for everything that may examine structure sizes and
# perform bounds checking.
KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3)
-# disable stringop warnings in gcc 8+
-KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
-
-# We'll want to enable this eventually, but it's not going away for 5.7 at least
-KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow)
-
-# Another good warning that we'll want to enable eventually
-KBUILD_CFLAGS += $(call cc-disable-warning, restrict)
-
-# Enabled with W=2, disabled by default as noisy
-ifdef CONFIG_CC_IS_GCC
-KBUILD_CFLAGS += -Wno-maybe-uninitialized
-endif
-
-# The allocators already balk at large sizes, so silence the compiler
-# warnings for bounds checks involving those possible values. While
-# -Wno-alloc-size-larger-than would normally be used here, earlier versions
-# of gcc (<9.1) weirdly don't handle the option correctly when _other_
-# warnings are produced (?!). Using -Walloc-size-larger-than=SIZE_MAX
-# doesn't work (as it is documented to), silently resolving to "0" prior to
-# version 9.1 (and producing an error more recently). Numeric values larger
-# than PTRDIFF_MAX also don't work prior to version 9.1, which are silently
-# ignored, continuing to default to PTRDIFF_MAX. So, left with no other
-# choice, we must perform a versioned check to disable this warning.
-# https://lore.kernel.org/lkml/20210824115859.187f272f@canb.auug.org.au
-KBUILD_CFLAGS-$(call gcc-min-version, 90100) += -Wno-alloc-size-larger-than
-KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
-
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += -fno-strict-overflow
@@ -1082,15 +1005,6 @@ ifdef CONFIG_CC_IS_GCC
KBUILD_CFLAGS += -fconserve-stack
endif
-# Prohibit date/time macros, which would make the build non-deterministic
-KBUILD_CFLAGS += -Werror=date-time
-
-# enforce correct pointer usage
-KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
-
-# Require designated initializers for all marked structures
-KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
-
# change __FILE__ to the relative path from the srctree
KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
@@ -1548,44 +1462,8 @@ modules: modules_prepare
modules_prepare: prepare
$(Q)$(MAKE) $(build)=scripts scripts/module.lds
-export modules_sign_only :=
-
-ifeq ($(CONFIG_MODULE_SIG),y)
-PHONY += modules_sign
-modules_sign: modules_install
- @:
-
-# modules_sign is a subset of modules_install.
-# 'make modules_install modules_sign' is equivalent to 'make modules_install'.
-ifeq ($(filter modules_install,$(MAKECMDGOALS)),)
-modules_sign_only := y
-endif
-endif
-
endif # CONFIG_MODULES
-modinst_pre :=
-ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
-modinst_pre := __modinst_pre
-endif
-
-modules_install: $(modinst_pre)
-PHONY += __modinst_pre
-__modinst_pre:
- @rm -rf $(MODLIB)/kernel
- @rm -f $(MODLIB)/source
- @mkdir -p $(MODLIB)
-ifdef CONFIG_MODULES
- @ln -s $(abspath $(srctree)) $(MODLIB)/source
- @if [ ! $(objtree) -ef $(MODLIB)/build ]; then \
- rm -f $(MODLIB)/build ; \
- ln -s $(CURDIR) $(MODLIB)/build ; \
- fi
- @sed 's:^\(.*\)\.o$$:kernel/\1.ko:' modules.order > $(MODLIB)/modules.order
-endif
- @cp -f modules.builtin $(MODLIB)/
- @cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/
-
###
# Cleaning is done on three levels.
# make clean Delete most generated files
@@ -1594,7 +1472,7 @@ endif
# make distclean Remove editor backup files, patch leftover files and the like
# Directories & files removed with 'make clean'
-CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
+CLEAN_FILES += vmlinux.symvers modules-only.symvers \
modules.builtin modules.builtin.modinfo modules.nsdeps \
compile_commands.json .thinlto-cache rust/test rust/doc \
rust-project.json .vmlinux.objs .vmlinux.export.c
@@ -1608,7 +1486,7 @@ MRPROPER_FILES += include/config include/generated \
certs/signing_key.pem \
certs/x509.genkey \
vmlinux-gdb.py \
- *.spec rpmbuild \
+ kernel.spec rpmbuild \
rust/libmacros.so
# clean - Delete most, but leave enough to build external modules
@@ -1675,7 +1553,6 @@ help:
@echo ' mrproper - Remove all generated files + config + various backup files'
@echo ' distclean - mrproper + remove editor backup and patch files'
@echo ''
- @echo 'Configuration targets:'
@$(MAKE) -f $(srctree)/scripts/kconfig/Makefile help
@echo ''
@echo 'Other generic targets:'
@@ -1923,19 +1800,39 @@ help:
@echo ' rust-analyzer - generate rust-project.json rust-analyzer support file'
@echo ''
+ifndef CONFIG_MODULES
+modules modules_install: __external_modules_error
__external_modules_error:
@echo >&2 '***'
@echo >&2 '*** The present kernel disabled CONFIG_MODULES.'
@echo >&2 '*** You cannot build or install external modules.'
@echo >&2 '***'
@false
+endif
endif # KBUILD_EXTMOD
# ---------------------------------------------------------------------------
# Modules
-PHONY += modules modules_install modules_prepare
+PHONY += modules modules_install modules_sign modules_prepare
+
+modules_install:
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst \
+ sign-only=$(if $(filter modules_install,$(MAKECMDGOALS)),,y)
+
+ifeq ($(CONFIG_MODULE_SIG),y)
+# modules_sign is a subset of modules_install.
+# 'make modules_install modules_sign' is equivalent to 'make modules_install'.
+modules_sign: modules_install
+ @:
+else
+modules_sign:
+ @echo >&2 '***'
+ @echo >&2 '*** CONFIG_MODULE_SIG is disabled. You cannot sign modules.'
+ @echo >&2 '***'
+ @false
+endif
ifdef CONFIG_MODULES
@@ -1953,22 +1850,9 @@ PHONY += modules_check
modules_check: $(MODORDER)
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/modules-check.sh $<
-quiet_cmd_depmod = DEPMOD $(MODLIB)
- cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \
- $(KERNELRELEASE)
-
-modules_install:
- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
- $(call cmd,depmod)
-
else # CONFIG_MODULES
-# Modules not configured
-# ---------------------------------------------------------------------------
-
-PHONY += __external_modules_error
-
-modules modules_install: __external_modules_error
+modules:
@:
KBUILD_MODULES :=
@@ -2147,6 +2031,10 @@ kernelversion:
image_name:
@echo $(KBUILD_IMAGE)
+PHONY += run-command
+run-command:
+ $(Q)$(KBUILD_RUN_COMMAND)
+
quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files)))
cmd_rmfiles = rm -rf $(rm-files)
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index dd31e97edae8..396caece6d6d 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -3,6 +3,5 @@
generated-y += syscall_table.h
generic-y += agp.h
generic-y += asm-offsets.h
-generic-y += export.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 3d7473531ab1..c80258ec332f 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -421,7 +421,7 @@ register_cpus(void)
arch_initcall(register_cpus);
#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_reboot_handler(int unused)
+static void sysrq_reboot_handler(u8 unused)
{
machine_halt();
}
diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c
index 6dc952b0df4a..d6139dbae4ac 100644
--- a/arch/alpha/kernel/srmcons.c
+++ b/arch/alpha/kernel/srmcons.c
@@ -129,9 +129,8 @@ srmcons_do_write(struct tty_port *port, const char *buf, int count)
return count;
}
-static int
-srmcons_write(struct tty_struct *tty,
- const unsigned char *buf, int count)
+static ssize_t
+srmcons_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
unsigned long flags;
diff --git a/arch/alpha/lib/callback_srm.S b/arch/alpha/lib/callback_srm.S
index b13c4a231f1b..36b63f295170 100644
--- a/arch/alpha/lib/callback_srm.S
+++ b/arch/alpha/lib/callback_srm.S
@@ -3,8 +3,8 @@
* arch/alpha/lib/callback_srm.S
*/
+#include <linux/export.h>
#include <asm/console.h>
-#include <asm/export.h>
.text
#define HWRPB_CRB_OFFSET 0xc0
diff --git a/arch/alpha/lib/clear_page.S b/arch/alpha/lib/clear_page.S
index ce02de7b0493..af70ee309a33 100644
--- a/arch/alpha/lib/clear_page.S
+++ b/arch/alpha/lib/clear_page.S
@@ -4,7 +4,7 @@
*
* Zero an entire page.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.text
.align 4
.global clear_page
diff --git a/arch/alpha/lib/clear_user.S b/arch/alpha/lib/clear_user.S
index db6c6ca45896..848eb60a0010 100644
--- a/arch/alpha/lib/clear_user.S
+++ b/arch/alpha/lib/clear_user.S
@@ -10,7 +10,7 @@
* a successful copy). There is also some rather minor exception setup
* stuff.
*/
-#include <asm/export.h>
+#include <linux/export.h>
/* Allow an exception for an insn; exit if we get one. */
#define EX(x,y...) \
diff --git a/arch/alpha/lib/copy_page.S b/arch/alpha/lib/copy_page.S
index 5439a30c77d0..1c444fdad9a5 100644
--- a/arch/alpha/lib/copy_page.S
+++ b/arch/alpha/lib/copy_page.S
@@ -4,7 +4,7 @@
*
* Copy an entire page.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.text
.align 4
.global copy_page
diff --git a/arch/alpha/lib/copy_user.S b/arch/alpha/lib/copy_user.S
index 32ab0344b185..ef18faafcad6 100644
--- a/arch/alpha/lib/copy_user.S
+++ b/arch/alpha/lib/copy_user.S
@@ -12,7 +12,7 @@
* exception setup stuff..
*/
-#include <asm/export.h>
+#include <linux/export.h>
/* Allow an exception for an insn; exit if we get one. */
#define EXI(x,y...) \
diff --git a/arch/alpha/lib/csum_ipv6_magic.S b/arch/alpha/lib/csum_ipv6_magic.S
index c7b213ab01ab..273c426c3859 100644
--- a/arch/alpha/lib/csum_ipv6_magic.S
+++ b/arch/alpha/lib/csum_ipv6_magic.S
@@ -13,7 +13,7 @@
* added by Ivan Kokshaysky <ink@jurassic.park.msu.ru>
*/
-#include <asm/export.h>
+#include <linux/export.h>
.globl csum_ipv6_magic
.align 4
.ent csum_ipv6_magic
diff --git a/arch/alpha/lib/divide.S b/arch/alpha/lib/divide.S
index 2b60eb45e50b..db01840d76ec 100644
--- a/arch/alpha/lib/divide.S
+++ b/arch/alpha/lib/divide.S
@@ -46,7 +46,7 @@
* $28 - compare status
*/
-#include <asm/export.h>
+#include <linux/export.h>
#define halt .long 0
/*
diff --git a/arch/alpha/lib/ev6-clear_page.S b/arch/alpha/lib/ev6-clear_page.S
index 325864c81586..a534d9ff7161 100644
--- a/arch/alpha/lib/ev6-clear_page.S
+++ b/arch/alpha/lib/ev6-clear_page.S
@@ -4,7 +4,7 @@
*
* Zero an entire page.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.text
.align 4
.global clear_page
diff --git a/arch/alpha/lib/ev6-clear_user.S b/arch/alpha/lib/ev6-clear_user.S
index 7e644f83cdf2..af776cc45f91 100644
--- a/arch/alpha/lib/ev6-clear_user.S
+++ b/arch/alpha/lib/ev6-clear_user.S
@@ -29,7 +29,7 @@
* want to leave a hole (and we also want to avoid repeating lots of work)
*/
-#include <asm/export.h>
+#include <linux/export.h>
/* Allow an exception for an insn; exit if we get one. */
#define EX(x,y...) \
99: x,##y; \
diff --git a/arch/alpha/lib/ev6-copy_page.S b/arch/alpha/lib/ev6-copy_page.S
index fd7212c8dcf1..36be5113b7b7 100644
--- a/arch/alpha/lib/ev6-copy_page.S
+++ b/arch/alpha/lib/ev6-copy_page.S
@@ -57,7 +57,7 @@
destination pages are in the dcache, but it is my guess that this is
less important than the dcache miss case. */
-#include <asm/export.h>
+#include <linux/export.h>
.text
.align 4
.global copy_page
diff --git a/arch/alpha/lib/ev6-copy_user.S b/arch/alpha/lib/ev6-copy_user.S
index f3e433754397..b9b19710c364 100644
--- a/arch/alpha/lib/ev6-copy_user.S
+++ b/arch/alpha/lib/ev6-copy_user.S
@@ -23,7 +23,7 @@
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
*/
-#include <asm/export.h>
+#include <linux/export.h>
/* Allow an exception for an insn; exit if we get one. */
#define EXI(x,y...) \
99: x,##y; \
diff --git a/arch/alpha/lib/ev6-csum_ipv6_magic.S b/arch/alpha/lib/ev6-csum_ipv6_magic.S
index 9a73f90700a1..2ee548be98e3 100644
--- a/arch/alpha/lib/ev6-csum_ipv6_magic.S
+++ b/arch/alpha/lib/ev6-csum_ipv6_magic.S
@@ -53,7 +53,7 @@
* may cause additional delay in rare cases (load-load replay traps).
*/
-#include <asm/export.h>
+#include <linux/export.h>
.globl csum_ipv6_magic
.align 4
.ent csum_ipv6_magic
diff --git a/arch/alpha/lib/ev6-divide.S b/arch/alpha/lib/ev6-divide.S
index 137ff1a07356..b73a6d26362e 100644
--- a/arch/alpha/lib/ev6-divide.S
+++ b/arch/alpha/lib/ev6-divide.S
@@ -56,7 +56,7 @@
* Try not to change the actual algorithm if possible for consistency.
*/
-#include <asm/export.h>
+#include <linux/export.h>
#define halt .long 0
/*
diff --git a/arch/alpha/lib/ev6-memchr.S b/arch/alpha/lib/ev6-memchr.S
index 56bf9e14eeee..f75ba43e61e3 100644
--- a/arch/alpha/lib/ev6-memchr.S
+++ b/arch/alpha/lib/ev6-memchr.S
@@ -28,7 +28,7 @@
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
* Try not to change the actual algorithm if possible for consistency.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.set noreorder
.set noat
diff --git a/arch/alpha/lib/ev6-memcpy.S b/arch/alpha/lib/ev6-memcpy.S
index ffbd056b6eb2..3ef43c26c8af 100644
--- a/arch/alpha/lib/ev6-memcpy.S
+++ b/arch/alpha/lib/ev6-memcpy.S
@@ -20,7 +20,7 @@
* Temp usage notes:
* $1,$2, - scratch
*/
-#include <asm/export.h>
+#include <linux/export.h>
.set noreorder
.set noat
diff --git a/arch/alpha/lib/ev6-memset.S b/arch/alpha/lib/ev6-memset.S
index 1cfcfbbea6f0..89d7809da4cc 100644
--- a/arch/alpha/lib/ev6-memset.S
+++ b/arch/alpha/lib/ev6-memset.S
@@ -27,7 +27,7 @@
* as fixes will need to be made in multiple places. The performance gain
* is worth it.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.set noat
.set noreorder
.text
diff --git a/arch/alpha/lib/ev67-strcat.S b/arch/alpha/lib/ev67-strcat.S
index ec3096a9e8d4..f8c7305b11d6 100644
--- a/arch/alpha/lib/ev67-strcat.S
+++ b/arch/alpha/lib/ev67-strcat.S
@@ -20,7 +20,7 @@
* string once.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.text
.align 4
diff --git a/arch/alpha/lib/ev67-strchr.S b/arch/alpha/lib/ev67-strchr.S
index fbf89e0b6dc3..97a7cb475309 100644
--- a/arch/alpha/lib/ev67-strchr.S
+++ b/arch/alpha/lib/ev67-strchr.S
@@ -16,7 +16,7 @@
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
* Try not to change the actual algorithm if possible for consistency.
*/
-#include <asm/export.h>
+#include <linux/export.h>
#include <asm/regdef.h>
.set noreorder
diff --git a/arch/alpha/lib/ev67-strlen.S b/arch/alpha/lib/ev67-strlen.S
index b73106ffbbc7..3d9078807ab4 100644
--- a/arch/alpha/lib/ev67-strlen.S
+++ b/arch/alpha/lib/ev67-strlen.S
@@ -18,7 +18,7 @@
* U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
*/
-#include <asm/export.h>
+#include <linux/export.h>
.set noreorder
.set noat
diff --git a/arch/alpha/lib/ev67-strncat.S b/arch/alpha/lib/ev67-strncat.S
index ceb0ca528789..8f313233e3a7 100644
--- a/arch/alpha/lib/ev67-strncat.S
+++ b/arch/alpha/lib/ev67-strncat.S
@@ -21,7 +21,7 @@
* Try not to change the actual algorithm if possible for consistency.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.text
.align 4
diff --git a/arch/alpha/lib/ev67-strrchr.S b/arch/alpha/lib/ev67-strrchr.S
index 7f80e398530f..ae7355f9ec56 100644
--- a/arch/alpha/lib/ev67-strrchr.S
+++ b/arch/alpha/lib/ev67-strrchr.S
@@ -19,7 +19,7 @@
* L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1
*/
-#include <asm/export.h>
+#include <linux/export.h>
#include <asm/regdef.h>
.set noreorder
diff --git a/arch/alpha/lib/memchr.S b/arch/alpha/lib/memchr.S
index c13d3eca2e05..45366e32feee 100644
--- a/arch/alpha/lib/memchr.S
+++ b/arch/alpha/lib/memchr.S
@@ -31,7 +31,7 @@ For correctness consider that:
- only minimum number of quadwords may be accessed
- the third argument is an unsigned long
*/
-#include <asm/export.h>
+#include <linux/export.h>
.set noreorder
.set noat
diff --git a/arch/alpha/lib/memmove.S b/arch/alpha/lib/memmove.S
index 42d1922d0edf..3a27689e3390 100644
--- a/arch/alpha/lib/memmove.S
+++ b/arch/alpha/lib/memmove.S
@@ -7,7 +7,7 @@
* This is hand-massaged output from the original memcpy.c. We defer to
* memcpy whenever possible; the backwards copy loops are not unrolled.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.set noat
.set noreorder
.text
diff --git a/arch/alpha/lib/memset.S b/arch/alpha/lib/memset.S
index 00393e30df25..9075d6918346 100644
--- a/arch/alpha/lib/memset.S
+++ b/arch/alpha/lib/memset.S
@@ -14,7 +14,7 @@
* The scheduling comments are according to the EV5 documentation (and done by
* hand, so they might well be incorrect, please do tell me about it..)
*/
-#include <asm/export.h>
+#include <linux/export.h>
.set noat
.set noreorder
.text
diff --git a/arch/alpha/lib/strcat.S b/arch/alpha/lib/strcat.S
index 055877dccd27..62b90ebbcf44 100644
--- a/arch/alpha/lib/strcat.S
+++ b/arch/alpha/lib/strcat.S
@@ -5,7 +5,7 @@
*
* Append a null-terminated string from SRC to DST.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.text
diff --git a/arch/alpha/lib/strchr.S b/arch/alpha/lib/strchr.S
index 17871dd00280..68c54ff50dfe 100644
--- a/arch/alpha/lib/strchr.S
+++ b/arch/alpha/lib/strchr.S
@@ -6,7 +6,7 @@
* Return the address of a given character within a null-terminated
* string, or null if it is not found.
*/
-#include <asm/export.h>
+#include <linux/export.h>
#include <asm/regdef.h>
.set noreorder
diff --git a/arch/alpha/lib/strcpy.S b/arch/alpha/lib/strcpy.S
index cb74ad23a90d..d8773ba77525 100644
--- a/arch/alpha/lib/strcpy.S
+++ b/arch/alpha/lib/strcpy.S
@@ -6,7 +6,7 @@
* Copy a null-terminated string from SRC to DST. Return a pointer
* to the null-terminator in the source.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.text
.align 3
diff --git a/arch/alpha/lib/strlen.S b/arch/alpha/lib/strlen.S
index dd882fe4d7e3..4fc6a6ff24cd 100644
--- a/arch/alpha/lib/strlen.S
+++ b/arch/alpha/lib/strlen.S
@@ -12,7 +12,7 @@
* do this instead of the 9 instructions that
* binary search needs).
*/
-#include <asm/export.h>
+#include <linux/export.h>
.set noreorder
.set noat
diff --git a/arch/alpha/lib/strncat.S b/arch/alpha/lib/strncat.S
index 522fee3e26ac..a913a7c84a39 100644
--- a/arch/alpha/lib/strncat.S
+++ b/arch/alpha/lib/strncat.S
@@ -10,7 +10,7 @@
* past count, whereas libc may write to count+1. This follows the generic
* implementation in lib/string.c and is, IMHO, more sensible.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.text
.align 3
diff --git a/arch/alpha/lib/strncpy.S b/arch/alpha/lib/strncpy.S
index cc57fad8b7ca..cb90cf022df3 100644
--- a/arch/alpha/lib/strncpy.S
+++ b/arch/alpha/lib/strncpy.S
@@ -11,7 +11,7 @@
* version has cropped that bit o' nastiness as well as assuming that
* __stxncpy is in range of a branch.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.set noat
.set noreorder
diff --git a/arch/alpha/lib/strrchr.S b/arch/alpha/lib/strrchr.S
index 7650ba99b7e2..dd8e073b6cf2 100644
--- a/arch/alpha/lib/strrchr.S
+++ b/arch/alpha/lib/strrchr.S
@@ -6,7 +6,7 @@
* Return the address of the last occurrence of a given character
* within a null-terminated string, or null if it is not found.
*/
-#include <asm/export.h>
+#include <linux/export.h>
#include <asm/regdef.h>
.set noreorder
diff --git a/arch/alpha/lib/udiv-qrnnd.S b/arch/alpha/lib/udiv-qrnnd.S
index b887aa5428e5..96f05918bffe 100644
--- a/arch/alpha/lib/udiv-qrnnd.S
+++ b/arch/alpha/lib/udiv-qrnnd.S
@@ -25,7 +25,7 @@
# along with GCC; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA.
-#include <asm/export.h>
+#include <linux/export.h>
.set noreorder
.set noat
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 6f4995ad9873..3162db540ee9 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -27,6 +27,8 @@ config ARC
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select GENERIC_IOREMAP
+ select GENERIC_STRNCPY_FROM_USER if MMU
+ select GENERIC_STRNLEN_USER if MMU
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4
@@ -491,11 +493,11 @@ config ARC_KVADDR_SIZE
kernel-user gutter)
config ARC_CURR_IN_REG
- bool "Dedicate Register r25 for current_task pointer"
+ bool "cache current task pointer in gp"
default y
help
- This reserved Register R25 to point to Current Task in
- kernel mode. This saves memory access for each such access
+ This reserves gp register to point to Current Task in
+ kernel mode eliding memory access for each access
config ARC_EMUL_UNALIGNED
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 329400a1c355..2390dd042e36 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -28,14 +28,14 @@ cflags-y += $(tune-mcpu-def-y)
endif
endif
-
ifdef CONFIG_ARC_CURR_IN_REG
# For a global register definition, make sure it gets passed to every file
# We had a customer reported bug where some code built in kernel was NOT using
-# any kernel headers, and missing the r25 global register
+# any kernel headers, and missing the global register
# Can't do unconditionally because of recursive include issues
# due to <linux/thread_info.h>
LINUXINCLUDE += -include $(srctree)/arch/arc/include/asm/current.h
+cflags-y += -ffixed-gp
endif
cflags-y += -fsection-anchors
@@ -67,7 +67,7 @@ cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables $(cfi)
# small data is default for elf32 tool-chain. If not usable, disable it
# This also allows repurposing GP as scratch reg to gcc reg allocator
disable_small_data := y
-cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp
+cflags-$(disable_small_data) += -mno-sdata
cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian
ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 2162023195c5..4b13f60fe7ca 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -23,7 +23,7 @@
#define ARC_REG_ICCM_BUILD 0x78 /* ICCM size (common) */
#define ARC_REG_XY_MEM_BCR 0x79
#define ARC_REG_MAC_BCR 0x7a
-#define ARC_REG_MUL_BCR 0x7b
+#define ARC_REG_MPY_BCR 0x7b
#define ARC_REG_SWAP_BCR 0x7c
#define ARC_REG_NORM_BCR 0x7d
#define ARC_REG_MIXMAX_BCR 0x7e
@@ -177,7 +177,7 @@ struct bcr_isa_arcv2 {
#endif
};
-struct bcr_uarch_build_arcv2 {
+struct bcr_uarch_build {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:8, prod:8, maj:8, min:8;
#else
@@ -185,6 +185,59 @@ struct bcr_uarch_build_arcv2 {
#endif
};
+struct bcr_mmu_3 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
+ u_itlb:4, u_dtlb:4;
+#else
+ unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
+ ways:4, ver:8;
+#endif
+};
+
+struct bcr_mmu_4 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
+ n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
+#else
+ /* DTLB ITLB JES JE JA */
+ unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
+ pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
+#endif
+};
+
+struct bcr_cache {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
+#else
+ unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
+#endif
+};
+
+struct bcr_slc_cfg {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:24, way:2, lsz:2, sz:4;
+#else
+ unsigned int sz:4, lsz:2, way:2, pad:24;
+#endif
+};
+
+struct bcr_clust_cfg {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
+#else
+ unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
+#endif
+};
+
+struct bcr_volatile {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int start:4, limit:4, pad:22, order:1, disable:1;
+#else
+ unsigned int disable:1, order:1, pad:22, limit:4, start:4;
+#endif
+};
+
struct bcr_mpy {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
@@ -302,48 +355,6 @@ struct bcr_generic {
#endif
};
-/*
- *******************************************************************
- * Generic structures to hold build configuration used at runtime
- */
-
-struct cpuinfo_arc_mmu {
- unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, pad:10, sasid:1, pae:1;
- unsigned int sets:12, ways:4, u_dtlb:8, u_itlb:8;
-};
-
-struct cpuinfo_arc_cache {
- unsigned int sz_k:14, line_len:8, assoc:4, alias:1, vipt:1, pad:4;
-};
-
-struct cpuinfo_arc_bpu {
- unsigned int ver, full, num_cache, num_pred, ret_stk;
-};
-
-struct cpuinfo_arc_ccm {
- unsigned int base_addr, sz;
-};
-
-struct cpuinfo_arc {
- struct cpuinfo_arc_cache icache, dcache, slc;
- struct cpuinfo_arc_mmu mmu;
- struct cpuinfo_arc_bpu bpu;
- struct bcr_identity core;
- struct bcr_isa_arcv2 isa;
- const char *release, *name;
- unsigned int vec_base;
- struct cpuinfo_arc_ccm iccm, dccm;
- struct {
- unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
- fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
- ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1,
- timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
- } extn;
- struct bcr_mpy extn_mpy;
-};
-
-extern struct cpuinfo_arc cpuinfo_arc700[];
-
static inline int is_isa_arcv2(void)
{
return IS_ENABLED(CONFIG_ISA_ARCV2);
diff --git a/arch/arc/include/asm/atomic-llsc.h b/arch/arc/include/asm/atomic-llsc.h
index 1b0ffaeee16d..5258cb81a16b 100644
--- a/arch/arc/include/asm/atomic-llsc.h
+++ b/arch/arc/include/asm/atomic-llsc.h
@@ -18,7 +18,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v) \
: [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
: [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
[i] "ir" (i) \
- : "cc"); \
+ : "cc", "memory"); \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
@@ -34,7 +34,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
: [val] "=&r" (val) \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
- : "cc"); \
+ : "cc", "memory"); \
\
return val; \
}
@@ -56,7 +56,7 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
[orig] "=&r" (orig) \
: [ctr] "r" (&v->counter), \
[i] "ir" (i) \
- : "cc"); \
+ : "cc", "memory"); \
\
return orig; \
}
diff --git a/arch/arc/include/asm/atomic64-arcv2.h b/arch/arc/include/asm/atomic64-arcv2.h
index 6b6db981967a..9b5791b85471 100644
--- a/arch/arc/include/asm/atomic64-arcv2.h
+++ b/arch/arc/include/asm/atomic64-arcv2.h
@@ -60,7 +60,7 @@ static inline void arch_atomic64_##op(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
- : "cc"); \
+ : "cc", "memory"); \
} \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
@@ -77,7 +77,7 @@ static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: [val] "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
- : "cc"); /* memory clobber comes from smp_mb() */ \
+ : "cc", "memory"); \
\
return val; \
}
@@ -99,7 +99,7 @@ static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
" bnz 1b \n" \
: "=&r"(orig), "=&r"(val) \
: "r"(&v->counter), "ir"(a) \
- : "cc"); /* memory clobber comes from smp_mb() */ \
+ : "cc", "memory"); \
\
return orig; \
}
diff --git a/arch/arc/include/asm/current.h b/arch/arc/include/asm/current.h
index 9b9bdd3e6538..06be89f6f2f0 100644
--- a/arch/arc/include/asm/current.h
+++ b/arch/arc/include/asm/current.h
@@ -13,7 +13,7 @@
#ifdef CONFIG_ARC_CURR_IN_REG
-register struct task_struct *curr_arc asm("r25");
+register struct task_struct *curr_arc asm("gp");
#define current (curr_arc)
#else
diff --git a/arch/arc/include/asm/dwarf.h b/arch/arc/include/asm/dwarf.h
index 5f4de05bd4ee..a0d5ebe1bc3f 100644
--- a/arch/arc/include/asm/dwarf.h
+++ b/arch/arc/include/asm/dwarf.h
@@ -10,23 +10,31 @@
#ifdef ARC_DW2_UNWIND_AS_CFI
-#define CFI_STARTPROC .cfi_startproc
-#define CFI_ENDPROC .cfi_endproc
-#define CFI_DEF_CFA .cfi_def_cfa
-#define CFI_REGISTER .cfi_register
-#define CFI_REL_OFFSET .cfi_rel_offset
-#define CFI_UNDEFINED .cfi_undefined
+#define CFI_STARTPROC .cfi_startproc
+#define CFI_ENDPROC .cfi_endproc
+#define CFI_DEF_CFA .cfi_def_cfa
+#define CFI_DEF_CFA_OFFSET .cfi_def_cfa_offset
+#define CFI_DEF_CFA_REGISTER .cfi_def_cfa_register
+#define CFI_OFFSET .cfi_offset
+#define CFI_REL_OFFSET .cfi_rel_offset
+#define CFI_REGISTER .cfi_register
+#define CFI_RESTORE .cfi_restore
+#define CFI_UNDEFINED .cfi_undefined
#else
#define CFI_IGNORE #
-#define CFI_STARTPROC CFI_IGNORE
-#define CFI_ENDPROC CFI_IGNORE
-#define CFI_DEF_CFA CFI_IGNORE
-#define CFI_REGISTER CFI_IGNORE
-#define CFI_REL_OFFSET CFI_IGNORE
-#define CFI_UNDEFINED CFI_IGNORE
+#define CFI_STARTPROC CFI_IGNORE
+#define CFI_ENDPROC CFI_IGNORE
+#define CFI_DEF_CFA CFI_IGNORE
+#define CFI_DEF_CFA_OFFSET CFI_IGNORE
+#define CFI_DEF_CFA_REGISTER CFI_IGNORE
+#define CFI_OFFSET CFI_IGNORE
+#define CFI_REL_OFFSET CFI_IGNORE
+#define CFI_REGISTER CFI_IGNORE
+#define CFI_RESTORE CFI_IGNORE
+#define CFI_UNDEFINED CFI_IGNORE
#endif /* !ARC_DW2_UNWIND_AS_CFI */
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index 0ff4c0610561..4d13320e0c1b 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -18,7 +18,6 @@
* | orig_r0 |
* | event/ECR |
* | bta |
- * | user_r25 |
* | gp |
* | fp |
* | sp |
@@ -49,14 +48,18 @@
/*------------------------------------------------------------------------*/
.macro INTERRUPT_PROLOGUE
- ; (A) Before jumping to Interrupt Vector, hardware micro-ops did following:
+ ; Before jumping to Interrupt Vector, hardware micro-ops did following:
; 1. SP auto-switched to kernel mode stack
; 2. STATUS32.Z flag set if in U mode at time of interrupt (U:1,K:0)
; 3. Auto save: (mandatory) Push PC and STAT32 on stack
; hardware does even if CONFIG_ARC_IRQ_NO_AUTOSAVE
- ; 4. Auto save: (optional) r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI
+ ; 4a. Auto save: (optional) r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI
;
- ; (B) Manually saved some regs: r12,r25,r30, sp,fp,gp, ACCL pair
+ ; Now
+ ; 4b. If Auto-save (optional) not enabled in hw, manually save them
+ ; 5. Manually save: r12,r30, sp,fp,gp, ACCL pair
+ ;
+ ; At the end, SP points to pt_regs
#ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE
; carve pt_regs on stack (case #3), PC/STAT32 already on stack
@@ -72,15 +75,16 @@
.endm
/*------------------------------------------------------------------------*/
-.macro EXCEPTION_PROLOGUE
+.macro EXCEPTION_PROLOGUE_KEEP_AE
- ; (A) Before jumping to Exception Vector, hardware micro-ops did following:
+ ; Before jumping to Exception Vector, hardware micro-ops did following:
; 1. SP auto-switched to kernel mode stack
; 2. STATUS32.Z flag set if in U mode at time of exception (U:1,K:0)
;
- ; (B) Manually save the complete reg file below
+ ; Now manually save rest of reg file
+ ; At the end, SP points to pt_regs
- sub sp, sp, SZ_PT_REGS ; carve pt_regs
+ sub sp, sp, SZ_PT_REGS ; carve space for pt_regs
; _HARD saves r10 clobbered by _SOFT as scratch hence comes first
@@ -100,6 +104,16 @@
; OUTPUT: r10 has ECR expected by EV_Trap
.endm
+.macro EXCEPTION_PROLOGUE
+
+ EXCEPTION_PROLOGUE_KEEP_AE ; return ECR in r10
+
+ lr r0, [efa]
+ mov r1, sp
+
+ FAKE_RET_FROM_EXCPN ; clobbers r9
+.endm
+
/*------------------------------------------------------------------------
* This macro saves the registers manually which would normally be autosaved
* by hardware on taken interrupts. It is used by
@@ -135,10 +149,10 @@
*/
.macro __SAVE_REGFILE_SOFT
- ST2 gp, fp, PT_r26 ; gp (r26), fp (r27)
-
- st r12, [sp, PT_sp + 4]
- st r30, [sp, PT_sp + 8]
+ st fp, [sp, PT_fp] ; r27
+ st r30, [sp, PT_r30]
+ st r12, [sp, PT_r12]
+ st r26, [sp, PT_r26] ; gp
; Saving pt_regs->sp correctly requires some extra work due to the way
; Auto stack switch works
@@ -153,30 +167,30 @@
; ISA requires ADD.nz to have same dest and src reg operands
mov.nz r10, sp
- add.nz r10, r10, SZ_PT_REGS ; K mode SP
+ add2.nz r10, r10, SZ_PT_REGS/4 ; K mode SP
st r10, [sp, PT_sp] ; SP (pt_regs->sp)
-#ifdef CONFIG_ARC_CURR_IN_REG
- st r25, [sp, PT_user_r25]
- GET_CURR_TASK_ON_CPU r25
-#endif
-
#ifdef CONFIG_ARC_HAS_ACCL_REGS
ST2 r58, r59, PT_r58
#endif
/* clobbers r10, r11 registers pair */
DSP_SAVE_REGFILE_IRQ
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ GET_CURR_TASK_ON_CPU gp
+#endif
+
.endm
/*------------------------------------------------------------------------*/
.macro __RESTORE_REGFILE_SOFT
- LD2 gp, fp, PT_r26 ; gp (r26), fp (r27)
-
- ld r12, [sp, PT_r12]
+ ld fp, [sp, PT_fp]
ld r30, [sp, PT_r30]
+ ld r12, [sp, PT_r12]
+ ld r26, [sp, PT_r26]
; Restore SP (into AUX_USER_SP) only if returning to U mode
; - for K mode, it will be implicitly restored as stack is unwound
@@ -188,10 +202,6 @@
sr r10, [AUX_USER_SP]
1:
-#ifdef CONFIG_ARC_CURR_IN_REG
- ld r25, [sp, PT_user_r25]
-#endif
-
/* clobbers r10, r11 registers pair */
DSP_RESTORE_REGFILE_IRQ
@@ -249,7 +259,7 @@
btst r0, STATUS_U_BIT ; Z flag set if K, used in restoring SP
- ld r10, [sp, PT_event + 4]
+ ld r10, [sp, PT_bta]
sr r10, [erbta]
LD2 r10, r11, PT_ret
@@ -264,8 +274,8 @@
.macro FAKE_RET_FROM_EXCPN
lr r9, [status32]
- bic r9, r9, STATUS_AE_MASK
- or r9, r9, STATUS_IE_MASK
+ bclr r9, r9, STATUS_AE_BIT
+ bset r9, r9, STATUS_IE_BIT
kflag r9
.endm
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index 67ff06e15cea..a0e760eb35a8 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -140,7 +140,7 @@
*
* After this it is safe to call the "C" handlers
*-------------------------------------------------------------*/
-.macro EXCEPTION_PROLOGUE
+.macro EXCEPTION_PROLOGUE_KEEP_AE
/* Need at least 1 reg to code the early exception prologue */
PROLOG_FREEUP_REG r9, @ex_saved_reg1
@@ -151,14 +151,6 @@
/* ARC700 doesn't provide auto-stack switching */
SWITCH_TO_KERNEL_STK
-#ifdef CONFIG_ARC_CURR_IN_REG
- /* Treat r25 as scratch reg (save on stack) and load with "current" */
- PUSH r25
- GET_CURR_TASK_ON_CPU r25
-#else
- sub sp, sp, 4
-#endif
-
st.a r0, [sp, -8] /* orig_r0 needed for syscall (skip ECR slot) */
sub sp, sp, 4 /* skip pt_regs->sp, already saved above */
@@ -178,7 +170,23 @@
PUSHAX erbta
lr r10, [ecr]
- st r10, [sp, PT_event] /* EV_Trap expects r10 to have ECR */
+ st r10, [sp, PT_event]
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ /* gp already saved on stack: now load with "current" */
+ GET_CURR_TASK_ON_CPU gp
+#endif
+ ; OUTPUT: r10 has ECR expected by EV_Trap
+.endm
+
+.macro EXCEPTION_PROLOGUE
+
+ EXCEPTION_PROLOGUE_KEEP_AE ; return ECR in r10
+
+ lr r0, [efa]
+ mov r1, sp
+
+ FAKE_RET_FROM_EXCPN ; clobbers r9
.endm
/*--------------------------------------------------------------
@@ -208,11 +216,8 @@
POP gp
RESTORE_R12_TO_R0
-#ifdef CONFIG_ARC_CURR_IN_REG
- ld r25, [sp, 12]
-#endif
ld sp, [sp] /* restore original sp */
- /* orig_r0, ECR, user_r25 skipped automatically */
+ /* orig_r0, ECR skipped automatically */
.endm
/* Dummy ECR values for Interrupts */
@@ -229,13 +234,6 @@
SWITCH_TO_KERNEL_STK
-#ifdef CONFIG_ARC_CURR_IN_REG
- /* Treat r25 as scratch reg (save on stack) and load with "current" */
- PUSH r25
- GET_CURR_TASK_ON_CPU r25
-#else
- sub sp, sp, 4
-#endif
PUSH 0x003\LVL\()abcd /* Dummy ECR */
sub sp, sp, 8 /* skip orig_r0 (not needed)
@@ -255,6 +253,10 @@
PUSHAX lp_start
PUSHAX bta_l\LVL\()
+#ifdef CONFIG_ARC_CURR_IN_REG
+ /* gp already saved on stack: now load with "current" */
+ GET_CURR_TASK_ON_CPU gp
+#endif
.endm
/*--------------------------------------------------------------
@@ -282,11 +284,7 @@
POP gp
RESTORE_R12_TO_R0
-#ifdef CONFIG_ARC_CURR_IN_REG
- ld r25, [sp, 12]
-#endif
- ld sp, [sp] /* restore original sp */
- /* orig_r0, ECR, user_r25 skipped automatically */
+ ld sp, [sp] /* restore original sp; orig_r0, ECR skipped implicitly */
.endm
/* Get thread_info of "current" tsk */
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index fcdd59d77f42..49c2e090cb5c 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -13,6 +13,8 @@
#include <asm/processor.h> /* For VMALLOC_START */
#include <asm/mmu.h>
+#ifdef __ASSEMBLY__
+
#ifdef CONFIG_ISA_ARCOMPACT
#include <asm/entry-compact.h> /* ISA specific bits */
#else
@@ -89,7 +91,7 @@
* Helpers to save/restore callee-saved regs:
* used by several macros below
*-------------------------------------------------------------*/
-.macro SAVE_R13_TO_R24
+.macro SAVE_R13_TO_R25
PUSH r13
PUSH r14
PUSH r15
@@ -102,9 +104,11 @@
PUSH r22
PUSH r23
PUSH r24
+ PUSH r25
.endm
-.macro RESTORE_R24_TO_R13
+.macro RESTORE_R25_TO_R13
+ POP r25
POP r24
POP r23
POP r22
@@ -119,81 +123,31 @@
POP r13
.endm
-/*--------------------------------------------------------------
- * Collect User Mode callee regs as struct callee_regs - needed by
- * fork/do_signal/unaligned-access-emulation.
- * (By default only scratch regs are saved on entry to kernel)
- *
- * Special handling for r25 if used for caching Task Pointer.
- * It would have been saved in task->thread.user_r25 already, but to keep
- * the interface same it is copied into regular r25 placeholder in
- * struct callee_regs.
- *-------------------------------------------------------------*/
+/*
+ * save user mode callee regs as struct callee_regs
+ * - needed by fork/do_signal/unaligned-access-emulation.
+ */
.macro SAVE_CALLEE_SAVED_USER
+ SAVE_R13_TO_R25
+.endm
- mov r12, sp ; save SP as ref to pt_regs
- SAVE_R13_TO_R24
-
-#ifdef CONFIG_ARC_CURR_IN_REG
- ; Retrieve orig r25 and save it with rest of callee_regs
- ld r12, [r12, PT_user_r25]
- PUSH r12
-#else
- PUSH r25
-#endif
-
+/*
+ * restore user mode callee regs as struct callee_regs
+ * - could have been changed by ptrace tracer or unaligned-access fixup
+ */
+.macro RESTORE_CALLEE_SAVED_USER
+ RESTORE_R25_TO_R13
.endm
-/*--------------------------------------------------------------
- * Save kernel Mode callee regs at the time of Contect Switch.
- *
- * Special handling for r25 if used for caching Task Pointer.
- * Kernel simply skips saving it since it will be loaded with
- * incoming task pointer anyways
- *-------------------------------------------------------------*/
+/*
+ * save/restore kernel mode callee regs at the time of context switch
+ */
.macro SAVE_CALLEE_SAVED_KERNEL
-
- SAVE_R13_TO_R24
-
-#ifdef CONFIG_ARC_CURR_IN_REG
- sub sp, sp, 4
-#else
- PUSH r25
-#endif
+ SAVE_R13_TO_R25
.endm
-/*--------------------------------------------------------------
- * Opposite of SAVE_CALLEE_SAVED_KERNEL
- *-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_KERNEL
-
-#ifdef CONFIG_ARC_CURR_IN_REG
- add sp, sp, 4 /* skip usual r25 placeholder */
-#else
- POP r25
-#endif
- RESTORE_R24_TO_R13
-.endm
-
-/*--------------------------------------------------------------
- * Opposite of SAVE_CALLEE_SAVED_USER
- *
- * ptrace tracer or unaligned-access fixup might have changed a user mode
- * callee reg which is saved back to usual r25 storage location
- *-------------------------------------------------------------*/
-.macro RESTORE_CALLEE_SAVED_USER
-
-#ifdef CONFIG_ARC_CURR_IN_REG
- POP r12
-#else
- POP r25
-#endif
- RESTORE_R24_TO_R13
-
- ; SP is back to start of pt_regs
-#ifdef CONFIG_ARC_CURR_IN_REG
- st r12, [sp, PT_user_r25]
-#endif
+ RESTORE_R25_TO_R13
.endm
/*--------------------------------------------------------------
@@ -229,10 +183,10 @@
#ifdef CONFIG_SMP
-/*-------------------------------------------------
+/*
* Retrieve the current running task on this CPU
- * 1. Determine curr CPU id.
- * 2. Use it to index into _current_task[ ]
+ * - loads it from backing _current_task[] (and can't use the
+ * caching reg for current task)
*/
.macro GET_CURR_TASK_ON_CPU reg
GET_CPU_ID \reg
@@ -254,7 +208,7 @@
add2 \tmp, @_current_task, \tmp
st \tsk, [\tmp]
#ifdef CONFIG_ARC_CURR_IN_REG
- mov r25, \tsk
+ mov gp, \tsk
#endif
.endm
@@ -269,21 +223,20 @@
.macro SET_CURR_TASK_ON_CPU tsk, tmp
st \tsk, [@_current_task]
#ifdef CONFIG_ARC_CURR_IN_REG
- mov r25, \tsk
+ mov gp, \tsk
#endif
.endm
#endif /* SMP / UNI */
-/* ------------------------------------------------------------------
+/*
* Get the ptr to some field of Current Task at @off in task struct
- * -Uses r25 for Current task ptr if that is enabled
+ * - Uses current task cached in reg if enabled
*/
-
#ifdef CONFIG_ARC_CURR_IN_REG
.macro GET_CURR_TASK_FIELD_PTR off, reg
- add \reg, r25, \off
+ add \reg, gp, \off
.endm
#else
@@ -295,4 +248,23 @@
#endif /* CONFIG_ARC_CURR_IN_REG */
+#else /* !__ASSEMBLY__ */
+
+extern void do_signal(struct pt_regs *);
+extern void do_notify_resume(struct pt_regs *);
+extern int do_privilege_fault(unsigned long, struct pt_regs *);
+extern int do_extension_fault(unsigned long, struct pt_regs *);
+extern int insterror_is_error(unsigned long, struct pt_regs *);
+extern int do_memory_error(unsigned long, struct pt_regs *);
+extern int trap_is_brkpt(unsigned long, struct pt_regs *);
+extern int do_misaligned_error(unsigned long, struct pt_regs *);
+extern int do_trap5_error(unsigned long, struct pt_regs *);
+extern int do_misaligned_access(unsigned long, struct pt_regs *, struct callee_regs *);
+extern void do_machine_check_fault(unsigned long, struct pt_regs *);
+extern void do_non_swi_trap(unsigned long, struct pt_regs *);
+extern void do_insterror_or_kprobe(unsigned long, struct pt_regs *);
+extern void do_page_fault(unsigned long, struct pt_regs *);
+
+#endif
+
#endif /* __ASM_ARC_ENTRY_H */
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index 0309cb405cfb..c574712ad865 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -25,5 +25,6 @@
#include <asm-generic/irq.h>
extern void arc_init_IRQ(void);
+extern void arch_do_IRQ(unsigned int, struct pt_regs *);
#endif
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
index ca427c30f70e..9febf5bc3de6 100644
--- a/arch/arc/include/asm/mmu.h
+++ b/arch/arc/include/asm/mmu.h
@@ -14,6 +14,8 @@ typedef struct {
unsigned long asid[NR_CPUS]; /* 8 bit MMU PID + Generation cycle */
} mm_context_t;
+extern void do_tlb_overlap_fault(unsigned long, unsigned long, struct pt_regs *);
+
#endif
#include <asm/mmu-arcv2.h>
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index fb844fce1ab6..d606658e2fe7 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -22,7 +22,6 @@
* struct thread_info
*/
struct thread_struct {
- unsigned long ksp; /* kernel mode stack pointer */
unsigned long callee_reg; /* pointer to callee regs */
unsigned long fault_address; /* dbls as brkpt holder as well */
#ifdef CONFIG_ARC_DSP_SAVE_RESTORE_REGS
@@ -33,9 +32,7 @@ struct thread_struct {
#endif
};
-#define INIT_THREAD { \
- .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
-}
+#define INIT_THREAD { }
/* Forward declaration, a strange C thing */
struct task_struct;
@@ -56,7 +53,7 @@ struct task_struct;
* Where about of Task's sp, fp, blink when it was last seen in kernel mode.
* Look in process.c for details of kernel stack layout
*/
-#define TSK_K_ESP(tsk) (tsk->thread.ksp)
+#define TSK_K_ESP(tsk) (task_thread_info(tsk)->ksp)
#define TSK_K_REG(tsk, off) (*((unsigned long *)(TSK_K_ESP(tsk) + \
sizeof(struct callee_regs) + off)))
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 5869a74c0db2..4a2b30fb5a98 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -12,6 +12,17 @@
#ifndef __ASSEMBLY__
+typedef union {
+ struct {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned long state:8, vec:8, cause:8, param:8;
+#else
+ unsigned long param:8, cause:8, vec:8, state:8;
+#endif
+ };
+ unsigned long full;
+} ecr_reg;
+
/* THE pt_regs: Defines how regs are saved during entry into kernel */
#ifdef CONFIG_ISA_ARCOMPACT
@@ -40,23 +51,10 @@ struct pt_regs {
* Last word used by Linux for extra state mgmt (syscall-restart)
* For interrupts, use artificial ECR values to note current prio-level
*/
- union {
- struct {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned long state:8, ecr_vec:8,
- ecr_cause:8, ecr_param:8;
-#else
- unsigned long ecr_param:8, ecr_cause:8,
- ecr_vec:8, state:8;
-#endif
- };
- unsigned long event;
- };
-
- unsigned long user_r25;
+ ecr_reg ecr;
};
-#define MAX_REG_OFFSET offsetof(struct pt_regs, user_r25)
+#define MAX_REG_OFFSET offsetof(struct pt_regs, ecr)
#else
@@ -64,28 +62,14 @@ struct pt_regs {
unsigned long orig_r0;
- union {
- struct {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned long state:8, ecr_vec:8,
- ecr_cause:8, ecr_param:8;
-#else
- unsigned long ecr_param:8, ecr_cause:8,
- ecr_vec:8, state:8;
-#endif
- };
- unsigned long event;
- };
-
- unsigned long bta; /* bta_l1, bta_l2, erbta */
+ ecr_reg ecr; /* Exception Cause Reg */
- unsigned long user_r25;
+ unsigned long bta; /* erbta */
- unsigned long r26; /* gp */
unsigned long fp;
- unsigned long sp; /* user/kernel sp depending on where we came from */
-
- unsigned long r12, r30;
+ unsigned long r30;
+ unsigned long r12;
+ unsigned long r26; /* gp */
#ifdef CONFIG_ARC_HAS_ACCL_REGS
unsigned long r58, r59; /* ACCL/ACCH used by FPU / DSP MPY */
@@ -94,6 +78,8 @@ struct pt_regs {
unsigned long DSP_CTRL;
#endif
+ unsigned long sp; /* user/kernel sp depending on entry */
+
/*------- Below list auto saved by h/w -----------*/
unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
@@ -134,13 +120,13 @@ struct callee_regs {
/* return 1 if PC in delay slot */
#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
-#define in_syscall(regs) ((regs->ecr_vec == ECR_V_TRAP) && !regs->ecr_param)
-#define in_brkpt_trap(regs) ((regs->ecr_vec == ECR_V_TRAP) && regs->ecr_param)
+#define in_syscall(regs) ((regs->ecr.vec == ECR_V_TRAP) && !regs->ecr.param)
+#define in_brkpt_trap(regs) ((regs->ecr.vec == ECR_V_TRAP) && regs->ecr.param)
#define STATE_SCALL_RESTARTED 0x01
-#define syscall_wont_restart(reg) (reg->state |= STATE_SCALL_RESTARTED)
-#define syscall_restartable(reg) !(reg->state & STATE_SCALL_RESTARTED)
+#define syscall_wont_restart(regs) (regs->ecr.state |= STATE_SCALL_RESTARTED)
+#define syscall_restartable(regs) !(regs->ecr.state & STATE_SCALL_RESTARTED)
#define current_pt_regs() \
({ \
@@ -181,6 +167,9 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
return *(unsigned long *)((unsigned long)regs + offset);
}
+extern int syscall_trace_entry(struct pt_regs *);
+extern void syscall_trace_exit(struct pt_regs *);
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PTRACE_H */
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index 028a8cf76206..1c6db599e1fc 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -35,11 +35,11 @@ long __init arc_get_mem_sz(void);
#define IS_AVAIL3(v, v2, s) IS_AVAIL1(v, s), IS_AVAIL1(v, IS_DISABLED_RUN(v2))
extern void arc_mmu_init(void);
-extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
-extern void read_decode_mmu_bcr(void);
+extern int arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
extern void arc_cache_init(void);
-extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
-extern void read_decode_cache_bcr(void);
+extern int arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
+
+extern void __init handle_uboot_args(void);
#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index d856491606ac..e0913f52c2cd 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -29,6 +29,8 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
extern void __init smp_init_cpus(void);
extern void first_lines_of_secondary(void);
extern const char *arc_platform_smp_cpuinfo(void);
+extern void arc_platform_smp_wait_to_boot(int);
+extern void start_kernel_secondary(void);
/*
* API expected BY platform smp code (FROM arch smp code)
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 6ba7fe417095..4c530cf131f3 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -37,16 +37,16 @@
*/
struct thread_info {
unsigned long flags; /* low level flags */
+ unsigned long ksp; /* kernel mode stack top in __switch_to */
int preempt_count; /* 0 => preemptable, <0 => BUG */
- struct task_struct *task; /* main task structure */
- __u32 cpu; /* current CPU */
+ int cpu; /* current CPU */
unsigned long thr_ptr; /* TLS ptr */
+ struct task_struct *task; /* main task structure */
};
/*
- * macros/functions for gaining access to the thread information structure
- *
- * preempt_count needs to be 1 initially, until the scheduler is functional.
+ * initialize thread_info for any @tsk
+ * - this is not related to init_task per se
*/
#define INIT_THREAD_INFO(tsk) \
{ \
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 99712471c96a..1e8809ea000a 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -146,8 +146,9 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
if (n == 0)
return 0;
- /* unaligned */
- if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+ /* fallback for unaligned access when hardware doesn't support it */
+ if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
+ (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) {
unsigned char tmp;
@@ -373,8 +374,9 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
if (n == 0)
return 0;
- /* unaligned */
- if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+ /* fallback for unaligned access when hardware doesn't support it */
+ if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
+ (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) {
unsigned char tmp;
@@ -584,7 +586,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
return res;
}
-static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
+static inline unsigned long __clear_user(void __user *to, unsigned long n)
{
long res = n;
unsigned char *d_char = to;
@@ -626,17 +628,10 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
return res;
}
-#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
-
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
-#define __clear_user(d, n) __arc_clear_user(d, n)
-#else
-extern unsigned long arc_clear_user_noinline(void __user *to,
- unsigned long n);
-#define __clear_user(d, n) arc_clear_user_noinline(d, n)
-#endif
+#define __clear_user __clear_user
#include <asm-generic/uaccess.h>
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile
index 0723d888ac44..95fbf9364c67 100644
--- a/arch/arc/kernel/Makefile
+++ b/arch/arc/kernel/Makefile
@@ -5,6 +5,8 @@
obj-y := head.o arcksyms.o setup.o irq.o reset.o ptrace.o process.o devtree.o
obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o
+obj-y += ctx_sw_asm.o
+
obj-$(CONFIG_ISA_ARCOMPACT) += entry-compact.o intc-compact.o
obj-$(CONFIG_ISA_ARCV2) += entry-arcv2.o intc-arcv2.o
@@ -24,11 +26,4 @@ ifdef CONFIG_ISA_ARCOMPACT
CFLAGS_fpu.o += -mdpfp
endif
-ifdef CONFIG_ARC_DW2_UNWIND
-CFLAGS_ctx_sw.o += -fno-omit-frame-pointer
-obj-y += ctx_sw.o
-else
-obj-y += ctx_sw_asm.o
-endif
-
extra-y := vmlinux.lds
diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c
index 0e884036ab74..f77deb799175 100644
--- a/arch/arc/kernel/asm-offsets.c
+++ b/arch/arc/kernel/asm-offsets.c
@@ -20,13 +20,13 @@ int main(void)
BLANK();
- DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg));
DEFINE(THREAD_FAULT_ADDR,
offsetof(struct thread_struct, fault_address));
BLANK();
+ DEFINE(THREAD_INFO_KSP, offsetof(struct thread_info, ksp));
DEFINE(THREAD_INFO_FLAGS, offsetof(struct thread_info, flags));
DEFINE(THREAD_INFO_PREEMPT_COUNT,
offsetof(struct thread_info, preempt_count));
@@ -46,7 +46,8 @@ int main(void)
BLANK();
DEFINE(PT_status32, offsetof(struct pt_regs, status32));
- DEFINE(PT_event, offsetof(struct pt_regs, event));
+ DEFINE(PT_event, offsetof(struct pt_regs, ecr));
+ DEFINE(PT_bta, offsetof(struct pt_regs, bta));
DEFINE(PT_sp, offsetof(struct pt_regs, sp));
DEFINE(PT_r0, offsetof(struct pt_regs, r0));
DEFINE(PT_r1, offsetof(struct pt_regs, r1));
@@ -61,13 +62,9 @@ int main(void)
DEFINE(PT_r26, offsetof(struct pt_regs, r26));
DEFINE(PT_ret, offsetof(struct pt_regs, ret));
DEFINE(PT_blink, offsetof(struct pt_regs, blink));
+ OFFSET(PT_fp, pt_regs, fp);
DEFINE(PT_lpe, offsetof(struct pt_regs, lp_end));
DEFINE(PT_lpc, offsetof(struct pt_regs, lp_count));
- DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25));
-
- DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
- DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
-
#ifdef CONFIG_ISA_ARCV2
OFFSET(PT_r12, pt_regs, r12);
OFFSET(PT_r30, pt_regs, r30);
@@ -80,5 +77,8 @@ int main(void)
OFFSET(PT_DSP_CTRL, pt_regs, DSP_CTRL);
#endif
+ DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
+ DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
+
return 0;
}
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
deleted file mode 100644
index 1a76f2d6f694..000000000000
--- a/arch/arc/kernel/ctx_sw.c
+++ /dev/null
@@ -1,112 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * Vineetg: Aug 2009
- * -"C" version of lowest level context switch asm macro called by schedular
- * gcc doesn't generate the dward CFI info for hand written asm, hence can't
- * backtrace out of it (e.g. tasks sleeping in kernel).
- * So we cheat a bit by writing almost similar code in inline-asm.
- * -This is a hacky way of doing things, but there is no other simple way.
- * I don't want/intend to extend unwinding code to understand raw asm
- */
-
-#include <asm/asm-offsets.h>
-#include <linux/sched.h>
-#include <linux/sched/debug.h>
-
-#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
-
-struct task_struct *__sched
-__switch_to(struct task_struct *prev_task, struct task_struct *next_task)
-{
- unsigned int tmp;
- unsigned int prev = (unsigned int)prev_task;
- unsigned int next = (unsigned int)next_task;
-
- __asm__ __volatile__(
- /* FP/BLINK save generated by gcc (standard function prologue */
- "st.a r13, [sp, -4] \n\t"
- "st.a r14, [sp, -4] \n\t"
- "st.a r15, [sp, -4] \n\t"
- "st.a r16, [sp, -4] \n\t"
- "st.a r17, [sp, -4] \n\t"
- "st.a r18, [sp, -4] \n\t"
- "st.a r19, [sp, -4] \n\t"
- "st.a r20, [sp, -4] \n\t"
- "st.a r21, [sp, -4] \n\t"
- "st.a r22, [sp, -4] \n\t"
- "st.a r23, [sp, -4] \n\t"
- "st.a r24, [sp, -4] \n\t"
-#ifndef CONFIG_ARC_CURR_IN_REG
- "st.a r25, [sp, -4] \n\t"
-#else
- "sub sp, sp, 4 \n\t" /* usual r25 placeholder */
-#endif
-
- /* set ksp of outgoing task in tsk->thread.ksp */
-#if KSP_WORD_OFF <= 255
- "st.as sp, [%3, %1] \n\t"
-#else
- /*
- * Workaround for NR_CPUS=4k
- * %1 is bigger than 255 (S9 offset for st.as)
- */
- "add2 r24, %3, %1 \n\t"
- "st sp, [r24] \n\t"
-#endif
-
- /*
- * setup _current_task with incoming tsk.
- * optionally, set r25 to that as well
- * For SMP extra work to get to &_current_task[cpu]
- * (open coded SET_CURR_TASK_ON_CPU)
- */
-#ifndef CONFIG_SMP
- "st %2, [@_current_task] \n\t"
-#else
- "lr r24, [identity] \n\t"
- "lsr r24, r24, 8 \n\t"
- "bmsk r24, r24, 7 \n\t"
- "add2 r24, @_current_task, r24 \n\t"
- "st %2, [r24] \n\t"
-#endif
-#ifdef CONFIG_ARC_CURR_IN_REG
- "mov r25, %2 \n\t"
-#endif
-
- /* get ksp of incoming task from tsk->thread.ksp */
- "ld.as sp, [%2, %1] \n\t"
-
- /* start loading it's CALLEE reg file */
-
-#ifndef CONFIG_ARC_CURR_IN_REG
- "ld.ab r25, [sp, 4] \n\t"
-#else
- "add sp, sp, 4 \n\t"
-#endif
- "ld.ab r24, [sp, 4] \n\t"
- "ld.ab r23, [sp, 4] \n\t"
- "ld.ab r22, [sp, 4] \n\t"
- "ld.ab r21, [sp, 4] \n\t"
- "ld.ab r20, [sp, 4] \n\t"
- "ld.ab r19, [sp, 4] \n\t"
- "ld.ab r18, [sp, 4] \n\t"
- "ld.ab r17, [sp, 4] \n\t"
- "ld.ab r16, [sp, 4] \n\t"
- "ld.ab r15, [sp, 4] \n\t"
- "ld.ab r14, [sp, 4] \n\t"
- "ld.ab r13, [sp, 4] \n\t"
-
- /* last (ret value) = prev : although for ARC it mov r0, r0 */
- "mov %0, %3 \n\t"
-
- /* FP/BLINK restore generated by gcc (standard func epilogue */
-
- : "=r"(tmp)
- : "n"(KSP_WORD_OFF), "r"(next), "r"(prev)
- : "blink"
- );
-
- return (struct task_struct *)tmp;
-}
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
index 02c461484761..48e1f21976ed 100644
--- a/arch/arc/kernel/ctx_sw_asm.S
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -11,50 +11,54 @@
#include <asm/entry.h> /* For the SAVE_* macros */
#include <asm/asm-offsets.h>
-#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
-
-;################### Low Level Context Switch ##########################
+; IN
+; - r0: prev task (also current)
+; - r1: next task
+; OUT
+; - r0: prev task (so r0 not touched)
.section .sched.text,"ax",@progbits
- .align 4
- .global __switch_to
- .type __switch_to, @function
-__switch_to:
- CFI_STARTPROC
-
- /* Save regs on kernel mode stack of task */
- st.a blink, [sp, -4]
- st.a fp, [sp, -4]
- SAVE_CALLEE_SAVED_KERNEL
+ENTRY_CFI(__switch_to)
- /* Save the now KSP in task->thread.ksp */
-#if KSP_WORD_OFF <= 255
- st.as sp, [r0, KSP_WORD_OFF]
-#else
- /* Workaround for NR_CPUS=4k as ST.as can only take s9 offset */
- add2 r24, r0, KSP_WORD_OFF
- st sp, [r24]
-#endif
- /*
- * Return last task in r0 (return reg)
- * On ARC, Return reg = First Arg reg = r0.
- * Since we already have last task in r0,
- * don't need to do anything special to return it
- */
+ /* save kernel stack frame regs of @prev task */
+ push blink
+ CFI_DEF_CFA_OFFSET 4
+ CFI_OFFSET r31, -4
+
+ push fp
+ CFI_DEF_CFA_OFFSET 8
+ CFI_OFFSET r27, -8
+
+ mov fp, sp
+ CFI_DEF_CFA_REGISTER r27
+
+ /* kernel mode callee regs of @prev */
+ SAVE_CALLEE_SAVED_KERNEL
/*
- * switch to new task, contained in r1
- * Temp reg r3 is required to get the ptr to store val
+ * save final SP to @prev->thread_info.ksp
+ * @prev is "current" so thread_info derived from SP
*/
- SET_CURR_TASK_ON_CPU r1, r3
+ GET_CURR_THR_INFO_FROM_SP r10
+ st sp, [r10, THREAD_INFO_KSP]
+
+ /* update @next in _current_task[] and GP register caching it */
+ SET_CURR_TASK_ON_CPU r1, r10
- /* reload SP with kernel mode stack pointer in task->thread.ksp */
- ld.as sp, [r1, (TASK_THREAD + THREAD_KSP)/4]
+ /* load SP from @next->thread_info.ksp */
+ ld r10, [r1, TASK_THREAD_INFO]
+ ld sp, [r10, THREAD_INFO_KSP]
- /* restore the registers */
+ /* restore callee regs, stack frame regs of @next */
RESTORE_CALLEE_SAVED_KERNEL
- ld.ab fp, [sp, 4]
- ld.ab blink, [sp, 4]
- j [blink]
+ pop fp
+ CFI_RESTORE r27
+ CFI_DEF_CFA r28, 4
+
+ pop blink
+ CFI_RESTORE r31
+ CFI_DEF_CFA_OFFSET 0
+
+ j [blink]
END_CFI(__switch_to)
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index 721d465f1580..4c9e61457b2f 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <asm/mach_desc.h>
+#include <asm/serial.h>
#ifdef CONFIG_SERIAL_EARLYCON
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index a7e6a2174187..2e49c81c8086 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -125,11 +125,6 @@ ENTRY(mem_service)
EXCEPTION_PROLOGUE
- lr r0, [efa]
- mov r1, sp
-
- FAKE_RET_FROM_EXCPN
-
bl do_memory_error
b ret_from_exception
END(mem_service)
@@ -138,11 +133,6 @@ ENTRY(EV_Misaligned)
EXCEPTION_PROLOGUE
- lr r0, [efa] ; Faulting Data address
- mov r1, sp
-
- FAKE_RET_FROM_EXCPN
-
SAVE_CALLEE_SAVED_USER
mov r2, sp ; callee_regs
@@ -163,11 +153,6 @@ ENTRY(EV_TLBProtV)
EXCEPTION_PROLOGUE
- lr r0, [efa] ; Faulting Data address
- mov r1, sp ; pt_regs
-
- FAKE_RET_FROM_EXCPN
-
mov blink, ret_from_exception
b do_page_fault
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 5cb0cd7e4eab..774c03cc1d1a 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -254,18 +254,7 @@ END(handle_interrupt_level1)
ENTRY(EV_TLBProtV)
- EXCEPTION_PROLOGUE
-
- mov r2, r10 ; ECR set into r10 already
- lr r0, [efa] ; Faulting Data address (not part of pt_regs saved above)
-
- ; Exception auto-disables further Intr/exceptions.
- ; Re-enable them by pretending to return from exception
- ; (so rest of handler executes in pure K mode)
-
- FAKE_RET_FROM_EXCPN
-
- mov r1, sp ; Handle to pt_regs
+ EXCEPTION_PROLOGUE ; ECR returned in r10
;------ (5) Type of Protection Violation? ----------
;
@@ -273,8 +262,7 @@ ENTRY(EV_TLBProtV)
; -Access Violation : 00_23_(00|01|02|03)_00
; x r w r+w
; -Unaligned Access : 00_23_04_00
- ;
- bbit1 r2, ECR_C_BIT_PROTV_MISALIG_DATA, 4f
+ bbit1 r10, ECR_C_BIT_PROTV_MISALIG_DATA, 4f
;========= (6a) Access Violation Processing ========
bl do_page_fault
@@ -303,9 +291,6 @@ END(EV_TLBProtV)
ENTRY(call_do_page_fault)
EXCEPTION_PROLOGUE
- lr r0, [efa] ; Faulting Data address
- mov r1, sp
- FAKE_RET_FROM_EXCPN
mov blink, ret_from_exception
b do_page_fault
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 54e91df678dd..089f6680518f 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -80,11 +80,6 @@ ENTRY(instr_service)
EXCEPTION_PROLOGUE
- lr r0, [efa]
- mov r1, sp
-
- FAKE_RET_FROM_EXCPN
-
bl do_insterror_or_kprobe
b ret_from_exception
END(instr_service)
@@ -95,16 +90,15 @@ END(instr_service)
ENTRY(EV_MachineCheck)
- EXCEPTION_PROLOGUE
+ EXCEPTION_PROLOGUE_KEEP_AE ; ECR returned in r10
- lr r2, [ecr]
lr r0, [efa]
mov r1, sp
; MC excpetions disable MMU
ARC_MMU_REENABLE r3
- lsr r3, r2, 8
+ lsr r3, r10, 8
bmsk r3, r3, 7
brne r3, ECR_C_MCHK_DUP_TLB, 1f
@@ -129,11 +123,6 @@ ENTRY(EV_PrivilegeV)
EXCEPTION_PROLOGUE
- lr r0, [efa]
- mov r1, sp
-
- FAKE_RET_FROM_EXCPN
-
bl do_privilege_fault
b ret_from_exception
END(EV_PrivilegeV)
@@ -145,11 +134,6 @@ ENTRY(EV_Extension)
EXCEPTION_PROLOGUE
- lr r0, [efa]
- mov r1, sp
-
- FAKE_RET_FROM_EXCPN
-
bl do_extension_fault
b ret_from_exception
END(EV_Extension)
@@ -160,20 +144,19 @@ END(EV_Extension)
; syscall Tracing
; ---------------------------------------------
tracesys:
- ; save EFA in case tracer wants the PC of traced task
- ; using ERET won't work since next-PC has already committed
+ ; safekeep EFA (r12) if syscall tracer wanted PC
+ ; for traps, ERET is pre-commit so points to next-PC
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address
- ; PRE Sys Call Ptrace hook
- mov r0, sp ; pt_regs needed
- bl @syscall_trace_entry
+ ; PRE syscall trace hook
+ mov r0, sp ; pt_regs
+ bl @syscall_trace_enter
; Tracing code now returns the syscall num (orig or modif)
mov r8, r0
; Do the Sys Call as we normally would.
- ; Validate the Sys Call number
cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi tracesys_exit
@@ -190,37 +173,36 @@ tracesys:
ld r6, [sp, PT_r6]
ld r7, [sp, PT_r7]
ld.as r9, [sys_call_table, r8]
- jl [r9] ; Entry into Sys Call Handler
+ jl [r9]
tracesys_exit:
- st r0, [sp, PT_r0] ; sys call return value in pt_regs
+ st r0, [sp, PT_r0]
- ;POST Sys Call Ptrace Hook
+ ; POST syscall trace hook
mov r0, sp ; pt_regs needed
bl @syscall_trace_exit
- b ret_from_exception ; NOT ret_from_system_call at is saves r0 which
- ; we'd done before calling post hook above
+
+ ; don't call ret_from_system_call as it saves r0, already done above
+ b ret_from_exception
; ---------------------------------------------
; Breakpoint TRAP
; ---------------------------------------------
trap_with_param:
mov r0, r12 ; EFA in case ptracer/gdb wants stop_pc
- mov r1, sp
+ mov r1, sp ; pt_regs
- ; Save callee regs in case gdb wants to have a look
- ; SP will grow up by size of CALLEE Reg-File
- ; NOTE: clobbers r12
+ ; save callee regs in case tracer/gdb wants to peek
SAVE_CALLEE_SAVED_USER
- ; save location of saved Callee Regs @ thread_struct->pc
+ ; safekeep ref to callee regs
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
st sp, [r10, THREAD_CALLEE_REG]
- ; Call the trap handler
+ ; call the non syscall trap handler
bl do_non_swi_trap
- ; unwind stack to discard Callee saved Regs
+ ; unwind stack to discard callee regs
DISCARD_CALLEE_SAVED_USER
b ret_from_exception
@@ -232,37 +214,33 @@ trap_with_param:
ENTRY(EV_Trap)
- EXCEPTION_PROLOGUE
+ EXCEPTION_PROLOGUE_KEEP_AE
lr r12, [efa]
FAKE_RET_FROM_EXCPN
- ;============ TRAP 1 :breakpoints
- ; Check ECR for trap with arg (PROLOGUE ensures r10 has ECR)
+ ;============ TRAP N : breakpoints, kprobes etc
bmsk.f 0, r10, 7
bnz trap_with_param
- ;============ TRAP (no param): syscall top level
+ ;============ TRAP 0 (no param): syscall
- ; If syscall tracing ongoing, invoke pre-post-hooks
+ ; syscall tracing ongoing, invoke pre-post-hooks around syscall
GET_CURR_THR_INFO_FLAGS r10
and.f 0, r10, _TIF_SYSCALL_WORK
bnz tracesys ; this never comes back
;============ Normal syscall case
- ; syscall num shd not exceed the total system calls avail
cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi .Lret_from_system_call
- ; Offset into the syscall_table and call handler
ld.as r9,[sys_call_table, r8]
- jl [r9] ; Entry into Sys Call Handler
+ jl [r9]
.Lret_from_system_call:
-
st r0, [sp, PT_r0] ; sys call return value in pt_regs
; fall through to ret_from_exception
@@ -318,7 +296,7 @@ resume_user_mode_begin:
; tracer might call PEEKUSR(CALLEE reg)
;
; NOTE: SP will grow up by size of CALLEE Reg-File
- SAVE_CALLEE_SAVED_USER ; clobbers r12
+ SAVE_CALLEE_SAVED_USER
; save location of saved Callee Regs @ thread_struct->callee
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 5cda19d0aa91..678898757e47 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -108,7 +108,7 @@ static void arcv2_irq_unmask(struct irq_data *data)
write_aux_reg(AUX_IRQ_ENABLE, 1);
}
-void arcv2_irq_enable(struct irq_data *data)
+static void arcv2_irq_enable(struct irq_data *data)
{
/* set default priority */
write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
index 345a0000554c..4f2b5951454f 100644
--- a/arch/arc/kernel/kgdb.c
+++ b/arch/arc/kernel/kgdb.c
@@ -175,7 +175,7 @@ void kgdb_trap(struct pt_regs *regs)
* with trap_s 4 (compiled) breakpoints, continuation needs to
* start after the breakpoint.
*/
- if (regs->ecr_param == 3)
+ if (regs->ecr.param == 3)
instruction_pointer(regs) -= BREAK_INSTR_SIZE;
kgdb_handle_exception(1, SIGTRAP, 0, regs);
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index f9fdb557c263..55373ca0d28b 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -165,8 +165,6 @@ static void mcip_probe_n_setup(void)
IS_AVAIL1(mp.idu, "IDU "),
IS_AVAIL1(mp.dbg, "DEBUG "),
IS_AVAIL1(mp.gfrc, "GFRC"));
-
- cpuinfo_arc700[0].extn.gfrc = mp.gfrc;
}
struct plat_smp_ops plat_smp_ops = {
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 980b71da2f61..186ceab661eb 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -141,7 +141,7 @@ asmlinkage void ret_from_fork(void);
* | unused |
* | |
* ------------------
- * | r25 | <==== top of Stack (thread.ksp)
+ * | r25 | <==== top of Stack (thread_info.ksp)
* ~ ~
* | --to-- | (CALLEE Regs of kernel mode)
* | r13 |
@@ -162,7 +162,6 @@ asmlinkage void ret_from_fork(void);
* | SP |
* | orig_r0 |
* | event/ECR |
- * | user_r25 |
* ------------------ <===== END of PAGE
*/
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
@@ -182,14 +181,14 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
c_callee = ((struct callee_regs *)childksp) - 1;
/*
- * __switch_to() uses thread.ksp to start unwinding stack
+ * __switch_to() uses thread_info.ksp to start unwinding stack
* For kernel threads we don't need to create callee regs, the
* stack layout nevertheless needs to remain the same.
* Also, since __switch_to anyways unwinds callee regs, we use
* this to populate kernel thread entry-pt/args into callee regs,
* so that ret_from_kernel_thread() becomes simpler.
*/
- p->thread.ksp = (unsigned long)c_callee; /* THREAD_KSP */
+ task_thread_info(p)->ksp = (unsigned long)c_callee; /* THREAD_INFO_KSP */
/* __switch_to expects FP(0), BLINK(return addr) at top */
childksp[0] = 0; /* fp */
@@ -243,16 +242,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
*/
c_callee->r25 = task_thread_info(p)->thr_ptr;
-#ifdef CONFIG_ARC_CURR_IN_REG
- /*
- * setup usermode thread pointer #2:
- * however for this special use of r25 in kernel, __switch_to() sets
- * r25 for kernel needs and only in the final return path is usermode
- * r25 setup, from pt_regs->user_r25. So set that up as well
- */
- c_regs->user_r25 = c_callee->r25;
-#endif
-
return 0;
}
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
index 2abdcd9b09e8..e0c233c178b1 100644
--- a/arch/arc/kernel/ptrace.c
+++ b/arch/arc/kernel/ptrace.c
@@ -46,8 +46,7 @@ static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(r0),
REG_OFFSET_NAME(sp),
REG_OFFSET_NAME(orig_r0),
- REG_OFFSET_NAME(event),
- REG_OFFSET_NAME(user_r25),
+ REG_OFFSET_NAME(ecr),
REG_OFFSET_END,
};
@@ -55,9 +54,8 @@ static const struct pt_regs_offset regoffset_table[] = {
static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(orig_r0),
- REG_OFFSET_NAME(event),
+ REG_OFFSET_NAME(ecr),
REG_OFFSET_NAME(bta),
- REG_OFFSET_NAME(user_r25),
REG_OFFSET_NAME(r26),
REG_OFFSET_NAME(fp),
REG_OFFSET_NAME(sp),
@@ -341,7 +339,7 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
-asmlinkage int syscall_trace_entry(struct pt_regs *regs)
+asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
if (ptrace_report_syscall_entry(regs))
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 41f07b3e594e..4dcf8589b708 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -29,6 +29,7 @@
#include <asm/mach_desc.h>
#include <asm/smp.h>
#include <asm/dsp-impl.h>
+#include <soc/arc/mcip.h>
#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))
@@ -43,19 +44,22 @@ const struct machine_desc *machine_desc;
struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
-struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
+struct cpuinfo_arc {
+ int arcver;
+ unsigned int t0:1, t1:1;
+ struct {
+ unsigned long base;
+ unsigned int sz;
+ } iccm, dccm;
+};
+
+#ifdef CONFIG_ISA_ARCV2
-static const struct id_to_str arc_legacy_rel[] = {
+static const struct id_to_str arc_hs_rel[] = {
/* ID.ARCVER, Release */
-#ifdef CONFIG_ISA_ARCOMPACT
- { 0x34, "R4.10"},
- { 0x35, "R4.11"},
-#else
{ 0x51, "R2.0" },
{ 0x52, "R2.1" },
{ 0x53, "R3.0" },
-#endif
- { 0x00, NULL }
};
static const struct id_to_str arc_hs_ver54_rel[] = {
@@ -66,323 +70,296 @@ static const struct id_to_str arc_hs_ver54_rel[] = {
{ 3, "R4.00a"},
{ 0xFF, NULL }
};
+#endif
-static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
+static int
+arcompact_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
- if (is_isa_arcompact()) {
- struct bcr_iccm_arcompact iccm;
- struct bcr_dccm_arcompact dccm;
+ int n = 0;
+#ifdef CONFIG_ISA_ARCOMPACT
+ char *cpu_nm, *isa_nm = "ARCompact";
+ struct bcr_fp_arcompact fpu_sp, fpu_dp;
+ int atomic = 0, be, present;
+ int bpu_full, bpu_cache, bpu_pred;
+ struct bcr_bpu_arcompact bpu;
+ struct bcr_iccm_arcompact iccm;
+ struct bcr_dccm_arcompact dccm;
+ struct bcr_generic isa;
- READ_BCR(ARC_REG_ICCM_BUILD, iccm);
- if (iccm.ver) {
- cpu->iccm.sz = 4096 << iccm.sz; /* 8K to 512K */
- cpu->iccm.base_addr = iccm.base << 16;
- }
+ READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
- READ_BCR(ARC_REG_DCCM_BUILD, dccm);
- if (dccm.ver) {
- unsigned long base;
- cpu->dccm.sz = 2048 << dccm.sz; /* 2K to 256K */
+ if (!isa.ver) /* ISA BCR absent, use Kconfig info */
+ atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
+ else {
+ /* ARC700_BUILD only has 2 bits of isa info */
+ atomic = isa.info & 1;
+ }
- base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
- cpu->dccm.base_addr = base & ~0xF;
- }
- } else {
- struct bcr_iccm_arcv2 iccm;
- struct bcr_dccm_arcv2 dccm;
- unsigned long region;
-
- READ_BCR(ARC_REG_ICCM_BUILD, iccm);
- if (iccm.ver) {
- cpu->iccm.sz = 256 << iccm.sz00; /* 512B to 16M */
- if (iccm.sz00 == 0xF && iccm.sz01 > 0)
- cpu->iccm.sz <<= iccm.sz01;
-
- region = read_aux_reg(ARC_REG_AUX_ICCM);
- cpu->iccm.base_addr = region & 0xF0000000;
- }
+ be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
- READ_BCR(ARC_REG_DCCM_BUILD, dccm);
- if (dccm.ver) {
- cpu->dccm.sz = 256 << dccm.sz0;
- if (dccm.sz0 == 0xF && dccm.sz1 > 0)
- cpu->dccm.sz <<= dccm.sz1;
+ if (info->arcver < 0x34)
+ cpu_nm = "ARC750";
+ else
+ cpu_nm = "ARC770";
- region = read_aux_reg(ARC_REG_AUX_DCCM);
- cpu->dccm.base_addr = region & 0xF0000000;
- }
- }
-}
+ n += scnprintf(buf + n, len - n, "processor [%d]\t: %s (%s ISA) %s%s%s\n",
+ c, cpu_nm, isa_nm,
+ IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+ IS_AVAIL1(be, "[Big-Endian]"));
-static void decode_arc_core(struct cpuinfo_arc *cpu)
-{
- struct bcr_uarch_build_arcv2 uarch;
- const struct id_to_str *tbl;
-
- if (cpu->core.family < 0x54) { /* includes arc700 */
+ READ_BCR(ARC_REG_FP_BCR, fpu_sp);
+ READ_BCR(ARC_REG_DPFP_BCR, fpu_dp);
- for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) {
- if (cpu->core.family == tbl->id) {
- cpu->release = tbl->str;
- break;
- }
- }
+ if (fpu_sp.ver | fpu_dp.ver)
+ n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
+ IS_AVAIL1(fpu_sp.ver, "SP "),
+ IS_AVAIL1(fpu_dp.ver, "DP "));
- if (is_isa_arcompact())
- cpu->name = "ARC700";
- else if (tbl->str)
- cpu->name = "HS38";
- else
- cpu->name = cpu->release = "Unknown";
+ READ_BCR(ARC_REG_BPU_BCR, bpu);
+ bpu_full = bpu.fam ? 1 : 0;
+ bpu_cache = 256 << (bpu.ent - 1);
+ bpu_pred = 256 << (bpu.ent - 1);
- return;
+ n += scnprintf(buf + n, len - n,
+ "BPU\t\t: %s%s match, cache:%d, Predict Table:%d\n",
+ IS_AVAIL1(bpu_full, "full"),
+ IS_AVAIL1(!bpu_full, "partial"),
+ bpu_cache, bpu_pred);
+
+ READ_BCR(ARC_REG_ICCM_BUILD, iccm);
+ if (iccm.ver) {
+ info->iccm.sz = 4096 << iccm.sz; /* 8K to 512K */
+ info->iccm.base = iccm.base << 16;
}
- /*
- * Initial HS cores bumped AUX IDENTITY.ARCVER for each release until
- * ARCVER 0x54 which introduced AUX MICRO_ARCH_BUILD and subsequent
- * releases only update it.
- */
- READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
-
- if (uarch.prod == 4) {
- cpu->name = "HS48";
- cpu->extn.dual = 1;
+ READ_BCR(ARC_REG_DCCM_BUILD, dccm);
+ if (dccm.ver) {
+ unsigned long base;
+ info->dccm.sz = 2048 << dccm.sz; /* 2K to 256K */
- } else {
- cpu->name = "HS38";
+ base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
+ info->dccm.base = base & ~0xF;
}
- for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) {
- if (uarch.maj == tbl->id) {
- cpu->release = tbl->str;
- break;
- }
- }
+ /* ARCompact ISA specific sanity checks */
+ present = fpu_dp.ver; /* SP has no arch visible regs */
+ CHK_OPT_STRICT(CONFIG_ARC_FPU_SAVE_RESTORE, present);
+#endif
+ return n;
+
}
-static void read_arc_build_cfg_regs(void)
+static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
- struct bcr_timer timer;
- struct bcr_generic bcr;
- struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+ int n = 0;
+#ifdef CONFIG_ISA_ARCV2
+ const char *release, *cpu_nm, *isa_nm = "ARCv2";
+ int dual_issue = 0, dual_enb = 0, mpy_opt, present;
+ int bpu_full, bpu_cache, bpu_pred, bpu_ret_stk;
+ char mpy_nm[16], lpb_nm[32];
struct bcr_isa_arcv2 isa;
- struct bcr_actionpoint ap;
-
- FIX_PTR(cpu);
+ struct bcr_mpy mpy;
+ struct bcr_fp_arcv2 fpu;
+ struct bcr_bpu_arcv2 bpu;
+ struct bcr_lpb lpb;
+ struct bcr_iccm_arcv2 iccm;
+ struct bcr_dccm_arcv2 dccm;
+ struct bcr_erp erp;
- READ_BCR(AUX_IDENTITY, cpu->core);
- decode_arc_core(cpu);
-
- READ_BCR(ARC_REG_TIMERS_BCR, timer);
- cpu->extn.timer0 = timer.t0;
- cpu->extn.timer1 = timer.t1;
- cpu->extn.rtc = timer.rtc;
-
- cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
+ /*
+ * Initial HS cores bumped AUX IDENTITY.ARCVER for each release until
+ * ARCVER 0x54 which introduced AUX MICRO_ARCH_BUILD and subsequent
+ * releases only update it.
+ */
- READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
+ cpu_nm = "HS38";
- /* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
- read_decode_ccm_bcr(cpu);
+ if (info->arcver > 0x50 && info->arcver <= 0x53) {
+ release = arc_hs_rel[info->arcver - 0x51].str;
+ } else {
+ const struct id_to_str *tbl;
+ struct bcr_uarch_build uarch;
- read_decode_mmu_bcr();
- read_decode_cache_bcr();
+ READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
- if (is_isa_arcompact()) {
- struct bcr_fp_arcompact sp, dp;
- struct bcr_bpu_arcompact bpu;
-
- READ_BCR(ARC_REG_FP_BCR, sp);
- READ_BCR(ARC_REG_DPFP_BCR, dp);
- cpu->extn.fpu_sp = sp.ver ? 1 : 0;
- cpu->extn.fpu_dp = dp.ver ? 1 : 0;
-
- READ_BCR(ARC_REG_BPU_BCR, bpu);
- cpu->bpu.ver = bpu.ver;
- cpu->bpu.full = bpu.fam ? 1 : 0;
- if (bpu.ent) {
- cpu->bpu.num_cache = 256 << (bpu.ent - 1);
- cpu->bpu.num_pred = 256 << (bpu.ent - 1);
+ for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) {
+ if (uarch.maj == tbl->id) {
+ release = tbl->str;
+ break;
+ }
}
- } else {
- struct bcr_fp_arcv2 spdp;
- struct bcr_bpu_arcv2 bpu;
-
- READ_BCR(ARC_REG_FP_V2_BCR, spdp);
- cpu->extn.fpu_sp = spdp.sp ? 1 : 0;
- cpu->extn.fpu_dp = spdp.dp ? 1 : 0;
-
- READ_BCR(ARC_REG_BPU_BCR, bpu);
- cpu->bpu.ver = bpu.ver;
- cpu->bpu.full = bpu.ft;
- cpu->bpu.num_cache = 256 << bpu.bce;
- cpu->bpu.num_pred = 2048 << bpu.pte;
- cpu->bpu.ret_stk = 4 << bpu.rse;
-
- /* if dual issue hardware, is it enabled ? */
- if (cpu->extn.dual) {
+ if (uarch.prod == 4) {
unsigned int exec_ctrl;
+ cpu_nm = "HS48";
+ dual_issue = 1;
+ /* if dual issue hardware, is it enabled ? */
READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
- cpu->extn.dual_enb = !(exec_ctrl & 1);
+ dual_enb = !(exec_ctrl & 1);
}
}
- READ_BCR(ARC_REG_AP_BCR, ap);
- if (ap.ver) {
- cpu->extn.ap_num = 2 << ap.num;
- cpu->extn.ap_full = !ap.min;
- }
-
- READ_BCR(ARC_REG_SMART_BCR, bcr);
- cpu->extn.smart = bcr.ver ? 1 : 0;
-
- READ_BCR(ARC_REG_RTT_BCR, bcr);
- cpu->extn.rtt = bcr.ver ? 1 : 0;
-
READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
- /* some hacks for lack of feature BCR info in old ARC700 cores */
- if (is_isa_arcompact()) {
- if (!isa.ver) /* ISA BCR absent, use Kconfig info */
- cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
- else {
- /* ARC700_BUILD only has 2 bits of isa info */
- struct bcr_generic bcr = *(struct bcr_generic *)&isa;
- cpu->isa.atomic = bcr.info & 1;
- }
-
- cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
+ n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
+ c, cpu_nm, release, isa_nm,
+ IS_AVAIL1(isa.be, "[Big-Endian]"),
+ IS_AVAIL3(dual_issue, dual_enb, " Dual-Issue "));
+
+ READ_BCR(ARC_REG_MPY_BCR, mpy);
+ mpy_opt = 2; /* stock MPY/MPYH */
+ if (mpy.dsp) /* OPT 7-9 */
+ mpy_opt = mpy.dsp + 6;
+
+ scnprintf(mpy_nm, 16, "mpy[opt %d] ", mpy_opt);
+
+ READ_BCR(ARC_REG_FP_V2_BCR, fpu);
+
+ n += scnprintf(buf + n, len - n, "ISA Extn\t: %s%s%s%s%s%s%s%s%s%s%s\n",
+ IS_AVAIL2(isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+ IS_AVAIL2(isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
+ IS_AVAIL2(isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS),
+ IS_AVAIL1(mpy.ver, mpy_nm),
+ IS_AVAIL1(isa.div_rem, "div_rem "),
+ IS_AVAIL1((fpu.sp | fpu.dp), " FPU:"),
+ IS_AVAIL1(fpu.sp, " sp"),
+ IS_AVAIL1(fpu.dp, " dp"));
+
+ READ_BCR(ARC_REG_BPU_BCR, bpu);
+ bpu_full = bpu.ft;
+ bpu_cache = 256 << bpu.bce;
+ bpu_pred = 2048 << bpu.pte;
+ bpu_ret_stk = 4 << bpu.rse;
+
+ READ_BCR(ARC_REG_LPB_BUILD, lpb);
+ if (lpb.ver) {
+ unsigned int ctl;
+ ctl = read_aux_reg(ARC_REG_LPB_CTRL);
+
+ scnprintf(lpb_nm, sizeof(lpb_nm), " Loop Buffer:%d %s",
+ lpb.entries, IS_DISABLED_RUN(!ctl));
+ }
- /* there's no direct way to distinguish 750 vs. 770 */
- if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
- cpu->name = "ARC750";
- } else {
- cpu->isa = isa;
+ n += scnprintf(buf + n, len - n,
+ "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d%s\n",
+ IS_AVAIL1(bpu_full, "full"),
+ IS_AVAIL1(!bpu_full, "partial"),
+ bpu_cache, bpu_pred, bpu_ret_stk,
+ lpb_nm);
+
+ READ_BCR(ARC_REG_ICCM_BUILD, iccm);
+ if (iccm.ver) {
+ unsigned long base;
+ info->iccm.sz = 256 << iccm.sz00; /* 512B to 16M */
+ if (iccm.sz00 == 0xF && iccm.sz01 > 0)
+ info->iccm.sz <<= iccm.sz01;
+ base = read_aux_reg(ARC_REG_AUX_ICCM);
+ info->iccm.base = base & 0xF0000000;
}
-}
-static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
-{
- struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
- struct bcr_identity *core = &cpu->core;
- char mpy_opt[16];
- int n = 0;
+ READ_BCR(ARC_REG_DCCM_BUILD, dccm);
+ if (dccm.ver) {
+ unsigned long base;
+ info->dccm.sz = 256 << dccm.sz0;
+ if (dccm.sz0 == 0xF && dccm.sz1 > 0)
+ info->dccm.sz <<= dccm.sz1;
+ base = read_aux_reg(ARC_REG_AUX_DCCM);
+ info->dccm.base = base & 0xF0000000;
+ }
- FIX_PTR(cpu);
+ /* Error Protection: ECC/Parity */
+ READ_BCR(ARC_REG_ERP_BUILD, erp);
+ if (erp.ver) {
+ struct ctl_erp ctl;
+ READ_BCR(ARC_REG_ERP_CTRL, ctl);
+ /* inverted bits: 0 means enabled */
+ n += scnprintf(buf + n, len - n, "Extn [ECC]\t: %s%s%s%s%s%s\n",
+ IS_AVAIL3(erp.ic, !ctl.dpi, "IC "),
+ IS_AVAIL3(erp.dc, !ctl.dpd, "DC "),
+ IS_AVAIL3(erp.mmu, !ctl.mpd, "MMU "));
+ }
- n += scnprintf(buf + n, len - n,
- "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
- core->family, core->cpu_id, core->chip_id);
+ /* ARCv2 ISA specific sanity checks */
+ present = fpu.sp | fpu.dp | mpy.dsp; /* DSP and/or FPU */
+ CHK_OPT_STRICT(CONFIG_ARC_HAS_ACCL_REGS, present);
- n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
- cpu_id, cpu->name, cpu->release,
- is_isa_arcompact() ? "ARCompact" : "ARCv2",
- IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
- IS_AVAIL3(cpu->extn.dual, cpu->extn.dual_enb, " Dual-Issue "));
+ dsp_config_check();
+#endif
+ return n;
+}
- n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
- IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
- IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
- IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
- IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));
+static char *arc_cpu_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
+{
+ struct bcr_identity ident;
+ struct bcr_timer timer;
+ struct bcr_generic bcr;
+ struct mcip_bcr mp;
+ struct bcr_actionpoint ap;
+ unsigned long vec_base;
+ int ap_num, ap_full, smart, rtt, n;
- if (cpu->extn_mpy.ver) {
- if (is_isa_arcompact()) {
- scnprintf(mpy_opt, 16, "mpy");
- } else {
+ memset(info, 0, sizeof(struct cpuinfo_arc));
- int opt = 2; /* stock MPY/MPYH */
+ READ_BCR(AUX_IDENTITY, ident);
+ info->arcver = ident.family;
- if (cpu->extn_mpy.dsp) /* OPT 7-9 */
- opt = cpu->extn_mpy.dsp + 6;
+ n = scnprintf(buf, len,
+ "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
+ ident.family, ident.cpu_id, ident.chip_id);
- scnprintf(mpy_opt, 16, "mpy[opt %d] ", opt);
- }
+ if (is_isa_arcompact()) {
+ n += arcompact_mumbojumbo(c, info, buf + n, len - n);
+ } else if (is_isa_arcv2()) {
+ n += arcv2_mumbojumbo(c, info, buf + n, len - n);
}
- n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
- IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
- IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
- IS_AVAIL2(cpu->isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS),
- IS_AVAIL1(cpu->extn_mpy.ver, mpy_opt),
- IS_AVAIL1(cpu->isa.div_rem, "div_rem "));
+ n += arc_mmu_mumbojumbo(c, buf + n, len - n);
+ n += arc_cache_mumbojumbo(c, buf + n, len - n);
- if (cpu->bpu.ver) {
- n += scnprintf(buf + n, len - n,
- "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d",
- IS_AVAIL1(cpu->bpu.full, "full"),
- IS_AVAIL1(!cpu->bpu.full, "partial"),
- cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk);
-
- if (is_isa_arcv2()) {
- struct bcr_lpb lpb;
-
- READ_BCR(ARC_REG_LPB_BUILD, lpb);
- if (lpb.ver) {
- unsigned int ctl;
- ctl = read_aux_reg(ARC_REG_LPB_CTRL);
-
- n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
- lpb.entries,
- IS_DISABLED_RUN(!ctl));
- }
- }
- n += scnprintf(buf + n, len - n, "\n");
- }
+ READ_BCR(ARC_REG_TIMERS_BCR, timer);
+ info->t0 = timer.t0;
+ info->t1 = timer.t1;
- return buf;
-}
+ READ_BCR(ARC_REG_MCIP_BCR, mp);
+ vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
-static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
-{
- int n = 0;
- struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
+ n += scnprintf(buf + n, len - n,
+ "Timers\t\t: %s%s%s%s%s%s\nVector Table\t: %#lx\n",
+ IS_AVAIL1(timer.t0, "Timer0 "),
+ IS_AVAIL1(timer.t1, "Timer1 "),
+ IS_AVAIL2(timer.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
+ IS_AVAIL2(mp.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
+ vec_base);
- FIX_PTR(cpu);
+ READ_BCR(ARC_REG_AP_BCR, ap);
+ if (ap.ver) {
+ ap_num = 2 << ap.num;
+ ap_full = !ap.min;
+ }
- n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);
+ READ_BCR(ARC_REG_SMART_BCR, bcr);
+ smart = bcr.ver ? 1 : 0;
- if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
- n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
- IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
- IS_AVAIL1(cpu->extn.fpu_dp, "DP "));
+ READ_BCR(ARC_REG_RTT_BCR, bcr);
+ rtt = bcr.ver ? 1 : 0;
- if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) {
+ if (ap.ver | smart | rtt) {
n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
- IS_AVAIL1(cpu->extn.smart, "smaRT "),
- IS_AVAIL1(cpu->extn.rtt, "RTT "));
- if (cpu->extn.ap_num) {
+ IS_AVAIL1(smart, "smaRT "),
+ IS_AVAIL1(rtt, "RTT "));
+ if (ap.ver) {
n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
- cpu->extn.ap_num,
- cpu->extn.ap_full ? "full":"min");
+ ap_num,
+ ap_full ? "full":"min");
}
n += scnprintf(buf + n, len - n, "\n");
}
- if (cpu->dccm.sz || cpu->iccm.sz)
- n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",
- cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
- cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
-
- if (is_isa_arcv2()) {
-
- /* Error Protection: ECC/Parity */
- struct bcr_erp erp;
- READ_BCR(ARC_REG_ERP_BUILD, erp);
-
- if (erp.ver) {
- struct ctl_erp ctl;
- READ_BCR(ARC_REG_ERP_CTRL, ctl);
-
- /* inverted bits: 0 means enabled */
- n += scnprintf(buf + n, len - n, "Extn [ECC]\t: %s%s%s%s%s%s\n",
- IS_AVAIL3(erp.ic, !ctl.dpi, "IC "),
- IS_AVAIL3(erp.dc, !ctl.dpd, "DC "),
- IS_AVAIL3(erp.mmu, !ctl.mpd, "MMU "));
- }
- }
+ if (info->dccm.sz || info->iccm.sz)
+ n += scnprintf(buf + n, len - n,
+ "Extn [CCM]\t: DCCM @ %lx, %d KB / ICCM: @ %lx, %d KB\n",
+ info->dccm.base, TO_KB(info->dccm.sz),
+ info->iccm.base, TO_KB(info->iccm.sz));
return buf;
}
@@ -401,15 +378,15 @@ void chk_opt_weak(char *opt_name, bool hw_exists, bool opt_ena)
panic("Disable %s, hardware NOT present\n", opt_name);
}
-static void arc_chk_core_config(void)
+/*
+ * ISA agnostic sanity checks
+ */
+static void arc_chk_core_config(struct cpuinfo_arc *info)
{
- struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
- int present = 0;
-
- if (!cpu->extn.timer0)
+ if (!info->t0)
panic("Timer0 is not present!\n");
- if (!cpu->extn.timer1)
+ if (!info->t1)
panic("Timer1 is not present!\n");
#ifdef CONFIG_ARC_HAS_DCCM
@@ -417,35 +394,17 @@ static void arc_chk_core_config(void)
* DCCM can be arbit placed in hardware.
* Make sure it's placement/sz matches what Linux is built with
*/
- if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
+ if ((unsigned int)__arc_dccm_base != info->dccm.base)
panic("Linux built with incorrect DCCM Base address\n");
- if (CONFIG_ARC_DCCM_SZ * SZ_1K != cpu->dccm.sz)
+ if (CONFIG_ARC_DCCM_SZ * SZ_1K != info->dccm.sz)
panic("Linux built with incorrect DCCM Size\n");
#endif
#ifdef CONFIG_ARC_HAS_ICCM
- if (CONFIG_ARC_ICCM_SZ * SZ_1K != cpu->iccm.sz)
+ if (CONFIG_ARC_ICCM_SZ * SZ_1K != info->iccm.sz)
panic("Linux built with incorrect ICCM Size\n");
#endif
-
- /*
- * FP hardware/software config sanity
- * -If hardware present, kernel needs to save/restore FPU state
- * -If not, it will crash trying to save/restore the non-existant regs
- */
-
- if (is_isa_arcompact()) {
- /* only DPDP checked since SP has no arch visible regs */
- present = cpu->extn.fpu_dp;
- CHK_OPT_STRICT(CONFIG_ARC_FPU_SAVE_RESTORE, present);
- } else {
- /* Accumulator Low:High pair (r58:59) present if DSP MPY or FPU */
- present = cpu->extn_mpy.dsp | cpu->extn.fpu_sp | cpu->extn.fpu_dp;
- CHK_OPT_STRICT(CONFIG_ARC_HAS_ACCL_REGS, present);
-
- dsp_config_check();
- }
}
/*
@@ -456,21 +415,19 @@ static void arc_chk_core_config(void)
void setup_processor(void)
{
+ struct cpuinfo_arc info;
+ int c = smp_processor_id();
char str[512];
- int cpu_id = smp_processor_id();
- read_arc_build_cfg_regs();
- arc_init_IRQ();
+ pr_info("%s", arc_cpu_mumbojumbo(c, &info, str, sizeof(str)));
+ pr_info("%s", arc_platform_smp_cpuinfo());
- pr_info("%s", arc_cpu_mumbojumbo(cpu_id, str, sizeof(str)));
+ arc_chk_core_config(&info);
+ arc_init_IRQ();
arc_mmu_init();
arc_cache_init();
- pr_info("%s", arc_extn_mumbojumbo(cpu_id, str, sizeof(str)));
- pr_info("%s", arc_platform_smp_cpuinfo());
-
- arc_chk_core_config();
}
static inline bool uboot_arg_invalid(unsigned long addr)
@@ -617,6 +574,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
char *str;
int cpu_id = ptr_to_cpu(v);
struct device *cpu_dev = get_cpu_device(cpu_id);
+ struct cpuinfo_arc info;
struct clk *cpu_clk;
unsigned long freq = 0;
@@ -629,7 +587,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (!str)
goto done;
- seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
+ seq_printf(m, arc_cpu_mumbojumbo(cpu_id, &info, str, PAGE_SIZE));
cpu_clk = clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
@@ -646,9 +604,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100);
- seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE));
- seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE));
- seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE));
seq_printf(m, arc_platform_smp_cpuinfo());
free_page((unsigned long)str);
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index 3c1590c27fae..0b3bb529d246 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -53,6 +53,7 @@
#include <linux/sched/task_stack.h>
#include <asm/ucontext.h>
+#include <asm/entry.h>
struct rt_sigframe {
struct siginfo info;
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 409cfa4675b4..8d9b188caa27 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -23,9 +23,10 @@
#include <linux/export.h>
#include <linux/of_fdt.h>
-#include <asm/processor.h>
-#include <asm/setup.h>
#include <asm/mach_desc.h>
+#include <asm/setup.h>
+#include <asm/smp.h>
+#include <asm/processor.h>
#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
@@ -351,7 +352,7 @@ static inline int __do_IPI(unsigned long msg)
* arch-common ISR to handle for inter-processor interrupts
* Has hooks for platform specific IPI
*/
-irqreturn_t do_IPI(int irq, void *dev_id)
+static irqreturn_t do_IPI(int irq, void *dev_id)
{
unsigned long pending;
unsigned long __maybe_unused copy;
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index 5372dc04e784..ea99c066ef25 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -29,6 +29,7 @@
#include <asm/arcregs.h>
#include <asm/unwind.h>
+#include <asm/stacktrace.h>
#include <asm/switch_to.h>
/*-------------------------------------------------------------------------
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
index 6b83e3f2b41c..9b9570b79362 100644
--- a/arch/arc/kernel/traps.c
+++ b/arch/arc/kernel/traps.c
@@ -16,6 +16,7 @@
#include <linux/ptrace.h>
#include <linux/kprobes.h>
#include <linux/kgdb.h>
+#include <asm/entry.h>
#include <asm/setup.h>
#include <asm/unaligned.h>
#include <asm/kprobes.h>
@@ -109,9 +110,7 @@ void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
*/
void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
{
- unsigned int param = regs->ecr_param;
-
- switch (param) {
+ switch (regs->ecr.param) {
case 1:
trap_is_brkpt(address, regs);
break;
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 7654c2e42dc0..d5b3ed2c58f5 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -115,8 +115,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
/* For Data fault, this is data address not instruction addr */
address = current->thread.fault_address;
- vec = regs->ecr_vec;
- cause_code = regs->ecr_cause;
+ vec = regs->ecr.vec;
+ cause_code = regs->ecr.cause;
/* For DTLB Miss or ProtV, display the memory involved too */
if (vec == ECR_V_DTLB_MISS) {
@@ -154,7 +154,7 @@ static void show_ecr_verbose(struct pt_regs *regs)
pr_cont("Misaligned r/w from 0x%08lx\n", address);
#endif
} else if (vec == ECR_V_TRAP) {
- if (regs->ecr_param == 5)
+ if (regs->ecr.param == 5)
pr_cont("gcc generated __builtin_trap\n");
} else {
pr_cont("Check Programmer's Manual\n");
@@ -184,9 +184,10 @@ void show_regs(struct pt_regs *regs)
if (user_mode(regs))
show_faulting_vma(regs->ret); /* faulting code, not data */
- pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\nSTAT: 0x%08lx",
- regs->event, current->thread.fault_address, regs->ret,
- regs->status32);
+ pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\n",
+ regs->ecr.full, current->thread.fault_address, regs->ret);
+
+ pr_info("STAT32: 0x%08lx", regs->status32);
#define STS_BIT(r, bit) r->status32 & STATUS_##bit##_MASK ? #bit" " : ""
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index d2e09fece5bc..d0a5cec4cdca 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -36,12 +36,13 @@
#endif
ENTRY_CFI(memset)
- PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
mov.f 0, r2
;;; if size is zero
jz.d [blink]
mov r3, r0 ; don't clobber ret val
+ PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
+
;;; if length < 8
brls.d.nt r2, 8, .Lsmallchunk
mov.f lp_count,r2
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 3c16ee942a5c..f7e05c146637 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -28,6 +28,10 @@ int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
+static struct cpuinfo_arc_cache {
+ unsigned int sz_k, line_len, colors;
+} ic_info, dc_info, slc_info;
+
void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
unsigned long sz, const int op, const int full_page);
@@ -35,78 +39,24 @@ void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
-char *arc_cache_mumbojumbo(int c, char *buf, int len)
-{
- int n = 0;
- struct cpuinfo_arc_cache *p;
-
-#define PR_CACHE(p, cfg, str) \
- if (!(p)->line_len) \
- n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
- else \
- n += scnprintf(buf + n, len - n, \
- str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", \
- (p)->sz_k, (p)->assoc, (p)->line_len, \
- (p)->vipt ? "VIPT" : "PIPT", \
- (p)->alias ? " aliasing" : "", \
- IS_USED_CFG(cfg));
-
- PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
- PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
-
- p = &cpuinfo_arc700[c].slc;
- if (p->line_len)
- n += scnprintf(buf + n, len - n,
- "SLC\t\t: %uK, %uB Line%s\n",
- p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
-
- n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
- perip_base,
- IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
-
- return buf;
-}
-
-/*
- * Read the Cache Build Confuration Registers, Decode them and save into
- * the cpuinfo structure for later use.
- * No Validation done here, simply read/convert the BCRs
- */
-static void read_decode_cache_bcr_arcv2(int cpu)
+static int read_decode_cache_bcr_arcv2(int c, char *buf, int len)
{
- struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
+ struct cpuinfo_arc_cache *p_slc = &slc_info;
+ struct bcr_identity ident;
struct bcr_generic sbcr;
-
- struct bcr_slc_cfg {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad:24, way:2, lsz:2, sz:4;
-#else
- unsigned int sz:4, lsz:2, way:2, pad:24;
-#endif
- } slc_cfg;
-
- struct bcr_clust_cfg {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
-#else
- unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
-#endif
- } cbcr;
-
- struct bcr_volatile {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int start:4, limit:4, pad:22, order:1, disable:1;
-#else
- unsigned int disable:1, order:1, pad:22, limit:4, start:4;
-#endif
- } vol;
-
+ struct bcr_clust_cfg cbcr;
+ struct bcr_volatile vol;
+ int n = 0;
READ_BCR(ARC_REG_SLC_BCR, sbcr);
if (sbcr.ver) {
+ struct bcr_slc_cfg slc_cfg;
READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
p_slc->sz_k = 128 << slc_cfg.sz;
l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
+ n += scnprintf(buf + n, len - n,
+ "SLC\t\t: %uK, %uB Line%s\n",
+ p_slc->sz_k, p_slc->line_len, IS_USED_RUN(slc_enable));
}
READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
@@ -129,70 +79,83 @@ static void read_decode_cache_bcr_arcv2(int cpu)
ioc_enable = 0;
}
+ READ_BCR(AUX_IDENTITY, ident);
+
/* HS 2.0 didn't have AUX_VOL */
- if (cpuinfo_arc700[cpu].core.family > 0x51) {
+ if (ident.family > 0x51) {
READ_BCR(AUX_VOL, vol);
perip_base = vol.start << 28;
/* HS 3.0 has limit and strict-ordering fields */
- if (cpuinfo_arc700[cpu].core.family > 0x52)
+ if (ident.family > 0x52)
perip_end = (vol.limit << 28) - 1;
}
+
+ n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
+ perip_base,
+ IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
+
+ return n;
}
-void read_decode_cache_bcr(void)
+int arc_cache_mumbojumbo(int c, char *buf, int len)
{
- struct cpuinfo_arc_cache *p_ic, *p_dc;
- unsigned int cpu = smp_processor_id();
- struct bcr_cache {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
-#else
- unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
-#endif
- } ibcr, dbcr;
+ struct cpuinfo_arc_cache *p_ic = &ic_info, *p_dc = &dc_info;
+ struct bcr_cache ibcr, dbcr;
+ int vipt, assoc;
+ int n = 0;
- p_ic = &cpuinfo_arc700[cpu].icache;
READ_BCR(ARC_REG_IC_BCR, ibcr);
-
if (!ibcr.ver)
goto dc_chk;
- if (ibcr.ver <= 3) {
+ if (is_isa_arcompact() && (ibcr.ver <= 3)) {
BUG_ON(ibcr.config != 3);
- p_ic->assoc = 2; /* Fixed to 2w set assoc */
- } else if (ibcr.ver >= 4) {
- p_ic->assoc = 1 << ibcr.config; /* 1,2,4,8 */
+ assoc = 2; /* Fixed to 2w set assoc */
+ } else if (is_isa_arcv2() && (ibcr.ver >= 4)) {
+ assoc = 1 << ibcr.config; /* 1,2,4,8 */
}
p_ic->line_len = 8 << ibcr.line_len;
p_ic->sz_k = 1 << (ibcr.sz - 1);
- p_ic->vipt = 1;
- p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
+ p_ic->colors = p_ic->sz_k/assoc/TO_KB(PAGE_SIZE);
+
+ n += scnprintf(buf + n, len - n,
+ "I-Cache\t\t: %uK, %dway/set, %uB Line, VIPT%s%s\n",
+ p_ic->sz_k, assoc, p_ic->line_len,
+ p_ic->colors > 1 ? " aliasing" : "",
+ IS_USED_CFG(CONFIG_ARC_HAS_ICACHE));
dc_chk:
- p_dc = &cpuinfo_arc700[cpu].dcache;
READ_BCR(ARC_REG_DC_BCR, dbcr);
-
if (!dbcr.ver)
goto slc_chk;
- if (dbcr.ver <= 3) {
+ if (is_isa_arcompact() && (dbcr.ver <= 3)) {
BUG_ON(dbcr.config != 2);
- p_dc->assoc = 4; /* Fixed to 4w set assoc */
- p_dc->vipt = 1;
- p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
- } else if (dbcr.ver >= 4) {
- p_dc->assoc = 1 << dbcr.config; /* 1,2,4,8 */
- p_dc->vipt = 0;
- p_dc->alias = 0; /* PIPT so can't VIPT alias */
+ vipt = 1;
+ assoc = 4; /* Fixed to 4w set assoc */
+ p_dc->colors = p_dc->sz_k/assoc/TO_KB(PAGE_SIZE);
+ } else if (is_isa_arcv2() && (dbcr.ver >= 4)) {
+ vipt = 0;
+ assoc = 1 << dbcr.config; /* 1,2,4,8 */
+ p_dc->colors = 1; /* PIPT so can't VIPT alias */
}
p_dc->line_len = 16 << dbcr.line_len;
p_dc->sz_k = 1 << (dbcr.sz - 1);
+ n += scnprintf(buf + n, len - n,
+ "D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",
+ p_dc->sz_k, assoc, p_dc->line_len,
+ vipt ? "VIPT" : "PIPT",
+ p_dc->colors > 1 ? " aliasing" : "",
+ IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));
+
slc_chk:
if (is_isa_arcv2())
- read_decode_cache_bcr_arcv2(cpu);
+ n += read_decode_cache_bcr_arcv2(c, buf + n, len - n);
+
+ return n;
}
/*
@@ -581,7 +544,7 @@ static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
#endif /* CONFIG_ARC_HAS_ICACHE */
-noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
+static noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
/*
@@ -644,7 +607,7 @@ noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
#endif
}
-noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
+static __maybe_unused noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
/*
@@ -1082,7 +1045,7 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
* 3. All Caches need to be disabled when setting up IOC to elide any in-flight
* Coherency transactions
*/
-noinline void __init arc_ioc_setup(void)
+static noinline void __init arc_ioc_setup(void)
{
unsigned int ioc_base, mem_sz;
@@ -1144,12 +1107,10 @@ noinline void __init arc_ioc_setup(void)
* one core suffices for all
* - IOC setup / dma callbacks only need to be done once
*/
-void __init arc_cache_init_master(void)
+static noinline void __init arc_cache_init_master(void)
{
- unsigned int __maybe_unused cpu = smp_processor_id();
-
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
- struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
+ struct cpuinfo_arc_cache *ic = &ic_info;
if (!ic->line_len)
panic("cache support enabled but non-existent cache\n");
@@ -1162,14 +1123,14 @@ void __init arc_cache_init_master(void)
* In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
* pair to provide vaddr/paddr respectively, just as in MMU v3
*/
- if (is_isa_arcv2() && ic->alias)
+ if (is_isa_arcv2() && ic->colors > 1)
_cache_line_loop_ic_fn = __cache_line_loop_v3;
else
_cache_line_loop_ic_fn = __cache_line_loop;
}
if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
- struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
+ struct cpuinfo_arc_cache *dc = &dc_info;
if (!dc->line_len)
panic("cache support enabled but non-existent cache\n");
@@ -1181,14 +1142,13 @@ void __init arc_cache_init_master(void)
/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
if (is_isa_arcompact()) {
int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
- int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);
- if (dc->alias) {
+ if (dc->colors > 1) {
if (!handled)
panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
- if (CACHE_COLORS_NUM != num_colors)
+ if (CACHE_COLORS_NUM != dc->colors)
panic("CACHE_COLORS_NUM not optimized for config\n");
- } else if (!dc->alias && handled) {
+ } else if (handled && dc->colors == 1) {
panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
}
}
@@ -1231,9 +1191,6 @@ void __init arc_cache_init_master(void)
void __ref arc_cache_init(void)
{
unsigned int __maybe_unused cpu = smp_processor_id();
- char str[256];
-
- pr_info("%s", arc_cache_mumbojumbo(0, str, sizeof(str)));
if (!cpu)
arc_cache_init_master();
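In the cache.c rework above, the old vipt/alias booleans collapse into a single colors count, computed as size-in-KB / ways / page-size-in-KB; any value above 1 means a VIPT cache can hold the same physical line under several virtual indexes, i.e. it can alias. A stand-alone sketch of that arithmetic with made-up geometries (not tied to any particular ARC configuration):

#include <stdio.h>

/*
 * colors = per-way cache footprint measured in pages; more than one color
 * means a VIPT cache can alias.
 */
static unsigned int cache_colors(unsigned int sz_k, unsigned int assoc,
				 unsigned int page_k)
{
	return sz_k / assoc / page_k;
}

int main(void)
{
	/* 32K, 2-way, 8K pages: 32/2/8 = 2 colors -> aliasing possible */
	printf("%u\n", cache_colors(32, 2, 8));
	/* 64K, 4-way, 16K pages: 64/4/16 = 1 color -> no aliasing */
	printf("%u\n", cache_colors(64, 4, 16));
	return 0;
}

This is the same quantity that arc_cache_init_master() above compares against CACHE_COLORS_NUM.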
diff --git a/arch/arc/mm/extable.c b/arch/arc/mm/extable.c
index 4e14c4244ea2..88fa3a4d4906 100644
--- a/arch/arc/mm/extable.c
+++ b/arch/arc/mm/extable.c
@@ -22,14 +22,3 @@ int fixup_exception(struct pt_regs *regs)
return 0;
}
-
-#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-
-unsigned long arc_clear_user_noinline(void __user *to,
- unsigned long n)
-{
- return __arc_clear_user(to, n);
-}
-EXPORT_SYMBOL(arc_clear_user_noinline);
-
-#endif
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index f59e722d147f..95119a5e7761 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -13,6 +13,7 @@
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
+#include <asm/entry.h>
#include <asm/mmu.h>
/*
@@ -99,10 +100,10 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
if (faulthandler_disabled() || !mm)
goto no_context;
- if (regs->ecr_cause & ECR_C_PROTV_STORE) /* ST/EX */
+ if (regs->ecr.cause & ECR_C_PROTV_STORE) /* ST/EX */
write = 1;
- else if ((regs->ecr_vec == ECR_V_PROTV) &&
- (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
+ else if ((regs->ecr.vec == ECR_V_PROTV) &&
+ (regs->ecr.cause == ECR_C_PROTV_INST_FETCH))
exec = 1;
flags = FAULT_FLAG_DEFAULT;
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 9f64d729c9f8..6a71b23f1383 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -15,6 +15,7 @@
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/sections.h>
+#include <asm/setup.h>
#include <asm/arcregs.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 6f40f37e6550..e536b2dcd4b0 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -18,7 +18,9 @@
/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;
-static int __read_mostly pae_exists;
+static struct cpuinfo_arc_mmu {
+ unsigned int ver, pg_sz_k, s_pg_sz_m, pae, sets, ways;
+} mmuinfo;
/*
* Utility Routine to erase a J-TLB entry
@@ -131,7 +133,7 @@ static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
noinline void local_flush_tlb_all(void)
{
- struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
unsigned long flags;
unsigned int entry;
int num_tlb = mmu->sets * mmu->ways;
@@ -389,7 +391,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
/*
* Routine to create a TLB entry
*/
-void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
+static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
unsigned long flags;
unsigned int asid_or_sasid, rwx;
@@ -564,89 +566,64 @@ void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
* the cpuinfo structure for later use.
* No Validation is done here, simply read/convert the BCRs
*/
-void read_decode_mmu_bcr(void)
+int arc_mmu_mumbojumbo(int c, char *buf, int len)
{
- struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
- unsigned int tmp;
- struct bcr_mmu_3 {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
- u_itlb:4, u_dtlb:4;
-#else
- unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
- ways:4, ver:8;
-#endif
- } *mmu3;
-
- struct bcr_mmu_4 {
-#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
- n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
-#else
- /* DTLB ITLB JES JE JA */
- unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
- pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
-#endif
- } *mmu4;
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
+ unsigned int bcr, u_dtlb, u_itlb, sasid;
+ struct bcr_mmu_3 *mmu3;
+ struct bcr_mmu_4 *mmu4;
+ char super_pg[64] = "";
+ int n = 0;
- tmp = read_aux_reg(ARC_REG_MMU_BCR);
- mmu->ver = (tmp >> 24);
+ bcr = read_aux_reg(ARC_REG_MMU_BCR);
+ mmu->ver = (bcr >> 24);
if (is_isa_arcompact() && mmu->ver == 3) {
- mmu3 = (struct bcr_mmu_3 *)&tmp;
+ mmu3 = (struct bcr_mmu_3 *)&bcr;
mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
mmu->sets = 1 << mmu3->sets;
mmu->ways = 1 << mmu3->ways;
- mmu->u_dtlb = mmu3->u_dtlb;
- mmu->u_itlb = mmu3->u_itlb;
- mmu->sasid = mmu3->sasid;
+ u_dtlb = mmu3->u_dtlb;
+ u_itlb = mmu3->u_itlb;
+ sasid = mmu3->sasid;
} else {
- mmu4 = (struct bcr_mmu_4 *)&tmp;
+ mmu4 = (struct bcr_mmu_4 *)&bcr;
mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
mmu->sets = 64 << mmu4->n_entry;
mmu->ways = mmu4->n_ways * 2;
- mmu->u_dtlb = mmu4->u_dtlb * 4;
- mmu->u_itlb = mmu4->u_itlb * 4;
- mmu->sasid = mmu4->sasid;
- pae_exists = mmu->pae = mmu4->pae;
+ u_dtlb = mmu4->u_dtlb * 4;
+ u_itlb = mmu4->u_itlb * 4;
+ sasid = mmu4->sasid;
+ mmu->pae = mmu4->pae;
}
-}
-char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
-{
- int n = 0;
- struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
- char super_pg[64] = "";
-
- if (p_mmu->s_pg_sz_m)
- scnprintf(super_pg, 64, "%dM Super Page %s",
- p_mmu->s_pg_sz_m,
- IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));
+ if (mmu->s_pg_sz_m)
+ scnprintf(super_pg, 64, "/%dM%s",
+ mmu->s_pg_sz_m,
+ IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) ? " (THP enabled)":"");
n += scnprintf(buf + n, len - n,
- "MMU [v%x]\t: %dk PAGE, %s, swalk %d lvl, JTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
- p_mmu->ver, p_mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
- p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
- p_mmu->u_dtlb, p_mmu->u_itlb,
- IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
-
- return buf;
+ "MMU [v%x]\t: %dk%s, swalk %d lvl, JTLB %dx%d, uDTLB %d, uITLB %d%s%s%s\n",
+ mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
+ mmu->sets, mmu->ways,
+ u_dtlb, u_itlb,
+ IS_AVAIL1(sasid, ", SASID"),
+ IS_AVAIL2(mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));
+
+ return n;
}
int pae40_exist_but_not_enab(void)
{
- return pae_exists && !is_pae40_enabled();
+ return mmuinfo.pae && !is_pae40_enabled();
}
void arc_mmu_init(void)
{
- struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
- char str[256];
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
int compat = 0;
- pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
-
/*
* Can't be done in processor.h due to header include dependencies
*/
@@ -723,7 +700,7 @@ volatile int dup_pd_silent; /* Be silent abt it or complain (default) */
void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
struct pt_regs *regs)
{
- struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+ struct cpuinfo_arc_mmu *mmu = &mmuinfo;
unsigned long flags;
int set, n_ways = mmu->ways;
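arc_mmu_mumbojumbo() above turns the raw MMUv4 BCR fields into a geometry with simple shifts: normal page size is 1 << (sz0 - 1) KB, super page size 1 << (sz1 - 11) MB, JTLB sets are 64 << n_entry and ways are n_ways * 2. A small stand-alone example of that arithmetic with hypothetical field values (chosen only to exercise the formulas, not read from real hardware):

#include <stdio.h>

int main(void)
{
	/* Hypothetical MMUv4 BCR fields, made up purely for illustration */
	unsigned int sz0 = 3, sz1 = 12, n_entry = 2, n_ways = 2;

	unsigned int pg_sz_k   = 1 << (sz0 - 1);	/* 4 KB pages */
	unsigned int s_pg_sz_m = 1 << (sz1 - 11);	/* 2 MB super pages */
	unsigned int sets      = 64 << n_entry;		/* 256 sets */
	unsigned int ways      = n_ways * 2;		/* 4 ways */

	printf("%uk/%uM, JTLB %ux%u\n", pg_sz_k, s_pg_sz_m, sets, ways);
	return 0;
}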
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index b821df7b0089..1feb990a56bc 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -6,7 +6,6 @@
*/
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <linux/libfdt.h>
#include <asm/asm-offsets.h>
diff --git a/arch/arm/boot/dts/nuvoton/nuvoton-common-npcm7xx.dtsi b/arch/arm/boot/dts/nuvoton/nuvoton-common-npcm7xx.dtsi
index c7b5ef15b716..868454ae6bde 100644
--- a/arch/arm/boot/dts/nuvoton/nuvoton-common-npcm7xx.dtsi
+++ b/arch/arm/boot/dts/nuvoton/nuvoton-common-npcm7xx.dtsi
@@ -220,6 +220,15 @@
};
};
+ peci: peci-controller@f0100000 {
+ compatible = "nuvoton,npcm750-peci";
+ reg = <0xf0100000 0x200>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM7XX_CLK_APB3>;
+ cmd-timeout-ms = <1000>;
+ status = "disabled";
+ };
+
spi0: spi@200000 {
compatible = "nuvoton,npcm750-pspi";
reg = <0x200000 0x1000>;
diff --git a/arch/arm/configs/dram_0x00000000.config b/arch/arm/configs/dram_0x00000000.config
index db96dcb420ce..8803a0f58343 100644
--- a/arch/arm/configs/dram_0x00000000.config
+++ b/arch/arm/configs/dram_0x00000000.config
@@ -1 +1,2 @@
+# Help: DRAM base at 0x00000000
CONFIG_DRAM_BASE=0x00000000
diff --git a/arch/arm/configs/dram_0xc0000000.config b/arch/arm/configs/dram_0xc0000000.config
index 343d5333d973..aab8f864686b 100644
--- a/arch/arm/configs/dram_0xc0000000.config
+++ b/arch/arm/configs/dram_0xc0000000.config
@@ -1 +1,2 @@
+# Help: DRAM base at 0xc0000000
CONFIG_DRAM_BASE=0xc0000000
diff --git a/arch/arm/configs/dram_0xd0000000.config b/arch/arm/configs/dram_0xd0000000.config
index 61ba7045f8a1..4aabce4ea3d4 100644
--- a/arch/arm/configs/dram_0xd0000000.config
+++ b/arch/arm/configs/dram_0xd0000000.config
@@ -1 +1,2 @@
+# Help: DRAM base at 0xd0000000
CONFIG_DRAM_BASE=0xd0000000
diff --git a/arch/arm/configs/lpae.config b/arch/arm/configs/lpae.config
index a6d6f7ab3c01..1ab94da8345d 100644
--- a/arch/arm/configs/lpae.config
+++ b/arch/arm/configs/lpae.config
@@ -1,2 +1,3 @@
+# Help: Enable Large Physical Address Extension mode
CONFIG_ARM_LPAE=y
CONFIG_VMSPLIT_2G=y
diff --git a/arch/arm/include/asm/arm_pmuv3.h b/arch/arm/include/asm/arm_pmuv3.h
index f3cd04ff022d..72529f5e2bed 100644
--- a/arch/arm/include/asm/arm_pmuv3.h
+++ b/arch/arm/include/asm/arm_pmuv3.h
@@ -227,6 +227,8 @@ static inline bool kvm_set_pmuserenr(u64 val)
return false;
}
+static inline void kvm_vcpu_pmu_resync_el0(void) {}
+
/* PMU Version in DFR Register */
#define ARMV8_PMU_DFR_VER_NI 0
#define ARMV8_PMU_DFR_VER_V3P4 0x5
diff --git a/arch/arm/include/asm/ide.h b/arch/arm/include/asm/ide.h
deleted file mode 100644
index a81e0b0d6747..000000000000
--- a/arch/arm/include/asm/ide.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * arch/arm/include/asm/ide.h
- *
- * Copyright (C) 1994-1996 Linus Torvalds & authors
- */
-
-/*
- * This file contains the ARM architecture specific IDE code.
- */
-
-#ifndef __ASMARM_IDE_H
-#define __ASMARM_IDE_H
-
-#ifdef __KERNEL__
-
-#define __ide_mm_insw(port,addr,len) readsw(port,addr,len)
-#define __ide_mm_insl(port,addr,len) readsl(port,addr,len)
-#define __ide_mm_outsw(port,addr,len) writesw(port,addr,len)
-#define __ide_mm_outsl(port,addr,len) writesl(port,addr,len)
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASMARM_IDE_H */
diff --git a/arch/arm64/boot/dts/amlogic/meson-a1.dtsi b/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
index c8f344596285..96225c421194 100644
--- a/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
@@ -108,7 +108,7 @@
};
uart_AO: serial@1c00 {
- compatible = "amlogic,meson-gx-uart",
+ compatible = "amlogic,meson-a1-uart",
"amlogic,meson-ao-uart";
reg = <0x0 0x1c00 0x0 0x18>;
interrupts = <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>;
@@ -118,7 +118,7 @@
};
uart_AO_B: serial@2000 {
- compatible = "amlogic,meson-gx-uart",
+ compatible = "amlogic,meson-a1-uart",
"amlogic,meson-ao-uart";
reg = <0x0 0x2000 0x0 0x18>;
interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
diff --git a/arch/arm64/boot/dts/nuvoton/nuvoton-common-npcm8xx.dtsi b/arch/arm64/boot/dts/nuvoton/nuvoton-common-npcm8xx.dtsi
index aa7aac8c3774..ecd171b2feba 100644
--- a/arch/arm64/boot/dts/nuvoton/nuvoton-common-npcm8xx.dtsi
+++ b/arch/arm64/boot/dts/nuvoton/nuvoton-common-npcm8xx.dtsi
@@ -68,6 +68,15 @@
ranges = <0x0 0x0 0xf0000000 0x00300000>,
<0xfff00000 0x0 0xfff00000 0x00016000>;
+ peci: peci-controller@100000 {
+ compatible = "nuvoton,npcm845-peci";
+ reg = <0x100000 0x1000>;
+ interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk NPCM8XX_CLK_APB3>;
+ cmd-timeout-ms = <1000>;
+ status = "disabled";
+ };
+
timer0: timer@8000 {
compatible = "nuvoton,npcm845-timer";
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 2e9636da2147..5315789f4868 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1159,7 +1159,6 @@ CONFIG_XEN_GNTDEV=y
CONFIG_XEN_GRANT_DEV_ALLOC=y
CONFIG_STAGING=y
CONFIG_STAGING_MEDIA=y
-CONFIG_VIDEO_IMX_MEDIA=m
CONFIG_VIDEO_MAX96712=m
CONFIG_CHROME_PLATFORMS=y
CONFIG_CROS_EC=y
diff --git a/arch/arm64/configs/virt.config b/arch/arm64/configs/virt.config
index 6865d54e68f8..c47c36f8f67b 100644
--- a/arch/arm64/configs/virt.config
+++ b/arch/arm64/configs/virt.config
@@ -1,3 +1,4 @@
+# Help: Virtualization guest
#
# Base options for platforms
#
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 58e5eb27da68..5882b2415596 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -18,10 +18,19 @@
#define HCR_DCT (UL(1) << 57)
#define HCR_ATA_SHIFT 56
#define HCR_ATA (UL(1) << HCR_ATA_SHIFT)
+#define HCR_TTLBOS (UL(1) << 55)
+#define HCR_TTLBIS (UL(1) << 54)
+#define HCR_ENSCXT (UL(1) << 53)
+#define HCR_TOCU (UL(1) << 52)
#define HCR_AMVOFFEN (UL(1) << 51)
+#define HCR_TICAB (UL(1) << 50)
#define HCR_TID4 (UL(1) << 49)
#define HCR_FIEN (UL(1) << 47)
#define HCR_FWB (UL(1) << 46)
+#define HCR_NV2 (UL(1) << 45)
+#define HCR_AT (UL(1) << 44)
+#define HCR_NV1 (UL(1) << 43)
+#define HCR_NV (UL(1) << 42)
#define HCR_API (UL(1) << 41)
#define HCR_APK (UL(1) << 40)
#define HCR_TEA (UL(1) << 37)
@@ -89,7 +98,6 @@
HCR_BSU_IS | HCR_FB | HCR_TACR | \
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
HCR_FMO | HCR_IMO | HCR_PTW | HCR_TID3)
-#define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK | HCR_ATA)
#define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC)
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
@@ -324,6 +332,47 @@
BIT(18) | \
GENMASK(16, 15))
+/*
+ * FGT register definitions
+ *
+ * RES0 and polarity masks as of DDI0487J.a, to be updated as needed.
+ * We're not using the generated masks as they are usually ahead of
+ * the published ARM ARM, which we use as a reference.
+ *
+ * Once we get to a point where the two describe the same thing, we'll
+ * merge the definitions. One day.
+ */
+#define __HFGRTR_EL2_RES0 (GENMASK(63, 56) | GENMASK(53, 51))
+#define __HFGRTR_EL2_MASK GENMASK(49, 0)
+#define __HFGRTR_EL2_nMASK (GENMASK(55, 54) | BIT(50))
+
+#define __HFGWTR_EL2_RES0 (GENMASK(63, 56) | GENMASK(53, 51) | \
+ BIT(46) | BIT(42) | BIT(40) | BIT(28) | \
+ GENMASK(26, 25) | BIT(21) | BIT(18) | \
+ GENMASK(15, 14) | GENMASK(10, 9) | BIT(2))
+#define __HFGWTR_EL2_MASK GENMASK(49, 0)
+#define __HFGWTR_EL2_nMASK (GENMASK(55, 54) | BIT(50))
+
+#define __HFGITR_EL2_RES0 GENMASK(63, 57)
+#define __HFGITR_EL2_MASK GENMASK(54, 0)
+#define __HFGITR_EL2_nMASK GENMASK(56, 55)
+
+#define __HDFGRTR_EL2_RES0 (BIT(49) | BIT(42) | GENMASK(39, 38) | \
+ GENMASK(21, 20) | BIT(8))
+#define __HDFGRTR_EL2_MASK ~__HDFGRTR_EL2_nMASK
+#define __HDFGRTR_EL2_nMASK GENMASK(62, 59)
+
+#define __HDFGWTR_EL2_RES0 (BIT(63) | GENMASK(59, 58) | BIT(51) | BIT(47) | \
+ BIT(43) | GENMASK(40, 38) | BIT(34) | BIT(30) | \
+ BIT(22) | BIT(9) | BIT(6))
+#define __HDFGWTR_EL2_MASK ~__HDFGWTR_EL2_nMASK
+#define __HDFGWTR_EL2_nMASK GENMASK(62, 60)
+
+/* Similar definitions for HCRX_EL2 */
+#define __HCRX_EL2_RES0 (GENMASK(63, 16) | GENMASK(13, 12))
+#define __HCRX_EL2_MASK (0)
+#define __HCRX_EL2_nMASK (GENMASK(15, 14) | GENMASK(4, 0))
+
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK (~UL(0xf))
/*
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 24e28bb2d95b..24b5e6b23417 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -70,6 +70,7 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh,
__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
+ __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
__KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
@@ -229,6 +230,8 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
extern void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
phys_addr_t ipa,
int level);
+extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+ phys_addr_t start, unsigned long pages);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_timer_set_cntvoff(u64 cntvoff);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index d3dd05bbfe23..af06ccb7ee34 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -49,6 +49,7 @@
#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
#define KVM_REQ_SUSPEND KVM_ARCH_REQ(6)
+#define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7)
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
KVM_DIRTY_LOG_INITIALLY_SET)
@@ -380,6 +381,7 @@ enum vcpu_sysreg {
CPTR_EL2, /* Architectural Feature Trap Register (EL2) */
HSTR_EL2, /* Hypervisor System Trap Register */
HACR_EL2, /* Hypervisor Auxiliary Control Register */
+ HCRX_EL2, /* Extended Hypervisor Configuration Register */
TTBR0_EL2, /* Translation Table Base Register 0 (EL2) */
TTBR1_EL2, /* Translation Table Base Register 1 (EL2) */
TCR_EL2, /* Translation Control Register (EL2) */
@@ -400,6 +402,11 @@ enum vcpu_sysreg {
TPIDR_EL2, /* EL2 Software Thread ID Register */
CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */
SP_EL2, /* EL2 Stack Pointer */
+ HFGRTR_EL2,
+ HFGWTR_EL2,
+ HFGITR_EL2,
+ HDFGRTR_EL2,
+ HDFGWTR_EL2,
CNTHP_CTL_EL2,
CNTHP_CVAL_EL2,
CNTHV_CTL_EL2,
@@ -567,8 +574,7 @@ struct kvm_vcpu_arch {
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
- /* Target CPU and feature flags */
- int target;
+ /* feature flags */
DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
@@ -669,6 +675,8 @@ struct kvm_vcpu_arch {
#define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1))
/* PTRAUTH exposed to guest */
#define GUEST_HAS_PTRAUTH __vcpu_single_flag(cflags, BIT(2))
+/* KVM_ARM_VCPU_INIT completed */
+#define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(3))
/* Exception pending */
#define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0))
@@ -899,7 +907,6 @@ struct kvm_vcpu_stat {
u64 exits;
};
-void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
@@ -967,8 +974,6 @@ void kvm_arm_resume_guest(struct kvm *kvm);
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */
-void force_vm_exit(const cpumask_t *mask);
-
int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
@@ -983,6 +988,7 @@ int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
int __init kvm_sys_reg_table_init(void);
+int __init populate_nv_trap_config(void);
bool lock_all_vcpus(struct kvm *kvm);
void unlock_all_vcpus(struct kvm *kvm);
@@ -1049,8 +1055,6 @@ static inline bool kvm_system_needs_idmapped_vectors(void)
return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}
-void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
-
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
@@ -1113,13 +1117,15 @@ int __init kvm_set_ipa_limit(void);
#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
+
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+
static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
return false;
}
-void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
-
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 0e1e1ab17b4d..96a80e8f6226 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -168,6 +168,7 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
void **haddr);
+int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr);
void __init free_hyp_pgds(void);
void stage2_unmap_vm(struct kvm *kvm);
diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h
index 8fb67f032fd1..fa23cc9c2adc 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -11,6 +11,8 @@ static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
test_bit(KVM_ARM_VCPU_HAS_EL2, vcpu->arch.features));
}
+extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu);
+
struct sys_reg_params;
struct sys_reg_desc;
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 929d355eae0a..d3e354bb8351 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -746,4 +746,14 @@ enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte);
* kvm_pgtable_prot format.
*/
enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte);
+
+/**
+ * kvm_tlb_flush_vmid_range() - Invalidate/flush a range of TLB entries
+ *
+ * @mmu: Stage-2 KVM MMU struct
+ * @addr: The base Intermediate physical address from which to invalidate
+ * @size: Size of the range from the base to invalidate
+ */
+void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+ phys_addr_t addr, size_t size);
#endif /* __ARM64_KVM_PGTABLE_H__ */
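The kvm_tlb_flush_vmid_range() declaration above takes the stage-2 MMU, a base IPA and a byte size. A hypothetical caller, sketched here purely for illustration and not taken from this patch, might use it to drop the TLB entries backing a just-unmapped 2MiB block (ALIGN_DOWN and SZ_2M are standard kernel helpers; the surrounding stage-2 context is assumed):

/*
 * Hypothetical usage sketch only (not part of this patch): invalidate the
 * stage-2 TLB entries covering a just-unmapped 2MiB block. 'mmu' and 'ipa'
 * would come from the surrounding stage-2 page-table code.
 */
static void flush_unmapped_block(struct kvm_s2_mmu *mmu, phys_addr_t ipa)
{
	kvm_tlb_flush_vmid_range(mmu, ALIGN_DOWN(ipa, SZ_2M), SZ_2M);
}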
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 16464bf9a8aa..38296579a4fd 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -124,6 +124,37 @@
#define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4)
#define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6)
+#define SYS_IC_IALLUIS sys_insn(1, 0, 7, 1, 0)
+#define SYS_IC_IALLU sys_insn(1, 0, 7, 5, 0)
+#define SYS_IC_IVAU sys_insn(1, 3, 7, 5, 1)
+
+#define SYS_DC_IVAC sys_insn(1, 0, 7, 6, 1)
+#define SYS_DC_IGVAC sys_insn(1, 0, 7, 6, 3)
+#define SYS_DC_IGDVAC sys_insn(1, 0, 7, 6, 5)
+
+#define SYS_DC_CVAC sys_insn(1, 3, 7, 10, 1)
+#define SYS_DC_CGVAC sys_insn(1, 3, 7, 10, 3)
+#define SYS_DC_CGDVAC sys_insn(1, 3, 7, 10, 5)
+
+#define SYS_DC_CVAU sys_insn(1, 3, 7, 11, 1)
+
+#define SYS_DC_CVAP sys_insn(1, 3, 7, 12, 1)
+#define SYS_DC_CGVAP sys_insn(1, 3, 7, 12, 3)
+#define SYS_DC_CGDVAP sys_insn(1, 3, 7, 12, 5)
+
+#define SYS_DC_CVADP sys_insn(1, 3, 7, 13, 1)
+#define SYS_DC_CGVADP sys_insn(1, 3, 7, 13, 3)
+#define SYS_DC_CGDVADP sys_insn(1, 3, 7, 13, 5)
+
+#define SYS_DC_CIVAC sys_insn(1, 3, 7, 14, 1)
+#define SYS_DC_CIGVAC sys_insn(1, 3, 7, 14, 3)
+#define SYS_DC_CIGDVAC sys_insn(1, 3, 7, 14, 5)
+
+/* Data cache zero operations */
+#define SYS_DC_ZVA sys_insn(1, 3, 7, 4, 1)
+#define SYS_DC_GVA sys_insn(1, 3, 7, 4, 3)
+#define SYS_DC_GZVA sys_insn(1, 3, 7, 4, 4)
+
/*
* Automatically generated definitions for system registers, the
* manual encodings below are in the process of being converted to
@@ -163,6 +194,82 @@
#define SYS_DBGDTRTX_EL0 sys_reg(2, 3, 0, 5, 0)
#define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0)
+#define SYS_BRBINF_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 0))
+#define SYS_BRBINFINJ_EL1 sys_reg(2, 1, 9, 1, 0)
+#define SYS_BRBSRC_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 1))
+#define SYS_BRBSRCINJ_EL1 sys_reg(2, 1, 9, 1, 1)
+#define SYS_BRBTGT_EL1(n) sys_reg(2, 1, 8, (n & 15), (((n & 16) >> 2) | 2))
+#define SYS_BRBTGTINJ_EL1 sys_reg(2, 1, 9, 1, 2)
+#define SYS_BRBTS_EL1 sys_reg(2, 1, 9, 0, 2)
+
+#define SYS_BRBCR_EL1 sys_reg(2, 1, 9, 0, 0)
+#define SYS_BRBFCR_EL1 sys_reg(2, 1, 9, 0, 1)
+#define SYS_BRBIDR0_EL1 sys_reg(2, 1, 9, 2, 0)
+
+#define SYS_TRCITECR_EL1 sys_reg(3, 0, 1, 2, 3)
+#define SYS_TRCACATR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (2 | (m >> 3)))
+#define SYS_TRCACVR(m) sys_reg(2, 1, 2, ((m & 7) << 1), (0 | (m >> 3)))
+#define SYS_TRCAUTHSTATUS sys_reg(2, 1, 7, 14, 6)
+#define SYS_TRCAUXCTLR sys_reg(2, 1, 0, 6, 0)
+#define SYS_TRCBBCTLR sys_reg(2, 1, 0, 15, 0)
+#define SYS_TRCCCCTLR sys_reg(2, 1, 0, 14, 0)
+#define SYS_TRCCIDCCTLR0 sys_reg(2, 1, 3, 0, 2)
+#define SYS_TRCCIDCCTLR1 sys_reg(2, 1, 3, 1, 2)
+#define SYS_TRCCIDCVR(m) sys_reg(2, 1, 3, ((m & 7) << 1), 0)
+#define SYS_TRCCLAIMCLR sys_reg(2, 1, 7, 9, 6)
+#define SYS_TRCCLAIMSET sys_reg(2, 1, 7, 8, 6)
+#define SYS_TRCCNTCTLR(m) sys_reg(2, 1, 0, (4 | (m & 3)), 5)
+#define SYS_TRCCNTRLDVR(m) sys_reg(2, 1, 0, (0 | (m & 3)), 5)
+#define SYS_TRCCNTVR(m) sys_reg(2, 1, 0, (8 | (m & 3)), 5)
+#define SYS_TRCCONFIGR sys_reg(2, 1, 0, 4, 0)
+#define SYS_TRCDEVARCH sys_reg(2, 1, 7, 15, 6)
+#define SYS_TRCDEVID sys_reg(2, 1, 7, 2, 7)
+#define SYS_TRCEVENTCTL0R sys_reg(2, 1, 0, 8, 0)
+#define SYS_TRCEVENTCTL1R sys_reg(2, 1, 0, 9, 0)
+#define SYS_TRCEXTINSELR(m) sys_reg(2, 1, 0, (8 | (m & 3)), 4)
+#define SYS_TRCIDR0 sys_reg(2, 1, 0, 8, 7)
+#define SYS_TRCIDR10 sys_reg(2, 1, 0, 2, 6)
+#define SYS_TRCIDR11 sys_reg(2, 1, 0, 3, 6)
+#define SYS_TRCIDR12 sys_reg(2, 1, 0, 4, 6)
+#define SYS_TRCIDR13 sys_reg(2, 1, 0, 5, 6)
+#define SYS_TRCIDR1 sys_reg(2, 1, 0, 9, 7)
+#define SYS_TRCIDR2 sys_reg(2, 1, 0, 10, 7)
+#define SYS_TRCIDR3 sys_reg(2, 1, 0, 11, 7)
+#define SYS_TRCIDR4 sys_reg(2, 1, 0, 12, 7)
+#define SYS_TRCIDR5 sys_reg(2, 1, 0, 13, 7)
+#define SYS_TRCIDR6 sys_reg(2, 1, 0, 14, 7)
+#define SYS_TRCIDR7 sys_reg(2, 1, 0, 15, 7)
+#define SYS_TRCIDR8 sys_reg(2, 1, 0, 0, 6)
+#define SYS_TRCIDR9 sys_reg(2, 1, 0, 1, 6)
+#define SYS_TRCIMSPEC(m) sys_reg(2, 1, 0, (m & 7), 7)
+#define SYS_TRCITEEDCR sys_reg(2, 1, 0, 2, 1)
+#define SYS_TRCOSLSR sys_reg(2, 1, 1, 1, 4)
+#define SYS_TRCPRGCTLR sys_reg(2, 1, 0, 1, 0)
+#define SYS_TRCQCTLR sys_reg(2, 1, 0, 1, 1)
+#define SYS_TRCRSCTLR(m) sys_reg(2, 1, 1, (m & 15), (0 | (m >> 4)))
+#define SYS_TRCRSR sys_reg(2, 1, 0, 10, 0)
+#define SYS_TRCSEQEVR(m) sys_reg(2, 1, 0, (m & 3), 4)
+#define SYS_TRCSEQRSTEVR sys_reg(2, 1, 0, 6, 4)
+#define SYS_TRCSEQSTR sys_reg(2, 1, 0, 7, 4)
+#define SYS_TRCSSCCR(m) sys_reg(2, 1, 1, (m & 7), 2)
+#define SYS_TRCSSCSR(m) sys_reg(2, 1, 1, (8 | (m & 7)), 2)
+#define SYS_TRCSSPCICR(m) sys_reg(2, 1, 1, (m & 7), 3)
+#define SYS_TRCSTALLCTLR sys_reg(2, 1, 0, 11, 0)
+#define SYS_TRCSTATR sys_reg(2, 1, 0, 3, 0)
+#define SYS_TRCSYNCPR sys_reg(2, 1, 0, 13, 0)
+#define SYS_TRCTRACEIDR sys_reg(2, 1, 0, 0, 1)
+#define SYS_TRCTSCTLR sys_reg(2, 1, 0, 12, 0)
+#define SYS_TRCVICTLR sys_reg(2, 1, 0, 0, 2)
+#define SYS_TRCVIIECTLR sys_reg(2, 1, 0, 1, 2)
+#define SYS_TRCVIPCSSCTLR sys_reg(2, 1, 0, 3, 2)
+#define SYS_TRCVISSCTLR sys_reg(2, 1, 0, 2, 2)
+#define SYS_TRCVMIDCCTLR0 sys_reg(2, 1, 3, 2, 2)
+#define SYS_TRCVMIDCCTLR1 sys_reg(2, 1, 3, 3, 2)
+#define SYS_TRCVMIDCVR(m) sys_reg(2, 1, 3, ((m & 7) << 1), 1)
+
+/* ETM */
+#define SYS_TRCOSLAR sys_reg(2, 1, 1, 0, 4)
+
#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
@@ -203,8 +310,13 @@
#define SYS_ERXCTLR_EL1 sys_reg(3, 0, 5, 4, 1)
#define SYS_ERXSTATUS_EL1 sys_reg(3, 0, 5, 4, 2)
#define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3)
+#define SYS_ERXPFGF_EL1 sys_reg(3, 0, 5, 4, 4)
+#define SYS_ERXPFGCTL_EL1 sys_reg(3, 0, 5, 4, 5)
+#define SYS_ERXPFGCDN_EL1 sys_reg(3, 0, 5, 4, 6)
#define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0)
#define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1)
+#define SYS_ERXMISC2_EL1 sys_reg(3, 0, 5, 5, 2)
+#define SYS_ERXMISC3_EL1 sys_reg(3, 0, 5, 5, 3)
#define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0)
#define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1)
@@ -275,6 +387,8 @@
#define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6)
#define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
+#define SYS_ACCDATA_EL1 sys_reg(3, 0, 13, 0, 5)
+
#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
@@ -383,8 +497,6 @@
#define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2)
#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
-#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4)
-#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5)
#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)
#define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1)
@@ -478,6 +590,158 @@
#define SYS_SP_EL2 sys_reg(3, 6, 4, 1, 0)
+/* AT instructions */
+#define AT_Op0 1
+#define AT_CRn 7
+
+#define OP_AT_S1E1R sys_insn(AT_Op0, 0, AT_CRn, 8, 0)
+#define OP_AT_S1E1W sys_insn(AT_Op0, 0, AT_CRn, 8, 1)
+#define OP_AT_S1E0R sys_insn(AT_Op0, 0, AT_CRn, 8, 2)
+#define OP_AT_S1E0W sys_insn(AT_Op0, 0, AT_CRn, 8, 3)
+#define OP_AT_S1E1RP sys_insn(AT_Op0, 0, AT_CRn, 9, 0)
+#define OP_AT_S1E1WP sys_insn(AT_Op0, 0, AT_CRn, 9, 1)
+#define OP_AT_S1E2R sys_insn(AT_Op0, 4, AT_CRn, 8, 0)
+#define OP_AT_S1E2W sys_insn(AT_Op0, 4, AT_CRn, 8, 1)
+#define OP_AT_S12E1R sys_insn(AT_Op0, 4, AT_CRn, 8, 4)
+#define OP_AT_S12E1W sys_insn(AT_Op0, 4, AT_CRn, 8, 5)
+#define OP_AT_S12E0R sys_insn(AT_Op0, 4, AT_CRn, 8, 6)
+#define OP_AT_S12E0W sys_insn(AT_Op0, 4, AT_CRn, 8, 7)
+
+/* TLBI instructions */
+#define OP_TLBI_VMALLE1OS sys_insn(1, 0, 8, 1, 0)
+#define OP_TLBI_VAE1OS sys_insn(1, 0, 8, 1, 1)
+#define OP_TLBI_ASIDE1OS sys_insn(1, 0, 8, 1, 2)
+#define OP_TLBI_VAAE1OS sys_insn(1, 0, 8, 1, 3)
+#define OP_TLBI_VALE1OS sys_insn(1, 0, 8, 1, 5)
+#define OP_TLBI_VAALE1OS sys_insn(1, 0, 8, 1, 7)
+#define OP_TLBI_RVAE1IS sys_insn(1, 0, 8, 2, 1)
+#define OP_TLBI_RVAAE1IS sys_insn(1, 0, 8, 2, 3)
+#define OP_TLBI_RVALE1IS sys_insn(1, 0, 8, 2, 5)
+#define OP_TLBI_RVAALE1IS sys_insn(1, 0, 8, 2, 7)
+#define OP_TLBI_VMALLE1IS sys_insn(1, 0, 8, 3, 0)
+#define OP_TLBI_VAE1IS sys_insn(1, 0, 8, 3, 1)
+#define OP_TLBI_ASIDE1IS sys_insn(1, 0, 8, 3, 2)
+#define OP_TLBI_VAAE1IS sys_insn(1, 0, 8, 3, 3)
+#define OP_TLBI_VALE1IS sys_insn(1, 0, 8, 3, 5)
+#define OP_TLBI_VAALE1IS sys_insn(1, 0, 8, 3, 7)
+#define OP_TLBI_RVAE1OS sys_insn(1, 0, 8, 5, 1)
+#define OP_TLBI_RVAAE1OS sys_insn(1, 0, 8, 5, 3)
+#define OP_TLBI_RVALE1OS sys_insn(1, 0, 8, 5, 5)
+#define OP_TLBI_RVAALE1OS sys_insn(1, 0, 8, 5, 7)
+#define OP_TLBI_RVAE1 sys_insn(1, 0, 8, 6, 1)
+#define OP_TLBI_RVAAE1 sys_insn(1, 0, 8, 6, 3)
+#define OP_TLBI_RVALE1 sys_insn(1, 0, 8, 6, 5)
+#define OP_TLBI_RVAALE1 sys_insn(1, 0, 8, 6, 7)
+#define OP_TLBI_VMALLE1 sys_insn(1, 0, 8, 7, 0)
+#define OP_TLBI_VAE1 sys_insn(1, 0, 8, 7, 1)
+#define OP_TLBI_ASIDE1 sys_insn(1, 0, 8, 7, 2)
+#define OP_TLBI_VAAE1 sys_insn(1, 0, 8, 7, 3)
+#define OP_TLBI_VALE1 sys_insn(1, 0, 8, 7, 5)
+#define OP_TLBI_VAALE1 sys_insn(1, 0, 8, 7, 7)
+#define OP_TLBI_VMALLE1OSNXS sys_insn(1, 0, 9, 1, 0)
+#define OP_TLBI_VAE1OSNXS sys_insn(1, 0, 9, 1, 1)
+#define OP_TLBI_ASIDE1OSNXS sys_insn(1, 0, 9, 1, 2)
+#define OP_TLBI_VAAE1OSNXS sys_insn(1, 0, 9, 1, 3)
+#define OP_TLBI_VALE1OSNXS sys_insn(1, 0, 9, 1, 5)
+#define OP_TLBI_VAALE1OSNXS sys_insn(1, 0, 9, 1, 7)
+#define OP_TLBI_RVAE1ISNXS sys_insn(1, 0, 9, 2, 1)
+#define OP_TLBI_RVAAE1ISNXS sys_insn(1, 0, 9, 2, 3)
+#define OP_TLBI_RVALE1ISNXS sys_insn(1, 0, 9, 2, 5)
+#define OP_TLBI_RVAALE1ISNXS sys_insn(1, 0, 9, 2, 7)
+#define OP_TLBI_VMALLE1ISNXS sys_insn(1, 0, 9, 3, 0)
+#define OP_TLBI_VAE1ISNXS sys_insn(1, 0, 9, 3, 1)
+#define OP_TLBI_ASIDE1ISNXS sys_insn(1, 0, 9, 3, 2)
+#define OP_TLBI_VAAE1ISNXS sys_insn(1, 0, 9, 3, 3)
+#define OP_TLBI_VALE1ISNXS sys_insn(1, 0, 9, 3, 5)
+#define OP_TLBI_VAALE1ISNXS sys_insn(1, 0, 9, 3, 7)
+#define OP_TLBI_RVAE1OSNXS sys_insn(1, 0, 9, 5, 1)
+#define OP_TLBI_RVAAE1OSNXS sys_insn(1, 0, 9, 5, 3)
+#define OP_TLBI_RVALE1OSNXS sys_insn(1, 0, 9, 5, 5)
+#define OP_TLBI_RVAALE1OSNXS sys_insn(1, 0, 9, 5, 7)
+#define OP_TLBI_RVAE1NXS sys_insn(1, 0, 9, 6, 1)
+#define OP_TLBI_RVAAE1NXS sys_insn(1, 0, 9, 6, 3)
+#define OP_TLBI_RVALE1NXS sys_insn(1, 0, 9, 6, 5)
+#define OP_TLBI_RVAALE1NXS sys_insn(1, 0, 9, 6, 7)
+#define OP_TLBI_VMALLE1NXS sys_insn(1, 0, 9, 7, 0)
+#define OP_TLBI_VAE1NXS sys_insn(1, 0, 9, 7, 1)
+#define OP_TLBI_ASIDE1NXS sys_insn(1, 0, 9, 7, 2)
+#define OP_TLBI_VAAE1NXS sys_insn(1, 0, 9, 7, 3)
+#define OP_TLBI_VALE1NXS sys_insn(1, 0, 9, 7, 5)
+#define OP_TLBI_VAALE1NXS sys_insn(1, 0, 9, 7, 7)
+#define OP_TLBI_IPAS2E1IS sys_insn(1, 4, 8, 0, 1)
+#define OP_TLBI_RIPAS2E1IS sys_insn(1, 4, 8, 0, 2)
+#define OP_TLBI_IPAS2LE1IS sys_insn(1, 4, 8, 0, 5)
+#define OP_TLBI_RIPAS2LE1IS sys_insn(1, 4, 8, 0, 6)
+#define OP_TLBI_ALLE2OS sys_insn(1, 4, 8, 1, 0)
+#define OP_TLBI_VAE2OS sys_insn(1, 4, 8, 1, 1)
+#define OP_TLBI_ALLE1OS sys_insn(1, 4, 8, 1, 4)
+#define OP_TLBI_VALE2OS sys_insn(1, 4, 8, 1, 5)
+#define OP_TLBI_VMALLS12E1OS sys_insn(1, 4, 8, 1, 6)
+#define OP_TLBI_RVAE2IS sys_insn(1, 4, 8, 2, 1)
+#define OP_TLBI_RVALE2IS sys_insn(1, 4, 8, 2, 5)
+#define OP_TLBI_ALLE2IS sys_insn(1, 4, 8, 3, 0)
+#define OP_TLBI_VAE2IS sys_insn(1, 4, 8, 3, 1)
+#define OP_TLBI_ALLE1IS sys_insn(1, 4, 8, 3, 4)
+#define OP_TLBI_VALE2IS sys_insn(1, 4, 8, 3, 5)
+#define OP_TLBI_VMALLS12E1IS sys_insn(1, 4, 8, 3, 6)
+#define OP_TLBI_IPAS2E1OS sys_insn(1, 4, 8, 4, 0)
+#define OP_TLBI_IPAS2E1 sys_insn(1, 4, 8, 4, 1)
+#define OP_TLBI_RIPAS2E1 sys_insn(1, 4, 8, 4, 2)
+#define OP_TLBI_RIPAS2E1OS sys_insn(1, 4, 8, 4, 3)
+#define OP_TLBI_IPAS2LE1OS sys_insn(1, 4, 8, 4, 4)
+#define OP_TLBI_IPAS2LE1 sys_insn(1, 4, 8, 4, 5)
+#define OP_TLBI_RIPAS2LE1 sys_insn(1, 4, 8, 4, 6)
+#define OP_TLBI_RIPAS2LE1OS sys_insn(1, 4, 8, 4, 7)
+#define OP_TLBI_RVAE2OS sys_insn(1, 4, 8, 5, 1)
+#define OP_TLBI_RVALE2OS sys_insn(1, 4, 8, 5, 5)
+#define OP_TLBI_RVAE2 sys_insn(1, 4, 8, 6, 1)
+#define OP_TLBI_RVALE2 sys_insn(1, 4, 8, 6, 5)
+#define OP_TLBI_ALLE2 sys_insn(1, 4, 8, 7, 0)
+#define OP_TLBI_VAE2 sys_insn(1, 4, 8, 7, 1)
+#define OP_TLBI_ALLE1 sys_insn(1, 4, 8, 7, 4)
+#define OP_TLBI_VALE2 sys_insn(1, 4, 8, 7, 5)
+#define OP_TLBI_VMALLS12E1 sys_insn(1, 4, 8, 7, 6)
+#define OP_TLBI_IPAS2E1ISNXS sys_insn(1, 4, 9, 0, 1)
+#define OP_TLBI_RIPAS2E1ISNXS sys_insn(1, 4, 9, 0, 2)
+#define OP_TLBI_IPAS2LE1ISNXS sys_insn(1, 4, 9, 0, 5)
+#define OP_TLBI_RIPAS2LE1ISNXS sys_insn(1, 4, 9, 0, 6)
+#define OP_TLBI_ALLE2OSNXS sys_insn(1, 4, 9, 1, 0)
+#define OP_TLBI_VAE2OSNXS sys_insn(1, 4, 9, 1, 1)
+#define OP_TLBI_ALLE1OSNXS sys_insn(1, 4, 9, 1, 4)
+#define OP_TLBI_VALE2OSNXS sys_insn(1, 4, 9, 1, 5)
+#define OP_TLBI_VMALLS12E1OSNXS sys_insn(1, 4, 9, 1, 6)
+#define OP_TLBI_RVAE2ISNXS sys_insn(1, 4, 9, 2, 1)
+#define OP_TLBI_RVALE2ISNXS sys_insn(1, 4, 9, 2, 5)
+#define OP_TLBI_ALLE2ISNXS sys_insn(1, 4, 9, 3, 0)
+#define OP_TLBI_VAE2ISNXS sys_insn(1, 4, 9, 3, 1)
+#define OP_TLBI_ALLE1ISNXS sys_insn(1, 4, 9, 3, 4)
+#define OP_TLBI_VALE2ISNXS sys_insn(1, 4, 9, 3, 5)
+#define OP_TLBI_VMALLS12E1ISNXS sys_insn(1, 4, 9, 3, 6)
+#define OP_TLBI_IPAS2E1OSNXS sys_insn(1, 4, 9, 4, 0)
+#define OP_TLBI_IPAS2E1NXS sys_insn(1, 4, 9, 4, 1)
+#define OP_TLBI_RIPAS2E1NXS sys_insn(1, 4, 9, 4, 2)
+#define OP_TLBI_RIPAS2E1OSNXS sys_insn(1, 4, 9, 4, 3)
+#define OP_TLBI_IPAS2LE1OSNXS sys_insn(1, 4, 9, 4, 4)
+#define OP_TLBI_IPAS2LE1NXS sys_insn(1, 4, 9, 4, 5)
+#define OP_TLBI_RIPAS2LE1NXS sys_insn(1, 4, 9, 4, 6)
+#define OP_TLBI_RIPAS2LE1OSNXS sys_insn(1, 4, 9, 4, 7)
+#define OP_TLBI_RVAE2OSNXS sys_insn(1, 4, 9, 5, 1)
+#define OP_TLBI_RVALE2OSNXS sys_insn(1, 4, 9, 5, 5)
+#define OP_TLBI_RVAE2NXS sys_insn(1, 4, 9, 6, 1)
+#define OP_TLBI_RVALE2NXS sys_insn(1, 4, 9, 6, 5)
+#define OP_TLBI_ALLE2NXS sys_insn(1, 4, 9, 7, 0)
+#define OP_TLBI_VAE2NXS sys_insn(1, 4, 9, 7, 1)
+#define OP_TLBI_ALLE1NXS sys_insn(1, 4, 9, 7, 4)
+#define OP_TLBI_VALE2NXS sys_insn(1, 4, 9, 7, 5)
+#define OP_TLBI_VMALLS12E1NXS sys_insn(1, 4, 9, 7, 6)
+
+/* Misc instructions */
+#define OP_BRB_IALL sys_insn(1, 1, 7, 2, 4)
+#define OP_BRB_INJ sys_insn(1, 1, 7, 2, 5)
+#define OP_CFP_RCTX sys_insn(1, 3, 7, 3, 4)
+#define OP_DVP_RCTX sys_insn(1, 3, 7, 3, 5)
+#define OP_CPP_RCTX sys_insn(1, 3, 7, 3, 7)
+
/* Common SCTLR_ELx flags. */
#define SCTLR_ELx_ENTP2 (BIT(60))
#define SCTLR_ELx_DSSBS (BIT(44))
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 55b50e1d4a84..b149cf9f91bc 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -335,14 +335,77 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
*/
#define MAX_TLBI_OPS PTRS_PER_PTE
+/*
+ * __flush_tlb_range_op - Perform TLBI operation upon a range
+ *
+ * @op: TLBI instruction that operates on a range (has 'r' prefix)
+ * @start: The start address of the range
+ * @pages: Range as the number of pages from 'start'
+ * @stride: Flush granularity
+ * @asid: The ASID of the task (0 for IPA instructions)
+ * @tlb_level: Translation Table level hint, if known
+ * @tlbi_user: If 'true', call an additional __tlbi_user()
+ * (typically for user ASIDs). 'false' for IPA instructions
+ *
+ * When the CPU does not support TLB range operations, flush the TLB
+ * entries one by one at the granularity of 'stride'. If the TLB
+ * range ops are supported, then:
+ *
+ * 1. If 'pages' is odd, flush the first page through non-range
+ * operations;
+ *
+ * 2. For remaining pages: the minimum range granularity is decided
+ * by 'scale', so multiple range TLBI operations may be required.
+ * Start from scale = 0, flush the corresponding number of pages
+ * ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
+ * until no pages left.
+ *
+ * Note that certain ranges can be represented by either num = 31 and
+ * scale or num = 0 and scale + 1. The loop below favours the latter
+ * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
+ */
+#define __flush_tlb_range_op(op, start, pages, stride, \
+ asid, tlb_level, tlbi_user) \
+do { \
+ int num = 0; \
+ int scale = 0; \
+ unsigned long addr; \
+ \
+ while (pages > 0) { \
+ if (!system_supports_tlb_range() || \
+ pages % 2 == 1) { \
+ addr = __TLBI_VADDR(start, asid); \
+ __tlbi_level(op, addr, tlb_level); \
+ if (tlbi_user) \
+ __tlbi_user_level(op, addr, tlb_level); \
+ start += stride; \
+ pages -= stride >> PAGE_SHIFT; \
+ continue; \
+ } \
+ \
+ num = __TLBI_RANGE_NUM(pages, scale); \
+ if (num >= 0) { \
+ addr = __TLBI_VADDR_RANGE(start, asid, scale, \
+ num, tlb_level); \
+ __tlbi(r##op, addr); \
+ if (tlbi_user) \
+ __tlbi_user(r##op, addr); \
+ start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
+ pages -= __TLBI_RANGE_PAGES(num, scale); \
+ } \
+ scale++; \
+ } \
+} while (0)
+
+#define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
+ __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false)
+
static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
unsigned long stride, bool last_level,
int tlb_level)
{
- int num = 0;
- int scale = 0;
- unsigned long asid, addr, pages;
+ unsigned long asid, pages;
start = round_down(start, stride);
end = round_up(end, stride);
@@ -364,56 +427,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
dsb(ishst);
asid = ASID(vma->vm_mm);
- /*
- * When the CPU does not support TLB range operations, flush the TLB
- * entries one by one at the granularity of 'stride'. If the TLB
- * range ops are supported, then:
- *
- * 1. If 'pages' is odd, flush the first page through non-range
- * operations;
- *
- * 2. For remaining pages: the minimum range granularity is decided
- * by 'scale', so multiple range TLBI operations may be required.
- * Start from scale = 0, flush the corresponding number of pages
- * ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
- * until no pages left.
- *
- * Note that certain ranges can be represented by either num = 31 and
- * scale or num = 0 and scale + 1. The loop below favours the latter
- * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
- */
- while (pages > 0) {
- if (!system_supports_tlb_range() ||
- pages % 2 == 1) {
- addr = __TLBI_VADDR(start, asid);
- if (last_level) {
- __tlbi_level(vale1is, addr, tlb_level);
- __tlbi_user_level(vale1is, addr, tlb_level);
- } else {
- __tlbi_level(vae1is, addr, tlb_level);
- __tlbi_user_level(vae1is, addr, tlb_level);
- }
- start += stride;
- pages -= stride >> PAGE_SHIFT;
- continue;
- }
-
- num = __TLBI_RANGE_NUM(pages, scale);
- if (num >= 0) {
- addr = __TLBI_VADDR_RANGE(start, asid, scale,
- num, tlb_level);
- if (last_level) {
- __tlbi(rvale1is, addr);
- __tlbi_user(rvale1is, addr);
- } else {
- __tlbi(rvae1is, addr);
- __tlbi_user(rvae1is, addr);
- }
- start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;
- pages -= __TLBI_RANGE_PAGES(num, scale);
- }
- scale++;
- }
+ if (last_level)
+ __flush_tlb_range_op(vale1is, start, pages, stride, asid, tlb_level, true);
+ else
+ __flush_tlb_range_op(vae1is, start, pages, stride, asid, tlb_level, true);
+
dsb(ish);
mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
}
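The __flush_tlb_range_op() comment above gives the coverage of one range TLBI as (num + 1) * 2^(5*scale + 1) pages, with num capped at 30 by __TLBI_RANGE_NUM(). Worked as a stand-alone user-space sketch (constants taken from the comment, not from the header's private macros):

#include <stdio.h>

/*
 * Pages covered by one range TLBI for a given (num, scale), per the comment
 * above: (num + 1) * 2^(5*scale + 1), with num limited to 30 by
 * __TLBI_RANGE_NUM().
 */
static unsigned long range_pages(unsigned int num, unsigned int scale)
{
	return (unsigned long)(num + 1) << (5 * scale + 1);
}

int main(void)
{
	printf("%lu\n", range_pages(30, 0));	/* 62 pages */
	printf("%lu\n", range_pages(0, 1));	/* 64 pages */
	printf("%lu\n", range_pages(30, 3));	/* 2031616 pages */
	return 0;
}

Note that range_pages(31, 0) and range_pages(0, 1) would both cover 64 pages, which is why the loop favours bumping scale rather than allowing num = 31.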
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a5f533f63b60..b018ae12ff5f 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2627,6 +2627,13 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = has_cpuid_feature,
ARM64_CPUID_FIELDS(ID_AA64ISAR1_EL1, LRCPC, IMP)
},
+ {
+ .desc = "Fine Grained Traps",
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .capability = ARM64_HAS_FGT,
+ .matches = has_cpuid_feature,
+ ARM64_CPUID_FIELDS(ID_AA64MMFR0_EL1, FGT, IMP)
+ },
#ifdef CONFIG_ARM64_SME
{
.desc = "Scalable Matrix Extension",
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index aee12c75b738..3addc09f8746 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -262,9 +262,9 @@ static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
if (!len)
return;
- len = strscpy(buf, cmdline, ARRAY_SIZE(buf));
- if (len == -E2BIG)
- len = ARRAY_SIZE(buf) - 1;
+ len = min(len, ARRAY_SIZE(buf) - 1);
+ memcpy(buf, cmdline, len);
+ buf[len] = '\0';
if (strcmp(buf, "--") == 0)
return;
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index f531da6b362e..83c1e09be42e 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -25,7 +25,6 @@ menuconfig KVM
select MMU_NOTIFIER
select PREEMPT_NOTIFIERS
select HAVE_KVM_CPU_RELAX_INTERCEPT
- select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_XFER_TO_GUEST_WORK
@@ -43,6 +42,7 @@ menuconfig KVM
select SCHED_INFO
select GUEST_PERF_EVENTS if PERF_EVENTS
select INTERVAL_TREE
+ select XARRAY_MULTI
help
Support hosting virtualized guest machines.
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index d1cb298a58a0..4866b3f7b4ea 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -36,6 +36,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_nested.h>
#include <asm/kvm_pkvm.h>
#include <asm/kvm_emulate.h>
#include <asm/sections.h>
@@ -365,7 +366,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
#endif
/* Force users to call KVM_ARM_VCPU_INIT */
- vcpu->arch.target = -1;
+ vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
@@ -462,7 +463,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu_ptrauth_disable(vcpu);
kvm_arch_vcpu_load_debug_state_flags(vcpu);
- if (!cpumask_test_cpu(smp_processor_id(), vcpu->kvm->arch.supported_cpus))
+ if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
vcpu_set_on_unsupported_cpu(vcpu);
}
@@ -574,7 +575,7 @@ unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.target >= 0;
+ return vcpu_get_flag(vcpu, VCPU_INITIALIZED);
}
/*
@@ -803,6 +804,9 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
kvm_pmu_handle_pmcr(vcpu,
__vcpu_sys_reg(vcpu, PMCR_EL0));
+ if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
+ kvm_vcpu_pmu_restore_guest(vcpu);
+
if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
return kvm_vcpu_suspend(vcpu);
@@ -818,6 +822,9 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
if (likely(!vcpu_mode_is_32bit(vcpu)))
return false;
+ if (vcpu_has_nv(vcpu))
+ return true;
+
return !kvm_supports_32bit_el0();
}
@@ -1058,7 +1065,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
* invalid. The VMM can try and fix it by issuing a
* KVM_ARM_VCPU_INIT if it really wants to.
*/
- vcpu->arch.target = -1;
+ vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
ret = ARM_EXCEPTION_IL;
}
@@ -1219,8 +1226,7 @@ static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu,
{
unsigned long features = init->features[0];
- return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES) ||
- vcpu->arch.target != init->target;
+ return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
}
static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
@@ -1236,20 +1242,18 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
!bitmap_equal(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES))
goto out_unlock;
- vcpu->arch.target = init->target;
bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES);
/* Now we know what it is, we can reset it. */
ret = kvm_reset_vcpu(vcpu);
if (ret) {
- vcpu->arch.target = -1;
bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
goto out_unlock;
}
bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES);
set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags);
-
+ vcpu_set_flag(vcpu, VCPU_INITIALIZED);
out_unlock:
mutex_unlock(&kvm->arch.config_lock);
return ret;
@@ -1260,14 +1264,15 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
{
int ret;
- if (init->target != kvm_target_cpu())
+ if (init->target != KVM_ARM_TARGET_GENERIC_V8 &&
+ init->target != kvm_target_cpu())
return -EINVAL;
ret = kvm_vcpu_init_check_features(vcpu, init);
if (ret)
return ret;
- if (vcpu->arch.target == -1)
+ if (!kvm_vcpu_initialized(vcpu))
return __kvm_vcpu_set_target(vcpu, init);
if (kvm_vcpu_init_changed(vcpu, init))
@@ -1532,12 +1537,6 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
}
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
- const struct kvm_memory_slot *memslot)
-{
- kvm_flush_remote_tlbs(kvm);
-}
-
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
struct kvm_arm_device_addr *dev_addr)
{
@@ -1595,9 +1594,9 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
}
case KVM_ARM_PREFERRED_TARGET: {
- struct kvm_vcpu_init init;
-
- kvm_vcpu_preferred_target(&init);
+ struct kvm_vcpu_init init = {
+ .target = KVM_ARM_TARGET_GENERIC_V8,
+ };
if (copy_to_user(argp, &init, sizeof(init)))
return -EFAULT;
@@ -2276,30 +2275,8 @@ static int __init init_hyp_mode(void)
for_each_possible_cpu(cpu) {
struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
- unsigned long hyp_addr;
- /*
- * Allocate a contiguous HYP private VA range for the stack
- * and guard page. The allocation is also aligned based on
- * the order of its size.
- */
- err = hyp_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
- if (err) {
- kvm_err("Cannot allocate hyp stack guard page\n");
- goto out_err;
- }
-
- /*
- * Since the stack grows downwards, map the stack to the page
- * at the higher address and leave the lower guard page
- * unbacked.
- *
- * Any valid stack address now has the PAGE_SHIFT bit as 1
- * and addresses corresponding to the guard page have the
- * PAGE_SHIFT bit as 0 - this is used for overflow detection.
- */
- err = __create_hyp_mappings(hyp_addr + PAGE_SIZE, PAGE_SIZE,
- __pa(stack_page), PAGE_HYP);
+ err = create_hyp_stack(__pa(stack_page), &params->stack_hyp_va);
if (err) {
kvm_err("Cannot map hyp stack\n");
goto out_err;
@@ -2312,8 +2289,6 @@ static int __init init_hyp_mode(void)
* has been mapped in the flexible private VA space.
*/
params->stack_pa = __pa(stack_page);
-
- params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
}
for_each_possible_cpu(cpu) {
diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c
index b96662029fb1..9ced1bf0c2b7 100644
--- a/arch/arm64/kvm/emulate-nested.c
+++ b/arch/arm64/kvm/emulate-nested.c
@@ -14,6 +14,1858 @@
#include "trace.h"
+enum trap_behaviour {
+ BEHAVE_HANDLE_LOCALLY = 0,
+ BEHAVE_FORWARD_READ = BIT(0),
+ BEHAVE_FORWARD_WRITE = BIT(1),
+ BEHAVE_FORWARD_ANY = BEHAVE_FORWARD_READ | BEHAVE_FORWARD_WRITE,
+};
+
+struct trap_bits {
+ const enum vcpu_sysreg index;
+ const enum trap_behaviour behaviour;
+ const u64 value;
+ const u64 mask;
+};
+
+/* Coarse Grained Trap definitions */
+enum cgt_group_id {
+ /* Indicates no coarse trap control */
+ __RESERVED__,
+
+ /*
+ * The first batch of IDs denote coarse trapping that are used
+ * on their own instead of being part of a combination of
+ * trap controls.
+ */
+ CGT_HCR_TID1,
+ CGT_HCR_TID2,
+ CGT_HCR_TID3,
+ CGT_HCR_IMO,
+ CGT_HCR_FMO,
+ CGT_HCR_TIDCP,
+ CGT_HCR_TACR,
+ CGT_HCR_TSW,
+ CGT_HCR_TPC,
+ CGT_HCR_TPU,
+ CGT_HCR_TTLB,
+ CGT_HCR_TVM,
+ CGT_HCR_TDZ,
+ CGT_HCR_TRVM,
+ CGT_HCR_TLOR,
+ CGT_HCR_TERR,
+ CGT_HCR_APK,
+ CGT_HCR_NV,
+ CGT_HCR_NV_nNV2,
+ CGT_HCR_NV1_nNV2,
+ CGT_HCR_AT,
+ CGT_HCR_nFIEN,
+ CGT_HCR_TID4,
+ CGT_HCR_TICAB,
+ CGT_HCR_TOCU,
+ CGT_HCR_ENSCXT,
+ CGT_HCR_TTLBIS,
+ CGT_HCR_TTLBOS,
+
+ CGT_MDCR_TPMCR,
+ CGT_MDCR_TPM,
+ CGT_MDCR_TDE,
+ CGT_MDCR_TDA,
+ CGT_MDCR_TDOSA,
+ CGT_MDCR_TDRA,
+ CGT_MDCR_E2PB,
+ CGT_MDCR_TPMS,
+ CGT_MDCR_TTRF,
+ CGT_MDCR_E2TB,
+ CGT_MDCR_TDCC,
+
+ /*
+ * Anything after this point is a combination of coarse trap
+ * controls, which must all be evaluated to decide what to do.
+ */
+ __MULTIPLE_CONTROL_BITS__,
+ CGT_HCR_IMO_FMO = __MULTIPLE_CONTROL_BITS__,
+ CGT_HCR_TID2_TID4,
+ CGT_HCR_TTLB_TTLBIS,
+ CGT_HCR_TTLB_TTLBOS,
+ CGT_HCR_TVM_TRVM,
+ CGT_HCR_TPU_TICAB,
+ CGT_HCR_TPU_TOCU,
+ CGT_HCR_NV1_nNV2_ENSCXT,
+ CGT_MDCR_TPM_TPMCR,
+ CGT_MDCR_TDE_TDA,
+ CGT_MDCR_TDE_TDOSA,
+ CGT_MDCR_TDE_TDRA,
+ CGT_MDCR_TDCC_TDE_TDA,
+
+ /*
+ * Anything after this point requires a callback evaluating a
+ * complex trap condition. Ugly stuff.
+ */
+ __COMPLEX_CONDITIONS__,
+ CGT_CNTHCTL_EL1PCTEN = __COMPLEX_CONDITIONS__,
+ CGT_CNTHCTL_EL1PTEN,
+
+ /* Must be last */
+ __NR_CGT_GROUP_IDS__
+};
+
+static const struct trap_bits coarse_trap_bits[] = {
+ [CGT_HCR_TID1] = {
+ .index = HCR_EL2,
+ .value = HCR_TID1,
+ .mask = HCR_TID1,
+ .behaviour = BEHAVE_FORWARD_READ,
+ },
+ [CGT_HCR_TID2] = {
+ .index = HCR_EL2,
+ .value = HCR_TID2,
+ .mask = HCR_TID2,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TID3] = {
+ .index = HCR_EL2,
+ .value = HCR_TID3,
+ .mask = HCR_TID3,
+ .behaviour = BEHAVE_FORWARD_READ,
+ },
+ [CGT_HCR_IMO] = {
+ .index = HCR_EL2,
+ .value = HCR_IMO,
+ .mask = HCR_IMO,
+ .behaviour = BEHAVE_FORWARD_WRITE,
+ },
+ [CGT_HCR_FMO] = {
+ .index = HCR_EL2,
+ .value = HCR_FMO,
+ .mask = HCR_FMO,
+ .behaviour = BEHAVE_FORWARD_WRITE,
+ },
+ [CGT_HCR_TIDCP] = {
+ .index = HCR_EL2,
+ .value = HCR_TIDCP,
+ .mask = HCR_TIDCP,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TACR] = {
+ .index = HCR_EL2,
+ .value = HCR_TACR,
+ .mask = HCR_TACR,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TSW] = {
+ .index = HCR_EL2,
+ .value = HCR_TSW,
+ .mask = HCR_TSW,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+	[CGT_HCR_TPC] = { /* Also called TPCP when FEAT_DPB is implemented */
+ .index = HCR_EL2,
+ .value = HCR_TPC,
+ .mask = HCR_TPC,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TPU] = {
+ .index = HCR_EL2,
+ .value = HCR_TPU,
+ .mask = HCR_TPU,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TTLB] = {
+ .index = HCR_EL2,
+ .value = HCR_TTLB,
+ .mask = HCR_TTLB,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TVM] = {
+ .index = HCR_EL2,
+ .value = HCR_TVM,
+ .mask = HCR_TVM,
+ .behaviour = BEHAVE_FORWARD_WRITE,
+ },
+ [CGT_HCR_TDZ] = {
+ .index = HCR_EL2,
+ .value = HCR_TDZ,
+ .mask = HCR_TDZ,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TRVM] = {
+ .index = HCR_EL2,
+ .value = HCR_TRVM,
+ .mask = HCR_TRVM,
+ .behaviour = BEHAVE_FORWARD_READ,
+ },
+ [CGT_HCR_TLOR] = {
+ .index = HCR_EL2,
+ .value = HCR_TLOR,
+ .mask = HCR_TLOR,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TERR] = {
+ .index = HCR_EL2,
+ .value = HCR_TERR,
+ .mask = HCR_TERR,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_APK] = {
+ .index = HCR_EL2,
+ .value = 0,
+ .mask = HCR_APK,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_NV] = {
+ .index = HCR_EL2,
+ .value = HCR_NV,
+ .mask = HCR_NV,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_NV_nNV2] = {
+ .index = HCR_EL2,
+ .value = HCR_NV,
+ .mask = HCR_NV | HCR_NV2,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_NV1_nNV2] = {
+ .index = HCR_EL2,
+ .value = HCR_NV | HCR_NV1,
+ .mask = HCR_NV | HCR_NV1 | HCR_NV2,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_AT] = {
+ .index = HCR_EL2,
+ .value = HCR_AT,
+ .mask = HCR_AT,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_nFIEN] = {
+ .index = HCR_EL2,
+ .value = 0,
+ .mask = HCR_FIEN,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TID4] = {
+ .index = HCR_EL2,
+ .value = HCR_TID4,
+ .mask = HCR_TID4,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TICAB] = {
+ .index = HCR_EL2,
+ .value = HCR_TICAB,
+ .mask = HCR_TICAB,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TOCU] = {
+ .index = HCR_EL2,
+ .value = HCR_TOCU,
+ .mask = HCR_TOCU,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_ENSCXT] = {
+ .index = HCR_EL2,
+ .value = 0,
+ .mask = HCR_ENSCXT,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TTLBIS] = {
+ .index = HCR_EL2,
+ .value = HCR_TTLBIS,
+ .mask = HCR_TTLBIS,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_HCR_TTLBOS] = {
+ .index = HCR_EL2,
+ .value = HCR_TTLBOS,
+ .mask = HCR_TTLBOS,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TPMCR] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TPMCR,
+ .mask = MDCR_EL2_TPMCR,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TPM] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TPM,
+ .mask = MDCR_EL2_TPM,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TDE] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TDE,
+ .mask = MDCR_EL2_TDE,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TDA] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TDA,
+ .mask = MDCR_EL2_TDA,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TDOSA] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TDOSA,
+ .mask = MDCR_EL2_TDOSA,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TDRA] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TDRA,
+ .mask = MDCR_EL2_TDRA,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_E2PB] = {
+ .index = MDCR_EL2,
+ .value = 0,
+ .mask = BIT(MDCR_EL2_E2PB_SHIFT),
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TPMS] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TPMS,
+ .mask = MDCR_EL2_TPMS,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TTRF] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TTRF,
+ .mask = MDCR_EL2_TTRF,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_E2TB] = {
+ .index = MDCR_EL2,
+ .value = 0,
+ .mask = BIT(MDCR_EL2_E2TB_SHIFT),
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+ [CGT_MDCR_TDCC] = {
+ .index = MDCR_EL2,
+ .value = MDCR_EL2_TDCC,
+ .mask = MDCR_EL2_TDCC,
+ .behaviour = BEHAVE_FORWARD_ANY,
+ },
+};
+
+#define MCB(id, ...) \
+ [id - __MULTIPLE_CONTROL_BITS__] = \
+ (const enum cgt_group_id[]){ \
+ __VA_ARGS__, __RESERVED__ \
+ }
+
+static const enum cgt_group_id *coarse_control_combo[] = {
+ MCB(CGT_HCR_IMO_FMO, CGT_HCR_IMO, CGT_HCR_FMO),
+ MCB(CGT_HCR_TID2_TID4, CGT_HCR_TID2, CGT_HCR_TID4),
+ MCB(CGT_HCR_TTLB_TTLBIS, CGT_HCR_TTLB, CGT_HCR_TTLBIS),
+ MCB(CGT_HCR_TTLB_TTLBOS, CGT_HCR_TTLB, CGT_HCR_TTLBOS),
+ MCB(CGT_HCR_TVM_TRVM, CGT_HCR_TVM, CGT_HCR_TRVM),
+ MCB(CGT_HCR_TPU_TICAB, CGT_HCR_TPU, CGT_HCR_TICAB),
+ MCB(CGT_HCR_TPU_TOCU, CGT_HCR_TPU, CGT_HCR_TOCU),
+ MCB(CGT_HCR_NV1_nNV2_ENSCXT, CGT_HCR_NV1_nNV2, CGT_HCR_ENSCXT),
+ MCB(CGT_MDCR_TPM_TPMCR, CGT_MDCR_TPM, CGT_MDCR_TPMCR),
+ MCB(CGT_MDCR_TDE_TDA, CGT_MDCR_TDE, CGT_MDCR_TDA),
+ MCB(CGT_MDCR_TDE_TDOSA, CGT_MDCR_TDE, CGT_MDCR_TDOSA),
+ MCB(CGT_MDCR_TDE_TDRA, CGT_MDCR_TDE, CGT_MDCR_TDRA),
+ MCB(CGT_MDCR_TDCC_TDE_TDA, CGT_MDCR_TDCC, CGT_MDCR_TDE, CGT_MDCR_TDA),
+};
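
As an illustration (editorial sketch, not part of the patch itself): a combined id such as CGT_HCR_TID2_TID4 can be decomposed by walking its __RESERVED__-terminated MCB() list; the helper name demo_combo_behaviour() is hypothetical, and the per-entry check simply mirrors the value/mask comparison encoded in coarse_trap_bits[].

static enum trap_behaviour demo_combo_behaviour(struct kvm_vcpu *vcpu,
						enum cgt_group_id id)
{
	const enum cgt_group_id *entry;
	enum trap_behaviour b = BEHAVE_HANDLE_LOCALLY;

	/* Each MCB() list above is terminated by __RESERVED__ */
	entry = coarse_control_combo[id - __MULTIPLE_CONTROL_BITS__];
	for (; *entry != __RESERVED__; entry++) {
		const struct trap_bits *tb = &coarse_trap_bits[*entry];
		u64 val = __vcpu_sys_reg(vcpu, tb->index);

		/* Accumulate the behaviour of every matching control */
		if ((val & tb->mask) == tb->value)
			b |= tb->behaviour;
	}

	return b;
}
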
+
+typedef enum trap_behaviour (*complex_condition_check)(struct kvm_vcpu *);
+
+/*
+ * Warning, maximum confusion ahead.
+ *
+ * When E2H=0, CNTHCTL_EL2[1:0] are defined as EL1PCEN:EL1PCTEN
+ * When E2H=1, CNTHCTL_EL2[11:10] are defined as EL1PTEN:EL1PCTEN
+ *
+ * Note the single letter difference? Yet, the bits have the same
+ * function despite a different layout and a different name.
+ *
+ * We don't try to reconcile this mess. We just use the E2H=0 bits
+ * to generate something that is in the E2H=1 format, and live with
+ * it. You're welcome.
+ */
+static u64 get_sanitized_cnthctl(struct kvm_vcpu *vcpu)
+{
+ u64 val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
+
+ if (!vcpu_el2_e2h_is_set(vcpu))
+ val = (val & (CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN)) << 10;
+
+ return val & ((CNTHCTL_EL1PCEN | CNTHCTL_EL1PCTEN) << 10);
+}
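
To make the remap above concrete, here is a tiny standalone sketch (editorial; the DEMO_* constants are local stand-ins, not the kernel macros): with E2H=0, bits [1:0] are shifted into the E2H=1 positions [11:10].

#include <assert.h>
#include <stdint.h>

#define DEMO_EL1PCTEN	(1ULL << 0)	/* E2H=0 layout, bit 0 */
#define DEMO_EL1PCEN	(1ULL << 1)	/* E2H=0 layout, bit 1 */

int main(void)
{
	uint64_t e2h0 = DEMO_EL1PCEN | DEMO_EL1PCTEN;	/* guest's E2H=0 view */
	uint64_t e2h1 = (e2h0 & (DEMO_EL1PCEN | DEMO_EL1PCTEN)) << 10;

	/* E2H=1 layout: EL1PTEN is bit 11, EL1PCTEN is bit 10 */
	assert(e2h1 == ((1ULL << 11) | (1ULL << 10)));
	return 0;
}
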
+
+static enum trap_behaviour check_cnthctl_el1pcten(struct kvm_vcpu *vcpu)
+{
+ if (get_sanitized_cnthctl(vcpu) & (CNTHCTL_EL1PCTEN << 10))
+ return BEHAVE_HANDLE_LOCALLY;
+
+ return BEHAVE_FORWARD_ANY;
+}
+
+static enum trap_behaviour check_cnthctl_el1pten(struct kvm_vcpu *vcpu)
+{
+ if (get_sanitized_cnthctl(vcpu) & (CNTHCTL_EL1PCEN << 10))
+ return BEHAVE_HANDLE_LOCALLY;
+
+ return BEHAVE_FORWARD_ANY;
+}
+
+#define CCC(id, fn) \
+ [id - __COMPLEX_CONDITIONS__] = fn
+
+static const complex_condition_check ccc[] = {
+ CCC(CGT_CNTHCTL_EL1PCTEN, check_cnthctl_el1pcten),
+ CCC(CGT_CNTHCTL_EL1PTEN, check_cnthctl_el1pten),
+};
+
+/*
+ * Bit assignment for the trap controls. We use a 64bit word with the
+ * following layout for each trapped sysreg:
+ *
+ * [9:0] enum cgt_group_id (10 bits)
+ * [13:10] enum fgt_group_id (4 bits)
+ * [19:14] bit number in the FGT register (6 bits)
+ * [20] trap polarity (1 bit)
+ * [25:21] FG filter (5 bits)
+ * [62:26] Unused (37 bits)
+ * [63] RES0 - Must be zero, as lost on insertion in the xarray
+ */
+#define TC_CGT_BITS 10
+#define TC_FGT_BITS 4
+#define TC_FGF_BITS 5
+
+union trap_config {
+ u64 val;
+ struct {
+ unsigned long cgt:TC_CGT_BITS; /* Coarse Grained Trap id */
+ unsigned long fgt:TC_FGT_BITS; /* Fine Grained Trap id */
+ unsigned long bit:6; /* Bit number */
+ unsigned long pol:1; /* Polarity */
+ unsigned long fgf:TC_FGF_BITS; /* Fine Grained Filter */
+ unsigned long unused:37; /* Unused, should be zero */
+ unsigned long mbz:1; /* Must Be Zero */
+ };
+};
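
A standalone sketch (editorial, assuming a little-endian LP64 toolchain where bit-fields are allocated from the least significant bit) showing that the field layout matches the bit assignment documented above; the union is re-declared locally for illustration.

#include <assert.h>
#include <stdint.h>

union demo_trap_config {
	uint64_t val;
	struct {
		uint64_t cgt:10;	/* [9:0]   */
		uint64_t fgt:4;		/* [13:10] */
		uint64_t bit:6;		/* [19:14] */
		uint64_t pol:1;		/* [20]    */
		uint64_t fgf:5;		/* [25:21] */
		uint64_t unused:37;	/* [62:26] */
		uint64_t mbz:1;		/* [63]    */
	};
};

int main(void)
{
	union demo_trap_config tc = { .val = 0 };

	tc.cgt = 3;
	tc.fgt = 1;
	tc.bit = 5;
	tc.pol = 1;

	/* cgt lands in [9:0], fgt in [13:10], bit in [19:14], pol in [20] */
	assert(tc.val == (3ULL | (1ULL << 10) | (5ULL << 14) | (1ULL << 20)));
	return 0;
}
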
+
+struct encoding_to_trap_config {
+ const u32 encoding;
+ const u32 end;
+ const union trap_config tc;
+ const unsigned int line;
+};
+
+#define SR_RANGE_TRAP(sr_start, sr_end, trap_id) \
+ { \
+ .encoding = sr_start, \
+ .end = sr_end, \
+ .tc = { \
+ .cgt = trap_id, \
+ }, \
+ .line = __LINE__, \
+ }
+
+#define SR_TRAP(sr, trap_id) SR_RANGE_TRAP(sr, sr, trap_id)
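
For reference, an entry of the table below such as SR_TRAP(SYS_ACTLR_EL1, CGT_HCR_TACR) expands to roughly the following initializer (a single-register range, so .encoding and .end are identical):

	{
		.encoding	= SYS_ACTLR_EL1,
		.end		= SYS_ACTLR_EL1,
		.tc		= {
			.cgt	= CGT_HCR_TACR,
		},
		.line		= __LINE__,
	}
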
+
+/*
+ * Map encoding to trap bits for exceptions reported with EC=0x18.
+ * These must only be evaluated when running a nested hypervisor, and
+ * only when the current context is not a hypervisor context. When the
+ * trapped access matches one of the trap controls, the exception is
+ * re-injected in the nested hypervisor.
+ */
+static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
+ SR_TRAP(SYS_REVIDR_EL1, CGT_HCR_TID1),
+ SR_TRAP(SYS_AIDR_EL1, CGT_HCR_TID1),
+ SR_TRAP(SYS_SMIDR_EL1, CGT_HCR_TID1),
+ SR_TRAP(SYS_CTR_EL0, CGT_HCR_TID2),
+ SR_TRAP(SYS_CCSIDR_EL1, CGT_HCR_TID2_TID4),
+ SR_TRAP(SYS_CCSIDR2_EL1, CGT_HCR_TID2_TID4),
+ SR_TRAP(SYS_CLIDR_EL1, CGT_HCR_TID2_TID4),
+ SR_TRAP(SYS_CSSELR_EL1, CGT_HCR_TID2_TID4),
+ SR_RANGE_TRAP(SYS_ID_PFR0_EL1,
+ sys_reg(3, 0, 0, 7, 7), CGT_HCR_TID3),
+ SR_TRAP(SYS_ICC_SGI0R_EL1, CGT_HCR_IMO_FMO),
+ SR_TRAP(SYS_ICC_ASGI1R_EL1, CGT_HCR_IMO_FMO),
+ SR_TRAP(SYS_ICC_SGI1R_EL1, CGT_HCR_IMO_FMO),
+ SR_RANGE_TRAP(sys_reg(3, 0, 11, 0, 0),
+ sys_reg(3, 0, 11, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 1, 11, 0, 0),
+ sys_reg(3, 1, 11, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 2, 11, 0, 0),
+ sys_reg(3, 2, 11, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 3, 11, 0, 0),
+ sys_reg(3, 3, 11, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 4, 11, 0, 0),
+ sys_reg(3, 4, 11, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 5, 11, 0, 0),
+ sys_reg(3, 5, 11, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 6, 11, 0, 0),
+ sys_reg(3, 6, 11, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 7, 11, 0, 0),
+ sys_reg(3, 7, 11, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 0, 15, 0, 0),
+ sys_reg(3, 0, 15, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 1, 15, 0, 0),
+ sys_reg(3, 1, 15, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 2, 15, 0, 0),
+ sys_reg(3, 2, 15, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 3, 15, 0, 0),
+ sys_reg(3, 3, 15, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 4, 15, 0, 0),
+ sys_reg(3, 4, 15, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 5, 15, 0, 0),
+ sys_reg(3, 5, 15, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 6, 15, 0, 0),
+ sys_reg(3, 6, 15, 15, 7), CGT_HCR_TIDCP),
+ SR_RANGE_TRAP(sys_reg(3, 7, 15, 0, 0),
+ sys_reg(3, 7, 15, 15, 7), CGT_HCR_TIDCP),
+ SR_TRAP(SYS_ACTLR_EL1, CGT_HCR_TACR),
+ SR_TRAP(SYS_DC_ISW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_CSW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_CISW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_IGSW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_IGDSW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_CGSW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_CGDSW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_CIGSW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_CIGDSW, CGT_HCR_TSW),
+ SR_TRAP(SYS_DC_CIVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CVAP, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CVADP, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_IVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CIGVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CIGDVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_IGVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_IGDVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CGVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CGDVAC, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CGVAP, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CGDVAP, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CGVADP, CGT_HCR_TPC),
+ SR_TRAP(SYS_DC_CGDVADP, CGT_HCR_TPC),
+ SR_TRAP(SYS_IC_IVAU, CGT_HCR_TPU_TOCU),
+ SR_TRAP(SYS_IC_IALLU, CGT_HCR_TPU_TOCU),
+ SR_TRAP(SYS_IC_IALLUIS, CGT_HCR_TPU_TICAB),
+ SR_TRAP(SYS_DC_CVAU, CGT_HCR_TPU_TOCU),
+ SR_TRAP(OP_TLBI_RVAE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_RVAAE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_RVALE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_RVAALE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VMALLE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VAE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_ASIDE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VAAE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VALE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VAALE1, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_RVAE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_RVAAE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_RVALE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_RVAALE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VMALLE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VAE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_ASIDE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VAAE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VALE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_VAALE1NXS, CGT_HCR_TTLB),
+ SR_TRAP(OP_TLBI_RVAE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_RVAAE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_RVALE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_RVAALE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VMALLE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VAE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_ASIDE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VAAE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VALE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VAALE1IS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_RVAE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_RVAAE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_RVALE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_RVAALE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VMALLE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VAE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_ASIDE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VAAE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VALE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VAALE1ISNXS, CGT_HCR_TTLB_TTLBIS),
+ SR_TRAP(OP_TLBI_VMALLE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VAE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_ASIDE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VAAE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VALE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VAALE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_RVAE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_RVAAE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_RVALE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_RVAALE1OS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VMALLE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VAE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_ASIDE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VAAE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VALE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_VAALE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_RVAE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_RVAAE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_RVALE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(OP_TLBI_RVAALE1OSNXS, CGT_HCR_TTLB_TTLBOS),
+ SR_TRAP(SYS_SCTLR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_TTBR0_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_TTBR1_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_TCR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_ESR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_FAR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_AFSR0_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_AFSR1_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_MAIR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_AMAIR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_CONTEXTIDR_EL1, CGT_HCR_TVM_TRVM),
+ SR_TRAP(SYS_DC_ZVA, CGT_HCR_TDZ),
+ SR_TRAP(SYS_DC_GVA, CGT_HCR_TDZ),
+ SR_TRAP(SYS_DC_GZVA, CGT_HCR_TDZ),
+ SR_TRAP(SYS_LORSA_EL1, CGT_HCR_TLOR),
+ SR_TRAP(SYS_LOREA_EL1, CGT_HCR_TLOR),
+ SR_TRAP(SYS_LORN_EL1, CGT_HCR_TLOR),
+ SR_TRAP(SYS_LORC_EL1, CGT_HCR_TLOR),
+ SR_TRAP(SYS_LORID_EL1, CGT_HCR_TLOR),
+ SR_TRAP(SYS_ERRIDR_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERRSELR_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERXADDR_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERXCTLR_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERXFR_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERXMISC0_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERXMISC1_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERXMISC2_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERXMISC3_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_ERXSTATUS_EL1, CGT_HCR_TERR),
+ SR_TRAP(SYS_APIAKEYLO_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APIAKEYHI_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APIBKEYLO_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APIBKEYHI_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APDAKEYLO_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APDAKEYHI_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APDBKEYLO_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APDBKEYHI_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APGAKEYLO_EL1, CGT_HCR_APK),
+ SR_TRAP(SYS_APGAKEYHI_EL1, CGT_HCR_APK),
+ /* All _EL2 registers */
+ SR_RANGE_TRAP(sys_reg(3, 4, 0, 0, 0),
+ sys_reg(3, 4, 3, 15, 7), CGT_HCR_NV),
+ /* Skip the SP_EL1 encoding... */
+ SR_TRAP(SYS_SPSR_EL2, CGT_HCR_NV),
+ SR_TRAP(SYS_ELR_EL2, CGT_HCR_NV),
+ SR_RANGE_TRAP(sys_reg(3, 4, 4, 1, 1),
+ sys_reg(3, 4, 10, 15, 7), CGT_HCR_NV),
+ SR_RANGE_TRAP(sys_reg(3, 4, 12, 0, 0),
+ sys_reg(3, 4, 14, 15, 7), CGT_HCR_NV),
+ /* All _EL02, _EL12 registers */
+ SR_RANGE_TRAP(sys_reg(3, 5, 0, 0, 0),
+ sys_reg(3, 5, 10, 15, 7), CGT_HCR_NV),
+ SR_RANGE_TRAP(sys_reg(3, 5, 12, 0, 0),
+ sys_reg(3, 5, 14, 15, 7), CGT_HCR_NV),
+ SR_TRAP(OP_AT_S1E2R, CGT_HCR_NV),
+ SR_TRAP(OP_AT_S1E2W, CGT_HCR_NV),
+ SR_TRAP(OP_AT_S12E1R, CGT_HCR_NV),
+ SR_TRAP(OP_AT_S12E1W, CGT_HCR_NV),
+ SR_TRAP(OP_AT_S12E0R, CGT_HCR_NV),
+ SR_TRAP(OP_AT_S12E0W, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2E1, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2E1, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2LE1, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2LE1, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVAE2, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVALE2, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE2, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VAE2, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE1, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VALE2, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VMALLS12E1, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2E1NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2E1NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2LE1NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2LE1NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVAE2NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVALE2NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE2NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VAE2NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE1NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VALE2NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VMALLS12E1NXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2E1IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2E1IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2LE1IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2LE1IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVAE2IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVALE2IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE2IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VAE2IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE1IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VALE2IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VMALLS12E1IS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2E1ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2E1ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2LE1ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2LE1ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVAE2ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVALE2ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE2ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VAE2ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE1ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VALE2ISNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VMALLS12E1ISNXS,CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE2OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VAE2OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE1OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VALE2OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VMALLS12E1OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2E1OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2E1OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2LE1OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2LE1OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVAE2OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVALE2OS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE2OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VAE2OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_ALLE1OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VALE2OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_VMALLS12E1OSNXS,CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2E1OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2E1OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_IPAS2LE1OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RIPAS2LE1OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVAE2OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_TLBI_RVALE2OSNXS, CGT_HCR_NV),
+ SR_TRAP(OP_CPP_RCTX, CGT_HCR_NV),
+ SR_TRAP(OP_DVP_RCTX, CGT_HCR_NV),
+ SR_TRAP(OP_CFP_RCTX, CGT_HCR_NV),
+ SR_TRAP(SYS_SP_EL1, CGT_HCR_NV_nNV2),
+ SR_TRAP(SYS_VBAR_EL1, CGT_HCR_NV1_nNV2),
+ SR_TRAP(SYS_ELR_EL1, CGT_HCR_NV1_nNV2),
+ SR_TRAP(SYS_SPSR_EL1, CGT_HCR_NV1_nNV2),
+ SR_TRAP(SYS_SCXTNUM_EL1, CGT_HCR_NV1_nNV2_ENSCXT),
+ SR_TRAP(SYS_SCXTNUM_EL0, CGT_HCR_ENSCXT),
+ SR_TRAP(OP_AT_S1E1R, CGT_HCR_AT),
+ SR_TRAP(OP_AT_S1E1W, CGT_HCR_AT),
+ SR_TRAP(OP_AT_S1E0R, CGT_HCR_AT),
+ SR_TRAP(OP_AT_S1E0W, CGT_HCR_AT),
+ SR_TRAP(OP_AT_S1E1RP, CGT_HCR_AT),
+ SR_TRAP(OP_AT_S1E1WP, CGT_HCR_AT),
+ SR_TRAP(SYS_ERXPFGF_EL1, CGT_HCR_nFIEN),
+ SR_TRAP(SYS_ERXPFGCTL_EL1, CGT_HCR_nFIEN),
+ SR_TRAP(SYS_ERXPFGCDN_EL1, CGT_HCR_nFIEN),
+ SR_TRAP(SYS_PMCR_EL0, CGT_MDCR_TPM_TPMCR),
+ SR_TRAP(SYS_PMCNTENSET_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMCNTENCLR_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMOVSSET_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMOVSCLR_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMCEID0_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMCEID1_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMXEVTYPER_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMSWINC_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMSELR_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMXEVCNTR_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMCCNTR_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMUSERENR_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMINTENSET_EL1, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMINTENCLR_EL1, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMMIR_EL1, CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(0), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(1), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(2), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(3), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(4), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(5), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(6), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(7), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(8), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(9), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(10), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(11), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(12), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(13), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(14), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(15), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(16), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(17), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(18), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(19), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(20), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(21), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(22), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(23), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(24), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(25), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(26), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(27), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(28), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(29), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVCNTRn_EL0(30), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(0), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(1), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(2), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(3), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(4), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(5), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(6), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(7), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(8), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(9), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(10), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(11), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(12), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(13), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(14), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(15), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(16), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(17), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(18), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(19), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(20), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(21), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(22), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(23), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(24), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(25), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(26), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(27), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(28), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(29), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMEVTYPERn_EL0(30), CGT_MDCR_TPM),
+ SR_TRAP(SYS_PMCCFILTR_EL0, CGT_MDCR_TPM),
+ SR_TRAP(SYS_MDCCSR_EL0, CGT_MDCR_TDCC_TDE_TDA),
+ SR_TRAP(SYS_MDCCINT_EL1, CGT_MDCR_TDCC_TDE_TDA),
+ SR_TRAP(SYS_OSDTRRX_EL1, CGT_MDCR_TDCC_TDE_TDA),
+ SR_TRAP(SYS_OSDTRTX_EL1, CGT_MDCR_TDCC_TDE_TDA),
+ SR_TRAP(SYS_DBGDTR_EL0, CGT_MDCR_TDCC_TDE_TDA),
+ /*
+ * Also covers DBGDTRRX_EL0, which has the same encoding as
+ * SYS_DBGDTRTX_EL0...
+ */
+ SR_TRAP(SYS_DBGDTRTX_EL0, CGT_MDCR_TDCC_TDE_TDA),
+ SR_TRAP(SYS_MDSCR_EL1, CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_OSECCR_EL1, CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(0), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(1), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(2), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(3), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(4), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(5), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(6), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(7), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(8), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(9), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(10), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(11), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(12), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(13), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(14), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBVRn_EL1(15), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(0), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(1), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(2), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(3), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(4), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(5), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(6), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(7), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(8), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(9), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(10), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(11), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(12), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(13), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(14), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGBCRn_EL1(15), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(0), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(1), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(2), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(3), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(4), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(5), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(6), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(7), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(8), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(9), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(10), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(11), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(12), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(13), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(14), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWVRn_EL1(15), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(0), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(1), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(2), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(3), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(4), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(5), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(6), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(7), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(8), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(9), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(10), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(11), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(12), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(13), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGWCRn_EL1(14), CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGCLAIMSET_EL1, CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGCLAIMCLR_EL1, CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_DBGAUTHSTATUS_EL1, CGT_MDCR_TDE_TDA),
+ SR_TRAP(SYS_OSLAR_EL1, CGT_MDCR_TDE_TDOSA),
+ SR_TRAP(SYS_OSLSR_EL1, CGT_MDCR_TDE_TDOSA),
+ SR_TRAP(SYS_OSDLR_EL1, CGT_MDCR_TDE_TDOSA),
+ SR_TRAP(SYS_DBGPRCR_EL1, CGT_MDCR_TDE_TDOSA),
+ SR_TRAP(SYS_MDRAR_EL1, CGT_MDCR_TDE_TDRA),
+ SR_TRAP(SYS_PMBLIMITR_EL1, CGT_MDCR_E2PB),
+ SR_TRAP(SYS_PMBPTR_EL1, CGT_MDCR_E2PB),
+ SR_TRAP(SYS_PMBSR_EL1, CGT_MDCR_E2PB),
+ SR_TRAP(SYS_PMSCR_EL1, CGT_MDCR_TPMS),
+ SR_TRAP(SYS_PMSEVFR_EL1, CGT_MDCR_TPMS),
+ SR_TRAP(SYS_PMSFCR_EL1, CGT_MDCR_TPMS),
+ SR_TRAP(SYS_PMSICR_EL1, CGT_MDCR_TPMS),
+ SR_TRAP(SYS_PMSIDR_EL1, CGT_MDCR_TPMS),
+ SR_TRAP(SYS_PMSIRR_EL1, CGT_MDCR_TPMS),
+ SR_TRAP(SYS_PMSLATFR_EL1, CGT_MDCR_TPMS),
+ SR_TRAP(SYS_PMSNEVFR_EL1, CGT_MDCR_TPMS),
+ SR_TRAP(SYS_TRFCR_EL1, CGT_MDCR_TTRF),
+ SR_TRAP(SYS_TRBBASER_EL1, CGT_MDCR_E2TB),
+ SR_TRAP(SYS_TRBLIMITR_EL1, CGT_MDCR_E2TB),
+ SR_TRAP(SYS_TRBMAR_EL1, CGT_MDCR_E2TB),
+ SR_TRAP(SYS_TRBPTR_EL1, CGT_MDCR_E2TB),
+ SR_TRAP(SYS_TRBSR_EL1, CGT_MDCR_E2TB),
+ SR_TRAP(SYS_TRBTRG_EL1, CGT_MDCR_E2TB),
+ SR_TRAP(SYS_CNTP_TVAL_EL0, CGT_CNTHCTL_EL1PTEN),
+ SR_TRAP(SYS_CNTP_CVAL_EL0, CGT_CNTHCTL_EL1PTEN),
+ SR_TRAP(SYS_CNTP_CTL_EL0, CGT_CNTHCTL_EL1PTEN),
+ SR_TRAP(SYS_CNTPCT_EL0, CGT_CNTHCTL_EL1PCTEN),
+ SR_TRAP(SYS_CNTPCTSS_EL0, CGT_CNTHCTL_EL1PCTEN),
+};
+
+static DEFINE_XARRAY(sr_forward_xa);
+
+enum fgt_group_id {
+ __NO_FGT_GROUP__,
+ HFGxTR_GROUP,
+ HDFGRTR_GROUP,
+ HDFGWTR_GROUP,
+ HFGITR_GROUP,
+
+ /* Must be last */
+ __NR_FGT_GROUP_IDS__
+};
+
+enum fg_filter_id {
+ __NO_FGF__,
+ HCRX_FGTnXS,
+
+ /* Must be last */
+ __NR_FG_FILTER_IDS__
+};
+
+#define SR_FGF(sr, g, b, p, f) \
+ { \
+ .encoding = sr, \
+ .end = sr, \
+ .tc = { \
+ .fgt = g ## _GROUP, \
+ .bit = g ## _EL2_ ## b ## _SHIFT, \
+ .pol = p, \
+ .fgf = f, \
+ }, \
+ .line = __LINE__, \
+ }
+
+#define SR_FGT(sr, g, b, p) SR_FGF(sr, g, b, p, __NO_FGF__)
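
Similarly, as a worked expansion of the fine-grained macros, the first entry of the table below, SR_FGT(SYS_TPIDR2_EL0, HFGxTR, nTPIDR2_EL0, 0), is equivalent to the initializer sketched here; the 0 polarity matches the negative ("n"-prefixed) sense of that HFGxTR_EL2 bit.

	{
		.encoding	= SYS_TPIDR2_EL0,
		.end		= SYS_TPIDR2_EL0,
		.tc		= {
			.fgt	= HFGxTR_GROUP,
			.bit	= HFGxTR_EL2_nTPIDR2_EL0_SHIFT,
			.pol	= 0,
			.fgf	= __NO_FGF__,
		},
		.line		= __LINE__,
	}
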
+
+static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
+ /* HFGRTR_EL2, HFGWTR_EL2 */
+ SR_FGT(SYS_TPIDR2_EL0, HFGxTR, nTPIDR2_EL0, 0),
+ SR_FGT(SYS_SMPRI_EL1, HFGxTR, nSMPRI_EL1, 0),
+ SR_FGT(SYS_ACCDATA_EL1, HFGxTR, nACCDATA_EL1, 0),
+ SR_FGT(SYS_ERXADDR_EL1, HFGxTR, ERXADDR_EL1, 1),
+ SR_FGT(SYS_ERXPFGCDN_EL1, HFGxTR, ERXPFGCDN_EL1, 1),
+ SR_FGT(SYS_ERXPFGCTL_EL1, HFGxTR, ERXPFGCTL_EL1, 1),
+ SR_FGT(SYS_ERXPFGF_EL1, HFGxTR, ERXPFGF_EL1, 1),
+ SR_FGT(SYS_ERXMISC0_EL1, HFGxTR, ERXMISCn_EL1, 1),
+ SR_FGT(SYS_ERXMISC1_EL1, HFGxTR, ERXMISCn_EL1, 1),
+ SR_FGT(SYS_ERXMISC2_EL1, HFGxTR, ERXMISCn_EL1, 1),
+ SR_FGT(SYS_ERXMISC3_EL1, HFGxTR, ERXMISCn_EL1, 1),
+ SR_FGT(SYS_ERXSTATUS_EL1, HFGxTR, ERXSTATUS_EL1, 1),
+ SR_FGT(SYS_ERXCTLR_EL1, HFGxTR, ERXCTLR_EL1, 1),
+ SR_FGT(SYS_ERXFR_EL1, HFGxTR, ERXFR_EL1, 1),
+ SR_FGT(SYS_ERRSELR_EL1, HFGxTR, ERRSELR_EL1, 1),
+ SR_FGT(SYS_ERRIDR_EL1, HFGxTR, ERRIDR_EL1, 1),
+ SR_FGT(SYS_ICC_IGRPEN0_EL1, HFGxTR, ICC_IGRPENn_EL1, 1),
+ SR_FGT(SYS_ICC_IGRPEN1_EL1, HFGxTR, ICC_IGRPENn_EL1, 1),
+ SR_FGT(SYS_VBAR_EL1, HFGxTR, VBAR_EL1, 1),
+ SR_FGT(SYS_TTBR1_EL1, HFGxTR, TTBR1_EL1, 1),
+ SR_FGT(SYS_TTBR0_EL1, HFGxTR, TTBR0_EL1, 1),
+ SR_FGT(SYS_TPIDR_EL0, HFGxTR, TPIDR_EL0, 1),
+ SR_FGT(SYS_TPIDRRO_EL0, HFGxTR, TPIDRRO_EL0, 1),
+ SR_FGT(SYS_TPIDR_EL1, HFGxTR, TPIDR_EL1, 1),
+ SR_FGT(SYS_TCR_EL1, HFGxTR, TCR_EL1, 1),
+ SR_FGT(SYS_SCXTNUM_EL0, HFGxTR, SCXTNUM_EL0, 1),
+ SR_FGT(SYS_SCXTNUM_EL1, HFGxTR, SCXTNUM_EL1, 1),
+ SR_FGT(SYS_SCTLR_EL1, HFGxTR, SCTLR_EL1, 1),
+ SR_FGT(SYS_REVIDR_EL1, HFGxTR, REVIDR_EL1, 1),
+ SR_FGT(SYS_PAR_EL1, HFGxTR, PAR_EL1, 1),
+ SR_FGT(SYS_MPIDR_EL1, HFGxTR, MPIDR_EL1, 1),
+ SR_FGT(SYS_MIDR_EL1, HFGxTR, MIDR_EL1, 1),
+ SR_FGT(SYS_MAIR_EL1, HFGxTR, MAIR_EL1, 1),
+ SR_FGT(SYS_LORSA_EL1, HFGxTR, LORSA_EL1, 1),
+ SR_FGT(SYS_LORN_EL1, HFGxTR, LORN_EL1, 1),
+ SR_FGT(SYS_LORID_EL1, HFGxTR, LORID_EL1, 1),
+ SR_FGT(SYS_LOREA_EL1, HFGxTR, LOREA_EL1, 1),
+ SR_FGT(SYS_LORC_EL1, HFGxTR, LORC_EL1, 1),
+ SR_FGT(SYS_ISR_EL1, HFGxTR, ISR_EL1, 1),
+ SR_FGT(SYS_FAR_EL1, HFGxTR, FAR_EL1, 1),
+ SR_FGT(SYS_ESR_EL1, HFGxTR, ESR_EL1, 1),
+ SR_FGT(SYS_DCZID_EL0, HFGxTR, DCZID_EL0, 1),
+ SR_FGT(SYS_CTR_EL0, HFGxTR, CTR_EL0, 1),
+ SR_FGT(SYS_CSSELR_EL1, HFGxTR, CSSELR_EL1, 1),
+ SR_FGT(SYS_CPACR_EL1, HFGxTR, CPACR_EL1, 1),
+ SR_FGT(SYS_CONTEXTIDR_EL1, HFGxTR, CONTEXTIDR_EL1, 1),
+ SR_FGT(SYS_CLIDR_EL1, HFGxTR, CLIDR_EL1, 1),
+ SR_FGT(SYS_CCSIDR_EL1, HFGxTR, CCSIDR_EL1, 1),
+ SR_FGT(SYS_APIBKEYLO_EL1, HFGxTR, APIBKey, 1),
+ SR_FGT(SYS_APIBKEYHI_EL1, HFGxTR, APIBKey, 1),
+ SR_FGT(SYS_APIAKEYLO_EL1, HFGxTR, APIAKey, 1),
+ SR_FGT(SYS_APIAKEYHI_EL1, HFGxTR, APIAKey, 1),
+ SR_FGT(SYS_APGAKEYLO_EL1, HFGxTR, APGAKey, 1),
+ SR_FGT(SYS_APGAKEYHI_EL1, HFGxTR, APGAKey, 1),
+ SR_FGT(SYS_APDBKEYLO_EL1, HFGxTR, APDBKey, 1),
+ SR_FGT(SYS_APDBKEYHI_EL1, HFGxTR, APDBKey, 1),
+ SR_FGT(SYS_APDAKEYLO_EL1, HFGxTR, APDAKey, 1),
+ SR_FGT(SYS_APDAKEYHI_EL1, HFGxTR, APDAKey, 1),
+ SR_FGT(SYS_AMAIR_EL1, HFGxTR, AMAIR_EL1, 1),
+ SR_FGT(SYS_AIDR_EL1, HFGxTR, AIDR_EL1, 1),
+ SR_FGT(SYS_AFSR1_EL1, HFGxTR, AFSR1_EL1, 1),
+ SR_FGT(SYS_AFSR0_EL1, HFGxTR, AFSR0_EL1, 1),
+ /* HFGITR_EL2 */
+ SR_FGT(OP_BRB_IALL, HFGITR, nBRBIALL, 0),
+ SR_FGT(OP_BRB_INJ, HFGITR, nBRBINJ, 0),
+ SR_FGT(SYS_DC_CVAC, HFGITR, DCCVAC, 1),
+ SR_FGT(SYS_DC_CGVAC, HFGITR, DCCVAC, 1),
+ SR_FGT(SYS_DC_CGDVAC, HFGITR, DCCVAC, 1),
+ SR_FGT(OP_CPP_RCTX, HFGITR, CPPRCTX, 1),
+ SR_FGT(OP_DVP_RCTX, HFGITR, DVPRCTX, 1),
+ SR_FGT(OP_CFP_RCTX, HFGITR, CFPRCTX, 1),
+ SR_FGT(OP_TLBI_VAALE1, HFGITR, TLBIVAALE1, 1),
+ SR_FGT(OP_TLBI_VALE1, HFGITR, TLBIVALE1, 1),
+ SR_FGT(OP_TLBI_VAAE1, HFGITR, TLBIVAAE1, 1),
+ SR_FGT(OP_TLBI_ASIDE1, HFGITR, TLBIASIDE1, 1),
+ SR_FGT(OP_TLBI_VAE1, HFGITR, TLBIVAE1, 1),
+ SR_FGT(OP_TLBI_VMALLE1, HFGITR, TLBIVMALLE1, 1),
+ SR_FGT(OP_TLBI_RVAALE1, HFGITR, TLBIRVAALE1, 1),
+ SR_FGT(OP_TLBI_RVALE1, HFGITR, TLBIRVALE1, 1),
+ SR_FGT(OP_TLBI_RVAAE1, HFGITR, TLBIRVAAE1, 1),
+ SR_FGT(OP_TLBI_RVAE1, HFGITR, TLBIRVAE1, 1),
+ SR_FGT(OP_TLBI_RVAALE1IS, HFGITR, TLBIRVAALE1IS, 1),
+ SR_FGT(OP_TLBI_RVALE1IS, HFGITR, TLBIRVALE1IS, 1),
+ SR_FGT(OP_TLBI_RVAAE1IS, HFGITR, TLBIRVAAE1IS, 1),
+ SR_FGT(OP_TLBI_RVAE1IS, HFGITR, TLBIRVAE1IS, 1),
+ SR_FGT(OP_TLBI_VAALE1IS, HFGITR, TLBIVAALE1IS, 1),
+ SR_FGT(OP_TLBI_VALE1IS, HFGITR, TLBIVALE1IS, 1),
+ SR_FGT(OP_TLBI_VAAE1IS, HFGITR, TLBIVAAE1IS, 1),
+ SR_FGT(OP_TLBI_ASIDE1IS, HFGITR, TLBIASIDE1IS, 1),
+ SR_FGT(OP_TLBI_VAE1IS, HFGITR, TLBIVAE1IS, 1),
+ SR_FGT(OP_TLBI_VMALLE1IS, HFGITR, TLBIVMALLE1IS, 1),
+ SR_FGT(OP_TLBI_RVAALE1OS, HFGITR, TLBIRVAALE1OS, 1),
+ SR_FGT(OP_TLBI_RVALE1OS, HFGITR, TLBIRVALE1OS, 1),
+ SR_FGT(OP_TLBI_RVAAE1OS, HFGITR, TLBIRVAAE1OS, 1),
+ SR_FGT(OP_TLBI_RVAE1OS, HFGITR, TLBIRVAE1OS, 1),
+ SR_FGT(OP_TLBI_VAALE1OS, HFGITR, TLBIVAALE1OS, 1),
+ SR_FGT(OP_TLBI_VALE1OS, HFGITR, TLBIVALE1OS, 1),
+ SR_FGT(OP_TLBI_VAAE1OS, HFGITR, TLBIVAAE1OS, 1),
+ SR_FGT(OP_TLBI_ASIDE1OS, HFGITR, TLBIASIDE1OS, 1),
+ SR_FGT(OP_TLBI_VAE1OS, HFGITR, TLBIVAE1OS, 1),
+ SR_FGT(OP_TLBI_VMALLE1OS, HFGITR, TLBIVMALLE1OS, 1),
+ /* nXS variants must be checked against HCRX_EL2.FGTnXS */
+ SR_FGF(OP_TLBI_VAALE1NXS, HFGITR, TLBIVAALE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VALE1NXS, HFGITR, TLBIVALE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VAAE1NXS, HFGITR, TLBIVAAE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_ASIDE1NXS, HFGITR, TLBIASIDE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VAE1NXS, HFGITR, TLBIVAE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VMALLE1NXS, HFGITR, TLBIVMALLE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAALE1NXS, HFGITR, TLBIRVAALE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVALE1NXS, HFGITR, TLBIRVALE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAAE1NXS, HFGITR, TLBIRVAAE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAE1NXS, HFGITR, TLBIRVAE1, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAALE1ISNXS, HFGITR, TLBIRVAALE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVALE1ISNXS, HFGITR, TLBIRVALE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAAE1ISNXS, HFGITR, TLBIRVAAE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAE1ISNXS, HFGITR, TLBIRVAE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VAALE1ISNXS, HFGITR, TLBIVAALE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VALE1ISNXS, HFGITR, TLBIVALE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VAAE1ISNXS, HFGITR, TLBIVAAE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_ASIDE1ISNXS, HFGITR, TLBIASIDE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VAE1ISNXS, HFGITR, TLBIVAE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VMALLE1ISNXS, HFGITR, TLBIVMALLE1IS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAALE1OSNXS, HFGITR, TLBIRVAALE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVALE1OSNXS, HFGITR, TLBIRVALE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAAE1OSNXS, HFGITR, TLBIRVAAE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_RVAE1OSNXS, HFGITR, TLBIRVAE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VAALE1OSNXS, HFGITR, TLBIVAALE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VALE1OSNXS, HFGITR, TLBIVALE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VAAE1OSNXS, HFGITR, TLBIVAAE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_ASIDE1OSNXS, HFGITR, TLBIASIDE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VAE1OSNXS, HFGITR, TLBIVAE1OS, 1, HCRX_FGTnXS),
+ SR_FGF(OP_TLBI_VMALLE1OSNXS, HFGITR, TLBIVMALLE1OS, 1, HCRX_FGTnXS),
+ SR_FGT(OP_AT_S1E1WP, HFGITR, ATS1E1WP, 1),
+ SR_FGT(OP_AT_S1E1RP, HFGITR, ATS1E1RP, 1),
+ SR_FGT(OP_AT_S1E0W, HFGITR, ATS1E0W, 1),
+ SR_FGT(OP_AT_S1E0R, HFGITR, ATS1E0R, 1),
+ SR_FGT(OP_AT_S1E1W, HFGITR, ATS1E1W, 1),
+ SR_FGT(OP_AT_S1E1R, HFGITR, ATS1E1R, 1),
+ SR_FGT(SYS_DC_ZVA, HFGITR, DCZVA, 1),
+ SR_FGT(SYS_DC_GVA, HFGITR, DCZVA, 1),
+ SR_FGT(SYS_DC_GZVA, HFGITR, DCZVA, 1),
+ SR_FGT(SYS_DC_CIVAC, HFGITR, DCCIVAC, 1),
+ SR_FGT(SYS_DC_CIGVAC, HFGITR, DCCIVAC, 1),
+ SR_FGT(SYS_DC_CIGDVAC, HFGITR, DCCIVAC, 1),
+ SR_FGT(SYS_DC_CVADP, HFGITR, DCCVADP, 1),
+ SR_FGT(SYS_DC_CGVADP, HFGITR, DCCVADP, 1),
+ SR_FGT(SYS_DC_CGDVADP, HFGITR, DCCVADP, 1),
+ SR_FGT(SYS_DC_CVAP, HFGITR, DCCVAP, 1),
+ SR_FGT(SYS_DC_CGVAP, HFGITR, DCCVAP, 1),
+ SR_FGT(SYS_DC_CGDVAP, HFGITR, DCCVAP, 1),
+ SR_FGT(SYS_DC_CVAU, HFGITR, DCCVAU, 1),
+ SR_FGT(SYS_DC_CISW, HFGITR, DCCISW, 1),
+ SR_FGT(SYS_DC_CIGSW, HFGITR, DCCISW, 1),
+ SR_FGT(SYS_DC_CIGDSW, HFGITR, DCCISW, 1),
+ SR_FGT(SYS_DC_CSW, HFGITR, DCCSW, 1),
+ SR_FGT(SYS_DC_CGSW, HFGITR, DCCSW, 1),
+ SR_FGT(SYS_DC_CGDSW, HFGITR, DCCSW, 1),
+ SR_FGT(SYS_DC_ISW, HFGITR, DCISW, 1),
+ SR_FGT(SYS_DC_IGSW, HFGITR, DCISW, 1),
+ SR_FGT(SYS_DC_IGDSW, HFGITR, DCISW, 1),
+ SR_FGT(SYS_DC_IVAC, HFGITR, DCIVAC, 1),
+ SR_FGT(SYS_DC_IGVAC, HFGITR, DCIVAC, 1),
+ SR_FGT(SYS_DC_IGDVAC, HFGITR, DCIVAC, 1),
+ SR_FGT(SYS_IC_IVAU, HFGITR, ICIVAU, 1),
+ SR_FGT(SYS_IC_IALLU, HFGITR, ICIALLU, 1),
+ SR_FGT(SYS_IC_IALLUIS, HFGITR, ICIALLUIS, 1),
+ /* HDFGRTR_EL2 */
+ SR_FGT(SYS_PMBIDR_EL1, HDFGRTR, PMBIDR_EL1, 1),
+ SR_FGT(SYS_PMSNEVFR_EL1, HDFGRTR, nPMSNEVFR_EL1, 0),
+ SR_FGT(SYS_BRBINF_EL1(0), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(1), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(2), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(3), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(4), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(5), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(6), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(7), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(8), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(9), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(10), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(11), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(12), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(13), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(14), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(15), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(16), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(17), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(18), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(19), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(20), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(21), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(22), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(23), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(24), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(25), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(26), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(27), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(28), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(29), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(30), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINF_EL1(31), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBINFINJ_EL1, HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(0), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(1), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(2), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(3), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(4), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(5), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(6), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(7), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(8), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(9), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(10), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(11), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(12), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(13), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(14), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(15), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(16), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(17), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(18), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(19), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(20), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(21), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(22), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(23), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(24), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(25), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(26), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(27), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(28), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(29), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(30), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRC_EL1(31), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBSRCINJ_EL1, HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(0), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(1), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(2), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(3), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(4), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(5), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(6), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(7), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(8), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(9), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(10), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(11), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(12), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(13), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(14), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(15), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(16), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(17), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(18), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(19), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(20), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(21), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(22), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(23), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(24), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(25), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(26), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(27), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(28), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(29), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(30), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGT_EL1(31), HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTGTINJ_EL1, HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBTS_EL1, HDFGRTR, nBRBDATA, 0),
+ SR_FGT(SYS_BRBCR_EL1, HDFGRTR, nBRBCTL, 0),
+ SR_FGT(SYS_BRBFCR_EL1, HDFGRTR, nBRBCTL, 0),
+ SR_FGT(SYS_BRBIDR0_EL1, HDFGRTR, nBRBIDR, 0),
+ SR_FGT(SYS_PMCEID0_EL0, HDFGRTR, PMCEIDn_EL0, 1),
+ SR_FGT(SYS_PMCEID1_EL0, HDFGRTR, PMCEIDn_EL0, 1),
+ SR_FGT(SYS_PMUSERENR_EL0, HDFGRTR, PMUSERENR_EL0, 1),
+ SR_FGT(SYS_TRBTRG_EL1, HDFGRTR, TRBTRG_EL1, 1),
+ SR_FGT(SYS_TRBSR_EL1, HDFGRTR, TRBSR_EL1, 1),
+ SR_FGT(SYS_TRBPTR_EL1, HDFGRTR, TRBPTR_EL1, 1),
+ SR_FGT(SYS_TRBMAR_EL1, HDFGRTR, TRBMAR_EL1, 1),
+ SR_FGT(SYS_TRBLIMITR_EL1, HDFGRTR, TRBLIMITR_EL1, 1),
+ SR_FGT(SYS_TRBIDR_EL1, HDFGRTR, TRBIDR_EL1, 1),
+ SR_FGT(SYS_TRBBASER_EL1, HDFGRTR, TRBBASER_EL1, 1),
+ SR_FGT(SYS_TRCVICTLR, HDFGRTR, TRCVICTLR, 1),
+ SR_FGT(SYS_TRCSTATR, HDFGRTR, TRCSTATR, 1),
+ SR_FGT(SYS_TRCSSCSR(0), HDFGRTR, TRCSSCSRn, 1),
+ SR_FGT(SYS_TRCSSCSR(1), HDFGRTR, TRCSSCSRn, 1),
+ SR_FGT(SYS_TRCSSCSR(2), HDFGRTR, TRCSSCSRn, 1),
+ SR_FGT(SYS_TRCSSCSR(3), HDFGRTR, TRCSSCSRn, 1),
+ SR_FGT(SYS_TRCSSCSR(4), HDFGRTR, TRCSSCSRn, 1),
+ SR_FGT(SYS_TRCSSCSR(5), HDFGRTR, TRCSSCSRn, 1),
+ SR_FGT(SYS_TRCSSCSR(6), HDFGRTR, TRCSSCSRn, 1),
+ SR_FGT(SYS_TRCSSCSR(7), HDFGRTR, TRCSSCSRn, 1),
+ SR_FGT(SYS_TRCSEQSTR, HDFGRTR, TRCSEQSTR, 1),
+ SR_FGT(SYS_TRCPRGCTLR, HDFGRTR, TRCPRGCTLR, 1),
+ SR_FGT(SYS_TRCOSLSR, HDFGRTR, TRCOSLSR, 1),
+ SR_FGT(SYS_TRCIMSPEC(0), HDFGRTR, TRCIMSPECn, 1),
+ SR_FGT(SYS_TRCIMSPEC(1), HDFGRTR, TRCIMSPECn, 1),
+ SR_FGT(SYS_TRCIMSPEC(2), HDFGRTR, TRCIMSPECn, 1),
+ SR_FGT(SYS_TRCIMSPEC(3), HDFGRTR, TRCIMSPECn, 1),
+ SR_FGT(SYS_TRCIMSPEC(4), HDFGRTR, TRCIMSPECn, 1),
+ SR_FGT(SYS_TRCIMSPEC(5), HDFGRTR, TRCIMSPECn, 1),
+ SR_FGT(SYS_TRCIMSPEC(6), HDFGRTR, TRCIMSPECn, 1),
+ SR_FGT(SYS_TRCIMSPEC(7), HDFGRTR, TRCIMSPECn, 1),
+ SR_FGT(SYS_TRCDEVARCH, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCDEVID, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR0, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR1, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR2, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR3, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR4, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR5, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR6, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR7, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR8, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR9, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR10, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR11, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR12, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCIDR13, HDFGRTR, TRCID, 1),
+ SR_FGT(SYS_TRCCNTVR(0), HDFGRTR, TRCCNTVRn, 1),
+ SR_FGT(SYS_TRCCNTVR(1), HDFGRTR, TRCCNTVRn, 1),
+ SR_FGT(SYS_TRCCNTVR(2), HDFGRTR, TRCCNTVRn, 1),
+ SR_FGT(SYS_TRCCNTVR(3), HDFGRTR, TRCCNTVRn, 1),
+ SR_FGT(SYS_TRCCLAIMCLR, HDFGRTR, TRCCLAIM, 1),
+ SR_FGT(SYS_TRCCLAIMSET, HDFGRTR, TRCCLAIM, 1),
+ SR_FGT(SYS_TRCAUXCTLR, HDFGRTR, TRCAUXCTLR, 1),
+ SR_FGT(SYS_TRCAUTHSTATUS, HDFGRTR, TRCAUTHSTATUS, 1),
+ SR_FGT(SYS_TRCACATR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(4), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(5), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(6), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(7), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(8), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(9), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(10), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(11), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(12), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(13), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(14), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACATR(15), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(4), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(5), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(6), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(7), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(8), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(9), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(10), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(11), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(12), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(13), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(14), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCACVR(15), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCBBCTLR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCCCTLR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCCTLR0, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCCTLR1, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCVR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCVR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCVR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCVR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCVR(4), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCVR(5), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCVR(6), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCIDCVR(7), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCNTCTLR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCNTCTLR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCNTCTLR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCNTCTLR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCNTRLDVR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCNTRLDVR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCNTRLDVR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCNTRLDVR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCCONFIGR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCEVENTCTL0R, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCEVENTCTL1R, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCEXTINSELR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCEXTINSELR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCEXTINSELR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCEXTINSELR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCQCTLR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(4), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(5), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(6), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(7), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(8), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(9), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(10), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(11), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(12), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(13), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(14), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(15), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(16), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(17), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(18), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(19), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(20), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(21), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(22), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(23), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(24), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(25), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(26), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(27), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(28), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(29), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(30), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSCTLR(31), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCRSR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSEQEVR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSEQEVR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSEQEVR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSEQRSTEVR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSCCR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSCCR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSCCR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSCCR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSCCR(4), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSCCR(5), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSCCR(6), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSCCR(7), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSPCICR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSPCICR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSPCICR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSPCICR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSPCICR(4), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSPCICR(5), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSPCICR(6), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSSPCICR(7), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSTALLCTLR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCSYNCPR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCTRACEIDR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCTSCTLR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVIIECTLR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVIPCSSCTLR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVISSCTLR, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCCTLR0, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCCTLR1, HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCVR(0), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCVR(1), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCVR(2), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCVR(3), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCVR(4), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCVR(5), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCVR(6), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_TRCVMIDCVR(7), HDFGRTR, TRC, 1),
+ SR_FGT(SYS_PMSLATFR_EL1, HDFGRTR, PMSLATFR_EL1, 1),
+ SR_FGT(SYS_PMSIRR_EL1, HDFGRTR, PMSIRR_EL1, 1),
+ SR_FGT(SYS_PMSIDR_EL1, HDFGRTR, PMSIDR_EL1, 1),
+ SR_FGT(SYS_PMSICR_EL1, HDFGRTR, PMSICR_EL1, 1),
+ SR_FGT(SYS_PMSFCR_EL1, HDFGRTR, PMSFCR_EL1, 1),
+ SR_FGT(SYS_PMSEVFR_EL1, HDFGRTR, PMSEVFR_EL1, 1),
+ SR_FGT(SYS_PMSCR_EL1, HDFGRTR, PMSCR_EL1, 1),
+ SR_FGT(SYS_PMBSR_EL1, HDFGRTR, PMBSR_EL1, 1),
+ SR_FGT(SYS_PMBPTR_EL1, HDFGRTR, PMBPTR_EL1, 1),
+ SR_FGT(SYS_PMBLIMITR_EL1, HDFGRTR, PMBLIMITR_EL1, 1),
+ SR_FGT(SYS_PMMIR_EL1, HDFGRTR, PMMIR_EL1, 1),
+ SR_FGT(SYS_PMSELR_EL0, HDFGRTR, PMSELR_EL0, 1),
+ SR_FGT(SYS_PMOVSCLR_EL0, HDFGRTR, PMOVS, 1),
+ SR_FGT(SYS_PMOVSSET_EL0, HDFGRTR, PMOVS, 1),
+ SR_FGT(SYS_PMINTENCLR_EL1, HDFGRTR, PMINTEN, 1),
+ SR_FGT(SYS_PMINTENSET_EL1, HDFGRTR, PMINTEN, 1),
+ SR_FGT(SYS_PMCNTENCLR_EL0, HDFGRTR, PMCNTEN, 1),
+ SR_FGT(SYS_PMCNTENSET_EL0, HDFGRTR, PMCNTEN, 1),
+ SR_FGT(SYS_PMCCNTR_EL0, HDFGRTR, PMCCNTR_EL0, 1),
+ SR_FGT(SYS_PMCCFILTR_EL0, HDFGRTR, PMCCFILTR_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(0), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(1), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(2), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(3), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(4), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(5), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(6), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(7), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(8), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(9), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(10), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(11), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(12), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(13), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(14), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(15), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(16), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(17), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(18), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(19), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(20), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(21), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(22), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(23), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(24), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(25), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(26), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(27), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(28), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(29), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVTYPERn_EL0(30), HDFGRTR, PMEVTYPERn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(0), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(1), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(2), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(3), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(4), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(5), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(6), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(7), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(8), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(9), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(10), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(11), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(12), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(13), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(14), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(15), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(16), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(17), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(18), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(19), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(20), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(21), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(22), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(23), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(24), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(25), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(26), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(27), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(28), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(29), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_PMEVCNTRn_EL0(30), HDFGRTR, PMEVCNTRn_EL0, 1),
+ SR_FGT(SYS_OSDLR_EL1, HDFGRTR, OSDLR_EL1, 1),
+ SR_FGT(SYS_OSECCR_EL1, HDFGRTR, OSECCR_EL1, 1),
+ SR_FGT(SYS_OSLSR_EL1, HDFGRTR, OSLSR_EL1, 1),
+ SR_FGT(SYS_DBGPRCR_EL1, HDFGRTR, DBGPRCR_EL1, 1),
+ SR_FGT(SYS_DBGAUTHSTATUS_EL1, HDFGRTR, DBGAUTHSTATUS_EL1, 1),
+ SR_FGT(SYS_DBGCLAIMSET_EL1, HDFGRTR, DBGCLAIM, 1),
+ SR_FGT(SYS_DBGCLAIMCLR_EL1, HDFGRTR, DBGCLAIM, 1),
+ SR_FGT(SYS_MDSCR_EL1, HDFGRTR, MDSCR_EL1, 1),
+ /*
+ * The trap bits capture *64* debug registers per bit, but the
+ * ARM ARM only describes the encoding for the first 16, and
+ * we don't really support more than that anyway.
+ */
+ SR_FGT(SYS_DBGWVRn_EL1(0), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(1), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(2), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(3), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(4), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(5), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(6), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(7), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(8), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(9), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(10), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(11), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(12), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(13), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(14), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWVRn_EL1(15), HDFGRTR, DBGWVRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(0), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(1), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(2), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(3), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(4), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(5), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(6), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(7), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(8), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(9), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(10), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(11), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(12), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(13), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(14), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGWCRn_EL1(15), HDFGRTR, DBGWCRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(0), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(1), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(2), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(3), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(4), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(5), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(6), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(7), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(8), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(9), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(10), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(11), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(12), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(13), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(14), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBVRn_EL1(15), HDFGRTR, DBGBVRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(0), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(1), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(2), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(3), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(4), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(5), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(6), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(7), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(8), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(9), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(10), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(11), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(12), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(13), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(14), HDFGRTR, DBGBCRn_EL1, 1),
+ SR_FGT(SYS_DBGBCRn_EL1(15), HDFGRTR, DBGBCRn_EL1, 1),
+ /*
+ * HDFGWTR_EL2
+ *
+ * Although the HDFGRTR_EL2 and HDFGWTR_EL2 registers largely
+ * overlap in their bit assignment, a number of bits are RES0 on
+ * one side and an actual trap bit on the other. The policy chosen
+ * here is to describe all the read-side mappings and only the
+ * write-side mappings that differ from the read side; the trap
+ * handler then picks the correct shadow register based on the
+ * access type.
+ */
+ SR_FGT(SYS_TRFCR_EL1, HDFGWTR, TRFCR_EL1, 1),
+ SR_FGT(SYS_TRCOSLAR, HDFGWTR, TRCOSLAR, 1),
+ SR_FGT(SYS_PMCR_EL0, HDFGWTR, PMCR_EL0, 1),
+ SR_FGT(SYS_PMSWINC_EL0, HDFGWTR, PMSWINC_EL0, 1),
+ SR_FGT(SYS_OSLAR_EL1, HDFGWTR, OSLAR_EL1, 1),
+};
+
+static union trap_config get_trap_config(u32 sysreg)
+{
+ return (union trap_config) {
+ .val = xa_to_value(xa_load(&sr_forward_xa, sysreg)),
+ };
+}
+
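The lookup above works because the whole trap descriptor is packed into an xarray value: every field of union trap_config must fit in a single pointer-sized word, which is what the BUILD_BUG_ON() and BIT(63) checks below enforce against the TC_CGT_BITS/TC_FGT_BITS/TC_FGF_BITS limits. A minimal sketch of such a packed descriptor, reusing the field names seen in the code but with purely illustrative widths (the real layout is defined earlier in emulate-nested.c):

#include <linux/types.h>

/* Illustrative only -- the widths are assumptions, not the kernel's layout. */
union trap_config_sketch {
	u64 val;
	struct {
		u64 cgt:10;	/* coarse-grained trap descriptor id    */
		u64 fgt:4;	/* fine-grained trap group              */
		u64 bit:6;	/* bit position in the FGT register     */
		u64 pol:1;	/* polarity of that bit                 */
		u64 fgf:5;	/* fine-grained filter                  */
		u64 unused:37;
		u64 mbz:1;	/* must be zero, see the BIT(63) check  */
	};
};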
+static __init void print_nv_trap_error(const struct encoding_to_trap_config *tc,
+ const char *type, int err)
+{
+ kvm_err("%s line %d encoding range "
+ "(%d, %d, %d, %d, %d) - (%d, %d, %d, %d, %d) (err=%d)\n",
+ type, tc->line,
+ sys_reg_Op0(tc->encoding), sys_reg_Op1(tc->encoding),
+ sys_reg_CRn(tc->encoding), sys_reg_CRm(tc->encoding),
+ sys_reg_Op2(tc->encoding),
+ sys_reg_Op0(tc->end), sys_reg_Op1(tc->end),
+ sys_reg_CRn(tc->end), sys_reg_CRm(tc->end),
+ sys_reg_Op2(tc->end),
+ err);
+}
+
+int __init populate_nv_trap_config(void)
+{
+ int ret = 0;
+
+ BUILD_BUG_ON(sizeof(union trap_config) != sizeof(void *));
+ BUILD_BUG_ON(__NR_CGT_GROUP_IDS__ > BIT(TC_CGT_BITS));
+ BUILD_BUG_ON(__NR_FGT_GROUP_IDS__ > BIT(TC_FGT_BITS));
+ BUILD_BUG_ON(__NR_FG_FILTER_IDS__ > BIT(TC_FGF_BITS));
+
+ for (int i = 0; i < ARRAY_SIZE(encoding_to_cgt); i++) {
+ const struct encoding_to_trap_config *cgt = &encoding_to_cgt[i];
+ void *prev;
+
+ if (cgt->tc.val & BIT(63)) {
+ kvm_err("CGT[%d] has MBZ bit set\n", i);
+ ret = -EINVAL;
+ }
+
+ if (cgt->encoding != cgt->end) {
+ prev = xa_store_range(&sr_forward_xa,
+ cgt->encoding, cgt->end,
+ xa_mk_value(cgt->tc.val),
+ GFP_KERNEL);
+ } else {
+ prev = xa_store(&sr_forward_xa, cgt->encoding,
+ xa_mk_value(cgt->tc.val), GFP_KERNEL);
+ if (prev && !xa_is_err(prev)) {
+ ret = -EINVAL;
+ print_nv_trap_error(cgt, "Duplicate CGT", ret);
+ }
+ }
+
+ if (xa_is_err(prev)) {
+ ret = xa_err(prev);
+ print_nv_trap_error(cgt, "Failed CGT insertion", ret);
+ }
+ }
+
+ kvm_info("nv: %ld coarse grained trap handlers\n",
+ ARRAY_SIZE(encoding_to_cgt));
+
+ if (!cpus_have_final_cap(ARM64_HAS_FGT))
+ goto check_mcb;
+
+ for (int i = 0; i < ARRAY_SIZE(encoding_to_fgt); i++) {
+ const struct encoding_to_trap_config *fgt = &encoding_to_fgt[i];
+ union trap_config tc;
+
+ if (fgt->tc.fgt >= __NR_FGT_GROUP_IDS__) {
+ ret = -EINVAL;
+ print_nv_trap_error(fgt, "Invalid FGT", ret);
+ }
+
+ tc = get_trap_config(fgt->encoding);
+
+ if (tc.fgt) {
+ ret = -EINVAL;
+ print_nv_trap_error(fgt, "Duplicate FGT", ret);
+ }
+
+ tc.val |= fgt->tc.val;
+ xa_store(&sr_forward_xa, fgt->encoding,
+ xa_mk_value(tc.val), GFP_KERNEL);
+ }
+
+ kvm_info("nv: %ld fine grained trap handlers\n",
+ ARRAY_SIZE(encoding_to_fgt));
+
+check_mcb:
+ for (int id = __MULTIPLE_CONTROL_BITS__; id < __COMPLEX_CONDITIONS__; id++) {
+ const enum cgt_group_id *cgids;
+
+ cgids = coarse_control_combo[id - __MULTIPLE_CONTROL_BITS__];
+
+ for (int i = 0; cgids[i] != __RESERVED__; i++) {
+ if (cgids[i] >= __MULTIPLE_CONTROL_BITS__) {
+ kvm_err("Recursive MCB %d/%d\n", id, cgids[i]);
+ ret = -EINVAL;
+ }
+ }
+ }
+
+ if (ret)
+ xa_destroy(&sr_forward_xa);
+
+ return ret;
+}
+
+static enum trap_behaviour get_behaviour(struct kvm_vcpu *vcpu,
+ const struct trap_bits *tb)
+{
+ enum trap_behaviour b = BEHAVE_HANDLE_LOCALLY;
+ u64 val;
+
+ val = __vcpu_sys_reg(vcpu, tb->index);
+ if ((val & tb->mask) == tb->value)
+ b |= tb->behaviour;
+
+ return b;
+}
+
+static enum trap_behaviour __compute_trap_behaviour(struct kvm_vcpu *vcpu,
+ const enum cgt_group_id id,
+ enum trap_behaviour b)
+{
+ switch (id) {
+ const enum cgt_group_id *cgids;
+
+ case __RESERVED__ ... __MULTIPLE_CONTROL_BITS__ - 1:
+ if (likely(id != __RESERVED__))
+ b |= get_behaviour(vcpu, &coarse_trap_bits[id]);
+ break;
+ case __MULTIPLE_CONTROL_BITS__ ... __COMPLEX_CONDITIONS__ - 1:
+ /* Yes, this is recursive. Don't do anything stupid. */
+ cgids = coarse_control_combo[id - __MULTIPLE_CONTROL_BITS__];
+ for (int i = 0; cgids[i] != __RESERVED__; i++)
+ b |= __compute_trap_behaviour(vcpu, cgids[i], b);
+ break;
+ default:
+ if (ARRAY_SIZE(ccc))
+ b |= ccc[id - __COMPLEX_CONDITIONS__](vcpu);
+ break;
+ }
+
+ return b;
+}
+
+static enum trap_behaviour compute_trap_behaviour(struct kvm_vcpu *vcpu,
+ const union trap_config tc)
+{
+ enum trap_behaviour b = BEHAVE_HANDLE_LOCALLY;
+
+ return __compute_trap_behaviour(vcpu, tc.cgt, b);
+}
+
+static bool check_fgt_bit(u64 val, const union trap_config tc)
+{
+ return ((val >> tc.bit) & 1) == tc.pol;
+}
+
+#define sanitised_sys_reg(vcpu, reg) \
+ ({ \
+ u64 __val; \
+ __val = __vcpu_sys_reg(vcpu, reg); \
+ __val &= ~__ ## reg ## _RES0; \
+ (__val); \
+ })
+
+bool __check_nv_sr_forward(struct kvm_vcpu *vcpu)
+{
+ union trap_config tc;
+ enum trap_behaviour b;
+ bool is_read;
+ u32 sysreg;
+ u64 esr, val;
+
+ if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
+ return false;
+
+ esr = kvm_vcpu_get_esr(vcpu);
+ sysreg = esr_sys64_to_sysreg(esr);
+ is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;
+
+ tc = get_trap_config(sysreg);
+
+ /*
+ * A value of 0 for the whole entry means that we know nothing
+ * for this sysreg, and that it cannot be re-injected into the
+ * nested hypervisor. In this situation, let's cut it short.
+ *
+ * Note that ultimately, we could also make use of the xarray
+ * to store the index of the sysreg in the local descriptor
+ * array, avoiding another search... Hint, hint...
+ */
+ if (!tc.val)
+ return false;
+
+ switch ((enum fgt_group_id)tc.fgt) {
+ case __NO_FGT_GROUP__:
+ break;
+
+ case HFGxTR_GROUP:
+ if (is_read)
+ val = sanitised_sys_reg(vcpu, HFGRTR_EL2);
+ else
+ val = sanitised_sys_reg(vcpu, HFGWTR_EL2);
+ break;
+
+ case HDFGRTR_GROUP:
+ case HDFGWTR_GROUP:
+ if (is_read)
+ val = sanitised_sys_reg(vcpu, HDFGRTR_EL2);
+ else
+ val = sanitised_sys_reg(vcpu, HDFGWTR_EL2);
+ break;
+
+ case HFGITR_GROUP:
+ val = sanitised_sys_reg(vcpu, HFGITR_EL2);
+ switch (tc.fgf) {
+ u64 tmp;
+
+ case __NO_FGF__:
+ break;
+
+ case HCRX_FGTnXS:
+ tmp = sanitised_sys_reg(vcpu, HCRX_EL2);
+ if (tmp & HCRX_EL2_FGTnXS)
+ tc.fgt = __NO_FGT_GROUP__;
+ }
+ break;
+
+ case __NR_FGT_GROUP_IDS__:
+ /* Something is really wrong, bail out */
+ WARN_ONCE(1, "__NR_FGT_GROUP_IDS__");
+ return false;
+ }
+
+ if (tc.fgt != __NO_FGT_GROUP__ && check_fgt_bit(val, tc))
+ goto inject;
+
+ b = compute_trap_behaviour(vcpu, tc);
+
+ if (((b & BEHAVE_FORWARD_READ) && is_read) ||
+ ((b & BEHAVE_FORWARD_WRITE) && !is_read))
+ goto inject;
+
+ return false;
+
+inject:
+ trace_kvm_forward_sysreg_trap(vcpu, sysreg, is_read);
+
+ kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+ return true;
+}
+
static u64 kvm_check_illegal_exception_return(struct kvm_vcpu *vcpu, u64 spsr)
{
u64 mode = spsr & PSR_MODE_MASK;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 20280a5233f6..95f6945c4432 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -884,21 +884,6 @@ u32 __attribute_const__ kvm_target_cpu(void)
return KVM_ARM_TARGET_GENERIC_V8;
}
-void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
-{
- u32 target = kvm_target_cpu();
-
- memset(init, 0, sizeof(*init));
-
- /*
- * For now, we don't return any features.
- * In future, we might use features to return target
- * specific features available for the preferred
- * target type.
- */
- init->target = (__u32)target;
-}
-
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
return -EINVAL;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 6dcd6604b6bc..617ae6dea5d5 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -222,7 +222,33 @@ static int kvm_handle_eret(struct kvm_vcpu *vcpu)
if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_ERET_ISS_ERET)
return kvm_handle_ptrauth(vcpu);
- kvm_emulate_nested_eret(vcpu);
+ /*
+ * If we got here, two possibilities:
+ *
+ * - the guest is in EL2, and we need to fully emulate ERET
+ *
+ * - the guest is in EL1, and we need to reinject the
+ * exception into the L1 hypervisor.
+ *
+ * If KVM ever traps ERET for its own use, we'll have to
+ * revisit this.
+ */
+ if (is_hyp_ctxt(vcpu))
+ kvm_emulate_nested_eret(vcpu);
+ else
+ kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
+
+ return 1;
+}
+
+static int handle_svc(struct kvm_vcpu *vcpu)
+{
+ /*
+ * So far, SVC traps only for NV via HFGITR_EL2. An SVC from a
+ * 32bit guest would be caught by vcpu_mode_is_bad_32bit(), so
+ * we should only have to deal with a 64 bit exception.
+ */
+ kvm_inject_nested_sync(vcpu, kvm_vcpu_get_esr(vcpu));
return 1;
}
@@ -239,6 +265,7 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_SMC32] = handle_smc,
[ESR_ELx_EC_HVC64] = handle_hvc,
[ESR_ELx_EC_SMC64] = handle_smc,
+ [ESR_ELx_EC_SVC64] = handle_svc,
[ESR_ELx_EC_SYS64] = kvm_handle_sys_reg,
[ESR_ELx_EC_SVE] = handle_sve,
[ESR_ELx_EC_ERET] = kvm_handle_eret,
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 34f222af6165..9cfe6bd1dbe4 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -70,20 +70,26 @@ static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
}
}
-static inline bool __hfgxtr_traps_required(void)
-{
- if (cpus_have_final_cap(ARM64_SME))
- return true;
-
- if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
- return true;
+#define compute_clr_set(vcpu, reg, clr, set) \
+ do { \
+ u64 hfg; \
+ hfg = __vcpu_sys_reg(vcpu, reg) & ~__ ## reg ## _RES0; \
+ set |= hfg & __ ## reg ## _MASK; \
+ clr |= ~hfg & __ ## reg ## _nMASK; \
+ } while(0)
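The macro folds both trap-bit polarities of the guest's FGT register (after masking out its RES0 bits) into a (set, clear) pair for the host copy: positive bits (those in __*_MASK) trap when 1, negative 'n' bits (those in __*_nMASK) trap when 0. A hedged sketch of the same logic, assuming a hypothetical register with one bit of each polarity (not kernel code):

#include <linux/bits.h>
#include <linux/types.h>

/* bit 0 traps when set (MASK), bit 1 traps when clear (nMASK) */
static void sketch_compute_clr_set(u64 guest, u64 *set, u64 *clr)
{
	const u64 mask  = BIT(0);
	const u64 nmask = BIT(1);

	*set |= guest & mask;	/* guest set bit 0 -> keep it set in hw     */
	*clr |= ~guest & nmask;	/* guest cleared bit 1 -> clear it in hw,   */
				/* which arms the negative-polarity trap    */
}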
- return false;
-}
-static inline void __activate_traps_hfgxtr(void)
+static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
+ struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
+ u64 r_val, w_val;
+
+ if (!cpus_have_final_cap(ARM64_HAS_FGT))
+ return;
+
+ ctxt_sys_reg(hctxt, HFGRTR_EL2) = read_sysreg_s(SYS_HFGRTR_EL2);
+ ctxt_sys_reg(hctxt, HFGWTR_EL2) = read_sysreg_s(SYS_HFGWTR_EL2);
if (cpus_have_final_cap(ARM64_SME)) {
tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;
@@ -98,26 +104,72 @@ static inline void __activate_traps_hfgxtr(void)
if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
w_set |= HFGxTR_EL2_TCR_EL1_MASK;
- sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set);
- sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set);
+ if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+ compute_clr_set(vcpu, HFGRTR_EL2, r_clr, r_set);
+ compute_clr_set(vcpu, HFGWTR_EL2, w_clr, w_set);
+ }
+
+ /* The default is not to trap anything but ACCDATA_EL1 */
+ r_val = __HFGRTR_EL2_nMASK & ~HFGxTR_EL2_nACCDATA_EL1;
+ r_val |= r_set;
+ r_val &= ~r_clr;
+
+ w_val = __HFGWTR_EL2_nMASK & ~HFGxTR_EL2_nACCDATA_EL1;
+ w_val |= w_set;
+ w_val &= ~w_clr;
+
+ write_sysreg_s(r_val, SYS_HFGRTR_EL2);
+ write_sysreg_s(w_val, SYS_HFGWTR_EL2);
+
+ if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
+ return;
+
+ ctxt_sys_reg(hctxt, HFGITR_EL2) = read_sysreg_s(SYS_HFGITR_EL2);
+
+ r_set = r_clr = 0;
+ compute_clr_set(vcpu, HFGITR_EL2, r_clr, r_set);
+ r_val = __HFGITR_EL2_nMASK;
+ r_val |= r_set;
+ r_val &= ~r_clr;
+
+ write_sysreg_s(r_val, SYS_HFGITR_EL2);
+
+ ctxt_sys_reg(hctxt, HDFGRTR_EL2) = read_sysreg_s(SYS_HDFGRTR_EL2);
+ ctxt_sys_reg(hctxt, HDFGWTR_EL2) = read_sysreg_s(SYS_HDFGWTR_EL2);
+
+ r_clr = r_set = w_clr = w_set = 0;
+
+ compute_clr_set(vcpu, HDFGRTR_EL2, r_clr, r_set);
+ compute_clr_set(vcpu, HDFGWTR_EL2, w_clr, w_set);
+
+ r_val = __HDFGRTR_EL2_nMASK;
+ r_val |= r_set;
+ r_val &= ~r_clr;
+
+ w_val = __HDFGWTR_EL2_nMASK;
+ w_val |= w_set;
+ w_val &= ~w_clr;
+
+ write_sysreg_s(r_val, SYS_HDFGRTR_EL2);
+ write_sysreg_s(w_val, SYS_HDFGWTR_EL2);
}
-static inline void __deactivate_traps_hfgxtr(void)
+static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
{
- u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;
+ struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
- if (cpus_have_final_cap(ARM64_SME)) {
- tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;
+ if (!cpus_have_final_cap(ARM64_HAS_FGT))
+ return;
- r_set |= tmp;
- w_set |= tmp;
- }
+ write_sysreg_s(ctxt_sys_reg(hctxt, HFGRTR_EL2), SYS_HFGRTR_EL2);
+ write_sysreg_s(ctxt_sys_reg(hctxt, HFGWTR_EL2), SYS_HFGWTR_EL2);
- if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
- w_clr |= HFGxTR_EL2_TCR_EL1_MASK;
+ if (!vcpu_has_nv(vcpu) || is_hyp_ctxt(vcpu))
+ return;
- sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set);
- sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set);
+ write_sysreg_s(ctxt_sys_reg(hctxt, HFGITR_EL2), SYS_HFGITR_EL2);
+ write_sysreg_s(ctxt_sys_reg(hctxt, HDFGRTR_EL2), SYS_HDFGRTR_EL2);
+ write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2);
}
static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
@@ -145,8 +197,21 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
- if (__hfgxtr_traps_required())
- __activate_traps_hfgxtr();
+ if (cpus_have_final_cap(ARM64_HAS_HCX)) {
+ u64 hcrx = HCRX_GUEST_FLAGS;
+ if (vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu)) {
+ u64 clr = 0, set = 0;
+
+ compute_clr_set(vcpu, HCRX_EL2, clr, set);
+
+ hcrx |= set;
+ hcrx &= ~clr;
+ }
+
+ write_sysreg_s(hcrx, SYS_HCRX_EL2);
+ }
+
+ __activate_traps_hfgxtr(vcpu);
}
static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
@@ -162,8 +227,10 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
}
- if (__hfgxtr_traps_required())
- __deactivate_traps_hfgxtr();
+ if (cpus_have_final_cap(ARM64_HAS_HCX))
+ write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
+
+ __deactivate_traps_hfgxtr(vcpu);
}
static inline void ___activate_traps(struct kvm_vcpu *vcpu)
@@ -177,9 +244,6 @@ static inline void ___activate_traps(struct kvm_vcpu *vcpu)
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
-
- if (cpus_have_final_cap(ARM64_HAS_HCX))
- write_sysreg_s(HCRX_GUEST_FLAGS, SYS_HCRX_EL2);
}
static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
@@ -194,9 +258,6 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
vcpu->arch.hcr_el2 &= ~HCR_VSE;
vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
}
-
- if (cpus_have_final_cap(ARM64_HAS_HCX))
- write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
}
static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h
index d5ec972b5c1e..230e4f2527de 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -26,6 +26,7 @@ int pkvm_create_mappings_locked(void *from, void *to, enum kvm_pgtable_prot prot
int __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
enum kvm_pgtable_prot prot,
unsigned long *haddr);
+int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr);
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr);
#endif /* __KVM_HYP_MM_H */
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index a169c619db60..857d9bc04fd4 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -135,6 +135,16 @@ static void handle___kvm_tlb_flush_vmid_ipa_nsh(struct kvm_cpu_context *host_ctx
__kvm_tlb_flush_vmid_ipa_nsh(kern_hyp_va(mmu), ipa, level);
}
+static void
+handle___kvm_tlb_flush_vmid_range(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
+ DECLARE_REG(phys_addr_t, start, host_ctxt, 2);
+ DECLARE_REG(unsigned long, pages, host_ctxt, 3);
+
+ __kvm_tlb_flush_vmid_range(kern_hyp_va(mmu), start, pages);
+}
+
static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
@@ -327,6 +337,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa_nsh),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
+ HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
HANDLE_FUNC(__kvm_flush_cpu_context),
HANDLE_FUNC(__kvm_timer_set_cntvoff),
HANDLE_FUNC(__vgic_v3_read_vmcr),
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index 318298eb3d6b..65a7a186d7b2 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -44,6 +44,27 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size,
return err;
}
+static int __pkvm_alloc_private_va_range(unsigned long start, size_t size)
+{
+ unsigned long cur;
+
+ hyp_assert_lock_held(&pkvm_pgd_lock);
+
+ if (!start || start < __io_map_base)
+ return -EINVAL;
+
+ /* The allocated size is always a multiple of PAGE_SIZE */
+ cur = start + PAGE_ALIGN(size);
+
+ /* Are we overflowing on the vmemmap? */
+ if (cur > __hyp_vmemmap)
+ return -ENOMEM;
+
+ __io_map_base = cur;
+
+ return 0;
+}
+
/**
* pkvm_alloc_private_va_range - Allocates a private VA range.
* @size: The size of the VA range to reserve.
@@ -56,27 +77,16 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size,
*/
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
{
- unsigned long base, addr;
- int ret = 0;
+ unsigned long addr;
+ int ret;
hyp_spin_lock(&pkvm_pgd_lock);
-
- /* Align the allocation based on the order of its size */
- addr = ALIGN(__io_map_base, PAGE_SIZE << get_order(size));
-
- /* The allocated size is always a multiple of PAGE_SIZE */
- base = addr + PAGE_ALIGN(size);
-
- /* Are we overflowing on the vmemmap ? */
- if (!addr || base > __hyp_vmemmap)
- ret = -ENOMEM;
- else {
- __io_map_base = base;
- *haddr = addr;
- }
-
+ addr = __io_map_base;
+ ret = __pkvm_alloc_private_va_range(addr, size);
hyp_spin_unlock(&pkvm_pgd_lock);
+ *haddr = addr;
+
return ret;
}
@@ -340,6 +350,45 @@ int hyp_create_idmap(u32 hyp_va_bits)
return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}
+int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
+{
+ unsigned long addr, prev_base;
+ size_t size;
+ int ret;
+
+ hyp_spin_lock(&pkvm_pgd_lock);
+
+ prev_base = __io_map_base;
+ /*
+ * Efficient stack verification using the PAGE_SHIFT bit implies
+ * an alignment of our allocation on the order of the size.
+ */
+ size = PAGE_SIZE * 2;
+ addr = ALIGN(__io_map_base, size);
+
+ ret = __pkvm_alloc_private_va_range(addr, size);
+ if (!ret) {
+ /*
+ * Since the stack grows downwards, map the stack to the page
+ * at the higher address and leave the lower guard page
+ * unbacked.
+ *
+ * Any valid stack address now has the PAGE_SHIFT bit as 1
+ * and addresses corresponding to the guard page have the
+ * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+ */
+ ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE,
+ PAGE_SIZE, phys, PAGE_HYP);
+ if (ret)
+ __io_map_base = prev_base;
+ }
+ hyp_spin_unlock(&pkvm_pgd_lock);
+
+ *haddr = addr + size;
+
+ return ret;
+}
+
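The layout produced above turns overflow detection into a single bit test: the allocation is aligned to 2 * PAGE_SIZE and only the upper page is mapped, so bit PAGE_SHIFT is 1 for every valid stack address and 0 once the stack has run into the guard page. A minimal sketch of that test (a hypothetical helper -- the kernel performs the equivalent check in the hyp exception entry assembly):

static bool hyp_stack_overflowed(unsigned long sp)
{
	/* PAGE_SIZE == BIT(PAGE_SHIFT): clear means we are in the guard page */
	return !(sp & PAGE_SIZE);
}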
static void *admit_host_page(void *arg)
{
struct kvm_hyp_memcache *host_mc = arg;
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index bb98630dfeaf..0d5e0a89ddce 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -113,7 +113,6 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
for (i = 0; i < hyp_nr_cpus; i++) {
struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);
- unsigned long hyp_addr;
start = (void *)kern_hyp_va(per_cpu_base[i]);
end = start + PAGE_ALIGN(hyp_percpu_size);
@@ -121,33 +120,9 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
if (ret)
return ret;
- /*
- * Allocate a contiguous HYP private VA range for the stack
- * and guard page. The allocation is also aligned based on
- * the order of its size.
- */
- ret = pkvm_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
+ ret = pkvm_create_stack(params->stack_pa, &params->stack_hyp_va);
if (ret)
return ret;
-
- /*
- * Since the stack grows downwards, map the stack to the page
- * at the higher address and leave the lower guard page
- * unbacked.
- *
- * Any valid stack address now has the PAGE_SHIFT bit as 1
- * and addresses corresponding to the guard page have the
- * PAGE_SHIFT bit as 0 - this is used for overflow detection.
- */
- hyp_spin_lock(&pkvm_pgd_lock);
- ret = kvm_pgtable_hyp_map(&pkvm_pgtable, hyp_addr + PAGE_SIZE,
- PAGE_SIZE, params->stack_pa, PAGE_HYP);
- hyp_spin_unlock(&pkvm_pgd_lock);
- if (ret)
- return ret;
-
- /* Update stack_hyp_va to end of the stack's private VA range */
- params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
}
/*
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index e89a23153e85..c353a06ee7e6 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -236,7 +236,7 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
* KVM_ARM_VCPU_INIT, however, this is likely not possible for
* protected VMs.
*/
- vcpu->arch.target = -1;
+ vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
*exit_code |= ARM_EXCEPTION_IL;
}
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index b9991bbd8e3f..1b265713d6be 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -182,6 +182,36 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
__tlb_switch_to_host(&cxt);
}
+void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+ phys_addr_t start, unsigned long pages)
+{
+ struct tlb_inv_context cxt;
+ unsigned long stride;
+
+ /*
+ * Since the range of addresses may not be mapped at
+ * the same level, assume the worst case, a stride of PAGE_SIZE
+ */
+ stride = PAGE_SIZE;
+ start = round_down(start, stride);
+
+ /* Switch to requested VMID */
+ __tlb_switch_to_guest(mmu, &cxt, false);
+
+ __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+
+ dsb(ish);
+ __tlbi(vmalle1is);
+ dsb(ish);
+ isb();
+
+ /* See the comment in __kvm_tlb_flush_vmid_ipa() */
+ if (icache_is_vpipt())
+ icache_inval_all_pou();
+
+ __tlb_switch_to_host(&cxt);
+}
+
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
struct tlb_inv_context cxt;
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index f7a93ef29250..f155b8c9e98c 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -670,6 +670,26 @@ static bool stage2_has_fwb(struct kvm_pgtable *pgt)
return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
}
+void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+ phys_addr_t addr, size_t size)
+{
+ unsigned long pages, inval_pages;
+
+ if (!system_supports_tlb_range()) {
+ kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+ return;
+ }
+
+ pages = size >> PAGE_SHIFT;
+ while (pages > 0) {
+ inval_pages = min(pages, MAX_TLBI_RANGE_PAGES);
+ kvm_call_hyp(__kvm_tlb_flush_vmid_range, mmu, addr, inval_pages);
+
+ addr += inval_pages << PAGE_SHIFT;
+ pages -= inval_pages;
+ }
+}
+
#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
@@ -786,7 +806,8 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
* evicted pte value (if any).
*/
if (kvm_pte_table(ctx->old, ctx->level))
- kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
+ kvm_tlb_flush_vmid_range(mmu, ctx->addr,
+ kvm_granule_size(ctx->level));
else if (kvm_pte_valid(ctx->old))
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
ctx->addr, ctx->level);
@@ -810,16 +831,36 @@ static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t n
smp_store_release(ctx->ptep, new);
}
-static void stage2_put_pte(const struct kvm_pgtable_visit_ctx *ctx, struct kvm_s2_mmu *mmu,
- struct kvm_pgtable_mm_ops *mm_ops)
+static bool stage2_unmap_defer_tlb_flush(struct kvm_pgtable *pgt)
+{
+ /*
+ * If FEAT_TLBIRANGE is implemented, defer the individual
+ * TLB invalidations until the entire walk is finished, and
+ * then use the range-based TLBI instructions to do the
+ * invalidations. Condition deferred TLB invalidation on the
+ * system supporting FWB as the optimization is entirely
+ * pointless when the unmap walker needs to perform CMOs.
+ */
+ return system_supports_tlb_range() && stage2_has_fwb(pgt);
+}
+
+static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
+ struct kvm_s2_mmu *mmu,
+ struct kvm_pgtable_mm_ops *mm_ops)
{
+ struct kvm_pgtable *pgt = ctx->arg;
+
/*
- * Clear the existing PTE, and perform break-before-make with
- * TLB maintenance if it was valid.
+ * Clear the existing PTE, and perform break-before-make if it was
+ * valid. Depending on system support, defer its TLB maintenance
+ * until the entire unmap walk has completed.
*/
if (kvm_pte_valid(ctx->old)) {
kvm_clear_pte(ctx->ptep);
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);
+
+ if (!stage2_unmap_defer_tlb_flush(pgt))
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
+ ctx->addr, ctx->level);
}
mm_ops->put_page(ctx->ptep);
@@ -1077,7 +1118,7 @@ static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
* block entry and rely on the remaining portions being faulted
* back lazily.
*/
- stage2_put_pte(ctx, mmu, mm_ops);
+ stage2_unmap_put_pte(ctx, mmu, mm_ops);
if (need_flush && mm_ops->dcache_clean_inval_poc)
mm_ops->dcache_clean_inval_poc(kvm_pte_follow(ctx->old, mm_ops),
@@ -1091,13 +1132,19 @@ static int stage2_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
+ int ret;
struct kvm_pgtable_walker walker = {
.cb = stage2_unmap_walker,
.arg = pgt,
.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
};
- return kvm_pgtable_walk(pgt, addr, size, &walker);
+ ret = kvm_pgtable_walk(pgt, addr, size, &walker);
+ if (stage2_unmap_defer_tlb_flush(pgt))
+ /* Perform the deferred TLB invalidations */
+ kvm_tlb_flush_vmid_range(pgt->mmu, addr, size);
+
+ return ret;
}
struct stage2_attr_data {
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index e69da550cdc5..46bd43f61d76 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -143,6 +143,34 @@ void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
__tlb_switch_to_host(&cxt);
}
+void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+ phys_addr_t start, unsigned long pages)
+{
+ struct tlb_inv_context cxt;
+ unsigned long stride;
+
+ /*
+ * Since the range of addresses may not be mapped at
+ * the same level, assume the worst case, a stride of PAGE_SIZE
+ */
+ stride = PAGE_SIZE;
+ start = round_down(start, stride);
+
+ dsb(ishst);
+
+ /* Switch to requested VMID */
+ __tlb_switch_to_guest(mmu, &cxt);
+
+ __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+
+ dsb(ish);
+ __tlbi(vmalle1is);
+ dsb(ish);
+ isb();
+
+ __tlb_switch_to_host(&cxt);
+}
+
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
struct tlb_inv_context cxt;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index d3b4feed460c..587a104f66c3 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -161,15 +161,23 @@ static bool memslot_is_logging(struct kvm_memory_slot *memslot)
}
/**
- * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
+ * kvm_arch_flush_remote_tlbs() - flush all VM TLB entries for v7/8
* @kvm: pointer to kvm structure.
*
* Interface to HYP function to flush all VM TLB entries
*/
-void kvm_flush_remote_tlbs(struct kvm *kvm)
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
- ++kvm->stat.generic.remote_tlb_flush_requests;
kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
+ return 0;
+}
+
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
+ gfn_t gfn, u64 nr_pages)
+{
+ kvm_tlb_flush_vmid_range(&kvm->arch.mmu,
+ gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
+ return 0;
}
static bool kvm_is_device_pfn(unsigned long pfn)
@@ -592,6 +600,25 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
return 0;
}
+static int __hyp_alloc_private_va_range(unsigned long base)
+{
+ lockdep_assert_held(&kvm_hyp_pgd_mutex);
+
+ if (!PAGE_ALIGNED(base))
+ return -EINVAL;
+
+ /*
+ * Verify that BIT(VA_BITS - 1) hasn't been flipped by
+ * allocating the new area, as it would indicate we've
+ * overflowed the idmap/IO address range.
+ */
+ if ((base ^ io_map_base) & BIT(VA_BITS - 1))
+ return -ENOMEM;
+
+ io_map_base = base;
+
+ return 0;
+}
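The XOR test above rejects a downward allocation that flips BIT(VA_BITS - 1), i.e. one that has run out of the idmap/IO half of the hyp VA space. A small worked example, assuming VA_BITS == 48 and an io_map_base sitting just above BIT(47) (all values are illustrative only):

#include <linux/bits.h>
#include <linux/sizes.h>
#include <linux/types.h>

/* Worked example, not kernel code: VA_BITS assumed to be 48. */
static bool hyp_va_overflowed(unsigned long base, unsigned long io_base)
{
	return (base ^ io_base) & BIT(47);	/* BIT(VA_BITS - 1) */
}

/*
 * With io_base = BIT(47) + SZ_4K and base = io_base - SZ_16K, bit 47
 * is 1 in io_base but 0 in base, so the helper returns true and the
 * allocation would fail with -ENOMEM.
 */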
/**
* hyp_alloc_private_va_range - Allocates a private VA range.
@@ -612,26 +639,16 @@ int hyp_alloc_private_va_range(size_t size, unsigned long *haddr)
/*
* This assumes that we have enough space below the idmap
- * page to allocate our VAs. If not, the check below will
- * kick. A potential alternative would be to detect that
- * overflow and switch to an allocation above the idmap.
+ * page to allocate our VAs. If not, the check in
+ * __hyp_alloc_private_va_range() will kick. A potential
+ * alternative would be to detect that overflow and switch
+ * to an allocation above the idmap.
*
* The allocated size is always a multiple of PAGE_SIZE.
*/
- base = io_map_base - PAGE_ALIGN(size);
-
- /* Align the allocation based on the order of its size */
- base = ALIGN_DOWN(base, PAGE_SIZE << get_order(size));
-
- /*
- * Verify that BIT(VA_BITS - 1) hasn't been flipped by
- * allocating the new area, as it would indicate we've
- * overflowed the idmap/IO address range.
- */
- if ((base ^ io_map_base) & BIT(VA_BITS - 1))
- ret = -ENOMEM;
- else
- *haddr = io_map_base = base;
+ size = PAGE_ALIGN(size);
+ base = io_map_base - size;
+ ret = __hyp_alloc_private_va_range(base);
mutex_unlock(&kvm_hyp_pgd_mutex);
@@ -668,6 +685,48 @@ static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
return ret;
}
+int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr)
+{
+ unsigned long base;
+ size_t size;
+ int ret;
+
+ mutex_lock(&kvm_hyp_pgd_mutex);
+ /*
+ * Efficient stack verification using the PAGE_SHIFT bit implies
+ * an alignment of our allocation on the order of the size.
+ */
+ size = PAGE_SIZE * 2;
+ base = ALIGN_DOWN(io_map_base - size, size);
+
+ ret = __hyp_alloc_private_va_range(base);
+
+ mutex_unlock(&kvm_hyp_pgd_mutex);
+
+ if (ret) {
+ kvm_err("Cannot allocate hyp stack guard page\n");
+ return ret;
+ }
+
+ /*
+ * Since the stack grows downwards, map the stack to the page
+ * at the higher address and leave the lower guard page
+ * unbacked.
+ *
+ * Any valid stack address now has the PAGE_SHIFT bit as 1
+ * and addresses corresponding to the guard page have the
+ * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+ */
+ ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr,
+ PAGE_HYP);
+ if (ret)
+ kvm_err("Cannot map hyp stack\n");
+
+ *haddr = base + size;
+
+ return ret;
+}
+
/**
* create_hyp_io_mappings - Map IO into both kernel and HYP
* @phys_addr: The physical start address which gets mapped
@@ -1075,7 +1134,7 @@ static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
write_lock(&kvm->mmu_lock);
stage2_wp_range(&kvm->arch.mmu, start, end);
write_unlock(&kvm->mmu_lock);
- kvm_flush_remote_tlbs(kvm);
+ kvm_flush_remote_tlbs_memslot(kvm, memslot);
}
/**
@@ -1541,7 +1600,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
out_unlock:
read_unlock(&kvm->mmu_lock);
- kvm_set_pfn_accessed(pfn);
kvm_release_pfn_clean(pfn);
return ret != -EAGAIN ? ret : 0;
}
@@ -1721,7 +1779,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- kvm_pfn_t pfn = pte_pfn(range->pte);
+ kvm_pfn_t pfn = pte_pfn(range->arg.pte);
if (!kvm->arch.mmu.pgt)
return false;
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 315354d27978..042695a210ce 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -71,8 +71,9 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
break;
case SYS_ID_AA64MMFR0_EL1:
- /* Hide ECV, FGT, ExS, Secure Memory */
- val &= ~(GENMASK_ULL(63, 43) |
+ /* Hide ECV, ExS, Secure Memory */
+ val &= ~(NV_FTR(MMFR0, ECV) |
+ NV_FTR(MMFR0, EXS) |
NV_FTR(MMFR0, TGRAN4_2) |
NV_FTR(MMFR0, TGRAN16_2) |
NV_FTR(MMFR0, TGRAN64_2) |
@@ -116,7 +117,8 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
break;
case SYS_ID_AA64MMFR1_EL1:
- val &= (NV_FTR(MMFR1, PAN) |
+ val &= (NV_FTR(MMFR1, HCX) |
+ NV_FTR(MMFR1, PAN) |
NV_FTR(MMFR1, LO) |
NV_FTR(MMFR1, HPDS) |
NV_FTR(MMFR1, VH) |
@@ -124,8 +126,7 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
break;
case SYS_ID_AA64MMFR2_EL1:
- val &= ~(NV_FTR(MMFR2, EVT) |
- NV_FTR(MMFR2, BBM) |
+ val &= ~(NV_FTR(MMFR2, BBM) |
NV_FTR(MMFR2, TTL) |
GENMASK_ULL(47, 44) |
NV_FTR(MMFR2, ST) |
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 560650972478..6b066e04dc5d 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -14,6 +14,7 @@
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
+#include <asm/arm_pmuv3.h>
#define PERF_ATTR_CFG1_COUNTER_64BIT BIT(0)
@@ -35,12 +36,8 @@ static struct kvm_pmc *kvm_vcpu_idx_to_pmc(struct kvm_vcpu *vcpu, int cnt_idx)
return &vcpu->arch.pmu.pmc[cnt_idx];
}
-static u32 kvm_pmu_event_mask(struct kvm *kvm)
+static u32 __kvm_pmu_event_mask(unsigned int pmuver)
{
- unsigned int pmuver;
-
- pmuver = kvm->arch.arm_pmu->pmuver;
-
switch (pmuver) {
case ID_AA64DFR0_EL1_PMUVer_IMP:
return GENMASK(9, 0);
@@ -55,6 +52,14 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
}
}
+static u32 kvm_pmu_event_mask(struct kvm *kvm)
+{
+ u64 dfr0 = IDREG(kvm, SYS_ID_AA64DFR0_EL1);
+ u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, dfr0);
+
+ return __kvm_pmu_event_mask(pmuver);
+}
+
/**
* kvm_pmc_is_64bit - determine if counter is 64bit
* @pmc: counter context
@@ -672,8 +677,11 @@ void kvm_host_pmu_init(struct arm_pmu *pmu)
{
struct arm_pmu_entry *entry;
- if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
- pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
+ /*
+ * Check the sanitised PMU version for the system, as KVM does not
+ * support implementations where PMUv3 exists on a subset of CPUs.
+ */
+ if (!pmuv3_implemented(kvm_arm_pmu_get_pmuver_limit()))
return;
mutex_lock(&arm_pmus_lock);
@@ -750,11 +758,12 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
} else {
val = read_sysreg(pmceid1_el0);
/*
- * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
+ * Don't advertise STALL_SLOT*, as PMMIR_EL0 is handled
* as RAZ
*/
- if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
- val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
+ val &= ~(BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32) |
+ BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND - 32) |
+ BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND - 32));
base = 32;
}
@@ -950,11 +959,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
return 0;
}
case KVM_ARM_VCPU_PMU_V3_FILTER: {
+ u8 pmuver = kvm_arm_pmu_get_pmuver_limit();
struct kvm_pmu_event_filter __user *uaddr;
struct kvm_pmu_event_filter filter;
int nr_events;
- nr_events = kvm_pmu_event_mask(kvm) + 1;
+ /*
+ * Allow userspace to specify an event filter for the entire
+ * event range supported by the hardware's PMUVer, rather than
+ * the guest's PMUVer, for KVM backward compatibility.
+ */
+ nr_events = __kvm_pmu_event_mask(pmuver) + 1;
uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 121f1a14c829..0eea225fd09a 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -236,3 +236,21 @@ bool kvm_set_pmuserenr(u64 val)
ctxt_sys_reg(hctxt, PMUSERENR_EL0) = val;
return true;
}
+
+/*
+ * If we interrupted the guest to update the host PMU context, make
+ * sure we re-apply the guest EL0 state.
+ */
+void kvm_vcpu_pmu_resync_el0(void)
+{
+ struct kvm_vcpu *vcpu;
+
+ if (!has_vhe() || !in_interrupt())
+ return;
+
+ vcpu = kvm_get_running_vcpu();
+ if (!vcpu)
+ return;
+
+ kvm_make_request(KVM_REQ_RESYNC_PMU_EL0, vcpu);
+}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index bc8556b6f459..7a65a35ee4ac 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -248,21 +248,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
}
}
- switch (vcpu->arch.target) {
- default:
- if (vcpu_el1_is_32bit(vcpu)) {
- pstate = VCPU_RESET_PSTATE_SVC;
- } else if (vcpu_has_nv(vcpu)) {
- pstate = VCPU_RESET_PSTATE_EL2;
- } else {
- pstate = VCPU_RESET_PSTATE_EL1;
- }
-
- if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
- ret = -EINVAL;
- goto out;
- }
- break;
+ if (vcpu_el1_is_32bit(vcpu))
+ pstate = VCPU_RESET_PSTATE_SVC;
+ else if (vcpu_has_nv(vcpu))
+ pstate = VCPU_RESET_PSTATE_EL2;
+ else
+ pstate = VCPU_RESET_PSTATE_EL1;
+
+ if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) {
+ ret = -EINVAL;
+ goto out;
}
/* Reset core registers */
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2ca2973abe66..e92ec810d449 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2151,6 +2151,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
+ { SYS_DESC(SYS_ACCDATA_EL1), undef_access },
+
{ SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
@@ -2365,8 +2367,13 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG(MDCR_EL2, access_rw, reset_val, 0),
EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
EL2_REG(HSTR_EL2, access_rw, reset_val, 0),
+ EL2_REG(HFGRTR_EL2, access_rw, reset_val, 0),
+ EL2_REG(HFGWTR_EL2, access_rw, reset_val, 0),
+ EL2_REG(HFGITR_EL2, access_rw, reset_val, 0),
EL2_REG(HACR_EL2, access_rw, reset_val, 0),
+ EL2_REG(HCRX_EL2, access_rw, reset_val, 0),
+
EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
@@ -2374,6 +2381,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
EL2_REG(VTCR_EL2, access_rw, reset_val, 0),
{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
+ EL2_REG(HDFGRTR_EL2, access_rw, reset_val, 0),
+ EL2_REG(HDFGWTR_EL2, access_rw, reset_val, 0),
EL2_REG(SPSR_EL2, access_rw, reset_val, 0),
EL2_REG(ELR_EL2, access_rw, reset_val, 0),
{ SYS_DESC(SYS_SP_EL1), access_sp_el1},
@@ -3170,6 +3179,9 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
trace_kvm_handle_sys_reg(esr);
+ if (__check_nv_sr_forward(vcpu))
+ return 1;
+
params = esr_sys64_to_params(esr);
params.regval = vcpu_get_reg(vcpu, Rt);
@@ -3587,5 +3599,8 @@ int __init kvm_sys_reg_table_init(void)
if (!first_idreg)
return -EINVAL;
+ if (kvm_get_mode() == KVM_MODE_NV)
+ return populate_nv_trap_config();
+
return 0;
}
diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
index 6ce5c025218d..8ad53104934d 100644
--- a/arch/arm64/kvm/trace_arm.h
+++ b/arch/arm64/kvm/trace_arm.h
@@ -364,6 +364,32 @@ TRACE_EVENT(kvm_inject_nested_exception,
__entry->hcr_el2)
);
+TRACE_EVENT(kvm_forward_sysreg_trap,
+ TP_PROTO(struct kvm_vcpu *vcpu, u32 sysreg, bool is_read),
+ TP_ARGS(vcpu, sysreg, is_read),
+
+ TP_STRUCT__entry(
+ __field(u64, pc)
+ __field(u32, sysreg)
+ __field(bool, is_read)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = *vcpu_pc(vcpu);
+ __entry->sysreg = sysreg;
+ __entry->is_read = is_read;
+ ),
+
+ TP_printk("%llx %c (%d,%d,%d,%d,%d)",
+ __entry->pc,
+ __entry->is_read ? 'R' : 'W',
+ sys_reg_Op0(__entry->sysreg),
+ sys_reg_Op1(__entry->sysreg),
+ sys_reg_CRn(__entry->sysreg),
+ sys_reg_CRm(__entry->sysreg),
+ sys_reg_Op2(__entry->sysreg))
+);
+
#endif /* _TRACE_ARM_ARM64_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index f9923beedd27..0ab09b0d4440 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -199,7 +199,6 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
-void vgic_v2_set_npie(struct kvm_vcpu *vcpu);
int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
int offset, u32 *val);
@@ -233,7 +232,6 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
-void vgic_v3_set_npie(struct kvm_vcpu *vcpu);
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
void vgic_v3_enable(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/lib/csum.c b/arch/arm64/lib/csum.c
index 78b87a64ca0a..2432683e48a6 100644
--- a/arch/arm64/lib/csum.c
+++ b/arch/arm64/lib/csum.c
@@ -24,7 +24,7 @@ unsigned int __no_sanitize_address do_csum(const unsigned char *buff, int len)
const u64 *ptr;
u64 data, sum64 = 0;
- if (unlikely(len == 0))
+ if (unlikely(len <= 0))
return 0;
offset = (unsigned long)buff & 7;
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index c80ed4f3cbce..c3f06fdef609 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -26,6 +26,7 @@ HAS_ECV
HAS_ECV_CNTPOFF
HAS_EPAN
HAS_EVT
+HAS_FGT
HAS_GENERIC_AUTH
HAS_GENERIC_AUTH_ARCH_QARMA3
HAS_GENERIC_AUTH_ARCH_QARMA5
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 65866bf819c3..2517ef7c21cf 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -2156,6 +2156,135 @@ Field 1 ICIALLU
Field 0 ICIALLUIS
EndSysreg
+Sysreg HDFGRTR_EL2 3 4 3 1 4
+Field 63 PMBIDR_EL1
+Field 62 nPMSNEVFR_EL1
+Field 61 nBRBDATA
+Field 60 nBRBCTL
+Field 59 nBRBIDR
+Field 58 PMCEIDn_EL0
+Field 57 PMUSERENR_EL0
+Field 56 TRBTRG_EL1
+Field 55 TRBSR_EL1
+Field 54 TRBPTR_EL1
+Field 53 TRBMAR_EL1
+Field 52 TRBLIMITR_EL1
+Field 51 TRBIDR_EL1
+Field 50 TRBBASER_EL1
+Res0 49
+Field 48 TRCVICTLR
+Field 47 TRCSTATR
+Field 46 TRCSSCSRn
+Field 45 TRCSEQSTR
+Field 44 TRCPRGCTLR
+Field 43 TRCOSLSR
+Res0 42
+Field 41 TRCIMSPECn
+Field 40 TRCID
+Res0 39:38
+Field 37 TRCCNTVRn
+Field 36 TRCCLAIM
+Field 35 TRCAUXCTLR
+Field 34 TRCAUTHSTATUS
+Field 33 TRC
+Field 32 PMSLATFR_EL1
+Field 31 PMSIRR_EL1
+Field 30 PMSIDR_EL1
+Field 29 PMSICR_EL1
+Field 28 PMSFCR_EL1
+Field 27 PMSEVFR_EL1
+Field 26 PMSCR_EL1
+Field 25 PMBSR_EL1
+Field 24 PMBPTR_EL1
+Field 23 PMBLIMITR_EL1
+Field 22 PMMIR_EL1
+Res0 21:20
+Field 19 PMSELR_EL0
+Field 18 PMOVS
+Field 17 PMINTEN
+Field 16 PMCNTEN
+Field 15 PMCCNTR_EL0
+Field 14 PMCCFILTR_EL0
+Field 13 PMEVTYPERn_EL0
+Field 12 PMEVCNTRn_EL0
+Field 11 OSDLR_EL1
+Field 10 OSECCR_EL1
+Field 9 OSLSR_EL1
+Res0 8
+Field 7 DBGPRCR_EL1
+Field 6 DBGAUTHSTATUS_EL1
+Field 5 DBGCLAIM
+Field 4 MDSCR_EL1
+Field 3 DBGWVRn_EL1
+Field 2 DBGWCRn_EL1
+Field 1 DBGBVRn_EL1
+Field 0 DBGBCRn_EL1
+EndSysreg
+
+Sysreg HDFGWTR_EL2 3 4 3 1 5
+Res0 63
+Field 62 nPMSNEVFR_EL1
+Field 61 nBRBDATA
+Field 60 nBRBCTL
+Res0 59:58
+Field 57 PMUSERENR_EL0
+Field 56 TRBTRG_EL1
+Field 55 TRBSR_EL1
+Field 54 TRBPTR_EL1
+Field 53 TRBMAR_EL1
+Field 52 TRBLIMITR_EL1
+Res0 51
+Field 50 TRBBASER_EL1
+Field 49 TRFCR_EL1
+Field 48 TRCVICTLR
+Res0 47
+Field 46 TRCSSCSRn
+Field 45 TRCSEQSTR
+Field 44 TRCPRGCTLR
+Res0 43
+Field 42 TRCOSLAR
+Field 41 TRCIMSPECn
+Res0 40:38
+Field 37 TRCCNTVRn
+Field 36 TRCCLAIM
+Field 35 TRCAUXCTLR
+Res0 34
+Field 33 TRC
+Field 32 PMSLATFR_EL1
+Field 31 PMSIRR_EL1
+Res0 30
+Field 29 PMSICR_EL1
+Field 28 PMSFCR_EL1
+Field 27 PMSEVFR_EL1
+Field 26 PMSCR_EL1
+Field 25 PMBSR_EL1
+Field 24 PMBPTR_EL1
+Field 23 PMBLIMITR_EL1
+Res0 22
+Field 21 PMCR_EL0
+Field 20 PMSWINC_EL0
+Field 19 PMSELR_EL0
+Field 18 PMOVS
+Field 17 PMINTEN
+Field 16 PMCNTEN
+Field 15 PMCCNTR_EL0
+Field 14 PMCCFILTR_EL0
+Field 13 PMEVTYPERn_EL0
+Field 12 PMEVCNTRn_EL0
+Field 11 OSDLR_EL1
+Field 10 OSECCR_EL1
+Res0 9
+Field 8 OSLAR_EL1
+Field 7 DBGPRCR_EL1
+Res0 6
+Field 5 DBGCLAIM
+Field 4 MDSCR_EL1
+Field 3 DBGWVRn_EL1
+Field 2 DBGWCRn_EL1
+Field 1 DBGBVRn_EL1
+Field 0 DBGBCRn_EL1
+EndSysreg
+
Sysreg ZCR_EL2 3 4 1 2 0
Fields ZCR_ELx
EndSysreg
diff --git a/arch/csky/include/asm/traps.h b/arch/csky/include/asm/traps.h
index 1e7d303b91e9..732c4aaa2e26 100644
--- a/arch/csky/include/asm/traps.h
+++ b/arch/csky/include/asm/traps.h
@@ -3,6 +3,8 @@
#ifndef __ASM_CSKY_TRAPS_H
#define __ASM_CSKY_TRAPS_H
+#include <linux/linkage.h>
+
#define VEC_RESET 0
#define VEC_ALIGN 1
#define VEC_ACCESS 2
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 33733245f42b..aefae2efde9f 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
generic-y += agp.h
-generic-y += export.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += vtime.h
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 5eba3fb2e311..ac06d44b9b27 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -37,7 +37,7 @@
* pNonSys: !pSys
*/
-
+#include <linux/export.h>
#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/cache.h>
@@ -49,7 +49,6 @@
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
-#include <asm/export.h>
#include "minstate.h"
diff --git a/arch/ia64/kernel/esi_stub.S b/arch/ia64/kernel/esi_stub.S
index 821e68d10598..9928c5b2957c 100644
--- a/arch/ia64/kernel/esi_stub.S
+++ b/arch/ia64/kernel/esi_stub.S
@@ -34,9 +34,9 @@
#define PSR_BITS_TO_SET \
(IA64_PSR_BN)
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/asmmacro.h>
-#include <asm/export.h>
/*
* Inputs:
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index c096500590e9..85c8a57da402 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -20,7 +20,7 @@
* Support for CPU Hotplug
*/
-
+#include <linux/export.h>
#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/fpu.h>
@@ -33,7 +33,6 @@
#include <asm/mca_asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
-#include <asm/export.h>
#ifdef CONFIG_HOTPLUG_CPU
#define SAL_PSR_BITS_TO_SET \
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 7a418e324d30..da90c49df628 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -47,7 +47,7 @@
* Table is based upon EAS2.6 (Oct 1999)
*/
-
+#include <linux/export.h>
#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/break.h>
@@ -58,7 +58,6 @@
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
-#include <asm/export.h>
#if 0
# define PSR_DEFAULT_BITS psr.ac
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
index 06d01a070aae..fb6db6966f70 100644
--- a/arch/ia64/kernel/pal.S
+++ b/arch/ia64/kernel/pal.S
@@ -13,9 +13,9 @@
* 05/24/2000 eranian Added support for physical mode static calls
*/
+#include <linux/export.h>
#include <asm/asmmacro.h>
#include <asm/processor.h>
-#include <asm/export.h>
.data
pal_entry_point:
diff --git a/arch/ia64/lib/clear_page.S b/arch/ia64/lib/clear_page.S
index 65b75085c8f4..ba0dd2538fa5 100644
--- a/arch/ia64/lib/clear_page.S
+++ b/arch/ia64/lib/clear_page.S
@@ -10,9 +10,9 @@
* 3/08/02 davidm Some more tweaking
*/
+#include <linux/export.h>
#include <asm/asmmacro.h>
#include <asm/page.h>
-#include <asm/export.h>
#ifdef CONFIG_ITANIUM
# define L3_LINE_SIZE 64 // Itanium L3 line size
diff --git a/arch/ia64/lib/clear_user.S b/arch/ia64/lib/clear_user.S
index a28f39d349eb..1d9e45ccf8e5 100644
--- a/arch/ia64/lib/clear_user.S
+++ b/arch/ia64/lib/clear_user.S
@@ -12,8 +12,8 @@
* Stephane Eranian <eranian@hpl.hp.com>
*/
+#include <linux/export.h>
#include <asm/asmmacro.h>
-#include <asm/export.h>
//
// arguments
diff --git a/arch/ia64/lib/copy_page.S b/arch/ia64/lib/copy_page.S
index 176f857c522e..c0a0e6b2af00 100644
--- a/arch/ia64/lib/copy_page.S
+++ b/arch/ia64/lib/copy_page.S
@@ -15,9 +15,9 @@
*
* 4/06/01 davidm Tuned to make it perform well both for cached and uncached copies.
*/
+#include <linux/export.h>
#include <asm/asmmacro.h>
#include <asm/page.h>
-#include <asm/export.h>
#define PIPE_DEPTH 3
#define EPI p[PIPE_DEPTH-1]
diff --git a/arch/ia64/lib/copy_page_mck.S b/arch/ia64/lib/copy_page_mck.S
index d6fd56e4f1c1..5e8bb4b4b535 100644
--- a/arch/ia64/lib/copy_page_mck.S
+++ b/arch/ia64/lib/copy_page_mck.S
@@ -60,9 +60,9 @@
* to fetch the second-half of the L2 cache line into L1, and the tX words are copied in
* an order that avoids bank conflicts.
*/
+#include <linux/export.h>
#include <asm/asmmacro.h>
#include <asm/page.h>
-#include <asm/export.h>
#define PREFETCH_DIST 8 // McKinley sustains 16 outstanding L2 misses (8 ld, 8 st)
diff --git a/arch/ia64/lib/copy_user.S b/arch/ia64/lib/copy_user.S
index f681556c6b86..8daab72cfe77 100644
--- a/arch/ia64/lib/copy_user.S
+++ b/arch/ia64/lib/copy_user.S
@@ -30,8 +30,8 @@
* - fix extraneous stop bit introduced by the EX() macro.
*/
+#include <linux/export.h>
#include <asm/asmmacro.h>
-#include <asm/export.h>
//
// Tuneable parameters
diff --git a/arch/ia64/lib/flush.S b/arch/ia64/lib/flush.S
index 8573d59c9ed1..f8e795fe45cb 100644
--- a/arch/ia64/lib/flush.S
+++ b/arch/ia64/lib/flush.S
@@ -8,9 +8,8 @@
* 05/28/05 Zoltan Menyhart Dynamic stride size
*/
+#include <linux/export.h>
#include <asm/asmmacro.h>
-#include <asm/export.h>
-
/*
* flush_icache_range(start,end)
diff --git a/arch/ia64/lib/idiv32.S b/arch/ia64/lib/idiv32.S
index def92b708e6e..83586fbc51ff 100644
--- a/arch/ia64/lib/idiv32.S
+++ b/arch/ia64/lib/idiv32.S
@@ -15,8 +15,8 @@
* (http://www.goodreads.com/book/show/2019887.Ia_64_and_Elementary_Functions)
*/
+#include <linux/export.h>
#include <asm/asmmacro.h>
-#include <asm/export.h>
#ifdef MODULO
# define OP mod
diff --git a/arch/ia64/lib/idiv64.S b/arch/ia64/lib/idiv64.S
index a8ba3bd3d4d8..5c9113691f72 100644
--- a/arch/ia64/lib/idiv64.S
+++ b/arch/ia64/lib/idiv64.S
@@ -15,8 +15,8 @@
* (http://www.goodreads.com/book/show/2019887.Ia_64_and_Elementary_Functions)
*/
+#include <linux/export.h>
#include <asm/asmmacro.h>
-#include <asm/export.h>
#ifdef MODULO
# define OP mod
diff --git a/arch/ia64/lib/ip_fast_csum.S b/arch/ia64/lib/ip_fast_csum.S
index dc9e6e6fe876..fcc0b812ce2e 100644
--- a/arch/ia64/lib/ip_fast_csum.S
+++ b/arch/ia64/lib/ip_fast_csum.S
@@ -13,8 +13,8 @@
* Copyright (C) 2002, 2006 Ken Chen <kenneth.w.chen@intel.com>
*/
+#include <linux/export.h>
#include <asm/asmmacro.h>
-#include <asm/export.h>
/*
* Since we know that most likely this function is called with buf aligned
diff --git a/arch/ia64/lib/memcpy.S b/arch/ia64/lib/memcpy.S
index 91a625fddbf0..35c9069a8345 100644
--- a/arch/ia64/lib/memcpy.S
+++ b/arch/ia64/lib/memcpy.S
@@ -14,8 +14,8 @@
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
+#include <linux/export.h>
#include <asm/asmmacro.h>
-#include <asm/export.h>
GLOBAL_ENTRY(memcpy)
diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S
index cc4e6ac914b6..c0d4362217ae 100644
--- a/arch/ia64/lib/memcpy_mck.S
+++ b/arch/ia64/lib/memcpy_mck.S
@@ -14,9 +14,9 @@
* Copyright (C) 2002 Intel Corp.
* Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com>
*/
+#include <linux/export.h>
#include <asm/asmmacro.h>
#include <asm/page.h>
-#include <asm/export.h>
#define EK(y...) EX(y)
diff --git a/arch/ia64/lib/memset.S b/arch/ia64/lib/memset.S
index 07a8b92c6496..552c5c7e4d06 100644
--- a/arch/ia64/lib/memset.S
+++ b/arch/ia64/lib/memset.S
@@ -18,8 +18,8 @@
Since a stf.spill f0 can store 16B in one go, we use this instruction
to get peak speed when value = 0. */
+#include <linux/export.h>
#include <asm/asmmacro.h>
-#include <asm/export.h>
#undef ret
#define dest in0
diff --git a/arch/ia64/lib/strlen.S b/arch/ia64/lib/strlen.S
index d66de5966974..1f4a46c15127 100644
--- a/arch/ia64/lib/strlen.S
+++ b/arch/ia64/lib/strlen.S
@@ -17,8 +17,8 @@
* 09/24/99 S.Eranian add speculation recovery code
*/
+#include <linux/export.h>
#include <asm/asmmacro.h>
-#include <asm/export.h>
//
//
diff --git a/arch/ia64/lib/strncpy_from_user.S b/arch/ia64/lib/strncpy_from_user.S
index 49eb81b69cd2..a287169bd953 100644
--- a/arch/ia64/lib/strncpy_from_user.S
+++ b/arch/ia64/lib/strncpy_from_user.S
@@ -17,8 +17,8 @@
* by Andreas Schwab <schwab@suse.de>).
*/
+#include <linux/export.h>
#include <asm/asmmacro.h>
-#include <asm/export.h>
GLOBAL_ENTRY(__strncpy_from_user)
alloc r2=ar.pfs,3,0,0,0
diff --git a/arch/ia64/lib/strnlen_user.S b/arch/ia64/lib/strnlen_user.S
index 4b684d4da106..a7eb56e840a9 100644
--- a/arch/ia64/lib/strnlen_user.S
+++ b/arch/ia64/lib/strnlen_user.S
@@ -13,8 +13,8 @@
* Copyright (C) 1999, 2001 David Mosberger-Tang <davidm@hpl.hp.com>
*/
+#include <linux/export.h>
#include <asm/asmmacro.h>
-#include <asm/export.h>
GLOBAL_ENTRY(__strnlen_user)
.prologue
diff --git a/arch/ia64/lib/xor.S b/arch/ia64/lib/xor.S
index 5413dafe6b2e..6e2a69662c06 100644
--- a/arch/ia64/lib/xor.S
+++ b/arch/ia64/lib/xor.S
@@ -5,8 +5,8 @@
* Optimized RAID-5 checksumming functions for IA-64.
*/
+#include <linux/export.h>
#include <asm/asmmacro.h>
-#include <asm/export.h>
GLOBAL_ENTRY(xor_ia64_2)
.prologue
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index ecf282dee513..e14396a2ddcb 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -8,11 +8,13 @@ config LOONGARCH
select ACPI_PPTT if ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_BINFMT_ELF_STATE
+ select ARCH_DISABLE_KASAN_INLINE
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_CPU_FINALIZE_INIT
select ARCH_HAS_FORTIFY_SOURCE
+ select ARCH_HAS_KCOV
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_SPECIAL
@@ -91,6 +93,9 @@ config LOONGARCH
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
+ select HAVE_ARCH_KASAN
+ select HAVE_ARCH_KFENCE
+ select HAVE_ARCH_KGDB if PERF_EVENTS
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
@@ -115,6 +120,7 @@ config LOONGARCH
select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
+ select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IOREMAP_PROT
@@ -254,6 +260,9 @@ config AS_HAS_LSX_EXTENSION
config AS_HAS_LASX_EXTENSION
def_bool $(as-instr,xvld \$xr0$(comma)\$a0$(comma)0)
+config AS_HAS_LBT_EXTENSION
+ def_bool $(as-instr,movscr2gr \$a0$(comma)\$scr0)
+
menu "Kernel type and options"
source "kernel/Kconfig.hz"
@@ -534,6 +543,18 @@ config CPU_HAS_LASX
If unsure, say Y.
+config CPU_HAS_LBT
+ bool "Support for the Loongson Binary Translation Extension"
+ depends on AS_HAS_LBT_EXTENSION
+ help
+ Loongson Binary Translation (LBT) introduces 4 scratch registers (SCR0
+ to SCR3), x86/ARM eflags (eflags) and x87 fpu stack pointer (ftop).
+ Enabling this option allows the kernel to allocate and switch registers
+ specific to LBT.
+
+ If you want to use this feature, such as the Loongson Architecture
+ Translator (LAT), say Y.
+
config CPU_HAS_PREFETCH
bool
default y
@@ -638,6 +659,11 @@ config ARCH_MMAP_RND_BITS_MAX
config ARCH_SUPPORTS_UPROBES
def_bool y
+config KASAN_SHADOW_OFFSET
+ hex
+ default 0x0
+ depends on KASAN
+
menu "Power management options"
config ARCH_SUSPEND_POSSIBLE
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index ef87bab46754..fb0fada43197 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -84,7 +84,10 @@ LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext
endif
cflags-y += $(call cc-option, -mno-check-zero-division)
+
+ifndef CONFIG_KASAN
cflags-y += -fno-builtin-memcpy -fno-builtin-memmove -fno-builtin-memset
+endif
load-y = 0x9000000000200000
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y)
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index d64849b4cba1..a3b52aaa83b3 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -30,7 +30,6 @@ CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
-CONFIG_SYSFS_DEPRECATED=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
@@ -47,8 +46,12 @@ CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
CONFIG_NR_CPUS=64
CONFIG_NUMA=y
+CONFIG_CPU_HAS_FPU=y
+CONFIG_CPU_HAS_LSX=y
+CONFIG_CPU_HAS_LASX=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
+CONFIG_RANDOMIZE_BASE=y
CONFIG_SUSPEND=y
CONFIG_HIBERNATION=y
CONFIG_ACPI=y
@@ -63,6 +66,7 @@ CONFIG_EFI_ZBOOT=y
CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y
CONFIG_EFI_CAPSULE_LOADER=m
CONFIG_EFI_TEST=m
+CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
@@ -108,7 +112,12 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
CONFIG_INET_ESP=m
CONFIG_INET_UDP_DIAG=y
CONFIG_TCP_CONG_ADVANCED=y
@@ -137,7 +146,6 @@ CONFIG_NFT_MASQ=m
CONFIG_NFT_REDIR=m
CONFIG_NFT_NAT=m
CONFIG_NFT_TUNNEL=m
-CONFIG_NFT_OBJREF=m
CONFIG_NFT_QUEUE=m
CONFIG_NFT_QUOTA=m
CONFIG_NFT_REJECT=m
@@ -208,7 +216,11 @@ CONFIG_IP_VS=m
CONFIG_IP_VS_IPV6=y
CONFIG_IP_VS_PROTO_TCP=y
CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_PROTO_SCTP=y
CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
CONFIG_IP_VS_NFCT=y
CONFIG_NF_TABLES_IPV4=y
CONFIG_NFT_DUP_IPV4=m
@@ -227,7 +239,6 @@ CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_TARGET_NETMAP=m
CONFIG_IP_NF_TARGET_REDIRECT=m
CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
@@ -363,6 +374,8 @@ CONFIG_MTD_CFI_AMDSTD=m
CONFIG_MTD_CFI_STAA=m
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_BLOCK=y
CONFIG_PARPORT=y
CONFIG_PARPORT_PC=y
CONFIG_PARPORT_SERIAL=y
@@ -370,6 +383,7 @@ CONFIG_PARPORT_PC_FIFO=y
CONFIG_ZRAM=m
CONFIG_ZRAM_DEF_COMP_ZSTD=y
CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
@@ -516,6 +530,8 @@ CONFIG_STMMAC_ETH=y
# CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_NGBE=y
+CONFIG_TXGBE=y
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_NET_VENDOR_XILINX is not set
CONFIG_PPP=m
@@ -602,9 +618,15 @@ CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_PIIX4=y
CONFIG_I2C_GPIO=y
+CONFIG_I2C_LS2X=y
CONFIG_SPI=y
+CONFIG_SPI_LOONGSON_PCI=m
+CONFIG_SPI_LOONGSON_PLATFORM=m
+CONFIG_PINCTRL=y
+CONFIG_PINCTRL_LOONGSON2=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_LOONGSON=y
+CONFIG_GPIO_LOONGSON_64BIT=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_RESTART=y
CONFIG_POWER_RESET_SYSCON=y
@@ -614,6 +636,7 @@ CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM93=m
CONFIG_SENSORS_W83795=m
CONFIG_SENSORS_W83627HF=m
+CONFIG_LOONGSON2_THERMAL=m
CONFIG_RC_CORE=m
CONFIG_LIRC=y
CONFIG_RC_DECODERS=y
@@ -643,6 +666,7 @@ CONFIG_DRM_AMDGPU_USERPTR=y
CONFIG_DRM_AST=y
CONFIG_DRM_QXL=m
CONFIG_DRM_VIRTIO_GPU=m
+CONFIG_DRM_LOONGSON=y
CONFIG_FB=y
CONFIG_FB_EFI=y
CONFIG_FB_RADEON=y
@@ -712,6 +736,7 @@ CONFIG_UCSI_ACPI=m
CONFIG_INFINIBAND=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_LOONGSON=y
CONFIG_DMADEVICES=y
CONFIG_UIO=m
CONFIG_UIO_PDRV_GENIRQ=m
@@ -745,7 +770,9 @@ CONFIG_COMEDI_NI_LABPC_PCI=m
CONFIG_COMEDI_NI_PCIDIO=m
CONFIG_COMEDI_NI_PCIMIO=m
CONFIG_STAGING=y
-CONFIG_R8188EU=m
+CONFIG_COMMON_CLK_LOONGSON2=y
+CONFIG_LOONGSON2_GUTS=y
+CONFIG_LOONGSON2_PM=y
CONFIG_PM_DEVFREQ=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
@@ -759,10 +786,17 @@ CONFIG_EXT2_FS_SECURITY=y
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_POSIX_ACL=y
CONFIG_EXT3_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
+CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
+CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
+CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA=y
@@ -771,11 +805,14 @@ CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
CONFIG_AUTOFS_FS=y
CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_VIRTIO_FS=m
CONFIG_OVERLAY_FS=y
CONFIG_OVERLAY_FS_INDEX=y
CONFIG_OVERLAY_FS_XINO_AUTO=y
CONFIG_OVERLAY_FS_METACOPY=y
CONFIG_FSCACHE=y
+CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
@@ -784,19 +821,42 @@ CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_FAT_DEFAULT_CODEPAGE=936
CONFIG_FAT_DEFAULT_IOCHARSET="gb2312"
+CONFIG_EXFAT_FS=m
+CONFIG_NTFS3_FS=m
+CONFIG_NTFS3_64BIT_CLUSTER=y
+CONFIG_NTFS3_LZX_XPRESS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=y
+CONFIG_ORANGEFS_FS=m
+CONFIG_ECRYPT_FS=m
+CONFIG_ECRYPT_FS_MESSAGING=y
CONFIG_HFS_FS=m
CONFIG_HFSPLUS_FS=m
+CONFIG_UBIFS_FS=m
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=y
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_SQUASHFS_XZ=y
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_PSTORE=m
+CONFIG_PSTORE_LZO_COMPRESS=m
+CONFIG_PSTORE_LZ4_COMPRESS=m
+CONFIG_PSTORE_LZ4HC_COMPRESS=m
+CONFIG_PSTORE_842_COMPRESS=y
+CONFIG_PSTORE_ZSTD_COMPRESS=y
+CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT=y
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_EROFS_FS=m
+CONFIG_EROFS_FS_ZIP_LZMA=y
+CONFIG_EROFS_FS_PCPU_KTHREAD=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
@@ -807,6 +867,10 @@ CONFIG_NFSD=y
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_BLOCKLAYOUT=y
+CONFIG_CEPH_FS=m
+CONFIG_CEPH_FSCACHE=y
+CONFIG_CEPH_FS_POSIX_ACL=y
+CONFIG_CEPH_FS_SECURITY_LABEL=y
CONFIG_CIFS=m
# CONFIG_CIFS_DEBUG is not set
CONFIG_9P_FS=y
@@ -814,6 +878,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_936=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_UTF8=y
+CONFIG_DLM=m
CONFIG_KEY_DH_OPERATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_SELINUX=y
@@ -847,6 +912,7 @@ CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_CRC32_LOONGARCH=m
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_PRINTK_TIME=y
CONFIG_STRIP_ASM_SYMS=y
diff --git a/arch/loongarch/include/asm/asm-prototypes.h b/arch/loongarch/include/asm/asm-prototypes.h
index ed06d3997420..cf8e1a4e7c19 100644
--- a/arch/loongarch/include/asm/asm-prototypes.h
+++ b/arch/loongarch/include/asm/asm-prototypes.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/uaccess.h>
#include <asm/fpu.h>
+#include <asm/lbt.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/ftrace.h>
diff --git a/arch/loongarch/include/asm/asmmacro.h b/arch/loongarch/include/asm/asmmacro.h
index 79e1d53fea89..c9544f358c33 100644
--- a/arch/loongarch/include/asm/asmmacro.h
+++ b/arch/loongarch/include/asm/asmmacro.h
@@ -10,113 +10,6 @@
#include <asm/fpregdef.h>
#include <asm/loongarch.h>
- .macro parse_v var val
- \var = \val
- .endm
-
- .macro parse_r var r
- \var = -1
- .ifc \r, $r0
- \var = 0
- .endif
- .ifc \r, $r1
- \var = 1
- .endif
- .ifc \r, $r2
- \var = 2
- .endif
- .ifc \r, $r3
- \var = 3
- .endif
- .ifc \r, $r4
- \var = 4
- .endif
- .ifc \r, $r5
- \var = 5
- .endif
- .ifc \r, $r6
- \var = 6
- .endif
- .ifc \r, $r7
- \var = 7
- .endif
- .ifc \r, $r8
- \var = 8
- .endif
- .ifc \r, $r9
- \var = 9
- .endif
- .ifc \r, $r10
- \var = 10
- .endif
- .ifc \r, $r11
- \var = 11
- .endif
- .ifc \r, $r12
- \var = 12
- .endif
- .ifc \r, $r13
- \var = 13
- .endif
- .ifc \r, $r14
- \var = 14
- .endif
- .ifc \r, $r15
- \var = 15
- .endif
- .ifc \r, $r16
- \var = 16
- .endif
- .ifc \r, $r17
- \var = 17
- .endif
- .ifc \r, $r18
- \var = 18
- .endif
- .ifc \r, $r19
- \var = 19
- .endif
- .ifc \r, $r20
- \var = 20
- .endif
- .ifc \r, $r21
- \var = 21
- .endif
- .ifc \r, $r22
- \var = 22
- .endif
- .ifc \r, $r23
- \var = 23
- .endif
- .ifc \r, $r24
- \var = 24
- .endif
- .ifc \r, $r25
- \var = 25
- .endif
- .ifc \r, $r26
- \var = 26
- .endif
- .ifc \r, $r27
- \var = 27
- .endif
- .ifc \r, $r28
- \var = 28
- .endif
- .ifc \r, $r29
- \var = 29
- .endif
- .ifc \r, $r30
- \var = 30
- .endif
- .ifc \r, $r31
- \var = 31
- .endif
- .iflt \var
- .error "Unable to parse register name \r"
- .endif
- .endm
-
.macro cpu_save_nonscratch thread
stptr.d s0, \thread, THREAD_REG23
stptr.d s1, \thread, THREAD_REG24
@@ -148,12 +41,51 @@
.macro fpu_save_csr thread tmp
movfcsr2gr \tmp, fcsr0
- stptr.w \tmp, \thread, THREAD_FCSR
+ stptr.w \tmp, \thread, THREAD_FCSR
+#ifdef CONFIG_CPU_HAS_LBT
+ /* TM bit is always 0 if LBT not supported */
+ andi \tmp, \tmp, FPU_CSR_TM
+ beqz \tmp, 1f
+ /* Save FTOP */
+ x86mftop \tmp
+ stptr.w \tmp, \thread, THREAD_FTOP
+ /* Turn off TM to ensure the order of FPR in memory independent of TM */
+ x86clrtm
+1:
+#endif
.endm
- .macro fpu_restore_csr thread tmp
- ldptr.w \tmp, \thread, THREAD_FCSR
- movgr2fcsr fcsr0, \tmp
+ .macro fpu_restore_csr thread tmp0 tmp1
+ ldptr.w \tmp0, \thread, THREAD_FCSR
+ movgr2fcsr fcsr0, \tmp0
+#ifdef CONFIG_CPU_HAS_LBT
+ /* TM bit is always 0 if LBT not supported */
+ andi \tmp0, \tmp0, FPU_CSR_TM
+ beqz \tmp0, 2f
+ /* Restore FTOP */
+ ldptr.w \tmp0, \thread, THREAD_FTOP
+ andi \tmp0, \tmp0, 0x7
+ la.pcrel \tmp1, 1f
+ alsl.d \tmp1, \tmp0, \tmp1, 3
+ jr \tmp1
+1:
+ x86mttop 0
+ b 2f
+ x86mttop 1
+ b 2f
+ x86mttop 2
+ b 2f
+ x86mttop 3
+ b 2f
+ x86mttop 4
+ b 2f
+ x86mttop 5
+ b 2f
+ x86mttop 6
+ b 2f
+ x86mttop 7
+2:
+#endif
.endm
.macro fpu_save_cc thread tmp0 tmp1
@@ -353,7 +285,7 @@
.macro lsx_restore_all thread tmp0 tmp1
lsx_restore_data \thread, \tmp0
fpu_restore_cc \thread, \tmp0, \tmp1
- fpu_restore_csr \thread, \tmp0
+ fpu_restore_csr \thread, \tmp0, \tmp1
.endm
.macro lsx_save_upper vd base tmp off
@@ -563,7 +495,7 @@
.macro lasx_restore_all thread tmp0 tmp1
lasx_restore_data \thread, \tmp0
fpu_restore_cc \thread, \tmp0, \tmp1
- fpu_restore_csr \thread, \tmp0
+ fpu_restore_csr \thread, \tmp0, \tmp1
.endm
.macro lasx_save_upper xd base tmp off
diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h
new file mode 100644
index 000000000000..deeff8158f45
--- /dev/null
+++ b/arch/loongarch/include/asm/kasan.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/linkage.h>
+#include <linux/mmzone.h>
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+#define __HAVE_ARCH_SHADOW_MAP
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+
+#define XRANGE_SHIFT (48)
+
+/* Valid address length */
+#define XRANGE_SHADOW_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
+/* Used to extract the valid address */
+#define XRANGE_SHADOW_MASK GENMASK_ULL(XRANGE_SHADOW_SHIFT - 1, 0)
+/* Size of one segment's whole address space */
+#define XRANGE_SIZE (XRANGE_SHADOW_MASK + 1)
+
+/* 64-bit segment value. */
+#define XKPRANGE_UC_SEG (0x8000)
+#define XKPRANGE_CC_SEG (0x9000)
+#define XKVRANGE_VC_SEG (0xffff)
+
+/* Cached */
+#define XKPRANGE_CC_START CACHE_BASE
+#define XKPRANGE_CC_SIZE XRANGE_SIZE
+#define XKPRANGE_CC_KASAN_OFFSET (0)
+#define XKPRANGE_CC_SHADOW_SIZE (XKPRANGE_CC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+#define XKPRANGE_CC_SHADOW_END (XKPRANGE_CC_KASAN_OFFSET + XKPRANGE_CC_SHADOW_SIZE)
+
+/* UnCached */
+#define XKPRANGE_UC_START UNCACHE_BASE
+#define XKPRANGE_UC_SIZE XRANGE_SIZE
+#define XKPRANGE_UC_KASAN_OFFSET XKPRANGE_CC_SHADOW_END
+#define XKPRANGE_UC_SHADOW_SIZE (XKPRANGE_UC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+#define XKPRANGE_UC_SHADOW_END (XKPRANGE_UC_KASAN_OFFSET + XKPRANGE_UC_SHADOW_SIZE)
+
+/* VMALLOC (Cached or UnCached) */
+#define XKVRANGE_VC_START MODULES_VADDR
+#define XKVRANGE_VC_SIZE round_up(KFENCE_AREA_END - MODULES_VADDR + 1, PGDIR_SIZE)
+#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END
+#define XKVRANGE_VC_SHADOW_SIZE (XKVRANGE_VC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+#define XKVRANGE_VC_SHADOW_END (XKVRANGE_VC_KASAN_OFFSET + XKVRANGE_VC_SHADOW_SIZE)
+
+/* KAsan shadow memory starts right after vmalloc. */
+#define KASAN_SHADOW_START round_up(KFENCE_AREA_END, PGDIR_SIZE)
+#define KASAN_SHADOW_SIZE (XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET)
+#define KASAN_SHADOW_END round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE)
+
+#define XKPRANGE_CC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
+#define XKPRANGE_UC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
+#define XKVRANGE_VC_SHADOW_OFFSET (KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET)
+
+extern bool kasan_early_stage;
+extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
+
+#define kasan_arch_is_ready kasan_arch_is_ready
+static __always_inline bool kasan_arch_is_ready(void)
+{
+ return !kasan_early_stage;
+}
+
+static inline void *kasan_mem_to_shadow(const void *addr)
+{
+ if (!kasan_arch_is_ready()) {
+ return (void *)(kasan_early_shadow_page);
+ } else {
+ unsigned long maddr = (unsigned long)addr;
+ unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
+ unsigned long offset = 0;
+
+ maddr &= XRANGE_SHADOW_MASK;
+ switch (xrange) {
+ case XKPRANGE_CC_SEG:
+ offset = XKPRANGE_CC_SHADOW_OFFSET;
+ break;
+ case XKPRANGE_UC_SEG:
+ offset = XKPRANGE_UC_SHADOW_OFFSET;
+ break;
+ case XKVRANGE_VC_SEG:
+ offset = XKVRANGE_VC_SHADOW_OFFSET;
+ break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
+ }
+}
+
+static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
+{
+ unsigned long addr = (unsigned long)shadow_addr;
+
+ if (unlikely(addr > KASAN_SHADOW_END) ||
+ unlikely(addr < KASAN_SHADOW_START)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
+ return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
+ else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
+ else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
+ else {
+ WARN_ON(1);
+ return NULL;
+ }
+}
+
+void kasan_init(void);
+asmlinkage void kasan_early_init(void);
+
+#endif
+#endif
diff --git a/arch/loongarch/include/asm/kfence.h b/arch/loongarch/include/asm/kfence.h
new file mode 100644
index 000000000000..6c82aea1c993
--- /dev/null
+++ b/arch/loongarch/include/asm/kfence.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KFENCE support for LoongArch.
+ *
+ * Author: Enze Li <lienze@kylinos.cn>
+ * Copyright (C) 2022-2023 KylinSoft Corporation.
+ */
+
+#ifndef _ASM_LOONGARCH_KFENCE_H
+#define _ASM_LOONGARCH_KFENCE_H
+
+#include <linux/kfence.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+
+static inline bool arch_kfence_init_pool(void)
+{
+ int err;
+ char *kfence_pool = __kfence_pool;
+ struct vm_struct *area;
+
+ area = __get_vm_area_caller(KFENCE_POOL_SIZE, VM_IOREMAP,
+ KFENCE_AREA_START, KFENCE_AREA_END,
+ __builtin_return_address(0));
+ if (!area)
+ return false;
+
+ __kfence_pool = (char *)area->addr;
+ err = ioremap_page_range((unsigned long)__kfence_pool,
+ (unsigned long)__kfence_pool + KFENCE_POOL_SIZE,
+ virt_to_phys((void *)kfence_pool), PAGE_KERNEL);
+ if (err) {
+ free_vm_area(area);
+ __kfence_pool = kfence_pool;
+ return false;
+ }
+
+ return true;
+}
+
+/* Protect the given page and flush TLB. */
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ pte_t *pte = virt_to_kpte(addr);
+
+ if (WARN_ON(!pte) || pte_none(*pte))
+ return false;
+
+ if (protect)
+ set_pte(pte, __pte(pte_val(*pte) & ~(_PAGE_VALID | _PAGE_PRESENT)));
+ else
+ set_pte(pte, __pte(pte_val(*pte) | (_PAGE_VALID | _PAGE_PRESENT)));
+
+ preempt_disable();
+ local_flush_tlb_one(addr);
+ preempt_enable();
+
+ return true;
+}
+
+#endif /* _ASM_LOONGARCH_KFENCE_H */
diff --git a/arch/loongarch/include/asm/kgdb.h b/arch/loongarch/include/asm/kgdb.h
new file mode 100644
index 000000000000..2041ae58b161
--- /dev/null
+++ b/arch/loongarch/include/asm/kgdb.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef _ASM_LOONGARCH_KGDB_H
+#define _ASM_LOONGARCH_KGDB_H
+
+#define GDB_SIZEOF_REG sizeof(u64)
+
+/* gdb remote protocol expects the following register layout. */
+
+/*
+ * General purpose registers:
+ * r0-r31: 64 bit
+ * orig_a0: 64 bit
+ * pc : 64 bit
+ * csr_badvaddr: 64 bit
+ */
+#define DBG_PT_REGS_BASE 0
+#define DBG_PT_REGS_NUM 35
+#define DBG_PT_REGS_END (DBG_PT_REGS_BASE + DBG_PT_REGS_NUM - 1)
+
+/*
+ * Floating point registers:
+ * f0-f31: 64 bit
+ */
+#define DBG_FPR_BASE (DBG_PT_REGS_END + 1)
+#define DBG_FPR_NUM 32
+#define DBG_FPR_END (DBG_FPR_BASE + DBG_FPR_NUM - 1)
+
+/*
+ * Condition Flag registers:
+ * fcc0-fcc8: 8 bit
+ */
+#define DBG_FCC_BASE (DBG_FPR_END + 1)
+#define DBG_FCC_NUM 8
+#define DBG_FCC_END (DBG_FCC_BASE + DBG_FCC_NUM - 1)
+
+/*
+ * Floating-point Control and Status registers:
+ * fcsr: 32 bit
+ */
+#define DBG_FCSR_NUM 1
+#define DBG_FCSR (DBG_FCC_END + 1)
+
+#define DBG_MAX_REG_NUM (DBG_FCSR + 1)
+
+/*
+ * Size of I/O buffer for gdb packet.
+ * considering to hold all register contents, size is set
+ */
+#define BUFMAX 2048
+
+/*
+ * Number of bytes required for gdb_regs buffer.
+ * PT_REGS and FPR: 8 bytes; FCSR: 4 bytes; FCC: 1 byte.
+ * GDB fails to connect for size beyond this with error
+ * "'g' packet reply is too long"
+ */
+#define NUMREGBYTES ((DBG_PT_REGS_NUM + DBG_FPR_NUM) * GDB_SIZEOF_REG + DBG_FCC_NUM * 1 + DBG_FCSR_NUM * 4)
+
+#define BREAK_INSTR_SIZE 4
+#define CACHE_FLUSH_IS_SAFE 0
+
+/* Register numbers of various important registers. */
+enum dbg_loongarch_regnum {
+ DBG_LOONGARCH_ZERO = 0,
+ DBG_LOONGARCH_RA,
+ DBG_LOONGARCH_TP,
+ DBG_LOONGARCH_SP,
+ DBG_LOONGARCH_A0,
+ DBG_LOONGARCH_FP = 22,
+ DBG_LOONGARCH_S0,
+ DBG_LOONGARCH_S1,
+ DBG_LOONGARCH_S2,
+ DBG_LOONGARCH_S3,
+ DBG_LOONGARCH_S4,
+ DBG_LOONGARCH_S5,
+ DBG_LOONGARCH_S6,
+ DBG_LOONGARCH_S7,
+ DBG_LOONGARCH_S8,
+ DBG_LOONGARCH_ORIG_A0,
+ DBG_LOONGARCH_PC,
+ DBG_LOONGARCH_BADV
+};
+
+void kgdb_breakinst(void);
+void arch_kgdb_breakpoint(void);
+
+#ifdef CONFIG_KGDB
+bool kgdb_breakpoint_handler(struct pt_regs *regs);
+#else /* !CONFIG_KGDB */
+static inline bool kgdb_breakpoint_handler(struct pt_regs *regs) { return false; }
+#endif /* CONFIG_KGDB */
+
+#endif /* _ASM_LOONGARCH_KGDB_H */
diff --git a/arch/loongarch/include/asm/lbt.h b/arch/loongarch/include/asm/lbt.h
new file mode 100644
index 000000000000..e671978bf552
--- /dev/null
+++ b/arch/loongarch/include/asm/lbt.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Qi Hu <huqi@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+#ifndef _ASM_LBT_H
+#define _ASM_LBT_H
+
+#include <asm/cpu.h>
+#include <asm/current.h>
+#include <asm/loongarch.h>
+#include <asm/processor.h>
+
+extern void _init_lbt(void);
+extern void _save_lbt(struct loongarch_lbt *);
+extern void _restore_lbt(struct loongarch_lbt *);
+
+static inline int is_lbt_enabled(void)
+{
+ if (!cpu_has_lbt)
+ return 0;
+
+ return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_LBTEN) ?
+ 1 : 0;
+}
+
+static inline int is_lbt_owner(void)
+{
+ return test_thread_flag(TIF_USEDLBT);
+}
+
+#ifdef CONFIG_CPU_HAS_LBT
+
+static inline void enable_lbt(void)
+{
+ if (cpu_has_lbt)
+ csr_xchg32(CSR_EUEN_LBTEN, CSR_EUEN_LBTEN, LOONGARCH_CSR_EUEN);
+}
+
+static inline void disable_lbt(void)
+{
+ if (cpu_has_lbt)
+ csr_xchg32(0, CSR_EUEN_LBTEN, LOONGARCH_CSR_EUEN);
+}
+
+static inline void __own_lbt(void)
+{
+ enable_lbt();
+ set_thread_flag(TIF_USEDLBT);
+ KSTK_EUEN(current) |= CSR_EUEN_LBTEN;
+}
+
+static inline void own_lbt_inatomic(int restore)
+{
+ if (cpu_has_lbt && !is_lbt_owner()) {
+ __own_lbt();
+ if (restore)
+ _restore_lbt(&current->thread.lbt);
+ }
+}
+
+static inline void own_lbt(int restore)
+{
+ preempt_disable();
+ own_lbt_inatomic(restore);
+ preempt_enable();
+}
+
+static inline void lose_lbt_inatomic(int save, struct task_struct *tsk)
+{
+ if (cpu_has_lbt && is_lbt_owner()) {
+ if (save)
+ _save_lbt(&tsk->thread.lbt);
+
+ disable_lbt();
+ clear_tsk_thread_flag(tsk, TIF_USEDLBT);
+ }
+ KSTK_EUEN(tsk) &= ~(CSR_EUEN_LBTEN);
+}
+
+static inline void lose_lbt(int save)
+{
+ preempt_disable();
+ lose_lbt_inatomic(save, current);
+ preempt_enable();
+}
+
+static inline void init_lbt(void)
+{
+ __own_lbt();
+ _init_lbt();
+}
+#else
+static inline void own_lbt_inatomic(int restore) {}
+static inline void lose_lbt_inatomic(int save, struct task_struct *tsk) {}
+static inline void init_lbt(void) {}
+static inline void lose_lbt(int save) {}
+#endif
+
+static inline int thread_lbt_context_live(void)
+{
+ if (!cpu_has_lbt)
+ return 0;
+
+ return test_thread_flag(TIF_LBT_CTX_LIVE);
+}
+
+#endif /* _ASM_LBT_H */
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 10748a20a2ab..33531d432b49 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -12,49 +12,6 @@
#ifndef __ASSEMBLY__
#include <larchintrin.h>
-/*
- * parse_r var, r - Helper assembler macro for parsing register names.
- *
- * This converts the register name in $n form provided in \r to the
- * corresponding register number, which is assigned to the variable \var. It is
- * needed to allow explicit encoding of instructions in inline assembly where
- * registers are chosen by the compiler in $n form, allowing us to avoid using
- * fixed register numbers.
- *
- * It also allows newer instructions (not implemented by the assembler) to be
- * transparently implemented using assembler macros, instead of needing separate
- * cases depending on toolchain support.
- *
- * Simple usage example:
- * __asm__ __volatile__("parse_r addr, %0\n\t"
- * "#invtlb op, 0, %0\n\t"
- * ".word ((0x6498000) | (addr << 10) | (0 << 5) | op)"
- * : "=r" (status);
- */
-
-/* Match an individual register number and assign to \var */
-#define _IFC_REG(n) \
- ".ifc \\r, $r" #n "\n\t" \
- "\\var = " #n "\n\t" \
- ".endif\n\t"
-
-__asm__(".macro parse_r var r\n\t"
- "\\var = -1\n\t"
- _IFC_REG(0) _IFC_REG(1) _IFC_REG(2) _IFC_REG(3)
- _IFC_REG(4) _IFC_REG(5) _IFC_REG(6) _IFC_REG(7)
- _IFC_REG(8) _IFC_REG(9) _IFC_REG(10) _IFC_REG(11)
- _IFC_REG(12) _IFC_REG(13) _IFC_REG(14) _IFC_REG(15)
- _IFC_REG(16) _IFC_REG(17) _IFC_REG(18) _IFC_REG(19)
- _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23)
- _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27)
- _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31)
- ".iflt \\var\n\t"
- ".error \"Unable to parse register name \\r\"\n\t"
- ".endif\n\t"
- ".endm");
-
-#undef _IFC_REG
-
/* CPUCFG */
#define read_cpucfg(reg) __cpucfg(reg)
@@ -1453,6 +1410,10 @@ __BUILD_CSR_OP(tlbidx)
#define FPU_CSR_RU 0x200 /* towards +Infinity */
#define FPU_CSR_RD 0x300 /* towards -Infinity */
+/* Bit 6 of the FPU Status Register specifies the LBT TOP simulation mode */
+#define FPU_CSR_TM_SHIFT 0x6
+#define FPU_CSR_TM (_ULCAST_(1) << FPU_CSR_TM_SHIFT)
+
#define read_fcsr(source) \
({ \
unsigned int __res; \
diff --git a/arch/loongarch/include/asm/mmzone.h b/arch/loongarch/include/asm/mmzone.h
index fe67d0b4b33d..2b9a90727e19 100644
--- a/arch/loongarch/include/asm/mmzone.h
+++ b/arch/loongarch/include/asm/mmzone.h
@@ -13,6 +13,4 @@ extern struct pglist_data *node_data[];
#define NODE_DATA(nid) (node_data[(nid)])
-extern void setup_zero_pages(void);
-
#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h
index 26e8dccb6619..63f137ce82a4 100644
--- a/arch/loongarch/include/asm/page.h
+++ b/arch/loongarch/include/asm/page.h
@@ -84,7 +84,12 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
#define virt_to_pfn(kaddr) PFN_DOWN(PHYSADDR(kaddr))
-#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
+
+#define virt_to_page(kaddr) \
+({ \
+ (likely((unsigned long)kaddr < vm_map_base)) ? \
+ dmw_virt_to_page((unsigned long)kaddr) : tlb_virt_to_page((unsigned long)kaddr);\
+})
extern int __virt_addr_valid(volatile void *kaddr);
#define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr))
diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h
index 23f5b1107246..79470f0b4f1d 100644
--- a/arch/loongarch/include/asm/pgalloc.h
+++ b/arch/loongarch/include/asm/pgalloc.h
@@ -94,4 +94,5 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
#endif /* __PAGETABLE_PUD_FOLDED */
+extern pte_t * __init populate_kernel_pte(unsigned long addr);
#endif /* _ASM_PGALLOC_H */
diff --git a/arch/loongarch/include/asm/pgtable.h b/arch/loongarch/include/asm/pgtable.h
index 06963a172319..29d9b12298bc 100644
--- a/arch/loongarch/include/asm/pgtable.h
+++ b/arch/loongarch/include/asm/pgtable.h
@@ -70,12 +70,9 @@ struct vm_area_struct;
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page;
-extern unsigned long zero_page_mask;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) \
- (virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
-#define __HAVE_COLOR_ZERO_PAGE
+#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
/*
* TLB refill handlers may also map the vmalloc area into xkvrange.
@@ -85,14 +82,30 @@ extern unsigned long zero_page_mask;
#define MODULES_VADDR (vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END (MODULES_VADDR + SZ_256M)
+#ifdef CONFIG_KFENCE
+#define KFENCE_AREA_SIZE (((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 + 2) * PAGE_SIZE)
+#else
+#define KFENCE_AREA_SIZE 0
+#endif
+
#define VMALLOC_START MODULES_END
+
+#ifndef CONFIG_KASAN
#define VMALLOC_END \
(vm_map_base + \
- min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE)
+ min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
+#else
+#define VMALLOC_END \
+ (vm_map_base + \
+ min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
+#endif
#define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END ((unsigned long)vmemmap + VMEMMAP_SIZE - 1)
+#define KFENCE_AREA_START (VMEMMAP_END + 1)
+#define KFENCE_AREA_END (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
+
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
@@ -350,6 +363,9 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];
+struct page *dmw_virt_to_page(unsigned long kaddr);
+struct page *tlb_virt_to_page(unsigned long kaddr);
+
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
@@ -596,6 +612,9 @@ static inline long pmd_protnone(pmd_t pmd)
}
#endif /* CONFIG_NUMA_BALANCING */
+#define pmd_leaf(pmd) ((pmd_val(pmd) & _PAGE_HUGE) != 0)
+#define pud_leaf(pud) ((pud_val(pud) & _PAGE_HUGE) != 0)
+
/*
* We provide our own get_unmapped area to cope with the virtual aliasing
* constraints placed on us by the cache architecture.
diff --git a/arch/loongarch/include/asm/processor.h b/arch/loongarch/include/asm/processor.h
index 636e1c66398c..c3bc44b5f5b3 100644
--- a/arch/loongarch/include/asm/processor.h
+++ b/arch/loongarch/include/asm/processor.h
@@ -80,11 +80,22 @@ BUILD_FPR_ACCESS(32)
BUILD_FPR_ACCESS(64)
struct loongarch_fpu {
- unsigned int fcsr;
uint64_t fcc; /* 8x8 */
+ uint32_t fcsr;
+ uint32_t ftop;
union fpureg fpr[NUM_FPU_REGS];
};
+struct loongarch_lbt {
+ /* Scratch registers */
+ unsigned long scr0;
+ unsigned long scr1;
+ unsigned long scr2;
+ unsigned long scr3;
+ /* Eflags register */
+ unsigned long eflags;
+};
+
#define INIT_CPUMASK { \
{0,} \
}
@@ -113,15 +124,6 @@ struct thread_struct {
unsigned long csr_ecfg;
unsigned long csr_badvaddr; /* Last user fault */
- /* Scratch registers */
- unsigned long scr0;
- unsigned long scr1;
- unsigned long scr2;
- unsigned long scr3;
-
- /* Eflags register */
- unsigned long eflags;
-
/* Other stuff associated with the thread. */
unsigned long trap_nr;
unsigned long error_code;
@@ -133,6 +135,7 @@ struct thread_struct {
* context because they are conditionally copied at fork().
*/
struct loongarch_fpu fpu FPU_ALIGN;
+ struct loongarch_lbt lbt; /* Also conditionally copied */
/* Hardware breakpoints pinned to this task. */
struct perf_event *hbp_break[LOONGARCH_MAX_BRP];
@@ -174,8 +177,9 @@ struct thread_struct {
* FPU & vector registers \
*/ \
.fpu = { \
- .fcsr = 0, \
.fcc = 0, \
+ .fcsr = 0, \
+ .ftop = 0, \
.fpr = {{{0,},},}, \
}, \
.hbp_break = {0}, \
diff --git a/arch/loongarch/include/asm/setup.h b/arch/loongarch/include/asm/setup.h
index be05c0e706a2..a0bc159ce8bd 100644
--- a/arch/loongarch/include/asm/setup.h
+++ b/arch/loongarch/include/asm/setup.h
@@ -7,6 +7,7 @@
#define _LOONGARCH_SETUP_H
#include <linux/types.h>
+#include <asm/sections.h>
#include <uapi/asm/setup.h>
#define VECSIZE 0x200
@@ -33,8 +34,13 @@ extern long __la_abs_end;
extern long __rela_dyn_begin;
extern long __rela_dyn_end;
-extern void * __init relocate_kernel(void);
+extern unsigned long __init relocate_kernel(void);
#endif
+static inline unsigned long kaslr_offset(void)
+{
+ return (unsigned long)&_text - VMLINUX_LOAD_ADDRESS;
+}
+
#endif /* __SETUP_H */
diff --git a/arch/loongarch/include/asm/stackframe.h b/arch/loongarch/include/asm/stackframe.h
index 7df80e6ae9d2..4fb1e6408b98 100644
--- a/arch/loongarch/include/asm/stackframe.h
+++ b/arch/loongarch/include/asm/stackframe.h
@@ -158,6 +158,10 @@
cfi_st u0, PT_R21, \docfi
csrrd u0, PERCPU_BASE_KS
9:
+#ifdef CONFIG_KGDB
+ li.w t0, CSR_CRMD_WE
+ csrxchg t0, t0, LOONGARCH_CSR_CRMD
+#endif
.endm
.macro SAVE_ALL docfi=0
diff --git a/arch/loongarch/include/asm/string.h b/arch/loongarch/include/asm/string.h
index 7b29cc9c70aa..5bb5a90d2681 100644
--- a/arch/loongarch/include/asm/string.h
+++ b/arch/loongarch/include/asm/string.h
@@ -7,11 +7,31 @@
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
+extern void *__memset(void *__s, int __c, size_t __count);
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
+extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);
#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
+extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
+
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+/*
+ * For files that are not instrumented (e.g. mm/slub.c) we
+ * should use not instrumented version of mem* functions.
+ */
+
+#define memset(s, c, n) __memset(s, c, n)
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif
#endif /* _ASM_STRING_H */
diff --git a/arch/loongarch/include/asm/switch_to.h b/arch/loongarch/include/asm/switch_to.h
index 24e3094bebab..5b225aff3ba2 100644
--- a/arch/loongarch/include/asm/switch_to.h
+++ b/arch/loongarch/include/asm/switch_to.h
@@ -7,6 +7,7 @@
#include <asm/cpu-features.h>
#include <asm/fpu.h>
+#include <asm/lbt.h>
struct task_struct;
@@ -34,6 +35,7 @@ extern asmlinkage struct task_struct *__switch_to(struct task_struct *prev,
#define switch_to(prev, next, last) \
do { \
lose_fpu_inatomic(1, prev); \
+ lose_lbt_inatomic(1, prev); \
hw_breakpoint_thread_switch(next); \
(last) = __switch_to(prev, next, task_thread_info(next), \
__builtin_return_address(0), __builtin_frame_address(0)); \
diff --git a/arch/loongarch/include/asm/thread_info.h b/arch/loongarch/include/asm/thread_info.h
index 1a3354ca056e..8cb653d49a54 100644
--- a/arch/loongarch/include/asm/thread_info.h
+++ b/arch/loongarch/include/asm/thread_info.h
@@ -84,6 +84,8 @@ register unsigned long current_stack_pointer __asm__("$sp");
#define TIF_SINGLESTEP 16 /* Single Step */
#define TIF_LSX_CTX_LIVE 17 /* LSX context must be preserved */
#define TIF_LASX_CTX_LIVE 18 /* LASX context must be preserved */
+#define TIF_USEDLBT 19 /* LBT was used by this task this quantum (SMP) */
+#define TIF_LBT_CTX_LIVE 20 /* LBT context must be preserved */
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
@@ -101,6 +103,8 @@ register unsigned long current_stack_pointer __asm__("$sp");
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
#define _TIF_LSX_CTX_LIVE (1<<TIF_LSX_CTX_LIVE)
#define _TIF_LASX_CTX_LIVE (1<<TIF_LASX_CTX_LIVE)
+#define _TIF_USEDLBT (1<<TIF_USEDLBT)
+#define _TIF_LBT_CTX_LIVE (1<<TIF_LBT_CTX_LIVE)
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/loongarch/include/asm/xor.h b/arch/loongarch/include/asm/xor.h
new file mode 100644
index 000000000000..12467fffee46
--- /dev/null
+++ b/arch/loongarch/include/asm/xor.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ */
+#ifndef _ASM_LOONGARCH_XOR_H
+#define _ASM_LOONGARCH_XOR_H
+
+#include <asm/cpu-features.h>
+#include <asm/xor_simd.h>
+
+#ifdef CONFIG_CPU_HAS_LSX
+static struct xor_block_template xor_block_lsx = {
+ .name = "lsx",
+ .do_2 = xor_lsx_2,
+ .do_3 = xor_lsx_3,
+ .do_4 = xor_lsx_4,
+ .do_5 = xor_lsx_5,
+};
+
+#define XOR_SPEED_LSX() \
+ do { \
+ if (cpu_has_lsx) \
+ xor_speed(&xor_block_lsx); \
+ } while (0)
+#else /* CONFIG_CPU_HAS_LSX */
+#define XOR_SPEED_LSX()
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LASX
+static struct xor_block_template xor_block_lasx = {
+ .name = "lasx",
+ .do_2 = xor_lasx_2,
+ .do_3 = xor_lasx_3,
+ .do_4 = xor_lasx_4,
+ .do_5 = xor_lasx_5,
+};
+
+#define XOR_SPEED_LASX() \
+ do { \
+ if (cpu_has_lasx) \
+ xor_speed(&xor_block_lasx); \
+ } while (0)
+#else /* CONFIG_CPU_HAS_LASX */
+#define XOR_SPEED_LASX()
+#endif /* CONFIG_CPU_HAS_LASX */
+
+/*
+ * For grins, also test the generic routines.
+ *
+ * More importantly: it cannot be ruled out at this point in time that some
+ * future (maybe reduced) models could run the vector algorithms slower than
+ * the scalar ones, maybe for errata or micro-op reasons. It may be
+ * appropriate to revisit this after one or two more uarch generations.
+ */
+#include <asm-generic/xor.h>
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_8regs_p); \
+ xor_speed(&xor_block_32regs); \
+ xor_speed(&xor_block_32regs_p); \
+ XOR_SPEED_LSX(); \
+ XOR_SPEED_LASX(); \
+} while (0)
+
+#endif /* _ASM_LOONGARCH_XOR_H */
diff --git a/arch/loongarch/include/asm/xor_simd.h b/arch/loongarch/include/asm/xor_simd.h
new file mode 100644
index 000000000000..471b96332f38
--- /dev/null
+++ b/arch/loongarch/include/asm/xor_simd.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ */
+#ifndef _ASM_LOONGARCH_XOR_SIMD_H
+#define _ASM_LOONGARCH_XOR_SIMD_H
+
+#ifdef CONFIG_CPU_HAS_LSX
+void xor_lsx_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void xor_lsx_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3);
+void xor_lsx_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void xor_lsx_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4, const unsigned long * __restrict p5);
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LASX
+void xor_lasx_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void xor_lasx_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3);
+void xor_lasx_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void xor_lasx_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4, const unsigned long * __restrict p5);
+#endif /* CONFIG_CPU_HAS_LASX */
+
+#endif /* _ASM_LOONGARCH_XOR_SIMD_H */
diff --git a/arch/loongarch/include/uapi/asm/ptrace.h b/arch/loongarch/include/uapi/asm/ptrace.h
index 06e3be52cb04..ac915f841650 100644
--- a/arch/loongarch/include/uapi/asm/ptrace.h
+++ b/arch/loongarch/include/uapi/asm/ptrace.h
@@ -56,6 +56,12 @@ struct user_lasx_state {
uint64_t vregs[32*4];
};
+struct user_lbt_state {
+ uint64_t scr[4];
+ uint32_t eflags;
+ uint32_t ftop;
+};
+
struct user_watch_state {
uint64_t dbg_info;
struct {
diff --git a/arch/loongarch/include/uapi/asm/sigcontext.h b/arch/loongarch/include/uapi/asm/sigcontext.h
index 4cd7d16f7037..6c22f616b8f1 100644
--- a/arch/loongarch/include/uapi/asm/sigcontext.h
+++ b/arch/loongarch/include/uapi/asm/sigcontext.h
@@ -59,4 +59,14 @@ struct lasx_context {
__u32 fcsr;
};
+/* LBT context */
+#define LBT_CTX_MAGIC 0x42540001
+#define LBT_CTX_ALIGN 8
+struct lbt_context {
+ __u64 regs[4];
+ __u32 eflags;
+ __u32 ftop;
+};
+
+
#endif /* _UAPI_ASM_SIGCONTEXT_H */
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 8e279f04f9e7..c56ea0b75448 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -15,6 +15,8 @@ obj-$(CONFIG_EFI) += efi.o
obj-$(CONFIG_CPU_HAS_FPU) += fpu.o kfpu.o
+obj-$(CONFIG_CPU_HAS_LBT) += lbt.o
+
obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
ifdef CONFIG_FUNCTION_TRACER
@@ -32,6 +34,12 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_rethook_trampoline.o = $(CC_FLAGS_FTRACE)
endif
+KASAN_SANITIZE_efi.o := n
+KASAN_SANITIZE_cpu-probe.o := n
+KASAN_SANITIZE_traps.o := n
+KASAN_SANITIZE_smp.o := n
+KASAN_SANITIZE_vdso.o := n
+
obj-$(CONFIG_MODULES) += module.o module-sections.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -54,6 +62,7 @@ obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_RETHOOK) += rethook.o rethook_trampoline.o
obj-$(CONFIG_UPROBES) += uprobes.o
diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c
index 505e4bf59603..8da0726777ed 100644
--- a/arch/loongarch/kernel/asm-offsets.c
+++ b/arch/loongarch/kernel/asm-offsets.c
@@ -118,13 +118,6 @@ void output_thread_defines(void)
OFFSET(THREAD_CSRECFG, task_struct,
thread.csr_ecfg);
- OFFSET(THREAD_SCR0, task_struct, thread.scr0);
- OFFSET(THREAD_SCR1, task_struct, thread.scr1);
- OFFSET(THREAD_SCR2, task_struct, thread.scr2);
- OFFSET(THREAD_SCR3, task_struct, thread.scr3);
-
- OFFSET(THREAD_EFLAGS, task_struct, thread.eflags);
-
OFFSET(THREAD_FPU, task_struct, thread.fpu);
OFFSET(THREAD_BVADDR, task_struct, \
@@ -172,6 +165,17 @@ void output_thread_fpu_defines(void)
OFFSET(THREAD_FCSR, loongarch_fpu, fcsr);
OFFSET(THREAD_FCC, loongarch_fpu, fcc);
+ OFFSET(THREAD_FTOP, loongarch_fpu, ftop);
+ BLANK();
+}
+
+void output_thread_lbt_defines(void)
+{
+ OFFSET(THREAD_SCR0, loongarch_lbt, scr0);
+ OFFSET(THREAD_SCR1, loongarch_lbt, scr1);
+ OFFSET(THREAD_SCR2, loongarch_lbt, scr2);
+ OFFSET(THREAD_SCR3, loongarch_lbt, scr3);
+ OFFSET(THREAD_EFLAGS, loongarch_lbt, eflags);
BLANK();
}
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
index e925579c7a71..55320813ee08 100644
--- a/arch/loongarch/kernel/cpu-probe.c
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -144,6 +144,20 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
c->options |= LOONGARCH_CPU_LVZ;
elf_hwcap |= HWCAP_LOONGARCH_LVZ;
}
+#ifdef CONFIG_CPU_HAS_LBT
+ if (config & CPUCFG2_X86BT) {
+ c->options |= LOONGARCH_CPU_LBT_X86;
+ elf_hwcap |= HWCAP_LOONGARCH_LBT_X86;
+ }
+ if (config & CPUCFG2_ARMBT) {
+ c->options |= LOONGARCH_CPU_LBT_ARM;
+ elf_hwcap |= HWCAP_LOONGARCH_LBT_ARM;
+ }
+ if (config & CPUCFG2_MIPSBT) {
+ c->options |= LOONGARCH_CPU_LBT_MIPS;
+ elf_hwcap |= HWCAP_LOONGARCH_LBT_MIPS;
+ }
+#endif
config = read_cpucfg(LOONGARCH_CPUCFG6);
if (config & CPUCFG6_PMP)
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
index d737e3cf42d3..65518bb8f472 100644
--- a/arch/loongarch/kernel/entry.S
+++ b/arch/loongarch/kernel/entry.S
@@ -58,6 +58,11 @@ SYM_FUNC_START(handle_syscall)
SAVE_STATIC
+#ifdef CONFIG_KGDB
+ li.w t1, CSR_CRMD_WE
+ csrxchg t1, t1, LOONGARCH_CSR_CRMD
+#endif
+
move u0, t0
li.d tp, ~_THREAD_MASK
and tp, tp, sp
diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
index 501094a09f5d..d53ab10f4644 100644
--- a/arch/loongarch/kernel/fpu.S
+++ b/arch/loongarch/kernel/fpu.S
@@ -22,7 +22,7 @@
.macro EX insn, reg, src, offs
.ex\@: \insn \reg, \src, \offs
- _asm_extable .ex\@, fault
+ _asm_extable .ex\@, .L_fpu_fault
.endm
.macro sc_save_fp base
@@ -138,6 +138,13 @@
.macro sc_save_fcsr base, tmp0
movfcsr2gr \tmp0, fcsr0
EX st.w \tmp0, \base, 0
+#if defined(CONFIG_CPU_HAS_LBT)
+ /* TM bit is always 0 if LBT not supported */
+ andi \tmp0, \tmp0, FPU_CSR_TM
+ beqz \tmp0, 1f
+ x86clrtm
+1:
+#endif
.endm
.macro sc_restore_fcsr base, tmp0
@@ -309,7 +316,7 @@ EXPORT_SYMBOL(_save_fp)
*/
SYM_FUNC_START(_restore_fp)
fpu_restore_double a0 t1 # clobbers t1
- fpu_restore_csr a0 t1
+ fpu_restore_csr a0 t1 t2
fpu_restore_cc a0 t1 t2 # clobbers t1, t2
jr ra
SYM_FUNC_END(_restore_fp)
@@ -514,7 +521,6 @@ SYM_FUNC_START(_restore_lasx_context)
jr ra
SYM_FUNC_END(_restore_lasx_context)
-SYM_FUNC_START(fault)
+.L_fpu_fault:
li.w a0, -EFAULT # failure
jr ra
-SYM_FUNC_END(fault)
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
index 5e828a8bc0a0..53b883db0786 100644
--- a/arch/loongarch/kernel/head.S
+++ b/arch/loongarch/kernel/head.S
@@ -95,12 +95,17 @@ SYM_CODE_START(kernel_entry) # kernel entry point
PTR_LI sp, (_THREAD_SIZE - PT_SIZE)
PTR_ADD sp, sp, tp
set_saved_sp sp, t0, t1
-#endif
- /* relocate_kernel() returns the new kernel entry point */
- jr a0
- ASM_BUG()
+ /* Jump to the new kernel: new_pc = current_pc + random_offset */
+ pcaddi t0, 0
+ add.d t0, t0, a0
+ jirl zero, t0, 0xc
+#endif /* CONFIG_RANDOMIZE_BASE */
+
+#endif /* CONFIG_RELOCATABLE */
+#ifdef CONFIG_KASAN
+ bl kasan_early_init
#endif
bl start_kernel
diff --git a/arch/loongarch/kernel/kfpu.c b/arch/loongarch/kernel/kfpu.c
index 5c46ae8c6cac..ec5b28e570c9 100644
--- a/arch/loongarch/kernel/kfpu.c
+++ b/arch/loongarch/kernel/kfpu.c
@@ -8,19 +8,40 @@
#include <asm/fpu.h>
#include <asm/smp.h>
+static unsigned int euen_mask = CSR_EUEN_FPEN;
+
+/*
+ * The critical section between kernel_fpu_begin() and kernel_fpu_end()
+ * is non-reentrant. It is the caller's responsibility to avoid reentrance.
+ * See drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c as an example.
+ */
static DEFINE_PER_CPU(bool, in_kernel_fpu);
+static DEFINE_PER_CPU(unsigned int, euen_current);
void kernel_fpu_begin(void)
{
+ unsigned int *euen_curr;
+
preempt_disable();
WARN_ON(this_cpu_read(in_kernel_fpu));
this_cpu_write(in_kernel_fpu, true);
+ euen_curr = this_cpu_ptr(&euen_current);
- if (!is_fpu_owner())
- enable_fpu();
+ *euen_curr = csr_xchg32(euen_mask, euen_mask, LOONGARCH_CSR_EUEN);
+
+#ifdef CONFIG_CPU_HAS_LASX
+ if (*euen_curr & CSR_EUEN_LASXEN)
+ _save_lasx(&current->thread.fpu);
+ else
+#endif
+#ifdef CONFIG_CPU_HAS_LSX
+ if (*euen_curr & CSR_EUEN_LSXEN)
+ _save_lsx(&current->thread.fpu);
else
+#endif
+ if (*euen_curr & CSR_EUEN_FPEN)
_save_fp(&current->thread.fpu);
write_fcsr(LOONGARCH_FCSR0, 0);
@@ -29,15 +50,41 @@ EXPORT_SYMBOL_GPL(kernel_fpu_begin);
void kernel_fpu_end(void)
{
+ unsigned int *euen_curr;
+
WARN_ON(!this_cpu_read(in_kernel_fpu));
- if (!is_fpu_owner())
- disable_fpu();
+ euen_curr = this_cpu_ptr(&euen_current);
+
+#ifdef CONFIG_CPU_HAS_LASX
+ if (*euen_curr & CSR_EUEN_LASXEN)
+ _restore_lasx(&current->thread.fpu);
else
+#endif
+#ifdef CONFIG_CPU_HAS_LSX
+ if (*euen_curr & CSR_EUEN_LSXEN)
+ _restore_lsx(&current->thread.fpu);
+ else
+#endif
+ if (*euen_curr & CSR_EUEN_FPEN)
_restore_fp(&current->thread.fpu);
+ *euen_curr = csr_xchg32(*euen_curr, euen_mask, LOONGARCH_CSR_EUEN);
+
this_cpu_write(in_kernel_fpu, false);
preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
+
+static int __init init_euen_mask(void)
+{
+ if (cpu_has_lsx)
+ euen_mask |= CSR_EUEN_LSXEN;
+
+ if (cpu_has_lasx)
+ euen_mask |= CSR_EUEN_LASXEN;
+
+ return 0;
+}
+arch_initcall(init_euen_mask);
diff --git a/arch/loongarch/kernel/kgdb.c b/arch/loongarch/kernel/kgdb.c
new file mode 100644
index 000000000000..445c452d72a7
--- /dev/null
+++ b/arch/loongarch/kernel/kgdb.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * LoongArch KGDB support
+ *
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/hw_breakpoint.h>
+#include <linux/kdebug.h>
+#include <linux/kgdb.h>
+#include <linux/processor.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/fpu.h>
+#include <asm/hw_breakpoint.h>
+#include <asm/inst.h>
+#include <asm/irq_regs.h>
+#include <asm/ptrace.h>
+#include <asm/sigcontext.h>
+
+int kgdb_watch_activated;
+static unsigned int stepped_opcode;
+static unsigned long stepped_address;
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+ { "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
+ { "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
+ { "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
+ { "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
+ { "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
+ { "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
+ { "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
+ { "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
+ { "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
+ { "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
+ { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
+ { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
+ { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
+ { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
+ { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
+ { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
+ { "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
+ { "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
+ { "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
+ { "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
+ { "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
+ { "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
+ { "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
+ { "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
+ { "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
+ { "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
+ { "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
+ { "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
+ { "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
+ { "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
+ { "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
+ { "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
+ { "orig_a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, orig_a0) },
+ { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, csr_era) },
+ { "badv", GDB_SIZEOF_REG, offsetof(struct pt_regs, csr_badvaddr) },
+ { "f0", GDB_SIZEOF_REG, 0 },
+ { "f1", GDB_SIZEOF_REG, 1 },
+ { "f2", GDB_SIZEOF_REG, 2 },
+ { "f3", GDB_SIZEOF_REG, 3 },
+ { "f4", GDB_SIZEOF_REG, 4 },
+ { "f5", GDB_SIZEOF_REG, 5 },
+ { "f6", GDB_SIZEOF_REG, 6 },
+ { "f7", GDB_SIZEOF_REG, 7 },
+ { "f8", GDB_SIZEOF_REG, 8 },
+ { "f9", GDB_SIZEOF_REG, 9 },
+ { "f10", GDB_SIZEOF_REG, 10 },
+ { "f11", GDB_SIZEOF_REG, 11 },
+ { "f12", GDB_SIZEOF_REG, 12 },
+ { "f13", GDB_SIZEOF_REG, 13 },
+ { "f14", GDB_SIZEOF_REG, 14 },
+ { "f15", GDB_SIZEOF_REG, 15 },
+ { "f16", GDB_SIZEOF_REG, 16 },
+ { "f17", GDB_SIZEOF_REG, 17 },
+ { "f18", GDB_SIZEOF_REG, 18 },
+ { "f19", GDB_SIZEOF_REG, 19 },
+ { "f20", GDB_SIZEOF_REG, 20 },
+ { "f21", GDB_SIZEOF_REG, 21 },
+ { "f22", GDB_SIZEOF_REG, 22 },
+ { "f23", GDB_SIZEOF_REG, 23 },
+ { "f24", GDB_SIZEOF_REG, 24 },
+ { "f25", GDB_SIZEOF_REG, 25 },
+ { "f26", GDB_SIZEOF_REG, 26 },
+ { "f27", GDB_SIZEOF_REG, 27 },
+ { "f28", GDB_SIZEOF_REG, 28 },
+ { "f29", GDB_SIZEOF_REG, 29 },
+ { "f30", GDB_SIZEOF_REG, 30 },
+ { "f31", GDB_SIZEOF_REG, 31 },
+ { "fcc0", 1, 0 },
+ { "fcc1", 1, 1 },
+ { "fcc2", 1, 2 },
+ { "fcc3", 1, 3 },
+ { "fcc4", 1, 4 },
+ { "fcc5", 1, 5 },
+ { "fcc6", 1, 6 },
+ { "fcc7", 1, 7 },
+ { "fcsr", 4, 0 },
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int reg_offset, reg_size;
+
+ if (regno < 0 || regno >= DBG_MAX_REG_NUM)
+ return NULL;
+
+ reg_offset = dbg_reg_def[regno].offset;
+ reg_size = dbg_reg_def[regno].size;
+
+ if (reg_offset == -1)
+ goto out;
+
+ /* Handle general-purpose/orig_a0/pc/badv registers */
+ if (regno <= DBG_PT_REGS_END) {
+ memcpy(mem, (void *)regs + reg_offset, reg_size);
+ goto out;
+ }
+
+ if (!(regs->csr_euen & CSR_EUEN_FPEN))
+ goto out;
+
+ save_fp(current);
+
+ /* Handle FP registers */
+ switch (regno) {
+ case DBG_FCSR: /* Process the fcsr */
+ memcpy(mem, (void *)&current->thread.fpu.fcsr, reg_size);
+ break;
+ case DBG_FCC_BASE ... DBG_FCC_END: /* Process the fcc */
+ memcpy(mem, (void *)&current->thread.fpu.fcc + reg_offset, reg_size);
+ break;
+ case DBG_FPR_BASE ... DBG_FPR_END: /* Process the fpr */
+ memcpy(mem, (void *)&current->thread.fpu.fpr[reg_offset], reg_size);
+ break;
+ default:
+ break;
+ }
+
+out:
+ return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int reg_offset, reg_size;
+
+ if (regno < 0 || regno >= DBG_MAX_REG_NUM)
+ return -EINVAL;
+
+ reg_offset = dbg_reg_def[regno].offset;
+ reg_size = dbg_reg_def[regno].size;
+
+ if (reg_offset == -1)
+ return 0;
+
+ /* Handle general-purpose/orig_a0/pc/badv registers */
+ if (regno <= DBG_PT_REGS_END) {
+ memcpy((void *)regs + reg_offset, mem, reg_size);
+ return 0;
+ }
+
+ if (!(regs->csr_euen & CSR_EUEN_FPEN))
+ return 0;
+
+ /* Handle FP registers */
+ switch (regno) {
+ case DBG_FCSR: /* Process the fcsr */
+ memcpy((void *)&current->thread.fpu.fcsr, mem, reg_size);
+ break;
+ case DBG_FCC_BASE ... DBG_FCC_END: /* Process the fcc */
+ memcpy((void *)&current->thread.fpu.fcc + reg_offset, mem, reg_size);
+ break;
+ case DBG_FPR_BASE ... DBG_FPR_END: /* Process the fpr */
+ memcpy((void *)&current->thread.fpu.fpr[reg_offset], mem, reg_size);
+ break;
+ default:
+ break;
+ }
+
+ restore_fp(current);
+
+ return 0;
+}
+
+/*
+ * Similar to regs_to_gdb_regs() except that the process is sleeping and so
+ * we may not be able to get all the info.
+ */
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+ /* Initialize to zero */
+ memset((char *)gdb_regs, 0, NUMREGBYTES);
+
+ gdb_regs[DBG_LOONGARCH_RA] = p->thread.reg01;
+ gdb_regs[DBG_LOONGARCH_TP] = (long)p;
+ gdb_regs[DBG_LOONGARCH_SP] = p->thread.reg03;
+
+ /* S0 - S8 */
+ gdb_regs[DBG_LOONGARCH_S0] = p->thread.reg23;
+ gdb_regs[DBG_LOONGARCH_S1] = p->thread.reg24;
+ gdb_regs[DBG_LOONGARCH_S2] = p->thread.reg25;
+ gdb_regs[DBG_LOONGARCH_S3] = p->thread.reg26;
+ gdb_regs[DBG_LOONGARCH_S4] = p->thread.reg27;
+ gdb_regs[DBG_LOONGARCH_S5] = p->thread.reg28;
+ gdb_regs[DBG_LOONGARCH_S6] = p->thread.reg29;
+ gdb_regs[DBG_LOONGARCH_S7] = p->thread.reg30;
+ gdb_regs[DBG_LOONGARCH_S8] = p->thread.reg31;
+
+ /*
+ * The PC uses the return address (RA), i.e. the moment just after returning from __switch_to()
+ */
+ gdb_regs[DBG_LOONGARCH_PC] = p->thread.reg01;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->csr_era = pc;
+}
+
+void arch_kgdb_breakpoint(void)
+{
+ __asm__ __volatile__ ( \
+ ".globl kgdb_breakinst\n\t" \
+ "nop\n" \
+ "kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */
+}
+
+/*
+ * Called (via the die notifier chain) before the kernel dies. If KGDB is
+ * enabled, try to fall into the debugger.
+ */
+static int kgdb_loongarch_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+ struct die_args *args = (struct die_args *)ptr;
+ struct pt_regs *regs = args->regs;
+
+ /* Userspace events, ignore. */
+ if (user_mode(regs))
+ return NOTIFY_DONE;
+
+ if (!kgdb_io_module_registered)
+ return NOTIFY_DONE;
+
+ if (atomic_read(&kgdb_active) != -1)
+ kgdb_nmicallback(smp_processor_id(), regs);
+
+ if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
+ return NOTIFY_DONE;
+
+ if (atomic_read(&kgdb_setting_breakpoint))
+ if (regs->csr_era == (unsigned long)&kgdb_breakinst)
+ regs->csr_era += LOONGARCH_INSN_SIZE;
+
+ return NOTIFY_STOP;
+}
+
+bool kgdb_breakpoint_handler(struct pt_regs *regs)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = "Break",
+ .err = BRK_KDB,
+ .trapnr = read_csr_excode(),
+ .signr = SIGTRAP,
+
+ };
+
+ return (kgdb_loongarch_notify(NULL, DIE_TRAP, &args) == NOTIFY_STOP) ? true : false;
+}
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_loongarch_notify,
+};
+
+static inline void kgdb_arch_update_addr(struct pt_regs *regs,
+ char *remcom_in_buffer)
+{
+ unsigned long addr;
+ char *ptr;
+
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ regs->csr_era = addr;
+}
+
+/* Calculate the new address for after a step */
+static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
+{
+ char cj_val;
+ unsigned int si, si_l, si_h, rd, rj, cj;
+ unsigned long pc = instruction_pointer(regs);
+ union loongarch_instruction *ip = (union loongarch_instruction *)pc;
+
+ if (pc & 3) {
+ pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
+ return -EINVAL;
+ }
+
+ *next_addr = pc + LOONGARCH_INSN_SIZE;
+
+ si_h = ip->reg0i26_format.immediate_h;
+ si_l = ip->reg0i26_format.immediate_l;
+ switch (ip->reg0i26_format.opcode) {
+ case b_op:
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
+ return 0;
+ case bl_op:
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
+ regs->regs[1] = pc + LOONGARCH_INSN_SIZE;
+ return 0;
+ }
+
+ rj = ip->reg1i21_format.rj;
+ cj = (rj & 0x07) + DBG_FCC_BASE;
+ si_l = ip->reg1i21_format.immediate_l;
+ si_h = ip->reg1i21_format.immediate_h;
+ dbg_get_reg(cj, &cj_val, regs);
+ switch (ip->reg1i21_format.opcode) {
+ case beqz_op:
+ if (regs->regs[rj] == 0)
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
+ return 0;
+ case bnez_op:
+ if (regs->regs[rj] != 0)
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
+ return 0;
+ case bceqz_op: /* bceqz_op = bcnez_op */
+ if (((rj & 0x18) == 0x00) && !cj_val) /* bceqz */
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
+ if (((rj & 0x18) == 0x08) && cj_val) /* bcnez */
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
+ return 0;
+ }
+
+ rj = ip->reg2i16_format.rj;
+ rd = ip->reg2i16_format.rd;
+ si = ip->reg2i16_format.immediate;
+ switch (ip->reg2i16_format.opcode) {
+ case beq_op:
+ if (regs->regs[rj] == regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case bne_op:
+ if (regs->regs[rj] != regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case blt_op:
+ if ((long)regs->regs[rj] < (long)regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case bge_op:
+ if ((long)regs->regs[rj] >= (long)regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case bltu_op:
+ if (regs->regs[rj] < regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case bgeu_op:
+ if (regs->regs[rj] >= regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case jirl_op:
+ regs->regs[rd] = pc + LOONGARCH_INSN_SIZE;
+ *next_addr = regs->regs[rj] + sign_extend64(si << 2, 17);
+ return 0;
+ }
+
+ return 0;
+}
+
+static int do_single_step(struct pt_regs *regs)
+{
+ int error = 0;
+ unsigned long addr = 0; /* Address that the current instruction will take us to */
+
+ error = get_step_address(regs, &addr);
+ if (error)
+ return error;
+
+ /* Save the original opcode at the stepped address */
+ error = get_kernel_nofault(stepped_opcode, (void *)addr);
+ if (error)
+ return error;
+
+ stepped_address = addr;
+
+ /* Replace the opcode with the break instruction */
+ error = copy_to_kernel_nofault((void *)stepped_address,
+ arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
+ flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
+
+ if (error) {
+ stepped_opcode = 0;
+ stepped_address = 0;
+ } else {
+ kgdb_single_step = 1;
+ atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id());
+ }
+
+ return error;
+}
+
+/* Undo a single step */
+static void undo_single_step(struct pt_regs *regs)
+{
+ if (stepped_opcode) {
+ copy_to_kernel_nofault((void *)stepped_address,
+ (void *)&stepped_opcode, BREAK_INSTR_SIZE);
+ flush_icache_range(stepped_address, stepped_address + BREAK_INSTR_SIZE);
+ }
+
+ stepped_opcode = 0;
+ stepped_address = 0;
+ kgdb_single_step = 0;
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+}
+
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ char *remcom_in_buffer, char *remcom_out_buffer,
+ struct pt_regs *regs)
+{
+ int ret = 0;
+
+ undo_single_step(regs);
+ regs->csr_prmd |= CSR_PRMD_PWE;
+
+ switch (remcom_in_buffer[0]) {
+ case 'D':
+ case 'k':
+ regs->csr_prmd &= ~CSR_PRMD_PWE;
+ fallthrough;
+ case 'c':
+ kgdb_arch_update_addr(regs, remcom_in_buffer);
+ break;
+ case 's':
+ kgdb_arch_update_addr(regs, remcom_in_buffer);
+ ret = do_single_step(regs);
+ break;
+ default:
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static struct hw_breakpoint {
+ unsigned int enabled;
+ unsigned long addr;
+ int len;
+ int type;
+ struct perf_event * __percpu *pev;
+} breakinfo[LOONGARCH_MAX_BRP];
+
+static int hw_break_reserve_slot(int breakno)
+{
+ int cpu, cnt = 0;
+ struct perf_event **pevent;
+
+ for_each_online_cpu(cpu) {
+ cnt++;
+ pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
+ if (dbg_reserve_bp_slot(*pevent))
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ for_each_online_cpu(cpu) {
+ cnt--;
+ if (!cnt)
+ break;
+ pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
+ dbg_release_bp_slot(*pevent);
+ }
+
+ return -1;
+}
+
+static int hw_break_release_slot(int breakno)
+{
+ int cpu;
+ struct perf_event **pevent;
+
+ if (dbg_is_early)
+ return 0;
+
+ for_each_online_cpu(cpu) {
+ pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
+ if (dbg_release_bp_slot(*pevent))
+ /*
+ * The debugger is responsible for handling the retry on
+ * remove failure.
+ */
+ return -1;
+ }
+
+ return 0;
+}
+
+static int kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
+{
+ int i;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++)
+ if (!breakinfo[i].enabled)
+ break;
+
+ if (i == LOONGARCH_MAX_BRP)
+ return -1;
+
+ switch (bptype) {
+ case BP_HARDWARE_BREAKPOINT:
+ breakinfo[i].type = HW_BREAKPOINT_X;
+ break;
+ case BP_READ_WATCHPOINT:
+ breakinfo[i].type = HW_BREAKPOINT_R;
+ break;
+ case BP_WRITE_WATCHPOINT:
+ breakinfo[i].type = HW_BREAKPOINT_W;
+ break;
+ case BP_ACCESS_WATCHPOINT:
+ breakinfo[i].type = HW_BREAKPOINT_RW;
+ break;
+ default:
+ return -1;
+ }
+
+ switch (len) {
+ case 1:
+ breakinfo[i].len = HW_BREAKPOINT_LEN_1;
+ break;
+ case 2:
+ breakinfo[i].len = HW_BREAKPOINT_LEN_2;
+ break;
+ case 4:
+ breakinfo[i].len = HW_BREAKPOINT_LEN_4;
+ break;
+ case 8:
+ breakinfo[i].len = HW_BREAKPOINT_LEN_8;
+ break;
+ default:
+ return -1;
+ }
+
+ breakinfo[i].addr = addr;
+ if (hw_break_reserve_slot(i)) {
+ breakinfo[i].addr = 0;
+ return -1;
+ }
+ breakinfo[i].enabled = 1;
+
+ return 0;
+}
+
+static int kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
+{
+ int i;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++)
+ if (breakinfo[i].addr == addr && breakinfo[i].enabled)
+ break;
+
+ if (i == LOONGARCH_MAX_BRP)
+ return -1;
+
+ if (hw_break_release_slot(i)) {
+ pr_err("Cannot remove hw breakpoint at %lx\n", addr);
+ return -1;
+ }
+ breakinfo[i].enabled = 0;
+
+ return 0;
+}
+
+static void kgdb_disable_hw_break(struct pt_regs *regs)
+{
+ int i;
+ int cpu = raw_smp_processor_id();
+ struct perf_event *bp;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ if (!breakinfo[i].enabled)
+ continue;
+
+ bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
+ if (bp->attr.disabled == 1)
+ continue;
+
+ arch_uninstall_hw_breakpoint(bp);
+ bp->attr.disabled = 1;
+ }
+
+ /* Disable hardware debugging while we are in kgdb */
+ csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
+}
+
+static void kgdb_remove_all_hw_break(void)
+{
+ int i;
+ int cpu = raw_smp_processor_id();
+ struct perf_event *bp;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ if (!breakinfo[i].enabled)
+ continue;
+
+ bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
+ if (!bp->attr.disabled) {
+ arch_uninstall_hw_breakpoint(bp);
+ bp->attr.disabled = 1;
+ continue;
+ }
+
+ if (hw_break_release_slot(i))
+ pr_err("KGDB: hw bpt remove failed %lx\n", breakinfo[i].addr);
+ breakinfo[i].enabled = 0;
+ }
+
+ csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
+ kgdb_watch_activated = 0;
+}
+
+static void kgdb_correct_hw_break(void)
+{
+ int i, activated = 0;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ struct perf_event *bp;
+ int val;
+ int cpu = raw_smp_processor_id();
+
+ if (!breakinfo[i].enabled)
+ continue;
+
+ bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
+ if (bp->attr.disabled != 1)
+ continue;
+
+ bp->attr.bp_addr = breakinfo[i].addr;
+ bp->attr.bp_len = breakinfo[i].len;
+ bp->attr.bp_type = breakinfo[i].type;
+
+ val = hw_breakpoint_arch_parse(bp, &bp->attr, counter_arch_bp(bp));
+ if (val)
+ return;
+
+ val = arch_install_hw_breakpoint(bp);
+ if (!val)
+ bp->attr.disabled = 0;
+ activated = 1;
+ }
+
+ csr_xchg32(activated ? CSR_CRMD_WE : 0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
+ kgdb_watch_activated = activated;
+}
+
+const struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0x02, 0x00, break_op >> 1, 0x00}, /* BRK_KDB = 2 */
+ .flags = KGDB_HW_BREAKPOINT,
+ .set_hw_breakpoint = kgdb_set_hw_break,
+ .remove_hw_breakpoint = kgdb_remove_hw_break,
+ .disable_hw_break = kgdb_disable_hw_break,
+ .remove_all_hw_break = kgdb_remove_all_hw_break,
+ .correct_hw_break = kgdb_correct_hw_break,
+};
+
+int kgdb_arch_init(void)
+{
+ return register_die_notifier(&kgdb_notifier);
+}
+
+void kgdb_arch_late(void)
+{
+ int i, cpu;
+ struct perf_event_attr attr;
+ struct perf_event **pevent;
+
+ hw_breakpoint_init(&attr);
+
+ attr.bp_addr = (unsigned long)kgdb_arch_init;
+ attr.bp_len = HW_BREAKPOINT_LEN_4;
+ attr.bp_type = HW_BREAKPOINT_W;
+ attr.disabled = 1;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ if (breakinfo[i].pev)
+ continue;
+
+ breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
+ if (IS_ERR((void * __force)breakinfo[i].pev)) {
+ pr_err("kgdb: Could not allocate hw breakpoints.\n");
+ breakinfo[i].pev = NULL;
+ return;
+ }
+
+ for_each_online_cpu(cpu) {
+ pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
+ if (pevent[0]->destroy) {
+ pevent[0]->destroy = NULL;
+ release_bp_slot(*pevent);
+ }
+ }
+ }
+}
+
+void kgdb_arch_exit(void)
+{
+ int i;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ if (breakinfo[i].pev) {
+ unregister_wide_hw_breakpoint(breakinfo[i].pev);
+ breakinfo[i].pev = NULL;
+ }
+ }
+
+ unregister_die_notifier(&kgdb_notifier);
+}
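
As a quick sanity check of the branch-offset decoding in get_step_address() above, the 26-bit b/bl case can be reproduced in plain C. This is a minimal standalone sketch, not part of the patch: sign_extend64() mirrors the <linux/bitops.h> helper, and the pc/immediate values are made up purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Same behaviour as the kernel's sign_extend64(value, index) */
static int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;
	return (int64_t)(value << shift) >> shift;
}

int main(void)
{
	uint64_t pc   = 0x9000000000200000ULL;	/* arbitrary example PC */
	uint32_t si_h = 0x3ff;			/* immediate[25:16], all ones */
	uint32_t si_l = 0xffff;			/* immediate[15:0],  all ones */

	/* b/bl: offset = imm26 << 2, sign-extended from bit 27 => -4 here */
	int64_t off = sign_extend64(((uint64_t)si_h << 16 | si_l) << 2, 27);

	printf("b target = %#llx (offset %lld)\n",
	       (unsigned long long)(pc + off), (long long)off);
	return 0;
}
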
diff --git a/arch/loongarch/kernel/lbt.S b/arch/loongarch/kernel/lbt.S
new file mode 100644
index 000000000000..9c75120a26d8
--- /dev/null
+++ b/arch/loongarch/kernel/lbt.S
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Qi Hu <huqi@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/asm-extable.h>
+#include <asm/asm-offsets.h>
+#include <asm/errno.h>
+#include <asm/regdef.h>
+
+#define SCR_REG_WIDTH 8
+
+ .macro EX insn, reg, src, offs
+.ex\@: \insn \reg, \src, \offs
+ _asm_extable .ex\@, .L_lbt_fault
+ .endm
+
+/*
+ * Save a thread's lbt context.
+ */
+SYM_FUNC_START(_save_lbt)
+ movscr2gr t1, $scr0 # save scr
+ stptr.d t1, a0, THREAD_SCR0
+ movscr2gr t1, $scr1
+ stptr.d t1, a0, THREAD_SCR1
+ movscr2gr t1, $scr2
+ stptr.d t1, a0, THREAD_SCR2
+ movscr2gr t1, $scr3
+ stptr.d t1, a0, THREAD_SCR3
+
+ x86mfflag t1, 0x3f # save eflags
+ stptr.d t1, a0, THREAD_EFLAGS
+ jr ra
+SYM_FUNC_END(_save_lbt)
+EXPORT_SYMBOL(_save_lbt)
+
+/*
+ * Restore a thread's lbt context.
+ */
+SYM_FUNC_START(_restore_lbt)
+ ldptr.d t1, a0, THREAD_SCR0 # restore scr
+ movgr2scr $scr0, t1
+ ldptr.d t1, a0, THREAD_SCR1
+ movgr2scr $scr1, t1
+ ldptr.d t1, a0, THREAD_SCR2
+ movgr2scr $scr2, t1
+ ldptr.d t1, a0, THREAD_SCR3
+ movgr2scr $scr3, t1
+
+ ldptr.d t1, a0, THREAD_EFLAGS # restore eflags
+ x86mtflag t1, 0x3f
+ jr ra
+SYM_FUNC_END(_restore_lbt)
+EXPORT_SYMBOL(_restore_lbt)
+
+/*
+ * Load scr/eflag with zero.
+ */
+SYM_FUNC_START(_init_lbt)
+ movgr2scr $scr0, zero
+ movgr2scr $scr1, zero
+ movgr2scr $scr2, zero
+ movgr2scr $scr3, zero
+
+ x86mtflag zero, 0x3f
+ jr ra
+SYM_FUNC_END(_init_lbt)
+
+/*
+ * a0: scr
+ * a1: eflag
+ */
+SYM_FUNC_START(_save_lbt_context)
+ movscr2gr t1, $scr0 # save scr
+ EX st.d t1, a0, (0 * SCR_REG_WIDTH)
+ movscr2gr t1, $scr1
+ EX st.d t1, a0, (1 * SCR_REG_WIDTH)
+ movscr2gr t1, $scr2
+ EX st.d t1, a0, (2 * SCR_REG_WIDTH)
+ movscr2gr t1, $scr3
+ EX st.d t1, a0, (3 * SCR_REG_WIDTH)
+
+ x86mfflag t1, 0x3f # save eflags
+ EX st.w t1, a1, 0
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_save_lbt_context)
+
+/*
+ * a0: scr
+ * a1: eflag
+ */
+SYM_FUNC_START(_restore_lbt_context)
+ EX ld.d t1, a0, (0 * SCR_REG_WIDTH) # restore scr
+ movgr2scr $scr0, t1
+ EX ld.d t1, a0, (1 * SCR_REG_WIDTH)
+ movgr2scr $scr1, t1
+ EX ld.d t1, a0, (2 * SCR_REG_WIDTH)
+ movgr2scr $scr2, t1
+ EX ld.d t1, a0, (3 * SCR_REG_WIDTH)
+ movgr2scr $scr3, t1
+
+ EX ld.w t1, a1, 0 # restore eflags
+ x86mtflag t1, 0x3f
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_restore_lbt_context)
+
+/*
+ * a0: ftop
+ */
+SYM_FUNC_START(_save_ftop_context)
+ x86mftop t1
+ st.w t1, a0, 0
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_save_ftop_context)
+
+/*
+ * a0: ftop
+ */
+SYM_FUNC_START(_restore_ftop_context)
+ ld.w t1, a0, 0
+ andi t1, t1, 0x7
+ la.pcrel a0, 1f
+ alsl.d a0, t1, a0, 3
+ jr a0
+1:
+ x86mttop 0
+ b 2f
+ x86mttop 1
+ b 2f
+ x86mttop 2
+ b 2f
+ x86mttop 3
+ b 2f
+ x86mttop 4
+ b 2f
+ x86mttop 5
+ b 2f
+ x86mttop 6
+ b 2f
+ x86mttop 7
+2:
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_restore_ftop_context)
+
+.L_lbt_fault:
+ li.w a0, -EFAULT # failure
+ jr ra
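
In _restore_ftop_context above, "alsl.d a0, t1, a0, 3" scales the masked ftop value by 8 before the indirect jump because each jump-table slot holds two 4-byte instructions (x86mttop N; b 2f). A minimal sketch of the equivalent index arithmetic, with illustrative names only:

#include <stdint.h>

#define FTOP_SLOT_SIZE 8	/* two 4-byte instructions per jump-table slot */

/* Returns the address of the slot that sets the requested top value */
static uintptr_t ftop_slot_addr(uintptr_t table_base, uint32_t ftop)
{
	uint32_t idx = ftop & 0x7;				/* andi t1, t1, 0x7 */
	return table_base + (uintptr_t)idx * FTOP_SLOT_SIZE;	/* alsl.d a0, t1, a0, 3 */
}
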
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
index 708665895b47..c7d33c489e04 100644
--- a/arch/loongarch/kernel/numa.c
+++ b/arch/loongarch/kernel/numa.c
@@ -67,39 +67,7 @@ static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
void __init pcpu_populate_pte(unsigned long addr)
{
- pgd_t *pgd = pgd_offset_k(addr);
- p4d_t *p4d = p4d_offset(pgd, addr);
- pud_t *pud;
- pmd_t *pmd;
-
- if (p4d_none(*p4d)) {
- pud_t *new;
-
- new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- pgd_populate(&init_mm, pgd, new);
-#ifndef __PAGETABLE_PUD_FOLDED
- pud_init(new);
-#endif
- }
-
- pud = pud_offset(p4d, addr);
- if (pud_none(*pud)) {
- pmd_t *new;
-
- new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- pud_populate(&init_mm, pud, new);
-#ifndef __PAGETABLE_PMD_FOLDED
- pmd_init(new);
-#endif
- }
-
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd)) {
- pte_t *new;
-
- new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- pmd_populate_kernel(&init_mm, pmd, new);
- }
+ populate_kernel_pte(addr);
}
void __init setup_per_cpu_areas(void)
@@ -470,7 +438,6 @@ void __init mem_init(void)
{
high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
memblock_free_all();
- setup_zero_pages(); /* This comes from node 0 */
}
int pcibus_to_node(struct pci_bus *bus)
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index ba457e43f5be..3cb082e0c992 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -38,6 +38,7 @@
#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/fpu.h>
+#include <asm/lbt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
@@ -82,9 +83,11 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
euen = regs->csr_euen & ~(CSR_EUEN_FPEN);
regs->csr_euen = euen;
lose_fpu(0);
+ lose_lbt(0);
clear_thread_flag(TIF_LSX_CTX_LIVE);
clear_thread_flag(TIF_LASX_CTX_LIVE);
+ clear_thread_flag(TIF_LBT_CTX_LIVE);
clear_used_math();
regs->csr_era = pc;
regs->regs[3] = sp;
@@ -121,10 +124,14 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
preempt_enable();
- if (used_math())
- memcpy(dst, src, sizeof(struct task_struct));
- else
+ if (!used_math())
memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));
+ else
+ memcpy(dst, src, offsetof(struct task_struct, thread.lbt.scr0));
+
+#ifdef CONFIG_CPU_HAS_LBT
+ memcpy(&dst->thread.lbt, &src->thread.lbt, sizeof(struct loongarch_lbt));
+#endif
return 0;
}
@@ -189,8 +196,10 @@ out:
ptrace_hw_copy_thread(p);
clear_tsk_thread_flag(p, TIF_USEDFPU);
clear_tsk_thread_flag(p, TIF_USEDSIMD);
+ clear_tsk_thread_flag(p, TIF_USEDLBT);
clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);
+ clear_tsk_thread_flag(p, TIF_LBT_CTX_LIVE);
return 0;
}
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
index f72adbf530c6..c114c5ef1332 100644
--- a/arch/loongarch/kernel/ptrace.c
+++ b/arch/loongarch/kernel/ptrace.c
@@ -38,6 +38,7 @@
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/fpu.h>
+#include <asm/lbt.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -338,6 +339,46 @@ static int simd_set(struct task_struct *target,
#endif /* CONFIG_CPU_HAS_LSX */
+#ifdef CONFIG_CPU_HAS_LBT
+static int lbt_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ int r;
+
+ r = membuf_write(&to, &target->thread.lbt.scr0, sizeof(target->thread.lbt.scr0));
+ r = membuf_write(&to, &target->thread.lbt.scr1, sizeof(target->thread.lbt.scr1));
+ r = membuf_write(&to, &target->thread.lbt.scr2, sizeof(target->thread.lbt.scr2));
+ r = membuf_write(&to, &target->thread.lbt.scr3, sizeof(target->thread.lbt.scr3));
+ r = membuf_write(&to, &target->thread.lbt.eflags, sizeof(u32));
+ r = membuf_write(&to, &target->thread.fpu.ftop, sizeof(u32));
+
+ return r;
+}
+
+static int lbt_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int err = 0;
+ const int eflags_start = 4 * sizeof(target->thread.lbt.scr0);
+ const int ftop_start = eflags_start + sizeof(u32);
+
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.lbt.scr0,
+ 0, 4 * sizeof(target->thread.lbt.scr0));
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.lbt.eflags,
+ eflags_start, ftop_start);
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.ftop,
+ ftop_start, ftop_start + sizeof(u32));
+
+ return err;
+}
+#endif /* CONFIG_CPU_HAS_LBT */
+
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
@@ -802,6 +843,9 @@ enum loongarch_regset {
#ifdef CONFIG_CPU_HAS_LASX
REGSET_LASX,
#endif
+#ifdef CONFIG_CPU_HAS_LBT
+ REGSET_LBT,
+#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
REGSET_HW_BREAK,
REGSET_HW_WATCH,
@@ -853,6 +897,16 @@ static const struct user_regset loongarch64_regsets[] = {
.set = simd_set,
},
#endif
+#ifdef CONFIG_CPU_HAS_LBT
+ [REGSET_LBT] = {
+ .core_note_type = NT_LOONGARCH_LBT,
+ .n = 5,
+ .size = sizeof(u64),
+ .align = sizeof(u64),
+ .regset_get = lbt_get,
+ .set = lbt_set,
+ },
+#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
[REGSET_HW_BREAK] = {
.core_note_type = NT_LOONGARCH_HW_BREAK,
diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
index 01f94d1e3edf..6c3eff9af9fb 100644
--- a/arch/loongarch/kernel/relocate.c
+++ b/arch/loongarch/kernel/relocate.c
@@ -157,12 +157,11 @@ static inline void __init update_reloc_offset(unsigned long *addr, long random_o
*new_addr = (unsigned long)reloc_offset;
}
-void * __init relocate_kernel(void)
+unsigned long __init relocate_kernel(void)
{
unsigned long kernel_length;
unsigned long random_offset = 0;
void *location_new = _text; /* Default to original kernel start */
- void *kernel_entry = start_kernel; /* Default to original kernel entry point */
char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
@@ -190,9 +189,6 @@ void * __init relocate_kernel(void)
reloc_offset += random_offset;
- /* Return the new kernel's entry point */
- kernel_entry = RELOCATED_KASLR(start_kernel);
-
/* The current thread is now within the relocated kernel */
__current_thread_info = RELOCATED_KASLR(__current_thread_info);
@@ -204,7 +200,7 @@ void * __init relocate_kernel(void)
relocate_absolute(random_offset);
- return kernel_entry;
+ return random_offset;
}
/*
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 9d830ab4e302..7783f0a3d742 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -626,4 +626,8 @@ void __init setup_arch(char **cmdline_p)
#endif
paging_init();
+
+#ifdef CONFIG_KASAN
+ kasan_init();
+#endif
}
diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
index ceb899366c0a..504fdfe85203 100644
--- a/arch/loongarch/kernel/signal.c
+++ b/arch/loongarch/kernel/signal.c
@@ -32,6 +32,7 @@
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/fpu.h>
+#include <asm/lbt.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
@@ -44,6 +45,9 @@
/* Make sure we will not lose FPU ownership */
#define lock_fpu_owner() ({ preempt_disable(); pagefault_disable(); })
#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
+/* Make sure we will not lose LBT ownership */
+#define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); })
+#define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); })
/* Assembly functions to move context to/from the FPU */
extern asmlinkage int
@@ -59,6 +63,13 @@ _save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+#ifdef CONFIG_CPU_HAS_LBT
+extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
+extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
+extern asmlinkage int _save_ftop_context(void __user *ftop);
+extern asmlinkage int _restore_ftop_context(void __user *ftop);
+#endif
+
struct rt_sigframe {
struct siginfo rs_info;
struct ucontext rs_uctx;
@@ -75,6 +86,7 @@ struct extctx_layout {
struct _ctx_layout fpu;
struct _ctx_layout lsx;
struct _ctx_layout lasx;
+ struct _ctx_layout lbt;
struct _ctx_layout end;
};
@@ -215,6 +227,52 @@ static int copy_lasx_from_sigcontext(struct lasx_context __user *ctx)
return err;
}
+#ifdef CONFIG_CPU_HAS_LBT
+static int copy_lbt_to_sigcontext(struct lbt_context __user *ctx)
+{
+ int err = 0;
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
+
+ err |= __put_user(current->thread.lbt.scr0, &regs[0]);
+ err |= __put_user(current->thread.lbt.scr1, &regs[1]);
+ err |= __put_user(current->thread.lbt.scr2, &regs[2]);
+ err |= __put_user(current->thread.lbt.scr3, &regs[3]);
+ err |= __put_user(current->thread.lbt.eflags, eflags);
+
+ return err;
+}
+
+static int copy_lbt_from_sigcontext(struct lbt_context __user *ctx)
+{
+ int err = 0;
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
+
+ err |= __get_user(current->thread.lbt.scr0, &regs[0]);
+ err |= __get_user(current->thread.lbt.scr1, &regs[1]);
+ err |= __get_user(current->thread.lbt.scr2, &regs[2]);
+ err |= __get_user(current->thread.lbt.scr3, &regs[3]);
+ err |= __get_user(current->thread.lbt.eflags, eflags);
+
+ return err;
+}
+
+static int copy_ftop_to_sigcontext(struct lbt_context __user *ctx)
+{
+ uint32_t __user *ftop = &ctx->ftop;
+
+ return __put_user(current->thread.fpu.ftop, ftop);
+}
+
+static int copy_ftop_from_sigcontext(struct lbt_context __user *ctx)
+{
+ uint32_t __user *ftop = &ctx->ftop;
+
+ return __get_user(current->thread.fpu.ftop, ftop);
+}
+#endif
+
/*
* Wrappers for the assembly _{save,restore}_fp_context functions.
*/
@@ -272,6 +330,41 @@ static int restore_hw_lasx_context(struct lasx_context __user *ctx)
return _restore_lasx_context(regs, fcc, fcsr);
}
+/*
+ * Wrappers for the assembly _{save,restore}_lbt_context functions.
+ */
+#ifdef CONFIG_CPU_HAS_LBT
+static int save_hw_lbt_context(struct lbt_context __user *ctx)
+{
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
+
+ return _save_lbt_context(regs, eflags);
+}
+
+static int restore_hw_lbt_context(struct lbt_context __user *ctx)
+{
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
+
+ return _restore_lbt_context(regs, eflags);
+}
+
+static int save_hw_ftop_context(struct lbt_context __user *ctx)
+{
+ uint32_t __user *ftop = &ctx->ftop;
+
+ return _save_ftop_context(ftop);
+}
+
+static int restore_hw_ftop_context(struct lbt_context __user *ctx)
+{
+ uint32_t __user *ftop = &ctx->ftop;
+
+ return _restore_ftop_context(ftop);
+}
+#endif
+
static int fcsr_pending(unsigned int __user *fcsr)
{
int err, sig = 0;
@@ -519,6 +612,77 @@ static int protected_restore_lasx_context(struct extctx_layout *extctx)
return err ?: sig;
}
+#ifdef CONFIG_CPU_HAS_LBT
+static int protected_save_lbt_context(struct extctx_layout *extctx)
+{
+ int err = 0;
+ struct sctx_info __user *info = extctx->lbt.addr;
+ struct lbt_context __user *lbt_ctx =
+ (struct lbt_context *)get_ctx_through_ctxinfo(info);
+ uint64_t __user *regs = (uint64_t *)&lbt_ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&lbt_ctx->eflags;
+
+ while (1) {
+ lock_lbt_owner();
+ if (is_lbt_owner())
+ err |= save_hw_lbt_context(lbt_ctx);
+ else
+ err |= copy_lbt_to_sigcontext(lbt_ctx);
+ if (is_fpu_owner())
+ err |= save_hw_ftop_context(lbt_ctx);
+ else
+ err |= copy_ftop_to_sigcontext(lbt_ctx);
+ unlock_lbt_owner();
+
+ err |= __put_user(LBT_CTX_MAGIC, &info->magic);
+ err |= __put_user(extctx->lbt.size, &info->size);
+
+ if (likely(!err))
+ break;
+ /* Touch the LBT context and try again */
+ err = __put_user(0, &regs[0]) | __put_user(0, eflags);
+
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int protected_restore_lbt_context(struct extctx_layout *extctx)
+{
+ int err = 0, tmp __maybe_unused;
+ struct sctx_info __user *info = extctx->lbt.addr;
+ struct lbt_context __user *lbt_ctx =
+ (struct lbt_context *)get_ctx_through_ctxinfo(info);
+ uint64_t __user *regs = (uint64_t *)&lbt_ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&lbt_ctx->eflags;
+
+ while (1) {
+ lock_lbt_owner();
+ if (is_lbt_owner())
+ err |= restore_hw_lbt_context(lbt_ctx);
+ else
+ err |= copy_lbt_from_sigcontext(lbt_ctx);
+ if (is_fpu_owner())
+ err |= restore_hw_ftop_context(lbt_ctx);
+ else
+ err |= copy_ftop_from_sigcontext(lbt_ctx);
+ unlock_lbt_owner();
+
+ if (likely(!err))
+ break;
+ /* Touch the LBT context and try again */
+ err = __get_user(tmp, &regs[0]) | __get_user(tmp, eflags);
+
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+#endif
+
static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
struct extctx_layout *extctx)
{
@@ -539,6 +703,11 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
else if (extctx->fpu.addr)
err |= protected_save_fpu_context(extctx);
+#ifdef CONFIG_CPU_HAS_LBT
+ if (extctx->lbt.addr)
+ err |= protected_save_lbt_context(extctx);
+#endif
+
/* Set the "end" magic */
info = (struct sctx_info *)extctx->end.addr;
err |= __put_user(0, &info->magic);
@@ -584,6 +753,13 @@ static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *
extctx->lasx.addr = info;
break;
+ case LBT_CTX_MAGIC:
+ if (size < (sizeof(struct sctx_info) +
+ sizeof(struct lbt_context)))
+ goto invalid;
+ extctx->lbt.addr = info;
+ break;
+
default:
goto invalid;
}
@@ -636,6 +812,11 @@ static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc
else if (extctx.fpu.addr)
err |= protected_restore_fpu_context(&extctx);
+#ifdef CONFIG_CPU_HAS_LBT
+ if (extctx.lbt.addr)
+ err |= protected_restore_lbt_context(&extctx);
+#endif
+
bad:
return err;
}
@@ -700,6 +881,13 @@ static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned lon
sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
}
+#ifdef CONFIG_CPU_HAS_LBT
+ if (cpu_has_lbt && thread_lbt_context_live()) {
+ new_sp = extframe_alloc(extctx, &extctx->lbt,
+ sizeof(struct lbt_context), LBT_CTX_ALIGN, new_sp);
+ }
+#endif
+
return new_sp;
}
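
protected_save_lbt_context() and protected_restore_lbt_context() reuse the fault-in-and-retry idiom of the existing FPU/LSX/LASX paths. A minimal sketch of that idiom in plain C, where copy_out() and touch() are hypothetical stand-ins rather than kernel APIs:

#include <errno.h>

struct user_frame;				/* stand-in for the signal frame */
extern int copy_out(struct user_frame *ctx);	/* hypothetical: may fault */
extern int touch(struct user_frame *ctx);	/* hypothetical: __put_user(0, ...) */

static int protected_save(struct user_frame *ctx)
{
	for (;;) {
		if (!copy_out(ctx))	/* first attempt may fault on an unmapped frame */
			return 0;
		if (touch(ctx))		/* fault the page in and retry ... */
			return -EFAULT;	/* ... unless the frame is genuinely unwritable */
	}
}
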
diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c
index 2463d2fea21f..92270f14db94 100644
--- a/arch/loongarch/kernel/stacktrace.c
+++ b/arch/loongarch/kernel/stacktrace.c
@@ -18,17 +18,19 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct pt_regs dummyregs;
struct unwind_state state;
- regs = &dummyregs;
+ if (!regs) {
+ regs = &dummyregs;
- if (task == current) {
- regs->regs[3] = (unsigned long)__builtin_frame_address(0);
- regs->csr_era = (unsigned long)__builtin_return_address(0);
- } else {
- regs->regs[3] = thread_saved_fp(task);
- regs->csr_era = thread_saved_ra(task);
+ if (task == current) {
+ regs->regs[3] = (unsigned long)__builtin_frame_address(0);
+ regs->csr_era = (unsigned long)__builtin_return_address(0);
+ } else {
+ regs->regs[3] = thread_saved_fp(task);
+ regs->csr_era = thread_saved_ra(task);
+ }
+ regs->regs[1] = 0;
}
- regs->regs[1] = 0;
for (unwind_start(&state, task, regs);
!unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
diff --git a/arch/loongarch/kernel/sysrq.c b/arch/loongarch/kernel/sysrq.c
index 366baef72d29..e663c10fa39c 100644
--- a/arch/loongarch/kernel/sysrq.c
+++ b/arch/loongarch/kernel/sysrq.c
@@ -43,7 +43,7 @@ static void sysrq_tlbdump_othercpus(struct work_struct *dummy)
static DECLARE_WORK(sysrq_tlbdump, sysrq_tlbdump_othercpus);
#endif
-static void sysrq_handle_tlbdump(int key)
+static void sysrq_handle_tlbdump(u8 key)
{
sysrq_tlbdump_single(NULL);
#ifdef CONFIG_SMP
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index 89699db45cec..65214774ef7c 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -36,7 +36,9 @@
#include <asm/break.h>
#include <asm/cpu.h>
#include <asm/fpu.h>
+#include <asm/lbt.h>
#include <asm/inst.h>
+#include <asm/kgdb.h>
#include <asm/loongarch.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
@@ -702,6 +704,11 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
* pertain to them.
*/
switch (bcode) {
+ case BRK_KDB:
+ if (kgdb_breakpoint_handler(regs))
+ goto out;
+ else
+ break;
case BRK_KPROBE_BP:
if (kprobe_breakpoint_handler(regs))
goto out;
@@ -768,6 +775,9 @@ asmlinkage void noinstr do_watch(struct pt_regs *regs)
#ifndef CONFIG_HAVE_HW_BREAKPOINT
pr_warn("Hardware watch point handler not implemented!\n");
#else
+ if (kgdb_breakpoint_handler(regs))
+ goto out;
+
if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) {
int llbit = (csr_read32(LOONGARCH_CSR_LLBCTL) & 0x1);
unsigned long pc = instruction_pointer(regs);
@@ -966,13 +976,47 @@ out:
irqentry_exit(regs, state);
}
+static void init_restore_lbt(void)
+{
+ if (!thread_lbt_context_live()) {
+ /* First time LBT context user */
+ init_lbt();
+ set_thread_flag(TIF_LBT_CTX_LIVE);
+ } else {
+ if (!is_lbt_owner())
+ own_lbt_inatomic(1);
+ }
+
+ BUG_ON(!is_lbt_enabled());
+}
+
asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
- local_irq_enable();
- force_sig(SIGILL);
- local_irq_disable();
+ /*
+ * A BTD (Binary Translation Disable) exception can be triggered
+ * during FP save/restore if TM (Top Mode) is on, which may enable
+ * interrupts during 'switch_to'. To avoid this situation (including
+ * the case where userspace turns on TM via 'MOVGR2GCSR', which does
+ * not itself trigger the exception), we need to check PRMD first.
+ */
+ if (regs->csr_prmd & CSR_PRMD_PIE)
+ local_irq_enable();
+
+ if (!cpu_has_lbt) {
+ force_sig(SIGILL);
+ goto out;
+ }
+ BUG_ON(is_lbt_enabled());
+
+ preempt_disable();
+ init_restore_lbt();
+ preempt_enable();
+
+out:
+ if (regs->csr_prmd & CSR_PRMD_PIE)
+ local_irq_disable();
irqentry_exit(regs, state);
}
diff --git a/arch/loongarch/lib/Makefile b/arch/loongarch/lib/Makefile
index d60d4e096cfa..a77bf160bfc4 100644
--- a/arch/loongarch/lib/Makefile
+++ b/arch/loongarch/lib/Makefile
@@ -6,4 +6,6 @@
lib-y += delay.o memset.o memcpy.o memmove.o \
clear_user.o copy_user.o csum.o dump_tlb.o unaligned.o
+obj-$(CONFIG_CPU_HAS_LSX) += xor_simd.o xor_simd_glue.o
+
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/loongarch/lib/clear_user.S b/arch/loongarch/lib/clear_user.S
index 0790eadce166..be741544e62b 100644
--- a/arch/loongarch/lib/clear_user.S
+++ b/arch/loongarch/lib/clear_user.S
@@ -11,19 +11,6 @@
#include <asm/cpu.h>
#include <asm/regdef.h>
-.irp to, 0, 1, 2, 3, 4, 5, 6, 7
-.L_fixup_handle_\to\():
- sub.d a0, a2, a0
- addi.d a0, a0, (\to) * (-8)
- jr ra
-.endr
-
-.irp to, 0, 2, 4
-.L_fixup_handle_s\to\():
- addi.d a0, a1, -\to
- jr ra
-.endr
-
SYM_FUNC_START(__clear_user)
/*
* Some CPUs support hardware unaligned access
@@ -51,7 +38,7 @@ SYM_FUNC_START(__clear_user_generic)
2: move a0, a1
jr ra
- _asm_extable 1b, .L_fixup_handle_s0
+ _asm_extable 1b, 2b
SYM_FUNC_END(__clear_user_generic)
/*
@@ -173,33 +160,47 @@ SYM_FUNC_START(__clear_user_fast)
jr ra
/* fixup and ex_table */
- _asm_extable 0b, .L_fixup_handle_0
- _asm_extable 1b, .L_fixup_handle_0
- _asm_extable 2b, .L_fixup_handle_1
- _asm_extable 3b, .L_fixup_handle_2
- _asm_extable 4b, .L_fixup_handle_3
- _asm_extable 5b, .L_fixup_handle_4
- _asm_extable 6b, .L_fixup_handle_5
- _asm_extable 7b, .L_fixup_handle_6
- _asm_extable 8b, .L_fixup_handle_7
- _asm_extable 9b, .L_fixup_handle_0
- _asm_extable 10b, .L_fixup_handle_1
- _asm_extable 11b, .L_fixup_handle_2
- _asm_extable 12b, .L_fixup_handle_3
- _asm_extable 13b, .L_fixup_handle_0
- _asm_extable 14b, .L_fixup_handle_1
- _asm_extable 15b, .L_fixup_handle_0
- _asm_extable 16b, .L_fixup_handle_0
- _asm_extable 17b, .L_fixup_handle_s0
- _asm_extable 18b, .L_fixup_handle_s0
- _asm_extable 19b, .L_fixup_handle_s0
- _asm_extable 20b, .L_fixup_handle_s2
- _asm_extable 21b, .L_fixup_handle_s0
- _asm_extable 22b, .L_fixup_handle_s0
- _asm_extable 23b, .L_fixup_handle_s4
- _asm_extable 24b, .L_fixup_handle_s0
- _asm_extable 25b, .L_fixup_handle_s4
- _asm_extable 26b, .L_fixup_handle_s0
- _asm_extable 27b, .L_fixup_handle_s4
- _asm_extable 28b, .L_fixup_handle_s0
+.Llarge_fixup:
+ sub.d a1, a2, a0
+
+.Lsmall_fixup:
+29: st.b zero, a0, 0
+ addi.d a0, a0, 1
+ addi.d a1, a1, -1
+ bgt a1, zero, 29b
+
+.Lexit:
+ move a0, a1
+ jr ra
+
+ _asm_extable 0b, .Lsmall_fixup
+ _asm_extable 1b, .Llarge_fixup
+ _asm_extable 2b, .Llarge_fixup
+ _asm_extable 3b, .Llarge_fixup
+ _asm_extable 4b, .Llarge_fixup
+ _asm_extable 5b, .Llarge_fixup
+ _asm_extable 6b, .Llarge_fixup
+ _asm_extable 7b, .Llarge_fixup
+ _asm_extable 8b, .Llarge_fixup
+ _asm_extable 9b, .Llarge_fixup
+ _asm_extable 10b, .Llarge_fixup
+ _asm_extable 11b, .Llarge_fixup
+ _asm_extable 12b, .Llarge_fixup
+ _asm_extable 13b, .Llarge_fixup
+ _asm_extable 14b, .Llarge_fixup
+ _asm_extable 15b, .Llarge_fixup
+ _asm_extable 16b, .Llarge_fixup
+ _asm_extable 17b, .Lexit
+ _asm_extable 18b, .Lsmall_fixup
+ _asm_extable 19b, .Lsmall_fixup
+ _asm_extable 20b, .Lsmall_fixup
+ _asm_extable 21b, .Lsmall_fixup
+ _asm_extable 22b, .Lsmall_fixup
+ _asm_extable 23b, .Lsmall_fixup
+ _asm_extable 24b, .Lsmall_fixup
+ _asm_extable 25b, .Lsmall_fixup
+ _asm_extable 26b, .Lsmall_fixup
+ _asm_extable 27b, .Lsmall_fixup
+ _asm_extable 28b, .Lsmall_fixup
+ _asm_extable 29b, .Lexit
SYM_FUNC_END(__clear_user_fast)
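
The rewritten fixup path replaces the per-label .L_fixup_handle_* arithmetic with a single byte-at-a-time fallback. A minimal sketch of the resulting contract, where store_byte() is a hypothetical stand-in for the faultable st.b at label 29:

extern int store_byte(unsigned char *dst);	/* hypothetical: 0 on success, nonzero on fault */

static unsigned long clear_user_tail(unsigned char *dst, unsigned long left)
{
	while (left > 0) {
		if (store_byte(dst))	/* 29: st.b zero, a0, 0 (_asm_extable 29b, .Lexit) */
			break;
		dst++;
		left--;
	}
	return left;			/* .Lexit: move a0, a1; jr ra -- bytes NOT cleared */
}
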
diff --git a/arch/loongarch/lib/copy_user.S b/arch/loongarch/lib/copy_user.S
index bfe3d2793d00..feec3d362803 100644
--- a/arch/loongarch/lib/copy_user.S
+++ b/arch/loongarch/lib/copy_user.S
@@ -11,19 +11,6 @@
#include <asm/cpu.h>
#include <asm/regdef.h>
-.irp to, 0, 1, 2, 3, 4, 5, 6, 7
-.L_fixup_handle_\to\():
- sub.d a0, a2, a0
- addi.d a0, a0, (\to) * (-8)
- jr ra
-.endr
-
-.irp to, 0, 2, 4
-.L_fixup_handle_s\to\():
- addi.d a0, a2, -\to
- jr ra
-.endr
-
SYM_FUNC_START(__copy_user)
/*
* Some CPUs support hardware unaligned access
@@ -54,8 +41,8 @@ SYM_FUNC_START(__copy_user_generic)
3: move a0, a2
jr ra
- _asm_extable 1b, .L_fixup_handle_s0
- _asm_extable 2b, .L_fixup_handle_s0
+ _asm_extable 1b, 3b
+ _asm_extable 2b, 3b
SYM_FUNC_END(__copy_user_generic)
/*
@@ -69,10 +56,10 @@ SYM_FUNC_START(__copy_user_fast)
sltui t0, a2, 9
bnez t0, .Lsmall
- add.d a3, a1, a2
- add.d a2, a0, a2
0: ld.d t0, a1, 0
1: st.d t0, a0, 0
+ add.d a3, a1, a2
+ add.d a2, a0, a2
/* align up destination address */
andi t1, a0, 7
@@ -94,7 +81,6 @@ SYM_FUNC_START(__copy_user_fast)
7: ld.d t5, a1, 40
8: ld.d t6, a1, 48
9: ld.d t7, a1, 56
- addi.d a1, a1, 64
10: st.d t0, a0, 0
11: st.d t1, a0, 8
12: st.d t2, a0, 16
@@ -103,6 +89,7 @@ SYM_FUNC_START(__copy_user_fast)
15: st.d t5, a0, 40
16: st.d t6, a0, 48
17: st.d t7, a0, 56
+ addi.d a1, a1, 64
addi.d a0, a0, 64
bltu a1, a4, .Lloop64
@@ -114,11 +101,11 @@ SYM_FUNC_START(__copy_user_fast)
19: ld.d t1, a1, 8
20: ld.d t2, a1, 16
21: ld.d t3, a1, 24
- addi.d a1, a1, 32
22: st.d t0, a0, 0
23: st.d t1, a0, 8
24: st.d t2, a0, 16
25: st.d t3, a0, 24
+ addi.d a1, a1, 32
addi.d a0, a0, 32
.Llt32:
@@ -126,9 +113,9 @@ SYM_FUNC_START(__copy_user_fast)
bgeu a1, a4, .Llt16
26: ld.d t0, a1, 0
27: ld.d t1, a1, 8
- addi.d a1, a1, 16
28: st.d t0, a0, 0
29: st.d t1, a0, 8
+ addi.d a1, a1, 16
addi.d a0, a0, 16
.Llt16:
@@ -136,6 +123,7 @@ SYM_FUNC_START(__copy_user_fast)
bgeu a1, a4, .Llt8
30: ld.d t0, a1, 0
31: st.d t0, a0, 0
+ addi.d a1, a1, 8
addi.d a0, a0, 8
.Llt8:
@@ -214,62 +202,79 @@ SYM_FUNC_START(__copy_user_fast)
jr ra
/* fixup and ex_table */
- _asm_extable 0b, .L_fixup_handle_0
- _asm_extable 1b, .L_fixup_handle_0
- _asm_extable 2b, .L_fixup_handle_0
- _asm_extable 3b, .L_fixup_handle_0
- _asm_extable 4b, .L_fixup_handle_0
- _asm_extable 5b, .L_fixup_handle_0
- _asm_extable 6b, .L_fixup_handle_0
- _asm_extable 7b, .L_fixup_handle_0
- _asm_extable 8b, .L_fixup_handle_0
- _asm_extable 9b, .L_fixup_handle_0
- _asm_extable 10b, .L_fixup_handle_0
- _asm_extable 11b, .L_fixup_handle_1
- _asm_extable 12b, .L_fixup_handle_2
- _asm_extable 13b, .L_fixup_handle_3
- _asm_extable 14b, .L_fixup_handle_4
- _asm_extable 15b, .L_fixup_handle_5
- _asm_extable 16b, .L_fixup_handle_6
- _asm_extable 17b, .L_fixup_handle_7
- _asm_extable 18b, .L_fixup_handle_0
- _asm_extable 19b, .L_fixup_handle_0
- _asm_extable 20b, .L_fixup_handle_0
- _asm_extable 21b, .L_fixup_handle_0
- _asm_extable 22b, .L_fixup_handle_0
- _asm_extable 23b, .L_fixup_handle_1
- _asm_extable 24b, .L_fixup_handle_2
- _asm_extable 25b, .L_fixup_handle_3
- _asm_extable 26b, .L_fixup_handle_0
- _asm_extable 27b, .L_fixup_handle_0
- _asm_extable 28b, .L_fixup_handle_0
- _asm_extable 29b, .L_fixup_handle_1
- _asm_extable 30b, .L_fixup_handle_0
- _asm_extable 31b, .L_fixup_handle_0
- _asm_extable 32b, .L_fixup_handle_0
- _asm_extable 33b, .L_fixup_handle_0
- _asm_extable 34b, .L_fixup_handle_s0
- _asm_extable 35b, .L_fixup_handle_s0
- _asm_extable 36b, .L_fixup_handle_s0
- _asm_extable 37b, .L_fixup_handle_s0
- _asm_extable 38b, .L_fixup_handle_s0
- _asm_extable 39b, .L_fixup_handle_s0
- _asm_extable 40b, .L_fixup_handle_s0
- _asm_extable 41b, .L_fixup_handle_s2
- _asm_extable 42b, .L_fixup_handle_s0
- _asm_extable 43b, .L_fixup_handle_s0
- _asm_extable 44b, .L_fixup_handle_s0
- _asm_extable 45b, .L_fixup_handle_s0
- _asm_extable 46b, .L_fixup_handle_s0
- _asm_extable 47b, .L_fixup_handle_s4
- _asm_extable 48b, .L_fixup_handle_s0
- _asm_extable 49b, .L_fixup_handle_s0
- _asm_extable 50b, .L_fixup_handle_s0
- _asm_extable 51b, .L_fixup_handle_s4
- _asm_extable 52b, .L_fixup_handle_s0
- _asm_extable 53b, .L_fixup_handle_s0
- _asm_extable 54b, .L_fixup_handle_s0
- _asm_extable 55b, .L_fixup_handle_s4
- _asm_extable 56b, .L_fixup_handle_s0
- _asm_extable 57b, .L_fixup_handle_s0
+.Llarge_fixup:
+ sub.d a2, a2, a0
+
+.Lsmall_fixup:
+58: ld.b t0, a1, 0
+59: st.b t0, a0, 0
+ addi.d a0, a0, 1
+ addi.d a1, a1, 1
+ addi.d a2, a2, -1
+ bgt a2, zero, 58b
+
+.Lexit:
+ move a0, a2
+ jr ra
+
+ _asm_extable 0b, .Lsmall_fixup
+ _asm_extable 1b, .Lsmall_fixup
+ _asm_extable 2b, .Llarge_fixup
+ _asm_extable 3b, .Llarge_fixup
+ _asm_extable 4b, .Llarge_fixup
+ _asm_extable 5b, .Llarge_fixup
+ _asm_extable 6b, .Llarge_fixup
+ _asm_extable 7b, .Llarge_fixup
+ _asm_extable 8b, .Llarge_fixup
+ _asm_extable 9b, .Llarge_fixup
+ _asm_extable 10b, .Llarge_fixup
+ _asm_extable 11b, .Llarge_fixup
+ _asm_extable 12b, .Llarge_fixup
+ _asm_extable 13b, .Llarge_fixup
+ _asm_extable 14b, .Llarge_fixup
+ _asm_extable 15b, .Llarge_fixup
+ _asm_extable 16b, .Llarge_fixup
+ _asm_extable 17b, .Llarge_fixup
+ _asm_extable 18b, .Llarge_fixup
+ _asm_extable 19b, .Llarge_fixup
+ _asm_extable 20b, .Llarge_fixup
+ _asm_extable 21b, .Llarge_fixup
+ _asm_extable 22b, .Llarge_fixup
+ _asm_extable 23b, .Llarge_fixup
+ _asm_extable 24b, .Llarge_fixup
+ _asm_extable 25b, .Llarge_fixup
+ _asm_extable 26b, .Llarge_fixup
+ _asm_extable 27b, .Llarge_fixup
+ _asm_extable 28b, .Llarge_fixup
+ _asm_extable 29b, .Llarge_fixup
+ _asm_extable 30b, .Llarge_fixup
+ _asm_extable 31b, .Llarge_fixup
+ _asm_extable 32b, .Llarge_fixup
+ _asm_extable 33b, .Llarge_fixup
+ _asm_extable 34b, .Lexit
+ _asm_extable 35b, .Lexit
+ _asm_extable 36b, .Lsmall_fixup
+ _asm_extable 37b, .Lsmall_fixup
+ _asm_extable 38b, .Lsmall_fixup
+ _asm_extable 39b, .Lsmall_fixup
+ _asm_extable 40b, .Lsmall_fixup
+ _asm_extable 41b, .Lsmall_fixup
+ _asm_extable 42b, .Lsmall_fixup
+ _asm_extable 43b, .Lsmall_fixup
+ _asm_extable 44b, .Lsmall_fixup
+ _asm_extable 45b, .Lsmall_fixup
+ _asm_extable 46b, .Lsmall_fixup
+ _asm_extable 47b, .Lsmall_fixup
+ _asm_extable 48b, .Lsmall_fixup
+ _asm_extable 49b, .Lsmall_fixup
+ _asm_extable 50b, .Lsmall_fixup
+ _asm_extable 51b, .Lsmall_fixup
+ _asm_extable 52b, .Lsmall_fixup
+ _asm_extable 53b, .Lsmall_fixup
+ _asm_extable 54b, .Lsmall_fixup
+ _asm_extable 55b, .Lsmall_fixup
+ _asm_extable 56b, .Lsmall_fixup
+ _asm_extable 57b, .Lsmall_fixup
+ _asm_extable 58b, .Lexit
+ _asm_extable 59b, .Lexit
SYM_FUNC_END(__copy_user_fast)
diff --git a/arch/loongarch/lib/memcpy.S b/arch/loongarch/lib/memcpy.S
index cc30b3b6252f..fa1148878d2b 100644
--- a/arch/loongarch/lib/memcpy.S
+++ b/arch/loongarch/lib/memcpy.S
@@ -10,6 +10,8 @@
#include <asm/cpu.h>
#include <asm/regdef.h>
+.section .noinstr.text, "ax"
+
SYM_FUNC_START(memcpy)
/*
* Some CPUs support hardware unaligned access
@@ -17,9 +19,13 @@ SYM_FUNC_START(memcpy)
ALTERNATIVE "b __memcpy_generic", \
"b __memcpy_fast", CPU_FEATURE_UAL
SYM_FUNC_END(memcpy)
-_ASM_NOKPROBE(memcpy)
+SYM_FUNC_ALIAS(__memcpy, memcpy)
EXPORT_SYMBOL(memcpy)
+EXPORT_SYMBOL(__memcpy)
+
+_ASM_NOKPROBE(memcpy)
+_ASM_NOKPROBE(__memcpy)
/*
* void *__memcpy_generic(void *dst, const void *src, size_t n)
diff --git a/arch/loongarch/lib/memmove.S b/arch/loongarch/lib/memmove.S
index 7dc76d1484b6..82dae062fec8 100644
--- a/arch/loongarch/lib/memmove.S
+++ b/arch/loongarch/lib/memmove.S
@@ -10,23 +10,29 @@
#include <asm/cpu.h>
#include <asm/regdef.h>
+.section .noinstr.text, "ax"
+
SYM_FUNC_START(memmove)
- blt a0, a1, memcpy /* dst < src, memcpy */
- blt a1, a0, rmemcpy /* src < dst, rmemcpy */
- jr ra /* dst == src, return */
+ blt a0, a1, __memcpy /* dst < src, memcpy */
+ blt a1, a0, __rmemcpy /* src < dst, rmemcpy */
+ jr ra /* dst == src, return */
SYM_FUNC_END(memmove)
-_ASM_NOKPROBE(memmove)
+SYM_FUNC_ALIAS(__memmove, memmove)
EXPORT_SYMBOL(memmove)
+EXPORT_SYMBOL(__memmove)
+
+_ASM_NOKPROBE(memmove)
+_ASM_NOKPROBE(__memmove)
-SYM_FUNC_START(rmemcpy)
+SYM_FUNC_START(__rmemcpy)
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __rmemcpy_generic", \
"b __rmemcpy_fast", CPU_FEATURE_UAL
-SYM_FUNC_END(rmemcpy)
-_ASM_NOKPROBE(rmemcpy)
+SYM_FUNC_END(__rmemcpy)
+_ASM_NOKPROBE(__rmemcpy)
/*
* void *__rmemcpy_generic(void *dst, const void *src, size_t n)
diff --git a/arch/loongarch/lib/memset.S b/arch/loongarch/lib/memset.S
index 3f20f7996e8e..06d3ca54cbfe 100644
--- a/arch/loongarch/lib/memset.S
+++ b/arch/loongarch/lib/memset.S
@@ -16,6 +16,8 @@
bstrins.d \r0, \r0, 63, 32
.endm
+.section .noinstr.text, "ax"
+
SYM_FUNC_START(memset)
/*
* Some CPUs support hardware unaligned access
@@ -23,9 +25,13 @@ SYM_FUNC_START(memset)
ALTERNATIVE "b __memset_generic", \
"b __memset_fast", CPU_FEATURE_UAL
SYM_FUNC_END(memset)
-_ASM_NOKPROBE(memset)
+SYM_FUNC_ALIAS(__memset, memset)
EXPORT_SYMBOL(memset)
+EXPORT_SYMBOL(__memset)
+
+_ASM_NOKPROBE(memset)
+_ASM_NOKPROBE(__memset)
/*
* void *__memset_generic(void *s, int c, size_t n)
diff --git a/arch/loongarch/lib/xor_simd.c b/arch/loongarch/lib/xor_simd.c
new file mode 100644
index 000000000000..84cd24b728c4
--- /dev/null
+++ b/arch/loongarch/lib/xor_simd.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * LoongArch SIMD XOR operations
+ *
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ */
+
+#include "xor_simd.h"
+
+/*
+ * Process one cache line (64 bytes) per loop iteration. This assumes that
+ * all future popular LoongArch cores have performance characteristics
+ * similar to the current models.
+ */
+#define LINE_WIDTH 64
+
+#ifdef CONFIG_CPU_HAS_LSX
+
+#define LD(reg, base, offset) \
+ "vld $vr" #reg ", %[" #base "], " #offset "\n\t"
+#define ST(reg, base, offset) \
+ "vst $vr" #reg ", %[" #base "], " #offset "\n\t"
+#define XOR(dj, k) "vxor.v $vr" #dj ", $vr" #dj ", $vr" #k "\n\t"
+
+#define LD_INOUT_LINE(base) \
+ LD(0, base, 0) \
+ LD(1, base, 16) \
+ LD(2, base, 32) \
+ LD(3, base, 48)
+
+#define LD_AND_XOR_LINE(base) \
+ LD(4, base, 0) \
+ LD(5, base, 16) \
+ LD(6, base, 32) \
+ LD(7, base, 48) \
+ XOR(0, 4) \
+ XOR(1, 5) \
+ XOR(2, 6) \
+ XOR(3, 7)
+
+#define ST_LINE(base) \
+ ST(0, base, 0) \
+ ST(1, base, 16) \
+ ST(2, base, 32) \
+ ST(3, base, 48)
+
+#define XOR_FUNC_NAME(nr) __xor_lsx_##nr
+#include "xor_template.c"
+
+#undef LD
+#undef ST
+#undef XOR
+#undef LD_INOUT_LINE
+#undef LD_AND_XOR_LINE
+#undef ST_LINE
+#undef XOR_FUNC_NAME
+
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LASX
+
+#define LD(reg, base, offset) \
+ "xvld $xr" #reg ", %[" #base "], " #offset "\n\t"
+#define ST(reg, base, offset) \
+ "xvst $xr" #reg ", %[" #base "], " #offset "\n\t"
+#define XOR(dj, k) "xvxor.v $xr" #dj ", $xr" #dj ", $xr" #k "\n\t"
+
+#define LD_INOUT_LINE(base) \
+ LD(0, base, 0) \
+ LD(1, base, 32)
+
+#define LD_AND_XOR_LINE(base) \
+ LD(2, base, 0) \
+ LD(3, base, 32) \
+ XOR(0, 2) \
+ XOR(1, 3)
+
+#define ST_LINE(base) \
+ ST(0, base, 0) \
+ ST(1, base, 32)
+
+#define XOR_FUNC_NAME(nr) __xor_lasx_##nr
+#include "xor_template.c"
+
+#undef LD
+#undef ST
+#undef XOR
+#undef LD_INOUT_LINE
+#undef LD_AND_XOR_LINE
+#undef ST_LINE
+#undef XOR_FUNC_NAME
+
+#endif /* CONFIG_CPU_HAS_LASX */
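
Each inclusion of xor_template.c (see below) expands LD_INOUT_LINE/LD_AND_XOR_LINE/ST_LINE into LSX or LASX inline assembly that XORs one 64-byte cache line per iteration. A scalar sketch of what, for example, __xor_lsx_2() computes, assuming bytes is a non-zero multiple of 64 as the template's do/while requires:

#include <stddef.h>

static void xor_scalar_2(unsigned long bytes, unsigned long *p1,
			 const unsigned long *p2)
{
	size_t words = 64 / sizeof(unsigned long);	/* words per cache line */
	unsigned long lines = bytes / 64;

	do {
		for (size_t i = 0; i < words; i++)
			p1[i] ^= p2[i];		/* vld + vxor.v + vst per 16 bytes in LSX */
		p1 += words;
		p2 += words;
	} while (--lines > 0);
}
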
diff --git a/arch/loongarch/lib/xor_simd.h b/arch/loongarch/lib/xor_simd.h
new file mode 100644
index 000000000000..f50f32514d80
--- /dev/null
+++ b/arch/loongarch/lib/xor_simd.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Simple interface to link xor_simd.c and xor_simd_glue.c
+ *
+ * Separating these files ensures that no SIMD instructions are run outside of
+ * the kfpu critical section.
+ */
+
+#ifndef __LOONGARCH_LIB_XOR_SIMD_H
+#define __LOONGARCH_LIB_XOR_SIMD_H
+
+#ifdef CONFIG_CPU_HAS_LSX
+void __xor_lsx_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void __xor_lsx_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3);
+void __xor_lsx_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void __xor_lsx_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4, const unsigned long * __restrict p5);
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LASX
+void __xor_lasx_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void __xor_lasx_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3);
+void __xor_lasx_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void __xor_lasx_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2, const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4, const unsigned long * __restrict p5);
+#endif /* CONFIG_CPU_HAS_LASX */
+
+#endif /* __LOONGARCH_LIB_XOR_SIMD_H */
diff --git a/arch/loongarch/lib/xor_simd_glue.c b/arch/loongarch/lib/xor_simd_glue.c
new file mode 100644
index 000000000000..393f689dbcf6
--- /dev/null
+++ b/arch/loongarch/lib/xor_simd_glue.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * LoongArch SIMD XOR operations
+ *
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ */
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <asm/fpu.h>
+#include <asm/xor_simd.h>
+#include "xor_simd.h"
+
+#define MAKE_XOR_GLUE_2(flavor) \
+void xor_##flavor##_2(unsigned long bytes, unsigned long * __restrict p1, \
+ const unsigned long * __restrict p2) \
+{ \
+ kernel_fpu_begin(); \
+ __xor_##flavor##_2(bytes, p1, p2); \
+ kernel_fpu_end(); \
+} \
+EXPORT_SYMBOL_GPL(xor_##flavor##_2)
+
+#define MAKE_XOR_GLUE_3(flavor) \
+void xor_##flavor##_3(unsigned long bytes, unsigned long * __restrict p1, \
+ const unsigned long * __restrict p2, \
+ const unsigned long * __restrict p3) \
+{ \
+ kernel_fpu_begin(); \
+ __xor_##flavor##_3(bytes, p1, p2, p3); \
+ kernel_fpu_end(); \
+} \
+EXPORT_SYMBOL_GPL(xor_##flavor##_3)
+
+#define MAKE_XOR_GLUE_4(flavor) \
+void xor_##flavor##_4(unsigned long bytes, unsigned long * __restrict p1, \
+ const unsigned long * __restrict p2, \
+ const unsigned long * __restrict p3, \
+ const unsigned long * __restrict p4) \
+{ \
+ kernel_fpu_begin(); \
+ __xor_##flavor##_4(bytes, p1, p2, p3, p4); \
+ kernel_fpu_end(); \
+} \
+EXPORT_SYMBOL_GPL(xor_##flavor##_4)
+
+#define MAKE_XOR_GLUE_5(flavor) \
+void xor_##flavor##_5(unsigned long bytes, unsigned long * __restrict p1, \
+ const unsigned long * __restrict p2, \
+ const unsigned long * __restrict p3, \
+ const unsigned long * __restrict p4, \
+ const unsigned long * __restrict p5) \
+{ \
+ kernel_fpu_begin(); \
+ __xor_##flavor##_5(bytes, p1, p2, p3, p4, p5); \
+ kernel_fpu_end(); \
+} \
+EXPORT_SYMBOL_GPL(xor_##flavor##_5)
+
+#define MAKE_XOR_GLUES(flavor) \
+ MAKE_XOR_GLUE_2(flavor); \
+ MAKE_XOR_GLUE_3(flavor); \
+ MAKE_XOR_GLUE_4(flavor); \
+ MAKE_XOR_GLUE_5(flavor)
+
+#ifdef CONFIG_CPU_HAS_LSX
+MAKE_XOR_GLUES(lsx);
+#endif
+
+#ifdef CONFIG_CPU_HAS_LASX
+MAKE_XOR_GLUES(lasx);
+#endif
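
The exported xor_lsx_*/xor_lasx_* wrappers handle kernel_fpu_begin()/kernel_fpu_end() internally, so a caller only passes the byte count and buffer pointers. A usage sketch, assuming <asm/xor_simd.h> declares the wrappers as the glue file's include suggests:

#include <asm/xor_simd.h>

#ifdef CONFIG_CPU_HAS_LSX
static void example_xor_into(unsigned long *dst, const unsigned long *src,
			     unsigned long bytes)
{
	/* dst[i] ^= src[i] over 'bytes' bytes; SIMD context handled inside */
	xor_lsx_2(bytes, dst, src);
}
#endif
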
diff --git a/arch/loongarch/lib/xor_template.c b/arch/loongarch/lib/xor_template.c
new file mode 100644
index 000000000000..0358ced7fe33
--- /dev/null
+++ b/arch/loongarch/lib/xor_template.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ *
+ * Template for XOR operations, instantiated in xor_simd.c.
+ *
+ * Expected preprocessor definitions:
+ *
+ * - LINE_WIDTH
+ * - XOR_FUNC_NAME(nr)
+ * - LD_INOUT_LINE(buf)
+ * - LD_AND_XOR_LINE(buf)
+ * - ST_LINE(buf)
+ */
+
+void XOR_FUNC_NAME(2)(unsigned long bytes,
+ unsigned long * __restrict v1,
+ const unsigned long * __restrict v2)
+{
+ unsigned long lines = bytes / LINE_WIDTH;
+
+ do {
+ __asm__ __volatile__ (
+ LD_INOUT_LINE(v1)
+ LD_AND_XOR_LINE(v2)
+ ST_LINE(v1)
+ : : [v1] "r"(v1), [v2] "r"(v2) : "memory"
+ );
+
+ v1 += LINE_WIDTH / sizeof(unsigned long);
+ v2 += LINE_WIDTH / sizeof(unsigned long);
+ } while (--lines > 0);
+}
+
+void XOR_FUNC_NAME(3)(unsigned long bytes,
+ unsigned long * __restrict v1,
+ const unsigned long * __restrict v2,
+ const unsigned long * __restrict v3)
+{
+ unsigned long lines = bytes / LINE_WIDTH;
+
+ do {
+ __asm__ __volatile__ (
+ LD_INOUT_LINE(v1)
+ LD_AND_XOR_LINE(v2)
+ LD_AND_XOR_LINE(v3)
+ ST_LINE(v1)
+ : : [v1] "r"(v1), [v2] "r"(v2), [v3] "r"(v3) : "memory"
+ );
+
+ v1 += LINE_WIDTH / sizeof(unsigned long);
+ v2 += LINE_WIDTH / sizeof(unsigned long);
+ v3 += LINE_WIDTH / sizeof(unsigned long);
+ } while (--lines > 0);
+}
+
+void XOR_FUNC_NAME(4)(unsigned long bytes,
+ unsigned long * __restrict v1,
+ const unsigned long * __restrict v2,
+ const unsigned long * __restrict v3,
+ const unsigned long * __restrict v4)
+{
+ unsigned long lines = bytes / LINE_WIDTH;
+
+ do {
+ __asm__ __volatile__ (
+ LD_INOUT_LINE(v1)
+ LD_AND_XOR_LINE(v2)
+ LD_AND_XOR_LINE(v3)
+ LD_AND_XOR_LINE(v4)
+ ST_LINE(v1)
+ : : [v1] "r"(v1), [v2] "r"(v2), [v3] "r"(v3), [v4] "r"(v4)
+ : "memory"
+ );
+
+ v1 += LINE_WIDTH / sizeof(unsigned long);
+ v2 += LINE_WIDTH / sizeof(unsigned long);
+ v3 += LINE_WIDTH / sizeof(unsigned long);
+ v4 += LINE_WIDTH / sizeof(unsigned long);
+ } while (--lines > 0);
+}
+
+void XOR_FUNC_NAME(5)(unsigned long bytes,
+ unsigned long * __restrict v1,
+ const unsigned long * __restrict v2,
+ const unsigned long * __restrict v3,
+ const unsigned long * __restrict v4,
+ const unsigned long * __restrict v5)
+{
+ unsigned long lines = bytes / LINE_WIDTH;
+
+ do {
+ __asm__ __volatile__ (
+ LD_INOUT_LINE(v1)
+ LD_AND_XOR_LINE(v2)
+ LD_AND_XOR_LINE(v3)
+ LD_AND_XOR_LINE(v4)
+ LD_AND_XOR_LINE(v5)
+ ST_LINE(v1)
+ : : [v1] "r"(v1), [v2] "r"(v2), [v3] "r"(v3), [v4] "r"(v4),
+ [v5] "r"(v5) : "memory"
+ );
+
+ v1 += LINE_WIDTH / sizeof(unsigned long);
+ v2 += LINE_WIDTH / sizeof(unsigned long);
+ v3 += LINE_WIDTH / sizeof(unsigned long);
+ v4 += LINE_WIDTH / sizeof(unsigned long);
+ v5 += LINE_WIDTH / sizeof(unsigned long);
+ } while (--lines > 0);
+}
diff --git a/arch/loongarch/mm/Makefile b/arch/loongarch/mm/Makefile
index 8ffc6383f836..e4d1e581dbae 100644
--- a/arch/loongarch/mm/Makefile
+++ b/arch/loongarch/mm/Makefile
@@ -7,3 +7,6 @@ obj-y += init.o cache.o tlb.o tlbex.o extable.o \
fault.o ioremap.o maccess.o mmap.o pgtable.o page.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_KASAN) += kasan_init.o
+
+KASAN_SANITIZE_kasan_init.o := n
diff --git a/arch/loongarch/mm/cache.c b/arch/loongarch/mm/cache.c
index 72685a48eaf0..6be04d36ca07 100644
--- a/arch/loongarch/mm/cache.c
+++ b/arch/loongarch/mm/cache.c
@@ -156,7 +156,6 @@ void cpu_cache_init(void)
current_cpu_data.cache_leaves_present = leaf;
current_cpu_data.options |= LOONGARCH_CPU_PREFETCH;
- shm_align_mask = PAGE_SIZE - 1;
}
static const pgprot_t protection_map[16] = {
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index da5b6d518cdb..e6376e3dce86 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -23,6 +23,7 @@
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
+#include <linux/kfence.h>
#include <asm/branch.h>
#include <asm/mmu_context.h>
@@ -30,7 +31,8 @@
int show_unhandled_signals = 1;
-static void __kprobes no_context(struct pt_regs *regs, unsigned long address)
+static void __kprobes no_context(struct pt_regs *regs,
+ unsigned long write, unsigned long address)
{
const int field = sizeof(unsigned long) * 2;
@@ -38,6 +40,9 @@ static void __kprobes no_context(struct pt_regs *regs, unsigned long address)
if (fixup_exception(regs))
return;
+ if (kfence_handle_page_fault(address, write, regs))
+ return;
+
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
@@ -51,14 +56,15 @@ static void __kprobes no_context(struct pt_regs *regs, unsigned long address)
die("Oops", regs);
}
-static void __kprobes do_out_of_memory(struct pt_regs *regs, unsigned long address)
+static void __kprobes do_out_of_memory(struct pt_regs *regs,
+ unsigned long write, unsigned long address)
{
/*
* We ran out of memory, call the OOM killer, and return to userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
if (!user_mode(regs)) {
- no_context(regs, address);
+ no_context(regs, write, address);
return;
}
pagefault_out_of_memory();
@@ -69,7 +75,7 @@ static void __kprobes do_sigbus(struct pt_regs *regs,
{
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs)) {
- no_context(regs, address);
+ no_context(regs, write, address);
return;
}
@@ -90,7 +96,7 @@ static void __kprobes do_sigsegv(struct pt_regs *regs,
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs)) {
- no_context(regs, address);
+ no_context(regs, write, address);
return;
}
@@ -149,7 +155,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
*/
if (address & __UA_LIMIT) {
if (!user_mode(regs))
- no_context(regs, address);
+ no_context(regs, write, address);
else
do_sigsegv(regs, write, address, si_code);
return;
@@ -211,7 +217,7 @@ good_area:
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
- no_context(regs, address);
+ no_context(regs, write, address);
return;
}
@@ -232,7 +238,7 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
mmap_read_unlock(mm);
if (fault & VM_FAULT_OOM) {
- do_out_of_memory(regs, address);
+ do_out_of_memory(regs, write, address);
return;
} else if (fault & VM_FAULT_SIGSEGV) {
do_sigsegv(regs, write, address, si_code);
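
The fault.c changes thread the access type down to the kernel-mode fault path so KFENCE can be consulted before the kernel oopses: kfence_handle_page_fault() needs to know whether the faulting access was a read or a write, and returns true when the address lies in the KFENCE pool and the report has been handled. In terms of the generic KFENCE API (prototype from <linux/kfence.h>), the pattern adopted above is roughly:

    /* Generic KFENCE hook used above; 'write' distinguishes read vs. write faults. */
    bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);

    if (kfence_handle_page_fault(address, write, regs))
    	return;		/* KFENCE reported the out-of-bounds/use-after-free */
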
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index 3b7d8129570b..f3fe8c06ba4d 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -35,33 +35,8 @@
#include <asm/pgalloc.h>
#include <asm/tlb.h>
-/*
- * We have up to 8 empty zeroed pages so we can map one of the right colour
- * when needed. Since page is never written to after the initialization we
- * don't have to care about aliases on other CPUs.
- */
-unsigned long empty_zero_page, zero_page_mask;
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
-EXPORT_SYMBOL(zero_page_mask);
-
-void setup_zero_pages(void)
-{
- unsigned int order, i;
- struct page *page;
-
- order = 0;
-
- empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!empty_zero_page)
- panic("Oh boy, that early out of memory?");
-
- page = virt_to_page((void *)empty_zero_page);
- split_page(page, order);
- for (i = 0; i < (1 << order); i++, page++)
- mark_page_reserved(page);
-
- zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
-}
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
@@ -106,7 +81,6 @@ void __init mem_init(void)
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
memblock_free_all();
- setup_zero_pages(); /* Setup zeroed pages. */
}
#endif /* !CONFIG_NUMA */
@@ -191,43 +165,42 @@ void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *al
#endif
#endif
-static pte_t *fixmap_pte(unsigned long addr)
+pte_t * __init populate_kernel_pte(unsigned long addr)
{
- pgd_t *pgd;
- p4d_t *p4d;
+ pgd_t *pgd = pgd_offset_k(addr);
+ p4d_t *p4d = p4d_offset(pgd, addr);
pud_t *pud;
pmd_t *pmd;
- pgd = pgd_offset_k(addr);
- p4d = p4d_offset(pgd, addr);
-
- if (pgd_none(*pgd)) {
- pud_t *new __maybe_unused;
-
- new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
- pgd_populate(&init_mm, pgd, new);
+ if (p4d_none(*p4d)) {
+ pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pud)
+ panic("%s: Failed to allocate memory\n", __func__);
+ p4d_populate(&init_mm, p4d, pud);
#ifndef __PAGETABLE_PUD_FOLDED
- pud_init(new);
+ pud_init(pud);
#endif
}
pud = pud_offset(p4d, addr);
if (pud_none(*pud)) {
- pmd_t *new __maybe_unused;
-
- new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
- pud_populate(&init_mm, pud, new);
+ pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pmd)
+ panic("%s: Failed to allocate memory\n", __func__);
+ pud_populate(&init_mm, pud, pmd);
#ifndef __PAGETABLE_PMD_FOLDED
- pmd_init(new);
+ pmd_init(pmd);
#endif
}
pmd = pmd_offset(pud, addr);
- if (pmd_none(*pmd)) {
- pte_t *new __maybe_unused;
+ if (!pmd_present(*pmd)) {
+ pte_t *pte;
- new = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
- pmd_populate_kernel(&init_mm, pmd, new);
+ pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate memory\n", __func__);
+ pmd_populate_kernel(&init_mm, pmd, pte);
}
return pte_offset_kernel(pmd, addr);
@@ -241,7 +214,7 @@ void __init __set_fixmap(enum fixed_addresses idx,
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
- ptep = fixmap_pte(addr);
+ ptep = populate_kernel_pte(addr);
if (!pte_none(*ptep)) {
pte_ERROR(*ptep);
return;
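
populate_kernel_pte(), the renamed and more widely usable form of fixmap_pte(), walks the pgd/p4d/pud/pmd levels for a kernel virtual address, allocates any missing intermediate table from memblock, and hands back the pte slot; both the fixmap code below and the new KASAN code rely on it. A hedged usage sketch, assuming a caller-supplied physical page 'phys' and PAGE_KERNEL protections:

    /* Sketch: map one kernel page at 'vaddr' via the helper above (illustrative). */
    pte_t *ptep = populate_kernel_pte(vaddr);

    if (pte_none(*ptep))
    	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, PAGE_KERNEL));
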
diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c
new file mode 100644
index 000000000000..da68bc1a4643
--- /dev/null
+++ b/arch/loongarch/mm/kasan_init.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+#define pr_fmt(fmt) "kasan: " fmt
+#include <linux/kasan.h>
+#include <linux/memblock.h>
+#include <linux/sched/task.h>
+
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+#include <asm-generic/sections.h>
+
+static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+
+#ifdef __PAGETABLE_PUD_FOLDED
+#define __p4d_none(early, p4d) (0)
+#else
+#define __p4d_none(early, p4d) (early ? (p4d_val(p4d) == 0) : \
+(__pa(p4d_val(p4d)) == (unsigned long)__pa(kasan_early_shadow_pud)))
+#endif
+
+#ifdef __PAGETABLE_PMD_FOLDED
+#define __pud_none(early, pud) (0)
+#else
+#define __pud_none(early, pud) (early ? (pud_val(pud) == 0) : \
+(__pa(pud_val(pud)) == (unsigned long)__pa(kasan_early_shadow_pmd)))
+#endif
+
+#define __pmd_none(early, pmd) (early ? (pmd_val(pmd) == 0) : \
+(__pa(pmd_val(pmd)) == (unsigned long)__pa(kasan_early_shadow_pte)))
+
+#define __pte_none(early, pte) (early ? pte_none(pte) : \
+((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))
+
+bool kasan_early_stage = true;
+
+/*
+ * Alloc memory for shadow memory page table.
+ */
+static phys_addr_t __init kasan_alloc_zeroed_page(int node)
+{
+ void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
+ if (!p)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE, node, __pa(MAX_DMA_ADDRESS));
+
+ return __pa(p);
+}
+
+static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
+{
+ if (__pmd_none(early, READ_ONCE(*pmdp))) {
+ phys_addr_t pte_phys = early ?
+ __pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
+ if (!early)
+ memcpy(__va(pte_phys), kasan_early_shadow_pte, sizeof(kasan_early_shadow_pte));
+ pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys));
+ }
+
+ return pte_offset_kernel(pmdp, addr);
+}
+
+static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
+{
+ if (__pud_none(early, READ_ONCE(*pudp))) {
+ phys_addr_t pmd_phys = early ?
+ __pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
+ if (!early)
+ memcpy(__va(pmd_phys), kasan_early_shadow_pmd, sizeof(kasan_early_shadow_pmd));
+ pud_populate(&init_mm, pudp, (pmd_t *)__va(pmd_phys));
+ }
+
+ return pmd_offset(pudp, addr);
+}
+
+static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
+{
+ if (__p4d_none(early, READ_ONCE(*p4dp))) {
+ phys_addr_t pud_phys = early ?
+ __pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
+ if (!early)
+ memcpy(__va(pud_phys), kasan_early_shadow_pud, sizeof(kasan_early_shadow_pud));
+ p4d_populate(&init_mm, p4dp, (pud_t *)__va(pud_phys));
+ }
+
+ return pud_offset(p4dp, addr);
+}
+
+static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
+ unsigned long end, int node, bool early)
+{
+ unsigned long next;
+ pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);
+
+ do {
+ phys_addr_t page_phys = early ?
+ __pa_symbol(kasan_early_shadow_page)
+ : kasan_alloc_zeroed_page(node);
+ next = addr + PAGE_SIZE;
+ set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
+ } while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep)));
+}
+
+static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
+ unsigned long end, int node, bool early)
+{
+ unsigned long next;
+ pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);
+
+ do {
+ next = pmd_addr_end(addr, end);
+ kasan_pte_populate(pmdp, addr, next, node, early);
+ } while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp)));
+}
+
+static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
+ unsigned long end, int node, bool early)
+{
+ unsigned long next;
+ pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);
+
+ do {
+ next = pud_addr_end(addr, end);
+ kasan_pmd_populate(pudp, addr, next, node, early);
+ } while (pudp++, addr = next, addr != end);
+}
+
+static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
+ unsigned long end, int node, bool early)
+{
+ unsigned long next;
+ p4d_t *p4dp = p4d_offset(pgdp, addr);
+
+ do {
+ next = p4d_addr_end(addr, end);
+ kasan_pud_populate(p4dp, addr, next, node, early);
+ } while (p4dp++, addr = next, addr != end);
+}
+
+static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
+ int node, bool early)
+{
+ unsigned long next;
+ pgd_t *pgdp;
+
+ pgdp = pgd_offset_k(addr);
+
+ do {
+ next = pgd_addr_end(addr, end);
+ kasan_p4d_populate(pgdp, addr, next, node, early);
+ } while (pgdp++, addr = next, addr != end);
+
+}
+
+/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
+static void __init kasan_map_populate(unsigned long start, unsigned long end,
+ int node)
+{
+ kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
+}
+
+asmlinkage void __init kasan_early_init(void)
+{
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
+}
+
+static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
+{
+ WRITE_ONCE(*pgdp, pgdval);
+}
+
+static void __init clear_pgds(unsigned long start, unsigned long end)
+{
+ /*
+ * Remove references to kasan page tables from
+ * swapper_pg_dir. pgd_clear() can't be used
+ * here because it's a no-op on 2- and 3-level pagetable setups.
+ */
+ for (; start < end; start += PGDIR_SIZE)
+ kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
+}
+
+void __init kasan_init(void)
+{
+ u64 i;
+ phys_addr_t pa_start, pa_end;
+
+ /*
+ * PGD was populated as invalid_pmd_table or invalid_pud_table
+ * in pagetable_init() which depends on how many levels of page
+ * table you are using, but we have to clear the pgd entries covering
+ * the kasan shadow memory here, as their values are non-zero.
+ * Otherwise the pgd_none check would be false and the populate calls
+ * afterwards would not create any new pgd at all.
+ */
+ memcpy(kasan_pg_dir, swapper_pg_dir, sizeof(kasan_pg_dir));
+ csr_write64(__pa_symbol(kasan_pg_dir), LOONGARCH_CSR_PGDH);
+ local_flush_tlb_all();
+
+ clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+ /* Maps everything to a single page of zeroes */
+ kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);
+
+ kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
+ kasan_mem_to_shadow((void *)KFENCE_AREA_END));
+
+ kasan_early_stage = false;
+
+ /* Populate the linear mapping */
+ for_each_mem_range(i, &pa_start, &pa_end) {
+ void *start = (void *)phys_to_virt(pa_start);
+ void *end = (void *)phys_to_virt(pa_end);
+
+ if (start >= end)
+ break;
+
+ kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
+ (unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
+ }
+
+ /* Populate modules mapping */
+ kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
+ (unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
+ /*
+ * KASAN may reuse the contents of kasan_early_shadow_pte directly, so we
+ * should make sure that it maps the zero page read-only.
+ */
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ set_pte(&kasan_early_shadow_pte[i],
+ pfn_pte(__phys_to_pfn(__pa_symbol(kasan_early_shadow_page)), PAGE_KERNEL_RO));
+
+ memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+ csr_write64(__pa_symbol(swapper_pg_dir), LOONGARCH_CSR_PGDH);
+ local_flush_tlb_all();
+
+ /* At this point kasan is fully initialized. Enable error messages */
+ init_task.kasan_depth = 0;
+ pr_info("KernelAddressSanitizer initialized.\n");
+}
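
kasan_init() first switches to a scratch kasan_pg_dir so the shadow pgds in swapper_pg_dir can be cleared safely, maps the whole shadow region to the early zero page, and then replaces the shadow for the linear map and modules with real pages. The shadow layout follows generic KASAN: one shadow byte covers eight bytes of address space, so shadow addresses are a shift plus a constant offset. For reference, the generic translation looks like the sketch below (LoongArch supplies its own kasan_mem_to_shadow() variant to cope with its separate XKPRANGE/XKVRANGE address windows, so treat this as the conceptual model rather than the arch code):

    /* Generic KASAN address-to-shadow mapping (KASAN_SHADOW_SCALE_SHIFT == 3). */
    static inline void *kasan_mem_to_shadow(const void *addr)
    {
    	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
    		+ KASAN_SHADOW_OFFSET;
    }
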
diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c
index fbe1a4856fc4..a9630a81b38a 100644
--- a/arch/loongarch/mm/mmap.c
+++ b/arch/loongarch/mm/mmap.c
@@ -8,12 +8,11 @@
#include <linux/mm.h>
#include <linux/mman.h>
-unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
-EXPORT_SYMBOL(shm_align_mask);
+#define SHM_ALIGN_MASK (SHMLBA - 1)
-#define COLOUR_ALIGN(addr, pgoff) \
- ((((addr) + shm_align_mask) & ~shm_align_mask) + \
- (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+#define COLOUR_ALIGN(addr, pgoff) \
+ ((((addr) + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK) \
+ + (((pgoff) << PAGE_SHIFT) & SHM_ALIGN_MASK))
enum mmap_allocation_direction {UP, DOWN};
@@ -40,7 +39,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
* cache aliasing constraints.
*/
if ((flags & MAP_SHARED) &&
- ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+ ((addr - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK))
return -EINVAL;
return addr;
}
@@ -63,7 +62,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
}
info.length = len;
- info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
+ info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
info.align_offset = pgoff << PAGE_SHIFT;
if (dir == DOWN) {
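
COLOUR_ALIGN() rounds a candidate address up to an SHMLBA boundary and then adds the file offset's colour bits, so shared mappings of the same page land at the same cache colour. A worked example of the arithmetic, with assumed numbers (SHMLBA = 0x10000, PAGE_SHIFT = 14; not taken from the patch):

    /* Illustrative arithmetic for COLOUR_ALIGN(addr, pgoff). */
    #define SHM_ALIGN_MASK	(SHMLBA - 1)				/* 0xffff here */

    unsigned long addr  = 0x120003000;
    unsigned long pgoff = 5;						/* file offset in pages */

    unsigned long up      = (addr + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK;	/* 0x120010000 */
    unsigned long colour  = (pgoff << PAGE_SHIFT) & SHM_ALIGN_MASK;	/* 0x4000 */
    unsigned long aligned = up + colour;				/* 0x120014000 */
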
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
index b14343e211b6..71d0539e2d0b 100644
--- a/arch/loongarch/mm/pgtable.c
+++ b/arch/loongarch/mm/pgtable.c
@@ -9,6 +9,18 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
+struct page *dmw_virt_to_page(unsigned long kaddr)
+{
+ return pfn_to_page(virt_to_pfn(kaddr));
+}
+EXPORT_SYMBOL_GPL(dmw_virt_to_page);
+
+struct page *tlb_virt_to_page(unsigned long kaddr)
+{
+ return pfn_to_page(pte_pfn(*virt_to_kpte(kaddr)));
+}
+EXPORT_SYMBOL_GPL(tlb_virt_to_page);
+
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *init, *ret = NULL;
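
The two new helpers split kernel-virtual-to-page translation by mapping type: dmw_virt_to_page() covers addresses in the direct-mapped windows, where virt_to_pfn() is plain arithmetic, while tlb_virt_to_page() covers TLB-mapped kernel addresses (vmalloc and friends) by reading the kernel pte. A hedged caller-side sketch (the range check shown is illustrative; the real dispatch lives in the virt_to_page() definition, not in this hunk):

    /* Illustrative: choose the translation by address range (assumed check). */
    static struct page *kaddr_to_page(unsigned long kaddr)
    {
    	if (kaddr >= vm_map_base)		/* TLB-mapped kernel range */
    		return tlb_virt_to_page(kaddr);
    	return dmw_virt_to_page(kaddr);		/* direct-mapped window */
    }
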
diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile
index a50308b6fc25..5c97d1463328 100644
--- a/arch/loongarch/vdso/Makefile
+++ b/arch/loongarch/vdso/Makefile
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
# Objects to go into the VDSO.
+KASAN_SANITIZE := n
+KCOV_INSTRUMENT := n
+
# Include the generic Makefile to check the built vdso.
include $(srctree)/lib/vdso/Makefile
diff --git a/arch/m68k/coldfire/dma_timer.c b/arch/m68k/coldfire/dma_timer.c
index cbb289439606..91e6728f51ed 100644
--- a/arch/m68k/coldfire/dma_timer.c
+++ b/arch/m68k/coldfire/dma_timer.c
@@ -48,7 +48,7 @@ static struct clocksource clocksource_cf_dt = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static int __init init_cf_dt_clocksource(void)
+static int __init init_cf_dt_clocksource(void)
{
/*
* We setup DMA timer 0 in free run mode. This incrementing counter is
diff --git a/arch/m68k/emu/nfcon.c b/arch/m68k/emu/nfcon.c
index 6fdc13610565..3a74d493eb3e 100644
--- a/arch/m68k/emu/nfcon.c
+++ b/arch/m68k/emu/nfcon.c
@@ -70,16 +70,16 @@ static void nfcon_tty_close(struct tty_struct *tty, struct file *filp)
{
}
-static int nfcon_tty_write(struct tty_struct *tty, const unsigned char *buf,
- int count)
+static ssize_t nfcon_tty_write(struct tty_struct *tty, const u8 *buf,
+ size_t count)
{
nfputs(buf, count);
return count;
}
-static int nfcon_tty_put_char(struct tty_struct *tty, unsigned char ch)
+static int nfcon_tty_put_char(struct tty_struct *tty, u8 ch)
{
- char temp[2] = { ch, 0 };
+ u8 temp[2] = { ch, 0 };
nf_call(stderr_id, virt_to_phys(temp));
return 1;
diff --git a/arch/m68k/include/asm/ide.h b/arch/m68k/include/asm/ide.h
deleted file mode 100644
index 05cc7dc00e0c..000000000000
--- a/arch/m68k/include/asm/ide.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 1994-1996 Linus Torvalds & authors
- */
-
-/* Copyright(c) 1996 Kars de Jong */
-/* Based on the ide driver from 1.2.13pl8 */
-
-/*
- * Credits (alphabetical):
- *
- * - Bjoern Brauel
- * - Kars de Jong
- * - Torsten Ebeling
- * - Dwight Engen
- * - Thorsten Floeck
- * - Roman Hodek
- * - Guenther Kelleter
- * - Chris Lawrence
- * - Michael Rausch
- * - Christian Sauer
- * - Michael Schmitz
- * - Jes Soerensen
- * - Michael Thurm
- * - Geert Uytterhoeven
- */
-
-#ifndef _M68K_IDE_H
-#define _M68K_IDE_H
-
-#ifdef __KERNEL__
-#include <asm/setup.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#ifdef CONFIG_MMU
-
-/*
- * Get rid of defs from io.h - ide has its private and conflicting versions
- * Since so far no single m68k platform uses ISA/PCI I/O space for IDE, we
- * always use the `raw' MMIO versions
- */
-#undef readb
-#undef readw
-#undef writeb
-#undef writew
-
-#define readb in_8
-#define readw in_be16
-#define __ide_mm_insw(port, addr, n) raw_insw((u16 *)port, addr, n)
-#define __ide_mm_insl(port, addr, n) raw_insl((u32 *)port, addr, n)
-#define writeb(val, port) out_8(port, val)
-#define writew(val, port) out_be16(port, val)
-#define __ide_mm_outsw(port, addr, n) raw_outsw((u16 *)port, addr, n)
-#define __ide_mm_outsl(port, addr, n) raw_outsl((u32 *)port, addr, n)
-
-#else
-
-#define __ide_mm_insw(port, addr, n) io_insw((unsigned int)port, addr, n)
-#define __ide_mm_insl(port, addr, n) io_insl((unsigned int)port, addr, n)
-#define __ide_mm_outsw(port, addr, n) io_outsw((unsigned int)port, addr, n)
-#define __ide_mm_outsl(port, addr, n) io_outsl((unsigned int)port, addr, n)
-
-#endif /* CONFIG_MMU */
-
-#endif /* __KERNEL__ */
-#endif /* _M68K_IDE_H */
diff --git a/arch/m68k/kernel/pcibios.c b/arch/m68k/kernel/pcibios.c
index b0e110d3d2e6..9504eb19d73a 100644
--- a/arch/m68k/kernel/pcibios.c
+++ b/arch/m68k/kernel/pcibios.c
@@ -92,9 +92,3 @@ void pcibios_fixup_bus(struct pci_bus *bus)
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 32);
}
}
-
-char *pcibios_setup(char *str)
-{
- return str;
-}
-
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 337f23eabc71..86a4ce07c192 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -99,9 +99,6 @@ extern int page_is_ram(unsigned long pfn);
# define phys_to_pfn(phys) (PFN_DOWN(phys))
# define pfn_to_phys(pfn) (PFN_PHYS(pfn))
-# define virt_to_pfn(vaddr) (phys_to_pfn((__pa(vaddr))))
-# define pfn_to_virt(pfn) __va(pfn_to_phys((pfn)))
-
# define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
# define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
# define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
@@ -109,11 +106,6 @@ extern int page_is_ram(unsigned long pfn);
# define ARCH_PFN_OFFSET (memory_start >> PAGE_SHIFT)
# endif /* __ASSEMBLY__ */
-#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
-
-# define __pa(x) __virt_to_phys((unsigned long)(x))
-# define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
-
/* Convert between virtual and physical address for MMU. */
/* Handle MicroBlaze processor with virtual memory. */
#define __virt_to_phys(addr) \
@@ -125,6 +117,25 @@ extern int page_is_ram(unsigned long pfn);
#define tovirt(rd, rs) \
addik rd, rs, (CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR)
+#ifndef __ASSEMBLY__
+
+# define __pa(x) __virt_to_phys((unsigned long)(x))
+# define __va(x) ((void *)__phys_to_virt((unsigned long)(x)))
+
+static inline unsigned long virt_to_pfn(const void *vaddr)
+{
+ return phys_to_pfn(__pa(vaddr));
+}
+
+static inline const void *pfn_to_virt(unsigned long pfn)
+{
+ return __va(pfn_to_phys((pfn)));
+}
+
+#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
+
+#endif /* __ASSEMBLY__ */
+
#define TOPHYS(addr) __virt_to_phys(addr)
#endif /* __KERNEL__ */
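
Turning virt_to_pfn()/pfn_to_virt() into static inlines (and moving __pa/__va under #ifndef __ASSEMBLY__) gives callers real type checking on the pointer argument instead of bare macro substitution. A small hedged usage example (the buffer is hypothetical):

    /* Illustrative round-trip through the typed helpers. */
    void *buf = kmalloc(64, GFP_KERNEL);		/* hypothetical lowmem buffer */
    unsigned long pfn = virt_to_pfn(buf);		/* takes const void * */
    const void *same = pfn_to_virt(pfn);		/* returns const void * */

    WARN_ON(!virt_addr_valid(buf));
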
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index 3657f5e78a3d..bf2600f75959 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -25,7 +25,5 @@ void machine_shutdown(void);
void machine_halt(void);
void machine_power_off(void);
-extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
-
# endif /* __ASSEMBLY__ */
#endif /* _ASM_MICROBLAZE_SETUP_H */
diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c
index 5f4722908164..2f66c7963084 100644
--- a/arch/microblaze/kernel/reset.c
+++ b/arch/microblaze/kernel/reset.c
@@ -9,7 +9,6 @@
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/of_platform.h>
#include <linux/reboot.h>
void machine_shutdown(void)
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 353fabdfcbc5..3827dc76edd8 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -270,22 +270,6 @@ asmlinkage void __init mmu_init(void)
memblock_dump_all();
}
-void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
-{
- void *p;
-
- if (mem_init_done) {
- p = kzalloc(size, mask);
- } else {
- p = memblock_alloc(size, SMP_CACHE_BYTES);
- if (!p)
- panic("%s: Failed to allocate %zu bytes\n",
- __func__, size);
- }
-
- return p;
-}
-
static const pgprot_t protection_map[16] = {
[VM_NONE] = PAGE_NONE,
[VM_READ] = PAGE_READONLY_X,
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index a47593d72f6f..f49807e1f19b 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -181,12 +181,16 @@ endif
cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1
cflags-$(CONFIG_CPU_BMIPS) += -march=mips32 -Wa,-mips32 -Wa,--trap
-cflags-$(CONFIG_CPU_LOONGSON2E) += $(call cc-option,-march=loongson2e) -Wa,--trap
-cflags-$(CONFIG_CPU_LOONGSON2F) += $(call cc-option,-march=loongson2f) -Wa,--trap
-cflags-$(CONFIG_CPU_LOONGSON64) += $(call cc-option,-march=loongson3a,-march=mips64r2) -Wa,--trap
+cflags-$(CONFIG_CPU_LOONGSON2E) += -march=loongson2e -Wa,--trap
+cflags-$(CONFIG_CPU_LOONGSON2F) += -march=loongson2f -Wa,--trap
# Some -march= flags enable MMI instructions, and GCC complains about that
# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
cflags-$(CONFIG_CPU_LOONGSON2EF) += $(call cc-option,-mno-loongson-mmi)
+ifdef CONFIG_CPU_LOONGSON64
+cflags-$(CONFIG_CPU_LOONGSON64) += -Wa,--trap
+cflags-$(CONFIG_CC_IS_GCC) += -march=loongson3a
+cflags-$(CONFIG_CC_IS_CLANG) += -march=mips64r2
+endif
cflags-$(CONFIG_CPU_LOONGSON64) += $(call cc-option,-mno-loongson-mmi)
cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
@@ -299,8 +303,8 @@ ifdef CONFIG_64BIT
endif
endif
- ifeq ($(KBUILD_SYM32)$(call cc-option-yn,-msym32), yy)
- cflags-y += -msym32 -DKBUILD_64BIT_SYM32
+ ifeq ($(KBUILD_SYM32), y)
+ cflags-$(KBUILD_SYM32) += -msym32 -DKBUILD_64BIT_SYM32
else
ifeq ($(CONFIG_CPU_DADDI_WORKAROUNDS), y)
$(error CONFIG_CPU_DADDI_WORKAROUNDS unsupported without -msym32)
@@ -341,7 +345,7 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_LDFLAGS += -m $(ld-emul)
-ifdef CONFIG_MIPS
+ifdef need-compiler
CHECKFLAGS += $(shell $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
grep -E -vw '__GNUC_(MINOR_|PATCHLEVEL_)?_' | \
sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index 053805cb741c..ec180ab92eaa 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -16,7 +16,6 @@
#include <linux/of.h>
#include <linux/of_clk.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <linux/libfdt.h>
#include <linux/smp.h>
#include <asm/addrspace.h>
diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile
index 7c02e542959a..2a5926578841 100644
--- a/arch/mips/cavium-octeon/Makefile
+++ b/arch/mips/cavium-octeon/Makefile
@@ -18,4 +18,3 @@ obj-y += crypto/
obj-$(CONFIG_MTD) += flash_setup.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_OCTEON_ILM) += oct_ilm.o
-obj-$(CONFIG_USB) += octeon-usb.o
diff --git a/arch/mips/cavium-octeon/flash_setup.c b/arch/mips/cavium-octeon/flash_setup.c
index c8a8c6d359b9..3395acde4d60 100644
--- a/arch/mips/cavium-octeon/flash_setup.c
+++ b/arch/mips/cavium-octeon/flash_setup.c
@@ -12,7 +12,8 @@
#include <linux/semaphore.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <asm/octeon/octeon.h>
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
index 25860fba6218..fef0c6de3fa1 100644
--- a/arch/mips/cavium-octeon/octeon-memcpy.S
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -13,9 +13,9 @@
* Mnemonic names for arguments to memcpy/__copy_user
*/
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
-#include <asm/export.h>
#include <asm/regdef.h>
#define dst a0
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index ce05c0dd3acd..f76783c24338 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -8,8 +8,10 @@
*/
#include <linux/etherdevice.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_fdt.h>
+#include <linux/platform_device.h>
#include <linux/libfdt.h>
#include <asm/octeon/octeon.h>
@@ -450,7 +452,6 @@ static const struct of_device_id octeon_ids[] __initconst = {
{ .compatible = "cavium,octeon-3860-bootbus", },
{ .compatible = "cavium,mdio-mux", },
{ .compatible = "gpio-leds", },
- { .compatible = "cavium,octeon-7130-usb-uctl", },
{},
};
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index 44821f497261..dc49b09d492b 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -127,7 +127,6 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index 2b4133176930..07839a4b397e 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -283,6 +283,7 @@ CONFIG_DRM_AMDGPU_USERPTR=y
CONFIG_DRM_AMD_ACP=y
CONFIG_DRM_AMD_DC=y
CONFIG_DRM_AMD_DC_SI=y
+CONFIG_DRM_AST=m
CONFIG_DRM_RADEON=m
CONFIG_DRM_QXL=y
CONFIG_DRM_VIRTIO_GPU=y
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 743209047792..ae1a7793e810 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -127,7 +127,6 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index dd2b9c181f32..c07e30f63d8b 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -131,7 +131,6 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 97c2d7f530b3..0a5701020d3f 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -128,7 +128,6 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index e0e312dd968a..5c5e2186210c 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -90,7 +90,6 @@ CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index dee172716581..7ba67a0d6c97 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -7,7 +7,6 @@ generated-y += unistd_nr_n32.h
generated-y += unistd_nr_n64.h
generated-y += unistd_nr_o32.h
-generic-y += export.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += parport.h
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 04cedf9f8811..54a85f1d4f2c 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -896,7 +896,6 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
-#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
-int kvm_arch_flush_remote_tlb(struct kvm *kvm);
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
#endif /* __MIPS_KVM_HOST_H__ */
diff --git a/arch/mips/include/asm/mach-loongson32/loongson1.h b/arch/mips/include/asm/mach-loongson32/loongson1.h
index 7971272345d3..84f45461c832 100644
--- a/arch/mips/include/asm/mach-loongson32/loongson1.h
+++ b/arch/mips/include/asm/mach-loongson32/loongson1.h
@@ -45,8 +45,6 @@
#define LS1X_NAND_BASE 0x1fe78000
#define LS1X_CLK_BASE 0x1fe78030
-#include <regs-clk.h>
#include <regs-mux.h>
-#include <regs-rtc.h>
#endif /* __ASM_MACH_LOONGSON32_LOONGSON1_H */
diff --git a/arch/mips/include/asm/mach-loongson32/regs-clk.h b/arch/mips/include/asm/mach-loongson32/regs-clk.h
deleted file mode 100644
index 98136fa8bee1..000000000000
--- a/arch/mips/include/asm/mach-loongson32/regs-clk.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
- *
- * Loongson 1 Clock Register Definitions.
- */
-
-#ifndef __ASM_MACH_LOONGSON32_REGS_CLK_H
-#define __ASM_MACH_LOONGSON32_REGS_CLK_H
-
-#define LS1X_CLK_REG(x) \
- ((void __iomem *)KSEG1ADDR(LS1X_CLK_BASE + (x)))
-
-#define LS1X_CLK_PLL_FREQ LS1X_CLK_REG(0x0)
-#define LS1X_CLK_PLL_DIV LS1X_CLK_REG(0x4)
-
-#if defined(CONFIG_LOONGSON1_LS1B)
-/* Clock PLL Divisor Register Bits */
-#define DIV_DC_EN BIT(31)
-#define DIV_DC_RST BIT(30)
-#define DIV_CPU_EN BIT(25)
-#define DIV_CPU_RST BIT(24)
-#define DIV_DDR_EN BIT(19)
-#define DIV_DDR_RST BIT(18)
-#define RST_DC_EN BIT(5)
-#define RST_DC BIT(4)
-#define RST_DDR_EN BIT(3)
-#define RST_DDR BIT(2)
-#define RST_CPU_EN BIT(1)
-#define RST_CPU BIT(0)
-
-#define DIV_DC_SHIFT 26
-#define DIV_CPU_SHIFT 20
-#define DIV_DDR_SHIFT 14
-
-#define DIV_DC_WIDTH 4
-#define DIV_CPU_WIDTH 4
-#define DIV_DDR_WIDTH 4
-
-#define BYPASS_DC_SHIFT 12
-#define BYPASS_DDR_SHIFT 10
-#define BYPASS_CPU_SHIFT 8
-
-#define BYPASS_DC_WIDTH 1
-#define BYPASS_DDR_WIDTH 1
-#define BYPASS_CPU_WIDTH 1
-
-#elif defined(CONFIG_LOONGSON1_LS1C)
-/* PLL/SDRAM Frequency configuration register Bits */
-#define PLL_VALID BIT(31)
-#define FRAC_N GENMASK(23, 16)
-#define RST_TIME GENMASK(3, 2)
-#define SDRAM_DIV GENMASK(1, 0)
-
-/* CPU/CAMERA/DC Frequency configuration register Bits */
-#define DIV_DC_EN BIT(31)
-#define DIV_DC GENMASK(30, 24)
-#define DIV_CAM_EN BIT(23)
-#define DIV_CAM GENMASK(22, 16)
-#define DIV_CPU_EN BIT(15)
-#define DIV_CPU GENMASK(14, 8)
-#define DIV_DC_SEL_EN BIT(5)
-#define DIV_DC_SEL BIT(4)
-#define DIV_CAM_SEL_EN BIT(3)
-#define DIV_CAM_SEL BIT(2)
-#define DIV_CPU_SEL_EN BIT(1)
-#define DIV_CPU_SEL BIT(0)
-
-#define DIV_DC_SHIFT 24
-#define DIV_CAM_SHIFT 16
-#define DIV_CPU_SHIFT 8
-#define DIV_DDR_SHIFT 0
-
-#define DIV_DC_WIDTH 7
-#define DIV_CAM_WIDTH 7
-#define DIV_CPU_WIDTH 7
-#define DIV_DDR_WIDTH 2
-
-#endif
-
-#endif /* __ASM_MACH_LOONGSON32_REGS_CLK_H */
diff --git a/arch/mips/include/asm/mach-loongson32/regs-rtc.h b/arch/mips/include/asm/mach-loongson32/regs-rtc.h
deleted file mode 100644
index a3d096be1607..000000000000
--- a/arch/mips/include/asm/mach-loongson32/regs-rtc.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2016 Yang Ling <gnaygnil@gmail.com>
- *
- * Loongson 1 RTC timer Register Definitions.
- */
-
-#ifndef __ASM_MACH_LOONGSON32_REGS_RTC_H
-#define __ASM_MACH_LOONGSON32_REGS_RTC_H
-
-#define LS1X_RTC_REG(x) \
- ((void __iomem *)KSEG1ADDR(LS1X_RTC_BASE + (x)))
-
-#define LS1X_RTC_CTRL LS1X_RTC_REG(0x40)
-
-#define RTC_EXTCLK_OK (BIT(5) | BIT(8))
-#define RTC_EXTCLK_EN BIT(8)
-
-#endif /* __ASM_MACH_LOONGSON32_REGS_RTC_H */
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index cff52b283e03..fcec579f64e9 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -10,7 +10,7 @@
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*/
-#include <asm/export.h>
+#include <linux/export.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/ftrace.h>
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index 9b7c8ab6f08c..447a3ea14aa1 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -11,7 +11,6 @@
* written by Carsten Langgaard, carstenl@mips.com
*/
#include <asm/asm.h>
-#include <asm/export.h>
#include <asm/asm-offsets.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index 6c745aa9e825..c000b22e3fcd 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -11,10 +11,10 @@
* Further modifications to make this work:
* Copyright (c) 1998 Harald Koerfgen
*/
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/errno.h>
-#include <asm/export.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 71b1aafae1bb..48e63943e6f7 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -13,7 +13,6 @@
*/
#include <asm/asm.h>
#include <asm/cachectl.h>
-#include <asm/export.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 4e8c98517d9d..4bb97ee89904 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -12,10 +12,10 @@
* Copyright (C) 2000 MIPS Technologies, Inc.
* Copyright (C) 1999, 2001 Silicon Graphics, Inc.
*/
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/errno.h>
-#include <asm/export.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
diff --git a/arch/mips/kernel/sysrq.c b/arch/mips/kernel/sysrq.c
index 9c1a2019113b..2e98049fe783 100644
--- a/arch/mips/kernel/sysrq.c
+++ b/arch/mips/kernel/sysrq.c
@@ -44,7 +44,7 @@ static void sysrq_tlbdump_othercpus(struct work_struct *dummy)
static DECLARE_WORK(sysrq_tlbdump, sysrq_tlbdump_othercpus);
#endif
-static void sysrq_handle_tlbdump(int key)
+static void sysrq_handle_tlbdump(u8 key)
{
sysrq_tlbdump_single(NULL);
#ifdef CONFIG_SMP
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index aa5583a7b05b..231ac052b506 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -199,7 +199,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
/* Flush slot from GPA */
kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
slot->base_gfn + slot->npages - 1);
- kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+ kvm_flush_remote_tlbs_memslot(kvm, slot);
spin_unlock(&kvm->mmu_lock);
}
@@ -235,7 +235,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
new->base_gfn + new->npages - 1);
if (needs_flush)
- kvm_arch_flush_remote_tlbs_memslot(kvm, new);
+ kvm_flush_remote_tlbs_memslot(kvm, new);
spin_unlock(&kvm->mmu_lock);
}
}
@@ -981,18 +981,12 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
}
-int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
kvm_mips_callbacks->prepare_flush_shadow(kvm);
return 1;
}
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
- const struct kvm_memory_slot *memslot)
-{
- kvm_flush_remote_tlbs(kvm);
-}
-
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
int r;
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index e8c08988ed37..7b2ac1319d70 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -447,7 +447,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
gpa_t gpa = range->start << PAGE_SHIFT;
- pte_t hva_pte = range->pte;
+ pte_t hva_pte = range->arg.pte;
pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
pte_t old_pte;
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 20622bf0a9b3..8f208007b8e8 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -10,7 +10,7 @@
#include <linux/sched.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
diff --git a/arch/mips/lantiq/xway/dcdc.c b/arch/mips/lantiq/xway/dcdc.c
index 96199966a350..4a808f8c5beb 100644
--- a/arch/mips/lantiq/xway/dcdc.c
+++ b/arch/mips/lantiq/xway/dcdc.c
@@ -6,7 +6,8 @@
*/
#include <linux/ioport.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include <lantiq_soc.h>
diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c
index a492b1eb1925..8d52001301de 100644
--- a/arch/mips/lantiq/xway/gptu.c
+++ b/arch/mips/lantiq/xway/gptu.c
@@ -8,8 +8,9 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/init.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <lantiq_soc.h>
#include "../clk.h"
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index d444a1b98a72..3ed078225222 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -10,7 +10,6 @@
#include <linux/clkdev.h>
#include <linux/spinlock.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <lantiq_soc.h>
diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c
index 2796e87dfcae..37c133052ef7 100644
--- a/arch/mips/lantiq/xway/vmmc.c
+++ b/arch/mips/lantiq/xway/vmmc.c
@@ -7,7 +7,8 @@
#include <linux/err.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <lantiq_soc.h>
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 7767137c3e49..3d2ff4118d79 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -11,9 +11,9 @@
* Copyright (C) 2014 Imagination Technologies Ltd.
*/
#include <linux/errno.h>
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
-#include <asm/export.h>
#include <asm/regdef.h>
#ifdef CONFIG_64BIT
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 18a43f2e29c8..a4b4e805ff13 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -32,9 +32,9 @@
#undef CONFIG_CPU_HAS_PREFETCH
#endif
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
-#include <asm/export.h>
#include <asm/regdef.h>
#define dst a0
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 0b342bae9a98..79405c32cc85 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -8,9 +8,9 @@
* Copyright (C) 2007 by Maciej W. Rozycki
* Copyright (C) 2011, 2012 MIPS Technologies, Inc.
*/
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
-#include <asm/export.h>
#include <asm/regdef.h>
#if LONGSIZE == 4
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index 13aaa9927ad1..94f4203563c1 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -7,9 +7,9 @@
* Copyright (C) 2011 MIPS Technologies, Inc.
*/
#include <linux/errno.h>
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
-#include <asm/export.h>
#include <asm/regdef.h>
#define EX(insn,reg,addr,handler) \
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index 6de31b616f9c..c192a6f6cd84 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -6,9 +6,9 @@
* Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle
* Copyright (c) 1999 Silicon Graphics, Inc.
*/
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
-#include <asm/export.h>
#include <asm/regdef.h>
#define EX(insn,reg,addr,handler) \
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
index 64d7979394e6..8075590a9f83 100644
--- a/arch/mips/loongson32/common/platform.c
+++ b/arch/mips/loongson32/common/platform.c
@@ -265,14 +265,6 @@ struct platform_device ls1x_ehci_pdev = {
};
/* Real Time Clock */
-void __init ls1x_rtc_set_extclk(struct platform_device *pdev)
-{
- u32 val = __raw_readl(LS1X_RTC_CTRL);
-
- if (!(val & RTC_EXTCLK_OK))
- __raw_writel(val | RTC_EXTCLK_EN, LS1X_RTC_CTRL);
-}
-
struct platform_device ls1x_rtc_pdev = {
.name = "ls1x-rtc",
.id = -1,
diff --git a/arch/mips/loongson64/smp.c b/arch/mips/loongson64/smp.c
index cdecd7af11a6..e015a26a40f7 100644
--- a/arch/mips/loongson64/smp.c
+++ b/arch/mips/loongson64/smp.c
@@ -187,181 +187,181 @@ static void csr_ipi_probe(void)
static void ipi_set0_regs_init(void)
{
- ipi_set0_regs[0] = (void *)
+ ipi_set0_regs[0] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + SET0);
- ipi_set0_regs[1] = (void *)
+ ipi_set0_regs[1] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + SET0);
- ipi_set0_regs[2] = (void *)
+ ipi_set0_regs[2] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + SET0);
- ipi_set0_regs[3] = (void *)
+ ipi_set0_regs[3] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + SET0);
- ipi_set0_regs[4] = (void *)
+ ipi_set0_regs[4] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + SET0);
- ipi_set0_regs[5] = (void *)
+ ipi_set0_regs[5] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + SET0);
- ipi_set0_regs[6] = (void *)
+ ipi_set0_regs[6] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + SET0);
- ipi_set0_regs[7] = (void *)
+ ipi_set0_regs[7] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + SET0);
- ipi_set0_regs[8] = (void *)
+ ipi_set0_regs[8] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + SET0);
- ipi_set0_regs[9] = (void *)
+ ipi_set0_regs[9] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + SET0);
- ipi_set0_regs[10] = (void *)
+ ipi_set0_regs[10] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + SET0);
- ipi_set0_regs[11] = (void *)
+ ipi_set0_regs[11] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + SET0);
- ipi_set0_regs[12] = (void *)
+ ipi_set0_regs[12] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + SET0);
- ipi_set0_regs[13] = (void *)
+ ipi_set0_regs[13] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + SET0);
- ipi_set0_regs[14] = (void *)
+ ipi_set0_regs[14] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + SET0);
- ipi_set0_regs[15] = (void *)
+ ipi_set0_regs[15] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + SET0);
}
static void ipi_clear0_regs_init(void)
{
- ipi_clear0_regs[0] = (void *)
+ ipi_clear0_regs[0] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + CLEAR0);
- ipi_clear0_regs[1] = (void *)
+ ipi_clear0_regs[1] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + CLEAR0);
- ipi_clear0_regs[2] = (void *)
+ ipi_clear0_regs[2] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + CLEAR0);
- ipi_clear0_regs[3] = (void *)
+ ipi_clear0_regs[3] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + CLEAR0);
- ipi_clear0_regs[4] = (void *)
+ ipi_clear0_regs[4] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + CLEAR0);
- ipi_clear0_regs[5] = (void *)
+ ipi_clear0_regs[5] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + CLEAR0);
- ipi_clear0_regs[6] = (void *)
+ ipi_clear0_regs[6] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + CLEAR0);
- ipi_clear0_regs[7] = (void *)
+ ipi_clear0_regs[7] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + CLEAR0);
- ipi_clear0_regs[8] = (void *)
+ ipi_clear0_regs[8] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + CLEAR0);
- ipi_clear0_regs[9] = (void *)
+ ipi_clear0_regs[9] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + CLEAR0);
- ipi_clear0_regs[10] = (void *)
+ ipi_clear0_regs[10] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + CLEAR0);
- ipi_clear0_regs[11] = (void *)
+ ipi_clear0_regs[11] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + CLEAR0);
- ipi_clear0_regs[12] = (void *)
+ ipi_clear0_regs[12] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + CLEAR0);
- ipi_clear0_regs[13] = (void *)
+ ipi_clear0_regs[13] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + CLEAR0);
- ipi_clear0_regs[14] = (void *)
+ ipi_clear0_regs[14] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + CLEAR0);
- ipi_clear0_regs[15] = (void *)
+ ipi_clear0_regs[15] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + CLEAR0);
}
static void ipi_status0_regs_init(void)
{
- ipi_status0_regs[0] = (void *)
+ ipi_status0_regs[0] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + STATUS0);
- ipi_status0_regs[1] = (void *)
+ ipi_status0_regs[1] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + STATUS0);
- ipi_status0_regs[2] = (void *)
+ ipi_status0_regs[2] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + STATUS0);
- ipi_status0_regs[3] = (void *)
+ ipi_status0_regs[3] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + STATUS0);
- ipi_status0_regs[4] = (void *)
+ ipi_status0_regs[4] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + STATUS0);
- ipi_status0_regs[5] = (void *)
+ ipi_status0_regs[5] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + STATUS0);
- ipi_status0_regs[6] = (void *)
+ ipi_status0_regs[6] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + STATUS0);
- ipi_status0_regs[7] = (void *)
+ ipi_status0_regs[7] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + STATUS0);
- ipi_status0_regs[8] = (void *)
+ ipi_status0_regs[8] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + STATUS0);
- ipi_status0_regs[9] = (void *)
+ ipi_status0_regs[9] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + STATUS0);
- ipi_status0_regs[10] = (void *)
+ ipi_status0_regs[10] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + STATUS0);
- ipi_status0_regs[11] = (void *)
+ ipi_status0_regs[11] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + STATUS0);
- ipi_status0_regs[12] = (void *)
+ ipi_status0_regs[12] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + STATUS0);
- ipi_status0_regs[13] = (void *)
+ ipi_status0_regs[13] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + STATUS0);
- ipi_status0_regs[14] = (void *)
+ ipi_status0_regs[14] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + STATUS0);
- ipi_status0_regs[15] = (void *)
+ ipi_status0_regs[15] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + STATUS0);
}
static void ipi_en0_regs_init(void)
{
- ipi_en0_regs[0] = (void *)
+ ipi_en0_regs[0] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + EN0);
- ipi_en0_regs[1] = (void *)
+ ipi_en0_regs[1] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + EN0);
- ipi_en0_regs[2] = (void *)
+ ipi_en0_regs[2] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + EN0);
- ipi_en0_regs[3] = (void *)
+ ipi_en0_regs[3] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + EN0);
- ipi_en0_regs[4] = (void *)
+ ipi_en0_regs[4] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + EN0);
- ipi_en0_regs[5] = (void *)
+ ipi_en0_regs[5] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + EN0);
- ipi_en0_regs[6] = (void *)
+ ipi_en0_regs[6] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + EN0);
- ipi_en0_regs[7] = (void *)
+ ipi_en0_regs[7] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + EN0);
- ipi_en0_regs[8] = (void *)
+ ipi_en0_regs[8] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + EN0);
- ipi_en0_regs[9] = (void *)
+ ipi_en0_regs[9] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + EN0);
- ipi_en0_regs[10] = (void *)
+ ipi_en0_regs[10] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + EN0);
- ipi_en0_regs[11] = (void *)
+ ipi_en0_regs[11] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + EN0);
- ipi_en0_regs[12] = (void *)
+ ipi_en0_regs[12] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + EN0);
- ipi_en0_regs[13] = (void *)
+ ipi_en0_regs[13] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + EN0);
- ipi_en0_regs[14] = (void *)
+ ipi_en0_regs[14] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + EN0);
- ipi_en0_regs[15] = (void *)
+ ipi_en0_regs[15] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + EN0);
}
static void ipi_mailbox_buf_init(void)
{
- ipi_mailbox_buf[0] = (void *)
+ ipi_mailbox_buf[0] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + BUF);
- ipi_mailbox_buf[1] = (void *)
+ ipi_mailbox_buf[1] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + BUF);
- ipi_mailbox_buf[2] = (void *)
+ ipi_mailbox_buf[2] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + BUF);
- ipi_mailbox_buf[3] = (void *)
+ ipi_mailbox_buf[3] = (void __iomem *)
(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + BUF);
- ipi_mailbox_buf[4] = (void *)
+ ipi_mailbox_buf[4] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + BUF);
- ipi_mailbox_buf[5] = (void *)
+ ipi_mailbox_buf[5] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + BUF);
- ipi_mailbox_buf[6] = (void *)
+ ipi_mailbox_buf[6] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + BUF);
- ipi_mailbox_buf[7] = (void *)
+ ipi_mailbox_buf[7] = (void __iomem *)
(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + BUF);
- ipi_mailbox_buf[8] = (void *)
+ ipi_mailbox_buf[8] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + BUF);
- ipi_mailbox_buf[9] = (void *)
+ ipi_mailbox_buf[9] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + BUF);
- ipi_mailbox_buf[10] = (void *)
+ ipi_mailbox_buf[10] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + BUF);
- ipi_mailbox_buf[11] = (void *)
+ ipi_mailbox_buf[11] = (void __iomem *)
(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + BUF);
- ipi_mailbox_buf[12] = (void *)
+ ipi_mailbox_buf[12] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + BUF);
- ipi_mailbox_buf[13] = (void *)
+ ipi_mailbox_buf[13] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + BUF);
- ipi_mailbox_buf[14] = (void *)
+ ipi_mailbox_buf[14] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + BUF);
- ipi_mailbox_buf[15] = (void *)
+ ipi_mailbox_buf[15] = (void __iomem *)
(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + BUF);
}
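
The smp.c hunks only add the __iomem qualifier to sixteen hand-unrolled assignments per register bank; the underlying layout is simply group base plus per-core offset plus register. The same initialisation could be expressed as a loop, as in the sketch below (the loop form and the helper arrays are illustrative, not part of the patch):

    /* Illustrative loop form of ipi_set0_regs_init() above. */
    static const unsigned long group_base[4] = {
    	SMP_CORE_GROUP0_BASE, SMP_CORE_GROUP1_BASE,
    	SMP_CORE_GROUP2_BASE, SMP_CORE_GROUP3_BASE,
    };
    static const unsigned long core_offset[4] = {
    	SMP_CORE0_OFFSET, SMP_CORE1_OFFSET,
    	SMP_CORE2_OFFSET, SMP_CORE3_OFFSET,
    };
    int i;

    for (i = 0; i < 16; i++)
    	ipi_set0_regs[i] = (void __iomem *)
    		(group_base[i / 4] + core_offset[i % 4] + SET0);
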
diff --git a/arch/mips/mm/page-funcs.S b/arch/mips/mm/page-funcs.S
index 43181ac0a1af..42d0516ca18a 100644
--- a/arch/mips/mm/page-funcs.S
+++ b/arch/mips/mm/page-funcs.S
@@ -8,8 +8,8 @@
* Copyright (C) 2012 MIPS Technologies, Inc.
* Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
*/
+#include <linux/export.h>
#include <asm/asm.h>
-#include <asm/export.h>
#include <asm/regdef.h>
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
diff --git a/arch/mips/mm/tlb-funcs.S b/arch/mips/mm/tlb-funcs.S
index 00fef578c8cd..2705d7dcb33e 100644
--- a/arch/mips/mm/tlb-funcs.S
+++ b/arch/mips/mm/tlb-funcs.S
@@ -11,8 +11,8 @@
* Copyright (C) 2012 MIPS Technologies, Inc.
* Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
*/
+#include <linux/export.h>
#include <asm/asm.h>
-#include <asm/export.h>
#include <asm/regdef.h>
#define FASTPATH_SIZE 128
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 79e29bf42a24..80f7293166bb 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -13,9 +13,9 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/clk.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/of_pci.h>
+#include <linux/platform_device.h>
#include <asm/addrspace.h>
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
index e9dd01431f21..1cada09fa5db 100644
--- a/arch/mips/pci/pci-rt2880.c
+++ b/arch/mips/pci/pci-rt2880.c
@@ -13,9 +13,8 @@
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/init.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
-#include <linux/of_pci.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include <asm/mach-ralink/rt288x.h>
diff --git a/arch/mips/pic32/pic32mzda/config.c b/arch/mips/pic32/pic32mzda/config.c
index f69532007717..73be5689e0df 100644
--- a/arch/mips/pic32/pic32mzda/config.c
+++ b/arch/mips/pic32/pic32mzda/config.c
@@ -5,7 +5,7 @@
*/
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/of_platform.h>
+#include <linux/spinlock.h>
#include <asm/mach-pic32/pic32.h>
diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c
index f395ae218470..25341b2319d0 100644
--- a/arch/mips/ralink/ill_acc.c
+++ b/arch/mips/ralink/ill_acc.c
@@ -5,8 +5,10 @@
*/
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <asm/mach-ralink/ralink_regs.h>
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index fa353bc13947..46aef0a1b22a 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -7,7 +7,7 @@
#include <linux/io.h>
#include <linux/bitops.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index 45d60c094496..7f90068c68f2 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -14,7 +14,7 @@
#include <linux/of_fdt.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/reboot.h>
diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
index aaac1e6ec7d9..c3b96861844c 100644
--- a/arch/mips/ralink/prom.c
+++ b/arch/mips/ralink/prom.c
@@ -7,8 +7,6 @@
*/
#include <linux/string.h>
-#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <asm/bootinfo.h>
#include <asm/addrspace.h>
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
index e98845543b77..5ae30b78d38d 100644
--- a/arch/mips/txx9/generic/pci.c
+++ b/arch/mips/txx9/generic/pci.c
@@ -51,6 +51,7 @@ int __init txx9_pci66_check(struct pci_controller *hose, int top_bus,
unsigned short vid;
int cap66 = -1;
u16 stat;
+ int ret;
/* It seems SLC90E66 needs some time after PCI reset... */
mdelay(80);
@@ -60,9 +61,9 @@ int __init txx9_pci66_check(struct pci_controller *hose, int top_bus,
for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++) {
if (PCI_FUNC(pci_devfn))
continue;
- if (early_read_config_word(hose, top_bus, current_bus,
- pci_devfn, PCI_VENDOR_ID, &vid) !=
- PCIBIOS_SUCCESSFUL)
+ ret = early_read_config_word(hose, top_bus, current_bus,
+ pci_devfn, PCI_VENDOR_ID, &vid);
+ if (ret != PCIBIOS_SUCCESSFUL)
continue;
if (vid == 0xffff)
continue;
@@ -343,26 +344,28 @@ static void tc35815_fixup(struct pci_dev *dev)
static void final_fixup(struct pci_dev *dev)
{
+ unsigned long timeout;
unsigned char bist;
+ int ret;
/* Do build-in self test */
- if (pci_read_config_byte(dev, PCI_BIST, &bist) == PCIBIOS_SUCCESSFUL &&
- (bist & PCI_BIST_CAPABLE)) {
- unsigned long timeout;
- pci_set_power_state(dev, PCI_D0);
- pr_info("PCI: %s BIST...", pci_name(dev));
- pci_write_config_byte(dev, PCI_BIST, PCI_BIST_START);
- timeout = jiffies + HZ * 2; /* timeout after 2 sec */
- do {
- pci_read_config_byte(dev, PCI_BIST, &bist);
- if (time_after(jiffies, timeout))
- break;
- } while (bist & PCI_BIST_START);
- if (bist & (PCI_BIST_CODE_MASK | PCI_BIST_START))
- pr_cont("failed. (0x%x)\n", bist);
- else
- pr_cont("OK.\n");
- }
+ ret = pci_read_config_byte(dev, PCI_BIST, &bist);
+ if ((ret != PCIBIOS_SUCCESSFUL) || !(bist & PCI_BIST_CAPABLE))
+ return;
+
+ pci_set_power_state(dev, PCI_D0);
+ pr_info("PCI: %s BIST...", pci_name(dev));
+ pci_write_config_byte(dev, PCI_BIST, PCI_BIST_START);
+ timeout = jiffies + HZ * 2; /* timeout after 2 sec */
+ do {
+ pci_read_config_byte(dev, PCI_BIST, &bist);
+ if (time_after(jiffies, timeout))
+ break;
+ } while (bist & PCI_BIST_START);
+ if (bist & (PCI_BIST_CODE_MASK | PCI_BIST_START))
+ pr_cont("failed. (0x%x)\n", bist);
+ else
+ pr_cont("OK.\n");
}
#ifdef CONFIG_TOSHIBA_FPCIB0
diff --git a/arch/mips/vdso/vdso.lds.S b/arch/mips/vdso/vdso.lds.S
index d90b65724d78..836465e3bcb8 100644
--- a/arch/mips/vdso/vdso.lds.S
+++ b/arch/mips/vdso/vdso.lds.S
@@ -94,7 +94,9 @@ VERSION
#ifndef CONFIG_MIPS_DISABLE_VDSO
global:
__vdso_clock_gettime;
+#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
__vdso_gettimeofday;
+#endif
__vdso_clock_getres;
#if _MIPS_SIM != _MIPS_SIM_ABI64
__vdso_clock_gettime64;
diff --git a/arch/openrisc/include/asm/bug.h b/arch/openrisc/include/asm/bug.h
new file mode 100644
index 000000000000..6d04776eaf10
--- /dev/null
+++ b/arch/openrisc/include/asm/bug.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_OPENRISC_BUG_H
+#define __ASM_OPENRISC_BUG_H
+
+#include <asm-generic/bug.h>
+
+struct pt_regs;
+
+void __noreturn die(const char *str, struct pt_regs *regs, long err);
+
+#endif /* __ASM_OPENRISC_BUG_H */
diff --git a/arch/openrisc/include/asm/page.h b/arch/openrisc/include/asm/page.h
index 52b0d7e76446..44fc1fd56717 100644
--- a/arch/openrisc/include/asm/page.h
+++ b/arch/openrisc/include/asm/page.h
@@ -72,8 +72,15 @@ typedef struct page *pgtable_t;
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
-#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+static inline unsigned long virt_to_pfn(const void *kaddr)
+{
+ return __pa(kaddr) >> PAGE_SHIFT;
+}
+
+static inline void * pfn_to_virt(unsigned long pfn)
+{
+ return (void *)((unsigned long)__va(pfn) << PAGE_SHIFT);
+}
#define virt_to_page(addr) \
(mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
index ed9efb430afa..3b736e74e6ed 100644
--- a/arch/openrisc/include/asm/processor.h
+++ b/arch/openrisc/include/asm/processor.h
@@ -73,6 +73,7 @@ struct thread_struct {
void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
unsigned long __get_wchan(struct task_struct *p);
+void show_registers(struct pt_regs *regs);
#define cpu_relax() barrier()
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index dfa558f98ed8..86e02929f3ac 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -14,6 +14,7 @@
*/
#define __KERNEL_SYSCALLS__
+#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
@@ -38,6 +39,7 @@
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/spr_defs.h>
+#include <asm/switch_to.h>
#include <linux/smp.h>
@@ -119,8 +121,6 @@ void flush_thread(void)
void show_regs(struct pt_regs *regs)
{
- extern void show_registers(struct pt_regs *regs);
-
show_regs_print_info(KERN_DEFAULT);
/* __PHX__ cleanup this mess */
show_registers(regs);
diff --git a/arch/openrisc/kernel/ptrace.c b/arch/openrisc/kernel/ptrace.c
index 0b7d2ca6ba3b..1eeac3b62e9d 100644
--- a/arch/openrisc/kernel/ptrace.c
+++ b/arch/openrisc/kernel/ptrace.c
@@ -27,6 +27,10 @@
#include <asm/thread_info.h>
#include <asm/page.h>
+asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
+
+asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
+
/*
* Copy the thread state to a regset that can be interpreted by userspace.
*
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
index 2e7257a433ff..e2f21a5d8ad9 100644
--- a/arch/openrisc/kernel/signal.c
+++ b/arch/openrisc/kernel/signal.c
@@ -34,6 +34,11 @@ struct rt_sigframe {
unsigned char retcode[16]; /* trampoline code */
};
+asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs);
+
+asmlinkage int do_work_pending(struct pt_regs *regs, unsigned int thread_flags,
+ int syscall);
+
static int restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc)
{
@@ -224,7 +229,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
* mode below.
*/
-int do_signal(struct pt_regs *regs, int syscall)
+static int do_signal(struct pt_regs *regs, int syscall)
{
struct ksignal ksig;
unsigned long continue_addr = 0;
diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c
index 0a7a059e2dff..1c5a2d71d675 100644
--- a/arch/openrisc/kernel/smp.c
+++ b/arch/openrisc/kernel/smp.c
@@ -23,6 +23,8 @@
#include <asm/cacheflush.h>
#include <asm/time.h>
+asmlinkage __init void secondary_start_kernel(void);
+
static void (*smp_cross_call)(const struct cpumask *, unsigned int);
unsigned long secondary_release = -1;
diff --git a/arch/openrisc/kernel/time.c b/arch/openrisc/kernel/time.c
index 8e26c1af5441..764c7bfb5df3 100644
--- a/arch/openrisc/kernel/time.c
+++ b/arch/openrisc/kernel/time.c
@@ -25,6 +25,8 @@
#include <asm/cpuinfo.h>
#include <asm/time.h>
+irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs);
+
/* Test the timer ticks to count, used in sync routine */
inline void openrisc_timer_set(unsigned long count)
{
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index 0aa6b07efda1..9370888c9a7e 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -30,14 +30,23 @@
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
+#include <asm/bug.h>
#include <asm/io.h>
+#include <asm/processor.h>
#include <asm/unwinder.h>
#include <asm/sections.h>
-static int kstack_depth_to_print = 0x180;
int lwa_flag;
static unsigned long __user *lwa_addr;
+asmlinkage void unhandled_exception(struct pt_regs *regs, int ea, int vector);
+asmlinkage void do_trap(struct pt_regs *regs, unsigned long address);
+asmlinkage void do_fpe_trap(struct pt_regs *regs, unsigned long address);
+asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address);
+asmlinkage void do_bus_fault(struct pt_regs *regs, unsigned long address);
+asmlinkage void do_illegal_instruction(struct pt_regs *regs,
+ unsigned long address);
+
static void print_trace(void *data, unsigned long addr, int reliable)
{
const char *loglvl = data;
@@ -143,80 +152,6 @@ bad:
printk("\n");
}
-void nommu_dump_state(struct pt_regs *regs,
- unsigned long ea, unsigned long vector)
-{
- int i;
- unsigned long addr, stack = regs->sp;
-
- printk("\n\r[nommu_dump_state] :: ea %lx, vector %lx\n\r", ea, vector);
-
- printk("CPU #: %d\n"
- " PC: %08lx SR: %08lx SP: %08lx\n",
- 0, regs->pc, regs->sr, regs->sp);
- printk("GPR00: %08lx GPR01: %08lx GPR02: %08lx GPR03: %08lx\n",
- 0L, regs->gpr[1], regs->gpr[2], regs->gpr[3]);
- printk("GPR04: %08lx GPR05: %08lx GPR06: %08lx GPR07: %08lx\n",
- regs->gpr[4], regs->gpr[5], regs->gpr[6], regs->gpr[7]);
- printk("GPR08: %08lx GPR09: %08lx GPR10: %08lx GPR11: %08lx\n",
- regs->gpr[8], regs->gpr[9], regs->gpr[10], regs->gpr[11]);
- printk("GPR12: %08lx GPR13: %08lx GPR14: %08lx GPR15: %08lx\n",
- regs->gpr[12], regs->gpr[13], regs->gpr[14], regs->gpr[15]);
- printk("GPR16: %08lx GPR17: %08lx GPR18: %08lx GPR19: %08lx\n",
- regs->gpr[16], regs->gpr[17], regs->gpr[18], regs->gpr[19]);
- printk("GPR20: %08lx GPR21: %08lx GPR22: %08lx GPR23: %08lx\n",
- regs->gpr[20], regs->gpr[21], regs->gpr[22], regs->gpr[23]);
- printk("GPR24: %08lx GPR25: %08lx GPR26: %08lx GPR27: %08lx\n",
- regs->gpr[24], regs->gpr[25], regs->gpr[26], regs->gpr[27]);
- printk("GPR28: %08lx GPR29: %08lx GPR30: %08lx GPR31: %08lx\n",
- regs->gpr[28], regs->gpr[29], regs->gpr[30], regs->gpr[31]);
- printk(" RES: %08lx oGPR11: %08lx\n",
- regs->gpr[11], regs->orig_gpr11);
-
- printk("Process %s (pid: %d, stackpage=%08lx)\n",
- ((struct task_struct *)(__pa(current)))->comm,
- ((struct task_struct *)(__pa(current)))->pid,
- (unsigned long)current);
-
- printk("\nStack: ");
- printk("Stack dump [0x%08lx]:\n", (unsigned long)stack);
- for (i = 0; i < kstack_depth_to_print; i++) {
- if (((long)stack & (THREAD_SIZE - 1)) == 0)
- break;
- stack++;
-
- printk("%lx :: sp + %02d: 0x%08lx\n", stack, i * 4,
- *((unsigned long *)(__pa(stack))));
- }
- printk("\n");
-
- printk("Call Trace: ");
- i = 1;
- while (((long)stack & (THREAD_SIZE - 1)) != 0) {
- addr = *((unsigned long *)__pa(stack));
- stack++;
-
- if (kernel_text_address(addr)) {
- if (i && ((i % 6) == 0))
- printk("\n ");
- printk(" [<%08lx>]", addr);
- i++;
- }
- }
- printk("\n");
-
- printk("\nCode: ");
-
- for (i = -24; i < 24; i++) {
- unsigned long word;
-
- word = ((unsigned long *)(__pa(regs->pc)))[i];
-
- print_data(regs->pc, word, i);
- }
- printk("\n");
-}
-
/* This is normally the 'Oops' routine */
void __noreturn die(const char *str, struct pt_regs *regs, long err)
{
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index a9dcd4381d1a..29e232d78d82 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -18,6 +18,7 @@
#include <linux/perf_event.h>
#include <linux/uaccess.h>
+#include <asm/bug.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/signal.h>
@@ -30,7 +31,8 @@
*/
volatile pgd_t *current_pgd[NR_CPUS];
-extern void __noreturn die(char *, struct pt_regs *, long);
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long vector, int write_acc);
/*
* This routine handles page faults. It determines the address,
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index d531ab82be12..1dcd78c8f0e9 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -123,8 +123,6 @@ static void __init map_ram(void)
void __init paging_init(void)
{
- extern void tlb_init(void);
-
int i;
printk(KERN_INFO "Setting up paging and PTEs.\n");
diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c
index 91c8259d4b7e..f59ea4c10b0f 100644
--- a/arch/openrisc/mm/ioremap.c
+++ b/arch/openrisc/mm/ioremap.c
@@ -22,7 +22,7 @@
extern int mem_init_done;
-/**
+/*
* OK, this one's a bit tricky... ioremap can get called before memory is
* initialized (early serial console does this) and will want to alloc a page
* for its mapping. No userspace pages will ever get allocated before memory
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
index e2f2a3c3bb22..3115f2e4f864 100644
--- a/arch/openrisc/mm/tlb.c
+++ b/arch/openrisc/mm/tlb.c
@@ -182,12 +182,3 @@ void destroy_context(struct mm_struct *mm)
flush_tlb_mm(mm);
}
-
-/* called once during VM initialization, from init.c */
-
-void __init tlb_init(void)
-{
- /* Do nothing... */
- /* invalidate the entire TLB */
- /* flush_tlb_all(); */
-}
diff --git a/arch/parisc/include/asm/ide.h b/arch/parisc/include/asm/ide.h
deleted file mode 100644
index 7aa75b93a1b6..000000000000
--- a/arch/parisc/include/asm/ide.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * linux/include/asm-parisc/ide.h
- *
- * Copyright (C) 1994-1996 Linus Torvalds & authors
- */
-
-/*
- * This file contains the PARISC architecture specific IDE code.
- */
-
-#ifndef __ASM_PARISC_IDE_H
-#define __ASM_PARISC_IDE_H
-
-/* Generic I/O and MEMIO string operations. */
-
-#define __ide_insw insw
-#define __ide_insl insl
-#define __ide_outsw outsw
-#define __ide_outsl outsl
-
-static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u16 *)addr = __raw_readw(port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u32 *)addr = __raw_readl(port);
- addr += 4;
- }
-}
-
-static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- __raw_writew(*(u16 *)addr, port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_outsl(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- __raw_writel(*(u32 *)addr, port);
- addr += 4;
- }
-}
-
-#endif /* __ASM_PARISC_IDE_H */
diff --git a/arch/powerpc/configs/disable-werror.config b/arch/powerpc/configs/disable-werror.config
index 6ea12a12432c..7776b91da37f 100644
--- a/arch/powerpc/configs/disable-werror.config
+++ b/arch/powerpc/configs/disable-werror.config
@@ -1 +1,2 @@
+# Help: Disable -Werror
CONFIG_PPC_DISABLE_WERROR=y
diff --git a/arch/powerpc/configs/security.config b/arch/powerpc/configs/security.config
index 1c91a35c6a73..0d54e29e2cdf 100644
--- a/arch/powerpc/configs/security.config
+++ b/arch/powerpc/configs/security.config
@@ -1,3 +1,5 @@
+# Help: Common security options for PowerPC builds
+
# This is the equivalent of booting with lockdown=integrity
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
@@ -12,4 +14,4 @@ CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
# UBSAN bounds checking is very cheap and good for hardening
CONFIG_UBSAN=y
-# CONFIG_UBSAN_MISC is not set
\ No newline at end of file
+# CONFIG_UBSAN_MISC is not set
diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
index 803da4a8a9a2..6fc2248ca561 100644
--- a/arch/powerpc/crypto/Kconfig
+++ b/arch/powerpc/crypto/Kconfig
@@ -113,7 +113,7 @@ config CRYPTO_AES_GCM_P10
config CRYPTO_CHACHA20_P10
tristate "Ciphers: ChaCha20, XChacha20, XChacha12 (P10 or later)"
- depends on PPC64 && CPU_LITTLE_ENDIAN
+ depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
select CRYPTO_SKCIPHER
select CRYPTO_LIB_CHACHA_GENERIC
select CRYPTO_ARCH_HAVE_LIB_CHACHA
@@ -127,7 +127,7 @@ config CRYPTO_CHACHA20_P10
config CRYPTO_POLY1305_P10
tristate "Hash functions: Poly1305 (P10 or later)"
- depends on PPC64 && CPU_LITTLE_ENDIAN
+ depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
select CRYPTO_HASH
select CRYPTO_LIB_POLY1305_GENERIC
help
diff --git a/arch/powerpc/include/asm/fs_pd.h b/arch/powerpc/include/asm/fs_pd.h
deleted file mode 100644
index d530f68b4eef..000000000000
--- a/arch/powerpc/include/asm/fs_pd.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Platform information definitions.
- *
- * 2006 (c) MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef FS_PD_H
-#define FS_PD_H
-#include <sysdev/fsl_soc.h>
-#include <asm/time.h>
-
-static inline int uart_baudrate(void)
-{
- return get_baudrate();
-}
-
-static inline int uart_clock(void)
-{
- return ppc_proc_freq;
-}
-
-#endif
diff --git a/arch/powerpc/include/asm/ide.h b/arch/powerpc/include/asm/ide.h
deleted file mode 100644
index ce87a4441ca3..000000000000
--- a/arch/powerpc/include/asm/ide.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 1994-1996 Linus Torvalds & authors
- *
- * This file contains the powerpc architecture specific IDE code.
- */
-#ifndef _ASM_POWERPC_IDE_H
-#define _ASM_POWERPC_IDE_H
-
-#include <linux/compiler.h>
-#include <asm/io.h>
-
-#define __ide_mm_insw(p, a, c) readsw((void __iomem *)(p), (a), (c))
-#define __ide_mm_insl(p, a, c) readsl((void __iomem *)(p), (a), (c))
-#define __ide_mm_outsw(p, a, c) writesw((void __iomem *)(p), (a), (c))
-#define __ide_mm_outsl(p, a, c) writesl((void __iomem *)(p), (a), (c))
-
-#endif /* _ASM_POWERPC_IDE_H */
diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
index 76c7cd78c17e..2d899be746eb 100644
--- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
@@ -21,7 +21,6 @@
#include <linux/device.h>
#include <linux/delay.h>
-#include <linux/fs_uart_pd.h>
#include <linux/fsl_devices.h>
#include <linux/mii.h>
#include <linux/of_address.h>
diff --git a/arch/powerpc/platforms/8xx/tqm8xx_setup.c b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
index 1670dfd30809..d97a7910c594 100644
--- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
@@ -24,7 +24,6 @@
#include <linux/device.h>
#include <linux/delay.h>
-#include <linux/fs_uart_pd.h>
#include <linux/fsl_devices.h>
#include <linux/mii.h>
#include <linux/of_fdt.h>
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 528506f6e2b8..3949ceb79e64 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -22,7 +22,6 @@
#include <linux/phy.h>
#include <linux/spi/spi.h>
#include <linux/fsl_devices.h>
-#include <linux/fs_uart_pd.h>
#include <linux/reboot.h>
#include <linux/atomic.h>
@@ -35,7 +34,6 @@
#include <asm/cpm2.h>
#include <asm/fsl_hcalls.h> /* For the Freescale hypervisor */
-extern void init_smc_ioports(struct fs_uart_platform_info*);
static phys_addr_t immrbase = -1;
phys_addr_t get_immrbase(void)
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 5888fcd8e408..b3b94cd37713 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -3988,7 +3988,7 @@ static void xmon_init(int enable)
}
#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_xmon(int key)
+static void sysrq_handle_xmon(u8 key)
{
if (xmon_is_locked_down()) {
clear_all_bpt();
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index f9fbfbf11ad2..5138dce1a0b4 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -35,6 +35,7 @@ config RISCV
select ARCH_HAS_SET_MEMORY if MMU
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL
+ select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAS_VDSO_DATA
@@ -42,12 +43,14 @@ config RISCV
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_CFI_CLANG
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
select ARCH_SUPPORTS_HUGETLBFS if MMU
select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
+ select ARCH_USES_CFI_TRAPS if CFI_CLANG
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_GENERAL_HUGETLB if !RISCV_ISA_SVNAPOT
@@ -62,6 +65,7 @@ config RISCV
select COMMON_CLK
select CPU_PM if CPU_IDLE || HIBERNATION
select EDAC_SUPPORT
+ select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE)
select GENERIC_ARCH_TOPOLOGY
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
@@ -130,6 +134,7 @@ config RISCV
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_POSIX_CPU_TIMERS_TASK_WORK
+ select HAVE_PREEMPT_DYNAMIC_KEY if !XIP_KERNEL
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RETHOOK if !XIP_KERNEL
select HAVE_RSEQ
@@ -267,6 +272,7 @@ config RISCV_DMA_NONCOHERENT
select ARCH_HAS_SETUP_DMA_OPS
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select DMA_BOUNCE_UNALIGNED_KMALLOC if SWIOTLB
select DMA_DIRECT_REMAP
config AS_HAS_INSN
@@ -836,6 +842,24 @@ config XIP_PHYS_ADDR
be linked for and stored to. This address is dependent on your
own flash usage.
+config RISCV_ISA_FALLBACK
+ bool "Permit falling back to parsing riscv,isa for extension support by default"
+ default y
+ help
+ Parsing the "riscv,isa" devicetree property has been deprecated and
+ replaced by a list of explicitly defined strings. For compatibility
+ with existing platforms, the kernel will fall back to parsing the
+ "riscv,isa" property if the replacements are not found.
+
+ Selecting N here will result in a kernel that does not use the
+ fallback, unless the commandline "riscv_isa_fallback" parameter is
+ present.
+
+ Please see the dt-binding, located at
+ Documentation/devicetree/bindings/riscv/extensions.yaml for details
+ on the replacement properties, "riscv,isa-base" and
+ "riscv,isa-extensions".
+
endmenu # "Boot options"
config BUILTIN_DTB
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 6ec6d52a4180..1329e060c548 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -87,9 +87,6 @@ endif
ifeq ($(CONFIG_CMODEL_MEDANY),y)
KBUILD_CFLAGS += -mcmodel=medany
endif
-ifeq ($(CONFIG_PERF_EVENTS),y)
- KBUILD_CFLAGS += -fno-omit-frame-pointer
-endif
# Avoid generating .eh_frame sections.
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
diff --git a/arch/riscv/configs/32-bit.config b/arch/riscv/configs/32-bit.config
index f6af0f708df4..16ee163847b4 100644
--- a/arch/riscv/configs/32-bit.config
+++ b/arch/riscv/configs/32-bit.config
@@ -1,3 +1,4 @@
+# Help: Build a 32-bit image
CONFIG_ARCH_RV32I=y
CONFIG_32BIT=y
# CONFIG_PORTABLE is not set
diff --git a/arch/riscv/configs/64-bit.config b/arch/riscv/configs/64-bit.config
index 313edc554d84..d872a2d533f2 100644
--- a/arch/riscv/configs/64-bit.config
+++ b/arch/riscv/configs/64-bit.config
@@ -1,2 +1,3 @@
+# Help: Build a 64-bit image
CONFIG_ARCH_RV64I=y
CONFIG_64BIT=y
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
index b8c55fb3ab2c..721ec275ce57 100644
--- a/arch/riscv/include/asm/alternative-macros.h
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -146,7 +146,7 @@
* vendor_id: The CPU vendor ID.
* patch_id: The patch ID (erratum ID or cpufeature ID).
* CONFIG_k: The Kconfig of this patch ID. When Kconfig is disabled, the old
- * content will alwyas be executed.
+ * content will always be executed.
*/
#define ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k) \
_ALTERNATIVE_CFG(old_content, new_content, vendor_id, patch_id, CONFIG_k)
diff --git a/arch/riscv/include/asm/cache.h b/arch/riscv/include/asm/cache.h
index d3036df23ccb..2174fe7bac9a 100644
--- a/arch/riscv/include/asm/cache.h
+++ b/arch/riscv/include/asm/cache.h
@@ -13,6 +13,7 @@
#ifdef CONFIG_RISCV_DMA_NONCOHERENT
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#define ARCH_KMALLOC_MINALIGN (8)
#endif
/*
@@ -23,4 +24,17 @@
#define ARCH_SLAB_MINALIGN 16
#endif
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_RISCV_DMA_NONCOHERENT
+extern int dma_cache_alignment;
+#define dma_get_cache_alignment dma_get_cache_alignment
+static inline int dma_get_cache_alignment(void)
+{
+ return dma_cache_alignment;
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_RISCV_CACHE_H */
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index c4dca559bb97..3cb53c4df27c 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -58,8 +58,10 @@ void riscv_init_cbo_blocksizes(void);
#ifdef CONFIG_RISCV_DMA_NONCOHERENT
void riscv_noncoherent_supported(void);
+void __init riscv_set_dma_cache_alignment(void);
#else
static inline void riscv_noncoherent_supported(void) {}
+static inline void riscv_set_dma_cache_alignment(void) {}
#endif
/*
diff --git a/arch/riscv/include/asm/cfi.h b/arch/riscv/include/asm/cfi.h
new file mode 100644
index 000000000000..56bf9d69d5e3
--- /dev/null
+++ b/arch/riscv/include/asm/cfi.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_CFI_H
+#define _ASM_RISCV_CFI_H
+
+/*
+ * Clang Control Flow Integrity (CFI) support.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#include <linux/cfi.h>
+
+#ifdef CONFIG_CFI_CLANG
+enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
+#else
+static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
+{
+ return BUG_TRAP_TYPE_NONE;
+}
+#endif /* CONFIG_CFI_CLANG */
+
+#endif /* _ASM_RISCV_CFI_H */
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 7bac43a3176e..777cb8299551 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -54,6 +54,7 @@
#ifndef CONFIG_64BIT
#define SATP_PPN _AC(0x003FFFFF, UL)
#define SATP_MODE_32 _AC(0x80000000, UL)
+#define SATP_MODE_SHIFT 31
#define SATP_ASID_BITS 9
#define SATP_ASID_SHIFT 22
#define SATP_ASID_MASK _AC(0x1FF, UL)
@@ -62,6 +63,7 @@
#define SATP_MODE_39 _AC(0x8000000000000000, UL)
#define SATP_MODE_48 _AC(0x9000000000000000, UL)
#define SATP_MODE_57 _AC(0xa000000000000000, UL)
+#define SATP_MODE_SHIFT 60
#define SATP_ASID_BITS 16
#define SATP_ASID_SHIFT 44
#define SATP_ASID_MASK _AC(0xFFFF, UL)
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index c24280774caf..b3b2dfbdf945 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -41,6 +41,7 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
#define compat_elf_check_arch compat_elf_check_arch
#define CORE_DUMP_USE_REGSET
+#define ELF_FDPIC_CORE_EFLAGS 0
#define ELF_EXEC_PAGESIZE (PAGE_SIZE)
/*
@@ -49,7 +50,7 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
-#define ELF_ET_DYN_BASE ((TASK_SIZE / 3) * 2)
+#define ELF_ET_DYN_BASE ((DEFAULT_MAP_WINDOW / 3) * 2)
#ifdef CONFIG_64BIT
#ifdef CONFIG_COMPAT
@@ -69,6 +70,13 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
#define ELF_HWCAP riscv_get_elf_hwcap()
extern unsigned long elf_hwcap;
+#define ELF_FDPIC_PLAT_INIT(_r, _exec_map_addr, _interp_map_addr, dynamic_addr) \
+ do { \
+ (_r)->a1 = _exec_map_addr; \
+ (_r)->a2 = _interp_map_addr; \
+ (_r)->a3 = dynamic_addr; \
+ } while (0)
+
/*
* This yields a string that ld.so will use to load implementation
* specific libraries for optimization. This is more specific in
@@ -78,7 +86,6 @@ extern unsigned long elf_hwcap;
#define COMPAT_ELF_PLATFORM (NULL)
-#ifdef CONFIG_MMU
#define ARCH_DLINFO \
do { \
/* \
@@ -115,6 +122,8 @@ do { \
else \
NEW_AUX_ENT(AT_IGNORE, 0); \
} while (0)
+
+#ifdef CONFIG_MMU
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index f041bfa7f6a0..b7b58258f6c7 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -14,12 +14,17 @@
#include <uapi/asm/hwcap.h>
#define RISCV_ISA_EXT_a ('a' - 'a')
+#define RISCV_ISA_EXT_b ('b' - 'a')
#define RISCV_ISA_EXT_c ('c' - 'a')
#define RISCV_ISA_EXT_d ('d' - 'a')
#define RISCV_ISA_EXT_f ('f' - 'a')
#define RISCV_ISA_EXT_h ('h' - 'a')
#define RISCV_ISA_EXT_i ('i' - 'a')
+#define RISCV_ISA_EXT_j ('j' - 'a')
+#define RISCV_ISA_EXT_k ('k' - 'a')
#define RISCV_ISA_EXT_m ('m' - 'a')
+#define RISCV_ISA_EXT_p ('p' - 'a')
+#define RISCV_ISA_EXT_q ('q' - 'a')
#define RISCV_ISA_EXT_s ('s' - 'a')
#define RISCV_ISA_EXT_u ('u' - 'a')
#define RISCV_ISA_EXT_v ('v' - 'a')
@@ -55,7 +60,6 @@
#define RISCV_ISA_EXT_ZIHPM 42
#define RISCV_ISA_EXT_MAX 64
-#define RISCV_ISA_EXT_NAME_LEN_MAX 32
#ifdef CONFIG_RISCV_M_MODE
#define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SMAIA
@@ -70,12 +74,15 @@
unsigned long riscv_get_elf_hwcap(void);
struct riscv_isa_ext_data {
- /* Name of the extension displayed to userspace via /proc/cpuinfo */
- char uprop[RISCV_ISA_EXT_NAME_LEN_MAX];
- /* The logical ISA extension ID */
- unsigned int isa_ext_id;
+ const unsigned int id;
+ const char *name;
+ const char *property;
};
+extern const struct riscv_isa_ext_data riscv_isa_ext[];
+extern const size_t riscv_isa_ext_count;
+extern bool riscv_isa_fallback;
+
unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext)
diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h
index fce00400c9bc..06e439eeef9a 100644
--- a/arch/riscv/include/asm/insn.h
+++ b/arch/riscv/include/asm/insn.h
@@ -63,6 +63,7 @@
#define RVG_RS1_OPOFF 15
#define RVG_RS2_OPOFF 20
#define RVG_RD_OPOFF 7
+#define RVG_RS1_MASK GENMASK(4, 0)
#define RVG_RD_MASK GENMASK(4, 0)
/* The bit field of immediate value in RVC J instruction */
@@ -130,6 +131,7 @@
#define RVC_C2_RS1_OPOFF 7
#define RVC_C2_RS2_OPOFF 2
#define RVC_C2_RD_OPOFF 7
+#define RVC_C2_RS1_MASK GENMASK(4, 0)
/* parts of opcode for RVG*/
#define RVG_OPCODE_FENCE 0x0f
@@ -289,6 +291,10 @@ static __always_inline bool riscv_insn_is_c_jalr(u32 code)
#define RV_X(X, s, mask) (((X) >> (s)) & (mask))
#define RVC_X(X, s, mask) RV_X(X, s, mask)
+#define RV_EXTRACT_RS1_REG(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RVG_RS1_OPOFF, RVG_RS1_MASK)); })
+
#define RV_EXTRACT_RD_REG(x) \
({typeof(x) x_ = (x); \
(RV_X(x_, RVG_RD_OPOFF, RVG_RD_MASK)); })
@@ -316,6 +322,10 @@ static __always_inline bool riscv_insn_is_c_jalr(u32 code)
(RV_X(x_, RV_B_IMM_11_OPOFF, RV_B_IMM_11_MASK) << RV_B_IMM_11_OFF) | \
(RV_IMM_SIGN(x_) << RV_B_IMM_SIGN_OFF); })
+#define RVC_EXTRACT_C2_RS1_REG(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RVC_C2_RS1_OPOFF, RVC_C2_RS1_MASK)); })
+
#define RVC_EXTRACT_JTYPE_IMM(x) \
({typeof(x) x_ = (x); \
(RVC_X(x_, RVC_J_IMM_3_1_OPOFF, RVC_J_IMM_3_1_MASK) << RVC_J_IMM_3_1_OFF) | \
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 2d8ee53b66c7..1ebf20dfbaa6 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -337,6 +337,15 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);
+void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
+unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
+ u64 __user *uindices);
+int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+
int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
diff --git a/arch/riscv/include/asm/kvm_vcpu_vector.h b/arch/riscv/include/asm/kvm_vcpu_vector.h
index ff994fdd6d0d..27f5bccdd8b0 100644
--- a/arch/riscv/include/asm/kvm_vcpu_vector.h
+++ b/arch/riscv/include/asm/kvm_vcpu_vector.h
@@ -74,9 +74,7 @@ static inline void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
#endif
int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg,
- unsigned long rtype);
+ const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg,
- unsigned long rtype);
+ const struct kvm_one_reg *reg);
#endif
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index 0099dc116168..355504b37f8e 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -20,6 +20,10 @@ typedef struct {
/* A local icache flush is needed before user execution can resume. */
cpumask_t icache_stale_mask;
#endif
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ unsigned long exec_fdpic_loadmap;
+ unsigned long interp_fdpic_loadmap;
+#endif
} mm_context_t;
void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa,
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 2f1c0cde2ca4..b2ba3f79cfe9 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -62,11 +62,16 @@
* struct pages to map half the virtual address space. Then
* position vmemmap directly below the VMALLOC region.
*/
+#define VA_BITS_SV32 32
#ifdef CONFIG_64BIT
+#define VA_BITS_SV39 39
+#define VA_BITS_SV48 48
+#define VA_BITS_SV57 57
+
#define VA_BITS (pgtable_l5_enabled ? \
- 57 : (pgtable_l4_enabled ? 48 : 39))
+ VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39))
#else
-#define VA_BITS 32
+#define VA_BITS VA_BITS_SV32
#endif
#define VMEMMAP_SHIFT \
@@ -111,11 +116,27 @@
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
+#include <asm/compat.h>
#define __page_val_to_pfn(_val) (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
+
+#define VA_USER_SV39 (UL(1) << (VA_BITS_SV39 - 1))
+#define VA_USER_SV48 (UL(1) << (VA_BITS_SV48 - 1))
+#define VA_USER_SV57 (UL(1) << (VA_BITS_SV57 - 1))
+
+#ifdef CONFIG_COMPAT
+#define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
+#define MMAP_MIN_VA_BITS_64 (VA_BITS_SV39)
+#define MMAP_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
+#define MMAP_MIN_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
+#else
+#define MMAP_VA_BITS ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
+#define MMAP_MIN_VA_BITS (VA_BITS_SV39)
+#endif /* CONFIG_COMPAT */
+
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */
@@ -843,14 +864,16 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
* Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
* Note that PGDIR_SIZE must evenly divide TASK_SIZE.
* Task size is:
- * - 0x9fc00000 (~2.5GB) for RV32.
- * - 0x4000000000 ( 256GB) for RV64 using SV39 mmu
- * - 0x800000000000 ( 128TB) for RV64 using SV48 mmu
+ * - 0x9fc00000 (~2.5GB) for RV32.
+ * - 0x4000000000 ( 256GB) for RV64 using SV39 mmu
+ * - 0x800000000000 ( 128TB) for RV64 using SV48 mmu
+ * - 0x100000000000000 ( 64PB) for RV64 using SV57 mmu
*
* Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V
* Instruction Set Manual Volume II: Privileged Architecture" states that
* "load and store effective addresses, which are 64bits, must have bits
* 63–48 all equal to bit 47, or else a page-fault exception will occur."
+ * Similarly for SV57, bits 63–57 must be equal to bit 56.
*/
#ifdef CONFIG_64BIT
#define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2)
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index c950a8d9edef..3e23e1786d05 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -13,19 +13,59 @@
#include <asm/ptrace.h>
+#ifdef CONFIG_64BIT
+#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1))
+#define STACK_TOP_MAX TASK_SIZE_64
+
+#define arch_get_mmap_end(addr, len, flags) \
+({ \
+ unsigned long mmap_end; \
+ typeof(addr) _addr = (addr); \
+ if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
+ mmap_end = STACK_TOP_MAX; \
+ else if ((_addr) >= VA_USER_SV57) \
+ mmap_end = STACK_TOP_MAX; \
+ else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
+ mmap_end = VA_USER_SV48; \
+ else \
+ mmap_end = VA_USER_SV39; \
+ mmap_end; \
+})
+
+#define arch_get_mmap_base(addr, base) \
+({ \
+ unsigned long mmap_base; \
+ typeof(addr) _addr = (addr); \
+ typeof(base) _base = (base); \
+ unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base); \
+ if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
+ mmap_base = (_base); \
+ else if (((_addr) >= VA_USER_SV57) && (VA_BITS >= VA_BITS_SV57)) \
+ mmap_base = VA_USER_SV57 - rnd_gap; \
+ else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
+ mmap_base = VA_USER_SV48 - rnd_gap; \
+ else \
+ mmap_base = VA_USER_SV39 - rnd_gap; \
+ mmap_base; \
+})
+
+#else
+#define DEFAULT_MAP_WINDOW TASK_SIZE
+#define STACK_TOP_MAX TASK_SIZE
+#endif
+#define STACK_ALIGN 16
+
+#define STACK_TOP DEFAULT_MAP_WINDOW
+
/*
* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
-#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
-
-#define STACK_TOP TASK_SIZE
#ifdef CONFIG_64BIT
-#define STACK_TOP_MAX TASK_SIZE_64
+#define TASK_UNMAPPED_BASE PAGE_ALIGN((UL(1) << MMAP_MIN_VA_BITS) / 3)
#else
-#define STACK_TOP_MAX TASK_SIZE
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
#endif
-#define STACK_ALIGN 16
#ifndef __ASSEMBLY__
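
The arch_get_mmap_end()/arch_get_mmap_base() macros above pick the user VA window from the mmap hint: a hint at or above VA_USER_SV48 (on a kernel whose VA_BITS allows Sv48) unlocks the 128 TiB window, otherwise allocations stay inside the Sv39-sized window. A minimal standalone sketch of that arithmetic, assuming a 64-bit host and omitting the compat and Sv57 branches, purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long va_user_sv39 = 1UL << (39 - 1);	/* 0x4000000000, 256 GiB */
	unsigned long va_user_sv48 = 1UL << (48 - 1);	/* 0x800000000000, 128 TiB */
	unsigned long va_user_sv57 = 1UL << (57 - 1);	/* 64 PiB */

	/* hypothetical hint below the Sv48 boundary: mmap stays in the
	 * Sv39-sized window, mirroring the arch_get_mmap_end() fallthrough */
	unsigned long hint = 0x2000000000UL;
	unsigned long mmap_end = (hint >= va_user_sv48) ? va_user_sv48
							: va_user_sv39;

	printf("sv39=%#lx sv48=%#lx sv57=%#lx mmap_end=%#lx\n",
	       va_user_sv39, va_user_sv48, va_user_sv57, mmap_end);
	return 0;
}
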
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index 0148c6bd9675..121fff429dce 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -75,7 +75,7 @@ static inline int syscall_get_arch(struct task_struct *task)
#endif
}
-typedef long (*syscall_t)(ulong, ulong, ulong, ulong, ulong, ulong, ulong);
+typedef long (*syscall_t)(const struct pt_regs *);
static inline void syscall_handler(struct pt_regs *regs, ulong syscall)
{
syscall_t fn;
@@ -87,8 +87,7 @@ static inline void syscall_handler(struct pt_regs *regs, ulong syscall)
#endif
fn = sys_call_table[syscall];
- regs->a0 = fn(regs->orig_a0, regs->a1, regs->a2,
- regs->a3, regs->a4, regs->a5, regs->a6);
+ regs->a0 = fn(regs);
}
static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
diff --git a/arch/riscv/include/asm/syscall_wrapper.h b/arch/riscv/include/asm/syscall_wrapper.h
new file mode 100644
index 000000000000..1d7942c8a6cb
--- /dev/null
+++ b/arch/riscv/include/asm/syscall_wrapper.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * syscall_wrapper.h - riscv specific wrappers to syscall definitions
+ *
+ * Based on arch/arm64/include/syscall_wrapper.h
+ */
+
+#ifndef __ASM_SYSCALL_WRAPPER_H
+#define __ASM_SYSCALL_WRAPPER_H
+
+#include <asm/ptrace.h>
+
+asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
+
+#define SC_RISCV_REGS_TO_ARGS(x, ...) \
+ __MAP(x,__SC_ARGS \
+ ,,regs->orig_a0,,regs->a1,,regs->a2 \
+ ,,regs->a3,,regs->a4,,regs->a5,,regs->a6)
+
+#ifdef CONFIG_COMPAT
+
+#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__riscv_compat_sys##name, ERRNO); \
+ static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_compat_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ } \
+ static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
+ } \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#define COMPAT_SYSCALL_DEFINE0(sname) \
+ asmlinkage long __riscv_compat_sys_##sname(const struct pt_regs *__unused); \
+ ALLOW_ERROR_INJECTION(__riscv_compat_sys_##sname, ERRNO); \
+ asmlinkage long __riscv_compat_sys_##sname(const struct pt_regs *__unused)
+
+#define COND_SYSCALL_COMPAT(name) \
+ asmlinkage long __weak __riscv_compat_sys_##name(const struct pt_regs *regs); \
+ asmlinkage long __weak __riscv_compat_sys_##name(const struct pt_regs *regs) \
+ { \
+ return sys_ni_syscall(); \
+ }
+
+#define COMPAT_SYS_NI(name) \
+ SYSCALL_ALIAS(__riscv_compat_sys_##name, sys_ni_posix_timers);
+
+#endif /* CONFIG_COMPAT */
+
+#define __SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long __riscv_sys##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__riscv_sys##name, ERRNO); \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long __riscv_sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ } \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
+ return ret; \
+ } \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ asmlinkage long __riscv_sys_##sname(const struct pt_regs *__unused); \
+ ALLOW_ERROR_INJECTION(__riscv_sys_##sname, ERRNO); \
+ asmlinkage long __riscv_sys_##sname(const struct pt_regs *__unused)
+
+#define COND_SYSCALL(name) \
+ asmlinkage long __weak __riscv_sys_##name(const struct pt_regs *regs); \
+ asmlinkage long __weak __riscv_sys_##name(const struct pt_regs *regs) \
+ { \
+ return sys_ni_syscall(); \
+ }
+
+#define SYS_NI(name) SYSCALL_ALIAS(__riscv_sys_##name, sys_ni_posix_timers);
+
+#endif /* __ASM_SYSCALL_WRAPPER_H */
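
With ARCH_HAS_SYSCALL_WRAPPER selected, every sys_call_table entry takes only a struct pt_regs pointer, and the wrapper unpacks orig_a0 and a1..a6 itself (orig_a0 rather than a0, since a0 is overwritten by the return value). A standalone userspace sketch of that calling convention, with hypothetical names and a trimmed-down pt_regs, not kernel code:

#include <stdio.h>

struct pt_regs {
	unsigned long orig_a0, a1, a2, a3, a4, a5, a6, a0;
};

/* analogous to __do_sys##name(): the typed handler body */
static long do_sys_example(unsigned int fd, unsigned long len)
{
	return (long)fd + (long)len;
}

/* analogous to __riscv_sys##name(): the only symbol the table sees */
static long riscv_sys_example(const struct pt_regs *regs)
{
	/* first argument comes from orig_a0; a0 holds the return value */
	return do_sys_example(regs->orig_a0, regs->a1);
}

typedef long (*syscall_t)(const struct pt_regs *);

int main(void)
{
	struct pt_regs regs = { .orig_a0 = 3, .a1 = 39 };
	syscall_t fn = riscv_sys_example;
	long ret = fn(&regs);	/* mirrors regs->a0 = fn(regs) in syscall_handler() */

	regs.a0 = ret;
	printf("ret = %ld\n", ret);	/* ret = 42 */
	return 0;
}
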
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 930fdc4101cd..992c5e407104 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -55,6 +55,7 @@ struct kvm_riscv_config {
unsigned long marchid;
unsigned long mimpid;
unsigned long zicboz_block_size;
+ unsigned long satp_mode;
};
/* CORE registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
@@ -124,6 +125,12 @@ enum KVM_RISCV_ISA_EXT_ID {
KVM_RISCV_ISA_EXT_SSAIA,
KVM_RISCV_ISA_EXT_V,
KVM_RISCV_ISA_EXT_SVNAPOT,
+ KVM_RISCV_ISA_EXT_ZBA,
+ KVM_RISCV_ISA_EXT_ZBS,
+ KVM_RISCV_ISA_EXT_ZICNTR,
+ KVM_RISCV_ISA_EXT_ZICSR,
+ KVM_RISCV_ISA_EXT_ZIFENCEI,
+ KVM_RISCV_ISA_EXT_ZIHPM,
KVM_RISCV_ISA_EXT_MAX,
};
@@ -193,6 +200,15 @@ enum KVM_RISCV_SBI_EXT_ID {
/* ISA Extension registers are mapped as type 7 */
#define KVM_REG_RISCV_ISA_EXT (0x07 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_ISA_SINGLE (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_ISA_MULTI_EN (0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_ISA_MULTI_DIS (0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_ISA_MULTI_REG(__ext_id) \
+ ((__ext_id) / __BITS_PER_LONG)
+#define KVM_REG_RISCV_ISA_MULTI_MASK(__ext_id) \
+ (1UL << ((__ext_id) % __BITS_PER_LONG))
+#define KVM_REG_RISCV_ISA_MULTI_REG_LAST \
+ KVM_REG_RISCV_ISA_MULTI_REG(KVM_RISCV_ISA_EXT_MAX - 1)
/* SBI extension registers are mapped as type 8 */
#define KVM_REG_RISCV_SBI_EXT (0x08 << KVM_REG_RISCV_TYPE_SHIFT)
diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h
index 283800130614..6d2d9afaabea 100644
--- a/arch/riscv/include/uapi/asm/ptrace.h
+++ b/arch/riscv/include/uapi/asm/ptrace.h
@@ -10,6 +10,11 @@
#include <linux/types.h>
+#define PTRACE_GETFDPIC 33
+
+#define PTRACE_GETFDPIC_EXEC 0
+#define PTRACE_GETFDPIC_INTERP 1
+
/*
* User-mode register state for core dumps, ptrace, sigcontext
*
diff --git a/arch/riscv/include/uapi/asm/sigcontext.h b/arch/riscv/include/uapi/asm/sigcontext.h
index 8c8712aa9551..cd4f175dc837 100644
--- a/arch/riscv/include/uapi/asm/sigcontext.h
+++ b/arch/riscv/include/uapi/asm/sigcontext.h
@@ -25,7 +25,7 @@ struct __sc_riscv_v_state {
* Signal context structure
*
* This contains the context saved before a signal handler is invoked;
- * it is restored by sys_sigreturn / sys_rt_sigreturn.
+ * it is restored by sys_rt_sigreturn.
*/
struct sigcontext {
struct user_regs_struct sc_regs;
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 506cc4a9a45a..6ac56af42f4a 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -91,6 +91,8 @@ obj-$(CONFIG_CRASH_CORE) += crash_core.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_CFI_CLANG) += cfi.o
+
obj-$(CONFIG_EFI) += efi.o
obj-$(CONFIG_COMPAT) += compat_syscall_table.o
obj-$(CONFIG_COMPAT) += compat_signal.o
diff --git a/arch/riscv/kernel/cfi.c b/arch/riscv/kernel/cfi.c
new file mode 100644
index 000000000000..820158d7a291
--- /dev/null
+++ b/arch/riscv/kernel/cfi.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Clang Control Flow Integrity (CFI) support.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+#include <asm/cfi.h>
+#include <asm/insn.h>
+
+/*
+ * Returns the target address and the expected type when regs->epc points
+ * to a compiler-generated CFI trap.
+ */
+static bool decode_cfi_insn(struct pt_regs *regs, unsigned long *target,
+ u32 *type)
+{
+ unsigned long *regs_ptr = (unsigned long *)regs;
+ int rs1_num;
+ u32 insn;
+
+ *target = *type = 0;
+
+ /*
+ * The compiler generates the following instruction sequence
+ * for indirect call checks:
+ *
+ * lw t1, -4(<reg>)
+ * lui t2, <hi20>
+ * addiw t2, t2, <lo12>
+ * beq t1, t2, .Ltmp1
+ * ebreak ; <- regs->epc
+ * .Ltmp1:
+ * jalr <reg>
+ *
+ * We can read the expected type and the target address from the
+ * registers passed to the beq/jalr instructions.
+ */
+ if (get_kernel_nofault(insn, (void *)regs->epc - 4))
+ return false;
+ if (!riscv_insn_is_beq(insn))
+ return false;
+
+ *type = (u32)regs_ptr[RV_EXTRACT_RS1_REG(insn)];
+
+ if (get_kernel_nofault(insn, (void *)regs->epc) ||
+ get_kernel_nofault(insn, (void *)regs->epc + GET_INSN_LENGTH(insn)))
+ return false;
+
+ if (riscv_insn_is_jalr(insn))
+ rs1_num = RV_EXTRACT_RS1_REG(insn);
+ else if (riscv_insn_is_c_jalr(insn))
+ rs1_num = RVC_EXTRACT_C2_RS1_REG(insn);
+ else
+ return false;
+
+ *target = regs_ptr[rs1_num];
+
+ return true;
+}
+
+/*
+ * Checks if the ebreak trap is because of a CFI failure, and handles the trap
+ * if needed. Returns a bug_trap_type value similarly to report_bug.
+ */
+enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
+{
+ unsigned long target;
+ u32 type;
+
+ if (!is_cfi_trap(regs->epc))
+ return BUG_TRAP_TYPE_NONE;
+
+ if (!decode_cfi_insn(regs, &target, &type))
+ return report_cfi_failure_noaddr(regs, regs->epc);
+
+ return report_cfi_failure(regs, regs->epc, &target, type);
+}
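
decode_cfi_insn() recovers the expected type from the rs1 register of the beq and the call target from the rs1 register of the jalr/c.jalr, using the RV_EXTRACT_RS1_REG()/RVC_EXTRACT_C2_RS1_REG() helpers added to <asm/insn.h> above. A standalone sketch of that bit-field extraction, with GENMASK()/RV_X() re-implemented locally for illustration only (jalr x0, 0(x6) encodes as 0x00030067; rs1 sits in bits [19:15]):

#include <stdint.h>
#include <stdio.h>

/* local stand-ins for the kernel's GENMASK()/RV_X() helpers */
#define GENMASK(h, l)		(((1UL << ((h) - (l) + 1)) - 1) << (l))
#define RV_X(x, s, mask)	(((x) >> (s)) & (mask))

#define RVG_RS1_OPOFF	15
#define RVG_RS1_MASK	GENMASK(4, 0)

int main(void)
{
	uint32_t insn = 0x00030067;	/* jalr x0, 0(x6) */
	unsigned long rs1 = RV_X(insn, RVG_RS1_OPOFF, RVG_RS1_MASK);

	printf("rs1 = x%lu\n", rs1);	/* rs1 = x6 */
	return 0;
}
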
diff --git a/arch/riscv/kernel/compat_syscall_table.c b/arch/riscv/kernel/compat_syscall_table.c
index 651f2b009c28..ad7f2d712f5f 100644
--- a/arch/riscv/kernel/compat_syscall_table.c
+++ b/arch/riscv/kernel/compat_syscall_table.c
@@ -9,11 +9,15 @@
#include <asm/syscall.h>
#undef __SYSCALL
-#define __SYSCALL(nr, call) [nr] = (call),
+#define __SYSCALL(nr, call) asmlinkage long __riscv_##call(const struct pt_regs *);
+#include <asm/unistd.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = __riscv_##call,
asmlinkage long compat_sys_rt_sigreturn(void);
void * const compat_sys_call_table[__NR_syscalls] = {
- [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+ [0 ... __NR_syscalls - 1] = __riscv_sys_ni_syscall,
#include <asm/unistd.h>
};
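
The table is now built with the usual redefine-and-reinclude trick: <asm/unistd.h> is expanded twice, once with __SYSCALL() producing a forward declaration of each __riscv_-prefixed wrapper and once producing the designated initializer. A self-contained sketch of the pattern, where a small macro list stands in for <asm/unistd.h> and the names are made up:

#include <stdio.h>

#define SYSCALL_LIST(X)		\
	X(0, sys_read)		\
	X(1, sys_write)

/* first expansion: forward-declare every wrapper */
#define __SYSCALL(nr, call)	static long __riscv_##call(void);
SYSCALL_LIST(__SYSCALL)
#undef __SYSCALL

/* second expansion: fill the table with designated initializers */
#define __SYSCALL(nr, call)	[nr] = __riscv_##call,
static long (* const table[])(void) = {
	SYSCALL_LIST(__SYSCALL)
};
#undef __SYSCALL

static long __riscv_sys_read(void)  { return 0; }
static long __riscv_sys_write(void) { return 1; }

int main(void)
{
	printf("%ld %ld\n", table[0](), table[1]());
	return 0;
}
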
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 35b854cf078e..c17dacb1141c 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -46,7 +46,7 @@ int riscv_of_processor_hartid(struct device_node *node, unsigned long *hart)
return 0;
}
-int riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hart)
+int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hart)
{
const char *isa;
@@ -66,16 +66,53 @@ int riscv_early_of_processor_hartid(struct device_node *node, unsigned long *har
return -ENODEV;
}
+ if (of_property_read_string(node, "riscv,isa-base", &isa))
+ goto old_interface;
+
+ if (IS_ENABLED(CONFIG_32BIT) && strncasecmp(isa, "rv32i", 5)) {
+ pr_warn("CPU with hartid=%lu does not support rv32i", *hart);
+ return -ENODEV;
+ }
+
+ if (IS_ENABLED(CONFIG_64BIT) && strncasecmp(isa, "rv64i", 5)) {
+ pr_warn("CPU with hartid=%lu does not support rv64i", *hart);
+ return -ENODEV;
+ }
+
+ if (!of_property_present(node, "riscv,isa-extensions"))
+ return -ENODEV;
+
+ if (of_property_match_string(node, "riscv,isa-extensions", "i") < 0 ||
+ of_property_match_string(node, "riscv,isa-extensions", "m") < 0 ||
+ of_property_match_string(node, "riscv,isa-extensions", "a") < 0) {
+ pr_warn("CPU with hartid=%lu does not support ima", *hart);
+ return -ENODEV;
+ }
+
+ return 0;
+
+old_interface:
+ if (!riscv_isa_fallback) {
+ pr_warn("CPU with hartid=%lu is invalid: this kernel does not parse \"riscv,isa\"",
+ *hart);
+ return -ENODEV;
+ }
+
if (of_property_read_string(node, "riscv,isa", &isa)) {
- pr_warn("CPU with hartid=%lu has no \"riscv,isa\" property\n", *hart);
+ pr_warn("CPU with hartid=%lu has no \"riscv,isa-base\" or \"riscv,isa\" property\n",
+ *hart);
return -ENODEV;
}
- if (IS_ENABLED(CONFIG_32BIT) && strncasecmp(isa, "rv32ima", 7))
+ if (IS_ENABLED(CONFIG_32BIT) && strncasecmp(isa, "rv32ima", 7)) {
+ pr_warn("CPU with hartid=%lu does not support rv32ima", *hart);
return -ENODEV;
+ }
- if (IS_ENABLED(CONFIG_64BIT) && strncasecmp(isa, "rv64ima", 7))
+ if (IS_ENABLED(CONFIG_64BIT) && strncasecmp(isa, "rv64ima", 7)) {
+ pr_warn("CPU with hartid=%lu does not support rv64ima", *hart);
return -ENODEV;
+ }
return 0;
}
@@ -165,132 +202,46 @@ arch_initcall(riscv_cpuinfo_init);
#ifdef CONFIG_PROC_FS
-#define __RISCV_ISA_EXT_DATA(UPROP, EXTID) \
- { \
- .uprop = #UPROP, \
- .isa_ext_id = EXTID, \
- }
-
-/*
- * The canonical order of ISA extension names in the ISA string is defined in
- * chapter 27 of the unprivileged specification.
- *
- * Ordinarily, for in-kernel data structures, this order is unimportant but
- * isa_ext_arr defines the order of the ISA string in /proc/cpuinfo.
- *
- * The specification uses vague wording, such as should, when it comes to
- * ordering, so for our purposes the following rules apply:
- *
- * 1. All multi-letter extensions must be separated from other extensions by an
- * underscore.
- *
- * 2. Additional standard extensions (starting with 'Z') must be sorted after
- * single-letter extensions and before any higher-privileged extensions.
-
- * 3. The first letter following the 'Z' conventionally indicates the most
- * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
- * If multiple 'Z' extensions are named, they must be ordered first by
- * category, then alphabetically within a category.
- *
- * 3. Standard supervisor-level extensions (starting with 'S') must be listed
- * after standard unprivileged extensions. If multiple supervisor-level
- * extensions are listed, they must be ordered alphabetically.
- *
- * 4. Standard machine-level extensions (starting with 'Zxm') must be listed
- * after any lower-privileged, standard extensions. If multiple
- * machine-level extensions are listed, they must be ordered
- * alphabetically.
- *
- * 5. Non-standard extensions (starting with 'X') must be listed after all
- * standard extensions. If multiple non-standard extensions are listed, they
- * must be ordered alphabetically.
- *
- * An example string following the order is:
- * rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux
- *
- * New entries to this struct should follow the ordering rules described above.
- */
-static struct riscv_isa_ext_data isa_ext_arr[] = {
- __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
- __RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ),
- __RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
- __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
- __RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI),
- __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
- __RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
- __RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA),
- __RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
- __RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS),
- __RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
- __RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA),
- __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
- __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
- __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
- __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
- __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
- __RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX),
-};
-
-static void print_isa_ext(struct seq_file *f)
+static void print_isa(struct seq_file *f)
{
- struct riscv_isa_ext_data *edata;
- int i = 0, arr_sz;
-
- arr_sz = ARRAY_SIZE(isa_ext_arr) - 1;
+ seq_puts(f, "isa\t\t: ");
- /* No extension support available */
- if (arr_sz <= 0)
- return;
+ if (IS_ENABLED(CONFIG_32BIT))
+ seq_write(f, "rv32", 4);
+ else
+ seq_write(f, "rv64", 4);
- for (i = 0; i <= arr_sz; i++) {
- edata = &isa_ext_arr[i];
- if (!__riscv_isa_extension_available(NULL, edata->isa_ext_id))
+ for (int i = 0; i < riscv_isa_ext_count; i++) {
+ if (!__riscv_isa_extension_available(NULL, riscv_isa_ext[i].id))
continue;
- seq_printf(f, "_%s", edata->uprop);
- }
-}
-/*
- * These are the only valid base (single letter) ISA extensions as per the spec.
- * It also specifies the canonical order in which it appears in the spec.
- * Some of the extension may just be a place holder for now (B, K, P, J).
- * This should be updated once corresponding extensions are ratified.
- */
-static const char base_riscv_exts[13] = "imafdqcbkjpvh";
+ /* Only multi-letter extensions are split by underscores */
+ if (strnlen(riscv_isa_ext[i].name, 2) != 1)
+ seq_puts(f, "_");
-static void print_isa(struct seq_file *f, const char *isa)
-{
- int i;
-
- seq_puts(f, "isa\t\t: ");
- /* Print the rv[64/32] part */
- seq_write(f, isa, 4);
- for (i = 0; i < sizeof(base_riscv_exts); i++) {
- if (__riscv_isa_extension_available(NULL, base_riscv_exts[i] - 'a'))
- /* Print only enabled the base ISA extensions */
- seq_write(f, &base_riscv_exts[i], 1);
+ seq_printf(f, "%s", riscv_isa_ext[i].name);
}
- print_isa_ext(f);
+
seq_puts(f, "\n");
}
static void print_mmu(struct seq_file *f)
{
- char sv_type[16];
+ const char *sv_type;
#ifdef CONFIG_MMU
#if defined(CONFIG_32BIT)
- strncpy(sv_type, "sv32", 5);
+ sv_type = "sv32";
#elif defined(CONFIG_64BIT)
if (pgtable_l5_enabled)
- strncpy(sv_type, "sv57", 5);
+ sv_type = "sv57";
else if (pgtable_l4_enabled)
- strncpy(sv_type, "sv48", 5);
+ sv_type = "sv48";
else
- strncpy(sv_type, "sv39", 5);
+ sv_type = "sv39";
#endif
#else
- strncpy(sv_type, "none", 5);
+ sv_type = "none";
#endif /* CONFIG_MMU */
seq_printf(f, "mmu\t\t: %s\n", sv_type);
}
@@ -321,27 +272,21 @@ static int c_show(struct seq_file *m, void *v)
unsigned long cpu_id = (unsigned long)v - 1;
struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
struct device_node *node;
- const char *compat, *isa;
+ const char *compat;
seq_printf(m, "processor\t: %lu\n", cpu_id);
seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id));
+ print_isa(m);
+ print_mmu(m);
if (acpi_disabled) {
node = of_get_cpu_node(cpu_id, NULL);
- if (!of_property_read_string(node, "riscv,isa", &isa))
- print_isa(m, isa);
- print_mmu(m);
if (!of_property_read_string(node, "compatible", &compat) &&
strcmp(compat, "riscv"))
seq_printf(m, "uarch\t\t: %s\n", compat);
of_node_put(node);
- } else {
- if (!acpi_get_riscv_isa(NULL, cpu_id, &isa))
- print_isa(m, isa);
-
- print_mmu(m);
}
seq_printf(m, "mvendorid\t: 0x%lx\n", ci->mvendorid);
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 3cc99e928744..ef7b4fd9e876 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -98,29 +98,252 @@ static bool riscv_isa_extension_check(int id)
return true;
}
-void __init riscv_fill_hwcap(void)
+#define __RISCV_ISA_EXT_DATA(_name, _id) { \
+ .name = #_name, \
+ .property = #_name, \
+ .id = _id, \
+}
+
+/*
+ * The canonical order of ISA extension names in the ISA string is defined in
+ * chapter 27 of the unprivileged specification.
+ *
+ * Ordinarily, for in-kernel data structures, this order is unimportant but
+ * isa_ext_arr defines the order of the ISA string in /proc/cpuinfo.
+ *
+ * The specification uses vague wording, such as should, when it comes to
+ * ordering, so for our purposes the following rules apply:
+ *
+ * 1. All multi-letter extensions must be separated from other extensions by an
+ * underscore.
+ *
+ * 2. Additional standard extensions (starting with 'Z') must be sorted after
+ * single-letter extensions and before any higher-privileged extensions.
+ *
+ * 3. The first letter following the 'Z' conventionally indicates the most
+ * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
+ * If multiple 'Z' extensions are named, they must be ordered first by
+ * category, then alphabetically within a category.
+ *
+ * 4. Standard supervisor-level extensions (starting with 'S') must be listed
+ * after standard unprivileged extensions. If multiple supervisor-level
+ * extensions are listed, they must be ordered alphabetically.
+ *
+ * 5. Standard machine-level extensions (starting with 'Zxm') must be listed
+ * after any lower-privileged, standard extensions. If multiple
+ * machine-level extensions are listed, they must be ordered
+ * alphabetically.
+ *
+ * 6. Non-standard extensions (starting with 'X') must be listed after all
+ * standard extensions. If multiple non-standard extensions are listed, they
+ * must be ordered alphabetically.
+ *
+ * An example string following the order is:
+ * rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux
+ *
+ * New entries to this array should follow the ordering rules described above.
+ */
+const struct riscv_isa_ext_data riscv_isa_ext[] = {
+ __RISCV_ISA_EXT_DATA(i, RISCV_ISA_EXT_i),
+ __RISCV_ISA_EXT_DATA(m, RISCV_ISA_EXT_m),
+ __RISCV_ISA_EXT_DATA(a, RISCV_ISA_EXT_a),
+ __RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f),
+ __RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d),
+ __RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q),
+ __RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c),
+ __RISCV_ISA_EXT_DATA(b, RISCV_ISA_EXT_b),
+ __RISCV_ISA_EXT_DATA(k, RISCV_ISA_EXT_k),
+ __RISCV_ISA_EXT_DATA(j, RISCV_ISA_EXT_j),
+ __RISCV_ISA_EXT_DATA(p, RISCV_ISA_EXT_p),
+ __RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v),
+ __RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
+ __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
+ __RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ),
+ __RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
+ __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
+ __RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI),
+ __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
+ __RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
+ __RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA),
+ __RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
+ __RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS),
+ __RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
+ __RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA),
+ __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
+ __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
+ __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
+ __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
+ __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
+};
+
+const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
+
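Because the array above fixes the order in which /proc/cpuinfo lists extensions, the same table-driven idea can be shown with a stand-alone sketch. This is illustration only, not part of the patch; the tiny table and the is_supported() check are made up for the example:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct ext { const char *name; };

/* Hypothetical table, already in canonical order like riscv_isa_ext[] above. */
static const struct ext exts[] = {
	{ "i" }, { "m" }, { "a" }, { "c" }, { "zicbom" }, { "sstc" },
};

static bool is_supported(const char *name)
{
	/* Stand-in for the kernel's per-hart bitmap test. */
	return strcmp(name, "zicbom") != 0;
}

int main(void)
{
	printf("rv64");
	for (size_t i = 0; i < sizeof(exts) / sizeof(exts[0]); i++) {
		if (!is_supported(exts[i].name))
			continue;
		/* One plausible rendering: multi-letter names set off with '_'. */
		if (strlen(exts[i].name) > 1)
			printf("_");
		printf("%s", exts[i].name);
	}
	printf("\n");
	return 0;
}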
+static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo,
+ unsigned long *isa2hwcap, const char *isa)
+{
+ /*
+ * For all possible cpus, we have already validated in
+ * the boot process that they at least contain "rv" and
+ * whichever of "32"/"64" this kernel supports, and so this
+ * section can be skipped.
+ */
+ isa += 4;
+
+ while (*isa) {
+ const char *ext = isa++;
+ const char *ext_end = isa;
+ bool ext_long = false, ext_err = false;
+
+ switch (*ext) {
+ case 's':
+ /*
+ * Workaround for invalid single-letter 's' & 'u' (QEMU).
+ * No need to set the bit in riscv_isa as 's' & 'u' are
+ * not valid ISA extensions. This works until a multi-letter
+ * extension starting with "Su" appears.
+ */
+ if (ext[-1] != '_' && ext[1] == 'u') {
+ ++isa;
+ ext_err = true;
+ break;
+ }
+ fallthrough;
+ case 'S':
+ case 'x':
+ case 'X':
+ case 'z':
+ case 'Z':
+ /*
+ * Before attempting to parse the extension itself, we find its end.
+ * As multi-letter extensions must be split from other multi-letter
+ * extensions with an "_", the end of a multi-letter extension will
+ * either be the null character or the "_" at the start of the next
+ * multi-letter extension.
+ *
+ * Next, as the extension's version is currently ignored, we
+ * eliminate that portion. This is done by parsing backwards from
+ * the end of the extension, removing any numbers. This may be a
+ * major or minor number however, so the process is repeated if a
+ * minor number was found.
+ *
+ * ext_end is intended to represent the first character *after* the
+ * name portion of an extension, but will be decremented to the last
+ * character itself while eliminating the extension's version number.
+ * A simple re-increment solves this problem.
+ */
+ ext_long = true;
+ for (; *isa && *isa != '_'; ++isa)
+ if (unlikely(!isalnum(*isa)))
+ ext_err = true;
+
+ ext_end = isa;
+ if (unlikely(ext_err))
+ break;
+
+ if (!isdigit(ext_end[-1]))
+ break;
+
+ while (isdigit(*--ext_end))
+ ;
+
+ if (tolower(ext_end[0]) != 'p' || !isdigit(ext_end[-1])) {
+ ++ext_end;
+ break;
+ }
+
+ while (isdigit(*--ext_end))
+ ;
+
+ ++ext_end;
+ break;
+ default:
+ /*
+ * Things are a little easier for single-letter extensions, as they
+ * are parsed forwards.
+ *
+ * After checking that our starting position is valid, we need to
+ * ensure that, when isa was incremented at the start of the loop,
+ * it arrived at the start of the next extension.
+ *
+ * If we are already on a non-digit, there is nothing to do. Either
+ * we have a multi-letter extension's _, or the start of an
+ * extension.
+ *
+ * Otherwise we have found the current extension's major version
+ * number. Parse past it, and a subsequent p/minor version number
+ * if present. The `p` extension must not appear immediately after
+ * a number, so there is no fear of missing it.
+ *
+ */
+ if (unlikely(!isalpha(*ext))) {
+ ext_err = true;
+ break;
+ }
+
+ if (!isdigit(*isa))
+ break;
+
+ while (isdigit(*++isa))
+ ;
+
+ if (tolower(*isa) != 'p')
+ break;
+
+ if (!isdigit(*++isa)) {
+ --isa;
+ break;
+ }
+
+ while (isdigit(*++isa))
+ ;
+
+ break;
+ }
+
+ /*
+ * The parser expects that at the start of an iteration isa points to the
+ * first character of the next extension. As we stop parsing an extension
+ * on meeting a non-alphanumeric character, an extra increment is needed
+ * where the succeeding extension is a multi-letter one prefixed with an "_".
+ */
+ if (*isa == '_')
+ ++isa;
+
+#define SET_ISA_EXT_MAP(name, bit) \
+ do { \
+ if ((ext_end - ext == strlen(name)) && \
+ !strncasecmp(ext, name, strlen(name)) && \
+ riscv_isa_extension_check(bit)) \
+ set_bit(bit, isainfo->isa); \
+ } while (false) \
+
+ if (unlikely(ext_err))
+ continue;
+ if (!ext_long) {
+ int nr = tolower(*ext) - 'a';
+
+ if (riscv_isa_extension_check(nr)) {
+ *this_hwcap |= isa2hwcap[nr];
+ set_bit(nr, isainfo->isa);
+ }
+ } else {
+ for (int i = 0; i < riscv_isa_ext_count; i++)
+ SET_ISA_EXT_MAP(riscv_isa_ext[i].name,
+ riscv_isa_ext[i].id);
+ }
+#undef SET_ISA_EXT_MAP
+ }
+}
+
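The version-stripping rule described in the comments above can be exercised in isolation with a small user-space sketch. Illustration only; the token below is an assumption and error handling is omitted:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/*
 * Return the length of the extension name once any trailing
 * "<major>[p<minor>]" version suffix has been removed, mirroring
 * the backwards scan described in the comment above.
 */
static size_t ext_name_len(const char *ext, size_t len)
{
	const char *end = ext + len;

	if (!isdigit((unsigned char)end[-1]))
		return len;

	while (isdigit((unsigned char)*--end))
		;

	if (tolower((unsigned char)end[0]) != 'p' ||
	    !isdigit((unsigned char)end[-1]))
		return end - ext + 1;

	while (isdigit((unsigned char)*--end))
		;

	return end - ext + 1;
}

int main(void)
{
	const char *tok = "zicbom1p0";	/* hypothetical multi-letter token */

	printf("%.*s\n", (int)ext_name_len(tok, strlen(tok)), tok);	/* prints "zicbom" */
	return 0;
}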
+static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
{
struct device_node *node;
const char *isa;
- char print_str[NUM_ALPHA_EXTS + 1];
- int i, j, rc;
- unsigned long isa2hwcap[26] = {0};
+ int rc;
struct acpi_table_header *rhct;
acpi_status status;
unsigned int cpu;
- isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I;
- isa2hwcap['m' - 'a'] = COMPAT_HWCAP_ISA_M;
- isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A;
- isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F;
- isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D;
- isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C;
- isa2hwcap['v' - 'a'] = COMPAT_HWCAP_ISA_V;
-
- elf_hwcap = 0;
-
- bitmap_zero(riscv_isa, RISCV_ISA_EXT_MAX);
-
if (!acpi_disabled) {
status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
if (ACPI_FAILURE(status))
@@ -152,169 +375,7 @@ void __init riscv_fill_hwcap(void)
}
}
- /*
- * For all possible cpus, we have already validated in
- * the boot process that they at least contain "rv" and
- * whichever of "32"/"64" this kernel supports, and so this
- * section can be skipped.
- */
- isa += 4;
-
- while (*isa) {
- const char *ext = isa++;
- const char *ext_end = isa;
- bool ext_long = false, ext_err = false;
-
- switch (*ext) {
- case 's':
- /*
- * Workaround for invalid single-letter 's' & 'u'(QEMU).
- * No need to set the bit in riscv_isa as 's' & 'u' are
- * not valid ISA extensions. It works until multi-letter
- * extension starting with "Su" appears.
- */
- if (ext[-1] != '_' && ext[1] == 'u') {
- ++isa;
- ext_err = true;
- break;
- }
- fallthrough;
- case 'S':
- case 'x':
- case 'X':
- case 'z':
- case 'Z':
- /*
- * Before attempting to parse the extension itself, we find its end.
- * As multi-letter extensions must be split from other multi-letter
- * extensions with an "_", the end of a multi-letter extension will
- * either be the null character or the "_" at the start of the next
- * multi-letter extension.
- *
- * Next, as the extensions version is currently ignored, we
- * eliminate that portion. This is done by parsing backwards from
- * the end of the extension, removing any numbers. This may be a
- * major or minor number however, so the process is repeated if a
- * minor number was found.
- *
- * ext_end is intended to represent the first character *after* the
- * name portion of an extension, but will be decremented to the last
- * character itself while eliminating the extensions version number.
- * A simple re-increment solves this problem.
- */
- ext_long = true;
- for (; *isa && *isa != '_'; ++isa)
- if (unlikely(!isalnum(*isa)))
- ext_err = true;
-
- ext_end = isa;
- if (unlikely(ext_err))
- break;
-
- if (!isdigit(ext_end[-1]))
- break;
-
- while (isdigit(*--ext_end))
- ;
-
- if (tolower(ext_end[0]) != 'p' || !isdigit(ext_end[-1])) {
- ++ext_end;
- break;
- }
-
- while (isdigit(*--ext_end))
- ;
-
- ++ext_end;
- break;
- default:
- /*
- * Things are a little easier for single-letter extensions, as they
- * are parsed forwards.
- *
- * After checking that our starting position is valid, we need to
- * ensure that, when isa was incremented at the start of the loop,
- * that it arrived at the start of the next extension.
- *
- * If we are already on a non-digit, there is nothing to do. Either
- * we have a multi-letter extension's _, or the start of an
- * extension.
- *
- * Otherwise we have found the current extension's major version
- * number. Parse past it, and a subsequent p/minor version number
- * if present. The `p` extension must not appear immediately after
- * a number, so there is no fear of missing it.
- *
- */
- if (unlikely(!isalpha(*ext))) {
- ext_err = true;
- break;
- }
-
- if (!isdigit(*isa))
- break;
-
- while (isdigit(*++isa))
- ;
-
- if (tolower(*isa) != 'p')
- break;
-
- if (!isdigit(*++isa)) {
- --isa;
- break;
- }
-
- while (isdigit(*++isa))
- ;
-
- break;
- }
-
- /*
- * The parser expects that at the start of an iteration isa points to the
- * first character of the next extension. As we stop parsing an extension
- * on meeting a non-alphanumeric character, an extra increment is needed
- * where the succeeding extension is a multi-letter prefixed with an "_".
- */
- if (*isa == '_')
- ++isa;
-
-#define SET_ISA_EXT_MAP(name, bit) \
- do { \
- if ((ext_end - ext == sizeof(name) - 1) && \
- !strncasecmp(ext, name, sizeof(name) - 1) && \
- riscv_isa_extension_check(bit)) \
- set_bit(bit, isainfo->isa); \
- } while (false) \
-
- if (unlikely(ext_err))
- continue;
- if (!ext_long) {
- int nr = tolower(*ext) - 'a';
-
- if (riscv_isa_extension_check(nr)) {
- this_hwcap |= isa2hwcap[nr];
- set_bit(nr, isainfo->isa);
- }
- } else {
- /* sorted alphabetically */
- SET_ISA_EXT_MAP("smaia", RISCV_ISA_EXT_SMAIA);
- SET_ISA_EXT_MAP("ssaia", RISCV_ISA_EXT_SSAIA);
- SET_ISA_EXT_MAP("sscofpmf", RISCV_ISA_EXT_SSCOFPMF);
- SET_ISA_EXT_MAP("sstc", RISCV_ISA_EXT_SSTC);
- SET_ISA_EXT_MAP("svinval", RISCV_ISA_EXT_SVINVAL);
- SET_ISA_EXT_MAP("svnapot", RISCV_ISA_EXT_SVNAPOT);
- SET_ISA_EXT_MAP("svpbmt", RISCV_ISA_EXT_SVPBMT);
- SET_ISA_EXT_MAP("zba", RISCV_ISA_EXT_ZBA);
- SET_ISA_EXT_MAP("zbb", RISCV_ISA_EXT_ZBB);
- SET_ISA_EXT_MAP("zbs", RISCV_ISA_EXT_ZBS);
- SET_ISA_EXT_MAP("zicbom", RISCV_ISA_EXT_ZICBOM);
- SET_ISA_EXT_MAP("zicboz", RISCV_ISA_EXT_ZICBOZ);
- SET_ISA_EXT_MAP("zihintpause", RISCV_ISA_EXT_ZIHINTPAUSE);
- }
-#undef SET_ISA_EXT_MAP
- }
+ riscv_parse_isa_string(&this_hwcap, isainfo, isa2hwcap, isa);
/*
* These ones were as they were part of the base ISA when the
@@ -346,9 +407,107 @@ void __init riscv_fill_hwcap(void)
if (!acpi_disabled && rhct)
acpi_put_table((struct acpi_table_header *)rhct);
+}
+
+static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ unsigned long this_hwcap = 0;
+ struct device_node *cpu_node;
+ struct riscv_isainfo *isainfo = &hart_isa[cpu];
+
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node) {
+ pr_warn("Unable to find cpu node\n");
+ continue;
+ }
+
+ if (!of_property_present(cpu_node, "riscv,isa-extensions")) {
+ of_node_put(cpu_node);
+ continue;
+ }
+
+ for (int i = 0; i < riscv_isa_ext_count; i++) {
+ if (of_property_match_string(cpu_node, "riscv,isa-extensions",
+ riscv_isa_ext[i].property) < 0)
+ continue;
+
+ if (!riscv_isa_extension_check(riscv_isa_ext[i].id))
+ continue;
+
+ /* Only single letter extensions get set in hwcap */
+ if (strnlen(riscv_isa_ext[i].name, 2) == 1)
+ this_hwcap |= isa2hwcap[riscv_isa_ext[i].id];
+
+ set_bit(riscv_isa_ext[i].id, isainfo->isa);
+ }
+
+ of_node_put(cpu_node);
+
+ /*
+ * All "okay" harts should have same isa. Set HWCAP based on
+ * common capabilities of every "okay" hart, in case they don't.
+ */
+ if (elf_hwcap)
+ elf_hwcap &= this_hwcap;
+ else
+ elf_hwcap = this_hwcap;
+
+ if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
+ bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
+ else
+ bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
+ }
+
+ if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
+ return -ENOENT;
+
+ return 0;
+}
+
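The `elf_hwcap &= this_hwcap` intersection above means userspace is only ever told about capabilities that every hart shares. A tiny stand-alone illustration; the bit layout here is an assumption that simply follows the 'a'..'z' -> 0..25 convention of the isa2hwcap[] table used later in this file:

#include <stdio.h>

int main(void)
{
	/* hart0 advertises i/m/a/f/d/c, hart1 only i/m/a/c (made-up values). */
	unsigned long hart0 = (1 << ('i'-'a')) | (1 << ('m'-'a')) | (1 << ('a'-'a')) |
			      (1 << ('f'-'a')) | (1 << ('d'-'a')) | (1 << ('c'-'a'));
	unsigned long hart1 = (1 << ('i'-'a')) | (1 << ('m'-'a')) | (1 << ('a'-'a')) |
			      (1 << ('c'-'a'));

	/* Like elf_hwcap &= this_hwcap: only the common capabilities survive. */
	printf("common hwcap: %#lx\n", hart0 & hart1);
	return 0;
}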
+#ifdef CONFIG_RISCV_ISA_FALLBACK
+bool __initdata riscv_isa_fallback = true;
+#else
+bool __initdata riscv_isa_fallback;
+static int __init riscv_isa_fallback_setup(char *__unused)
+{
+ riscv_isa_fallback = true;
+ return 1;
+}
+early_param("riscv_isa_fallback", riscv_isa_fallback_setup);
+#endif
+
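In other words, when CONFIG_RISCV_ISA_FALLBACK is disabled, the early_param() above still allows the fallback to be re-enabled at boot by adding the bare parameter to the kernel command line; for example (the surrounding arguments are placeholders, not part of the patch):

	root=/dev/vda ro riscv_isa_fallback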
+void __init riscv_fill_hwcap(void)
+{
+ char print_str[NUM_ALPHA_EXTS + 1];
+ unsigned long isa2hwcap[26] = {0};
+ int i, j;
+
+ isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I;
+ isa2hwcap['m' - 'a'] = COMPAT_HWCAP_ISA_M;
+ isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A;
+ isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F;
+ isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D;
+ isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C;
+ isa2hwcap['v' - 'a'] = COMPAT_HWCAP_ISA_V;
+
+ if (!acpi_disabled) {
+ riscv_fill_hwcap_from_isa_string(isa2hwcap);
+ } else {
+ int ret = riscv_fill_hwcap_from_ext_list(isa2hwcap);
+
+ if (ret && riscv_isa_fallback) {
+ pr_info("Falling back to deprecated \"riscv,isa\"\n");
+ riscv_fill_hwcap_from_isa_string(isa2hwcap);
+ }
+ }
- /* We don't support systems with F but without D, so mask those out
- * here. */
+ /*
+ * We don't support systems with F but without D, so mask those out
+ * here.
+ */
if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) {
pr_info("This kernel does not support systems with F but not D\n");
elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 11c3b94c4534..3710ea5d160f 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -289,10 +289,6 @@ clear_bss:
blt a3, a4, clear_bss
clear_bss_done:
#endif
- /* Save hart ID and DTB physical address */
- mv s0, a0
- mv s1, a1
-
la a2, boot_cpu_hartid
XIP_FIXUP_OFFSET a2
REG_S a0, (a2)
@@ -306,7 +302,7 @@ clear_bss_done:
la a0, __dtb_start
XIP_FIXUP_OFFSET a0
#else
- mv a0, s1
+ mv a0, a1
#endif /* CONFIG_BUILTIN_DTB */
call setup_vm
#ifdef CONFIG_MMU
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index 8a6e5a9e842a..8818a8fa9ff3 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -3,6 +3,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
@@ -47,15 +48,19 @@
addi sp, sp, 4*SZREG
.endm
-ENTRY(ftrace_stub)
+SYM_TYPED_FUNC_START(ftrace_stub)
#ifdef CONFIG_DYNAMIC_FTRACE
.global MCOUNT_NAME
.set MCOUNT_NAME, ftrace_stub
#endif
ret
-ENDPROC(ftrace_stub)
+SYM_FUNC_END(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_TYPED_FUNC_START(ftrace_stub_graph)
+ ret
+SYM_FUNC_END(ftrace_stub_graph)
+
ENTRY(return_to_handler)
/*
* On implementing the frame point test, the ideal way is to compare the
diff --git a/arch/riscv/kernel/probes/decode-insn.c b/arch/riscv/kernel/probes/decode-insn.c
index 64f6183b4717..65d9590bfb9f 100644
--- a/arch/riscv/kernel/probes/decode-insn.c
+++ b/arch/riscv/kernel/probes/decode-insn.c
@@ -29,13 +29,14 @@ riscv_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api)
* TODO: the REJECTED ones below need to be implemented
*/
#ifdef CONFIG_RISCV_ISA_C
- RISCV_INSN_REJECTED(c_j, insn);
- RISCV_INSN_REJECTED(c_jr, insn);
RISCV_INSN_REJECTED(c_jal, insn);
- RISCV_INSN_REJECTED(c_jalr, insn);
- RISCV_INSN_REJECTED(c_beqz, insn);
- RISCV_INSN_REJECTED(c_bnez, insn);
RISCV_INSN_REJECTED(c_ebreak, insn);
+
+ RISCV_INSN_SET_SIMULATE(c_j, insn);
+ RISCV_INSN_SET_SIMULATE(c_jr, insn);
+ RISCV_INSN_SET_SIMULATE(c_jalr, insn);
+ RISCV_INSN_SET_SIMULATE(c_beqz, insn);
+ RISCV_INSN_SET_SIMULATE(c_bnez, insn);
#endif
RISCV_INSN_SET_SIMULATE(jal, insn);
diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c
index 7441ac8a6843..d3099d67816d 100644
--- a/arch/riscv/kernel/probes/simulate-insn.c
+++ b/arch/riscv/kernel/probes/simulate-insn.c
@@ -188,3 +188,108 @@ bool __kprobes simulate_branch(u32 opcode, unsigned long addr, struct pt_regs *r
return true;
}
+
+bool __kprobes simulate_c_j(u32 opcode, unsigned long addr, struct pt_regs *regs)
+{
+ /*
+ * 15 13 12 2 1 0
+ * | funct3 | offset[11|4|9:8|10|6|7|3:1|5] | opcode |
+ * 3 11 2
+ */
+
+ s32 offset;
+
+ offset = ((opcode >> 3) & 0x7) << 1;
+ offset |= ((opcode >> 11) & 0x1) << 4;
+ offset |= ((opcode >> 2) & 0x1) << 5;
+ offset |= ((opcode >> 7) & 0x1) << 6;
+ offset |= ((opcode >> 6) & 0x1) << 7;
+ offset |= ((opcode >> 9) & 0x3) << 8;
+ offset |= ((opcode >> 8) & 0x1) << 10;
+ offset |= ((opcode >> 12) & 0x1) << 11;
+
+ instruction_pointer_set(regs, addr + sign_extend32(offset, 11));
+
+ return true;
+}
+
+static bool __kprobes simulate_c_jr_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs,
+ bool is_jalr)
+{
+ /*
+ * 15 12 11 7 6 2 1 0
+ * | funct4 | rs1 | rs2 | op |
+ * 4 5 5 2
+ */
+
+ unsigned long jump_addr;
+
+ u32 rs1 = (opcode >> 7) & 0x1f;
+
+ if (rs1 == 0) /* C.JR is only valid when rs1 != x0 */
+ return false;
+
+ if (!rv_insn_reg_get_val(regs, rs1, &jump_addr))
+ return false;
+
+ if (is_jalr && !rv_insn_reg_set_val(regs, 1, addr + 2))
+ return false;
+
+ instruction_pointer_set(regs, jump_addr);
+
+ return true;
+}
+
+bool __kprobes simulate_c_jr(u32 opcode, unsigned long addr, struct pt_regs *regs)
+{
+ return simulate_c_jr_jalr(opcode, addr, regs, false);
+}
+
+bool __kprobes simulate_c_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs)
+{
+ return simulate_c_jr_jalr(opcode, addr, regs, true);
+}
+
+static bool __kprobes simulate_c_bnez_beqz(u32 opcode, unsigned long addr, struct pt_regs *regs,
+ bool is_bnez)
+{
+ /*
+ * 15 13 12 10 9 7 6 2 1 0
+ * | funct3 | offset[8|4:3] | rs1' | offset[7:6|2:1|5] | op |
+ * 3 3 3 5 2
+ */
+
+ s32 offset;
+ u32 rs1;
+ unsigned long rs1_val;
+
+ rs1 = 0x8 | ((opcode >> 7) & 0x7);
+
+ if (!rv_insn_reg_get_val(regs, rs1, &rs1_val))
+ return false;
+
+ if ((rs1_val != 0 && is_bnez) || (rs1_val == 0 && !is_bnez)) {
+ offset = ((opcode >> 3) & 0x3) << 1;
+ offset |= ((opcode >> 10) & 0x3) << 3;
+ offset |= ((opcode >> 2) & 0x1) << 5;
+ offset |= ((opcode >> 5) & 0x3) << 6;
+ offset |= ((opcode >> 12) & 0x1) << 8;
+ offset = sign_extend32(offset, 8);
+ } else {
+ offset = 2;
+ }
+
+ instruction_pointer_set(regs, addr + offset);
+
+ return true;
+}
+
+bool __kprobes simulate_c_bnez(u32 opcode, unsigned long addr, struct pt_regs *regs)
+{
+ return simulate_c_bnez_beqz(opcode, addr, regs, true);
+}
+
+bool __kprobes simulate_c_beqz(u32 opcode, unsigned long addr, struct pt_regs *regs)
+{
+ return simulate_c_bnez_beqz(opcode, addr, regs, false);
+}
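All of the helpers above rebuild a scattered immediate and then sign-extend it from its top bit. A stand-alone sketch of that last step; sign_extend32() here is a local re-implementation matching the kernel helper's contract, shown only for illustration:

#include <stdint.h>
#include <stdio.h>

/*
 * Same contract as the kernel's sign_extend32(): 'index' is the
 * 0-based position of the sign bit of the narrow value.
 */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* A C.J offset with only bit 11 set is the most negative case. */
	printf("%d\n", sign_extend32(0x800, 11));	/* prints -2048 */
	return 0;
}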
diff --git a/arch/riscv/kernel/probes/simulate-insn.h b/arch/riscv/kernel/probes/simulate-insn.h
index 61e35db31001..44ebbc444db9 100644
--- a/arch/riscv/kernel/probes/simulate-insn.h
+++ b/arch/riscv/kernel/probes/simulate-insn.h
@@ -24,5 +24,10 @@ bool simulate_auipc(u32 opcode, unsigned long addr, struct pt_regs *regs);
bool simulate_branch(u32 opcode, unsigned long addr, struct pt_regs *regs);
bool simulate_jal(u32 opcode, unsigned long addr, struct pt_regs *regs);
bool simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs);
+bool simulate_c_j(u32 opcode, unsigned long addr, struct pt_regs *regs);
+bool simulate_c_jr(u32 opcode, unsigned long addr, struct pt_regs *regs);
+bool simulate_c_jalr(u32 opcode, unsigned long addr, struct pt_regs *regs);
+bool simulate_c_bnez(u32 opcode, unsigned long addr, struct pt_regs *regs);
+bool simulate_c_beqz(u32 opcode, unsigned long addr, struct pt_regs *regs);
#endif /* _RISCV_KERNEL_PROBES_SIMULATE_INSN_H */
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 971fe776e2f8..32c2e1eb71bd 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -178,6 +178,11 @@ static void __init init_resources(void)
if (ret < 0)
goto error;
}
+ if (crashk_low_res.start != crashk_low_res.end) {
+ ret = add_resource(&iomem_resource, &crashk_low_res);
+ if (ret < 0)
+ goto error;
+ }
#endif
#ifdef CONFIG_CRASH_DUMP
@@ -311,6 +316,7 @@ void __init setup_arch(char **cmdline_p)
if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
riscv_isa_extension_available(NULL, ZICBOM))
riscv_noncoherent_supported();
+ riscv_set_dma_cache_alignment();
}
static int __init topology_init(void)
diff --git a/arch/riscv/kernel/suspend_entry.S b/arch/riscv/kernel/suspend_entry.S
index 12b52afe09a4..f7960c7c5f9e 100644
--- a/arch/riscv/kernel/suspend_entry.S
+++ b/arch/riscv/kernel/suspend_entry.S
@@ -5,6 +5,7 @@
*/
#include <linux/linkage.h>
+#include <linux/cfi_types.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
@@ -58,7 +59,7 @@ ENTRY(__cpu_suspend_enter)
ret
END(__cpu_suspend_enter)
-ENTRY(__cpu_resume_enter)
+SYM_TYPED_FUNC_START(__cpu_resume_enter)
/* Load the global pointer */
.option push
.option norelax
@@ -94,4 +95,4 @@ ENTRY(__cpu_resume_enter)
/* Return to C code */
ret
-END(__cpu_resume_enter)
+SYM_FUNC_END(__cpu_resume_enter)
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index 26ef5526bfb4..473159b5f303 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -335,3 +335,9 @@ SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
return do_riscv_hwprobe(pairs, pair_count, cpu_count,
cpus, flags);
}
+
+/* Not defined using SYSCALL_DEFINE0 to avoid error injection */
+asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *__unused)
+{
+ return -ENOSYS;
+}
diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c
index 44b1420a2270..dda913764903 100644
--- a/arch/riscv/kernel/syscall_table.c
+++ b/arch/riscv/kernel/syscall_table.c
@@ -10,9 +10,13 @@
#include <asm/syscall.h>
#undef __SYSCALL
-#define __SYSCALL(nr, call) [nr] = (call),
+#define __SYSCALL(nr, call) asmlinkage long __riscv_##call(const struct pt_regs *);
+#include <asm/unistd.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = __riscv_##call,
void * const sys_call_table[__NR_syscalls] = {
- [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+ [0 ... __NR_syscalls - 1] = __riscv_sys_ni_syscall,
#include <asm/unistd.h>
};
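The double inclusion of <asm/unistd.h> with a redefined __SYSCALL() is an X-macro pattern: the first pass emits prototypes for the __riscv_-prefixed wrappers, the second pass fills the table from the same list. A reduced stand-alone illustration; the list and the two "syscalls" here are hypothetical:

#include <stdio.h>

/* Stand-in for <asm/unistd.h>: one __SYSCALL() line per syscall. */
#define SYSCALL_LIST \
	__SYSCALL(0, sys_read) \
	__SYSCALL(1, sys_write)

/* Pass 1: declare the wrappers. */
#define __SYSCALL(nr, call) long my_##call(void);
SYSCALL_LIST
#undef __SYSCALL

/* Pass 2: build the table from the very same list. */
#define __SYSCALL(nr, call) [nr] = my_##call,
static long (*const table[])(void) = {
	SYSCALL_LIST
};
#undef __SYSCALL

long my_sys_read(void)  { return 0; }
long my_sys_write(void) { return 1; }

int main(void)
{
	printf("%ld %ld\n", table[0](), table[1]());	/* prints "0 1" */
	return 0;
}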
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index f798c853bede..19807c4d3805 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -21,6 +21,7 @@
#include <asm/asm-prototypes.h>
#include <asm/bug.h>
+#include <asm/cfi.h>
#include <asm/csr.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
@@ -271,7 +272,8 @@ void handle_break(struct pt_regs *regs)
== NOTIFY_STOP)
return;
#endif
- else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN)
+ else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN ||
+ handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN)
regs->epc += get_break_insn_length(regs->epc);
else
die(regs, "Kernel BUG");
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index fee0671e2dc1..4c2067fc59fc 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -19,6 +19,7 @@ kvm-y += vcpu_exit.o
kvm-y += vcpu_fp.o
kvm-y += vcpu_vector.o
kvm-y += vcpu_insn.o
+kvm-y += vcpu_onereg.o
kvm-y += vcpu_switch.o
kvm-y += vcpu_sbi.o
kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
index 585a3b42c52c..74bb27440527 100644
--- a/arch/riscv/kvm/aia.c
+++ b/arch/riscv/kvm/aia.c
@@ -176,7 +176,7 @@ int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
- return -EINVAL;
+ return -ENOENT;
*out_val = 0;
if (kvm_riscv_aia_available())
@@ -192,7 +192,7 @@ int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
- return -EINVAL;
+ return -ENOENT;
if (kvm_riscv_aia_available()) {
((unsigned long *)csr)[reg_num] = val;
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index f2eb47925806..068c74593871 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -406,12 +406,6 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
- const struct kvm_memory_slot *memslot)
-{
- kvm_flush_remote_tlbs(kvm);
-}
-
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free)
{
}
@@ -559,7 +553,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
int ret;
- kvm_pfn_t pfn = pte_pfn(range->pte);
+ kvm_pfn_t pfn = pte_pfn(range->arg.pte);
if (!kvm->arch.pgd)
return false;
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index d12ef99901fc..82229db1ce73 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -13,16 +13,12 @@
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
-#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/cacheflush.h>
-#include <asm/hwcap.h>
-#include <asm/sbi.h>
-#include <asm/vector.h>
#include <asm/kvm_vcpu_vector.h>
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
@@ -46,79 +42,6 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
sizeof(kvm_vcpu_stats_desc),
};
-#define KVM_RISCV_BASE_ISA_MASK GENMASK(25, 0)
-
-#define KVM_ISA_EXT_ARR(ext) [KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
-
-/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
-static const unsigned long kvm_isa_ext_arr[] = {
- [KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
- [KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
- [KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
- [KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
- [KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
- [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
- [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
- [KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
-
- KVM_ISA_EXT_ARR(SSAIA),
- KVM_ISA_EXT_ARR(SSTC),
- KVM_ISA_EXT_ARR(SVINVAL),
- KVM_ISA_EXT_ARR(SVNAPOT),
- KVM_ISA_EXT_ARR(SVPBMT),
- KVM_ISA_EXT_ARR(ZBB),
- KVM_ISA_EXT_ARR(ZIHINTPAUSE),
- KVM_ISA_EXT_ARR(ZICBOM),
- KVM_ISA_EXT_ARR(ZICBOZ),
-};
-
-static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
-{
- unsigned long i;
-
- for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
- if (kvm_isa_ext_arr[i] == base_ext)
- return i;
- }
-
- return KVM_RISCV_ISA_EXT_MAX;
-}
-
-static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
-{
- switch (ext) {
- case KVM_RISCV_ISA_EXT_H:
- return false;
- case KVM_RISCV_ISA_EXT_V:
- return riscv_v_vstate_ctrl_user_allowed();
- default:
- break;
- }
-
- return true;
-}
-
-static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
-{
- switch (ext) {
- case KVM_RISCV_ISA_EXT_A:
- case KVM_RISCV_ISA_EXT_C:
- case KVM_RISCV_ISA_EXT_I:
- case KVM_RISCV_ISA_EXT_M:
- case KVM_RISCV_ISA_EXT_SSAIA:
- case KVM_RISCV_ISA_EXT_SSTC:
- case KVM_RISCV_ISA_EXT_SVINVAL:
- case KVM_RISCV_ISA_EXT_SVNAPOT:
- case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
- case KVM_RISCV_ISA_EXT_ZBB:
- return false;
- default:
- break;
- }
-
- return true;
-}
-
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
@@ -176,7 +99,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
int rc;
struct kvm_cpu_context *cntx;
struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
- unsigned long host_isa, i;
/* Mark this VCPU never ran */
vcpu->arch.ran_atleast_once = false;
@@ -184,12 +106,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);
/* Setup ISA features available to VCPU */
- for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
- host_isa = kvm_isa_ext_arr[i];
- if (__riscv_isa_extension_available(NULL, host_isa) &&
- kvm_riscv_vcpu_isa_enable_allowed(i))
- set_bit(host_isa, vcpu->arch.isa);
- }
+ kvm_riscv_vcpu_setup_isa(vcpu);
/* Setup vendor, arch, and implementation details */
vcpu->arch.mvendorid = sbi_get_mvendorid();
@@ -294,450 +211,6 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
-static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- unsigned long __user *uaddr =
- (unsigned long __user *)(unsigned long)reg->addr;
- unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
- KVM_REG_SIZE_MASK |
- KVM_REG_RISCV_CONFIG);
- unsigned long reg_val;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
- return -EINVAL;
-
- switch (reg_num) {
- case KVM_REG_RISCV_CONFIG_REG(isa):
- reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
- break;
- case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
- if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
- return -EINVAL;
- reg_val = riscv_cbom_block_size;
- break;
- case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
- if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
- return -EINVAL;
- reg_val = riscv_cboz_block_size;
- break;
- case KVM_REG_RISCV_CONFIG_REG(mvendorid):
- reg_val = vcpu->arch.mvendorid;
- break;
- case KVM_REG_RISCV_CONFIG_REG(marchid):
- reg_val = vcpu->arch.marchid;
- break;
- case KVM_REG_RISCV_CONFIG_REG(mimpid):
- reg_val = vcpu->arch.mimpid;
- break;
- default:
- return -EINVAL;
- }
-
- if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- unsigned long __user *uaddr =
- (unsigned long __user *)(unsigned long)reg->addr;
- unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
- KVM_REG_SIZE_MASK |
- KVM_REG_RISCV_CONFIG);
- unsigned long i, isa_ext, reg_val;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
- return -EINVAL;
-
- if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- switch (reg_num) {
- case KVM_REG_RISCV_CONFIG_REG(isa):
- /*
- * This ONE REG interface is only defined for
- * single letter extensions.
- */
- if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
- return -EINVAL;
-
- if (!vcpu->arch.ran_atleast_once) {
- /* Ignore the enable/disable request for certain extensions */
- for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
- isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
- if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
- reg_val &= ~BIT(i);
- continue;
- }
- if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
- if (reg_val & BIT(i))
- reg_val &= ~BIT(i);
- if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
- if (!(reg_val & BIT(i)))
- reg_val |= BIT(i);
- }
- reg_val &= riscv_isa_extension_base(NULL);
- /* Do not modify anything beyond single letter extensions */
- reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
- (reg_val & KVM_RISCV_BASE_ISA_MASK);
- vcpu->arch.isa[0] = reg_val;
- kvm_riscv_vcpu_fp_reset(vcpu);
- } else {
- return -EOPNOTSUPP;
- }
- break;
- case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
- return -EOPNOTSUPP;
- case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
- return -EOPNOTSUPP;
- case KVM_REG_RISCV_CONFIG_REG(mvendorid):
- if (!vcpu->arch.ran_atleast_once)
- vcpu->arch.mvendorid = reg_val;
- else
- return -EBUSY;
- break;
- case KVM_REG_RISCV_CONFIG_REG(marchid):
- if (!vcpu->arch.ran_atleast_once)
- vcpu->arch.marchid = reg_val;
- else
- return -EBUSY;
- break;
- case KVM_REG_RISCV_CONFIG_REG(mimpid):
- if (!vcpu->arch.ran_atleast_once)
- vcpu->arch.mimpid = reg_val;
- else
- return -EBUSY;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
- unsigned long __user *uaddr =
- (unsigned long __user *)(unsigned long)reg->addr;
- unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
- KVM_REG_SIZE_MASK |
- KVM_REG_RISCV_CORE);
- unsigned long reg_val;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
- return -EINVAL;
- if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
- return -EINVAL;
-
- if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
- reg_val = cntx->sepc;
- else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
- reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
- reg_val = ((unsigned long *)cntx)[reg_num];
- else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
- reg_val = (cntx->sstatus & SR_SPP) ?
- KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
- else
- return -EINVAL;
-
- if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
- unsigned long __user *uaddr =
- (unsigned long __user *)(unsigned long)reg->addr;
- unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
- KVM_REG_SIZE_MASK |
- KVM_REG_RISCV_CORE);
- unsigned long reg_val;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
- return -EINVAL;
- if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
- return -EINVAL;
-
- if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
- cntx->sepc = reg_val;
- else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
- reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
- ((unsigned long *)cntx)[reg_num] = reg_val;
- else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
- if (reg_val == KVM_RISCV_MODE_S)
- cntx->sstatus |= SR_SPP;
- else
- cntx->sstatus &= ~SR_SPP;
- } else
- return -EINVAL;
-
- return 0;
-}
-
-static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
- unsigned long reg_num,
- unsigned long *out_val)
-{
- struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
-
- if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
- return -EINVAL;
-
- if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
- kvm_riscv_vcpu_flush_interrupts(vcpu);
- *out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
- *out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
- } else
- *out_val = ((unsigned long *)csr)[reg_num];
-
- return 0;
-}
-
-static inline int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
- unsigned long reg_num,
- unsigned long reg_val)
-{
- struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
-
- if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
- return -EINVAL;
-
- if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
- reg_val &= VSIP_VALID_MASK;
- reg_val <<= VSIP_TO_HVIP_SHIFT;
- }
-
- ((unsigned long *)csr)[reg_num] = reg_val;
-
- if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
- WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
-
- return 0;
-}
-
-static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- int rc;
- unsigned long __user *uaddr =
- (unsigned long __user *)(unsigned long)reg->addr;
- unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
- KVM_REG_SIZE_MASK |
- KVM_REG_RISCV_CSR);
- unsigned long reg_val, reg_subtype;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
- return -EINVAL;
-
- reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
- reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
- switch (reg_subtype) {
- case KVM_REG_RISCV_CSR_GENERAL:
- rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
- break;
- case KVM_REG_RISCV_CSR_AIA:
- rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
- break;
- default:
- rc = -EINVAL;
- break;
- }
- if (rc)
- return rc;
-
- if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- int rc;
- unsigned long __user *uaddr =
- (unsigned long __user *)(unsigned long)reg->addr;
- unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
- KVM_REG_SIZE_MASK |
- KVM_REG_RISCV_CSR);
- unsigned long reg_val, reg_subtype;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
- return -EINVAL;
-
- if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
- reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
- switch (reg_subtype) {
- case KVM_REG_RISCV_CSR_GENERAL:
- rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
- break;
- case KVM_REG_RISCV_CSR_AIA:
- rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
- break;
- default:
- rc = -EINVAL;
- break;
- }
- if (rc)
- return rc;
-
- return 0;
-}
-
-static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- unsigned long __user *uaddr =
- (unsigned long __user *)(unsigned long)reg->addr;
- unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
- KVM_REG_SIZE_MASK |
- KVM_REG_RISCV_ISA_EXT);
- unsigned long reg_val = 0;
- unsigned long host_isa_ext;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
- return -EINVAL;
-
- if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
- reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
- return -EINVAL;
-
- host_isa_ext = kvm_isa_ext_arr[reg_num];
- if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
- reg_val = 1; /* Mark the given extension as available */
-
- if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- unsigned long __user *uaddr =
- (unsigned long __user *)(unsigned long)reg->addr;
- unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
- KVM_REG_SIZE_MASK |
- KVM_REG_RISCV_ISA_EXT);
- unsigned long reg_val;
- unsigned long host_isa_ext;
-
- if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
- return -EINVAL;
-
- if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
- reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
- return -EINVAL;
-
- if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
- return -EFAULT;
-
- host_isa_ext = kvm_isa_ext_arr[reg_num];
- if (!__riscv_isa_extension_available(NULL, host_isa_ext))
- return -EOPNOTSUPP;
-
- if (!vcpu->arch.ran_atleast_once) {
- /*
- * All multi-letter extension and a few single letter
- * extension can be disabled
- */
- if (reg_val == 1 &&
- kvm_riscv_vcpu_isa_enable_allowed(reg_num))
- set_bit(host_isa_ext, vcpu->arch.isa);
- else if (!reg_val &&
- kvm_riscv_vcpu_isa_disable_allowed(reg_num))
- clear_bit(host_isa_ext, vcpu->arch.isa);
- else
- return -EINVAL;
- kvm_riscv_vcpu_fp_reset(vcpu);
- } else {
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
- case KVM_REG_RISCV_CONFIG:
- return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
- case KVM_REG_RISCV_CORE:
- return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
- case KVM_REG_RISCV_CSR:
- return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
- case KVM_REG_RISCV_TIMER:
- return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
- case KVM_REG_RISCV_FP_F:
- return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
- KVM_REG_RISCV_FP_F);
- case KVM_REG_RISCV_FP_D:
- return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
- KVM_REG_RISCV_FP_D);
- case KVM_REG_RISCV_ISA_EXT:
- return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
- case KVM_REG_RISCV_SBI_EXT:
- return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
- case KVM_REG_RISCV_VECTOR:
- return kvm_riscv_vcpu_set_reg_vector(vcpu, reg,
- KVM_REG_RISCV_VECTOR);
- default:
- break;
- }
-
- return -EINVAL;
-}
-
-static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg)
-{
- switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
- case KVM_REG_RISCV_CONFIG:
- return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
- case KVM_REG_RISCV_CORE:
- return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
- case KVM_REG_RISCV_CSR:
- return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
- case KVM_REG_RISCV_TIMER:
- return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
- case KVM_REG_RISCV_FP_F:
- return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
- KVM_REG_RISCV_FP_F);
- case KVM_REG_RISCV_FP_D:
- return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
- KVM_REG_RISCV_FP_D);
- case KVM_REG_RISCV_ISA_EXT:
- return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
- case KVM_REG_RISCV_SBI_EXT:
- return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
- case KVM_REG_RISCV_VECTOR:
- return kvm_riscv_vcpu_get_reg_vector(vcpu, reg,
- KVM_REG_RISCV_VECTOR);
- default:
- break;
- }
-
- return -EINVAL;
-}
-
long kvm_arch_vcpu_async_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -781,6 +254,24 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
break;
}
+ case KVM_GET_REG_LIST: {
+ struct kvm_reg_list __user *user_list = argp;
+ struct kvm_reg_list reg_list;
+ unsigned int n;
+
+ r = -EFAULT;
+ if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+ break;
+ n = reg_list.n;
+ reg_list.n = kvm_riscv_vcpu_num_regs(vcpu);
+ if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+ break;
+ r = -E2BIG;
+ if (n < reg_list.n)
+ break;
+ r = kvm_riscv_vcpu_copy_reg_indices(vcpu, user_list->reg);
+ break;
+ }
default:
break;
}
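The new KVM_GET_REG_LIST case follows the usual two-call protocol used on other architectures: call once with too small an n to learn the real count (the kernel writes it back and returns -E2BIG), then retry with a large-enough buffer. A hedged userspace sketch, assuming vcpu_fd is an already-created vCPU file descriptor and skipping error handling:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_reg_list *get_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;

	/* First call fails with E2BIG but fills in the real count. */
	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

	list = calloc(1, sizeof(*list) + probe.n * sizeof(list->reg[0]));
	list->n = probe.n;
	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);

	return list;	/* list->reg[0..n-1] now holds the register IDs */
}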
diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c
index 9d8cbc42057a..08ba48a395aa 100644
--- a/arch/riscv/kvm/vcpu_fp.c
+++ b/arch/riscv/kvm/vcpu_fp.c
@@ -96,7 +96,7 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
reg_val = &cntx->fp.f.f[reg_num];
else
- return -EINVAL;
+ return -ENOENT;
} else if ((rtype == KVM_REG_RISCV_FP_D) &&
riscv_isa_extension_available(vcpu->arch.isa, d)) {
if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
@@ -109,9 +109,9 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
return -EINVAL;
reg_val = &cntx->fp.d.f[reg_num];
} else
- return -EINVAL;
+ return -ENOENT;
} else
- return -EINVAL;
+ return -ENOENT;
if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
@@ -141,7 +141,7 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
reg_val = &cntx->fp.f.f[reg_num];
else
- return -EINVAL;
+ return -ENOENT;
} else if ((rtype == KVM_REG_RISCV_FP_D) &&
riscv_isa_extension_available(vcpu->arch.isa, d)) {
if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
@@ -154,9 +154,9 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
return -EINVAL;
reg_val = &cntx->fp.d.f[reg_num];
} else
- return -EINVAL;
+ return -ENOENT;
} else
- return -EINVAL;
+ return -ENOENT;
if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
new file mode 100644
index 000000000000..1b7e9fa265cb
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2023 Ventana Micro Systems Inc.
+ *
+ * Authors:
+ * Anup Patel <apatel@ventanamicro.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+#include <linux/kvm_host.h>
+#include <asm/cacheflush.h>
+#include <asm/hwcap.h>
+#include <asm/kvm_vcpu_vector.h>
+#include <asm/vector.h>
+
+#define KVM_RISCV_BASE_ISA_MASK GENMASK(25, 0)
+
+#define KVM_ISA_EXT_ARR(ext) \
+[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext
+
+/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
+static const unsigned long kvm_isa_ext_arr[] = {
+ /* Single letter extensions (alphabetically sorted) */
+ [KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
+ [KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
+ [KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
+ [KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
+ [KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
+ [KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
+ [KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,
+ [KVM_RISCV_ISA_EXT_V] = RISCV_ISA_EXT_v,
+ /* Multi letter extensions (alphabetically sorted) */
+ KVM_ISA_EXT_ARR(SSAIA),
+ KVM_ISA_EXT_ARR(SSTC),
+ KVM_ISA_EXT_ARR(SVINVAL),
+ KVM_ISA_EXT_ARR(SVNAPOT),
+ KVM_ISA_EXT_ARR(SVPBMT),
+ KVM_ISA_EXT_ARR(ZBA),
+ KVM_ISA_EXT_ARR(ZBB),
+ KVM_ISA_EXT_ARR(ZBS),
+ KVM_ISA_EXT_ARR(ZICBOM),
+ KVM_ISA_EXT_ARR(ZICBOZ),
+ KVM_ISA_EXT_ARR(ZICNTR),
+ KVM_ISA_EXT_ARR(ZICSR),
+ KVM_ISA_EXT_ARR(ZIFENCEI),
+ KVM_ISA_EXT_ARR(ZIHINTPAUSE),
+ KVM_ISA_EXT_ARR(ZIHPM),
+};
+
+static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
+{
+ unsigned long i;
+
+ for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
+ if (kvm_isa_ext_arr[i] == base_ext)
+ return i;
+ }
+
+ return KVM_RISCV_ISA_EXT_MAX;
+}
+
+static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
+{
+ switch (ext) {
+ case KVM_RISCV_ISA_EXT_H:
+ return false;
+ case KVM_RISCV_ISA_EXT_V:
+ return riscv_v_vstate_ctrl_user_allowed();
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
+{
+ switch (ext) {
+ case KVM_RISCV_ISA_EXT_A:
+ case KVM_RISCV_ISA_EXT_C:
+ case KVM_RISCV_ISA_EXT_I:
+ case KVM_RISCV_ISA_EXT_M:
+ case KVM_RISCV_ISA_EXT_SSAIA:
+ case KVM_RISCV_ISA_EXT_SSTC:
+ case KVM_RISCV_ISA_EXT_SVINVAL:
+ case KVM_RISCV_ISA_EXT_SVNAPOT:
+ case KVM_RISCV_ISA_EXT_ZBA:
+ case KVM_RISCV_ISA_EXT_ZBB:
+ case KVM_RISCV_ISA_EXT_ZBS:
+ case KVM_RISCV_ISA_EXT_ZICNTR:
+ case KVM_RISCV_ISA_EXT_ZICSR:
+ case KVM_RISCV_ISA_EXT_ZIFENCEI:
+ case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
+ case KVM_RISCV_ISA_EXT_ZIHPM:
+ return false;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu)
+{
+ unsigned long host_isa, i;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
+ host_isa = kvm_isa_ext_arr[i];
+ if (__riscv_isa_extension_available(NULL, host_isa) &&
+ kvm_riscv_vcpu_isa_enable_allowed(i))
+ set_bit(host_isa, vcpu->arch.isa);
+ }
+}
+
+static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ unsigned long __user *uaddr =
+ (unsigned long __user *)(unsigned long)reg->addr;
+ unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+ KVM_REG_SIZE_MASK |
+ KVM_REG_RISCV_CONFIG);
+ unsigned long reg_val;
+
+ if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+ return -EINVAL;
+
+ switch (reg_num) {
+ case KVM_REG_RISCV_CONFIG_REG(isa):
+ reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
+ if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
+ return -ENOENT;
+ reg_val = riscv_cbom_block_size;
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
+ if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
+ return -ENOENT;
+ reg_val = riscv_cboz_block_size;
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(mvendorid):
+ reg_val = vcpu->arch.mvendorid;
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(marchid):
+ reg_val = vcpu->arch.marchid;
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(mimpid):
+ reg_val = vcpu->arch.mimpid;
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(satp_mode):
+ reg_val = satp_mode >> SATP_MODE_SHIFT;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ unsigned long __user *uaddr =
+ (unsigned long __user *)(unsigned long)reg->addr;
+ unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+ KVM_REG_SIZE_MASK |
+ KVM_REG_RISCV_CONFIG);
+ unsigned long i, isa_ext, reg_val;
+
+ if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+ return -EINVAL;
+
+ if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+ switch (reg_num) {
+ case KVM_REG_RISCV_CONFIG_REG(isa):
+ /*
+ * This ONE REG interface is only defined for
+ * single letter extensions.
+ */
+ if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
+ return -EINVAL;
+
+ /*
+ * Return early (i.e. do nothing) if reg_val is the same
+ * value retrievable via kvm_riscv_vcpu_get_reg_config().
+ */
+ if (reg_val == (vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK))
+ break;
+
+ if (!vcpu->arch.ran_atleast_once) {
+ /* Ignore the enable/disable request for certain extensions */
+ for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
+ isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
+ if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
+ reg_val &= ~BIT(i);
+ continue;
+ }
+ if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
+ if (reg_val & BIT(i))
+ reg_val &= ~BIT(i);
+ if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
+ if (!(reg_val & BIT(i)))
+ reg_val |= BIT(i);
+ }
+ reg_val &= riscv_isa_extension_base(NULL);
+ /* Do not modify anything beyond single letter extensions */
+ reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
+ (reg_val & KVM_RISCV_BASE_ISA_MASK);
+ vcpu->arch.isa[0] = reg_val;
+ kvm_riscv_vcpu_fp_reset(vcpu);
+ } else {
+ return -EBUSY;
+ }
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
+ if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
+ return -ENOENT;
+ if (reg_val != riscv_cbom_block_size)
+ return -EINVAL;
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
+ if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
+ return -ENOENT;
+ if (reg_val != riscv_cboz_block_size)
+ return -EINVAL;
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(mvendorid):
+ if (reg_val == vcpu->arch.mvendorid)
+ break;
+ if (!vcpu->arch.ran_atleast_once)
+ vcpu->arch.mvendorid = reg_val;
+ else
+ return -EBUSY;
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(marchid):
+ if (reg_val == vcpu->arch.marchid)
+ break;
+ if (!vcpu->arch.ran_atleast_once)
+ vcpu->arch.marchid = reg_val;
+ else
+ return -EBUSY;
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(mimpid):
+ if (reg_val == vcpu->arch.mimpid)
+ break;
+ if (!vcpu->arch.ran_atleast_once)
+ vcpu->arch.mimpid = reg_val;
+ else
+ return -EBUSY;
+ break;
+ case KVM_REG_RISCV_CONFIG_REG(satp_mode):
+ if (reg_val != (satp_mode >> SATP_MODE_SHIFT))
+ return -EINVAL;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+ unsigned long __user *uaddr =
+ (unsigned long __user *)(unsigned long)reg->addr;
+ unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+ KVM_REG_SIZE_MASK |
+ KVM_REG_RISCV_CORE);
+ unsigned long reg_val;
+
+ if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+ return -EINVAL;
+ if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
+ return -ENOENT;
+
+ if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
+ reg_val = cntx->sepc;
+ else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
+ reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
+ reg_val = ((unsigned long *)cntx)[reg_num];
+ else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
+ reg_val = (cntx->sstatus & SR_SPP) ?
+ KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
+ else
+ return -ENOENT;
+
+ if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+ unsigned long __user *uaddr =
+ (unsigned long __user *)(unsigned long)reg->addr;
+ unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+ KVM_REG_SIZE_MASK |
+ KVM_REG_RISCV_CORE);
+ unsigned long reg_val;
+
+ if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+ return -EINVAL;
+ if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
+ return -ENOENT;
+
+ if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+ if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
+ cntx->sepc = reg_val;
+ else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
+ reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
+ ((unsigned long *)cntx)[reg_num] = reg_val;
+ else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
+ if (reg_val == KVM_RISCV_MODE_S)
+ cntx->sstatus |= SR_SPP;
+ else
+ cntx->sstatus &= ~SR_SPP;
+ } else
+ return -ENOENT;
+
+ return 0;
+}
+
+static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long *out_val)
+{
+ struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+
+ if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
+ return -ENOENT;
+
+ if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
+ kvm_riscv_vcpu_flush_interrupts(vcpu);
+ *out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
+ *out_val |= csr->hvip & ~IRQ_LOCAL_MASK;
+ } else
+ *out_val = ((unsigned long *)csr)[reg_num];
+
+ return 0;
+}
+
+static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long reg_val)
+{
+ struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+
+ if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
+ return -ENOENT;
+
+ if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
+ reg_val &= VSIP_VALID_MASK;
+ reg_val <<= VSIP_TO_HVIP_SHIFT;
+ }
+
+ ((unsigned long *)csr)[reg_num] = reg_val;
+
+ if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
+ WRITE_ONCE(vcpu->arch.irqs_pending_mask[0], 0);
+
+ return 0;
+}
+
+static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ int rc;
+ unsigned long __user *uaddr =
+ (unsigned long __user *)(unsigned long)reg->addr;
+ unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+ KVM_REG_SIZE_MASK |
+ KVM_REG_RISCV_CSR);
+ unsigned long reg_val, reg_subtype;
+
+ if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+ return -EINVAL;
+
+ reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+ reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+ switch (reg_subtype) {
+ case KVM_REG_RISCV_CSR_GENERAL:
+ rc = kvm_riscv_vcpu_general_get_csr(vcpu, reg_num, &reg_val);
+ break;
+ case KVM_REG_RISCV_CSR_AIA:
+ rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
+ break;
+ default:
+ rc = -ENOENT;
+ break;
+ }
+ if (rc)
+ return rc;
+
+ if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ int rc;
+ unsigned long __user *uaddr =
+ (unsigned long __user *)(unsigned long)reg->addr;
+ unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+ KVM_REG_SIZE_MASK |
+ KVM_REG_RISCV_CSR);
+ unsigned long reg_val, reg_subtype;
+
+ if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+ return -EINVAL;
+
+ if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+ reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+ reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+ switch (reg_subtype) {
+ case KVM_REG_RISCV_CSR_GENERAL:
+ rc = kvm_riscv_vcpu_general_set_csr(vcpu, reg_num, reg_val);
+ break;
+ case KVM_REG_RISCV_CSR_AIA:
+ rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
+ break;
+ default:
+ rc = -ENOENT;
+ break;
+ }
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long *reg_val)
+{
+ unsigned long host_isa_ext;
+
+ if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
+ reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
+ return -ENOENT;
+
+ *reg_val = 0;
+ host_isa_ext = kvm_isa_ext_arr[reg_num];
+ if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
+ *reg_val = 1; /* Mark the given extension as available */
+
+ return 0;
+}
+
+static int riscv_vcpu_set_isa_ext_single(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long reg_val)
+{
+ unsigned long host_isa_ext;
+
+ if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
+ reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
+ return -ENOENT;
+
+ host_isa_ext = kvm_isa_ext_arr[reg_num];
+ if (!__riscv_isa_extension_available(NULL, host_isa_ext))
+ return -ENOENT;
+
+ if (reg_val == test_bit(host_isa_ext, vcpu->arch.isa))
+ return 0;
+
+ if (!vcpu->arch.ran_atleast_once) {
+ /*
+ * All multi-letter extensions and a few single-letter
+ * extensions can be disabled.
+ */
+ if (reg_val == 1 &&
+ kvm_riscv_vcpu_isa_enable_allowed(reg_num))
+ set_bit(host_isa_ext, vcpu->arch.isa);
+ else if (!reg_val &&
+ kvm_riscv_vcpu_isa_disable_allowed(reg_num))
+ clear_bit(host_isa_ext, vcpu->arch.isa);
+ else
+ return -EINVAL;
+ kvm_riscv_vcpu_fp_reset(vcpu);
+ } else {
+ return -EBUSY;
+ }
+
+ return 0;
+}
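
A single ISA-extension register only accepts 0 or 1 and, as enforced above, only before the vCPU has run. A hypothetical userspace sketch (rv64 host, includes reused from the first sketch; KVM_RISCV_ISA_EXT_SVPBMT is just an example id from the uapi enum):

/* Disable one ISA extension for a vCPU that has not run yet. */
static int disable_isa_ext(int vcpu_fd, uint64_t ext_id)
{
	unsigned long val = 0;	/* 0 = disable, 1 = enable */
	struct kvm_one_reg reg = {
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | ext_id,
		.addr = (unsigned long)&val,
	};

	/* Fails with EBUSY once the vCPU has run at least once. */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

/* e.g. disable_isa_ext(vcpu_fd, KVM_RISCV_ISA_EXT_SVPBMT); */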
+
+static int riscv_vcpu_get_isa_ext_multi(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long *reg_val)
+{
+ unsigned long i, ext_id, ext_val;
+
+ if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
+ return -ENOENT;
+
+ for (i = 0; i < BITS_PER_LONG; i++) {
+ ext_id = i + reg_num * BITS_PER_LONG;
+ if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
+ break;
+
+ ext_val = 0;
+ riscv_vcpu_get_isa_ext_single(vcpu, ext_id, &ext_val);
+ if (ext_val)
+ *reg_val |= KVM_REG_RISCV_ISA_MULTI_MASK(ext_id);
+ }
+
+ return 0;
+}
+
+static int riscv_vcpu_set_isa_ext_multi(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long reg_val, bool enable)
+{
+ unsigned long i, ext_id;
+
+ if (reg_num > KVM_REG_RISCV_ISA_MULTI_REG_LAST)
+ return -ENOENT;
+
+ for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
+ ext_id = i + reg_num * BITS_PER_LONG;
+ if (ext_id >= KVM_RISCV_ISA_EXT_MAX)
+ break;
+
+ riscv_vcpu_set_isa_ext_single(vcpu, ext_id, enable);
+ }
+
+ return 0;
+}
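
Each MULTI register covers BITS_PER_LONG extension ids, so extension ext_id lives in register ext_id / BITS_PER_LONG at bit ext_id % BITS_PER_LONG (the uapi expresses this as KVM_REG_RISCV_ISA_MULTI_REG() and KVM_REG_RISCV_ISA_MULTI_MASK()). A hypothetical helper mirroring that mapping:

/* Build the MULTI_EN register id and the bit mask that control ext_id. */
static void isa_ext_multi_pos(unsigned long ext_id,
			      uint64_t *reg_id, unsigned long *mask)
{
	unsigned long bits = 8 * sizeof(unsigned long);	/* BITS_PER_LONG */

	*reg_id = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_ISA_EXT |
		  KVM_REG_RISCV_ISA_MULTI_EN | (ext_id / bits);
	*mask = 1UL << (ext_id % bits);
}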
+
+static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ int rc;
+ unsigned long __user *uaddr =
+ (unsigned long __user *)(unsigned long)reg->addr;
+ unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+ KVM_REG_SIZE_MASK |
+ KVM_REG_RISCV_ISA_EXT);
+ unsigned long reg_val, reg_subtype;
+
+ if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+ return -EINVAL;
+
+ reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+ reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+ reg_val = 0;
+ switch (reg_subtype) {
+ case KVM_REG_RISCV_ISA_SINGLE:
+ rc = riscv_vcpu_get_isa_ext_single(vcpu, reg_num, &reg_val);
+ break;
+ case KVM_REG_RISCV_ISA_MULTI_EN:
+ case KVM_REG_RISCV_ISA_MULTI_DIS:
+ rc = riscv_vcpu_get_isa_ext_multi(vcpu, reg_num, &reg_val);
+ if (!rc && reg_subtype == KVM_REG_RISCV_ISA_MULTI_DIS)
+ reg_val = ~reg_val;
+ break;
+ default:
+ rc = -ENOENT;
+ }
+ if (rc)
+ return rc;
+
+ if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ unsigned long __user *uaddr =
+ (unsigned long __user *)(unsigned long)reg->addr;
+ unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+ KVM_REG_SIZE_MASK |
+ KVM_REG_RISCV_ISA_EXT);
+ unsigned long reg_val, reg_subtype;
+
+ if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
+ return -EINVAL;
+
+ reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
+ reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+ if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+ return -EFAULT;
+
+ switch (reg_subtype) {
+ case KVM_REG_RISCV_ISA_SINGLE:
+ return riscv_vcpu_set_isa_ext_single(vcpu, reg_num, reg_val);
+ case KVM_REG_RISCV_SBI_MULTI_EN:
+ return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, true);
+ case KVM_REG_RISCV_SBI_MULTI_DIS:
+ return riscv_vcpu_set_isa_ext_multi(vcpu, reg_num, reg_val, false);
+ default:
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int copy_config_reg_indices(const struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ int n = 0;
+
+ for (int i = 0; i < sizeof(struct kvm_riscv_config)/sizeof(unsigned long);
+ i++) {
+ u64 size;
+ u64 reg;
+
+ /*
+ * Avoid reporting config reg if the corresponding extension
+ * is not available.
+ */
+ if (i == KVM_REG_RISCV_CONFIG_REG(zicbom_block_size) &&
+ !riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
+ continue;
+ else if (i == KVM_REG_RISCV_CONFIG_REG(zicboz_block_size) &&
+ !riscv_isa_extension_available(vcpu->arch.isa, ZICBOZ))
+ continue;
+
+ size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+ reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CONFIG | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ n++;
+ }
+
+ return n;
+}
+
+static unsigned long num_config_regs(const struct kvm_vcpu *vcpu)
+{
+ return copy_config_reg_indices(vcpu, NULL);
+}
+
+static inline unsigned long num_core_regs(void)
+{
+ return sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
+}
+
+static int copy_core_reg_indices(u64 __user *uindices)
+{
+ int n = num_core_regs();
+
+ for (int i = 0; i < n; i++) {
+ u64 size = IS_ENABLED(CONFIG_32BIT) ?
+ KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+ u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CORE | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+
+ return n;
+}
+
+static inline unsigned long num_csr_regs(const struct kvm_vcpu *vcpu)
+{
+ unsigned long n = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
+
+ if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
+ n += sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
+
+ return n;
+}
+
+static int copy_csr_reg_indices(const struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ int n1 = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
+ int n2 = 0;
+
+ /* copy general csr regs */
+ for (int i = 0; i < n1; i++) {
+ u64 size = IS_ENABLED(CONFIG_32BIT) ?
+ KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+ u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
+ KVM_REG_RISCV_CSR_GENERAL | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+
+ /* copy AIA csr regs */
+ if (riscv_isa_extension_available(vcpu->arch.isa, SSAIA)) {
+ n2 = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
+
+ for (int i = 0; i < n2; i++) {
+ u64 size = IS_ENABLED(CONFIG_32BIT) ?
+ KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+ u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_CSR |
+ KVM_REG_RISCV_CSR_AIA | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+ }
+
+ return n1 + n2;
+}
+
+static inline unsigned long num_timer_regs(void)
+{
+ return sizeof(struct kvm_riscv_timer) / sizeof(u64);
+}
+
+static int copy_timer_reg_indices(u64 __user *uindices)
+{
+ int n = num_timer_regs();
+
+ for (int i = 0; i < n; i++) {
+ u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
+ KVM_REG_RISCV_TIMER | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+
+ return n;
+}
+
+static inline unsigned long num_fp_f_regs(const struct kvm_vcpu *vcpu)
+{
+ const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+
+ if (riscv_isa_extension_available(vcpu->arch.isa, f))
+ return sizeof(cntx->fp.f) / sizeof(u32);
+ else
+ return 0;
+}
+
+static int copy_fp_f_reg_indices(const struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ int n = num_fp_f_regs(vcpu);
+
+ for (int i = 0; i < n; i++) {
+ u64 reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
+ KVM_REG_RISCV_FP_F | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+
+ return n;
+}
+
+static inline unsigned long num_fp_d_regs(const struct kvm_vcpu *vcpu)
+{
+ const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+
+ if (riscv_isa_extension_available(vcpu->arch.isa, d))
+ return sizeof(cntx->fp.d.f) / sizeof(u64) + 1;
+ else
+ return 0;
+}
+
+static int copy_fp_d_reg_indices(const struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ int i;
+ int n = num_fp_d_regs(vcpu);
+ u64 reg;
+
+ /* copy fp.d.f indices */
+ for (i = 0; i < n - 1; i++) {
+ reg = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
+ KVM_REG_RISCV_FP_D | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+
+ /* copy fp.d.fcsr indices */
+ reg = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | i;
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ return n;
+}
+
+static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ unsigned int n = 0;
+ unsigned long isa_ext;
+
+ for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
+ u64 size = IS_ENABLED(CONFIG_32BIT) ?
+ KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+ u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
+
+ isa_ext = kvm_isa_ext_arr[i];
+ if (!__riscv_isa_extension_available(vcpu->arch.isa, isa_ext))
+ continue;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ n++;
+ }
+
+ return n;
+}
+
+static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
+{
+ return copy_isa_ext_reg_indices(vcpu, NULL);
+}
+
+static inline unsigned long num_sbi_ext_regs(void)
+{
+ /*
+ * number of KVM_REG_RISCV_SBI_SINGLE +
+ * 2 x (number of KVM_REG_RISCV_SBI_MULTI)
+ */
+ return KVM_RISCV_SBI_EXT_MAX + 2 * (KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1);
+}
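
As a worked example (with a hypothetical KVM_RISCV_SBI_EXT_MAX of 8 on a 64-bit host): KVM_REG_RISCV_SBI_MULTI_REG_LAST is (8 - 1) / 64 = 0, so the function returns 8 + 2 * 1 = 10 registers, i.e. eight SINGLE registers plus one MULTI_EN and one MULTI_DIS register.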
+
+static int copy_sbi_ext_reg_indices(u64 __user *uindices)
+{
+ int n;
+
+ /* copy KVM_REG_RISCV_SBI_SINGLE */
+ n = KVM_RISCV_SBI_EXT_MAX;
+ for (int i = 0; i < n; i++) {
+ u64 size = IS_ENABLED(CONFIG_32BIT) ?
+ KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+ u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+ KVM_REG_RISCV_SBI_SINGLE | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+
+ /* copy KVM_REG_RISCV_SBI_MULTI */
+ n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1;
+ for (int i = 0; i < n; i++) {
+ u64 size = IS_ENABLED(CONFIG_32BIT) ?
+ KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
+ u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+ KVM_REG_RISCV_SBI_MULTI_EN | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+
+ reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
+ KVM_REG_RISCV_SBI_MULTI_DIS | i;
+
+ if (uindices) {
+ if (put_user(reg, uindices))
+ return -EFAULT;
+ uindices++;
+ }
+ }
+
+ return num_sbi_ext_regs();
+}
+
+/*
+ * kvm_riscv_vcpu_num_regs - how many registers do we present via KVM_GET/SET_ONE_REG
+ *
+ * This is for all registers.
+ */
+unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
+{
+ unsigned long res = 0;
+
+ res += num_config_regs(vcpu);
+ res += num_core_regs();
+ res += num_csr_regs(vcpu);
+ res += num_timer_regs();
+ res += num_fp_f_regs(vcpu);
+ res += num_fp_d_regs(vcpu);
+ res += num_isa_ext_regs(vcpu);
+ res += num_sbi_ext_regs();
+
+ return res;
+}
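
These counts and the copy_*_reg_indices() helpers back the KVM_GET_REG_LIST vCPU ioctl. A hypothetical userspace sketch of the usual probe-then-fetch pattern (needs <errno.h> and <stdlib.h> in addition to the includes from the first sketch):

/* Query the register list: first learn the count, then fetch the ids. */
static struct kvm_reg_list *get_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;

	/* With n == 0 the kernel writes back the required count and fails. */
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) == 0 || errno != E2BIG)
		return NULL;

	list = calloc(1, sizeof(*list) + probe.n * sizeof(list->reg[0]));
	if (!list)
		return NULL;
	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list)) {
		free(list);
		return NULL;
	}
	return list;	/* list->reg[0..n-1] holds every ONE_REG id */
}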
+
+/*
+ * kvm_riscv_vcpu_copy_reg_indices - get indices of all registers.
+ */
+int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
+ u64 __user *uindices)
+{
+ int ret;
+
+ ret = copy_config_reg_indices(vcpu, uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_core_reg_indices(uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_csr_reg_indices(vcpu, uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_timer_reg_indices(uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_fp_f_reg_indices(vcpu, uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_fp_d_reg_indices(vcpu, uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_isa_ext_reg_indices(vcpu, uindices);
+ if (ret < 0)
+ return ret;
+ uindices += ret;
+
+ ret = copy_sbi_ext_reg_indices(uindices);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
+ case KVM_REG_RISCV_CONFIG:
+ return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
+ case KVM_REG_RISCV_CORE:
+ return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
+ case KVM_REG_RISCV_CSR:
+ return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
+ case KVM_REG_RISCV_TIMER:
+ return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
+ case KVM_REG_RISCV_FP_F:
+ return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
+ KVM_REG_RISCV_FP_F);
+ case KVM_REG_RISCV_FP_D:
+ return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
+ KVM_REG_RISCV_FP_D);
+ case KVM_REG_RISCV_ISA_EXT:
+ return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
+ case KVM_REG_RISCV_SBI_EXT:
+ return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
+ case KVM_REG_RISCV_VECTOR:
+ return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
+ default:
+ break;
+ }
+
+ return -ENOENT;
+}
+
+int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg)
+{
+ switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
+ case KVM_REG_RISCV_CONFIG:
+ return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
+ case KVM_REG_RISCV_CORE:
+ return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
+ case KVM_REG_RISCV_CSR:
+ return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
+ case KVM_REG_RISCV_TIMER:
+ return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
+ case KVM_REG_RISCV_FP_F:
+ return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
+ KVM_REG_RISCV_FP_F);
+ case KVM_REG_RISCV_FP_D:
+ return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
+ KVM_REG_RISCV_FP_D);
+ case KVM_REG_RISCV_ISA_EXT:
+ return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
+ case KVM_REG_RISCV_SBI_EXT:
+ return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
+ case KVM_REG_RISCV_VECTOR:
+ return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
+ default:
+ break;
+ }
+
+ return -ENOENT;
+}
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index 7b46e04fb667..9cd97091c723 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -140,8 +140,10 @@ static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
const struct kvm_riscv_sbi_extension_entry *sext = NULL;
struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
- if (reg_num >= KVM_RISCV_SBI_EXT_MAX ||
- (reg_val != 1 && reg_val != 0))
+ if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
+ return -ENOENT;
+
+ if (reg_val != 1 && reg_val != 0)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
@@ -175,7 +177,7 @@ static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
- return -EINVAL;
+ return -ENOENT;
for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
if (sbi_ext[i].ext_idx == reg_num) {
@@ -206,7 +208,7 @@ static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
unsigned long i, ext_id;
if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
- return -EINVAL;
+ return -ENOENT;
for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
ext_id = i + reg_num * BITS_PER_LONG;
@@ -226,7 +228,7 @@ static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
unsigned long i, ext_id, ext_val;
if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
- return -EINVAL;
+ return -ENOENT;
for (i = 0; i < BITS_PER_LONG; i++) {
ext_id = i + reg_num * BITS_PER_LONG;
@@ -272,7 +274,7 @@ int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
case KVM_REG_RISCV_SBI_MULTI_DIS:
return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
default:
- return -EINVAL;
+ return -ENOENT;
}
return 0;
@@ -307,7 +309,7 @@ int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
reg_val = ~reg_val;
break;
default:
- rc = -EINVAL;
+ rc = -ENOENT;
}
if (rc)
return rc;
diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
index 3ac2ff6a65da..75486b25ac45 100644
--- a/arch/riscv/kvm/vcpu_timer.c
+++ b/arch/riscv/kvm/vcpu_timer.c
@@ -170,7 +170,7 @@ int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
if (KVM_REG_SIZE(reg->id) != sizeof(u64))
return -EINVAL;
if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
- return -EINVAL;
+ return -ENOENT;
switch (reg_num) {
case KVM_REG_RISCV_TIMER_REG(frequency):
@@ -187,7 +187,7 @@ int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
KVM_RISCV_TIMER_STATE_OFF;
break;
default:
- return -EINVAL;
+ return -ENOENT;
}
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
@@ -211,14 +211,15 @@ int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
if (KVM_REG_SIZE(reg->id) != sizeof(u64))
return -EINVAL;
if (reg_num >= sizeof(struct kvm_riscv_timer) / sizeof(u64))
- return -EINVAL;
+ return -ENOENT;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
switch (reg_num) {
case KVM_REG_RISCV_TIMER_REG(frequency):
- ret = -EOPNOTSUPP;
+ if (reg_val != riscv_timebase)
+ return -EINVAL;
break;
case KVM_REG_RISCV_TIMER_REG(time):
gt->time_delta = reg_val - get_cycles64();
@@ -233,7 +234,7 @@ int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
ret = kvm_riscv_vcpu_timer_cancel(t);
break;
default:
- ret = -EINVAL;
+ ret = -ENOENT;
break;
}
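
With this change, writing the timer frequency pseudo-register is accepted as long as the value matches the host timebase, instead of always failing with EOPNOTSUPP, so blindly restoring a previously saved register list keeps working. A hypothetical userspace sketch (includes as in the first sketch):

/* Round-trip the timer frequency register; a different value gets EINVAL. */
static int roundtrip_timer_freq(int vcpu_fd)
{
	uint64_t freq;
	struct kvm_one_reg reg = {
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER |
		      KVM_REG_RISCV_TIMER_REG(frequency),
		.addr = (unsigned long)&freq,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		return -1;
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	/* same value: succeeds */
}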
diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c
index edd2eecbddc2..b430cbb69521 100644
--- a/arch/riscv/kvm/vcpu_vector.c
+++ b/arch/riscv/kvm/vcpu_vector.c
@@ -91,95 +91,93 @@ void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
}
#endif
-static void *kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
- unsigned long reg_num,
- size_t reg_size)
+static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ size_t reg_size,
+ void **reg_addr)
{
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
- void *reg_val;
size_t vlenb = riscv_v_vsize / 32;
if (reg_num < KVM_REG_RISCV_VECTOR_REG(0)) {
if (reg_size != sizeof(unsigned long))
- return NULL;
+ return -EINVAL;
switch (reg_num) {
case KVM_REG_RISCV_VECTOR_CSR_REG(vstart):
- reg_val = &cntx->vector.vstart;
+ *reg_addr = &cntx->vector.vstart;
break;
case KVM_REG_RISCV_VECTOR_CSR_REG(vl):
- reg_val = &cntx->vector.vl;
+ *reg_addr = &cntx->vector.vl;
break;
case KVM_REG_RISCV_VECTOR_CSR_REG(vtype):
- reg_val = &cntx->vector.vtype;
+ *reg_addr = &cntx->vector.vtype;
break;
case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr):
- reg_val = &cntx->vector.vcsr;
+ *reg_addr = &cntx->vector.vcsr;
break;
case KVM_REG_RISCV_VECTOR_CSR_REG(datap):
default:
- return NULL;
+ return -ENOENT;
}
} else if (reg_num <= KVM_REG_RISCV_VECTOR_REG(31)) {
if (reg_size != vlenb)
- return NULL;
- reg_val = cntx->vector.datap
- + (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb;
+ return -EINVAL;
+ *reg_addr = cntx->vector.datap +
+ (reg_num - KVM_REG_RISCV_VECTOR_REG(0)) * vlenb;
} else {
- return NULL;
+ return -ENOENT;
}
- return reg_val;
+ return 0;
}
int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg,
- unsigned long rtype)
+ const struct kvm_one_reg *reg)
{
unsigned long *isa = vcpu->arch.isa;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
- rtype);
- void *reg_val = NULL;
+ KVM_REG_RISCV_VECTOR);
size_t reg_size = KVM_REG_SIZE(reg->id);
+ void *reg_addr;
+ int rc;
- if (rtype == KVM_REG_RISCV_VECTOR &&
- riscv_isa_extension_available(isa, v)) {
- reg_val = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size);
- }
+ if (!riscv_isa_extension_available(isa, v))
+ return -ENOENT;
- if (!reg_val)
- return -EINVAL;
+ rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
+ if (rc)
+ return rc;
- if (copy_to_user(uaddr, reg_val, reg_size))
+ if (copy_to_user(uaddr, reg_addr, reg_size))
return -EFAULT;
return 0;
}
int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg,
- unsigned long rtype)
+ const struct kvm_one_reg *reg)
{
unsigned long *isa = vcpu->arch.isa;
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
- rtype);
- void *reg_val = NULL;
+ KVM_REG_RISCV_VECTOR);
size_t reg_size = KVM_REG_SIZE(reg->id);
+ void *reg_addr;
+ int rc;
- if (rtype == KVM_REG_RISCV_VECTOR &&
- riscv_isa_extension_available(isa, v)) {
- reg_val = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size);
- }
+ if (!riscv_isa_extension_available(isa, v))
+ return -ENOENT;
- if (!reg_val)
- return -EINVAL;
+ rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
+ if (rc)
+ return rc;
- if (copy_from_user(reg_val, uaddr, reg_size))
+ if (copy_from_user(reg_addr, uaddr, reg_size))
return -EFAULT;
return 0;
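
With the reworked helper, userspace now gets ENOENT when the V extension is absent or the register does not exist, and EINVAL only for a wrong size. A hypothetical sketch reading one vector CSR (rv64 host, includes as in the first sketch; KVM_REG_RISCV_VECTOR_CSR_REG() is assumed to come from the RISC-V uapi header):

/* Read the guest vtype vector CSR; fails with ENOENT if V is not available. */
static int get_vector_vtype(int vcpu_fd, unsigned long *vtype)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_VECTOR |
		      KVM_REG_RISCV_VECTOR_CSR_REG(vtype),
		.addr = (unsigned long)vtype,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}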
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 12e22e7330e7..217fd4de6134 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -67,7 +67,7 @@ static void __flush_context(void)
lockdep_assert_held(&context_lock);
/* Update the list of reserved ASIDs and the ASID bitmap. */
- bitmap_clear(context_asid_map, 0, num_asids);
+ bitmap_zero(context_asid_map, num_asids);
/* Mark already active ASIDs as used */
for_each_possible_cpu(i) {
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
index d51a75864e53..7270b4d8c05b 100644
--- a/arch/riscv/mm/dma-noncoherent.c
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -11,6 +11,8 @@
#include <asm/cacheflush.h>
static bool noncoherent_supported __ro_after_init;
+int dma_cache_alignment __ro_after_init = ARCH_DMA_MINALIGN;
+EXPORT_SYMBOL_GPL(dma_cache_alignment);
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
enum dma_data_direction dir)
@@ -78,3 +80,9 @@ void riscv_noncoherent_supported(void)
"Non-coherent DMA support enabled without a block size\n");
noncoherent_supported = true;
}
+
+void __init riscv_set_dma_cache_alignment(void)
+{
+ if (!noncoherent_supported)
+ dma_cache_alignment = 1;
+}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index c07ff3e2c90a..943c18d6ef4d 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -1299,6 +1299,28 @@ static inline void setup_vm_final(void)
}
#endif /* CONFIG_MMU */
+/* Reserve 128M of low memory by default for the swiotlb buffer */
+#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
+
+static int __init reserve_crashkernel_low(unsigned long long low_size)
+{
+ unsigned long long low_base;
+
+ low_base = memblock_phys_alloc_range(low_size, PMD_SIZE, 0, dma32_phys_limit);
+ if (!low_base) {
+ pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size);
+ return -ENOMEM;
+ }
+
+ pr_info("crashkernel low memory reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
+ low_base, low_base + low_size, low_size >> 20);
+
+ crashk_low_res.start = low_base;
+ crashk_low_res.end = low_base + low_size - 1;
+
+ return 0;
+}
+
/*
* reserve_crashkernel() - reserves memory for crash kernel
*
@@ -1310,8 +1332,12 @@ static void __init reserve_crashkernel(void)
{
unsigned long long crash_base = 0;
unsigned long long crash_size = 0;
+ unsigned long long crash_low_size = 0;
unsigned long search_start = memblock_start_of_DRAM();
- unsigned long search_end = memblock_end_of_DRAM();
+ unsigned long search_end = (unsigned long)dma32_phys_limit;
+ char *cmdline = boot_command_line;
+ bool fixed_base = false;
+ bool high = false;
int ret = 0;
@@ -1327,14 +1353,36 @@ static void __init reserve_crashkernel(void)
return;
}
- ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
+ ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
&crash_size, &crash_base);
- if (ret || !crash_size)
+ if (ret == -ENOENT) {
+ /* Fallback to crashkernel=X,[high,low] */
+ ret = parse_crashkernel_high(cmdline, 0, &crash_size, &crash_base);
+ if (ret || !crash_size)
+ return;
+
+ /*
+ * crashkernel=Y,low is valid only when crashkernel=X,high
+ * is passed.
+ */
+ ret = parse_crashkernel_low(cmdline, 0, &crash_low_size, &crash_base);
+ if (ret == -ENOENT)
+ crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
+ else if (ret)
+ return;
+
+ search_start = (unsigned long)dma32_phys_limit;
+ search_end = memblock_end_of_DRAM();
+ high = true;
+ } else if (ret || !crash_size) {
+ /* Invalid argument value specified */
return;
+ }
crash_size = PAGE_ALIGN(crash_size);
if (crash_base) {
+ fixed_base = true;
search_start = crash_base;
search_end = crash_base + crash_size;
}
@@ -1347,12 +1395,37 @@ static void __init reserve_crashkernel(void)
* swiotlb can work on the crash kernel.
*/
crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
- search_start,
- min(search_end, (unsigned long)(SZ_4G - 1)));
+ search_start, search_end);
if (crash_base == 0) {
- /* Try again without restricting region to 32bit addressible memory */
+ /*
+ * For crashkernel=size[KMG]@offset[KMG], print out a failure
+ * message if the specified region cannot be reserved.
+ */
+ if (fixed_base) {
+ pr_warn("crashkernel: allocating failed with given size@offset\n");
+ return;
+ }
+
+ if (high) {
+ /*
+ * For crashkernel=size[KMG],high, if the first attempt was
+ * for high memory, fall back to low memory.
+ */
+ search_start = memblock_start_of_DRAM();
+ search_end = (unsigned long)dma32_phys_limit;
+ } else {
+ /*
+ * For crashkernel=size[KMG], if the first attempt was for
+ * low memory, fall back to high memory, the minimum required
+ * low memory will be reserved later.
+ */
+ search_start = (unsigned long)dma32_phys_limit;
+ search_end = memblock_end_of_DRAM();
+ crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
+ }
+
crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
- search_start, search_end);
+ search_start, search_end);
if (crash_base == 0) {
pr_warn("crashkernel: couldn't allocate %lldKB\n",
crash_size >> 10);
@@ -1360,6 +1433,12 @@ static void __init reserve_crashkernel(void)
}
}
+ if ((crash_base >= dma32_phys_limit) && crash_low_size &&
+ reserve_crashkernel_low(crash_low_size)) {
+ memblock_phys_free(crash_base, crash_size);
+ return;
+ }
+
pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
crash_base, crash_base + crash_size, crash_size >> 20);
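
With the fallback logic above, RISC-V accepts the same crashkernel= variants as other architectures. A sketch of the resulting command-line usage (the sizes are examples only):

    crashkernel=256M
        try below the 32-bit DMA limit first; on failure fall back to high
        memory and add a default 128M low reservation
    crashkernel=2G,high
        reserve in high memory, plus 128M of low memory by default; fall
        back to low memory if the high allocation fails
    crashkernel=2G,high crashkernel=256M,low
        as above, but with an explicit 256M low reservation
    crashkernel=256M@0x80000000
        fixed reservation; prints a warning and gives up instead of
        falling back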
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index a01bc15dce24..5e39dcf23fdb 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -22,9 +22,9 @@
* region is not and then we have to go down to the PUD level.
*/
-pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
-p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss;
-pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;
+static pgd_t tmp_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+static p4d_t tmp_p4d[PTRS_PER_P4D] __page_aligned_bss;
+static pud_t tmp_pud[PTRS_PER_PUD] __page_aligned_bss;
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
@@ -438,7 +438,7 @@ static void __init kasan_shallow_populate(void *start, void *end)
kasan_shallow_populate_pgd(vaddr, vend);
}
-static void create_tmp_mapping(void)
+static void __init create_tmp_mapping(void)
{
void *ptr;
p4d_t *base_p4d;
diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile
index dc20e166983e..9e6476719abb 100644
--- a/arch/riscv/purgatory/Makefile
+++ b/arch/riscv/purgatory/Makefile
@@ -77,6 +77,10 @@ ifdef CONFIG_STACKPROTECTOR_STRONG
PURGATORY_CFLAGS_REMOVE += -fstack-protector-strong
endif
+ifdef CONFIG_CFI_CLANG
+PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_CFI)
+endif
+
CFLAGS_REMOVE_purgatory.o += $(PURGATORY_CFLAGS_REMOVE)
CFLAGS_purgatory.o += $(PURGATORY_CFLAGS)
diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c
index 8753cb0339e5..7b7521762633 100644
--- a/arch/s390/boot/ipl_parm.c
+++ b/arch/s390/boot/ipl_parm.c
@@ -19,7 +19,6 @@ struct parmarea parmarea __section(".parmarea") = {
};
char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
-int __bootdata(noexec_disabled);
unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
struct ipl_parameter_block __bootdata_preserved(ipl_block);
@@ -290,12 +289,6 @@ void parse_boot_command_line(void)
zlib_dfltcc_support = ZLIB_DFLTCC_FULL_DEBUG;
}
- if (!strcmp(param, "noexec")) {
- rc = kstrtobool(val, &enabled);
- if (!rc && !enabled)
- noexec_disabled = 1;
- }
-
if (!strcmp(param, "facilities") && val)
modify_fac_list(val);
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index b9681cb22753..d3e48bd9c394 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -53,10 +53,8 @@ static void detect_facilities(void)
}
if (test_facility(78))
machine.has_edat2 = 1;
- if (!noexec_disabled && test_facility(130)) {
+ if (test_facility(130))
machine.has_nx = 1;
- __ctl_set_bit(0, 20);
- }
}
static void setup_lpp(void)
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index c67f59db7a51..01257ce3b89c 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -287,7 +287,9 @@ static void pgtable_pte_populate(pmd_t *pmd, unsigned long addr, unsigned long e
if (kasan_pte_populate_zero_shadow(pte, mode))
continue;
entry = __pte(_pa(addr, PAGE_SIZE, mode));
- entry = set_pte_bit(entry, PAGE_KERNEL_EXEC);
+ entry = set_pte_bit(entry, PAGE_KERNEL);
+ if (!machine.has_nx)
+ entry = clear_pte_bit(entry, __pgprot(_PAGE_NOEXEC));
set_pte(pte, entry);
pages++;
}
@@ -311,7 +313,9 @@ static void pgtable_pmd_populate(pud_t *pud, unsigned long addr, unsigned long e
continue;
if (can_large_pmd(pmd, addr, next)) {
entry = __pmd(_pa(addr, _SEGMENT_SIZE, mode));
- entry = set_pmd_bit(entry, SEGMENT_KERNEL_EXEC);
+ entry = set_pmd_bit(entry, SEGMENT_KERNEL);
+ if (!machine.has_nx)
+ entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
set_pmd(pmd, entry);
pages++;
continue;
@@ -342,7 +346,9 @@ static void pgtable_pud_populate(p4d_t *p4d, unsigned long addr, unsigned long e
continue;
if (can_large_pud(pud, addr, next)) {
entry = __pud(_pa(addr, _REGION3_SIZE, mode));
- entry = set_pud_bit(entry, REGION3_KERNEL_EXEC);
+ entry = set_pud_bit(entry, REGION3_KERNEL);
+ if (!machine.has_nx)
+ entry = clear_pud_bit(entry, __pgprot(_REGION_ENTRY_NOEXEC));
set_pud(pud, entry);
pages++;
continue;
diff --git a/arch/s390/configs/btf.config b/arch/s390/configs/btf.config
index 39227b4511af..eb7f84f5925c 100644
--- a/arch/s390/configs/btf.config
+++ b/arch/s390/configs/btf.config
@@ -1 +1,2 @@
+# Help: Enable BTF debug info
CONFIG_DEBUG_INFO_BTF=y
diff --git a/arch/s390/configs/kasan.config b/arch/s390/configs/kasan.config
index 700a8b25c3ff..84c2b551e992 100644
--- a/arch/s390/configs/kasan.config
+++ b/arch/s390/configs/kasan.config
@@ -1,3 +1,4 @@
+# Help: Enable KASan for debugging
CONFIG_KASAN=y
CONFIG_KASAN_INLINE=y
CONFIG_KASAN_VMALLOC=y
diff --git a/arch/s390/include/asm/airq.h b/arch/s390/include/asm/airq.h
index e82e5626e139..c4c28c2609a5 100644
--- a/arch/s390/include/asm/airq.h
+++ b/arch/s390/include/asm/airq.h
@@ -18,7 +18,6 @@ struct airq_struct {
struct hlist_node list; /* Handler queueing. */
void (*handler)(struct airq_struct *airq, struct tpi_info *tpi_info);
u8 *lsi_ptr; /* Local-Summary-Indicator pointer */
- u8 lsi_mask; /* Local-Summary-Indicator mask */
u8 isc; /* Interrupt-subclass */
u8 flags;
};
diff --git a/arch/s390/include/asm/dma.h b/arch/s390/include/asm/dma.h
index c260adb25997..7fe3e31956d7 100644
--- a/arch/s390/include/asm/dma.h
+++ b/arch/s390/include/asm/dma.h
@@ -9,6 +9,6 @@
* to DMA. It _is_ used for the s390 memory zone split at 2GB caused
* by the 31 bit heritage.
*/
-#define MAX_DMA_ADDRESS 0x80000000
+#define MAX_DMA_ADDRESS __va(0x80000000)
#endif /* _ASM_S390_DMA_H */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 91bfecb91321..427f9528a7b6 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -817,6 +817,8 @@ struct kvm_s390_cpu_model {
__u64 *fac_list;
u64 cpuid;
unsigned short ibc;
+ /* subset of available UV-features for pv-guests enabled by user space */
+ struct kvm_s390_vm_cpu_uv_feat uv_feat_guest;
};
typedef int (*crypto_hook)(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/include/asm/sections.h b/arch/s390/include/asm/sections.h
index 3fecaa4e8b74..0486e6ef62bf 100644
--- a/arch/s390/include/asm/sections.h
+++ b/arch/s390/include/asm/sections.h
@@ -23,7 +23,7 @@
*/
#define __bootdata_preserved(var) __section(".boot.preserved.data." #var) var
-extern unsigned long __samode31, __eamode31;
-extern unsigned long __stext_amode31, __etext_amode31;
+extern char *__samode31, *__eamode31;
+extern char *__stext_amode31, *__etext_amode31;
#endif
diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h
index 7a3eefd7a242..06fbabe2f66c 100644
--- a/arch/s390/include/asm/set_memory.h
+++ b/arch/s390/include/asm/set_memory.h
@@ -24,43 +24,41 @@ enum {
#define SET_MEMORY_INV BIT(_SET_MEMORY_INV_BIT)
#define SET_MEMORY_DEF BIT(_SET_MEMORY_DEF_BIT)
-int __set_memory(unsigned long addr, int numpages, unsigned long flags);
-
-static inline int set_memory_ro(unsigned long addr, int numpages)
-{
- return __set_memory(addr, numpages, SET_MEMORY_RO);
-}
-
-static inline int set_memory_rw(unsigned long addr, int numpages)
-{
- return __set_memory(addr, numpages, SET_MEMORY_RW);
-}
-
-static inline int set_memory_nx(unsigned long addr, int numpages)
-{
- return __set_memory(addr, numpages, SET_MEMORY_NX);
-}
-
-static inline int set_memory_x(unsigned long addr, int numpages)
-{
- return __set_memory(addr, numpages, SET_MEMORY_X);
-}
+int __set_memory(unsigned long addr, unsigned long numpages, unsigned long flags);
#define set_memory_rox set_memory_rox
-static inline int set_memory_rox(unsigned long addr, int numpages)
-{
- return __set_memory(addr, numpages, SET_MEMORY_RO | SET_MEMORY_X);
-}
-static inline int set_memory_rwnx(unsigned long addr, int numpages)
-{
- return __set_memory(addr, numpages, SET_MEMORY_RW | SET_MEMORY_NX);
+/*
+ * Generate two variants of each set_memory() function:
+ *
+ * set_memory_yy(unsigned long addr, int numpages);
+ * __set_memory_yy(void *start, void *end);
+ *
+ * The second variant exists for convenience: it avoids the usual
+ * (unsigned long) casts and, unlike the first variant, it can also be
+ * used for areas larger than 8TB, which may happen during memory
+ * initialization.
+ */
+#define __SET_MEMORY_FUNC(fname, flags) \
+static inline int fname(unsigned long addr, int numpages) \
+{ \
+ return __set_memory(addr, numpages, (flags)); \
+} \
+ \
+static inline int __##fname(void *start, void *end) \
+{ \
+ unsigned long numpages; \
+ \
+ numpages = (end - start) >> PAGE_SHIFT; \
+ return __set_memory((unsigned long)start, numpages, (flags)); \
}
-static inline int set_memory_4k(unsigned long addr, int numpages)
-{
- return __set_memory(addr, numpages, SET_MEMORY_4K);
-}
+__SET_MEMORY_FUNC(set_memory_ro, SET_MEMORY_RO)
+__SET_MEMORY_FUNC(set_memory_rw, SET_MEMORY_RW)
+__SET_MEMORY_FUNC(set_memory_nx, SET_MEMORY_NX)
+__SET_MEMORY_FUNC(set_memory_x, SET_MEMORY_X)
+__SET_MEMORY_FUNC(set_memory_rox, SET_MEMORY_RO | SET_MEMORY_X)
+__SET_MEMORY_FUNC(set_memory_rwnx, SET_MEMORY_RW | SET_MEMORY_NX)
+__SET_MEMORY_FUNC(set_memory_4k, SET_MEMORY_4K)
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
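
For readability, this is what one invocation of the macro expands to (a mechanical sketch of __SET_MEMORY_FUNC(set_memory_ro, SET_MEMORY_RO)):

static inline int set_memory_ro(unsigned long addr, int numpages)
{
	return __set_memory(addr, numpages, (SET_MEMORY_RO));
}

static inline int __set_memory_ro(void *start, void *end)
{
	unsigned long numpages;

	numpages = (end - start) >> PAGE_SHIFT;
	return __set_memory((unsigned long)start, numpages, (SET_MEMORY_RO));
}

The double-underscore variant is what the later mm changes in this series use, e.g. __set_memory_ro(__start_ro_after_init, __end_ro_after_init).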
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index b30fe91166e3..25cadc2b9cff 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -72,7 +72,6 @@ extern unsigned int zlib_dfltcc_support;
#define ZLIB_DFLTCC_INFLATE_ONLY 3
#define ZLIB_DFLTCC_FULL_DEBUG 4
-extern int noexec_disabled;
extern unsigned long ident_map_size;
extern unsigned long max_mappable;
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index d2cd42bb2c26..0e7bd3873907 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -99,6 +99,8 @@ enum uv_cmds_inst {
enum uv_feat_ind {
BIT_UV_FEAT_MISC = 0,
BIT_UV_FEAT_AIV = 1,
+ BIT_UV_FEAT_AP = 4,
+ BIT_UV_FEAT_AP_INTR = 5,
};
struct uv_cb_header {
@@ -159,7 +161,15 @@ struct uv_cb_cgc {
u64 guest_handle;
u64 conf_base_stor_origin;
u64 conf_virt_stor_origin;
- u64 reserved30;
+ u8 reserved30[6];
+ union {
+ struct {
+ u16 : 14;
+ u16 ap_instr_intr : 1;
+ u16 ap_allow_instr : 1;
+ };
+ u16 raw;
+ } flags;
u64 guest_stor_origin;
u64 guest_stor_len;
u64 guest_sca;
@@ -397,6 +407,13 @@ struct uv_info {
extern struct uv_info uv_info;
+static inline bool uv_has_feature(u8 feature_bit)
+{
+ if (feature_bit >= sizeof(uv_info.uv_feature_indications) * 8)
+ return false;
+ return test_bit_inv(feature_bit, &uv_info.uv_feature_indications);
+}
+
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
extern int prot_virt_guest;
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index a73cf01a1606..abe926d43cbe 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -159,6 +159,22 @@ struct kvm_s390_vm_cpu_subfunc {
__u8 reserved[1728];
};
+#define KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST 6
+#define KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST 7
+
+#define KVM_S390_VM_CPU_UV_FEAT_NR_BITS 64
+struct kvm_s390_vm_cpu_uv_feat {
+ union {
+ struct {
+ __u64 : 4;
+ __u64 ap : 1; /* bit 4 */
+ __u64 ap_intr : 1; /* bit 5 */
+ __u64 : 58;
+ };
+ __u64 feat;
+ };
+};
+
/* kvm attributes for crypto */
#define KVM_S390_VM_CRYPTO_ENABLE_AES_KW 0
#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1
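
These new attributes sit in the existing KVM_S390_VM_CPU_MODEL group and are reached through KVM_GET_DEVICE_ATTR/KVM_SET_DEVICE_ATTR on the VM fd. A hypothetical userspace sketch (assuming <sys/ioctl.h>, <linux/kvm.h> and this header; error handling reduced to return codes):

/* Query the host-supported guest UV features and, if AP is offered,
 * enable it for the guest before any vCPU is created. */
static int enable_uv_ap(int vm_fd)
{
	struct kvm_s390_vm_cpu_uv_feat feat = {};
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CPU_MODEL,
		.attr  = KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST,
		.addr  = (unsigned long)&feat,
	};

	if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr))
		return -1;
	if (!feat.ap)
		return -1;	/* ultravisor does not offer AP for PV guests */

	attr.attr = KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST;
	/* Fails with EBUSY once vCPUs exist (see kvm_s390_set_uv_feat()). */
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}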
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 2dd5976a55ac..442ce0489e1a 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -44,7 +44,6 @@ early_param(#param, ignore_decompressor_param_##param)
decompressor_handled_param(mem);
decompressor_handled_param(vmalloc);
decompressor_handled_param(dfltcc);
-decompressor_handled_param(noexec);
decompressor_handled_param(facilities);
decompressor_handled_param(nokaslr);
#if IS_ENABLED(CONFIG_KVM)
@@ -233,10 +232,8 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
__ctl_set_bit(0, 17);
}
- if (test_facility(130) && !noexec_disabled) {
+ if (test_facility(130))
S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
- __ctl_set_bit(0, 20);
- }
if (test_facility(133))
S390_lowcore.machine_flags |= MACHINE_FLAG_GS;
if (test_facility(139) && (tod_clock_base.tod >> 63)) {
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index 12a2bd4fc88c..ce65fc01671f 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -216,8 +216,8 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_SYMBOL(lowcore_ptr);
VMCOREINFO_SYMBOL(high_memory);
VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
- vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31);
- vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31);
+ vmcoreinfo_append_str("SAMODE31=%lx\n", (unsigned long)__samode31);
+ vmcoreinfo_append_str("EAMODE31=%lx\n", (unsigned long)__eamode31);
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
abs_lc = get_abs_lowcore();
abs_lc->vmcore_info = paddr_vmcoreinfo_note();
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index c744104e4a9c..de6ad0fb2328 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -97,10 +97,10 @@ EXPORT_SYMBOL(console_irq);
* relocated above 2 GB, because it has to use 31 bit addresses.
* Such code and data is part of the .amode31 section.
*/
-unsigned long __amode31_ref __samode31 = (unsigned long)&_samode31;
-unsigned long __amode31_ref __eamode31 = (unsigned long)&_eamode31;
-unsigned long __amode31_ref __stext_amode31 = (unsigned long)&_stext_amode31;
-unsigned long __amode31_ref __etext_amode31 = (unsigned long)&_etext_amode31;
+char __amode31_ref *__samode31 = _samode31;
+char __amode31_ref *__eamode31 = _eamode31;
+char __amode31_ref *__stext_amode31 = _stext_amode31;
+char __amode31_ref *__etext_amode31 = _etext_amode31;
struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;
@@ -145,7 +145,6 @@ static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
-int __bootdata(noexec_disabled);
unsigned long __bootdata_preserved(max_mappable);
unsigned long __bootdata(ident_map_size);
struct physmem_info __bootdata(physmem_info);
@@ -771,15 +770,15 @@ static void __init setup_memory(void)
static void __init relocate_amode31_section(void)
{
unsigned long amode31_size = __eamode31 - __samode31;
- long amode31_offset = physmem_info.reserved[RR_AMODE31].start - __samode31;
- long *ptr;
+ long amode31_offset, *ptr;
+ amode31_offset = physmem_info.reserved[RR_AMODE31].start - (unsigned long)__samode31;
pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
/* Move original AMODE31 section to the new one */
- memmove((void *)physmem_info.reserved[RR_AMODE31].start, (void *)__samode31, amode31_size);
+ memmove((void *)physmem_info.reserved[RR_AMODE31].start, __samode31, amode31_size);
/* Zero out the old AMODE31 section to catch invalid accesses within it */
- memset((void *)__samode31, 0, amode31_size);
+ memset(__samode31, 0, amode31_size);
/* Update all AMODE31 region references */
for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index b771f1b4cdd1..fc07bc39e698 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -258,7 +258,7 @@ static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_str
* shared page from a different protected VM will automatically also
* transfer its ownership.
*/
- if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications))
+ if (uv_has_feature(BIT_UV_FEAT_MISC))
return false;
if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
return false;
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 341abafb96e4..b16352083ff9 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -228,6 +228,21 @@ static int handle_itdb(struct kvm_vcpu *vcpu)
#define per_event(vcpu) (vcpu->arch.sie_block->iprcc & PGM_PER)
+static bool should_handle_per_event(const struct kvm_vcpu *vcpu)
+{
+ if (!guestdbg_enabled(vcpu) || !per_event(vcpu))
+ return false;
+ if (guestdbg_sstep_enabled(vcpu) &&
+ vcpu->arch.sie_block->iprcc != PGM_PER) {
+ /*
+ * __vcpu_run() will exit after delivering the concurrently
+ * indicated condition.
+ */
+ return false;
+ }
+ return true;
+}
+
static int handle_prog(struct kvm_vcpu *vcpu)
{
psw_t psw;
@@ -242,7 +257,7 @@ static int handle_prog(struct kvm_vcpu *vcpu)
if (kvm_s390_pv_cpu_is_protected(vcpu))
return -EOPNOTSUPP;
- if (guestdbg_enabled(vcpu) && per_event(vcpu)) {
+ if (should_handle_per_event(vcpu)) {
rc = kvm_s390_handle_per_event(vcpu);
if (rc)
return rc;
@@ -571,6 +586,19 @@ static int handle_pv_notification(struct kvm_vcpu *vcpu)
return handle_instruction(vcpu);
}
+static bool should_handle_per_ifetch(const struct kvm_vcpu *vcpu, int rc)
+{
+ /* Process PER, also if the instruction is processed in user space. */
+ if (!(vcpu->arch.sie_block->icptstatus & 0x02))
+ return false;
+ if (rc != 0 && rc != -EOPNOTSUPP)
+ return false;
+ if (guestdbg_sstep_enabled(vcpu) && vcpu->arch.local_int.pending_irqs)
+ /* __vcpu_run() will exit after delivering the interrupt. */
+ return false;
+ return true;
+}
+
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
{
int rc, per_rc = 0;
@@ -605,8 +633,8 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
rc = handle_partial_execution(vcpu);
break;
case ICPT_KSS:
- rc = kvm_s390_skey_check_enable(vcpu);
- break;
+ /* Instruction will be redriven, skip the PER check. */
+ return kvm_s390_skey_check_enable(vcpu);
case ICPT_MCHKREQ:
case ICPT_INT_ENABLE:
/*
@@ -633,9 +661,7 @@ int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
return -EOPNOTSUPP;
}
- /* process PER, also if the instruction is processed in user space */
- if (vcpu->arch.sie_block->icptstatus & 0x02 &&
- (!rc || rc == -EOPNOTSUPP))
+ if (should_handle_per_ifetch(vcpu, rc))
per_rc = kvm_s390_handle_per_ifetch_icpt(vcpu);
return per_rc ? per_rc : rc;
}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 9bd0a873f3b1..c1b47d608a2b 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1392,6 +1392,7 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
int rc = 0;
+ bool delivered = false;
unsigned long irq_type;
unsigned long irqs;
@@ -1465,6 +1466,19 @@ int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
clear_bit(irq_type, &li->pending_irqs);
}
+ delivered |= !rc;
+ }
+
+ /*
+ * We delivered at least one interrupt and modified the PC. Force a
+ * singlestep event now.
+ */
+ if (delivered && guestdbg_sstep_enabled(vcpu)) {
+ struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
+
+ debug_exit->addr = vcpu->arch.sie_block->gpsw.addr;
+ debug_exit->type = KVM_SINGLESTEP;
+ vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
}
set_intercept_indicators(vcpu);
@@ -3398,7 +3412,6 @@ static void gib_alert_irq_handler(struct airq_struct *airq,
static struct airq_struct gib_alert_irq = {
.handler = gib_alert_irq_handler,
- .lsi_ptr = &gib_alert_irq.lsi_mask,
};
void kvm_s390_gib_destroy(void)
@@ -3438,6 +3451,8 @@ int __init kvm_s390_gib_init(u8 nisc)
rc = -EIO;
goto out_free_gib;
}
+ /* adapter interrupts used for AP (applicable here) don't use the LSI */
+ *gib_alert_irq.lsi_ptr = 0xff;
gib->nisc = nisc;
gib_origin = virt_to_phys(gib);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index d1e768bcfe1d..b3f17e014cab 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1531,6 +1531,39 @@ static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
return 0;
}
+#define KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK \
+( \
+ ((struct kvm_s390_vm_cpu_uv_feat){ \
+ .ap = 1, \
+ .ap_intr = 1, \
+ }) \
+ .feat \
+)
+
+static int kvm_s390_set_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_s390_vm_cpu_uv_feat __user *ptr = (void __user *)attr->addr;
+ unsigned long data, filter;
+
+ filter = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
+ if (get_user(data, &ptr->feat))
+ return -EFAULT;
+ if (!bitmap_subset(&data, &filter, KVM_S390_VM_CPU_UV_FEAT_NR_BITS))
+ return -EINVAL;
+
+ mutex_lock(&kvm->lock);
+ if (kvm->created_vcpus) {
+ mutex_unlock(&kvm->lock);
+ return -EBUSY;
+ }
+ kvm->arch.model.uv_feat_guest.feat = data;
+ mutex_unlock(&kvm->lock);
+
+ VM_EVENT(kvm, 3, "SET: guest UV-feat: 0x%16.16lx", data);
+
+ return 0;
+}
+
static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret = -ENXIO;
@@ -1545,6 +1578,9 @@ static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
ret = kvm_s390_set_processor_subfunc(kvm, attr);
break;
+ case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
+ ret = kvm_s390_set_uv_feat(kvm, attr);
+ break;
}
return ret;
}
@@ -1777,6 +1813,33 @@ static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
return 0;
}
+static int kvm_s390_get_processor_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
+ unsigned long feat = kvm->arch.model.uv_feat_guest.feat;
+
+ if (put_user(feat, &dst->feat))
+ return -EFAULT;
+ VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);
+
+ return 0;
+}
+
+static int kvm_s390_get_machine_uv_feat(struct kvm *kvm, struct kvm_device_attr *attr)
+{
+ struct kvm_s390_vm_cpu_uv_feat __user *dst = (void __user *)attr->addr;
+ unsigned long feat;
+
+ BUILD_BUG_ON(sizeof(*dst) != sizeof(uv_info.uv_feature_indications));
+
+ feat = uv_info.uv_feature_indications & KVM_S390_VM_CPU_UV_FEAT_GUEST_MASK;
+ if (put_user(feat, &dst->feat))
+ return -EFAULT;
+ VM_EVENT(kvm, 3, "GET: guest UV-feat: 0x%16.16lx", feat);
+
+ return 0;
+}
+
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
int ret = -ENXIO;
@@ -1800,6 +1863,12 @@ static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
ret = kvm_s390_get_machine_subfunc(kvm, attr);
break;
+ case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
+ ret = kvm_s390_get_processor_uv_feat(kvm, attr);
+ break;
+ case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
+ ret = kvm_s390_get_machine_uv_feat(kvm, attr);
+ break;
}
return ret;
}
@@ -1952,6 +2021,8 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
case KVM_S390_VM_CPU_MACHINE_FEAT:
case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
+ case KVM_S390_VM_CPU_MACHINE_UV_FEAT_GUEST:
+ case KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST:
ret = 0;
break;
default:
@@ -2406,7 +2477,7 @@ static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
struct kvm_vcpu *vcpu;
/* Disable the GISA if the ultravisor does not support AIV. */
- if (!test_bit_inv(BIT_UV_FEAT_AIV, &uv_info.uv_feature_indications))
+ if (!uv_has_feature(BIT_UV_FEAT_AIV))
kvm_s390_gisa_disable(kvm);
kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -3296,6 +3367,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
kvm->arch.model.ibc = sclp.ibc & 0x0fff;
+ kvm->arch.model.uv_feat_guest.feat = 0;
+
kvm_s390_crypto_init(kvm);
if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
@@ -4611,7 +4684,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
if (!kvm_is_ucontrol(vcpu->kvm)) {
rc = kvm_s390_deliver_pending_interrupts(vcpu);
- if (rc)
+ if (rc || guestdbg_exit_pending(vcpu))
return rc;
}
@@ -4738,7 +4811,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
do {
rc = vcpu_pre_run(vcpu);
- if (rc)
+ if (rc || guestdbg_exit_pending(vcpu))
break;
kvm_vcpu_srcu_read_unlock(vcpu);
@@ -5383,6 +5456,7 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
+ int rc;
switch (ioctl) {
case KVM_S390_IRQ: {
@@ -5390,7 +5464,8 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
return -EFAULT;
- return kvm_s390_inject_vcpu(vcpu, &s390irq);
+ rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
+ break;
}
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
@@ -5400,10 +5475,25 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
return -EFAULT;
if (s390int_to_s390irq(&s390int, &s390irq))
return -EINVAL;
- return kvm_s390_inject_vcpu(vcpu, &s390irq);
+ rc = kvm_s390_inject_vcpu(vcpu, &s390irq);
+ break;
}
+ default:
+ rc = -ENOIOCTLCMD;
+ break;
}
- return -ENOIOCTLCMD;
+
+ /*
+ * To simplify single stepping of userspace-emulated instructions,
+ * KVM_EXIT_S390_SIEIC exit sets KVM_GUESTDBG_EXIT_PENDING (see
+ * should_handle_per_ifetch()). However, if userspace emulation injects
+ * an interrupt, it needs to be cleared, so that KVM_EXIT_DEBUG happens
+ * after (and not before) the interrupt delivery.
+ */
+ if (!rc)
+ vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
+
+ return rc;
}
static int kvm_s390_handle_pv_vcpu_dump(struct kvm_vcpu *vcpu,
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index 8d3f39a8a11e..75e81ba26d04 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -285,7 +285,8 @@ static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc)
WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM FAST: rc %x rrc %x",
uvcb.header.rc, uvcb.header.rrc);
- WARN_ONCE(cc, "protvirt destroy vm fast failed handle %llx rc %x rrc %x",
+ WARN_ONCE(cc && uvcb.header.rc != 0x104,
+ "protvirt destroy vm fast failed handle %llx rc %x rrc %x",
kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc);
/* Intended memory leak on "impossible" error */
if (!cc)
@@ -575,12 +576,14 @@ int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
uvcb.conf_base_stor_origin =
virt_to_phys((void *)kvm->arch.pv.stor_base);
uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;
+ uvcb.flags.ap_allow_instr = kvm->arch.model.uv_feat_guest.ap;
+ uvcb.flags.ap_instr_intr = kvm->arch.model.uv_feat_guest.ap_intr;
cc = uv_call_sched(0, (u64)&uvcb);
*rc = uvcb.header.rc;
*rrc = uvcb.header.rrc;
- KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x",
- uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc);
+ KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x flags %04x",
+ uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc, uvcb.flags.raw);
/* Outputs */
kvm->arch.pv.handle = uvcb.guest_handle;
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index afa5db750d92..b51666967aa1 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -290,8 +290,8 @@ static int pt_dump_init(void)
max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31);
address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
- address_markers[AMODE31_START_NR].start_address = __samode31;
- address_markers[AMODE31_END_NR].start_address = __eamode31;
+ address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
+ address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31;
address_markers[MODULES_NR].start_address = MODULES_VADDR;
address_markers[MODULES_END_NR].start_address = MODULES_END;
address_markers[ABS_LOWCORE_NR].start_address = __abs_lowcore;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 099c4824dd8a..b678295931c3 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -598,7 +598,7 @@ void do_secure_storage_access(struct pt_regs *regs)
* reliable without the misc UV feature so we need to check
* for that as well.
*/
- if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) &&
+ if (uv_has_feature(BIT_UV_FEAT_MISC) &&
!test_bit_inv(61, &regs->int_parm_long)) {
/*
* When this happens, userspace did something that it
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 8d94e29adcdb..8b94d2212d33 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -98,7 +98,7 @@ void __init paging_init(void)
sparse_init();
zone_dma_bits = 31;
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
- max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
+ max_zone_pfns[ZONE_DMA] = virt_to_pfn(MAX_DMA_ADDRESS);
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init(max_zone_pfns);
}
@@ -107,7 +107,7 @@ void mark_rodata_ro(void)
{
unsigned long size = __end_ro_after_init - __start_ro_after_init;
- set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
+ __set_memory_ro(__start_ro_after_init, __end_ro_after_init);
pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
debug_checkwx();
}
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index e5ec76271b16..b87e96c64b61 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -373,7 +373,7 @@ static int change_page_attr_alias(unsigned long addr, unsigned long end,
return rc;
}
-int __set_memory(unsigned long addr, int numpages, unsigned long flags)
+int __set_memory(unsigned long addr, unsigned long numpages, unsigned long flags)
{
unsigned long end;
int rc;
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index e44243b9c0a4..6957d2ed97bf 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -5,7 +5,6 @@
#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
-#include <linux/kasan.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
@@ -291,14 +290,9 @@ out:
static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
- const unsigned long end = start + PUD_SIZE;
pmd_t *pmd;
int i;
- /* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
- if (end > VMALLOC_START)
- return;
-
pmd = pmd_offset(pud, start);
for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
if (!pmd_none(*pmd))
@@ -363,14 +357,9 @@ out:
static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
- const unsigned long end = start + P4D_SIZE;
pud_t *pud;
int i;
- /* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
- if (end > VMALLOC_START)
- return;
-
pud = pud_offset(p4d, start);
for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
if (!pud_none(*pud))
@@ -413,14 +402,9 @@ out:
static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
- const unsigned long end = start + PGDIR_SIZE;
p4d_t *p4d;
int i;
- /* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
- if (end > VMALLOC_START)
- return;
-
p4d = p4d_offset(pgd, start);
for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
if (!p4d_none(*p4d))
@@ -440,6 +424,9 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add,
if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
return -EINVAL;
+ /* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
+ if (WARN_ON_ONCE(end > VMALLOC_START))
+ return -EINVAL;
for (addr = start; addr < end; addr = next) {
next = pgd_addr_end(addr, end);
pgd = pgd_offset_k(addr);
@@ -650,122 +637,29 @@ void vmem_unmap_4k_page(unsigned long addr)
mutex_unlock(&vmem_mutex);
}
-static int __init memblock_region_cmp(const void *a, const void *b)
-{
- const struct memblock_region *r1 = a;
- const struct memblock_region *r2 = b;
-
- if (r1->base < r2->base)
- return -1;
- if (r1->base > r2->base)
- return 1;
- return 0;
-}
-
-static void __init memblock_region_swap(void *a, void *b, int size)
-{
- swap(*(struct memblock_region *)a, *(struct memblock_region *)b);
-}
-
-#ifdef CONFIG_KASAN
-#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
-
-static inline int set_memory_kasan(unsigned long start, unsigned long end)
-{
- start = PAGE_ALIGN_DOWN(__sha(start));
- end = PAGE_ALIGN(__sha(end));
- return set_memory_rwnx(start, (end - start) >> PAGE_SHIFT);
-}
-#endif
-
-/*
- * map whole physical memory to virtual memory (identity mapping)
- * we reserve enough space in the vmalloc area for vmemmap to hotplug
- * additional memory segments.
- */
void __init vmem_map_init(void)
{
- struct memblock_region memory_rwx_regions[] = {
- {
- .base = 0,
- .size = sizeof(struct lowcore),
- .flags = MEMBLOCK_NONE,
-#ifdef CONFIG_NUMA
- .nid = NUMA_NO_NODE,
-#endif
- },
- {
- .base = __pa(_stext),
- .size = _etext - _stext,
- .flags = MEMBLOCK_NONE,
-#ifdef CONFIG_NUMA
- .nid = NUMA_NO_NODE,
-#endif
- },
- {
- .base = __pa(_sinittext),
- .size = _einittext - _sinittext,
- .flags = MEMBLOCK_NONE,
-#ifdef CONFIG_NUMA
- .nid = NUMA_NO_NODE,
-#endif
- },
- {
- .base = __stext_amode31,
- .size = __etext_amode31 - __stext_amode31,
- .flags = MEMBLOCK_NONE,
-#ifdef CONFIG_NUMA
- .nid = NUMA_NO_NODE,
-#endif
- },
- };
- struct memblock_type memory_rwx = {
- .regions = memory_rwx_regions,
- .cnt = ARRAY_SIZE(memory_rwx_regions),
- .max = ARRAY_SIZE(memory_rwx_regions),
- };
- phys_addr_t base, end;
- u64 i;
-
+ __set_memory_rox(_stext, _etext);
+ __set_memory_ro(_etext, __end_rodata);
+ __set_memory_rox(_sinittext, _einittext);
+ __set_memory_rox(__stext_amode31, __etext_amode31);
/*
- * Set RW+NX attribute on all memory, except regions enumerated with
- * memory_rwx exclude type. These regions need different attributes,
- * which are enforced afterwards.
- *
- * __for_each_mem_range() iterate and exclude types should be sorted.
- * The relative location of _stext and _sinittext is hardcoded in the
- * linker script. However a location of __stext_amode31 and the kernel
- * image itself are chosen dynamically. Thus, sort the exclude type.
+ * If the BEAR-enhancement facility is not installed the first
+ * prefix page is used to return to the previous context with
+ * an LPSWE instruction and therefore must be executable.
*/
- sort(&memory_rwx_regions,
- ARRAY_SIZE(memory_rwx_regions), sizeof(memory_rwx_regions[0]),
- memblock_region_cmp, memblock_region_swap);
- __for_each_mem_range(i, &memblock.memory, &memory_rwx,
- NUMA_NO_NODE, MEMBLOCK_NONE, &base, &end, NULL) {
- set_memory_rwnx((unsigned long)__va(base),
- (end - base) >> PAGE_SHIFT);
+ if (!static_key_enabled(&cpu_has_bear))
+ set_memory_x(0, 1);
+ if (debug_pagealloc_enabled()) {
+ /*
+ * Use RELOC_HIDE() as long as __va(0) translates to NULL,
+ * since performing pointer arithmetic on a NULL pointer
+ * has undefined behavior and generates compiler warnings.
+ */
+ __set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size));
}
-
-#ifdef CONFIG_KASAN
- for_each_mem_range(i, &base, &end)
- set_memory_kasan(base, end);
-#endif
- set_memory_rox((unsigned long)_stext,
- (unsigned long)(_etext - _stext) >> PAGE_SHIFT);
- set_memory_ro((unsigned long)_etext,
- (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT);
- set_memory_rox((unsigned long)_sinittext,
- (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
- set_memory_rox(__stext_amode31,
- (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT);
-
- /* lowcore must be executable for LPSWE */
- if (static_key_enabled(&cpu_has_bear))
- set_memory_nx(0, 1);
- set_memory_nx(PAGE_SIZE, 1);
- if (debug_pagealloc_enabled())
- set_memory_4k(0, ident_map_size >> PAGE_SHIFT);
-
+ if (MACHINE_HAS_NX)
+ ctl_set_bit(0, 20);
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
}
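The vmem_map_init() rework above moves from page-count based set_memory_*() calls to start/end-address __set_memory_*() helpers, so callers stop open-coding the shift arithmetic. As a hedged illustration only (the helper name and exact rounding are assumptions, not the patch's implementation), the conversion the old callers performed looked roughly like this:
#include <linux/align.h>
#include <linux/mm.h>
/* Illustrative only: turn a [start, end) address range into the page count
 * expected by the older set_memory_ro(addr, numpages) style of API. */
static unsigned long range_to_numpages(unsigned long start, unsigned long end)
{
	return (PAGE_ALIGN(end) - ALIGN_DOWN(start, PAGE_SIZE)) >> PAGE_SHIFT;
}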
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 5e9371fbf3d5..de2fb12120d2 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -2088,6 +2088,7 @@ struct bpf_tramp_jit {
*/
int r14_off; /* Offset of saved %r14 */
int run_ctx_off; /* Offset of struct bpf_tramp_run_ctx */
+ int tccnt_off; /* Offset of saved tailcall counter */
int do_fexit; /* do_fexit: label */
};
@@ -2258,12 +2259,16 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
tjit->r14_off = alloc_stack(tjit, sizeof(u64));
tjit->run_ctx_off = alloc_stack(tjit,
sizeof(struct bpf_tramp_run_ctx));
+ tjit->tccnt_off = alloc_stack(tjit, sizeof(u64));
/* The caller has already reserved STACK_FRAME_OVERHEAD bytes. */
tjit->stack_size -= STACK_FRAME_OVERHEAD;
tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;
/* aghi %r15,-stack_size */
EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
+ /* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
+ _EMIT6(0xd203f000 | tjit->tccnt_off,
+ 0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
/* stmg %r2,%rN,fwd_reg_args_off(%r15) */
if (nr_reg_args)
EMIT6_DISP_LH(0xeb000000, 0x0024, REG_2,
@@ -2400,6 +2405,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
(nr_stack_args * sizeof(u64) - 1) << 16 |
tjit->stack_args_off,
0xf000 | tjit->orig_stack_args_off);
+ /* mvc STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
+ _EMIT6(0xd203f000 | STK_OFF_TCCNT, 0xf000 | tjit->tccnt_off);
/* lgr %r1,%r8 */
EMIT4(0xb9040000, REG_1, REG_8);
/* %r1() */
@@ -2456,6 +2463,9 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
if (flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET))
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_2, REG_0, REG_15,
tjit->retval_off);
+ /* mvc stack_size+STK_OFF_TCCNT(4,%r15),tccnt_off(%r15) */
+ _EMIT6(0xd203f000 | (tjit->stack_size + STK_OFF_TCCNT),
+ 0xf000 | tjit->tccnt_off);
/* aghi %r15,stack_size */
EMIT4_IMM(0xa70b0000, REG_15, tjit->stack_size);
/* Emit an expoline for the following indirect jump. */
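The three added MVC instructions copy the 4-byte BPF tail-call counter between the caller's frame and the trampoline's frame so nested programs keep the correct tail-call limit. A hedged C analogy of that data flow (the trampoline really emits s390 machine code; the function and parameter names below are illustrative):
#include <linux/string.h>
#include <linux/types.h>
/* Illustrative analogy of the emitted MVC instructions, not real JIT output. */
static void mirror_tail_call_counter(u8 *trampoline_frame, size_t tccnt_off,
				     u8 *caller_frame, size_t stk_off_tccnt)
{
	/* prologue: stash the caller's counter inside the trampoline frame */
	memcpy(trampoline_frame + tccnt_off, caller_frame + stk_off_tccnt, 4);
	/* before the original call and again in the epilogue: copy it back
	 * to the standard stack slot where BPF code expects to find it */
	memcpy(caller_frame + stk_off_tccnt, trampoline_frame + tccnt_off, 4);
}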
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 595ca0be286b..43b0ae4c2c21 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -2,6 +2,5 @@
generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
generic-y += agp.h
-generic-y += export.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
diff --git a/arch/sparc/include/asm/ide.h b/arch/sparc/include/asm/ide.h
deleted file mode 100644
index 904cc6cbc155..000000000000
--- a/arch/sparc/include/asm/ide.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* ide.h: SPARC PCI specific IDE glue.
- *
- * Copyright (C) 1997 David S. Miller (davem@davemloft.net)
- * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
- * Adaptation from sparc64 version to sparc by Pete Zaitcev.
- */
-
-#ifndef _SPARC_IDE_H
-#define _SPARC_IDE_H
-
-#ifdef __KERNEL__
-
-#include <asm/io.h>
-#ifdef CONFIG_SPARC64
-#include <asm/spitfire.h>
-#include <asm/cacheflush.h>
-#include <asm/page.h>
-#else
-#include <linux/pgtable.h>
-#include <asm/psr.h>
-#endif
-
-#define __ide_insl(data_reg, buffer, wcount) \
- __ide_insw(data_reg, buffer, (wcount)<<1)
-#define __ide_outsl(data_reg, buffer, wcount) \
- __ide_outsw(data_reg, buffer, (wcount)<<1)
-
-/* On sparc, I/O ports and MMIO registers are accessed identically. */
-#define __ide_mm_insw __ide_insw
-#define __ide_mm_insl __ide_insl
-#define __ide_mm_outsw __ide_outsw
-#define __ide_mm_outsl __ide_outsl
-
-static inline void __ide_insw(void __iomem *port, void *dst, u32 count)
-{
-#if defined(CONFIG_SPARC64) && defined(DCACHE_ALIASING_POSSIBLE)
- unsigned long end = (unsigned long)dst + (count << 1);
-#endif
- u16 *ps = dst;
- u32 *pi;
-
- if(((unsigned long)ps) & 0x2) {
- *ps++ = __raw_readw(port);
- count--;
- }
- pi = (u32 *)ps;
- while(count >= 2) {
- u32 w;
-
- w = __raw_readw(port) << 16;
- w |= __raw_readw(port);
- *pi++ = w;
- count -= 2;
- }
- ps = (u16 *)pi;
- if(count)
- *ps++ = __raw_readw(port);
-
-#if defined(CONFIG_SPARC64) && defined(DCACHE_ALIASING_POSSIBLE)
- __flush_dcache_range((unsigned long)dst, end);
-#endif
-}
-
-static inline void __ide_outsw(void __iomem *port, const void *src, u32 count)
-{
-#if defined(CONFIG_SPARC64) && defined(DCACHE_ALIASING_POSSIBLE)
- unsigned long end = (unsigned long)src + (count << 1);
-#endif
- const u16 *ps = src;
- const u32 *pi;
-
- if(((unsigned long)src) & 0x2) {
- __raw_writew(*ps++, port);
- count--;
- }
- pi = (const u32 *)ps;
- while(count >= 2) {
- u32 w;
-
- w = *pi++;
- __raw_writew((w >> 16), port);
- __raw_writew(w, port);
- count -= 2;
- }
- ps = (const u16 *)pi;
- if(count)
- __raw_writew(*ps, port);
-
-#if defined(CONFIG_SPARC64) && defined(DCACHE_ALIASING_POSSIBLE)
- __flush_dcache_range((unsigned long)src, end);
-#endif
-}
-
-#endif /* __KERNEL__ */
-
-#endif /* _SPARC_IDE_H */
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 8a0c3c11c9ce..587fb7841096 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -284,7 +284,7 @@ struct vio_dring_state {
struct ldc_trans_cookie cookies[VIO_MAX_RING_COOKIES];
};
-#define VIO_TAG_SIZE ((int)sizeof(struct vio_msg_tag))
+#define VIO_TAG_SIZE (sizeof(struct vio_msg_tag))
#define VIO_VCC_MTU_SIZE (LDC_PACKET_SIZE - VIO_TAG_SIZE)
struct vio_vcc {
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index a269ad2fe6df..a3fdee4cd6fa 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -8,6 +8,7 @@
* Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/pgtable.h>
@@ -30,7 +31,6 @@
#include <asm/unistd.h>
#include <asm/asmmacro.h>
-#include <asm/export.h>
#define curptr g6
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index 6044b82b9767..964c61b5cd03 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -11,6 +11,7 @@
* CompactPCI platform by Eric Brower, 1999.
*/
+#include <linux/export.h>
#include <linux/version.h>
#include <linux/init.h>
@@ -25,7 +26,6 @@
#include <asm/thread_info.h> /* TI_UWINMASK */
#include <asm/errno.h>
#include <asm/pgtable.h> /* PGDIR_SHIFT */
-#include <asm/export.h>
.data
/* The following are used with the prom_vector node-ops to figure out
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 72a5bdc833ea..cf0549134234 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -9,6 +9,7 @@
#include <linux/version.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
@@ -33,7 +34,6 @@
#include <asm/estate.h>
#include <asm/sfafsr.h>
#include <asm/unistd.h>
-#include <asm/export.h>
/* This section from _start to sparc64_boot_end should fit into
* 0x0000000000404000 to 0x0000000000408000.
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 1ea3f37fa985..529adfecd58c 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -295,7 +295,7 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_globreg(int key)
+static void sysrq_handle_globreg(u8 key)
{
trigger_all_cpu_backtrace();
}
@@ -370,7 +370,7 @@ static void pmu_snapshot_all_cpus(void)
spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}
-static void sysrq_handle_globpmu(int key)
+static void sysrq_handle_globpmu(u8 key)
{
pmu_snapshot_all_cpus();
}
diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S
index a6f4ee391897..635398ec7540 100644
--- a/arch/sparc/lib/U1memcpy.S
+++ b/arch/sparc/lib/U1memcpy.S
@@ -6,10 +6,10 @@
*/
#ifdef __KERNEL__
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
-#include <asm/export.h>
#define GLOBAL_SPARE g7
#else
#define GLOBAL_SPARE g5
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
index 9c8eb2017d5b..31a0c336c185 100644
--- a/arch/sparc/lib/VISsave.S
+++ b/arch/sparc/lib/VISsave.S
@@ -7,6 +7,7 @@
* Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/asi.h>
@@ -14,7 +15,6 @@
#include <asm/ptrace.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
-#include <asm/export.h>
/* On entry: %o5=current FPRS value, %g7 is callers address */
/* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */
diff --git a/arch/sparc/lib/ashldi3.S b/arch/sparc/lib/ashldi3.S
index 2d72de88af90..2a9e7c4fb260 100644
--- a/arch/sparc/lib/ashldi3.S
+++ b/arch/sparc/lib/ashldi3.S
@@ -6,8 +6,8 @@
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
ENTRY(__ashldi3)
diff --git a/arch/sparc/lib/ashrdi3.S b/arch/sparc/lib/ashrdi3.S
index 05dfda9f5005..8fd0b311722f 100644
--- a/arch/sparc/lib/ashrdi3.S
+++ b/arch/sparc/lib/ashrdi3.S
@@ -6,8 +6,8 @@
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
ENTRY(__ashrdi3)
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 8245d4a97301..4f8cab2fb9cd 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -4,10 +4,10 @@
* Copyright (C) 1999, 2007 2012 David S. Miller (davem@davemloft.net)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
-#include <asm/export.h>
.text
diff --git a/arch/sparc/lib/bitops.S b/arch/sparc/lib/bitops.S
index 9d647f977618..9c91cbb310e7 100644
--- a/arch/sparc/lib/bitops.S
+++ b/arch/sparc/lib/bitops.S
@@ -4,10 +4,10 @@
* Copyright (C) 2000, 2007 David S. Miller (davem@davemloft.net)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>
-#include <asm/export.h>
.text
diff --git a/arch/sparc/lib/blockops.S b/arch/sparc/lib/blockops.S
index 76ddd1ff6833..5b92959a4d48 100644
--- a/arch/sparc/lib/blockops.S
+++ b/arch/sparc/lib/blockops.S
@@ -5,9 +5,9 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/page.h>
-#include <asm/export.h>
/* Zero out 64 bytes of memory at (buf + offset).
* Assumes %g1 contains zero.
diff --git a/arch/sparc/lib/bzero.S b/arch/sparc/lib/bzero.S
index 87fec4cbe10c..2bfa44a6b25e 100644
--- a/arch/sparc/lib/bzero.S
+++ b/arch/sparc/lib/bzero.S
@@ -5,8 +5,8 @@
* Copyright (C) 2005 David S. Miller <davem@davemloft.net>
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 781e39b3c009..84ad709cbecb 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -14,8 +14,8 @@
* BSD4.4 portable checksum routine
*/
+#include <linux/export.h>
#include <asm/errno.h>
-#include <asm/export.h>
#define CSUM_BIGCHUNK(buf, offset, sum, t0, t1, t2, t3, t4, t5) \
ldd [buf + offset + 0x00], t0; \
diff --git a/arch/sparc/lib/checksum_64.S b/arch/sparc/lib/checksum_64.S
index 9700ef1730df..32b626f3fe4d 100644
--- a/arch/sparc/lib/checksum_64.S
+++ b/arch/sparc/lib/checksum_64.S
@@ -14,7 +14,7 @@
* BSD4.4 portable checksum routine
*/
-#include <asm/export.h>
+#include <linux/export.h>
.text
csum_partial_fix_alignment:
diff --git a/arch/sparc/lib/clear_page.S b/arch/sparc/lib/clear_page.S
index 302d3454a994..e63458194f5a 100644
--- a/arch/sparc/lib/clear_page.S
+++ b/arch/sparc/lib/clear_page.S
@@ -5,13 +5,13 @@
* Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
*/
+#include <linux/export.h>
#include <linux/pgtable.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/head.h>
-#include <asm/export.h>
/* What we used to do was lock a TLB entry into a specific
* TLB slot, clear the page with interrupts disabled, then
diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S
index 66e90bf528e2..e23e6a69ff92 100644
--- a/arch/sparc/lib/copy_in_user.S
+++ b/arch/sparc/lib/copy_in_user.S
@@ -4,9 +4,9 @@
* Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/asi.h>
-#include <asm/export.h>
#define XCC xcc
diff --git a/arch/sparc/lib/copy_page.S b/arch/sparc/lib/copy_page.S
index 5ebcfd479f4f..7a041f3ebc58 100644
--- a/arch/sparc/lib/copy_page.S
+++ b/arch/sparc/lib/copy_page.S
@@ -5,13 +5,13 @@
* Copyright (C) 1997 Jakub Jelinek (jakub@redhat.com)
*/
+#include <linux/export.h>
#include <asm/visasm.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <linux/pgtable.h>
#include <asm/spitfire.h>
#include <asm/head.h>
-#include <asm/export.h>
/* What we used to do was lock a TLB entry into a specific
* TLB slot, clear the page with interrupts disabled, then
diff --git a/arch/sparc/lib/copy_user.S b/arch/sparc/lib/copy_user.S
index 954572c78539..7bb2ef68881d 100644
--- a/arch/sparc/lib/copy_user.S
+++ b/arch/sparc/lib/copy_user.S
@@ -12,11 +12,11 @@
* Returns 0 if successful, otherwise count of bytes not copied yet
*/
+#include <linux/export.h>
#include <asm/ptrace.h>
#include <asm/asmmacro.h>
#include <asm/page.h>
#include <asm/thread_info.h>
-#include <asm/export.h>
/* Work around cpp -rob */
#define ALLOC #alloc
diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S
index d839956407a7..f968e83bc93b 100644
--- a/arch/sparc/lib/csum_copy.S
+++ b/arch/sparc/lib/csum_copy.S
@@ -4,7 +4,7 @@
* Copyright (C) 2005 David S. Miller <davem@davemloft.net>
*/
-#include <asm/export.h>
+#include <linux/export.h>
#ifdef __KERNEL__
#define GLOBAL_SPARE %g7
diff --git a/arch/sparc/lib/divdi3.S b/arch/sparc/lib/divdi3.S
index a7389409d9fa..4ba901acd572 100644
--- a/arch/sparc/lib/divdi3.S
+++ b/arch/sparc/lib/divdi3.S
@@ -5,7 +5,7 @@ This file is part of GNU CC.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.text
.align 4
.globl __divdi3
diff --git a/arch/sparc/lib/ffs.S b/arch/sparc/lib/ffs.S
index 5a11d864fa05..3a9ad8ffdfe8 100644
--- a/arch/sparc/lib/ffs.S
+++ b/arch/sparc/lib/ffs.S
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.register %g2,#scratch
diff --git a/arch/sparc/lib/fls.S b/arch/sparc/lib/fls.S
index 06b8d300bcae..ccf97fb7d8cd 100644
--- a/arch/sparc/lib/fls.S
+++ b/arch/sparc/lib/fls.S
@@ -5,8 +5,8 @@
* and onward.
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
.register %g2, #scratch
diff --git a/arch/sparc/lib/fls64.S b/arch/sparc/lib/fls64.S
index c83e22ae9586..87005b67d378 100644
--- a/arch/sparc/lib/fls64.S
+++ b/arch/sparc/lib/fls64.S
@@ -5,8 +5,8 @@
* and onward.
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
.register %g2, #scratch
diff --git a/arch/sparc/lib/hweight.S b/arch/sparc/lib/hweight.S
index 0ddbbb031822..eebee59b0655 100644
--- a/arch/sparc/lib/hweight.S
+++ b/arch/sparc/lib/hweight.S
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
.align 32
diff --git a/arch/sparc/lib/ipcsum.S b/arch/sparc/lib/ipcsum.S
index 531d89c9d5d9..7fa8fd4b795a 100644
--- a/arch/sparc/lib/ipcsum.S
+++ b/arch/sparc/lib/ipcsum.S
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
ENTRY(ip_fast_csum) /* %o0 = iph, %o1 = ihl */
diff --git a/arch/sparc/lib/locks.S b/arch/sparc/lib/locks.S
index 9a1289a3fb28..47a39f4384a2 100644
--- a/arch/sparc/lib/locks.S
+++ b/arch/sparc/lib/locks.S
@@ -7,11 +7,11 @@
* Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
+#include <linux/export.h>
#include <asm/ptrace.h>
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/spinlock.h>
-#include <asm/export.h>
.text
.align 4
diff --git a/arch/sparc/lib/lshrdi3.S b/arch/sparc/lib/lshrdi3.S
index 509ca6682da8..09bf581a0ba5 100644
--- a/arch/sparc/lib/lshrdi3.S
+++ b/arch/sparc/lib/lshrdi3.S
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
ENTRY(__lshrdi3)
cmp %o2, 0
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index deba6fa0bc78..f7f7910eb41e 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -6,8 +6,8 @@
* This can also be tweaked for kernel stack overflow detection.
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
/*
* This is the main variant and is called by C code. GCC's -pg option
diff --git a/arch/sparc/lib/memcmp.S b/arch/sparc/lib/memcmp.S
index a18076ef5af1..c87e8000feba 100644
--- a/arch/sparc/lib/memcmp.S
+++ b/arch/sparc/lib/memcmp.S
@@ -5,9 +5,9 @@
* Copyright (C) 2000, 2008 David S. Miller (davem@davemloft.net)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/asm.h>
-#include <asm/export.h>
.text
ENTRY(memcmp)
diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S
index ee823d8c9215..57b1ae0f5924 100644
--- a/arch/sparc/lib/memcpy.S
+++ b/arch/sparc/lib/memcpy.S
@@ -8,7 +8,8 @@
* Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
-#include <asm/export.h>
+#include <linux/export.h>
+
#define FUNC(x) \
.globl x; \
.type x,@function; \
diff --git a/arch/sparc/lib/memmove.S b/arch/sparc/lib/memmove.S
index 3132b6316144..543dda7b9dac 100644
--- a/arch/sparc/lib/memmove.S
+++ b/arch/sparc/lib/memmove.S
@@ -5,8 +5,8 @@
* Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
ENTRY(memmove) /* o0=dst o1=src o2=len */
diff --git a/arch/sparc/lib/memscan_32.S b/arch/sparc/lib/memscan_32.S
index c4c2d5b3a2e9..5386a3a20019 100644
--- a/arch/sparc/lib/memscan_32.S
+++ b/arch/sparc/lib/memscan_32.S
@@ -5,7 +5,7 @@
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
-#include <asm/export.h>
+#include <linux/export.h>
/* In essence, this is just a fancy strlen. */
diff --git a/arch/sparc/lib/memscan_64.S b/arch/sparc/lib/memscan_64.S
index 36dd638905c3..70a4f21057f2 100644
--- a/arch/sparc/lib/memscan_64.S
+++ b/arch/sparc/lib/memscan_64.S
@@ -6,7 +6,7 @@
* Copyright (C) 1998 David S. Miller (davem@redhat.com)
*/
- #include <asm/export.h>
+#include <linux/export.h>
#define HI_MAGIC 0x8080808080808080
#define LO_MAGIC 0x0101010101010101
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index eaff68213fdf..a33419dbb464 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -9,8 +9,8 @@
* clear_user.
*/
+#include <linux/export.h>
#include <asm/ptrace.h>
-#include <asm/export.h>
/* Work around cpp -rob */
#define ALLOC #alloc
diff --git a/arch/sparc/lib/muldi3.S b/arch/sparc/lib/muldi3.S
index 53054dee66d6..7e1e8cd30a22 100644
--- a/arch/sparc/lib/muldi3.S
+++ b/arch/sparc/lib/muldi3.S
@@ -5,7 +5,7 @@ This file is part of GNU CC.
*/
-#include <asm/export.h>
+#include <linux/export.h>
.text
.align 4
.globl __muldi3
diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S
index 2f187b299345..5bb4c122a2cf 100644
--- a/arch/sparc/lib/multi3.S
+++ b/arch/sparc/lib/multi3.S
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
.align 4
diff --git a/arch/sparc/lib/strlen.S b/arch/sparc/lib/strlen.S
index dd111bbad5df..27478b3f1647 100644
--- a/arch/sparc/lib/strlen.S
+++ b/arch/sparc/lib/strlen.S
@@ -6,9 +6,9 @@
* Copyright (C) 1996, 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/asm.h>
-#include <asm/export.h>
#define LO_MAGIC 0x01010101
#define HI_MAGIC 0x80808080
diff --git a/arch/sparc/lib/strncmp_32.S b/arch/sparc/lib/strncmp_32.S
index 794733f036b6..387bbf621548 100644
--- a/arch/sparc/lib/strncmp_32.S
+++ b/arch/sparc/lib/strncmp_32.S
@@ -4,8 +4,8 @@
* generic strncmp routine.
*/
+#include <linux/export.h>
#include <linux/linkage.h>
-#include <asm/export.h>
.text
ENTRY(strncmp)
diff --git a/arch/sparc/lib/strncmp_64.S b/arch/sparc/lib/strncmp_64.S
index 3d37d65f674c..76c1207ecf5a 100644
--- a/arch/sparc/lib/strncmp_64.S
+++ b/arch/sparc/lib/strncmp_64.S
@@ -5,9 +5,9 @@
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/asi.h>
-#include <asm/export.h>
.text
ENTRY(strncmp)
diff --git a/arch/sparc/lib/xor.S b/arch/sparc/lib/xor.S
index f6af7c7ee6fc..35461e3b2a9b 100644
--- a/arch/sparc/lib/xor.S
+++ b/arch/sparc/lib/xor.S
@@ -9,12 +9,12 @@
* Copyright (C) 2006 David S. Miller <davem@davemloft.net>
*/
+#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#include <asm/dcu.h>
#include <asm/spitfire.h>
-#include <asm/export.h>
/*
* Requirements:
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 0d41c94ec3ac..b44d79d778c7 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -128,6 +128,7 @@ void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
goto no_cache_flush;
/* A real file page? */
+ folio = page_folio(page);
mapping = folio_flush_mapping(folio);
if (!mapping)
goto no_cache_flush;
diff --git a/arch/um/Kbuild b/arch/um/Kbuild
index a4e40e534e6a..6cf0c1e5927b 100644
--- a/arch/um/Kbuild
+++ b/arch/um/Kbuild
@@ -1 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
+
+obj-y += kernel/ drivers/ os-Linux/
diff --git a/arch/um/Makefile b/arch/um/Makefile
index da4d5256af2f..82f05f250634 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -18,15 +18,10 @@ else
endif
ARCH_DIR := arch/um
-OS := $(shell uname -s)
# We require bash because the vmlinux link and loader script cpp use bash
# features.
SHELL := /bin/bash
-core-y += $(ARCH_DIR)/kernel/ \
- $(ARCH_DIR)/drivers/ \
- $(ARCH_DIR)/os-$(OS)/
-
MODE_INCLUDE += -I$(srctree)/$(ARCH_DIR)/include/shared/skas
HEADER_ARCH := $(SUBARCH)
@@ -78,7 +73,7 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -I%,,$(KBUILD_CFLAGS))) \
-idirafter $(objtree)/include -D__KERNEL__ -D__UM_HOST__
#This will adjust *FLAGS accordingly to the platform.
-include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
+include $(srctree)/$(ARCH_DIR)/Makefile-os-Linux
KBUILD_CPPFLAGS += -I$(srctree)/$(HOST_DIR)/include \
-I$(srctree)/$(HOST_DIR)/include/uapi \
@@ -155,4 +150,4 @@ archclean:
@find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
-o -name '*.gcov' \) -type f -print | xargs rm -f
-export HEADER_ARCH SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING OS DEV_NULL_PATH
+export HEADER_ARCH SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING DEV_NULL_PATH
diff --git a/arch/um/configs/i386_defconfig b/arch/um/configs/i386_defconfig
index 630be793759e..e543cbac8792 100644
--- a/arch/um/configs/i386_defconfig
+++ b/arch/um/configs/i386_defconfig
@@ -34,6 +34,7 @@ CONFIG_TTY_CHAN=y
CONFIG_XTERM_CHAN=y
CONFIG_CON_CHAN="pts"
CONFIG_SSL_CHAN="pts"
+CONFIG_SOUND=m
CONFIG_UML_SOUND=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
diff --git a/arch/um/configs/x86_64_defconfig b/arch/um/configs/x86_64_defconfig
index 8540d3370272..939cb12318ca 100644
--- a/arch/um/configs/x86_64_defconfig
+++ b/arch/um/configs/x86_64_defconfig
@@ -32,6 +32,7 @@ CONFIG_TTY_CHAN=y
CONFIG_XTERM_CHAN=y
CONFIG_CON_CHAN="pts"
CONFIG_SSL_CHAN="pts"
+CONFIG_SOUND=m
CONFIG_UML_SOUND=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
diff --git a/arch/um/drivers/Kconfig b/arch/um/drivers/Kconfig
index 36911b1fddcf..b94b2618e7d8 100644
--- a/arch/um/drivers/Kconfig
+++ b/arch/um/drivers/Kconfig
@@ -111,24 +111,14 @@ config SSL_CHAN
config UML_SOUND
tristate "Sound support"
+ depends on SOUND
+ select SOUND_OSS_CORE
help
This option enables UML sound support. If enabled, it will pull in
- soundcore and the UML hostaudio relay, which acts as a intermediary
+ the UML hostaudio relay, which acts as an intermediary
between the host's dsp and mixer devices and the UML sound system.
It is safe to say 'Y' here.
-config SOUND
- tristate
- default UML_SOUND
-
-config SOUND_OSS_CORE
- bool
- default UML_SOUND
-
-config HOSTAUDIO
- tristate
- default UML_SOUND
-
endmenu
menu "UML Network Devices"
diff --git a/arch/um/drivers/Makefile b/arch/um/drivers/Makefile
index a461a950f051..0e6af81096fd 100644
--- a/arch/um/drivers/Makefile
+++ b/arch/um/drivers/Makefile
@@ -54,7 +54,7 @@ obj-$(CONFIG_UML_NET) += net.o
obj-$(CONFIG_MCONSOLE) += mconsole.o
obj-$(CONFIG_MMAPPER) += mmapper_kern.o
obj-$(CONFIG_BLK_DEV_UBD) += ubd.o
-obj-$(CONFIG_HOSTAUDIO) += hostaudio.o
+obj-$(CONFIG_UML_SOUND) += hostaudio.o
obj-$(CONFIG_NULL_CHAN) += null.o
obj-$(CONFIG_PORT_CHAN) += port.o
obj-$(CONFIG_PTY_CHAN) += pty.o
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c
index 5b064d360cb7..c42b793bce65 100644
--- a/arch/um/drivers/hostaudio_kern.c
+++ b/arch/um/drivers/hostaudio_kern.c
@@ -310,7 +310,7 @@ static const struct file_operations hostmixer_fops = {
.release = hostmixer_release,
};
-struct {
+static struct {
int dev_audio;
int dev_mixer;
} module_data;
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 02b0befd6763..b98545f3edb5 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -184,7 +184,7 @@ void line_flush_chars(struct tty_struct *tty)
line_flush_buffer(tty);
}
-int line_write(struct tty_struct *tty, const unsigned char *buf, int len)
+ssize_t line_write(struct tty_struct *tty, const u8 *buf, size_t len)
{
struct line *line = tty->driver_data;
unsigned long flags;
diff --git a/arch/um/drivers/line.h b/arch/um/drivers/line.h
index f15be75a3bf3..e84fb9b4165e 100644
--- a/arch/um/drivers/line.h
+++ b/arch/um/drivers/line.h
@@ -64,8 +64,7 @@ extern void line_cleanup(struct tty_struct *tty);
extern void line_hangup(struct tty_struct *tty);
extern int line_setup(char **conf, unsigned nlines, char **def,
char *init, char *name);
-extern int line_write(struct tty_struct *tty, const unsigned char *buf,
- int len);
+extern ssize_t line_write(struct tty_struct *tty, const u8 *buf, size_t len);
extern unsigned int line_chars_in_buffer(struct tty_struct *tty);
extern void line_flush_buffer(struct tty_struct *tty);
extern void line_flush_chars(struct tty_struct *tty);
diff --git a/arch/um/drivers/port_kern.c b/arch/um/drivers/port_kern.c
index efa8b7304090..c52b3ff3c092 100644
--- a/arch/um/drivers/port_kern.c
+++ b/arch/um/drivers/port_kern.c
@@ -144,7 +144,7 @@ static void port_work_proc(struct work_struct *unused)
local_irq_restore(flags);
}
-DECLARE_WORK(port_work, port_work_proc);
+static DECLARE_WORK(port_work, port_work_proc);
static irqreturn_t port_interrupt(int irq, void *data)
{
diff --git a/arch/um/drivers/slirp_kern.c b/arch/um/drivers/slirp_kern.c
index 2d9769237f08..0a6151ee9572 100644
--- a/arch/um/drivers/slirp_kern.c
+++ b/arch/um/drivers/slirp_kern.c
@@ -15,7 +15,7 @@ struct slirp_init {
struct arg_list_dummy_wrapper argw; /* XXX should be simpler... */
};
-void slirp_init(struct net_device *dev, void *data)
+static void slirp_init(struct net_device *dev, void *data)
{
struct uml_net_private *private;
struct slirp_data *spri;
diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c
index 7699ca5f35d4..ffe2ee8a0246 100644
--- a/arch/um/drivers/virt-pci.c
+++ b/arch/um/drivers/virt-pci.c
@@ -544,6 +544,7 @@ static void um_pci_irq_vq_cb(struct virtqueue *vq)
}
}
+#ifdef CONFIG_OF
/* Copied from arch/x86/kernel/devicetree.c */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
@@ -562,6 +563,7 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
}
return NULL;
}
+#endif
static int um_pci_init_vqs(struct um_pci_device *dev)
{
diff --git a/arch/um/drivers/xterm_kern.c b/arch/um/drivers/xterm_kern.c
index 50f11b7b4774..8011e51993d5 100644
--- a/arch/um/drivers/xterm_kern.c
+++ b/arch/um/drivers/xterm_kern.c
@@ -9,6 +9,7 @@
#include <asm/irq.h>
#include <irq_kern.h>
#include <os.h>
+#include "xterm.h"
struct xterm_wait {
struct completion ready;
diff --git a/arch/um/include/shared/irq_kern.h b/arch/um/include/shared/irq_kern.h
index f2dc817abb7c..44357fa6ee29 100644
--- a/arch/um/include/shared/irq_kern.h
+++ b/arch/um/include/shared/irq_kern.h
@@ -76,4 +76,5 @@ static inline bool um_irq_timetravel_handler_used(void)
}
void um_free_irq(int irq, void *dev_id);
+void free_irqs(void);
#endif
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index a8873d9bc28b..635d44606bfe 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -23,8 +23,6 @@
#include <linux/time-internal.h>
-extern void free_irqs(void);
-
/* When epoll triggers we do not know why it did so
* we can also have different IRQs for read and write.
* This is why we keep a small irq_reg array for each fd -
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 95315d3474a2..5bfe5caaa444 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -335,9 +335,5 @@ define archhelp
echo ' bzdisk/fdimage*/hdimage/isoimage also accept:'
echo ' FDARGS="..." arguments for the booted kernel'
echo ' FDINITRD=file initrd for the booted kernel'
- echo ''
- echo ' kvm_guest.config - Enable Kconfig items for running this kernel as a KVM guest'
- echo ' xen.config - Enable Kconfig items for running this kernel as a Xen guest'
- echo ' x86_debug.config - Enable tip tree debugging options for testing'
endef
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 187e13b15e9a..97bfe5f0531f 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -175,8 +175,11 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector,
(exclude_self && weight == 1 && cpumask_test_cpu(this_cpu, mask)))
return true;
- if (!hv_hypercall_pg)
- return false;
+ /* A fully enlightened TDX VM uses GHCI rather than hv_hypercall_pg. */
+ if (!hv_hypercall_pg) {
+ if (ms_hyperv.paravisor_present || !hv_isolation_type_tdx())
+ return false;
+ }
if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
return false;
@@ -229,9 +232,15 @@ static bool __send_ipi_one(int cpu, int vector)
trace_hyperv_send_ipi_one(cpu, vector);
- if (!hv_hypercall_pg || (vp == VP_INVAL))
+ if (vp == VP_INVAL)
return false;
+ /* A fully enlightened TDX VM uses GHCI rather than hv_hypercall_pg. */
+ if (!hv_hypercall_pg) {
+ if (ms_hyperv.paravisor_present || !hv_isolation_type_tdx())
+ return false;
+ }
+
if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
return false;
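Both IPI paths above now tolerate a missing hypercall page only for a fully enlightened TDX guest, i.e. one without a paravisor. A hedged restatement of that condition as a single predicate (the helper itself is not part of the patch):
/* Illustrative helper, not in the patch: may we send IPIs without hv_hypercall_pg? */
static bool hv_ipi_without_hypercall_pg_ok(void)
{
	/* A fully enlightened TDX VM uses GHCI rather than the hypercall page. */
	return hv_isolation_type_tdx() && !ms_hyperv.paravisor_present;
}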
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 507d98331e7c..783ed339f341 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -19,6 +19,7 @@
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/idtentry.h>
+#include <asm/set_memory.h>
#include <linux/kexec.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
@@ -52,7 +53,7 @@ static int hyperv_init_ghcb(void)
void *ghcb_va;
void **ghcb_base;
- if (!hv_isolation_type_snp())
+ if (!ms_hyperv.paravisor_present || !hv_isolation_type_snp())
return 0;
if (!hv_ghcb_pg)
@@ -80,7 +81,7 @@ static int hyperv_init_ghcb(void)
static int hv_cpu_init(unsigned int cpu)
{
union hv_vp_assist_msr_contents msr = { 0 };
- struct hv_vp_assist_page **hvp = &hv_vp_assist_page[cpu];
+ struct hv_vp_assist_page **hvp;
int ret;
ret = hv_common_cpu_init(cpu);
@@ -90,6 +91,7 @@ static int hv_cpu_init(unsigned int cpu)
if (!hv_vp_assist_page)
return 0;
+ hvp = &hv_vp_assist_page[cpu];
if (hv_root_partition) {
/*
* For root partition we get the hypervisor provided VP assist
@@ -107,8 +109,21 @@ static int hv_cpu_init(unsigned int cpu)
* in hv_cpu_die(), otherwise a CPU may not be stopped in the
* case of CPU offlining and the VM will hang.
*/
- if (!*hvp)
+ if (!*hvp) {
*hvp = __vmalloc(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
+
+ /*
+ * Hyper-V should never specify a VM that is both a Confidential
+ * VM and running in the root partition; the root partition is not
+ * allowed to run as a Confidential VM. So only decrypt the assist
+ * page in a non-root partition here.
+ */
+ if (*hvp && !ms_hyperv.paravisor_present && hv_isolation_type_snp()) {
+ WARN_ON_ONCE(set_memory_decrypted((unsigned long)(*hvp), 1));
+ memset(*hvp, 0, PAGE_SIZE);
+ }
+ }
+
if (*hvp)
msr.pfn = vmalloc_to_pfn(*hvp);
@@ -379,6 +394,36 @@ static void __init hv_get_partition_id(void)
local_irq_restore(flags);
}
+static u8 __init get_vtl(void)
+{
+ u64 control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_REGISTERS;
+ struct hv_get_vp_registers_input *input;
+ struct hv_get_vp_registers_output *output;
+ unsigned long flags;
+ u64 ret;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = (struct hv_get_vp_registers_output *)input;
+
+ memset(input, 0, struct_size(input, element, 1));
+ input->header.partitionid = HV_PARTITION_ID_SELF;
+ input->header.vpindex = HV_VP_INDEX_SELF;
+ input->header.inputvtl = 0;
+ input->element[0].name0 = HV_X64_REGISTER_VSM_VP_STATUS;
+
+ ret = hv_do_hypercall(control, input, output);
+ if (hv_result_success(ret)) {
+ ret = output->as64.low & HV_X64_VTL_MASK;
+ } else {
+ pr_err("Failed to get VTL(%lld) and set VTL to zero by default.\n", ret);
+ ret = 0;
+ }
+
+ local_irq_restore(flags);
+ return ret;
+}
+
/*
* This function is to be invoked early in the boot sequence after the
* hypervisor has been detected.
@@ -399,14 +444,24 @@ void __init hyperv_init(void)
if (hv_common_init())
return;
- hv_vp_assist_page = kcalloc(num_possible_cpus(),
- sizeof(*hv_vp_assist_page), GFP_KERNEL);
+ /*
+ * The VP assist page is useless to a TDX guest: the only use we
+ * would have for it is lazy EOI, which can not be used with TDX.
+ */
+ if (hv_isolation_type_tdx())
+ hv_vp_assist_page = NULL;
+ else
+ hv_vp_assist_page = kcalloc(num_possible_cpus(),
+ sizeof(*hv_vp_assist_page),
+ GFP_KERNEL);
if (!hv_vp_assist_page) {
ms_hyperv.hints &= ~HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
- goto common_free;
+
+ if (!hv_isolation_type_tdx())
+ goto common_free;
}
- if (hv_isolation_type_snp()) {
+ if (ms_hyperv.paravisor_present && hv_isolation_type_snp()) {
/* Negotiate GHCB Version. */
if (!hv_ghcb_negotiate_protocol())
hv_ghcb_terminate(SEV_TERM_SET_GEN,
@@ -426,12 +481,32 @@ void __init hyperv_init(void)
* Setup the hypercall page and enable hypercalls.
* 1. Register the guest ID
* 2. Enable the hypercall and register the hypercall page
+ *
+ * A TDX VM with no paravisor only uses TDX GHCI rather than hv_hypercall_pg:
+ * when the hypercall input is a page, such a VM must pass a decrypted
+ * page to Hyper-V, e.g. hv_post_message() uses the per-CPU page
+ * hyperv_pcpu_input_arg, which is decrypted if no paravisor is present.
+ *
+ * A TDX VM with the paravisor uses hv_hypercall_pg for most hypercalls,
+ * which are handled by the paravisor and the VM must use an encrypted
+ * input page: in such a VM, the hyperv_pcpu_input_arg is encrypted and
+ * used in the hypercalls, e.g. see hv_mark_gpa_visibility() and
+ * hv_arch_irq_unmask(). Such a VM uses TDX GHCI for two hypercalls:
+ * 1. HVCALL_SIGNAL_EVENT: see vmbus_set_event() and _hv_do_fast_hypercall8().
+ * 2. HVCALL_POST_MESSAGE: the input page must be a decrypted page, i.e.
+ * hv_post_message() in such a VM can't use the encrypted hyperv_pcpu_input_arg;
+ * instead, hv_post_message() uses the post_msg_page, which is decrypted
+ * in such a VM and is only used in such a VM.
*/
guest_id = hv_generate_guest_id(LINUX_VERSION_CODE);
wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
- /* Hyper-V requires to write guest os id via ghcb in SNP IVM. */
- hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id);
+ /* With the paravisor, the VM must also write the ID via GHCB/GHCI */
+ hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id);
+
+ /* A TDX VM with no paravisor only uses TDX GHCI rather than hv_hypercall_pg */
+ if (hv_isolation_type_tdx() && !ms_hyperv.paravisor_present)
+ goto skip_hypercall_pg_init;
hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START,
VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
@@ -472,6 +547,7 @@ void __init hyperv_init(void)
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
}
+skip_hypercall_pg_init:
/*
* Some versions of Hyper-V that provide IBT in guest VMs have a bug
* in that there's no ENDBR64 instruction at the entry to the
@@ -527,11 +603,15 @@ void __init hyperv_init(void)
/* Query the VMs extended capability once, so that it can be cached. */
hv_query_ext_cap(0);
+ /* Find the VTL */
+ if (!ms_hyperv.paravisor_present && hv_isolation_type_snp())
+ ms_hyperv.vtl = get_vtl();
+
return;
clean_guest_os_id:
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
- hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
+ hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
cpuhp_remove_state(cpuhp);
free_ghcb_page:
free_percpu(hv_ghcb_pg);
@@ -552,7 +632,7 @@ void hyperv_cleanup(void)
/* Reset our OS id */
wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
- hv_ghcb_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
+ hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, 0);
/*
* Reset hypercall page reference before reset the page,
@@ -615,6 +695,9 @@ bool hv_is_hyperv_initialized(void)
if (x86_hyper_type != X86_HYPER_MS_HYPERV)
return false;
+ /* A TDX VM with no paravisor uses TDX GHCI call rather than hv_hypercall_pg */
+ if (hv_isolation_type_tdx() && !ms_hyperv.paravisor_present)
+ return true;
/*
* Verify that earlier initialization succeeded by checking
* that the hypercall page is setup
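get_vtl() above fetches HV_X64_REGISTER_VSM_VP_STATUS through HVCALL_GET_VP_REGISTERS and keeps only the low bits of the result. A hedged illustration of that final masking step (the helper name is an assumption, not part of the patch):
/* Illustrative: the active VTL is the low four bits of the VSM VP status
 * register, per the HV_X64_VTL_MASK (GENMASK(3, 0)) definition added in the
 * hyperv-tlfs.h hunk further down. */
static inline u8 vtl_from_vp_status(u64 vsm_vp_status)
{
	return vsm_vp_status & HV_X64_VTL_MASK;
}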
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 28be6df88063..8c6bf07f7d2b 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -18,6 +18,11 @@
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/mtrr.h>
+#include <asm/io_apic.h>
+#include <asm/realmode.h>
+#include <asm/e820/api.h>
+#include <asm/desc.h>
+#include <uapi/asm/vmx.h>
#ifdef CONFIG_AMD_MEM_ENCRYPT
@@ -56,8 +61,10 @@ union hv_ghcb {
} hypercall;
} __packed __aligned(HV_HYP_PAGE_SIZE);
+/* Only used in an SNP VM with the paravisor */
static u16 hv_ghcb_version __ro_after_init;
+/* Functions only used in an SNP VM with the paravisor go here. */
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
{
union hv_ghcb *hv_ghcb;
@@ -175,7 +182,7 @@ bool hv_ghcb_negotiate_protocol(void)
return true;
}
-void hv_ghcb_msr_write(u64 msr, u64 value)
+static void hv_ghcb_msr_write(u64 msr, u64 value)
{
union hv_ghcb *hv_ghcb;
void **ghcb_base;
@@ -203,9 +210,8 @@ void hv_ghcb_msr_write(u64 msr, u64 value)
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(hv_ghcb_msr_write);
-void hv_ghcb_msr_read(u64 msr, u64 *value)
+static void hv_ghcb_msr_read(u64 msr, u64 *value)
{
union hv_ghcb *hv_ghcb;
void **ghcb_base;
@@ -235,7 +241,217 @@ void hv_ghcb_msr_read(u64 msr, u64 *value)
| ((u64)lower_32_bits(hv_ghcb->ghcb.save.rdx) << 32);
local_irq_restore(flags);
}
-EXPORT_SYMBOL_GPL(hv_ghcb_msr_read);
+
+/* Only used in a fully enlightened SNP VM, i.e. without the paravisor */
+static u8 ap_start_input_arg[PAGE_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
+static u8 ap_start_stack[PAGE_SIZE] __aligned(PAGE_SIZE);
+static DEFINE_PER_CPU(struct sev_es_save_area *, hv_sev_vmsa);
+
+/* Functions only used in an SNP VM without the paravisor go here. */
+
+#define hv_populate_vmcb_seg(seg, gdtr_base) \
+do { \
+ if (seg.selector) { \
+ seg.base = 0; \
+ seg.limit = HV_AP_SEGMENT_LIMIT; \
+ seg.attrib = *(u16 *)(gdtr_base + seg.selector + 5); \
+ seg.attrib = (seg.attrib & 0xFF) | ((seg.attrib >> 4) & 0xF00); \
+ } \
+} while (0) \
+
+static int snp_set_vmsa(void *va, bool vmsa)
+{
+ u64 attrs;
+
+ /*
+ * Running at VMPL0 allows the kernel to change the VMSA bit for a page
+ * using the RMPADJUST instruction. However, for the instruction to
+ * succeed it must target the permissions of a lesser privileged
+ * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
+ * instruction in the AMD64 APM Volume 3).
+ */
+ attrs = 1;
+ if (vmsa)
+ attrs |= RMPADJUST_VMSA_PAGE_BIT;
+
+ return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
+}
+
+static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
+{
+ int err;
+
+ err = snp_set_vmsa(vmsa, false);
+ if (err)
+ pr_err("clear VMSA page failed (%u), leaking page\n", err);
+ else
+ free_page((unsigned long)vmsa);
+}
+
+int hv_snp_boot_ap(int cpu, unsigned long start_ip)
+{
+ struct sev_es_save_area *vmsa = (struct sev_es_save_area *)
+ __get_free_page(GFP_KERNEL | __GFP_ZERO);
+ struct sev_es_save_area *cur_vmsa;
+ struct desc_ptr gdtr;
+ u64 ret, retry = 5;
+ struct hv_enable_vp_vtl *start_vp_input;
+ unsigned long flags;
+
+ if (!vmsa)
+ return -ENOMEM;
+
+ native_store_gdt(&gdtr);
+
+ vmsa->gdtr.base = gdtr.address;
+ vmsa->gdtr.limit = gdtr.size;
+
+ asm volatile("movl %%es, %%eax;" : "=a" (vmsa->es.selector));
+ hv_populate_vmcb_seg(vmsa->es, vmsa->gdtr.base);
+
+ asm volatile("movl %%cs, %%eax;" : "=a" (vmsa->cs.selector));
+ hv_populate_vmcb_seg(vmsa->cs, vmsa->gdtr.base);
+
+ asm volatile("movl %%ss, %%eax;" : "=a" (vmsa->ss.selector));
+ hv_populate_vmcb_seg(vmsa->ss, vmsa->gdtr.base);
+
+ asm volatile("movl %%ds, %%eax;" : "=a" (vmsa->ds.selector));
+ hv_populate_vmcb_seg(vmsa->ds, vmsa->gdtr.base);
+
+ vmsa->efer = native_read_msr(MSR_EFER);
+
+ asm volatile("movq %%cr4, %%rax;" : "=a" (vmsa->cr4));
+ asm volatile("movq %%cr3, %%rax;" : "=a" (vmsa->cr3));
+ asm volatile("movq %%cr0, %%rax;" : "=a" (vmsa->cr0));
+
+ vmsa->xcr0 = 1;
+ vmsa->g_pat = HV_AP_INIT_GPAT_DEFAULT;
+ vmsa->rip = (u64)secondary_startup_64_no_verify;
+ vmsa->rsp = (u64)&ap_start_stack[PAGE_SIZE];
+
+ /*
+ * Set the SNP-specific fields for this VMSA:
+ * VMPL level
+ * SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
+ */
+ vmsa->vmpl = 0;
+ vmsa->sev_features = sev_status >> 2;
+
+ ret = snp_set_vmsa(vmsa, true);
+ if (!ret) {
+ pr_err("RMPADJUST(%llx) failed: %llx\n", (u64)vmsa, ret);
+ free_page((u64)vmsa);
+ return ret;
+ }
+
+ local_irq_save(flags);
+ start_vp_input = (struct hv_enable_vp_vtl *)ap_start_input_arg;
+ memset(start_vp_input, 0, sizeof(*start_vp_input));
+ start_vp_input->partition_id = -1;
+ start_vp_input->vp_index = cpu;
+ start_vp_input->target_vtl.target_vtl = ms_hyperv.vtl;
+ *(u64 *)&start_vp_input->vp_context = __pa(vmsa) | 1;
+
+ do {
+ ret = hv_do_hypercall(HVCALL_START_VP,
+ start_vp_input, NULL);
+ } while (hv_result(ret) == HV_STATUS_TIME_OUT && retry--);
+
+ local_irq_restore(flags);
+
+ if (!hv_result_success(ret)) {
+ pr_err("HvCallStartVirtualProcessor failed: %llx\n", ret);
+ snp_cleanup_vmsa(vmsa);
+ vmsa = NULL;
+ }
+
+ cur_vmsa = per_cpu(hv_sev_vmsa, cpu);
+ /* Free up any previous VMSA page */
+ if (cur_vmsa)
+ snp_cleanup_vmsa(cur_vmsa);
+
+ /* Record the current VMSA page */
+ per_cpu(hv_sev_vmsa, cpu) = vmsa;
+
+ return ret;
+}
+
+#else
+static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
+static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
+#endif /* CONFIG_AMD_MEM_ENCRYPT */
+
+#ifdef CONFIG_INTEL_TDX_GUEST
+static void hv_tdx_msr_write(u64 msr, u64 val)
+{
+ struct tdx_hypercall_args args = {
+ .r10 = TDX_HYPERCALL_STANDARD,
+ .r11 = EXIT_REASON_MSR_WRITE,
+ .r12 = msr,
+ .r13 = val,
+ };
+
+ u64 ret = __tdx_hypercall(&args);
+
+ WARN_ONCE(ret, "Failed to emulate MSR write: %lld\n", ret);
+}
+
+static void hv_tdx_msr_read(u64 msr, u64 *val)
+{
+ struct tdx_hypercall_args args = {
+ .r10 = TDX_HYPERCALL_STANDARD,
+ .r11 = EXIT_REASON_MSR_READ,
+ .r12 = msr,
+ };
+
+ u64 ret = __tdx_hypercall_ret(&args);
+
+ if (WARN_ONCE(ret, "Failed to emulate MSR read: %lld\n", ret))
+ *val = 0;
+ else
+ *val = args.r11;
+}
+
+u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
+{
+ struct tdx_hypercall_args args = { };
+
+ args.r10 = control;
+ args.rdx = param1;
+ args.r8 = param2;
+
+ (void)__tdx_hypercall_ret(&args);
+
+ return args.r11;
+}
+
+#else
+static inline void hv_tdx_msr_write(u64 msr, u64 value) {}
+static inline void hv_tdx_msr_read(u64 msr, u64 *value) {}
+#endif /* CONFIG_INTEL_TDX_GUEST */
+
+#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
+void hv_ivm_msr_write(u64 msr, u64 value)
+{
+ if (!ms_hyperv.paravisor_present)
+ return;
+
+ if (hv_isolation_type_tdx())
+ hv_tdx_msr_write(msr, value);
+ else if (hv_isolation_type_snp())
+ hv_ghcb_msr_write(msr, value);
+}
+
+void hv_ivm_msr_read(u64 msr, u64 *value)
+{
+ if (!ms_hyperv.paravisor_present)
+ return;
+
+ if (hv_isolation_type_tdx())
+ hv_tdx_msr_read(msr, value);
+ else if (hv_isolation_type_snp())
+ hv_ghcb_msr_read(msr, value);
+}
/*
* hv_mark_gpa_visibility - Set pages visible to host via hvcall.
@@ -358,13 +574,34 @@ static bool hv_is_private_mmio(u64 addr)
void __init hv_vtom_init(void)
{
+ enum hv_isolation_type type = hv_get_isolation_type();
+
+ switch (type) {
+ case HV_ISOLATION_TYPE_VBS:
+ fallthrough;
/*
* By design, a VM using vTOM doesn't see the SEV setting,
* so SEV initialization is bypassed and sev_status isn't set.
* Set it here to indicate a vTOM VM.
+ *
+ * Note: if CONFIG_AMD_MEM_ENCRYPT is not set, sev_status is
+ * defined as 0ULL, to which we can't assign a value.
*/
- sev_status = MSR_AMD64_SNP_VTOM;
- cc_vendor = CC_VENDOR_AMD;
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ case HV_ISOLATION_TYPE_SNP:
+ sev_status = MSR_AMD64_SNP_VTOM;
+ cc_vendor = CC_VENDOR_AMD;
+ break;
+#endif
+
+ case HV_ISOLATION_TYPE_TDX:
+ cc_vendor = CC_VENDOR_INTEL;
+ break;
+
+ default:
+ panic("hv_vtom_init: unsupported isolation type %d\n", type);
+ }
+
cc_set_mask(ms_hyperv.shared_gpa_boundary);
physical_mask &= ms_hyperv.shared_gpa_boundary - 1;
@@ -377,7 +614,7 @@ void __init hv_vtom_init(void)
mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK);
}
-#endif /* CONFIG_AMD_MEM_ENCRYPT */
+#endif /* defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST) */
enum hv_isolation_type hv_get_isolation_type(void)
{
@@ -405,10 +642,20 @@ bool hv_is_isolation_supported(void)
DEFINE_STATIC_KEY_FALSE(isolation_type_snp);
/*
- * hv_isolation_type_snp - Check system runs in the AMD SEV-SNP based
+ * hv_isolation_type_snp - Check if the system runs in an AMD SEV-SNP based
* isolation VM.
*/
bool hv_isolation_type_snp(void)
{
return static_branch_unlikely(&isolation_type_snp);
}
+
+DEFINE_STATIC_KEY_FALSE(isolation_type_tdx);
+/*
+ * hv_isolation_type_tdx - Check if the system runs in an Intel TDX based
+ * isolated VM.
+ */
+bool hv_isolation_type_tdx(void)
+{
+ return static_branch_unlikely(&isolation_type_tdx);
+}
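After the ivm.c changes above, callers such as hyperv_init() and hyperv_cleanup() go through hv_ivm_msr_write()/hv_ivm_msr_read(), which return early without a paravisor and otherwise dispatch to the GHCB (SNP) or TDX helpers. A hedged usage sketch (the wrapper function here is illustrative, not from the patch):
/* Illustrative caller, not part of the patch. */
static void example_report_guest_os_id(u64 guest_id)
{
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
	/* No-op without a paravisor; otherwise mirrored via GHCB (SNP) or TDX MSR emulation. */
	hv_ivm_msr_write(HV_X64_MSR_GUEST_OS_ID, guest_id);
}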
diff --git a/arch/x86/include/asm/audit.h b/arch/x86/include/asm/audit.h
index 36aec57ea7a3..fa918f01333e 100644
--- a/arch/x86/include/asm/audit.h
+++ b/arch/x86/include/asm/audit.h
@@ -4,4 +4,11 @@
int ia32_classify_syscall(unsigned int syscall);
+extern unsigned ia32_dir_class[];
+extern unsigned ia32_write_class[];
+extern unsigned ia32_read_class[];
+extern unsigned ia32_chattr_class[];
+extern unsigned ia32_signal_class[];
+
+
#endif /* _ASM_X86_AUDIT_H */
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 2061ed1c398f..58cb9495e40f 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -439,6 +439,7 @@
#define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */
#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
+#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* AMD SEV-ES full debug state swap support */
/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index cea95dcd27c2..2ff26f53cd62 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -169,7 +169,8 @@
enum hv_isolation_type {
HV_ISOLATION_TYPE_NONE = 0,
HV_ISOLATION_TYPE_VBS = 1,
- HV_ISOLATION_TYPE_SNP = 2
+ HV_ISOLATION_TYPE_SNP = 2,
+ HV_ISOLATION_TYPE_TDX = 3
};
/* Hyper-V specific model specific registers (MSRs) */
@@ -301,6 +302,13 @@ enum hv_isolation_type {
#define HV_X64_MSR_TIME_REF_COUNT HV_REGISTER_TIME_REF_COUNT
#define HV_X64_MSR_REFERENCE_TSC HV_REGISTER_REFERENCE_TSC
+/*
+ * Registers are only accessible via HVCALL_GET_VP_REGISTERS hvcall and
+ * there is no associated MSR address.
+ */
+#define HV_X64_REGISTER_VSM_VP_STATUS 0x000D0003
+#define HV_X64_VTL_MASK GENMASK(3, 0)
+
/* Hyper-V memory host visibility */
enum hv_mem_host_visibility {
VMBUS_PAGE_NOT_VISIBLE = 0,
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 3be6a98751f0..c9f6a6c5de3c 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -205,8 +205,6 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image);
#endif
#endif
-typedef void crash_vmclear_fn(void);
-extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
extern void kdump_nmi_shootdown_cpus(void);
#ifdef CONFIG_CRASH_HOTPLUG
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3bc146dfd38d..1a4def36d5bb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -288,13 +288,13 @@ struct kvm_kernel_irq_routing_entry;
* kvm_mmu_page_role tracks the properties of a shadow page (where shadow page
* also includes TDP pages) to determine whether or not a page can be used in
* the given MMU context. This is a subset of the overall kvm_cpu_role to
- * minimize the size of kvm_memory_slot.arch.gfn_track, i.e. allows allocating
- * 2 bytes per gfn instead of 4 bytes per gfn.
+ * minimize the size of kvm_memory_slot.arch.gfn_write_track, i.e. allows
+ * allocating 2 bytes per gfn instead of 4 bytes per gfn.
*
* Upper-level shadow pages having gptes are tracked for write-protection via
- * gfn_track. As above, gfn_track is a 16 bit counter, so KVM must not create
- * more than 2^16-1 upper-level shadow pages at a single gfn, otherwise
- * gfn_track will overflow and explosions will ensure.
+ * gfn_write_track. As above, gfn_write_track is a 16 bit counter, so KVM must
+ * not create more than 2^16-1 upper-level shadow pages at a single gfn,
+ * otherwise gfn_write_track will overflow and explosions will ensue.
*
* A unique shadow page (SP) for a gfn is created if and only if an existing SP
* cannot be reused. The ability to reuse a SP is tracked by its role, which
@@ -746,7 +746,6 @@ struct kvm_vcpu_arch {
u64 smi_count;
bool at_instruction_boundary;
bool tpr_access_reporting;
- bool xsaves_enabled;
bool xfd_no_write_intercept;
u64 ia32_xss;
u64 microcode_version;
@@ -831,6 +830,25 @@ struct kvm_vcpu_arch {
struct kvm_cpuid_entry2 *cpuid_entries;
struct kvm_hypervisor_cpuid kvm_cpuid;
+ /*
+ * FIXME: Drop this macro and use KVM_NR_GOVERNED_FEATURES directly
+ * when "struct kvm_vcpu_arch" is no longer defined in an
+ * arch/x86/include/asm header. The max is mostly arbitrary, i.e.
+ * can be increased as necessary.
+ */
+#define KVM_MAX_NR_GOVERNED_FEATURES BITS_PER_LONG
+
+ /*
+ * Track whether or not the guest is allowed to use features that are
+ * governed by KVM, where "governed" means KVM needs to manage state
+ * and/or explicitly enable the feature in hardware. Typically, but
+ * not always, governed features can be used by the guest if and only
+ * if both KVM and userspace want to expose the feature to the guest.
+ */
+ struct {
+ DECLARE_BITMAP(enabled, KVM_MAX_NR_GOVERNED_FEATURES);
+ } governed_features;
+
u64 reserved_gpa_bits;
int maxphyaddr;
@@ -1005,7 +1023,7 @@ struct kvm_lpage_info {
struct kvm_arch_memory_slot {
struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
- unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
+ unsigned short *gfn_write_track;
};
/*
@@ -1247,8 +1265,9 @@ struct kvm_arch {
* create an NX huge page (without hanging the guest).
*/
struct list_head possible_nx_huge_pages;
- struct kvm_page_track_notifier_node mmu_sp_tracker;
+#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
struct kvm_page_track_notifier_head track_notifier_head;
+#endif
/*
* Protects marking pages unsync during page faults, as TDP MMU page
* faults only take mmu_lock for read. For simplicity, the unsync
@@ -1655,8 +1674,8 @@ struct kvm_x86_ops {
u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
- void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
- void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu, u64 multiplier);
+ void (*write_tsc_offset)(struct kvm_vcpu *vcpu);
+ void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu);
/*
* Retrieve somewhat arbitrary exit information. Intended to
@@ -1795,8 +1814,8 @@ static inline struct kvm *kvm_arch_alloc_vm(void)
#define __KVM_HAVE_ARCH_VM_FREE
void kvm_arch_free_vm(struct kvm *kvm);
-#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
-static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
+static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
if (kvm_x86_ops.flush_remote_tlbs &&
!static_call(kvm_x86_flush_remote_tlbs)(kvm))
@@ -1805,6 +1824,8 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
return -ENOTSUPP;
}
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+
#define kvm_arch_pmi_in_guest(vcpu) \
((vcpu) && (vcpu)->arch.handling_intr_from_guest)
@@ -1833,7 +1854,6 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
const struct kvm_memory_slot *memslot);
-void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
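For context on the renamed hook above: kvm_arch_flush_remote_tlbs() returns 0 only when the vendor flush_remote_tlbs hook exists and succeeded, so generic KVM is expected to fall back to its request/IPI-based flush otherwise. A minimal sketch of that fallback, assuming the existing generic helpers kvm_make_all_cpus_request() and KVM_REQ_TLB_FLUSH (a simplification, not the exact body in virt/kvm/kvm_main.c):

/* Illustrative only: try the arch hook, fall back to a remote flush request. */
static void example_flush_remote_tlbs(struct kvm *kvm)
{
	if (kvm_arch_flush_remote_tlbs(kvm))
		kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
}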
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index eb186bc57f6a..3d040741044b 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -2,11 +2,9 @@
#ifndef _ASM_X86_KVM_PAGE_TRACK_H
#define _ASM_X86_KVM_PAGE_TRACK_H
-enum kvm_page_track_mode {
- KVM_PAGE_TRACK_WRITE,
- KVM_PAGE_TRACK_MAX,
-};
+#include <linux/kvm_types.h>
+#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
/*
* The notifier represented by @kvm_page_track_notifier_node is linked into
* the head which will be notified when guest is triggering the track event.
@@ -26,54 +24,39 @@ struct kvm_page_track_notifier_node {
* It is called when guest is writing the write-tracked page
* and write emulation is finished at that time.
*
- * @vcpu: the vcpu where the write access happened.
* @gpa: the physical address written by guest.
* @new: the data was written to the address.
* @bytes: the written length.
* @node: this node
*/
- void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
- int bytes, struct kvm_page_track_notifier_node *node);
+ void (*track_write)(gpa_t gpa, const u8 *new, int bytes,
+ struct kvm_page_track_notifier_node *node);
+
/*
- * It is called when memory slot is being moved or removed
- * users can drop write-protection for the pages in that memory slot
+ * Invoked when a memory region is removed from the guest. Or in KVM
+ * terms, when a memslot is deleted.
*
- * @kvm: the kvm where memory slot being moved or removed
- * @slot: the memory slot being moved or removed
- * @node: this node
+ * @gfn: base gfn of the region being removed
+ * @nr_pages: number of pages in the to-be-removed region
+ * @node: this node
*/
- void (*track_flush_slot)(struct kvm *kvm, struct kvm_memory_slot *slot,
- struct kvm_page_track_notifier_node *node);
+ void (*track_remove_region)(gfn_t gfn, unsigned long nr_pages,
+ struct kvm_page_track_notifier_node *node);
};
-int kvm_page_track_init(struct kvm *kvm);
-void kvm_page_track_cleanup(struct kvm *kvm);
+int kvm_page_track_register_notifier(struct kvm *kvm,
+ struct kvm_page_track_notifier_node *n);
+void kvm_page_track_unregister_notifier(struct kvm *kvm,
+ struct kvm_page_track_notifier_node *n);
-bool kvm_page_track_write_tracking_enabled(struct kvm *kvm);
-int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot);
-
-void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);
-int kvm_page_track_create_memslot(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- unsigned long npages);
-
-void kvm_slot_page_track_add_page(struct kvm *kvm,
- struct kvm_memory_slot *slot, gfn_t gfn,
- enum kvm_page_track_mode mode);
-void kvm_slot_page_track_remove_page(struct kvm *kvm,
- struct kvm_memory_slot *slot, gfn_t gfn,
- enum kvm_page_track_mode mode);
-bool kvm_slot_page_track_is_active(struct kvm *kvm,
- const struct kvm_memory_slot *slot,
- gfn_t gfn, enum kvm_page_track_mode mode);
+int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn);
+int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn);
+#else
+/*
+ * Allow defining a node in a structure even if page tracking is disabled, e.g.
+ * to play nice with testing headers via direct inclusion from the command line.
+ */
+struct kvm_page_track_notifier_node {};
+#endif /* CONFIG_KVM_EXTERNAL_WRITE_TRACKING */
-void
-kvm_page_track_register_notifier(struct kvm *kvm,
- struct kvm_page_track_notifier_node *n);
-void
-kvm_page_track_unregister_notifier(struct kvm *kvm,
- struct kvm_page_track_notifier_node *n);
-void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
- int bytes);
-void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot);
#endif
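The reworked page-track interface above drops the per-mode enum and the vcpu/slot arguments; an external user now registers a notifier and tracks gfns directly. A minimal, hypothetical consumer built only from the declarations above (error handling trimmed, names invented for illustration):

static void demo_track_write(gpa_t gpa, const u8 *new, int bytes,
			     struct kvm_page_track_notifier_node *node)
{
	/* A guest write hit a write-tracked gfn; emulation has completed. */
}

static void demo_track_remove_region(gfn_t gfn, unsigned long nr_pages,
				     struct kvm_page_track_notifier_node *node)
{
	/* The memslot covering [gfn, gfn + nr_pages) is being deleted. */
}

static struct kvm_page_track_notifier_node demo_node = {
	.track_write		= demo_track_write,
	.track_remove_region	= demo_track_remove_region,
};

static int demo_start_tracking(struct kvm *kvm, gfn_t gfn)
{
	int r = kvm_page_track_register_notifier(kvm, &demo_node);

	if (r)
		return r;
	return kvm_write_track_add_gfn(kvm, gfn);
}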
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index fa83d88e4c99..033b53f993c6 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -26,6 +26,7 @@
union hv_ghcb;
DECLARE_STATIC_KEY_FALSE(isolation_type_snp);
+DECLARE_STATIC_KEY_FALSE(isolation_type_tdx);
typedef int (*hyperv_fill_flush_list_func)(
struct hv_guest_mapping_flush_list *flush,
@@ -40,6 +41,7 @@ static inline unsigned char hv_get_nmi_reason(void)
#if IS_ENABLED(CONFIG_HYPERV)
extern int hyperv_init_cpuhp;
+extern bool hyperv_paravisor_present;
extern void *hv_hypercall_pg;
@@ -47,10 +49,25 @@ extern u64 hv_current_partition_id;
extern union hv_ghcb * __percpu *hv_ghcb_pg;
+bool hv_isolation_type_snp(void);
+bool hv_isolation_type_tdx(void);
+u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
+
+/*
+ * DEFAULT INIT GPAT and SEGMENT LIMIT value in struct VMSA
+ * to start AP in enlightened SEV guest.
+ */
+#define HV_AP_INIT_GPAT_DEFAULT 0x0007040600070406ULL
+#define HV_AP_SEGMENT_LIMIT 0xffffffff
+
int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);
+/*
+ * If the hypercall involves no input or output parameters, the hypervisor
+ * ignores the corresponding GPA pointer.
+ */
static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
u64 input_address = input ? virt_to_phys(input) : 0;
@@ -58,6 +75,19 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
u64 hv_status;
#ifdef CONFIG_X86_64
+ if (hv_isolation_type_tdx() && !hyperv_paravisor_present)
+ return hv_tdx_hypercall(control, input_address, output_address);
+
+ if (hv_isolation_type_snp() && !hyperv_paravisor_present) {
+ __asm__ __volatile__("mov %4, %%r8\n"
+ "vmmcall"
+ : "=a" (hv_status), ASM_CALL_CONSTRAINT,
+ "+c" (control), "+d" (input_address)
+ : "r" (output_address)
+ : "cc", "memory", "r8", "r9", "r10", "r11");
+ return hv_status;
+ }
+
if (!hv_hypercall_pg)
return U64_MAX;
@@ -101,7 +131,16 @@ static inline u64 _hv_do_fast_hypercall8(u64 control, u64 input1)
u64 hv_status;
#ifdef CONFIG_X86_64
- {
+ if (hv_isolation_type_tdx() && !hyperv_paravisor_present)
+ return hv_tdx_hypercall(control, input1, 0);
+
+ if (hv_isolation_type_snp() && !hyperv_paravisor_present) {
+ __asm__ __volatile__(
+ "vmmcall"
+ : "=a" (hv_status), ASM_CALL_CONSTRAINT,
+ "+c" (control), "+d" (input1)
+ :: "cc", "r8", "r9", "r10", "r11");
+ } else {
__asm__ __volatile__(CALL_NOSPEC
: "=a" (hv_status), ASM_CALL_CONSTRAINT,
"+c" (control), "+d" (input1)
@@ -146,7 +185,17 @@ static inline u64 _hv_do_fast_hypercall16(u64 control, u64 input1, u64 input2)
u64 hv_status;
#ifdef CONFIG_X86_64
- {
+ if (hv_isolation_type_tdx() && !hyperv_paravisor_present)
+ return hv_tdx_hypercall(control, input1, input2);
+
+ if (hv_isolation_type_snp() && !hyperv_paravisor_present) {
+ __asm__ __volatile__("mov %4, %%r8\n"
+ "vmmcall"
+ : "=a" (hv_status), ASM_CALL_CONSTRAINT,
+ "+c" (control), "+d" (input1)
+ : "r" (input2)
+ : "cc", "r8", "r9", "r10", "r11");
+ } else {
__asm__ __volatile__("mov %4, %%r8\n"
CALL_NOSPEC
: "=a" (hv_status), ASM_CALL_CONSTRAINT,
@@ -225,20 +274,24 @@ int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
#ifdef CONFIG_AMD_MEM_ENCRYPT
-void hv_ghcb_msr_write(u64 msr, u64 value);
-void hv_ghcb_msr_read(u64 msr, u64 *value);
bool hv_ghcb_negotiate_protocol(void);
void __noreturn hv_ghcb_terminate(unsigned int set, unsigned int reason);
-void hv_vtom_init(void);
+int hv_snp_boot_ap(int cpu, unsigned long start_ip);
#else
-static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
-static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
-static inline void hv_vtom_init(void) {}
+static inline int hv_snp_boot_ap(int cpu, unsigned long start_ip) { return 0; }
#endif
-extern bool hv_isolation_type_snp(void);
+#if defined(CONFIG_AMD_MEM_ENCRYPT) || defined(CONFIG_INTEL_TDX_GUEST)
+void hv_vtom_init(void);
+void hv_ivm_msr_write(u64 msr, u64 value);
+void hv_ivm_msr_read(u64 msr, u64 *value);
+#else
+static inline void hv_vtom_init(void) {}
+static inline void hv_ivm_msr_write(u64 msr, u64 value) {}
+static inline void hv_ivm_msr_read(u64 msr, u64 *value) {}
+#endif
static inline bool hv_is_synic_reg(unsigned int reg)
{
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index 9177b4354c3f..6536873f8fc0 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -25,7 +25,14 @@ void __noreturn machine_real_restart(unsigned int type);
#define MRR_BIOS 0
#define MRR_APM 1
+#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD)
+typedef void (cpu_emergency_virt_cb)(void);
+void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback);
+void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback);
void cpu_emergency_disable_virtualization(void);
+#else
+static inline void cpu_emergency_disable_virtualization(void) {}
+#endif /* CONFIG_KVM_INTEL || CONFIG_KVM_AMD */
typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
void nmi_shootdown_cpus(nmi_shootdown_cb callback);
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index e7c7379d6ac7..19bf955b67e0 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -288,6 +288,7 @@ static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_
#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
+#define SVM_SEV_FEAT_DEBUG_SWAP BIT(5)
struct vmcb_seg {
u16 selector;
@@ -345,7 +346,7 @@ struct vmcb_save_area {
u64 last_excp_from;
u64 last_excp_to;
u8 reserved_0x298[72];
- u32 spec_ctrl; /* Guest version of SPEC_CTRL at 0x2E0 */
+ u64 spec_ctrl; /* Guest version of SPEC_CTRL at 0x2E0 */
} __packed;
/* Save area definition for SEV-ES and SEV-SNP guests */
@@ -512,7 +513,7 @@ struct ghcb {
} __packed;
-#define EXPECTED_VMCB_SAVE_AREA_SIZE 740
+#define EXPECTED_VMCB_SAVE_AREA_SIZE 744
#define EXPECTED_GHCB_SAVE_AREA_SIZE 1032
#define EXPECTED_SEV_ES_SAVE_AREA_SIZE 1648
#define EXPECTED_VMCB_CONTROL_AREA_SIZE 1024
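Widening spec_ctrl from u32 to u64 grows struct vmcb_save_area by four bytes, which is why EXPECTED_VMCB_SAVE_AREA_SIZE moves from 740 to 744. The constant only has value because the header pairs it with a compile-time size check; a sketch of that check, noting the actual assertion in svm.h may be spelled differently:

static inline void demo_vmcb_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct vmcb_save_area) != EXPECTED_VMCB_SAVE_AREA_SIZE);
}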
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
deleted file mode 100644
index 3b12e6b99412..000000000000
--- a/arch/x86/include/asm/virtext.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* CPU virtualization extensions handling
- *
- * This should carry the code for handling CPU virtualization extensions
- * that needs to live in the kernel core.
- *
- * Author: Eduardo Habkost <ehabkost@redhat.com>
- *
- * Copyright (C) 2008, Red Hat Inc.
- *
- * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc.
- */
-#ifndef _ASM_X86_VIRTEX_H
-#define _ASM_X86_VIRTEX_H
-
-#include <asm/processor.h>
-
-#include <asm/vmx.h>
-#include <asm/svm.h>
-#include <asm/tlbflush.h>
-
-/*
- * VMX functions:
- */
-
-static inline int cpu_has_vmx(void)
-{
- unsigned long ecx = cpuid_ecx(1);
- return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
-}
-
-
-/**
- * cpu_vmxoff() - Disable VMX on the current CPU
- *
- * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
- *
- * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
- * atomically track post-VMXON state, e.g. this may be called in NMI context.
- * Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
- * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
- * magically in RM, VM86, compat mode, or at CPL>0.
- */
-static inline int cpu_vmxoff(void)
-{
- asm_volatile_goto("1: vmxoff\n\t"
- _ASM_EXTABLE(1b, %l[fault])
- ::: "cc", "memory" : fault);
-
- cr4_clear_bits(X86_CR4_VMXE);
- return 0;
-
-fault:
- cr4_clear_bits(X86_CR4_VMXE);
- return -EIO;
-}
-
-static inline int cpu_vmx_enabled(void)
-{
- return __read_cr4() & X86_CR4_VMXE;
-}
-
-/** Disable VMX if it is enabled on the current CPU
- *
- * You shouldn't call this if cpu_has_vmx() returns 0.
- */
-static inline void __cpu_emergency_vmxoff(void)
-{
- if (cpu_vmx_enabled())
- cpu_vmxoff();
-}
-
-/** Disable VMX if it is supported and enabled on the current CPU
- */
-static inline void cpu_emergency_vmxoff(void)
-{
- if (cpu_has_vmx())
- __cpu_emergency_vmxoff();
-}
-
-
-
-
-/*
- * SVM functions:
- */
-
-/** Check if the CPU has SVM support
- *
- * You can use the 'msg' arg to get a message describing the problem,
- * if the function returns zero. Simply pass NULL if you are not interested
- * on the messages; gcc should take care of not generating code for
- * the messages on this case.
- */
-static inline int cpu_has_svm(const char **msg)
-{
- if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
- boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
- if (msg)
- *msg = "not amd or hygon";
- return 0;
- }
-
- if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) {
- if (msg)
- *msg = "can't execute cpuid_8000000a";
- return 0;
- }
-
- if (!boot_cpu_has(X86_FEATURE_SVM)) {
- if (msg)
- *msg = "svm not available";
- return 0;
- }
- return 1;
-}
-
-
-/** Disable SVM on the current CPU
- *
- * You should call this only if cpu_has_svm() returned true.
- */
-static inline void cpu_svm_disable(void)
-{
- uint64_t efer;
-
- wrmsrl(MSR_VM_HSAVE_PA, 0);
- rdmsrl(MSR_EFER, efer);
- if (efer & EFER_SVME) {
- /*
- * Force GIF=1 prior to disabling SVM to ensure INIT and NMI
- * aren't blocked, e.g. if a fatal error occurred between CLGI
- * and STGI. Note, STGI may #UD if SVM is disabled from NMI
- * context between reading EFER and executing STGI. In that
- * case, GIF must already be set, otherwise the NMI would have
- * been blocked, so just eat the fault.
- */
- asm_volatile_goto("1: stgi\n\t"
- _ASM_EXTABLE(1b, %l[fault])
- ::: "memory" : fault);
-fault:
- wrmsrl(MSR_EFER, efer & ~EFER_SVME);
- }
-}
-
-/** Makes sure SVM is disabled, if it is supported on the CPU
- */
-static inline void cpu_emergency_svm_disable(void)
-{
- if (cpu_has_svm(NULL))
- cpu_svm_disable();
-}
-
-#endif /* _ASM_X86_VIRTEX_H */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 0d02c4aafa6f..0e73616b82f3 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -71,7 +71,7 @@
#define SECONDARY_EXEC_RDSEED_EXITING VMCS_CONTROL_BIT(RDSEED_EXITING)
#define SECONDARY_EXEC_ENABLE_PML VMCS_CONTROL_BIT(PAGE_MOD_LOGGING)
#define SECONDARY_EXEC_PT_CONCEAL_VMX VMCS_CONTROL_BIT(PT_CONCEAL_VMX)
-#define SECONDARY_EXEC_XSAVES VMCS_CONTROL_BIT(XSAVES)
+#define SECONDARY_EXEC_ENABLE_XSAVES VMCS_CONTROL_BIT(XSAVES)
#define SECONDARY_EXEC_MODE_BASED_EPT_EXEC VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC)
#define SECONDARY_EXEC_PT_USE_GPA VMCS_CONTROL_BIT(PT_USE_GPA)
#define SECONDARY_EXEC_TSC_SCALING VMCS_CONTROL_BIT(TSC_SCALING)
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index c6c15ce1952f..5934ee5bc087 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -239,12 +239,6 @@ extern int (*console_blank_hook)(int);
#endif
/*
- * The apm_bios device is one of the misc char devices.
- * This is its minor number.
- */
-#define APM_MINOR_DEV 134
-
-/*
* Various options can be changed at boot time as follows:
* (We allow underscores for compatibility with the modules code)
* apm=on/off enable/disable APM
diff --git a/arch/x86/kernel/audit_64.c b/arch/x86/kernel/audit_64.c
index 44c3601cfdc4..190c120f4285 100644
--- a/arch/x86/kernel/audit_64.c
+++ b/arch/x86/kernel/audit_64.c
@@ -63,11 +63,6 @@ int audit_classify_syscall(int abi, unsigned syscall)
static int __init audit_classes_init(void)
{
#ifdef CONFIG_IA32_EMULATION
- extern __u32 ia32_dir_class[];
- extern __u32 ia32_write_class[];
- extern __u32 ia32_read_class[];
- extern __u32 ia32_chattr_class[];
- extern __u32 ia32_signal_class[];
audit_register_class(AUDIT_CLASS_WRITE_32, ia32_write_class);
audit_register_class(AUDIT_CLASS_READ_32, ia32_read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6d75fab10161..382d4e6b848d 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1280,11 +1280,11 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS),
- VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
- VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED),
- VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED | GDS),
- VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED),
VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS),
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 0100468e72ca..e6bba12c759c 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -32,6 +32,7 @@
#include <asm/nmi.h>
#include <clocksource/hyperv_timer.h>
#include <asm/numa.h>
+#include <asm/svm.h>
/* Is Linux running as the root partition? */
bool hv_root_partition;
@@ -39,6 +40,10 @@ bool hv_root_partition;
bool hv_nested;
struct ms_hyperv_info ms_hyperv;
+/* Used in modules via hv_do_hypercall(): see arch/x86/include/asm/mshyperv.h */
+bool hyperv_paravisor_present __ro_after_init;
+EXPORT_SYMBOL_GPL(hyperv_paravisor_present);
+
#if IS_ENABLED(CONFIG_HYPERV)
static inline unsigned int hv_get_nested_reg(unsigned int reg)
{
@@ -65,8 +70,8 @@ u64 hv_get_non_nested_register(unsigned int reg)
{
u64 value;
- if (hv_is_synic_reg(reg) && hv_isolation_type_snp())
- hv_ghcb_msr_read(reg, &value);
+ if (hv_is_synic_reg(reg) && ms_hyperv.paravisor_present)
+ hv_ivm_msr_read(reg, &value);
else
rdmsrl(reg, value);
return value;
@@ -75,8 +80,8 @@ EXPORT_SYMBOL_GPL(hv_get_non_nested_register);
void hv_set_non_nested_register(unsigned int reg, u64 value)
{
- if (hv_is_synic_reg(reg) && hv_isolation_type_snp()) {
- hv_ghcb_msr_write(reg, value);
+ if (hv_is_synic_reg(reg) && ms_hyperv.paravisor_present) {
+ hv_ivm_msr_write(reg, value);
/* Write proxy bit via wrmsrl instruction */

if (hv_is_sint_reg(reg))
@@ -295,6 +300,15 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
native_smp_prepare_cpus(max_cpus);
+ /*
+ * Override wakeup_secondary_cpu_64 callback for SEV-SNP
+ * enlightened guest.
+ */
+ if (!ms_hyperv.paravisor_present && hv_isolation_type_snp()) {
+ apic->wakeup_secondary_cpu_64 = hv_snp_boot_ap;
+ return;
+ }
+
#ifdef CONFIG_X86_64
for_each_present_cpu(i) {
if (i == 0)
@@ -313,6 +327,26 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
}
#endif
+/*
+ * When a fully enlightened TDX VM runs on Hyper-V, the firmware sets the
+ * HW_REDUCED flag: refer to acpi_tb_create_local_fadt(). Consequently ttyS0
+ * interrupts can't work because request_irq() -> ... -> irq_to_desc() returns
+ * NULL for ttyS0. This happens because mp_config_acpi_legacy_irqs() sees a
+ * nr_legacy_irqs() of 0, so it doesn't initialize the array 'mp_irqs[]', and
+ * later setup_IO_APIC_irqs() -> find_irq_entry() fails to find the legacy irqs
+ * from the array and hence doesn't create the necessary irq description info.
+ *
+ * Clone arch/x86/kernel/acpi/boot.c: acpi_generic_reduced_hw_init() here,
+ * except don't change 'legacy_pic', which keeps its default value
+ * 'default_legacy_pic'. This way, mp_config_acpi_legacy_irqs() sees a non-zero
+ * nr_legacy_irqs() and eventually serial console interrupts work properly.
+ */
+static void __init reduced_hw_init(void)
+{
+ x86_init.timers.timer_init = x86_init_noop;
+ x86_init.irqs.pre_vector_init = x86_init_noop;
+}
+
static void __init ms_hyperv_init_platform(void)
{
int hv_max_functions_eax;
@@ -399,11 +433,33 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.shared_gpa_boundary =
BIT_ULL(ms_hyperv.shared_gpa_boundary_bits);
+ hyperv_paravisor_present = !!ms_hyperv.paravisor_present;
+
pr_info("Hyper-V: Isolation Config: Group A 0x%x, Group B 0x%x\n",
ms_hyperv.isolation_config_a, ms_hyperv.isolation_config_b);
- if (hv_get_isolation_type() == HV_ISOLATION_TYPE_SNP)
+
+ if (hv_get_isolation_type() == HV_ISOLATION_TYPE_SNP) {
static_branch_enable(&isolation_type_snp);
+ } else if (hv_get_isolation_type() == HV_ISOLATION_TYPE_TDX) {
+ static_branch_enable(&isolation_type_tdx);
+
+ /* A TDX VM must use x2APIC and doesn't use lazy EOI. */
+ ms_hyperv.hints &= ~HV_X64_APIC_ACCESS_RECOMMENDED;
+
+ if (!ms_hyperv.paravisor_present) {
+ /* To be supported: more work is required. */
+ ms_hyperv.features &= ~HV_MSR_REFERENCE_TSC_AVAILABLE;
+
+ /* HV_REGISTER_CRASH_CTL is unsupported. */
+ ms_hyperv.misc_features &= ~HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
+
+ /* Don't trust Hyper-V's TLB-flushing hypercalls. */
+ ms_hyperv.hints &= ~HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
+
+ x86_init.acpi.reduced_hw_early_init = reduced_hw_init;
+ }
+ }
}
if (hv_max_functions_eax >= HYPERV_CPUID_NESTED_FEATURES) {
@@ -473,7 +529,7 @@ static void __init ms_hyperv_init_platform(void)
#if IS_ENABLED(CONFIG_HYPERV)
if ((hv_get_isolation_type() == HV_ISOLATION_TYPE_VBS) ||
- (hv_get_isolation_type() == HV_ISOLATION_TYPE_SNP))
+ ms_hyperv.paravisor_present)
hv_vtom_init();
/*
* Setup the hook to get control post apic initialization.
@@ -497,7 +553,8 @@ static void __init ms_hyperv_init_platform(void)
# ifdef CONFIG_SMP
smp_ops.smp_prepare_boot_cpu = hv_smp_prepare_boot_cpu;
- if (hv_root_partition)
+ if (hv_root_partition ||
+ (!ms_hyperv.paravisor_present && hv_isolation_type_snp()))
smp_ops.smp_prepare_cpus = hv_smp_prepare_cpus;
# endif
@@ -560,6 +617,22 @@ static bool __init ms_hyperv_msi_ext_dest_id(void)
return eax & HYPERV_VS_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE;
}
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+static void hv_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
+{
+ /* RAX and CPL are already in the GHCB */
+ ghcb_set_rcx(ghcb, regs->cx);
+ ghcb_set_rdx(ghcb, regs->dx);
+ ghcb_set_r8(ghcb, regs->r8);
+}
+
+static bool hv_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
+{
+ /* No checking of the return state needed */
+ return true;
+}
+#endif
+
const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
.name = "Microsoft Hyper-V",
.detect = ms_hyperv_platform,
@@ -567,4 +640,8 @@ const __initconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
.init.x2apic_available = ms_hyperv_x2apic_available,
.init.msi_ext_dest_id = ms_hyperv_msi_ext_dest_id,
.init.init_platform = ms_hyperv_init_platform,
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ .runtime.sev_es_hcall_prepare = hv_sev_es_hcall_prepare,
+ .runtime.sev_es_hcall_finish = hv_sev_es_hcall_finish,
+#endif
};
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index 458cb7419502..8f559eeae08e 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -45,7 +45,21 @@ static u64 prefetch_disable_bits;
*/
static unsigned int pseudo_lock_major;
static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0);
-static struct class *pseudo_lock_class;
+
+static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode)
+{
+ const struct rdtgroup *rdtgrp;
+
+ rdtgrp = dev_get_drvdata(dev);
+ if (mode)
+ *mode = 0600;
+ return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name);
+}
+
+static const struct class pseudo_lock_class = {
+ .name = "pseudo_lock",
+ .devnode = pseudo_lock_devnode,
+};
/**
* get_prefetch_disable_bits - prefetch disable bits of supported platforms
@@ -1353,7 +1367,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
&pseudo_measure_fops);
}
- dev = device_create(pseudo_lock_class, NULL,
+ dev = device_create(&pseudo_lock_class, NULL,
MKDEV(pseudo_lock_major, new_minor),
rdtgrp, "%s", rdtgrp->kn->name);
@@ -1383,7 +1397,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
goto out;
out_device:
- device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor));
+ device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor));
out_debugfs:
debugfs_remove_recursive(plr->debugfs_dir);
pseudo_lock_minor_release(new_minor);
@@ -1424,7 +1438,7 @@ void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp)
pseudo_lock_cstates_relax(plr);
debugfs_remove_recursive(rdtgrp->plr->debugfs_dir);
- device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
+ device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
pseudo_lock_minor_release(plr->minor);
free:
@@ -1560,16 +1574,6 @@ static const struct file_operations pseudo_lock_dev_fops = {
.mmap = pseudo_lock_dev_mmap,
};
-static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode)
-{
- const struct rdtgroup *rdtgrp;
-
- rdtgrp = dev_get_drvdata(dev);
- if (mode)
- *mode = 0600;
- return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name);
-}
-
int rdt_pseudo_lock_init(void)
{
int ret;
@@ -1580,21 +1584,18 @@ int rdt_pseudo_lock_init(void)
pseudo_lock_major = ret;
- pseudo_lock_class = class_create("pseudo_lock");
- if (IS_ERR(pseudo_lock_class)) {
- ret = PTR_ERR(pseudo_lock_class);
+ ret = class_register(&pseudo_lock_class);
+ if (ret) {
unregister_chrdev(pseudo_lock_major, "pseudo_lock");
return ret;
}
- pseudo_lock_class->devnode = pseudo_lock_devnode;
return 0;
}
void rdt_pseudo_lock_release(void)
{
- class_destroy(pseudo_lock_class);
- pseudo_lock_class = NULL;
+ class_unregister(&pseudo_lock_class);
unregister_chrdev(pseudo_lock_major, "pseudo_lock");
pseudo_lock_major = 0;
}
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index bdc0d5539b57..dae436253de4 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -40,7 +40,6 @@
#include <asm/processor.h>
#include <asm/msr.h>
-static struct class *cpuid_class;
static enum cpuhp_state cpuhp_cpuid_state;
struct cpuid_regs_done {
@@ -124,26 +123,31 @@ static const struct file_operations cpuid_fops = {
.open = cpuid_open,
};
+static char *cpuid_devnode(const struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt));
+}
+
+static const struct class cpuid_class = {
+ .name = "cpuid",
+ .devnode = cpuid_devnode,
+};
+
static int cpuid_device_create(unsigned int cpu)
{
struct device *dev;
- dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), NULL,
+ dev = device_create(&cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu), NULL,
"cpu%d", cpu);
return PTR_ERR_OR_ZERO(dev);
}
static int cpuid_device_destroy(unsigned int cpu)
{
- device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+ device_destroy(&cpuid_class, MKDEV(CPUID_MAJOR, cpu));
return 0;
}
-static char *cpuid_devnode(const struct device *dev, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "cpu/%u/cpuid", MINOR(dev->devt));
-}
-
static int __init cpuid_init(void)
{
int err;
@@ -154,12 +158,9 @@ static int __init cpuid_init(void)
CPUID_MAJOR);
return -EBUSY;
}
- cpuid_class = class_create("cpuid");
- if (IS_ERR(cpuid_class)) {
- err = PTR_ERR(cpuid_class);
+ err = class_register(&cpuid_class);
+ if (err)
goto out_chrdev;
- }
- cpuid_class->devnode = cpuid_devnode;
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/cpuid:online",
cpuid_device_create, cpuid_device_destroy);
@@ -170,7 +171,7 @@ static int __init cpuid_init(void)
return 0;
out_class:
- class_destroy(cpuid_class);
+ class_unregister(&cpuid_class);
out_chrdev:
__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
return err;
@@ -180,7 +181,7 @@ module_init(cpuid_init);
static void __exit cpuid_exit(void)
{
cpuhp_remove_state(cpuhp_cpuid_state);
- class_destroy(cpuid_class);
+ class_unregister(&cpuid_class);
__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
}
module_exit(cpuid_exit);
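The cpuid, msr and pseudo_lock conversions in this series all follow the same device-class modernization: rather than a runtime class_create() followed by assigning ->devnode, the class becomes a const, statically initialized object that is registered and unregistered. Reduced to its essentials, with illustrative names that are not part of the patch:

static char *demo_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "demo/%u", MINOR(dev->devt));
}

static const struct class demo_class = {
	.name    = "demo",
	.devnode = demo_devnode,
};

static int __init demo_init(void)
{
	/* Paired with class_unregister(&demo_class) on module exit. */
	return class_register(&demo_class);
}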
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 587c7743fd21..c92d88680dbf 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -48,27 +48,6 @@ struct crash_memmap_data {
unsigned int type;
};
-/*
- * This is used to VMCLEAR all VMCSs loaded on the
- * processor. And when loading kvm_intel module, the
- * callback function pointer will be assigned.
- *
- * protected by rcu.
- */
-crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
-EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
-
-static inline void cpu_crash_vmclear_loaded_vmcss(void)
-{
- crash_vmclear_fn *do_vmclear_operation = NULL;
-
- rcu_read_lock();
- do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
- if (do_vmclear_operation)
- do_vmclear_operation();
- rcu_read_unlock();
-}
-
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
@@ -76,11 +55,6 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
crash_save_cpu(regs, cpu);
/*
- * VMCLEAR VMCSs loaded on all cpus if needed.
- */
- cpu_crash_vmclear_loaded_vmcss();
-
- /*
* Disable Intel PT to stop its logging
*/
cpu_emergency_stop_pt();
@@ -133,11 +107,6 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
crash_smp_send_stop();
- /*
- * VMCLEAR VMCSs loaded on this cpu if needed.
- */
- cpu_crash_vmclear_loaded_vmcss();
-
cpu_emergency_disable_virtualization();
/*
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 41dac93b8ea4..cadf68737e6b 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -71,7 +71,7 @@ static unsigned short xsave_cpuid_features[] __initdata = {
[XFEATURE_ZMM_Hi256] = X86_FEATURE_AVX512F,
[XFEATURE_Hi16_ZMM] = X86_FEATURE_AVX512F,
[XFEATURE_PT_UNIMPLEMENTED_SO_FAR] = X86_FEATURE_INTEL_PT,
- [XFEATURE_PKRU] = X86_FEATURE_PKU,
+ [XFEATURE_PKRU] = X86_FEATURE_OSPKE,
[XFEATURE_PASID] = X86_FEATURE_ENQCMD,
[XFEATURE_CET_USER] = X86_FEATURE_SHSTK,
[XFEATURE_XTILE_CFG] = X86_FEATURE_AMX_TILE,
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 4d8aff05a509..30a55207c000 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -231,9 +231,7 @@ struct irq_chip i8259A_chip = {
};
static char irq_trigger[2];
-/**
- * ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ
- */
+/* ELCR registers (0x4d0, 0x4d1) control edge/level of IRQ */
static void restore_ELCR(char *trigger)
{
outb(trigger[0], PIC_ELCR1);
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index 7bb17d37db01..e17c16c54a37 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -39,7 +39,6 @@
#include <asm/cpufeature.h>
#include <asm/msr.h>
-static struct class *msr_class;
static enum cpuhp_state cpuhp_msr_state;
enum allow_write_msrs {
@@ -235,26 +234,31 @@ static const struct file_operations msr_fops = {
.compat_ioctl = msr_ioctl,
};
+static char *msr_devnode(const struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt));
+}
+
+static const struct class msr_class = {
+ .name = "msr",
+ .devnode = msr_devnode,
+};
+
static int msr_device_create(unsigned int cpu)
{
struct device *dev;
- dev = device_create(msr_class, NULL, MKDEV(MSR_MAJOR, cpu), NULL,
+ dev = device_create(&msr_class, NULL, MKDEV(MSR_MAJOR, cpu), NULL,
"msr%d", cpu);
return PTR_ERR_OR_ZERO(dev);
}
static int msr_device_destroy(unsigned int cpu)
{
- device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
+ device_destroy(&msr_class, MKDEV(MSR_MAJOR, cpu));
return 0;
}
-static char *msr_devnode(const struct device *dev, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "cpu/%u/msr", MINOR(dev->devt));
-}
-
static int __init msr_init(void)
{
int err;
@@ -263,12 +267,9 @@ static int __init msr_init(void)
pr_err("unable to get major %d for msr\n", MSR_MAJOR);
return -EBUSY;
}
- msr_class = class_create("msr");
- if (IS_ERR(msr_class)) {
- err = PTR_ERR(msr_class);
+ err = class_register(&msr_class);
+ if (err)
goto out_chrdev;
- }
- msr_class->devnode = msr_devnode;
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/msr:online",
msr_device_create, msr_device_destroy);
@@ -278,7 +279,7 @@ static int __init msr_init(void)
return 0;
out_class:
- class_destroy(msr_class);
+ class_unregister(&msr_class);
out_chrdev:
__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
return err;
@@ -288,7 +289,7 @@ module_init(msr_init);
static void __exit msr_exit(void)
{
cpuhp_remove_state(cpuhp_msr_state);
- class_destroy(msr_class);
+ class_unregister(&msr_class);
__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
}
module_exit(msr_exit)
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 3adbe97015c1..830425e6d38e 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -22,7 +22,6 @@
#include <asm/reboot_fixups.h>
#include <asm/reboot.h>
#include <asm/pci_x86.h>
-#include <asm/virtext.h>
#include <asm/cpu.h>
#include <asm/nmi.h>
#include <asm/smp.h>
@@ -530,9 +529,54 @@ static inline void kb_wait(void)
static inline void nmi_shootdown_cpus_on_restart(void);
+#if IS_ENABLED(CONFIG_KVM_INTEL) || IS_ENABLED(CONFIG_KVM_AMD)
+/* RCU-protected callback to disable virtualization prior to reboot. */
+static cpu_emergency_virt_cb __rcu *cpu_emergency_virt_callback;
+
+void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback)
+{
+ if (WARN_ON_ONCE(rcu_access_pointer(cpu_emergency_virt_callback)))
+ return;
+
+ rcu_assign_pointer(cpu_emergency_virt_callback, callback);
+}
+EXPORT_SYMBOL_GPL(cpu_emergency_register_virt_callback);
+
+void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback)
+{
+ if (WARN_ON_ONCE(rcu_access_pointer(cpu_emergency_virt_callback) != callback))
+ return;
+
+ rcu_assign_pointer(cpu_emergency_virt_callback, NULL);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(cpu_emergency_unregister_virt_callback);
+
+/*
+ * Disable virtualization, i.e. VMX or SVM, to ensure INIT is recognized during
+ * reboot. VMX blocks INIT if the CPU is post-VMXON, and SVM blocks INIT if
+ * GIF=0, i.e. if the crash occurred between CLGI and STGI.
+ */
+void cpu_emergency_disable_virtualization(void)
+{
+ cpu_emergency_virt_cb *callback;
+
+ /*
+ * IRQs must be disabled as KVM enables virtualization in hardware via
+ * function call IPIs, i.e. IRQs need to be disabled to guarantee
+ * virtualization stays disabled.
+ */
+ lockdep_assert_irqs_disabled();
+
+ rcu_read_lock();
+ callback = rcu_dereference(cpu_emergency_virt_callback);
+ if (callback)
+ callback();
+ rcu_read_unlock();
+}
+
static void emergency_reboot_disable_virtualization(void)
{
- /* Just make sure we won't change CPUs while doing this */
local_irq_disable();
/*
@@ -545,7 +589,7 @@ static void emergency_reboot_disable_virtualization(void)
* Do the NMI shootdown even if virtualization is off on _this_ CPU, as
* other CPUs may have virtualization enabled.
*/
- if (cpu_has_vmx() || cpu_has_svm(NULL)) {
+ if (rcu_access_pointer(cpu_emergency_virt_callback)) {
/* Safely force _this_ CPU out of VMX/SVM operation. */
cpu_emergency_disable_virtualization();
@@ -553,7 +597,9 @@ static void emergency_reboot_disable_virtualization(void)
nmi_shootdown_cpus_on_restart();
}
}
-
+#else
+static void emergency_reboot_disable_virtualization(void) { }
+#endif /* CONFIG_KVM_INTEL || CONFIG_KVM_AMD */
void __attribute__((weak)) mach_reboot_fixups(void)
{
@@ -787,21 +833,9 @@ void machine_crash_shutdown(struct pt_regs *regs)
}
#endif
-
/* This is the CPU performing the emergency shutdown work. */
int crashing_cpu = -1;
-/*
- * Disable virtualization, i.e. VMX or SVM, to ensure INIT is recognized during
- * reboot. VMX blocks INIT if the CPU is post-VMXON, and SVM blocks INIT if
- * GIF=0, i.e. if the crash occurred between CLGI and STGI.
- */
-void cpu_emergency_disable_virtualization(void)
-{
- cpu_emergency_vmxoff();
- cpu_emergency_svm_disable();
-}
-
#if defined(CONFIG_SMP)
static nmi_shootdown_cb shootdown_callback;
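With virtext.h removed, the RCU-protected callback added above becomes the only hook for disabling virtualization on emergency reboot. A hypothetical in-tree user would be expected to wire it up roughly as follows (the callback body is a placeholder, not KVM's actual implementation):

static void demo_emergency_disable_virt(void)
{
	/* Force this CPU out of VMX/SVM operation; must tolerate NMI context. */
}

static int __init demo_init(void)
{
	cpu_emergency_register_virt_callback(demo_emergency_disable_virt);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Unregistration waits for an RCU grace period, so in-flight callers finish. */
	cpu_emergency_unregister_virt_callback(demo_emergency_disable_virt);
}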
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 89ca7f4c1464..ed90f148140d 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -101,7 +101,7 @@ config X86_SGX_KVM
config KVM_AMD
tristate "KVM for AMD processors support"
- depends on KVM
+ depends on KVM && (CPU_SUP_AMD || CPU_SUP_HYGON)
help
Provides support for KVM on AMD processors equipped with the AMD-V
(SVM) extensions.
@@ -138,6 +138,19 @@ config KVM_XEN
If in doubt, say "N".
+config KVM_PROVE_MMU
+ bool "Prove KVM MMU correctness"
+ depends on DEBUG_KERNEL
+ depends on KVM
+ depends on EXPERT
+ help
+ Enables runtime assertions in KVM's MMU that are too costly to enable
+ in anything remotely resembling a production environment, e.g. this
+ gates code that verifies a to-be-freed page table doesn't have any
+ present SPTEs.
+
+ If in doubt, say "N".
+
config KVM_EXTERNAL_WRITE_TRACKING
bool
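KVM_PROVE_MMU replaces the old MMU_DEBUG/dbg machinery removed later in this patch: KVM_MMU_WARN_ON() is only meant to be a live runtime assertion when the option is enabled. The macro definition itself is not shown in this hunk; a plausible shape for such a gate, stated purely as an assumption, is:

#ifdef CONFIG_KVM_PROVE_MMU
#define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x)
#else
/* Type-check and discard the expression when the option is off. */
#define KVM_MMU_WARN_ON(x) BUILD_BUG_ON_INVALID(x)
#endif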
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index d3432687c9e6..0544e30b4946 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kvm_host.h>
+#include "linux/lockdep.h"
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
@@ -84,6 +85,18 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
struct kvm_cpuid_entry2 *e;
int i;
+ /*
+ * KVM has a semi-arbitrary rule that querying the guest's CPUID model
+ * with IRQs disabled is disallowed. The CPUID model can legitimately
+ * have over one hundred entries, i.e. the lookup is slow, and IRQs are
+ * typically disabled in KVM only when KVM is in a performance critical
+ * path, e.g. the core VM-Enter/VM-Exit run loop. Nothing will break
+ * if this rule is violated, this assertion is purely to flag potential
+ * performance issues. If this fires, consider moving the lookup out
+ * of the hotpath, e.g. by caching information during CPUID updates.
+ */
+ lockdep_assert_irqs_enabled();
+
for (i = 0; i < nent; i++) {
e = &entries[i];
@@ -312,6 +325,27 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct kvm_cpuid_entry2 *best;
+ bool allow_gbpages;
+
+ BUILD_BUG_ON(KVM_NR_GOVERNED_FEATURES > KVM_MAX_NR_GOVERNED_FEATURES);
+ bitmap_zero(vcpu->arch.governed_features.enabled,
+ KVM_MAX_NR_GOVERNED_FEATURES);
+
+ /*
+ * If TDP is enabled, let the guest use GBPAGES if they're supported in
+ * hardware. The hardware page walker doesn't let KVM disable GBPAGES,
+ * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
+ * walk for performance and complexity reasons. Not to mention KVM
+ * _can't_ solve the problem because GVA->GPA walks aren't visible to
+ * KVM once a TDP translation is installed. Mimic hardware behavior so
+ * that KVM's is at least consistent, i.e. doesn't randomly inject #PF.
+ * If TDP is disabled, honor *only* guest CPUID as KVM has full control
+ * and can install smaller shadow pages if the host lacks 1GiB support.
+ */
+ allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
+ guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
+ if (allow_gbpages)
+ kvm_governed_feature_set(vcpu, X86_FEATURE_GBPAGES);
best = kvm_find_cpuid_entry(vcpu, 1);
if (best && apic) {
@@ -647,7 +681,8 @@ void kvm_set_cpu_caps(void)
);
kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX,
- F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI)
+ F(AVX_VNNI_INT8) | F(AVX_NE_CONVERT) | F(PREFETCHITI) |
+ F(AMX_COMPLEX)
);
kvm_cpu_cap_mask(CPUID_D_1_EAX,
@@ -1154,6 +1189,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
cpuid_entry_override(entry, CPUID_8000_0001_EDX);
cpuid_entry_override(entry, CPUID_8000_0001_ECX);
break;
+ case 0x80000005:
+ /* Pass host L1 cache and TLB info. */
+ break;
case 0x80000006:
/* Drop reserved bits, pass host L2 cache and TLB info. */
entry->edx &= ~GENMASK(17, 16);
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index b1658c0de847..284fa4704553 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -232,4 +232,50 @@ static __always_inline bool guest_pv_has(struct kvm_vcpu *vcpu,
return vcpu->arch.pv_cpuid.features & (1u << kvm_feature);
}
+enum kvm_governed_features {
+#define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,
+#include "governed_features.h"
+ KVM_NR_GOVERNED_FEATURES
+};
+
+static __always_inline int kvm_governed_feature_index(unsigned int x86_feature)
+{
+ switch (x86_feature) {
+#define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
+#include "governed_features.h"
+ default:
+ return -1;
+ }
+}
+
+static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
+{
+ return kvm_governed_feature_index(x86_feature) >= 0;
+}
+
+static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
+ unsigned int x86_feature)
+{
+ BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));
+
+ __set_bit(kvm_governed_feature_index(x86_feature),
+ vcpu->arch.governed_features.enabled);
+}
+
+static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
+ unsigned int x86_feature)
+{
+ if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
+ kvm_governed_feature_set(vcpu, x86_feature);
+}
+
+static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
+ unsigned int x86_feature)
+{
+ BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));
+
+ return test_bit(kvm_governed_feature_index(x86_feature),
+ vcpu->arch.governed_features.enabled);
+}
+
#endif
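The intended flow for the helpers above: code that runs after a CPUID update decides once whether a feature is usable (see the GBPAGES hunk in cpuid.c later in this patch), and hot paths then query the per-vCPU bitmap instead of re-walking guest CPUID. Illustrative usage, with X86_FEATURE_XSAVES standing in for any entry in governed_features.h:

static void demo_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	/* Sets the governed bit iff both KVM and guest CPUID expose XSAVES. */
	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES);
}

static bool demo_fast_path(struct kvm_vcpu *vcpu)
{
	/* Constant-time bitmap test; cheap enough for the run loop. */
	return guest_can_use(vcpu, X86_FEATURE_XSAVES);
}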
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 936a397a08cd..2673cd5c46cb 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1799,13 +1799,11 @@ static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
op->addr.mem,
&op->val,
op->bytes);
- break;
case OP_MEM_STR:
return segmented_write(ctxt,
op->addr.mem,
op->data,
op->bytes * op->count);
- break;
case OP_XMM:
kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
break;
diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h
new file mode 100644
index 000000000000..423a73395c10
--- /dev/null
+++ b/arch/x86/kvm/governed_features.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if !defined(KVM_GOVERNED_FEATURE) || defined(KVM_GOVERNED_X86_FEATURE)
+BUILD_BUG()
+#endif
+
+#define KVM_GOVERNED_X86_FEATURE(x) KVM_GOVERNED_FEATURE(X86_FEATURE_##x)
+
+KVM_GOVERNED_X86_FEATURE(GBPAGES)
+KVM_GOVERNED_X86_FEATURE(XSAVES)
+KVM_GOVERNED_X86_FEATURE(VMX)
+KVM_GOVERNED_X86_FEATURE(NRIPS)
+KVM_GOVERNED_X86_FEATURE(TSCRATEMSR)
+KVM_GOVERNED_X86_FEATURE(V_VMSAVE_VMLOAD)
+KVM_GOVERNED_X86_FEATURE(LBRV)
+KVM_GOVERNED_X86_FEATURE(PAUSEFILTER)
+KVM_GOVERNED_X86_FEATURE(PFTHRESHOLD)
+KVM_GOVERNED_X86_FEATURE(VGIF)
+KVM_GOVERNED_X86_FEATURE(VNMI)
+
+#undef KVM_GOVERNED_X86_FEATURE
+#undef KVM_GOVERNED_FEATURE
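governed_features.h is an X-macro list: every includer defines KVM_GOVERNED_FEATURE() before pulling the header in, so the same feature lines expand into different constructs. Tracing the first entry through the two expansions that cpuid.h already performs:

/*
 * With  #define KVM_GOVERNED_FEATURE(x) KVM_GOVERNED_##x,  (the enum builder),
 * KVM_GOVERNED_X86_FEATURE(GBPAGES) becomes the enumerator:
 *
 *	KVM_GOVERNED_X86_FEATURE_GBPAGES,
 *
 * With  #define KVM_GOVERNED_FEATURE(x) case x: return KVM_GOVERNED_##x;
 * (the index helper), the same line becomes:
 *
 *	case X86_FEATURE_GBPAGES: return KVM_GOVERNED_X86_FEATURE_GBPAGES;
 */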
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index b28fd020066f..7c2dac6824e2 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1293,7 +1293,6 @@ static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
case HV_X64_MSR_VP_ASSIST_PAGE:
return hv_vcpu->cpuid_cache.features_eax &
HV_MSR_APIC_ACCESS_AVAILABLE;
- break;
case HV_X64_MSR_TSC_FREQUENCY:
case HV_X64_MSR_APIC_FREQUENCY:
return hv_vcpu->cpuid_cache.features_eax &
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index ab65f3a47dfd..be7aeb9b8ea3 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -213,7 +213,6 @@ struct x86_emulate_ops {
bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt, u32 *eax, u32 *ebx,
u32 *ecx, u32 *edx, bool exact_only);
- bool (*guest_has_long_mode)(struct x86_emulate_ctxt *ctxt);
bool (*guest_has_movbe)(struct x86_emulate_ctxt *ctxt);
bool (*guest_has_fxsr)(struct x86_emulate_ctxt *ctxt);
bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index a983a16163b1..dcd60b39e794 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -376,7 +376,8 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
struct kvm_vcpu *vcpu;
unsigned long i;
u32 max_id = 255; /* enough space for any xAPIC ID */
- bool xapic_id_mismatch = false;
+ bool xapic_id_mismatch;
+ int r;
/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
@@ -386,9 +387,14 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
"Dirty APIC map without an in-kernel local APIC");
mutex_lock(&kvm->arch.apic_map_lock);
+
+retry:
/*
- * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
- * (if clean) or the APIC registers (if dirty).
+ * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map (if clean)
+ * or the APIC registers (if dirty). Note, on retry the map may have
+ * not yet been marked dirty by whatever task changed a vCPU's x2APIC
+ * ID, i.e. the map may still show up as in-progress. In that case
+ * this task still needs to retry and complete its calculation.
*/
if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
@@ -397,6 +403,15 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
return;
}
+ /*
+ * Reset the mismatch flag between attempts so that KVM does the right
+ * thing if a vCPU changes its xAPIC ID, but do NOT reset max_id, i.e.
+ * keep max_id strictly increasing. Disallowing max_id from shrinking
+ * ensures KVM won't get stuck in an infinite loop, e.g. if the vCPU
+ * with the highest x2APIC ID is toggling its APIC on and off.
+ */
+ xapic_id_mismatch = false;
+
kvm_for_each_vcpu(i, vcpu, kvm)
if (kvm_apic_present(vcpu))
max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
@@ -415,9 +430,15 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
if (!kvm_apic_present(vcpu))
continue;
- if (kvm_recalculate_phys_map(new, vcpu, &xapic_id_mismatch)) {
+ r = kvm_recalculate_phys_map(new, vcpu, &xapic_id_mismatch);
+ if (r) {
kvfree(new);
new = NULL;
+ if (r == -E2BIG) {
+ cond_resched();
+ goto retry;
+ }
+
goto out;
}
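The retry added above relies on max_id being monotonic: a -E2BIG return from kvm_recalculate_phys_map() means some vCPU's x2APIC ID no longer fits the allocation, and because max_id is never reset, the next pass allocates a strictly larger map, so the loop cannot spin forever. A schematic of the pattern, where the two helpers are placeholders rather than real functions:

static void demo_recalculate(struct kvm *kvm)
{
	u32 max_id = 255;	/* only ever grows, which bounds the retries */

retry:
	max_id = max(max_id, demo_highest_apic_id(kvm));

	if (demo_build_map(kvm, max_id) == -E2BIG) {
		cond_resched();
		goto retry;
	}
}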
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 92d5a1924fc1..253fb2093d5d 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -121,6 +121,8 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);
+void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
+ int bytes);
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index ec169f5c7dce..e1d011c67cc6 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -25,6 +25,7 @@
#include "kvm_cache_regs.h"
#include "smm.h"
#include "kvm_emulate.h"
+#include "page_track.h"
#include "cpuid.h"
#include "spte.h"
@@ -53,7 +54,7 @@
#include <asm/io.h>
#include <asm/set_memory.h>
#include <asm/vmx.h>
-#include <asm/kvm_page_track.h>
+
#include "trace.h"
extern bool itlb_multihit_kvm_mitigation;
@@ -115,11 +116,6 @@ static int max_huge_page_level __read_mostly;
static int tdp_root_level __read_mostly;
static int max_tdp_level __read_mostly;
-#ifdef MMU_DEBUG
-bool dbg = 0;
-module_param(dbg, bool, 0644);
-#endif
-
#define PTE_PREFETCH_NUM 8
#include <trace/events/kvm.h>
@@ -278,16 +274,12 @@ static inline bool kvm_available_flush_remote_tlbs_range(void)
return kvm_x86_ops.flush_remote_tlbs_range;
}
-void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn,
- gfn_t nr_pages)
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{
- int ret = -EOPNOTSUPP;
+ if (!kvm_x86_ops.flush_remote_tlbs_range)
+ return -EOPNOTSUPP;
- if (kvm_x86_ops.flush_remote_tlbs_range)
- ret = static_call(kvm_x86_flush_remote_tlbs_range)(kvm, start_gfn,
- nr_pages);
- if (ret)
- kvm_flush_remote_tlbs(kvm);
+ return static_call(kvm_x86_flush_remote_tlbs_range)(kvm, gfn, nr_pages);
}
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
@@ -490,7 +482,7 @@ retry:
*/
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
- WARN_ON(is_shadow_present_pte(*sptep));
+ WARN_ON_ONCE(is_shadow_present_pte(*sptep));
__set_spte(sptep, new_spte);
}
@@ -502,7 +494,7 @@ static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
{
u64 old_spte = *sptep;
- WARN_ON(!is_shadow_present_pte(new_spte));
+ WARN_ON_ONCE(!is_shadow_present_pte(new_spte));
check_spte_writable_invariants(new_spte);
if (!is_shadow_present_pte(old_spte)) {
@@ -515,7 +507,7 @@ static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
else
old_spte = __update_clear_spte_slow(sptep, new_spte);
- WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
+ WARN_ON_ONCE(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
return old_spte;
}
@@ -597,7 +589,7 @@ static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
* by a refcounted page, the refcount is elevated.
*/
page = kvm_pfn_to_refcounted_page(pfn);
- WARN_ON(page && !page_count(page));
+ WARN_ON_ONCE(page && !page_count(page));
if (is_accessed_spte(old_spte))
kvm_set_pfn_accessed(pfn);
@@ -812,7 +804,7 @@ static void update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot,
for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
linfo = lpage_info_slot(gfn, slot, i);
linfo->disallow_lpage += count;
- WARN_ON(linfo->disallow_lpage < 0);
+ WARN_ON_ONCE(linfo->disallow_lpage < 0);
}
}
@@ -839,8 +831,7 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
/* the non-leaf shadow pages are keeping readonly. */
if (sp->role.level > PG_LEVEL_4K)
- return kvm_slot_page_track_add_page(kvm, slot, gfn,
- KVM_PAGE_TRACK_WRITE);
+ return __kvm_write_track_add_gfn(kvm, slot, gfn);
kvm_mmu_gfn_disallow_lpage(slot, gfn);
@@ -886,8 +877,7 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
slots = kvm_memslots_for_spte_role(kvm, sp->role);
slot = __gfn_to_memslot(slots, gfn);
if (sp->role.level > PG_LEVEL_4K)
- return kvm_slot_page_track_remove_page(kvm, slot, gfn,
- KVM_PAGE_TRACK_WRITE);
+ return __kvm_write_track_remove_gfn(kvm, slot, gfn);
kvm_mmu_gfn_allow_lpage(slot, gfn);
}
@@ -941,10 +931,8 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
int count = 0;
if (!rmap_head->val) {
- rmap_printk("%p %llx 0->1\n", spte, *spte);
rmap_head->val = (unsigned long)spte;
} else if (!(rmap_head->val & 1)) {
- rmap_printk("%p %llx 1->many\n", spte, *spte);
desc = kvm_mmu_memory_cache_alloc(cache);
desc->sptes[0] = (u64 *)rmap_head->val;
desc->sptes[1] = spte;
@@ -953,7 +941,6 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
rmap_head->val = (unsigned long)desc | 1;
++count;
} else {
- rmap_printk("%p %llx many->many\n", spte, *spte);
desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
count = desc->tail_count + desc->spte_count;
@@ -973,7 +960,8 @@ static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
return count;
}
-static void pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
+static void pte_list_desc_remove_entry(struct kvm *kvm,
+ struct kvm_rmap_head *rmap_head,
struct pte_list_desc *desc, int i)
{
struct pte_list_desc *head_desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
@@ -984,7 +972,7 @@ static void pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
* when adding an entry and the previous head is full, and heads are
* removed (this flow) when they become empty.
*/
- BUG_ON(j < 0);
+ KVM_BUG_ON_DATA_CORRUPTION(j < 0, kvm);
/*
* Replace the to-be-freed SPTE with the last valid entry from the head
@@ -1009,35 +997,34 @@ static void pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
mmu_free_pte_list_desc(head_desc);
}
-static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
+static void pte_list_remove(struct kvm *kvm, u64 *spte,
+ struct kvm_rmap_head *rmap_head)
{
struct pte_list_desc *desc;
int i;
- if (!rmap_head->val) {
- pr_err("%s: %p 0->BUG\n", __func__, spte);
- BUG();
- } else if (!(rmap_head->val & 1)) {
- rmap_printk("%p 1->0\n", spte);
- if ((u64 *)rmap_head->val != spte) {
- pr_err("%s: %p 1->BUG\n", __func__, spte);
- BUG();
- }
+ if (KVM_BUG_ON_DATA_CORRUPTION(!rmap_head->val, kvm))
+ return;
+
+ if (!(rmap_head->val & 1)) {
+ if (KVM_BUG_ON_DATA_CORRUPTION((u64 *)rmap_head->val != spte, kvm))
+ return;
+
rmap_head->val = 0;
} else {
- rmap_printk("%p many->many\n", spte);
desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
while (desc) {
for (i = 0; i < desc->spte_count; ++i) {
if (desc->sptes[i] == spte) {
- pte_list_desc_remove_entry(rmap_head, desc, i);
+ pte_list_desc_remove_entry(kvm, rmap_head,
+ desc, i);
return;
}
}
desc = desc->more;
}
- pr_err("%s: %p many->many\n", __func__, spte);
- BUG();
+
+ KVM_BUG_ON_DATA_CORRUPTION(true, kvm);
}
}
@@ -1045,7 +1032,7 @@ static void kvm_zap_one_rmap_spte(struct kvm *kvm,
struct kvm_rmap_head *rmap_head, u64 *sptep)
{
mmu_spte_clear_track_bits(kvm, sptep);
- pte_list_remove(sptep, rmap_head);
+ pte_list_remove(kvm, sptep, rmap_head);
}
/* Return true if at least one SPTE was zapped, false otherwise */
@@ -1120,7 +1107,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
slot = __gfn_to_memslot(slots, gfn);
rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
- pte_list_remove(spte, rmap_head);
+ pte_list_remove(kvm, spte, rmap_head);
}
/*
@@ -1212,7 +1199,7 @@ static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
struct kvm_mmu_page *sp;
sp = sptep_to_sp(sptep);
- WARN_ON(sp->role.level == PG_LEVEL_4K);
+ WARN_ON_ONCE(sp->role.level == PG_LEVEL_4K);
drop_spte(kvm, sptep);
@@ -1241,8 +1228,6 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
!(pt_protect && is_mmu_writable_spte(spte)))
return false;
- rmap_printk("spte %p %llx\n", sptep, *sptep);
-
if (pt_protect)
spte &= ~shadow_mmu_writable_mask;
spte = spte & ~PT_WRITABLE_MASK;
@@ -1267,9 +1252,7 @@ static bool spte_clear_dirty(u64 *sptep)
{
u64 spte = *sptep;
- rmap_printk("spte %p %llx\n", sptep, *sptep);
-
- MMU_WARN_ON(!spte_ad_enabled(spte));
+ KVM_MMU_WARN_ON(!spte_ad_enabled(spte));
spte &= ~shadow_dirty_mask;
return mmu_spte_update(sptep, spte);
}
@@ -1475,14 +1458,11 @@ static bool kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
u64 new_spte;
kvm_pfn_t new_pfn;
- WARN_ON(pte_huge(pte));
+ WARN_ON_ONCE(pte_huge(pte));
new_pfn = pte_pfn(pte);
restart:
for_each_rmap_spte(rmap_head, &iter, sptep) {
- rmap_printk("spte %p %llx gfn %llx (%d)\n",
- sptep, *sptep, gfn, level);
-
need_flush = true;
if (pte_write(pte)) {
@@ -1588,7 +1568,7 @@ static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
range->start, range->end - 1, &iterator)
ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
- iterator.level, range->pte);
+ iterator.level, range->arg.pte);
return ret;
}
@@ -1710,21 +1690,19 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
return young;
}
-#ifdef MMU_DEBUG
-static int is_empty_shadow_page(u64 *spt)
+static void kvm_mmu_check_sptes_at_free(struct kvm_mmu_page *sp)
{
- u64 *pos;
- u64 *end;
+#ifdef CONFIG_KVM_PROVE_MMU
+ int i;
- for (pos = spt, end = pos + SPTE_ENT_PER_PAGE; pos != end; pos++)
- if (is_shadow_present_pte(*pos)) {
- printk(KERN_ERR "%s: %p %llx\n", __func__,
- pos, *pos);
- return 0;
- }
- return 1;
-}
+ for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
+ if (KVM_MMU_WARN_ON(is_shadow_present_pte(sp->spt[i])))
+ pr_err_ratelimited("SPTE %llx (@ %p) for gfn %llx shadow-present at free",
+ sp->spt[i], &sp->spt[i],
+ kvm_mmu_page_get_gfn(sp, i));
+ }
#endif
+}
/*
* This value is the sum of all of the kvm instances's
@@ -1752,7 +1730,8 @@ static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
{
- MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
+ kvm_mmu_check_sptes_at_free(sp);
+
hlist_del(&sp->hash_link);
list_del(&sp->link);
free_page((unsigned long)sp->spt);
@@ -1775,16 +1754,16 @@ static void mmu_page_add_parent_pte(struct kvm_mmu_memory_cache *cache,
pte_list_add(cache, parent_pte, &sp->parent_ptes);
}
-static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
+static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
u64 *parent_pte)
{
- pte_list_remove(parent_pte, &sp->parent_ptes);
+ pte_list_remove(kvm, parent_pte, &sp->parent_ptes);
}
-static void drop_parent_pte(struct kvm_mmu_page *sp,
+static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
u64 *parent_pte)
{
- mmu_page_remove_parent_pte(sp, parent_pte);
+ mmu_page_remove_parent_pte(kvm, sp, parent_pte);
mmu_spte_clear_no_track(parent_pte);
}
@@ -1840,7 +1819,7 @@ static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
{
--sp->unsync_children;
- WARN_ON((int)sp->unsync_children < 0);
+ WARN_ON_ONCE((int)sp->unsync_children < 0);
__clear_bit(idx, sp->unsync_child_bitmap);
}
@@ -1898,7 +1877,7 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp,
static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
- WARN_ON(!sp->unsync);
+ WARN_ON_ONCE(!sp->unsync);
trace_kvm_mmu_sync_page(sp);
sp->unsync = 0;
--kvm->stat.mmu_unsync;
@@ -2073,11 +2052,11 @@ static int mmu_pages_first(struct kvm_mmu_pages *pvec,
if (pvec->nr == 0)
return 0;
- WARN_ON(pvec->page[0].idx != INVALID_INDEX);
+ WARN_ON_ONCE(pvec->page[0].idx != INVALID_INDEX);
sp = pvec->page[0].sp;
level = sp->role.level;
- WARN_ON(level == PG_LEVEL_4K);
+ WARN_ON_ONCE(level == PG_LEVEL_4K);
parents->parent[level-2] = sp;
@@ -2099,7 +2078,7 @@ static void mmu_pages_clear_parents(struct mmu_page_path *parents)
if (!sp)
return;
- WARN_ON(idx == INVALID_INDEX);
+ WARN_ON_ONCE(idx == INVALID_INDEX);
clear_unsync_child_bit(sp, idx);
level++;
} while (!sp->unsync_children);
@@ -2220,7 +2199,7 @@ static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm,
if (ret < 0)
break;
- WARN_ON(!list_empty(&invalid_list));
+ WARN_ON_ONCE(!list_empty(&invalid_list));
if (ret > 0)
kvm_flush_remote_tlbs(kvm);
}
@@ -2499,7 +2478,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
if (child->role.access == direct_access)
return;
- drop_parent_pte(child, sptep);
+ drop_parent_pte(vcpu->kvm, child, sptep);
kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep);
}
}
@@ -2517,7 +2496,7 @@ static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
drop_spte(kvm, spte);
} else {
child = spte_to_child_sp(pte);
- drop_parent_pte(child, spte);
+ drop_parent_pte(kvm, child, spte);
/*
* Recursively zap nested TDP SPs, parentless SPs are
@@ -2548,13 +2527,13 @@ static int kvm_mmu_page_unlink_children(struct kvm *kvm,
return zapped;
}
-static void kvm_mmu_unlink_parents(struct kvm_mmu_page *sp)
+static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
u64 *sptep;
struct rmap_iterator iter;
while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
- drop_parent_pte(sp, sptep);
+ drop_parent_pte(kvm, sp, sptep);
}
static int mmu_zap_unsync_children(struct kvm *kvm,
@@ -2593,7 +2572,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
++kvm->stat.mmu_shadow_zapped;
*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
- kvm_mmu_unlink_parents(sp);
+ kvm_mmu_unlink_parents(kvm, sp);
/* Zapping children means active_mmu_pages has become unstable. */
list_unstable = *nr_zapped;
@@ -2675,7 +2654,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
kvm_flush_remote_tlbs(kvm);
list_for_each_entry_safe(sp, nsp, invalid_list, link) {
- WARN_ON(!sp->role.invalid || sp->root_count);
+ WARN_ON_ONCE(!sp->role.invalid || sp->root_count);
kvm_mmu_free_shadow_page(sp);
}
}
@@ -2775,12 +2754,9 @@ int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
LIST_HEAD(invalid_list);
int r;
- pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
r = 0;
write_lock(&kvm->mmu_lock);
for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
- pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
- sp->role.word);
r = 1;
kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
}
@@ -2831,7 +2807,7 @@ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
* track machinery is used to write-protect upper-level shadow pages,
* i.e. this guards the role.level == 4K assertion below!
*/
- if (kvm_slot_page_track_is_active(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE))
+ if (kvm_gfn_is_write_tracked(kvm, slot, gfn))
return -EPERM;
/*
@@ -2873,7 +2849,7 @@ int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
continue;
}
- WARN_ON(sp->role.level != PG_LEVEL_4K);
+ WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);
kvm_unsync_page(kvm, sp);
}
if (locked)
@@ -2938,9 +2914,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
bool prefetch = !fault || fault->prefetch;
bool write_fault = fault && fault->write;
- pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
- *sptep, write_fault, gfn);
-
if (unlikely(is_noslot_pfn(pfn))) {
vcpu->stat.pf_mmio_spte_created++;
mark_mmio_spte(vcpu, sptep, gfn, pte_access);
@@ -2957,11 +2930,9 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
u64 pte = *sptep;
child = spte_to_child_sp(pte);
- drop_parent_pte(child, sptep);
+ drop_parent_pte(vcpu->kvm, child, sptep);
flush = true;
} else if (pfn != spte_to_pfn(*sptep)) {
- pgprintk("hfn old %llx new %llx\n",
- spte_to_pfn(*sptep), pfn);
drop_spte(vcpu->kvm, sptep);
flush = true;
} else
@@ -2986,8 +2957,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
if (flush)
kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level);
- pgprintk("%s: setting spte %llx\n", __func__, *sptep);
-
if (!was_rmapped) {
WARN_ON_ONCE(ret == RET_PF_SPURIOUS);
rmap_add(vcpu, slot, sptep, gfn, pte_access);
@@ -3033,7 +3002,7 @@ static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
u64 *spte, *start = NULL;
int i;
- WARN_ON(!sp->role.direct);
+ WARN_ON_ONCE(!sp->role.direct);
i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1);
spte = sp->spt + i;
@@ -3574,12 +3543,8 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
if (!VALID_PAGE(*root_hpa))
return;
- /*
- * The "root" may be a special root, e.g. a PAE entry, treat it as a
- * SPTE to ensure any non-PA bits are dropped.
- */
- sp = spte_to_child_sp(*root_hpa);
- if (WARN_ON(!sp))
+ sp = root_to_sp(*root_hpa);
+ if (WARN_ON_ONCE(!sp))
return;
if (is_tdp_mmu_page(sp))
@@ -3624,7 +3589,9 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
&invalid_list);
if (free_active_root) {
- if (to_shadow_page(mmu->root.hpa)) {
+ if (kvm_mmu_is_dummy_root(mmu->root.hpa)) {
+ /* Nothing to cleanup for dummy roots. */
+ } else if (root_to_sp(mmu->root.hpa)) {
mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
} else if (mmu->pae_root) {
for (i = 0; i < 4; ++i) {
@@ -3648,6 +3615,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
{
unsigned long roots_to_free = 0;
+ struct kvm_mmu_page *sp;
hpa_t root_hpa;
int i;
@@ -3662,8 +3630,8 @@ void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
if (!VALID_PAGE(root_hpa))
continue;
- if (!to_shadow_page(root_hpa) ||
- to_shadow_page(root_hpa)->role.guest_mode)
+ sp = root_to_sp(root_hpa);
+ if (!sp || sp->role.guest_mode)
roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
}
@@ -3671,19 +3639,6 @@ void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
-
-static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
-{
- int ret = 0;
-
- if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
- kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
- ret = 1;
- }
-
- return ret;
-}
-
static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
u8 level)
{
@@ -3821,8 +3776,10 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu);
root_gfn = root_pgd >> PAGE_SHIFT;
- if (mmu_check_root(vcpu, root_gfn))
- return 1;
+ if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
+ mmu->root.hpa = kvm_mmu_get_dummy_root();
+ return 0;
+ }
/*
* On SVM, reading PDPTRs might access guest memory, which might fault
@@ -3834,8 +3791,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
if (!(pdptrs[i] & PT_PRESENT_MASK))
continue;
- if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
- return 1;
+ if (!kvm_vcpu_is_visible_gfn(vcpu, pdptrs[i] >> PAGE_SHIFT))
+ pdptrs[i] = 0;
}
}
@@ -4002,7 +3959,7 @@ static bool is_unsync_root(hpa_t root)
{
struct kvm_mmu_page *sp;
- if (!VALID_PAGE(root))
+ if (!VALID_PAGE(root) || kvm_mmu_is_dummy_root(root))
return false;
/*
@@ -4018,7 +3975,7 @@ static bool is_unsync_root(hpa_t root)
* requirement isn't satisfied.
*/
smp_rmb();
- sp = to_shadow_page(root);
+ sp = root_to_sp(root);
/*
* PAE roots (somewhat arbitrarily) aren't backed by shadow pages, the
@@ -4048,11 +4005,12 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
hpa_t root = vcpu->arch.mmu->root.hpa;
- sp = to_shadow_page(root);
if (!is_unsync_root(root))
return;
+ sp = root_to_sp(root);
+
write_lock(&vcpu->kvm->mmu_lock);
mmu_sync_children(vcpu, sp, true);
write_unlock(&vcpu->kvm->mmu_lock);
@@ -4194,7 +4152,7 @@ static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
return RET_PF_EMULATE;
reserved = get_mmio_spte(vcpu, addr, &spte);
- if (WARN_ON(reserved))
+ if (WARN_ON_ONCE(reserved))
return -EINVAL;
if (is_mmio_spte(spte)) {
@@ -4232,7 +4190,7 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
* guest is writing the page which is write tracked which can
* not be fixed by page fault handler.
*/
- if (kvm_slot_page_track_is_active(vcpu->kvm, fault->slot, fault->gfn, KVM_PAGE_TRACK_WRITE))
+ if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn))
return true;
return false;
@@ -4382,7 +4340,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault)
{
- struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa);
+ struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
/* Special roots, e.g. pae_root, are not backed by shadow pages. */
if (sp && is_obsolete_sp(vcpu->kvm, sp))
@@ -4407,6 +4365,10 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
{
int r;
+ /* Dummy roots are used only for shadowing bad guest roots. */
+ if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa)))
+ return RET_PF_RETRY;
+
if (page_fault_handle_page_track(vcpu, fault))
return RET_PF_EMULATE;
@@ -4443,8 +4405,6 @@ out_unlock:
static int nonpaging_page_fault(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault)
{
- pgprintk("%s: gva %lx error %x\n", __func__, fault->addr, fault->error_code);
-
/* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
fault->max_level = PG_LEVEL_2M;
return direct_page_fault(vcpu, fault);
@@ -4562,9 +4522,19 @@ static void nonpaging_init_context(struct kvm_mmu *context)
static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
union kvm_mmu_page_role role)
{
- return (role.direct || pgd == root->pgd) &&
- VALID_PAGE(root->hpa) &&
- role.word == to_shadow_page(root->hpa)->role.word;
+ struct kvm_mmu_page *sp;
+
+ if (!VALID_PAGE(root->hpa))
+ return false;
+
+ if (!role.direct && pgd != root->pgd)
+ return false;
+
+ sp = root_to_sp(root->hpa);
+ if (WARN_ON_ONCE(!sp))
+ return false;
+
+ return role.word == sp->role.word;
}
/*
@@ -4634,11 +4604,10 @@ static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu,
gpa_t new_pgd, union kvm_mmu_page_role new_role)
{
/*
- * For now, limit the caching to 64-bit hosts+VMs in order to avoid
- * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs
- * later if necessary.
+ * Limit reuse to 64-bit hosts+VMs without "special" roots in order to
+ * avoid having to deal with PDPTEs and other complexities.
*/
- if (VALID_PAGE(mmu->root.hpa) && !to_shadow_page(mmu->root.hpa))
+ if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa))
kvm_mmu_free_roots(kvm, mmu, KVM_MMU_ROOT_CURRENT);
if (VALID_PAGE(mmu->root.hpa))
@@ -4684,9 +4653,12 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
* If this is a direct root page, it doesn't have a write flooding
* count. Otherwise, clear the write flooding count.
*/
- if (!new_role.direct)
- __clear_sp_write_flooding_count(
- to_shadow_page(vcpu->arch.mmu->root.hpa));
+ if (!new_role.direct) {
+ struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
+
+ if (!WARN_ON_ONCE(!sp))
+ __clear_sp_write_flooding_count(sp);
+ }
}
EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
@@ -4808,28 +4780,13 @@ static void __reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
}
}
-static bool guest_can_use_gbpages(struct kvm_vcpu *vcpu)
-{
- /*
- * If TDP is enabled, let the guest use GBPAGES if they're supported in
- * hardware. The hardware page walker doesn't let KVM disable GBPAGES,
- * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
- * walk for performance and complexity reasons. Not to mention KVM
- * _can't_ solve the problem because GVA->GPA walks aren't visible to
- * KVM once a TDP translation is installed. Mimic hardware behavior so
- * that KVM's is at least consistent, i.e. doesn't randomly inject #PF.
- */
- return tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
- guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
-}
-
static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{
__reset_rsvds_bits_mask(&context->guest_rsvd_check,
vcpu->arch.reserved_gpa_bits,
context->cpu_role.base.level, is_efer_nx(context),
- guest_can_use_gbpages(vcpu),
+ guest_can_use(vcpu, X86_FEATURE_GBPAGES),
is_cr4_pse(context),
guest_cpuid_is_amd_or_hygon(vcpu));
}
@@ -4906,7 +4863,8 @@ static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
context->root_role.level,
context->root_role.efer_nx,
- guest_can_use_gbpages(vcpu), is_pse, is_amd);
+ guest_can_use(vcpu, X86_FEATURE_GBPAGES),
+ is_pse, is_amd);
if (!shadow_me_mask)
return;
@@ -5467,8 +5425,8 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
* physical address properties) in a single VM would require tracking
* all relevant CPUID information in kvm_mmu_page_role. That is very
* undesirable as it would increase the memory requirements for
- * gfn_track (see struct kvm_mmu_page_role comments). For now that
- * problem is swept under the rug; KVM's CPUID API is horrific and
+ * gfn_write_track (see struct kvm_mmu_page_role comments). For now
+ * that problem is swept under the rug; KVM's CPUID API is horrific and
* it's all but impossible to solve it without introducing a new API.
*/
vcpu->arch.root_mmu.root_role.word = 0;
@@ -5531,9 +5489,9 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
struct kvm *kvm = vcpu->kvm;
kvm_mmu_free_roots(kvm, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
- WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root.hpa));
+ WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa));
kvm_mmu_free_roots(kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
- WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa));
+ WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa));
vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
}
@@ -5546,16 +5504,21 @@ static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa)
/*
* When freeing obsolete roots, treat roots as obsolete if they don't
- * have an associated shadow page. This does mean KVM will get false
+ * have an associated shadow page, as it's impossible to determine if
+ * such roots are fresh or stale. This does mean KVM will get false
* positives and free roots that don't strictly need to be freed, but
* such false positives are relatively rare:
*
- * (a) only PAE paging and nested NPT has roots without shadow pages
+ * (a) only PAE paging and nested NPT have roots without shadow pages
+ * (or any shadow paging flavor with a dummy root, see note below)
* (b) remote reloads due to a memslot update obsoletes _all_ roots
* (c) KVM doesn't track previous roots for PAE paging, and the guest
* is unlikely to zap an in-use PGD.
+ *
+ * Note! Dummy roots are unique in that they are obsoleted by memslot
+ * _creation_! See also FNAME(fetch).
*/
- sp = to_shadow_page(root_hpa);
+ sp = root_to_sp(root_hpa);
return !sp || is_obsolete_sp(kvm, sp);
}
@@ -5634,9 +5597,6 @@ static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
{
unsigned offset, pte_size, misaligned;
- pgprintk("misaligned: gpa %llx bytes %d role %x\n",
- gpa, bytes, sp->role.word);
-
offset = offset_in_page(gpa);
pte_size = sp->role.has_4_byte_gpte ? 4 : 8;
@@ -5684,9 +5644,8 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
return spte;
}
-static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *new, int bytes,
- struct kvm_page_track_notifier_node *node)
+void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
+ int bytes)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm_mmu_page *sp;
@@ -5702,8 +5661,6 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
return;
- pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
-
write_lock(&vcpu->kvm->mmu_lock);
gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
@@ -5742,7 +5699,18 @@ int noinline kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 err
int r, emulation_type = EMULTYPE_PF;
bool direct = vcpu->arch.mmu->root_role.direct;
- if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
+ /*
+ * IMPLICIT_ACCESS is a KVM-defined flag used to correctly perform SMAP
+ * checks when emulating instructions that trigger implicit accesses.
+ * WARN if hardware generates a fault with an error code that collides
+ * with the KVM-defined value. Clear the flag and continue on, i.e.
+ * don't terminate the VM, as KVM can't possibly be relying on a flag
+ * that KVM doesn't know about.
+ */
+ if (WARN_ON_ONCE(error_code & PFERR_IMPLICIT_ACCESS))
+ error_code &= ~PFERR_IMPLICIT_ACCESS;
+
+ if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
return RET_PF_RETRY;
r = RET_PF_INVALID;
@@ -6099,7 +6067,7 @@ restart:
* pages. Skip the bogus page, otherwise we'll get stuck in an
* infinite loop if the page gets put back on the list (again).
*/
- if (WARN_ON(sp->role.invalid))
+ if (WARN_ON_ONCE(sp->role.invalid))
continue;
/*
@@ -6199,16 +6167,8 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
}
-static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- struct kvm_page_track_notifier_node *node)
-{
- kvm_mmu_zap_all_fast(kvm);
-}
-
int kvm_mmu_init_vm(struct kvm *kvm)
{
- struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
int r;
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
@@ -6222,10 +6182,6 @@ int kvm_mmu_init_vm(struct kvm *kvm)
return r;
}
- node->track_write = kvm_mmu_pte_write;
- node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
- kvm_page_track_register_notifier(kvm, node);
-
kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
@@ -6246,10 +6202,6 @@ static void mmu_free_vm_memory_caches(struct kvm *kvm)
void kvm_mmu_uninit_vm(struct kvm *kvm)
{
- struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
-
- kvm_page_track_unregister_notifier(kvm, node);
-
if (tdp_mmu_enabled)
kvm_mmu_uninit_tdp_mmu(kvm);
@@ -6670,7 +6622,7 @@ static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
*/
if (walk_slot_rmaps(kvm, slot, kvm_mmu_zap_collapsible_spte,
PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL - 1, true))
- kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+ kvm_flush_remote_tlbs_memslot(kvm, slot);
}
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
@@ -6689,20 +6641,6 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
}
}
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
- const struct kvm_memory_slot *memslot)
-{
- /*
- * All current use cases for flushing the TLBs for a specific memslot
- * related to dirty logging, and many do the TLB flush out of mmu_lock.
- * The interaction between the various operations on memslot must be
- * serialized by slots_locks to ensure the TLB flush from one operation
- * is observed by any other operation on the same memslot.
- */
- lockdep_assert_held(&kvm->slots_lock);
- kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
-}
-
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
const struct kvm_memory_slot *memslot)
{
@@ -6732,7 +6670,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
*/
}
-void kvm_mmu_zap_all(struct kvm *kvm)
+static void kvm_mmu_zap_all(struct kvm *kvm)
{
struct kvm_mmu_page *sp, *node;
LIST_HEAD(invalid_list);
@@ -6741,7 +6679,7 @@ void kvm_mmu_zap_all(struct kvm *kvm)
write_lock(&kvm->mmu_lock);
restart:
list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
- if (WARN_ON(sp->role.invalid))
+ if (WARN_ON_ONCE(sp->role.invalid))
continue;
if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
goto restart;
@@ -6757,9 +6695,20 @@ restart:
write_unlock(&kvm->mmu_lock);
}
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+ kvm_mmu_zap_all(kvm);
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot)
+{
+ kvm_mmu_zap_all_fast(kvm);
+}
+
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
{
- WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
+ WARN_ON_ONCE(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
gen &= MMIO_SPTE_GEN_MASK;
@@ -6862,7 +6811,7 @@ static void mmu_destroy_caches(void)
static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
{
if (nx_hugepage_mitigation_hard_disabled)
- return sprintf(buffer, "never\n");
+ return sysfs_emit(buffer, "never\n");
return param_get_bool(buffer, kp);
}
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index d39af5639ce9..b102014e2c60 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -6,18 +6,10 @@
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>
-#undef MMU_DEBUG
-
-#ifdef MMU_DEBUG
-extern bool dbg;
-
-#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
-#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0)
-#define MMU_WARN_ON(x) WARN_ON(x)
+#ifdef CONFIG_KVM_PROVE_MMU
+#define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x)
#else
-#define pgprintk(x...) do { } while (0)
-#define rmap_printk(x...) do { } while (0)
-#define MMU_WARN_ON(x) do { } while (0)
+#define KVM_MMU_WARN_ON(x) BUILD_BUG_ON_INVALID(x)
#endif
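
The BUILD_BUG_ON_INVALID() fallback keeps the assertion's argument compile-checked even when CONFIG_KVM_PROVE_MMU is disabled, without emitting any code. A minimal illustration, reusing a call site from elsewhere in this diff:

/* With CONFIG_KVM_PROVE_MMU=n this still type-checks the expression (a typo in
 * 'spte' or a bogus member access breaks the build) but compiles to nothing;
 * with CONFIG_KVM_PROVE_MMU=y it becomes a WARN_ON_ONCE(). */
KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));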
/* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
@@ -44,6 +36,16 @@ extern bool dbg;
#define INVALID_PAE_ROOT 0
#define IS_VALID_PAE_ROOT(x) (!!(x))
+static inline hpa_t kvm_mmu_get_dummy_root(void)
+{
+ return my_zero_pfn(0) << PAGE_SHIFT;
+}
+
+static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
+{
+ return is_zero_pfn(shadow_page >> PAGE_SHIFT);
+}
+
typedef u64 __rcu *tdp_ptep_t;
struct kvm_mmu_page {
@@ -170,9 +172,6 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
struct kvm_memory_slot *slot, u64 gfn,
int min_level);
-void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn,
- gfn_t nr_pages);
-
/* Flush the given page (huge or not) of guest memory. */
static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
{
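
Tying the two helpers above to their users elsewhere in this diff: the dummy root is the host zero page's PFN encoded as an hpa_t, so it passes VALID_PAGE() yet is trivially detectable and is never backed by a shadow page. A hedged sketch of the intended consumption pattern follows; handle_root() is hypothetical, while the helpers and root_to_sp() come from this diff.

/* Hypothetical caller, illustrating the pattern only. */
static void handle_root(struct kvm *kvm, hpa_t root)
{
	struct kvm_mmu_page *sp;

	/* A dummy root is "valid" but has nothing behind it to sync or zap. */
	if (!VALID_PAGE(root) || kvm_mmu_is_dummy_root(root))
		return;

	/* root_to_sp() also returns NULL for special roots, e.g. pae_root. */
	sp = root_to_sp(root);
	if (!sp)
		return;

	/* ... operate on the backing struct kvm_mmu_page ... */
}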
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 0a2ac438d647..c87da11f3a04 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -12,13 +12,13 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/lockdep.h>
#include <linux/kvm_host.h>
#include <linux/rculist.h>
-#include <asm/kvm_page_track.h>
-
#include "mmu.h"
#include "mmu_internal.h"
+#include "page_track.h"
bool kvm_page_track_write_tracking_enabled(struct kvm *kvm)
{
@@ -28,103 +28,64 @@ bool kvm_page_track_write_tracking_enabled(struct kvm *kvm)
void kvm_page_track_free_memslot(struct kvm_memory_slot *slot)
{
- int i;
-
- for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
- kvfree(slot->arch.gfn_track[i]);
- slot->arch.gfn_track[i] = NULL;
- }
+ kvfree(slot->arch.gfn_write_track);
+ slot->arch.gfn_write_track = NULL;
}
-int kvm_page_track_create_memslot(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- unsigned long npages)
+static int __kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot,
+ unsigned long npages)
{
- int i;
-
- for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
- if (i == KVM_PAGE_TRACK_WRITE &&
- !kvm_page_track_write_tracking_enabled(kvm))
- continue;
-
- slot->arch.gfn_track[i] =
- __vcalloc(npages, sizeof(*slot->arch.gfn_track[i]),
- GFP_KERNEL_ACCOUNT);
- if (!slot->arch.gfn_track[i])
- goto track_free;
- }
+ const size_t size = sizeof(*slot->arch.gfn_write_track);
- return 0;
+ if (!slot->arch.gfn_write_track)
+ slot->arch.gfn_write_track = __vcalloc(npages, size,
+ GFP_KERNEL_ACCOUNT);
-track_free:
- kvm_page_track_free_memslot(slot);
- return -ENOMEM;
+ return slot->arch.gfn_write_track ? 0 : -ENOMEM;
}
-static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode)
+int kvm_page_track_create_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ unsigned long npages)
{
- if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
- return false;
+ if (!kvm_page_track_write_tracking_enabled(kvm))
+ return 0;
- return true;
+ return __kvm_page_track_write_tracking_alloc(slot, npages);
}
int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot)
{
- unsigned short *gfn_track;
-
- if (slot->arch.gfn_track[KVM_PAGE_TRACK_WRITE])
- return 0;
-
- gfn_track = __vcalloc(slot->npages, sizeof(*gfn_track),
- GFP_KERNEL_ACCOUNT);
- if (gfn_track == NULL)
- return -ENOMEM;
-
- slot->arch.gfn_track[KVM_PAGE_TRACK_WRITE] = gfn_track;
- return 0;
+ return __kvm_page_track_write_tracking_alloc(slot, slot->npages);
}
-static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
- enum kvm_page_track_mode mode, short count)
+static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn,
+ short count)
{
int index, val;
index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);
- val = slot->arch.gfn_track[mode][index];
+ val = slot->arch.gfn_write_track[index];
- if (WARN_ON(val + count < 0 || val + count > USHRT_MAX))
+ if (WARN_ON_ONCE(val + count < 0 || val + count > USHRT_MAX))
return;
- slot->arch.gfn_track[mode][index] += count;
+ slot->arch.gfn_write_track[index] += count;
}
-/*
- * add guest page to the tracking pool so that corresponding access on that
- * page will be intercepted.
- *
- * It should be called under the protection both of mmu-lock and kvm->srcu
- * or kvm->slots_lock.
- *
- * @kvm: the guest instance we are interested in.
- * @slot: the @gfn belongs to.
- * @gfn: the guest page.
- * @mode: tracking mode, currently only write track is supported.
- */
-void kvm_slot_page_track_add_page(struct kvm *kvm,
- struct kvm_memory_slot *slot, gfn_t gfn,
- enum kvm_page_track_mode mode)
+void __kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn)
{
+ lockdep_assert_held_write(&kvm->mmu_lock);
- if (WARN_ON(!page_track_mode_is_valid(mode)))
- return;
+ lockdep_assert_once(lockdep_is_held(&kvm->slots_lock) ||
+ srcu_read_lock_held(&kvm->srcu));
- if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE &&
- !kvm_page_track_write_tracking_enabled(kvm)))
+ if (KVM_BUG_ON(!kvm_page_track_write_tracking_enabled(kvm), kvm))
return;
- update_gfn_track(slot, gfn, mode, 1);
+ update_gfn_write_track(slot, gfn, 1);
/*
* new track stops large page mapping for the
@@ -132,37 +93,22 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
*/
kvm_mmu_gfn_disallow_lpage(slot, gfn);
- if (mode == KVM_PAGE_TRACK_WRITE)
- if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
- kvm_flush_remote_tlbs(kvm);
+ if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
+ kvm_flush_remote_tlbs(kvm);
}
-EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
-/*
- * remove the guest page from the tracking pool which stops the interception
- * of corresponding access on that page. It is the opposed operation of
- * kvm_slot_page_track_add_page().
- *
- * It should be called under the protection both of mmu-lock and kvm->srcu
- * or kvm->slots_lock.
- *
- * @kvm: the guest instance we are interested in.
- * @slot: the @gfn belongs to.
- * @gfn: the guest page.
- * @mode: tracking mode, currently only write track is supported.
- */
-void kvm_slot_page_track_remove_page(struct kvm *kvm,
- struct kvm_memory_slot *slot, gfn_t gfn,
- enum kvm_page_track_mode mode)
+void __kvm_write_track_remove_gfn(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn)
{
- if (WARN_ON(!page_track_mode_is_valid(mode)))
- return;
+ lockdep_assert_held_write(&kvm->mmu_lock);
- if (WARN_ON(mode == KVM_PAGE_TRACK_WRITE &&
- !kvm_page_track_write_tracking_enabled(kvm)))
+ lockdep_assert_once(lockdep_is_held(&kvm->slots_lock) ||
+ srcu_read_lock_held(&kvm->srcu));
+
+ if (KVM_BUG_ON(!kvm_page_track_write_tracking_enabled(kvm), kvm))
return;
- update_gfn_track(slot, gfn, mode, -1);
+ update_gfn_write_track(slot, gfn, -1);
/*
* allow large page mapping for the tracked page
@@ -170,31 +116,26 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
*/
kvm_mmu_gfn_allow_lpage(slot, gfn);
}
-EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);
/*
* check if the corresponding access on the specified guest page is tracked.
*/
-bool kvm_slot_page_track_is_active(struct kvm *kvm,
- const struct kvm_memory_slot *slot,
- gfn_t gfn, enum kvm_page_track_mode mode)
+bool kvm_gfn_is_write_tracked(struct kvm *kvm,
+ const struct kvm_memory_slot *slot, gfn_t gfn)
{
int index;
- if (WARN_ON(!page_track_mode_is_valid(mode)))
- return false;
-
if (!slot)
return false;
- if (mode == KVM_PAGE_TRACK_WRITE &&
- !kvm_page_track_write_tracking_enabled(kvm))
+ if (!kvm_page_track_write_tracking_enabled(kvm))
return false;
index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);
- return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
+ return !!READ_ONCE(slot->arch.gfn_write_track[index]);
}
+#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
void kvm_page_track_cleanup(struct kvm *kvm)
{
struct kvm_page_track_notifier_head *head;
@@ -216,17 +157,22 @@ int kvm_page_track_init(struct kvm *kvm)
* register the notifier so that event interception for the tracked guest
* pages can be received.
*/
-void
-kvm_page_track_register_notifier(struct kvm *kvm,
- struct kvm_page_track_notifier_node *n)
+int kvm_page_track_register_notifier(struct kvm *kvm,
+ struct kvm_page_track_notifier_node *n)
{
struct kvm_page_track_notifier_head *head;
+ if (!kvm || kvm->mm != current->mm)
+ return -ESRCH;
+
+ kvm_get_kvm(kvm);
+
head = &kvm->arch.track_notifier_head;
write_lock(&kvm->mmu_lock);
hlist_add_head_rcu(&n->node, &head->track_notifier_list);
write_unlock(&kvm->mmu_lock);
+ return 0;
}
EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier);
@@ -234,9 +180,8 @@ EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier);
* stop receiving the event interception. It is the opposed operation of
* kvm_page_track_register_notifier().
*/
-void
-kvm_page_track_unregister_notifier(struct kvm *kvm,
- struct kvm_page_track_notifier_node *n)
+void kvm_page_track_unregister_notifier(struct kvm *kvm,
+ struct kvm_page_track_notifier_node *n)
{
struct kvm_page_track_notifier_head *head;
@@ -246,6 +191,8 @@ kvm_page_track_unregister_notifier(struct kvm *kvm,
hlist_del_rcu(&n->node);
write_unlock(&kvm->mmu_lock);
synchronize_srcu(&head->track_srcu);
+
+ kvm_put_kvm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);
@@ -256,34 +203,30 @@ EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);
* The node should figure out if the written page is the one that node is
* interested in by itself.
*/
-void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
- int bytes)
+void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes)
{
struct kvm_page_track_notifier_head *head;
struct kvm_page_track_notifier_node *n;
int idx;
- head = &vcpu->kvm->arch.track_notifier_head;
+ head = &kvm->arch.track_notifier_head;
if (hlist_empty(&head->track_notifier_list))
return;
idx = srcu_read_lock(&head->track_srcu);
hlist_for_each_entry_srcu(n, &head->track_notifier_list, node,
- srcu_read_lock_held(&head->track_srcu))
+ srcu_read_lock_held(&head->track_srcu))
if (n->track_write)
- n->track_write(vcpu, gpa, new, bytes, n);
+ n->track_write(gpa, new, bytes, n);
srcu_read_unlock(&head->track_srcu, idx);
}
/*
- * Notify the node that memory slot is being removed or moved so that it can
- * drop write-protection for the pages in the memory slot.
- *
- * The node should figure out it has any write-protected pages in this slot
- * by itself.
+ * Notify external page track nodes that a memory region is being removed from
+ * the VM, e.g. so that users can free any associated metadata.
*/
-void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
+void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
struct kvm_page_track_notifier_head *head;
struct kvm_page_track_notifier_node *n;
@@ -296,8 +239,69 @@ void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
idx = srcu_read_lock(&head->track_srcu);
hlist_for_each_entry_srcu(n, &head->track_notifier_list, node,
- srcu_read_lock_held(&head->track_srcu))
- if (n->track_flush_slot)
- n->track_flush_slot(kvm, slot, n);
+ srcu_read_lock_held(&head->track_srcu))
+ if (n->track_remove_region)
+ n->track_remove_region(slot->base_gfn, slot->npages, n);
srcu_read_unlock(&head->track_srcu, idx);
}
+
+/*
+ * add guest page to the tracking pool so that corresponding access on that
+ * page will be intercepted.
+ *
+ * @kvm: the guest instance we are interested in.
+ * @gfn: the guest page.
+ */
+int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn)
+{
+ struct kvm_memory_slot *slot;
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
+
+ slot = gfn_to_memslot(kvm, gfn);
+ if (!slot) {
+ srcu_read_unlock(&kvm->srcu, idx);
+ return -EINVAL;
+ }
+
+ write_lock(&kvm->mmu_lock);
+ __kvm_write_track_add_gfn(kvm, slot, gfn);
+ write_unlock(&kvm->mmu_lock);
+
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_write_track_add_gfn);
+
+/*
+ * remove the guest page from the tracking pool which stops the interception
+ * of corresponding access on that page.
+ *
+ * @kvm: the guest instance we are interested in.
+ * @gfn: the guest page.
+ */
+int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn)
+{
+ struct kvm_memory_slot *slot;
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
+
+ slot = gfn_to_memslot(kvm, gfn);
+ if (!slot) {
+ srcu_read_unlock(&kvm->srcu, idx);
+ return -EINVAL;
+ }
+
+ write_lock(&kvm->mmu_lock);
+ __kvm_write_track_remove_gfn(kvm, slot, gfn);
+ write_unlock(&kvm->mmu_lock);
+
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_write_track_remove_gfn);
+#endif
diff --git a/arch/x86/kvm/mmu/page_track.h b/arch/x86/kvm/mmu/page_track.h
new file mode 100644
index 000000000000..d4d72ed999b1
--- /dev/null
+++ b/arch/x86/kvm/mmu/page_track.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_PAGE_TRACK_H
+#define __KVM_X86_PAGE_TRACK_H
+
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_page_track.h>
+
+
+bool kvm_page_track_write_tracking_enabled(struct kvm *kvm);
+int kvm_page_track_write_tracking_alloc(struct kvm_memory_slot *slot);
+
+void kvm_page_track_free_memslot(struct kvm_memory_slot *slot);
+int kvm_page_track_create_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ unsigned long npages);
+
+void __kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn);
+void __kvm_write_track_remove_gfn(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn);
+
+bool kvm_gfn_is_write_tracked(struct kvm *kvm,
+ const struct kvm_memory_slot *slot, gfn_t gfn);
+
+#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
+int kvm_page_track_init(struct kvm *kvm);
+void kvm_page_track_cleanup(struct kvm *kvm);
+
+void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa, const u8 *new, int bytes);
+void kvm_page_track_delete_slot(struct kvm *kvm, struct kvm_memory_slot *slot);
+
+static inline bool kvm_page_track_has_external_user(struct kvm *kvm)
+{
+ return !hlist_empty(&kvm->arch.track_notifier_head.track_notifier_list);
+}
+#else
+static inline int kvm_page_track_init(struct kvm *kvm) { return 0; }
+static inline void kvm_page_track_cleanup(struct kvm *kvm) { }
+
+static inline void __kvm_page_track_write(struct kvm *kvm, gpa_t gpa,
+ const u8 *new, int bytes) { }
+static inline void kvm_page_track_delete_slot(struct kvm *kvm,
+ struct kvm_memory_slot *slot) { }
+
+static inline bool kvm_page_track_has_external_user(struct kvm *kvm) { return false; }
+
+#endif /* CONFIG_KVM_EXTERNAL_WRITE_TRACKING */
+
+static inline void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *new, int bytes)
+{
+ __kvm_page_track_write(vcpu->kvm, gpa, new, bytes);
+
+ kvm_mmu_track_write(vcpu, gpa, new, bytes);
+}
+
+#endif /* __KVM_X86_PAGE_TRACK_H */
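
To make the reworked external interface concrete, here is a hedged sketch of how an out-of-KVM write-tracking user (e.g. a KVMGT-like module) would plug into it. The callback signatures are inferred from the call sites in this diff (n->track_write(gpa, new, bytes, n) and n->track_remove_region(base_gfn, npages, n)); everything prefixed my_ is hypothetical.

#include <linux/kvm_host.h>
#include <asm/kvm_page_track.h>

static void my_track_write(gpa_t gpa, const u8 *new, int bytes,
			   struct kvm_page_track_notifier_node *node)
{
	/* Invalidate any metadata shadowing the written guest page. */
}

static void my_track_remove_region(gfn_t gfn, unsigned long nr_pages,
				   struct kvm_page_track_notifier_node *node)
{
	/* The memslot is going away; drop state for [gfn, gfn + nr_pages). */
}

static struct kvm_page_track_notifier_node my_node = {
	.track_write		= my_track_write,
	.track_remove_region	= my_track_remove_region,
};

static int my_attach(struct kvm *kvm, gfn_t gfn)
{
	int r;

	/* Takes a reference on @kvm; fails if called from a foreign mm. */
	r = kvm_page_track_register_notifier(kvm, &my_node);
	if (r)
		return r;

	/* Write-protect @gfn; the helper handles SRCU and mmu_lock itself. */
	return kvm_write_track_add_gfn(kvm, gfn);
}

static void my_detach(struct kvm *kvm, gfn_t gfn)
{
	kvm_write_track_remove_gfn(kvm, gfn);
	kvm_page_track_unregister_notifier(kvm, &my_node); /* drops the kvm ref */
}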
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 0662e0278e70..c85255073f67 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -338,7 +338,6 @@ retry_walk:
}
#endif
walker->max_level = walker->level;
- ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));
/*
* FIXME: on Intel processors, loads of the PDPTE registers for PAE paging
@@ -348,9 +347,21 @@ retry_walk:
nested_access = (have_ad ? PFERR_WRITE_MASK : 0) | PFERR_USER_MASK;
pte_access = ~0;
+
+ /*
+ * Queue a page fault for injection if this assertion fails, as callers
+ * assume that walker.fault contains sane info on a walk failure. I.e.
+ * avoid making the situation worse by inducing even worse badness
+ * between when the assertion fails and when KVM kicks the vCPU out to
+ * userspace (because the VM is bugged).
+ */
+ if (KVM_BUG_ON(is_long_mode(vcpu) && !is_pae(vcpu), vcpu->kvm))
+ goto error;
+
++walker->level;
do {
+ struct kvm_memory_slot *slot;
unsigned long host_addr;
pt_access = pte_access;
@@ -381,7 +392,11 @@ retry_walk:
if (unlikely(real_gpa == INVALID_GPA))
return 0;
- host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa),
+ slot = kvm_vcpu_gfn_to_memslot(vcpu, gpa_to_gfn(real_gpa));
+ if (!kvm_is_visible_memslot(slot))
+ goto error;
+
+ host_addr = gfn_to_hva_memslot_prot(slot, gpa_to_gfn(real_gpa),
&walker->pte_writable[walker->level - 1]);
if (unlikely(kvm_is_error_hva(host_addr)))
goto error;
@@ -456,9 +471,6 @@ retry_walk:
goto retry_walk;
}
- pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
- __func__, (u64)pte, walker->pte_access,
- walker->pt_access[walker->level - 1]);
return 1;
error:
@@ -529,8 +541,6 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
return false;
- pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
-
gfn = gpte_to_gfn(gpte);
pte_access = sp->role.access & FNAME(gpte_access)(gpte);
FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
@@ -638,8 +648,19 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
if (FNAME(gpte_changed)(vcpu, gw, top_level))
goto out_gpte_changed;
- if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
+ if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
+ goto out_gpte_changed;
+
+ /*
+ * Load a new root and retry the faulting instruction in the extremely
+ * unlikely scenario that the guest root gfn became visible between
+ * loading a dummy root and handling the resulting page fault, e.g. if
+ * userspace creates a memslot in the interim.
+ */
+ if (unlikely(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa))) {
+ kvm_make_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu);
goto out_gpte_changed;
+ }
for_each_shadow_entry(vcpu, fault->addr, it) {
gfn_t table_gfn;
@@ -758,7 +779,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
struct guest_walker walker;
int r;
- pgprintk("%s: addr %lx err %x\n", __func__, fault->addr, fault->error_code);
WARN_ON_ONCE(fault->is_tdp);
/*
@@ -773,7 +793,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
* The page is not mapped by the guest. Let the guest handle it.
*/
if (!r) {
- pgprintk("%s: guest page fault\n", __func__);
if (!fault->prefetch)
kvm_inject_emulated_page_fault(vcpu, &walker.fault);
@@ -837,7 +856,7 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
int offset = 0;
- WARN_ON(sp->role.level != PG_LEVEL_4K);
+ WARN_ON_ONCE(sp->role.level != PG_LEVEL_4K);
if (PTTYPE == 32)
offset = sp->role.quadrant << SPTE_LEVEL_BITS;
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index cf2c6426a6fc..4a599130e9c9 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -61,7 +61,7 @@ static u64 generation_mmio_spte_mask(u64 gen)
{
u64 mask;
- WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
+ WARN_ON_ONCE(gen & ~MMIO_SPTE_GEN_MASK);
mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
@@ -221,8 +221,6 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
* shadow pages and unsync'ing pages is not allowed.
*/
if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, can_unsync, prefetch)) {
- pgprintk("%s: found shadow page for %llx, marking ro\n",
- __func__, gfn);
wrprot = true;
pte_access &= ~ACC_WRITE_MASK;
spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
@@ -242,7 +240,7 @@ out:
if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
/* Enforced by kvm_mmu_hugepage_adjust. */
- WARN_ON(level > PG_LEVEL_4K);
+ WARN_ON_ONCE(level > PG_LEVEL_4K);
mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
}
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 1279db2eab44..a129951c9a88 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -3,6 +3,7 @@
#ifndef KVM_X86_MMU_SPTE_H
#define KVM_X86_MMU_SPTE_H
+#include "mmu.h"
#include "mmu_internal.h"
/*
@@ -236,6 +237,18 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
return to_shadow_page(__pa(sptep));
}
+static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
+{
+ if (kvm_mmu_is_dummy_root(root))
+ return NULL;
+
+ /*
+ * The "root" may be a special root, e.g. a PAE entry, treat it as a
+ * SPTE to ensure any non-PA bits are dropped.
+ */
+ return spte_to_child_sp(root);
+}
+
static inline bool is_mmio_spte(u64 spte)
{
return (spte & shadow_mmio_mask) == shadow_mmio_value &&
@@ -265,13 +278,13 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
static inline bool spte_ad_enabled(u64 spte)
{
- MMU_WARN_ON(!is_shadow_present_pte(spte));
+ KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_DISABLED;
}
static inline bool spte_ad_need_write_protect(u64 spte)
{
- MMU_WARN_ON(!is_shadow_present_pte(spte));
+ KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
/*
* This is benign for non-TDP SPTEs as SPTE_TDP_AD_ENABLED is '0',
* and non-TDP SPTEs will never set these bits. Optimize for 64-bit
@@ -282,13 +295,13 @@ static inline bool spte_ad_need_write_protect(u64 spte)
static inline u64 spte_shadow_accessed_mask(u64 spte)
{
- MMU_WARN_ON(!is_shadow_present_pte(spte));
+ KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
}
static inline u64 spte_shadow_dirty_mask(u64 spte)
{
- MMU_WARN_ON(!is_shadow_present_pte(spte));
+ KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
}
diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c
index d2eb0d4f8710..bd30ebfb2f2c 100644
--- a/arch/x86/kvm/mmu/tdp_iter.c
+++ b/arch/x86/kvm/mmu/tdp_iter.c
@@ -39,13 +39,14 @@ void tdp_iter_restart(struct tdp_iter *iter)
void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root,
int min_level, gfn_t next_last_level_gfn)
{
- int root_level = root->role.level;
-
- WARN_ON(root_level < 1);
- WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);
+ if (WARN_ON_ONCE(!root || (root->role.level < 1) ||
+ (root->role.level > PT64_ROOT_MAX_LEVEL))) {
+ iter->valid = false;
+ return;
+ }
iter->next_last_level_gfn = next_last_level_gfn;
- iter->root_level = root_level;
+ iter->root_level = root->role.level;
iter->min_level = min_level;
iter->pt_path[iter->root_level - 1] = (tdp_ptep_t)root->spt;
iter->as_id = kvm_mmu_page_as_id(root);
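
The reworked tdp_iter_start() matters because the TDP walk macros now pass it root_to_sp(mmu->root.hpa), which can be NULL when a dummy root is installed. A hedged sketch of the resulting control flow; the loop below is a paraphrase of the walk macros, whose exact bodies are not shown in this diff.

/* Roughly what a for_each_tdp_pte()-style walk expands to: */
tdp_iter_start(&iter, root, min_level, start);	/* root may be NULL */
while (iter.valid && iter.gfn < end) {
	/* ... visit iter.sptep / iter.old_spte ... */
	tdp_iter_next(&iter);
}
/*
 * With a NULL or malformed root, tdp_iter_start() now sets iter.valid = false,
 * so the walk degenerates to a no-op instead of dereferencing a bad root.
 */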
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 512163d52194..6c63f2d1675f 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -475,9 +475,9 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
bool is_leaf = is_present && is_last_spte(new_spte, level);
bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
- WARN_ON(level > PT64_ROOT_MAX_LEVEL);
- WARN_ON(level < PG_LEVEL_4K);
- WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
+ WARN_ON_ONCE(level > PT64_ROOT_MAX_LEVEL);
+ WARN_ON_ONCE(level < PG_LEVEL_4K);
+ WARN_ON_ONCE(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
/*
* If this warning were to trigger it would indicate that there was a
@@ -522,9 +522,9 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
* impact the guest since both the former and current SPTEs
* are nonpresent.
*/
- if (WARN_ON(!is_mmio_spte(old_spte) &&
- !is_mmio_spte(new_spte) &&
- !is_removed_spte(new_spte)))
+ if (WARN_ON_ONCE(!is_mmio_spte(old_spte) &&
+ !is_mmio_spte(new_spte) &&
+ !is_removed_spte(new_spte)))
pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
"should not be replaced with another,\n"
"different nonpresent SPTE, unless one or both\n"
@@ -661,7 +661,7 @@ static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
* should be used. If operating under the MMU lock in write mode, the
* use of the removed SPTE should not be necessary.
*/
- WARN_ON(is_removed_spte(old_spte) || is_removed_spte(new_spte));
+ WARN_ON_ONCE(is_removed_spte(old_spte) || is_removed_spte(new_spte));
old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
@@ -689,7 +689,7 @@ static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
else
#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end) \
- for_each_tdp_pte(_iter, to_shadow_page(_mmu->root.hpa), _start, _end)
+ for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end)
/*
* Yield if the MMU lock is contended or this thread needs to return control
@@ -709,7 +709,7 @@ static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
struct tdp_iter *iter,
bool flush, bool shared)
{
- WARN_ON(iter->yielded);
+ WARN_ON_ONCE(iter->yielded);
/* Ensure forward progress has been made before yielding. */
if (iter->next_last_level_gfn == iter->yielded_gfn)
@@ -728,7 +728,7 @@ static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
rcu_read_lock();
- WARN_ON(iter->gfn > iter->next_last_level_gfn);
+ WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn);
iter->yielded = true;
}
@@ -1241,7 +1241,7 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
u64 new_spte;
/* Huge pages aren't expected to be modified without first being zapped. */
- WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
+ WARN_ON_ONCE(pte_huge(range->arg.pte) || range->start + 1 != range->end);
if (iter->level != PG_LEVEL_4K ||
!is_shadow_present_pte(iter->old_spte))
@@ -1255,9 +1255,9 @@ static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
*/
tdp_mmu_iter_set_spte(kvm, iter, 0);
- if (!pte_write(range->pte)) {
+ if (!pte_write(range->arg.pte)) {
new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
- pte_pfn(range->pte));
+ pte_pfn(range->arg.pte));
tdp_mmu_iter_set_spte(kvm, iter, new_spte);
}
@@ -1548,8 +1548,8 @@ retry:
if (!is_shadow_present_pte(iter.old_spte))
continue;
- MMU_WARN_ON(kvm_ad_enabled() &&
- spte_ad_need_write_protect(iter.old_spte));
+ KVM_MMU_WARN_ON(kvm_ad_enabled() &&
+ spte_ad_need_write_protect(iter.old_spte));
if (!(iter.old_spte & dbit))
continue;
@@ -1600,6 +1600,8 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
shadow_dirty_mask;
struct tdp_iter iter;
+ lockdep_assert_held_write(&kvm->mmu_lock);
+
rcu_read_lock();
tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
@@ -1607,8 +1609,8 @@ static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
if (!mask)
break;
- MMU_WARN_ON(kvm_ad_enabled() &&
- spte_ad_need_write_protect(iter.old_spte));
+ KVM_MMU_WARN_ON(kvm_ad_enabled() &&
+ spte_ad_need_write_protect(iter.old_spte));
if (iter.level > PG_LEVEL_4K ||
!(mask & (1UL << (iter.gfn - gfn))))
@@ -1646,7 +1648,6 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
{
struct kvm_mmu_page *root;
- lockdep_assert_held_write(&kvm->mmu_lock);
for_each_tdp_mmu_root(kvm, root, slot->as_id)
clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
}
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index bf653df86112..edb89b51b383 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -382,9 +382,6 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
struct kvm_x86_pmu_event_filter *filter;
struct kvm *kvm = pmc->vcpu->kvm;
- if (!static_call(kvm_x86_pmu_hw_event_available)(pmc))
- return false;
-
filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
if (!filter)
return true;
@@ -398,6 +395,7 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
{
return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) &&
+ static_call(kvm_x86_pmu_hw_event_available)(pmc) &&
check_pmu_event_filter(pmc);
}
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
index 56cbdb24400a..b81650678375 100644
--- a/arch/x86/kvm/reverse_cpuid.h
+++ b/arch/x86/kvm/reverse_cpuid.h
@@ -43,6 +43,7 @@ enum kvm_only_cpuid_leafs {
/* Intel-defined sub-features, CPUID level 0x00000007:1 (EDX) */
#define X86_FEATURE_AVX_VNNI_INT8 KVM_X86_FEATURE(CPUID_7_1_EDX, 4)
#define X86_FEATURE_AVX_NE_CONVERT KVM_X86_FEATURE(CPUID_7_1_EDX, 5)
+#define X86_FEATURE_AMX_COMPLEX KVM_X86_FEATURE(CPUID_7_1_EDX, 8)
#define X86_FEATURE_PREFETCHITI KVM_X86_FEATURE(CPUID_7_1_EDX, 14)
/* CPUID level 0x80000007 (EDX). */
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index cfc8ab773025..2092db892d7d 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -791,6 +791,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
int ret = 0;
unsigned long flags;
struct amd_svm_iommu_ir *ir;
+ u64 entry;
/**
* In some cases, the existing irte is updated and re-set,
@@ -824,6 +825,18 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
ir->data = pi->ir_data;
spin_lock_irqsave(&svm->ir_list_lock, flags);
+
+ /*
+ * Update the target pCPU for IOMMU doorbells if the vCPU is running.
+ * If the vCPU is NOT running, i.e. is blocking or scheduled out, KVM
+ * will update the pCPU info when the vCPU is awakened and/or scheduled in.
+ * See also avic_vcpu_load().
+ */
+ entry = READ_ONCE(*(svm->avic_physical_id_cache));
+ if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
+ amd_iommu_update_ga(entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK,
+ true, pi->ir_data);
+
list_add(&ir->node, &svm->ir_list);
spin_unlock_irqrestore(&svm->ir_list_lock, flags);
out:
@@ -986,10 +999,11 @@ static inline int
avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
{
int ret = 0;
- unsigned long flags;
struct amd_svm_iommu_ir *ir;
struct vcpu_svm *svm = to_svm(vcpu);
+ lockdep_assert_held(&svm->ir_list_lock);
+
if (!kvm_arch_has_assigned_device(vcpu->kvm))
return 0;
@@ -997,19 +1011,15 @@ avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
* Here, we go through the per-vcpu ir_list to update all existing
* interrupt remapping table entry targeting this vcpu.
*/
- spin_lock_irqsave(&svm->ir_list_lock, flags);
-
if (list_empty(&svm->ir_list))
- goto out;
+ return 0;
list_for_each_entry(ir, &svm->ir_list, node) {
ret = amd_iommu_update_ga(cpu, r, ir->data);
if (ret)
- break;
+ return ret;
}
-out:
- spin_unlock_irqrestore(&svm->ir_list_lock, flags);
- return ret;
+ return 0;
}
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -1017,6 +1027,7 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
u64 entry;
int h_physical_id = kvm_cpu_get_apicid(cpu);
struct vcpu_svm *svm = to_svm(vcpu);
+ unsigned long flags;
lockdep_assert_preemption_disabled();
@@ -1033,6 +1044,15 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (kvm_vcpu_is_blocking(vcpu))
return;
+ /*
+ * Grab the per-vCPU interrupt remapping lock even if the VM doesn't
+ * _currently_ have assigned devices, as that can change. Holding
+ * ir_list_lock ensures that either svm_ir_list_add() will consume
+ * up-to-date entry information, or that this task will wait until
+ * svm_ir_list_add() completes to set the new target pCPU.
+ */
+ spin_lock_irqsave(&svm->ir_list_lock, flags);
+
entry = READ_ONCE(*(svm->avic_physical_id_cache));
WARN_ON_ONCE(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
@@ -1042,25 +1062,48 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
+
+ spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}
void avic_vcpu_put(struct kvm_vcpu *vcpu)
{
u64 entry;
struct vcpu_svm *svm = to_svm(vcpu);
+ unsigned long flags;
lockdep_assert_preemption_disabled();
+ /*
+ * Note, reading the Physical ID entry outside of ir_list_lock is safe
+ * as only the pCPU that has loaded (or is loading) the vCPU is allowed
+ * to modify the entry, and preemption is disabled. I.e. the vCPU
+ * can't be scheduled out and thus avic_vcpu_{put,load}() can't run
+ * recursively.
+ */
entry = READ_ONCE(*(svm->avic_physical_id_cache));
/* Nothing to do if IsRunning == '0' due to vCPU blocking. */
if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
return;
+ /*
+ * Take and hold the per-vCPU interrupt remapping lock while updating
+ * the Physical ID entry even though the lock doesn't protect against
+ * multiple writers (see above). Holding ir_list_lock ensures that
+ * either svm_ir_list_add() will consume up-to-date entry information,
+ * or that this task will wait until svm_ir_list_add() completes to
+ * mark the vCPU as not running.
+ */
+ spin_lock_irqsave(&svm->ir_list_lock, flags);
+
avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
+
+ spin_unlock_irqrestore(&svm->ir_list_lock, flags);
+
}
void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu)
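
A condensed view of the ordering that the avic.c hunks above establish, distilled from the code rather than adding to it:

/*
 *   svm_ir_list_add()                      avic_vcpu_load() / avic_vcpu_put()
 *   ---------------------------------      ----------------------------------
 *   lock(ir_list_lock)                     lock(ir_list_lock)
 *   entry = READ_ONCE(physical_id_entry)   read/modify physical_id_entry
 *   if (entry & IS_RUNNING)                  (set/clear IS_RUNNING, host pCPU)
 *       amd_iommu_update_ga(pCPU, ...)     avic_update_iommu_vcpu_affinity()
 *   list_add(ir, &svm->ir_list)                walks ir_list under the lock
 *   unlock(ir_list_lock)                   unlock(ir_list_lock)
 *
 * Either the new IRTE sees the final IS_RUNNING/pCPU state, or the load/put
 * path sees the new ir_list entry, so IOMMU doorbells can no longer target a
 * stale pCPU.
 */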
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 96936ddf1b3c..dd496c9e5f91 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -107,7 +107,7 @@ static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
{
- if (!svm->v_vmload_vmsave_enabled)
+ if (!guest_can_use(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD))
return true;
if (!nested_npt_enabled(svm))
@@ -552,6 +552,7 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
bool new_vmcb12 = false;
struct vmcb *vmcb01 = svm->vmcb01.ptr;
struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
+ struct kvm_vcpu *vcpu = &svm->vcpu;
nested_vmcb02_compute_g_pat(svm);
@@ -577,18 +578,18 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
vmcb_mark_dirty(vmcb02, VMCB_DT);
}
- kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
+ kvm_set_rflags(vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
- svm_set_efer(&svm->vcpu, svm->nested.save.efer);
+ svm_set_efer(vcpu, svm->nested.save.efer);
- svm_set_cr0(&svm->vcpu, svm->nested.save.cr0);
- svm_set_cr4(&svm->vcpu, svm->nested.save.cr4);
+ svm_set_cr0(vcpu, svm->nested.save.cr0);
+ svm_set_cr4(vcpu, svm->nested.save.cr4);
svm->vcpu.arch.cr2 = vmcb12->save.cr2;
- kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
- kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
- kvm_rip_write(&svm->vcpu, vmcb12->save.rip);
+ kvm_rax_write(vcpu, vmcb12->save.rax);
+ kvm_rsp_write(vcpu, vmcb12->save.rsp);
+ kvm_rip_write(vcpu, vmcb12->save.rip);
/* In case we don't even reach vcpu_run, the fields are not updated */
vmcb02->save.rax = vmcb12->save.rax;
@@ -602,7 +603,8 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
vmcb_mark_dirty(vmcb02, VMCB_DR);
}
- if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
+ if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+ (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
/*
* Reserved bits of DEBUGCTL are ignored. Be consistent with
* svm_set_msr's definition of reserved bits.
@@ -658,7 +660,8 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
* exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
*/
- if (svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
+ if (guest_can_use(vcpu, X86_FEATURE_VGIF) &&
+ (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
else
int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
@@ -695,10 +698,9 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;
- if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) {
- WARN_ON(!svm->tsc_scaling_enabled);
+ if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) &&
+ svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio)
nested_svm_update_tsc_ratio_msr(vcpu);
- }
vmcb02->control.int_ctl =
(svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
@@ -717,7 +719,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
* what a nrips=0 CPU would do (L1 is responsible for advancing RIP
* prior to injecting the event).
*/
- if (svm->nrips_enabled)
+ if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
vmcb02->control.next_rip = svm->nested.ctl.next_rip;
else if (boot_cpu_has(X86_FEATURE_NRIPS))
vmcb02->control.next_rip = vmcb12_rip;
@@ -727,7 +729,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
svm->soft_int_injected = true;
svm->soft_int_csbase = vmcb12_csbase;
svm->soft_int_old_rip = vmcb12_rip;
- if (svm->nrips_enabled)
+ if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
svm->soft_int_next_rip = svm->nested.ctl.next_rip;
else
svm->soft_int_next_rip = vmcb12_rip;
@@ -735,15 +737,21 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
vmcb02->control.virt_ext = vmcb01->control.virt_ext &
LBR_CTL_ENABLE_MASK;
- if (svm->lbrv_enabled)
+ if (guest_can_use(vcpu, X86_FEATURE_LBRV))
vmcb02->control.virt_ext |=
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
if (!nested_vmcb_needs_vls_intercept(svm))
vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
- pause_count12 = svm->pause_filter_enabled ? svm->nested.ctl.pause_filter_count : 0;
- pause_thresh12 = svm->pause_threshold_enabled ? svm->nested.ctl.pause_filter_thresh : 0;
+ if (guest_can_use(vcpu, X86_FEATURE_PAUSEFILTER))
+ pause_count12 = svm->nested.ctl.pause_filter_count;
+ else
+ pause_count12 = 0;
+ if (guest_can_use(vcpu, X86_FEATURE_PFTHRESHOLD))
+ pause_thresh12 = svm->nested.ctl.pause_filter_thresh;
+ else
+ pause_thresh12 = 0;
if (kvm_pause_in_guest(svm->vcpu.kvm)) {
/* use guest values since host doesn't intercept PAUSE */
vmcb02->control.pause_filter_count = pause_count12;
@@ -1027,7 +1035,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
if (vmcb12->control.exit_code != SVM_EXIT_ERR)
nested_save_pending_event_to_vmcb12(svm, vmcb12);
- if (svm->nrips_enabled)
+ if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
vmcb12->control.next_rip = vmcb02->control.next_rip;
vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
@@ -1066,7 +1074,8 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
if (!nested_exit_on_intr(svm))
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
- if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
+ if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+ (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
svm_copy_lbrs(vmcb12, vmcb02);
svm_update_lbrv(vcpu);
} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
@@ -1101,10 +1110,10 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
}
- if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) {
- WARN_ON(!svm->tsc_scaling_enabled);
+ if (kvm_caps.has_tsc_control &&
+ vcpu->arch.tsc_scaling_ratio != vcpu->arch.l1_tsc_scaling_ratio) {
vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
- __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
+ svm_write_tsc_multiplier(vcpu);
}
svm->nested.ctl.nested_cr3 = 0;
@@ -1537,7 +1546,7 @@ void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
vcpu->arch.tsc_scaling_ratio =
kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
svm->tsc_ratio_msr);
- __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
+ svm_write_tsc_multiplier(vcpu);
}
/* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index d3aec1f2cad2..b9a0a939d59f 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -23,6 +23,7 @@
#include <asm/pkru.h>
#include <asm/trapnr.h>
#include <asm/fpu/xcr.h>
+#include <asm/debugreg.h>
#include "mmu.h"
#include "x86.h"
@@ -54,9 +55,14 @@ module_param_named(sev, sev_enabled, bool, 0444);
/* enable/disable SEV-ES support */
static bool sev_es_enabled = true;
module_param_named(sev_es, sev_es_enabled, bool, 0444);
+
+/* enable/disable SEV-ES DebugSwap support */
+static bool sev_es_debug_swap_enabled = true;
+module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
#else
#define sev_enabled false
#define sev_es_enabled false
+#define sev_es_debug_swap_enabled false
#endif /* CONFIG_KVM_AMD_SEV */
static u8 sev_enc_bit;
@@ -606,6 +612,9 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
save->xss = svm->vcpu.arch.ia32_xss;
save->dr6 = svm->vcpu.arch.dr6;
+ if (sev_es_debug_swap_enabled)
+ save->sev_features |= SVM_SEV_FEAT_DEBUG_SWAP;
+
pr_debug("Virtual Machine Save Area (VMSA):\n");
print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);
@@ -619,6 +628,11 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
struct vcpu_svm *svm = to_svm(vcpu);
int ret;
+ if (vcpu->guest_debug) {
+ pr_warn_once("KVM_SET_GUEST_DEBUG for SEV-ES guest is not supported");
+ return -EINVAL;
+ }
+
/* Perform some pre-encryption checks against the VMSA */
ret = sev_es_sync_vmsa(svm);
if (ret)
@@ -1725,7 +1739,7 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
* Note, the source is not required to have the same number of
* vCPUs as the destination when migrating a vanilla SEV VM.
*/
- src_vcpu = kvm_get_vcpu(dst_kvm, i);
+ src_vcpu = kvm_get_vcpu(src_kvm, i);
src_svm = to_svm(src_vcpu);
/*
@@ -2171,7 +2185,7 @@ void __init sev_hardware_setup(void)
bool sev_es_supported = false;
bool sev_supported = false;
- if (!sev_enabled || !npt_enabled)
+ if (!sev_enabled || !npt_enabled || !nrips)
goto out;
/*
@@ -2256,6 +2270,9 @@ out:
sev_enabled = sev_supported;
sev_es_enabled = sev_es_supported;
+ if (!sev_es_enabled || !cpu_feature_enabled(X86_FEATURE_DEBUG_SWAP) ||
+ !cpu_feature_enabled(X86_FEATURE_NO_NESTED_DATA_BP))
+ sev_es_debug_swap_enabled = false;
#endif
}
@@ -2881,7 +2898,10 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
svm->sev_es.ghcb_sa);
break;
case SVM_VMGEXIT_NMI_COMPLETE:
- ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
+ ++vcpu->stat.nmi_window_exits;
+ svm->nmi_masked = false;
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ ret = 1;
break;
case SVM_VMGEXIT_AP_HLT_LOOP:
ret = kvm_emulate_ap_reset_hold(vcpu);
@@ -2944,6 +2964,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
static void sev_es_init_vmcb(struct vcpu_svm *svm)
{
+ struct vmcb *vmcb = svm->vmcb01.ptr;
struct kvm_vcpu *vcpu = &svm->vcpu;
svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
@@ -2952,9 +2973,12 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
/*
* An SEV-ES guest requires a VMSA area that is separate from the
* VMCB page. Do not include the encryption mask on the VMSA physical
- * address since hardware will access it using the guest key.
+ * address since hardware will access it using the guest key. Note,
+ * the VMSA will be NULL if this vCPU is the destination for intrahost
+ * migration, and will be copied later.
*/
- svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
+ if (svm->sev_es.vmsa)
+ svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);
/* Can't intercept CR register access, HV can't modify CR registers */
svm_clr_intercept(svm, INTERCEPT_CR0_READ);
@@ -2972,8 +2996,23 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
svm_set_intercept(svm, TRAP_CR4_WRITE);
svm_set_intercept(svm, TRAP_CR8_WRITE);
- /* No support for enable_vmware_backdoor */
- clr_exception_intercept(svm, GP_VECTOR);
+ vmcb->control.intercepts[INTERCEPT_DR] = 0;
+ if (!sev_es_debug_swap_enabled) {
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
+ recalc_intercepts(svm);
+ } else {
+ /*
+ * Disable #DB intercept iff DebugSwap is enabled. KVM doesn't
+ * allow debugging SEV-ES guests, and enables DebugSwap iff
+ * NO_NESTED_DATA_BP is supported, so there's no reason to
+ * intercept #DB when DebugSwap is enabled. For simplicity
+ * with respect to guest debug, intercept #DB for other VMs
+ * even if NO_NESTED_DATA_BP is supported, i.e. even if the
+ * guest can't DoS the CPU with infinite #DB vectoring.
+ */
+ clr_exception_intercept(svm, DB_VECTOR);
+ }
/* Can't intercept XSETBV, HV can't modify XCR0 directly */
svm_clr_intercept(svm, INTERCEPT_XSETBV);
@@ -3000,6 +3039,12 @@ void sev_init_vmcb(struct vcpu_svm *svm)
svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
clr_exception_intercept(svm, UD_VECTOR);
+ /*
+ * Don't intercept #GP for SEV guests, e.g. for the VMware backdoor, as
+ * KVM can't decrypt guest memory to decode the faulting instruction.
+ */
+ clr_exception_intercept(svm, GP_VECTOR);
+
if (sev_es_guest(svm->vcpu.kvm))
sev_es_init_vmcb(svm);
}
@@ -3018,20 +3063,41 @@ void sev_es_vcpu_reset(struct vcpu_svm *svm)
void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa)
{
/*
- * As an SEV-ES guest, hardware will restore the host state on VMEXIT,
- * of which one step is to perform a VMLOAD. KVM performs the
- * corresponding VMSAVE in svm_prepare_guest_switch for both
- * traditional and SEV-ES guests.
+ * All host state for SEV-ES guests is categorized into three swap types
+ * based on how it is handled by hardware during a world switch:
+ *
+ * A: VMRUN: Host state saved in host save area
+ * VMEXIT: Host state loaded from host save area
+ *
+ * B: VMRUN: Host state _NOT_ saved in host save area
+ * VMEXIT: Host state loaded from host save area
+ *
+ * C: VMRUN: Host state _NOT_ saved in host save area
+ * VMEXIT: Host state initialized to default(reset) values
+ *
+ * Manually save type-B state, i.e. state that is loaded by VMEXIT but
+ * isn't saved by VMRUN, and that isn't already saved by VMSAVE (performed
+ * by common SVM code).
*/
-
- /* XCR0 is restored on VMEXIT, save the current host value */
hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-
- /* PKRU is restored on VMEXIT, save the current host value */
hostsa->pkru = read_pkru();
-
- /* MSR_IA32_XSS is restored on VMEXIT, save the currnet host value */
hostsa->xss = host_xss;
+
+ /*
+ * If DebugSwap is enabled, debug registers are loaded but NOT saved by
+ * the CPU (Type-B). If DebugSwap is disabled/unsupported, the CPU both
+ * saves and loads debug registers (Type-A).
+ */
+ if (sev_es_debug_swap_enabled) {
+ hostsa->dr0 = native_get_debugreg(0);
+ hostsa->dr1 = native_get_debugreg(1);
+ hostsa->dr2 = native_get_debugreg(2);
+ hostsa->dr3 = native_get_debugreg(3);
+ hostsa->dr0_addr_mask = amd_get_dr_addr_mask(0);
+ hostsa->dr1_addr_mask = amd_get_dr_addr_mask(1);
+ hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2);
+ hostsa->dr3_addr_mask = amd_get_dr_addr_mask(3);
+ }
}
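Purely illustrative (assumed helper names, not kernel code): the Type-A/B/C taxonomy from the comment above, applied to the fields saved here. Per that comment, XCR0, PKRU and XSS are always Type-B, while the debug registers become Type-B only when DebugSwap is enabled and stay Type-A otherwise:

#include <string.h>

enum sev_es_swap_type {
	SWAP_TYPE_A,	/* saved by VMRUN, loaded by VMEXIT     */
	SWAP_TYPE_B,	/* NOT saved by VMRUN, loaded by VMEXIT */
	SWAP_TYPE_C,	/* NOT saved by VMRUN, reset on VMEXIT  */
};

static enum sev_es_swap_type classify(const char *field, int debug_swap_enabled)
{
	if (!strcmp(field, "xcr0") || !strcmp(field, "pkru") || !strcmp(field, "xss"))
		return SWAP_TYPE_B;
	if (!strncmp(field, "dr", 2))	/* dr0..dr3 and their address masks */
		return debug_swap_enabled ? SWAP_TYPE_B : SWAP_TYPE_A;
	return SWAP_TYPE_A;		/* simplification: everything else shown here is Type-A */
}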
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index d4bfdc607fe7..f283eb47f6ac 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -39,10 +39,9 @@
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>
#include <asm/traps.h>
+#include <asm/reboot.h>
#include <asm/fpu/api.h>
-#include <asm/virtext.h>
-
#include <trace/events/ipi.h>
#include "trace.h"
@@ -203,7 +202,7 @@ static int nested = true;
module_param(nested, int, S_IRUGO);
/* enable/disable Next RIP Save */
-static int nrips = true;
+int nrips = true;
module_param(nrips, int, 0444);
/* enable/disable Virtual VMLOAD VMSAVE */
@@ -365,6 +364,8 @@ static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}
+static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+ void *insn, int insn_len);
static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
bool commit_side_effects)
@@ -385,6 +386,14 @@ static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
}
if (!svm->next_rip) {
+ /*
+ * FIXME: Drop this when kvm_emulate_instruction() does the
+ * right thing and treats "can't emulate" as outright failure
+ * for EMULTYPE_SKIP.
+ */
+ if (!svm_can_emulate_instruction(vcpu, EMULTYPE_SKIP, NULL, 0))
+ return 0;
+
if (unlikely(!commit_side_effects))
old_rflags = svm->vmcb->save.rflags;
@@ -517,14 +526,21 @@ static void svm_init_osvw(struct kvm_vcpu *vcpu)
vcpu->arch.osvw.status |= 1;
}
-static bool kvm_is_svm_supported(void)
+static bool __kvm_is_svm_supported(void)
{
- int cpu = raw_smp_processor_id();
- const char *msg;
+ int cpu = smp_processor_id();
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
u64 vm_cr;
- if (!cpu_has_svm(&msg)) {
- pr_err("SVM not supported by CPU %d, %s\n", cpu, msg);
+ if (c->x86_vendor != X86_VENDOR_AMD &&
+ c->x86_vendor != X86_VENDOR_HYGON) {
+ pr_err("CPU %d isn't AMD or Hygon\n", cpu);
+ return false;
+ }
+
+ if (!cpu_has(c, X86_FEATURE_SVM)) {
+ pr_err("SVM not supported by CPU %d\n", cpu);
return false;
}
@@ -542,25 +558,55 @@ static bool kvm_is_svm_supported(void)
return true;
}
+static bool kvm_is_svm_supported(void)
+{
+ bool supported;
+
+ migrate_disable();
+ supported = __kvm_is_svm_supported();
+ migrate_enable();
+
+ return supported;
+}
+
static int svm_check_processor_compat(void)
{
- if (!kvm_is_svm_supported())
+ if (!__kvm_is_svm_supported())
return -EIO;
return 0;
}
-void __svm_write_tsc_multiplier(u64 multiplier)
+static void __svm_write_tsc_multiplier(u64 multiplier)
{
- preempt_disable();
-
if (multiplier == __this_cpu_read(current_tsc_ratio))
- goto out;
+ return;
wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
__this_cpu_write(current_tsc_ratio, multiplier);
-out:
- preempt_enable();
+}
+
+static inline void kvm_cpu_svm_disable(void)
+{
+ uint64_t efer;
+
+ wrmsrl(MSR_VM_HSAVE_PA, 0);
+ rdmsrl(MSR_EFER, efer);
+ if (efer & EFER_SVME) {
+ /*
+ * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
+ * NMI aren't blocked.
+ */
+ stgi();
+ wrmsrl(MSR_EFER, efer & ~EFER_SVME);
+ }
+}
+
+static void svm_emergency_disable(void)
+{
+ kvm_rebooting = true;
+
+ kvm_cpu_svm_disable();
}
static void svm_hardware_disable(void)
@@ -569,7 +615,7 @@ static void svm_hardware_disable(void)
if (tsc_scaling)
__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
- cpu_svm_disable();
+ kvm_cpu_svm_disable();
amd_pmu_disable_virt();
}
@@ -677,6 +723,39 @@ free_save_area:
}
+static void set_dr_intercepts(struct vcpu_svm *svm)
+{
+ struct vmcb *vmcb = svm->vmcb01.ptr;
+
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
+ vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
+
+ recalc_intercepts(svm);
+}
+
+static void clr_dr_intercepts(struct vcpu_svm *svm)
+{
+ struct vmcb *vmcb = svm->vmcb01.ptr;
+
+ vmcb->control.intercepts[INTERCEPT_DR] = 0;
+
+ recalc_intercepts(svm);
+}
+
static int direct_access_msr_slot(u32 msr)
{
u32 i;
@@ -947,50 +1026,24 @@ static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
}
-static int svm_get_lbr_msr(struct vcpu_svm *svm, u32 index)
+static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm)
{
/*
- * If the LBR virtualization is disabled, the LBR msrs are always
- * kept in the vmcb01 to avoid copying them on nested guest entries.
- *
- * If nested, and the LBR virtualization is enabled/disabled, the msrs
- * are moved between the vmcb01 and vmcb02 as needed.
+ * If LBR virtualization is disabled, the LBR MSRs are always kept in
+ * vmcb01. If LBR virtualization is enabled and L1 is running VMs of
+ * its own, the MSRs are moved between vmcb01 and vmcb02 as needed.
*/
- struct vmcb *vmcb =
- (svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) ?
- svm->vmcb : svm->vmcb01.ptr;
-
- switch (index) {
- case MSR_IA32_DEBUGCTLMSR:
- return vmcb->save.dbgctl;
- case MSR_IA32_LASTBRANCHFROMIP:
- return vmcb->save.br_from;
- case MSR_IA32_LASTBRANCHTOIP:
- return vmcb->save.br_to;
- case MSR_IA32_LASTINTFROMIP:
- return vmcb->save.last_excp_from;
- case MSR_IA32_LASTINTTOIP:
- return vmcb->save.last_excp_to;
- default:
- KVM_BUG(false, svm->vcpu.kvm,
- "%s: Unknown MSR 0x%x", __func__, index);
- return 0;
- }
+ return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ? svm->vmcb :
+ svm->vmcb01.ptr;
}
void svm_update_lbrv(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
-
- bool enable_lbrv = svm_get_lbr_msr(svm, MSR_IA32_DEBUGCTLMSR) &
- DEBUGCTLMSR_LBR;
-
- bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext &
- LBR_CTL_ENABLE_MASK);
-
- if (unlikely(is_guest_mode(vcpu) && svm->lbrv_enabled))
- if (unlikely(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))
- enable_lbrv = true;
+ bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
+ bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) ||
+ (is_guest_mode(vcpu) && guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+ (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK));
if (enable_lbrv == current_enable_lbrv)
return;
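Concretely, reading the helper above: when L1 has enabled LBR virtualization for L2, vmcb02 is the current vmcb and has LBR_CTL_ENABLE_MASK set, so svm_get_lbr_vmcb() returns vmcb02 and the DEBUGCTL/LASTBRANCH*/LASTINT* MSRs are serviced from there; in every other case, including LBR virtualization being disabled entirely, they live in vmcb01.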
@@ -1101,21 +1154,23 @@ static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
return svm->tsc_ratio_msr;
}
-static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static void svm_write_tsc_offset(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
- svm->vmcb->control.tsc_offset = offset;
+ svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset;
vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}
-static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu)
{
- __svm_write_tsc_multiplier(multiplier);
+ preempt_disable();
+ if (to_svm(vcpu)->guest_state_loaded)
+ __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
+ preempt_enable();
}
-
/* Evaluate instruction intercepts that depend on guest CPUID features. */
static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
struct vcpu_svm *svm)
@@ -1156,8 +1211,6 @@ static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0);
-
- svm->v_vmload_vmsave_enabled = false;
} else {
/*
* If hardware supports Virtual VMLOAD VMSAVE then enable it
@@ -1201,10 +1254,9 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
* Guest access to VMware backdoor ports could legitimately
* trigger #GP because of TSS I/O permission bitmap.
* We intercept those #GP and allow access to them anyway
- * as VMware does. Don't intercept #GP for SEV guests as KVM can't
- * decrypt guest memory to decode the faulting instruction.
+ * as VMware does.
*/
- if (enable_vmware_backdoor && !sev_guest(vcpu->kvm))
+ if (enable_vmware_backdoor)
set_exception_intercept(svm, GP_VECTOR);
svm_set_intercept(svm, INTERCEPT_INTR);
@@ -1949,7 +2001,7 @@ static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- if (vcpu->arch.guest_state_protected)
+ if (WARN_ON_ONCE(sev_es_guest(vcpu->kvm)))
return;
get_debugreg(vcpu->arch.db[0], 0);
@@ -2510,12 +2562,13 @@ static int iret_interception(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ WARN_ON_ONCE(sev_es_guest(vcpu->kvm));
+
++vcpu->stat.nmi_window_exits;
svm->awaiting_iret_completion = true;
svm_clr_iret_intercept(svm);
- if (!sev_es_guest(vcpu->kvm))
- svm->nmi_iret_rip = kvm_rip_read(vcpu);
+ svm->nmi_iret_rip = kvm_rip_read(vcpu);
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 1;
@@ -2680,6 +2733,13 @@ static int dr_interception(struct kvm_vcpu *vcpu)
unsigned long val;
int err = 0;
+ /*
+ * SEV-ES intercepts DR7 only to disable guest debugging, and the guest issues a VMGEXIT
+ * only for DR7 writes. KVM cannot change DR7 (it is always swapped as type 'A'), so return early.
+ */
+ if (sev_es_guest(vcpu->kvm))
+ return 1;
+
if (vcpu->guest_debug == 0) {
/*
* No more DR vmexits; force a reload of the debug registers
@@ -2764,7 +2824,8 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
switch (msr_info->index) {
case MSR_AMD64_TSC_RATIO:
- if (!msr_info->host_initiated && !svm->tsc_scaling_enabled)
+ if (!msr_info->host_initiated &&
+ !guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR))
return 1;
msr_info->data = svm->tsc_ratio_msr;
break;
@@ -2802,11 +2863,19 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = svm->tsc_aux;
break;
case MSR_IA32_DEBUGCTLMSR:
+ msr_info->data = svm_get_lbr_vmcb(svm)->save.dbgctl;
+ break;
case MSR_IA32_LASTBRANCHFROMIP:
+ msr_info->data = svm_get_lbr_vmcb(svm)->save.br_from;
+ break;
case MSR_IA32_LASTBRANCHTOIP:
+ msr_info->data = svm_get_lbr_vmcb(svm)->save.br_to;
+ break;
case MSR_IA32_LASTINTFROMIP:
+ msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_from;
+ break;
case MSR_IA32_LASTINTTOIP:
- msr_info->data = svm_get_lbr_msr(svm, msr_info->index);
+ msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_to;
break;
case MSR_VM_HSAVE_PA:
msr_info->data = svm->nested.hsave_msr;
@@ -2906,7 +2975,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
switch (ecx) {
case MSR_AMD64_TSC_RATIO:
- if (!svm->tsc_scaling_enabled) {
+ if (!guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR)) {
if (!msr->host_initiated)
return 1;
@@ -2928,7 +2997,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
svm->tsc_ratio_msr = data;
- if (svm->tsc_scaling_enabled && is_guest_mode(vcpu))
+ if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) &&
+ is_guest_mode(vcpu))
nested_svm_update_tsc_ratio_msr(vcpu);
break;
@@ -3037,13 +3107,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
if (data & DEBUGCTL_RESERVED_BITS)
return 1;
- if (svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK)
- svm->vmcb->save.dbgctl = data;
- else
- svm->vmcb01.ptr->save.dbgctl = data;
-
+ svm_get_lbr_vmcb(svm)->save.dbgctl = data;
svm_update_lbrv(vcpu);
-
break;
case MSR_VM_HSAVE_PA:
/*
@@ -3769,6 +3834,19 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion)
return; /* IRET will cause a vm exit */
+ /*
+ * SEV-ES guests are responsible for signaling when a vCPU is ready to
+ * receive a new NMI, as SEV-ES guests can't be single-stepped, i.e.
+ * KVM can't intercept and single-step IRET to detect when NMIs are
+ * unblocked (architecturally speaking). See SVM_VMGEXIT_NMI_COMPLETE.
+ *
+ * Note, GIF is guaranteed to be '1' for SEV-ES guests as hardware
+ * ignores SEV-ES guest writes to EFER.SVME *and* CLGI/STGI are not
+ * supported NAEs in the GHCB protocol.
+ */
+ if (sev_es_guest(vcpu->kvm))
+ return;
+
if (!gif_set(svm)) {
if (vgif)
svm_set_intercept(svm, INTERCEPT_STGI);
@@ -3918,12 +3996,11 @@ static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
svm->soft_int_injected = false;
/*
- * If we've made progress since setting HF_IRET_MASK, we've
+ * If we've made progress since setting awaiting_iret_completion, we've
* executed an IRET and can allow NMI injection.
*/
if (svm->awaiting_iret_completion &&
- (sev_es_guest(vcpu->kvm) ||
- kvm_rip_read(vcpu) != svm->nmi_iret_rip)) {
+ kvm_rip_read(vcpu) != svm->nmi_iret_rip) {
svm->awaiting_iret_completion = false;
svm->nmi_masked = false;
kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -4209,28 +4286,37 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
struct kvm_cpuid_entry2 *best;
- vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
- boot_cpu_has(X86_FEATURE_XSAVE) &&
- boot_cpu_has(X86_FEATURE_XSAVES);
-
- /* Update nrips enabled cache */
- svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
- guest_cpuid_has(vcpu, X86_FEATURE_NRIPS);
-
- svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR);
- svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV);
-
- svm->v_vmload_vmsave_enabled = vls && guest_cpuid_has(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
-
- svm->pause_filter_enabled = kvm_cpu_cap_has(X86_FEATURE_PAUSEFILTER) &&
- guest_cpuid_has(vcpu, X86_FEATURE_PAUSEFILTER);
+ /*
+ * SVM doesn't provide a way to disable just XSAVES in the guest, KVM
+ * can only disable all variants of by disallowing CR4.OSXSAVE from
+ * being set. As a result, if the host has XSAVE and XSAVES, and the
+ * guest has XSAVE enabled, the guest can execute XSAVES without
+ * faulting. Treat XSAVES as enabled in this case regardless of
+ * whether it's advertised to the guest so that KVM context switches
+ * XSS on VM-Enter/VM-Exit. Failure to do so would effectively give
+ * the guest read/write access to the host's XSS.
+ */
+ if (boot_cpu_has(X86_FEATURE_XSAVE) &&
+ boot_cpu_has(X86_FEATURE_XSAVES) &&
+ guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
+ kvm_governed_feature_set(vcpu, X86_FEATURE_XSAVES);
- svm->pause_threshold_enabled = kvm_cpu_cap_has(X86_FEATURE_PFTHRESHOLD) &&
- guest_cpuid_has(vcpu, X86_FEATURE_PFTHRESHOLD);
+ kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_NRIPS);
+ kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_TSCRATEMSR);
+ kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LBRV);
- svm->vgif_enabled = vgif && guest_cpuid_has(vcpu, X86_FEATURE_VGIF);
+ /*
+ * Intercept VMLOAD if the vCPU mode is Intel in order to emulate that
+ * VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing
+ * SVM on Intel is bonkers and extremely unlikely to work).
+ */
+ if (!guest_cpuid_is_intel(vcpu))
+ kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
- svm->vnmi_enabled = vnmi && guest_cpuid_has(vcpu, X86_FEATURE_VNMI);
+ kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PAUSEFILTER);
+ kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PFTHRESHOLD);
+ kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VGIF);
+ kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VNMI);
svm_recalc_instruction_intercepts(vcpu, svm);
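The block above is the heart of the conversion applied throughout this patch: the per-vCPU bool flags are replaced by a governed-feature bitmap that is populated once when guest CPUID is updated and queried via guest_can_use(). A standalone sketch of that idea, where the enum values and helper bodies are simplified stand-ins rather than KVM's actual definitions:

#include <stdbool.h>
#include <stdint.h>

enum governed_feature { GOV_NRIPS, GOV_TSCRATEMSR, GOV_LBRV, GOV_VGIF, NR_GOVERNED };

struct vcpu_sketch {
	uint32_t governed;	/* one bit per governed feature */
};

/* Stand-ins for kvm_cpu_cap_has() and guest_cpuid_has(). */
static bool host_supports(enum governed_feature f) { (void)f; return true; }
static bool guest_cpuid_advertises(struct vcpu_sketch *v, enum governed_feature f)
{
	(void)v; (void)f; return true;
}

/* kvm_governed_feature_check_and_set()-like: set the bit only if both sides allow it. */
static void governed_check_and_set(struct vcpu_sketch *v, enum governed_feature f)
{
	if (host_supports(f) && guest_cpuid_advertises(v, f))
		v->governed |= 1u << f;
}

/* guest_can_use()-like: a single bit test replaces the old bool fields. */
static bool can_use(struct vcpu_sketch *v, enum governed_feature f)
{
	return v->governed & (1u << f);
}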
@@ -4651,16 +4737,25 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
* and cannot be decrypted by KVM, i.e. KVM would read cyphertext and
* decode garbage.
*
- * Inject #UD if KVM reached this point without an instruction buffer.
- * In practice, this path should never be hit by a well-behaved guest,
- * e.g. KVM doesn't intercept #UD or #GP for SEV guests, but this path
- * is still theoretically reachable, e.g. via unaccelerated fault-like
- * AVIC access, and needs to be handled by KVM to avoid putting the
- * guest into an infinite loop. Injecting #UD is somewhat arbitrary,
- * but its the least awful option given lack of insight into the guest.
+ * If KVM is NOT trying to simply skip an instruction, inject #UD if
+ * KVM reached this point without an instruction buffer. In practice,
+ * this path should never be hit by a well-behaved guest, e.g. KVM
+ * doesn't intercept #UD or #GP for SEV guests, but this path is still
+ * theoretically reachable, e.g. via unaccelerated fault-like AVIC
+ * access, and needs to be handled by KVM to avoid putting the guest
+ * into an infinite loop. Injecting #UD is somewhat arbitrary, but
+ * it's the least awful option given lack of insight into the guest.
+ *
+ * If KVM is trying to skip an instruction, simply resume the guest.
+ * If a #NPF occurs while the guest is vectoring an INT3/INTO, then KVM
+ * will attempt to re-inject the INT3/INTO and skip the instruction.
+ * In that scenario, retrying the INT3/INTO and hoping the guest will
+ * make forward progress is the only option that has a chance of
+ * success (and in practice it will work the vast majority of the time).
*/
if (unlikely(!insn)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
+ if (!(emul_type & EMULTYPE_SKIP))
+ kvm_queue_exception(vcpu, UD_VECTOR);
return false;
}
@@ -5112,9 +5207,11 @@ static __init int svm_hardware_setup(void)
svm_adjust_mmio_mask();
+ nrips = nrips && boot_cpu_has(X86_FEATURE_NRIPS);
+
/*
* Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
- * may be modified by svm_adjust_mmio_mask()).
+ * may be modified by svm_adjust_mmio_mask()), as well as nrips.
*/
sev_hardware_setup();
@@ -5126,11 +5223,6 @@ static __init int svm_hardware_setup(void)
goto err;
}
- if (nrips) {
- if (!boot_cpu_has(X86_FEATURE_NRIPS))
- nrips = false;
- }
-
enable_apicv = avic = avic && avic_hardware_setup();
if (!enable_apicv) {
@@ -5213,6 +5305,13 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
.pmu_ops = &amd_pmu_ops,
};
+static void __svm_exit(void)
+{
+ kvm_x86_vendor_exit();
+
+ cpu_emergency_unregister_virt_callback(svm_emergency_disable);
+}
+
static int __init svm_init(void)
{
int r;
@@ -5226,6 +5325,8 @@ static int __init svm_init(void)
if (r)
return r;
+ cpu_emergency_register_virt_callback(svm_emergency_disable);
+
/*
* Common KVM initialization _must_ come last, after this, /dev/kvm is
* exposed to userspace!
@@ -5238,14 +5339,14 @@ static int __init svm_init(void)
return 0;
err_kvm_init:
- kvm_x86_vendor_exit();
+ __svm_exit();
return r;
}
static void __exit svm_exit(void)
{
kvm_exit();
- kvm_x86_vendor_exit();
+ __svm_exit();
}
module_init(svm_init)
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 8239c8de45ac..f41253958357 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -22,6 +22,7 @@
#include <asm/svm.h>
#include <asm/sev-common.h>
+#include "cpuid.h"
#include "kvm_cache_regs.h"
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
@@ -33,6 +34,7 @@
#define MSRPM_OFFSETS 32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
+extern int nrips;
extern int vgif;
extern bool intercept_smi;
extern bool x2avic_enabled;
@@ -260,16 +262,6 @@ struct vcpu_svm {
unsigned long soft_int_next_rip;
bool soft_int_injected;
- /* optional nested SVM features that are enabled for this guest */
- bool nrips_enabled : 1;
- bool tsc_scaling_enabled : 1;
- bool v_vmload_vmsave_enabled : 1;
- bool lbrv_enabled : 1;
- bool pause_filter_enabled : 1;
- bool pause_threshold_enabled : 1;
- bool vgif_enabled : 1;
- bool vnmi_enabled : 1;
-
u32 ldr_reg;
u32 dfr_reg;
struct page *avic_backing_page;
@@ -406,48 +398,6 @@ static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u3
return test_bit(bit, (unsigned long *)&control->intercepts);
}
-static inline void set_dr_intercepts(struct vcpu_svm *svm)
-{
- struct vmcb *vmcb = svm->vmcb01.ptr;
-
- if (!sev_es_guest(svm->vcpu.kvm)) {
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
- }
-
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
-
- recalc_intercepts(svm);
-}
-
-static inline void clr_dr_intercepts(struct vcpu_svm *svm)
-{
- struct vmcb *vmcb = svm->vmcb01.ptr;
-
- vmcb->control.intercepts[INTERCEPT_DR] = 0;
-
- /* DR7 access must remain intercepted for an SEV-ES guest */
- if (sev_es_guest(svm->vcpu.kvm)) {
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
- vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
- }
-
- recalc_intercepts(svm);
-}
-
static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
struct vmcb *vmcb = svm->vmcb01.ptr;
@@ -493,7 +443,8 @@ static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
{
- return svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
+ return guest_can_use(&svm->vcpu, X86_FEATURE_VGIF) &&
+ (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
}
static inline struct vmcb *get_vgif_vmcb(struct vcpu_svm *svm)
@@ -544,7 +495,7 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm)
static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
{
- return svm->vnmi_enabled &&
+ return guest_can_use(&svm->vcpu, X86_FEATURE_VNMI) &&
(svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
}
@@ -660,7 +611,7 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
-void __svm_write_tsc_multiplier(u64 multiplier);
+void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu);
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
struct vmcb_control_area *control);
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index d0abee35d7ba..41a4533f9989 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -252,7 +252,7 @@ static inline bool cpu_has_vmx_pml(void)
static inline bool cpu_has_vmx_xsaves(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
- SECONDARY_EXEC_XSAVES;
+ SECONDARY_EXEC_ENABLE_XSAVES;
}
static inline bool cpu_has_vmx_waitpkg(void)
diff --git a/arch/x86/kvm/vmx/hyperv.c b/arch/x86/kvm/vmx/hyperv.c
index 79450e1ed7cf..313b8bb5b8a7 100644
--- a/arch/x86/kvm/vmx/hyperv.c
+++ b/arch/x86/kvm/vmx/hyperv.c
@@ -78,7 +78,7 @@
SECONDARY_EXEC_DESC | \
SECONDARY_EXEC_ENABLE_RDTSCP | \
SECONDARY_EXEC_ENABLE_INVPCID | \
- SECONDARY_EXEC_XSAVES | \
+ SECONDARY_EXEC_ENABLE_XSAVES | \
SECONDARY_EXEC_RDSEED_EXITING | \
SECONDARY_EXEC_RDRAND_EXITING | \
SECONDARY_EXEC_TSC_SCALING | \
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 516391cc0d64..c5ec0ef51ff7 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2307,7 +2307,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_ENABLE_INVPCID |
SECONDARY_EXEC_ENABLE_RDTSCP |
- SECONDARY_EXEC_XSAVES |
+ SECONDARY_EXEC_ENABLE_XSAVES |
SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
@@ -6331,7 +6331,7 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
* If it were, XSS would have to be checked against
* the XSS exit bitmap in vmcs12.
*/
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_XSAVES);
case EXIT_REASON_UMWAIT:
case EXIT_REASON_TPAUSE:
return nested_cpu_has2(vmcs12,
@@ -6426,7 +6426,7 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
vmx = to_vmx(vcpu);
vmcs12 = get_vmcs12(vcpu);
- if (nested_vmx_allowed(vcpu) &&
+ if (guest_can_use(vcpu, X86_FEATURE_VMX) &&
(vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
@@ -6567,7 +6567,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
return -EINVAL;
} else {
- if (!nested_vmx_allowed(vcpu))
+ if (!guest_can_use(vcpu, X86_FEATURE_VMX))
return -EINVAL;
if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
@@ -6601,7 +6601,8 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
return -EINVAL;
if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
- (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
+ (!guest_can_use(vcpu, X86_FEATURE_VMX) ||
+ !vmx->nested.enlightened_vmcs_enabled))
return -EINVAL;
vmx_leave_nested(vcpu);
@@ -6874,7 +6875,7 @@ static void nested_vmx_setup_secondary_ctls(u32 ept_caps,
SECONDARY_EXEC_ENABLE_INVPCID |
SECONDARY_EXEC_ENABLE_VMFUNC |
SECONDARY_EXEC_RDSEED_EXITING |
- SECONDARY_EXEC_XSAVES |
+ SECONDARY_EXEC_ENABLE_XSAVES |
SECONDARY_EXEC_TSC_SCALING |
SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index 96952263b029..b4b9d51438c6 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -168,7 +168,7 @@ static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
{
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_XSAVES);
}
static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 80c769c58a87..f2efa0bf7ae8 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -22,23 +22,51 @@
#define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
+enum intel_pmu_architectural_events {
+ /*
+ * The order of the architectural events matters as support for each
+ * event is enumerated via CPUID using the index of the event.
+ */
+ INTEL_ARCH_CPU_CYCLES,
+ INTEL_ARCH_INSTRUCTIONS_RETIRED,
+ INTEL_ARCH_REFERENCE_CYCLES,
+ INTEL_ARCH_LLC_REFERENCES,
+ INTEL_ARCH_LLC_MISSES,
+ INTEL_ARCH_BRANCHES_RETIRED,
+ INTEL_ARCH_BRANCHES_MISPREDICTED,
+
+ NR_REAL_INTEL_ARCH_EVENTS,
+
+ /*
+ * Pseudo-architectural event used to implement IA32_FIXED_CTR2, a.k.a.
+ * TSC reference cycles. The architectural reference cycles event may
+ * or may not actually use the TSC as the reference, e.g. might use the
+ * core crystal clock or the bus clock (yeah, "architectural").
+ */
+ PSEUDO_ARCH_REFERENCE_CYCLES = NR_REAL_INTEL_ARCH_EVENTS,
+ NR_INTEL_ARCH_EVENTS,
+};
+
static struct {
u8 eventsel;
u8 unit_mask;
} const intel_arch_events[] = {
- [0] = { 0x3c, 0x00 },
- [1] = { 0xc0, 0x00 },
- [2] = { 0x3c, 0x01 },
- [3] = { 0x2e, 0x4f },
- [4] = { 0x2e, 0x41 },
- [5] = { 0xc4, 0x00 },
- [6] = { 0xc5, 0x00 },
- /* The above index must match CPUID 0x0A.EBX bit vector */
- [7] = { 0x00, 0x03 },
+ [INTEL_ARCH_CPU_CYCLES] = { 0x3c, 0x00 },
+ [INTEL_ARCH_INSTRUCTIONS_RETIRED] = { 0xc0, 0x00 },
+ [INTEL_ARCH_REFERENCE_CYCLES] = { 0x3c, 0x01 },
+ [INTEL_ARCH_LLC_REFERENCES] = { 0x2e, 0x4f },
+ [INTEL_ARCH_LLC_MISSES] = { 0x2e, 0x41 },
+ [INTEL_ARCH_BRANCHES_RETIRED] = { 0xc4, 0x00 },
+ [INTEL_ARCH_BRANCHES_MISPREDICTED] = { 0xc5, 0x00 },
+ [PSEUDO_ARCH_REFERENCE_CYCLES] = { 0x00, 0x03 },
};
/* mapping between fixed pmc index and intel_arch_events array */
-static int fixed_pmc_events[] = {1, 0, 7};
+static int fixed_pmc_events[] = {
+ [0] = INTEL_ARCH_INSTRUCTIONS_RETIRED,
+ [1] = INTEL_ARCH_CPU_CYCLES,
+ [2] = PSEUDO_ARCH_REFERENCE_CYCLES,
+};
static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
@@ -80,16 +108,18 @@ static bool intel_hw_event_available(struct kvm_pmc *pmc)
u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
int i;
- for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) {
+ BUILD_BUG_ON(ARRAY_SIZE(intel_arch_events) != NR_INTEL_ARCH_EVENTS);
+
+ /*
+ * Disallow events reported as unavailable in guest CPUID. Note, this
+ * doesn't apply to pseudo-architectural events.
+ */
+ for (i = 0; i < NR_REAL_INTEL_ARCH_EVENTS; i++) {
if (intel_arch_events[i].eventsel != event_select ||
intel_arch_events[i].unit_mask != unit_mask)
continue;
- /* disable event that reported as not present by cpuid */
- if ((i < 7) && !(pmu->available_event_types & (1 << i)))
- return false;
-
- break;
+ return pmu->available_event_types & BIT(i);
}
return true;
@@ -438,16 +468,17 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
{
- size_t size = ARRAY_SIZE(fixed_pmc_events);
- struct kvm_pmc *pmc;
- u32 event;
int i;
+ BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_events) != KVM_PMC_MAX_FIXED);
+
for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
- pmc = &pmu->fixed_counters[i];
- event = fixed_pmc_events[array_index_nospec(i, size)];
+ int index = array_index_nospec(i, KVM_PMC_MAX_FIXED);
+ struct kvm_pmc *pmc = &pmu->fixed_counters[index];
+ u32 event = fixed_pmc_events[index];
+
pmc->eventsel = (intel_arch_events[event].unit_mask << 8) |
- intel_arch_events[event].eventsel;
+ intel_arch_events[event].eventsel;
}
}
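Worked example, derived only from the tables and code above: fixed counter 0 maps to INTEL_ARCH_INSTRUCTIONS_RETIRED = { 0xc0, 0x00 }, so its eventsel becomes (0x00 << 8) | 0xc0 = 0xc0; fixed counter 2 maps to PSEUDO_ARCH_REFERENCE_CYCLES = { 0x00, 0x03 }, giving (0x03 << 8) | 0x00 = 0x300.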
@@ -508,10 +539,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
if (pmu->version == 1) {
pmu->nr_arch_fixed_counters = 0;
} else {
- pmu->nr_arch_fixed_counters =
- min3(ARRAY_SIZE(fixed_pmc_events),
- (size_t) edx.split.num_counters_fixed,
- (size_t)kvm_pmu_cap.num_counters_fixed);
+ pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
+ kvm_pmu_cap.num_counters_fixed);
edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
kvm_pmu_cap.bit_width_fixed);
pmu->counter_bitmask[KVM_PMC_FIXED] =
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index b483a8baaacf..72e3943f3693 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -41,13 +41,12 @@
#include <asm/idtentry.h>
#include <asm/io.h>
#include <asm/irq_remapping.h>
-#include <asm/kexec.h>
+#include <asm/reboot.h>
#include <asm/perf_event.h>
#include <asm/mmu_context.h>
#include <asm/mshyperv.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
-#include <asm/virtext.h>
#include <asm/vmx.h>
#include "capabilities.h"
@@ -237,9 +236,6 @@ static const struct {
#define L1D_CACHE_ORDER 4
static void *vmx_l1d_flush_pages;
-/* Control for disabling CPU Fill buffer clear */
-static bool __read_mostly vmx_fb_clear_ctrl_available;
-
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
{
struct page *page;
@@ -255,14 +251,9 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
return 0;
}
- if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
- u64 msr;
-
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
- if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
- l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
- return 0;
- }
+ if (host_arch_capabilities & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
+ return 0;
}
/* If set to auto use the default l1tf mitigation method */
@@ -366,22 +357,9 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
{
if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
- return sprintf(s, "???\n");
+ return sysfs_emit(s, "???\n");
- return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
-}
-
-static void vmx_setup_fb_clear_ctrl(void)
-{
- u64 msr;
-
- if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES) &&
- !boot_cpu_has_bug(X86_BUG_MDS) &&
- !boot_cpu_has_bug(X86_BUG_TAA)) {
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
- if (msr & ARCH_CAP_FB_CLEAR_CTRL)
- vmx_fb_clear_ctrl_available = true;
- }
+ return sysfs_emit(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
}
static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
@@ -409,7 +387,9 @@ static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
{
- vmx->disable_fb_clear = vmx_fb_clear_ctrl_available;
+ vmx->disable_fb_clear = (host_arch_capabilities & ARCH_CAP_FB_CLEAR_CTRL) &&
+ !boot_cpu_has_bug(X86_BUG_MDS) &&
+ !boot_cpu_has_bug(X86_BUG_TAA);
/*
* If guest will not execute VERW, there is no need to set FB_CLEAR_DIS
@@ -754,17 +734,51 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
return ret;
}
-#ifdef CONFIG_KEXEC_CORE
-static void crash_vmclear_local_loaded_vmcss(void)
+/*
+ * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
+ *
+ * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
+ * atomically track post-VMXON state, e.g. this may be called in NMI context.
+ * Eat all faults, as all other faults on VMXOFF are mode related, i.e.
+ * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
+ * magically in RM, VM86, compat mode, or at CPL>0.
+ */
+static int kvm_cpu_vmxoff(void)
+{
+ asm_volatile_goto("1: vmxoff\n\t"
+ _ASM_EXTABLE(1b, %l[fault])
+ ::: "cc", "memory" : fault);
+
+ cr4_clear_bits(X86_CR4_VMXE);
+ return 0;
+
+fault:
+ cr4_clear_bits(X86_CR4_VMXE);
+ return -EIO;
+}
+
+static void vmx_emergency_disable(void)
{
int cpu = raw_smp_processor_id();
struct loaded_vmcs *v;
+ kvm_rebooting = true;
+
+ /*
+ * Note, CR4.VMXE can be _cleared_ in NMI context, but it can only be
+ * set in task context. If this races with VMX being disabled by an NMI,
+ * VMCLEAR and VMXOFF may #UD, but KVM will eat those faults because
+ * kvm_rebooting is set.
+ */
+ if (!(__read_cr4() & X86_CR4_VMXE))
+ return;
+
list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
loaded_vmcss_on_cpu_link)
vmcs_clear(v->vmcs);
+
+ kvm_cpu_vmxoff();
}
-#endif /* CONFIG_KEXEC_CORE */
static void __loaded_vmcs_clear(void *arg)
{
@@ -1899,25 +1913,14 @@ u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
return kvm_caps.default_tsc_scaling_ratio;
}
-static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu)
{
- vmcs_write64(TSC_OFFSET, offset);
+ vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
}
-static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu)
{
- vmcs_write64(TSC_MULTIPLIER, multiplier);
-}
-
-/*
- * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
- * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
- * all guests if the "nested" module option is off, and can also be disabled
- * for a single guest by disabling its VMX cpuid bit.
- */
-bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
-{
- return nested && guest_cpuid_has(vcpu, X86_FEATURE_VMX);
+ vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
}
/*
@@ -2047,7 +2050,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
[msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0];
break;
case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
- if (!nested_vmx_allowed(vcpu))
+ if (!guest_can_use(vcpu, X86_FEATURE_VMX))
return 1;
if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
&msr_info->data))
@@ -2355,7 +2358,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
if (!msr_info->host_initiated)
return 1; /* they are read-only */
- if (!nested_vmx_allowed(vcpu))
+ if (!guest_can_use(vcpu, X86_FEATURE_VMX))
return 1;
return vmx_set_vmx_msr(vcpu, msr_index, data);
case MSR_IA32_RTIT_CTL:
@@ -2729,11 +2732,11 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
return 0;
}
-static bool kvm_is_vmx_supported(void)
+static bool __kvm_is_vmx_supported(void)
{
- int cpu = raw_smp_processor_id();
+ int cpu = smp_processor_id();
- if (!cpu_has_vmx()) {
+ if (!(cpuid_ecx(1) & feature_bit(VMX))) {
pr_err("VMX not supported by CPU %d\n", cpu);
return false;
}
@@ -2747,13 +2750,24 @@ static bool kvm_is_vmx_supported(void)
return true;
}
+static bool kvm_is_vmx_supported(void)
+{
+ bool supported;
+
+ migrate_disable();
+ supported = __kvm_is_vmx_supported();
+ migrate_enable();
+
+ return supported;
+}
+
static int vmx_check_processor_compat(void)
{
int cpu = raw_smp_processor_id();
struct vmcs_config vmcs_conf;
struct vmx_capability vmx_cap;
- if (!kvm_is_vmx_supported())
+ if (!__kvm_is_vmx_supported())
return -EIO;
if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0) {
@@ -2833,7 +2847,7 @@ static void vmx_hardware_disable(void)
{
vmclear_local_loaded_vmcss();
- if (cpu_vmxoff())
+ if (kvm_cpu_vmxoff())
kvm_spurious_fault();
hv_reset_evmcs();
@@ -3071,13 +3085,6 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
vmx->rmode.vm86_active = 1;
- /*
- * Very old userspace does not call KVM_SET_TSS_ADDR before entering
- * vcpu. Warn the user that an update is overdue.
- */
- if (!kvm_vmx->tss_addr)
- pr_warn_once("KVM_SET_TSS_ADDR needs to be called before running vCPU\n");
-
vmx_segment_cache_clear(vmx);
vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr);
@@ -3350,7 +3357,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
vmx->emulation_required = vmx_emulation_required(vcpu);
}
-static int vmx_get_max_tdp_level(void)
+static int vmx_get_max_ept_level(void)
{
if (cpu_has_vmx_ept_5levels())
return 5;
@@ -4553,16 +4560,19 @@ vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
* based on a single guest CPUID bit, with a dedicated feature bit. This also
* verifies that the control is actually supported by KVM and hardware.
*/
-#define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \
-({ \
- bool __enabled; \
- \
- if (cpu_has_vmx_##name()) { \
- __enabled = guest_cpuid_has(&(vmx)->vcpu, \
- X86_FEATURE_##feat_name); \
- vmx_adjust_secondary_exec_control(vmx, exec_control, \
- SECONDARY_EXEC_##ctrl_name, __enabled, exiting); \
- } \
+#define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \
+({ \
+ struct kvm_vcpu *__vcpu = &(vmx)->vcpu; \
+ bool __enabled; \
+ \
+ if (cpu_has_vmx_##name()) { \
+ if (kvm_is_governed_feature(X86_FEATURE_##feat_name)) \
+ __enabled = guest_can_use(__vcpu, X86_FEATURE_##feat_name); \
+ else \
+ __enabled = guest_cpuid_has(__vcpu, X86_FEATURE_##feat_name); \
+ vmx_adjust_secondary_exec_control(vmx, exec_control, SECONDARY_EXEC_##ctrl_name,\
+ __enabled, exiting); \
+ } \
})
/* More macro magic for ENABLE_/opt-in versus _EXITING/opt-out controls. */
@@ -4622,19 +4632,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
if (!enable_pml || !atomic_read(&vcpu->kvm->nr_memslots_dirty_logging))
exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
- if (cpu_has_vmx_xsaves()) {
- /* Exposing XSAVES only when XSAVE is exposed */
- bool xsaves_enabled =
- boot_cpu_has(X86_FEATURE_XSAVE) &&
- guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
- guest_cpuid_has(vcpu, X86_FEATURE_XSAVES);
-
- vcpu->arch.xsaves_enabled = xsaves_enabled;
-
- vmx_adjust_secondary_exec_control(vmx, &exec_control,
- SECONDARY_EXEC_XSAVES,
- xsaves_enabled, false);
- }
+ vmx_adjust_sec_exec_feature(vmx, &exec_control, xsaves, XSAVES);
/*
* RDPID is also gated by ENABLE_RDTSCP, turn on the control if either
@@ -4653,6 +4651,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
SECONDARY_EXEC_ENABLE_RDTSCP,
rdpid_or_rdtscp_enabled, false);
}
+
vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
@@ -6796,8 +6795,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(pfn));
read_unlock(&vcpu->kvm->mmu_lock);
- vmx_flush_tlb_current(vcpu);
-
+ /*
+ * No need for a manual TLB flush at this point, KVM has already done a
+ * flush if there were SPTEs pointing at the previous page.
+ */
out:
/*
* Do not pin apic access page in memory, the MMU notifier
@@ -7243,13 +7244,20 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
flags);
vcpu->arch.cr2 = native_read_cr2();
+ vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
+
+ vmx->idt_vectoring_info = 0;
vmx_enable_fb_clear(vmx);
- if (unlikely(vmx->fail))
+ if (unlikely(vmx->fail)) {
vmx->exit_reason.full = 0xdead;
- else
- vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
+ goto out;
+ }
+
+ vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
+ if (likely(!vmx->exit_reason.failed_vmentry))
+ vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
if ((u16)vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI &&
is_nmi(vmx_get_intr_info(vcpu))) {
@@ -7258,6 +7266,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
kvm_after_interrupt(vcpu);
}
+out:
guest_state_exit_irqoff();
}
@@ -7379,8 +7388,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
loadsegment(es, __USER_DS);
#endif
- vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
-
pt_guest_exit(vmx);
kvm_load_host_xsave_state(vcpu);
@@ -7397,17 +7404,12 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx->nested.nested_run_pending = 0;
}
- vmx->idt_vectoring_info = 0;
-
if (unlikely(vmx->fail))
return EXIT_FASTPATH_NONE;
if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
kvm_machine_check();
- if (likely(!vmx->exit_reason.failed_vmentry))
- vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
-
trace_kvm_exit(vcpu, KVM_ISA_VMX);
if (unlikely(vmx->exit_reason.failed_vmentry))
@@ -7751,8 +7753,16 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- /* xsaves_enabled is recomputed in vmx_compute_secondary_exec_control(). */
- vcpu->arch.xsaves_enabled = false;
+ /*
+ * XSAVES is effectively enabled if and only if XSAVE is also exposed
+ * to the guest. XSAVES depends on CR4.OSXSAVE, and CR4.OSXSAVE can be
+ * set if and only if XSAVE is supported.
+ */
+ if (boot_cpu_has(X86_FEATURE_XSAVE) &&
+ guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
+ kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES);
+
+ kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VMX);
vmx_setup_uret_msrs(vmx);
@@ -7760,7 +7770,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vmcs_set_secondary_exec_control(vmx,
vmx_secondary_exec_control(vmx));
- if (nested_vmx_allowed(vcpu))
+ if (guest_can_use(vcpu, X86_FEATURE_VMX))
vmx->msr_ia32_feature_control_valid_bits |=
FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
@@ -7769,7 +7779,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX);
- if (nested_vmx_allowed(vcpu))
+ if (guest_can_use(vcpu, X86_FEATURE_VMX))
nested_vmx_cr_fixed1_bits_update(vcpu);
if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
@@ -8526,7 +8536,7 @@ static __init int hardware_setup(void)
*/
vmx_setup_me_spte_mask();
- kvm_configure_mmu(enable_ept, 0, vmx_get_max_tdp_level(),
+ kvm_configure_mmu(enable_ept, 0, vmx_get_max_ept_level(),
ept_caps_to_lpage_level(vmx_capability.ept));
/*
@@ -8622,10 +8632,8 @@ static void __vmx_exit(void)
{
allow_smaller_maxphyaddr = false;
-#ifdef CONFIG_KEXEC_CORE
- RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
- synchronize_rcu();
-#endif
+ cpu_emergency_unregister_virt_callback(vmx_emergency_disable);
+
vmx_cleanup_l1d_flush();
}
@@ -8666,18 +8674,14 @@ static int __init vmx_init(void)
if (r)
goto err_l1d_flush;
- vmx_setup_fb_clear_ctrl();
-
for_each_possible_cpu(cpu) {
INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
pi_init_cpu(cpu);
}
-#ifdef CONFIG_KEXEC_CORE
- rcu_assign_pointer(crash_vmclear_loaded_vmcss,
- crash_vmclear_local_loaded_vmcss);
-#endif
+ cpu_emergency_register_virt_callback(vmx_emergency_disable);
+
vmx_check_vmcs12_offsets();
/*
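
[Editorial note, not part of the patch] The vmx_vcpu_after_set_cpuid() hunks above drop the per-vCPU xsaves_enabled flag and the nested_vmx_allowed() helper in favour of KVM's governed-feature bitmap, queried through guest_can_use(). A minimal standalone sketch of that idea — one cached bit meaning "the host supports the feature and the guest's CPUID exposes it", so hot paths test a bit instead of re-deriving the answer — with purely illustrative names:

#include <stdbool.h>
#include <stdint.h>

enum governed_feature { GOV_XSAVES, GOV_VMX, NR_GOVERNED };

struct vcpu_example {
	uint64_t governed;	/* one bit per enum governed_feature */
};

/* Set the bit once, at CPUID-update time. */
static void governed_set(struct vcpu_example *v, enum governed_feature f,
			 bool host_has, bool guest_has)
{
	if (host_has && guest_has)
		v->governed |= 1ull << f;
}

/* Cheap query used on the hot paths. */
static bool guest_can_use_example(const struct vcpu_example *v,
				  enum governed_feature f)
{
	return v->governed & (1ull << f);
}
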
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 32384ba38499..c2130d2c8e24 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -374,7 +374,6 @@ struct kvm_vmx {
u64 *pid_table;
};
-bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
struct loaded_vmcs *buddy);
int allocate_vpid(void);
@@ -562,7 +561,7 @@ static inline u8 vmx_get_rvi(void)
SECONDARY_EXEC_APIC_REGISTER_VIRT | \
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | \
SECONDARY_EXEC_SHADOW_VMCS | \
- SECONDARY_EXEC_XSAVES | \
+ SECONDARY_EXEC_ENABLE_XSAVES | \
SECONDARY_EXEC_RDSEED_EXITING | \
SECONDARY_EXEC_RDRAND_EXITING | \
SECONDARY_EXEC_ENABLE_PML | \
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c381770bcbf1..6c9c81e82e65 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -25,6 +25,7 @@
#include "tss.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
+#include "mmu/page_track.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"
@@ -237,6 +238,9 @@ EXPORT_SYMBOL_GPL(enable_apicv);
u64 __read_mostly host_xss;
EXPORT_SYMBOL_GPL(host_xss);
+u64 __read_mostly host_arch_capabilities;
+EXPORT_SYMBOL_GPL(host_arch_capabilities);
+
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS(),
STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
@@ -1021,7 +1025,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
if (vcpu->arch.xcr0 != host_xcr0)
xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
- if (vcpu->arch.xsaves_enabled &&
+ if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
vcpu->arch.ia32_xss != host_xss)
wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
}
@@ -1052,7 +1056,7 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
if (vcpu->arch.xcr0 != host_xcr0)
xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
- if (vcpu->arch.xsaves_enabled &&
+ if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
vcpu->arch.ia32_xss != host_xss)
wrmsrl(MSR_IA32_XSS, host_xss);
}
@@ -1620,12 +1624,7 @@ static bool kvm_is_immutable_feature_msr(u32 msr)
static u64 kvm_get_arch_capabilities(void)
{
- u64 data = 0;
-
- if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
- rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);
- data &= KVM_SUPPORTED_ARCH_CAP;
- }
+ u64 data = host_arch_capabilities & KVM_SUPPORTED_ARCH_CAP;
/*
* If nx_huge_pages is enabled, KVM's shadow paging will ensure that
@@ -2631,7 +2630,7 @@ static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
else
vcpu->arch.tsc_offset = l1_offset;
- static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset);
+ static_call(kvm_x86_write_tsc_offset)(vcpu);
}
static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
@@ -2647,8 +2646,7 @@ static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multipli
vcpu->arch.tsc_scaling_ratio = l1_multiplier;
if (kvm_caps.has_tsc_control)
- static_call(kvm_x86_write_tsc_multiplier)(
- vcpu, vcpu->arch.tsc_scaling_ratio);
+ static_call(kvm_x86_write_tsc_multiplier)(vcpu);
}
static inline bool kvm_check_tsc_unstable(void)
@@ -4665,7 +4663,6 @@ static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr)
return 0;
default:
return -ENXIO;
- break;
}
}
@@ -6532,7 +6529,7 @@ static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
struct kvm_msr_filter_range *user_range)
{
- unsigned long *bitmap = NULL;
+ unsigned long *bitmap;
size_t bitmap_size;
if (!user_range->nmsrs)
@@ -8245,11 +8242,6 @@ static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only);
}
-static bool emulator_guest_has_long_mode(struct x86_emulate_ctxt *ctxt)
-{
- return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_LM);
-}
-
static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt)
{
return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE);
@@ -8351,7 +8343,6 @@ static const struct x86_emulate_ops emulate_ops = {
.fix_hypercall = emulator_fix_hypercall,
.intercept = emulator_intercept,
.get_cpuid = emulator_get_cpuid,
- .guest_has_long_mode = emulator_guest_has_long_mode,
.guest_has_movbe = emulator_guest_has_movbe,
.guest_has_fxsr = emulator_guest_has_fxsr,
.guest_has_rdpid = emulator_guest_has_rdpid,
@@ -9172,7 +9163,7 @@ static int kvmclock_cpu_down_prep(unsigned int cpu)
static void tsc_khz_changed(void *data)
{
struct cpufreq_freqs *freq = data;
- unsigned long khz = 0;
+ unsigned long khz;
WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_CONSTANT_TSC));
@@ -9512,6 +9503,9 @@ static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
kvm_init_pmu_capability(ops->pmu_ops);
+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, host_arch_capabilities);
+
r = ops->hardware_setup();
if (r != 0)
goto out_mmu_exit;
@@ -11111,12 +11105,17 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
r = -EINTR;
goto out;
}
+
/*
- * It should be impossible for the hypervisor timer to be in
- * use before KVM has ever run the vCPU.
+ * Don't bother switching APIC timer emulation from the
+ * hypervisor timer to the software timer, the only way for the
+ * APIC timer to be active is if userspace stuffed vCPU state,
+ * i.e. put the vCPU into a nonsensical state. Only an INIT
+ * will transition the vCPU out of UNINITIALIZED (without more
+ * state stuffing from userspace), which will reset the local
+ * APIC and thus cancel the timer or drop the IRQ (if the timer
+ * already expired).
*/
- WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu));
-
kvm_vcpu_srcu_read_unlock(vcpu);
kvm_vcpu_block(vcpu);
kvm_vcpu_srcu_read_lock(vcpu);
@@ -11798,15 +11797,22 @@ static int sync_regs(struct kvm_vcpu *vcpu)
__set_regs(vcpu, &vcpu->run->s.regs.regs);
vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
}
+
if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) {
- if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs))
+ struct kvm_sregs sregs = vcpu->run->s.regs.sregs;
+
+ if (__set_sregs(vcpu, &sregs))
return -EINVAL;
+
vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS;
}
+
if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) {
- if (kvm_vcpu_ioctl_x86_set_vcpu_events(
- vcpu, &vcpu->run->s.regs.events))
+ struct kvm_vcpu_events events = vcpu->run->s.regs.events;
+
+ if (kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events))
return -EINVAL;
+
vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS;
}
@@ -12627,6 +12633,13 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
+ /*
+ * KVM doesn't support moving memslots when there are external page
+ * trackers attached to the VM, i.e. if KVMGT is in use.
+ */
+ if (change == KVM_MR_MOVE && kvm_page_track_has_external_user(kvm))
+ return -EINVAL;
+
if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) {
if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn())
return -EINVAL;
@@ -12772,7 +12785,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
* See is_writable_pte() for more details (the case involving
* access-tracked SPTEs is particularly relevant).
*/
- kvm_arch_flush_remote_tlbs_memslot(kvm, new);
+ kvm_flush_remote_tlbs_memslot(kvm, new);
}
}
@@ -12781,6 +12794,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
+ if (change == KVM_MR_DELETE)
+ kvm_page_track_delete_slot(kvm, old);
+
if (!kvm->arch.n_requested_mmu_pages &&
(change == KVM_MR_CREATE || change == KVM_MR_DELETE)) {
unsigned long nr_mmu_pages;
@@ -12797,17 +12813,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
kvm_arch_free_memslot(kvm, old);
}
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
- kvm_mmu_zap_all(kvm);
-}
-
-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
- struct kvm_memory_slot *slot)
-{
- kvm_page_track_flush_slot(kvm, slot);
-}
-
static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
return (is_guest_mode(vcpu) &&
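
[Editorial note, not part of the patch] The sync_regs() hunk above stops handing __set_sregs() and kvm_vcpu_ioctl_x86_set_vcpu_events() pointers straight into the userspace-mapped kvm_run area and validates local snapshots instead, presumably so the checked values cannot change underneath KVM while they are being consumed. A generic, self-contained sketch of that snapshot-then-validate pattern (hypothetical types):

#include <string.h>

struct request { unsigned int len; unsigned char payload[64]; };

/* @shared points into memory that another party can modify concurrently. */
static int handle_request(const struct request *shared)
{
	struct request req;

	memcpy(&req, shared, sizeof(req));	/* snapshot once */

	if (req.len > sizeof(req.payload))	/* validate the snapshot */
		return -1;

	/* ... only the snapshot is used from here on ... */
	return (int)req.len;
}
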
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 82e3dafc5453..1e7be1f6ab29 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -323,6 +323,7 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
extern u64 host_xcr0;
extern u64 host_xss;
+extern u64 host_arch_capabilities;
extern struct kvm_caps kvm_caps;
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
index ee89f6bb9242..8bc72a51b257 100644
--- a/arch/x86/um/Makefile
+++ b/arch/x86/um/Makefile
@@ -13,15 +13,16 @@ obj-y = bugs_$(BITS).o delay.o fault.o ldt.o \
ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal.o \
stub_$(BITS).o stub_segv.o \
sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o \
- mem_$(BITS).o subarch.o os-$(OS)/
+ mem_$(BITS).o subarch.o os-Linux/
ifeq ($(CONFIG_X86_32),y)
-obj-y += checksum_32.o syscalls_32.o
+obj-y += syscalls_32.o
obj-$(CONFIG_ELF_CORE) += elfcore.o
subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o
subarch-y += ../lib/cmpxchg8b_emu.o ../lib/atomic64_386_32.o
+subarch-y += ../lib/checksum_32.o
subarch-y += ../kernel/sys_ia32.o
else
diff --git a/arch/x86/um/asm/mm_context.h b/arch/x86/um/asm/mm_context.h
index 4a73d63e4760..dc32dc023c2f 100644
--- a/arch/x86/um/asm/mm_context.h
+++ b/arch/x86/um/asm/mm_context.h
@@ -11,8 +11,6 @@
#include <linux/mutex.h>
#include <asm/ldt.h>
-extern void ldt_host_info(void);
-
#define LDT_PAGES_MAX \
((LDT_ENTRIES * LDT_ENTRY_SIZE)/PAGE_SIZE)
#define LDT_ENTRIES_PER_PAGE \
diff --git a/arch/x86/um/checksum_32.S b/arch/x86/um/checksum_32.S
deleted file mode 100644
index aed782ab7721..000000000000
--- a/arch/x86/um/checksum_32.S
+++ /dev/null
@@ -1,214 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * INET An implementation of the TCP/IP protocol suite for the LINUX
- * operating system. INET is implemented using the BSD Socket
- * interface as the means of communication with the user level.
- *
- * IP/TCP/UDP checksumming routines
- *
- * Authors: Jorge Cwik, <jorge@laser.satlink.net>
- * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- * Tom May, <ftom@netcom.com>
- * Pentium Pro/II routines:
- * Alexander Kjeldaas <astor@guardian.no>
- * Finn Arne Gangstad <finnag@guardian.no>
- * Lots of code moved from tcp.c and ip.c; see those files
- * for more names.
- *
- * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception
- * handling.
- * Andi Kleen, add zeroing on error
- * converted to pure assembler
- */
-
-#include <asm/errno.h>
-#include <asm/asm.h>
-#include <asm/export.h>
-
-/*
- * computes a partial checksum, e.g. for TCP/UDP fragments
- */
-
-/*
-unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
- */
-
-.text
-.align 4
-.globl csum_partial
-
-#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
-
- /*
- * Experiments with Ethernet and SLIP connections show that buff
- * is aligned on either a 2-byte or 4-byte boundary. We get at
- * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
- * Fortunately, it is easy to convert 2-byte alignment to 4-byte
- * alignment for the unrolled loop.
- */
-csum_partial:
- pushl %esi
- pushl %ebx
- movl 20(%esp),%eax # Function arg: unsigned int sum
- movl 16(%esp),%ecx # Function arg: int len
- movl 12(%esp),%esi # Function arg: unsigned char *buff
- testl $2, %esi # Check alignment.
- jz 2f # Jump if alignment is ok.
- subl $2, %ecx # Alignment uses up two bytes.
- jae 1f # Jump if we had at least two bytes.
- addl $2, %ecx # ecx was < 2. Deal with it.
- jmp 4f
-1: movw (%esi), %bx
- addl $2, %esi
- addw %bx, %ax
- adcl $0, %eax
-2:
- movl %ecx, %edx
- shrl $5, %ecx
- jz 2f
- testl %esi, %esi
-1: movl (%esi), %ebx
- adcl %ebx, %eax
- movl 4(%esi), %ebx
- adcl %ebx, %eax
- movl 8(%esi), %ebx
- adcl %ebx, %eax
- movl 12(%esi), %ebx
- adcl %ebx, %eax
- movl 16(%esi), %ebx
- adcl %ebx, %eax
- movl 20(%esi), %ebx
- adcl %ebx, %eax
- movl 24(%esi), %ebx
- adcl %ebx, %eax
- movl 28(%esi), %ebx
- adcl %ebx, %eax
- lea 32(%esi), %esi
- dec %ecx
- jne 1b
- adcl $0, %eax
-2: movl %edx, %ecx
- andl $0x1c, %edx
- je 4f
- shrl $2, %edx # This clears CF
-3: adcl (%esi), %eax
- lea 4(%esi), %esi
- dec %edx
- jne 3b
- adcl $0, %eax
-4: andl $3, %ecx
- jz 7f
- cmpl $2, %ecx
- jb 5f
- movw (%esi),%cx
- leal 2(%esi),%esi
- je 6f
- shll $16,%ecx
-5: movb (%esi),%cl
-6: addl %ecx,%eax
- adcl $0, %eax
-7:
- popl %ebx
- popl %esi
- RET
-
-#else
-
-/* Version for PentiumII/PPro */
-
-csum_partial:
- pushl %esi
- pushl %ebx
- movl 20(%esp),%eax # Function arg: unsigned int sum
- movl 16(%esp),%ecx # Function arg: int len
- movl 12(%esp),%esi # Function arg: const unsigned char *buf
-
- testl $2, %esi
- jnz 30f
-10:
- movl %ecx, %edx
- movl %ecx, %ebx
- andl $0x7c, %ebx
- shrl $7, %ecx
- addl %ebx,%esi
- shrl $2, %ebx
- negl %ebx
- lea 45f(%ebx,%ebx,2), %ebx
- testl %esi, %esi
- jmp *%ebx
-
- # Handle 2-byte-aligned regions
-20: addw (%esi), %ax
- lea 2(%esi), %esi
- adcl $0, %eax
- jmp 10b
-
-30: subl $2, %ecx
- ja 20b
- je 32f
- movzbl (%esi),%ebx # csumming 1 byte, 2-aligned
- addl %ebx, %eax
- adcl $0, %eax
- jmp 80f
-32:
- addw (%esi), %ax # csumming 2 bytes, 2-aligned
- adcl $0, %eax
- jmp 80f
-
-40:
- addl -128(%esi), %eax
- adcl -124(%esi), %eax
- adcl -120(%esi), %eax
- adcl -116(%esi), %eax
- adcl -112(%esi), %eax
- adcl -108(%esi), %eax
- adcl -104(%esi), %eax
- adcl -100(%esi), %eax
- adcl -96(%esi), %eax
- adcl -92(%esi), %eax
- adcl -88(%esi), %eax
- adcl -84(%esi), %eax
- adcl -80(%esi), %eax
- adcl -76(%esi), %eax
- adcl -72(%esi), %eax
- adcl -68(%esi), %eax
- adcl -64(%esi), %eax
- adcl -60(%esi), %eax
- adcl -56(%esi), %eax
- adcl -52(%esi), %eax
- adcl -48(%esi), %eax
- adcl -44(%esi), %eax
- adcl -40(%esi), %eax
- adcl -36(%esi), %eax
- adcl -32(%esi), %eax
- adcl -28(%esi), %eax
- adcl -24(%esi), %eax
- adcl -20(%esi), %eax
- adcl -16(%esi), %eax
- adcl -12(%esi), %eax
- adcl -8(%esi), %eax
- adcl -4(%esi), %eax
-45:
- lea 128(%esi), %esi
- adcl $0, %eax
- dec %ecx
- jge 40b
- movl %edx, %ecx
-50: andl $3, %ecx
- jz 80f
-
- # Handle the last 1-3 bytes without jumping
- notl %ecx # 1->2, 2->1, 3->0, higher bits are masked
- movl $0xffffff,%ebx # by the shll and shrl instructions
- shll $3,%ecx
- shrl %cl,%ebx
- andl -128(%esi),%ebx # esi is 4-aligned so should be ok
- addl %ebx,%eax
- adcl $0,%eax
-80:
- popl %ebx
- popl %esi
- RET
-
-#endif
- EXPORT_SYMBOL(csum_partial)
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index a5488cc40f58..7d792077e5fd 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -71,6 +71,9 @@ config ARCH_HAS_ILOG2_U32
config ARCH_HAS_ILOG2_U64
def_bool n
+config ARCH_MTD_XIP
+ def_bool y
+
config NO_IOPORT_MAP
def_bool n
diff --git a/arch/xtensa/include/asm/core.h b/arch/xtensa/include/asm/core.h
index 0e1bb6f019d6..3f5ffae89b58 100644
--- a/arch/xtensa/include/asm/core.h
+++ b/arch/xtensa/include/asm/core.h
@@ -52,4 +52,13 @@
#define XTENSA_STACK_ALIGNMENT 16
#endif
+#ifndef XCHAL_HW_MIN_VERSION
+#if defined(XCHAL_HW_MIN_VERSION_MAJOR) && defined(XCHAL_HW_MIN_VERSION_MINOR)
+#define XCHAL_HW_MIN_VERSION (XCHAL_HW_MIN_VERSION_MAJOR * 100 + \
+ XCHAL_HW_MIN_VERSION_MINOR)
+#else
+#define XCHAL_HW_MIN_VERSION 0
+#endif
+#endif
+
#endif
diff --git a/arch/xtensa/include/asm/mtd-xip.h b/arch/xtensa/include/asm/mtd-xip.h
new file mode 100644
index 000000000000..514325155cf8
--- /dev/null
+++ b/arch/xtensa/include/asm/mtd-xip.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_MTD_XIP_H
+#define _ASM_MTD_XIP_H
+
+#include <asm/processor.h>
+
+#define xip_irqpending() (xtensa_get_sr(interrupt) & xtensa_get_sr(intenable))
+#define xip_currtime() (xtensa_get_sr(ccount))
+#define xip_elapsed_since(x) ((xtensa_get_sr(ccount) - (x)) / 1000) /* should work up to 1GHz */
+#define xip_cpu_idle() do { asm volatile ("waiti 0"); } while (0)
+
+#endif /* _ASM_MTD_XIP_H */
+
diff --git a/arch/xtensa/include/asm/sections.h b/arch/xtensa/include/asm/sections.h
index 3bc6b9afa993..e5da6d7092be 100644
--- a/arch/xtensa/include/asm/sections.h
+++ b/arch/xtensa/include/asm/sections.h
@@ -34,6 +34,10 @@ extern char _SecondaryResetVector_text_start[];
extern char _SecondaryResetVector_text_end[];
#endif
#ifdef CONFIG_XIP_KERNEL
+#ifdef CONFIG_VECTORS_ADDR
+extern char _xip_text_start[];
+extern char _xip_text_end[];
+#endif
extern char _xip_start[];
extern char _xip_end[];
#endif
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index a0d05c8598d0..183618090d05 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -13,17 +13,26 @@
#include <linux/perf_event.h>
#include <linux/platform_device.h>
+#include <asm/core.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>
+#define XTENSA_HWVERSION_RG_2015_0 260000
+
+#if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RG_2015_0
+#define XTENSA_PMU_ERI_BASE 0x00101000
+#else
+#define XTENSA_PMU_ERI_BASE 0x00001000
+#endif
+
/* Global control/status for all perf counters */
-#define XTENSA_PMU_PMG 0x1000
+#define XTENSA_PMU_PMG XTENSA_PMU_ERI_BASE
/* Perf counter values */
-#define XTENSA_PMU_PM(i) (0x1080 + (i) * 4)
+#define XTENSA_PMU_PM(i) (XTENSA_PMU_ERI_BASE + 0x80 + (i) * 4)
/* Perf counter control registers */
-#define XTENSA_PMU_PMCTRL(i) (0x1100 + (i) * 4)
+#define XTENSA_PMU_PMCTRL(i) (XTENSA_PMU_ERI_BASE + 0x100 + (i) * 4)
/* Perf counter status registers */
-#define XTENSA_PMU_PMSTAT(i) (0x1180 + (i) * 4)
+#define XTENSA_PMU_PMSTAT(i) (XTENSA_PMU_ERI_BASE + 0x180 + (i) * 4)
#define XTENSA_PMU_PMG_PMEN 0x1
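
[Editorial note, not part of the patch] Worked example of the version arithmetic above: core.h (earlier in this series) composes XCHAL_HW_MIN_VERSION as major * 100 + minor, and the PMU code compares it against 260000 (RG-2015.0) to pick the ERI region base. With illustrative configuration values:

/* Example values only; the real ones come from the core's variant headers. */
#define XCHAL_HW_MIN_VERSION_MAJOR	2600
#define XCHAL_HW_MIN_VERSION_MINOR	4

#define XCHAL_HW_MIN_VERSION (XCHAL_HW_MIN_VERSION_MAJOR * 100 + \
			      XCHAL_HW_MIN_VERSION_MINOR)	/* 260004 */

#define XTENSA_HWVERSION_RG_2015_0	260000

#if XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RG_2015_0
#define XTENSA_PMU_ERI_BASE	0x00101000	/* 260004 >= 260000: newer layout */
#else
#define XTENSA_PMU_ERI_BASE	0x00001000	/* older cores */
#endif

/* Register offsets are then built from the base, e.g. counter 2: */
#define XTENSA_PMU_PM(i)	(XTENSA_PMU_ERI_BASE + 0x80 + (i) * 4)
/* XTENSA_PMU_PM(2) == 0x00101088 with the example values above. */
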
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index aba3ff4e60d8..52d6e4870a04 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -311,6 +311,9 @@ void __init setup_arch(char **cmdline_p)
mem_reserve(__pa(_stext), __pa(_end));
#ifdef CONFIG_XIP_KERNEL
+#ifdef CONFIG_VECTORS_ADDR
+ mem_reserve(__pa(_xip_text_start), __pa(_xip_text_end));
+#endif
mem_reserve(__pa(_xip_start), __pa(_xip_end));
#endif
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index c14fd96f459d..f47e9bbbd291 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -118,6 +118,7 @@ SECTIONS
SECTION_VECTOR2 (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
*(.exception.text)
+ *(.xiptext)
#endif
IRQENTRY_TEXT
@@ -201,6 +202,9 @@ SECTIONS
.DebugInterruptVector.text);
RELOCATE_ENTRY(_exception_text,
.exception.text);
+#ifdef CONFIG_XIP_KERNEL
+ RELOCATE_ENTRY(_xip_text, .xiptext);
+#endif
#endif
#ifdef CONFIG_XIP_KERNEL
RELOCATE_ENTRY(_xip_data, .data);
@@ -319,7 +323,12 @@ SECTIONS
LAST)
#undef LAST
#define LAST .exception.text
-
+ SECTION_VECTOR4 (_xip_text,
+ .xiptext,
+ ,
+ LAST)
+#undef LAST
+#define LAST .xiptext
#endif
. = (LOADADDR(LAST) + SIZEOF(LAST) + 3) & ~ 3;
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index 10b79d3c74e0..7d1f8b398a46 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -52,8 +52,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
}
-static int rs_write(struct tty_struct * tty,
- const unsigned char *buf, int count)
+static ssize_t rs_write(struct tty_struct * tty, const u8 *buf, size_t count)
{
/* see drivers/char/serialX.c to reference original version */
@@ -82,32 +81,12 @@ static void rs_poll(struct timer_list *unused)
mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
}
-
-static int rs_put_char(struct tty_struct *tty, unsigned char ch)
-{
- return rs_write(tty, &ch, 1);
-}
-
-static void rs_flush_chars(struct tty_struct *tty)
-{
-}
-
static unsigned int rs_write_room(struct tty_struct *tty)
{
/* Let's say iss can always accept 2K characters.. */
return 2 * 1024;
}
-static void rs_hangup(struct tty_struct *tty)
-{
- /* Stub, once again.. */
-}
-
-static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
-{
- /* Stub, once again.. */
-}
-
static int rs_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "serinfo:1.0 driver:0.1\n");
@@ -118,11 +97,7 @@ static const struct tty_operations serial_ops = {
.open = rs_open,
.close = rs_close,
.write = rs_write,
- .put_char = rs_put_char,
- .flush_chars = rs_flush_chars,
.write_room = rs_write_room,
- .hangup = rs_hangup,
- .wait_until_sent = rs_wait_until_sent,
.proc_show = rs_proc_show,
};
diff --git a/block/blk-map.c b/block/blk-map.c
index 44d74a30ddac..8584babf3ea0 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -315,12 +315,11 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
n = bytes;
if (!bio_add_hw_page(rq->q, bio, page, n, offs,
- max_sectors, &same_page)) {
- if (same_page)
- bio_release_page(bio, page);
+ max_sectors, &same_page))
break;
- }
+ if (same_page)
+ bio_release_page(bio, page);
bytes -= n;
offs = 0;
}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 7397ff199d66..38a881cf97d0 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -697,11 +697,41 @@ static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
return true;
}
+static unsigned int calculate_io_allowed(u32 iops_limit,
+ unsigned long jiffy_elapsed)
+{
+ unsigned int io_allowed;
+ u64 tmp;
+
+ /*
+ * jiffy_elapsed should not be a big value as minimum iops can be
+ * 1 then at max jiffy elapsed should be equivalent of 1 second as we
+ * will allow dispatch after 1 second and after that slice should
+ * have been trimmed.
+ */
+
+ tmp = (u64)iops_limit * jiffy_elapsed;
+ do_div(tmp, HZ);
+
+ if (tmp > UINT_MAX)
+ io_allowed = UINT_MAX;
+ else
+ io_allowed = tmp;
+
+ return io_allowed;
+}
+
+static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
+{
+ return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
+}
+
/* Trim the used slices and adjust slice start accordingly */
static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
{
- unsigned long nr_slices, time_elapsed, io_trim;
- u64 bytes_trim, tmp;
+ unsigned long time_elapsed;
+ long long bytes_trim;
+ int io_trim;
BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
@@ -723,67 +753,38 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
- time_elapsed = jiffies - tg->slice_start[rw];
-
- nr_slices = time_elapsed / tg->td->throtl_slice;
-
- if (!nr_slices)
+ time_elapsed = rounddown(jiffies - tg->slice_start[rw],
+ tg->td->throtl_slice);
+ if (!time_elapsed)
return;
- tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
- do_div(tmp, HZ);
- bytes_trim = tmp;
- io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
- HZ;
-
- if (!bytes_trim && !io_trim)
+ bytes_trim = calculate_bytes_allowed(tg_bps_limit(tg, rw),
+ time_elapsed) +
+ tg->carryover_bytes[rw];
+ io_trim = calculate_io_allowed(tg_iops_limit(tg, rw), time_elapsed) +
+ tg->carryover_ios[rw];
+ if (bytes_trim <= 0 && io_trim <= 0)
return;
- if (tg->bytes_disp[rw] >= bytes_trim)
+ tg->carryover_bytes[rw] = 0;
+ if ((long long)tg->bytes_disp[rw] >= bytes_trim)
tg->bytes_disp[rw] -= bytes_trim;
else
tg->bytes_disp[rw] = 0;
- if (tg->io_disp[rw] >= io_trim)
+ tg->carryover_ios[rw] = 0;
+ if ((int)tg->io_disp[rw] >= io_trim)
tg->io_disp[rw] -= io_trim;
else
tg->io_disp[rw] = 0;
- tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
+ tg->slice_start[rw] += time_elapsed;
throtl_log(&tg->service_queue,
- "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
- rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
- tg->slice_start[rw], tg->slice_end[rw], jiffies);
-}
-
-static unsigned int calculate_io_allowed(u32 iops_limit,
- unsigned long jiffy_elapsed)
-{
- unsigned int io_allowed;
- u64 tmp;
-
- /*
- * jiffy_elapsed should not be a big value as minimum iops can be
- * 1 then at max jiffy elapsed should be equivalent of 1 second as we
- * will allow dispatch after 1 second and after that slice should
- * have been trimmed.
- */
-
- tmp = (u64)iops_limit * jiffy_elapsed;
- do_div(tmp, HZ);
-
- if (tmp > UINT_MAX)
- io_allowed = UINT_MAX;
- else
- io_allowed = tmp;
-
- return io_allowed;
-}
-
-static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
-{
- return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
+ "[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu",
+ rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice,
+ bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw],
+ jiffies);
}
static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
@@ -816,7 +817,7 @@ static void tg_update_carryover(struct throtl_grp *tg)
__tg_update_carryover(tg, WRITE);
/* see comments in struct throtl_grp for meaning of these fields. */
- throtl_log(&tg->service_queue, "%s: %llu %llu %u %u\n", __func__,
+ throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n", __func__,
tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
}
@@ -825,7 +826,7 @@ static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio
u32 iops_limit)
{
bool rw = bio_data_dir(bio);
- unsigned int io_allowed;
+ int io_allowed;
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
if (iops_limit == UINT_MAX) {
@@ -838,9 +839,8 @@ static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio
jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd) +
tg->carryover_ios[rw];
- if (tg->io_disp[rw] + 1 <= io_allowed) {
+ if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed)
return 0;
- }
/* Calc approx time to dispatch */
jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
@@ -851,7 +851,8 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
u64 bps_limit)
{
bool rw = bio_data_dir(bio);
- u64 bytes_allowed, extra_bytes;
+ long long bytes_allowed;
+ u64 extra_bytes;
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
unsigned int bio_size = throtl_bio_data_size(bio);
@@ -869,9 +870,8 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd) +
tg->carryover_bytes[rw];
- if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
+ if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed)
return 0;
- }
/* Calc approx time to dispatch */
extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
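
[Editorial note, not part of the patch] The calculate_io_allowed()/calculate_bytes_allowed() helpers above turn a rate limit and an elapsed time in jiffies into a dispatch budget (limit * elapsed / HZ), and the trim/carryover users switch to signed types so a negative carryover can shrink that budget. A standalone sketch of the arithmetic, with HZ hard-coded to 1000 for the example:

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_HZ 1000

static unsigned int example_io_allowed(uint32_t iops_limit,
				       unsigned long jiffy_elapsed)
{
	uint64_t tmp = (uint64_t)iops_limit * jiffy_elapsed / EXAMPLE_HZ;

	return tmp > UINT32_MAX ? UINT32_MAX : (unsigned int)tmp;
}

static uint64_t example_bytes_allowed(uint64_t bps_limit,
				      unsigned long jiffy_elapsed)
{
	/* the kernel uses mul_u64_u64_div_u64() here to avoid overflow */
	return bps_limit * jiffy_elapsed / EXAMPLE_HZ;
}

int main(void)
{
	/* 100 IOPS over half a second of elapsed jiffies -> 50 IOs allowed */
	printf("%u\n", example_io_allowed(100, EXAMPLE_HZ / 2));
	/* 1 MiB/s over 250 ms -> 262144 bytes allowed */
	printf("%llu\n",
	       (unsigned long long)example_bytes_allowed(1048576, EXAMPLE_HZ / 4));
	return 0;
}
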
diff --git a/block/blk-throttle.h b/block/blk-throttle.h
index d1ccbfe9f797..bffbc9cfc8ab 100644
--- a/block/blk-throttle.h
+++ b/block/blk-throttle.h
@@ -127,8 +127,8 @@ struct throtl_grp {
* bytes/ios are waited already in previous configuration, and they will
* be used to calculate wait time under new configuration.
*/
- uint64_t carryover_bytes[2];
- unsigned int carryover_ios[2];
+ long long carryover_bytes[2];
+ int carryover_ios[2];
unsigned long last_check_time;
diff --git a/block/fops.c b/block/fops.c
index a24a624d3bf7..acff3d5d22d4 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -671,10 +671,6 @@ static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
iov_iter_truncate(from, size);
}
- ret = file_remove_privs(file);
- if (ret)
- return ret;
-
ret = file_update_time(file);
if (ret)
return ret;
diff --git a/block/ioctl.c b/block/ioctl.c
index 648670ddb164..d5f5cd61efd7 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -20,6 +20,8 @@ static int blkpg_do_ioctl(struct block_device *bdev,
struct blkpg_partition p;
long long start, length;
+ if (disk->flags & GENHD_FL_NO_PART)
+ return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
index 831bfd2b2d39..bdddef2c59ee 100644
--- a/drivers/accel/ivpu/ivpu_jsm_msg.c
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
@@ -118,8 +118,7 @@ int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size
struct vpu_jsm_msg resp;
int ret;
- if (!strncpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN - 1))
- return -ENOMEM;
+ strscpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN);
ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
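
[Editorial note, not part of the patch] The hunk above removes a check on strncpy()'s return value — strncpy() returns its destination pointer, never NULL, and may leave the buffer unterminated — and switches to strscpy(), which always NUL-terminates and reports truncation. A kernel-context fragment showing the usual idiom (assumes kernel headers; whether truncation is treated as an error is the caller's choice):

	char cmd[VPU_DYNDBG_CMD_MAX_LEN];
	ssize_t len;

	len = strscpy(cmd, command, sizeof(cmd));	/* copies at most size-1 chars */
	if (len == -E2BIG)
		pr_warn("dyndbg command truncated\n");	/* source did not fit */
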
diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
index 07373b3debd1..4c0a6e1f019d 100644
--- a/drivers/accessibility/speakup/spk_ttyio.c
+++ b/drivers/accessibility/speakup/spk_ttyio.c
@@ -71,15 +71,14 @@ static void spk_ttyio_ldisc_close(struct tty_struct *tty)
kfree(tty->disc_data);
}
-static int spk_ttyio_receive_buf2(struct tty_struct *tty,
- const unsigned char *cp,
- const char *fp, int count)
+static size_t spk_ttyio_receive_buf2(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count)
{
struct spk_ldisc_data *ldisc_data = tty->disc_data;
struct spk_synth *synth = ldisc_data->synth;
if (synth->read_buff_add) {
- int i;
+ unsigned int i;
for (i = 0; i < count; i++)
synth->read_buff_add(cp[i]);
diff --git a/drivers/acpi/arm64/amba.c b/drivers/acpi/arm64/amba.c
index b2a7631d7ac7..60be8ee1dbdc 100644
--- a/drivers/acpi/arm64/amba.c
+++ b/drivers/acpi/arm64/amba.c
@@ -22,7 +22,6 @@
static const struct acpi_device_id amba_id_list[] = {
{"ARMH0061", 0}, /* PL061 GPIO Device */
{"ARMH0330", 0}, /* ARM DMA Controller DMA-330 */
- {"ARMHC500", 0}, /* ARM CoreSight ETM4x */
{"ARMHC501", 0}, /* ARM CoreSight ETR */
{"ARMHC502", 0}, /* ARM CoreSight STM */
{"ARMHC503", 0}, /* ARM CoreSight Debug */
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 531a9e3df717..691d4b7686ee 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1584,7 +1584,7 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
* If we have reason to believe the IOMMU driver missed the initial
* iommu_probe_device() call for dev, replay it to get things in order.
*/
- if (!err && dev->bus && !device_iommu_mapped(dev))
+ if (!err && dev->bus)
err = iommu_probe_device(dev);
/* Ignore all other errors apart from EPROBE_DEFER */
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 419590f41ed5..f14e68266ccd 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -492,26 +492,22 @@ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
}
static int thermal_get_trend(struct thermal_zone_device *thermal,
- int trip_index, enum thermal_trend *trend)
+ struct thermal_trip *trip,
+ enum thermal_trend *trend)
{
struct acpi_thermal *tz = thermal_zone_device_priv(thermal);
struct acpi_thermal_trip *acpi_trip;
- int t, i;
+ int t;
- if (!tz || trip_index < 0)
+ if (!tz || !trip)
return -EINVAL;
- if (tz->trips.critical.valid)
- trip_index--;
-
- if (tz->trips.hot.valid)
- trip_index--;
-
- if (trip_index < 0)
+ acpi_trip = trip->priv;
+ if (!acpi_trip || !acpi_trip->valid)
return -EINVAL;
- acpi_trip = &tz->trips.passive.trip;
- if (acpi_trip->valid && !trip_index--) {
+ switch (trip->type) {
+ case THERMAL_TRIP_PASSIVE:
t = tz->trips.passive.tc1 * (tz->temperature -
tz->last_temperature) +
tz->trips.passive.tc2 * (tz->temperature -
@@ -524,19 +520,18 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
*trend = THERMAL_TREND_STABLE;
return 0;
- }
-
- t = acpi_thermal_temp(tz, tz->temperature);
- for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
- acpi_trip = &tz->trips.active[i].trip;
- if (acpi_trip->valid && !trip_index--) {
- if (t > acpi_thermal_temp(tz, acpi_trip->temperature)) {
- *trend = THERMAL_TREND_RAISING;
- return 0;
- }
+ case THERMAL_TRIP_ACTIVE:
+ t = acpi_thermal_temp(tz, tz->temperature);
+ if (t <= trip->temperature)
break;
- }
+
+ *trend = THERMAL_TREND_RAISING;
+
+ return 0;
+
+ default:
+ break;
}
return -EINVAL;
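
[Editorial note, not part of the patch] The passive branch above implements the standard ACPI passive-cooling trend estimate, t = _TC1 * (Tn - Tn-1) + _TC2 * (Tn - Tpassive), with the sign of t deciding whether the zone is heating, cooling or stable. A standalone sketch of that formula (illustrative names, temperatures in the same units throughout):

enum trend { TREND_RAISING, TREND_DROPPING, TREND_STABLE };

static enum trend passive_trend(int tc1, int tc2, int temp, int last_temp,
				int trip_temp)
{
	int t = tc1 * (temp - last_temp) + tc2 * (temp - trip_temp);

	if (t > 0)
		return TREND_RAISING;
	if (t < 0)
		return TREND_DROPPING;
	return TREND_STABLE;
}
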
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index ce88af9eb562..09e72967b8ab 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -528,6 +528,7 @@ static void amba_device_release(struct device *dev)
{
struct amba_device *d = to_amba_device(dev);
+ of_node_put(d->dev.of_node);
if (d->res.parent)
release_resource(&d->res);
mutex_destroy(&d->periphid_lock);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index d720f93d8b19..367afac5f1bf 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -6557,6 +6557,7 @@ static int __init binder_init(void)
struct binder_device *device;
struct hlist_node *tmp;
char *device_names = NULL;
+ const struct binder_debugfs_entry *db_entry;
ret = binder_alloc_shrinker_init();
if (ret)
@@ -6566,19 +6567,16 @@ static int __init binder_init(void)
atomic_set(&binder_transaction_log_failed.cur, ~0U);
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
- if (binder_debugfs_dir_entry_root) {
- const struct binder_debugfs_entry *db_entry;
- binder_for_each_debugfs_entry(db_entry)
- debugfs_create_file(db_entry->name,
- db_entry->mode,
- binder_debugfs_dir_entry_root,
- db_entry->data,
- db_entry->fops);
+ binder_for_each_debugfs_entry(db_entry)
+ debugfs_create_file(db_entry->name,
+ db_entry->mode,
+ binder_debugfs_dir_entry_root,
+ db_entry->data,
+ db_entry->fops);
- binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
- binder_debugfs_dir_entry_root);
- }
+ binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
+ binder_debugfs_dir_entry_root);
if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
strcmp(binder_devices_param, "") != 0) {
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index faebe9f5412a..81effec17b3d 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -19,7 +19,6 @@
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/fs_parser.h>
-#include <linux/radix-tree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index addba109406b..abb5911c9d09 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -421,6 +421,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
{ PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
+ /* Elkhart Lake IDs 0x4b60 & 0x4b62 https://sata-io.org/product/8803 not tested yet */
+ { PCI_VDEVICE(INTEL, 0x4b63), board_ahci_low_power }, /* Elkhart Lake AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -807,7 +809,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
- const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index c2b6be083af4..64f7f7d6ba84 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -10,7 +10,7 @@
#include <linux/kernel.h>
#include <linux/libata.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include "ahci.h"
diff --git a/drivers/ata/ahci_dwc.c b/drivers/ata/ahci_dwc.c
index 9604a2f6ed48..ed263de3fd70 100644
--- a/drivers/ata/ahci_dwc.c
+++ b/drivers/ata/ahci_dwc.c
@@ -15,7 +15,7 @@
#include <linux/log2.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regmap.h>
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
index 5083fb6c4927..adc851cd5578 100644
--- a/drivers/ata/ahci_mtk.c
+++ b/drivers/ata/ahci_mtk.c
@@ -11,6 +11,7 @@
#include <linux/libata.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regmap.h>
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 764501518582..f3187351e8a6 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include "ahci.h"
diff --git a/drivers/ata/ahci_octeon.c b/drivers/ata/ahci_octeon.c
index e89807fa928e..9accf8923891 100644
--- a/drivers/ata/ahci_octeon.c
+++ b/drivers/ata/ahci_octeon.c
@@ -31,13 +31,11 @@ static int ahci_octeon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
- struct resource *res;
void __iomem *base;
u64 cfg;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 3d01b118c9a1..b1a4e57578e2 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -12,9 +12,7 @@
#include <linux/pm.h>
#include <linux/ahci_platform.h>
#include <linux/device.h>
-#include <linux/of_address.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include "ahci.h"
@@ -90,7 +88,7 @@ MODULE_DEVICE_TABLE(acpi, ahci_qoriq_acpi_match);
static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
- const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
void __iomem *port_mmio = ahci_port_base(link->ap);
u32 px_cmd, px_is, px_val;
struct ata_port *ap = link->ap;
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
index 2c32d58c6ae7..59f97aa7ac75 100644
--- a/drivers/ata/ahci_seattle.c
+++ b/drivers/ata/ahci_seattle.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/device.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
#include <linux/ahci_platform.h>
@@ -132,8 +131,7 @@ static const struct ata_port_info *ahci_seattle_get_port_info(
if (!plat_data)
return &ahci_port_info;
- plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 1));
+ plat_data->sgpio_ctrl = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(plat_data->sgpio_ctrl))
return &ahci_port_info;
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index 04531fa95e40..58b2683954dd 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -13,8 +13,8 @@
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include "ahci.h"
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
index 21c20793e517..8703c2a4658b 100644
--- a/drivers/ata/ahci_tegra.c
+++ b/drivers/ata/ahci_tegra.c
@@ -12,7 +12,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -530,8 +530,7 @@ static int tegra_ahci_probe(struct platform_device *pdev)
tegra->pdev = pdev;
tegra->soc = of_device_get_match_data(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- tegra->sata_regs = devm_ioremap_resource(&pdev->dev, res);
+ tegra->sata_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(tegra->sata_regs))
return PTR_ERR(tegra->sata_regs);
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index eb773f2e28fc..ccef5e63bdf9 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -110,9 +110,8 @@ static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
* @timeout : timeout for achieving the value.
*/
static int xgene_ahci_poll_reg_val(struct ata_port *ap,
- void __iomem *reg, unsigned
- int val, unsigned long interval,
- unsigned long timeout)
+ void __iomem *reg, unsigned int val,
+ unsigned int interval, unsigned int timeout)
{
unsigned long deadline;
unsigned int tmp;
@@ -350,7 +349,7 @@ static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
static int xgene_ahci_do_hardreset(struct ata_link *link,
unsigned long deadline, bool *online)
{
- const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
struct ata_port *ap = link->ap;
struct ahci_host_priv *hpriv = ap->host->private_data;
struct xgene_ahci_context *ctx = hpriv->plat_data;
@@ -755,20 +754,17 @@ static int xgene_ahci_probe(struct platform_device *pdev)
ctx->dev = dev;
/* Retrieve the IP core resource */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ctx->csr_core = devm_ioremap_resource(dev, res);
+ ctx->csr_core = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctx->csr_core))
return PTR_ERR(ctx->csr_core);
/* Retrieve the IP diagnostic resource */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- ctx->csr_diag = devm_ioremap_resource(dev, res);
+ ctx->csr_diag = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(ctx->csr_diag))
return PTR_ERR(ctx->csr_diag);
/* Retrieve the IP AXI resource */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- ctx->csr_axi = devm_ioremap_resource(dev, res);
+ ctx->csr_axi = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(ctx->csr_axi))
return PTR_ERR(ctx->csr_axi);
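
[Editorial note, not part of the patch] The ahci_octeon, ahci_seattle, ahci_tegra and ahci_xgene conversions above all collapse the same two-step lookup-then-map pattern into one helper. Kernel-context fragment of the before/after shape:

	/* Before: look up the MEM resource, then request and ioremap it. */
	res  = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	base = devm_ioremap_resource(&pdev->dev, res);

	/* After: one call does both and returns an ERR_PTR on failure,
	 * including when the resource simply is not there. */
	base = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(base))
		return PTR_ERR(base);
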
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 06aec35f88f2..e2bacedf28ef 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1403,7 +1403,7 @@ EXPORT_SYMBOL_GPL(ahci_kick_engine);
static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
struct ata_taskfile *tf, int is_cmd, u16 flags,
- unsigned long timeout_msec)
+ unsigned int timeout_msec)
{
const u32 cmd_fis_len = 5; /* five dwords */
struct ahci_port_priv *pp = ap->private_data;
@@ -1448,7 +1448,8 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
const char *reason = NULL;
- unsigned long now, msecs;
+ unsigned long now;
+ unsigned int msecs;
struct ata_taskfile tf;
bool fbs_disabled = false;
int rc;
@@ -1587,7 +1588,7 @@ static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline, bool *online)
{
- const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 9a8d43f54adc..581704e61f28 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -20,6 +20,7 @@
#include <linux/ahci_platform.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/reset.h>
#include "ahci.h"
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 04db0f2c683a..74314311295f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1586,13 +1586,11 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
}
}
- if (ap->ops->error_handler)
- ata_eh_release(ap);
+ ata_eh_release(ap);
rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
- if (ap->ops->error_handler)
- ata_eh_acquire(ap);
+ ata_eh_acquire(ap);
ata_sff_flush_pio_task(ap);
@@ -1607,10 +1605,7 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
if (qc->flags & ATA_QCFLAG_ACTIVE) {
qc->err_mask |= AC_ERR_TIMEOUT;
- if (ap->ops->error_handler)
- ata_port_freeze(ap);
- else
- ata_qc_complete(qc);
+ ata_port_freeze(ap);
ata_dev_warn(dev, "qc timeout after %u msecs (cmd 0x%x)\n",
timeout, command);
@@ -3063,144 +3058,6 @@ int ata_cable_sata(struct ata_port *ap)
EXPORT_SYMBOL_GPL(ata_cable_sata);
/**
- * ata_bus_probe - Reset and probe ATA bus
- * @ap: Bus to probe
- *
- * Master ATA bus probing function. Initiates a hardware-dependent
- * bus reset, then attempts to identify any devices found on
- * the bus.
- *
- * LOCKING:
- * PCI/etc. bus probe sem.
- *
- * RETURNS:
- * Zero on success, negative errno otherwise.
- */
-
-int ata_bus_probe(struct ata_port *ap)
-{
- unsigned int classes[ATA_MAX_DEVICES];
- int tries[ATA_MAX_DEVICES];
- int rc;
- struct ata_device *dev;
-
- ata_for_each_dev(dev, &ap->link, ALL)
- tries[dev->devno] = ATA_PROBE_MAX_TRIES;
-
- retry:
- ata_for_each_dev(dev, &ap->link, ALL) {
- /* If we issue an SRST then an ATA drive (not ATAPI)
- * may change configuration and be in PIO0 timing. If
- * we do a hard reset (or are coming from power on)
- * this is true for ATA or ATAPI. Until we've set a
- * suitable controller mode we should not touch the
- * bus as we may be talking too fast.
- */
- dev->pio_mode = XFER_PIO_0;
- dev->dma_mode = 0xff;
-
- /* If the controller has a pio mode setup function
- * then use it to set the chipset to rights. Don't
- * touch the DMA setup as that will be dealt with when
- * configuring devices.
- */
- if (ap->ops->set_piomode)
- ap->ops->set_piomode(ap, dev);
- }
-
- /* reset and determine device classes */
- ap->ops->phy_reset(ap);
-
- ata_for_each_dev(dev, &ap->link, ALL) {
- if (dev->class != ATA_DEV_UNKNOWN)
- classes[dev->devno] = dev->class;
- else
- classes[dev->devno] = ATA_DEV_NONE;
-
- dev->class = ATA_DEV_UNKNOWN;
- }
-
- /* read IDENTIFY page and configure devices. We have to do the identify
- specific sequence bass-ackwards so that PDIAG- is released by
- the slave device */
-
- ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
- if (tries[dev->devno])
- dev->class = classes[dev->devno];
-
- if (!ata_dev_enabled(dev))
- continue;
-
- rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
- dev->id);
- if (rc)
- goto fail;
- }
-
- /* Now ask for the cable type as PDIAG- should have been released */
- if (ap->ops->cable_detect)
- ap->cbl = ap->ops->cable_detect(ap);
-
- /* We may have SATA bridge glue hiding here irrespective of
- * the reported cable types and sensed types. When SATA
- * drives indicate we have a bridge, we don't know which end
- * of the link the bridge is which is a problem.
- */
- ata_for_each_dev(dev, &ap->link, ENABLED)
- if (ata_id_is_sata(dev->id))
- ap->cbl = ATA_CBL_SATA;
-
- /* After the identify sequence we can now set up the devices. We do
- this in the normal order so that the user doesn't get confused */
-
- ata_for_each_dev(dev, &ap->link, ENABLED) {
- ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
- rc = ata_dev_configure(dev);
- ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
- if (rc)
- goto fail;
- }
-
- /* configure transfer mode */
- rc = ata_set_mode(&ap->link, &dev);
- if (rc)
- goto fail;
-
- ata_for_each_dev(dev, &ap->link, ENABLED)
- return 0;
-
- return -ENODEV;
-
- fail:
- tries[dev->devno]--;
-
- switch (rc) {
- case -EINVAL:
- /* eeek, something went very wrong, give up */
- tries[dev->devno] = 0;
- break;
-
- case -ENODEV:
- /* give it just one more chance */
- tries[dev->devno] = min(tries[dev->devno], 1);
- fallthrough;
- case -EIO:
- if (tries[dev->devno] == 1) {
- /* This is the last chance, better to slow
- * down than lose it.
- */
- sata_down_spd_limit(&ap->link, 0);
- ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
- }
- }
-
- if (!tries[dev->devno])
- ata_dev_disable(dev);
-
- goto retry;
-}
-
-/**
* sata_print_link_status - Print SATA link status
* @link: SATA link to printk link status about
*
@@ -3782,7 +3639,7 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
- const unsigned long *timing = sata_ehc_deb_timing(ehc);
+ const unsigned int *timing = sata_ehc_deb_timing(ehc);
int rc;
/* if we're about to do hardreset, nothing more to do */
@@ -3824,7 +3681,7 @@ EXPORT_SYMBOL_GPL(ata_std_prereset);
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
- const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
bool online;
int rc;
@@ -4213,10 +4070,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
- { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
- ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
+ { "Micron_1100_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
{ "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
@@ -4874,126 +4733,103 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc)
void ata_qc_complete(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
+ struct ata_device *dev = qc->dev;
+ struct ata_eh_info *ehi = &dev->link->eh_info;
/* Trigger the LED (if available) */
ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
- /* XXX: New EH and old EH use different mechanisms to
- * synchronize EH with regular execution path.
- *
- * In new EH, a qc owned by EH is marked with ATA_QCFLAG_EH.
- * Normal execution path is responsible for not accessing a
- * qc owned by EH. libata core enforces the rule by returning NULL
- * from ata_qc_from_tag() for qcs owned by EH.
+ /*
+ * In order to synchronize EH with the regular execution path, a qc that
+ * is owned by EH is marked with ATA_QCFLAG_EH.
*
- * Old EH depends on ata_qc_complete() nullifying completion
- * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
- * not synchronize with interrupt handler. Only PIO task is
- * taken care of.
+ * The normal execution path is responsible for not accessing a qc owned
+ * by EH. libata core enforces the rule by returning NULL from
+ * ata_qc_from_tag() for qcs owned by EH.
*/
- if (ap->ops->error_handler) {
- struct ata_device *dev = qc->dev;
- struct ata_eh_info *ehi = &dev->link->eh_info;
-
- if (unlikely(qc->err_mask))
- qc->flags |= ATA_QCFLAG_EH;
+ if (unlikely(qc->err_mask))
+ qc->flags |= ATA_QCFLAG_EH;
- /*
- * Finish internal commands without any further processing
- * and always with the result TF filled.
- */
- if (unlikely(ata_tag_internal(qc->tag))) {
- fill_result_tf(qc);
- trace_ata_qc_complete_internal(qc);
- __ata_qc_complete(qc);
- return;
- }
+ /*
+ * Finish internal commands without any further processing and always
+ * with the result TF filled.
+ */
+ if (unlikely(ata_tag_internal(qc->tag))) {
+ fill_result_tf(qc);
+ trace_ata_qc_complete_internal(qc);
+ __ata_qc_complete(qc);
+ return;
+ }
- /*
- * Non-internal qc has failed. Fill the result TF and
- * summon EH.
- */
- if (unlikely(qc->flags & ATA_QCFLAG_EH)) {
- fill_result_tf(qc);
- trace_ata_qc_complete_failed(qc);
- ata_qc_schedule_eh(qc);
- return;
- }
+ /* Non-internal qc has failed. Fill the result TF and summon EH. */
+ if (unlikely(qc->flags & ATA_QCFLAG_EH)) {
+ fill_result_tf(qc);
+ trace_ata_qc_complete_failed(qc);
+ ata_qc_schedule_eh(qc);
+ return;
+ }
- WARN_ON_ONCE(ata_port_is_frozen(ap));
+ WARN_ON_ONCE(ata_port_is_frozen(ap));
- /* read result TF if requested */
- if (qc->flags & ATA_QCFLAG_RESULT_TF)
- fill_result_tf(qc);
+ /* read result TF if requested */
+ if (qc->flags & ATA_QCFLAG_RESULT_TF)
+ fill_result_tf(qc);
- trace_ata_qc_complete_done(qc);
+ trace_ata_qc_complete_done(qc);
+ /*
+ * For CDL commands that completed without an error, check if we have
+ * sense data (ATA_SENSE is set). If we do, then the command may have
+ * been aborted by the device due to a limit timeout using the policy
+ * 0xD. For these commands, invoke EH to get the command sense data.
+ */
+ if (qc->result_tf.status & ATA_SENSE &&
+ ((ata_is_ncq(qc->tf.protocol) &&
+ dev->flags & ATA_DFLAG_CDL_ENABLED) ||
+ (!ata_is_ncq(qc->tf.protocol) &&
+ ata_id_sense_reporting_enabled(dev->id)))) {
/*
- * For CDL commands that completed without an error, check if
- * we have sense data (ATA_SENSE is set). If we do, then the
- * command may have been aborted by the device due to a limit
- * timeout using the policy 0xD. For these commands, invoke EH
- * to get the command sense data.
+ * Tell SCSI EH to not overwrite scmd->result even if this
+ * command is finished with result SAM_STAT_GOOD.
*/
- if (qc->result_tf.status & ATA_SENSE &&
- ((ata_is_ncq(qc->tf.protocol) &&
- dev->flags & ATA_DFLAG_CDL_ENABLED) ||
- (!ata_is_ncq(qc->tf.protocol) &&
- ata_id_sense_reporting_enabled(dev->id)))) {
- /*
- * Tell SCSI EH to not overwrite scmd->result even if
- * this command is finished with result SAM_STAT_GOOD.
- */
- qc->scsicmd->flags |= SCMD_FORCE_EH_SUCCESS;
- qc->flags |= ATA_QCFLAG_EH_SUCCESS_CMD;
- ehi->dev_action[dev->devno] |= ATA_EH_GET_SUCCESS_SENSE;
+ qc->scsicmd->flags |= SCMD_FORCE_EH_SUCCESS;
+ qc->flags |= ATA_QCFLAG_EH_SUCCESS_CMD;
+ ehi->dev_action[dev->devno] |= ATA_EH_GET_SUCCESS_SENSE;
- /*
- * set pending so that ata_qc_schedule_eh() does not
- * trigger fast drain, and freeze the port.
- */
- ap->pflags |= ATA_PFLAG_EH_PENDING;
- ata_qc_schedule_eh(qc);
- return;
- }
-
- /* Some commands need post-processing after successful
- * completion.
+ /*
+ * set pending so that ata_qc_schedule_eh() does not trigger
+ * fast drain, and freeze the port.
*/
- switch (qc->tf.command) {
- case ATA_CMD_SET_FEATURES:
- if (qc->tf.feature != SETFEATURES_WC_ON &&
- qc->tf.feature != SETFEATURES_WC_OFF &&
- qc->tf.feature != SETFEATURES_RA_ON &&
- qc->tf.feature != SETFEATURES_RA_OFF)
- break;
- fallthrough;
- case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
- case ATA_CMD_SET_MULTI: /* multi_count changed */
- /* revalidate device */
- ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
- ata_port_schedule_eh(ap);
- break;
+ ap->pflags |= ATA_PFLAG_EH_PENDING;
+ ata_qc_schedule_eh(qc);
+ return;
+ }
- case ATA_CMD_SLEEP:
- dev->flags |= ATA_DFLAG_SLEEPING;
+ /* Some commands need post-processing after successful completion. */
+ switch (qc->tf.command) {
+ case ATA_CMD_SET_FEATURES:
+ if (qc->tf.feature != SETFEATURES_WC_ON &&
+ qc->tf.feature != SETFEATURES_WC_OFF &&
+ qc->tf.feature != SETFEATURES_RA_ON &&
+ qc->tf.feature != SETFEATURES_RA_OFF)
break;
- }
-
- if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
- ata_verify_xfer(qc);
+ fallthrough;
+ case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
+ case ATA_CMD_SET_MULTI: /* multi_count changed */
+ /* revalidate device */
+ ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
+ ata_port_schedule_eh(ap);
+ break;
- __ata_qc_complete(qc);
- } else {
- if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
- return;
+ case ATA_CMD_SLEEP:
+ dev->flags |= ATA_DFLAG_SLEEPING;
+ break;
+ }
- /* read result TF if failed or requested */
- if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
- fill_result_tf(qc);
+ if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
+ ata_verify_xfer(qc);
- __ata_qc_complete(qc);
- }
+ __ata_qc_complete(qc);
}
EXPORT_SYMBOL_GPL(ata_qc_complete);
@@ -5039,11 +4875,8 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
struct ata_link *link = qc->dev->link;
u8 prot = qc->tf.protocol;
- /* Make sure only one non-NCQ command is outstanding. The
- * check is skipped for old EH because it reuses active qc to
- * request ATAPI sense.
- */
- WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
+ /* Make sure only one non-NCQ command is outstanding. */
+ WARN_ON_ONCE(ata_tag_valid(link->active_tag));
if (ata_is_ncq(prot)) {
WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
@@ -5896,7 +5729,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
}
EXPORT_SYMBOL_GPL(ata_host_init);
-void __ata_port_probe(struct ata_port *ap)
+void ata_port_probe(struct ata_port *ap)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags;
@@ -5914,20 +5747,7 @@ void __ata_port_probe(struct ata_port *ap)
spin_unlock_irqrestore(ap->lock, flags);
}
-
-int ata_port_probe(struct ata_port *ap)
-{
- int rc = 0;
-
- if (ap->ops->error_handler) {
- __ata_port_probe(ap);
- ata_port_wait_eh(ap);
- } else {
- rc = ata_bus_probe(ap);
- }
- return rc;
-}
-
+EXPORT_SYMBOL_GPL(ata_port_probe);
static void async_port_probe(void *data, async_cookie_t cookie)
{
@@ -5943,7 +5763,8 @@ static void async_port_probe(void *data, async_cookie_t cookie)
if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
async_synchronize_cookie(cookie);
- (void)ata_port_probe(ap);
+ ata_port_probe(ap);
+ ata_port_wait_eh(ap);
/* in order to keep device order, we need to synchronize at this point */
async_synchronize_cookie(cookie);
@@ -6130,9 +5951,6 @@ static void ata_port_detach(struct ata_port *ap)
struct ata_link *link;
struct ata_device *dev;
- if (!ap->ops->error_handler)
- goto skip_eh;
-
/* tell EH we're leaving & flush EH */
spin_lock_irqsave(ap->lock, flags);
ap->pflags |= ATA_PFLAG_UNLOADING;
@@ -6148,7 +5966,6 @@ static void ata_port_detach(struct ata_port *ap)
cancel_delayed_work_sync(&ap->hotplug_task);
cancel_delayed_work_sync(&ap->scsi_rescan_task);
- skip_eh:
/* clean up zpodd on port removal */
ata_for_each_link(link, ap, HOST_FIRST) {
ata_for_each_dev(dev, link, ALL) {
@@ -6684,7 +6501,7 @@ EXPORT_SYMBOL_GPL(ata_msleep);
* The final register value.
*/
u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
- unsigned long interval, unsigned long timeout)
+ unsigned int interval, unsigned int timeout)
{
unsigned long deadline;
u32 tmp;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 35e03679b0bf..159ba6ba19eb 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -78,12 +78,12 @@ enum {
* are mostly for error handling, hotplug and those outlier devices that
* take an exceptionally long time to recover from reset.
*/
-static const unsigned long ata_eh_reset_timeouts[] = {
+static const unsigned int ata_eh_reset_timeouts[] = {
10000, /* most drives spin up by 10sec */
10000, /* > 99% working drives spin up before 20sec */
35000, /* give > 30 secs of idleness for outlier devices */
5000, /* and sweet one last chance */
- ULONG_MAX, /* > 1 min has elapsed, give up */
+ UINT_MAX, /* > 1 min has elapsed, give up */
};
static const unsigned int ata_eh_identify_timeouts[] = {
@@ -571,13 +571,10 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
/* make sure sff pio task is not running */
ata_sff_flush_pio_task(ap);
- if (!ap->ops->error_handler)
- return;
-
/* synchronize with host lock and sort out timeouts */
/*
- * For new EH, all qcs are finished in one of three ways -
+ * For EH, all qcs are finished in one of three ways -
* normal completion, error completion, and SCSI timeout.
* Both completions can race against SCSI timeout. When normal
* completion wins, the qc never reaches EH. When error
@@ -659,94 +656,87 @@ EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
unsigned long flags;
+ struct ata_link *link;
- /* invoke error handler */
- if (ap->ops->error_handler) {
- struct ata_link *link;
-
- /* acquire EH ownership */
- ata_eh_acquire(ap);
+ /* acquire EH ownership */
+ ata_eh_acquire(ap);
repeat:
- /* kill fast drain timer */
- del_timer_sync(&ap->fastdrain_timer);
+ /* kill fast drain timer */
+ del_timer_sync(&ap->fastdrain_timer);
- /* process port resume request */
- ata_eh_handle_port_resume(ap);
+ /* process port resume request */
+ ata_eh_handle_port_resume(ap);
- /* fetch & clear EH info */
- spin_lock_irqsave(ap->lock, flags);
+ /* fetch & clear EH info */
+ spin_lock_irqsave(ap->lock, flags);
- ata_for_each_link(link, ap, HOST_FIRST) {
- struct ata_eh_context *ehc = &link->eh_context;
- struct ata_device *dev;
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_device *dev;
- memset(&link->eh_context, 0, sizeof(link->eh_context));
- link->eh_context.i = link->eh_info;
- memset(&link->eh_info, 0, sizeof(link->eh_info));
+ memset(&link->eh_context, 0, sizeof(link->eh_context));
+ link->eh_context.i = link->eh_info;
+ memset(&link->eh_info, 0, sizeof(link->eh_info));
- ata_for_each_dev(dev, link, ENABLED) {
- int devno = dev->devno;
+ ata_for_each_dev(dev, link, ENABLED) {
+ int devno = dev->devno;
- ehc->saved_xfer_mode[devno] = dev->xfer_mode;
- if (ata_ncq_enabled(dev))
- ehc->saved_ncq_enabled |= 1 << devno;
- }
+ ehc->saved_xfer_mode[devno] = dev->xfer_mode;
+ if (ata_ncq_enabled(dev))
+ ehc->saved_ncq_enabled |= 1 << devno;
}
+ }
- ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
- ap->pflags &= ~ATA_PFLAG_EH_PENDING;
- ap->excl_link = NULL; /* don't maintain exclusion over EH */
+ ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
+ ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+ ap->excl_link = NULL; /* don't maintain exclusion over EH */
- spin_unlock_irqrestore(ap->lock, flags);
+ spin_unlock_irqrestore(ap->lock, flags);
- /* invoke EH, skip if unloading or suspended */
- if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
- ap->ops->error_handler(ap);
- else {
- /* if unloading, commence suicide */
- if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
- !(ap->pflags & ATA_PFLAG_UNLOADED))
- ata_eh_unload(ap);
- ata_eh_finish(ap);
- }
+ /* invoke EH, skip if unloading or suspended */
+ if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
+ ap->ops->error_handler(ap);
+ else {
+ /* if unloading, commence suicide */
+ if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
+ !(ap->pflags & ATA_PFLAG_UNLOADED))
+ ata_eh_unload(ap);
+ ata_eh_finish(ap);
+ }
- /* process port suspend request */
- ata_eh_handle_port_suspend(ap);
+ /* process port suspend request */
+ ata_eh_handle_port_suspend(ap);
- /* Exception might have happened after ->error_handler
- * recovered the port but before this point. Repeat
- * EH in such case.
- */
- spin_lock_irqsave(ap->lock, flags);
+ /*
+ * Exception might have happened after ->error_handler recovered the
+ * port but before this point. Repeat EH in such case.
+ */
+ spin_lock_irqsave(ap->lock, flags);
- if (ap->pflags & ATA_PFLAG_EH_PENDING) {
- if (--ap->eh_tries) {
- spin_unlock_irqrestore(ap->lock, flags);
- goto repeat;
- }
- ata_port_err(ap,
- "EH pending after %d tries, giving up\n",
- ATA_EH_MAX_TRIES);
- ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+ if (ap->pflags & ATA_PFLAG_EH_PENDING) {
+ if (--ap->eh_tries) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ goto repeat;
}
+ ata_port_err(ap,
+ "EH pending after %d tries, giving up\n",
+ ATA_EH_MAX_TRIES);
+ ap->pflags &= ~ATA_PFLAG_EH_PENDING;
+ }
- /* this run is complete, make sure EH info is clear */
- ata_for_each_link(link, ap, HOST_FIRST)
- memset(&link->eh_info, 0, sizeof(link->eh_info));
+ /* this run is complete, make sure EH info is clear */
+ ata_for_each_link(link, ap, HOST_FIRST)
+ memset(&link->eh_info, 0, sizeof(link->eh_info));
- /* end eh (clear host_eh_scheduled) while holding
- * ap->lock such that if exception occurs after this
- * point but before EH completion, SCSI midlayer will
- * re-initiate EH.
- */
- ap->ops->end_eh(ap);
+ /*
+ * end eh (clear host_eh_scheduled) while holding ap->lock such that if
+ * exception occurs after this point but before EH completion, SCSI
+ * midlayer will re-initiate EH.
+ */
+ ap->ops->end_eh(ap);
- spin_unlock_irqrestore(ap->lock, flags);
- ata_eh_release(ap);
- } else {
- WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
- ap->ops->eng_timeout(ap);
- }
+ spin_unlock_irqrestore(ap->lock, flags);
+ ata_eh_release(ap);
scsi_eh_flush_done_q(&ap->eh_done_q);
@@ -912,8 +902,6 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- WARN_ON(!ap->ops->error_handler);
-
qc->flags |= ATA_QCFLAG_EH;
ata_eh_set_pending(ap, 1);
@@ -934,8 +922,6 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
*/
void ata_std_sched_eh(struct ata_port *ap)
{
- WARN_ON(!ap->ops->error_handler);
-
if (ap->pflags & ATA_PFLAG_INITIALIZING)
return;
@@ -989,8 +975,6 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
struct ata_queued_cmd *qc;
int tag, nr_aborted = 0;
- WARN_ON(!ap->ops->error_handler);
-
/* we're gonna abort all commands, no need for fast drain */
ata_eh_set_pending(ap, 0);
@@ -1065,8 +1049,6 @@ EXPORT_SYMBOL_GPL(ata_port_abort);
*/
static void __ata_port_freeze(struct ata_port *ap)
{
- WARN_ON(!ap->ops->error_handler);
-
if (ap->ops->freeze)
ap->ops->freeze(ap);
@@ -1091,8 +1073,6 @@ static void __ata_port_freeze(struct ata_port *ap)
*/
int ata_port_freeze(struct ata_port *ap)
{
- WARN_ON(!ap->ops->error_handler);
-
__ata_port_freeze(ap);
return ata_port_abort(ap);
@@ -1112,9 +1092,6 @@ void ata_eh_freeze_port(struct ata_port *ap)
{
unsigned long flags;
- if (!ap->ops->error_handler)
- return;
-
spin_lock_irqsave(ap->lock, flags);
__ata_port_freeze(ap);
spin_unlock_irqrestore(ap->lock, flags);
@@ -1134,9 +1111,6 @@ void ata_eh_thaw_port(struct ata_port *ap)
{
unsigned long flags;
- if (!ap->ops->error_handler)
- return;
-
spin_lock_irqsave(ap->lock, flags);
ap->pflags &= ~ATA_PFLAG_FROZEN;
@@ -2575,7 +2549,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
/*
* Prepare to reset
*/
- while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
+ while (ata_eh_reset_timeouts[max_tries] != UINT_MAX)
max_tries++;
if (link->flags & ATA_LFLAG_RST_ONCE)
max_tries = 1;
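[Editorial aside, not part of the patch] The two libata-eh.c hunks above change the reset timeout table and its sentinel in lockstep: once the array holds unsigned int values, the terminator that the counting loop in ata_eh_reset() looks for must be UINT_MAX rather than ULONG_MAX, or the walk would run off the end of the table. A minimal standalone C sketch of that sentinel-terminated table idiom (the name reset_timeouts is illustrative only):

#include <stdio.h>
#include <limits.h>

static const unsigned int reset_timeouts[] = {
	10000,		/* most drives spin up by 10sec */
	10000,
	35000,
	5000,
	UINT_MAX,	/* sentinel: must match the element type of the array */
};

int main(void)
{
	unsigned int max_tries = 0;

	/* Same counting idiom as the hunk above: walk until the sentinel. */
	while (reset_timeouts[max_tries] != UINT_MAX)
		max_tries++;

	printf("max_tries = %u\n", max_tries);	/* prints 4 */
	return 0;
}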
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 85e279a12f62..5d31c08be013 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -19,11 +19,11 @@
#include "libata-transport.h"
/* debounce timing parameters in msecs { interval, duration, timeout } */
-const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
+const unsigned int sata_deb_timing_normal[] = { 5, 100, 2000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
-const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
+const unsigned int sata_deb_timing_hotplug[] = { 25, 500, 2000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
-const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
+const unsigned int sata_deb_timing_long[] = { 100, 2000, 5000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
/**
@@ -232,11 +232,11 @@ EXPORT_SYMBOL_GPL(ata_tf_from_fis);
* RETURNS:
* 0 on success, -errno on failure.
*/
-int sata_link_debounce(struct ata_link *link, const unsigned long *params,
+int sata_link_debounce(struct ata_link *link, const unsigned int *params,
unsigned long deadline)
{
- unsigned long interval = params[0];
- unsigned long duration = params[1];
+ unsigned int interval = params[0];
+ unsigned int duration = params[1];
unsigned long last_jiffies, t;
u32 last, cur;
int rc;
@@ -295,7 +295,7 @@ EXPORT_SYMBOL_GPL(sata_link_debounce);
* RETURNS:
* 0 on success, -errno on failure.
*/
-int sata_link_resume(struct ata_link *link, const unsigned long *params,
+int sata_link_resume(struct ata_link *link, const unsigned int *params,
unsigned long deadline)
{
int tries = ATA_LINK_RESUME_TRIES;
@@ -528,7 +528,7 @@ EXPORT_SYMBOL_GPL(sata_set_spd);
* RETURNS:
* 0 on success, -errno otherwise.
*/
-int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
+int sata_link_hardreset(struct ata_link *link, const unsigned int *timing,
unsigned long deadline,
bool *online, int (*check_ready)(struct ata_link *))
{
@@ -1139,92 +1139,12 @@ struct ata_port *ata_sas_port_alloc(struct ata_host *host,
ap->flags |= port_info->flags;
ap->ops = port_info->port_ops;
ap->cbl = ATA_CBL_SATA;
+ ap->print_id = atomic_inc_return(&ata_print_id);
return ap;
}
EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
-/**
- * ata_sas_port_start - Set port up for dma.
- * @ap: Port to initialize
- *
- * Called just after data structures for each port are
- * initialized.
- *
- * May be used as the port_start() entry in ata_port_operations.
- *
- * LOCKING:
- * Inherited from caller.
- */
-int ata_sas_port_start(struct ata_port *ap)
-{
- /*
- * the port is marked as frozen at allocation time, but if we don't
- * have new eh, we won't thaw it
- */
- if (!ap->ops->error_handler)
- ap->pflags &= ~ATA_PFLAG_FROZEN;
- return 0;
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_start);
-
-/**
- * ata_sas_port_stop - Undo ata_sas_port_start()
- * @ap: Port to shut down
- *
- * May be used as the port_stop() entry in ata_port_operations.
- *
- * LOCKING:
- * Inherited from caller.
- */
-
-void ata_sas_port_stop(struct ata_port *ap)
-{
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_stop);
-
-/**
- * ata_sas_async_probe - simply schedule probing and return
- * @ap: Port to probe
- *
- * For batch scheduling of probe for sas attached ata devices, assumes
- * the port has already been through ata_sas_port_init()
- */
-void ata_sas_async_probe(struct ata_port *ap)
-{
- __ata_port_probe(ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_async_probe);
-
-int ata_sas_sync_probe(struct ata_port *ap)
-{
- return ata_port_probe(ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_sync_probe);
-
-
-/**
- * ata_sas_port_init - Initialize a SATA device
- * @ap: SATA port to initialize
- *
- * LOCKING:
- * PCI/etc. bus probe sem.
- *
- * RETURNS:
- * Zero on success, non-zero on error.
- */
-
-int ata_sas_port_init(struct ata_port *ap)
-{
- int rc = ap->ops->port_start(ap);
-
- if (rc)
- return rc;
- ap->print_id = atomic_inc_return(&ata_print_id);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_init);
-
int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
{
return ata_tport_add(parent, ap);
@@ -1238,20 +1158,6 @@ void ata_sas_tport_delete(struct ata_port *ap)
EXPORT_SYMBOL_GPL(ata_sas_tport_delete);
/**
- * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
- * @ap: SATA port to destroy
- *
- */
-
-void ata_sas_port_destroy(struct ata_port *ap)
-{
- if (ap->ops->port_stop)
- ap->ops->port_stop(ap);
- kfree(ap);
-}
-EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
-
-/**
* ata_sas_slave_configure - Default slave_config routine for libata devices
* @sdev: SCSI device to configure
* @ap: ATA port to which SCSI device is attached
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index c6ece32de8e3..e4e4175e3e83 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -135,11 +135,11 @@ static ssize_t ata_scsi_park_store(struct device *device,
struct scsi_device *sdev = to_scsi_device(device);
struct ata_port *ap;
struct ata_device *dev;
- long int input;
+ int input;
unsigned long flags;
int rc;
- rc = kstrtol(buf, 10, &input);
+ rc = kstrtoint(buf, 10, &input);
if (rc)
return rc;
if (input < -2)
@@ -710,47 +710,6 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
}
/**
- * ata_dump_status - user friendly display of error info
- * @ap: the port in question
- * @tf: ptr to filled out taskfile
- *
- * Decode and dump the ATA error/status registers for the user so
- * that they have some idea what really happened at the non
- * make-believe layer.
- *
- * LOCKING:
- * inherited from caller
- */
-static void ata_dump_status(struct ata_port *ap, struct ata_taskfile *tf)
-{
- u8 stat = tf->status, err = tf->error;
-
- if (stat & ATA_BUSY) {
- ata_port_warn(ap, "status=0x%02x {Busy} ", stat);
- } else {
- ata_port_warn(ap, "status=0x%02x { %s%s%s%s%s%s%s} ", stat,
- stat & ATA_DRDY ? "DriveReady " : "",
- stat & ATA_DF ? "DeviceFault " : "",
- stat & ATA_DSC ? "SeekComplete " : "",
- stat & ATA_DRQ ? "DataRequest " : "",
- stat & ATA_CORR ? "CorrectedError " : "",
- stat & ATA_SENSE ? "Sense " : "",
- stat & ATA_ERR ? "Error " : "");
- if (err)
- ata_port_warn(ap, "error=0x%02x {%s%s%s%s%s%s", err,
- err & ATA_ABORTED ?
- "DriveStatusError " : "",
- err & ATA_ICRC ?
- (err & ATA_ABORTED ?
- "BadCRC " : "Sector ") : "",
- err & ATA_UNC ? "UncorrectableError " : "",
- err & ATA_IDNF ? "SectorIdNotFound " : "",
- err & ATA_TRK0NF ? "TrackZeroNotFound " : "",
- err & ATA_AMNF ? "AddrMarkNotFound " : "");
- }
-}
-
-/**
* ata_to_sense_error - convert ATA error to SCSI error
* @id: ATA device number
* @drv_stat: value contained in ATA status register
@@ -758,7 +717,6 @@ static void ata_dump_status(struct ata_port *ap, struct ata_taskfile *tf)
* @sk: the sense key we'll fill out
* @asc: the additional sense code we'll fill out
* @ascq: the additional sense code qualifier we'll fill out
- * @verbose: be verbose
*
* Converts an ATA error into a SCSI error. Fill out pointers to
* SK, ASC, and ASCQ bytes for later use in fixed or descriptor
@@ -768,7 +726,7 @@ static void ata_dump_status(struct ata_port *ap, struct ata_taskfile *tf)
* spin_lock_irqsave(host lock)
*/
static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
- u8 *asc, u8 *ascq, int verbose)
+ u8 *asc, u8 *ascq)
{
int i;
@@ -847,7 +805,7 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
*sk = sense_table[i][1];
*asc = sense_table[i][2];
*ascq = sense_table[i][3];
- goto translate_done;
+ return;
}
}
}
@@ -862,7 +820,7 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
*sk = stat_table[i][1];
*asc = stat_table[i][2];
*ascq = stat_table[i][3];
- goto translate_done;
+ return;
}
}
@@ -873,12 +831,6 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
*sk = ABORTED_COMMAND;
*asc = 0x00;
*ascq = 0x00;
-
- translate_done:
- if (verbose)
- pr_err("ata%u: translated ATA stat/err 0x%02x/%02x to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
- id, drv_stat, drv_err, *sk, *asc, *ascq);
- return;
}
/*
@@ -904,7 +856,6 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
struct ata_taskfile *tf = &qc->result_tf;
unsigned char *sb = cmd->sense_buffer;
unsigned char *desc = sb + 8;
- int verbose = qc->ap->ops->error_handler == NULL;
u8 sense_key, asc, ascq;
memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
@@ -916,7 +867,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
if (qc->err_mask ||
tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
- &sense_key, &asc, &ascq, verbose);
+ &sense_key, &asc, &ascq);
ata_scsi_set_sense(qc->dev, cmd, sense_key, asc, ascq);
} else {
/*
@@ -999,7 +950,6 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
struct scsi_cmnd *cmd = qc->scsicmd;
struct ata_taskfile *tf = &qc->result_tf;
unsigned char *sb = cmd->sense_buffer;
- int verbose = qc->ap->ops->error_handler == NULL;
u64 block;
u8 sense_key, asc, ascq;
@@ -1017,7 +967,7 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
if (qc->err_mask ||
tf->status & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
ata_to_sense_error(qc->ap->print_id, tf->status, tf->error,
- &sense_key, &asc, &ascq, verbose);
+ &sense_key, &asc, &ascq);
ata_scsi_set_sense(dev, cmd, sense_key, asc, ascq);
} else {
/* Could not decode error */
@@ -1186,9 +1136,6 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
unsigned long flags;
struct ata_device *dev;
- if (!ap->ops->error_handler)
- return;
-
spin_lock_irqsave(ap->lock, flags);
dev = __ata_scsi_find_dev(ap, sdev);
if (dev && dev->sdev) {
@@ -1675,7 +1622,6 @@ static void ata_qc_done(struct ata_queued_cmd *qc)
static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
- struct ata_port *ap = qc->ap;
struct scsi_cmnd *cmd = qc->scsicmd;
u8 *cdb = cmd->cmnd;
int need_sense = (qc->err_mask != 0) &&
@@ -1699,9 +1645,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
/* Keep the SCSI ML and status byte, clear host byte. */
cmd->result &= 0x0000ffff;
- if (need_sense && !ap->ops->error_handler)
- ata_dump_status(ap, &qc->result_tf);
-
ata_qc_done(qc);
}
@@ -2608,71 +2551,6 @@ static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
return 0;
}
-static void atapi_sense_complete(struct ata_queued_cmd *qc)
-{
- if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
- /* FIXME: not quite right; we don't want the
- * translation of taskfile registers into
- * a sense descriptors, since that's only
- * correct for ATA, not ATAPI
- */
- ata_gen_passthru_sense(qc);
- }
-
- ata_qc_done(qc);
-}
-
-/* is it pointless to prefer PIO for "safety reasons"? */
-static inline int ata_pio_use_silly(struct ata_port *ap)
-{
- return (ap->flags & ATA_FLAG_PIO_DMA);
-}
-
-static void atapi_request_sense(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct scsi_cmnd *cmd = qc->scsicmd;
-
- memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
-
-#ifdef CONFIG_ATA_SFF
- if (ap->ops->sff_tf_read)
- ap->ops->sff_tf_read(ap, &qc->tf);
-#endif
-
- /* fill these in, for the case where they are -not- overwritten */
- cmd->sense_buffer[0] = 0x70;
- cmd->sense_buffer[2] = qc->tf.error >> 4;
-
- ata_qc_reinit(qc);
-
- /* setup sg table and init transfer direction */
- sg_init_one(&qc->sgent, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
- ata_sg_init(qc, &qc->sgent, 1);
- qc->dma_dir = DMA_FROM_DEVICE;
-
- memset(&qc->cdb, 0, qc->dev->cdb_len);
- qc->cdb[0] = REQUEST_SENSE;
- qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
-
- qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
- qc->tf.command = ATA_CMD_PACKET;
-
- if (ata_pio_use_silly(ap)) {
- qc->tf.protocol = ATAPI_PROT_DMA;
- qc->tf.feature |= ATAPI_PKT_DMA;
- } else {
- qc->tf.protocol = ATAPI_PROT_PIO;
- qc->tf.lbam = SCSI_SENSE_BUFFERSIZE;
- qc->tf.lbah = 0;
- }
- qc->nbytes = SCSI_SENSE_BUFFERSIZE;
-
- qc->complete_fn = atapi_sense_complete;
-
- ata_qc_issue(qc);
-}
-
/*
* ATAPI devices typically report zero for their SCSI version, and sometimes
* deviate from the spec WRT response data format. If SCSI version is
@@ -2698,9 +2576,8 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
struct scsi_cmnd *cmd = qc->scsicmd;
unsigned int err_mask = qc->err_mask;
- /* handle completion from new EH */
- if (unlikely(qc->ap->ops->error_handler &&
- (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
+ /* handle completion from EH */
+ if (unlikely(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID)) {
if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
/* FIXME: not quite right; we don't want the
@@ -2732,23 +2609,10 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
return;
}
- /* successful completion or old EH failure path */
- if (unlikely(err_mask & AC_ERR_DEV)) {
- cmd->result = SAM_STAT_CHECK_CONDITION;
- atapi_request_sense(qc);
- return;
- } else if (unlikely(err_mask)) {
- /* FIXME: not quite right; we don't want the
- * translation of taskfile registers into
- * a sense descriptors, since that's only
- * correct for ATA, not ATAPI
- */
- ata_gen_passthru_sense(qc);
- } else {
- if (cmd->cmnd[0] == INQUIRY && (cmd->cmnd[1] & 0x03) == 0)
- atapi_fixup_inquiry(cmd);
- cmd->result = SAM_STAT_GOOD;
- }
+ /* successful completion path */
+ if (cmd->cmnd[0] == INQUIRY && (cmd->cmnd[1] & 0x03) == 0)
+ atapi_fixup_inquiry(cmd);
+ cmd->result = SAM_STAT_GOOD;
ata_qc_done(qc);
}
@@ -4797,9 +4661,6 @@ int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned long flags;
int devno, rc = 0;
- if (!ap->ops->error_handler)
- return -EOPNOTSUPP;
-
if (lun != SCAN_WILD_CARD && lun)
return -EINVAL;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 9d28badfe41d..8fcc622fcb3d 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -883,31 +883,21 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
struct ata_port *ap = qc->ap;
- if (ap->ops->error_handler) {
- if (in_wq) {
- /* EH might have kicked in while host lock is
- * released.
- */
- qc = ata_qc_from_tag(ap, qc->tag);
- if (qc) {
- if (likely(!(qc->err_mask & AC_ERR_HSM))) {
- ata_sff_irq_on(ap);
- ata_qc_complete(qc);
- } else
- ata_port_freeze(ap);
- }
- } else {
- if (likely(!(qc->err_mask & AC_ERR_HSM)))
+ if (in_wq) {
+ /* EH might have kicked in while host lock is released. */
+ qc = ata_qc_from_tag(ap, qc->tag);
+ if (qc) {
+ if (likely(!(qc->err_mask & AC_ERR_HSM))) {
+ ata_sff_irq_on(ap);
ata_qc_complete(qc);
- else
+ } else
ata_port_freeze(ap);
}
} else {
- if (in_wq) {
- ata_sff_irq_on(ap);
- ata_qc_complete(qc);
- } else
+ if (likely(!(qc->err_mask & AC_ERR_HSM)))
ata_qc_complete(qc);
+ else
+ ata_port_freeze(ap);
}
}
@@ -1971,7 +1961,7 @@ int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_eh_context *ehc = &link->eh_context;
- const unsigned long *timing = sata_ehc_deb_timing(ehc);
+ const unsigned int *timing = sata_ehc_deb_timing(ehc);
bool online;
int rc;
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index cf993885d2b2..6e7d352803bd 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -78,8 +78,6 @@ extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
extern struct ata_port *ata_port_alloc(struct ata_host *host);
extern const char *sata_spd_string(unsigned int spd);
-extern int ata_port_probe(struct ata_port *ap);
-extern void __ata_port_probe(struct ata_port *ap);
extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
u8 page, void *buf, unsigned int sectors);
@@ -124,7 +122,6 @@ extern void ata_scsi_media_change_notify(struct ata_device *dev);
extern void ata_scsi_hotplug(struct work_struct *work);
extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
extern void ata_scsi_dev_rescan(struct work_struct *work);
-extern int ata_bus_probe(struct ata_port *ap);
extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, u64 lun);
void ata_scsi_sdev_config(struct scsi_device *sdev);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 314eaa167954..d0c6924d25b6 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -917,15 +917,13 @@ static int arasan_cf_probe(struct platform_device *pdev)
return ret;
}
-static int arasan_cf_remove(struct platform_device *pdev)
+static void arasan_cf_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct arasan_cf_dev *acdev = host->ports[0]->private_data;
ata_host_detach(host);
cf_exit(acdev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -966,7 +964,7 @@ MODULE_DEVICE_TABLE(of, arasan_cf_id_table);
static struct platform_driver arasan_cf_driver = {
.probe = arasan_cf_probe,
- .remove = arasan_cf_remove,
+ .remove_new = arasan_cf_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &arasan_cf_pm_ops,
diff --git a/drivers/ata/pata_buddha.c b/drivers/ata/pata_buddha.c
index 49bc619b83e2..c36ee991d5e5 100644
--- a/drivers/ata/pata_buddha.c
+++ b/drivers/ata/pata_buddha.c
@@ -27,7 +27,6 @@
#include <asm/amigahw.h>
#include <asm/amigaints.h>
-#include <asm/ide.h>
#include <asm/setup.h>
#define DRV_NAME "pata_buddha"
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index c6e043e05d43..c84a20892f1b 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -40,6 +40,7 @@
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>
@@ -910,6 +911,12 @@ static struct ata_port_operations ep93xx_pata_port_ops = {
.port_start = ep93xx_pata_port_start,
};
+static const struct soc_device_attribute ep93xx_soc_table[] = {
+ { .revision = "E1", .data = (void *)ATA_UDMA3 },
+ { .revision = "E2", .data = (void *)ATA_UDMA4 },
+ { /* sentinel */ }
+};
+
static int ep93xx_pata_probe(struct platform_device *pdev)
{
struct ep93xx_pata_data *drv_data;
@@ -939,7 +946,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
drv_data = devm_kzalloc(&pdev->dev, sizeof(*drv_data), GFP_KERNEL);
if (!drv_data) {
- err = -ENXIO;
+ err = -ENOMEM;
goto err_rel_gpio;
}
@@ -952,7 +959,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
- err = -ENXIO;
+ err = -ENOMEM;
goto err_rel_dma;
}
@@ -976,12 +983,11 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
* so this driver supports only UDMA modes.
*/
if (drv_data->dma_rx_channel && drv_data->dma_tx_channel) {
- int chip_rev = ep93xx_chip_revision();
+ const struct soc_device_attribute *match;
- if (chip_rev == EP93XX_CHIP_REV_E1)
- ap->udma_mask = ATA_UDMA3;
- else if (chip_rev == EP93XX_CHIP_REV_E2)
- ap->udma_mask = ATA_UDMA4;
+ match = soc_device_match(ep93xx_soc_table);
+ if (match)
+ ap->udma_mask = (unsigned int) match->data;
else
ap->udma_mask = ATA_UDMA2;
}
@@ -1004,7 +1010,7 @@ err_rel_gpio:
return err;
}
-static int ep93xx_pata_remove(struct platform_device *pdev)
+static void ep93xx_pata_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct ep93xx_pata_data *drv_data = host->private_data;
@@ -1013,7 +1019,6 @@ static int ep93xx_pata_remove(struct platform_device *pdev)
ep93xx_pata_release_dma(drv_data);
ep93xx_pata_clear_regs(drv_data->ide_base);
ep93xx_ide_release_gpio(pdev);
- return 0;
}
static struct platform_driver ep93xx_pata_platform_driver = {
@@ -1021,7 +1026,7 @@ static struct platform_driver ep93xx_pata_platform_driver = {
.name = DRV_NAME,
},
.probe = ep93xx_pata_probe,
- .remove = ep93xx_pata_remove,
+ .remove_new = ep93xx_pata_remove,
};
module_platform_driver(ep93xx_pata_platform_driver);
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 996516e64f13..0c2ae430f5aa 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -28,11 +28,15 @@
#include <asm/atarihw.h>
#include <asm/atariints.h>
#include <asm/atari_stdma.h>
-#include <asm/ide.h>
#define DRV_NAME "pata_falcon"
#define DRV_VERSION "0.1.0"
+static int pata_falcon_swap_mask;
+
+module_param_named(data_swab, pata_falcon_swap_mask, int, 0444);
+MODULE_PARM_DESC(data_swab, "Data byte swap enable/disable bitmap (0x1==drive1, 0x2==drive2, 0x4==drive3, 0x8==drive4, default==0)");
+
static const struct scsi_host_template pata_falcon_sht = {
ATA_PIO_SHT(DRV_NAME),
};
@@ -50,7 +54,7 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
if (dev->class == ATA_DEV_ATA && cmd &&
!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)))
- swap = 0;
+ swap = (uintptr_t)ap->private_data & BIT(dev->devno);
/* Transfer multiple of 2 bytes */
if (rw == READ) {
@@ -123,8 +127,9 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
struct resource *base_res, *ctl_res, *irq_res;
struct ata_host *host;
struct ata_port *ap;
- void __iomem *base;
- int irq = 0;
+ void __iomem *base, *ctl_base;
+ int mask_shift = 0; /* Q40 & Falcon default */
+ int irq = 0, io_offset = 1, reg_shift = 2; /* Falcon defaults */
dev_info(&pdev->dev, "Atari Falcon and Q40/Q60 PATA controller\n");
@@ -165,26 +170,38 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
- base = (void __iomem *)base_mem_res->start;
/* N.B. this assumes data_addr will be used for word-sized I/O only */
- ap->ioaddr.data_addr = base + 0 + 0 * 4;
- ap->ioaddr.error_addr = base + 1 + 1 * 4;
- ap->ioaddr.feature_addr = base + 1 + 1 * 4;
- ap->ioaddr.nsect_addr = base + 1 + 2 * 4;
- ap->ioaddr.lbal_addr = base + 1 + 3 * 4;
- ap->ioaddr.lbam_addr = base + 1 + 4 * 4;
- ap->ioaddr.lbah_addr = base + 1 + 5 * 4;
- ap->ioaddr.device_addr = base + 1 + 6 * 4;
- ap->ioaddr.status_addr = base + 1 + 7 * 4;
- ap->ioaddr.command_addr = base + 1 + 7 * 4;
-
- base = (void __iomem *)ctl_mem_res->start;
- ap->ioaddr.altstatus_addr = base + 1;
- ap->ioaddr.ctl_addr = base + 1;
-
- ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
- (unsigned long)base_mem_res->start,
- (unsigned long)ctl_mem_res->start);
+ ap->ioaddr.data_addr = (void __iomem *)base_mem_res->start;
+
+ if (base_res) { /* only Q40 has IO resources */
+ io_offset = 0x10000;
+ reg_shift = 0;
+ base = (void __iomem *)base_res->start;
+ ctl_base = (void __iomem *)ctl_res->start;
+ } else {
+ base = (void __iomem *)base_mem_res->start;
+ ctl_base = (void __iomem *)ctl_mem_res->start;
+ }
+
+ ap->ioaddr.error_addr = base + io_offset + (1 << reg_shift);
+ ap->ioaddr.feature_addr = base + io_offset + (1 << reg_shift);
+ ap->ioaddr.nsect_addr = base + io_offset + (2 << reg_shift);
+ ap->ioaddr.lbal_addr = base + io_offset + (3 << reg_shift);
+ ap->ioaddr.lbam_addr = base + io_offset + (4 << reg_shift);
+ ap->ioaddr.lbah_addr = base + io_offset + (5 << reg_shift);
+ ap->ioaddr.device_addr = base + io_offset + (6 << reg_shift);
+ ap->ioaddr.status_addr = base + io_offset + (7 << reg_shift);
+ ap->ioaddr.command_addr = base + io_offset + (7 << reg_shift);
+
+ ap->ioaddr.altstatus_addr = ctl_base + io_offset;
+ ap->ioaddr.ctl_addr = ctl_base + io_offset;
+
+ ata_port_desc(ap, "cmd %px ctl %px data %px",
+ base, ctl_base, ap->ioaddr.data_addr);
+
+ if (pdev->id > 0)
+ mask_shift = 2;
+ ap->private_data = (void *)(uintptr_t)(pata_falcon_swap_mask >> mask_shift);
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (irq_res && irq_res->start > 0) {
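[Editorial aside, not part of the patch] The pata_falcon hunks above route the new data_swab module parameter through ap->private_data: the bitmap is shifted right by two for the second controller (pdev->id > 0) and the per-drive bit is then selected with BIT(dev->devno). A standalone sketch of that bit selection, assuming the drive numbering documented in MODULE_PARM_DESC (the helper name swap_enabled is illustrative only):

#include <stdio.h>

#define BIT(n)	(1u << (n))

/*
 * data_swab bit mapping (illustrative, mirrors the MODULE_PARM_DESC above):
 * bit 0 = drive1, bit 1 = drive2 on the first controller;
 * bit 2 = drive3, bit 3 = drive4 on the second controller.
 */
static int swap_enabled(unsigned int data_swab, int controller_id, int devno)
{
	int mask_shift = controller_id > 0 ? 2 : 0;		/* second controller uses bits 2-3 */
	unsigned int per_port_mask = data_swab >> mask_shift;	/* what ap->private_data would carry */

	return !!(per_port_mask & BIT(devno));
}

int main(void)
{
	unsigned int data_swab = 0x6;	/* swap drive2 and drive3 */

	printf("%d\n", swap_enabled(data_swab, 0, 1));	/* 1: drive2 */
	printf("%d\n", swap_enabled(data_swab, 1, 0));	/* 1: drive3 */
	printf("%d\n", swap_enabled(data_swab, 0, 0));	/* 0: drive1 untouched */
	return 0;
}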
diff --git a/drivers/ata/pata_ftide010.c b/drivers/ata/pata_ftide010.c
index 6f6734c09b11..4d6ef90ccc77 100644
--- a/drivers/ata/pata_ftide010.c
+++ b/drivers/ata/pata_ftide010.c
@@ -14,8 +14,7 @@
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/bitops.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/clk.h>
#include "sata_gemini.h"
@@ -470,11 +469,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- ftide->base = devm_ioremap_resource(dev, res);
+ ftide->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ftide->base))
return PTR_ERR(ftide->base);
@@ -541,15 +536,13 @@ err_dis_clk:
return ret;
}
-static int pata_ftide010_remove(struct platform_device *pdev)
+static void pata_ftide010_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct ftide010 *ftide = host->private_data;
ata_host_detach(ftide->host);
clk_disable_unprepare(ftide->pclk);
-
- return 0;
}
static const struct of_device_id pata_ftide010_of_match[] = {
@@ -563,10 +556,11 @@ static struct platform_driver pata_ftide010_driver = {
.of_match_table = pata_ftide010_of_match,
},
.probe = pata_ftide010_probe,
- .remove = pata_ftide010_remove,
+ .remove_new = pata_ftide010_remove,
};
module_platform_driver(pata_ftide010_driver);
+MODULE_DESCRIPTION("low level driver for Faraday Technology FTIDE010");
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_gayle.c b/drivers/ata/pata_gayle.c
index e5aa07f92106..3bdbe2b65a2b 100644
--- a/drivers/ata/pata_gayle.c
+++ b/drivers/ata/pata_gayle.c
@@ -27,7 +27,6 @@
#include <asm/amigahw.h>
#include <asm/amigaints.h>
#include <asm/amigayle.h>
-#include <asm/ide.h>
#include <asm/setup.h>
#define DRV_NAME "pata_gayle"
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 4013f28679a9..d0aa8fc929b4 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -141,21 +141,15 @@ static int pata_imx_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->clk = devm_clk_get(&pdev->dev, NULL);
+ priv->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "Failed to get clock\n");
+ dev_err(&pdev->dev, "Failed to get and enable clock\n");
return PTR_ERR(priv->clk);
}
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- return ret;
-
host = ata_host_alloc(&pdev->dev, 1);
- if (!host) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!host)
+ return -ENOMEM;
host->private_data = priv;
ap = host->ports[0];
@@ -164,12 +158,9 @@ static int pata_imx_probe(struct platform_device *pdev)
ap->pio_mask = ATA_PIO4;
ap->flags |= ATA_FLAG_SLAVE_POSS;
- io_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->host_regs = devm_ioremap_resource(&pdev->dev, io_res);
- if (IS_ERR(priv->host_regs)) {
- ret = PTR_ERR(priv->host_regs);
- goto err;
- }
+ priv->host_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &io_res);
+ if (IS_ERR(priv->host_regs))
+ return PTR_ERR(priv->host_regs);
ap->ioaddr.cmd_addr = priv->host_regs + PATA_IMX_DRIVE_DATA;
ap->ioaddr.ctl_addr = priv->host_regs + PATA_IMX_DRIVE_CONTROL;
@@ -195,16 +186,12 @@ static int pata_imx_probe(struct platform_device *pdev)
&pata_imx_sht);
if (ret)
- goto err;
+ return ret;
return 0;
-err:
- clk_disable_unprepare(priv->clk);
-
- return ret;
}
-static int pata_imx_remove(struct platform_device *pdev)
+static void pata_imx_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct pata_imx_priv *priv = host->private_data;
@@ -212,10 +199,6 @@ static int pata_imx_remove(struct platform_device *pdev)
ata_host_detach(host);
__raw_writel(0, priv->host_regs + PATA_IMX_ATA_INT_EN);
-
- clk_disable_unprepare(priv->clk);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -266,7 +249,7 @@ MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);
static struct platform_driver pata_imx_driver = {
.probe = pata_imx_probe,
- .remove = pata_imx_remove,
+ .remove_new = pata_imx_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = imx_pata_dt_ids,
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index b1daa4d3fcd9..246bb4f8f1f7 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -242,12 +242,6 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
int ret;
int irq;
- cmd = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-
- if (!cmd || !ctl)
- return -EINVAL;
-
ixpp = devm_kzalloc(dev, sizeof(*ixpp), GFP_KERNEL);
if (!ixpp)
return -ENOMEM;
@@ -271,18 +265,18 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
if (ret)
return ret;
- ixpp->cmd = devm_ioremap_resource(dev, cmd);
- ixpp->ctl = devm_ioremap_resource(dev, ctl);
- if (IS_ERR(ixpp->cmd) || IS_ERR(ixpp->ctl))
- return -ENOMEM;
+ ixpp->cmd = devm_platform_get_and_ioremap_resource(pdev, 0, &cmd);
+ if (IS_ERR(ixpp->cmd))
+ return PTR_ERR(ixpp->cmd);
+
+ ixpp->ctl = devm_platform_get_and_ioremap_resource(pdev, 1, &ctl);
+ if (IS_ERR(ixpp->ctl))
+ return PTR_ERR(ixpp->ctl);
irq = platform_get_irq(pdev, 0);
- if (irq > 0)
- irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
- else if (irq < 0)
+ if (irq < 0)
return irq;
- else
- return -EINVAL;
+ irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
/* Just one port to set up */
ixp4xx_setup_port(ixpp->host->ports[0], ixpp, cmd->start, ctl->start);
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 66c9dea4ea6e..6c317a461a1f 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -19,9 +19,10 @@
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/libata.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
@@ -800,8 +801,7 @@ static int mpc52xx_ata_probe(struct platform_device *op)
return rv;
}
-static int
-mpc52xx_ata_remove(struct platform_device *op)
+static void mpc52xx_ata_remove(struct platform_device *op)
{
struct ata_host *host = platform_get_drvdata(op);
struct mpc52xx_ata_priv *priv = host->private_data;
@@ -815,8 +815,6 @@ mpc52xx_ata_remove(struct platform_device *op)
irq_dispose_mapping(task_irq);
bcom_ata_release(priv->dmatsk);
irq_dispose_mapping(priv->ata_irq);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -857,7 +855,7 @@ static const struct of_device_id mpc52xx_ata_of_match[] = {
static struct platform_driver mpc52xx_ata_of_platform_driver = {
.probe = mpc52xx_ata_probe,
- .remove = mpc52xx_ata_remove,
+ .remove_new = mpc52xx_ata_remove,
#ifdef CONFIG_PM_SLEEP
.suspend = mpc52xx_ata_suspend,
.resume = mpc52xx_ata_resume,
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index ea402e02c46e..5275c6464f57 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -295,7 +295,7 @@ static int pxa_ata_probe(struct platform_device *pdev)
return ret;
}
-static int pxa_ata_remove(struct platform_device *pdev)
+static void pxa_ata_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct pata_pxa_data *data = host->ports[0]->private_data;
@@ -303,13 +303,11 @@ static int pxa_ata_remove(struct platform_device *pdev)
dma_release_channel(data->dma_chan);
ata_host_detach(host);
-
- return 0;
}
static struct platform_driver pxa_ata_driver = {
.probe = pxa_ata_probe,
- .remove = pxa_ata_remove,
+ .remove_new = pxa_ata_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 3974d294a341..0fa253ad7c93 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -155,18 +155,16 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
return 0;
}
-static int rb532_pata_driver_remove(struct platform_device *pdev)
+static void rb532_pata_driver_remove(struct platform_device *pdev)
{
struct ata_host *ah = platform_get_drvdata(pdev);
ata_host_detach(ah);
-
- return 0;
}
static struct platform_driver rb532_pata_platform_driver = {
.probe = rb532_pata_driver_probe,
- .remove = rb532_pata_driver_remove,
+ .remove_new = rb532_pata_driver_remove,
.driver = {
.name = DRV_NAME,
},
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 3b62ea482f1a..93882e976ede 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -180,8 +180,7 @@ static void sl82c105_bmdma_start(struct ata_queued_cmd *qc)
* document.
*
* This function is also called to turn off DMA when a timeout occurs
- * during DMA operation. In both cases we need to reset the engine,
- * so no actual eng_timeout handler is required.
+ * during DMA operation. In both cases we need to reset the engine.
*
* We assume bmdma_stop is always called if bmdma_start as called. If
* not then we may need to wrap qc_issue.
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index fabdd1e380f9..52f5168e4db5 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -18,9 +18,8 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/libata.h>
@@ -1211,7 +1210,7 @@ error_out:
return err;
}
-static int sata_dwc_remove(struct platform_device *ofdev)
+static void sata_dwc_remove(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct ata_host *host = dev_get_drvdata(dev);
@@ -1227,7 +1226,6 @@ static int sata_dwc_remove(struct platform_device *ofdev)
#endif
dev_dbg(dev, "done\n");
- return 0;
}
static const struct of_device_id sata_dwc_match[] = {
@@ -1242,7 +1240,7 @@ static struct platform_driver sata_dwc_driver = {
.of_match_table = sata_dwc_match,
},
.probe = sata_dwc_probe,
- .remove = sata_dwc_remove,
+ .remove_new = sata_dwc_remove,
};
module_platform_driver(sata_dwc_driver);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index ccd99b9aa9ff..01aa05f4c3f5 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -12,6 +12,9 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -19,9 +22,6 @@
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include <asm/io.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
static unsigned int intr_coalescing_count;
module_param(intr_coalescing_count, int, S_IRUGO);
@@ -1526,7 +1526,7 @@ error_exit_with_cleanup:
return retval;
}
-static int sata_fsl_remove(struct platform_device *ofdev)
+static void sata_fsl_remove(struct platform_device *ofdev)
{
struct ata_host *host = platform_get_drvdata(ofdev);
struct sata_fsl_host_priv *host_priv = host->private_data;
@@ -1535,8 +1535,6 @@ static int sata_fsl_remove(struct platform_device *ofdev)
device_remove_file(&ofdev->dev, &host_priv->rx_watermark);
ata_host_detach(host);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1591,7 +1589,7 @@ static struct platform_driver fsl_sata_driver = {
.of_match_table = fsl_sata_match,
},
.probe = sata_fsl_probe,
- .remove = sata_fsl_remove,
+ .remove_new = sata_fsl_remove,
#ifdef CONFIG_PM_SLEEP
.suspend = sata_fsl_suspend,
.resume = sata_fsl_resume,
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index c42cc9bbbc4e..400b22ee99c3 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -12,8 +12,7 @@
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/reset.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/pinctrl/consumer.h>
@@ -400,7 +399,7 @@ out_unprep_clk:
return ret;
}
-static int gemini_sata_remove(struct platform_device *pdev)
+static void gemini_sata_remove(struct platform_device *pdev)
{
struct sata_gemini *sg = platform_get_drvdata(pdev);
@@ -409,8 +408,6 @@ static int gemini_sata_remove(struct platform_device *pdev)
clk_unprepare(sg->sata0_pclk);
}
sg_singleton = NULL;
-
- return 0;
}
static const struct of_device_id gemini_sata_of_match[] = {
@@ -424,10 +421,11 @@ static struct platform_driver gemini_sata_driver = {
.of_match_table = gemini_sata_of_match,
},
.probe = gemini_sata_probe,
- .remove = gemini_sata_remove,
+ .remove_new = gemini_sata_remove,
};
module_platform_driver(gemini_sata_driver);
+MODULE_DESCRIPTION("low level driver for Cortina Systems Gemini SATA bridge");
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index d6b324d03e59..63ef7bb073ce 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -13,7 +13,7 @@
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/device.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/libata.h>
@@ -385,7 +385,7 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
- static const unsigned long timing[] = { 5, 100, 500};
+ static const unsigned int timing[] = { 5, 100, 500};
struct ata_port *ap = link->ap;
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 2c8c78ed86c1..db9c255dc9f2 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -619,7 +619,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
struct ata_port *ap = link->ap;
void __iomem *port_base = inic_port_base(ap);
void __iomem *idma_ctl = port_base + PORT_IDMA_CTL;
- const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ const unsigned int *timing = sata_ehc_deb_timing(&link->eh_context);
int rc;
/* hammer it into sane state */
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index d404e631d152..d105db5c7d81 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -3633,7 +3633,7 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
/* Workaround for errata FEr SATA#10 (part 2) */
do {
- const unsigned long *timing =
+ const unsigned int *timing =
sata_ehc_deb_timing(&link->eh_context);
rc = sata_link_hardreset(link, timing, deadline + extra,
@@ -4210,7 +4210,7 @@ err:
* A platform bus SATA device has been unplugged. Perform the needed
* cleanup. Also called on module unload for any active devices.
*/
-static int mv_platform_remove(struct platform_device *pdev)
+static void mv_platform_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct mv_host_priv *hpriv = host->private_data;
@@ -4228,7 +4228,6 @@ static int mv_platform_remove(struct platform_device *pdev)
}
phy_power_off(hpriv->port_phys[port]);
}
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -4284,7 +4283,7 @@ MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
static struct platform_driver mv_platform_driver = {
.probe = mv_platform_probe,
- .remove = mv_platform_remove,
+ .remove_new = mv_platform_remove,
.suspend = mv_platform_suspend,
.resume = mv_platform_resume,
.driver = {
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index abf5651c87ab..0a0cee755bde 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -1529,7 +1529,7 @@ static int nv_hardreset(struct ata_link *link, unsigned int *class,
sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
NULL, NULL);
else {
- const unsigned long *timing = sata_ehc_deb_timing(ehc);
+ const unsigned int *timing = sata_ehc_deb_timing(ehc);
int rc;
if (!(ehc->i.flags & ATA_EHI_QUIET))
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 34790f15c1b8..c1469d076880 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <linux/ata.h>
#include <linux/libata.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
@@ -861,15 +861,11 @@ static int sata_rcar_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ata_host *host;
struct sata_rcar_priv *priv;
- struct resource *mem;
- int irq;
- int ret = 0;
+ int irq, ret;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- if (!irq)
- return -EINVAL;
priv = devm_kzalloc(dev, sizeof(struct sata_rcar_priv), GFP_KERNEL);
if (!priv)
@@ -890,8 +886,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
host->private_data = priv;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, mem);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto err_pm_put;
@@ -914,7 +909,7 @@ err_pm_put:
return ret;
}
-static int sata_rcar_remove(struct platform_device *pdev)
+static void sata_rcar_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
struct sata_rcar_priv *priv = host->private_data;
@@ -930,8 +925,6 @@ static int sata_rcar_remove(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1016,7 +1009,7 @@ static const struct dev_pm_ops sata_rcar_pm_ops = {
static struct platform_driver sata_rcar_driver = {
.probe = sata_rcar_probe,
- .remove = sata_rcar_remove,
+ .remove_new = sata_rcar_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = sata_rcar_match,
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index e72a0257990d..142e70bfc498 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -597,7 +597,7 @@ static int sil24_init_port(struct ata_port *ap)
static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
const struct ata_taskfile *tf,
int is_cmd, u32 ctrl,
- unsigned long timeout_msec)
+ unsigned int timeout_msec)
{
void __iomem *port = sil24_port_base(ap);
struct sil24_port_priv *pp = ap->private_data;
@@ -651,7 +651,7 @@ static int sil24_softreset(struct ata_link *link, unsigned int *class,
{
struct ata_port *ap = link->ap;
int pmp = sata_srst_pmp(link);
- unsigned long timeout_msec = 0;
+ unsigned int timeout_msec = 0;
struct ata_taskfile tf;
const char *reason;
int rc;
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index ccc016072637..b51d7a9d0d90 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -232,7 +232,6 @@ static const struct scsi_host_template pdc_sata_sht = {
.dma_boundary = ATA_DMA_BOUNDARY,
};
-/* TODO: inherit from base port_ops after converting to new EH */
static struct ata_port_operations pdc_20621_ops = {
.inherits = &ata_sff_port_ops,
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 704ba73e1459..b7d7f410c256 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2306,12 +2306,12 @@ static void device_platform_notify(struct device *dev)
static void device_platform_notify_remove(struct device *dev)
{
- acpi_device_notify_remove(dev);
+ if (platform_notify_remove)
+ platform_notify_remove(dev);
software_node_notify_remove(dev);
- if (platform_notify_remove)
- platform_notify_remove(dev);
+ acpi_device_notify_remove(dev);
}
/**
@@ -3528,18 +3528,17 @@ int device_add(struct device *dev)
* the name, and force the use of dev_name()
*/
if (dev->init_name) {
- dev_set_name(dev, "%s", dev->init_name);
+ error = dev_set_name(dev, "%s", dev->init_name);
dev->init_name = NULL;
}
+ if (dev_name(dev))
+ error = 0;
/* subsystems can specify simple device enumeration */
- if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
- dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
-
- if (!dev_name(dev)) {
- error = -EINVAL;
+ else if (dev->bus && dev->bus->dev_name)
+ error = dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
+ if (error)
goto name_error;
- }
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
@@ -3815,6 +3814,17 @@ void device_del(struct device *dev)
device_platform_notify_remove(dev);
device_links_purge(dev);
+ /*
+ * If a device does not have a driver attached, we need to clean
+ * up any managed resources. We do this in device_release(), but
+ * it's never called (and we leak the device) if a managed
+ * resource holds a reference to the device. So release all
+ * managed resources here, like we do in driver_detach(). We
+ * still need to do so again in device_release() in case someone
+ * adds a new resource after this point, though.
+ */
+ devres_release_all(dev);
+
bus_notify(dev, BUS_NOTIFY_REMOVED_DEVICE);
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
glue_dir = get_glue_dir(dev);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 43dab03958f1..9ea22e165acd 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -19,6 +19,7 @@
#include <linux/cpufeature.h>
#include <linux/tick.h>
#include <linux/pm_qos.h>
+#include <linux/delay.h>
#include <linux/sched/isolation.h>
#include "base.h"
@@ -50,12 +51,30 @@ static int cpu_subsys_online(struct device *dev)
int cpuid = dev->id;
int from_nid, to_nid;
int ret;
+ int retries = 0;
from_nid = cpu_to_node(cpuid);
if (from_nid == NUMA_NO_NODE)
return -ENODEV;
+retry:
ret = cpu_device_up(dev);
+
+ /*
+ * If -EBUSY is returned, it is likely that hotplug is temporarily
+ * disabled when cpu_hotplug_disable() was called. This condition is
+ * transient. So we retry after waiting for an exponentially
+ * increasing delay up to a total of at least 620ms as some PCI
+ * device initialization can take quite a while.
+ */
+ if (ret == -EBUSY) {
+ retries++;
+ if (retries > 5)
+ return ret;
+ msleep(10 * (1 << retries));
+ goto retry;
+ }
+
/*
* When hot adding memory to memoryless node and enabling a cpu
* on the node, node number of the cpu may internally change.
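[Editorial aside, not part of the patch] The retry comment in the cpu.c hunk above quotes a total backoff of "at least 620ms"; that figure follows from msleep(10 * (1 << retries)) being reached for retries 1 through 5 before the function gives up. A standalone sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int retries, delay, total = 0;

	/* Mirrors the hunk above: msleep(10 * (1 << retries)) for retries 1..5. */
	for (retries = 1; retries <= 5; retries++) {
		delay = 10 * (1u << retries);	/* 20, 40, 80, 160, 320 ms */
		total += delay;
		printf("retry %u: sleep %u ms\n", retries, delay);
	}
	printf("total backoff: %u ms\n", total);	/* 620 ms, matching the comment */
	return 0;
}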
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 878aa7646b37..a528cec24264 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -693,6 +693,8 @@ re_probe:
device_remove(dev);
driver_sysfs_remove(dev);
+ if (dev->bus && dev->bus->dma_cleanup)
+ dev->bus->dma_cleanup(dev);
device_unbind_cleanup(dev);
goto re_probe;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 8e871ba9162f..493d533f8375 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -20,7 +20,6 @@
#include <linux/pm_runtime.h>
#include <linux/swap.h>
#include <linux/slab.h>
-#include <linux/hugetlb.h>
static struct bus_type node_subsys = {
.name = "node",
diff --git a/drivers/base/test/.kunitconfig b/drivers/base/test/.kunitconfig
new file mode 100644
index 000000000000..473923f0998b
--- /dev/null
+++ b/drivers/base/test/.kunitconfig
@@ -0,0 +1,2 @@
+CONFIG_KUNIT=y
+CONFIG_DM_KUNIT_TEST=y
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
index 610a1ba7a467..9d42051f8f8e 100644
--- a/drivers/base/test/Kconfig
+++ b/drivers/base/test/Kconfig
@@ -9,6 +9,10 @@ config TEST_ASYNC_DRIVER_PROBE
If unsure say N.
+config DM_KUNIT_TEST
+ tristate "KUnit Tests for the device model" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+
config DRIVER_PE_KUNIT_TEST
bool "KUnit Tests for property entry API" if !KUNIT_ALL_TESTS
depends on KUNIT=y
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
index 7f76fee6f989..e321dfc7e922 100644
--- a/drivers/base/test/Makefile
+++ b/drivers/base/test/Makefile
@@ -1,5 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
+obj-$(CONFIG_DM_KUNIT_TEST) += root-device-test.o
+obj-$(CONFIG_DM_KUNIT_TEST) += platform-device-test.o
+
obj-$(CONFIG_DRIVER_PE_KUNIT_TEST) += property-entry-test.o
CFLAGS_property-entry-test.o += $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/base/test/platform-device-test.c b/drivers/base/test/platform-device-test.c
new file mode 100644
index 000000000000..ea05b8785743
--- /dev/null
+++ b/drivers/base/test/platform-device-test.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/resource.h>
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#define DEVICE_NAME "test"
+
+struct test_priv {
+ bool probe_done;
+ bool release_done;
+ wait_queue_head_t probe_wq;
+ wait_queue_head_t release_wq;
+ struct device *dev;
+};
+
+static int platform_device_devm_init(struct kunit *test)
+{
+ struct test_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+ init_waitqueue_head(&priv->probe_wq);
+ init_waitqueue_head(&priv->release_wq);
+
+ test->priv = priv;
+
+ return 0;
+}
+
+static void devm_device_action(void *ptr)
+{
+ struct test_priv *priv = ptr;
+
+ priv->release_done = true;
+ wake_up_interruptible(&priv->release_wq);
+}
+
+static void devm_put_device_action(void *ptr)
+{
+ struct test_priv *priv = ptr;
+
+ put_device(priv->dev);
+ priv->release_done = true;
+ wake_up_interruptible(&priv->release_wq);
+}
+
+#define RELEASE_TIMEOUT_MS 100
+
+/*
+ * Tests that a platform bus, non-probed device will run its
+ * device-managed actions when unregistered.
+ */
+static void platform_device_devm_register_unregister_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ struct test_priv *priv = test->priv;
+ int ret;
+
+ pdev = platform_device_alloc(DEVICE_NAME, PLATFORM_DEVID_NONE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ ret = platform_device_add(pdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ priv->dev = &pdev->dev;
+
+ ret = devm_add_action_or_reset(priv->dev, devm_device_action, priv);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ platform_device_unregister(pdev);
+
+ ret = wait_event_interruptible_timeout(priv->release_wq, priv->release_done,
+ msecs_to_jiffies(RELEASE_TIMEOUT_MS));
+ KUNIT_EXPECT_GT(test, ret, 0);
+}
+
+/*
+ * Tests that a platform bus, non-probed device will run its
+ * device-managed actions when unregistered, even if someone still holds
+ * a reference to it.
+ */
+static void platform_device_devm_register_get_unregister_with_devm_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ struct test_priv *priv = test->priv;
+ int ret;
+
+ pdev = platform_device_alloc(DEVICE_NAME, PLATFORM_DEVID_NONE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ ret = platform_device_add(pdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ priv->dev = &pdev->dev;
+
+ get_device(priv->dev);
+
+ ret = devm_add_action_or_reset(priv->dev, devm_put_device_action, priv);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ platform_device_unregister(pdev);
+
+ ret = wait_event_interruptible_timeout(priv->release_wq, priv->release_done,
+ msecs_to_jiffies(RELEASE_TIMEOUT_MS));
+ KUNIT_EXPECT_GT(test, ret, 0);
+}
+
+static int fake_probe(struct platform_device *pdev)
+{
+ struct test_priv *priv = platform_get_drvdata(pdev);
+
+ priv->probe_done = true;
+ wake_up_interruptible(&priv->probe_wq);
+
+ return 0;
+}
+
+static struct platform_driver fake_driver = {
+ .probe = fake_probe,
+ .driver = {
+ .name = DEVICE_NAME,
+ },
+};
+
+/*
+ * Tests that a platform bus, probed device will run its device-managed
+ * actions when unregistered.
+ */
+static void probed_platform_device_devm_register_unregister_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ struct test_priv *priv = test->priv;
+ int ret;
+
+ ret = platform_driver_register(&fake_driver);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ pdev = platform_device_alloc(DEVICE_NAME, PLATFORM_DEVID_NONE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ priv->dev = &pdev->dev;
+ platform_set_drvdata(pdev, priv);
+
+ ret = platform_device_add(pdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = wait_event_interruptible_timeout(priv->probe_wq, priv->probe_done,
+ msecs_to_jiffies(RELEASE_TIMEOUT_MS));
+ KUNIT_ASSERT_GT(test, ret, 0);
+
+ ret = devm_add_action_or_reset(priv->dev, devm_device_action, priv);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ platform_device_unregister(pdev);
+
+ ret = wait_event_interruptible_timeout(priv->release_wq, priv->release_done,
+ msecs_to_jiffies(RELEASE_TIMEOUT_MS));
+ KUNIT_EXPECT_GT(test, ret, 0);
+
+ platform_driver_unregister(&fake_driver);
+}
+
+/*
+ * Tests that a platform bus, probed device will run its device-managed
+ * actions when unregistered, even if someone still holds a reference to
+ * it.
+ */
+static void probed_platform_device_devm_register_get_unregister_with_devm_test(struct kunit *test)
+{
+ struct platform_device *pdev;
+ struct test_priv *priv = test->priv;
+ int ret;
+
+ ret = platform_driver_register(&fake_driver);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ pdev = platform_device_alloc(DEVICE_NAME, PLATFORM_DEVID_NONE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ priv->dev = &pdev->dev;
+ platform_set_drvdata(pdev, priv);
+
+ ret = platform_device_add(pdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = wait_event_interruptible_timeout(priv->probe_wq, priv->probe_done,
+ msecs_to_jiffies(RELEASE_TIMEOUT_MS));
+ KUNIT_ASSERT_GT(test, ret, 0);
+
+ get_device(priv->dev);
+
+ ret = devm_add_action_or_reset(priv->dev, devm_put_device_action, priv);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ platform_device_unregister(pdev);
+
+ ret = wait_event_interruptible_timeout(priv->release_wq, priv->release_done,
+ msecs_to_jiffies(RELEASE_TIMEOUT_MS));
+ KUNIT_EXPECT_GT(test, ret, 0);
+
+ platform_driver_unregister(&fake_driver);
+}
+
+static struct kunit_case platform_device_devm_tests[] = {
+ KUNIT_CASE(platform_device_devm_register_unregister_test),
+ KUNIT_CASE(platform_device_devm_register_get_unregister_with_devm_test),
+ KUNIT_CASE(probed_platform_device_devm_register_unregister_test),
+ KUNIT_CASE(probed_platform_device_devm_register_get_unregister_with_devm_test),
+ {}
+};
+
+static struct kunit_suite platform_device_devm_test_suite = {
+ .name = "platform-device-devm",
+ .init = platform_device_devm_init,
+ .test_cases = platform_device_devm_tests,
+};
+
+kunit_test_suite(platform_device_devm_test_suite);
+
+MODULE_DESCRIPTION("Test module for platform devices");
+MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/test/root-device-test.c b/drivers/base/test/root-device-test.c
new file mode 100644
index 000000000000..9aea23c9123e
--- /dev/null
+++ b/drivers/base/test/root-device-test.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2023 Maxime Ripard <mripard@kernel.org>
+
+#include <kunit/resource.h>
+
+#include <linux/device.h>
+
+#define DEVICE_NAME "test"
+
+struct test_priv {
+ bool probe_done;
+ bool release_done;
+ wait_queue_head_t release_wq;
+ struct device *dev;
+};
+
+static int root_device_devm_init(struct kunit *test)
+{
+ struct test_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+ init_waitqueue_head(&priv->release_wq);
+
+ test->priv = priv;
+
+ return 0;
+}
+
+static void devm_device_action(void *ptr)
+{
+ struct test_priv *priv = ptr;
+
+ priv->release_done = true;
+ wake_up_interruptible(&priv->release_wq);
+}
+
+#define RELEASE_TIMEOUT_MS 100
+
+/*
+ * Tests that a bus-less, non-probed device will run its device-managed
+ * actions when unregistered.
+ */
+static void root_device_devm_register_unregister_test(struct kunit *test)
+{
+ struct test_priv *priv = test->priv;
+ int ret;
+
+ priv->dev = root_device_register(DEVICE_NAME);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
+
+ ret = devm_add_action_or_reset(priv->dev, devm_device_action, priv);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ root_device_unregister(priv->dev);
+
+ ret = wait_event_interruptible_timeout(priv->release_wq, priv->release_done,
+ msecs_to_jiffies(RELEASE_TIMEOUT_MS));
+ KUNIT_EXPECT_GT(test, ret, 0);
+}
+
+static void devm_put_device_action(void *ptr)
+{
+ struct test_priv *priv = ptr;
+
+ put_device(priv->dev);
+ priv->release_done = true;
+ wake_up_interruptible(&priv->release_wq);
+}
+
+/*
+ * Tests that a bus-less, non-probed device will run its device-managed
+ * actions when unregistered, even if someone still holds a reference to
+ * it.
+ */
+static void root_device_devm_register_get_unregister_with_devm_test(struct kunit *test)
+{
+ struct test_priv *priv = test->priv;
+ int ret;
+
+ priv->dev = root_device_register(DEVICE_NAME);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
+
+ get_device(priv->dev);
+
+ ret = devm_add_action_or_reset(priv->dev, devm_put_device_action, priv);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ root_device_unregister(priv->dev);
+
+ ret = wait_event_interruptible_timeout(priv->release_wq, priv->release_done,
+ msecs_to_jiffies(RELEASE_TIMEOUT_MS));
+ KUNIT_EXPECT_GT(test, ret, 0);
+}
+
+static struct kunit_case root_device_devm_tests[] = {
+ KUNIT_CASE(root_device_devm_register_unregister_test),
+ KUNIT_CASE(root_device_devm_register_get_unregister_with_devm_test),
+ {}
+};
+
+static struct kunit_suite root_device_devm_test_suite = {
+ .name = "root-device-devm",
+ .init = root_device_devm_init,
+ .test_cases = root_device_devm_tests,
+};
+
+kunit_test_suite(root_device_devm_test_suite);
+
+MODULE_DESCRIPTION("Test module for root devices");
+MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index 929410d0dd6f..3465800baa6c 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -84,7 +84,7 @@ test_platform_device_register_node(char *name, int id, int nid)
pdev = platform_device_alloc(name, id);
if (!pdev)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (nid != NUMA_NO_NODE)
set_dev_node(&pdev->dev, nid);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 79ab532aabaf..6bc86106c7b2 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1557,7 +1557,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
do {
int sent;
- bvec_set_page(&bvec, page, offset, len);
+ bvec_set_page(&bvec, page, len, offset);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
sent = sock_sendmsg(socket, &msg);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 864013019d6b..968090935eb2 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1643,9 +1643,12 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
struct nullb_queue *nq = hctx->driver_data;
LIST_HEAD(list);
int nr = 0;
+ struct request *rq;
spin_lock(&nq->poll_lock);
list_splice_init(&nq->poll_list, &list);
+ list_for_each_entry(rq, &list, queuelist)
+ blk_mq_set_request_complete(rq);
spin_unlock(&nq->poll_lock);
while (!list_empty(&list)) {
@@ -1671,16 +1674,21 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
- pr_info("rq %p timed out\n", rq);
-
if (hctx->type == HCTX_TYPE_POLL) {
struct nullb_queue *nq = hctx->driver_data;
spin_lock(&nq->poll_lock);
+ /* The request may have completed meanwhile. */
+ if (blk_mq_request_completed(rq)) {
+ spin_unlock(&nq->poll_lock);
+ return BLK_EH_DONE;
+ }
list_del_init(&rq->queuelist);
spin_unlock(&nq->poll_lock);
}
+ pr_info("rq %p timed out\n", rq);
+
/*
* If the device is marked as blocking (i.e. memory backed or zoned
* device), the submission path may be blocked waiting for resources
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 2328cc05be36..3de11f077144 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -7199,7 +7199,6 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
static ssize_t do_rbd_remove(const char *buf, size_t count)
{
struct rbd_device *rbd_dev = NULL;
- struct list_head *tmp;
int dev_id;
char opt_buf[6];
bool force = false;
@@ -7226,8 +7225,7 @@ static ssize_t do_rbd_remove(const char *buf, size_t count)
ret = -ENOENT;
spin_lock(&rbd_dev_list_lock);
- list_for_each(tmp, &rbd_dev_list) {
- rbd_dev = list_entry(tmp, struct rbd_device, node);
+ list_for_each_entry(rbd_dev, &rbd_dev_list, node) {
if (rbd_dev->dev_id == dev_id) {
ret = 0;
break;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index a76eb98c0047..a26367e9fb19 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -599,7 +599,7 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
* Return Value: None
*/
static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
- const char *flags, int count)
+ const u8 *flags, size_t count)
{
struct hci_uart *hu = tty->disc_data;
@@ -807,20 +807,14 @@ static int hci_uart_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
* We don't provide read/write/poll interface for user space.
*/
static ssize_t hci_uart_tty_read(struct tty_struct *tty, struct file *file,
- unsigned char *buf, size_t nr,
- void **cookie, unsigned long offset)
+ u8 *buf, size_t nr, void **cookie,
+ unsigned long offset)
{
return 0;
}
static ssize_t hci_uart_tty_write(struct tty_struct *tty, struct file *file,
- const unsigned char *data, size_t count)
-{
- return 0;
-}
-
-static __poll_t hci_uart_tty_poll(struct tty_struct *tty,
- struct file *filp, poll_table *wait)
+ const u8 *data, size_t count)
{
return 0;
}
@@ -835,7 +829,6 @@ static struct tty_ldisc_ops hci_uart_ldisc = {
.write = hci_uart_tty_write,
.ioctl = hci_uart_tty_ioctl,
.compat_ioctl = hci_uart_tty_ioctl,
- .poll = hci_uart_tty_poll,
.receive_buf = hci_uart_tty_receive,
.write_wakeup = hci_uart_tty_wakeup,
};
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index d2a19b07ccb8..edc0ec5a0933 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -365,12 +365,10 @@ error_alloc_mhi_buf:
}
static void mhi_firmware_copy(struct mhi_controller *mhi_cntrl,
- const struct firmware *firmware,
+ const u8 *buf, size_t remainder,
struct image_info *img_info)
{
- size_t remainder = firmware->size;
size_t to_cpy;
- const u8 *buf = firmware->data;
struct mhi_buf *mhi_buf = img_info->mhi_buf;
struct bhi_vec_entry *bhi_vec = img_info->bhi_vec;
@@ -393,9 +391,10 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_pm_state new_state;
const char *fw_name;
+ const u8 *fw_data;
void *buf;
dma_addr_t dma_addr;
- size_t size;
+ size_t size, fw_sz;
int i, ret;
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
@@ -425,6 +424,20 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
mhi_cntrl->edl_image : mhi_cntrl->fw_image;
+ /* check if the driver has already provided the firmware data */
+ if (!fw_name && mhi_cntrl->fbc_download &&
+ mhi_cntrl->fw_data && mhi_cntrl->fw_sz) {
+ if (!mhi_cntrl->sbl_size) {
+ dev_err(dev, "fw_data provided but no sbl_size\n");
+ goto error_fw_load;
+ }
+
+ size = mhi_cntrl->sbl_size;
+ fw_data = mhi_cntrl->fw_data;
+ fw_sz = mhi_cntrl->fw_sz;
+ goto skip_req_fw;
+ }
+
if (!fw_name || (mhi_cntrl->fbc_download && (!mhi_cntrl->sbl_size ||
!mhi_cntrl->seg_len))) {
dev_err(dev,
@@ -444,6 +457,10 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
if (size > firmware->size)
size = firmware->size;
+ fw_data = firmware->data;
+ fw_sz = firmware->size;
+
+skip_req_fw:
buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr,
GFP_KERNEL);
if (!buf) {
@@ -452,7 +469,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
}
/* Download image using BHI */
- memcpy(buf, firmware->data, size);
+ memcpy(buf, fw_data, size);
ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size);
dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr);
@@ -464,7 +481,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
}
/* Wait for ready since EDL image was loaded */
- if (fw_name == mhi_cntrl->edl_image) {
+ if (fw_name && fw_name == mhi_cntrl->edl_image) {
release_firmware(firmware);
goto fw_load_ready_state;
}
@@ -478,15 +495,14 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
* device transitioning into MHI READY state
*/
if (mhi_cntrl->fbc_download) {
- ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image,
- firmware->size);
+ ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, fw_sz);
if (ret) {
release_firmware(firmware);
goto error_fw_load;
}
/* Load the firmware into BHIE vec table */
- mhi_firmware_copy(mhi_cntrl, firmware, mhi_cntrl->fbc_image);
+ mhi_firmware_copy(mhi_cntrl, fw_data, fw_sz, mhi_cntrl->fbc_image);
}
release_firmware(firmware);
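The pre-provided firmware path added above is driven by controller fields rather than a firmware file name. A rough sketch of how a controller driver could opt in, assuming it already holds the image in memory (my_fw_blob and my_fw_blob_size are placeholders, not part of this patch):

	/* Keep fw_image/edl_image unset so fw_name stays NULL. */
	mhi_cntrl->fbc_download = true;
	mhi_cntrl->sbl_size = sbl_bytes;	/* mandatory, or the load is rejected */
	mhi_cntrl->seg_len = seg_bytes;
	mhi_cntrl->fw_data = my_fw_blob;
	mhi_cntrl->fw_sz = my_fw_blob_size;

mhi_fw_load_handler() then copies sbl_size bytes of fw_data over BHI and, since fbc_download is set, loads the full fw_sz image into the BHIE vector table via mhi_firmware_copy().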
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index f72fcb66f408..f78aefd2d7a3 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -759,7 +759,7 @@ static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
* so to avoid any memory possible allocation failures, vzalloc is
* used here
*/
- mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
+ mhi_cntrl->mhi_chan = vcalloc(mhi_cntrl->max_chan,
sizeof(*mhi_cntrl->mhi_chan));
if (!mhi_cntrl->mhi_chan)
return -ENOMEM;
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index 74a75439c713..dcf627b36e82 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -938,7 +938,6 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
if (!mhi_chan->configured)
break;
parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
- event_quota--;
}
break;
default:
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index db0a0b062d8e..08f3f039dbdd 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -212,6 +212,19 @@ struct mhi_pci_dev_info {
.offload_channel = false, \
}
+#define MHI_EVENT_CONFIG_SW_DATA(ev_ring, el_count) \
+ { \
+ .num_elements = el_count, \
+ .irq_moderation_ms = 0, \
+ .irq = (ev_ring) + 1, \
+ .priority = 1, \
+ .mode = MHI_DB_BRST_DISABLE, \
+ .data_type = MHI_ER_DATA, \
+ .hardware_event = false, \
+ .client_managed = false, \
+ .offload_channel = false, \
+ }
+
#define MHI_EVENT_CONFIG_HW_DATA(ev_ring, el_count, ch_num) \
{ \
.num_elements = el_count, \
@@ -237,8 +250,10 @@ static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 8, 0),
MHI_CHANNEL_CONFIG_UL_FP(34, "FIREHOSE", 32, 0),
MHI_CHANNEL_CONFIG_DL_FP(35, "FIREHOSE", 32, 0),
- MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
- MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3),
+ MHI_CHANNEL_CONFIG_UL(46, "IP_SW0", 64, 2),
+ MHI_CHANNEL_CONFIG_DL(47, "IP_SW0", 64, 3),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 4),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 5),
};
static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
@@ -246,9 +261,12 @@ static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
MHI_EVENT_CONFIG_CTRL(0, 64),
/* DIAG dedicated event ring */
MHI_EVENT_CONFIG_DATA(1, 128),
+ /* Software channels dedicated event ring */
+ MHI_EVENT_CONFIG_SW_DATA(2, 64),
+ MHI_EVENT_CONFIG_SW_DATA(3, 64),
/* Hardware channels request dedicated hardware event rings */
- MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100),
- MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101)
+ MHI_EVENT_CONFIG_HW_DATA(4, 1024, 100),
+ MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101)
};
static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
@@ -334,6 +352,16 @@ static const struct mhi_pci_dev_info mhi_quectel_em1xx_info = {
.sideband_wake = true,
};
+static const struct mhi_pci_dev_info mhi_quectel_rm5xx_info = {
+ .name = "quectel-rm5xx",
+ .edl = "qcom/prog_firehose_sdx6x.elf",
+ .config = &modem_quectel_em1xx_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = true,
+};
+
static const struct mhi_channel_config mhi_foxconn_sdx55_channels[] = {
MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 32, 0),
MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 32, 0),
@@ -567,12 +595,23 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* Telit FN990 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010),
.driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
+ /* Telit FE990 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
+ /* RM520N-GL (sdx6x), eSIM */
+ { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1004),
+ .driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info },
+ /* RM520N-GL (sdx6x), Lenovo variant */
+ { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1007),
+ .driver_data = (kernel_ulong_t) &mhi_quectel_rm5xx_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x100d), /* EM160R-GL (sdx24) */
+ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x2001), /* EM120R-GL for FCCL (sdx24) */
.driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info },
/* T99W175 (sdx55), Both for eSIM and Non-eSIM */
@@ -605,6 +644,12 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* T99W510 (sdx24), variant 3 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f2),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx24_info },
+ /* DW5932e-eSIM (sdx62), With eSIM */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f5),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
+ /* DW5932e (sdx62), Non-eSIM */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0f9),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx65_info },
/* MV31-W (Cinterion) */
{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3),
.driver_data = (kernel_ulong_t) &mhi_mv31_info },
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index 083459028a4b..8a4362d75fc4 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -470,6 +470,10 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
/* Trigger MHI RESET so that the device will not access host memory */
if (!MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state)) {
+ /* Skip MHI RESET if in RDDM state */
+ if (mhi_cntrl->rddm_image && mhi_get_exec_env(mhi_cntrl) == MHI_EE_RDDM)
+ goto skip_mhi_reset;
+
dev_dbg(dev, "Triggering MHI Reset in device\n");
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
@@ -495,6 +499,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl)
}
}
+skip_mhi_reset:
dev_dbg(dev,
"Waiting for all pending event ring processing to complete\n");
mhi_event = mhi_cntrl->mhi_event;
diff --git a/drivers/cdx/controller/cdx_controller.c b/drivers/cdx/controller/cdx_controller.c
index dc52f95f8978..bb4ae7970e21 100644
--- a/drivers/cdx/controller/cdx_controller.c
+++ b/drivers/cdx/controller/cdx_controller.c
@@ -5,7 +5,8 @@
* Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
*/
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/cdx/cdx_bus.h>
diff --git a/drivers/cdx/controller/cdx_rpmsg.c b/drivers/cdx/controller/cdx_rpmsg.c
index f37e639d6ce3..04b578a0be17 100644
--- a/drivers/cdx/controller/cdx_rpmsg.c
+++ b/drivers/cdx/controller/cdx_rpmsg.c
@@ -7,7 +7,8 @@
#include <linux/rpmsg.h>
#include <linux/remoteproc.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/cdx/cdx_bus.h>
#include <linux/module.h>
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 62de7f4ba864..84411b13c49f 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -3,6 +3,7 @@
* UniNorth AGPGART routines.
*/
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 12143854aeac..70d31aed9011 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -6,11 +6,10 @@
* Author: Sonny Rao <sonnyrao@us.ibm.com>
*/
+#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/cdev.h>
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index ea6b4013bc38..23f6f2eda84c 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -28,8 +28,13 @@
DEFINE_IDR(dev_nums_idr);
static DEFINE_MUTEX(idr_lock);
-struct class *tpm_class;
-struct class *tpmrm_class;
+const struct class tpm_class = {
+ .name = "tpm",
+ .shutdown_pre = tpm_class_shutdown,
+};
+const struct class tpmrm_class = {
+ .name = "tpmrm",
+};
dev_t tpm_devt;
static int tpm_request_locality(struct tpm_chip *chip)
@@ -336,7 +341,7 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
device_initialize(&chip->dev);
- chip->dev.class = tpm_class;
+ chip->dev.class = &tpm_class;
chip->dev.release = tpm_dev_release;
chip->dev.parent = pdev;
chip->dev.groups = chip->groups;
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 586ca10b0d72..66b16d26eecc 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -476,18 +476,15 @@ static int __init tpm_init(void)
{
int rc;
- tpm_class = class_create("tpm");
- if (IS_ERR(tpm_class)) {
+ rc = class_register(&tpm_class);
+ if (rc) {
pr_err("couldn't create tpm class\n");
- return PTR_ERR(tpm_class);
+ return rc;
}
- tpm_class->shutdown_pre = tpm_class_shutdown;
-
- tpmrm_class = class_create("tpmrm");
- if (IS_ERR(tpmrm_class)) {
+ rc = class_register(&tpmrm_class);
+ if (rc) {
pr_err("couldn't create tpmrm class\n");
- rc = PTR_ERR(tpmrm_class);
goto out_destroy_tpm_class;
}
@@ -508,9 +505,9 @@ static int __init tpm_init(void)
out_unreg_chrdev:
unregister_chrdev_region(tpm_devt, 2 * TPM_NUM_DEVICES);
out_destroy_tpmrm_class:
- class_destroy(tpmrm_class);
+ class_unregister(&tpmrm_class);
out_destroy_tpm_class:
- class_destroy(tpm_class);
+ class_unregister(&tpm_class);
return rc;
}
@@ -518,8 +515,8 @@ out_destroy_tpm_class:
static void __exit tpm_exit(void)
{
idr_destroy(&dev_nums_idr);
- class_destroy(tpm_class);
- class_destroy(tpmrm_class);
+ class_unregister(&tpm_class);
+ class_unregister(&tpmrm_class);
unregister_chrdev_region(tpm_devt, 2*TPM_NUM_DEVICES);
tpm_dev_common_exit();
}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 460bb85dd142..61445f1dc46d 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -230,8 +230,8 @@ enum tpm2_pt_props {
* compiler warnings about stack frame size. */
#define TPM_MAX_RNG_DATA 128
-extern struct class *tpm_class;
-extern struct class *tpmrm_class;
+extern const struct class tpm_class;
+extern const struct class tpmrm_class;
extern dev_t tpm_devt;
extern const struct file_operations tpm_fops;
extern const struct file_operations tpmrm_fops;
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index ffb35f0154c1..363afdd4d1d3 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -606,7 +606,7 @@ int tpm_devs_add(struct tpm_chip *chip)
device_initialize(&chip->devs);
chip->devs.parent = chip->dev.parent;
- chip->devs.class = tpmrm_class;
+ chip->devs.class = &tpmrm_class;
/*
* Get extra reference on main device to hold on behalf of devs.
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 9eb1a1859012..ea085b14ab7c 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -463,28 +463,6 @@ static bool crb_req_canceled(struct tpm_chip *chip, u8 status)
return (cancel & CRB_CANCEL_INVOKE) == CRB_CANCEL_INVOKE;
}
-static int crb_check_flags(struct tpm_chip *chip)
-{
- u32 val;
- int ret;
-
- ret = crb_request_locality(chip, 0);
- if (ret)
- return ret;
-
- ret = tpm2_get_tpm_pt(chip, TPM2_PT_MANUFACTURER, &val, NULL);
- if (ret)
- goto release;
-
- if (val == 0x414D4400U /* AMD */)
- chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
-
-release:
- crb_relinquish_locality(chip, 0);
-
- return ret;
-}
-
static const struct tpm_class_ops tpm_crb = {
.flags = TPM_OPS_AUTO_STARTUP,
.status = crb_status,
@@ -797,12 +775,13 @@ static int crb_acpi_add(struct acpi_device *device)
FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
buf->header.length,
ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
crb_pluton = ACPI_ADD_PTR(struct tpm2_crb_pluton, buf, sizeof(*buf));
rc = crb_map_pluton(dev, priv, buf, crb_pluton);
if (rc)
- return rc;
+ goto out;
}
priv->sm = sm;
@@ -826,9 +805,14 @@ static int crb_acpi_add(struct acpi_device *device)
if (rc)
goto out;
- rc = crb_check_flags(chip);
- if (rc)
- goto out;
+#ifdef CONFIG_X86
+ /* A quirk for https://www.amd.com/en/support/kb/faq/pa-410 */
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
+ dev_info(dev, "Disabling hwrng\n");
+ chip->flags |= TPM_CHIP_FLAG_HWRNG_DISABLED;
+ }
+#endif /* CONFIG_X86 */
rc = tpm_chip_register(chip);
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index ed45d04905c2..5af804c17a75 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -51,7 +51,7 @@ static void tpk_flush(void)
}
}
-static int tpk_printk(const unsigned char *buf, int count)
+static int tpk_printk(const u8 *buf, int count)
{
int i;
@@ -103,8 +103,7 @@ static void tpk_close(struct tty_struct *tty, struct file *filp)
/*
* TTY operations write function.
*/
-static int tpk_write(struct tty_struct *tty,
- const unsigned char *buf, int count)
+static ssize_t tpk_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct ttyprintk_port *tpkp = tty->driver_data;
unsigned long flags;
diff --git a/drivers/char/xillybus/Kconfig b/drivers/char/xillybus/Kconfig
index a8036dad437e..f51d533390a9 100644
--- a/drivers/char/xillybus/Kconfig
+++ b/drivers/char/xillybus/Kconfig
@@ -29,7 +29,7 @@ config XILLYBUS_PCIE
config XILLYBUS_OF
tristate "Xillybus over Device Tree"
- depends on OF && HAS_DMA
+ depends on OF && HAS_DMA && HAS_IOMEM
help
Set to M if you want Xillybus to find its resources from the
Open Firmware Flattened Device Tree. If the target is an embedded
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index c4d671a5a13d..0ba0dc4ecf06 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -461,13 +461,6 @@ config VF_PIT_TIMER
help
Support for Periodic Interrupt Timer on Freescale Vybrid Family SoCs.
-config OXNAS_RPS_TIMER
- bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST
- select TIMER_OF
- select CLKSRC_MMIO
- help
- This enables support for the Oxford Semiconductor OXNAS RPS timers.
-
config SYS_SUPPORTS_SH_CMT
bool
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 5d93c9e3fc55..368c3461dab8 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -54,7 +54,6 @@ obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o
obj-$(CONFIG_MTK_CPUX_TIMER) += timer-mediatek-cpux.o
obj-$(CONFIG_CLKSRC_PISTACHIO) += timer-pistachio.o
obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
-obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_OWL_TIMER) += timer-owl.o
obj-$(CONFIG_MILBEAUT_TIMER) += timer-milbeaut.o
obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index e733a2a1927a..7dd2c615bce2 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -792,6 +792,13 @@ static __always_inline void set_next_event_mem(const int access, unsigned long e
u64 cnt;
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+
+ /* Timer must be disabled before programming CVAL */
+ if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ }
+
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index e56307a81f4d..8ff7cd4e20bb 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -390,7 +390,7 @@ static __always_inline u64 read_hv_clock_msr(void)
static union {
struct ms_hyperv_tsc_page page;
u8 reserved[PAGE_SIZE];
-} tsc_pg __aligned(PAGE_SIZE);
+} tsc_pg __bss_decrypted __aligned(PAGE_SIZE);
static struct ms_hyperv_tsc_page *tsc_page = &tsc_pg.page;
static unsigned long tsc_pfn;
diff --git a/drivers/clocksource/timer-loongson1-pwm.c b/drivers/clocksource/timer-loongson1-pwm.c
index 6335fee03017..244d66835508 100644
--- a/drivers/clocksource/timer-loongson1-pwm.c
+++ b/drivers/clocksource/timer-loongson1-pwm.c
@@ -28,7 +28,7 @@
#define CNTR_WIDTH 24
-DEFINE_RAW_SPINLOCK(ls1x_timer_lock);
+static DEFINE_RAW_SPINLOCK(ls1x_timer_lock);
struct ls1x_clocksource {
void __iomem *reg_base;
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
deleted file mode 100644
index d514b44e67dd..000000000000
--- a/drivers/clocksource/timer-oxnas-rps.c
+++ /dev/null
@@ -1,288 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * drivers/clocksource/timer-oxnas-rps.c
- *
- * Copyright (C) 2009 Oxford Semiconductor Ltd
- * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
- * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
-#include <linux/clockchips.h>
-#include <linux/sched_clock.h>
-
-/* TIMER1 used as tick
- * TIMER2 used as clocksource
- */
-
-/* Registers definitions */
-
-#define TIMER_LOAD_REG 0x0
-#define TIMER_CURR_REG 0x4
-#define TIMER_CTRL_REG 0x8
-#define TIMER_CLRINT_REG 0xC
-
-#define TIMER_BITS 24
-
-#define TIMER_MAX_VAL (BIT(TIMER_BITS) - 1)
-
-#define TIMER_PERIODIC BIT(6)
-#define TIMER_ENABLE BIT(7)
-
-#define TIMER_DIV1 (0)
-#define TIMER_DIV16 (1 << 2)
-#define TIMER_DIV256 (2 << 2)
-
-#define TIMER1_REG_OFFSET 0
-#define TIMER2_REG_OFFSET 0x20
-
-/* Clockevent & Clocksource data */
-
-struct oxnas_rps_timer {
- struct clock_event_device clkevent;
- void __iomem *clksrc_base;
- void __iomem *clkevt_base;
- unsigned long timer_period;
- unsigned int timer_prescaler;
- struct clk *clk;
- int irq;
-};
-
-static irqreturn_t oxnas_rps_timer_irq(int irq, void *dev_id)
-{
- struct oxnas_rps_timer *rps = dev_id;
-
- writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG);
-
- rps->clkevent.event_handler(&rps->clkevent);
-
- return IRQ_HANDLED;
-}
-
-static void oxnas_rps_timer_config(struct oxnas_rps_timer *rps,
- unsigned long period,
- unsigned int periodic)
-{
- uint32_t cfg = rps->timer_prescaler;
-
- if (period)
- cfg |= TIMER_ENABLE;
-
- if (periodic)
- cfg |= TIMER_PERIODIC;
-
- writel_relaxed(period, rps->clkevt_base + TIMER_LOAD_REG);
- writel_relaxed(cfg, rps->clkevt_base + TIMER_CTRL_REG);
-}
-
-static int oxnas_rps_timer_shutdown(struct clock_event_device *evt)
-{
- struct oxnas_rps_timer *rps =
- container_of(evt, struct oxnas_rps_timer, clkevent);
-
- oxnas_rps_timer_config(rps, 0, 0);
-
- return 0;
-}
-
-static int oxnas_rps_timer_set_periodic(struct clock_event_device *evt)
-{
- struct oxnas_rps_timer *rps =
- container_of(evt, struct oxnas_rps_timer, clkevent);
-
- oxnas_rps_timer_config(rps, rps->timer_period, 1);
-
- return 0;
-}
-
-static int oxnas_rps_timer_set_oneshot(struct clock_event_device *evt)
-{
- struct oxnas_rps_timer *rps =
- container_of(evt, struct oxnas_rps_timer, clkevent);
-
- oxnas_rps_timer_config(rps, rps->timer_period, 0);
-
- return 0;
-}
-
-static int oxnas_rps_timer_next_event(unsigned long delta,
- struct clock_event_device *evt)
-{
- struct oxnas_rps_timer *rps =
- container_of(evt, struct oxnas_rps_timer, clkevent);
-
- oxnas_rps_timer_config(rps, delta, 0);
-
- return 0;
-}
-
-static int __init oxnas_rps_clockevent_init(struct oxnas_rps_timer *rps)
-{
- ulong clk_rate = clk_get_rate(rps->clk);
- ulong timer_rate;
-
- /* Start with prescaler 1 */
- rps->timer_prescaler = TIMER_DIV1;
- rps->timer_period = DIV_ROUND_UP(clk_rate, HZ);
- timer_rate = clk_rate;
-
- if (rps->timer_period > TIMER_MAX_VAL) {
- rps->timer_prescaler = TIMER_DIV16;
- timer_rate = clk_rate / 16;
- rps->timer_period = DIV_ROUND_UP(timer_rate, HZ);
- }
- if (rps->timer_period > TIMER_MAX_VAL) {
- rps->timer_prescaler = TIMER_DIV256;
- timer_rate = clk_rate / 256;
- rps->timer_period = DIV_ROUND_UP(timer_rate, HZ);
- }
-
- rps->clkevent.name = "oxnas-rps";
- rps->clkevent.features = CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_DYNIRQ;
- rps->clkevent.tick_resume = oxnas_rps_timer_shutdown;
- rps->clkevent.set_state_shutdown = oxnas_rps_timer_shutdown;
- rps->clkevent.set_state_periodic = oxnas_rps_timer_set_periodic;
- rps->clkevent.set_state_oneshot = oxnas_rps_timer_set_oneshot;
- rps->clkevent.set_next_event = oxnas_rps_timer_next_event;
- rps->clkevent.rating = 200;
- rps->clkevent.cpumask = cpu_possible_mask;
- rps->clkevent.irq = rps->irq;
- clockevents_config_and_register(&rps->clkevent,
- timer_rate,
- 1,
- TIMER_MAX_VAL);
-
- pr_info("Registered clock event rate %luHz prescaler %x period %lu\n",
- clk_rate,
- rps->timer_prescaler,
- rps->timer_period);
-
- return 0;
-}
-
-/* Clocksource */
-
-static void __iomem *timer_sched_base;
-
-static u64 notrace oxnas_rps_read_sched_clock(void)
-{
- return ~readl_relaxed(timer_sched_base);
-}
-
-static int __init oxnas_rps_clocksource_init(struct oxnas_rps_timer *rps)
-{
- ulong clk_rate = clk_get_rate(rps->clk);
- int ret;
-
- /* use prescale 16 */
- clk_rate = clk_rate / 16;
-
- writel_relaxed(TIMER_MAX_VAL, rps->clksrc_base + TIMER_LOAD_REG);
- writel_relaxed(TIMER_PERIODIC | TIMER_ENABLE | TIMER_DIV16,
- rps->clksrc_base + TIMER_CTRL_REG);
-
- timer_sched_base = rps->clksrc_base + TIMER_CURR_REG;
- sched_clock_register(oxnas_rps_read_sched_clock,
- TIMER_BITS, clk_rate);
- ret = clocksource_mmio_init(timer_sched_base,
- "oxnas_rps_clocksource_timer",
- clk_rate, 250, TIMER_BITS,
- clocksource_mmio_readl_down);
- if (WARN_ON(ret)) {
- pr_err("can't register clocksource\n");
- return ret;
- }
-
- pr_info("Registered clocksource rate %luHz\n", clk_rate);
-
- return 0;
-}
-
-static int __init oxnas_rps_timer_init(struct device_node *np)
-{
- struct oxnas_rps_timer *rps;
- void __iomem *base;
- int ret;
-
- rps = kzalloc(sizeof(*rps), GFP_KERNEL);
- if (!rps)
- return -ENOMEM;
-
- rps->clk = of_clk_get(np, 0);
- if (IS_ERR(rps->clk)) {
- ret = PTR_ERR(rps->clk);
- goto err_alloc;
- }
-
- ret = clk_prepare_enable(rps->clk);
- if (ret)
- goto err_clk;
-
- base = of_iomap(np, 0);
- if (!base) {
- ret = -ENXIO;
- goto err_clk_prepare;
- }
-
- rps->irq = irq_of_parse_and_map(np, 0);
- if (!rps->irq) {
- ret = -EINVAL;
- goto err_iomap;
- }
-
- rps->clkevt_base = base + TIMER1_REG_OFFSET;
- rps->clksrc_base = base + TIMER2_REG_OFFSET;
-
- /* Disable timers */
- writel_relaxed(0, rps->clkevt_base + TIMER_CTRL_REG);
- writel_relaxed(0, rps->clksrc_base + TIMER_CTRL_REG);
- writel_relaxed(0, rps->clkevt_base + TIMER_LOAD_REG);
- writel_relaxed(0, rps->clksrc_base + TIMER_LOAD_REG);
- writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG);
- writel_relaxed(0, rps->clksrc_base + TIMER_CLRINT_REG);
-
- ret = request_irq(rps->irq, oxnas_rps_timer_irq,
- IRQF_TIMER | IRQF_IRQPOLL,
- "rps-timer", rps);
- if (ret)
- goto err_iomap;
-
- ret = oxnas_rps_clocksource_init(rps);
- if (ret)
- goto err_irqreq;
-
- ret = oxnas_rps_clockevent_init(rps);
- if (ret)
- goto err_irqreq;
-
- return 0;
-
-err_irqreq:
- free_irq(rps->irq, rps);
-err_iomap:
- iounmap(base);
-err_clk_prepare:
- clk_disable_unprepare(rps->clk);
-err_clk:
- clk_put(rps->clk);
-err_alloc:
- kfree(rps);
-
- return ret;
-}
-
-TIMER_OF_DECLARE(ox810se_rps,
- "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
-TIMER_OF_DECLARE(ox820_rps,
- "oxsemi,ox820-rps-timer", oxnas_rps_timer_init);
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 7d5fa9069906..69fee3540d37 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -16,9 +16,7 @@
#include <linux/irqreturn.h>
#include <linux/reset.h>
#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#define TIMER_IRQ_EN_REG 0x00
#define TIMER_IRQ_EN(val) BIT(val)
@@ -40,26 +38,16 @@ struct sun5i_timer {
struct clk *clk;
struct notifier_block clk_rate_cb;
u32 ticks_per_jiffy;
-};
-
-#define to_sun5i_timer(x) \
- container_of(x, struct sun5i_timer, clk_rate_cb)
-
-struct sun5i_timer_clksrc {
- struct sun5i_timer timer;
struct clocksource clksrc;
-};
-
-#define to_sun5i_timer_clksrc(x) \
- container_of(x, struct sun5i_timer_clksrc, clksrc)
-
-struct sun5i_timer_clkevt {
- struct sun5i_timer timer;
struct clock_event_device clkevt;
};
-#define to_sun5i_timer_clkevt(x) \
- container_of(x, struct sun5i_timer_clkevt, clkevt)
+#define nb_to_sun5i_timer(x) \
+ container_of(x, struct sun5i_timer, clk_rate_cb)
+#define clksrc_to_sun5i_timer(x) \
+ container_of(x, struct sun5i_timer, clksrc)
+#define clkevt_to_sun5i_timer(x) \
+ container_of(x, struct sun5i_timer, clkevt)
/*
* When we disable a timer, we need to wait at least for 2 cycles of
@@ -67,30 +55,30 @@ struct sun5i_timer_clkevt {
 * that is already set up and runs at the same frequency as the other
 * timers, and will never be disabled.
*/
-static void sun5i_clkevt_sync(struct sun5i_timer_clkevt *ce)
+static void sun5i_clkevt_sync(struct sun5i_timer *ce)
{
- u32 old = readl(ce->timer.base + TIMER_CNTVAL_LO_REG(1));
+ u32 old = readl(ce->base + TIMER_CNTVAL_LO_REG(1));
- while ((old - readl(ce->timer.base + TIMER_CNTVAL_LO_REG(1))) < TIMER_SYNC_TICKS)
+ while ((old - readl(ce->base + TIMER_CNTVAL_LO_REG(1))) < TIMER_SYNC_TICKS)
cpu_relax();
}
-static void sun5i_clkevt_time_stop(struct sun5i_timer_clkevt *ce, u8 timer)
+static void sun5i_clkevt_time_stop(struct sun5i_timer *ce, u8 timer)
{
- u32 val = readl(ce->timer.base + TIMER_CTL_REG(timer));
- writel(val & ~TIMER_CTL_ENABLE, ce->timer.base + TIMER_CTL_REG(timer));
+ u32 val = readl(ce->base + TIMER_CTL_REG(timer));
+ writel(val & ~TIMER_CTL_ENABLE, ce->base + TIMER_CTL_REG(timer));
sun5i_clkevt_sync(ce);
}
-static void sun5i_clkevt_time_setup(struct sun5i_timer_clkevt *ce, u8 timer, u32 delay)
+static void sun5i_clkevt_time_setup(struct sun5i_timer *ce, u8 timer, u32 delay)
{
- writel(delay, ce->timer.base + TIMER_INTVAL_LO_REG(timer));
+ writel(delay, ce->base + TIMER_INTVAL_LO_REG(timer));
}
-static void sun5i_clkevt_time_start(struct sun5i_timer_clkevt *ce, u8 timer, bool periodic)
+static void sun5i_clkevt_time_start(struct sun5i_timer *ce, u8 timer, bool periodic)
{
- u32 val = readl(ce->timer.base + TIMER_CTL_REG(timer));
+ u32 val = readl(ce->base + TIMER_CTL_REG(timer));
if (periodic)
val &= ~TIMER_CTL_ONESHOT;
@@ -98,12 +86,12 @@ static void sun5i_clkevt_time_start(struct sun5i_timer_clkevt *ce, u8 timer, boo
val |= TIMER_CTL_ONESHOT;
writel(val | TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
- ce->timer.base + TIMER_CTL_REG(timer));
+ ce->base + TIMER_CTL_REG(timer));
}
static int sun5i_clkevt_shutdown(struct clock_event_device *clkevt)
{
- struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt);
+ struct sun5i_timer *ce = clkevt_to_sun5i_timer(clkevt);
sun5i_clkevt_time_stop(ce, 0);
return 0;
@@ -111,7 +99,7 @@ static int sun5i_clkevt_shutdown(struct clock_event_device *clkevt)
static int sun5i_clkevt_set_oneshot(struct clock_event_device *clkevt)
{
- struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt);
+ struct sun5i_timer *ce = clkevt_to_sun5i_timer(clkevt);
sun5i_clkevt_time_stop(ce, 0);
sun5i_clkevt_time_start(ce, 0, false);
@@ -120,10 +108,10 @@ static int sun5i_clkevt_set_oneshot(struct clock_event_device *clkevt)
static int sun5i_clkevt_set_periodic(struct clock_event_device *clkevt)
{
- struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt);
+ struct sun5i_timer *ce = clkevt_to_sun5i_timer(clkevt);
sun5i_clkevt_time_stop(ce, 0);
- sun5i_clkevt_time_setup(ce, 0, ce->timer.ticks_per_jiffy);
+ sun5i_clkevt_time_setup(ce, 0, ce->ticks_per_jiffy);
sun5i_clkevt_time_start(ce, 0, true);
return 0;
}
@@ -131,7 +119,7 @@ static int sun5i_clkevt_set_periodic(struct clock_event_device *clkevt)
static int sun5i_clkevt_next_event(unsigned long evt,
struct clock_event_device *clkevt)
{
- struct sun5i_timer_clkevt *ce = to_sun5i_timer_clkevt(clkevt);
+ struct sun5i_timer *ce = clkevt_to_sun5i_timer(clkevt);
sun5i_clkevt_time_stop(ce, 0);
sun5i_clkevt_time_setup(ce, 0, evt - TIMER_SYNC_TICKS);
@@ -142,9 +130,9 @@ static int sun5i_clkevt_next_event(unsigned long evt,
static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
{
- struct sun5i_timer_clkevt *ce = dev_id;
+ struct sun5i_timer *ce = dev_id;
- writel(0x1, ce->timer.base + TIMER_IRQ_ST_REG);
+ writel(0x1, ce->base + TIMER_IRQ_ST_REG);
ce->clkevt.event_handler(&ce->clkevt);
return IRQ_HANDLED;
@@ -152,17 +140,16 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id)
static u64 sun5i_clksrc_read(struct clocksource *clksrc)
{
- struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc);
+ struct sun5i_timer *cs = clksrc_to_sun5i_timer(clksrc);
- return ~readl(cs->timer.base + TIMER_CNTVAL_LO_REG(1));
+ return ~readl(cs->base + TIMER_CNTVAL_LO_REG(1));
}
-static int sun5i_rate_cb_clksrc(struct notifier_block *nb,
- unsigned long event, void *data)
+static int sun5i_rate_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
{
struct clk_notifier_data *ndata = data;
- struct sun5i_timer *timer = to_sun5i_timer(nb);
- struct sun5i_timer_clksrc *cs = container_of(timer, struct sun5i_timer_clksrc, timer);
+ struct sun5i_timer *cs = nb_to_sun5i_timer(nb);
switch (event) {
case PRE_RATE_CHANGE:
@@ -171,6 +158,8 @@ static int sun5i_rate_cb_clksrc(struct notifier_block *nb,
case POST_RATE_CHANGE:
clocksource_register_hz(&cs->clksrc, ndata->new_rate);
+ clockevents_update_freq(&cs->clkevt, ndata->new_rate);
+ cs->ticks_per_jiffy = DIV_ROUND_UP(ndata->new_rate, HZ);
break;
default:
@@ -180,47 +169,18 @@ static int sun5i_rate_cb_clksrc(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static int __init sun5i_setup_clocksource(struct device_node *node,
- void __iomem *base,
- struct clk *clk, int irq)
+static int sun5i_setup_clocksource(struct platform_device *pdev,
+ unsigned long rate)
{
- struct sun5i_timer_clksrc *cs;
- unsigned long rate;
+ struct sun5i_timer *cs = platform_get_drvdata(pdev);
+ void __iomem *base = cs->base;
int ret;
- cs = kzalloc(sizeof(*cs), GFP_KERNEL);
- if (!cs)
- return -ENOMEM;
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("Couldn't enable parent clock\n");
- goto err_free;
- }
-
- rate = clk_get_rate(clk);
- if (!rate) {
- pr_err("Couldn't get parent clock rate\n");
- ret = -EINVAL;
- goto err_disable_clk;
- }
-
- cs->timer.base = base;
- cs->timer.clk = clk;
- cs->timer.clk_rate_cb.notifier_call = sun5i_rate_cb_clksrc;
- cs->timer.clk_rate_cb.next = NULL;
-
- ret = clk_notifier_register(clk, &cs->timer.clk_rate_cb);
- if (ret) {
- pr_err("Unable to register clock notifier.\n");
- goto err_disable_clk;
- }
-
writel(~0, base + TIMER_INTVAL_LO_REG(1));
writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
base + TIMER_CTL_REG(1));
- cs->clksrc.name = node->name;
+ cs->clksrc.name = pdev->dev.of_node->name;
cs->clksrc.rating = 340;
cs->clksrc.read = sun5i_clksrc_read;
cs->clksrc.mask = CLOCKSOURCE_MASK(32);
@@ -228,74 +188,23 @@ static int __init sun5i_setup_clocksource(struct device_node *node,
ret = clocksource_register_hz(&cs->clksrc, rate);
if (ret) {
- pr_err("Couldn't register clock source.\n");
- goto err_remove_notifier;
+ dev_err(&pdev->dev, "Couldn't register clock source.\n");
+ return ret;
}
return 0;
-
-err_remove_notifier:
- clk_notifier_unregister(clk, &cs->timer.clk_rate_cb);
-err_disable_clk:
- clk_disable_unprepare(clk);
-err_free:
- kfree(cs);
- return ret;
}
-static int sun5i_rate_cb_clkevt(struct notifier_block *nb,
- unsigned long event, void *data)
+static int sun5i_setup_clockevent(struct platform_device *pdev,
+ unsigned long rate, int irq)
{
- struct clk_notifier_data *ndata = data;
- struct sun5i_timer *timer = to_sun5i_timer(nb);
- struct sun5i_timer_clkevt *ce = container_of(timer, struct sun5i_timer_clkevt, timer);
-
- if (event == POST_RATE_CHANGE) {
- clockevents_update_freq(&ce->clkevt, ndata->new_rate);
- ce->timer.ticks_per_jiffy = DIV_ROUND_UP(ndata->new_rate, HZ);
- }
-
- return NOTIFY_DONE;
-}
-
-static int __init sun5i_setup_clockevent(struct device_node *node, void __iomem *base,
- struct clk *clk, int irq)
-{
- struct sun5i_timer_clkevt *ce;
- unsigned long rate;
+ struct device *dev = &pdev->dev;
+ struct sun5i_timer *ce = platform_get_drvdata(pdev);
+ void __iomem *base = ce->base;
int ret;
u32 val;
- ce = kzalloc(sizeof(*ce), GFP_KERNEL);
- if (!ce)
- return -ENOMEM;
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- pr_err("Couldn't enable parent clock\n");
- goto err_free;
- }
-
- rate = clk_get_rate(clk);
- if (!rate) {
- pr_err("Couldn't get parent clock rate\n");
- ret = -EINVAL;
- goto err_disable_clk;
- }
-
- ce->timer.base = base;
- ce->timer.ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
- ce->timer.clk = clk;
- ce->timer.clk_rate_cb.notifier_call = sun5i_rate_cb_clkevt;
- ce->timer.clk_rate_cb.next = NULL;
-
- ret = clk_notifier_register(clk, &ce->timer.clk_rate_cb);
- if (ret) {
- pr_err("Unable to register clock notifier.\n");
- goto err_disable_clk;
- }
-
- ce->clkevt.name = node->name;
+ ce->clkevt.name = dev->of_node->name;
ce->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
ce->clkevt.set_next_event = sun5i_clkevt_next_event;
ce->clkevt.set_state_shutdown = sun5i_clkevt_shutdown;
@@ -313,60 +222,109 @@ static int __init sun5i_setup_clockevent(struct device_node *node, void __iomem
clockevents_config_and_register(&ce->clkevt, rate,
TIMER_SYNC_TICKS, 0xffffffff);
- ret = request_irq(irq, sun5i_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
- "sun5i_timer0", ce);
+ ret = devm_request_irq(dev, irq, sun5i_timer_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL,
+ "sun5i_timer0", ce);
if (ret) {
- pr_err("Unable to register interrupt\n");
- goto err_remove_notifier;
+ dev_err(dev, "Unable to register interrupt\n");
+ return ret;
}
return 0;
-
-err_remove_notifier:
- clk_notifier_unregister(clk, &ce->timer.clk_rate_cb);
-err_disable_clk:
- clk_disable_unprepare(clk);
-err_free:
- kfree(ce);
- return ret;
}
-static int __init sun5i_timer_init(struct device_node *node)
+static int sun5i_timer_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct sun5i_timer *st;
struct reset_control *rstc;
void __iomem *timer_base;
struct clk *clk;
+ unsigned long rate;
int irq, ret;
- timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, st);
+
+ timer_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(timer_base)) {
- pr_err("Can't map registers\n");
+ dev_err(dev, "Can't map registers\n");
return PTR_ERR(timer_base);
}
- irq = irq_of_parse_and_map(node, 0);
- if (irq <= 0) {
- pr_err("Can't parse IRQ\n");
- return -EINVAL;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "Can't get IRQ\n");
+ return irq;
}
- clk = of_clk_get(node, 0);
+ clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk)) {
- pr_err("Can't get timer clock\n");
+ dev_err(dev, "Can't get timer clock\n");
return PTR_ERR(clk);
}
- rstc = of_reset_control_get(node, NULL);
- if (!IS_ERR(rstc))
+ rate = clk_get_rate(clk);
+ if (!rate) {
+ dev_err(dev, "Couldn't get parent clock rate\n");
+ return -EINVAL;
+ }
+
+ st->base = timer_base;
+ st->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
+ st->clk = clk;
+ st->clk_rate_cb.notifier_call = sun5i_rate_cb;
+ st->clk_rate_cb.next = NULL;
+
+ ret = devm_clk_notifier_register(dev, clk, &st->clk_rate_cb);
+ if (ret) {
+ dev_err(dev, "Unable to register clock notifier.\n");
+ return ret;
+ }
+
+ rstc = devm_reset_control_get_optional_exclusive(dev, NULL);
+ if (rstc)
reset_control_deassert(rstc);
- ret = sun5i_setup_clocksource(node, timer_base, clk, irq);
+ ret = sun5i_setup_clocksource(pdev, rate);
if (ret)
return ret;
- return sun5i_setup_clockevent(node, timer_base, clk, irq);
+ ret = sun5i_setup_clockevent(pdev, rate, irq);
+ if (ret)
+ goto err_unreg_clocksource;
+
+ return 0;
+
+err_unreg_clocksource:
+ clocksource_unregister(&st->clksrc);
+ return ret;
}
-TIMER_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
- sun5i_timer_init);
-TIMER_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer",
- sun5i_timer_init);
+
+static void sun5i_timer_remove(struct platform_device *pdev)
+{
+ struct sun5i_timer *st = platform_get_drvdata(pdev);
+
+ clocksource_unregister(&st->clksrc);
+}
+
+static const struct of_device_id sun5i_timer_of_match[] = {
+ { .compatible = "allwinner,sun5i-a13-hstimer" },
+ { .compatible = "allwinner,sun7i-a20-hstimer" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sun5i_timer_of_match);
+
+static struct platform_driver sun5i_timer_driver = {
+ .probe = sun5i_timer_probe,
+ .remove_new = sun5i_timer_remove,
+ .driver = {
+ .name = "sun5i-timer",
+ .of_match_table = sun5i_timer_of_match,
+ .suppress_bind_attrs = true,
+ },
+};
+module_platform_driver(sun5i_timer_driver);
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
index 62962ae84b77..497bc05dca4d 100644
--- a/drivers/counter/Kconfig
+++ b/drivers/counter/Kconfig
@@ -92,7 +92,7 @@ config MICROCHIP_TCB_CAPTURE
config RZ_MTU3_CNT
tristate "Renesas RZ/G2L MTU3a counter driver"
- depends on RZ_MTU3 || COMPILE_TEST
+ depends on RZ_MTU3
help
Enable support for MTU3a counter driver found on Renesas RZ/G2L alike
SoCs. This IP supports both 16-bit and 32-bit phase counting mode
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index e2d1dc6ca668..975e431d1590 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <soc/at91/atmel_tcb.h>
diff --git a/drivers/counter/rz-mtu3-cnt.c b/drivers/counter/rz-mtu3-cnt.c
index 48c83933aa2f..ee821493b166 100644
--- a/drivers/counter/rz-mtu3-cnt.c
+++ b/drivers/counter/rz-mtu3-cnt.c
@@ -500,8 +500,8 @@ static int rz_mtu3_count_enable_write(struct counter_device *counter,
int ret = 0;
if (enable) {
- pm_runtime_get_sync(ch->dev);
mutex_lock(&priv->lock);
+ pm_runtime_get_sync(ch->dev);
ret = rz_mtu3_initialize_counter(counter, count->id);
if (ret == 0)
priv->count_is_enabled[count->id] = true;
@@ -510,8 +510,8 @@ static int rz_mtu3_count_enable_write(struct counter_device *counter,
mutex_lock(&priv->lock);
rz_mtu3_terminate_counter(counter, count->id);
priv->count_is_enabled[count->id] = false;
- mutex_unlock(&priv->lock);
pm_runtime_put(ch->dev);
+ mutex_unlock(&priv->lock);
}
return ret;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a757f90aa9d6..60ed89000e82 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -86,6 +86,7 @@ static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_governor *new_gov,
unsigned int new_pol);
+static bool cpufreq_boost_supported(void);
/*
* Two notifier lists: the "policy" list is involved in the
@@ -455,8 +456,10 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
policy->cur,
policy->cpuinfo.max_freq);
+ spin_lock(&policy->transition_lock);
policy->transition_ongoing = false;
policy->transition_task = NULL;
+ spin_unlock(&policy->transition_lock);
wake_up(&policy->transition_wait);
}
@@ -621,6 +624,40 @@ static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
}
define_one_global_rw(boost);
+static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", policy->boost_enabled);
+}
+
+static ssize_t store_local_boost(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ int ret, enable;
+
+ ret = kstrtoint(buf, 10, &enable);
+ if (ret || enable < 0 || enable > 1)
+ return -EINVAL;
+
+ if (!cpufreq_driver->boost_enabled)
+ return -EINVAL;
+
+ if (policy->boost_enabled == enable)
+ return count;
+
+ cpus_read_lock();
+ ret = cpufreq_driver->set_boost(policy, enable);
+ cpus_read_unlock();
+
+ if (ret)
+ return ret;
+
+ policy->boost_enabled = enable;
+
+ return count;
+}
+
+static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
+
static struct cpufreq_governor *find_governor(const char *str_governor)
{
struct cpufreq_governor *t;
@@ -1055,6 +1092,12 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
return ret;
}
+ if (cpufreq_boost_supported()) {
+ ret = sysfs_create_file(&policy->kobj, &local_boost.attr);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1943,16 +1986,16 @@ void cpufreq_resume(void)
for_each_active_policy(policy) {
if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
- pr_err("%s: Failed to resume driver: %p\n", __func__,
- policy);
+ pr_err("%s: Failed to resume driver: %s\n", __func__,
+ cpufreq_driver->name);
} else if (has_target()) {
down_write(&policy->rwsem);
ret = cpufreq_start_governor(policy);
up_write(&policy->rwsem);
if (ret)
- pr_err("%s: Failed to start governor for policy: %p\n",
- __func__, policy);
+ pr_err("%s: Failed to start governor for CPU%u's policy\n",
+ __func__, policy->cpu);
}
}
}
@@ -2716,6 +2759,8 @@ int cpufreq_boost_trigger_state(int state)
ret = cpufreq_driver->set_boost(policy, state);
if (ret)
goto err_reset_state;
+
+ policy->boost_enabled = state;
}
cpus_read_unlock();
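
Beyond the locking of the transition flags, the cpufreq.c changes add a per-policy "boost" attribute (created only when the driver supports boost) and make cpufreq_boost_trigger_state() record the result in policy->boost_enabled so the global and per-policy views agree. Assuming the attribute appears under each policy directory, e.g. /sys/devices/system/cpu/cpufreq/policy0/boost (path inferred from the policy kobject layout, not stated in the patch), a trivial userspace sketch that turns it on:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Path assumed from the policy kobject layout; adjust to the target system. */
    #define POLICY_BOOST "/sys/devices/system/cpu/cpufreq/policy0/boost"

    int main(void)
    {
        int fd = open(POLICY_BOOST, O_WRONLY);

        if (fd < 0) {
            /* The attribute is only created when the driver reports boost support. */
            perror("open");
            return 1;
        }
        /* store_local_boost() accepts "0" or "1"; anything else returns -EINVAL. */
        if (write(fd, "1\n", 2) < 0)
            perror("write");
        close(fd);
        return 0;
    }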
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 85da677c43d6..af44ee6a6430 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -439,7 +439,7 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
ret = gov->init(dbs_data);
if (ret)
- goto free_policy_dbs_info;
+ goto free_dbs_data;
/*
* The sampling interval should not be less than the transition latency
@@ -474,6 +474,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
if (!have_governor_per_policy())
gov->gdbs_data = NULL;
gov->exit(dbs_data);
+
+free_dbs_data:
kfree(dbs_data);
free_policy_dbs_info:
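
The governor fix retargets the gov->init() failure path at a new free_dbs_data label, so the freshly allocated dbs_data is freed instead of leaking when the old code jumped straight to free_policy_dbs_info. The idiom is the usual ladder of cleanup labels, each undoing exactly one earlier step; a condensed sketch with hypothetical allocations standing in for policy_dbs and dbs_data:

    #include <linux/slab.h>

    /* Hypothetical two-step setup; 'a' and 'b' stand in for policy_dbs and dbs_data. */
    static int my_governor_init(void)
    {
        void *a, *b;
        int ret;

        a = kzalloc(16, GFP_KERNEL);        /* step 1 */
        if (!a)
            return -ENOMEM;

        b = kzalloc(16, GFP_KERNEL);        /* step 2 */
        if (!b) {
            ret = -ENOMEM;
            goto free_a;
        }

        ret = -EINVAL;                      /* step 3 fails, as gov->init() might */
        if (ret)
            goto free_b;                    /* must release step 2, not jump past it */

        return 0;

    free_b:
        kfree(b);
    free_a:
        kfree(a);
        return ret;
    }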
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 84fe37def0f1..6f8b5ea7aeae 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -232,8 +232,8 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
status = ioread16(&pcch_hdr->status);
iowrite16(0, &pcch_hdr->status);
- cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE);
spin_unlock(&pcc_lock);
+ cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE);
if (status != CMD_COMPLETE) {
pr_debug("target: FAILED for cpu %d, with status: 0x%x\n",
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 08fdd0e2ed1b..4ccae1a3b884 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -474,25 +474,6 @@ config MXS_DMA
Support the MXS DMA engine. This engine including APBH-DMA
and APBX-DMA is integrated into some Freescale chips.
-config MX3_IPU
- bool "MX3x Image Processing Unit support"
- depends on ARCH_MXC
- select DMA_ENGINE
- default y
- help
- If you plan to use the Image Processing unit in the i.MX3x, say
- Y here. If unsure, select Y.
-
-config MX3_IPU_IRQS
- int "Number of dynamically mapped interrupts for IPU"
- depends on MX3_IPU
- range 2 137
- default 4
- help
- Out of 137 interrupt sources on i.MX31 IPU only very few are used.
- To avoid bloating the irq_desc[] array we allocate a sufficient
- number of IRQ slots and map them dynamically to specific sources.
-
config NBPFAXI_DMA
tristate "Renesas Type-AXI NBPF DMA support"
select DMA_ENGINE
@@ -699,7 +680,7 @@ config XGENE_DMA
config XILINX_DMA
tristate "Xilinx AXI DMAS Engine"
- depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
+ depends on HAS_IOMEM
select DMA_ENGINE
help
Enable support for Xilinx AXI VDMA Soft IP.
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a4fd1ce29510..83553a97a010 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -32,8 +32,10 @@ obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_DW_EDMA) += dw-edma/
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
-obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
-obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
+fsl-edma-objs := fsl-edma-main.o fsl-edma-common.o
+obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+mcf-edma-objs := mcf-edma-main.o fsl-edma-common.o
+obj-$(CONFIG_MCF_EDMA) += mcf-edma.o
obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
obj-$(CONFIG_HISI_DMA) += hisi_dma.o
@@ -55,7 +57,6 @@ obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_MV_XOR_V2) += mv_xor_v2.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
-obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
obj-$(CONFIG_OWL_DMA) += owl-dma.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c
index 4cf8da77bdd9..3af795635c5c 100644
--- a/drivers/dma/apple-admac.c
+++ b/drivers/dma/apple-admac.c
@@ -10,8 +10,9 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index ee3a219e3a89..b2876f67471f 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -20,7 +20,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/overflow.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 064761289a73..94ea35330eb5 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -35,7 +35,9 @@
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/raid/pq.h>
diff --git a/drivers/dma/bestcomm/bestcomm.c b/drivers/dma/bestcomm/bestcomm.c
index eabbcfcaa7cb..80096f94032d 100644
--- a/drivers/dma/bestcomm/bestcomm.c
+++ b/drivers/dma/bestcomm/bestcomm.c
@@ -14,9 +14,8 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mpc52xx.h>
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 9c1a6e9a9c03..adbd47bd6adf 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -13,7 +13,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 826b98284fa1..b7388ae62d7f 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1147,69 +1147,27 @@ int dma_async_device_register(struct dma_device *device)
device->owner = device->dev->driver->owner;
- if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_MEMCPY");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_XOR");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_XOR_VAL");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_PQ");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_PQ_VAL");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_MEMSET");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_INTERRUPT");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_CYCLIC");
- return -EIO;
- }
-
- if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
- dev_err(device->dev,
- "Device claims capability %s, but op is not defined\n",
- "DMA_INTERLEAVE");
- return -EIO;
- }
+#define CHECK_CAP(_name, _type) \
+{ \
+ if (dma_has_cap(_type, device->cap_mask) && !device->device_prep_##_name) { \
+ dev_err(device->dev, \
+ "Device claims capability %s, but op is not defined\n", \
+ __stringify(_type)); \
+ return -EIO; \
+ } \
+}
+ CHECK_CAP(dma_memcpy, DMA_MEMCPY);
+ CHECK_CAP(dma_xor, DMA_XOR);
+ CHECK_CAP(dma_xor_val, DMA_XOR_VAL);
+ CHECK_CAP(dma_pq, DMA_PQ);
+ CHECK_CAP(dma_pq_val, DMA_PQ_VAL);
+ CHECK_CAP(dma_memset, DMA_MEMSET);
+ CHECK_CAP(dma_interrupt, DMA_INTERRUPT);
+ CHECK_CAP(dma_cyclic, DMA_CYCLIC);
+ CHECK_CAP(interleaved_dma, DMA_INTERLEAVE);
+
+#undef CHECK_CAP
if (!device->device_tx_status) {
dev_err(device->dev, "Device tx_status is not defined\n");
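
CHECK_CAP collapses nine nearly identical capability checks: token pasting builds the device_prep_##_name callback name and __stringify() turns the capability enum into the text printed in the error, and the macro is #undef'd right after use so it stays local to dma_async_device_register(). A standalone sketch of the same paste-and-stringify trick, using a hypothetical ops struct and a simplified single-level __stringify:

    #include <stdio.h>

    /* Simplified; the kernel's <linux/stringify.h> version expands macros first. */
    #define __stringify(x) #x

    struct ops {
        int (*prep_memcpy)(void);
        int (*prep_xor)(void);
    };

    /* Fails the check when the pasted callback name is missing, printing the capability name. */
    #define CHECK_CAP(dev, _name, _cap)                                          \
    do {                                                                         \
        if (!(dev)->prep_##_name) {                                              \
            printf("claims %s, but op is not defined\n", __stringify(_cap));     \
            return -1;                                                           \
        }                                                                        \
    } while (0)

    static int validate(const struct ops *dev)
    {
        CHECK_CAP(dev, memcpy, DMA_MEMCPY);
        CHECK_CAP(dev, xor, DMA_XOR);
        return 0;
    }
    #undef CHECK_CAP

    int main(void)
    {
        struct ops dev = { 0 };           /* no callbacks set: the first check trips */

        return validate(&dev) ? 1 : 0;
    }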
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index 796b6caf0bab..dd02f84e404d 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -21,7 +21,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/dw/rzn1-dmamux.c b/drivers/dma/dw/rzn1-dmamux.c
index f9912c3dd4d7..4fb8508419db 100644
--- a/drivers/dma/dw/rzn1-dmamux.c
+++ b/drivers/dma/dw/rzn1-dmamux.c
@@ -5,8 +5,10 @@
* Based on TI crossbar driver written by Peter Ujfalusi <peter.ujfalusi@ti.com>
*/
#include <linux/bitops.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/soc/renesas/r9a06g032-sysctrl.h>
#include <linux/types.h>
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 5338a94f1a69..5c4a448a1254 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1320,11 +1320,9 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct ep93xx_dma_engine *edma;
struct dma_device *dma_dev;
- size_t edma_size;
int ret, i;
- edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
- edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
+ edma = kzalloc(struct_size(edma, channels, pdata->num_channels), GFP_KERNEL);
if (!edma)
return -ENOMEM;
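
The ep93xx change swaps open-coded "header plus n elements" arithmetic for struct_size(), which computes sizeof(*edma) + n * sizeof(element) with overflow checking (it saturates to SIZE_MAX, so the allocation fails rather than coming back short). A minimal sketch of the pattern on a hypothetical structure with a flexible array member:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct my_engine {
        int nr_chans;
        struct { int id; } channels[];    /* flexible array member */
    };

    static struct my_engine *my_engine_alloc(int nr_chans)
    {
        struct my_engine *e;

        /* sizeof(*e) + nr_chans * sizeof(e->channels[0]), saturating to SIZE_MAX on overflow */
        e = kzalloc(struct_size(e, channels, nr_chans), GFP_KERNEL);
        if (!e)
            return NULL;

        e->nr_chans = nr_chans;
        return e;
    }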
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index a06a1575a2a5..a0f5741abcc4 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -7,6 +7,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include "fsl-edma-common.h"
@@ -40,14 +42,73 @@
#define EDMA64_ERRH 0x28
#define EDMA64_ERRL 0x2c
-#define EDMA_TCD 0x1000
+void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan)
+{
+ spin_lock(&fsl_chan->vchan.lock);
+
+ if (!fsl_chan->edesc) {
+ /* terminate_all called before */
+ spin_unlock(&fsl_chan->vchan.lock);
+ return;
+ }
+
+ if (!fsl_chan->edesc->iscyclic) {
+ list_del(&fsl_chan->edesc->vdesc.node);
+ vchan_cookie_complete(&fsl_chan->edesc->vdesc);
+ fsl_chan->edesc = NULL;
+ fsl_chan->status = DMA_COMPLETE;
+ fsl_chan->idle = true;
+ } else {
+ vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
+ }
+
+ if (!fsl_chan->edesc)
+ fsl_edma_xfer_desc(fsl_chan);
+
+ spin_unlock(&fsl_chan->vchan.lock);
+}
+
+static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
+{
+ u32 val, flags;
+
+ flags = fsl_edma_drvflags(fsl_chan);
+ val = edma_readl_chreg(fsl_chan, ch_sbr);
+ /* Remote/local swapped wrongly on iMX8 QM Audio edma */
+ if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) {
+ if (!fsl_chan->is_rxchan)
+ val |= EDMA_V3_CH_SBR_RD;
+ else
+ val |= EDMA_V3_CH_SBR_WR;
+ } else {
+ if (fsl_chan->is_rxchan)
+ val |= EDMA_V3_CH_SBR_RD;
+ else
+ val |= EDMA_V3_CH_SBR_WR;
+ }
+
+ if (fsl_chan->is_remote)
+ val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);
+
+ edma_writel_chreg(fsl_chan, val, ch_sbr);
+
+ if (flags & FSL_EDMA_DRV_HAS_CHMUX)
+ edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
+
+ val = edma_readl_chreg(fsl_chan, ch_csr);
+ val |= EDMA_V3_CH_CSR_ERQ;
+ edma_writel_chreg(fsl_chan, val, ch_csr);
+}
static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
{
struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
- if (fsl_chan->edma->drvdata->version == v1) {
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
+ return fsl_edma3_enable_request(fsl_chan);
+
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
edma_writeb(fsl_chan->edma, ch, regs->serq);
} else {
@@ -59,12 +120,29 @@ static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
}
}
+static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
+{
+ u32 val = edma_readl_chreg(fsl_chan, ch_csr);
+ u32 flags;
+
+ flags = fsl_edma_drvflags(fsl_chan);
+
+ if (flags & FSL_EDMA_DRV_HAS_CHMUX)
+ edma_writel_chreg(fsl_chan, 0, ch_mux);
+
+ val &= ~EDMA_V3_CH_CSR_ERQ;
+ edma_writel_chreg(fsl_chan, val, ch_csr);
+}
+
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
{
struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
- if (fsl_chan->edma->drvdata->version == v1) {
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_SPLIT_REG)
+ return fsl_edma3_disable_request(fsl_chan);
+
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_WRAP_IO) {
edma_writeb(fsl_chan->edma, ch, regs->cerq);
edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
} else {
@@ -75,7 +153,6 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
}
}
-EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
u32 off, u32 slot, bool enable)
@@ -112,36 +189,33 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
int endian_diff[4] = {3, 1, -1, -3};
u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
+ if (!dmamux_nr)
+ return;
+
chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
- if (fsl_chan->edma->drvdata->mux_swap)
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_MUX_SWAP)
ch_off += endian_diff[ch_off % 4];
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
slot = EDMAMUX_CHCFG_SOURCE(slot);
- if (fsl_chan->edma->drvdata->version == v3)
+ if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_CONFIG32)
mux_configure32(fsl_chan, muxaddr, ch_off, slot, enable);
else
mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
-EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
{
- switch (addr_width) {
- case 1:
- return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
- case 2:
- return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
- case 4:
- return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
- case 8:
- return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
- default:
- return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
- }
+ u32 val;
+
+ if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+
+ val = ffs(addr_width) - 1;
+ return val | (val << 8);
}
void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
@@ -155,7 +229,6 @@ void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
fsl_desc->tcd[i].ptcd);
kfree(fsl_desc);
}
-EXPORT_SYMBOL_GPL(fsl_edma_free_desc);
int fsl_edma_terminate_all(struct dma_chan *chan)
{
@@ -170,9 +243,12 @@ int fsl_edma_terminate_all(struct dma_chan *chan)
vchan_get_all_descriptors(&fsl_chan->vchan, &head);
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+
+ if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_PD)
+ pm_runtime_allow(fsl_chan->pd_dev);
+
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);
int fsl_edma_pause(struct dma_chan *chan)
{
@@ -188,7 +264,6 @@ int fsl_edma_pause(struct dma_chan *chan)
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_pause);
int fsl_edma_resume(struct dma_chan *chan)
{
@@ -204,7 +279,6 @@ int fsl_edma_resume(struct dma_chan *chan)
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_resume);
static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
{
@@ -265,36 +339,41 @@ int fsl_edma_slave_config(struct dma_chan *chan,
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_slave_config);
static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
struct virt_dma_desc *vdesc, bool in_progress)
{
struct fsl_edma_desc *edesc = fsl_chan->edesc;
- struct edma_regs *regs = &fsl_chan->edma->regs;
- u32 ch = fsl_chan->vchan.chan.chan_id;
enum dma_transfer_direction dir = edesc->dirn;
dma_addr_t cur_addr, dma_addr;
size_t len, size;
+ u32 nbytes = 0;
int i;
/* calculate the total size in this desc */
- for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
- len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
- * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) {
+ nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
+ if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
+ nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
+ len += nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ }
if (!in_progress)
return len;
if (dir == DMA_MEM_TO_DEV)
- cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
+ cur_addr = edma_read_tcdreg(fsl_chan, saddr);
else
- cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);
+ cur_addr = edma_read_tcdreg(fsl_chan, daddr);
/* figure out the finished and calculate the residue */
for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
- size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
- * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ nbytes = le32_to_cpu(edesc->tcd[i].vtcd->nbytes);
+ if (nbytes & (EDMA_V3_TCD_NBYTES_DMLOE | EDMA_V3_TCD_NBYTES_SMLOE))
+ nbytes = EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(nbytes);
+
+ size = nbytes * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+
if (dir == DMA_MEM_TO_DEV)
dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
else
@@ -340,14 +419,10 @@ enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
return fsl_chan->status;
}
-EXPORT_SYMBOL_GPL(fsl_edma_tx_status);
static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
struct fsl_edma_hw_tcd *tcd)
{
- struct fsl_edma_engine *edma = fsl_chan->edma;
- struct edma_regs *regs = &fsl_chan->edma->regs;
- u32 ch = fsl_chan->vchan.chan.chan_id;
u16 csr = 0;
/*
@@ -356,23 +431,22 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
* big- or little-endian obeying the eDMA engine model endian,
* and this is performed from specific edma_write functions
*/
- edma_writew(edma, 0, &regs->tcd[ch].csr);
+ edma_write_tcdreg(fsl_chan, 0, csr);
- edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
- edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);
+ edma_write_tcdreg(fsl_chan, tcd->saddr, saddr);
+ edma_write_tcdreg(fsl_chan, tcd->daddr, daddr);
- edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
- edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);
+ edma_write_tcdreg(fsl_chan, tcd->attr, attr);
+ edma_write_tcdreg(fsl_chan, tcd->soff, soff);
- edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
- edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);
+ edma_write_tcdreg(fsl_chan, tcd->nbytes, nbytes);
+ edma_write_tcdreg(fsl_chan, tcd->slast, slast);
- edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
- edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
- edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);
+ edma_write_tcdreg(fsl_chan, tcd->citer, citer);
+ edma_write_tcdreg(fsl_chan, tcd->biter, biter);
+ edma_write_tcdreg(fsl_chan, tcd->doff, doff);
- edma_writel(edma, (s32)tcd->dlast_sga,
- &regs->tcd[ch].dlast_sga);
+ edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);
if (fsl_chan->is_sw) {
csr = le16_to_cpu(tcd->csr);
@@ -380,16 +454,19 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
tcd->csr = cpu_to_le16(csr);
}
- edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
+ edma_write_tcdreg(fsl_chan, tcd->csr, csr);
}
static inline
-void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
+void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
+ struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
u16 biter, u16 doff, u32 dlast_sga, bool major_int,
bool disable_req, bool enable_sg)
{
+ struct dma_slave_config *cfg = &fsl_chan->cfg;
u16 csr = 0;
+ u32 burst;
/*
* eDMA hardware SGs require the TCDs to be stored in little
@@ -404,6 +481,21 @@ void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
tcd->soff = cpu_to_le16(soff);
+ if (fsl_chan->is_multi_fifo) {
+ /* set mloff to support multiple fifo */
+ burst = cfg->direction == DMA_DEV_TO_MEM ?
+ cfg->src_addr_width : cfg->dst_addr_width;
+ nbytes |= EDMA_V3_TCD_NBYTES_MLOFF(-(burst * 4));
+ /* enable DMLOE/SMLOE */
+ if (cfg->direction == DMA_MEM_TO_DEV) {
+ nbytes |= EDMA_V3_TCD_NBYTES_DMLOE;
+ nbytes &= ~EDMA_V3_TCD_NBYTES_SMLOE;
+ } else {
+ nbytes |= EDMA_V3_TCD_NBYTES_SMLOE;
+ nbytes &= ~EDMA_V3_TCD_NBYTES_DMLOE;
+ }
+ }
+
tcd->nbytes = cpu_to_le32(nbytes);
tcd->slast = cpu_to_le32(slast);
@@ -422,6 +514,12 @@ void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
if (enable_sg)
csr |= EDMA_TCD_CSR_E_SG;
+ if (fsl_chan->is_rxchan)
+ csr |= EDMA_TCD_CSR_ACTIVE;
+
+ if (fsl_chan->is_sw)
+ csr |= EDMA_TCD_CSR_START;
+
tcd->csr = cpu_to_le16(csr);
}
@@ -461,6 +559,7 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct fsl_edma_desc *fsl_desc;
dma_addr_t dma_buf_next;
+ bool major_int = true;
int sg_len, i;
u32 src_addr, dst_addr, last_sg, nbytes;
u16 soff, doff, iter;
@@ -504,23 +603,28 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
src_addr = dma_buf_next;
dst_addr = fsl_chan->dma_dev_addr;
soff = fsl_chan->cfg.dst_addr_width;
- doff = 0;
- } else {
+ doff = fsl_chan->is_multi_fifo ? 4 : 0;
+ } else if (direction == DMA_DEV_TO_MEM) {
src_addr = fsl_chan->dma_dev_addr;
dst_addr = dma_buf_next;
- soff = 0;
+ soff = fsl_chan->is_multi_fifo ? 4 : 0;
doff = fsl_chan->cfg.src_addr_width;
+ } else {
+ /* DMA_DEV_TO_DEV */
+ src_addr = fsl_chan->cfg.src_addr;
+ dst_addr = fsl_chan->cfg.dst_addr;
+ soff = doff = 0;
+ major_int = false;
}
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
fsl_chan->attr, soff, nbytes, 0, iter,
- iter, doff, last_sg, true, false, true);
+ iter, doff, last_sg, major_int, false, true);
dma_buf_next += period_len;
}
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
-EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
@@ -564,23 +668,51 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
dst_addr = fsl_chan->dma_dev_addr;
soff = fsl_chan->cfg.dst_addr_width;
doff = 0;
- } else {
+ } else if (direction == DMA_DEV_TO_MEM) {
src_addr = fsl_chan->dma_dev_addr;
dst_addr = sg_dma_address(sg);
soff = 0;
doff = fsl_chan->cfg.src_addr_width;
+ } else {
+ /* DMA_DEV_TO_DEV */
+ src_addr = fsl_chan->cfg.src_addr;
+ dst_addr = fsl_chan->cfg.dst_addr;
+ soff = 0;
+ doff = 0;
}
+ /*
+ * Choose a suitable burst length if sg_dma_len is not a
+ * multiple of the burst length, so that the whole transfer
+ * length is a multiple of the minor loop (burst length).
+ */
+ if (sg_dma_len(sg) % nbytes) {
+ u32 width = (direction == DMA_DEV_TO_MEM) ? doff : soff;
+ u32 burst = (direction == DMA_DEV_TO_MEM) ?
+ fsl_chan->cfg.src_maxburst :
+ fsl_chan->cfg.dst_maxburst;
+ int j;
+
+ for (j = burst; j > 1; j--) {
+ if (!(sg_dma_len(sg) % (j * width))) {
+ nbytes = j * width;
+ break;
+ }
+ }
+ /* Set burst size as 1 if there's no suitable one */
+ if (j == 1)
+ nbytes = width;
+ }
iter = sg_dma_len(sg) / nbytes;
if (i < sg_len - 1) {
last_sg = fsl_desc->tcd[(i + 1)].ptcd;
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
dst_addr, fsl_chan->attr, soff,
nbytes, 0, iter, iter, doff, last_sg,
false, false, true);
} else {
last_sg = 0;
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[i].vtcd, src_addr,
dst_addr, fsl_chan->attr, soff,
nbytes, 0, iter, iter, doff, last_sg,
true, true, false);
@@ -589,7 +721,6 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
-EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);
struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
dma_addr_t dma_dst, dma_addr_t dma_src,
@@ -606,13 +737,12 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
fsl_chan->is_sw = true;
/* To match with copy_align and max_seg_size so 1 tcd is enough */
- fsl_edma_fill_tcd(fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
- EDMA_TCD_ATTR_SSIZE_32BYTE | EDMA_TCD_ATTR_DSIZE_32BYTE,
+ fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
+ fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
32, len, 0, 1, 1, 32, 0, true, true, false);
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
-EXPORT_SYMBOL_GPL(fsl_edma_prep_memcpy);
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
@@ -629,7 +759,6 @@ void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
fsl_chan->status = DMA_IN_PROGRESS;
fsl_chan->idle = false;
}
-EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);
void fsl_edma_issue_pending(struct dma_chan *chan)
{
@@ -649,7 +778,6 @@ void fsl_edma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
}
-EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
@@ -660,7 +788,6 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
32, 0);
return 0;
}
-EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);
void fsl_edma_free_chan_resources(struct dma_chan *chan)
{
@@ -683,7 +810,6 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
fsl_chan->tcd_pool = NULL;
fsl_chan->is_sw = false;
}
-EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
{
@@ -695,12 +821,10 @@ void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
tasklet_kill(&chan->vchan.task);
}
}
-EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
/*
- * On the 32 channels Vybrid/mpc577x edma version (here called "v1"),
- * register offsets are different compared to ColdFire mcf5441x 64 channels
- * edma (here called "v2").
+ * On the 32 channels Vybrid/mpc577x edma version, register offsets are
+ * different compared to ColdFire mcf5441x 64 channels edma.
*
* This function sets up register offsets as per proper declared version
* so must be called in xxx_edma_probe() just after setting the
@@ -708,41 +832,30 @@ EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
*/
void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
{
+ bool is64 = !!(edma->drvdata->flags & FSL_EDMA_DRV_EDMA64);
+
edma->regs.cr = edma->membase + EDMA_CR;
edma->regs.es = edma->membase + EDMA_ES;
edma->regs.erql = edma->membase + EDMA_ERQ;
edma->regs.eeil = edma->membase + EDMA_EEI;
- edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_SERQ : EDMA_SERQ);
- edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CERQ : EDMA_CERQ);
- edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_SEEI : EDMA_SEEI);
- edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CEEI : EDMA_CEEI);
- edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CINT : EDMA_CINT);
- edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CERR : EDMA_CERR);
- edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_SSRT : EDMA_SSRT);
- edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_CDNE : EDMA_CDNE);
- edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_INTL : EDMA_INTR);
- edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
- EDMA64_ERRL : EDMA_ERR);
-
- if (edma->drvdata->version == v2) {
+ edma->regs.serq = edma->membase + (is64 ? EDMA64_SERQ : EDMA_SERQ);
+ edma->regs.cerq = edma->membase + (is64 ? EDMA64_CERQ : EDMA_CERQ);
+ edma->regs.seei = edma->membase + (is64 ? EDMA64_SEEI : EDMA_SEEI);
+ edma->regs.ceei = edma->membase + (is64 ? EDMA64_CEEI : EDMA_CEEI);
+ edma->regs.cint = edma->membase + (is64 ? EDMA64_CINT : EDMA_CINT);
+ edma->regs.cerr = edma->membase + (is64 ? EDMA64_CERR : EDMA_CERR);
+ edma->regs.ssrt = edma->membase + (is64 ? EDMA64_SSRT : EDMA_SSRT);
+ edma->regs.cdne = edma->membase + (is64 ? EDMA64_CDNE : EDMA_CDNE);
+ edma->regs.intl = edma->membase + (is64 ? EDMA64_INTL : EDMA_INTR);
+ edma->regs.errl = edma->membase + (is64 ? EDMA64_ERRL : EDMA_ERR);
+
+ if (is64) {
edma->regs.erqh = edma->membase + EDMA64_ERQH;
edma->regs.eeih = edma->membase + EDMA64_EEIH;
edma->regs.errh = edma->membase + EDMA64_ERRH;
edma->regs.inth = edma->membase + EDMA64_INTH;
}
-
- edma->regs.tcd = edma->membase + EDMA_TCD;
}
-EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 004ec4a6bc86..3cc0cc8fc2d0 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -29,16 +29,6 @@
#define EDMA_TCD_ATTR_DMOD(x) (((x) & GENMASK(4, 0)) << 3)
#define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
#define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)
-#define EDMA_TCD_ATTR_DSIZE_8BIT 0
-#define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0)
-#define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1)
-#define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1))
-#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(2) | BIT(0))
-#define EDMA_TCD_ATTR_SSIZE_8BIT 0
-#define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8)
-#define EDMA_TCD_ATTR_SSIZE_32BIT (EDMA_TCD_ATTR_DSIZE_32BIT << 8)
-#define EDMA_TCD_ATTR_SSIZE_64BIT (EDMA_TCD_ATTR_DSIZE_64BIT << 8)
-#define EDMA_TCD_ATTR_SSIZE_32BYTE (EDMA_TCD_ATTR_DSIZE_32BYTE << 8)
#define EDMA_TCD_CITER_CITER(x) ((x) & GENMASK(14, 0))
#define EDMA_TCD_BITER_BITER(x) ((x) & GENMASK(14, 0))
@@ -52,16 +42,32 @@
#define EDMA_TCD_CSR_ACTIVE BIT(6)
#define EDMA_TCD_CSR_DONE BIT(7)
+#define EDMA_V3_TCD_NBYTES_MLOFF_NBYTES(x) ((x) & GENMASK(9, 0))
+#define EDMA_V3_TCD_NBYTES_MLOFF(x) (x << 10)
+#define EDMA_V3_TCD_NBYTES_DMLOE (1 << 30)
+#define EDMA_V3_TCD_NBYTES_SMLOE (1 << 31)
+
#define EDMAMUX_CHCFG_DIS 0x0
#define EDMAMUX_CHCFG_ENBL 0x80
#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
#define DMAMUX_NR 2
+#define EDMA_TCD 0x1000
+
#define FSL_EDMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+#define EDMA_V3_CH_SBR_RD BIT(22)
+#define EDMA_V3_CH_SBR_WR BIT(21)
+#define EDMA_V3_CH_CSR_ERQ BIT(0)
+#define EDMA_V3_CH_CSR_EARQ BIT(1)
+#define EDMA_V3_CH_CSR_EEI BIT(2)
+#define EDMA_V3_CH_CSR_DONE BIT(30)
+#define EDMA_V3_CH_CSR_ACTIVE BIT(31)
+
enum fsl_edma_pm_state {
RUNNING = 0,
SUSPENDED,
@@ -81,6 +87,18 @@ struct fsl_edma_hw_tcd {
__le16 biter;
};
+struct fsl_edma3_ch_reg {
+ __le32 ch_csr;
+ __le32 ch_es;
+ __le32 ch_int;
+ __le32 ch_sbr;
+ __le32 ch_pri;
+ __le32 ch_mux;
+ __le32 ch_mattr; /* edma4, reserved for edma3 */
+ __le32 ch_reserved;
+ struct fsl_edma_hw_tcd tcd;
+} __packed;
+
/*
* These are iomem pointers, for both v32 and v64.
*/
@@ -103,7 +121,6 @@ struct edma_regs {
void __iomem *intl;
void __iomem *errh;
void __iomem *errl;
- struct fsl_edma_hw_tcd __iomem *tcd;
};
struct fsl_edma_sw_tcd {
@@ -126,7 +143,20 @@ struct fsl_edma_chan {
dma_addr_t dma_dev_addr;
u32 dma_dev_size;
enum dma_data_direction dma_dir;
- char chan_name[16];
+ char chan_name[32];
+ struct fsl_edma_hw_tcd __iomem *tcd;
+ u32 real_count;
+ struct work_struct issue_worker;
+ struct platform_device *pdev;
+ struct device *pd_dev;
+ u32 srcid;
+ struct clk *clk;
+ int priority;
+ int hw_chanid;
+ int txirq;
+ bool is_rxchan;
+ bool is_remote;
+ bool is_multi_fifo;
};
struct fsl_edma_desc {
@@ -138,17 +168,32 @@ struct fsl_edma_desc {
struct fsl_edma_sw_tcd tcd[];
};
-enum edma_version {
- v1, /* 32ch, Vybrid, mpc57x, etc */
- v2, /* 64ch Coldfire */
- v3, /* 32ch, i.mx7ulp */
-};
+#define FSL_EDMA_DRV_HAS_DMACLK BIT(0)
+#define FSL_EDMA_DRV_MUX_SWAP BIT(1)
+#define FSL_EDMA_DRV_CONFIG32 BIT(2)
+#define FSL_EDMA_DRV_WRAP_IO BIT(3)
+#define FSL_EDMA_DRV_EDMA64 BIT(4)
+#define FSL_EDMA_DRV_HAS_PD BIT(5)
+#define FSL_EDMA_DRV_HAS_CHCLK BIT(6)
+#define FSL_EDMA_DRV_HAS_CHMUX BIT(7)
+/* imx8 QM audio edma remote local swapped */
+#define FSL_EDMA_DRV_QUIRK_SWAPPED BIT(8)
+/* control and status register is in tcd address space, edma3 reg layout */
+#define FSL_EDMA_DRV_SPLIT_REG BIT(9)
+#define FSL_EDMA_DRV_BUS_8BYTE BIT(10)
+#define FSL_EDMA_DRV_DEV_TO_DEV BIT(11)
+#define FSL_EDMA_DRV_ALIGN_64BYTE BIT(12)
+
+#define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \
+ FSL_EDMA_DRV_BUS_8BYTE | \
+ FSL_EDMA_DRV_DEV_TO_DEV | \
+ FSL_EDMA_DRV_ALIGN_64BYTE)
struct fsl_edma_drvdata {
- enum edma_version version;
- u32 dmamuxs;
- bool has_dmaclk;
- bool mux_swap;
+ u32 dmamuxs; /* only used before v3 */
+ u32 chreg_off;
+ u32 chreg_space_sz;
+ u32 flags;
int (*setup_irq)(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma);
};
@@ -159,6 +204,7 @@ struct fsl_edma_engine {
void __iomem *muxbase[DMAMUX_NR];
struct clk *muxclk[DMAMUX_NR];
struct clk *dmaclk;
+ struct clk *chclk;
struct mutex fsl_edma_mutex;
const struct fsl_edma_drvdata *drvdata;
u32 n_chans;
@@ -166,9 +212,28 @@ struct fsl_edma_engine {
int errirq;
bool big_endian;
struct edma_regs regs;
+ u64 chan_masked;
struct fsl_edma_chan chans[];
};
+#define edma_read_tcdreg(chan, __name) \
+(sizeof(chan->tcd->__name) == sizeof(u32) ? \
+ edma_readl(chan->edma, &chan->tcd->__name) : \
+ edma_readw(chan->edma, &chan->tcd->__name))
+
+#define edma_write_tcdreg(chan, val, __name) \
+(sizeof(chan->tcd->__name) == sizeof(u32) ? \
+ edma_writel(chan->edma, (u32 __force)val, &chan->tcd->__name) : \
+ edma_writew(chan->edma, (u16 __force)val, &chan->tcd->__name))
+
+#define edma_readl_chreg(chan, __name) \
+ edma_readl(chan->edma, \
+ (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
+
+#define edma_writel_chreg(chan, val, __name) \
+ edma_writel(chan->edma, val, \
+ (void __iomem *)&(container_of(chan->tcd, struct fsl_edma3_ch_reg, tcd)->__name))
+
/*
* R/W functions for big- or little-endian registers:
* The eDMA controller's endian is independent of the CPU core's endian.
@@ -183,6 +248,14 @@ static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
return ioread32(addr);
}
+static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+ if (edma->big_endian)
+ return ioread16be(addr);
+ else
+ return ioread16(addr);
+}
+
static inline void edma_writeb(struct fsl_edma_engine *edma,
u8 val, void __iomem *addr)
{
@@ -217,11 +290,23 @@ static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
return container_of(chan, struct fsl_edma_chan, vchan.chan);
}
+static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan)
+{
+ return fsl_chan->edma->drvdata->flags;
+}
+
static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
return container_of(vd, struct fsl_edma_desc, vdesc);
}
+static inline void fsl_edma_err_chan_handler(struct fsl_edma_chan *fsl_chan)
+{
+ fsl_chan->status = DMA_ERROR;
+ fsl_chan->idle = true;
+}
+
+void fsl_edma_tx_chan_handler(struct fsl_edma_chan *fsl_chan);
void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
unsigned int slot, bool enable);
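
Two helpers carry most of the new header plumbing: edma_read_tcdreg()/edma_write_tcdreg() choose a 32-bit or 16-bit accessor from sizeof() of the named TCD field, and edma_readl_chreg()/edma_writel_chreg() use container_of() to step back from the per-channel tcd pointer to the surrounding fsl_edma3_ch_reg block where ch_csr, ch_sbr and ch_mux live. A small userspace sketch of the container_of() step, with a hypothetical register layout and a simplified container_of definition:

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified; the kernel's container_of() adds type checking. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct hw_tcd { unsigned int saddr, daddr; };

    /* Channel block: control/status words in front of the TCD, like fsl_edma3_ch_reg. */
    struct ch_reg {
        unsigned int ch_csr;
        unsigned int ch_sbr;
        struct hw_tcd tcd;
    };

    int main(void)
    {
        struct ch_reg chan = { .ch_csr = 0x1, .ch_sbr = 0x2 };
        struct hw_tcd *tcd = &chan.tcd;    /* the driver keeps only this per-channel pointer */
        struct ch_reg *reg = container_of(tcd, struct ch_reg, tcd);

        /* Recovering the enclosing block gives access to the sibling registers. */
        printf("ch_csr = 0x%x, ch_sbr = 0x%x\n", reg->ch_csr, reg->ch_sbr);
        return 0;
    }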
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma-main.c
index e40769666e39..63d48d046f04 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -18,9 +18,15 @@
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include "fsl-edma-common.h"
+#define ARGS_RX BIT(0)
+#define ARGS_REMOTE BIT(1)
+#define ARGS_MULTI_FIFO BIT(2)
+
static void fsl_edma_synchronize(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
@@ -33,7 +39,6 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
struct fsl_edma_engine *fsl_edma = dev_id;
unsigned int intr, ch;
struct edma_regs *regs = &fsl_edma->regs;
- struct fsl_edma_chan *fsl_chan;
intr = edma_readl(fsl_edma, regs->intl);
if (!intr)
@@ -42,33 +47,25 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
for (ch = 0; ch < fsl_edma->n_chans; ch++) {
if (intr & (0x1 << ch)) {
edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
+ fsl_edma_tx_chan_handler(&fsl_edma->chans[ch]);
+ }
+ }
+ return IRQ_HANDLED;
+}
- fsl_chan = &fsl_edma->chans[ch];
-
- spin_lock(&fsl_chan->vchan.lock);
+static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_chan *fsl_chan = dev_id;
+ unsigned int intr;
- if (!fsl_chan->edesc) {
- /* terminate_all called before */
- spin_unlock(&fsl_chan->vchan.lock);
- continue;
- }
+ intr = edma_readl_chreg(fsl_chan, ch_int);
+ if (!intr)
+ return IRQ_HANDLED;
- if (!fsl_chan->edesc->iscyclic) {
- list_del(&fsl_chan->edesc->vdesc.node);
- vchan_cookie_complete(&fsl_chan->edesc->vdesc);
- fsl_chan->edesc = NULL;
- fsl_chan->status = DMA_COMPLETE;
- fsl_chan->idle = true;
- } else {
- vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
- }
+ edma_writel_chreg(fsl_chan, 1, ch_int);
- if (!fsl_chan->edesc)
- fsl_edma_xfer_desc(fsl_chan);
+ fsl_edma_tx_chan_handler(fsl_chan);
- spin_unlock(&fsl_chan->vchan.lock);
- }
- }
return IRQ_HANDLED;
}
@@ -86,8 +83,7 @@ static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
if (err & (0x1 << ch)) {
fsl_edma_disable_request(&fsl_edma->chans[ch]);
edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
- fsl_edma->chans[ch].status = DMA_ERROR;
- fsl_edma->chans[ch].idle = true;
+ fsl_edma_err_chan_handler(&fsl_edma->chans[ch]);
}
}
return IRQ_HANDLED;
@@ -134,11 +130,58 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
return NULL;
}
+static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
+ struct dma_chan *chan, *_chan;
+ struct fsl_edma_chan *fsl_chan;
+ bool b_chmux;
+ int i;
+
+ if (dma_spec->args_count != 3)
+ return NULL;
+
+ b_chmux = !!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHMUX);
+
+ mutex_lock(&fsl_edma->fsl_edma_mutex);
+ list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels,
+ device_node) {
+
+ if (chan->client_count)
+ continue;
+
+ fsl_chan = to_fsl_edma_chan(chan);
+ i = fsl_chan - fsl_edma->chans;
+
+ chan = dma_get_slave_channel(chan);
+ chan->device->privatecnt++;
+ fsl_chan->priority = dma_spec->args[1];
+ fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX;
+ fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
+ fsl_chan->is_multi_fifo = dma_spec->args[2] & ARGS_MULTI_FIFO;
+
+ if (!b_chmux && i == dma_spec->args[0]) {
+ mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return chan;
+ } else if (b_chmux && !fsl_chan->srcid) {
+ /* if controller support channel mux, choose a free channel */
+ fsl_chan->srcid = dma_spec->args[0];
+ mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return chan;
+ }
+ }
+ mutex_unlock(&fsl_edma->fsl_edma_mutex);
+ return NULL;
+}
+
static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
int ret;
+ edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);
+
fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
if (fsl_edma->txirq < 0)
return fsl_edma->txirq;
@@ -173,6 +216,37 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
return 0;
}
+static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+
+ struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
+
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
+
+ /* request channel irq */
+ fsl_chan->txirq = platform_get_irq(pdev, i);
+ if (fsl_chan->txirq < 0) {
+ dev_err(&pdev->dev, "Can't get chan %d's irq.\n", i);
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(&pdev->dev, fsl_chan->txirq,
+ fsl_edma3_tx_handler, IRQF_SHARED,
+ fsl_chan->chan_name, fsl_chan);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't register chan%d's IRQ.\n", i);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int
fsl_edma2_irq_init(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma)
@@ -180,6 +254,8 @@ fsl_edma2_irq_init(struct platform_device *pdev,
int i, ret, irq;
int count;
+ edma_writel(fsl_edma, ~0, fsl_edma->regs.intl);
+
count = platform_irq_count(pdev);
dev_dbg(&pdev->dev, "%s Found %d interrupts\r\n", __func__, count);
if (count <= 2) {
@@ -197,8 +273,6 @@ fsl_edma2_irq_init(struct platform_device *pdev,
if (irq < 0)
return -ENXIO;
- sprintf(fsl_edma->chans[i].chan_name, "eDMA2-CH%02d", i);
-
/* The last IRQ is for eDMA err */
if (i == count - 1)
ret = devm_request_irq(&pdev->dev, irq,
@@ -236,33 +310,110 @@ static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
}
static struct fsl_edma_drvdata vf610_data = {
- .version = v1,
.dmamuxs = DMAMUX_NR,
+ .flags = FSL_EDMA_DRV_WRAP_IO,
+ .chreg_off = EDMA_TCD,
+ .chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
.setup_irq = fsl_edma_irq_init,
};
static struct fsl_edma_drvdata ls1028a_data = {
- .version = v1,
.dmamuxs = DMAMUX_NR,
- .mux_swap = true,
+ .flags = FSL_EDMA_DRV_MUX_SWAP | FSL_EDMA_DRV_WRAP_IO,
+ .chreg_off = EDMA_TCD,
+ .chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
.setup_irq = fsl_edma_irq_init,
};
static struct fsl_edma_drvdata imx7ulp_data = {
- .version = v3,
.dmamuxs = 1,
- .has_dmaclk = true,
+ .chreg_off = EDMA_TCD,
+ .chreg_space_sz = sizeof(struct fsl_edma_hw_tcd),
+ .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_CONFIG32,
.setup_irq = fsl_edma2_irq_init,
};
+static struct fsl_edma_drvdata imx8qm_data = {
+ .flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x10000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
+static struct fsl_edma_drvdata imx8qm_audio_data = {
+ .flags = FSL_EDMA_DRV_QUIRK_SWAPPED | FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x10000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
+static struct fsl_edma_drvdata imx93_data3 = {
+ .flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x10000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
+static struct fsl_edma_drvdata imx93_data4 = {
+ .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
+ .chreg_space_sz = 0x8000,
+ .chreg_off = 0x10000,
+ .setup_irq = fsl_edma3_irq_init,
+};
+
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
+ { .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data},
+ { .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data},
+ { .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
+ { .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
+static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+{
+ struct fsl_edma_chan *fsl_chan;
+ struct device_link *link;
+ struct device *pd_chan;
+ struct device *dev;
+ int i;
+
+ dev = &pdev->dev;
+
+ for (i = 0; i < fsl_edma->n_chans; i++) {
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
+
+ fsl_chan = &fsl_edma->chans[i];
+
+ pd_chan = dev_pm_domain_attach_by_id(dev, i);
+ if (IS_ERR_OR_NULL(pd_chan)) {
+ dev_err(dev, "Failed attach pd %d\n", i);
+ return -EINVAL;
+ }
+
+ link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (IS_ERR(link)) {
+ dev_err(dev, "Failed to add device_link to %d: %ld\n", i,
+ PTR_ERR(link));
+ return -EINVAL;
+ }
+
+ fsl_chan->pd_dev = pd_chan;
+
+ pm_runtime_use_autosuspend(fsl_chan->pd_dev);
+ pm_runtime_set_autosuspend_delay(fsl_chan->pd_dev, 200);
+ pm_runtime_set_active(fsl_chan->pd_dev);
+ }
+
+ return 0;
+}
+
static int fsl_edma_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
@@ -270,9 +421,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct fsl_edma_engine *fsl_edma;
const struct fsl_edma_drvdata *drvdata = NULL;
- struct fsl_edma_chan *fsl_chan;
+ u32 chan_mask[2] = {0, 0};
struct edma_regs *regs;
- int len, chans;
+ int chans;
int ret, i;
if (of_id)
@@ -288,8 +439,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
return ret;
}
- len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans;
- fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ fsl_edma = devm_kzalloc(&pdev->dev, struct_size(fsl_edma, chans, chans),
+ GFP_KERNEL);
if (!fsl_edma)
return -ENOMEM;
@@ -301,26 +452,42 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (IS_ERR(fsl_edma->membase))
return PTR_ERR(fsl_edma->membase);
- fsl_edma_setup_regs(fsl_edma);
- regs = &fsl_edma->regs;
+ if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)) {
+ fsl_edma_setup_regs(fsl_edma);
+ regs = &fsl_edma->regs;
+ }
- if (drvdata->has_dmaclk) {
- fsl_edma->dmaclk = devm_clk_get(&pdev->dev, "dma");
+ if (drvdata->flags & FSL_EDMA_DRV_HAS_DMACLK) {
+ fsl_edma->dmaclk = devm_clk_get_enabled(&pdev->dev, "dma");
if (IS_ERR(fsl_edma->dmaclk)) {
dev_err(&pdev->dev, "Missing DMA block clock.\n");
return PTR_ERR(fsl_edma->dmaclk);
}
+ }
- ret = clk_prepare_enable(fsl_edma->dmaclk);
- if (ret) {
- dev_err(&pdev->dev, "DMA clk block failed.\n");
- return ret;
+ if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) {
+ fsl_edma->chclk = devm_clk_get_enabled(&pdev->dev, "mp");
+ if (IS_ERR(fsl_edma->chclk)) {
+ dev_err(&pdev->dev, "Missing MP block clock.\n");
+ return PTR_ERR(fsl_edma->chclk);
}
}
+ ret = of_property_read_variable_u32_array(np, "dma-channel-mask", chan_mask, 1, 2);
+
+ if (ret > 0) {
+ fsl_edma->chan_masked = chan_mask[1];
+ fsl_edma->chan_masked <<= 32;
+ fsl_edma->chan_masked |= chan_mask[0];
+ }
+
for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];
+ /* eDMAv3 mux register moved to TCD area if ch_mux exists */
+ if (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)
+ break;
+
fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
1 + i);
if (IS_ERR(fsl_edma->muxbase[i])) {
@@ -330,26 +497,32 @@ static int fsl_edma_probe(struct platform_device *pdev)
}
sprintf(clkname, "dmamux%d", i);
- fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
+ fsl_edma->muxclk[i] = devm_clk_get_enabled(&pdev->dev, clkname);
if (IS_ERR(fsl_edma->muxclk[i])) {
dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
/* on error: disable all previously enabled clks */
- fsl_disable_clocks(fsl_edma, i);
return PTR_ERR(fsl_edma->muxclk[i]);
}
-
- ret = clk_prepare_enable(fsl_edma->muxclk[i]);
- if (ret)
- /* on error: disable all previously enabled clks */
- fsl_disable_clocks(fsl_edma, i);
-
}
fsl_edma->big_endian = of_property_read_bool(np, "big-endian");
+ if (drvdata->flags & FSL_EDMA_DRV_HAS_PD) {
+ ret = fsl_edma3_attach_pd(pdev, fsl_edma);
+ if (ret)
+ return ret;
+ }
+
INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
for (i = 0; i < fsl_edma->n_chans; i++) {
struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
+ int len;
+
+ if (fsl_edma->chan_masked & BIT(i))
+ continue;
+
+ snprintf(fsl_chan->chan_name, sizeof(fsl_chan->chan_name), "%s-CH%02d",
+ dev_name(&pdev->dev), i);
fsl_chan->edma = fsl_edma;
fsl_chan->pm_state = RUNNING;
@@ -357,13 +530,19 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_chan->idle = true;
fsl_chan->dma_dir = DMA_NONE;
fsl_chan->vchan.desc_free = fsl_edma_free_desc;
+
+ len = (drvdata->flags & FSL_EDMA_DRV_SPLIT_REG) ?
+ offsetof(struct fsl_edma3_ch_reg, tcd) : 0;
+ fsl_chan->tcd = fsl_edma->membase
+ + i * drvdata->chreg_space_sz + drvdata->chreg_off + len;
+
+ fsl_chan->pdev = pdev;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
- edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
+ edma_write_tcdreg(fsl_chan, 0, csr);
fsl_edma_chan_mux(fsl_chan, 0, false);
}
- edma_writel(fsl_edma, ~0, regs->intl);
ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
if (ret)
return ret;
@@ -391,33 +570,47 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
+
+ if (drvdata->flags & FSL_EDMA_DRV_BUS_8BYTE) {
+ fsl_edma->dma_dev.src_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ fsl_edma->dma_dev.dst_addr_widths |= BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
+ }
+
fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ if (drvdata->flags & FSL_EDMA_DRV_DEV_TO_DEV)
+ fsl_edma->dma_dev.directions |= BIT(DMA_DEV_TO_DEV);
+
+ fsl_edma->dma_dev.copy_align = drvdata->flags & FSL_EDMA_DRV_ALIGN_64BYTE ?
+ DMAENGINE_ALIGN_64_BYTES :
+ DMAENGINE_ALIGN_32_BYTES;
- fsl_edma->dma_dev.copy_align = DMAENGINE_ALIGN_32_BYTES;
/* Per worst case 'nbytes = 1' take CITER as the max_seg_size */
dma_set_max_seg_size(fsl_edma->dma_dev.dev, 0x3fff);
+ fsl_edma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+
platform_set_drvdata(pdev, fsl_edma);
ret = dma_async_device_register(&fsl_edma->dma_dev);
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA engine. (%d)\n", ret);
- fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
return ret;
}
- ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
+ ret = of_dma_controller_register(np,
+ drvdata->flags & FSL_EDMA_DRV_SPLIT_REG ? fsl_edma3_xlate : fsl_edma_xlate,
+ fsl_edma);
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA of_dma. (%d)\n", ret);
dma_async_device_unregister(&fsl_edma->dma_dev);
- fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
return ret;
}
/* enable round robin arbitration */
- edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
+ if (!(drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}
@@ -470,7 +663,7 @@ static int fsl_edma_resume_early(struct device *dev)
for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i];
fsl_chan->pm_state = RUNNING;
- edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
+ edma_write_tcdreg(fsl_chan, 0, csr);
if (fsl_chan->slave_id != 0)
fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
}
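
Among the probe changes, the optional dma-channel-mask property is read as one or two 32-bit cells and folded into the 64-bit chan_masked bitmap (high cell shifted up by 32); masked channels are then skipped with BIT(i) tests in fsl_edma3_irq_init(), fsl_edma3_attach_pd() and the channel init loop. A worked sketch of that assembly and test in plain C, with hypothetical cell values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical cells as read from "dma-channel-mask" (first cell covers channels 0-31). */
        uint32_t chan_mask[2] = { 0x00000003, 0x00000001 };
        uint64_t chan_masked;

        chan_masked = chan_mask[1];
        chan_masked <<= 32;
        chan_masked |= chan_mask[0];       /* -> 0x0000000100000003 */

        for (int i = 0; i < 40; i++)
            if (chan_masked & (1ULL << i))
                printf("channel %d masked, skipped at probe\n", i);
        return 0;
    }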
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index eddb2688f234..a8cc8a4bc610 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -13,10 +13,10 @@
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
#include "virt-dma.h"
#include "fsldma.h"
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index fdf3500d96a9..0b9ca93ce3dc 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -60,9 +60,10 @@
*/
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_address.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index f8459cc5315d..ddcf736d283d 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -28,9 +28,10 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/fsldma.h>
#include "dmaengine.h"
#include "fsldma.h"
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 9a15f0d12c79..22d6f4e455b7 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -299,21 +299,6 @@ void idxd_wqs_unmap_portal(struct idxd_device *idxd)
}
}
-static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
-{
- struct idxd_device *idxd = wq->idxd;
- union wqcfg wqcfg;
- unsigned int offset;
-
- offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
- spin_lock(&idxd->dev_lock);
- wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
- wqcfg.priv = priv;
- wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
- iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
- spin_unlock(&idxd->dev_lock);
-}
-
static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
{
struct idxd_device *idxd = wq->idxd;
@@ -784,8 +769,6 @@ static int idxd_device_evl_setup(struct idxd_device *idxd)
goto err_alloc;
}
- memset(addr, 0, size);
-
spin_lock(&evl->lock);
evl->log = addr;
evl->dma = dma_addr;
@@ -1421,15 +1404,14 @@ int drv_enable_wq(struct idxd_wq *wq)
}
/*
- * In the event that the WQ is configurable for pasid and priv bits.
- * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit.
- * However, for non-kernel wq, the driver should only set the pasid_en bit for
- * shared wq. A dedicated wq that is not 'kernel' type will configure pasid and
+ * In the event that the WQ is configurable for pasid, the driver
+ * should set up the pasid and pasid_en bits. This is true for both
+ * kernel and user shared workqueues. There is no need to set the priv
+ * bit, since in-kernel DMA will also issue user-privileged requests.
+ * A dedicated wq that is not 'kernel' type will configure pasid and
* pasid_en later on so there is no need to setup.
*/
if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
- int priv = 0;
-
if (wq_pasid_enabled(wq)) {
if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
@@ -1437,10 +1419,6 @@ int drv_enable_wq(struct idxd_wq *wq)
__idxd_wq_set_pasid_locked(wq, pasid);
}
}
-
- if (is_idxd_wq_kernel(wq))
- priv = 1;
- __idxd_wq_set_priv_locked(wq, priv);
}
rc = 0;
@@ -1548,6 +1526,15 @@ int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
if (rc < 0)
return -ENXIO;
+ /*
+ * The system PASID is preserved across a device disable/enable cycle,
+ * but the genconfig register content gets cleared during device reset.
+ * User interrupts must be re-enabled so that the kernel work queue
+ * completion IRQ keeps functioning.
+ */
+ if (idxd->pasid != IOMMU_PASID_INVALID)
+ idxd_set_user_intr(idxd, 1);
+
rc = idxd_device_evl_setup(idxd);
if (rc < 0) {
idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index eb35ca313684..07623fb0f52f 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -75,9 +75,10 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
hw->xfer_size = len;
/*
* For dedicated WQ, this field is ignored and HW will use the WQCFG.priv
- * field instead. This field should be set to 1 for kernel descriptors.
+ * field instead. This field should be set to 0 for kernel descriptors
+ * since kernel DMA on VT-d supports "user" privilege only.
*/
- hw->priv = 1;
+ hw->priv = 0;
hw->completion_addr = compl;
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 5428a2e1b1ec..e269ca1f4862 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -473,6 +473,15 @@ static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie)
return container_of(ie, struct idxd_device, ie);
}
+static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable)
+{
+ union gencfg_reg reg;
+
+ reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+ reg.user_int_en = enable;
+ iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+}
+
extern struct bus_type dsa_bus_type;
extern bool support_enqcmd;
@@ -651,8 +660,6 @@ int idxd_register_bus_type(void);
void idxd_unregister_bus_type(void);
int idxd_register_devices(struct idxd_device *idxd);
void idxd_unregister_devices(struct idxd_device *idxd);
-int idxd_register_driver(void);
-void idxd_unregister_driver(void);
void idxd_wqs_quiesce(struct idxd_device *idxd);
bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);
@@ -664,8 +671,6 @@ void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
/* device control */
-int idxd_register_idxd_drv(void);
-void idxd_unregister_idxd_drv(void);
int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
int drv_enable_wq(struct idxd_wq *wq);
@@ -710,7 +715,6 @@ int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
void idxd_unregister_dma_device(struct idxd_device *idxd);
-void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
enum idxd_complete_type comp_type, bool free_desc);
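The idxd_set_user_intr() helper introduced in the idxd.h hunk above is a plain read-modify-write of a single bit (user_int_en) in the 32-bit GENCFG register. A rough userspace sketch of that pattern follows; the bit position, the register variable and the function name are illustrative assumptions, not the IDXD register layout or kernel API, and the plain pointer accesses only stand in for ioread32()/iowrite32().

#include <stdint.h>
#include <stdio.h>

#define USER_INT_EN (1u << 0)   /* assumed bit position, for illustration only */

/* Read-modify-write one bit of a memory-mapped 32-bit register. */
static void set_user_intr(volatile uint32_t *gencfg, int enable)
{
        uint32_t val = *gencfg;          /* stands in for ioread32() */

        if (enable)
                val |= USER_INT_EN;
        else
                val &= ~USER_INT_EN;

        *gencfg = val;                   /* stands in for iowrite32() */
}

int main(void)
{
        uint32_t reg = 0;

        set_user_intr(&reg, 1);
        printf("gencfg after enable:  0x%08x\n", reg);
        set_user_intr(&reg, 0);
        printf("gencfg after disable: 0x%08x\n", reg);
        return 0;
}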
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 1aa823974cda..0eb1c827a215 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -550,14 +550,59 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d
static int idxd_enable_system_pasid(struct idxd_device *idxd)
{
- return -EOPNOTSUPP;
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ struct iommu_domain *domain;
+ ioasid_t pasid;
+ int ret;
+
+ /*
+ * Attach a global PASID to the DMA domain so that we can use ENQCMDS
+ * to submit work on buffers mapped by DMA API.
+ */
+ domain = iommu_get_domain_for_dev(dev);
+ if (!domain)
+ return -EPERM;
+
+ pasid = iommu_alloc_global_pasid(dev);
+ if (pasid == IOMMU_PASID_INVALID)
+ return -ENOSPC;
+
+ /*
+ * The DMA domain is owned by the driver; it should support all valid
+ * types such as DMA-FQ, identity, etc.
+ */
+ ret = iommu_attach_device_pasid(domain, dev, pasid);
+ if (ret) {
+ dev_err(dev, "failed to attach device pasid %d, domain type %d",
+ pasid, domain->type);
+ iommu_free_global_pasid(pasid);
+ return ret;
+ }
+
+ /* Since we set user privilege for kernel DMA, enable completion IRQ */
+ idxd_set_user_intr(idxd, 1);
+ idxd->pasid = pasid;
+
+ return ret;
}
static void idxd_disable_system_pasid(struct idxd_device *idxd)
{
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ struct iommu_domain *domain;
+
+ domain = iommu_get_domain_for_dev(dev);
+ if (!domain)
+ return;
+
+ iommu_detach_device_pasid(domain, dev, idxd->pasid);
+ iommu_free_global_pasid(idxd->pasid);
- iommu_sva_unbind_device(idxd->sva);
+ idxd_set_user_intr(idxd, 0);
idxd->sva = NULL;
+ idxd->pasid = IOMMU_PASID_INVALID;
}
static int idxd_enable_sva(struct pci_dev *pdev)
@@ -600,8 +645,9 @@ static int idxd_probe(struct idxd_device *idxd)
} else {
set_bit(IDXD_FLAG_USER_PASID_ENABLED, &idxd->flags);
- if (idxd_enable_system_pasid(idxd))
- dev_warn(dev, "No in-kernel DMA with PASID.\n");
+ rc = idxd_enable_system_pasid(idxd);
+ if (rc)
+ dev_warn(dev, "No in-kernel DMA with PASID. %d\n", rc);
else
set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
}
diff --git a/drivers/dma/idxd/perfmon.c b/drivers/dma/idxd/perfmon.c
index d73004f47cf4..fdda6d604262 100644
--- a/drivers/dma/idxd/perfmon.c
+++ b/drivers/dma/idxd/perfmon.c
@@ -245,12 +245,11 @@ static void perfmon_pmu_event_update(struct perf_event *event)
int shift = 64 - idxd->idxd_pmu->counter_width;
struct hw_perf_event *hwc = &event->hw;
+ prev_raw_count = local64_read(&hwc->prev_count);
do {
- prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = perfmon_pmu_read_counter(event);
- } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
- new_raw_count) != prev_raw_count);
-
+ } while (!local64_try_cmpxchg(&hwc->prev_count,
+ &prev_raw_count, new_raw_count));
n = (new_raw_count << shift);
p = (prev_raw_count << shift);
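The perfmon.c hunk above converts an open-coded cmpxchg retry loop into local64_try_cmpxchg(): the previous count is loaded once before the loop, and on a failed exchange the freshly observed value is written back into the local variable, so the loop retries without re-reading. A minimal userspace sketch of the same idea using the GCC/Clang __atomic_compare_exchange_n builtin is shown below; the counter source and variable names are illustrative assumptions, not the kernel's local64_t interface.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for reading a free-running hardware counter. */
static uint64_t read_hw_counter(void)
{
        static uint64_t fake;
        return fake += 123;
}

static uint64_t prev_count;

static uint64_t update_counter(void)
{
        uint64_t prev = __atomic_load_n(&prev_count, __ATOMIC_RELAXED);
        uint64_t new;

        do {
                new = read_hw_counter();
                /* On failure, 'prev' is refreshed with the value currently
                 * stored in prev_count, so no explicit re-read is needed. */
        } while (!__atomic_compare_exchange_n(&prev_count, &prev, new,
                                              0, __ATOMIC_RELAXED,
                                              __ATOMIC_RELAXED));

        return new - prev;      /* delta accumulated since the last update */
}

int main(void)
{
        printf("delta = %llu\n", (unsigned long long)update_counter());
        return 0;
}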
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 293739ac5596..7caba90d85b3 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -948,13 +948,6 @@ static ssize_t wq_name_store(struct device *dev,
if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
return -EINVAL;
- /*
- * This is temporarily placed here until we have SVM support for
- * dmaengine.
- */
- if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
- return -EOPNOTSUPP;
-
input = kstrndup(buf, count, GFP_KERNEL);
if (!input)
return -ENOMEM;
@@ -1095,8 +1088,8 @@ static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
- if (!idxd->hw.wq_cap.wq_ats_support)
- return -EOPNOTSUPP;
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
rc = kstrtobool(buf, &ats_dis);
if (rc < 0)
@@ -1131,8 +1124,8 @@ static ssize_t wq_prs_disable_store(struct device *dev, struct device_attribute
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
- if (!idxd->hw.wq_cap.wq_prs_support)
- return -EOPNOTSUPP;
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
rc = kstrtobool(buf, &prs_dis);
if (rc < 0)
@@ -1288,12 +1281,9 @@ static struct attribute *idxd_wq_attributes[] = {
NULL,
};
-static bool idxd_wq_attr_op_config_invisible(struct attribute *attr,
- struct idxd_device *idxd)
-{
- return attr == &dev_attr_wq_op_config.attr &&
- !idxd->hw.wq_cap.op_config;
-}
+/* A WQ attr is invisible if the feature is not supported in WQCAP. */
+#define idxd_wq_attr_invisible(name, cap_field, a, idxd) \
+ ((a) == &dev_attr_wq_##name.attr && !(idxd)->hw.wq_cap.cap_field)
static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
struct idxd_device *idxd)
@@ -1303,13 +1293,6 @@ static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
idxd->data->type == IDXD_TYPE_IAX;
}
-static bool idxd_wq_attr_wq_prs_disable_invisible(struct attribute *attr,
- struct idxd_device *idxd)
-{
- return attr == &dev_attr_wq_prs_disable.attr &&
- !idxd->hw.wq_cap.wq_prs_support;
-}
-
static umode_t idxd_wq_attr_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
@@ -1317,13 +1300,16 @@ static umode_t idxd_wq_attr_visible(struct kobject *kobj,
struct idxd_wq *wq = confdev_to_wq(dev);
struct idxd_device *idxd = wq->idxd;
- if (idxd_wq_attr_op_config_invisible(attr, idxd))
+ if (idxd_wq_attr_invisible(op_config, op_config, attr, idxd))
return 0;
if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
return 0;
- if (idxd_wq_attr_wq_prs_disable_invisible(attr, idxd))
+ if (idxd_wq_attr_invisible(prs_disable, wq_prs_support, attr, idxd))
+ return 0;
+
+ if (idxd_wq_attr_invisible(ats_disable, wq_ats_support, attr, idxd))
return 0;
return attr->mode;
@@ -1480,7 +1466,7 @@ static ssize_t pasid_enabled_show(struct device *dev,
{
struct idxd_device *idxd = confdev_to_idxd(dev);
- return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
+ return sysfs_emit(buf, "%u\n", device_user_pasid_enabled(idxd));
}
static DEVICE_ATTR_RO(pasid_enabled);
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index ad084552640f..9be0d3226e19 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -17,7 +17,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index f040751690af..114f254b9f50 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -21,7 +21,7 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
#include <asm/irq.h>
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 7a912f90c2a9..51012bd39900 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -31,7 +31,6 @@
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/workqueue.h>
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 289c59ed74b9..17f6b6367113 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -51,7 +51,7 @@
/* pack PCI B/D/F into a u16 */
static inline u16 dcaid_from_pcidev(struct pci_dev *pci)
{
- return (pci->bus->number << 8) | pci->devfn;
+ return pci_dev_id(pci);
}
static int dca_enabled_in_bios(struct pci_dev *pdev)
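The dca.c hunk above replaces the open-coded expression with pci_dev_id(), which returns the 16-bit PCI routing ID: bus number in the high byte, devfn (device and function) in the low byte — exactly what the removed code computed. A small standalone sketch of that packing follows; the helper and variable names are chosen for illustration and are not the kernel API.

#include <stdint.h>
#include <stdio.h>

/* Pack bus/devfn into a 16-bit routing ID, as pci_dev_id() does. */
static uint16_t bdf_pack(uint8_t bus, uint8_t devfn)
{
        return (uint16_t)((bus << 8) | devfn);
}

int main(void)
{
        uint8_t bus = 0x3a, dev = 0x00, fn = 0x1;
        uint8_t devfn = (uint8_t)((dev << 3) | fn);  /* device in bits [7:3], function in [2:0] */
        uint16_t id = bdf_pack(bus, devfn);

        printf("BDF %02x:%02x.%x -> routing ID 0x%04x\n", bus, dev, fn, id);
        return 0;
}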
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 35e06b382603..a180171087a8 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -74,6 +74,7 @@ struct ioatdma_device {
struct dca_provider *dca;
enum ioat_irq_mode irq_mode;
u32 cap;
+ int chancnt;
/* shadow version for CB3.3 chan reset errata workaround */
u64 msixtba0;
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index c4602bfc9c74..9c364e92cb82 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -420,7 +420,7 @@ int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
msix:
/* The number of MSI-X vectors should equal the number of channels */
- msixcnt = ioat_dma->dma_dev.chancnt;
+ msixcnt = ioat_dma->chancnt;
for (i = 0; i < msixcnt; i++)
ioat_dma->msix_entries[i].entry = i;
@@ -511,7 +511,7 @@ static int ioat_probe(struct ioatdma_device *ioat_dma)
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
dma->dev = &pdev->dev;
- if (!dma->chancnt) {
+ if (!ioat_dma->chancnt) {
dev_err(dev, "channel enumeration error\n");
goto err_setup_interrupts;
}
@@ -567,15 +567,16 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
struct device *dev = &ioat_dma->pdev->dev;
struct dma_device *dma = &ioat_dma->dma_dev;
u8 xfercap_log;
+ int chancnt;
int i;
INIT_LIST_HEAD(&dma->channels);
- dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
- dma->chancnt &= 0x1f; /* bits [4:0] valid */
- if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
+ chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
+ chancnt &= 0x1f; /* bits [4:0] valid */
+ if (chancnt > ARRAY_SIZE(ioat_dma->idx)) {
dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
- dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
- dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
+ chancnt, ARRAY_SIZE(ioat_dma->idx));
+ chancnt = ARRAY_SIZE(ioat_dma->idx);
}
xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
xfercap_log &= 0x1f; /* bits [4:0] valid */
@@ -583,7 +584,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
return;
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
- for (i = 0; i < dma->chancnt; i++) {
+ for (i = 0; i < chancnt; i++) {
ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
if (!ioat_chan)
break;
@@ -596,7 +597,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
break;
}
}
- dma->chancnt = i;
+ ioat_dma->chancnt = i;
}
/**
diff --git a/drivers/dma/ipu/Makefile b/drivers/dma/ipu/Makefile
deleted file mode 100644
index c79ff116daf6..000000000000
--- a/drivers/dma/ipu/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-y += ipu_irq.o ipu_idmac.o
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
deleted file mode 100644
index d799b99c18bd..000000000000
--- a/drivers/dma/ipu/ipu_idmac.c
+++ /dev/null
@@ -1,1801 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/list.h>
-#include <linux/clk.h>
-#include <linux/vmalloc.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/dma/ipu-dma.h>
-
-#include "../dmaengine.h"
-#include "ipu_intern.h"
-
-#define FS_VF_IN_VALID 0x00000002
-#define FS_ENC_IN_VALID 0x00000001
-
-static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
- bool wait_for_stop);
-
-/*
- * There can be only one, we could allocate it dynamically, but then we'd have
- * to add an extra parameter to some functions, and use something as ugly as
- * struct ipu *ipu = to_ipu(to_idmac(ichan->dma_chan.device));
- * in the ISR
- */
-static struct ipu ipu_data;
-
-#define to_ipu(id) container_of(id, struct ipu, idmac)
-
-static u32 __idmac_read_icreg(struct ipu *ipu, unsigned long reg)
-{
- return __raw_readl(ipu->reg_ic + reg);
-}
-
-#define idmac_read_icreg(ipu, reg) __idmac_read_icreg(ipu, reg - IC_CONF)
-
-static void __idmac_write_icreg(struct ipu *ipu, u32 value, unsigned long reg)
-{
- __raw_writel(value, ipu->reg_ic + reg);
-}
-
-#define idmac_write_icreg(ipu, v, reg) __idmac_write_icreg(ipu, v, reg - IC_CONF)
-
-static u32 idmac_read_ipureg(struct ipu *ipu, unsigned long reg)
-{
- return __raw_readl(ipu->reg_ipu + reg);
-}
-
-static void idmac_write_ipureg(struct ipu *ipu, u32 value, unsigned long reg)
-{
- __raw_writel(value, ipu->reg_ipu + reg);
-}
-
-/*****************************************************************************
- * IPU / IC common functions
- */
-static void dump_idmac_reg(struct ipu *ipu)
-{
- dev_dbg(ipu->dev, "IDMAC_CONF 0x%x, IC_CONF 0x%x, IDMAC_CHA_EN 0x%x, "
- "IDMAC_CHA_PRI 0x%x, IDMAC_CHA_BUSY 0x%x\n",
- idmac_read_icreg(ipu, IDMAC_CONF),
- idmac_read_icreg(ipu, IC_CONF),
- idmac_read_icreg(ipu, IDMAC_CHA_EN),
- idmac_read_icreg(ipu, IDMAC_CHA_PRI),
- idmac_read_icreg(ipu, IDMAC_CHA_BUSY));
- dev_dbg(ipu->dev, "BUF0_RDY 0x%x, BUF1_RDY 0x%x, CUR_BUF 0x%x, "
- "DB_MODE 0x%x, TASKS_STAT 0x%x\n",
- idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
- idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
- idmac_read_ipureg(ipu, IPU_CHA_CUR_BUF),
- idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL),
- idmac_read_ipureg(ipu, IPU_TASKS_STAT));
-}
-
-static uint32_t bytes_per_pixel(enum pixel_fmt fmt)
-{
- switch (fmt) {
- case IPU_PIX_FMT_GENERIC: /* generic data */
- case IPU_PIX_FMT_RGB332:
- case IPU_PIX_FMT_YUV420P:
- case IPU_PIX_FMT_YUV422P:
- default:
- return 1;
- case IPU_PIX_FMT_RGB565:
- case IPU_PIX_FMT_YUYV:
- case IPU_PIX_FMT_UYVY:
- return 2;
- case IPU_PIX_FMT_BGR24:
- case IPU_PIX_FMT_RGB24:
- return 3;
- case IPU_PIX_FMT_GENERIC_32: /* generic data */
- case IPU_PIX_FMT_BGR32:
- case IPU_PIX_FMT_RGB32:
- case IPU_PIX_FMT_ABGR32:
- return 4;
- }
-}
-
-/* Enable direct write to memory by the Camera Sensor Interface */
-static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel)
-{
- uint32_t ic_conf, mask;
-
- switch (channel) {
- case IDMAC_IC_0:
- mask = IC_CONF_PRPENC_EN;
- break;
- case IDMAC_IC_7:
- mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
- break;
- default:
- return;
- }
- ic_conf = idmac_read_icreg(ipu, IC_CONF) | mask;
- idmac_write_icreg(ipu, ic_conf, IC_CONF);
-}
-
-/* Called under spin_lock_irqsave(&ipu_data.lock) */
-static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel)
-{
- uint32_t ic_conf, mask;
-
- switch (channel) {
- case IDMAC_IC_0:
- mask = IC_CONF_PRPENC_EN;
- break;
- case IDMAC_IC_7:
- mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN;
- break;
- default:
- return;
- }
- ic_conf = idmac_read_icreg(ipu, IC_CONF) & ~mask;
- idmac_write_icreg(ipu, ic_conf, IC_CONF);
-}
-
-static uint32_t ipu_channel_status(struct ipu *ipu, enum ipu_channel channel)
-{
- uint32_t stat = TASK_STAT_IDLE;
- uint32_t task_stat_reg = idmac_read_ipureg(ipu, IPU_TASKS_STAT);
-
- switch (channel) {
- case IDMAC_IC_7:
- stat = (task_stat_reg & TSTAT_CSI2MEM_MASK) >>
- TSTAT_CSI2MEM_OFFSET;
- break;
- case IDMAC_IC_0:
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- default:
- break;
- }
- return stat;
-}
-
-struct chan_param_mem_planar {
- /* Word 0 */
- u32 xv:10;
- u32 yv:10;
- u32 xb:12;
-
- u32 yb:12;
- u32 res1:2;
- u32 nsb:1;
- u32 lnpb:6;
- u32 ubo_l:11;
-
- u32 ubo_h:15;
- u32 vbo_l:17;
-
- u32 vbo_h:9;
- u32 res2:3;
- u32 fw:12;
- u32 fh_l:8;
-
- u32 fh_h:4;
- u32 res3:28;
-
- /* Word 1 */
- u32 eba0;
-
- u32 eba1;
-
- u32 bpp:3;
- u32 sl:14;
- u32 pfs:3;
- u32 bam:3;
- u32 res4:2;
- u32 npb:6;
- u32 res5:1;
-
- u32 sat:2;
- u32 res6:30;
-} __attribute__ ((packed));
-
-struct chan_param_mem_interleaved {
- /* Word 0 */
- u32 xv:10;
- u32 yv:10;
- u32 xb:12;
-
- u32 yb:12;
- u32 sce:1;
- u32 res1:1;
- u32 nsb:1;
- u32 lnpb:6;
- u32 sx:10;
- u32 sy_l:1;
-
- u32 sy_h:9;
- u32 ns:10;
- u32 sm:10;
- u32 sdx_l:3;
-
- u32 sdx_h:2;
- u32 sdy:5;
- u32 sdrx:1;
- u32 sdry:1;
- u32 sdr1:1;
- u32 res2:2;
- u32 fw:12;
- u32 fh_l:8;
-
- u32 fh_h:4;
- u32 res3:28;
-
- /* Word 1 */
- u32 eba0;
-
- u32 eba1;
-
- u32 bpp:3;
- u32 sl:14;
- u32 pfs:3;
- u32 bam:3;
- u32 res4:2;
- u32 npb:6;
- u32 res5:1;
-
- u32 sat:2;
- u32 scc:1;
- u32 ofs0:5;
- u32 ofs1:5;
- u32 ofs2:5;
- u32 ofs3:5;
- u32 wid0:3;
- u32 wid1:3;
- u32 wid2:3;
-
- u32 wid3:3;
- u32 dec_sel:1;
- u32 res6:28;
-} __attribute__ ((packed));
-
-union chan_param_mem {
- struct chan_param_mem_planar pp;
- struct chan_param_mem_interleaved ip;
-};
-
-static void ipu_ch_param_set_plane_offset(union chan_param_mem *params,
- u32 u_offset, u32 v_offset)
-{
- params->pp.ubo_l = u_offset & 0x7ff;
- params->pp.ubo_h = u_offset >> 11;
- params->pp.vbo_l = v_offset & 0x1ffff;
- params->pp.vbo_h = v_offset >> 17;
-}
-
-static void ipu_ch_param_set_size(union chan_param_mem *params,
- uint32_t pixel_fmt, uint16_t width,
- uint16_t height, uint16_t stride)
-{
- u32 u_offset;
- u32 v_offset;
-
- params->pp.fw = width - 1;
- params->pp.fh_l = height - 1;
- params->pp.fh_h = (height - 1) >> 8;
- params->pp.sl = stride - 1;
-
- switch (pixel_fmt) {
- case IPU_PIX_FMT_GENERIC:
- /*Represents 8-bit Generic data */
- params->pp.bpp = 3;
- params->pp.pfs = 7;
- params->pp.npb = 31;
- params->pp.sat = 2; /* SAT = use 32-bit access */
- break;
- case IPU_PIX_FMT_GENERIC_32:
- /*Represents 32-bit Generic data */
- params->pp.bpp = 0;
- params->pp.pfs = 7;
- params->pp.npb = 7;
- params->pp.sat = 2; /* SAT = use 32-bit access */
- break;
- case IPU_PIX_FMT_RGB565:
- params->ip.bpp = 2;
- params->ip.pfs = 4;
- params->ip.npb = 15;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 0; /* Red bit offset */
- params->ip.ofs1 = 5; /* Green bit offset */
- params->ip.ofs2 = 11; /* Blue bit offset */
- params->ip.ofs3 = 16; /* Alpha bit offset */
- params->ip.wid0 = 4; /* Red bit width - 1 */
- params->ip.wid1 = 5; /* Green bit width - 1 */
- params->ip.wid2 = 4; /* Blue bit width - 1 */
- break;
- case IPU_PIX_FMT_BGR24:
- params->ip.bpp = 1; /* 24 BPP & RGB PFS */
- params->ip.pfs = 4;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 0; /* Red bit offset */
- params->ip.ofs1 = 8; /* Green bit offset */
- params->ip.ofs2 = 16; /* Blue bit offset */
- params->ip.ofs3 = 24; /* Alpha bit offset */
- params->ip.wid0 = 7; /* Red bit width - 1 */
- params->ip.wid1 = 7; /* Green bit width - 1 */
- params->ip.wid2 = 7; /* Blue bit width - 1 */
- break;
- case IPU_PIX_FMT_RGB24:
- params->ip.bpp = 1; /* 24 BPP & RGB PFS */
- params->ip.pfs = 4;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 16; /* Red bit offset */
- params->ip.ofs1 = 8; /* Green bit offset */
- params->ip.ofs2 = 0; /* Blue bit offset */
- params->ip.ofs3 = 24; /* Alpha bit offset */
- params->ip.wid0 = 7; /* Red bit width - 1 */
- params->ip.wid1 = 7; /* Green bit width - 1 */
- params->ip.wid2 = 7; /* Blue bit width - 1 */
- break;
- case IPU_PIX_FMT_BGRA32:
- case IPU_PIX_FMT_BGR32:
- case IPU_PIX_FMT_ABGR32:
- params->ip.bpp = 0;
- params->ip.pfs = 4;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 8; /* Red bit offset */
- params->ip.ofs1 = 16; /* Green bit offset */
- params->ip.ofs2 = 24; /* Blue bit offset */
- params->ip.ofs3 = 0; /* Alpha bit offset */
- params->ip.wid0 = 7; /* Red bit width - 1 */
- params->ip.wid1 = 7; /* Green bit width - 1 */
- params->ip.wid2 = 7; /* Blue bit width - 1 */
- params->ip.wid3 = 7; /* Alpha bit width - 1 */
- break;
- case IPU_PIX_FMT_RGBA32:
- case IPU_PIX_FMT_RGB32:
- params->ip.bpp = 0;
- params->ip.pfs = 4;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 24; /* Red bit offset */
- params->ip.ofs1 = 16; /* Green bit offset */
- params->ip.ofs2 = 8; /* Blue bit offset */
- params->ip.ofs3 = 0; /* Alpha bit offset */
- params->ip.wid0 = 7; /* Red bit width - 1 */
- params->ip.wid1 = 7; /* Green bit width - 1 */
- params->ip.wid2 = 7; /* Blue bit width - 1 */
- params->ip.wid3 = 7; /* Alpha bit width - 1 */
- break;
- case IPU_PIX_FMT_UYVY:
- params->ip.bpp = 2;
- params->ip.pfs = 6;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- break;
- case IPU_PIX_FMT_YUV420P2:
- case IPU_PIX_FMT_YUV420P:
- params->ip.bpp = 3;
- params->ip.pfs = 3;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- u_offset = stride * height;
- v_offset = u_offset + u_offset / 4;
- ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
- break;
- case IPU_PIX_FMT_YVU422P:
- params->ip.bpp = 3;
- params->ip.pfs = 2;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- v_offset = stride * height;
- u_offset = v_offset + v_offset / 2;
- ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
- break;
- case IPU_PIX_FMT_YUV422P:
- params->ip.bpp = 3;
- params->ip.pfs = 2;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- u_offset = stride * height;
- v_offset = u_offset + u_offset / 2;
- ipu_ch_param_set_plane_offset(params, u_offset, v_offset);
- break;
- default:
- dev_err(ipu_data.dev,
- "mx3 ipu: unimplemented pixel format %d\n", pixel_fmt);
- break;
- }
-
- params->pp.nsb = 1;
-}
-
-static void ipu_ch_param_set_buffer(union chan_param_mem *params,
- dma_addr_t buf0, dma_addr_t buf1)
-{
- params->pp.eba0 = buf0;
- params->pp.eba1 = buf1;
-}
-
-static void ipu_ch_param_set_rotation(union chan_param_mem *params,
- enum ipu_rotate_mode rotate)
-{
- params->pp.bam = rotate;
-}
-
-static void ipu_write_param_mem(uint32_t addr, uint32_t *data,
- uint32_t num_words)
-{
- for (; num_words > 0; num_words--) {
- dev_dbg(ipu_data.dev,
- "write param mem - addr = 0x%08X, data = 0x%08X\n",
- addr, *data);
- idmac_write_ipureg(&ipu_data, addr, IPU_IMA_ADDR);
- idmac_write_ipureg(&ipu_data, *data++, IPU_IMA_DATA);
- addr++;
- if ((addr & 0x7) == 5) {
- addr &= ~0x7; /* set to word 0 */
- addr += 8; /* increment to next row */
- }
- }
-}
-
-static int calc_resize_coeffs(uint32_t in_size, uint32_t out_size,
- uint32_t *resize_coeff,
- uint32_t *downsize_coeff)
-{
- uint32_t temp_size;
- uint32_t temp_downsize;
-
- *resize_coeff = 1 << 13;
- *downsize_coeff = 1 << 13;
-
- /* Cannot downsize more than 8:1 */
- if (out_size << 3 < in_size)
- return -EINVAL;
-
- /* compute downsizing coefficient */
- temp_downsize = 0;
- temp_size = in_size;
- while (temp_size >= out_size * 2 && temp_downsize < 2) {
- temp_size >>= 1;
- temp_downsize++;
- }
- *downsize_coeff = temp_downsize;
-
- /*
- * compute resizing coefficient using the following formula:
- * resize_coeff = M*(SI -1)/(SO - 1)
- * where M = 2^13, SI - input size, SO - output size
- */
- *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1);
- if (*resize_coeff >= 16384L) {
- dev_err(ipu_data.dev, "Warning! Overflow on resize coeff.\n");
- *resize_coeff = 0x3FFF;
- }
-
- dev_dbg(ipu_data.dev, "resizing from %u -> %u pixels, "
- "downsize=%u, resize=%u.%lu (reg=%u)\n", in_size, out_size,
- *downsize_coeff, *resize_coeff >= 8192L ? 1 : 0,
- ((*resize_coeff & 0x1FFF) * 10000L) / 8192L, *resize_coeff);
-
- return 0;
-}
-
-static enum ipu_color_space format_to_colorspace(enum pixel_fmt fmt)
-{
- switch (fmt) {
- case IPU_PIX_FMT_RGB565:
- case IPU_PIX_FMT_BGR24:
- case IPU_PIX_FMT_RGB24:
- case IPU_PIX_FMT_BGR32:
- case IPU_PIX_FMT_RGB32:
- return IPU_COLORSPACE_RGB;
- default:
- return IPU_COLORSPACE_YCBCR;
- }
-}
-
-static int ipu_ic_init_prpenc(struct ipu *ipu,
- union ipu_channel_param *params, bool src_is_csi)
-{
- uint32_t reg, ic_conf;
- uint32_t downsize_coeff, resize_coeff;
- enum ipu_color_space in_fmt, out_fmt;
-
- /* Setup vertical resizing */
- calc_resize_coeffs(params->video.in_height,
- params->video.out_height,
- &resize_coeff, &downsize_coeff);
- reg = (downsize_coeff << 30) | (resize_coeff << 16);
-
- /* Setup horizontal resizing */
- calc_resize_coeffs(params->video.in_width,
- params->video.out_width,
- &resize_coeff, &downsize_coeff);
- reg |= (downsize_coeff << 14) | resize_coeff;
-
- /* Setup color space conversion */
- in_fmt = format_to_colorspace(params->video.in_pixel_fmt);
- out_fmt = format_to_colorspace(params->video.out_pixel_fmt);
-
- /*
- * Colourspace conversion unsupported yet - see _init_csc() in
- * Freescale sources
- */
- if (in_fmt != out_fmt) {
- dev_err(ipu->dev, "Colourspace conversion unsupported!\n");
- return -EOPNOTSUPP;
- }
-
- idmac_write_icreg(ipu, reg, IC_PRP_ENC_RSC);
-
- ic_conf = idmac_read_icreg(ipu, IC_CONF);
-
- if (src_is_csi)
- ic_conf &= ~IC_CONF_RWS_EN;
- else
- ic_conf |= IC_CONF_RWS_EN;
-
- idmac_write_icreg(ipu, ic_conf, IC_CONF);
-
- return 0;
-}
-
-static uint32_t dma_param_addr(uint32_t dma_ch)
-{
- /* Channel Parameter Memory */
- return 0x10000 | (dma_ch << 4);
-}
-
-static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel,
- bool prio)
-{
- u32 reg = idmac_read_icreg(ipu, IDMAC_CHA_PRI);
-
- if (prio)
- reg |= 1UL << channel;
- else
- reg &= ~(1UL << channel);
-
- idmac_write_icreg(ipu, reg, IDMAC_CHA_PRI);
-
- dump_idmac_reg(ipu);
-}
-
-static uint32_t ipu_channel_conf_mask(enum ipu_channel channel)
-{
- uint32_t mask;
-
- switch (channel) {
- case IDMAC_IC_0:
- case IDMAC_IC_7:
- mask = IPU_CONF_CSI_EN | IPU_CONF_IC_EN;
- break;
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- mask = IPU_CONF_SDC_EN | IPU_CONF_DI_EN;
- break;
- default:
- mask = 0;
- break;
- }
-
- return mask;
-}
-
-/**
- * ipu_enable_channel() - enable an IPU channel.
- * @idmac: IPU DMAC context.
- * @ichan: IDMAC channel.
- * @return: 0 on success or negative error code on failure.
- */
-static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan)
-{
- struct ipu *ipu = to_ipu(idmac);
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- uint32_t reg;
- unsigned long flags;
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- /* Reset to buffer 0 */
- idmac_write_ipureg(ipu, 1UL << channel, IPU_CHA_CUR_BUF);
- ichan->active_buffer = 0;
- ichan->status = IPU_CHANNEL_ENABLED;
-
- switch (channel) {
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- case IDMAC_IC_7:
- ipu_channel_set_priority(ipu, channel, true);
- break;
- default:
- break;
- }
-
- reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
-
- idmac_write_icreg(ipu, reg | (1UL << channel), IDMAC_CHA_EN);
-
- ipu_ic_enable_task(ipu, channel);
-
- spin_unlock_irqrestore(&ipu->lock, flags);
- return 0;
-}
-
-/**
- * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel.
- * @ichan: IDMAC channel.
- * @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code.
- * @width: width of buffer in pixels.
- * @height: height of buffer in pixels.
- * @stride: stride length of buffer in pixels.
- * @rot_mode: rotation mode of buffer. A rotation setting other than
- * IPU_ROTATE_VERT_FLIP should only be used for input buffers of
- * rotation channels.
- * @phyaddr_0: buffer 0 physical address.
- * @phyaddr_1: buffer 1 physical address. Setting this to a value other than
- * NULL enables double buffering mode.
- * @return: 0 on success or negative error code on failure.
- */
-static int ipu_init_channel_buffer(struct idmac_channel *ichan,
- enum pixel_fmt pixel_fmt,
- uint16_t width, uint16_t height,
- uint32_t stride,
- enum ipu_rotate_mode rot_mode,
- dma_addr_t phyaddr_0, dma_addr_t phyaddr_1)
-{
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- struct idmac *idmac = to_idmac(ichan->dma_chan.device);
- struct ipu *ipu = to_ipu(idmac);
- union chan_param_mem params = {};
- unsigned long flags;
- uint32_t reg;
- uint32_t stride_bytes;
-
- stride_bytes = stride * bytes_per_pixel(pixel_fmt);
-
- if (stride_bytes % 4) {
- dev_err(ipu->dev,
- "Stride length must be 32-bit aligned, stride = %d, bytes = %d\n",
- stride, stride_bytes);
- return -EINVAL;
- }
-
- /* IC channel's stride must be a multiple of 8 pixels */
- if ((channel <= IDMAC_IC_13) && (stride % 8)) {
- dev_err(ipu->dev, "Stride must be 8 pixel multiple\n");
- return -EINVAL;
- }
-
- /* Build parameter memory data for DMA channel */
- ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
- ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
- ipu_ch_param_set_rotation(&params, rot_mode);
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- ipu_write_param_mem(dma_param_addr(channel), (uint32_t *)&params, 10);
-
- reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
-
- if (phyaddr_1)
- reg |= 1UL << channel;
- else
- reg &= ~(1UL << channel);
-
- idmac_write_ipureg(ipu, reg, IPU_CHA_DB_MODE_SEL);
-
- ichan->status = IPU_CHANNEL_READY;
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- return 0;
-}
-
-/**
- * ipu_select_buffer() - mark a channel's buffer as ready.
- * @channel: channel ID.
- * @buffer_n: buffer number to mark ready.
- */
-static void ipu_select_buffer(enum ipu_channel channel, int buffer_n)
-{
- /* No locking - this is a write-one-to-set register, cleared by IPU */
- if (buffer_n == 0)
- /* Mark buffer 0 as ready. */
- idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF0_RDY);
- else
- /* Mark buffer 1 as ready. */
- idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF1_RDY);
-}
-
-/**
- * ipu_update_channel_buffer() - update physical address of a channel buffer.
- * @ichan: IDMAC channel.
- * @buffer_n: buffer number to update.
- * 0 or 1 are the only valid values.
- * @phyaddr: buffer physical address.
- */
-/* Called under spin_lock(_irqsave)(&ichan->lock) */
-static void ipu_update_channel_buffer(struct idmac_channel *ichan,
- int buffer_n, dma_addr_t phyaddr)
-{
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- uint32_t reg;
- unsigned long flags;
-
- spin_lock_irqsave(&ipu_data.lock, flags);
-
- if (buffer_n == 0) {
- reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
- if (reg & (1UL << channel)) {
- ipu_ic_disable_task(&ipu_data, channel);
- ichan->status = IPU_CHANNEL_READY;
- }
-
- /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */
- idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
- 0x0008UL, IPU_IMA_ADDR);
- idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
- } else {
- reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
- if (reg & (1UL << channel)) {
- ipu_ic_disable_task(&ipu_data, channel);
- ichan->status = IPU_CHANNEL_READY;
- }
-
- /* Check if double-buffering is already enabled */
- reg = idmac_read_ipureg(&ipu_data, IPU_CHA_DB_MODE_SEL);
-
- if (!(reg & (1UL << channel)))
- idmac_write_ipureg(&ipu_data, reg | (1UL << channel),
- IPU_CHA_DB_MODE_SEL);
-
- /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 1) */
- idmac_write_ipureg(&ipu_data, dma_param_addr(channel) +
- 0x0009UL, IPU_IMA_ADDR);
- idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA);
- }
-
- spin_unlock_irqrestore(&ipu_data.lock, flags);
-}
-
-/* Called under spin_lock_irqsave(&ichan->lock) */
-static int ipu_submit_buffer(struct idmac_channel *ichan,
- struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx)
-{
- unsigned int chan_id = ichan->dma_chan.chan_id;
- struct device *dev = &ichan->dma_chan.dev->device;
-
- if (async_tx_test_ack(&desc->txd))
- return -EINTR;
-
- /*
- * On first invocation this shouldn't be necessary, the call to
- * ipu_init_channel_buffer() above will set addresses for us, so we
- * could make it conditional on status >= IPU_CHANNEL_ENABLED, but
- * doing it again shouldn't hurt either.
- */
- ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg));
-
- ipu_select_buffer(chan_id, buf_idx);
- dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n",
- sg, chan_id, buf_idx);
-
- return 0;
-}
-
-/* Called under spin_lock_irqsave(&ichan->lock) */
-static int ipu_submit_channel_buffers(struct idmac_channel *ichan,
- struct idmac_tx_desc *desc)
-{
- struct scatterlist *sg;
- int i, ret = 0;
-
- for (i = 0, sg = desc->sg; i < 2 && sg; i++) {
- if (!ichan->sg[i]) {
- ichan->sg[i] = sg;
-
- ret = ipu_submit_buffer(ichan, desc, sg, i);
- if (ret < 0)
- return ret;
-
- sg = sg_next(sg);
- }
- }
-
- return ret;
-}
-
-static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx)
-{
- struct idmac_tx_desc *desc = to_tx_desc(tx);
- struct idmac_channel *ichan = to_idmac_chan(tx->chan);
- struct idmac *idmac = to_idmac(tx->chan->device);
- struct ipu *ipu = to_ipu(idmac);
- struct device *dev = &ichan->dma_chan.dev->device;
- dma_cookie_t cookie;
- unsigned long flags;
- int ret;
-
- /* Sanity check */
- if (!list_empty(&desc->list)) {
- /* The descriptor doesn't belong to client */
- dev_err(dev, "Descriptor %p not prepared!\n", tx);
- return -EBUSY;
- }
-
- mutex_lock(&ichan->chan_mutex);
-
- async_tx_clear_ack(tx);
-
- if (ichan->status < IPU_CHANNEL_READY) {
- struct idmac_video_param *video = &ichan->params.video;
- /*
- * Initial buffer assignment - the first two sg-entries from
- * the descriptor will end up in the IDMAC buffers
- */
- dma_addr_t dma_1 = sg_is_last(desc->sg) ? 0 :
- sg_dma_address(&desc->sg[1]);
-
- WARN_ON(ichan->sg[0] || ichan->sg[1]);
-
- cookie = ipu_init_channel_buffer(ichan,
- video->out_pixel_fmt,
- video->out_width,
- video->out_height,
- video->out_stride,
- IPU_ROTATE_NONE,
- sg_dma_address(&desc->sg[0]),
- dma_1);
- if (cookie < 0)
- goto out;
- }
-
- dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]);
-
- cookie = dma_cookie_assign(tx);
-
- /* ipu->lock can be taken under ichan->lock, but not v.v. */
- spin_lock_irqsave(&ichan->lock, flags);
-
- list_add_tail(&desc->list, &ichan->queue);
- /* submit_buffers() atomically verifies and fills empty sg slots */
- ret = ipu_submit_channel_buffers(ichan, desc);
-
- spin_unlock_irqrestore(&ichan->lock, flags);
-
- if (ret < 0) {
- cookie = ret;
- goto dequeue;
- }
-
- if (ichan->status < IPU_CHANNEL_ENABLED) {
- ret = ipu_enable_channel(idmac, ichan);
- if (ret < 0) {
- cookie = ret;
- goto dequeue;
- }
- }
-
- dump_idmac_reg(ipu);
-
-dequeue:
- if (cookie < 0) {
- spin_lock_irqsave(&ichan->lock, flags);
- list_del_init(&desc->list);
- spin_unlock_irqrestore(&ichan->lock, flags);
- tx->cookie = cookie;
- ichan->dma_chan.cookie = cookie;
- }
-
-out:
- mutex_unlock(&ichan->chan_mutex);
-
- return cookie;
-}
-
-/* Called with ichan->chan_mutex held */
-static int idmac_desc_alloc(struct idmac_channel *ichan, int n)
-{
- struct idmac_tx_desc *desc =
- vmalloc(array_size(n, sizeof(struct idmac_tx_desc)));
- struct idmac *idmac = to_idmac(ichan->dma_chan.device);
-
- if (!desc)
- return -ENOMEM;
-
- /* No interrupts, just disable the tasklet for a moment */
- tasklet_disable(&to_ipu(idmac)->tasklet);
-
- ichan->n_tx_desc = n;
- ichan->desc = desc;
- INIT_LIST_HEAD(&ichan->queue);
- INIT_LIST_HEAD(&ichan->free_list);
-
- while (n--) {
- struct dma_async_tx_descriptor *txd = &desc->txd;
-
- memset(txd, 0, sizeof(*txd));
- dma_async_tx_descriptor_init(txd, &ichan->dma_chan);
- txd->tx_submit = idmac_tx_submit;
-
- list_add(&desc->list, &ichan->free_list);
-
- desc++;
- }
-
- tasklet_enable(&to_ipu(idmac)->tasklet);
-
- return 0;
-}
-
-/**
- * ipu_init_channel() - initialize an IPU channel.
- * @idmac: IPU DMAC context.
- * @ichan: pointer to the channel object.
- * @return 0 on success or negative error code on failure.
- */
-static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan)
-{
- union ipu_channel_param *params = &ichan->params;
- uint32_t ipu_conf;
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- unsigned long flags;
- uint32_t reg;
- struct ipu *ipu = to_ipu(idmac);
- int ret = 0, n_desc = 0;
-
- dev_dbg(ipu->dev, "init channel = %d\n", channel);
-
- if (channel != IDMAC_SDC_0 && channel != IDMAC_SDC_1 &&
- channel != IDMAC_IC_7)
- return -EINVAL;
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- switch (channel) {
- case IDMAC_IC_7:
- n_desc = 16;
- reg = idmac_read_icreg(ipu, IC_CONF);
- idmac_write_icreg(ipu, reg & ~IC_CONF_CSI_MEM_WR_EN, IC_CONF);
- break;
- case IDMAC_IC_0:
- n_desc = 16;
- reg = idmac_read_ipureg(ipu, IPU_FS_PROC_FLOW);
- idmac_write_ipureg(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW);
- ret = ipu_ic_init_prpenc(ipu, params, true);
- break;
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- n_desc = 4;
- break;
- default:
- break;
- }
-
- ipu->channel_init_mask |= 1L << channel;
-
- /* Enable IPU sub module */
- ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) |
- ipu_channel_conf_mask(channel);
- idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- if (n_desc && !ichan->desc)
- ret = idmac_desc_alloc(ichan, n_desc);
-
- dump_idmac_reg(ipu);
-
- return ret;
-}
-
-/**
- * ipu_uninit_channel() - uninitialize an IPU channel.
- * @idmac: IPU DMAC context.
- * @ichan: pointer to the channel object.
- */
-static void ipu_uninit_channel(struct idmac *idmac, struct idmac_channel *ichan)
-{
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- unsigned long flags;
- uint32_t reg;
- unsigned long chan_mask = 1UL << channel;
- uint32_t ipu_conf;
- struct ipu *ipu = to_ipu(idmac);
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- if (!(ipu->channel_init_mask & chan_mask)) {
- dev_err(ipu->dev, "Channel already uninitialized %d\n",
- channel);
- spin_unlock_irqrestore(&ipu->lock, flags);
- return;
- }
-
- /* Reset the double buffer */
- reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL);
- idmac_write_ipureg(ipu, reg & ~chan_mask, IPU_CHA_DB_MODE_SEL);
-
- ichan->sec_chan_en = false;
-
- switch (channel) {
- case IDMAC_IC_7:
- reg = idmac_read_icreg(ipu, IC_CONF);
- idmac_write_icreg(ipu, reg & ~(IC_CONF_RWS_EN | IC_CONF_PRPENC_EN),
- IC_CONF);
- break;
- case IDMAC_IC_0:
- reg = idmac_read_icreg(ipu, IC_CONF);
- idmac_write_icreg(ipu, reg & ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1),
- IC_CONF);
- break;
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- default:
- break;
- }
-
- ipu->channel_init_mask &= ~(1L << channel);
-
- ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) &
- ~ipu_channel_conf_mask(channel);
- idmac_write_ipureg(ipu, ipu_conf, IPU_CONF);
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- ichan->n_tx_desc = 0;
- vfree(ichan->desc);
- ichan->desc = NULL;
-}
-
-/**
- * ipu_disable_channel() - disable an IPU channel.
- * @idmac: IPU DMAC context.
- * @ichan: channel object pointer.
- * @wait_for_stop: flag to set whether to wait for channel end of frame or
- * return immediately.
- * @return: 0 on success or negative error code on failure.
- */
-static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
- bool wait_for_stop)
-{
- enum ipu_channel channel = ichan->dma_chan.chan_id;
- struct ipu *ipu = to_ipu(idmac);
- uint32_t reg;
- unsigned long flags;
- unsigned long chan_mask = 1UL << channel;
- unsigned int timeout;
-
- if (wait_for_stop && channel != IDMAC_SDC_1 && channel != IDMAC_SDC_0) {
- timeout = 40;
- /* This waiting always fails. Related to spurious irq problem */
- while ((idmac_read_icreg(ipu, IDMAC_CHA_BUSY) & chan_mask) ||
- (ipu_channel_status(ipu, channel) == TASK_STAT_ACTIVE)) {
- timeout--;
- msleep(10);
-
- if (!timeout) {
- dev_dbg(ipu->dev,
- "Warning: timeout waiting for channel %u to "
- "stop: buf0_rdy = 0x%08X, buf1_rdy = 0x%08X, "
- "busy = 0x%08X, tstat = 0x%08X\n", channel,
- idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY),
- idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY),
- idmac_read_icreg(ipu, IDMAC_CHA_BUSY),
- idmac_read_ipureg(ipu, IPU_TASKS_STAT));
- break;
- }
- }
- dev_dbg(ipu->dev, "timeout = %d * 10ms\n", 40 - timeout);
- }
- /* SDC BG and FG must be disabled before DMA is disabled */
- if (wait_for_stop && (channel == IDMAC_SDC_0 ||
- channel == IDMAC_SDC_1)) {
- for (timeout = 5;
- timeout && !ipu_irq_status(ichan->eof_irq); timeout--)
- msleep(5);
- }
-
- spin_lock_irqsave(&ipu->lock, flags);
-
- /* Disable IC task */
- ipu_ic_disable_task(ipu, channel);
-
- /* Disable DMA channel(s) */
- reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
- idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- return 0;
-}
-
-static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan,
- struct idmac_tx_desc **desc, struct scatterlist *sg)
-{
- struct scatterlist *sgnew = sg ? sg_next(sg) : NULL;
-
- if (sgnew)
- /* next sg-element in this list */
- return sgnew;
-
- if ((*desc)->list.next == &ichan->queue)
- /* No more descriptors on the queue */
- return NULL;
-
- /* Fetch next descriptor */
- *desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list);
- return (*desc)->sg;
-}
-
-/*
- * We have several possibilities here:
- * current BUF next BUF
- *
- * not last sg next not last sg
- * not last sg next last sg
- * last sg first sg from next descriptor
- * last sg NULL
- *
- * Besides, the descriptor queue might be empty or not. We process all these
- * cases carefully.
- */
-static irqreturn_t idmac_interrupt(int irq, void *dev_id)
-{
- struct idmac_channel *ichan = dev_id;
- struct device *dev = &ichan->dma_chan.dev->device;
- unsigned int chan_id = ichan->dma_chan.chan_id;
- struct scatterlist **sg, *sgnext, *sgnew = NULL;
- /* Next transfer descriptor */
- struct idmac_tx_desc *desc, *descnew;
- bool done = false;
- u32 ready0, ready1, curbuf, err;
- struct dmaengine_desc_callback cb;
-
- /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */
-
- dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer);
-
- spin_lock(&ipu_data.lock);
-
- ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
- ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
- curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
- err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4);
-
- if (err & (1 << chan_id)) {
- idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);
- spin_unlock(&ipu_data.lock);
- /*
- * Doing this
- * ichan->sg[0] = ichan->sg[1] = NULL;
- * you can force channel re-enable on the next tx_submit(), but
- * this is dirty - think about descriptors with multiple
- * sg elements.
- */
- dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n",
- chan_id, ready0, ready1, curbuf);
- return IRQ_HANDLED;
- }
- spin_unlock(&ipu_data.lock);
-
- /* Other interrupts do not interfere with this channel */
- spin_lock(&ichan->lock);
- if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
- (!ichan->active_buffer && (ready0 >> chan_id) & 1)
- )) {
- spin_unlock(&ichan->lock);
- dev_dbg(dev,
- "IRQ with active buffer still ready on channel %x, "
- "active %d, ready %x, %x!\n", chan_id,
- ichan->active_buffer, ready0, ready1);
- return IRQ_NONE;
- }
-
- if (unlikely(list_empty(&ichan->queue))) {
- ichan->sg[ichan->active_buffer] = NULL;
- spin_unlock(&ichan->lock);
- dev_err(dev,
- "IRQ without queued buffers on channel %x, active %d, "
- "ready %x, %x!\n", chan_id,
- ichan->active_buffer, ready0, ready1);
- return IRQ_NONE;
- }
-
- /*
- * active_buffer is a software flag, it shows which buffer we are
- * currently expecting back from the hardware, IDMAC should be
- * processing the other buffer already
- */
- sg = &ichan->sg[ichan->active_buffer];
- sgnext = ichan->sg[!ichan->active_buffer];
-
- if (!*sg) {
- spin_unlock(&ichan->lock);
- return IRQ_HANDLED;
- }
-
- desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list);
- descnew = desc;
-
- dev_dbg(dev, "IDMAC irq %d, dma %#llx, next dma %#llx, current %d, curbuf %#x\n",
- irq, (u64)sg_dma_address(*sg),
- sgnext ? (u64)sg_dma_address(sgnext) : 0,
- ichan->active_buffer, curbuf);
-
- /* Find the descriptor of sgnext */
- sgnew = idmac_sg_next(ichan, &descnew, *sg);
- if (sgnext != sgnew)
- dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew);
-
- /*
- * if sgnext == NULL sg must be the last element in a scatterlist and
- * queue must be empty
- */
- if (unlikely(!sgnext)) {
- if (!WARN_ON(sg_next(*sg)))
- dev_dbg(dev, "Underrun on channel %x\n", chan_id);
- ichan->sg[!ichan->active_buffer] = sgnew;
-
- if (unlikely(sgnew)) {
- ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer);
- } else {
- spin_lock(&ipu_data.lock);
- ipu_ic_disable_task(&ipu_data, chan_id);
- spin_unlock(&ipu_data.lock);
- ichan->status = IPU_CHANNEL_READY;
- /* Continue to check for complete descriptor */
- }
- }
-
- /* Calculate and submit the next sg element */
- sgnew = idmac_sg_next(ichan, &descnew, sgnew);
-
- if (unlikely(!sg_next(*sg)) || !sgnext) {
- /*
- * Last element in scatterlist done, remove from the queue,
- * _init for debugging
- */
- list_del_init(&desc->list);
- done = true;
- }
-
- *sg = sgnew;
-
- if (likely(sgnew) &&
- ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
- dmaengine_desc_get_callback(&descnew->txd, &cb);
-
- list_del_init(&descnew->list);
- spin_unlock(&ichan->lock);
-
- dmaengine_desc_callback_invoke(&cb, NULL);
- spin_lock(&ichan->lock);
- }
-
- /* Flip the active buffer - even if update above failed */
- ichan->active_buffer = !ichan->active_buffer;
- if (done)
- dma_cookie_complete(&desc->txd);
-
- dmaengine_desc_get_callback(&desc->txd, &cb);
-
- spin_unlock(&ichan->lock);
-
- if (done && (desc->txd.flags & DMA_PREP_INTERRUPT))
- dmaengine_desc_callback_invoke(&cb, NULL);
-
- return IRQ_HANDLED;
-}
-
-static void ipu_gc_tasklet(struct tasklet_struct *t)
-{
- struct ipu *ipu = from_tasklet(ipu, t, tasklet);
- int i;
-
- for (i = 0; i < IPU_CHANNELS_NUM; i++) {
- struct idmac_channel *ichan = ipu->channel + i;
- struct idmac_tx_desc *desc;
- unsigned long flags;
- struct scatterlist *sg;
- int j, k;
-
- for (j = 0; j < ichan->n_tx_desc; j++) {
- desc = ichan->desc + j;
- spin_lock_irqsave(&ichan->lock, flags);
- if (async_tx_test_ack(&desc->txd)) {
- list_move(&desc->list, &ichan->free_list);
- for_each_sg(desc->sg, sg, desc->sg_len, k) {
- if (ichan->sg[0] == sg)
- ichan->sg[0] = NULL;
- else if (ichan->sg[1] == sg)
- ichan->sg[1] = NULL;
- }
- async_tx_clear_ack(&desc->txd);
- }
- spin_unlock_irqrestore(&ichan->lock, flags);
- }
- }
-}
-
-/* Allocate and initialise a transfer descriptor. */
-static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
- struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long tx_flags,
- void *context)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac_tx_desc *desc = NULL;
- struct dma_async_tx_descriptor *txd = NULL;
- unsigned long flags;
-
- /* We only can handle these three channels so far */
- if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 &&
- chan->chan_id != IDMAC_IC_7)
- return NULL;
-
- if (!is_slave_direction(direction)) {
- dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
- return NULL;
- }
-
- mutex_lock(&ichan->chan_mutex);
-
- spin_lock_irqsave(&ichan->lock, flags);
- if (!list_empty(&ichan->free_list)) {
- desc = list_entry(ichan->free_list.next,
- struct idmac_tx_desc, list);
-
- list_del_init(&desc->list);
-
- desc->sg_len = sg_len;
- desc->sg = sgl;
- txd = &desc->txd;
- txd->flags = tx_flags;
- }
- spin_unlock_irqrestore(&ichan->lock, flags);
-
- mutex_unlock(&ichan->chan_mutex);
-
- tasklet_schedule(&to_ipu(to_idmac(chan->device))->tasklet);
-
- return txd;
-}
-
-/* Re-select the current buffer and re-activate the channel */
-static void idmac_issue_pending(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac *idmac = to_idmac(chan->device);
- struct ipu *ipu = to_ipu(idmac);
- unsigned long flags;
-
- /* This is not always needed, but doesn't hurt either */
- spin_lock_irqsave(&ipu->lock, flags);
- ipu_select_buffer(chan->chan_id, ichan->active_buffer);
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- /*
- * Might need to perform some parts of initialisation from
- * ipu_enable_channel(), but not all, we do not want to reset to buffer
- * 0, don't need to set priority again either, but re-enabling the task
- * and the channel might be a good idea.
- */
-}
-
-static int idmac_pause(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac *idmac = to_idmac(chan->device);
- struct ipu *ipu = to_ipu(idmac);
- struct list_head *list, *tmp;
- unsigned long flags;
-
- mutex_lock(&ichan->chan_mutex);
-
- spin_lock_irqsave(&ipu->lock, flags);
- ipu_ic_disable_task(ipu, chan->chan_id);
-
- /* Return all descriptors into "prepared" state */
- list_for_each_safe(list, tmp, &ichan->queue)
- list_del_init(list);
-
- ichan->sg[0] = NULL;
- ichan->sg[1] = NULL;
-
- spin_unlock_irqrestore(&ipu->lock, flags);
-
- ichan->status = IPU_CHANNEL_INITIALIZED;
-
- mutex_unlock(&ichan->chan_mutex);
-
- return 0;
-}
-
-static int __idmac_terminate_all(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac *idmac = to_idmac(chan->device);
- struct ipu *ipu = to_ipu(idmac);
- unsigned long flags;
- int i;
-
- ipu_disable_channel(idmac, ichan,
- ichan->status >= IPU_CHANNEL_ENABLED);
-
- tasklet_disable(&ipu->tasklet);
-
- /* ichan->queue is modified in ISR, have to spinlock */
- spin_lock_irqsave(&ichan->lock, flags);
- list_splice_init(&ichan->queue, &ichan->free_list);
-
- if (ichan->desc)
- for (i = 0; i < ichan->n_tx_desc; i++) {
- struct idmac_tx_desc *desc = ichan->desc + i;
- if (list_empty(&desc->list))
- /* Descriptor was prepared, but not submitted */
- list_add(&desc->list, &ichan->free_list);
-
- async_tx_clear_ack(&desc->txd);
- }
-
- ichan->sg[0] = NULL;
- ichan->sg[1] = NULL;
- spin_unlock_irqrestore(&ichan->lock, flags);
-
- tasklet_enable(&ipu->tasklet);
-
- ichan->status = IPU_CHANNEL_INITIALIZED;
-
- return 0;
-}
-
-static int idmac_terminate_all(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- int ret;
-
- mutex_lock(&ichan->chan_mutex);
-
- ret = __idmac_terminate_all(chan);
-
- mutex_unlock(&ichan->chan_mutex);
-
- return ret;
-}
-
-#ifdef DEBUG
-static irqreturn_t ic_sof_irq(int irq, void *dev_id)
-{
- struct idmac_channel *ichan = dev_id;
- printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n",
- irq, ichan->dma_chan.chan_id);
- disable_irq_nosync(irq);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ic_eof_irq(int irq, void *dev_id)
-{
- struct idmac_channel *ichan = dev_id;
- printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n",
- irq, ichan->dma_chan.chan_id);
- disable_irq_nosync(irq);
- return IRQ_HANDLED;
-}
-
-static int ic_sof = -EINVAL, ic_eof = -EINVAL;
-#endif
-
-static int idmac_alloc_chan_resources(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac *idmac = to_idmac(chan->device);
- int ret;
-
- /* dmaengine.c now guarantees to only offer free channels */
- BUG_ON(chan->client_count > 1);
- WARN_ON(ichan->status != IPU_CHANNEL_FREE);
-
- dma_cookie_init(chan);
-
- ret = ipu_irq_map(chan->chan_id);
- if (ret < 0)
- goto eimap;
-
- ichan->eof_irq = ret;
-
- /*
- * Important to first disable the channel, because maybe someone
- * used it before us, e.g., the bootloader
- */
- ipu_disable_channel(idmac, ichan, true);
-
- ret = ipu_init_channel(idmac, ichan);
- if (ret < 0)
- goto eichan;
-
- ret = request_irq(ichan->eof_irq, idmac_interrupt, 0,
- ichan->eof_name, ichan);
- if (ret < 0)
- goto erirq;
-
-#ifdef DEBUG
- if (chan->chan_id == IDMAC_IC_7) {
- ic_sof = ipu_irq_map(69);
- if (ic_sof > 0) {
- ret = request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan);
- if (ret)
- dev_err(&chan->dev->device, "request irq failed for IC SOF");
- }
- ic_eof = ipu_irq_map(70);
- if (ic_eof > 0) {
- ret = request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan);
- if (ret)
- dev_err(&chan->dev->device, "request irq failed for IC EOF");
- }
- }
-#endif
-
- ichan->status = IPU_CHANNEL_INITIALIZED;
-
- dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n",
- chan->chan_id, ichan->eof_irq);
-
- return ret;
-
-erirq:
- ipu_uninit_channel(idmac, ichan);
-eichan:
- ipu_irq_unmap(chan->chan_id);
-eimap:
- return ret;
-}
-
-static void idmac_free_chan_resources(struct dma_chan *chan)
-{
- struct idmac_channel *ichan = to_idmac_chan(chan);
- struct idmac *idmac = to_idmac(chan->device);
-
- mutex_lock(&ichan->chan_mutex);
-
- __idmac_terminate_all(chan);
-
- if (ichan->status > IPU_CHANNEL_FREE) {
-#ifdef DEBUG
- if (chan->chan_id == IDMAC_IC_7) {
- if (ic_sof > 0) {
- free_irq(ic_sof, ichan);
- ipu_irq_unmap(69);
- ic_sof = -EINVAL;
- }
- if (ic_eof > 0) {
- free_irq(ic_eof, ichan);
- ipu_irq_unmap(70);
- ic_eof = -EINVAL;
- }
- }
-#endif
- free_irq(ichan->eof_irq, ichan);
- ipu_irq_unmap(chan->chan_id);
- }
-
- ichan->status = IPU_CHANNEL_FREE;
-
- ipu_uninit_channel(idmac, ichan);
-
- mutex_unlock(&ichan->chan_mutex);
-
- tasklet_schedule(&to_ipu(idmac)->tasklet);
-}
-
-static enum dma_status idmac_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie, struct dma_tx_state *txstate)
-{
- return dma_cookie_status(chan, cookie, txstate);
-}
-
-static int __init ipu_idmac_init(struct ipu *ipu)
-{
- struct idmac *idmac = &ipu->idmac;
- struct dma_device *dma = &idmac->dma;
- int i;
-
- dma_cap_set(DMA_SLAVE, dma->cap_mask);
- dma_cap_set(DMA_PRIVATE, dma->cap_mask);
-
- /* Compulsory common fields */
- dma->dev = ipu->dev;
- dma->device_alloc_chan_resources = idmac_alloc_chan_resources;
- dma->device_free_chan_resources = idmac_free_chan_resources;
- dma->device_tx_status = idmac_tx_status;
- dma->device_issue_pending = idmac_issue_pending;
-
- /* Compulsory for DMA_SLAVE fields */
- dma->device_prep_slave_sg = idmac_prep_slave_sg;
- dma->device_pause = idmac_pause;
- dma->device_terminate_all = idmac_terminate_all;
-
- INIT_LIST_HEAD(&dma->channels);
- for (i = 0; i < IPU_CHANNELS_NUM; i++) {
- struct idmac_channel *ichan = ipu->channel + i;
- struct dma_chan *dma_chan = &ichan->dma_chan;
-
- spin_lock_init(&ichan->lock);
- mutex_init(&ichan->chan_mutex);
-
- ichan->status = IPU_CHANNEL_FREE;
- ichan->sec_chan_en = false;
- snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i);
-
- dma_chan->device = &idmac->dma;
- dma_cookie_init(dma_chan);
- dma_chan->chan_id = i;
- list_add_tail(&dma_chan->device_node, &dma->channels);
- }
-
- idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF);
-
- return dma_async_device_register(&idmac->dma);
-}
-
-static void ipu_idmac_exit(struct ipu *ipu)
-{
- int i;
- struct idmac *idmac = &ipu->idmac;
-
- for (i = 0; i < IPU_CHANNELS_NUM; i++) {
- struct idmac_channel *ichan = ipu->channel + i;
-
- idmac_terminate_all(&ichan->dma_chan);
- }
-
- dma_async_device_unregister(&idmac->dma);
-}
-
-/*****************************************************************************
- * IPU common probe / remove
- */
-
-static int __init ipu_probe(struct platform_device *pdev)
-{
- struct resource *mem_ipu, *mem_ic;
- int ret;
-
- spin_lock_init(&ipu_data.lock);
-
- mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!mem_ipu || !mem_ic)
- return -EINVAL;
-
- ipu_data.dev = &pdev->dev;
-
- platform_set_drvdata(pdev, &ipu_data);
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- goto err_noirq;
-
- ipu_data.irq_fn = ret;
- ret = platform_get_irq(pdev, 1);
- if (ret < 0)
- goto err_noirq;
-
- ipu_data.irq_err = ret;
-
- dev_dbg(&pdev->dev, "fn irq %u, err irq %u\n",
- ipu_data.irq_fn, ipu_data.irq_err);
-
- /* Remap IPU common registers */
- ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
- if (!ipu_data.reg_ipu) {
- ret = -ENOMEM;
- goto err_ioremap_ipu;
- }
-
- /* Remap Image Converter and Image DMA Controller registers */
- ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic));
- if (!ipu_data.reg_ic) {
- ret = -ENOMEM;
- goto err_ioremap_ic;
- }
-
- /* Get IPU clock */
- ipu_data.ipu_clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(ipu_data.ipu_clk)) {
- ret = PTR_ERR(ipu_data.ipu_clk);
- goto err_clk_get;
- }
-
- /* Make sure IPU HSP clock is running */
- clk_prepare_enable(ipu_data.ipu_clk);
-
- /* Disable all interrupts */
- idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1);
- idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_2);
- idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_3);
- idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_4);
- idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_5);
-
- dev_dbg(&pdev->dev, "%s @ 0x%08lx, fn irq %u, err irq %u\n", pdev->name,
- (unsigned long)mem_ipu->start, ipu_data.irq_fn, ipu_data.irq_err);
-
- ret = ipu_irq_attach_irq(&ipu_data, pdev);
- if (ret < 0)
- goto err_attach_irq;
-
- /* Initialize DMA engine */
- ret = ipu_idmac_init(&ipu_data);
- if (ret < 0)
- goto err_idmac_init;
-
- tasklet_setup(&ipu_data.tasklet, ipu_gc_tasklet);
-
- ipu_data.dev = &pdev->dev;
-
- dev_dbg(ipu_data.dev, "IPU initialized\n");
-
- return 0;
-
-err_idmac_init:
-err_attach_irq:
- ipu_irq_detach_irq(&ipu_data, pdev);
- clk_disable_unprepare(ipu_data.ipu_clk);
- clk_put(ipu_data.ipu_clk);
-err_clk_get:
- iounmap(ipu_data.reg_ic);
-err_ioremap_ic:
- iounmap(ipu_data.reg_ipu);
-err_ioremap_ipu:
-err_noirq:
- dev_err(&pdev->dev, "Failed to probe IPU: %d\n", ret);
- return ret;
-}
-
-static int ipu_remove(struct platform_device *pdev)
-{
- struct ipu *ipu = platform_get_drvdata(pdev);
-
- ipu_idmac_exit(ipu);
- ipu_irq_detach_irq(ipu, pdev);
- clk_disable_unprepare(ipu->ipu_clk);
- clk_put(ipu->ipu_clk);
- iounmap(ipu->reg_ic);
- iounmap(ipu->reg_ipu);
- tasklet_kill(&ipu->tasklet);
-
- return 0;
-}
-
-/*
- * We need two MEM resources - with IPU-common and Image Converter registers,
- * including PF_CONF and IDMAC_* registers, and two IRQs - function and error
- */
-static struct platform_driver ipu_platform_driver = {
- .driver = {
- .name = "ipu-core",
- },
- .remove = ipu_remove,
-};
-
-static int __init ipu_init(void)
-{
- return platform_driver_probe(&ipu_platform_driver, ipu_probe);
-}
-subsys_initcall(ipu_init);
-
-MODULE_DESCRIPTION("IPU core driver");
-MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
-MODULE_ALIAS("platform:ipu-core");
diff --git a/drivers/dma/ipu/ipu_intern.h b/drivers/dma/ipu/ipu_intern.h
deleted file mode 100644
index e7ec1dec3edf..000000000000
--- a/drivers/dma/ipu/ipu_intern.h
+++ /dev/null
@@ -1,173 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- */
-
-#ifndef _IPU_INTERN_H_
-#define _IPU_INTERN_H_
-
-#include <linux/dmaengine.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-
-/* IPU Common registers */
-#define IPU_CONF 0x00
-#define IPU_CHA_BUF0_RDY 0x04
-#define IPU_CHA_BUF1_RDY 0x08
-#define IPU_CHA_DB_MODE_SEL 0x0C
-#define IPU_CHA_CUR_BUF 0x10
-#define IPU_FS_PROC_FLOW 0x14
-#define IPU_FS_DISP_FLOW 0x18
-#define IPU_TASKS_STAT 0x1C
-#define IPU_IMA_ADDR 0x20
-#define IPU_IMA_DATA 0x24
-#define IPU_INT_CTRL_1 0x28
-#define IPU_INT_CTRL_2 0x2C
-#define IPU_INT_CTRL_3 0x30
-#define IPU_INT_CTRL_4 0x34
-#define IPU_INT_CTRL_5 0x38
-#define IPU_INT_STAT_1 0x3C
-#define IPU_INT_STAT_2 0x40
-#define IPU_INT_STAT_3 0x44
-#define IPU_INT_STAT_4 0x48
-#define IPU_INT_STAT_5 0x4C
-#define IPU_BRK_CTRL_1 0x50
-#define IPU_BRK_CTRL_2 0x54
-#define IPU_BRK_STAT 0x58
-#define IPU_DIAGB_CTRL 0x5C
-
-/* IPU_CONF Register bits */
-#define IPU_CONF_CSI_EN 0x00000001
-#define IPU_CONF_IC_EN 0x00000002
-#define IPU_CONF_ROT_EN 0x00000004
-#define IPU_CONF_PF_EN 0x00000008
-#define IPU_CONF_SDC_EN 0x00000010
-#define IPU_CONF_ADC_EN 0x00000020
-#define IPU_CONF_DI_EN 0x00000040
-#define IPU_CONF_DU_EN 0x00000080
-#define IPU_CONF_PXL_ENDIAN 0x00000100
-
-/* Image Converter Registers */
-#define IC_CONF 0x88
-#define IC_PRP_ENC_RSC 0x8C
-#define IC_PRP_VF_RSC 0x90
-#define IC_PP_RSC 0x94
-#define IC_CMBP_1 0x98
-#define IC_CMBP_2 0x9C
-#define PF_CONF 0xA0
-#define IDMAC_CONF 0xA4
-#define IDMAC_CHA_EN 0xA8
-#define IDMAC_CHA_PRI 0xAC
-#define IDMAC_CHA_BUSY 0xB0
-
-/* Image Converter Register bits */
-#define IC_CONF_PRPENC_EN 0x00000001
-#define IC_CONF_PRPENC_CSC1 0x00000002
-#define IC_CONF_PRPENC_ROT_EN 0x00000004
-#define IC_CONF_PRPVF_EN 0x00000100
-#define IC_CONF_PRPVF_CSC1 0x00000200
-#define IC_CONF_PRPVF_CSC2 0x00000400
-#define IC_CONF_PRPVF_CMB 0x00000800
-#define IC_CONF_PRPVF_ROT_EN 0x00001000
-#define IC_CONF_PP_EN 0x00010000
-#define IC_CONF_PP_CSC1 0x00020000
-#define IC_CONF_PP_CSC2 0x00040000
-#define IC_CONF_PP_CMB 0x00080000
-#define IC_CONF_PP_ROT_EN 0x00100000
-#define IC_CONF_IC_GLB_LOC_A 0x10000000
-#define IC_CONF_KEY_COLOR_EN 0x20000000
-#define IC_CONF_RWS_EN 0x40000000
-#define IC_CONF_CSI_MEM_WR_EN 0x80000000
-
-#define IDMA_CHAN_INVALID 0x000000FF
-#define IDMA_IC_0 0x00000001
-#define IDMA_IC_1 0x00000002
-#define IDMA_IC_2 0x00000004
-#define IDMA_IC_3 0x00000008
-#define IDMA_IC_4 0x00000010
-#define IDMA_IC_5 0x00000020
-#define IDMA_IC_6 0x00000040
-#define IDMA_IC_7 0x00000080
-#define IDMA_IC_8 0x00000100
-#define IDMA_IC_9 0x00000200
-#define IDMA_IC_10 0x00000400
-#define IDMA_IC_11 0x00000800
-#define IDMA_IC_12 0x00001000
-#define IDMA_IC_13 0x00002000
-#define IDMA_SDC_BG 0x00004000
-#define IDMA_SDC_FG 0x00008000
-#define IDMA_SDC_MASK 0x00010000
-#define IDMA_SDC_PARTIAL 0x00020000
-#define IDMA_ADC_SYS1_WR 0x00040000
-#define IDMA_ADC_SYS2_WR 0x00080000
-#define IDMA_ADC_SYS1_CMD 0x00100000
-#define IDMA_ADC_SYS2_CMD 0x00200000
-#define IDMA_ADC_SYS1_RD 0x00400000
-#define IDMA_ADC_SYS2_RD 0x00800000
-#define IDMA_PF_QP 0x01000000
-#define IDMA_PF_BSP 0x02000000
-#define IDMA_PF_Y_IN 0x04000000
-#define IDMA_PF_U_IN 0x08000000
-#define IDMA_PF_V_IN 0x10000000
-#define IDMA_PF_Y_OUT 0x20000000
-#define IDMA_PF_U_OUT 0x40000000
-#define IDMA_PF_V_OUT 0x80000000
-
-#define TSTAT_PF_H264_PAUSE 0x00000001
-#define TSTAT_CSI2MEM_MASK 0x0000000C
-#define TSTAT_CSI2MEM_OFFSET 2
-#define TSTAT_VF_MASK 0x00000600
-#define TSTAT_VF_OFFSET 9
-#define TSTAT_VF_ROT_MASK 0x000C0000
-#define TSTAT_VF_ROT_OFFSET 18
-#define TSTAT_ENC_MASK 0x00000180
-#define TSTAT_ENC_OFFSET 7
-#define TSTAT_ENC_ROT_MASK 0x00030000
-#define TSTAT_ENC_ROT_OFFSET 16
-#define TSTAT_PP_MASK 0x00001800
-#define TSTAT_PP_OFFSET 11
-#define TSTAT_PP_ROT_MASK 0x00300000
-#define TSTAT_PP_ROT_OFFSET 20
-#define TSTAT_PF_MASK 0x00C00000
-#define TSTAT_PF_OFFSET 22
-#define TSTAT_ADCSYS1_MASK 0x03000000
-#define TSTAT_ADCSYS1_OFFSET 24
-#define TSTAT_ADCSYS2_MASK 0x0C000000
-#define TSTAT_ADCSYS2_OFFSET 26
-
-#define TASK_STAT_IDLE 0
-#define TASK_STAT_ACTIVE 1
-#define TASK_STAT_WAIT4READY 2
-
-struct idmac {
- struct dma_device dma;
-};
-
-struct ipu {
- void __iomem *reg_ipu;
- void __iomem *reg_ic;
- unsigned int irq_fn; /* IPU Function IRQ to the CPU */
- unsigned int irq_err; /* IPU Error IRQ to the CPU */
- unsigned int irq_base; /* Beginning of the IPU IRQ range */
- unsigned long channel_init_mask;
- spinlock_t lock;
- struct clk *ipu_clk;
- struct device *dev;
- struct idmac idmac;
- struct idmac_channel channel[IPU_CHANNELS_NUM];
- struct tasklet_struct tasklet;
-};
-
-#define to_idmac(d) container_of(d, struct idmac, dma)
-
-extern int ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev);
-extern void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev);
-
-extern bool ipu_irq_status(uint32_t irq);
-extern int ipu_irq_map(unsigned int source);
-extern int ipu_irq_unmap(unsigned int source);
-
-#endif
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
deleted file mode 100644
index 97d9a6f04f2a..000000000000
--- a/drivers/dma/ipu/ipu_irq.c
+++ /dev/null
@@ -1,367 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- */
-
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/dma/ipu-dma.h>
-
-#include "ipu_intern.h"
-
-/*
- * Register read / write - shall be inlined by the compiler
- */
-static u32 ipu_read_reg(struct ipu *ipu, unsigned long reg)
-{
- return __raw_readl(ipu->reg_ipu + reg);
-}
-
-static void ipu_write_reg(struct ipu *ipu, u32 value, unsigned long reg)
-{
- __raw_writel(value, ipu->reg_ipu + reg);
-}
-
-
-/*
- * IPU IRQ chip driver
- */
-
-#define IPU_IRQ_NR_FN_BANKS 3
-#define IPU_IRQ_NR_ERR_BANKS 2
-#define IPU_IRQ_NR_BANKS (IPU_IRQ_NR_FN_BANKS + IPU_IRQ_NR_ERR_BANKS)
-
-struct ipu_irq_bank {
- unsigned int control;
- unsigned int status;
- struct ipu *ipu;
-};
-
-static struct ipu_irq_bank irq_bank[IPU_IRQ_NR_BANKS] = {
- /* 3 groups of functional interrupts */
- {
- .control = IPU_INT_CTRL_1,
- .status = IPU_INT_STAT_1,
- }, {
- .control = IPU_INT_CTRL_2,
- .status = IPU_INT_STAT_2,
- }, {
- .control = IPU_INT_CTRL_3,
- .status = IPU_INT_STAT_3,
- },
- /* 2 groups of error interrupts */
- {
- .control = IPU_INT_CTRL_4,
- .status = IPU_INT_STAT_4,
- }, {
- .control = IPU_INT_CTRL_5,
- .status = IPU_INT_STAT_5,
- },
-};
-
-struct ipu_irq_map {
- unsigned int irq;
- int source;
- struct ipu_irq_bank *bank;
- struct ipu *ipu;
-};
-
-static struct ipu_irq_map irq_map[CONFIG_MX3_IPU_IRQS];
-/* Protects allocations from the above array of maps */
-static DEFINE_MUTEX(map_lock);
-/* Protects register accesses and individual mappings */
-static DEFINE_RAW_SPINLOCK(bank_lock);
-
-static struct ipu_irq_map *src2map(unsigned int src)
-{
- int i;
-
- for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++)
- if (irq_map[i].source == src)
- return irq_map + i;
-
- return NULL;
-}
-
-static void ipu_irq_unmask(struct irq_data *d)
-{
- struct ipu_irq_map *map = irq_data_get_irq_chip_data(d);
- struct ipu_irq_bank *bank;
- uint32_t reg;
- unsigned long lock_flags;
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
-
- bank = map->bank;
- if (!bank) {
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
- return;
- }
-
- reg = ipu_read_reg(bank->ipu, bank->control);
- reg |= (1UL << (map->source & 31));
- ipu_write_reg(bank->ipu, reg, bank->control);
-
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-}
-
-static void ipu_irq_mask(struct irq_data *d)
-{
- struct ipu_irq_map *map = irq_data_get_irq_chip_data(d);
- struct ipu_irq_bank *bank;
- uint32_t reg;
- unsigned long lock_flags;
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
-
- bank = map->bank;
- if (!bank) {
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
- return;
- }
-
- reg = ipu_read_reg(bank->ipu, bank->control);
- reg &= ~(1UL << (map->source & 31));
- ipu_write_reg(bank->ipu, reg, bank->control);
-
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-}
-
-static void ipu_irq_ack(struct irq_data *d)
-{
- struct ipu_irq_map *map = irq_data_get_irq_chip_data(d);
- struct ipu_irq_bank *bank;
- unsigned long lock_flags;
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
-
- bank = map->bank;
- if (!bank) {
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
- pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
- return;
- }
-
- ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status);
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-}
-
-/**
- * ipu_irq_status() - returns the current interrupt status of the specified IRQ.
- * @irq: interrupt line to get status for.
- * @return: true if the interrupt is pending/asserted or false if the
- * interrupt is not pending.
- */
-bool ipu_irq_status(unsigned int irq)
-{
- struct ipu_irq_map *map = irq_get_chip_data(irq);
- struct ipu_irq_bank *bank;
- unsigned long lock_flags;
- bool ret;
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
- bank = map->bank;
- ret = bank && ipu_read_reg(bank->ipu, bank->status) &
- (1UL << (map->source & 31));
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-
- return ret;
-}
-
-/**
- * ipu_irq_map() - map an IPU interrupt source to an IRQ number
- * @source: interrupt source bit position (see below)
- * @return: mapped IRQ number or negative error code
- *
- * The source parameter has to be explained further. On i.MX31 the IPU has 137
- * IRQ sources, spread across 5 32-bit registers holding 32, 32, 24, 32 and 17
- * sources respectively. However, the source argument of this function is not
- * the sequence number of the possible IRQ, but rather its bit position. So,
- * the first interrupt in the fourth register has source number 96, not 88.
- * This makes calculations easier, and also provides forward compatibility
- * with any future IPU implementations, whatever their interrupt bit
- * assignments (a worked example follows this file's diff).
- */
-int ipu_irq_map(unsigned int source)
-{
- int i, ret = -ENOMEM;
- struct ipu_irq_map *map;
-
- might_sleep();
-
- mutex_lock(&map_lock);
- map = src2map(source);
- if (map) {
- pr_err("IPU: Source %u already mapped to IRQ %u\n", source, map->irq);
- ret = -EBUSY;
- goto out;
- }
-
- for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
- if (irq_map[i].source < 0) {
- unsigned long lock_flags;
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
- irq_map[i].source = source;
- irq_map[i].bank = irq_bank + source / 32;
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-
- ret = irq_map[i].irq;
- pr_debug("IPU: mapped source %u to IRQ %u\n",
- source, ret);
- break;
- }
- }
-out:
- mutex_unlock(&map_lock);
-
- if (ret < 0)
- pr_err("IPU: couldn't map source %u: %d\n", source, ret);
-
- return ret;
-}
-
-/**
- * ipu_irq_unmap() - unmap an IPU interrupt source
- * @source: interrupt source bit position (see ipu_irq_map())
- * @return: 0 or negative error code
- */
-int ipu_irq_unmap(unsigned int source)
-{
- int i, ret = -EINVAL;
-
- might_sleep();
-
- mutex_lock(&map_lock);
- for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
- if (irq_map[i].source == source) {
- unsigned long lock_flags;
-
- pr_debug("IPU: unmapped source %u from IRQ %u\n",
- source, irq_map[i].irq);
-
- raw_spin_lock_irqsave(&bank_lock, lock_flags);
- irq_map[i].source = -EINVAL;
- irq_map[i].bank = NULL;
- raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
-
- ret = 0;
- break;
- }
- }
- mutex_unlock(&map_lock);
-
- return ret;
-}
-
-/* Chained IRQ handler for IPU function and error interrupt */
-static void ipu_irq_handler(struct irq_desc *desc)
-{
- struct ipu *ipu = irq_desc_get_handler_data(desc);
- u32 status;
- int i, line;
-
- for (i = 0; i < IPU_IRQ_NR_BANKS; i++) {
- struct ipu_irq_bank *bank = irq_bank + i;
-
- raw_spin_lock(&bank_lock);
- status = ipu_read_reg(ipu, bank->status);
- /*
- * Don't think we have to clear all interrupts here, they will
- * be acked by ->handle_irq() (handle_level_irq). However, we
- * might want to clear unhandled interrupts after the loop...
- */
- status &= ipu_read_reg(ipu, bank->control);
- raw_spin_unlock(&bank_lock);
- while ((line = ffs(status))) {
- struct ipu_irq_map *map;
- unsigned int irq;
-
- line--;
- status &= ~(1UL << line);
-
- raw_spin_lock(&bank_lock);
- map = src2map(32 * i + line);
- if (!map) {
- raw_spin_unlock(&bank_lock);
- pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
- line, i);
- continue;
- }
- irq = map->irq;
- raw_spin_unlock(&bank_lock);
- generic_handle_irq(irq);
- }
- }
-}
-
-static struct irq_chip ipu_irq_chip = {
- .name = "ipu_irq",
- .irq_ack = ipu_irq_ack,
- .irq_mask = ipu_irq_mask,
- .irq_unmask = ipu_irq_unmask,
-};
-
-/* Install the IRQ handler */
-int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
-{
- unsigned int irq, i;
- int irq_base = irq_alloc_descs(-1, 0, CONFIG_MX3_IPU_IRQS,
- numa_node_id());
-
- if (irq_base < 0)
- return irq_base;
-
- for (i = 0; i < IPU_IRQ_NR_BANKS; i++)
- irq_bank[i].ipu = ipu;
-
- for (i = 0; i < CONFIG_MX3_IPU_IRQS; i++) {
- int ret;
-
- irq = irq_base + i;
- ret = irq_set_chip(irq, &ipu_irq_chip);
- if (ret < 0)
- return ret;
- ret = irq_set_chip_data(irq, irq_map + i);
- if (ret < 0)
- return ret;
- irq_map[i].ipu = ipu;
- irq_map[i].irq = irq;
- irq_map[i].source = -EINVAL;
- irq_set_handler(irq, handle_level_irq);
- irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
- }
-
- irq_set_chained_handler_and_data(ipu->irq_fn, ipu_irq_handler, ipu);
-
- irq_set_chained_handler_and_data(ipu->irq_err, ipu_irq_handler, ipu);
-
- ipu->irq_base = irq_base;
-
- return 0;
-}
-
-void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev)
-{
- unsigned int irq, irq_base;
-
- irq_base = ipu->irq_base;
-
- irq_set_chained_handler_and_data(ipu->irq_fn, NULL, NULL);
-
- irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
-
- for (irq = irq_base; irq < irq_base + CONFIG_MX3_IPU_IRQS; irq++) {
- irq_set_status_flags(irq, IRQ_NOREQUEST);
- irq_set_chip(irq, NULL);
- irq_set_chip_data(irq, NULL);
- }
-}
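The source numbering described in the ipu_irq_map() comment above maps directly onto the bank/bit arithmetic used throughout this file: source / 32 selects the irq_bank[] entry, source & 31 the bit within its control/status register. A small worked example; decode_source() is a hypothetical helper, the arithmetic mirrors ipu_irq_map()/ipu_irq_unmask() as removed above.

    #include <linux/printk.h>

    static void decode_source(unsigned int source)
    {
    	unsigned int bank = source / 32;   /* which irq_bank[] / INT_CTRL reg */
    	unsigned int bit  = source & 31;   /* which bit inside that register  */

    	/* source 96 -> bank 3, bit 0: the first interrupt of the fourth
    	 * register, even though only 32 + 32 + 24 = 88 sources precede it.
    	 */
    	pr_info("source %u -> bank %u, bit %u\n", source, bank, bit);
    }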
diff --git a/drivers/dma/lgm/lgm-dma.c b/drivers/dma/lgm/lgm-dma.c
index 1709d159af7e..4117c7b67e9c 100644
--- a/drivers/dma/lgm/lgm-dma.c
+++ b/drivers/dma/lgm/lgm-dma.c
@@ -1732,9 +1732,4 @@ static struct platform_driver intel_ldma_driver = {
* registered DMA channels and DMA capabilities to clients before their
* initialization.
*/
-static int __init intel_ldma_init(void)
-{
- return platform_driver_register(&intel_ldma_driver);
-}
-
-device_initcall(intel_ldma_init);
+builtin_platform_driver(intel_ldma_driver);
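The hunk above replaces hand-rolled init boilerplate with builtin_platform_driver(). Conceptually the macro generates roughly the same code that was deleted (simplified sketch below; the generated function name follows the driver name):

    /* builtin_platform_driver(intel_ldma_driver) expands to roughly: */
    static int __init intel_ldma_driver_init(void)
    {
    	return platform_driver_register(&intel_ldma_driver);
    }
    device_initcall(intel_ldma_driver_init);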
diff --git a/drivers/dma/lpc18xx-dmamux.c b/drivers/dma/lpc18xx-dmamux.c
index df98cae8792b..2b6436f4b193 100644
--- a/drivers/dma/lpc18xx-dmamux.c
+++ b/drivers/dma/lpc18xx-dmamux.c
@@ -12,8 +12,10 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma-main.c
index 9413fad08a60..b359421ee9ea 100644
--- a/drivers/dma/mcf-edma.c
+++ b/drivers/dma/mcf-edma-main.c
@@ -19,7 +19,6 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
struct fsl_edma_engine *mcf_edma = dev_id;
struct edma_regs *regs = &mcf_edma->regs;
unsigned int ch;
- struct fsl_edma_chan *mcf_chan;
u64 intmap;
intmap = ioread32(regs->inth);
@@ -31,31 +30,7 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
for (ch = 0; ch < mcf_edma->n_chans; ch++) {
if (intmap & BIT(ch)) {
iowrite8(EDMA_MASK_CH(ch), regs->cint);
-
- mcf_chan = &mcf_edma->chans[ch];
-
- spin_lock(&mcf_chan->vchan.lock);
-
- if (!mcf_chan->edesc) {
- /* terminate_all called before */
- spin_unlock(&mcf_chan->vchan.lock);
- continue;
- }
-
- if (!mcf_chan->edesc->iscyclic) {
- list_del(&mcf_chan->edesc->vdesc.node);
- vchan_cookie_complete(&mcf_chan->edesc->vdesc);
- mcf_chan->edesc = NULL;
- mcf_chan->status = DMA_COMPLETE;
- mcf_chan->idle = true;
- } else {
- vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
- }
-
- if (!mcf_chan->edesc)
- fsl_edma_xfer_desc(mcf_chan);
-
- spin_unlock(&mcf_chan->vchan.lock);
+ fsl_edma_tx_chan_handler(&mcf_edma->chans[ch]);
}
}
@@ -76,8 +51,7 @@ static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
if (err & BIT(ch)) {
fsl_edma_disable_request(&mcf_edma->chans[ch]);
iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
- mcf_edma->chans[ch].status = DMA_ERROR;
- mcf_edma->chans[ch].idle = true;
+ fsl_edma_err_chan_handler(&mcf_edma->chans[ch]);
}
}
@@ -172,7 +146,7 @@ static void mcf_edma_irq_free(struct platform_device *pdev,
}
static struct fsl_edma_drvdata mcf_data = {
- .version = v2,
+ .flags = FSL_EDMA_DRV_EDMA64,
.setup_irq = mcf_edma_irq_init,
};
@@ -180,9 +154,8 @@ static int mcf_edma_probe(struct platform_device *pdev)
{
struct mcf_edma_platform_data *pdata;
struct fsl_edma_engine *mcf_edma;
- struct fsl_edma_chan *mcf_chan;
struct edma_regs *regs;
- int ret, i, len, chans;
+ int ret, i, chans;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
@@ -197,8 +170,8 @@ static int mcf_edma_probe(struct platform_device *pdev)
chans = pdata->dma_channels;
}
- len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
- mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ mcf_edma = devm_kzalloc(&pdev->dev, struct_size(mcf_edma, chans, chans),
+ GFP_KERNEL);
if (!mcf_edma)
return -ENOMEM;
@@ -227,7 +200,9 @@ static int mcf_edma_probe(struct platform_device *pdev)
mcf_chan->dma_dir = DMA_NONE;
mcf_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
- iowrite32(0x0, &regs->tcd[i].csr);
+ mcf_chan->tcd = mcf_edma->membase + EDMA_TCD
+ + i * sizeof(struct fsl_edma_hw_tcd);
+ iowrite32(0x0, &mcf_chan->tcd->csr);
}
iowrite32(~0, regs->inth);
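The allocation change in the mcf-edma hunk above swaps an open-coded size computation for struct_size(), which sizes a structure ending in a flexible array member and saturates on overflow. A hedged sketch of the idiom; the example_engine/example_chan types and example_alloc() are hypothetical, struct_size() and devm_kzalloc() are the real helpers.

    #include <linux/device.h>
    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct example_chan { u32 cfg; };

    struct example_engine {
    	int n_chans;
    	struct example_chan chans[];	/* flexible array member */
    };

    static struct example_engine *example_alloc(struct device *dev, int nr)
    {
    	struct example_engine *e;

    	/* sizeof(*e) + nr * sizeof(e->chans[0]), with overflow saturation */
    	e = devm_kzalloc(dev, struct_size(e, chans, nr), GFP_KERNEL);
    	if (e)
    		e->n_chans = nr;
    	return e;
    }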
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 9ae92b8940ef..324b7387b1b9 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -18,7 +18,6 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index 69cc61c0b262..64120767d983 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -17,7 +17,6 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index a1517ef1f4a0..c51dc017b48a 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -551,7 +550,6 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev)
}
pm_runtime_enable(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
rc = dma_async_device_register(&mtkd->ddev);
if (rc)
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 4a51fdbf5aa9..1104017320b8 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -36,11 +36,11 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/random.h>
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index acc4d53e4630..cfb9962417ef 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -21,7 +21,6 @@
#include <linux/module.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/list.h>
#include <linux/dma/mxs-dma.h>
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index e72e8c10355e..0b2f96fd8bf0 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -15,7 +15,6 @@
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index b6e0ac8314e5..384476757c5e 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -20,8 +20,9 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "virt-dma.h"
@@ -1116,7 +1117,7 @@ static int owl_dma_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
nr_channels, nr_requests);
- od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev);
+ od->devid = (uintptr_t)of_device_get_match_data(&pdev->dev);
od->nr_pchans = nr_channels;
od->nr_vchans = nr_requests;
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 686c270ef710..f9b82dff3387 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -28,7 +28,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include "adma.h"
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 932628b319c8..1c93864e0e4d 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -2160,8 +2160,7 @@ static int gpi_probe(struct platform_device *pdev)
return -ENOMEM;
gpi_dev->dev = &pdev->dev;
- gpi_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gpi_dev->regs = devm_ioremap_resource(gpi_dev->dev, gpi_dev->res);
+ gpi_dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &gpi_dev->res);
if (IS_ERR(gpi_dev->regs))
return PTR_ERR(gpi_dev->regs);
gpi_dev->ee_base = gpi_dev->regs;
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 344525c3a32f..834ae519c15d 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -45,12 +45,12 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
-#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
@@ -765,17 +765,15 @@ static int hidma_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- trca = devm_ioremap_resource(&pdev->dev, trca_resource);
+ trca = devm_platform_get_and_ioremap_resource(pdev, 0, &trca_resource);
if (IS_ERR(trca)) {
- rc = -ENOMEM;
+ rc = PTR_ERR(trca);
goto bailout;
}
- evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- evca = devm_ioremap_resource(&pdev->dev, evca_resource);
+ evca = devm_platform_get_and_ioremap_resource(pdev, 1, &evca_resource);
if (IS_ERR(evca)) {
- rc = -ENOMEM;
+ rc = PTR_ERR(evca);
goto bailout;
}
@@ -785,7 +783,7 @@ static int hidma_probe(struct platform_device *pdev)
*/
chirq = platform_get_irq(pdev, 0);
if (chirq < 0) {
- rc = -ENODEV;
+ rc = chirq;
goto bailout;
}
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 05e96b31d871..1d675f31252b 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -176,10 +176,9 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- virtaddr = devm_ioremap_resource(&pdev->dev, res);
+ virtaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(virtaddr)) {
- rc = -ENOMEM;
+ rc = PTR_ERR(virtaddr);
goto out;
}
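Several hunks above (gpi.c, hidma.c, hidma_mgmt.c) move to devm_platform_get_and_ioremap_resource(), which folds platform_get_resource() plus devm_ioremap_resource() into one call and lets the real error code propagate instead of a hard-coded -ENOMEM. A minimal probe fragment using the same helper; example_probe() is hypothetical, the helper and error handling mirror the hunks above.

    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
    	struct resource *res;
    	void __iomem *base;

    	/* platform_get_resource(MEM, 0) + devm_ioremap_resource() in one call */
    	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
    	if (IS_ERR(base))
    		return PTR_ERR(base);	/* propagate the real error code */

    	dev_dbg(&pdev->dev, "mapped %pR\n", res);
    	return 0;
    }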
diff --git a/drivers/dma/sh/rz-dmac.c b/drivers/dma/sh/rz-dmac.c
index 9479f29692d3..f777addda8ba 100644
--- a/drivers/dma/sh/rz-dmac.c
+++ b/drivers/dma/sh/rz-dmac.c
@@ -9,6 +9,7 @@
* Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
*/
+#include <linux/bitfield.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
@@ -145,8 +146,8 @@ struct rz_dmac {
#define CHCFG_REQD BIT(3)
#define CHCFG_SEL(bits) ((bits) & 0x07)
#define CHCFG_MEM_COPY (0x80400008)
-#define CHCFG_FILL_DDS(a) (((a) << 16) & GENMASK(19, 16))
-#define CHCFG_FILL_SDS(a) (((a) << 12) & GENMASK(15, 12))
+#define CHCFG_FILL_DDS_MASK GENMASK(19, 16)
+#define CHCFG_FILL_SDS_MASK GENMASK(15, 12)
#define CHCFG_FILL_TM(a) (((a) & BIT(5)) << 22)
#define CHCFG_FILL_AM(a) (((a) & GENMASK(4, 2)) << 6)
#define CHCFG_FILL_LVL(a) (((a) & BIT(1)) << 5)
@@ -607,13 +608,15 @@ static int rz_dmac_config(struct dma_chan *chan,
if (val == CHCFG_DS_INVALID)
return -EINVAL;
- channel->chcfg |= CHCFG_FILL_DDS(val);
+ channel->chcfg &= ~CHCFG_FILL_DDS_MASK;
+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_DDS_MASK, val);
val = rz_dmac_ds_to_val_mapping(config->src_addr_width);
if (val == CHCFG_DS_INVALID)
return -EINVAL;
- channel->chcfg |= CHCFG_FILL_SDS(val);
+ channel->chcfg &= ~CHCFG_FILL_SDS_MASK;
+ channel->chcfg |= FIELD_PREP(CHCFG_FILL_SDS_MASK, val);
return 0;
}
@@ -947,7 +950,6 @@ static int rz_dmac_probe(struct platform_device *pdev)
dma_register_err:
of_dma_controller_free(pdev->dev.of_node);
err:
- reset_control_assert(dmac->rstc);
channel_num = i ? i - 1 : 0;
for (i = 0; i < channel_num; i++) {
struct rz_dmac_chan *channel = &dmac->channels[i];
@@ -958,6 +960,7 @@ err:
channel->lmdesc.base_dma);
}
+ reset_control_assert(dmac->rstc);
err_pm_runtime_put:
pm_runtime_put(&pdev->dev);
err_pm_disable:
@@ -971,6 +974,8 @@ static int rz_dmac_remove(struct platform_device *pdev)
struct rz_dmac *dmac = platform_get_drvdata(pdev);
unsigned int i;
+ dma_async_device_unregister(&dmac->engine);
+ of_dma_controller_free(pdev->dev.of_node);
for (i = 0; i < dmac->n_channels; i++) {
struct rz_dmac_chan *channel = &dmac->channels[i];
@@ -979,8 +984,6 @@ static int rz_dmac_remove(struct platform_device *pdev)
channel->lmdesc.base,
channel->lmdesc.base_dma);
}
- of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&dmac->engine);
reset_control_assert(dmac->rstc);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
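The rz-dmac change above converts open-coded shift macros to GENMASK()/FIELD_PREP() and, importantly, clears each field before setting it so that reconfiguring a channel does not OR stale bits together. A small sketch of that read-modify-write idiom; the register field and example_set_dst_size() are hypothetical, the helpers are the real <linux/bitfield.h> API.

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define EXAMPLE_DST_SIZE_MASK	GENMASK(19, 16)	/* hypothetical field */

    static u32 example_set_dst_size(u32 cfg, u32 val)
    {
    	cfg &= ~EXAMPLE_DST_SIZE_MASK;			/* clear the stale value */
    	cfg |= FIELD_PREP(EXAMPLE_DST_SIZE_MASK, val);	/* place val at 19:16   */
    	return cfg;
    }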
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 5aafe548ca5f..00067b29e232 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rculist.h>
@@ -678,7 +677,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
int err, errirq, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
struct sh_dmae_device *shdev;
struct dma_device *dma_dev;
- struct resource *chan, *dmars, *errirq_res, *chanirq_res;
+ struct resource *dmars, *errirq_res, *chanirq_res;
if (pdev->dev.of_node)
pdata = of_device_get_match_data(&pdev->dev);
@@ -689,7 +688,6 @@ static int sh_dmae_probe(struct platform_device *pdev)
if (!pdata || !pdata->channel_num)
return -ENODEV;
- chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/* DMARS area is optional */
dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
/*
@@ -709,7 +707,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
* requested with the IRQF_SHARED flag
*/
errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!chan || !errirq_res)
+ if (!errirq_res)
return -ENODEV;
shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
@@ -719,7 +717,7 @@ static int sh_dmae_probe(struct platform_device *pdev)
dma_dev = &shdev->shdma_dev.dma_dev;
- shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
+ shdev->chan_reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(shdev->chan_reg))
return PTR_ERR(shdev->chan_reg);
if (dmars) {
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 2b639adb48ba..168aa0bd73a0 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -15,7 +15,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 825001bde42c..89e82508c133 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3590,6 +3590,10 @@ static int __init d40_probe(struct platform_device *pdev)
spin_lock_init(&base->lcla_pool.lock);
base->irq = platform_get_irq(pdev, 0);
+ if (base->irq < 0) {
+ ret = base->irq;
+ goto destroy_cache;
+ }
ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
if (ret) {
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 37674029cb42..5c36811aa134 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -1581,8 +1581,7 @@ static int stm32_dma_probe(struct platform_device *pdev)
dd = &dmadev->ddev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmadev->base = devm_ioremap_resource(&pdev->dev, res);
+ dmadev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(dmadev->base))
return PTR_ERR(dmadev->base);
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index e415bd9f4f2b..8d77e2a7939a 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -15,8 +15,10 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 1d0e9dd72ab3..0de234022c6d 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -24,7 +24,6 @@
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index ebfd29888b2f..2469efddf540 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -14,8 +14,8 @@
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c
index 8f67f453a492..33b101001100 100644
--- a/drivers/dma/tegra186-gpc-dma.c
+++ b/drivers/dma/tegra186-gpc-dma.c
@@ -13,7 +13,7 @@
#include <linux/iopoll.h>
#include <linux/minmax.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index cc6b91f48979..063022f9df76 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -17,7 +17,6 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index b97004036071..e557bada1510 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -8,9 +8,10 @@
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index f744ddbbbad7..7f17ee87a6dc 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -3,14 +3,15 @@
* Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
* Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
*/
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
+#include <linux/of_platform.h>
#define TI_XBAR_DRA7 0
#define TI_XBAR_AM335X 1
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 9ea91c640c32..aa8e2e8ac260 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -20,7 +20,6 @@
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/edma.h>
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index 85e00701473c..05228bf00033 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -3,6 +3,8 @@
* Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
* Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
*/
+#include <linux/of.h>
+#include <linux/of_platform.h>
int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index eb4dc5fffe64..30fd2f386f36 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -20,7 +20,6 @@
#include <linux/sys_soc.h>
#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 02e1c08c596d..cf96cf915c0c 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -16,8 +16,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_device.h>
#include "../virt-dma.h"
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 3589b4ef50b8..bb4ff8c86733 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -18,8 +18,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include "dmaengine.h"
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index ac09f0e5f58d..0a3b2e22f23d 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -41,10 +41,10 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_platform.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>
@@ -173,12 +173,15 @@
#define XILINX_DMA_MAX_TRANS_LEN_MAX 23
#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
+#define XILINX_DMA_CR_DELAY_MAX GENMASK(31, 24)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT 16
+#define XILINX_DMA_CR_DELAY_SHIFT 24
#define XILINX_DMA_BD_SOP BIT(27)
#define XILINX_DMA_BD_EOP BIT(26)
+#define XILINX_DMA_BD_COMP_MASK BIT(31)
#define XILINX_DMA_COALESCE_MAX 255
-#define XILINX_DMA_NUM_DESCS 255
+#define XILINX_DMA_NUM_DESCS 512
#define XILINX_DMA_NUM_APP_WORDS 5
/* AXI CDMA Specific Registers/Offsets */
@@ -410,6 +413,7 @@ struct xilinx_dma_tx_descriptor {
* @stop_transfer: Differentiate b/w DMA IP's quiesce
* @tdest: TDEST value for mcdma
* @has_vflip: S2MM vertical flip
+ * @irq_delay: Interrupt delay timeout
*/
struct xilinx_dma_chan {
struct xilinx_dma_device *xdev;
@@ -448,6 +452,7 @@ struct xilinx_dma_chan {
int (*stop_transfer)(struct xilinx_dma_chan *chan);
u16 tdest;
bool has_vflip;
+ u8 irq_delay;
};
/**
@@ -493,6 +498,7 @@ struct xilinx_dma_config {
* @s2mm_chan_id: DMA s2mm channel identifier
* @mm2s_chan_id: DMA mm2s channel identifier
* @max_buffer_len: Max buffer length
+ * @has_axistream_connected: AXI DMA connected to AXI Stream IP
*/
struct xilinx_dma_device {
void __iomem *regs;
@@ -511,6 +517,7 @@ struct xilinx_dma_device {
u32 s2mm_chan_id;
u32 mm2s_chan_id;
u32 max_buffer_len;
+ bool has_axistream_connected;
};
/* Macros */
@@ -623,6 +630,29 @@ static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
}
}
+/**
+ * xilinx_dma_get_metadata_ptr - Populate metadata pointer and payload length
+ * @tx: async transaction descriptor
+ * @payload_len: metadata payload length
+ * @max_len: metadata max length
+ * Return: The app field pointer.
+ */
+static void *xilinx_dma_get_metadata_ptr(struct dma_async_tx_descriptor *tx,
+ size_t *payload_len, size_t *max_len)
+{
+ struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
+ struct xilinx_axidma_tx_segment *seg;
+
+ *max_len = *payload_len = sizeof(u32) * XILINX_DMA_NUM_APP_WORDS;
+ seg = list_first_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ return seg->hw.app;
+}
+
+static struct dma_descriptor_metadata_ops xilinx_dma_metadata_ops = {
+ .get_ptr = xilinx_dma_get_metadata_ptr,
+};
+
/* -----------------------------------------------------------------------------
* Descriptors and segments alloc and free
*/
@@ -1535,6 +1565,9 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
if (chan->has_sg)
xilinx_write(chan, XILINX_DMA_REG_CURDESC,
head_desc->async_tx.phys);
+ reg &= ~XILINX_DMA_CR_DELAY_MAX;
+ reg |= chan->irq_delay << XILINX_DMA_CR_DELAY_SHIFT;
+ dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
xilinx_dma_start(chan);
@@ -1683,6 +1716,14 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
return;
list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+ if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ struct xilinx_axidma_tx_segment *seg;
+
+ seg = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment, node);
+ if (!(seg->hw.status & XILINX_DMA_BD_COMP_MASK) && chan->has_sg)
+ break;
+ }
if (chan->has_sg && chan->xdev->dma_config->dmatype !=
XDMA_TYPE_VDMA)
desc->residue = xilinx_dma_get_residue(chan, desc);
@@ -1816,7 +1857,7 @@ static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
spin_unlock(&chan->lock);
}
- tasklet_schedule(&chan->tasklet);
+ tasklet_hi_schedule(&chan->tasklet);
return IRQ_HANDLED;
}
@@ -1864,15 +1905,8 @@ static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
}
}
- if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
- /*
- * Device takes too long to do the transfer when user requires
- * responsiveness.
- */
- dev_dbg(chan->dev, "Inter-packet latency too long\n");
- }
-
- if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
+ if (status & (XILINX_DMA_DMASR_FRM_CNT_IRQ |
+ XILINX_DMA_DMASR_DLY_CNT_IRQ)) {
spin_lock(&chan->lock);
xilinx_dma_complete_descriptor(chan);
chan->idle = true;
@@ -2221,6 +2255,9 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
segment->hw.control |= XILINX_DMA_BD_EOP;
}
+ if (chan->xdev->has_axistream_connected)
+ desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;
+
return &desc->async_tx;
error:
@@ -2796,6 +2833,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
/* Retrieve the channel properties from the device tree */
has_dre = of_property_read_bool(node, "xlnx,include-dre");
+ of_property_read_u8(node, "xlnx,irq-delay", &chan->irq_delay);
+
chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
err = of_property_read_u32(node, "xlnx,datawidth", &value);
@@ -3067,6 +3106,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
}
+ if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
+ xdev->has_axistream_connected =
+ of_property_read_bool(node, "xlnx,axistream-connected");
+ }
+
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
err = of_property_read_u32(node, "xlnx,num-fstores",
&num_frames);
@@ -3092,6 +3136,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
else
xdev->ext_addr = false;
+ /* Set metadata mode */
+ if (xdev->has_axistream_connected)
+ xdev->common.desc_metadata_modes = DESC_METADATA_ENGINE;
+
/* Set the dma mask bits */
err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
if (err < 0) {
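The xilinx_dma change above exposes the per-descriptor app words through the dmaengine descriptor-metadata interface (DESC_METADATA_ENGINE with a .get_ptr callback). A rough sketch of how a client might consume such metadata through the generic dmaengine helper; read_rx_metadata() and its buffer are hypothetical, and this is illustrative rather than this driver's documented contract.

    #include <linux/dmaengine.h>
    #include <linux/err.h>
    #include <linux/minmax.h>
    #include <linux/string.h>

    /* Assumes the channel advertises DESC_METADATA_ENGINE support. */
    static int read_rx_metadata(struct dma_async_tx_descriptor *tx,
    			    void *buf, size_t buflen)
    {
    	size_t payload_len, max_len;
    	void *app;

    	app = dmaengine_desc_get_metadata_ptr(tx, &payload_len, &max_len);
    	if (IS_ERR(app))
    		return PTR_ERR(app);

    	memcpy(buf, app, min(payload_len, buflen));
    	return 0;
    }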
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 9360f43b8e0f..bd8c3cc2eaab 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -11,8 +11,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_dma.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 0ef1971d22bb..8de9023c2a38 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -62,6 +62,7 @@ config EXTCON_INTEL_CHT_WC
tristate "Intel Cherrytrail Whiskey Cove PMIC extcon driver"
depends on INTEL_SOC_PMIC_CHTWC
depends on USB_SUPPORT
+ depends on POWER_SUPPLY
select USB_ROLE_SWITCH
help
Say Y here to enable extcon support for charger detection / control
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index 1bc7cbf2f65d..41b78f5cb735 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -59,7 +59,7 @@ config GOOGLE_MEMCONSOLE_X86_LEGACY
config GOOGLE_FRAMEBUFFER_COREBOOT
tristate "Coreboot Framebuffer"
- depends on FB_SIMPLE
+ depends on FB_SIMPLE || DRM_SIMPLEDRM
depends on GOOGLE_COREBOOT_TABLE
help
This option enables the kernel to search for a framebuffer in
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index ab3074705380..4f7a7abada48 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -33,6 +33,10 @@
#define INVALID_RETRY_COUNTER 0xFF
#define INVALID_DCMF_VERSION 0xFF
#define INVALID_DCMF_STATUS 0xFFFFFFFF
+#define INVALID_SPT_ADDRESS 0x0
+
+#define RSU_GET_SPT_CMD 0x5A
+#define RSU_GET_SPT_RESP_LEN (4 * sizeof(unsigned int))
typedef void (*rsu_callback)(struct stratix10_svc_client *client,
struct stratix10_svc_cb_data *data);
@@ -58,6 +62,9 @@ typedef void (*rsu_callback)(struct stratix10_svc_client *client,
* @dcmf_status.dcmf3: dcmf3 status
* @retry_counter: the current image's retry counter
* @max_retry: the preset max retry value
+ * @spt0_address: address of spt0
+ * @spt1_address: address of spt1
+ * @get_spt_response_buf: response from sdm for get_spt command
*/
struct stratix10_rsu_priv {
struct stratix10_svc_chan *chan;
@@ -89,6 +96,11 @@ struct stratix10_rsu_priv {
unsigned int retry_counter;
unsigned int max_retry;
+
+ unsigned long spt0_address;
+ unsigned long spt1_address;
+
+ unsigned int *get_spt_response_buf;
};
/**
@@ -258,6 +270,36 @@ static void rsu_dcmf_status_callback(struct stratix10_svc_client *client,
complete(&priv->completion);
}
+static void rsu_get_spt_callback(struct stratix10_svc_client *client,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_rsu_priv *priv = client->priv;
+ unsigned long *mbox_err = (unsigned long *)data->kaddr1;
+ unsigned long *resp_len = (unsigned long *)data->kaddr2;
+
+ if (data->status != BIT(SVC_STATUS_OK) || (*mbox_err) ||
+ (*resp_len != RSU_GET_SPT_RESP_LEN))
+ goto error;
+
+ priv->spt0_address = priv->get_spt_response_buf[0];
+ priv->spt0_address <<= 32;
+ priv->spt0_address |= priv->get_spt_response_buf[1];
+
+ priv->spt1_address = priv->get_spt_response_buf[2];
+ priv->spt1_address <<= 32;
+ priv->spt1_address |= priv->get_spt_response_buf[3];
+
+ goto complete;
+
+error:
+ dev_err(client->dev, "failed to get SPTs\n");
+
+complete:
+ stratix10_svc_free_memory(priv->chan, priv->get_spt_response_buf);
+ priv->get_spt_response_buf = NULL;
+ complete(&priv->completion);
+}
+
/**
* rsu_send_msg() - send a message to Intel service layer
* @priv: pointer to rsu private data
@@ -287,6 +329,14 @@ static int rsu_send_msg(struct stratix10_rsu_priv *priv,
if (arg)
msg.arg[0] = arg;
+ if (command == COMMAND_MBOX_SEND_CMD) {
+ msg.arg[1] = 0;
+ msg.payload = NULL;
+ msg.payload_length = 0;
+ msg.payload_output = priv->get_spt_response_buf;
+ msg.payload_length_output = RSU_GET_SPT_RESP_LEN;
+ }
+
ret = stratix10_svc_send(priv->chan, &msg);
if (ret < 0)
goto status_done;
@@ -571,6 +621,34 @@ static ssize_t notify_store(struct device *dev,
return count;
}
+static ssize_t spt0_address_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->spt0_address == INVALID_SPT_ADDRESS)
+ return -EIO;
+
+ return scnprintf(buf, PAGE_SIZE, "0x%08lx\n", priv->spt0_address);
+}
+
+static ssize_t spt1_address_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct stratix10_rsu_priv *priv = dev_get_drvdata(dev);
+
+ if (!priv)
+ return -ENODEV;
+
+ if (priv->spt1_address == INVALID_SPT_ADDRESS)
+ return -EIO;
+
+ return scnprintf(buf, PAGE_SIZE, "0x%08lx\n", priv->spt1_address);
+}
+
static DEVICE_ATTR_RO(current_image);
static DEVICE_ATTR_RO(fail_image);
static DEVICE_ATTR_RO(state);
@@ -589,6 +667,8 @@ static DEVICE_ATTR_RO(dcmf2_status);
static DEVICE_ATTR_RO(dcmf3_status);
static DEVICE_ATTR_WO(reboot_image);
static DEVICE_ATTR_WO(notify);
+static DEVICE_ATTR_RO(spt0_address);
+static DEVICE_ATTR_RO(spt1_address);
static struct attribute *rsu_attrs[] = {
&dev_attr_current_image.attr,
@@ -609,6 +689,8 @@ static struct attribute *rsu_attrs[] = {
&dev_attr_dcmf3_status.attr,
&dev_attr_reboot_image.attr,
&dev_attr_notify.attr,
+ &dev_attr_spt0_address.attr,
+ &dev_attr_spt1_address.attr,
NULL
};
@@ -638,11 +720,13 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
priv->dcmf_version.dcmf1 = INVALID_DCMF_VERSION;
priv->dcmf_version.dcmf2 = INVALID_DCMF_VERSION;
priv->dcmf_version.dcmf3 = INVALID_DCMF_VERSION;
- priv->max_retry = INVALID_RETRY_COUNTER;
priv->dcmf_status.dcmf0 = INVALID_DCMF_STATUS;
priv->dcmf_status.dcmf1 = INVALID_DCMF_STATUS;
priv->dcmf_status.dcmf2 = INVALID_DCMF_STATUS;
priv->dcmf_status.dcmf3 = INVALID_DCMF_STATUS;
+ priv->max_retry = INVALID_RETRY_COUNTER;
+ priv->spt0_address = INVALID_SPT_ADDRESS;
+ priv->spt1_address = INVALID_SPT_ADDRESS;
mutex_init(&priv->lock);
priv->chan = stratix10_svc_request_channel_byname(&priv->client,
@@ -692,6 +776,20 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
stratix10_svc_free_channel(priv->chan);
}
+ priv->get_spt_response_buf =
+ stratix10_svc_allocate_memory(priv->chan, RSU_GET_SPT_RESP_LEN);
+
+ if (IS_ERR(priv->get_spt_response_buf)) {
+ dev_err(dev, "failed to allocate get spt buffer\n");
+ } else {
+ ret = rsu_send_msg(priv, COMMAND_MBOX_SEND_CMD,
+ RSU_GET_SPT_CMD, rsu_get_spt_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting SPT table %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
+ }
+ }
+
return ret;
}
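In the rsu_get_spt_callback() added above, each SPT address is reassembled from two 32-bit response words (high word first). A one-line illustration of that assembly; spt_address() and resp[] are hypothetical names.

    #include <linux/types.h>

    /* Rebuild a 64-bit SPT address from {hi, lo} 32-bit response words. */
    static u64 spt_address(const u32 *resp)
    {
    	return ((u64)resp[0] << 32) | resp[1];
    }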
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 2d674126160f..c693da60e9a9 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -37,6 +37,7 @@
#define SVC_NUM_CHANNEL 3
#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200
#define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30
+#define BYTE_TO_WORD_SIZE 4
/* stratix10 service layer clients */
#define STRATIX10_RSU "stratix10-rsu"
@@ -361,6 +362,13 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
cb_data->kaddr2 = svc_pa_to_va(res.a2);
cb_data->kaddr3 = &res.a3;
break;
+ case COMMAND_MBOX_SEND_CMD:
+ cb_data->status = BIT(SVC_STATUS_OK);
+ cb_data->kaddr1 = &res.a1;
+ /* SDM return size in u8. Convert size to u32 word */
+ res.a2 = res.a2 * BYTE_TO_WORD_SIZE;
+ cb_data->kaddr2 = &res.a2;
+ break;
default:
pr_warn("it shouldn't happen\n");
break;
@@ -534,6 +542,15 @@ static int svc_normal_to_secure_thread(void *data)
a1 = 0;
a2 = 0;
break;
+ case COMMAND_MBOX_SEND_CMD:
+ a0 = INTEL_SIP_SMC_MBOX_SEND_CMD;
+ a1 = pdata->arg[0];
+ a2 = (unsigned long)pdata->paddr;
+ a3 = (unsigned long)pdata->size / BYTE_TO_WORD_SIZE;
+ a4 = pdata->arg[1];
+ a5 = (unsigned long)pdata->paddr_output;
+ a6 = (unsigned long)pdata->size_output / BYTE_TO_WORD_SIZE;
+ break;
default:
pr_warn("it shouldn't happen\n");
break;
@@ -597,6 +614,7 @@ static int svc_normal_to_secure_thread(void *data)
case COMMAND_FCS_DATA_ENCRYPTION:
case COMMAND_FCS_DATA_DECRYPTION:
case COMMAND_FCS_RANDOM_NUMBER_GEN:
+ case COMMAND_MBOX_SEND_CMD:
cbdata->status = BIT(SVC_STATUS_INVALID_PARAM);
cbdata->kaddr1 = NULL;
cbdata->kaddr2 = NULL;
@@ -756,7 +774,7 @@ svc_create_memory_pool(struct platform_device *pdev,
paddr = begin;
size = end - begin;
va = devm_memremap(dev, paddr, size, MEMREMAP_WC);
- if (!va) {
+ if (IS_ERR(va)) {
dev_err(dev, "fail to remap shared memory\n");
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 0a00763b9f28..2f689ac4ba3a 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -276,4 +276,6 @@ config FPGA_MGR_LATTICE_SYSCONFIG_SPI
FPGA manager driver support for Lattice FPGAs programming over slave
SPI sysCONFIG interface.
+source "drivers/fpga/tests/Kconfig"
+
endif # FPGA
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 72e554b4d2f7..352a2612623e 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -55,3 +55,6 @@ obj-$(CONFIG_FPGA_DFL_NIOS_INTEL_PAC_N3000) += dfl-n3000-nios.o
# Drivers for FPGAs which implement DFL
obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o
+
+# KUnit tests
+obj-$(CONFIG_FPGA_KUNIT_TESTS) += tests/
diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c
index ff3a646fd9e3..1fa2ccc321ab 100644
--- a/drivers/fpga/altera-fpga2sdram.c
+++ b/drivers/fpga/altera-fpga2sdram.c
@@ -27,7 +27,7 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#define ALT_SDR_CTL_FPGAPORTRST_OFST 0x80
diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c
index 445f4b011167..0c3fb8226908 100644
--- a/drivers/fpga/altera-freeze-bridge.c
+++ b/drivers/fpga/altera-freeze-bridge.c
@@ -7,8 +7,9 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/fpga/fpga-bridge.h>
#define FREEZE_CSR_STATUS_OFFSET 0
@@ -198,13 +199,11 @@ static const struct fpga_bridge_ops altera_freeze_br_br_ops = {
.enable_show = altera_freeze_br_enable_show,
};
-#ifdef CONFIG_OF
static const struct of_device_id altera_freeze_br_of_match[] = {
{ .compatible = "altr,freeze-bridge-controller", },
{},
};
MODULE_DEVICE_TABLE(of, altera_freeze_br_of_match);
-#endif
static int altera_freeze_br_probe(struct platform_device *pdev)
{
@@ -213,14 +212,12 @@ static int altera_freeze_br_probe(struct platform_device *pdev)
void __iomem *base_addr;
struct altera_freeze_br_data *priv;
struct fpga_bridge *br;
- struct resource *res;
u32 status, revision;
if (!np)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base_addr = devm_ioremap_resource(dev, res);
+ base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base_addr))
return PTR_ERR(base_addr);
@@ -270,7 +267,7 @@ static struct platform_driver altera_freeze_br_driver = {
.remove = altera_freeze_br_remove,
.driver = {
.name = "altera_freeze_br",
- .of_match_table = of_match_ptr(altera_freeze_br_of_match),
+ .of_match_table = altera_freeze_br_of_match,
},
};
diff --git a/drivers/fpga/altera-pr-ip-core-plat.c b/drivers/fpga/altera-pr-ip-core-plat.c
index b008a6b8d2d3..9dc263930007 100644
--- a/drivers/fpga/altera-pr-ip-core-plat.c
+++ b/drivers/fpga/altera-pr-ip-core-plat.c
@@ -9,19 +9,16 @@
*/
#include <linux/fpga/altera-pr-ip-core.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
static int alt_pr_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
void __iomem *reg_base;
- struct resource *res;
/* First mmio base is for register access */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- reg_base = devm_ioremap_resource(dev, res);
-
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
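The open-coded platform_get_resource()/devm_ioremap_resource() pairs removed here (and in several drivers below) are folded into devm_platform_ioremap_resource(), which performs the lookup and the remap in one call. A minimal sketch of the equivalence, using a hypothetical foo_probe() that is not part of this patch:

#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* old form: look up MEM resource 0, then remap it */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);

	/* new form: the same lookup and remap in a single call */
	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}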
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
index bcb5d34b3b82..3dcf990bd261 100644
--- a/drivers/fpga/dfl-fme-main.c
+++ b/drivers/fpga/dfl-fme-main.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
+#include <linux/units.h>
#include <linux/fpga-dfl.h>
#include "dfl.h"
@@ -231,19 +232,19 @@ static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
switch (attr) {
case hwmon_temp_input:
v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1);
- *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * 1000);
+ *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * MILLI);
break;
case hwmon_temp_max:
v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
- *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * 1000);
+ *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * MILLI);
break;
case hwmon_temp_crit:
v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
- *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * 1000);
+ *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * MILLI);
break;
case hwmon_temp_emergency:
v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
- *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * 1000);
+ *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * MILLI);
break;
case hwmon_temp_max_alarm:
v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
@@ -382,15 +383,15 @@ static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
switch (attr) {
case hwmon_power_input:
v = readq(feature->ioaddr + FME_PWR_STATUS);
- *val = (long)(FIELD_GET(PWR_CONSUMED, v) * 1000000);
+ *val = (long)(FIELD_GET(PWR_CONSUMED, v) * MICRO);
break;
case hwmon_power_max:
v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
- *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * 1000000);
+ *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * MICRO);
break;
case hwmon_power_crit:
v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
- *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * 1000000);
+ *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * MICRO);
break;
case hwmon_power_max_alarm:
v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
@@ -415,7 +416,7 @@ static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
int ret = 0;
u64 v;
- val = clamp_val(val / 1000000, 0, PWR_THRESHOLD_MAX);
+ val = clamp_val(val / MICRO, 0, PWR_THRESHOLD_MAX);
mutex_lock(&pdata->lock);
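MILLI and MICRO come from <linux/units.h> (1000 and 1000000 respectively), so the hwmon conversions above are numerically unchanged; only the literal scale factors gain names. A tiny sketch of the same arithmetic, with illustrative values:

#include <linux/units.h>

/* a sensor reading of 85 degrees C is reported as 85 * MILLI = 85000 millidegrees */
static const long temp_mdeg = 85 * MILLI;

/* a 50000000 microwatt limit from userspace is clamped as 50000000 / MICRO = 50 W */
static const long power_w = 50000000 / MICRO;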
diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c
index af0785783b52..ab228d8837a0 100644
--- a/drivers/fpga/dfl-fme-mgr.c
+++ b/drivers/fpga/dfl-fme-mgr.c
@@ -280,7 +280,6 @@ static int fme_mgr_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct fme_mgr_priv *priv;
struct fpga_manager *mgr;
- struct resource *res;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -290,8 +289,7 @@ static int fme_mgr_probe(struct platform_device *pdev)
priv->ioaddr = pdata->ioaddr;
if (!priv->ioaddr) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->ioaddr = devm_ioremap_resource(dev, res);
+ priv->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->ioaddr))
return PTR_ERR(priv->ioaddr);
}
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
index 1bc04378118c..98b8fd16183e 100644
--- a/drivers/fpga/dfl-pci.c
+++ b/drivers/fpga/dfl-pci.c
@@ -156,19 +156,12 @@ static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec)
static int find_dfls_by_vsec(struct pci_dev *pcidev, struct dfl_fpga_enum_info *info)
{
- u32 bir, offset, vndr_hdr, dfl_cnt, dfl_res;
- int dfl_res_off, i, bars, voff = 0;
+ u32 bir, offset, dfl_cnt, dfl_res;
+ int dfl_res_off, i, bars, voff;
resource_size_t start, len;
- while ((voff = pci_find_next_ext_capability(pcidev, voff, PCI_EXT_CAP_ID_VNDR))) {
- vndr_hdr = 0;
- pci_read_config_dword(pcidev, voff + PCI_VNDR_HEADER, &vndr_hdr);
-
- if (PCI_VNDR_HEADER_ID(vndr_hdr) == PCI_VSEC_ID_INTEL_DFLS &&
- pcidev->vendor == PCI_VENDOR_ID_INTEL)
- break;
- }
-
+ voff = pci_find_vsec_capability(pcidev, PCI_VENDOR_ID_INTEL,
+ PCI_VSEC_ID_INTEL_DFLS);
if (!voff) {
dev_dbg(&pcidev->dev, "%s no DFL VSEC found\n", __func__);
return -ENODEV;
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index a6c25dee9cc1..a024be2b84e2 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -14,7 +14,7 @@
#include <linux/spinlock.h>
static DEFINE_IDA(fpga_bridge_ida);
-static struct class *fpga_bridge_class;
+static const struct class fpga_bridge_class;
/* Lock for adding/removing bridges to linked lists*/
static DEFINE_SPINLOCK(bridge_list_lock);
@@ -87,19 +87,20 @@ err_dev:
/**
* of_fpga_bridge_get - get an exclusive reference to an fpga bridge
*
- * @np: node pointer of an FPGA bridge
- * @info: fpga image specific information
+ * @np: node pointer of an FPGA bridge.
+ * @info: fpga image specific information.
*
- * Return fpga_bridge struct if successful.
- * Return -EBUSY if someone already has a reference to the bridge.
- * Return -ENODEV if @np is not an FPGA Bridge.
+ * Return:
+ * * fpga_bridge struct pointer if successful.
+ * * -EBUSY if someone already has a reference to the bridge.
+ * * -ENODEV if @np is not an FPGA Bridge or the parent driver's refcount can't be taken.
*/
struct fpga_bridge *of_fpga_bridge_get(struct device_node *np,
struct fpga_image_info *info)
{
struct device *dev;
- dev = class_find_device_by_of_node(fpga_bridge_class, np);
+ dev = class_find_device_by_of_node(&fpga_bridge_class, np);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -126,7 +127,7 @@ struct fpga_bridge *fpga_bridge_get(struct device *dev,
{
struct device *bridge_dev;
- bridge_dev = class_find_device(fpga_bridge_class, NULL, dev,
+ bridge_dev = class_find_device(&fpga_bridge_class, NULL, dev,
fpga_bridge_dev_match);
if (!bridge_dev)
return ERR_PTR(-ENODEV);
@@ -155,9 +156,9 @@ EXPORT_SYMBOL_GPL(fpga_bridge_put);
* fpga_bridges_enable - enable bridges in a list
* @bridge_list: list of FPGA bridges
*
- * Enable each bridge in the list. If list is empty, do nothing.
+ * Enable each bridge in the list. If list is empty, do nothing.
*
- * Return 0 for success or empty bridge list; return error code otherwise.
+ * Return: 0 for success or empty bridge list or an error code otherwise.
*/
int fpga_bridges_enable(struct list_head *bridge_list)
{
@@ -179,9 +180,9 @@ EXPORT_SYMBOL_GPL(fpga_bridges_enable);
*
* @bridge_list: list of FPGA bridges
*
- * Disable each bridge in the list. If list is empty, do nothing.
+ * Disable each bridge in the list. If list is empty, do nothing.
*
- * Return 0 for success or empty bridge list; return error code otherwise.
+ * Return: 0 for success or empty bridge list or an error code otherwise.
*/
int fpga_bridges_disable(struct list_head *bridge_list)
{
@@ -230,7 +231,7 @@ EXPORT_SYMBOL_GPL(fpga_bridges_put);
*
* Get an exclusive reference to the bridge and add it to the list.
*
- * Return 0 for success, error code from of_fpga_bridge_get() otherwise.
+ * Return: 0 for success, error code from of_fpga_bridge_get() otherwise.
*/
int of_fpga_bridge_get_to_list(struct device_node *np,
struct fpga_image_info *info,
@@ -260,7 +261,7 @@ EXPORT_SYMBOL_GPL(of_fpga_bridge_get_to_list);
*
* Get an exclusive reference to the bridge and add it to the list.
*
- * Return 0 for success, error code from fpga_bridge_get() otherwise.
+ * Return: 0 for success, error code from fpga_bridge_get() otherwise.
*/
int fpga_bridge_get_to_list(struct device *dev,
struct fpga_image_info *info,
@@ -359,7 +360,7 @@ fpga_bridge_register(struct device *parent, const char *name,
bridge->priv = priv;
bridge->dev.groups = br_ops->groups;
- bridge->dev.class = fpga_bridge_class;
+ bridge->dev.class = &fpga_bridge_class;
bridge->dev.parent = parent;
bridge->dev.of_node = parent->of_node;
bridge->dev.id = id;
@@ -415,21 +416,20 @@ static void fpga_bridge_dev_release(struct device *dev)
kfree(bridge);
}
+static const struct class fpga_bridge_class = {
+ .name = "fpga_bridge",
+ .dev_groups = fpga_bridge_groups,
+ .dev_release = fpga_bridge_dev_release,
+};
+
static int __init fpga_bridge_dev_init(void)
{
- fpga_bridge_class = class_create("fpga_bridge");
- if (IS_ERR(fpga_bridge_class))
- return PTR_ERR(fpga_bridge_class);
-
- fpga_bridge_class->dev_groups = fpga_bridge_groups;
- fpga_bridge_class->dev_release = fpga_bridge_dev_release;
-
- return 0;
+ return class_register(&fpga_bridge_class);
}
static void __exit fpga_bridge_dev_exit(void)
{
- class_destroy(fpga_bridge_class);
+ class_unregister(&fpga_bridge_class);
ida_destroy(&fpga_bridge_ida);
}
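fpga-mgr.c and fpga-region.c below receive the same conversion: the class that used to be created at runtime with class_create() becomes a file-scope const struct class that is registered directly. A minimal sketch of the pattern for a hypothetical "foo" class, not taken from this patch:

#include <linux/device.h>
#include <linux/module.h>

static void foo_dev_release(struct device *dev)
{
	/* free the per-device data owned by the class here */
}

/* the class object now lives in rodata instead of being allocated at init time */
static const struct class foo_class = {
	.name = "foo",
	.dev_release = foo_dev_release,
};

static int __init foo_init(void)
{
	return class_register(&foo_class);	/* replaces class_create("foo") */
}

static void __exit foo_exit(void)
{
	class_unregister(&foo_class);		/* replaces class_destroy() */
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");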
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index eb583f86a0b9..06651389c592 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -19,7 +19,7 @@
#include <linux/highmem.h>
static DEFINE_IDA(fpga_mgr_ida);
-static struct class *fpga_mgr_class;
+static const struct class fpga_mgr_class;
struct fpga_mgr_devres {
struct fpga_manager *mgr;
@@ -693,7 +693,7 @@ static int fpga_mgr_dev_match(struct device *dev, const void *data)
*/
struct fpga_manager *fpga_mgr_get(struct device *dev)
{
- struct device *mgr_dev = class_find_device(fpga_mgr_class, NULL, dev,
+ struct device *mgr_dev = class_find_device(&fpga_mgr_class, NULL, dev,
fpga_mgr_dev_match);
if (!mgr_dev)
return ERR_PTR(-ENODEV);
@@ -713,7 +713,7 @@ struct fpga_manager *of_fpga_mgr_get(struct device_node *node)
{
struct device *dev;
- dev = class_find_device_by_of_node(fpga_mgr_class, node);
+ dev = class_find_device_by_of_node(&fpga_mgr_class, node);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -809,7 +809,7 @@ fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *in
mgr->priv = info->priv;
mgr->compat_id = info->compat_id;
- mgr->dev.class = fpga_mgr_class;
+ mgr->dev.class = &fpga_mgr_class;
mgr->dev.groups = mops->groups;
mgr->dev.parent = parent;
mgr->dev.of_node = parent->of_node;
@@ -967,23 +967,22 @@ static void fpga_mgr_dev_release(struct device *dev)
kfree(mgr);
}
+static const struct class fpga_mgr_class = {
+ .name = "fpga_manager",
+ .dev_groups = fpga_mgr_groups,
+ .dev_release = fpga_mgr_dev_release,
+};
+
static int __init fpga_mgr_class_init(void)
{
pr_info("FPGA manager framework\n");
- fpga_mgr_class = class_create("fpga_manager");
- if (IS_ERR(fpga_mgr_class))
- return PTR_ERR(fpga_mgr_class);
-
- fpga_mgr_class->dev_groups = fpga_mgr_groups;
- fpga_mgr_class->dev_release = fpga_mgr_dev_release;
-
- return 0;
+ return class_register(&fpga_mgr_class);
}
static void __exit fpga_mgr_class_exit(void)
{
- class_destroy(fpga_mgr_class);
+ class_unregister(&fpga_mgr_class);
ida_destroy(&fpga_mgr_ida);
}
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index ccf6fdab1360..b364a929425c 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -16,7 +16,7 @@
#include <linux/spinlock.h>
static DEFINE_IDA(fpga_region_ida);
-static struct class *fpga_region_class;
+static const struct class fpga_region_class;
struct fpga_region *
fpga_region_class_find(struct device *start, const void *data,
@@ -24,7 +24,7 @@ fpga_region_class_find(struct device *start, const void *data,
{
struct device *dev;
- dev = class_find_device(fpga_region_class, start, data, match);
+ dev = class_find_device(&fpga_region_class, start, data, match);
if (!dev)
return NULL;
@@ -38,9 +38,10 @@ EXPORT_SYMBOL_GPL(fpga_region_class_find);
*
* Caller should call fpga_region_put() when done with region.
*
- * Return fpga_region struct if successful.
- * Return -EBUSY if someone already has a reference to the region.
- * Return -ENODEV if @np is not an FPGA Region.
+ * Return:
+ * * fpga_region struct if successful.
+ * * -EBUSY if someone already has a reference to the region.
+ * * -ENODEV if can't take parent driver module refcount.
*/
static struct fpga_region *fpga_region_get(struct fpga_region *region)
{
@@ -91,7 +92,7 @@ static void fpga_region_put(struct fpga_region *region)
* The caller will need to call fpga_bridges_put() before attempting to
* reprogram the region.
*
- * Return 0 for success or negative error code.
+ * Return: 0 for success or negative error code.
*/
int fpga_region_program_fpga(struct fpga_region *region)
{
@@ -216,7 +217,7 @@ fpga_region_register_full(struct device *parent, const struct fpga_region_info *
mutex_init(&region->mutex);
INIT_LIST_HEAD(&region->bridge_list);
- region->dev.class = fpga_region_class;
+ region->dev.class = &fpga_region_class;
region->dev.parent = parent;
region->dev.of_node = parent->of_node;
region->dev.id = id;
@@ -287,25 +288,25 @@ static void fpga_region_dev_release(struct device *dev)
kfree(region);
}
+static const struct class fpga_region_class = {
+ .name = "fpga_region",
+ .dev_groups = fpga_region_groups,
+ .dev_release = fpga_region_dev_release,
+};
+
/**
- * fpga_region_init - init function for fpga_region class
- * Creates the fpga_region class and registers a reconfig notifier.
+ * fpga_region_init - creates the fpga_region class.
+ *
+ * Return: 0 on success or a negative error code on failure.
*/
static int __init fpga_region_init(void)
{
- fpga_region_class = class_create("fpga_region");
- if (IS_ERR(fpga_region_class))
- return PTR_ERR(fpga_region_class);
-
- fpga_region_class->dev_groups = fpga_region_groups;
- fpga_region_class->dev_release = fpga_region_dev_release;
-
- return 0;
+ return class_register(&fpga_region_class);
}
static void __exit fpga_region_exit(void)
{
- class_destroy(fpga_region_class);
+ class_unregister(&fpga_region_class);
ida_destroy(&fpga_region_ida);
}
diff --git a/drivers/fpga/microchip-spi.c b/drivers/fpga/microchip-spi.c
index d6070e7f5205..2a82c726d6e5 100644
--- a/drivers/fpga/microchip-spi.c
+++ b/drivers/fpga/microchip-spi.c
@@ -8,7 +8,7 @@
#include <linux/fpga/fpga-mgr.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/spi/spi.h>
#define MPF_SPI_ISC_ENABLE 0x0B
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index ae82532fc127..a6affd83f275 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -12,7 +12,9 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
index ac8e89b8a5cc..cc4861e345c9 100644
--- a/drivers/fpga/socfpga-a10.c
+++ b/drivers/fpga/socfpga-a10.c
@@ -471,7 +471,6 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
struct a10_fpga_priv *priv;
void __iomem *reg_base;
struct fpga_manager *mgr;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -479,14 +478,12 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
return -ENOMEM;
/* First mmio base is for register access */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(dev, res);
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
/* Second mmio base is for writing FPGA image data */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->fpga_data_addr = devm_ioremap_resource(dev, res);
+ priv->fpga_data_addr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->fpga_data_addr))
return PTR_ERR(priv->fpga_data_addr);
diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c
index 7e0741f99696..723ea0ad3f09 100644
--- a/drivers/fpga/socfpga.c
+++ b/drivers/fpga/socfpga.c
@@ -545,20 +545,17 @@ static int socfpga_fpga_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct socfpga_fpga_priv *priv;
struct fpga_manager *mgr;
- struct resource *res;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->fpga_base_addr = devm_ioremap_resource(dev, res);
+ priv->fpga_base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->fpga_base_addr))
return PTR_ERR(priv->fpga_base_addr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->fpga_data_addr = devm_ioremap_resource(dev, res);
+ priv->fpga_data_addr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->fpga_data_addr))
return PTR_ERR(priv->fpga_data_addr);
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index f7f01982a512..cacb9cc5757e 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
/*
* FPGA programming requires a higher level of privilege (EL3), per the SoC
diff --git a/drivers/fpga/tests/.kunitconfig b/drivers/fpga/tests/.kunitconfig
new file mode 100644
index 000000000000..a1c2a2974c39
--- /dev/null
+++ b/drivers/fpga/tests/.kunitconfig
@@ -0,0 +1,5 @@
+CONFIG_KUNIT=y
+CONFIG_FPGA=y
+CONFIG_FPGA_REGION=y
+CONFIG_FPGA_BRIDGE=y
+CONFIG_FPGA_KUNIT_TESTS=y
diff --git a/drivers/fpga/tests/Kconfig b/drivers/fpga/tests/Kconfig
new file mode 100644
index 000000000000..e4a64815f16d
--- /dev/null
+++ b/drivers/fpga/tests/Kconfig
@@ -0,0 +1,11 @@
+config FPGA_KUNIT_TESTS
+ tristate "KUnit test for the FPGA subsystem" if !KUNIT_ALL_TESTS
+ depends on FPGA && FPGA_REGION && FPGA_BRIDGE && KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for the FPGA subsystem.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
diff --git a/drivers/fpga/tests/Makefile b/drivers/fpga/tests/Makefile
new file mode 100644
index 000000000000..bb78215c645c
--- /dev/null
+++ b/drivers/fpga/tests/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for KUnit test suites for the FPGA subsystem
+#
+
+obj-$(CONFIG_FPGA_KUNIT_TESTS) += fpga-mgr-test.o fpga-bridge-test.o fpga-region-test.o
diff --git a/drivers/fpga/tests/fpga-bridge-test.c b/drivers/fpga/tests/fpga-bridge-test.c
new file mode 100644
index 000000000000..1d258002cdd7
--- /dev/null
+++ b/drivers/fpga/tests/fpga-bridge-test.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the FPGA Bridge
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Author: Marco Pagani <marpagan@redhat.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+struct bridge_stats {
+ bool enable;
+};
+
+struct bridge_ctx {
+ struct fpga_bridge *bridge;
+ struct platform_device *pdev;
+ struct bridge_stats stats;
+};
+
+static int op_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ struct bridge_stats *stats = bridge->priv;
+
+ stats->enable = enable;
+
+ return 0;
+}
+
+/*
+ * Fake FPGA bridge that implements only the enable_set op to track
+ * the state.
+ */
+static const struct fpga_bridge_ops fake_bridge_ops = {
+ .enable_set = op_enable_set,
+};
+
+/**
+ * register_test_bridge() - Register a fake FPGA bridge for testing.
+ * @test: KUnit test context object.
+ *
+ * Return: Context of the newly registered FPGA bridge.
+ */
+static struct bridge_ctx *register_test_bridge(struct kunit *test)
+{
+ struct bridge_ctx *ctx;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ ctx->pdev = platform_device_register_simple("bridge_pdev", PLATFORM_DEVID_AUTO, NULL, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->pdev);
+
+ ctx->bridge = fpga_bridge_register(&ctx->pdev->dev, "Fake FPGA bridge", &fake_bridge_ops,
+ &ctx->stats);
+ KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->bridge));
+
+ return ctx;
+}
+
+static void unregister_test_bridge(struct bridge_ctx *ctx)
+{
+ fpga_bridge_unregister(ctx->bridge);
+ platform_device_unregister(ctx->pdev);
+}
+
+static void fpga_bridge_test_get(struct kunit *test)
+{
+ struct bridge_ctx *ctx = test->priv;
+ struct fpga_bridge *bridge;
+
+ bridge = fpga_bridge_get(&ctx->pdev->dev, NULL);
+ KUNIT_EXPECT_PTR_EQ(test, bridge, ctx->bridge);
+
+ bridge = fpga_bridge_get(&ctx->pdev->dev, NULL);
+ KUNIT_EXPECT_EQ(test, PTR_ERR(bridge), -EBUSY);
+
+ fpga_bridge_put(ctx->bridge);
+}
+
+static void fpga_bridge_test_toggle(struct kunit *test)
+{
+ struct bridge_ctx *ctx = test->priv;
+ int ret;
+
+ ret = fpga_bridge_disable(ctx->bridge);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_FALSE(test, ctx->stats.enable);
+
+ ret = fpga_bridge_enable(ctx->bridge);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, ctx->stats.enable);
+}
+
+/* Test the functions for getting and controlling a list of bridges */
+static void fpga_bridge_test_get_put_list(struct kunit *test)
+{
+ struct list_head bridge_list;
+ struct bridge_ctx *ctx_0, *ctx_1;
+ int ret;
+
+ ctx_0 = test->priv;
+ ctx_1 = register_test_bridge(test);
+
+ INIT_LIST_HEAD(&bridge_list);
+
+ /* Get bridge 0 and add it to the list */
+ ret = fpga_bridge_get_to_list(&ctx_0->pdev->dev, NULL, &bridge_list);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, ctx_0->bridge,
+ list_first_entry_or_null(&bridge_list, struct fpga_bridge, node));
+
+ /* Get bridge 1 and add it to the list */
+ ret = fpga_bridge_get_to_list(&ctx_1->pdev->dev, NULL, &bridge_list);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_PTR_EQ(test, ctx_1->bridge,
+ list_first_entry_or_null(&bridge_list, struct fpga_bridge, node));
+
+ /* Disable and then enable both bridges from the list */
+ ret = fpga_bridges_disable(&bridge_list);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_FALSE(test, ctx_0->stats.enable);
+ KUNIT_EXPECT_FALSE(test, ctx_1->stats.enable);
+
+ ret = fpga_bridges_enable(&bridge_list);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_TRUE(test, ctx_0->stats.enable);
+ KUNIT_EXPECT_TRUE(test, ctx_1->stats.enable);
+
+ /* Put and remove both bridges from the list */
+ fpga_bridges_put(&bridge_list);
+
+ KUNIT_EXPECT_TRUE(test, list_empty(&bridge_list));
+
+ unregister_test_bridge(ctx_1);
+}
+
+static int fpga_bridge_test_init(struct kunit *test)
+{
+ test->priv = register_test_bridge(test);
+
+ return 0;
+}
+
+static void fpga_bridge_test_exit(struct kunit *test)
+{
+ unregister_test_bridge(test->priv);
+}
+
+static struct kunit_case fpga_bridge_test_cases[] = {
+ KUNIT_CASE(fpga_bridge_test_get),
+ KUNIT_CASE(fpga_bridge_test_toggle),
+ KUNIT_CASE(fpga_bridge_test_get_put_list),
+ {}
+};
+
+static struct kunit_suite fpga_bridge_suite = {
+ .name = "fpga_bridge",
+ .init = fpga_bridge_test_init,
+ .exit = fpga_bridge_test_exit,
+ .test_cases = fpga_bridge_test_cases,
+};
+
+kunit_test_suite(fpga_bridge_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/tests/fpga-mgr-test.c b/drivers/fpga/tests/fpga-mgr-test.c
new file mode 100644
index 000000000000..6acec55b60ce
--- /dev/null
+++ b/drivers/fpga/tests/fpga-mgr-test.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the FPGA Manager
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Author: Marco Pagani <marpagan@redhat.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+#define HEADER_FILL 'H'
+#define IMAGE_FILL 'P'
+#define IMAGE_BLOCK 1024
+
+#define HEADER_SIZE IMAGE_BLOCK
+#define IMAGE_SIZE (IMAGE_BLOCK * 4)
+
+struct mgr_stats {
+ bool header_match;
+ bool image_match;
+ u32 seq_num;
+ u32 op_parse_header_seq;
+ u32 op_write_init_seq;
+ u32 op_write_seq;
+ u32 op_write_sg_seq;
+ u32 op_write_complete_seq;
+ enum fpga_mgr_states op_parse_header_state;
+ enum fpga_mgr_states op_write_init_state;
+ enum fpga_mgr_states op_write_state;
+ enum fpga_mgr_states op_write_sg_state;
+ enum fpga_mgr_states op_write_complete_state;
+};
+
+struct mgr_ctx {
+ struct fpga_image_info *img_info;
+ struct fpga_manager *mgr;
+ struct platform_device *pdev;
+ struct mgr_stats stats;
+};
+
+/**
+ * init_test_buffer() - Allocate and initialize a test image in a buffer.
+ * @test: KUnit test context object.
+ * @count: image size in bytes.
+ *
+ * Return: pointer to the newly allocated image.
+ */
+static char *init_test_buffer(struct kunit *test, size_t count)
+{
+ char *buf;
+
+ KUNIT_ASSERT_GE(test, count, HEADER_SIZE);
+
+ buf = kunit_kzalloc(test, count, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
+ memset(buf, HEADER_FILL, HEADER_SIZE);
+ memset(buf + HEADER_SIZE, IMAGE_FILL, count - HEADER_SIZE);
+
+ return buf;
+}
+
+/*
+ * Check the image header. Do not return an error code if the image check fails
+ * since, in this case, it is a failure of the FPGA manager itself, not this
+ * op that tests it.
+ */
+static int op_parse_header(struct fpga_manager *mgr, struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct mgr_stats *stats = mgr->priv;
+ size_t i;
+
+ stats->op_parse_header_state = mgr->state;
+ stats->op_parse_header_seq = stats->seq_num++;
+
+ /* Set header_size and data_size for later */
+ info->header_size = HEADER_SIZE;
+ info->data_size = info->count - HEADER_SIZE;
+
+ stats->header_match = true;
+ for (i = 0; i < info->header_size; i++) {
+ if (buf[i] != HEADER_FILL) {
+ stats->header_match = false;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int op_write_init(struct fpga_manager *mgr, struct fpga_image_info *info,
+ const char *buf, size_t count)
+{
+ struct mgr_stats *stats = mgr->priv;
+
+ stats->op_write_init_state = mgr->state;
+ stats->op_write_init_seq = stats->seq_num++;
+
+ return 0;
+}
+
+/*
+ * Check the image data. As with op_parse_header, do not return an error code
+ * if the image check fails.
+ */
+static int op_write(struct fpga_manager *mgr, const char *buf, size_t count)
+{
+ struct mgr_stats *stats = mgr->priv;
+ size_t i;
+
+ stats->op_write_state = mgr->state;
+ stats->op_write_seq = stats->seq_num++;
+
+ stats->image_match = true;
+ for (i = 0; i < count; i++) {
+ if (buf[i] != IMAGE_FILL) {
+ stats->image_match = false;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Check the image data, but first skip the header since write_sg will get
+ * the whole image in sg_table. As with op_parse_header, do not return an
+ * error code if the image check fails.
+ */
+static int op_write_sg(struct fpga_manager *mgr, struct sg_table *sgt)
+{
+ struct mgr_stats *stats = mgr->priv;
+ struct sg_mapping_iter miter;
+ char *img;
+ size_t i;
+
+ stats->op_write_sg_state = mgr->state;
+ stats->op_write_sg_seq = stats->seq_num++;
+
+ stats->image_match = true;
+ sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
+
+ if (!sg_miter_skip(&miter, HEADER_SIZE)) {
+ stats->image_match = false;
+ goto out;
+ }
+
+ while (sg_miter_next(&miter)) {
+ img = miter.addr;
+ for (i = 0; i < miter.length; i++) {
+ if (img[i] != IMAGE_FILL) {
+ stats->image_match = false;
+ goto out;
+ }
+ }
+ }
+out:
+ sg_miter_stop(&miter);
+ return 0;
+}
+
+static int op_write_complete(struct fpga_manager *mgr, struct fpga_image_info *info)
+{
+ struct mgr_stats *stats = mgr->priv;
+
+ stats->op_write_complete_state = mgr->state;
+ stats->op_write_complete_seq = stats->seq_num++;
+
+ return 0;
+}
+
+/*
+ * Fake FPGA manager that implements all ops required to check the programming
+ * sequence using a single contiguous buffer and a scatter gather table.
+ */
+static const struct fpga_manager_ops fake_mgr_ops = {
+ .skip_header = true,
+ .parse_header = op_parse_header,
+ .write_init = op_write_init,
+ .write = op_write,
+ .write_sg = op_write_sg,
+ .write_complete = op_write_complete,
+};
+
+static void fpga_mgr_test_get(struct kunit *test)
+{
+ struct mgr_ctx *ctx = test->priv;
+ struct fpga_manager *mgr;
+
+ mgr = fpga_mgr_get(&ctx->pdev->dev);
+ KUNIT_EXPECT_PTR_EQ(test, mgr, ctx->mgr);
+
+ fpga_mgr_put(ctx->mgr);
+}
+
+static void fpga_mgr_test_lock(struct kunit *test)
+{
+ struct mgr_ctx *ctx = test->priv;
+ int ret;
+
+ ret = fpga_mgr_lock(ctx->mgr);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ ret = fpga_mgr_lock(ctx->mgr);
+ KUNIT_EXPECT_EQ(test, ret, -EBUSY);
+
+ fpga_mgr_unlock(ctx->mgr);
+}
+
+/* Check the programming sequence using an image in a buffer */
+static void fpga_mgr_test_img_load_buf(struct kunit *test)
+{
+ struct mgr_ctx *ctx = test->priv;
+ char *img_buf;
+ int ret;
+
+ img_buf = init_test_buffer(test, IMAGE_SIZE);
+
+ ctx->img_info->count = IMAGE_SIZE;
+ ctx->img_info->buf = img_buf;
+
+ ret = fpga_mgr_load(ctx->mgr, ctx->img_info);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_TRUE(test, ctx->stats.header_match);
+ KUNIT_EXPECT_TRUE(test, ctx->stats.image_match);
+
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_parse_header_state, FPGA_MGR_STATE_PARSE_HEADER);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_state, FPGA_MGR_STATE_WRITE_INIT);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_state, FPGA_MGR_STATE_WRITE);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_state, FPGA_MGR_STATE_WRITE_COMPLETE);
+
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_seq, ctx->stats.op_parse_header_seq + 1);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_seq, ctx->stats.op_parse_header_seq + 2);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_seq, ctx->stats.op_parse_header_seq + 3);
+}
+
+/* Check the programming sequence using an image in a scatter gather table */
+static void fpga_mgr_test_img_load_sgt(struct kunit *test)
+{
+ struct mgr_ctx *ctx = test->priv;
+ struct sg_table *sgt;
+ char *img_buf;
+ int ret;
+
+ img_buf = init_test_buffer(test, IMAGE_SIZE);
+
+ sgt = kunit_kzalloc(test, sizeof(*sgt), GFP_KERNEL);
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ sg_init_one(sgt->sgl, img_buf, IMAGE_SIZE);
+
+ ctx->img_info->sgt = sgt;
+
+ ret = fpga_mgr_load(ctx->mgr, ctx->img_info);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_TRUE(test, ctx->stats.header_match);
+ KUNIT_EXPECT_TRUE(test, ctx->stats.image_match);
+
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_parse_header_state, FPGA_MGR_STATE_PARSE_HEADER);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_state, FPGA_MGR_STATE_WRITE_INIT);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_sg_state, FPGA_MGR_STATE_WRITE);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_state, FPGA_MGR_STATE_WRITE_COMPLETE);
+
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_init_seq, ctx->stats.op_parse_header_seq + 1);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_sg_seq, ctx->stats.op_parse_header_seq + 2);
+ KUNIT_EXPECT_EQ(test, ctx->stats.op_write_complete_seq, ctx->stats.op_parse_header_seq + 3);
+
+ sg_free_table(ctx->img_info->sgt);
+}
+
+static int fpga_mgr_test_init(struct kunit *test)
+{
+ struct mgr_ctx *ctx;
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ ctx->pdev = platform_device_register_simple("mgr_pdev", PLATFORM_DEVID_AUTO, NULL, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->pdev);
+
+ ctx->mgr = devm_fpga_mgr_register(&ctx->pdev->dev, "Fake FPGA Manager", &fake_mgr_ops,
+ &ctx->stats);
+ KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->mgr));
+
+ ctx->img_info = fpga_image_info_alloc(&ctx->pdev->dev);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->img_info);
+
+ test->priv = ctx;
+
+ return 0;
+}
+
+static void fpga_mgr_test_exit(struct kunit *test)
+{
+ struct mgr_ctx *ctx = test->priv;
+
+ fpga_image_info_free(ctx->img_info);
+ platform_device_unregister(ctx->pdev);
+}
+
+static struct kunit_case fpga_mgr_test_cases[] = {
+ KUNIT_CASE(fpga_mgr_test_get),
+ KUNIT_CASE(fpga_mgr_test_lock),
+ KUNIT_CASE(fpga_mgr_test_img_load_buf),
+ KUNIT_CASE(fpga_mgr_test_img_load_sgt),
+ {}
+};
+
+static struct kunit_suite fpga_mgr_suite = {
+ .name = "fpga_mgr",
+ .init = fpga_mgr_test_init,
+ .exit = fpga_mgr_test_exit,
+ .test_cases = fpga_mgr_test_cases,
+};
+
+kunit_test_suite(fpga_mgr_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/tests/fpga-region-test.c b/drivers/fpga/tests/fpga-region-test.c
new file mode 100644
index 000000000000..9f9d50ee7871
--- /dev/null
+++ b/drivers/fpga/tests/fpga-region-test.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit test for the FPGA Region
+ *
+ * Copyright (C) 2023 Red Hat, Inc.
+ *
+ * Author: Marco Pagani <marpagan@redhat.com>
+ */
+
+#include <kunit/test.h>
+#include <linux/fpga/fpga-bridge.h>
+#include <linux/fpga/fpga-mgr.h>
+#include <linux/fpga/fpga-region.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+struct mgr_stats {
+ u32 write_count;
+};
+
+struct bridge_stats {
+ bool enable;
+ u32 cycles_count;
+};
+
+struct test_ctx {
+ struct fpga_manager *mgr;
+ struct platform_device *mgr_pdev;
+ struct fpga_bridge *bridge;
+ struct platform_device *bridge_pdev;
+ struct fpga_region *region;
+ struct platform_device *region_pdev;
+ struct bridge_stats bridge_stats;
+ struct mgr_stats mgr_stats;
+};
+
+static int op_write(struct fpga_manager *mgr, const char *buf, size_t count)
+{
+ struct mgr_stats *stats = mgr->priv;
+
+ stats->write_count++;
+
+ return 0;
+}
+
+/*
+ * Fake FPGA manager that implements only the write op to count the number
+ * of programming cycles. The internals of the programming sequence are
+ * tested in the Manager suite since they are outside the responsibility
+ * of the Region.
+ */
+static const struct fpga_manager_ops fake_mgr_ops = {
+ .write = op_write,
+};
+
+static int op_enable_set(struct fpga_bridge *bridge, bool enable)
+{
+ struct bridge_stats *stats = bridge->priv;
+
+ if (!stats->enable && enable)
+ stats->cycles_count++;
+
+ stats->enable = enable;
+
+ return 0;
+}
+
+/*
+ * Fake FPGA bridge that implements only the enable_set op to count the number
+ * of activation cycles.
+ */
+static const struct fpga_bridge_ops fake_bridge_ops = {
+ .enable_set = op_enable_set,
+};
+
+static int fake_region_get_bridges(struct fpga_region *region)
+{
+ struct fpga_bridge *bridge = region->priv;
+
+ return fpga_bridge_get_to_list(bridge->dev.parent, region->info, &region->bridge_list);
+}
+
+static int fake_region_match(struct device *dev, const void *data)
+{
+ return dev->parent == data;
+}
+
+static void fpga_region_test_class_find(struct kunit *test)
+{
+ struct test_ctx *ctx = test->priv;
+ struct fpga_region *region;
+
+ region = fpga_region_class_find(NULL, &ctx->region_pdev->dev, fake_region_match);
+ KUNIT_EXPECT_PTR_EQ(test, region, ctx->region);
+}
+
+/*
+ * FPGA Region programming test. The Region must call get_bridges() to get
+ * and control the bridges, and then the Manager for the actual programming.
+ */
+static void fpga_region_test_program_fpga(struct kunit *test)
+{
+ struct test_ctx *ctx = test->priv;
+ struct fpga_image_info *img_info;
+ char img_buf[4];
+ int ret;
+
+ img_info = fpga_image_info_alloc(&ctx->mgr_pdev->dev);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, img_info);
+
+ img_info->buf = img_buf;
+ img_info->count = sizeof(img_buf);
+
+ ctx->region->info = img_info;
+ ret = fpga_region_program_fpga(ctx->region);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, 1, ctx->mgr_stats.write_count);
+ KUNIT_EXPECT_EQ(test, 1, ctx->bridge_stats.cycles_count);
+
+ fpga_bridges_put(&ctx->region->bridge_list);
+
+ ret = fpga_region_program_fpga(ctx->region);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, 2, ctx->mgr_stats.write_count);
+ KUNIT_EXPECT_EQ(test, 2, ctx->bridge_stats.cycles_count);
+
+ fpga_bridges_put(&ctx->region->bridge_list);
+
+ fpga_image_info_free(img_info);
+}
+
+/*
+ * The configuration used in this test suite uses a single bridge to
+ * limit the code under test to a single unit. The functions used by the
+ * Region for getting and controlling bridges are tested (with a list of
+ * multiple bridges) in the Bridge suite.
+ */
+static int fpga_region_test_init(struct kunit *test)
+{
+ struct test_ctx *ctx;
+ struct fpga_region_info region_info = { 0 };
+
+ ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
+ ctx->mgr_pdev = platform_device_register_simple("mgr_pdev", PLATFORM_DEVID_AUTO, NULL, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->mgr_pdev);
+
+ ctx->mgr = devm_fpga_mgr_register(&ctx->mgr_pdev->dev, "Fake FPGA Manager", &fake_mgr_ops,
+ &ctx->mgr_stats);
+ KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->mgr));
+
+ ctx->bridge_pdev = platform_device_register_simple("bridge_pdev", PLATFORM_DEVID_AUTO,
+ NULL, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->bridge_pdev);
+
+ ctx->bridge = fpga_bridge_register(&ctx->bridge_pdev->dev, "Fake FPGA Bridge",
+ &fake_bridge_ops, &ctx->bridge_stats);
+ KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->bridge));
+
+ ctx->bridge_stats.enable = true;
+
+ ctx->region_pdev = platform_device_register_simple("region_pdev", PLATFORM_DEVID_AUTO,
+ NULL, 0);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->region_pdev);
+
+ region_info.mgr = ctx->mgr;
+ region_info.priv = ctx->bridge;
+ region_info.get_bridges = fake_region_get_bridges;
+
+ ctx->region = fpga_region_register_full(&ctx->region_pdev->dev, &region_info);
+ KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->region));
+
+ test->priv = ctx;
+
+ return 0;
+}
+
+static void fpga_region_test_exit(struct kunit *test)
+{
+ struct test_ctx *ctx = test->priv;
+
+ fpga_region_unregister(ctx->region);
+ platform_device_unregister(ctx->region_pdev);
+
+ fpga_bridge_unregister(ctx->bridge);
+ platform_device_unregister(ctx->bridge_pdev);
+
+ platform_device_unregister(ctx->mgr_pdev);
+}
+
+static struct kunit_case fpga_region_test_cases[] = {
+ KUNIT_CASE(fpga_region_test_class_find),
+ KUNIT_CASE(fpga_region_test_program_fpga),
+
+ {}
+};
+
+static struct kunit_suite fpga_region_suite = {
+ .name = "fpga_region",
+ .init = fpga_region_test_init,
+ .exit = fpga_region_test_exit,
+ .test_cases = fpga_region_test_cases,
+};
+
+kunit_test_suite(fpga_region_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c
index 8e6e9c840d9d..4e1d2a4d3df4 100644
--- a/drivers/fpga/ts73xx-fpga.c
+++ b/drivers/fpga/ts73xx-fpga.c
@@ -103,7 +103,6 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
struct device *kdev = &pdev->dev;
struct ts73xx_fpga_priv *priv;
struct fpga_manager *mgr;
- struct resource *res;
priv = devm_kzalloc(kdev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -111,8 +110,7 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
priv->dev = kdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->io_base = devm_ioremap_resource(kdev, res);
+ priv->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->io_base))
return PTR_ERR(priv->io_base);
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index b76d85449b8f..208d9560f56d 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -108,7 +108,6 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
struct xlnx_pr_decoupler_data *priv;
struct fpga_bridge *br;
int err;
- struct resource *res;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -122,8 +121,7 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
priv->ipconfig = match->data;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->io_base = devm_ioremap_resource(&pdev->dev, res);
+ priv->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->io_base))
return PTR_ERR(priv->io_base);
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index f8214cae9b6e..96611d424a10 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -555,7 +555,6 @@ static int zynq_fpga_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct zynq_fpga_priv *priv;
struct fpga_manager *mgr;
- struct resource *res;
int err;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -563,8 +562,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->dma_lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->io_base = devm_ioremap_resource(dev, res);
+ priv->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->io_base))
return PTR_ERR(priv->io_base);
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index e6668a869913..79a31593618a 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -62,6 +62,15 @@ config FSI_MASTER_ASPEED
Enable it for your BMC kernel in an OpenPower or IBM Power system.
+config FSI_MASTER_I2CR
+ tristate "IBM I2C Responder virtual FSI master"
+ depends on I2C
+ help
+ This option enables a virtual FSI master in order to access a CFAM
+ behind an IBM I2C Responder (I2CR) chip. The I2CR is an I2C device
+ that translates I2C commands to CFAM or SCOM operations, effectively
+ implementing an FSI master and bus.
+
config FSI_SCOM
tristate "SCOM FSI client device driver"
help
@@ -85,4 +94,12 @@ config FSI_OCC
provide the raw sensor data as well as perform thermal and power
management on the system.
+config I2CR_SCOM
+ tristate "IBM I2C Responder SCOM driver"
+ depends on FSI_MASTER_I2CR
+ help
+ This option enables an I2C Responder based SCOM device driver. The
+ I2CR has the capability to directly perform SCOM operations instead
+ of using the FSI2PIB engine.
+
endif
diff --git a/drivers/fsi/Makefile b/drivers/fsi/Makefile
index da218a1ad8e1..5550aa15e0b1 100644
--- a/drivers/fsi/Makefile
+++ b/drivers/fsi/Makefile
@@ -4,7 +4,9 @@ obj-$(CONFIG_FSI) += fsi-core.o
obj-$(CONFIG_FSI_MASTER_HUB) += fsi-master-hub.o
obj-$(CONFIG_FSI_MASTER_ASPEED) += fsi-master-aspeed.o
obj-$(CONFIG_FSI_MASTER_GPIO) += fsi-master-gpio.o
+obj-$(CONFIG_FSI_MASTER_I2CR) += fsi-master-i2cr.o
obj-$(CONFIG_FSI_MASTER_AST_CF) += fsi-master-ast-cf.o
obj-$(CONFIG_FSI_SCOM) += fsi-scom.o
obj-$(CONFIG_FSI_SBEFIFO) += fsi-sbefifo.o
obj-$(CONFIG_FSI_OCC) += fsi-occ.o
+obj-$(CONFIG_I2CR_SCOM) += i2cr-scom.o
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 0b927c9f4267..097d5a780264 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -16,6 +16,8 @@
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/cdev.h>
@@ -23,6 +25,10 @@
#include <linux/uaccess.h>
#include "fsi-master.h"
+#include "fsi-slave.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi.h>
#define FSI_SLAVE_CONF_NEXT_MASK GENMASK(31, 31)
#define FSI_SLAVE_CONF_SLOTS_MASK GENMASK(23, 16)
@@ -78,26 +84,6 @@ static const int engine_page_size = 0x400;
static DEFINE_IDA(master_ida);
-struct fsi_slave {
- struct device dev;
- struct fsi_master *master;
- struct cdev cdev;
- int cdev_idx;
- int id; /* FSI address */
- int link; /* FSI link# */
- u32 cfam_id;
- int chip_id;
- uint32_t size; /* size of slave address space */
- u8 t_send_delay;
- u8 t_echo_delay;
-};
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/fsi.h>
-
-#define to_fsi_master(d) container_of(d, struct fsi_master, dev)
-#define to_fsi_slave(d) container_of(d, struct fsi_slave, dev)
-
static const int slave_retries = 2;
static int discard_errors;
@@ -415,28 +401,18 @@ EXPORT_SYMBOL_GPL(fsi_slave_release_range);
static bool fsi_device_node_matches(struct device *dev, struct device_node *np,
uint32_t addr, uint32_t size)
{
- unsigned int len, na, ns;
- const __be32 *prop;
- uint32_t psize;
-
- na = of_n_addr_cells(np);
- ns = of_n_size_cells(np);
-
- if (na != 1 || ns != 1)
- return false;
+ u64 paddr, psize;
- prop = of_get_property(np, "reg", &len);
- if (!prop || len != 8)
+ if (of_property_read_reg(np, 0, &paddr, &psize))
return false;
- if (of_read_number(prop, 1) != addr)
+ if (paddr != addr)
return false;
- psize = of_read_number(prop + 1, 1);
if (psize != size) {
dev_warn(dev,
- "node %s matches probed address, but not size (got 0x%x, expected 0x%x)",
- of_node_full_name(np), psize, size);
+ "node %pOF matches probed address, but not size (got 0x%llx, expected 0x%x)",
+ np, psize, size);
}
return true;
@@ -653,24 +629,12 @@ static void fsi_slave_release(struct device *dev)
static bool fsi_slave_node_matches(struct device_node *np,
int link, uint8_t id)
{
- unsigned int len, na, ns;
- const __be32 *prop;
-
- na = of_n_addr_cells(np);
- ns = of_n_size_cells(np);
-
- /* Ensure we have the correct format for addresses and sizes in
- * reg properties
- */
- if (na != 2 || ns != 0)
- return false;
+ u64 addr;
- prop = of_get_property(np, "reg", &len);
- if (!prop || len != 8)
+ if (of_property_read_reg(np, 0, &addr, NULL))
return false;
- return (of_read_number(prop, 1) == link) &&
- (of_read_number(prop + 1, 1) == id);
+ return addr == (((u64)link << 32) | id);
}
/* Find a matching node for the slave at (link, id). Returns NULL if none
@@ -949,9 +913,13 @@ static int __fsi_get_new_minor(struct fsi_slave *slave, enum fsi_dev_type type,
/* Check if we qualify for legacy numbering */
if (cid >= 0 && cid < 16 && type < 4) {
- /* Try reserving the legacy number */
- id = (cid << 4) | type;
- id = ida_simple_get(&fsi_minor_ida, id, id + 1, GFP_KERNEL);
+ /*
+ * Try reserving the legacy number, which has 0 - 0x3f reserved
+ * in the ida range. cid goes up to 0xf and type contains two
+ * bits, so construct the id with the below two bit shift.
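+ * For example, cid 3 with type 2 maps to id (3 << 2) | 2 = 14, which
+ * stays within the legacy 0 - 0x3f range.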
+ */
+ id = (cid << 2) | type;
+ id = ida_alloc_range(&fsi_minor_ida, id, id, GFP_KERNEL);
if (id >= 0) {
*out_index = fsi_adjust_index(cid);
*out_dev = fsi_base_dev + id;
@@ -962,8 +930,8 @@ static int __fsi_get_new_minor(struct fsi_slave *slave, enum fsi_dev_type type,
return id;
/* Fallback to non-legacy allocation */
}
- id = ida_simple_get(&fsi_minor_ida, FSI_CHAR_LEGACY_TOP,
- FSI_CHAR_MAX_DEVICES, GFP_KERNEL);
+ id = ida_alloc_range(&fsi_minor_ida, FSI_CHAR_LEGACY_TOP,
+ FSI_CHAR_MAX_DEVICES - 1, GFP_KERNEL);
if (id < 0)
return id;
*out_index = fsi_adjust_index(id);
@@ -971,16 +939,42 @@ static int __fsi_get_new_minor(struct fsi_slave *slave, enum fsi_dev_type type,
return 0;
}
+static const char *const fsi_dev_type_names[] = {
+ "cfam",
+ "sbefifo",
+ "scom",
+ "occ",
+};
+
int fsi_get_new_minor(struct fsi_device *fdev, enum fsi_dev_type type,
dev_t *out_dev, int *out_index)
{
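+ /* Prefer a devicetree alias for the minor number when one is provided */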
+ if (fdev->dev.of_node) {
+ int aid = of_alias_get_id(fdev->dev.of_node, fsi_dev_type_names[type]);
+
+ if (aid >= 0) {
+ /* Use the same scheme as the legacy numbers. */
+ int id = (aid << 2) | type;
+
+ id = ida_alloc_range(&fsi_minor_ida, id, id, GFP_KERNEL);
+ if (id >= 0) {
+ *out_index = aid;
+ *out_dev = fsi_base_dev + id;
+ return 0;
+ }
+
+ if (id != -ENOSPC)
+ return id;
+ }
+ }
+
return __fsi_get_new_minor(fdev->slave, type, out_dev, out_index);
}
EXPORT_SYMBOL_GPL(fsi_get_new_minor);
void fsi_free_minor(dev_t dev)
{
- ida_simple_remove(&fsi_minor_ida, MINOR(dev));
+ ida_free(&fsi_minor_ida, MINOR(dev));
}
EXPORT_SYMBOL_GPL(fsi_free_minor);
@@ -1210,6 +1204,7 @@ static int fsi_master_scan(struct fsi_master *master)
{
int link, rc;
+ trace_fsi_master_scan(master, true);
for (link = 0; link < master->n_links; link++) {
rc = fsi_master_link_enable(master, link);
if (rc) {
@@ -1251,6 +1246,7 @@ static int fsi_master_remove_slave(struct device *dev, void *arg)
static void fsi_master_unscan(struct fsi_master *master)
{
+ trace_fsi_master_scan(master, false);
device_for_each_child(&master->dev, NULL, fsi_master_remove_slave);
}
@@ -1313,41 +1309,53 @@ int fsi_master_register(struct fsi_master *master)
struct device_node *np;
mutex_init(&master->scan_lock);
- master->idx = ida_simple_get(&master_ida, 0, INT_MAX, GFP_KERNEL);
+
+ /* Alloc the requested index if it's non-zero */
+ if (master->idx) {
+ master->idx = ida_alloc_range(&master_ida, master->idx,
+ master->idx, GFP_KERNEL);
+ } else {
+ master->idx = ida_alloc(&master_ida, GFP_KERNEL);
+ }
+
if (master->idx < 0)
return master->idx;
- dev_set_name(&master->dev, "fsi%d", master->idx);
+ if (!dev_name(&master->dev))
+ dev_set_name(&master->dev, "fsi%d", master->idx);
+
master->dev.class = &fsi_master_class;
+ mutex_lock(&master->scan_lock);
rc = device_register(&master->dev);
if (rc) {
- ida_simple_remove(&master_ida, master->idx);
- return rc;
+ ida_free(&master_ida, master->idx);
+ goto out;
}
np = dev_of_node(&master->dev);
if (!of_property_read_bool(np, "no-scan-on-init")) {
- mutex_lock(&master->scan_lock);
fsi_master_scan(master);
- mutex_unlock(&master->scan_lock);
}
-
- return 0;
+out:
+ mutex_unlock(&master->scan_lock);
+ return rc;
}
EXPORT_SYMBOL_GPL(fsi_master_register);
void fsi_master_unregister(struct fsi_master *master)
{
- if (master->idx >= 0) {
- ida_simple_remove(&master_ida, master->idx);
- master->idx = -1;
- }
+ int idx = master->idx;
+
+ trace_fsi_master_unregister(master);
mutex_lock(&master->scan_lock);
fsi_master_unscan(master);
+ master->n_links = 0;
mutex_unlock(&master->scan_lock);
+
device_unregister(&master->dev);
+ ida_free(&master_ida, idx);
}
EXPORT_SYMBOL_GPL(fsi_master_unregister);
@@ -1366,8 +1374,14 @@ static int fsi_bus_match(struct device *dev, struct device_driver *drv)
if (id->engine_type != fsi_dev->engine_type)
continue;
if (id->version == FSI_VERSION_ANY ||
- id->version == fsi_dev->version)
- return 1;
+ id->version == fsi_dev->version) {
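+ /* a driver with an OF match table must also match the device node */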
+ if (drv->of_match_table) {
+ if (of_driver_match_device(dev, drv))
+ return 1;
+ } else {
+ return 1;
+ }
+ }
}
return 0;
diff --git a/drivers/fsi/fsi-master-aspeed.c b/drivers/fsi/fsi-master-aspeed.c
index 7cec1772820d..f0a19cd451a0 100644
--- a/drivers/fsi/fsi-master-aspeed.c
+++ b/drivers/fsi/fsi-master-aspeed.c
@@ -376,7 +376,7 @@ static int aspeed_master_break(struct fsi_master *master, int link)
static void aspeed_master_release(struct device *dev)
{
struct fsi_master_aspeed *aspeed =
- to_fsi_master_aspeed(dev_to_fsi_master(dev));
+ to_fsi_master_aspeed(to_fsi_master(dev));
kfree(aspeed);
}
@@ -454,6 +454,8 @@ static ssize_t cfam_reset_store(struct device *dev, struct device_attribute *att
gpiod_set_value(aspeed->cfam_reset_gpio, 1);
usleep_range(900, 1000);
gpiod_set_value(aspeed->cfam_reset_gpio, 0);
+ usleep_range(900, 1000);
+ opb_writel(aspeed, ctrl_base + FSI_MRESP0, cpu_to_be32(FSI_MRESP_RST_ALL_MASTER));
mutex_unlock(&aspeed->lock);
trace_fsi_master_aspeed_cfam_reset(false);
diff --git a/drivers/fsi/fsi-master-ast-cf.c b/drivers/fsi/fsi-master-ast-cf.c
index 5f608ef8b53c..812dfa9a9140 100644
--- a/drivers/fsi/fsi-master-ast-cf.c
+++ b/drivers/fsi/fsi-master-ast-cf.c
@@ -1133,7 +1133,7 @@ static int fsi_master_acf_gpio_request(void *data)
/* Note: This doesn't require holding out mutex */
- /* Write reqest */
+ /* Write request */
iowrite8(ARB_ARM_REQ, master->sram + ARB_REG);
/*
@@ -1190,7 +1190,7 @@ static int fsi_master_acf_gpio_release(void *data)
static void fsi_master_acf_release(struct device *dev)
{
- struct fsi_master_acf *master = to_fsi_master_acf(dev_to_fsi_master(dev));
+ struct fsi_master_acf *master = to_fsi_master_acf(to_fsi_master(dev));
/* Cleanup, stop coprocessor */
mutex_lock(&master->lock);
@@ -1441,3 +1441,4 @@ static struct platform_driver fsi_master_acf = {
module_platform_driver(fsi_master_acf);
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FW_FILE_NAME);
diff --git a/drivers/fsi/fsi-master-gpio.c b/drivers/fsi/fsi-master-gpio.c
index 7d5f29b4b595..ed03da4f2447 100644
--- a/drivers/fsi/fsi-master-gpio.c
+++ b/drivers/fsi/fsi-master-gpio.c
@@ -761,7 +761,7 @@ static DEVICE_ATTR(external_mode, 0664,
static void fsi_master_gpio_release(struct device *dev)
{
- struct fsi_master_gpio *master = to_fsi_master_gpio(dev_to_fsi_master(dev));
+ struct fsi_master_gpio *master = to_fsi_master_gpio(to_fsi_master(dev));
of_node_put(dev_of_node(master->dev));
diff --git a/drivers/fsi/fsi-master-hub.c b/drivers/fsi/fsi-master-hub.c
index 01f0a796111e..6d8b6e8854e5 100644
--- a/drivers/fsi/fsi-master-hub.c
+++ b/drivers/fsi/fsi-master-hub.c
@@ -105,7 +105,7 @@ static int hub_master_link_enable(struct fsi_master *master, int link,
static void hub_master_release(struct device *dev)
{
- struct fsi_master_hub *hub = to_fsi_master_hub(dev_to_fsi_master(dev));
+ struct fsi_master_hub *hub = to_fsi_master_hub(to_fsi_master(dev));
kfree(hub);
}
diff --git a/drivers/fsi/fsi-master-i2cr.c b/drivers/fsi/fsi-master-i2cr.c
new file mode 100644
index 000000000000..40f1f4d231e5
--- /dev/null
+++ b/drivers/fsi/fsi-master-i2cr.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) IBM Corporation 2023 */
+
+#include <linux/device.h>
+#include <linux/fsi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+
+#include "fsi-master-i2cr.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/fsi_master_i2cr.h>
+
+#define I2CR_ADDRESS_CFAM(a) ((a) >> 2)
+#define I2CR_INITIAL_PARITY true
+
+#define I2CR_STATUS_CMD 0x60002
+#define I2CR_STATUS_ERR BIT_ULL(61)
+#define I2CR_ERROR_CMD 0x60004
+#define I2CR_LOG_CMD 0x60008
+
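+/*
+ * Minimal emulated CFAM configuration table, returned for reads of low
+ * addresses; see the comment in i2cr_read() below.
+ */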
+static const u8 i2cr_cfam[] = {
+ 0xc0, 0x02, 0x0d, 0xa6,
+ 0x80, 0x01, 0x10, 0x02,
+ 0x80, 0x01, 0x10, 0x02,
+ 0x80, 0x01, 0x10, 0x02,
+ 0x80, 0x01, 0x80, 0x52,
+ 0x80, 0x01, 0x10, 0x02,
+ 0x80, 0x01, 0x10, 0x02,
+ 0x80, 0x01, 0x10, 0x02,
+ 0x80, 0x01, 0x10, 0x02,
+ 0x80, 0x01, 0x22, 0x2d,
+ 0x00, 0x00, 0x00, 0x00,
+ 0xde, 0xad, 0xc0, 0xde
+};
+
+static bool i2cr_check_parity32(u32 v, bool parity)
+{
+ u32 i;
+
+ for (i = 0; i < 32; ++i) {
+ if (v & (1u << i))
+ parity = !parity;
+ }
+
+ return parity;
+}
+
+static bool i2cr_check_parity64(u64 v)
+{
+ u32 i;
+ bool parity = I2CR_INITIAL_PARITY;
+
+ for (i = 0; i < 64; ++i) {
+ if (v & (1llu << i))
+ parity = !parity;
+ }
+
+ return parity;
+}
+
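+/*
+ * Build the 32-bit I2CR command word: the register address shifted left by
+ * one, with bit 0 holding the parity computed over the shifted address and,
+ * for writes, the 64-bit data (seeded through @parity).
+ */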
+static u32 i2cr_get_command(u32 address, bool parity)
+{
+ address <<= 1;
+
+ if (i2cr_check_parity32(address, parity))
+ address |= 1;
+
+ return address;
+}
+
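+/* One transaction: write the 32-bit command, then read back 64 bits of data. */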
+static int i2cr_transfer(struct i2c_client *client, u32 command, u64 *data)
+{
+ struct i2c_msg msgs[2];
+ int ret;
+
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(command);
+ msgs[0].buf = (__u8 *)&command;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = sizeof(*data);
+ msgs[1].buf = (__u8 *)data;
+
+ ret = i2c_transfer(client->adapter, msgs, 2);
+ if (ret == 2)
+ return 0;
+
+ trace_i2cr_i2c_error(client, command, ret);
+
+ if (ret < 0)
+ return ret;
+
+ return -EIO;
+}
+
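+/*
+ * Read the I2CR status register; if the error bit is set, fetch the error
+ * and log registers for tracing and report -EREMOTEIO.
+ */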
+static int i2cr_check_status(struct i2c_client *client)
+{
+ u64 status;
+ int ret;
+
+ ret = i2cr_transfer(client, I2CR_STATUS_CMD, &status);
+ if (ret)
+ return ret;
+
+ if (status & I2CR_STATUS_ERR) {
+ u32 buf[3] = { 0, 0, 0 };
+ u64 error;
+ u64 log;
+
+ i2cr_transfer(client, I2CR_ERROR_CMD, &error);
+ i2cr_transfer(client, I2CR_LOG_CMD, &log);
+
+ trace_i2cr_status_error(client, status, error, log);
+
+ buf[0] = I2CR_STATUS_CMD;
+ i2c_master_send(client, (const char *)buf, sizeof(buf));
+
+ buf[0] = I2CR_ERROR_CMD;
+ i2c_master_send(client, (const char *)buf, sizeof(buf));
+
+ buf[0] = I2CR_LOG_CMD;
+ i2c_master_send(client, (const char *)buf, sizeof(buf));
+
+ dev_err(&client->dev, "status:%016llx error:%016llx log:%016llx\n", status, error,
+ log);
+ return -EREMOTEIO;
+ }
+
+ trace_i2cr_status(client, status);
+ return 0;
+}
+
+int fsi_master_i2cr_read(struct fsi_master_i2cr *i2cr, u32 addr, u64 *data)
+{
+ u32 command = i2cr_get_command(addr, I2CR_INITIAL_PARITY);
+ int ret;
+
+ mutex_lock(&i2cr->lock);
+
+ ret = i2cr_transfer(i2cr->client, command, data);
+ if (ret)
+ goto unlock;
+
+ ret = i2cr_check_status(i2cr->client);
+ if (ret)
+ goto unlock;
+
+ trace_i2cr_read(i2cr->client, command, data);
+
+unlock:
+ mutex_unlock(&i2cr->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(fsi_master_i2cr_read);
+
+int fsi_master_i2cr_write(struct fsi_master_i2cr *i2cr, u32 addr, u64 data)
+{
+ u32 buf[3] = { 0 };
+ int ret;
+
+ buf[0] = i2cr_get_command(addr, i2cr_check_parity64(data));
+ memcpy(&buf[1], &data, sizeof(data));
+
+ mutex_lock(&i2cr->lock);
+
+ ret = i2c_master_send(i2cr->client, (const char *)buf, sizeof(buf));
+ if (ret == sizeof(buf)) {
+ ret = i2cr_check_status(i2cr->client);
+ if (!ret)
+ trace_i2cr_write(i2cr->client, buf[0], data);
+ } else {
+ trace_i2cr_i2c_error(i2cr->client, buf[0], ret);
+
+ if (ret >= 0)
+ ret = -EIO;
+ }
+
+ mutex_unlock(&i2cr->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(fsi_master_i2cr_write);
+
+static int i2cr_read(struct fsi_master *master, int link, uint8_t id, uint32_t addr, void *val,
+ size_t size)
+{
+ struct fsi_master_i2cr *i2cr = container_of(master, struct fsi_master_i2cr, master);
+ u64 data;
+ size_t i;
+ int ret;
+
+ if (link || id || (addr & 0xffff0000) || !(size == 1 || size == 2 || size == 4))
+ return -EINVAL;
+
+ /*
+ * The I2CR doesn't have CFAM or FSI slave address space - only the
+ * engines. In order for this to work with the FSI core, we need to
+ * emulate at minimum the CFAM config table so that the appropriate
+ * engines are discovered.
+ */
+ if (addr < 0xc00) {
+ if (addr > sizeof(i2cr_cfam) - 4)
+ addr = (addr & 0x3) + (sizeof(i2cr_cfam) - 4);
+
+ memcpy(val, &i2cr_cfam[addr], size);
+ return 0;
+ }
+
+ ret = fsi_master_i2cr_read(i2cr, I2CR_ADDRESS_CFAM(addr), &data);
+ if (ret)
+ return ret;
+
+ /*
+ * FSI core expects up to 4 bytes BE back, while I2CR replied with LE
+ * bytes on the wire.
+ */
+ for (i = 0; i < size; ++i)
+ ((u8 *)val)[i] = ((u8 *)&data)[7 - i];
+
+ return 0;
+}
+
+static int i2cr_write(struct fsi_master *master, int link, uint8_t id, uint32_t addr,
+ const void *val, size_t size)
+{
+ struct fsi_master_i2cr *i2cr = container_of(master, struct fsi_master_i2cr, master);
+ u64 data = 0;
+ size_t i;
+
+ if (link || id || (addr & 0xffff0000) || !(size == 1 || size == 2 || size == 4))
+ return -EINVAL;
+
+ /* I2CR writes to CFAM or FSI slave address are a successful no-op. */
+ if (addr < 0xc00)
+ return 0;
+
+ /*
+ * FSI core passes up to 4 bytes BE, while the I2CR expects LE bytes on
+ * the wire.
+ */
+ for (i = 0; i < size; ++i)
+ ((u8 *)&data)[7 - i] = ((u8 *)val)[i];
+
+ return fsi_master_i2cr_write(i2cr, I2CR_ADDRESS_CFAM(addr), data);
+}
+
+static void i2cr_release(struct device *dev)
+{
+ struct fsi_master_i2cr *i2cr = to_fsi_master_i2cr(to_fsi_master(dev));
+
+ of_node_put(dev->of_node);
+
+ kfree(i2cr);
+}
+
+static int i2cr_probe(struct i2c_client *client)
+{
+ struct fsi_master_i2cr *i2cr;
+ int ret;
+
+ i2cr = kzalloc(sizeof(*i2cr), GFP_KERNEL);
+ if (!i2cr)
+ return -ENOMEM;
+
+ /* Only one I2CR on any given I2C bus (fixed I2C device address) */
+ i2cr->master.idx = client->adapter->nr;
+ dev_set_name(&i2cr->master.dev, "i2cr%d", i2cr->master.idx);
+ i2cr->master.dev.parent = &client->dev;
+ i2cr->master.dev.of_node = of_node_get(dev_of_node(&client->dev));
+ i2cr->master.dev.release = i2cr_release;
+
+ i2cr->master.n_links = 1;
+ i2cr->master.read = i2cr_read;
+ i2cr->master.write = i2cr_write;
+
+ mutex_init(&i2cr->lock);
+ i2cr->client = client;
+
+ ret = fsi_master_register(&i2cr->master);
+ if (ret)
+ return ret;
+
+ i2c_set_clientdata(client, i2cr);
+ return 0;
+}
+
+static void i2cr_remove(struct i2c_client *client)
+{
+ struct fsi_master_i2cr *i2cr = i2c_get_clientdata(client);
+
+ fsi_master_unregister(&i2cr->master);
+}
+
+static const struct of_device_id i2cr_ids[] = {
+ { .compatible = "ibm,i2cr-fsi-master" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, i2cr_ids);
+
+static struct i2c_driver i2cr_driver = {
+ .probe = i2cr_probe,
+ .remove = i2cr_remove,
+ .driver = {
+ .name = "fsi-master-i2cr",
+ .of_match_table = i2cr_ids,
+ },
+};
+
+module_i2c_driver(i2cr_driver);
+
+MODULE_AUTHOR("Eddie James <eajames@linux.ibm.com>");
+MODULE_DESCRIPTION("IBM I2C Responder virtual FSI master driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/fsi/fsi-master-i2cr.h b/drivers/fsi/fsi-master-i2cr.h
new file mode 100644
index 000000000000..96636bf28cac
--- /dev/null
+++ b/drivers/fsi/fsi-master-i2cr.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) IBM Corporation 2023 */
+
+#ifndef DRIVERS_FSI_MASTER_I2CR_H
+#define DRIVERS_FSI_MASTER_I2CR_H
+
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+
+#include "fsi-master.h"
+
+struct i2c_client;
+
+struct fsi_master_i2cr {
+ struct fsi_master master;
+ struct mutex lock; /* protect HW access */
+ struct i2c_client *client;
+};
+
+#define to_fsi_master_i2cr(m) container_of(m, struct fsi_master_i2cr, master)
+
+int fsi_master_i2cr_read(struct fsi_master_i2cr *i2cr, u32 addr, u64 *data);
+int fsi_master_i2cr_write(struct fsi_master_i2cr *i2cr, u32 addr, u64 data);
+
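+/*
+ * An I2CR master is recognised by its parent device being an I2C client;
+ * i2cr_probe() sets the client's device as the parent of the FSI master.
+ */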
+static inline bool is_fsi_master_i2cr(struct fsi_master *master)
+{
+ if (master->dev.parent && master->dev.parent->type == &i2c_client_type)
+ return true;
+
+ return false;
+}
+
+#endif /* DRIVERS_FSI_MASTER_I2CR_H */
diff --git a/drivers/fsi/fsi-master.h b/drivers/fsi/fsi-master.h
index 4762315a46ba..967622c1cabf 100644
--- a/drivers/fsi/fsi-master.h
+++ b/drivers/fsi/fsi-master.h
@@ -136,7 +136,7 @@ struct fsi_master {
u8 t_send_delay, u8 t_echo_delay);
};
-#define dev_to_fsi_master(d) container_of(d, struct fsi_master, dev)
+#define to_fsi_master(d) container_of(d, struct fsi_master, dev)
/**
* fsi_master registration & lifetime: the fsi_master_register() and
diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
index abdd37d5507f..da35ca9e84a6 100644
--- a/drivers/fsi/fsi-occ.c
+++ b/drivers/fsi/fsi-occ.c
@@ -15,7 +15,7 @@
#include <linux/mutex.h>
#include <linux/fsi-occ.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
index 9912b7a6a4b9..0a98517f3959 100644
--- a/drivers/fsi/fsi-sbefifo.c
+++ b/drivers/fsi/fsi-sbefifo.c
@@ -22,8 +22,8 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -81,7 +81,7 @@
enum sbe_state
{
- SBE_STATE_UNKNOWN = 0x0, // Unkown, initial state
+ SBE_STATE_UNKNOWN = 0x0, // Unknown, initial state
SBE_STATE_IPLING = 0x1, // IPL'ing - autonomous mode (transient)
SBE_STATE_ISTEP = 0x2, // ISTEP - Running IPL by steps (transient)
SBE_STATE_MPIPL = 0x3, // MPIPL
@@ -127,6 +127,7 @@ struct sbefifo {
bool dead;
bool async_ffdc;
bool timed_out;
+ u32 timeout_in_cmd_ms;
u32 timeout_start_rsp_ms;
};
@@ -136,6 +137,7 @@ struct sbefifo_user {
void *cmd_page;
void *pending_cmd;
size_t pending_len;
+ u32 cmd_timeout_ms;
u32 read_timeout_ms;
};
@@ -508,7 +510,7 @@ static int sbefifo_send_command(struct sbefifo *sbefifo,
rc = sbefifo_wait(sbefifo, true, &status, timeout);
if (rc < 0)
return rc;
- timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_IN_CMD);
+ timeout = msecs_to_jiffies(sbefifo->timeout_in_cmd_ms);
vacant = sbefifo_vacant(status);
len = chunk = min(vacant, remaining);
@@ -730,7 +732,7 @@ static int __sbefifo_submit(struct sbefifo *sbefifo,
* @response: The output response buffer
* @resp_len: In: Response buffer size, Out: Response size
*
- * This will perform the entire operation. If the reponse buffer
+ * This will perform the entire operation. If the response buffer
* overflows, returns -EOVERFLOW
*/
int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len,
@@ -802,6 +804,7 @@ static int sbefifo_user_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
mutex_init(&user->file_lock);
+ user->cmd_timeout_ms = SBEFIFO_TIMEOUT_IN_CMD;
user->read_timeout_ms = SBEFIFO_TIMEOUT_START_RSP;
return 0;
@@ -845,9 +848,11 @@ static ssize_t sbefifo_user_read(struct file *file, char __user *buf,
rc = mutex_lock_interruptible(&sbefifo->lock);
if (rc)
goto bail;
+ sbefifo->timeout_in_cmd_ms = user->cmd_timeout_ms;
sbefifo->timeout_start_rsp_ms = user->read_timeout_ms;
rc = __sbefifo_submit(sbefifo, user->pending_cmd, cmd_len, &resp_iter);
sbefifo->timeout_start_rsp_ms = SBEFIFO_TIMEOUT_START_RSP;
+ sbefifo->timeout_in_cmd_ms = SBEFIFO_TIMEOUT_IN_CMD;
mutex_unlock(&sbefifo->lock);
if (rc < 0)
goto bail;
@@ -937,7 +942,7 @@ static int sbefifo_user_release(struct inode *inode, struct file *file)
return 0;
}
-static int sbefifo_read_timeout(struct sbefifo_user *user, void __user *argp)
+static int sbefifo_cmd_timeout(struct sbefifo_user *user, void __user *argp)
{
struct device *dev = &user->sbefifo->dev;
u32 timeout;
@@ -946,18 +951,32 @@ static int sbefifo_read_timeout(struct sbefifo_user *user, void __user *argp)
return -EFAULT;
if (timeout == 0) {
- user->read_timeout_ms = SBEFIFO_TIMEOUT_START_RSP;
- dev_dbg(dev, "Timeout reset to %d\n", user->read_timeout_ms);
+ user->cmd_timeout_ms = SBEFIFO_TIMEOUT_IN_CMD;
+ dev_dbg(dev, "Command timeout reset to %us\n", user->cmd_timeout_ms / 1000);
return 0;
}
- if (timeout < 10 || timeout > 120)
- return -EINVAL;
+ user->cmd_timeout_ms = timeout * 1000; /* user timeout is in sec */
+ dev_dbg(dev, "Command timeout set to %us\n", timeout);
+ return 0;
+}
- user->read_timeout_ms = timeout * 1000; /* user timeout is in sec */
+static int sbefifo_read_timeout(struct sbefifo_user *user, void __user *argp)
+{
+ struct device *dev = &user->sbefifo->dev;
+ u32 timeout;
- dev_dbg(dev, "Timeout set to %d\n", user->read_timeout_ms);
+ if (get_user(timeout, (__u32 __user *)argp))
+ return -EFAULT;
+ if (timeout == 0) {
+ user->read_timeout_ms = SBEFIFO_TIMEOUT_START_RSP;
+ dev_dbg(dev, "Timeout reset to %us\n", user->read_timeout_ms / 1000);
+ return 0;
+ }
+
+ user->read_timeout_ms = timeout * 1000; /* user timeout is in sec */
+ dev_dbg(dev, "Timeout set to %us\n", timeout);
return 0;
}
@@ -971,6 +990,9 @@ static long sbefifo_user_ioctl(struct file *file, unsigned int cmd, unsigned lon
mutex_lock(&user->file_lock);
switch (cmd) {
+ case FSI_SBEFIFO_CMD_TIMEOUT_SECONDS:
+ rc = sbefifo_cmd_timeout(user, (void __user *)arg);
+ break;
case FSI_SBEFIFO_READ_TIMEOUT_SECONDS:
rc = sbefifo_read_timeout(user, (void __user *)arg);
break;
@@ -1025,16 +1047,9 @@ static int sbefifo_probe(struct device *dev)
sbefifo->fsi_dev = fsi_dev;
dev_set_drvdata(dev, sbefifo);
mutex_init(&sbefifo->lock);
+ sbefifo->timeout_in_cmd_ms = SBEFIFO_TIMEOUT_IN_CMD;
sbefifo->timeout_start_rsp_ms = SBEFIFO_TIMEOUT_START_RSP;
- /*
- * Try cleaning up the FIFO. If this fails, we still register the
- * driver and will try cleaning things up again on the next access.
- */
- rc = sbefifo_cleanup_hw(sbefifo);
- if (rc && rc != -ESHUTDOWN)
- dev_err(dev, "Initial HW cleanup failed, will retry later\n");
-
/* Create chardev for userspace access */
sbefifo->dev.type = &fsi_cdev_type;
sbefifo->dev.parent = dev;
diff --git a/drivers/fsi/fsi-scom.c b/drivers/fsi/fsi-scom.c
index bcb756dc9866..61dbda9dbe2b 100644
--- a/drivers/fsi/fsi-scom.c
+++ b/drivers/fsi/fsi-scom.c
@@ -10,6 +10,7 @@
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/fs.h>
+#include <linux/mod_devicetable.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/list.h>
@@ -587,6 +588,12 @@ static int scom_remove(struct device *dev)
return 0;
}
+static const struct of_device_id scom_of_ids[] = {
+ { .compatible = "ibm,fsi2pib" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, scom_of_ids);
+
static const struct fsi_device_id scom_ids[] = {
{
.engine_type = FSI_ENGID_SCOM,
@@ -600,6 +607,7 @@ static struct fsi_driver scom_drv = {
.drv = {
.name = "scom",
.bus = &fsi_bus_type,
+ .of_match_table = scom_of_ids,
.probe = scom_probe,
.remove = scom_remove,
}
diff --git a/drivers/fsi/fsi-slave.h b/drivers/fsi/fsi-slave.h
new file mode 100644
index 000000000000..1d63a585829d
--- /dev/null
+++ b/drivers/fsi/fsi-slave.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) IBM Corporation 2023 */
+
+#ifndef DRIVERS_FSI_SLAVE_H
+#define DRIVERS_FSI_SLAVE_H
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+
+struct fsi_master;
+
+struct fsi_slave {
+ struct device dev;
+ struct fsi_master *master;
+ struct cdev cdev;
+ int cdev_idx;
+ int id; /* FSI address */
+ int link; /* FSI link# */
+ u32 cfam_id;
+ int chip_id;
+ uint32_t size; /* size of slave address space */
+ u8 t_send_delay;
+ u8 t_echo_delay;
+};
+
+#define to_fsi_slave(d) container_of(d, struct fsi_slave, dev)
+
+#endif /* DRIVERS_FSI_SLAVE_H */
diff --git a/drivers/fsi/i2cr-scom.c b/drivers/fsi/i2cr-scom.c
new file mode 100644
index 000000000000..cb7e02213032
--- /dev/null
+++ b/drivers/fsi/i2cr-scom.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) IBM Corporation 2023 */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/fsi.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+
+#include "fsi-master-i2cr.h"
+#include "fsi-slave.h"
+
+struct i2cr_scom {
+ struct device dev;
+ struct cdev cdev;
+ struct fsi_master_i2cr *i2cr;
+};
+
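+/*
+ * Userspace addresses a SCOM register by seeking to it and then issuing an
+ * 8-byte read() or write(): the file position is used directly as the
+ * register address, and any length other than sizeof(u64) is rejected.
+ */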
+static loff_t i2cr_scom_llseek(struct file *file, loff_t offset, int whence)
+{
+ switch (whence) {
+ case SEEK_CUR:
+ break;
+ case SEEK_SET:
+ file->f_pos = offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return offset;
+}
+
+static ssize_t i2cr_scom_read(struct file *filep, char __user *buf, size_t len, loff_t *offset)
+{
+ struct i2cr_scom *scom = filep->private_data;
+ u64 data;
+ int ret;
+
+ if (len != sizeof(data))
+ return -EINVAL;
+
+ ret = fsi_master_i2cr_read(scom->i2cr, (u32)*offset, &data);
+ if (ret)
+ return ret;
+
+ ret = copy_to_user(buf, &data, len);
+ if (ret)
+ return -EFAULT;
+
+ return len;
+}
+
+static ssize_t i2cr_scom_write(struct file *filep, const char __user *buf, size_t len,
+ loff_t *offset)
+{
+ struct i2cr_scom *scom = filep->private_data;
+ u64 data;
+ int ret;
+
+ if (len != sizeof(data))
+ return -EINVAL;
+
+ ret = copy_from_user(&data, buf, len);
+ if (ret)
+ return -EFAULT;
+
+ ret = fsi_master_i2cr_write(scom->i2cr, (u32)*offset, data);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static const struct file_operations i2cr_scom_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .llseek = i2cr_scom_llseek,
+ .read = i2cr_scom_read,
+ .write = i2cr_scom_write,
+};
+
+static int i2cr_scom_probe(struct device *dev)
+{
+ struct fsi_device *fsi_dev = to_fsi_dev(dev);
+ struct i2cr_scom *scom;
+ int didx;
+ int ret;
+
+ if (!is_fsi_master_i2cr(fsi_dev->slave->master))
+ return -ENODEV;
+
+ scom = devm_kzalloc(dev, sizeof(*scom), GFP_KERNEL);
+ if (!scom)
+ return -ENOMEM;
+
+ scom->i2cr = to_fsi_master_i2cr(fsi_dev->slave->master);
+ dev_set_drvdata(dev, scom);
+
+ scom->dev.type = &fsi_cdev_type;
+ scom->dev.parent = dev;
+ device_initialize(&scom->dev);
+
+ ret = fsi_get_new_minor(fsi_dev, fsi_dev_scom, &scom->dev.devt, &didx);
+ if (ret)
+ return ret;
+
+ dev_set_name(&scom->dev, "scom%d", didx);
+ cdev_init(&scom->cdev, &i2cr_scom_fops);
+ ret = cdev_device_add(&scom->cdev, &scom->dev);
+ if (ret)
+ fsi_free_minor(scom->dev.devt);
+
+ return ret;
+}
+
+static int i2cr_scom_remove(struct device *dev)
+{
+ struct i2cr_scom *scom = dev_get_drvdata(dev);
+
+ cdev_device_del(&scom->cdev, &scom->dev);
+ fsi_free_minor(scom->dev.devt);
+
+ return 0;
+}
+
+static const struct of_device_id i2cr_scom_of_ids[] = {
+ { .compatible = "ibm,i2cr-scom" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, i2cr_scom_of_ids);
+
+static const struct fsi_device_id i2cr_scom_ids[] = {
+ { 0x5, FSI_VERSION_ANY },
+ { }
+};
+
+static struct fsi_driver i2cr_scom_driver = {
+ .id_table = i2cr_scom_ids,
+ .drv = {
+ .name = "i2cr_scom",
+ .bus = &fsi_bus_type,
+ .of_match_table = i2cr_scom_of_ids,
+ .probe = i2cr_scom_probe,
+ .remove = i2cr_scom_remove,
+ }
+};
+
+module_fsi_driver(i2cr_scom_driver);
+
+MODULE_AUTHOR("Eddie James <eajames@linux.ibm.com>");
+MODULE_DESCRIPTION("IBM I2C Responder SCOM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 0a7264aabe48..324e942c0650 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -575,6 +575,26 @@ static int zynq_gpio_set_wake(struct irq_data *data, unsigned int on)
return 0;
}
+static int zynq_gpio_irq_reqres(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(chip->parent);
+ if (ret < 0)
+ return ret;
+
+ return gpiochip_reqres_irq(chip, d->hwirq);
+}
+
+static void zynq_gpio_irq_relres(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+ gpiochip_relres_irq(chip, d->hwirq);
+ pm_runtime_put(chip->parent);
+}
+
/* irq chip descriptor */
static const struct irq_chip zynq_gpio_level_irqchip = {
.name = DRIVER_NAME,
@@ -584,9 +604,10 @@ static const struct irq_chip zynq_gpio_level_irqchip = {
.irq_unmask = zynq_gpio_irq_unmask,
.irq_set_type = zynq_gpio_set_irq_type,
.irq_set_wake = zynq_gpio_set_wake,
+ .irq_request_resources = zynq_gpio_irq_reqres,
+ .irq_release_resources = zynq_gpio_irq_relres,
.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static const struct irq_chip zynq_gpio_edge_irqchip = {
@@ -597,8 +618,9 @@ static const struct irq_chip zynq_gpio_edge_irqchip = {
.irq_unmask = zynq_gpio_irq_unmask,
.irq_set_type = zynq_gpio_set_irq_type,
.irq_set_wake = zynq_gpio_set_wake,
+ .irq_request_resources = zynq_gpio_irq_reqres,
+ .irq_release_resources = zynq_gpio_irq_relres,
.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index df633e9ce920..cdf6087706aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -442,9 +442,7 @@ void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
mem_info->local_mem_size_public,
mem_info->local_mem_size_private);
- if (amdgpu_sriov_vf(adev))
- mem_info->mem_clk_max = adev->clock.default_mclk / 100;
- else if (adev->pm.dpm_enabled) {
+ if (adev->pm.dpm_enabled) {
if (amdgpu_emu_mode == 1)
mem_info->mem_clk_max = 0;
else
@@ -463,9 +461,7 @@ uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev)
uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev)
{
/* the sclk is in quantas of 10kHz */
- if (amdgpu_sriov_vf(adev))
- return adev->clock.default_sclk / 100;
- else if (adev->pm.dpm_enabled)
+ if (adev->pm.dpm_enabled)
return amdgpu_dpm_get_sclk(adev, false) / 100;
else
return 100;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 835980e94b9e..fb2681dd6b33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -217,6 +217,7 @@ union umc_info {
struct atom_umc_info_v3_1 v31;
struct atom_umc_info_v3_2 v32;
struct atom_umc_info_v3_3 v33;
+ struct atom_umc_info_v4_0 v40;
};
union vram_info {
@@ -508,9 +509,8 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
if (amdgpu_atom_parse_data_header(mode_info->atom_context,
index, &size, &frev, &crev, &data_offset)) {
+ umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);
if (frev == 3) {
- umc_info = (union umc_info *)
- (mode_info->atom_context->bios + data_offset);
switch (crev) {
case 1:
umc_config = le32_to_cpu(umc_info->v31.umc_config);
@@ -533,6 +533,20 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
/* unsupported crev */
return false;
}
+ } else if (frev == 4) {
+ switch (crev) {
+ case 0:
+ umc_config1 = le32_to_cpu(umc_info->v40.umc_config1);
+ ecc_default_enabled =
+ (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE) ? true : false;
+ break;
+ default:
+ /* unsupported crev */
+ return false;
+ }
+ } else {
+ /* unsupported frev */
+ return false;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 49dd9aa8da70..efdb1c48f431 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,7 +127,6 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
{
struct drm_gem_object *gobj;
unsigned long size;
- int r;
gobj = drm_gem_object_lookup(p->filp, data->handle);
if (gobj == NULL)
@@ -137,23 +136,14 @@ static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
drm_gem_object_put(gobj);
size = amdgpu_bo_size(p->uf_bo);
- if (size != PAGE_SIZE || (data->offset + 8) > size) {
- r = -EINVAL;
- goto error_unref;
- }
+ if (size != PAGE_SIZE || data->offset > (size - 8))
+ return -EINVAL;
- if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm)) {
- r = -EINVAL;
- goto error_unref;
- }
+ if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm))
+ return -EINVAL;
*offset = data->offset;
-
return 0;
-
-error_unref:
- amdgpu_bo_unref(&p->uf_bo);
- return r;
}
static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e77f048c99d8..3f001a50b34a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -885,13 +885,20 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
*/
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
+ int ret;
+
amdgpu_asic_pre_asic_init(adev);
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) ||
- adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0))
- return amdgpu_atomfirmware_asic_init(adev, true);
- else
+ adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) {
+ amdgpu_psp_wait_for_bootloader(adev);
+ ret = amdgpu_atomfirmware_asic_init(adev, true);
+ return ret;
+ } else {
return amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ }
+
+ return 0;
}
/**
@@ -4694,9 +4701,12 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
}
if (ret)
- dev_err(adev->dev, "GPU mode1 reset failed\n");
+ goto mode1_reset_failed;
amdgpu_device_load_pci_state(adev->pdev);
+ ret = amdgpu_psp_wait_for_bootloader(adev);
+ if (ret)
+ goto mode1_reset_failed;
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
@@ -4707,7 +4717,17 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
udelay(1);
}
+ if (i >= adev->usec_timeout) {
+ ret = -ETIMEDOUT;
+ goto mode1_reset_failed;
+ }
+
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+
+ return 0;
+
+mode1_reset_failed:
+ dev_err(adev->dev, "GPU mode1 reset failed\n");
return ret;
}
@@ -4849,7 +4869,7 @@ static void amdgpu_reset_capture_coredumpm(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
ktime_get_ts64(&adev->reset_time);
- dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_KERNEL,
+ dev_coredumpm(dev->dev, THIS_MODULE, adev, 0, GFP_NOWAIT,
amdgpu_devcoredump_read, amdgpu_devcoredump_free);
}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 74ffe6581c85..7d5e7ad28ba8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -1390,6 +1390,7 @@ union gc_info {
struct gc_info_v1_1 v1_1;
struct gc_info_v1_2 v1_2;
struct gc_info_v2_0 v2;
+ struct gc_info_v2_1 v2_1;
};
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
@@ -1465,6 +1466,15 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
+ if (gc_info->v2.header.version_minor == 1) {
+ adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
+ adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
+ adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
+ adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
+ adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
+ adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
+ adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
+ }
break;
default:
dev_err(adev->dev,
@@ -1478,6 +1488,7 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
union mall_info {
struct mall_info_v1_0 v1;
+ struct mall_info_v2_0 v2;
};
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
@@ -1518,6 +1529,10 @@ static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
adev->gmc.mall_size = mall_size;
adev->gmc.m_half_use = half_use;
break;
+ case 2:
+ mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
+ adev->gmc.mall_size = mall_size_per_umc * adev->gmc.num_umc;
+ break;
default:
dev_err(adev->dev,
"Unhandled MALL info table %d.%d\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index d20dd3f852fc..363e6a2cad8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -38,6 +38,8 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -532,11 +534,29 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
return true;
}
+static int amdgpu_dirtyfb(struct drm_framebuffer *fb, struct drm_file *file,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips, unsigned int num_clips)
+{
+
+ if (file)
+ return -ENOSYS;
+
+ return drm_atomic_helper_dirtyfb(fb, file, flags, color, clips,
+ num_clips);
+}
+
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
+static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .dirty = amdgpu_dirtyfb
+};
+
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
uint64_t bo_flags)
{
@@ -1139,7 +1159,11 @@ static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
if (ret)
goto err;
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = drm_framebuffer_init(dev, &rfb->base,
+ &amdgpu_fb_funcs_atomic);
+ else
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index a4ff515ce896..395c1768b9fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -241,6 +241,9 @@ struct amdgpu_gfx_config {
uint32_t gc_gl1c_per_sa;
uint32_t gc_gl1c_size_per_instance;
uint32_t gc_gl2c_per_gpu;
+ uint32_t gc_tcp_size_per_cu;
+ uint32_t gc_num_cu_per_sqc;
+ uint32_t gc_tcc_size;
};
struct amdgpu_cu_info {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 8fdca54bb8a1..429ef212c1f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -2078,6 +2078,17 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
}
/* SECUREDISPLAY end */
+int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+ int ret = 0;
+
+ if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
+ ret = psp->funcs->wait_for_bootloader(psp);
+
+ return ret;
+}
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 3384eb94fde0..3e67ed63e638 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -109,6 +109,7 @@ enum psp_reg_prog_id {
struct psp_funcs {
int (*init_microcode)(struct psp_context *psp);
+ int (*wait_for_bootloader)(struct psp_context *psp);
int (*bootloader_load_kdb)(struct psp_context *psp);
int (*bootloader_load_spl)(struct psp_context *psp);
int (*bootloader_load_sysdrv)(struct psp_context *psp);
@@ -533,4 +534,6 @@ int psp_spatial_partition(struct psp_context *psp, int mode);
int is_psp_fw_valid(struct psp_bin_desc bin);
+int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 7689395e44fd..3c4600e15b86 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -764,7 +764,7 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
union ta_ras_cmd_input *info;
- int ret = 0;
+ int ret;
if (!con)
return -EINVAL;
@@ -773,7 +773,7 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
if (enable &&
head->block != AMDGPU_RAS_BLOCK__GFX &&
!amdgpu_ras_is_feature_allowed(adev, head))
- goto out;
+ return 0;
/* Only enable gfx ras feature from host side */
if (head->block == AMDGPU_RAS_BLOCK__GFX &&
@@ -801,16 +801,16 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
enable ? "enable":"disable",
get_ras_block_str(head),
amdgpu_ras_is_poison_mode_supported(adev), ret);
- goto out;
+ return ret;
}
+
+ kfree(info);
}
/* setup the obj */
__amdgpu_ras_feature_enable(adev, head, enable);
-out:
- if (head->block == AMDGPU_RAS_BLOCK__GFX)
- kfree(info);
- return ret;
+
+ return 0;
}
/* Only used in device probe stage and called only once. */
@@ -2399,6 +2399,7 @@ static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev)) {
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
return true;
default:
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 4764d2171f92..595d5e535aca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -158,9 +158,10 @@ static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 7): /* Sienna cichlid */
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 2): /* Aldebaran */
- case IP_VERSION(13, 0, 6):
case IP_VERSION(13, 0, 10):
return true;
+ case IP_VERSION(13, 0, 6):
+ return (adev->gmc.is_app_apu) ? false : true;
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 57ed4e5c294c..0a26a00074a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -203,6 +203,9 @@ static void gfx_v9_4_3_init_golden_registers(struct amdgpu_device *adev)
if (adev->rev_id == 0) {
WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL1,
REDUCE_FIFO_DEPTH_BY_2, 2);
+ } else {
+ WREG32_FIELD15_PREREG(GC, dev_inst, TCP_UTCL1_CNTL2,
+ SPARE, 0x1);
}
}
}
@@ -860,11 +863,15 @@ static int gfx_v9_4_3_sw_init(void *handle)
if (r)
return r;
- r = amdgpu_gfx_sysfs_init(adev);
+ r = amdgpu_gfx_ras_sw_init(adev);
if (r)
return r;
- return amdgpu_gfx_ras_sw_init(adev);
+
+ if (!amdgpu_sriov_vf(adev))
+ r = amdgpu_gfx_sysfs_init(adev);
+
+ return r;
}
static int gfx_v9_4_3_sw_fini(void *handle)
@@ -885,7 +892,8 @@ static int gfx_v9_4_3_sw_fini(void *handle)
gfx_v9_4_3_mec_fini(adev);
amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
gfx_v9_4_3_free_microcode(adev);
- amdgpu_gfx_sysfs_fini(adev);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_gfx_sysfs_fini(adev);
return 0;
}
@@ -2219,15 +2227,6 @@ static void gfx_v9_4_3_xcc_update_sram_fgcg(struct amdgpu_device *adev,
WREG32_SOC15(GC, GET_INST(GC, xcc_id),
regRLC_CGTT_MGCG_OVERRIDE, data);
- def = data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CLK_CNTL);
-
- if (enable)
- data &= ~RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK;
- else
- data |= RLC_CLK_CNTL__RLC_SRAM_CLK_GATER_OVERRIDE_MASK;
-
- if (def != data)
- WREG32_SOC15(GC, GET_INST(GC, xcc_id), regRLC_CLK_CNTL, data);
}
static void gfx_v9_4_3_xcc_update_repeater_fgcg(struct amdgpu_device *adev,
@@ -4048,7 +4047,8 @@ static void gfx_v9_4_3_inst_enable_watchdog_timer(struct amdgpu_device *adev,
uint32_t i;
uint32_t data;
- data = REG_SET_FIELD(0, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
+ data = RREG32_SOC15(GC, GET_INST(GC, 0), regSQ_TIMEOUT_CONFIG);
+ data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);
if (amdgpu_watchdog_timer.timeout_fatal_disable &&
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index 15612915bb6c..1de79d660285 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -360,8 +360,10 @@ static int jpeg_v4_0_3_hw_fini(void *handle)
cancel_delayed_work_sync(&adev->jpeg.idle_work);
- if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
- ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->jpeg.cur_state != AMD_PG_STATE_GATE)
+ ret = jpeg_v4_0_3_set_powergating_state(adev, AMD_PG_STATE_GATE);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index 9ea072374cb7..f85eec05d218 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -437,6 +437,24 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev)
XCC_DOORBELL_FENCE__SHUB_SLV_MODE_MASK);
}
+
+ if (!amdgpu_sriov_vf(adev)) {
+ u32 baco_cntl;
+ for_each_inst(i, adev->aid_mask) {
+ baco_cntl = RREG32_SOC15(NBIO, i, regBIF_BX0_BACO_CNTL);
+ if (baco_cntl & (BIF_BX0_BACO_CNTL__BACO_DUMMY_EN_MASK |
+ BIF_BX0_BACO_CNTL__BACO_EN_MASK)) {
+ baco_cntl &= ~(
+ BIF_BX0_BACO_CNTL__BACO_DUMMY_EN_MASK |
+ BIF_BX0_BACO_CNTL__BACO_EN_MASK);
+ dev_dbg(adev->dev,
+ "Unsetting baco dummy mode %x",
+ baco_cntl);
+ WREG32_SOC15(NBIO, i, regBIF_BX0_BACO_CNTL,
+ baco_cntl);
+ }
+ }
+ }
}
static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 10b17bd5aebe..469eed084976 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -133,12 +133,32 @@ static bool psp_v13_0_is_sos_alive(struct psp_context *psp)
return sol_reg != 0x0;
}
-static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
+static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
+ int retry_loop, ret;
- int ret;
- int retry_loop;
+ for (retry_loop = 0; retry_loop < 70; retry_loop++) {
+ /* Wait for bootloader to signify that it is ready by
+ * having bit 31 of C2PMSG_33 set to 1 */
+ ret = psp_wait_for(
+ psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_33),
+ 0x80000000, 0xffffffff, false);
+
+ if (ret == 0)
+ break;
+ }
+
+ if (ret)
+ dev_warn(adev->dev, "Bootloader wait timed out");
+
+ return ret;
+}
+
+static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int retry_loop, ret;
/* Wait for bootloader to signify that it is ready having bit 31 of
* C2PMSG_35 set to 1. All other bits are expected to be cleared.
@@ -157,6 +177,19 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
return ret;
}
+static int psp_v13_0_wait_for_bootloader_steady_state(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 6)) {
+ psp_v13_0_wait_for_vmbx_ready(psp);
+
+ return psp_v13_0_wait_for_bootloader(psp);
+ }
+
+ return 0;
+}
+
static int psp_v13_0_bootloader_load_component(struct psp_context *psp,
struct psp_bin_desc *bin_desc,
enum psp_bootloader_cmd bl_cmd)
@@ -714,6 +747,7 @@ static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp)
static const struct psp_funcs psp_v13_0_funcs = {
.init_microcode = psp_v13_0_init_microcode,
+ .wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
.bootloader_load_kdb = psp_v13_0_bootloader_load_kdb,
.bootloader_load_spl = psp_v13_0_bootloader_load_spl,
.bootloader_load_sysdrv = psp_v13_0_bootloader_load_sysdrv,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index c45721ca916e..f5be40d7ba36 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -559,8 +559,10 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
*/
if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5)
return AMD_RESET_METHOD_MODE2;
+ else if (!(adev->flags & AMD_IS_APU))
+ return AMD_RESET_METHOD_MODE1;
else
- return AMD_RESET_METHOD_NONE;
+ return AMD_RESET_METHOD_MODE2;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index f0731a6a5306..830396b1c3b1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -384,7 +384,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev,
default:
break;
}
- kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24);
+ kfd_signal_event_interrupt(pasid, sq_int_data, 24);
} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) {
kfd_set_dbg_ev_from_interrupt(dev, pasid,
KFD_DEBUG_DOORBELL_ID(context_id0),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 2319467d2d95..0bbf0edbabd4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -457,6 +457,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
mqd->get_wave_state = get_wave_state;
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -472,6 +473,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = destroy_hiq_mqd;
mqd->is_occupied = kfd_is_occupied_cp;
mqd->mqd_size = sizeof(struct v11_compute_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -501,6 +503,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->destroy_mqd = kfd_destroy_mqd_sdma;
mqd->is_occupied = kfd_is_occupied_sdma;
mqd->mqd_size = sizeof(struct v11_sdma_mqd);
+ mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 011561605983..bb16b795d1bc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1686,6 +1686,8 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
WRITE_ONCE(p->svms.faulting_task, NULL);
if (r) {
pr_debug("failed %d to get svm range pages\n", r);
+ if (r == -EBUSY)
+ r = -EAGAIN;
goto unreserve_out;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 268cb99a4c4b..88ba8b66de1f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -65,6 +65,7 @@
#include "amdgpu_dm_debugfs.h"
#endif
#include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
#include "ivsrcid/ivsrcid_vislands30.h"
@@ -4265,6 +4266,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
bool psr_feature_enabled = false;
+ bool replay_feature_enabled = false;
int max_overlay = dm->dc->caps.max_slave_planes;
dm->display_indexes_num = dm->dc->caps.max_streams;
@@ -4374,6 +4376,20 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
}
}
+ if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ replay_feature_enabled = true;
+ break;
+ default:
+ replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
+ break;
+ }
+ }
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
@@ -4422,6 +4438,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
amdgpu_dm_update_connector_after_detect(aconnector);
setup_backlight_device(dm, aconnector);
+ /*
+ * Disable psr if replay can be enabled
+ */
+ if (replay_feature_enabled && amdgpu_dm_setup_replay(link, aconnector))
+ psr_feature_enabled = false;
+
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
@@ -6004,7 +6026,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (recalculate_timing)
drm_mode_set_crtcinfo(&saved_mode, 0);
- else
+ else if (!old_stream)
drm_mode_set_crtcinfo(&mode, 0);
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 30d4c6fd95f5..97b7a0b8a1c2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -29,6 +29,7 @@
#include "dc.h"
#include "amdgpu.h"
#include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
#include "amdgpu_dm_crtc.h"
#include "amdgpu_dm_plane.h"
#include "amdgpu_dm_trace.h"
@@ -123,7 +124,12 @@ static void vblank_control_worker(struct work_struct *work)
* fill_dc_dirty_rects().
*/
if (vblank_work->stream && vblank_work->stream->link) {
- if (vblank_work->enable) {
+ /*
+ * Prioritize replay over PSR
+ */
+ if (vblank_work->stream->link->replay_settings.replay_feature_enabled)
+ amdgpu_dm_replay_enable(vblank_work->stream, false);
+ else if (vblank_work->enable) {
if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
vblank_work->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(vblank_work->stream);
@@ -132,6 +138,7 @@ static void vblank_control_worker(struct work_struct *work)
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
#endif
+ vblank_work->stream->link->panel_config.psr.disallow_replay &&
vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
amdgpu_dm_psr_enable(vblank_work->stream);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 8eeca160d434..cc74dd69acf2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -1269,6 +1269,13 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
attributes.rotation_angle = 0;
attributes.attribute_flags.value = 0;
+ /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM
+ * legacy gamma setup.
+ */
+ if (crtc_state->cm_is_degamma_srgb &&
+ adev->dm.dc->caps.color.dpp.gamma_corr)
+ attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1;
+
attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
if (crtc_state->stream) {
@@ -1468,6 +1475,15 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_create_blend_mode_property(plane, blend_caps);
}
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ drm_plane_create_zpos_immutable_property(plane, 0);
+ } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+ unsigned int zpos = 1 + drm_plane_index(plane);
+ drm_plane_create_zpos_property(plane, zpos, 1, 254);
+ } else if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ drm_plane_create_zpos_immutable_property(plane, 255);
+ }
+
if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
plane_cap &&
(plane_cap->pixel_format_support.nv12 ||
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 69ffd4424dc7..1b8c2aef4633 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -78,3 +78,4 @@ DC_EDID += dc_edid_parser.o
AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
AMD_DISPLAY_EDID = $(addprefix $(AMDDALPATH)/dc/,$(DC_EDID))
AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB) $(AMD_DISPLAY_EDID)
+
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
index 3e0da873cf4c..1042cf1a3ab0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
@@ -32,6 +32,7 @@
#define MAX_INSTANCE 6
#define MAX_SEGMENT 6
+#define SMU_REGISTER_WRITE_RETRY_COUNT 5
struct IP_BASE_INSTANCE {
unsigned int segment[MAX_SEGMENT];
@@ -132,6 +133,8 @@ static int dcn315_smu_send_msg_with_param(
unsigned int msg_id, unsigned int param)
{
uint32_t result;
+ uint32_t i = 0;
+ uint32_t read_back_data;
result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
@@ -148,10 +151,19 @@ static int dcn315_smu_send_msg_with_param(
/* Set the parameter register for the SMU message, unit is Mhz */
REG_WRITE(MP1_SMN_C2PMSG_37, param);
- /* Trigger the message transaction by writing the message ID */
- generic_write_indirect_reg(CTX,
- REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
- mmMP1_C2PMSG_3, msg_id);
+ for (i = 0; i < SMU_REGISTER_WRITE_RETRY_COUNT; i++) {
+ /* Trigger the message transaction by writing the message ID */
+ generic_write_indirect_reg(CTX,
+ REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
+ mmMP1_C2PMSG_3, msg_id);
+ read_back_data = generic_read_indirect_reg(CTX,
+ REG_NBIO(RSMU_INDEX), REG_NBIO(RSMU_DATA),
+ mmMP1_C2PMSG_3);
+ if (read_back_data == msg_id)
+ break;
+ udelay(2);
+ smu_print("SMU msg id write fail %x times. \n", i + 1);
+ }
result = dcn315_smu_wait_for_response(clk_mgr, 10, 200000);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 566d7045b2de..3a9077b60029 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2073,12 +2073,12 @@ enum dc_status dc_commit_streams(struct dc *dc,
}
}
- /* Check for case where we are going from odm 2:1 to max
- * pipe scenario. For these cases, we will call
- * commit_minimal_transition_state() to exit out of odm 2:1
- * first before processing new streams
+ /* ODM Combine 2:1 power optimization is only applied in the single-stream
+ * scenario; it uses more pipes than needed to reduce power consumption.
+ * We need to switch off this feature to make room for new streams.
+ */
- if (stream_count == dc->res_pool->pipe_count) {
+ if (stream_count > dc->current_state->stream_count &&
+ dc->current_state->stream_count == 1) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->next_odm_pipe)
@@ -3501,6 +3501,45 @@ static void commit_planes_for_stream_fast(struct dc *dc,
top_pipe_to_program->stream->update_flags.raw = 0;
}
+static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+{
+/*
+ * This function calls HWSS to wait for any potentially double buffered
+ * operations to complete. It should be invoked as a pre-amble prior
+ * to full update programming before asserting any HW locks.
+ */
+ int pipe_idx;
+ int opp_inst;
+ int opp_count = dc->res_pool->pipe_count;
+ struct hubp *hubp;
+ int mpcc_inst;
+ const struct pipe_ctx *pipe_ctx;
+
+ for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
+ pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
+ pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
+
+ hubp = pipe_ctx->plane_res.hubp;
+ if (!hubp)
+ continue;
+
+ mpcc_inst = hubp->inst;
+ // MPCC inst is equal to pipe index in practice
+ for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+ if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
+ dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+ dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+ break;
+ }
+ }
+ }
+}
+
static void commit_planes_for_stream(struct dc *dc,
struct dc_surface_update *srf_updates,
int surface_count,
@@ -3519,24 +3558,9 @@ static void commit_planes_for_stream(struct dc *dc,
// dc->current_state anymore, so we have to cache it before we apply
// the new SubVP context
subvp_prev_use = false;
-
-
dc_z10_restore(dc);
-
- if (update_type == UPDATE_TYPE_FULL) {
- /* wait for all double-buffer activity to clear on all pipes */
- int pipe_idx;
-
- for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
-
- if (!pipe_ctx->stream)
- continue;
-
- if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
- pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
- }
- }
+ if (update_type == UPDATE_TYPE_FULL)
+ wait_for_outstanding_hw_updates(dc, context);
if (update_type == UPDATE_TYPE_FULL) {
dc_allow_idle_optimizations(dc, false);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 65fa9e21ad9c..e72f15ac0048 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1106,29 +1106,6 @@ void dcn20_blank_pixel_data(
v_active,
offset);
- if (!blank && dc->debug.enable_single_display_2to1_odm_policy) {
- /* when exiting dynamic ODM need to reinit DPG state for unused pipes */
- struct pipe_ctx *old_odm_pipe = dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx].next_odm_pipe;
-
- odm_pipe = pipe_ctx->next_odm_pipe;
-
- while (old_odm_pipe) {
- if (!odm_pipe || old_odm_pipe->pipe_idx != odm_pipe->pipe_idx)
- dc->hwss.set_disp_pattern_generator(dc,
- old_odm_pipe,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- CONTROLLER_DP_COLOR_SPACE_UDEFINED,
- COLOR_DEPTH_888,
- NULL,
- 0,
- 0,
- 0);
- old_odm_pipe = old_odm_pipe->next_odm_pipe;
- if (odm_pipe)
- odm_pipe = odm_pipe->next_odm_pipe;
- }
- }
-
if (!blank)
if (stream_res->abm) {
dc->hwss.set_pipe(pipe_ctx);
@@ -1584,17 +1561,6 @@ static void dcn20_update_dchubp_dpp(
|| plane_state->update_flags.bits.global_alpha_change
|| plane_state->update_flags.bits.per_pixel_alpha_change) {
// MPCC inst is equal to pipe index in practice
- int mpcc_inst = hubp->inst;
- int opp_inst;
- int opp_count = dc->res_pool->pipe_count;
-
- for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
- if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
- dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
- dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
- break;
- }
- }
hws->funcs.update_mpcc(dc, pipe_ctx);
}
@@ -1722,11 +1688,16 @@ static void dcn20_program_pipe(
struct dc_state *context)
{
struct dce_hwseq *hws = dc->hwseq;
- /* Only need to unblank on top pipe */
- if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level)
- && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
- hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
+ /* Only need to unblank on top pipe */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level)
+ hws->funcs.blank_pixel_data(dc, pipe_ctx,
+ !pipe_ctx->plane_state ||
+ !pipe_ctx->plane_state->visible);
+ }
/* Only update TG on top pipe */
if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 6cef62d7a2e5..255713ec29bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -987,3 +987,20 @@ void dcn30_prepare_bandwidth(struct dc *dc,
}
}
+void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params)
+{
+ unsigned int i;
+ unsigned int triggers = 0;
+
+ if (params->triggers.surface_update)
+ triggers |= 0x100;
+ if (params->triggers.cursor_update)
+ triggers |= 0x8;
+ if (params->triggers.force_trigger)
+ triggers |= 0x1;
+
+ for (i = 0; i < num_pipes; i++)
+ pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(pipe_ctx[i]->stream_res.tg,
+ triggers, params->num_frames);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
index a24a8e33a3d2..ce19c54097f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h
@@ -87,5 +87,7 @@ void dcn30_set_hubp_blank(const struct dc *dc,
void dcn30_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
+void dcn30_set_static_screen_control(struct pipe_ctx **pipe_ctx,
+ int num_pipes, const struct dc_static_screen_params *params);
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 3d19acaa12f3..0de8b2783cf6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -64,7 +64,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn10_set_static_screen_control,
+ .set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
index 257df8660b4c..61205cdbe2d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
@@ -75,6 +75,7 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dcn10_set_cursor_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index fc25cc300a17..1d7bc1e39afe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -67,7 +67,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn10_set_static_screen_control,
+ .set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index ca8fe55c33b8..4ef85c3a0688 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -69,7 +69,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn10_set_static_screen_control,
+ .set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index 777b2fac20c4..c7417147dff1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -65,7 +65,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.update_bandwidth = dcn20_update_bandwidth,
.set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn10_set_static_screen_control,
+ .set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 935cd23e6a01..f9d601c8c721 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -2564,18 +2564,128 @@ static int find_optimal_free_pipe_as_secondary_dpp_pipe(
return free_pipe_idx;
}
+static struct pipe_ctx *find_idle_secondary_pipe_check_mpo(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe)
+{
+ int i;
+ struct pipe_ctx *secondary_pipe = NULL;
+ struct pipe_ctx *next_odm_mpo_pipe = NULL;
+ int primary_index, preferred_pipe_idx;
+ struct pipe_ctx *old_primary_pipe = NULL;
+
+ /*
+ * Modified from find_idle_secondary_pipe
+ * With windowed MPO and ODM, we want to avoid the case where we want a
+ * free pipe for the left side but the free pipe is being used on the
+ * right side.
+	 * Add a check on current_state: if the primary_pipe is the left side,
+	 * check the right side ( primary_pipe->next_odm_pipe ) to see if
+ * it is using a pipe for MPO ( primary_pipe->next_odm_pipe->bottom_pipe )
+ * - If so, then don't use this pipe
+ * EXCEPTION - 3 plane ( 2 MPO plane ) case
+ * - in this case, the primary pipe has already gotten a free pipe for the
+	 *    MPO window on the left
+ * - when it tries to get a free pipe for the MPO window on the right,
+ * it will see that it is already assigned to the right side
+ * ( primary_pipe->next_odm_pipe ). But in this case, we want this
+ * free pipe, since it will be for the right side. So add an
+	 *    additional condition that skipping the free pipe on the right only
+ * applies if the primary pipe has no bottom pipe currently assigned
+ */
+ if (primary_pipe) {
+ primary_index = primary_pipe->pipe_idx;
+ old_primary_pipe = &primary_pipe->stream->ctx->dc->current_state->res_ctx.pipe_ctx[primary_index];
+ if ((old_primary_pipe->next_odm_pipe) && (old_primary_pipe->next_odm_pipe->bottom_pipe)
+ && (!primary_pipe->bottom_pipe))
+ next_odm_mpo_pipe = old_primary_pipe->next_odm_pipe->bottom_pipe;
+
+ preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx;
+ if ((res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) &&
+ !(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == preferred_pipe_idx)) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
+ }
+ }
+
+ /*
+ * search backwards for the second pipe to keep pipe
+ * assignment more consistent
+ */
+ if (!secondary_pipe)
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
+ if ((res_ctx->pipe_ctx[i].stream == NULL) &&
+ !(next_odm_mpo_pipe && next_odm_mpo_pipe->pipe_idx == i)) {
+ secondary_pipe = &res_ctx->pipe_ctx[i];
+ secondary_pipe->pipe_idx = i;
+ break;
+ }
+ }
+
+ return secondary_pipe;
+}
+
+static struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
+ struct dc_state *state,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream,
+ const struct pipe_ctx *head_pipe)
+{
+ struct resource_context *res_ctx = &state->res_ctx;
+ struct pipe_ctx *idle_pipe, *pipe;
+ struct resource_context *old_ctx = &stream->ctx->dc->current_state->res_ctx;
+ int head_index;
+
+ if (!head_pipe)
+ ASSERT(0);
+
+ /*
+ * Modified from dcn20_acquire_idle_pipe_for_layer
+ * Check if head_pipe in old_context already has bottom_pipe allocated.
+ * - If so, check if that pipe is available in the current context.
+ * -- If so, reuse pipe from old_context
+ */
+ head_index = head_pipe->pipe_idx;
+ pipe = &old_ctx->pipe_ctx[head_index];
+ if (pipe->bottom_pipe && res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx].stream == NULL) {
+ idle_pipe = &res_ctx->pipe_ctx[pipe->bottom_pipe->pipe_idx];
+ idle_pipe->pipe_idx = pipe->bottom_pipe->pipe_idx;
+ } else {
+ idle_pipe = find_idle_secondary_pipe_check_mpo(res_ctx, pool, head_pipe);
+ if (!idle_pipe)
+ return NULL;
+ }
+
+ idle_pipe->stream = head_pipe->stream;
+ idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
+ idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
+
+ idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
+
+ return idle_pipe;
+}
+
struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe(
const struct dc_state *cur_ctx,
struct dc_state *new_ctx,
const struct resource_pool *pool,
const struct pipe_ctx *opp_head_pipe)
{
- int free_pipe_idx =
- find_optimal_free_pipe_as_secondary_dpp_pipe(
- &cur_ctx->res_ctx, &new_ctx->res_ctx,
- pool, opp_head_pipe);
+
+ int free_pipe_idx;
struct pipe_ctx *free_pipe;
+ if (!opp_head_pipe->stream->ctx->dc->config.enable_windowed_mpo_odm)
+ return dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
+ new_ctx, pool, opp_head_pipe->stream, opp_head_pipe);
+
+ free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe(
+ &cur_ctx->res_ctx, &new_ctx->res_ctx,
+ pool, opp_head_pipe);
if (free_pipe_idx >= 0) {
free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx];
free_pipe->pipe_idx = free_pipe_idx;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 8afda5ecc0cd..5805fb02af14 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -1099,6 +1099,11 @@ void dcn20_calculate_dlg_params(struct dc *dc,
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
+ if (dc->ctx->dce_version < DCN_VERSION_3_1 &&
+ context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
+ dcn20_adjust_freesync_v_startup(
+ &context->res_ctx.pipe_ctx[i].stream->timing,
+ &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
pipe_idx++;
}
@@ -1927,7 +1932,6 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
- int i = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
@@ -1951,15 +1955,6 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
- if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
- dcn20_adjust_freesync_v_startup(
- &context->res_ctx.pipe_ctx[i].stream->timing,
- &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
- }
-
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;
@@ -2232,7 +2227,6 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
int vlevel = 0;
int pipe_split_from[MAX_PIPES];
int pipe_cnt = 0;
- int i = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
DC_LOGGER_INIT(dc->ctx->logger);
@@ -2261,15 +2255,6 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (!context->res_ctx.pipe_ctx[i].stream)
- continue;
- if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
- dcn20_adjust_freesync_v_startup(
- &context->res_ctx.pipe_ctx[i].stream->timing,
- &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
- }
-
BW_VAL_TRACE_END_WATERMARKS();
goto validate_out;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 07adb614366e..fb21572750e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -293,6 +293,17 @@ static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_
return num_lines;
}
+static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
+{
+ unsigned int v_active = 0, v_blank = 0, v_back_porch = 0;
+
+ v_active = timing->v_border_top + timing->v_addressable + timing->v_border_bottom;
+ v_blank = timing->v_total - v_active;
+ v_back_porch = v_blank - timing->v_front_porch - timing->v_sync_width;
+
+ return v_back_porch;
+}
+
int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
bool fast_validate)
@@ -310,6 +321,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_crtc_timing *timing;
unsigned int num_lines = 0;
+ unsigned int v_back_porch = 0;
if (!res_ctx->pipe_ctx[i].stream)
continue;
@@ -323,9 +335,16 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
else
pipes[pipe_cnt].pipe.dest.vtotal = timing->v_total;
+ v_back_porch = get_vertical_back_porch(timing);
+
pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines);
- pipes[pipe_cnt].pipe.dest.vblank_nom = max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width);
+	// vblank_nom should not be smaller than (VSync (timing->v_sync_width + v_back_porch) + 2)
+	// + 2 is because
+	// 1 -> VStartup_start should be 1 line before VSync
+	// 1 -> always reserve 1 line between the start of vblank and the vstartup signal
+ pipes[pipe_cnt].pipe.dest.vblank_nom =
+ max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width + v_back_porch + 2);
pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
if (pipe->plane_state &&
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index dbd60811f95d..ef3a67409021 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -338,7 +338,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
* - Delta for CEIL: delta_from_mid_point_in_us_1
* - Delta for FLOOR: delta_from_mid_point_in_us_2
*/
- if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) {
+ if (mid_point_frames_ceil &&
+ (last_render_time_in_us / mid_point_frames_ceil) <
+ in_out_vrr->min_duration_in_us) {
/* Check for out of range.
* If using CEIL produces a value that is out of range,
* then we are forced to use FLOOR.
@@ -385,8 +387,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Either we've calculated the number of frames to insert,
* or we need to insert min duration frames
*/
- if (last_render_time_in_us / frames_to_insert <
- in_out_vrr->min_duration_in_us){
+ if (frames_to_insert &&
+ (last_render_time_in_us / frames_to_insert) <
+ in_out_vrr->min_duration_in_us){
frames_to_insert -= (frames_to_insert > 1) ?
1 : 0;
}
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index abe829bbd54a..67d7b7ee8a2a 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -240,6 +240,7 @@ enum DC_FEATURE_MASK {
DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
+ DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
};
enum DC_DEBUG_MASK {
@@ -250,6 +251,7 @@ enum DC_DEBUG_MASK {
DC_DISABLE_PSR = 0x10,
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
DC_DISABLE_MPO = 0x40,
+	DC_DISABLE_REPLAY = 0x100,
DC_ENABLE_DPIA_TRACE = 0x80,
};
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index e68c1e280322..fa7d6ced786f 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -3117,6 +3117,24 @@ enum atom_umc_config1_def {
UMC_CONFIG1__ENABLE_ECC_CAPABLE = 0x00010000,
};
+struct atom_umc_info_v4_0 {
+ struct atom_common_table_header table_header;
+ uint32_t ucode_reserved[5];
+ uint8_t umcip_min_ver;
+ uint8_t umcip_max_ver;
+ uint8_t vram_type;
+ uint8_t umc_config;
+ uint32_t mem_refclk_10khz;
+ uint32_t clk_reserved[4];
+ uint32_t golden_reserved;
+ uint32_t umc_config1;
+ uint32_t reserved[2];
+ uint8_t channel_num;
+ uint8_t channel_width;
+ uint8_t channel_reserve[2];
+ uint8_t umc_info_reserved[16];
+};
+
/*
***************************************************************************
Data Table vram_info structure
diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
index f43e29722ef7..7a9d473d0917 100644
--- a/drivers/gpu/drm/amd/include/discovery.h
+++ b/drivers/gpu/drm/amd/include/discovery.h
@@ -30,7 +30,7 @@
#define GC_TABLE_ID 0x4347
#define HARVEST_TABLE_SIGNATURE 0x56524148
#define VCN_INFO_TABLE_ID 0x004E4356
-#define MALL_INFO_TABLE_ID 0x4D414C4C
+#define MALL_INFO_TABLE_ID 0x4C4C414D
typedef enum
{
@@ -280,6 +280,36 @@ struct gc_info_v2_0 {
uint32_t gc_num_packer_per_sc;
};
+struct gc_info_v2_1 {
+ struct gpu_info_header header;
+
+ uint32_t gc_num_se;
+ uint32_t gc_num_cu_per_sh;
+ uint32_t gc_num_sh_per_se;
+ uint32_t gc_num_rb_per_se;
+ uint32_t gc_num_tccs;
+ uint32_t gc_num_gprs;
+ uint32_t gc_num_max_gs_thds;
+ uint32_t gc_gs_table_depth;
+ uint32_t gc_gsprim_buff_depth;
+ uint32_t gc_parameter_cache_depth;
+ uint32_t gc_double_offchip_lds_buffer;
+ uint32_t gc_wave_size;
+ uint32_t gc_max_waves_per_simd;
+ uint32_t gc_max_scratch_slots_per_cu;
+ uint32_t gc_lds_size;
+ uint32_t gc_num_sc_per_se;
+ uint32_t gc_num_packer_per_sc;
+ /* new for v2_1 */
+ uint32_t gc_num_tcp_per_sh;
+ uint32_t gc_tcp_size_per_cu;
+ uint32_t gc_num_sdp_interface;
+ uint32_t gc_num_cu_per_sqc;
+ uint32_t gc_instruction_cache_size_per_sqc;
+ uint32_t gc_scalar_data_cache_size_per_sqc;
+ uint32_t gc_tcc_size;
+};
+
typedef struct harvest_info_header {
uint32_t signature; /* Table Signature */
uint32_t version; /* Table Version */
@@ -312,6 +342,12 @@ struct mall_info_v1_0 {
uint32_t reserved[5];
};
+struct mall_info_v2_0 {
+ struct mall_info_header header;
+ uint32_t mall_size_per_umc;
+ uint32_t reserved[8];
+};
+
#define VCN_INFO_TABLE_MAX_NUM_INSTANCES 4
struct vcn_info_header {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 5b1d73b00ef7..41147da54458 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -3311,8 +3311,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
(gc_ver != IP_VERSION(9, 4, 3)) &&
(attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
+ attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
+ attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
return 0;
/* hotspot temperature for gc 9,4,3*/
@@ -3324,9 +3326,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* only SOC15 dGPUs support hotspot and mem temperatures */
if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0) ||
(gc_ver == IP_VERSION(9, 4, 3))) &&
- (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
- attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
- attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
+ (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
@@ -3471,6 +3471,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
size = sizeof(uint32_t);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size))
seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
+ size = sizeof(uint32_t);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size))
+ seq_printf(m, "\t%u.%u W (current GPU)\n", query >> 8, query & 0xff);
size = sizeof(value);
seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 95eb8a5eb54f..5a52098bcf16 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1031,10 +1031,7 @@ struct pptable_funcs {
enum smu_feature_mask mask);
/**
- * @notify_display_change: Enable fast memory clock switching.
- *
- * Allows for fine grained memory clock switching but has more stringent
- * timing requirements.
+	 * @notify_display_change: General interface call to let the SMU know about a DC change
*/
int (*notify_display_change)(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
index 10cff75b44d5..e2ee855c7748 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
@@ -138,7 +138,10 @@
#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4A
#define PPSMC_MSG_SetPriorityDeltaGain 0x4B
#define PPSMC_MSG_AllowIHHostInterrupt 0x4C
-#define PPSMC_Message_Count 0x4D
+
+#define PPSMC_MSG_DALNotPresent 0x4E
+
+#define PPSMC_Message_Count 0x4F
//Debug Dump Message
#define DEBUGSMC_MSG_TestMessage 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
index 252aef190c5c..9be4051c0865 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h
@@ -123,7 +123,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
-#define SMU_METRICS_TABLE_VERSION 0x5
+#define SMU_METRICS_TABLE_VERSION 0x7
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@@ -198,7 +198,7 @@ typedef struct __attribute__((packed, aligned(4))) {
uint32_t SocketThmResidencyAcc;
uint32_t VrThmResidencyAcc;
uint32_t HbmThmResidencyAcc;
- uint32_t spare;
+ uint32_t GfxLockXCDMak;
// New Items at end to maintain driver compatibility
uint32_t GfxclkFrequency[8];
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index ae4f44c4b877..70a4a717fd3f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -83,13 +83,27 @@
#define PPSMC_MSG_GetMinGfxDpmFreq 0x32
#define PPSMC_MSG_GetMaxGfxDpmFreq 0x33
#define PPSMC_MSG_PrepareForDriverUnload 0x34
-#define PPSMC_Message_Count 0x35
+#define PPSMC_MSG_ReadThrottlerLimit 0x35
+#define PPSMC_MSG_QueryValidMcaCount 0x36
+#define PPSMC_MSG_McaBankDumpDW 0x37
+#define PPSMC_MSG_GetCTFLimit 0x38
+#define PPSMC_Message_Count 0x39
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x2
#define PPSMC_RESET_TYPE_DRIVER_MODE_3_RESET 0x3
+//PPSMC Throttling Limit Types for driver msg argument
+#define PPSMC_THROTTLING_LIMIT_TYPE_SOCKET 0x1
+#define PPSMC_THROTTLING_LIMIT_TYPE_HBM 0x2
+
+//CTF/Throttle Limit types
+#define PPSMC_AID_THM_TYPE 0x1
+#define PPSMC_CCD_THM_TYPE 0x2
+#define PPSMC_XCD_THM_TYPE 0x3
+#define PPSMC_HBM_THM_TYPE 0x4
+
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_MSG;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 297b70b9388f..e57265cf637c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -84,6 +84,7 @@
__SMU_DUMMY_MAP(SetTjMax), \
__SMU_DUMMY_MAP(SetFanTemperatureTarget), \
__SMU_DUMMY_MAP(PrepareMp1ForUnload), \
+ __SMU_DUMMY_MAP(GetCTFLimit), \
__SMU_DUMMY_MAP(DramLogSetDramAddrHigh), \
__SMU_DUMMY_MAP(DramLogSetDramAddrLow), \
__SMU_DUMMY_MAP(DramLogSetDramSize), \
@@ -245,7 +246,8 @@
__SMU_DUMMY_MAP(AllowGpo), \
__SMU_DUMMY_MAP(Mode2Reset), \
__SMU_DUMMY_MAP(RequestI2cTransaction), \
- __SMU_DUMMY_MAP(GetMetricsTable),
+ __SMU_DUMMY_MAP(GetMetricsTable), \
+ __SMU_DUMMY_MAP(DALNotPresent),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index f1282fc4b90a..0232adb95df3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -837,12 +837,8 @@ int smu_v13_0_notify_display_change(struct smu_context *smu)
{
int ret = 0;
- if (!smu->pm_enabled)
- return ret;
-
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
- smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
+ if (!amdgpu_device_has_dc_support(smu->adev))
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DALNotPresent, NULL);
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 8b7403ba89d7..3903a47669e4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -162,6 +162,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
+ MSG_MAP(DALNotPresent, PPSMC_MSG_DALNotPresent, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -2687,6 +2688,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.send_hbm_bad_channel_flag = smu_v13_0_0_send_bad_mem_channel_flag,
.gpo_control = smu_v13_0_gpo_control,
.get_ecc_info = smu_v13_0_0_get_ecc_info,
+ .notify_display_change = smu_v13_0_notify_display_change,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 6ed9cd0a1e4e..199a673b8120 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -132,6 +132,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxClk, 0),
MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareForDriverUnload, 0),
+ MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0),
};
static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
@@ -2081,6 +2082,55 @@ out:
return ret;
}
+static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
+ struct smu_temperature_range *range)
+{
+ struct amdgpu_device *adev = smu->adev;
+ u32 aid_temp, xcd_temp, mem_temp;
+ uint32_t smu_version;
+ u32 ccd_temp = 0;
+ int ret;
+
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
+ if (!range)
+ return -EINVAL;
+
+	/* Check SMU version; the GetCTFLimit message is only supported for SMU version 85.69 or higher */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (smu_version < 0x554500)
+ return 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
+ PPSMC_AID_THM_TYPE, &aid_temp);
+ if (ret)
+ goto failed;
+
+ if (adev->flags & AMD_IS_APU) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
+ PPSMC_CCD_THM_TYPE, &ccd_temp);
+ if (ret)
+ goto failed;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
+ PPSMC_XCD_THM_TYPE, &xcd_temp);
+ if (ret)
+ goto failed;
+
+ range->hotspot_crit_max = max3(aid_temp, xcd_temp, ccd_temp) *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
+ PPSMC_HBM_THM_TYPE, &mem_temp);
+ if (ret)
+ goto failed;
+
+ range->mem_crit_max = mem_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+failed:
+ return ret;
+}
+
static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -2108,8 +2158,7 @@ static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
{
- /* TODO: Enable this when FW support is added */
- return false;
+ return true;
}
static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
@@ -2177,6 +2226,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
+ .get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
.mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
.mode1_reset = smu_v13_0_6_mode1_reset,
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 61a5d450cc20..d612133e2cf7 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -301,7 +301,7 @@ static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
-static void drm_fb_helper_sysrq(int dummy1)
+static void drm_fb_helper_sysrq(u8 dummy1)
{
schedule_work(&drm_fb_helper_restore_work);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index e99a6fa03d45..a7e677598004 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -58,6 +58,7 @@ struct i915_perf_group;
typedef u32 intel_engine_mask_t;
#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
+#define VIRTUAL_ENGINES BIT(BITS_PER_TYPE(intel_engine_mask_t) - 1)
struct intel_hw_status_page {
struct list_head timelines;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index a0e3ef1c65d2..b5b7f2fe8c78 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -5470,6 +5470,9 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
ve->base.flags = I915_ENGINE_IS_VIRTUAL;
+ BUILD_BUG_ON(ilog2(VIRTUAL_ENGINES) < I915_NUM_ENGINES);
+ ve->base.mask = VIRTUAL_ENGINES;
+
intel_context_init(&ve->context, &ve->base);
for (n = 0; n < count; n++) {
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 4ec85308379a..094fca9b0e73 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -49,22 +49,6 @@
static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;
-static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
-{
- struct kvm *kvm = vgpu->vfio_device.kvm;
- int idx;
- bool ret;
-
- if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
- return false;
-
- idx = srcu_read_lock(&kvm->srcu);
- ret = kvm_is_visible_gfn(kvm, gfn);
- srcu_read_unlock(&kvm->srcu, idx);
-
- return ret;
-}
-
/*
* validate a gm address and related range size,
* translate it to host gm address
@@ -1161,31 +1145,6 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
ops->set_pfn(se, s->shadow_page.mfn);
}
-/*
- * Check if can do 2M page
- * @vgpu: target vgpu
- * @entry: target pfn's gtt entry
- *
- * Return 1 if 2MB huge gtt shadowing is possible, 0 if miscondition,
- * negative if found err.
- */
-static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
- struct intel_gvt_gtt_entry *entry)
-{
- const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
- kvm_pfn_t pfn;
-
- if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
- return 0;
-
- if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
- return -EINVAL;
- pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
- if (is_error_noslot_pfn(pfn))
- return -EINVAL;
- return PageTransHuge(pfn_to_page(pfn));
-}
-
static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
struct intel_gvt_gtt_entry *se)
@@ -1279,7 +1238,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
{
const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry se = *ge;
- unsigned long gfn, page_size = PAGE_SIZE;
+ unsigned long gfn;
dma_addr_t dma_addr;
int ret;
@@ -1291,6 +1250,9 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
switch (ge->type) {
case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
gvt_vdbg_mm("shadow 4K gtt entry\n");
+ ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE, &dma_addr);
+ if (ret)
+ return -ENXIO;
break;
case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
gvt_vdbg_mm("shadow 64K gtt entry\n");
@@ -1302,25 +1264,20 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
return split_64KB_gtt_entry(vgpu, spt, index, &se);
case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
gvt_vdbg_mm("shadow 2M gtt entry\n");
- ret = is_2MB_gtt_possible(vgpu, ge);
- if (ret == 0)
+ if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M) ||
+ intel_gvt_dma_map_guest_page(vgpu, gfn,
+ I915_GTT_PAGE_SIZE_2M, &dma_addr))
return split_2MB_gtt_entry(vgpu, spt, index, &se);
- else if (ret < 0)
- return ret;
- page_size = I915_GTT_PAGE_SIZE_2M;
break;
case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
gvt_vgpu_err("GVT doesn't support 1GB entry\n");
return -EINVAL;
default:
GEM_BUG_ON(1);
+ return -EINVAL;
}
- /* direct shadow */
- ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr);
- if (ret)
- return -ENXIO;
-
+ /* Successfully shadowed a 4K or 2M page (without splitting). */
pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
ppgtt_set_shadow_entry(spt, &se, index);
return 0;
@@ -1329,11 +1286,9 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
struct intel_vgpu *vgpu = spt->vgpu;
- struct intel_gvt *gvt = vgpu->gvt;
- const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
struct intel_gvt_gtt_entry se, ge;
- unsigned long gfn, i;
+ unsigned long i;
int ret;
trace_spt_change(spt->vgpu->id, "born", spt,
@@ -1350,13 +1305,6 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
ppgtt_generate_shadow_entry(&se, s, &ge);
ppgtt_set_shadow_entry(spt, &se, i);
} else {
- gfn = ops->get_pfn(&ge);
- if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
- ops->set_pfn(&se, gvt->gtt.scratch_mfn);
- ppgtt_set_shadow_entry(spt, &se, i);
- continue;
- }
-
ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
if (ret)
goto fail;
@@ -1845,6 +1793,9 @@ static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
if (mm->ppgtt_mm.shadowed)
return 0;
+ if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+ return -EINVAL;
+
mm->ppgtt_mm.shadowed = true;
for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
@@ -2331,14 +2282,6 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
m.val64 = e.val64;
m.type = e.type;
- /* one PTE update may be issued in multiple writes and the
- * first write may not construct a valid gfn
- */
- if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
- ops->set_pfn(&m, gvt->gtt.scratch_mfn);
- goto out;
- }
-
ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
&dma_addr);
if (ret) {
@@ -2355,7 +2298,6 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
ops->clear_present(&m);
}
-out:
ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
@@ -2876,24 +2818,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
}
/**
- * intel_vgpu_reset_gtt - reset the all GTT related status
- * @vgpu: a vGPU
- *
- * This function is called from vfio core to reset reset all
- * GTT related status, including GGTT, PPGTT, scratch page.
- *
- */
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
-{
- /* Shadow pages are only created when there is no page
- * table tracking data, so remove page tracking data after
- * removing the shadow pages.
- */
- intel_vgpu_destroy_all_ppgtt_mm(vgpu);
- intel_vgpu_reset_ggtt(vgpu, true);
-}
-
-/**
* intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
* @gvt: intel gvt device
*
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index a3b0f59ec8bd..4cb183e06e95 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -224,7 +224,6 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
int intel_gvt_init_gtt(struct intel_gvt *gvt);
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
void intel_gvt_clean_gtt(struct intel_gvt *gvt);
struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 2d65800d8e93..53a0a42a50db 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -34,10 +34,11 @@
#define _GVT_H_
#include <uapi/linux/pci_regs.h>
-#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
+#include <asm/kvm_page_track.h>
+
#include "i915_drv.h"
#include "intel_gvt.h"
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 9cd9e9da60dd..42ce20e72db7 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -106,12 +106,10 @@ struct gvt_dma {
#define vfio_dev_to_vgpu(vfio_dev) \
container_of((vfio_dev), struct intel_vgpu, vfio_device)
-static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *val, int len,
- struct kvm_page_track_notifier_node *node);
-static void kvmgt_page_track_flush_slot(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- struct kvm_page_track_notifier_node *node);
+static void kvmgt_page_track_write(gpa_t gpa, const u8 *val, int len,
+ struct kvm_page_track_notifier_node *node);
+static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages,
+ struct kvm_page_track_notifier_node *node);
static ssize_t intel_vgpu_show_description(struct mdev_type *mtype, char *buf)
{
@@ -161,8 +159,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
if (npage == 0)
base_page = cur_page;
- else if (base_page + npage != cur_page) {
- gvt_vgpu_err("The pages are not continuous\n");
+ else if (page_to_pfn(base_page) + npage != page_to_pfn(cur_page)) {
ret = -EINVAL;
npage++;
goto err;
@@ -172,7 +169,8 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
*page = base_page;
return 0;
err:
- gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
+ if (npage)
+ gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
return ret;
}
@@ -352,6 +350,8 @@ __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn)
{
struct kvmgt_pgfn *p, *res = NULL;
+ lockdep_assert_held(&info->vgpu_lock);
+
hash_for_each_possible(info->ptable, p, hnode, gfn) {
if (gfn == p->gfn) {
res = p;
@@ -654,21 +654,19 @@ out:
static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
{
struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
-
- if (!vgpu->vfio_device.kvm ||
- vgpu->vfio_device.kvm->mm != current->mm) {
- gvt_vgpu_err("KVM is required to use Intel vGPU\n");
- return -ESRCH;
- }
+ int ret;
if (__kvmgt_vgpu_exist(vgpu))
return -EEXIST;
vgpu->track_node.track_write = kvmgt_page_track_write;
- vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
- kvm_get_kvm(vgpu->vfio_device.kvm);
- kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
- &vgpu->track_node);
+ vgpu->track_node.track_remove_region = kvmgt_page_track_remove_region;
+ ret = kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
+ &vgpu->track_node);
+ if (ret) {
+ gvt_vgpu_err("KVM is required to use Intel vGPU\n");
+ return ret;
+ }
set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
@@ -703,7 +701,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
&vgpu->track_node);
- kvm_put_kvm(vgpu->vfio_device.kvm);
kvmgt_protect_table_destroy(vgpu);
gvt_cache_destroy(vgpu);
@@ -1547,95 +1544,70 @@ static struct mdev_driver intel_vgpu_mdev_driver = {
int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
{
- struct kvm *kvm = info->vfio_device.kvm;
- struct kvm_memory_slot *slot;
- int idx;
+ int r;
if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
return -ESRCH;
- idx = srcu_read_lock(&kvm->srcu);
- slot = gfn_to_memslot(kvm, gfn);
- if (!slot) {
- srcu_read_unlock(&kvm->srcu, idx);
- return -EINVAL;
- }
-
- write_lock(&kvm->mmu_lock);
-
if (kvmgt_gfn_is_write_protected(info, gfn))
- goto out;
+ return 0;
- kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
- kvmgt_protect_table_add(info, gfn);
+ r = kvm_write_track_add_gfn(info->vfio_device.kvm, gfn);
+ if (r)
+ return r;
-out:
- write_unlock(&kvm->mmu_lock);
- srcu_read_unlock(&kvm->srcu, idx);
+ kvmgt_protect_table_add(info, gfn);
return 0;
}
int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
{
- struct kvm *kvm = info->vfio_device.kvm;
- struct kvm_memory_slot *slot;
- int idx;
+ int r;
if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
return -ESRCH;
- idx = srcu_read_lock(&kvm->srcu);
- slot = gfn_to_memslot(kvm, gfn);
- if (!slot) {
- srcu_read_unlock(&kvm->srcu, idx);
- return -EINVAL;
- }
-
- write_lock(&kvm->mmu_lock);
-
if (!kvmgt_gfn_is_write_protected(info, gfn))
- goto out;
+ return 0;
- kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
- kvmgt_protect_table_del(info, gfn);
+ r = kvm_write_track_remove_gfn(info->vfio_device.kvm, gfn);
+ if (r)
+ return r;
-out:
- write_unlock(&kvm->mmu_lock);
- srcu_read_unlock(&kvm->srcu, idx);
+ kvmgt_protect_table_del(info, gfn);
return 0;
}
-static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *val, int len,
- struct kvm_page_track_notifier_node *node)
+static void kvmgt_page_track_write(gpa_t gpa, const u8 *val, int len,
+ struct kvm_page_track_notifier_node *node)
{
struct intel_vgpu *info =
container_of(node, struct intel_vgpu, track_node);
- if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
+ mutex_lock(&info->vgpu_lock);
+
+ if (kvmgt_gfn_is_write_protected(info, gpa >> PAGE_SHIFT))
intel_vgpu_page_track_handler(info, gpa,
(void *)val, len);
+
+ mutex_unlock(&info->vgpu_lock);
}
-static void kvmgt_page_track_flush_slot(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- struct kvm_page_track_notifier_node *node)
+static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages,
+ struct kvm_page_track_notifier_node *node)
{
- int i;
- gfn_t gfn;
+ unsigned long i;
struct intel_vgpu *info =
container_of(node, struct intel_vgpu, track_node);
- write_lock(&kvm->mmu_lock);
- for (i = 0; i < slot->npages; i++) {
- gfn = slot->base_gfn + i;
- if (kvmgt_gfn_is_write_protected(info, gfn)) {
- kvm_slot_page_track_remove_page(kvm, slot, gfn,
- KVM_PAGE_TRACK_WRITE);
- kvmgt_protect_table_del(info, gfn);
- }
+ mutex_lock(&info->vgpu_lock);
+
+ for (i = 0; i < nr_pages; i++) {
+ if (kvmgt_gfn_is_write_protected(info, gfn + i))
+ kvmgt_protect_table_del(info, gfn + i);
}
- write_unlock(&kvm->mmu_lock);
+
+ mutex_unlock(&info->vgpu_lock);
}
void intel_vgpu_detach_regions(struct intel_vgpu *vgpu)
diff --git a/drivers/gpu/drm/i915/gvt/page_track.c b/drivers/gpu/drm/i915/gvt/page_track.c
index df34e73cba41..60a65435556d 100644
--- a/drivers/gpu/drm/i915/gvt/page_track.c
+++ b/drivers/gpu/drm/i915/gvt/page_track.c
@@ -162,13 +162,9 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
struct intel_vgpu_page_track *page_track;
int ret = 0;
- mutex_lock(&vgpu->vgpu_lock);
-
page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
- if (!page_track) {
- ret = -ENXIO;
- goto out;
- }
+ if (!page_track)
+ return -ENXIO;
if (unlikely(vgpu->failsafe)) {
/* Remove write protection to prevent furture traps. */
@@ -179,7 +175,5 @@ int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
gvt_err("guest page write error, gpa %llx\n", gpa);
}
-out:
- mutex_unlock(&vgpu->vgpu_lock);
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 7c7da284990d..f59081066a19 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -134,9 +134,7 @@ static void i915_fence_release(struct dma_fence *fence)
i915_sw_fence_fini(&rq->semaphore);
/*
- * Keep one request on each engine for reserved use under mempressure
- * do not use with virtual engines as this really is only needed for
- * kernel contexts.
+ * Keep one request on each engine for reserved use under mempressure.
*
* We do not hold a reference to the engine here and so have to be
* very careful in what rq->engine we poke. The virtual engine is
@@ -166,8 +164,7 @@ static void i915_fence_release(struct dma_fence *fence)
* know that if the rq->execution_mask is a single bit, rq->engine
* can be a physical engine with the exact corresponding mask.
*/
- if (!intel_engine_is_virtual(rq->engine) &&
- is_power_of_2(rq->execution_mask) &&
+ if (is_power_of_2(rq->execution_mask) &&
!cmpxchg(&rq->engine->request_pool, NULL, rq))
return;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index a34924523133..a34917b048f9 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1122,18 +1122,11 @@ nv04_page_flip_emit(struct nouveau_channel *chan,
PUSH_NVSQ(push, NV_SW, NV_SW_PAGE_FLIP, 0x00000000);
PUSH_KICK(push);
- ret = nouveau_fence_new(pfence);
+ ret = nouveau_fence_new(pfence, chan);
if (ret)
goto fail;
- ret = nouveau_fence_emit(*pfence, chan);
- if (ret)
- goto fail_fence_unref;
-
return 0;
-
-fail_fence_unref:
- nouveau_fence_unref(pfence);
fail:
spin_lock_irqsave(&dev->event_lock, flags);
list_del(&s->head);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 19cab37ac69c..0f3bd187ede6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -875,16 +875,10 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
if (ret)
goto out_unlock;
- ret = nouveau_fence_new(&fence);
+ ret = nouveau_fence_new(&fence, chan);
if (ret)
goto out_unlock;
- ret = nouveau_fence_emit(fence, chan);
- if (ret) {
- nouveau_fence_unref(&fence);
- goto out_unlock;
- }
-
/* TODO: figure out a better solution here
*
* wait on the fence here explicitly as going through
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 1fd5ccf41128..bb3d6e5c122f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -70,11 +70,9 @@ nouveau_channel_idle(struct nouveau_channel *chan)
struct nouveau_fence *fence = NULL;
int ret;
- ret = nouveau_fence_new(&fence);
+ ret = nouveau_fence_new(&fence, chan);
if (!ret) {
- ret = nouveau_fence_emit(fence, chan);
- if (!ret)
- ret = nouveau_fence_wait(fence, false, false);
+ ret = nouveau_fence_wait(fence, false, false);
nouveau_fence_unref(&fence);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 61e84562094a..12feecf71e75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -209,8 +209,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
goto done;
}
- if (!nouveau_fence_new(&fence))
- nouveau_fence_emit(fence, dmem->migrate.chan);
+ nouveau_fence_new(&fence, dmem->migrate.chan);
migrate_vma_pages(&args);
nouveau_dmem_fence_done(&fence);
dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -403,8 +402,7 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
}
}
- if (!nouveau_fence_new(&fence))
- nouveau_fence_emit(fence, chunk->drm->dmem->migrate.chan);
+ nouveau_fence_new(&fence, chunk->drm->dmem->migrate.chan);
migrate_device_pages(src_pfns, dst_pfns, npages);
nouveau_dmem_fence_done(&fence);
migrate_device_finalize(src_pfns, dst_pfns, npages);
@@ -677,8 +675,7 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
addr += PAGE_SIZE;
}
- if (!nouveau_fence_new(&fence))
- nouveau_fence_emit(fence, drm->dmem->migrate.chan);
+ nouveau_fence_new(&fence, drm->dmem->migrate.chan);
migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence);
nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index a90c4cd8cbb2..19024ce21fbb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -96,7 +96,8 @@ nouveau_exec_job_submit(struct nouveau_job *job)
unsigned long index;
int ret;
- ret = nouveau_fence_new(&exec_job->fence);
+ /* Create a new fence, but do not emit yet. */
+ ret = nouveau_fence_create(&exec_job->fence, exec_job->chan);
if (ret)
return ret;
@@ -170,13 +171,17 @@ nouveau_exec_job_run(struct nouveau_job *job)
nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
}
- ret = nouveau_fence_emit(fence, chan);
+ ret = nouveau_fence_emit(fence);
if (ret) {
+ nouveau_fence_unref(&exec_job->fence);
NV_PRINTK(err, job->cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
return ERR_PTR(ret);
}
+	/* The fence was emitted successfully; set the job's fence pointer to
+	 * NULL to avoid freeing it when the job is cleaned up.
+ */
exec_job->fence = NULL;
return &fence->base;
@@ -189,7 +194,7 @@ nouveau_exec_job_free(struct nouveau_job *job)
nouveau_job_free(job);
- nouveau_fence_unref(&exec_job->fence);
+ kfree(exec_job->fence);
kfree(exec_job->push.s);
kfree(exec_job);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 77c739a55b19..61d9e70da9fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -205,16 +205,13 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
}
int
-nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
+nouveau_fence_emit(struct nouveau_fence *fence)
{
+ struct nouveau_channel *chan = fence->channel;
struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
int ret;
- if (unlikely(!chan->fence))
- return -ENODEV;
-
- fence->channel = chan;
fence->timeout = jiffies + (15 * HZ);
if (priv->uevent)
@@ -406,18 +403,41 @@ nouveau_fence_unref(struct nouveau_fence **pfence)
}
int
-nouveau_fence_new(struct nouveau_fence **pfence)
+nouveau_fence_create(struct nouveau_fence **pfence,
+ struct nouveau_channel *chan)
{
struct nouveau_fence *fence;
+ if (unlikely(!chan->fence))
+ return -ENODEV;
+
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (!fence)
return -ENOMEM;
+ fence->channel = chan;
+
*pfence = fence;
return 0;
}
+int
+nouveau_fence_new(struct nouveau_fence **pfence,
+ struct nouveau_channel *chan)
+{
+ int ret = 0;
+
+ ret = nouveau_fence_create(pfence, chan);
+ if (ret)
+ return ret;
+
+ ret = nouveau_fence_emit(*pfence);
+ if (ret)
+ nouveau_fence_unref(pfence);
+
+ return ret;
+}
+
static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
{
return "nouveau";
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 2c72d96ef17d..64d33ae7f356 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -17,10 +17,11 @@ struct nouveau_fence {
unsigned long timeout;
};
-int nouveau_fence_new(struct nouveau_fence **);
+int nouveau_fence_create(struct nouveau_fence **, struct nouveau_channel *);
+int nouveau_fence_new(struct nouveau_fence **, struct nouveau_channel *);
void nouveau_fence_unref(struct nouveau_fence **);
-int nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
+int nouveau_fence_emit(struct nouveau_fence *);
bool nouveau_fence_done(struct nouveau_fence *);
int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index c0b10d8d3d03..a0d303e5ce3d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -914,11 +914,8 @@ revalidate:
}
}
- ret = nouveau_fence_new(&fence);
- if (!ret)
- ret = nouveau_fence_emit(fence, chan);
+ ret = nouveau_fence_new(&fence, chan);
if (ret) {
- nouveau_fence_unref(&fence);
NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index e11c1c803676..0cea301cc9a9 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -412,6 +412,13 @@ config HID_GOOGLE_HAMMER
help
Say Y here if you have a Google Hammer device.
+config HID_GOOGLE_STADIA_FF
+ tristate "Google Stadia force feedback"
+ select INPUT_FF_MEMLESS
+ help
+ Say Y here if you want to enable force feedback support for the Google
+ Stadia controller.
+
config HID_VIVALDI
tristate "Vivaldi Keyboard"
select HID_VIVALDI_COMMON
@@ -1066,9 +1073,11 @@ config STEAM_FF
Deck.
config HID_STEELSERIES
- tristate "Steelseries SRW-S1 steering wheel support"
+ tristate "Steelseries devices support"
+ depends on USB_HID
help
- Support for Steelseries SRW-S1 steering wheel
+	  Support for the Steelseries SRW-S1 steering wheel and the Steelseries
+	  Arctis 1 Wireless for Xbox headset.
config HID_SUNPLUS
tristate "Sunplus wireless desktop"
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 7a9e160158f7..8a06d0f840bc 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -55,6 +55,7 @@ obj-$(CONFIG_HID_GFRM) += hid-gfrm.o
obj-$(CONFIG_HID_GLORIOUS) += hid-glorious.o
obj-$(CONFIG_HID_VIVALDI_COMMON) += hid-vivaldi-common.o
obj-$(CONFIG_HID_GOOGLE_HAMMER) += hid-google-hammer.o
+obj-$(CONFIG_HID_GOOGLE_STADIA_FF) += hid-google-stadiaff.o
obj-$(CONFIG_HID_VIVALDI) += hid-vivaldi.o
obj-$(CONFIG_HID_GT683R) += hid-gt683r.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index d7b932925730..3ca45975c686 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -343,7 +343,8 @@ static const struct apple_non_apple_keyboard non_apple_keyboards[] = {
{ "SONiX USB DEVICE" },
{ "Keychron" },
{ "AONE" },
- { "GANSS" }
+ { "GANSS" },
+ { "Hailuck" },
};
static bool apple_is_non_apple_keyboard(struct hid_device *hdev)
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 27cadadda7c9..54c33a24f844 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -16,14 +16,14 @@
* https://www.silabs.com/documents/public/application-notes/an495-cp2112-interface-specification.pdf
*/
-#include <linux/gpio/consumer.h>
-#include <linux/gpio/machine.h>
+#include <linux/bitops.h>
#include <linux/gpio/driver.h>
#include <linux/hid.h>
#include <linux/hidraw.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/nls.h>
+#include <linux/string_choices.h>
#include <linux/usb/ch9.h>
#include "hid-ids.h"
@@ -31,6 +31,8 @@
#define CP2112_GPIO_CONFIG_LENGTH 5
#define CP2112_GPIO_GET_LENGTH 2
#define CP2112_GPIO_SET_LENGTH 3
+#define CP2112_GPIO_MAX_GPIO 8
+#define CP2112_GPIO_ALL_GPIO_MASK GENMASK(7, 0)
enum {
CP2112_GPIO_CONFIG = 0x02,
@@ -163,19 +165,17 @@ struct cp2112_device {
atomic_t read_avail;
atomic_t xfer_avail;
struct gpio_chip gc;
- struct irq_chip irq;
u8 *in_out_buffer;
struct mutex lock;
- struct gpio_desc *desc[8];
bool gpio_poll;
struct delayed_work gpio_poll_worker;
unsigned long irq_mask;
u8 gpio_prev_state;
};
-static int gpio_push_pull = 0xFF;
-module_param(gpio_push_pull, int, S_IRUGO | S_IWUSR);
+static int gpio_push_pull = CP2112_GPIO_ALL_GPIO_MASK;
+module_param(gpio_push_pull, int, 0644);
MODULE_PARM_DESC(gpio_push_pull, "GPIO push-pull configuration bitmask");
static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -197,7 +197,7 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
goto exit;
}
- buf[1] &= ~(1 << offset);
+ buf[1] &= ~BIT(offset);
buf[2] = gpio_push_pull;
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
@@ -227,8 +227,8 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
mutex_lock(&dev->lock);
buf[0] = CP2112_GPIO_SET;
- buf[1] = value ? 0xff : 0;
- buf[2] = 1 << offset;
+ buf[1] = value ? CP2112_GPIO_ALL_GPIO_MASK : 0;
+ buf[2] = BIT(offset);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf,
CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT,
@@ -532,15 +532,13 @@ static int cp2112_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
hid_dbg(hdev, "I2C %d messages\n", num);
if (num == 1) {
+ hid_dbg(hdev, "I2C %s %#04x len %d\n",
+ str_read_write(msgs->flags & I2C_M_RD), msgs->addr, msgs->len);
if (msgs->flags & I2C_M_RD) {
- hid_dbg(hdev, "I2C read %#04x len %d\n",
- msgs->addr, msgs->len);
read_length = msgs->len;
read_buf = msgs->buf;
count = cp2112_read_req(buf, msgs->addr, msgs->len);
} else {
- hid_dbg(hdev, "I2C write %#04x len %d\n",
- msgs->addr, msgs->len);
count = cp2112_i2c_write_req(buf, msgs->addr,
msgs->buf, msgs->len);
}
@@ -648,7 +646,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
int ret;
hid_dbg(hdev, "%s addr 0x%x flags 0x%x cmd 0x%x size %d\n",
- read_write == I2C_SMBUS_WRITE ? "write" : "read",
+ str_write_read(read_write == I2C_SMBUS_WRITE),
addr, flags, command, size);
switch (size) {
@@ -895,7 +893,7 @@ static ssize_t name##_show(struct device *kdev, \
int ret = cp2112_get_usb_config(hdev, &cfg); \
if (ret) \
return ret; \
- return scnprintf(buf, PAGE_SIZE, format, ##__VA_ARGS__); \
+ return sysfs_emit(buf, format, ##__VA_ARGS__); \
} \
static DEVICE_ATTR_RW(name);
@@ -946,18 +944,10 @@ CP2112_CONFIG_ATTR(release_version, ({
#undef CP2112_CONFIG_ATTR
-struct cp2112_pstring_attribute {
- struct device_attribute attr;
- unsigned char report;
-};
-
-static ssize_t pstr_store(struct device *kdev,
- struct device_attribute *kattr, const char *buf,
- size_t count)
+static ssize_t pstr_store(struct device *kdev, struct device_attribute *kattr,
+ const char *buf, size_t count, int number)
{
struct hid_device *hdev = to_hid_device(kdev);
- struct cp2112_pstring_attribute *attr =
- container_of(kattr, struct cp2112_pstring_attribute, attr);
struct cp2112_string_report report;
int ret;
@@ -965,7 +955,7 @@ static ssize_t pstr_store(struct device *kdev,
ret = utf8s_to_utf16s(buf, count, UTF16_LITTLE_ENDIAN,
report.string, ARRAY_SIZE(report.string));
- report.report = attr->report;
+ report.report = number;
report.length = ret * sizeof(report.string[0]) + 2;
report.type = USB_DT_STRING;
@@ -983,17 +973,15 @@ static ssize_t pstr_store(struct device *kdev,
return count;
}
-static ssize_t pstr_show(struct device *kdev,
- struct device_attribute *kattr, char *buf)
+static ssize_t pstr_show(struct device *kdev, struct device_attribute *kattr,
+ char *buf, int number)
{
struct hid_device *hdev = to_hid_device(kdev);
- struct cp2112_pstring_attribute *attr =
- container_of(kattr, struct cp2112_pstring_attribute, attr);
struct cp2112_string_report report;
u8 length;
int ret;
- ret = cp2112_hid_get(hdev, attr->report, (u8 *)&report.contents,
+ ret = cp2112_hid_get(hdev, number, (u8 *)&report.contents,
sizeof(report.contents), HID_FEATURE_REPORT);
if (ret < 3) {
hid_err(hdev, "error reading %s string: %d\n", kattr->attr.name,
@@ -1018,10 +1006,16 @@ static ssize_t pstr_show(struct device *kdev,
}
#define CP2112_PSTR_ATTR(name, _report) \
-static struct cp2112_pstring_attribute dev_attr_##name = { \
- .attr = __ATTR(name, (S_IWUSR | S_IRUGO), pstr_show, pstr_store), \
- .report = _report, \
-};
+static ssize_t name##_store(struct device *kdev, struct device_attribute *kattr, \
+ const char *buf, size_t count) \
+{ \
+ return pstr_store(kdev, kattr, buf, count, _report); \
+} \
+static ssize_t name##_show(struct device *kdev, struct device_attribute *kattr, char *buf) \
+{ \
+ return pstr_show(kdev, kattr, buf, _report); \
+} \
+static DEVICE_ATTR_RW(name);
CP2112_PSTR_ATTR(manufacturer, CP2112_MANUFACTURER_STRING);
CP2112_PSTR_ATTR(product, CP2112_PRODUCT_STRING);
@@ -1036,9 +1030,9 @@ static const struct attribute_group cp2112_attr_group = {
&dev_attr_max_power.attr,
&dev_attr_power_mode.attr,
&dev_attr_release_version.attr,
- &dev_attr_manufacturer.attr.attr,
- &dev_attr_product.attr.attr,
- &dev_attr_serial.attr.attr,
+ &dev_attr_manufacturer.attr,
+ &dev_attr_product.attr,
+ &dev_attr_serial.attr,
NULL
}
};
@@ -1063,7 +1057,7 @@ static void chmod_sysfs_attrs(struct hid_device *hdev)
}
for (attr = cp2112_attr_group.attrs; *attr; ++attr) {
- umode_t mode = (buf[1] & 1) ? S_IWUSR | S_IRUGO : S_IRUGO;
+ umode_t mode = (buf[1] & 1) ? 0644 : 0444;
ret = sysfs_chmod_file(&hdev->dev.kobj, *attr, mode);
if (ret < 0)
hid_err(hdev, "error chmoding sysfs file %s\n",
@@ -1080,16 +1074,20 @@ static void cp2112_gpio_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct cp2112_device *dev = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- __clear_bit(d->hwirq, &dev->irq_mask);
+ __clear_bit(hwirq, &dev->irq_mask);
+ gpiochip_disable_irq(gc, hwirq);
}
static void cp2112_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct cp2112_device *dev = gpiochip_get_data(gc);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
- __set_bit(d->hwirq, &dev->irq_mask);
+ gpiochip_enable_irq(gc, hwirq);
+ __set_bit(hwirq, &dev->irq_mask);
}
static void cp2112_gpio_poll_callback(struct work_struct *work)
@@ -1098,7 +1096,6 @@ static void cp2112_gpio_poll_callback(struct work_struct *work)
gpio_poll_worker.work);
struct irq_data *d;
u8 gpio_mask;
- u8 virqs = (u8)dev->irq_mask;
u32 irq_type;
int irq, virq, ret;
@@ -1109,15 +1106,10 @@ static void cp2112_gpio_poll_callback(struct work_struct *work)
goto exit;
gpio_mask = ret;
-
- while (virqs) {
- virq = ffs(virqs) - 1;
- virqs &= ~BIT(virq);
-
- if (!dev->gc.to_irq)
- break;
-
- irq = dev->gc.to_irq(&dev->gc, virq);
+ for_each_set_bit(virq, &dev->irq_mask, CP2112_GPIO_MAX_GPIO) {
+ irq = irq_find_mapping(dev->gc.irq.domain, virq);
+ if (!irq)
+ continue;
d = irq_get_irq_data(irq);
if (!d)
@@ -1175,6 +1167,7 @@ static void cp2112_gpio_irq_shutdown(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct cp2112_device *dev = gpiochip_get_data(gc);
+ cp2112_gpio_irq_mask(d);
cancel_delayed_work_sync(&dev->gpio_poll_worker);
}
@@ -1183,50 +1176,17 @@ static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
-static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
- int pin)
-{
- int ret;
-
- if (dev->desc[pin])
- return -EINVAL;
-
- dev->desc[pin] = gpiochip_request_own_desc(&dev->gc, pin,
- "HID/I2C:Event",
- GPIO_ACTIVE_HIGH,
- GPIOD_IN);
- if (IS_ERR(dev->desc[pin])) {
- dev_err(dev->gc.parent, "Failed to request GPIO\n");
- return PTR_ERR(dev->desc[pin]);
- }
-
- ret = cp2112_gpio_direction_input(&dev->gc, pin);
- if (ret < 0) {
- dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n");
- goto err_desc;
- }
-
- ret = gpiochip_lock_as_irq(&dev->gc, pin);
- if (ret) {
- dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
- goto err_desc;
- }
-
- ret = gpiod_to_irq(dev->desc[pin]);
- if (ret < 0) {
- dev_err(dev->gc.parent, "Failed to translate GPIO to IRQ\n");
- goto err_lock;
- }
-
- return ret;
-
-err_lock:
- gpiochip_unlock_as_irq(&dev->gc, pin);
-err_desc:
- gpiochip_free_own_desc(dev->desc[pin]);
- dev->desc[pin] = NULL;
- return ret;
-}
+static const struct irq_chip cp2112_gpio_irqchip = {
+ .name = "cp2112-gpio",
+ .irq_startup = cp2112_gpio_irq_startup,
+ .irq_shutdown = cp2112_gpio_irq_shutdown,
+ .irq_ack = cp2112_gpio_irq_ack,
+ .irq_mask = cp2112_gpio_irq_mask,
+ .irq_unmask = cp2112_gpio_irq_unmask,
+ .irq_set_type = cp2112_gpio_irq_type,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
@@ -1333,21 +1293,12 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev->gc.set = cp2112_gpio_set;
dev->gc.get = cp2112_gpio_get;
dev->gc.base = -1;
- dev->gc.ngpio = 8;
+ dev->gc.ngpio = CP2112_GPIO_MAX_GPIO;
dev->gc.can_sleep = 1;
dev->gc.parent = &hdev->dev;
- dev->irq.name = "cp2112-gpio";
- dev->irq.irq_startup = cp2112_gpio_irq_startup;
- dev->irq.irq_shutdown = cp2112_gpio_irq_shutdown;
- dev->irq.irq_ack = cp2112_gpio_irq_ack;
- dev->irq.irq_mask = cp2112_gpio_irq_mask;
- dev->irq.irq_unmask = cp2112_gpio_irq_unmask;
- dev->irq.irq_set_type = cp2112_gpio_irq_type;
- dev->irq.flags = IRQCHIP_MASK_ON_SUSPEND;
-
girq = &dev->gc.irq;
- girq->chip = &dev->irq;
+ gpio_irq_chip_set_chip(girq, &cp2112_gpio_irqchip);
/* The event comes from the outside so no parent handler */
girq->parent_handler = NULL;
girq->num_parents = 0;
@@ -1389,7 +1340,6 @@ err_hid_stop:
static void cp2112_remove(struct hid_device *hdev)
{
struct cp2112_device *dev = hid_get_drvdata(hdev);
- int i;
sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group);
i2c_del_adapter(&dev->adap);
@@ -1399,11 +1349,6 @@ static void cp2112_remove(struct hid_device *hdev)
cancel_delayed_work_sync(&dev->gpio_poll_worker);
}
- for (i = 0; i < ARRAY_SIZE(dev->desc); i++) {
- gpiochip_unlock_as_irq(&dev->gc, i);
- gpiochip_free_own_desc(dev->desc[i]);
- }
-
gpiochip_remove(&dev->gc);
/* i2c_del_adapter has finished removing all i2c devices from our
* adapter. Well behaved devices should no longer call our cp2112_xfer
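The cp2112 GPIO changes above are the subsystem's standard immutable irq_chip conversion: the chip is declared const, flagged IRQCHIP_IMMUTABLE together with GPIOCHIP_IRQ_RESOURCE_HELPERS, installed with gpio_irq_chip_set_chip(), and the mask/unmask callbacks now call gpiochip_disable_irq()/gpiochip_enable_irq() themselves. A stripped-down sketch of the same pattern for a hypothetical driver (all foo_* names are invented for illustration):

    static void foo_irq_mask(struct irq_data *d)
    {
            struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
            irq_hw_number_t hwirq = irqd_to_hwirq(d);

            /* driver-specific masking goes here */
            gpiochip_disable_irq(gc, hwirq);
    }

    static void foo_irq_unmask(struct irq_data *d)
    {
            struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
            irq_hw_number_t hwirq = irqd_to_hwirq(d);

            gpiochip_enable_irq(gc, hwirq);
            /* driver-specific unmasking goes here */
    }

    static const struct irq_chip foo_irqchip = {
            .name = "foo-gpio",
            .irq_mask = foo_irq_mask,
            .irq_unmask = foo_irq_unmask,
            .flags = IRQCHIP_IMMUTABLE,
            GPIOCHIP_IRQ_RESOURCE_HELPERS,
    };

    /* probe-time hookup, replacing the old "girq->chip = &chip" assignment */
    static void foo_setup_irqchip(struct gpio_chip *gc)
    {
            gpio_irq_chip_set_chip(&gc->irq, &foo_irqchip);
    }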
diff --git a/drivers/hid/hid-google-stadiaff.c b/drivers/hid/hid-google-stadiaff.c
new file mode 100644
index 000000000000..3731575562ab
--- /dev/null
+++ b/drivers/hid/hid-google-stadiaff.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Stadia controller rumble support.
+ *
+ * Copyright 2023 Google LLC
+ */
+
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+#define STADIA_FF_REPORT_ID 5
+
+struct stadiaff_device {
+ struct hid_device *hid;
+ struct hid_report *report;
+ spinlock_t lock;
+ bool removed;
+ uint16_t strong_magnitude;
+ uint16_t weak_magnitude;
+ struct work_struct work;
+};
+
+static void stadiaff_work(struct work_struct *work)
+{
+ struct stadiaff_device *stadiaff =
+ container_of(work, struct stadiaff_device, work);
+ struct hid_field *rumble_field = stadiaff->report->field[0];
+ unsigned long flags;
+
+ spin_lock_irqsave(&stadiaff->lock, flags);
+ rumble_field->value[0] = stadiaff->strong_magnitude;
+ rumble_field->value[1] = stadiaff->weak_magnitude;
+ spin_unlock_irqrestore(&stadiaff->lock, flags);
+
+ hid_hw_request(stadiaff->hid, stadiaff->report, HID_REQ_SET_REPORT);
+}
+
+static int stadiaff_play(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct stadiaff_device *stadiaff = hid_get_drvdata(hid);
+ unsigned long flags;
+
+ spin_lock_irqsave(&stadiaff->lock, flags);
+ if (!stadiaff->removed) {
+ stadiaff->strong_magnitude = effect->u.rumble.strong_magnitude;
+ stadiaff->weak_magnitude = effect->u.rumble.weak_magnitude;
+ schedule_work(&stadiaff->work);
+ }
+ spin_unlock_irqrestore(&stadiaff->lock, flags);
+
+ return 0;
+}
+
+static int stadiaff_init(struct hid_device *hid)
+{
+ struct stadiaff_device *stadiaff;
+ struct hid_report *report;
+ struct hid_input *hidinput;
+ struct input_dev *dev;
+ int error;
+
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+ hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+ dev = hidinput->input;
+
+ report = hid_validate_values(hid, HID_OUTPUT_REPORT,
+ STADIA_FF_REPORT_ID, 0, 2);
+ if (!report)
+ return -ENODEV;
+
+ stadiaff = devm_kzalloc(&hid->dev, sizeof(struct stadiaff_device),
+ GFP_KERNEL);
+ if (!stadiaff)
+ return -ENOMEM;
+
+ hid_set_drvdata(hid, stadiaff);
+
+ input_set_capability(dev, EV_FF, FF_RUMBLE);
+
+ error = input_ff_create_memless(dev, NULL, stadiaff_play);
+ if (error)
+ return error;
+
+ stadiaff->removed = false;
+ stadiaff->hid = hid;
+ stadiaff->report = report;
+ INIT_WORK(&stadiaff->work, stadiaff_work);
+ spin_lock_init(&stadiaff->lock);
+
+ hid_info(hid, "Force Feedback for Google Stadia controller\n");
+
+ return 0;
+}
+
+static int stadia_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ return ret;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ return ret;
+ }
+
+ ret = stadiaff_init(hdev);
+ if (ret) {
+ hid_err(hdev, "force feedback init failed\n");
+ hid_hw_stop(hdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void stadia_remove(struct hid_device *hid)
+{
+ struct stadiaff_device *stadiaff = hid_get_drvdata(hid);
+ unsigned long flags;
+
+ spin_lock_irqsave(&stadiaff->lock, flags);
+ stadiaff->removed = true;
+ spin_unlock_irqrestore(&stadiaff->lock, flags);
+
+ cancel_work_sync(&stadiaff->work);
+ hid_hw_stop(hid);
+}
+
+static const struct hid_device_id stadia_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STADIA) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STADIA) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, stadia_devices);
+
+static struct hid_driver stadia_driver = {
+ .name = "stadia",
+ .id_table = stadia_devices,
+ .probe = stadia_probe,
+ .remove = stadia_remove,
+};
+module_hid_driver(stadia_driver);
+
+MODULE_LICENSE("GPL");
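In the new Stadia driver, stadiaff_play() runs in atomic context, so it only records the magnitudes under the spinlock and defers the actual hid_hw_request() to the work item; the removed flag lets stadia_remove() stop new work from being queued before cancelling it. For completeness, a minimal user-space sketch exercising this path through the standard evdev force-feedback interface (the device path is an assumption, not part of the patch):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/input.h>

    /* Upload one rumble effect and play it once on the given event node. */
    static int rumble_once(const char *dev_path)
    {
            struct ff_effect effect;
            struct input_event play;
            int fd = open(dev_path, O_RDWR);

            if (fd < 0)
                    return -1;

            memset(&effect, 0, sizeof(effect));
            effect.type = FF_RUMBLE;
            effect.id = -1;                              /* let the kernel assign an id */
            effect.u.rumble.strong_magnitude = 0x8000;
            effect.u.rumble.weak_magnitude = 0x4000;
            effect.replay.length = 500;                  /* ms */

            if (ioctl(fd, EVIOCSFF, &effect) < 0) {      /* upload the effect */
                    close(fd);
                    return -1;
            }

            memset(&play, 0, sizeof(play));
            play.type = EV_FF;
            play.code = effect.id;
            play.value = 1;                              /* start playback */
            if (write(fd, &play, sizeof(play)) != sizeof(play)) {
                    close(fd);
                    return -1;
            }

            usleep(600000);                              /* let the effect finish */
            close(fd);
            return 0;
    }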
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 8a310f8ff20f..7e499992a793 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -531,6 +531,7 @@
#define USB_DEVICE_ID_GOOGLE_DON 0x5050
#define USB_DEVICE_ID_GOOGLE_EEL 0x5057
#define USB_DEVICE_ID_GOOGLE_JEWEL 0x5061
+#define USB_DEVICE_ID_GOOGLE_STADIA 0x9400
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f
@@ -866,6 +867,7 @@
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2 0xc547
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 851ee86eff32..0235cc1690a1 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -358,6 +358,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD),
+ HID_BATTERY_QUIRK_IGNORE },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM,
USB_DEVICE_ID_ELECOM_BM084),
HID_BATTERY_QUIRK_IGNORE },
@@ -988,6 +991,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
return;
case 0x3c: /* Invert */
+ device->quirks &= ~HID_QUIRK_NOINVERT;
map_key_clear(BTN_TOOL_RUBBER);
break;
@@ -1013,9 +1017,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x45: /* ERASER */
/*
* This event is reported when the eraser tip touches the surface.
- * Actual eraser (BTN_TOOL_RUBBER) is set by Invert usage when
- * tool gets in proximity.
+ * Actual eraser (BTN_TOOL_RUBBER) is set and released either
+ * by Invert if the tool reports proximity, or by Eraser directly.
*/
+ if (!test_bit(BTN_TOOL_RUBBER, input->keybit)) {
+ device->quirks |= HID_QUIRK_NOINVERT;
+ set_bit(BTN_TOOL_RUBBER, input->keybit);
+ }
map_key_clear(BTN_TOUCH);
break;
@@ -1580,6 +1588,15 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
else if (report->tool != BTN_TOOL_RUBBER)
/* value is off, tool is not rubber, ignore */
return;
+ else if (*quirks & HID_QUIRK_NOINVERT &&
+ !test_bit(BTN_TOUCH, input->key)) {
+ /*
+ * There is no Invert to release the tool, so let hid_input
+ * send BTN_TOUCH with the scancode and release the tool afterwards.
+ */
+ hid_report_release_tool(report, input, BTN_TOOL_RUBBER);
+ return;
+ }
/* let hid-input set BTN_TOUCH */
break;
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 62180414efcc..8afe3be683ba 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1285,6 +1285,9 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
* 50 msec should give the receiver enough time to be ready.
*/
msleep(50);
+
+ if (retval)
+ return retval;
}
/*
@@ -1306,7 +1309,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
buf[5] = 0x09;
buf[6] = 0x00;
- hid_hw_raw_request(hdev, REPORT_ID_HIDPP_SHORT, buf,
+ retval = hid_hw_raw_request(hdev, REPORT_ID_HIDPP_SHORT, buf,
HIDPP_REPORT_SHORT_LENGTH, HID_OUTPUT_REPORT,
HID_REQ_SET_REPORT);
@@ -1692,11 +1695,12 @@ static int logi_dj_raw_event(struct hid_device *hdev,
}
/*
* Mouse-only receivers send unnumbered mouse data. The 27 MHz
- * receiver uses 6 byte packets, the nano receiver 8 bytes.
+ * receiver uses 6 byte packets, the nano receiver 8 bytes,
+ * the lightspeed receiver (Pro X Superlight) 13 bytes.
*/
if (djrcv_dev->unnumbered_application == HID_GD_MOUSE &&
- size <= 8) {
- u8 mouse_report[9];
+ size <= 13) {
+ u8 mouse_report[14];
/* Prepend report id */
mouse_report[0] = REPORT_TYPE_MOUSE;
@@ -1980,6 +1984,10 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1),
.driver_data = recvr_type_gaming_hidpp},
+ { /* Logitech lightspeed receiver (0xc547) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2),
+ .driver_data = recvr_type_gaming_hidpp},
{ /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 129b01be488d..05f5b5f588a2 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -228,7 +228,7 @@ struct hidpp_device {
#define HIDPP20_ERROR_INVALID_ARGS 0x02
#define HIDPP20_ERROR_OUT_OF_RANGE 0x03
#define HIDPP20_ERROR_HW_ERROR 0x04
-#define HIDPP20_ERROR_LOGITECH_INTERNAL 0x05
+#define HIDPP20_ERROR_NOT_ALLOWED 0x05
#define HIDPP20_ERROR_INVALID_FEATURE_INDEX 0x06
#define HIDPP20_ERROR_INVALID_FUNCTION_ID 0x07
#define HIDPP20_ERROR_BUSY 0x08
@@ -275,21 +275,22 @@ static int __hidpp_send_report(struct hid_device *hdev,
}
/*
- * hidpp_send_message_sync() returns 0 in case of success, and something else
- * in case of a failure.
- * - If ' something else' is positive, that means that an error has been raised
- * by the protocol itself.
- * - If ' something else' is negative, that means that we had a classic error
- * (-ENOMEM, -EPIPE, etc...)
+ * Effectively send the message to the device, waiting for its answer.
+ *
+ * Must be called with hidpp->send_mutex held.
+ *
+ * Same return protocol as hidpp_send_message_sync():
+ * - 0 on success
+ * - a negative value means a transport error
+ * - a positive value means a protocol error
*/
-static int hidpp_send_message_sync(struct hidpp_device *hidpp,
+static int __do_hidpp_send_message_sync(struct hidpp_device *hidpp,
struct hidpp_report *message,
struct hidpp_report *response)
{
- int ret = -1;
- int max_retries = 3;
+ int ret;
- mutex_lock(&hidpp->send_mutex);
+ __must_hold(&hidpp->send_mutex);
hidpp->send_receive_buf = response;
hidpp->answer_available = false;
@@ -300,47 +301,74 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
*/
*response = *message;
- for (; max_retries != 0 && ret; max_retries--) {
- ret = __hidpp_send_report(hidpp->hid_dev, message);
+ ret = __hidpp_send_report(hidpp->hid_dev, message);
+ if (ret) {
+ dbg_hid("__hidpp_send_report returned err: %d\n", ret);
+ memset(response, 0, sizeof(struct hidpp_report));
+ return ret;
+ }
- if (ret) {
- dbg_hid("__hidpp_send_report returned err: %d\n", ret);
- memset(response, 0, sizeof(struct hidpp_report));
- break;
- }
+ if (!wait_event_timeout(hidpp->wait, hidpp->answer_available,
+ 5*HZ)) {
+ dbg_hid("%s:timeout waiting for response\n", __func__);
+ memset(response, 0, sizeof(struct hidpp_report));
+ return -ETIMEDOUT;
+ }
- if (!wait_event_timeout(hidpp->wait, hidpp->answer_available,
- 5*HZ)) {
- dbg_hid("%s:timeout waiting for response\n", __func__);
- memset(response, 0, sizeof(struct hidpp_report));
- ret = -ETIMEDOUT;
- break;
- }
+ if (response->report_id == REPORT_ID_HIDPP_SHORT &&
+ response->rap.sub_id == HIDPP_ERROR) {
+ ret = response->rap.params[1];
+ dbg_hid("%s:got hidpp error %02X\n", __func__, ret);
+ return ret;
+ }
- if (response->report_id == REPORT_ID_HIDPP_SHORT &&
- response->rap.sub_id == HIDPP_ERROR) {
- ret = response->rap.params[1];
- dbg_hid("%s:got hidpp error %02X\n", __func__, ret);
+ if ((response->report_id == REPORT_ID_HIDPP_LONG ||
+ response->report_id == REPORT_ID_HIDPP_VERY_LONG) &&
+ response->fap.feature_index == HIDPP20_ERROR) {
+ ret = response->fap.params[1];
+ dbg_hid("%s:got hidpp 2.0 error %02X\n", __func__, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * hidpp_send_message_sync() returns 0 in case of success, and something else
+ * in case of a failure.
+ *
+ * See __do_hidpp_send_message_sync() for a detailed explanation of the returned
+ * value.
+ */
+static int hidpp_send_message_sync(struct hidpp_device *hidpp,
+ struct hidpp_report *message,
+ struct hidpp_report *response)
+{
+ int ret;
+ int max_retries = 3;
+
+ mutex_lock(&hidpp->send_mutex);
+
+ do {
+ ret = __do_hidpp_send_message_sync(hidpp, message, response);
+ if (ret != HIDPP20_ERROR_BUSY)
break;
- }
- if ((response->report_id == REPORT_ID_HIDPP_LONG ||
- response->report_id == REPORT_ID_HIDPP_VERY_LONG) &&
- response->fap.feature_index == HIDPP20_ERROR) {
- ret = response->fap.params[1];
- if (ret != HIDPP20_ERROR_BUSY) {
- dbg_hid("%s:got hidpp 2.0 error %02X\n", __func__, ret);
- break;
- }
- dbg_hid("%s:got busy hidpp 2.0 error %02X, retrying\n", __func__, ret);
- }
- }
+ dbg_hid("%s:got busy hidpp 2.0 error %02X, retrying\n", __func__, ret);
+ } while (--max_retries);
mutex_unlock(&hidpp->send_mutex);
return ret;
}
+/*
+ * hidpp_send_fap_command_sync() returns 0 in case of success, and something else
+ * in case of a failure.
+ *
+ * See __do_hidpp_send_message_sync() for a detailed explanation of the returned
+ * value.
+ */
static int hidpp_send_fap_command_sync(struct hidpp_device *hidpp,
u8 feat_index, u8 funcindex_clientid, u8 *params, int param_count,
struct hidpp_report *response)
@@ -373,6 +401,13 @@ static int hidpp_send_fap_command_sync(struct hidpp_device *hidpp,
return ret;
}
+/*
+ * hidpp_send_rap_command_sync() returns 0 in case of success, and something else
+ * in case of a failure.
+ *
+ * See __do_hidpp_send_message_sync() for a detailed explanation of the returned
+ * value.
+ */
static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev,
u8 report_id, u8 sub_id, u8 reg_address, u8 *params, int param_count,
struct hidpp_report *response)
@@ -4620,6 +4655,8 @@ static const struct hid_device_id hidpp_devices[] = {
.driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS },
{ /* Logitech G Pro Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) },
+ { /* Logitech G Pro X Superlight Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC094) },
{ /* G935 Gaming Headset */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0x0a87),
@@ -4647,6 +4684,8 @@ static const struct hid_device_id hidpp_devices[] = {
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb02a) },
{ /* MX Master 3 mouse over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023) },
+ { /* MX Anywhere 3 mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb025) },
{ /* MX Master 3S mouse over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb034) },
{}
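The refactor above splits the single send attempt into __do_hidpp_send_message_sync(), which must run with send_mutex held, while hidpp_send_message_sync() keeps the mutex and retries up to three times only on HIDPP20_ERROR_BUSY. A small sketch of how a caller interprets the resulting three-way return convention (the surrounding variables are placeholders):

    ret = hidpp_send_fap_command_sync(hidpp, feature_index, funcindex,
                                      params, param_count, &response);
    if (ret < 0)
            return ret;        /* transport error: -ENOMEM, -EPIPE, -ETIMEDOUT, ... */
    if (ret > 0)
            return -EPROTO;    /* HID++ protocol error, e.g. HIDPP20_ERROR_NOT_ALLOWED */
    /* ret == 0: 'response' holds the device's answer */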
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index e31be0cb8b85..521b2ffb4244 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1594,7 +1594,6 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app)
static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
{
struct mt_device *td = hid_get_drvdata(hdev);
- char *name;
const char *suffix = NULL;
struct mt_report_data *rdata;
struct mt_application *mt_application = NULL;
@@ -1645,15 +1644,9 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
break;
}
- if (suffix) {
- name = devm_kzalloc(&hi->input->dev,
- strlen(hdev->name) + strlen(suffix) + 2,
- GFP_KERNEL);
- if (name) {
- sprintf(name, "%s %s", hdev->name, suffix);
- hi->input->name = name;
- }
- }
+ if (suffix)
+ hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+ "%s %s", hdev->name, suffix);
return 0;
}
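The hid-multitouch change above collapses the manual devm_kzalloc()+sprintf() pair into devm_kasprintf(), which formats and allocates in one call and ties the string's lifetime to the HID device. A minimal sketch of the idiom, with illustrative error handling that the hunk itself does not add:

    hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s",
                                     hdev->name, suffix);
    if (!hi->input->name)
            return -ENOMEM;    /* illustrative only; the driver above assigns the result directly */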
diff --git a/drivers/hid/hid-nvidia-shield.c b/drivers/hid/hid-nvidia-shield.c
index a928ad2be62d..9a3576dbf421 100644
--- a/drivers/hid/hid-nvidia-shield.c
+++ b/drivers/hid/hid-nvidia-shield.c
@@ -6,11 +6,15 @@
*/
#include <linux/hid.h>
+#include <linux/idr.h>
#include <linux/input-event-codes.h>
#include <linux/input.h>
+#include <linux/jiffies.h>
#include <linux/leds.h>
#include <linux/module.h>
+#include <linux/power_supply.h>
#include <linux/spinlock.h>
+#include <linux/timer.h>
#include <linux/workqueue.h>
#include "hid-ids.h"
@@ -30,6 +34,8 @@ enum {
enum {
SHIELD_FW_VERSION_INITIALIZED = 0,
SHIELD_BOARD_INFO_INITIALIZED,
+ SHIELD_BATTERY_STATS_INITIALIZED,
+ SHIELD_CHARGER_STATE_INITIALIZED,
};
enum {
@@ -37,6 +43,7 @@ enum {
THUNDERSTRIKE_BOARD_INFO_UPDATE,
THUNDERSTRIKE_HAPTICS_UPDATE,
THUNDERSTRIKE_LED_UPDATE,
+ THUNDERSTRIKE_POWER_SUPPLY_STATS_UPDATE,
};
enum {
@@ -48,10 +55,46 @@ enum {
enum {
THUNDERSTRIKE_HOSTCMD_ID_FW_VERSION = 1,
THUNDERSTRIKE_HOSTCMD_ID_LED = 6,
+ THUNDERSTRIKE_HOSTCMD_ID_BATTERY,
THUNDERSTRIKE_HOSTCMD_ID_BOARD_INFO = 16,
THUNDERSTRIKE_HOSTCMD_ID_USB_INIT = 53,
THUNDERSTRIKE_HOSTCMD_ID_HAPTICS = 57,
- THUNDERSTRIKE_HOSTCMD_ID_BLUETOOTH_INIT = 58,
+ THUNDERSTRIKE_HOSTCMD_ID_CHARGER,
+};
+
+struct power_supply_dev {
+ struct power_supply *psy;
+ struct power_supply_desc desc;
+};
+
+struct thunderstrike_psy_prop_values {
+ int voltage_min;
+ int voltage_now;
+ int voltage_avg;
+ int voltage_boot;
+ int capacity;
+ int status;
+ int charge_type;
+ int temp;
+};
+
+static const enum power_supply_property thunderstrike_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CHARGE_TYPE,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN,
+ POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_VOLTAGE_BOOT,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_SCOPE,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TEMP_MIN,
+ POWER_SUPPLY_PROP_TEMP_MAX,
+ POWER_SUPPLY_PROP_TEMP_ALERT_MIN,
+ POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
};
enum thunderstrike_led_state {
@@ -60,6 +103,38 @@ enum thunderstrike_led_state {
} __packed;
static_assert(sizeof(enum thunderstrike_led_state) == 1);
+struct thunderstrike_hostcmd_battery {
+ __le16 voltage_avg;
+ u8 reserved_at_10;
+ __le16 thermistor;
+ __le16 voltage_min;
+ __le16 voltage_boot;
+ __le16 voltage_now;
+ u8 capacity;
+} __packed;
+
+enum thunderstrike_charger_type {
+ THUNDERSTRIKE_CHARGER_TYPE_NONE = 0,
+ THUNDERSTRIKE_CHARGER_TYPE_TRICKLE,
+ THUNDERSTRIKE_CHARGER_TYPE_NORMAL,
+} __packed;
+static_assert(sizeof(enum thunderstrike_charger_type) == 1);
+
+enum thunderstrike_charger_state {
+ THUNDERSTRIKE_CHARGER_STATE_UNKNOWN = 0,
+ THUNDERSTRIKE_CHARGER_STATE_DISABLED,
+ THUNDERSTRIKE_CHARGER_STATE_CHARGING,
+ THUNDERSTRIKE_CHARGER_STATE_FULL,
+ THUNDERSTRIKE_CHARGER_STATE_FAILED = 8,
+} __packed;
+static_assert(sizeof(enum thunderstrike_charger_state) == 1);
+
+struct thunderstrike_hostcmd_charger {
+ u8 connected;
+ enum thunderstrike_charger_type type;
+ enum thunderstrike_charger_state state;
+} __packed;
+
struct thunderstrike_hostcmd_board_info {
__le16 revision;
__le16 serial[7];
@@ -80,6 +155,8 @@ struct thunderstrike_hostcmd_resp_report {
struct thunderstrike_hostcmd_haptics motors;
__le16 fw_version;
enum thunderstrike_led_state led_state;
+ struct thunderstrike_hostcmd_battery battery;
+ struct thunderstrike_hostcmd_charger charger;
u8 payload[30];
} __packed;
} __packed;
@@ -109,6 +186,7 @@ static_assert(sizeof(struct thunderstrike_hostcmd_req_report) ==
/* Common struct for shield accessories. */
struct shield_device {
struct hid_device *hdev;
+ struct power_supply_dev battery_dev;
unsigned long initialized_flags;
const char *codename;
@@ -119,9 +197,17 @@ struct shield_device {
} board_info;
};
+/*
+ * It is non-trivial to uniquely identify Thunderstrike controllers at
+ * initialization time. Use an ID allocator to help with this.
+ */
+static DEFINE_IDA(thunderstrike_ida);
+
struct thunderstrike {
struct shield_device base;
+ int id;
+
/* Sub-devices */
struct input_dev *haptics_dev;
struct led_classdev led_dev;
@@ -133,6 +219,9 @@ struct thunderstrike {
spinlock_t haptics_update_lock;
u8 led_state : 1;
enum thunderstrike_led_state led_value;
+ struct thunderstrike_psy_prop_values psy_stats;
+ spinlock_t psy_stats_lock;
+ struct timer_list psy_stats_timer;
struct work_struct hostcmd_req_work;
};
@@ -164,7 +253,7 @@ static struct input_dev *shield_allocate_input_dev(struct hid_device *hdev,
idev->id.product = hdev->product;
idev->id.version = hdev->version;
idev->uniq = hdev->uniq;
- idev->name = devm_kasprintf(&idev->dev, GFP_KERNEL, "%s %s", hdev->name,
+ idev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name,
name_suffix);
if (!idev->name)
goto err_name;
@@ -247,6 +336,16 @@ static void thunderstrike_hostcmd_req_work_handler(struct work_struct *work)
thunderstrike_send_hostcmd_request(ts);
}
+ if (test_and_clear_bit(THUNDERSTRIKE_POWER_SUPPLY_STATS_UPDATE, &ts->update_flags)) {
+ thunderstrike_hostcmd_req_report_init(
+ report, THUNDERSTRIKE_HOSTCMD_ID_BATTERY);
+ thunderstrike_send_hostcmd_request(ts);
+
+ thunderstrike_hostcmd_req_report_init(
+ report, THUNDERSTRIKE_HOSTCMD_ID_CHARGER);
+ thunderstrike_send_hostcmd_request(ts);
+ }
+
if (test_and_clear_bit(THUNDERSTRIKE_BOARD_INFO_UPDATE, &ts->update_flags)) {
thunderstrike_hostcmd_req_report_init(
report, THUNDERSTRIKE_HOSTCMD_ID_BOARD_INFO);
@@ -352,6 +451,93 @@ static void thunderstrike_led_set_brightness(struct led_classdev *led,
schedule_work(&ts->hostcmd_req_work);
}
+static int thunderstrike_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct shield_device *shield_dev = power_supply_get_drvdata(psy);
+ struct thunderstrike_psy_prop_values prop_values;
+ struct thunderstrike *ts;
+ int ret = 0;
+
+ ts = container_of(shield_dev, struct thunderstrike, base);
+ spin_lock(&ts->psy_stats_lock);
+ prop_values = ts->psy_stats;
+ spin_unlock(&ts->psy_stats_lock);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = prop_values.status;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_TYPE:
+ val->intval = prop_values.charge_type;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN:
+ val->intval = prop_values.voltage_min;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
+ val->intval = 2900000; /* 2.9 V */
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = 2200000; /* 2.2 V */
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ val->intval = prop_values.voltage_now;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ val->intval = prop_values.voltage_avg;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_BOOT:
+ val->intval = prop_values.voltage_boot;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = prop_values.capacity;
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = prop_values.temp;
+ break;
+ case POWER_SUPPLY_PROP_TEMP_MIN:
+ val->intval = 0; /* 0 C */
+ break;
+ case POWER_SUPPLY_PROP_TEMP_MAX:
+ val->intval = 400; /* 40 C */
+ break;
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
+ val->intval = 15; /* 1.5 C */
+ break;
+ case POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
+ val->intval = 380; /* 38 C */
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static inline void thunderstrike_request_psy_stats(struct thunderstrike *ts)
+{
+ set_bit(THUNDERSTRIKE_POWER_SUPPLY_STATS_UPDATE, &ts->update_flags);
+ schedule_work(&ts->hostcmd_req_work);
+}
+
+static void thunderstrike_psy_stats_timer_handler(struct timer_list *timer)
+{
+ struct thunderstrike *ts =
+ container_of(timer, struct thunderstrike, psy_stats_timer);
+
+ thunderstrike_request_psy_stats(ts);
+ /* Query battery statistics from device every five minutes */
+ mod_timer(timer, jiffies + 300 * HZ);
+}
+
static void
thunderstrike_parse_fw_version_payload(struct shield_device *shield_dev,
__le16 fw_version)
@@ -416,13 +602,138 @@ thunderstrike_parse_led_payload(struct shield_device *shield_dev,
hid_dbg(shield_dev->hdev, "Thunderstrike led HOSTCMD response, 0x%02X\n", led_state);
}
+static void thunderstrike_parse_battery_payload(
+ struct shield_device *shield_dev,
+ struct thunderstrike_hostcmd_battery *battery)
+{
+ struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base);
+ u16 hostcmd_voltage_boot = le16_to_cpu(battery->voltage_boot);
+ u16 hostcmd_voltage_avg = le16_to_cpu(battery->voltage_avg);
+ u16 hostcmd_voltage_min = le16_to_cpu(battery->voltage_min);
+ u16 hostcmd_voltage_now = le16_to_cpu(battery->voltage_now);
+ u16 hostcmd_thermistor = le16_to_cpu(battery->thermistor);
+ int voltage_boot, voltage_avg, voltage_min, voltage_now;
+ struct hid_device *hdev = shield_dev->hdev;
+ u8 capacity = battery->capacity;
+ int temp;
+
+ /* Convert thunderstrike device values to µV and tenths of degree Celsius */
+ voltage_boot = hostcmd_voltage_boot * 1000;
+ voltage_avg = hostcmd_voltage_avg * 1000;
+ voltage_min = hostcmd_voltage_min * 1000;
+ voltage_now = hostcmd_voltage_now * 1000;
+ temp = (1378 - (int)hostcmd_thermistor) * 10 / 19;
+
+ /* Copy converted values */
+ spin_lock(&ts->psy_stats_lock);
+ ts->psy_stats.voltage_boot = voltage_boot;
+ ts->psy_stats.voltage_avg = voltage_avg;
+ ts->psy_stats.voltage_min = voltage_min;
+ ts->psy_stats.voltage_now = voltage_now;
+ ts->psy_stats.capacity = capacity;
+ ts->psy_stats.temp = temp;
+ spin_unlock(&ts->psy_stats_lock);
+
+ set_bit(SHIELD_BATTERY_STATS_INITIALIZED, &shield_dev->initialized_flags);
+
+ hid_dbg(hdev,
+ "Thunderstrike battery HOSTCMD response, voltage_avg: %u voltage_now: %u\n",
+ hostcmd_voltage_avg, hostcmd_voltage_now);
+ hid_dbg(hdev,
+ "Thunderstrike battery HOSTCMD response, voltage_boot: %u voltage_min: %u\n",
+ hostcmd_voltage_boot, hostcmd_voltage_min);
+ hid_dbg(hdev,
+ "Thunderstrike battery HOSTCMD response, thermistor: %u\n",
+ hostcmd_thermistor);
+ hid_dbg(hdev,
+ "Thunderstrike battery HOSTCMD response, capacity: %u%%\n",
+ capacity);
+}
+
+static void thunderstrike_parse_charger_payload(
+ struct shield_device *shield_dev,
+ struct thunderstrike_hostcmd_charger *charger)
+{
+ struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base);
+ int charge_type = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN;
+ struct hid_device *hdev = shield_dev->hdev;
+ int status = POWER_SUPPLY_STATUS_UNKNOWN;
+
+ switch (charger->type) {
+ case THUNDERSTRIKE_CHARGER_TYPE_NONE:
+ charge_type = POWER_SUPPLY_CHARGE_TYPE_NONE;
+ break;
+ case THUNDERSTRIKE_CHARGER_TYPE_TRICKLE:
+ charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
+ break;
+ case THUNDERSTRIKE_CHARGER_TYPE_NORMAL:
+ charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
+ break;
+ default:
+ hid_warn(hdev, "Unhandled Thunderstrike charger HOSTCMD type, %u\n",
+ charger->type);
+ break;
+ }
+
+ switch (charger->state) {
+ case THUNDERSTRIKE_CHARGER_STATE_UNKNOWN:
+ status = POWER_SUPPLY_STATUS_UNKNOWN;
+ break;
+ case THUNDERSTRIKE_CHARGER_STATE_DISABLED:
+ /* Indicates charger is disconnected */
+ break;
+ case THUNDERSTRIKE_CHARGER_STATE_CHARGING:
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case THUNDERSTRIKE_CHARGER_STATE_FULL:
+ status = POWER_SUPPLY_STATUS_FULL;
+ break;
+ case THUNDERSTRIKE_CHARGER_STATE_FAILED:
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ hid_err(hdev, "Thunderstrike device failed to charge\n");
+ break;
+ default:
+ hid_warn(hdev, "Unhandled Thunderstrike charger HOSTCMD state, %u\n",
+ charger->state);
+ break;
+ }
+
+ if (!charger->connected)
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+
+ spin_lock(&ts->psy_stats_lock);
+ ts->psy_stats.charge_type = charge_type;
+ ts->psy_stats.status = status;
+ spin_unlock(&ts->psy_stats_lock);
+
+ set_bit(SHIELD_CHARGER_STATE_INITIALIZED, &shield_dev->initialized_flags);
+
+ hid_dbg(hdev,
+ "Thunderstrike charger HOSTCMD response, connected: %u, type: %u, state: %u\n",
+ charger->connected, charger->type, charger->state);
+}
+
+static inline void thunderstrike_device_init_info(struct shield_device *shield_dev)
+{
+ struct thunderstrike *ts =
+ container_of(shield_dev, struct thunderstrike, base);
+
+ if (!test_bit(SHIELD_FW_VERSION_INITIALIZED, &shield_dev->initialized_flags))
+ thunderstrike_request_firmware_version(ts);
+
+ if (!test_bit(SHIELD_BOARD_INFO_INITIALIZED, &shield_dev->initialized_flags))
+ thunderstrike_request_board_info(ts);
+
+ if (!test_bit(SHIELD_BATTERY_STATS_INITIALIZED, &shield_dev->initialized_flags) ||
+ !test_bit(SHIELD_CHARGER_STATE_INITIALIZED, &shield_dev->initialized_flags))
+ thunderstrike_psy_stats_timer_handler(&ts->psy_stats_timer);
+}
+
static int thunderstrike_parse_report(struct shield_device *shield_dev,
struct hid_report *report, u8 *data,
int size)
{
struct thunderstrike_hostcmd_resp_report *hostcmd_resp_report;
- struct thunderstrike *ts =
- container_of(shield_dev, struct thunderstrike, base);
struct hid_device *hdev = shield_dev->hdev;
switch (report->id) {
@@ -445,6 +756,10 @@ static int thunderstrike_parse_report(struct shield_device *shield_dev,
case THUNDERSTRIKE_HOSTCMD_ID_LED:
thunderstrike_parse_led_payload(shield_dev, hostcmd_resp_report->led_state);
break;
+ case THUNDERSTRIKE_HOSTCMD_ID_BATTERY:
+ thunderstrike_parse_battery_payload(shield_dev,
+ &hostcmd_resp_report->battery);
+ break;
case THUNDERSTRIKE_HOSTCMD_ID_BOARD_INFO:
thunderstrike_parse_board_info_payload(
shield_dev, &hostcmd_resp_report->board_info);
@@ -453,14 +768,17 @@ static int thunderstrike_parse_report(struct shield_device *shield_dev,
thunderstrike_parse_haptics_payload(
shield_dev, &hostcmd_resp_report->motors);
break;
-
case THUNDERSTRIKE_HOSTCMD_ID_USB_INIT:
- case THUNDERSTRIKE_HOSTCMD_ID_BLUETOOTH_INIT:
/* May block HOSTCMD requests till received initially */
- thunderstrike_request_firmware_version(ts);
- thunderstrike_request_board_info(ts);
- /* Only HOSTCMD that can be triggered without a request */
- return 0;
+ thunderstrike_device_init_info(shield_dev);
+ break;
+ case THUNDERSTRIKE_HOSTCMD_ID_CHARGER:
+ /* May block HOSTCMD requests till received initially */
+ thunderstrike_device_init_info(shield_dev);
+
+ thunderstrike_parse_charger_payload(
+ shield_dev, &hostcmd_resp_report->charger);
+ break;
default:
hid_warn(hdev,
"Unhandled Thunderstrike HOSTCMD id %d\n",
@@ -480,7 +798,8 @@ static inline int thunderstrike_led_create(struct thunderstrike *ts)
{
struct led_classdev *led = &ts->led_dev;
- led->name = "thunderstrike:blue:led";
+ led->name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL,
+ "thunderstrike%d:blue:led", ts->id);
led->max_brightness = 1;
led->flags = LED_CORE_SUSPENDRESUME;
led->brightness_get = &thunderstrike_led_get_brightness;
@@ -489,6 +808,50 @@ static inline int thunderstrike_led_create(struct thunderstrike *ts)
return led_classdev_register(&ts->base.hdev->dev, led);
}
+static inline int thunderstrike_psy_create(struct shield_device *shield_dev)
+{
+ struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base);
+ struct power_supply_config psy_cfg = { .drv_data = shield_dev, };
+ struct hid_device *hdev = shield_dev->hdev;
+ int ret;
+
+ /*
+ * Set an initial capacity and temperature value to avoid prematurely
+ * triggering alerts. Will be replaced by values queried from initial
+ * HOSTCMD requests.
+ */
+ ts->psy_stats.capacity = 100;
+ ts->psy_stats.temp = 182;
+
+ shield_dev->battery_dev.desc.properties = thunderstrike_battery_props;
+ shield_dev->battery_dev.desc.num_properties =
+ ARRAY_SIZE(thunderstrike_battery_props);
+ shield_dev->battery_dev.desc.get_property = thunderstrike_battery_get_property;
+ shield_dev->battery_dev.desc.type = POWER_SUPPLY_TYPE_BATTERY;
+ shield_dev->battery_dev.desc.name =
+ devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL,
+ "thunderstrike_%d", ts->id);
+
+ shield_dev->battery_dev.psy = power_supply_register(
+ &hdev->dev, &shield_dev->battery_dev.desc, &psy_cfg);
+ if (IS_ERR(shield_dev->battery_dev.psy)) {
+ hid_err(hdev, "Failed to register Thunderstrike battery device\n");
+ return PTR_ERR(shield_dev->battery_dev.psy);
+ }
+
+ ret = power_supply_powers(shield_dev->battery_dev.psy, &hdev->dev);
+ if (ret) {
+ hid_err(hdev, "Failed to associate battery device to Thunderstrike\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ power_supply_unregister(shield_dev->battery_dev.psy);
+ return ret;
+}
+
static struct shield_device *thunderstrike_create(struct hid_device *hdev)
{
struct shield_device *shield_dev;
@@ -509,26 +872,47 @@ static struct shield_device *thunderstrike_create(struct hid_device *hdev)
shield_dev->codename = "Thunderstrike";
spin_lock_init(&ts->haptics_update_lock);
+ spin_lock_init(&ts->psy_stats_lock);
INIT_WORK(&ts->hostcmd_req_work, thunderstrike_hostcmd_req_work_handler);
hid_set_drvdata(hdev, shield_dev);
+ ts->id = ida_alloc(&thunderstrike_ida, GFP_KERNEL);
+ if (ts->id < 0)
+ return ERR_PTR(ts->id);
+
+ ts->haptics_dev = shield_haptics_create(shield_dev, thunderstrike_play_effect);
+ if (IS_ERR(ts->haptics_dev)) {
+ hid_err(hdev, "Failed to create Thunderstrike haptics instance\n");
+ ret = PTR_ERR(ts->haptics_dev);
+ goto err_id;
+ }
+
+ ret = thunderstrike_psy_create(shield_dev);
+ if (ret) {
+ hid_err(hdev, "Failed to create Thunderstrike power supply instance\n");
+ goto err_haptics;
+ }
+
ret = thunderstrike_led_create(ts);
if (ret) {
hid_err(hdev, "Failed to create Thunderstrike LED instance\n");
- return ERR_PTR(ret);
+ goto err_psy;
}
- ts->haptics_dev = shield_haptics_create(shield_dev, thunderstrike_play_effect);
- if (IS_ERR(ts->haptics_dev))
- goto err;
+ timer_setup(&ts->psy_stats_timer, thunderstrike_psy_stats_timer_handler, 0);
hid_info(hdev, "Registered Thunderstrike controller\n");
return shield_dev;
-err:
- led_classdev_unregister(&ts->led_dev);
- return ERR_CAST(ts->haptics_dev);
+err_psy:
+ power_supply_unregister(shield_dev->battery_dev.psy);
+err_haptics:
+ if (ts->haptics_dev)
+ input_unregister_device(ts->haptics_dev);
+err_id:
+ ida_free(&thunderstrike_ida, ts->id);
+ return ERR_PTR(ret);
}
static int android_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -683,8 +1067,7 @@ static int shield_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_stop;
}
- thunderstrike_request_firmware_version(ts);
- thunderstrike_request_board_info(ts);
+ thunderstrike_device_init_info(shield_dev);
return ret;
@@ -704,9 +1087,12 @@ static void shield_remove(struct hid_device *hdev)
ts = container_of(dev, struct thunderstrike, base);
hid_hw_close(hdev);
- led_classdev_unregister(&ts->led_dev);
+ power_supply_unregister(dev->battery_dev.psy);
if (ts->haptics_dev)
input_unregister_device(ts->haptics_dev);
+ led_classdev_unregister(&ts->led_dev);
+ ida_free(&thunderstrike_ida, ts->id);
+ del_timer_sync(&ts->psy_stats_timer);
cancel_work_sync(&ts->hostcmd_req_work);
hid_hw_stop(hdev);
}
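In the Thunderstrike battery support above, power-supply reads never touch the hardware: thunderstrike_battery_get_property() only copies cached values under psy_stats_lock, while the HOSTCMD queries run from the hostcmd_req_work item and are re-armed by psy_stats_timer every five minutes. A generic sketch of that poll-via-timer split (foo_* names are illustrative):

    /* Timer callback: never blocks, only schedules work and re-arms itself. */
    static void foo_stats_timer(struct timer_list *t)
    {
            struct foo_dev *foo = container_of(t, struct foo_dev, stats_timer);

            schedule_work(&foo->stats_work);
            mod_timer(t, jiffies + 300 * HZ);    /* poll again in five minutes */
    }

    /* Work item: safe to do the blocking transfer here. */
    static void foo_stats_work(struct work_struct *work)
    {
            struct foo_dev *foo = container_of(work, struct foo_dev, stats_work);

            /* issue the query, then cache the result under foo->lock */
    }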
diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
index ea6b79b3aeeb..d55aaabab1ed 100644
--- a/drivers/hid/hid-roccat-arvo.c
+++ b/drivers/hid/hid-roccat-arvo.c
@@ -23,8 +23,6 @@
#include "hid-roccat-common.h"
#include "hid-roccat-arvo.h"
-static struct class *arvo_class;
-
static ssize_t arvo_sysfs_show_mode_key(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -268,6 +266,11 @@ static const struct attribute_group *arvo_groups[] = {
NULL,
};
+static const struct class arvo_class = {
+ .name = "arvo",
+ .dev_groups = arvo_groups,
+};
+
static int arvo_init_arvo_device_struct(struct usb_device *usb_dev,
struct arvo_device *arvo)
{
@@ -309,7 +312,7 @@ static int arvo_init_specials(struct hid_device *hdev)
goto exit_free;
}
- retval = roccat_connect(arvo_class, hdev,
+ retval = roccat_connect(&arvo_class, hdev,
sizeof(struct arvo_roccat_report));
if (retval < 0) {
hid_err(hdev, "couldn't init char dev\n");
@@ -433,21 +436,20 @@ static int __init arvo_init(void)
{
int retval;
- arvo_class = class_create("arvo");
- if (IS_ERR(arvo_class))
- return PTR_ERR(arvo_class);
- arvo_class->dev_groups = arvo_groups;
+ retval = class_register(&arvo_class);
+ if (retval)
+ return retval;
retval = hid_register_driver(&arvo_driver);
if (retval)
- class_destroy(arvo_class);
+ class_unregister(&arvo_class);
return retval;
}
static void __exit arvo_exit(void)
{
hid_unregister_driver(&arvo_driver);
- class_destroy(arvo_class);
+ class_unregister(&arvo_class);
}
module_init(arvo_init);
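The arvo change above is the first instance of a conversion repeated for the isku, kone, koneplus, konepure, kovaplus, pyra, ryos and savu drivers below: the runtime class_create()/class_destroy() pair becomes a statically defined const struct class registered at module init, and roccat_connect() now takes a const class pointer. The resulting init/exit shape, sketched with illustrative foo_* names:

    static const struct class foo_class = {
            .name       = "foo",            /* class name must match the driver name */
            .dev_groups = foo_groups,
    };

    static int __init foo_init(void)
    {
            int retval = class_register(&foo_class);

            if (retval)
                    return retval;

            retval = hid_register_driver(&foo_driver);
            if (retval)
                    class_unregister(&foo_class);
            return retval;
    }

    static void __exit foo_exit(void)
    {
            hid_unregister_driver(&foo_driver);
            class_unregister(&foo_class);
    }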
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
index 3903a2cea00c..458060403397 100644
--- a/drivers/hid/hid-roccat-isku.c
+++ b/drivers/hid/hid-roccat-isku.c
@@ -23,8 +23,6 @@
#include "hid-roccat-common.h"
#include "hid-roccat-isku.h"
-static struct class *isku_class;
-
static void isku_profile_activated(struct isku_device *isku, uint new_profile)
{
isku->actual_profile = new_profile;
@@ -248,6 +246,11 @@ static const struct attribute_group *isku_groups[] = {
NULL,
};
+static const struct class isku_class = {
+ .name = "isku",
+ .dev_groups = isku_groups,
+};
+
static int isku_init_isku_device_struct(struct usb_device *usb_dev,
struct isku_device *isku)
{
@@ -289,7 +292,7 @@ static int isku_init_specials(struct hid_device *hdev)
goto exit_free;
}
- retval = roccat_connect(isku_class, hdev,
+ retval = roccat_connect(&isku_class, hdev,
sizeof(struct isku_roccat_report));
if (retval < 0) {
hid_err(hdev, "couldn't init char dev\n");
@@ -435,21 +438,21 @@ static struct hid_driver isku_driver = {
static int __init isku_init(void)
{
int retval;
- isku_class = class_create("isku");
- if (IS_ERR(isku_class))
- return PTR_ERR(isku_class);
- isku_class->dev_groups = isku_groups;
+
+ retval = class_register(&isku_class);
+ if (retval)
+ return retval;
retval = hid_register_driver(&isku_driver);
if (retval)
- class_destroy(isku_class);
+ class_unregister(&isku_class);
return retval;
}
static void __exit isku_exit(void)
{
hid_unregister_driver(&isku_driver);
- class_destroy(isku_class);
+ class_unregister(&isku_class);
}
module_init(isku_init);
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 945ae236fb45..00a1abc7e839 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -89,9 +89,6 @@ static int kone_send(struct usb_device *usb_dev, uint usb_command,
return ((len < 0) ? len : ((len != size) ? -EIO : 0));
}
-/* kone_class is used for creating sysfs attributes via roccat char device */
-static struct class *kone_class;
-
static void kone_set_settings_checksum(struct kone_settings *settings)
{
uint16_t checksum = 0;
@@ -657,6 +654,12 @@ static const struct attribute_group *kone_groups[] = {
NULL,
};
+/* kone_class is used for creating sysfs attributes via roccat char device */
+static const struct class kone_class = {
+ .name = "kone",
+ .dev_groups = kone_groups,
+};
+
static int kone_init_kone_device_struct(struct usb_device *usb_dev,
struct kone_device *kone)
{
@@ -712,8 +715,8 @@ static int kone_init_specials(struct hid_device *hdev)
goto exit_free;
}
- retval = roccat_connect(kone_class, hdev,
- sizeof(struct kone_roccat_report));
+ retval = roccat_connect(&kone_class, hdev,
+ sizeof(struct kone_roccat_report));
if (retval < 0) {
hid_err(hdev, "couldn't init char dev\n");
/* be tolerant about not getting chrdev */
@@ -890,21 +893,20 @@ static int __init kone_init(void)
int retval;
/* class name has to be same as driver name */
- kone_class = class_create("kone");
- if (IS_ERR(kone_class))
- return PTR_ERR(kone_class);
- kone_class->dev_groups = kone_groups;
+ retval = class_register(&kone_class);
+ if (retval)
+ return retval;
retval = hid_register_driver(&kone_driver);
if (retval)
- class_destroy(kone_class);
+ class_unregister(&kone_class);
return retval;
}
static void __exit kone_exit(void)
{
hid_unregister_driver(&kone_driver);
- class_destroy(kone_class);
+ class_unregister(&kone_class);
}
module_init(kone_init);
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 97b83b6f53dd..22b895436a7c 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -26,8 +26,6 @@
static uint profile_numbers[5] = {0, 1, 2, 3, 4};
-static struct class *koneplus_class;
-
static void koneplus_profile_activated(struct koneplus_device *koneplus,
uint new_profile)
{
@@ -356,6 +354,11 @@ static const struct attribute_group *koneplus_groups[] = {
NULL,
};
+static const struct class koneplus_class = {
+ .name = "koneplus",
+ .dev_groups = koneplus_groups,
+};
+
static int koneplus_init_koneplus_device_struct(struct usb_device *usb_dev,
struct koneplus_device *koneplus)
{
@@ -394,8 +397,8 @@ static int koneplus_init_specials(struct hid_device *hdev)
goto exit_free;
}
- retval = roccat_connect(koneplus_class, hdev,
- sizeof(struct koneplus_roccat_report));
+ retval = roccat_connect(&koneplus_class, hdev,
+ sizeof(struct koneplus_roccat_report));
if (retval < 0) {
hid_err(hdev, "couldn't init char dev\n");
} else {
@@ -549,21 +552,20 @@ static int __init koneplus_init(void)
int retval;
/* class name has to be same as driver name */
- koneplus_class = class_create("koneplus");
- if (IS_ERR(koneplus_class))
- return PTR_ERR(koneplus_class);
- koneplus_class->dev_groups = koneplus_groups;
+ retval = class_register(&koneplus_class);
+ if (retval)
+ return retval;
retval = hid_register_driver(&koneplus_driver);
if (retval)
- class_destroy(koneplus_class);
+ class_unregister(&koneplus_class);
return retval;
}
static void __exit koneplus_exit(void)
{
hid_unregister_driver(&koneplus_driver);
- class_destroy(koneplus_class);
+ class_unregister(&koneplus_class);
}
module_init(koneplus_init);
diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c
index a297756f2410..beca8aef8bbb 100644
--- a/drivers/hid/hid-roccat-konepure.c
+++ b/drivers/hid/hid-roccat-konepure.c
@@ -36,8 +36,6 @@ struct konepure_mouse_report_button {
uint8_t unknown[2];
} __packed;
-static struct class *konepure_class;
-
ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x04, 0x03);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(actual_profile, 0x05, 0x03);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile_settings, 0x06, 0x1f);
@@ -72,6 +70,11 @@ static const struct attribute_group *konepure_groups[] = {
NULL,
};
+static const struct class konepure_class = {
+ .name = "konepure",
+ .dev_groups = konepure_groups,
+};
+
static int konepure_init_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
@@ -98,8 +101,8 @@ static int konepure_init_specials(struct hid_device *hdev)
goto exit_free;
}
- retval = roccat_connect(konepure_class, hdev,
- sizeof(struct konepure_mouse_report_button));
+ retval = roccat_connect(&konepure_class, hdev,
+ sizeof(struct konepure_mouse_report_button));
if (retval < 0) {
hid_err(hdev, "couldn't init char dev\n");
} else {
@@ -207,21 +210,20 @@ static int __init konepure_init(void)
{
int retval;
- konepure_class = class_create("konepure");
- if (IS_ERR(konepure_class))
- return PTR_ERR(konepure_class);
- konepure_class->dev_groups = konepure_groups;
+ retval = class_register(&konepure_class);
+ if (retval)
+ return retval;
retval = hid_register_driver(&konepure_driver);
if (retval)
- class_destroy(konepure_class);
+ class_unregister(&konepure_class);
return retval;
}
static void __exit konepure_exit(void)
{
hid_unregister_driver(&konepure_driver);
- class_destroy(konepure_class);
+ class_unregister(&konepure_class);
}
module_init(konepure_init);
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 1a1d96e11683..86af538c10d6 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -24,8 +24,6 @@
static uint profile_numbers[5] = {0, 1, 2, 3, 4};
-static struct class *kovaplus_class;
-
static uint kovaplus_convert_event_cpi(uint value)
{
return (value == 7 ? 4 : (value == 4 ? 3 : value));
@@ -409,6 +407,11 @@ static const struct attribute_group *kovaplus_groups[] = {
NULL,
};
+static const struct class kovaplus_class = {
+ .name = "kovaplus",
+ .dev_groups = kovaplus_groups,
+};
+
static int kovaplus_init_kovaplus_device_struct(struct usb_device *usb_dev,
struct kovaplus_device *kovaplus)
{
@@ -463,8 +466,8 @@ static int kovaplus_init_specials(struct hid_device *hdev)
goto exit_free;
}
- retval = roccat_connect(kovaplus_class, hdev,
- sizeof(struct kovaplus_roccat_report));
+ retval = roccat_connect(&kovaplus_class, hdev,
+ sizeof(struct kovaplus_roccat_report));
if (retval < 0) {
hid_err(hdev, "couldn't init char dev\n");
} else {
@@ -638,21 +641,20 @@ static int __init kovaplus_init(void)
{
int retval;
- kovaplus_class = class_create("kovaplus");
- if (IS_ERR(kovaplus_class))
- return PTR_ERR(kovaplus_class);
- kovaplus_class->dev_groups = kovaplus_groups;
+ retval = class_register(&kovaplus_class);
+ if (retval)
+ return retval;
retval = hid_register_driver(&kovaplus_driver);
if (retval)
- class_destroy(kovaplus_class);
+ class_unregister(&kovaplus_class);
return retval;
}
static void __exit kovaplus_exit(void)
{
hid_unregister_driver(&kovaplus_driver);
- class_destroy(kovaplus_class);
+ class_unregister(&kovaplus_class);
}
module_init(kovaplus_init);
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 15528c3b013c..5663b9cd9c69 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -26,9 +26,6 @@
static uint profile_numbers[5] = {0, 1, 2, 3, 4};
-/* pyra_class is used for creating sysfs attributes via roccat char device */
-static struct class *pyra_class;
-
static void profile_activated(struct pyra_device *pyra,
unsigned int new_profile)
{
@@ -366,6 +363,12 @@ static const struct attribute_group *pyra_groups[] = {
NULL,
};
+/* pyra_class is used for creating sysfs attributes via roccat char device */
+static const struct class pyra_class = {
+ .name = "pyra",
+ .dev_groups = pyra_groups,
+};
+
static int pyra_init_pyra_device_struct(struct usb_device *usb_dev,
struct pyra_device *pyra)
{
@@ -413,7 +416,7 @@ static int pyra_init_specials(struct hid_device *hdev)
goto exit_free;
}
- retval = roccat_connect(pyra_class, hdev,
+ retval = roccat_connect(&pyra_class, hdev,
sizeof(struct pyra_roccat_report));
if (retval < 0) {
hid_err(hdev, "couldn't init char dev\n");
@@ -585,21 +588,20 @@ static int __init pyra_init(void)
int retval;
/* class name has to be same as driver name */
- pyra_class = class_create("pyra");
- if (IS_ERR(pyra_class))
- return PTR_ERR(pyra_class);
- pyra_class->dev_groups = pyra_groups;
+ retval = class_register(&pyra_class);
+ if (retval)
+ return retval;
retval = hid_register_driver(&pyra_driver);
if (retval)
- class_destroy(pyra_class);
+ class_unregister(&pyra_class);
return retval;
}
static void __exit pyra_exit(void)
{
hid_unregister_driver(&pyra_driver);
- class_destroy(pyra_class);
+ class_unregister(&pyra_class);
}
module_init(pyra_init);
diff --git a/drivers/hid/hid-roccat-ryos.c b/drivers/hid/hid-roccat-ryos.c
index 0eb17a3b925d..57714a4525e2 100644
--- a/drivers/hid/hid-roccat-ryos.c
+++ b/drivers/hid/hid-roccat-ryos.c
@@ -28,8 +28,6 @@ struct ryos_report_special {
uint8_t data[4];
} __packed;
-static struct class *ryos_class;
-
ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x04, 0x03);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile, 0x05, 0x03);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(keys_primary, 0x06, 0x7d);
@@ -80,6 +78,11 @@ static const struct attribute_group *ryos_groups[] = {
NULL,
};
+static const struct class ryos_class = {
+ .name = "ryos",
+ .dev_groups = ryos_groups,
+};
+
static int ryos_init_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
@@ -106,7 +109,7 @@ static int ryos_init_specials(struct hid_device *hdev)
goto exit_free;
}
- retval = roccat_connect(ryos_class, hdev,
+ retval = roccat_connect(&ryos_class, hdev,
sizeof(struct ryos_report_special));
if (retval < 0) {
hid_err(hdev, "couldn't init char dev\n");
@@ -216,21 +219,20 @@ static int __init ryos_init(void)
{
int retval;
- ryos_class = class_create("ryos");
- if (IS_ERR(ryos_class))
- return PTR_ERR(ryos_class);
- ryos_class->dev_groups = ryos_groups;
+ retval = class_register(&ryos_class);
+ if (retval)
+ return retval;
retval = hid_register_driver(&ryos_driver);
if (retval)
- class_destroy(ryos_class);
+ class_unregister(&ryos_class);
return retval;
}
static void __exit ryos_exit(void)
{
hid_unregister_driver(&ryos_driver);
- class_destroy(ryos_class);
+ class_unregister(&ryos_class);
}
module_init(ryos_init);
diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
index 93be7acef673..2baa47a0efc5 100644
--- a/drivers/hid/hid-roccat-savu.c
+++ b/drivers/hid/hid-roccat-savu.c
@@ -22,8 +22,6 @@
#include "hid-roccat-common.h"
#include "hid-roccat-savu.h"
-static struct class *savu_class;
-
ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x4, 0x03);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile, 0x5, 0x03);
ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(general, 0x6, 0x10);
@@ -52,6 +50,11 @@ static const struct attribute_group *savu_groups[] = {
NULL,
};
+static const struct class savu_class = {
+ .name = "savu",
+ .dev_groups = savu_groups,
+};
+
static int savu_init_specials(struct hid_device *hdev)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
@@ -78,7 +81,7 @@ static int savu_init_specials(struct hid_device *hdev)
goto exit_free;
}
- retval = roccat_connect(savu_class, hdev,
+ retval = roccat_connect(&savu_class, hdev,
sizeof(struct savu_roccat_report));
if (retval < 0) {
hid_err(hdev, "couldn't init char dev\n");
@@ -204,21 +207,20 @@ static int __init savu_init(void)
{
int retval;
- savu_class = class_create("savu");
- if (IS_ERR(savu_class))
- return PTR_ERR(savu_class);
- savu_class->dev_groups = savu_groups;
+ retval = class_register(&savu_class);
+ if (retval)
+ return retval;
retval = hid_register_driver(&savu_driver);
if (retval)
- class_destroy(savu_class);
+ class_unregister(&savu_class);
return retval;
}
static void __exit savu_exit(void)
{
hid_unregister_driver(&savu_driver);
- class_destroy(savu_class);
+ class_unregister(&savu_class);
}
module_init(savu_init);
diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c
index 6da80e442fdd..c7f7562e22e5 100644
--- a/drivers/hid/hid-roccat.c
+++ b/drivers/hid/hid-roccat.c
@@ -295,7 +295,7 @@ EXPORT_SYMBOL_GPL(roccat_report_event);
* Return value is the minor device number in range [0, ROCCAT_MAX_DEVICES] on
* success, a negative error code on failure.
*/
-int roccat_connect(struct class *klass, struct hid_device *hid, int report_size)
+int roccat_connect(const struct class *klass, struct hid_device *hid, int report_size)
{
unsigned int minor;
struct roccat_device *device;
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 83237b86c8ff..2eba152e8b90 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -632,7 +632,7 @@ static int sensor_hub_probe(struct hid_device *hdev,
}
INIT_LIST_HEAD(&hdev->inputs);
- ret = hid_hw_start(hdev, 0);
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed\n");
return ret;
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
index aae3afc4107a..43d2cf7153d7 100644
--- a/drivers/hid/hid-steelseries.c
+++ b/drivers/hid/hid-steelseries.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * HID driver for Steelseries SRW-S1
+ * HID driver for Steelseries devices
*
* Copyright (c) 2013 Simon Wood
+ * Copyright (c) 2023 Bastien Nocera
*/
/*
@@ -11,10 +12,28 @@
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
+#include <linux/usb.h>
#include <linux/leds.h>
#include "hid-ids.h"
+#define STEELSERIES_SRWS1 BIT(0)
+#define STEELSERIES_ARCTIS_1 BIT(1)
+
+struct steelseries_device {
+ struct hid_device *hdev;
+ unsigned long quirks;
+
+ struct delayed_work battery_work;
+ spinlock_t lock;
+ bool removed;
+
+ struct power_supply_desc battery_desc;
+ struct power_supply *battery;
+ uint8_t battery_capacity;
+ bool headset_connected;
+};
+
#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
(IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
#define SRWS1_NUMBER_LEDS 15
@@ -353,9 +372,211 @@ static void steelseries_srws1_remove(struct hid_device *hdev)
}
#endif
+#define STEELSERIES_HEADSET_BATTERY_TIMEOUT_MS 3000
+
+#define ARCTIS_1_BATTERY_RESPONSE_LEN 8
+static const char arctis_1_battery_request[] = { 0x06, 0x12 };
+
+static int steelseries_headset_arctis_1_fetch_battery(struct hid_device *hdev)
+{
+ u8 *write_buf;
+ int ret;
+
+ /* Request battery information */
+ write_buf = kmemdup(arctis_1_battery_request, sizeof(arctis_1_battery_request), GFP_KERNEL);
+ if (!write_buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(hdev, arctis_1_battery_request[0],
+ write_buf, sizeof(arctis_1_battery_request),
+ HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+ if (ret < sizeof(arctis_1_battery_request)) {
+ hid_err(hdev, "hid_hw_raw_request() failed with %d\n", ret);
+ ret = -ENODATA;
+ }
+ kfree(write_buf);
+ return ret;
+}
+
+static void steelseries_headset_fetch_battery(struct hid_device *hdev)
+{
+ struct steelseries_device *sd = hid_get_drvdata(hdev);
+ int ret = 0;
+
+ if (sd->quirks & STEELSERIES_ARCTIS_1)
+ ret = steelseries_headset_arctis_1_fetch_battery(hdev);
+
+ if (ret < 0)
+ hid_dbg(hdev,
+ "Battery query failed (err: %d)\n", ret);
+}
+
+static void steelseries_headset_battery_timer_tick(struct work_struct *work)
+{
+ struct steelseries_device *sd = container_of(work,
+ struct steelseries_device, battery_work.work);
+ struct hid_device *hdev = sd->hdev;
+
+ steelseries_headset_fetch_battery(hdev);
+}
+
+static int steelseries_headset_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct steelseries_device *sd = power_supply_get_drvdata(psy);
+ int ret = 0;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = 1;
+ break;
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = sd->headset_connected ?
+ POWER_SUPPLY_STATUS_DISCHARGING :
+ POWER_SUPPLY_STATUS_UNKNOWN;
+ break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ val->intval = sd->battery_capacity;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static void
+steelseries_headset_set_wireless_status(struct hid_device *hdev,
+ bool connected)
+{
+ struct usb_interface *intf;
+
+ if (!hid_is_usb(hdev))
+ return;
+
+ intf = to_usb_interface(hdev->dev.parent);
+ usb_set_wireless_status(intf, connected ?
+ USB_WIRELESS_STATUS_CONNECTED :
+ USB_WIRELESS_STATUS_DISCONNECTED);
+}
+
+static enum power_supply_property steelseries_headset_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_SCOPE,
+ POWER_SUPPLY_PROP_CAPACITY,
+};
+
+static int steelseries_headset_battery_register(struct steelseries_device *sd)
+{
+ static atomic_t battery_no = ATOMIC_INIT(0);
+ struct power_supply_config battery_cfg = { .drv_data = sd, };
+ unsigned long n;
+ int ret;
+
+ sd->battery_desc.type = POWER_SUPPLY_TYPE_BATTERY;
+ sd->battery_desc.properties = steelseries_headset_battery_props;
+ sd->battery_desc.num_properties = ARRAY_SIZE(steelseries_headset_battery_props);
+ sd->battery_desc.get_property = steelseries_headset_battery_get_property;
+ sd->battery_desc.use_for_apm = 0;
+ n = atomic_inc_return(&battery_no) - 1;
+ sd->battery_desc.name = devm_kasprintf(&sd->hdev->dev, GFP_KERNEL,
+ "steelseries_headset_battery_%ld", n);
+ if (!sd->battery_desc.name)
+ return -ENOMEM;
+
+ /* avoid the warning of 0% battery while waiting for the first info */
+ steelseries_headset_set_wireless_status(sd->hdev, false);
+ sd->battery_capacity = 100;
+
+ sd->battery = devm_power_supply_register(&sd->hdev->dev,
+ &sd->battery_desc, &battery_cfg);
+ if (IS_ERR(sd->battery)) {
+ ret = PTR_ERR(sd->battery);
+ hid_err(sd->hdev,
+ "%s:power_supply_register failed with error %d\n",
+ __func__, ret);
+ return ret;
+ }
+ power_supply_powers(sd->battery, &sd->hdev->dev);
+
+ INIT_DELAYED_WORK(&sd->battery_work, steelseries_headset_battery_timer_tick);
+ steelseries_headset_fetch_battery(sd->hdev);
+
+ return 0;
+}
+
+static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ struct steelseries_device *sd;
+ int ret;
+
+ sd = devm_kzalloc(&hdev->dev, sizeof(*sd), GFP_KERNEL);
+ if (!sd)
+ return -ENOMEM;
+ hid_set_drvdata(hdev, sd);
+ sd->hdev = hdev;
+ sd->quirks = id->driver_data;
+
+ if (sd->quirks & STEELSERIES_SRWS1) {
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+ (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
+ return steelseries_srws1_probe(hdev, id);
+#else
+ return -ENODEV;
+#endif
+ }
+
+ ret = hid_parse(hdev);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&sd->lock);
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret)
+ return ret;
+
+ if (steelseries_headset_battery_register(sd) < 0)
+ hid_err(sd->hdev,
+ "Failed to register battery for headset\n");
+
+ return ret;
+}
+
+static void steelseries_remove(struct hid_device *hdev)
+{
+ struct steelseries_device *sd = hid_get_drvdata(hdev);
+ unsigned long flags;
+
+ if (sd->quirks & STEELSERIES_SRWS1) {
+#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
+ (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
+ steelseries_srws1_remove(hdev);
+#endif
+ return;
+ }
+
+ spin_lock_irqsave(&sd->lock, flags);
+ sd->removed = true;
+ spin_unlock_irqrestore(&sd->lock, flags);
+
+ cancel_delayed_work_sync(&sd->battery_work);
+
+ hid_hw_stop(hdev);
+}
+
static __u8 *steelseries_srws1_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
+ if (hdev->vendor != USB_VENDOR_ID_STEELSERIES ||
+ hdev->product != USB_DEVICE_ID_STEELSERIES_SRWS1)
+ return rdesc;
+
if (*rsize >= 115 && rdesc[11] == 0x02 && rdesc[13] == 0xc8
&& rdesc[29] == 0xbb && rdesc[40] == 0xc5) {
hid_info(hdev, "Fixing up Steelseries SRW-S1 report descriptor\n");
@@ -365,22 +586,82 @@ static __u8 *steelseries_srws1_report_fixup(struct hid_device *hdev, __u8 *rdesc
return rdesc;
}
-static const struct hid_device_id steelseries_srws1_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
+static int steelseries_headset_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *read_buf,
+ int size)
+{
+ struct steelseries_device *sd = hid_get_drvdata(hdev);
+ int capacity = sd->battery_capacity;
+ bool connected = sd->headset_connected;
+ unsigned long flags;
+
+ /* Not a headset */
+ if (sd->quirks & STEELSERIES_SRWS1)
+ return 0;
+
+ if (sd->quirks & STEELSERIES_ARCTIS_1) {
+ hid_dbg(sd->hdev,
+ "Parsing raw event for Arctis 1 headset (%*ph)\n", size, read_buf);
+ if (size < ARCTIS_1_BATTERY_RESPONSE_LEN ||
+ memcmp (read_buf, arctis_1_battery_request, sizeof(arctis_1_battery_request)))
+ return 0;
+ if (read_buf[2] == 0x01) {
+ connected = false;
+ capacity = 100;
+ } else {
+ connected = true;
+ capacity = read_buf[3];
+ }
+ }
+
+ if (connected != sd->headset_connected) {
+ hid_dbg(sd->hdev,
+ "Connected status changed from %sconnected to %sconnected\n",
+ sd->headset_connected ? "" : "not ",
+ connected ? "" : "not ");
+ sd->headset_connected = connected;
+ steelseries_headset_set_wireless_status(hdev, connected);
+ }
+
+ if (capacity != sd->battery_capacity) {
+ hid_dbg(sd->hdev,
+ "Battery capacity changed from %d%% to %d%%\n",
+ sd->battery_capacity, capacity);
+ sd->battery_capacity = capacity;
+ power_supply_changed(sd->battery);
+ }
+
+ spin_lock_irqsave(&sd->lock, flags);
+ if (!sd->removed)
+ schedule_delayed_work(&sd->battery_work,
+ msecs_to_jiffies(STEELSERIES_HEADSET_BATTERY_TIMEOUT_MS));
+ spin_unlock_irqrestore(&sd->lock, flags);
+
+ return 0;
+}
+
+static const struct hid_device_id steelseries_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1),
+ .driver_data = STEELSERIES_SRWS1 },
+
+ { /* SteelSeries Arctis 1 Wireless for XBox */
+ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, 0x12b6),
+ .driver_data = STEELSERIES_ARCTIS_1 },
+
{ }
};
-MODULE_DEVICE_TABLE(hid, steelseries_srws1_devices);
-
-static struct hid_driver steelseries_srws1_driver = {
- .name = "steelseries_srws1",
- .id_table = steelseries_srws1_devices,
-#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
- (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
- .probe = steelseries_srws1_probe,
- .remove = steelseries_srws1_remove,
-#endif
- .report_fixup = steelseries_srws1_report_fixup
+MODULE_DEVICE_TABLE(hid, steelseries_devices);
+
+static struct hid_driver steelseries_driver = {
+ .name = "steelseries",
+ .id_table = steelseries_devices,
+ .probe = steelseries_probe,
+ .remove = steelseries_remove,
+ .report_fixup = steelseries_srws1_report_fixup,
+ .raw_event = steelseries_headset_raw_event,
};
-module_hid_driver(steelseries_srws1_driver);
+module_hid_driver(steelseries_driver);
MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
+MODULE_AUTHOR("Simon Wood <simon@mungewell.org>");
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index f67835f9ed4c..ad74cbc9a0aa 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -85,10 +85,8 @@ static int uclogic_input_configured(struct hid_device *hdev,
{
struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev);
struct uclogic_params *params = &drvdata->params;
- char *name;
const char *suffix = NULL;
struct hid_field *field;
- size_t len;
size_t i;
const struct uclogic_params_frame *frame;
@@ -146,14 +144,9 @@ static int uclogic_input_configured(struct hid_device *hdev,
}
}
- if (suffix) {
- len = strlen(hdev->name) + 2 + strlen(suffix);
- name = devm_kzalloc(&hi->input->dev, len, GFP_KERNEL);
- if (name) {
- snprintf(name, len, "%s %s", hdev->name, suffix);
- hi->input->name = name;
- }
- }
+ if (suffix)
+ hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+ "%s %s", hdev->name, suffix);
return 0;
}
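
The uclogic change above swaps manual length math plus snprintf() for devm_kasprintf(), which allocates and formats in one device-managed call. A minimal sketch, assuming a struct device to tie the allocation to:

#include <linux/device.h>
#include <linux/gfp.h>

/* Returns "<base> <suffix>", freed automatically with @dev (NULL on OOM). */
static const char *example_suffixed_name(struct device *dev,
					 const char *base, const char *suffix)
{
	return devm_kasprintf(dev, GFP_KERNEL, "%s %s", base, suffix);
}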
diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
index a99dcca2e099..00f9be55f148 100644
--- a/drivers/hid/hid-wiimote-debug.c
+++ b/drivers/hid/hid-wiimote-debug.c
@@ -173,7 +173,6 @@ int wiidebug_init(struct wiimote_data *wdata)
{
struct wiimote_debug *dbg;
unsigned long flags;
- int ret = -ENOMEM;
dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
if (!dbg)
@@ -183,13 +182,9 @@ int wiidebug_init(struct wiimote_data *wdata)
dbg->eeprom = debugfs_create_file("eeprom", S_IRUSR,
dbg->wdata->hdev->debug_dir, dbg, &wiidebug_eeprom_fops);
- if (!dbg->eeprom)
- goto err;
dbg->drm = debugfs_create_file("drm", S_IRUSR,
dbg->wdata->hdev->debug_dir, dbg, &wiidebug_drm_fops);
- if (!dbg->drm)
- goto err_drm;
spin_lock_irqsave(&wdata->state.lock, flags);
wdata->debug = dbg;
@@ -197,11 +192,6 @@ int wiidebug_init(struct wiimote_data *wdata)
return 0;
-err_drm:
- debugfs_remove(dbg->eeprom);
-err:
- kfree(dbg);
- return ret;
}
void wiidebug_deinit(struct wiimote_data *wdata)
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index e63c56a0d57f..13c8dd8cd350 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -32,7 +32,9 @@
static int hidraw_major;
static struct cdev hidraw_cdev;
-static struct class *hidraw_class;
+static const struct class hidraw_class = {
+ .name = "hidraw",
+};
static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES];
static DECLARE_RWSEM(minors_rwsem);
@@ -329,7 +331,7 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit)
hid_hw_close(hidraw->hid);
wake_up_interruptible(&hidraw->wait);
}
- device_destroy(hidraw_class,
+ device_destroy(&hidraw_class,
MKDEV(hidraw_major, hidraw->minor));
} else {
--hidraw->open;
@@ -569,7 +571,7 @@ int hidraw_connect(struct hid_device *hid)
goto out;
}
- dev->dev = device_create(hidraw_class, &hid->dev, MKDEV(hidraw_major, minor),
+ dev->dev = device_create(&hidraw_class, &hid->dev, MKDEV(hidraw_major, minor),
NULL, "%s%d", "hidraw", minor);
if (IS_ERR(dev->dev)) {
@@ -623,11 +625,9 @@ int __init hidraw_init(void)
hidraw_major = MAJOR(dev_id);
- hidraw_class = class_create("hidraw");
- if (IS_ERR(hidraw_class)) {
- result = PTR_ERR(hidraw_class);
+ result = class_register(&hidraw_class);
+ if (result)
goto error_cdev;
- }
cdev_init(&hidraw_cdev, &hidraw_ops);
result = cdev_add(&hidraw_cdev, dev_id, HIDRAW_MAX_DEVICES);
@@ -639,7 +639,7 @@ out:
return result;
error_class:
- class_destroy(hidraw_class);
+ class_unregister(&hidraw_class);
error_cdev:
unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES);
goto out;
@@ -650,7 +650,7 @@ void hidraw_exit(void)
dev_t dev_id = MKDEV(hidraw_major, 0);
cdev_del(&hidraw_cdev);
- class_destroy(hidraw_class);
+ class_unregister(&hidraw_class);
unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES);
}
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
index 029045d9661c..31abab57ad44 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
@@ -18,9 +18,11 @@
#include "i2c-hid.h"
struct elan_i2c_hid_chip_data {
- unsigned int post_gpio_reset_delay_ms;
+ unsigned int post_gpio_reset_on_delay_ms;
+ unsigned int post_gpio_reset_off_delay_ms;
unsigned int post_power_delay_ms;
u16 hid_descriptor_address;
+ const char *main_supply_name;
};
struct i2c_hid_of_elan {
@@ -38,9 +40,11 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops)
container_of(ops, struct i2c_hid_of_elan, ops);
int ret;
- ret = regulator_enable(ihid_elan->vcc33);
- if (ret)
- return ret;
+ if (ihid_elan->vcc33) {
+ ret = regulator_enable(ihid_elan->vcc33);
+ if (ret)
+ return ret;
+ }
ret = regulator_enable(ihid_elan->vccio);
if (ret) {
@@ -52,8 +56,8 @@ static int elan_i2c_hid_power_up(struct i2chid_ops *ops)
msleep(ihid_elan->chip_data->post_power_delay_ms);
gpiod_set_value_cansleep(ihid_elan->reset_gpio, 0);
- if (ihid_elan->chip_data->post_gpio_reset_delay_ms)
- msleep(ihid_elan->chip_data->post_gpio_reset_delay_ms);
+ if (ihid_elan->chip_data->post_gpio_reset_on_delay_ms)
+ msleep(ihid_elan->chip_data->post_gpio_reset_on_delay_ms);
return 0;
}
@@ -64,8 +68,12 @@ static void elan_i2c_hid_power_down(struct i2chid_ops *ops)
container_of(ops, struct i2c_hid_of_elan, ops);
gpiod_set_value_cansleep(ihid_elan->reset_gpio, 1);
+ if (ihid_elan->chip_data->post_gpio_reset_off_delay_ms)
+ msleep(ihid_elan->chip_data->post_gpio_reset_off_delay_ms);
+
regulator_disable(ihid_elan->vccio);
- regulator_disable(ihid_elan->vcc33);
+ if (ihid_elan->vcc33)
+ regulator_disable(ihid_elan->vcc33);
}
static int i2c_hid_of_elan_probe(struct i2c_client *client)
@@ -89,24 +97,42 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client)
if (IS_ERR(ihid_elan->vccio))
return PTR_ERR(ihid_elan->vccio);
- ihid_elan->vcc33 = devm_regulator_get(&client->dev, "vcc33");
- if (IS_ERR(ihid_elan->vcc33))
- return PTR_ERR(ihid_elan->vcc33);
-
ihid_elan->chip_data = device_get_match_data(&client->dev);
+ if (ihid_elan->chip_data->main_supply_name) {
+ ihid_elan->vcc33 = devm_regulator_get(&client->dev,
+ ihid_elan->chip_data->main_supply_name);
+ if (IS_ERR(ihid_elan->vcc33))
+ return PTR_ERR(ihid_elan->vcc33);
+ }
+
return i2c_hid_core_probe(client, &ihid_elan->ops,
ihid_elan->chip_data->hid_descriptor_address, 0);
}
static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = {
.post_power_delay_ms = 1,
- .post_gpio_reset_delay_ms = 300,
+ .post_gpio_reset_on_delay_ms = 300,
+ .hid_descriptor_address = 0x0001,
+ .main_supply_name = "vcc33",
+};
+
+static const struct elan_i2c_hid_chip_data ilitek_ili9882t_chip_data = {
+ .post_power_delay_ms = 1,
+ .post_gpio_reset_on_delay_ms = 200,
+ .post_gpio_reset_off_delay_ms = 65,
.hid_descriptor_address = 0x0001,
+ /*
+ * this touchscreen is tightly integrated with the panel and assumes
+ * that the relevant power rails (other than the IO rail) have already
+ * been turned on by the panel driver because we're a panel follower.
+ */
+ .main_supply_name = NULL,
};
static const struct of_device_id elan_i2c_hid_of_match[] = {
{ .compatible = "elan,ekth6915", .data = &elan_ekth6915_chip_data },
+ { .compatible = "ilitek,ili9882t", .data = &ilitek_ili9882t_chip_data },
{ }
};
MODULE_DEVICE_TABLE(of, elan_i2c_hid_of_match);
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index 4da50e19808e..166a76c9bcad 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -150,6 +150,7 @@ struct wacom_remote {
struct input_dev *input;
bool registered;
struct wacom_battery battery;
+ ktime_t active_time;
} remotes[WACOM_MAX_REMOTES];
};
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 76e5353aca0c..3f704b8072e8 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1997,7 +1997,7 @@ static int wacom_initialize_remotes(struct wacom *wacom)
spin_lock_init(&remote->remote_lock);
error = kfifo_alloc(&remote->remote_fifo,
- 5 * sizeof(struct wacom_remote_data),
+ 5 * sizeof(struct wacom_remote_work_data),
GFP_KERNEL);
if (error) {
hid_err(wacom->hdev, "failed allocating remote_fifo\n");
@@ -2523,6 +2523,18 @@ fail:
return;
}
+static void wacom_remote_destroy_battery(struct wacom *wacom, int index)
+{
+ struct wacom_remote *remote = wacom->remote;
+
+ if (remote->remotes[index].battery.battery) {
+ devres_release_group(&wacom->hdev->dev,
+ &remote->remotes[index].battery.bat_desc);
+ remote->remotes[index].battery.battery = NULL;
+ remote->remotes[index].active_time = 0;
+ }
+}
+
static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
{
struct wacom_remote *remote = wacom->remote;
@@ -2537,9 +2549,7 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
remote->remotes[i].registered = false;
spin_unlock_irqrestore(&remote->remote_lock, flags);
- if (remote->remotes[i].battery.battery)
- devres_release_group(&wacom->hdev->dev,
- &remote->remotes[i].battery.bat_desc);
+ wacom_remote_destroy_battery(wacom, i);
if (remote->remotes[i].group.name)
devres_release_group(&wacom->hdev->dev,
@@ -2547,7 +2557,6 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
remote->remotes[i].serial = 0;
remote->remotes[i].group.name = NULL;
- remote->remotes[i].battery.battery = NULL;
wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN;
}
}
@@ -2632,6 +2641,9 @@ static int wacom_remote_attach_battery(struct wacom *wacom, int index)
if (remote->remotes[index].battery.battery)
return 0;
+ if (!remote->remotes[index].active_time)
+ return 0;
+
if (wacom->led.groups[index].select == WACOM_STATUS_UNKNOWN)
return 0;
@@ -2647,17 +2659,19 @@ static void wacom_remote_work(struct work_struct *work)
{
struct wacom *wacom = container_of(work, struct wacom, remote_work);
struct wacom_remote *remote = wacom->remote;
- struct wacom_remote_data data;
+ ktime_t kt = ktime_get();
+ struct wacom_remote_work_data remote_work_data;
unsigned long flags;
unsigned int count;
- u32 serial;
+ u32 work_serial;
int i;
spin_lock_irqsave(&remote->remote_lock, flags);
- count = kfifo_out(&remote->remote_fifo, &data, sizeof(data));
+ count = kfifo_out(&remote->remote_fifo, &remote_work_data,
+ sizeof(remote_work_data));
- if (count != sizeof(data)) {
+ if (count != sizeof(remote_work_data)) {
hid_err(wacom->hdev,
"workitem triggered without status available\n");
spin_unlock_irqrestore(&remote->remote_lock, flags);
@@ -2670,10 +2684,14 @@ static void wacom_remote_work(struct work_struct *work)
spin_unlock_irqrestore(&remote->remote_lock, flags);
for (i = 0; i < WACOM_MAX_REMOTES; i++) {
- serial = data.remote[i].serial;
- if (data.remote[i].connected) {
+ work_serial = remote_work_data.remote[i].serial;
+ if (work_serial) {
+
+ if (kt - remote->remotes[i].active_time > WACOM_REMOTE_BATTERY_TIMEOUT
+ && remote->remotes[i].active_time != 0)
+ wacom_remote_destroy_battery(wacom, i);
- if (remote->remotes[i].serial == serial) {
+ if (remote->remotes[i].serial == work_serial) {
wacom_remote_attach_battery(wacom, i);
continue;
}
@@ -2681,7 +2699,7 @@ static void wacom_remote_work(struct work_struct *work)
if (remote->remotes[i].serial)
wacom_remote_destroy_one(wacom, i);
- wacom_remote_create_one(wacom, serial, i);
+ wacom_remote_create_one(wacom, work_serial, i);
} else if (remote->remotes[i].serial) {
wacom_remote_destroy_one(wacom, i);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 174bf03908d7..471db78dbbf0 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1134,6 +1134,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
if (index < 0 || !remote->remotes[index].registered)
goto out;
+ remote->remotes[i].active_time = ktime_get();
input = remote->remotes[index].input;
input_report_key(input, BTN_0, (data[9] & 0x01));
@@ -1196,22 +1197,20 @@ static void wacom_remote_status_irq(struct wacom_wac *wacom_wac, size_t len)
struct wacom *wacom = container_of(wacom_wac, struct wacom, wacom_wac);
unsigned char *data = wacom_wac->data;
struct wacom_remote *remote = wacom->remote;
- struct wacom_remote_data remote_data;
+ struct wacom_remote_work_data remote_data;
unsigned long flags;
int i, ret;
if (data[0] != WACOM_REPORT_DEVICE_LIST)
return;
- memset(&remote_data, 0, sizeof(struct wacom_remote_data));
+ memset(&remote_data, 0, sizeof(struct wacom_remote_work_data));
for (i = 0; i < WACOM_MAX_REMOTES; i++) {
int j = i * 6;
int serial = (data[j+6] << 16) + (data[j+5] << 8) + data[j+4];
- bool connected = data[j+2];
remote_data.remote[i].serial = serial;
- remote_data.remote[i].connected = connected;
}
spin_lock_irqsave(&remote->remote_lock, flags);
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index ee21bb260f22..57e185f18d53 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -13,6 +13,7 @@
#define WACOM_NAME_MAX 64
#define WACOM_MAX_REMOTES 5
#define WACOM_STATUS_UNKNOWN 255
+#define WACOM_REMOTE_BATTERY_TIMEOUT 21000000000ll
/* packet length for individual models */
#define WACOM_PKGLEN_BBFUN 9
@@ -327,10 +328,9 @@ struct hid_data {
ktime_t time_delayed;
};
-struct wacom_remote_data {
+struct wacom_remote_work_data {
struct {
u32 serial;
- bool connected;
} remote[WACOM_MAX_REMOTES];
};
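
The Wacom changes above add an active_time timestamp plus a 21-second WACOM_REMOTE_BATTERY_TIMEOUT and drop the battery for remotes that have gone quiet. The staleness test reduces to a small ktime comparison; a sketch with illustrative names:

#include <linux/ktime.h>
#include <linux/types.h>

#define EXAMPLE_BATTERY_TIMEOUT_NS	21000000000ll	/* 21 seconds */

/* True once a non-zero activity timestamp is older than the timeout. */
static bool example_is_stale(ktime_t last_active)
{
	return last_active != 0 &&
	       ktime_get() - last_active > EXAMPLE_BATTERY_TIMEOUT_NS;
}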
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index ebf15f31d97e..3cabeeabb1ca 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -98,6 +98,7 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
*/
if (version >= VERSION_WIN10_V5) {
msg->msg_sint = VMBUS_MESSAGE_SINT;
+ msg->msg_vtl = ms_hyperv.vtl;
vmbus_connection.msg_conn_id = VMBUS_MESSAGE_CONNECTION_ID_4;
} else {
msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
@@ -482,10 +483,17 @@ void vmbus_set_event(struct vmbus_channel *channel)
++channel->sig_events;
- if (hv_isolation_type_snp())
- hv_ghcb_hypercall(HVCALL_SIGNAL_EVENT, &channel->sig_event,
- NULL, sizeof(channel->sig_event));
- else
+ if (ms_hyperv.paravisor_present) {
+ if (hv_isolation_type_snp())
+ hv_ghcb_hypercall(HVCALL_SIGNAL_EVENT, &channel->sig_event,
+ NULL, sizeof(channel->sig_event));
+ else if (hv_isolation_type_tdx())
+ hv_tdx_hypercall(HVCALL_SIGNAL_EVENT | HV_HYPERCALL_FAST_BIT,
+ channel->sig_event, 0);
+ else
+ WARN_ON_ONCE(1);
+ } else {
hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
+ }
}
EXPORT_SYMBOL_GPL(vmbus_set_event);
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index de6708dbe0df..51e5018ac9b2 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -20,6 +20,7 @@
#include <linux/interrupt.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
+#include <linux/set_memory.h>
#include "hyperv_vmbus.h"
/* The one and only */
@@ -56,20 +57,37 @@ int hv_post_message(union hv_connection_id connection_id,
local_irq_save(flags);
- aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ /*
+ * A TDX VM with the paravisor must use the decrypted post_msg_page: see
+ * the comment in struct hv_per_cpu_context. A SNP VM with the paravisor
+ * can use the encrypted hyperv_pcpu_input_arg because it copies the
+ * input into the GHCB page, which has been decrypted by the paravisor.
+ */
+ if (hv_isolation_type_tdx() && ms_hyperv.paravisor_present)
+ aligned_msg = this_cpu_ptr(hv_context.cpu_context)->post_msg_page;
+ else
+ aligned_msg = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
aligned_msg->connectionid = connection_id;
aligned_msg->reserved = 0;
aligned_msg->message_type = message_type;
aligned_msg->payload_size = payload_size;
memcpy((void *)aligned_msg->payload, payload, payload_size);
- if (hv_isolation_type_snp())
- status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
- (void *)aligned_msg, NULL,
- sizeof(*aligned_msg));
- else
+ if (ms_hyperv.paravisor_present) {
+ if (hv_isolation_type_tdx())
+ status = hv_tdx_hypercall(HVCALL_POST_MESSAGE,
+ virt_to_phys(aligned_msg), 0);
+ else if (hv_isolation_type_snp())
+ status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
+ aligned_msg, NULL,
+ sizeof(*aligned_msg));
+ else
+ status = HV_STATUS_INVALID_PARAMETER;
+ } else {
status = hv_do_hypercall(HVCALL_POST_MESSAGE,
aligned_msg, NULL);
+ }
local_irq_restore(flags);
@@ -78,7 +96,7 @@ int hv_post_message(union hv_connection_id connection_id,
int hv_synic_alloc(void)
{
- int cpu;
+ int cpu, ret = -ENOMEM;
struct hv_per_cpu_context *hv_cpu;
/*
@@ -104,11 +122,29 @@ int hv_synic_alloc(void)
tasklet_init(&hv_cpu->msg_dpc,
vmbus_on_msg_dpc, (unsigned long) hv_cpu);
+ if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
+ hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (hv_cpu->post_msg_page == NULL) {
+ pr_err("Unable to allocate post msg page\n");
+ goto err;
+ }
+
+ ret = set_memory_decrypted((unsigned long)hv_cpu->post_msg_page, 1);
+ if (ret) {
+ pr_err("Failed to decrypt post msg page: %d\n", ret);
+ /* Just leak the page, as it's unsafe to free the page. */
+ hv_cpu->post_msg_page = NULL;
+ goto err;
+ }
+
+ memset(hv_cpu->post_msg_page, 0, PAGE_SIZE);
+ }
+
/*
* Synic message and event pages are allocated by paravisor.
* Skip these pages allocation here.
*/
- if (!hv_isolation_type_snp() && !hv_root_partition) {
+ if (!ms_hyperv.paravisor_present && !hv_root_partition) {
hv_cpu->synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->synic_message_page == NULL) {
@@ -120,29 +156,96 @@ int hv_synic_alloc(void)
(void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->synic_event_page == NULL) {
pr_err("Unable to allocate SYNIC event page\n");
+
+ free_page((unsigned long)hv_cpu->synic_message_page);
+ hv_cpu->synic_message_page = NULL;
goto err;
}
}
+
+ if (!ms_hyperv.paravisor_present &&
+ (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
+ ret = set_memory_decrypted((unsigned long)
+ hv_cpu->synic_message_page, 1);
+ if (ret) {
+ pr_err("Failed to decrypt SYNIC msg page: %d\n", ret);
+ hv_cpu->synic_message_page = NULL;
+
+ /*
+ * Free the event page here so that hv_synic_free()
+ * won't later try to re-encrypt it.
+ */
+ free_page((unsigned long)hv_cpu->synic_event_page);
+ hv_cpu->synic_event_page = NULL;
+ goto err;
+ }
+
+ ret = set_memory_decrypted((unsigned long)
+ hv_cpu->synic_event_page, 1);
+ if (ret) {
+ pr_err("Failed to decrypt SYNIC event page: %d\n", ret);
+ hv_cpu->synic_event_page = NULL;
+ goto err;
+ }
+
+ memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
+ memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
+ }
}
return 0;
+
err:
/*
* Any memory allocations that succeeded will be freed when
* the caller cleans up by calling hv_synic_free()
*/
- return -ENOMEM;
+ return ret;
}
void hv_synic_free(void)
{
- int cpu;
+ int cpu, ret;
for_each_present_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
+ /* It's better to leak the page if the encryption fails. */
+ if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
+ if (hv_cpu->post_msg_page) {
+ ret = set_memory_encrypted((unsigned long)
+ hv_cpu->post_msg_page, 1);
+ if (ret) {
+ pr_err("Failed to encrypt post msg page: %d\n", ret);
+ hv_cpu->post_msg_page = NULL;
+ }
+ }
+ }
+
+ if (!ms_hyperv.paravisor_present &&
+ (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
+ if (hv_cpu->synic_message_page) {
+ ret = set_memory_encrypted((unsigned long)
+ hv_cpu->synic_message_page, 1);
+ if (ret) {
+ pr_err("Failed to encrypt SYNIC msg page: %d\n", ret);
+ hv_cpu->synic_message_page = NULL;
+ }
+ }
+
+ if (hv_cpu->synic_event_page) {
+ ret = set_memory_encrypted((unsigned long)
+ hv_cpu->synic_event_page, 1);
+ if (ret) {
+ pr_err("Failed to encrypt SYNIC event page: %d\n", ret);
+ hv_cpu->synic_event_page = NULL;
+ }
+ }
+ }
+
+ free_page((unsigned long)hv_cpu->post_msg_page);
free_page((unsigned long)hv_cpu->synic_event_page);
free_page((unsigned long)hv_cpu->synic_message_page);
}
@@ -170,7 +273,7 @@ void hv_synic_enable_regs(unsigned int cpu)
simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
simp.simp_enabled = 1;
- if (hv_isolation_type_snp() || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition) {
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
@@ -189,7 +292,7 @@ void hv_synic_enable_regs(unsigned int cpu)
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 1;
- if (hv_isolation_type_snp() || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition) {
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
@@ -272,7 +375,7 @@ void hv_synic_disable_regs(unsigned int cpu)
* addresses.
*/
simp.simp_enabled = 0;
- if (hv_isolation_type_snp() || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition) {
iounmap(hv_cpu->synic_message_page);
hv_cpu->synic_message_page = NULL;
} else {
@@ -284,7 +387,7 @@ void hv_synic_disable_regs(unsigned int cpu)
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 0;
- if (hv_isolation_type_snp() || hv_root_partition) {
+ if (ms_hyperv.paravisor_present || hv_root_partition) {
iounmap(hv_cpu->synic_event_page);
hv_cpu->synic_event_page = NULL;
} else {
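
The hv.c changes above follow a consistent lifecycle for pages shared with the hypervisor in a confidential VM: allocate, mark decrypted, zero, and on teardown re-encrypt before freeing, deliberately leaking the page whenever the encryption attribute cannot be changed. A condensed sketch of that lifecycle (illustrative helpers, not the driver's actual functions):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/set_memory.h>
#include <linux/string.h>

static void *example_alloc_shared_page(void)
{
	void *page = (void *)get_zeroed_page(GFP_KERNEL);

	if (!page)
		return NULL;

	if (set_memory_decrypted((unsigned long)page, 1)) {
		/* Attribute state is uncertain; safer to leak than to free. */
		return NULL;
	}

	/* Contents are undefined after the attribute change; clear them. */
	memset(page, 0, PAGE_SIZE);
	return page;
}

static void example_free_shared_page(void *page)
{
	if (!page)
		return;

	/* Never hand a still-decrypted page back to the allocator. */
	if (set_memory_encrypted((unsigned long)page, 1))
		return;

	free_page((unsigned long)page);
}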
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 0d7a3ba66396..e000fa3b9f97 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cleanup.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
@@ -646,7 +647,7 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
void *v)
{
struct memory_notify *mem = (struct memory_notify *)v;
- unsigned long flags, pfn_count;
+ unsigned long pfn_count;
switch (val) {
case MEM_ONLINE:
@@ -655,21 +656,22 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
break;
case MEM_OFFLINE:
- spin_lock_irqsave(&dm_device.ha_lock, flags);
- pfn_count = hv_page_offline_check(mem->start_pfn,
- mem->nr_pages);
- if (pfn_count <= dm_device.num_pages_onlined) {
- dm_device.num_pages_onlined -= pfn_count;
- } else {
- /*
- * We're offlining more pages than we managed to online.
- * This is unexpected. In any case don't let
- * num_pages_onlined wrap around zero.
- */
- WARN_ON_ONCE(1);
- dm_device.num_pages_onlined = 0;
+ scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+ pfn_count = hv_page_offline_check(mem->start_pfn,
+ mem->nr_pages);
+ if (pfn_count <= dm_device.num_pages_onlined) {
+ dm_device.num_pages_onlined -= pfn_count;
+ } else {
+ /*
+ * We're offlining more pages than we
+ * managed to online. This is
+ * unexpected. In any case don't let
+ * num_pages_onlined wrap around zero.
+ */
+ WARN_ON_ONCE(1);
+ dm_device.num_pages_onlined = 0;
+ }
}
- spin_unlock_irqrestore(&dm_device.ha_lock, flags);
break;
case MEM_GOING_ONLINE:
case MEM_GOING_OFFLINE:
@@ -721,24 +723,23 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
unsigned long start_pfn;
unsigned long processed_pfn;
unsigned long total_pfn = pfn_count;
- unsigned long flags;
for (i = 0; i < (size/HA_CHUNK); i++) {
start_pfn = start + (i * HA_CHUNK);
- spin_lock_irqsave(&dm_device.ha_lock, flags);
- has->ha_end_pfn += HA_CHUNK;
+ scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+ has->ha_end_pfn += HA_CHUNK;
- if (total_pfn > HA_CHUNK) {
- processed_pfn = HA_CHUNK;
- total_pfn -= HA_CHUNK;
- } else {
- processed_pfn = total_pfn;
- total_pfn = 0;
- }
+ if (total_pfn > HA_CHUNK) {
+ processed_pfn = HA_CHUNK;
+ total_pfn -= HA_CHUNK;
+ } else {
+ processed_pfn = total_pfn;
+ total_pfn = 0;
+ }
- has->covered_end_pfn += processed_pfn;
- spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+ has->covered_end_pfn += processed_pfn;
+ }
reinit_completion(&dm_device.ol_waitevent);
@@ -758,10 +759,10 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
*/
do_hot_add = false;
}
- spin_lock_irqsave(&dm_device.ha_lock, flags);
- has->ha_end_pfn -= HA_CHUNK;
- has->covered_end_pfn -= processed_pfn;
- spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+ scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+ has->ha_end_pfn -= HA_CHUNK;
+ has->covered_end_pfn -= processed_pfn;
+ }
break;
}
@@ -781,10 +782,9 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
static void hv_online_page(struct page *pg, unsigned int order)
{
struct hv_hotadd_state *has;
- unsigned long flags;
unsigned long pfn = page_to_pfn(pg);
- spin_lock_irqsave(&dm_device.ha_lock, flags);
+ guard(spinlock_irqsave)(&dm_device.ha_lock);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/* The page belongs to a different HAS. */
if ((pfn < has->start_pfn) ||
@@ -794,7 +794,6 @@ static void hv_online_page(struct page *pg, unsigned int order)
hv_bring_pgs_online(has, pfn, 1UL << order);
break;
}
- spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}
static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
@@ -803,9 +802,8 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
struct hv_hotadd_gap *gap;
unsigned long residual, new_inc;
int ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&dm_device.ha_lock, flags);
+ guard(spinlock_irqsave)(&dm_device.ha_lock);
list_for_each_entry(has, &dm_device.ha_region_list, list) {
/*
* If the pfn range we are dealing with is not in the current
@@ -852,7 +850,6 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
ret = 1;
break;
}
- spin_unlock_irqrestore(&dm_device.ha_lock, flags);
return ret;
}
@@ -947,7 +944,6 @@ static unsigned long process_hot_add(unsigned long pg_start,
{
struct hv_hotadd_state *ha_region = NULL;
int covered;
- unsigned long flags;
if (pfn_cnt == 0)
return 0;
@@ -979,9 +975,9 @@ static unsigned long process_hot_add(unsigned long pg_start,
ha_region->covered_end_pfn = pg_start;
ha_region->end_pfn = rg_start + rg_size;
- spin_lock_irqsave(&dm_device.ha_lock, flags);
- list_add_tail(&ha_region->list, &dm_device.ha_region_list);
- spin_unlock_irqrestore(&dm_device.ha_lock, flags);
+ scoped_guard(spinlock_irqsave, &dm_device.ha_lock) {
+ list_add_tail(&ha_region->list, &dm_device.ha_region_list);
+ }
}
do_pg_range:
@@ -2047,7 +2043,6 @@ static void balloon_remove(struct hv_device *dev)
struct hv_dynmem_device *dm = hv_get_drvdata(dev);
struct hv_hotadd_state *has, *tmp;
struct hv_hotadd_gap *gap, *tmp_gap;
- unsigned long flags;
if (dm->num_pages_ballooned != 0)
pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
@@ -2073,7 +2068,7 @@ static void balloon_remove(struct hv_device *dev)
#endif
}
- spin_lock_irqsave(&dm_device.ha_lock, flags);
+ guard(spinlock_irqsave)(&dm_device.ha_lock);
list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
list_del(&gap->list);
@@ -2082,7 +2077,6 @@ static void balloon_remove(struct hv_device *dev)
list_del(&has->list);
kfree(has);
}
- spin_unlock_irqrestore(&dm_device.ha_lock, flags);
}
static int balloon_suspend(struct hv_device *hv_dev)
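
The hv_balloon conversion above moves from explicit spin_lock_irqsave()/spin_unlock_irqrestore() pairs to the scope-based guards from <linux/cleanup.h>: guard() holds the lock until the enclosing scope exits, scoped_guard() only for the braced block, so early returns and break paths cannot leak the lock. A minimal sketch with illustrative names:

#include <linux/cleanup.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_count;

static void example_add(unsigned long n)
{
	/* Lock held only for the braced block. */
	scoped_guard(spinlock_irqsave, &example_lock) {
		example_count += n;
	}
	/* ... lock already released here ... */
}

static unsigned long example_read(void)
{
	/* Lock held until return, then released automatically. */
	guard(spinlock_irqsave)(&example_lock);
	return example_count;
}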
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 6a2258fef1fe..ccad7bca3fd3 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -24,6 +24,7 @@
#include <linux/kmsg_dump.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
+#include <linux/set_memory.h>
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
@@ -359,6 +360,8 @@ int hv_common_cpu_init(unsigned int cpu)
u64 msr_vp_index;
gfp_t flags;
int pgcount = hv_root_partition ? 2 : 1;
+ void *mem;
+ int ret;
/* hv_cpu_init() can be called with IRQs disabled from hv_resume() */
flags = irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL;
@@ -370,14 +373,41 @@ int hv_common_cpu_init(unsigned int cpu)
* allocated if this CPU was previously online and then taken offline
*/
if (!*inputarg) {
- *inputarg = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
- if (!(*inputarg))
+ mem = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
+ if (!mem)
return -ENOMEM;
if (hv_root_partition) {
outputarg = (void **)this_cpu_ptr(hyperv_pcpu_output_arg);
- *outputarg = (char *)(*inputarg) + HV_HYP_PAGE_SIZE;
+ *outputarg = (char *)mem + HV_HYP_PAGE_SIZE;
+ }
+
+ if (!ms_hyperv.paravisor_present &&
+ (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
+ ret = set_memory_decrypted((unsigned long)mem, pgcount);
+ if (ret) {
+ /* It may be unsafe to free 'mem' */
+ return ret;
+ }
+
+ memset(mem, 0x00, pgcount * HV_HYP_PAGE_SIZE);
}
+
+ /*
+ * In a fully enlightened TDX/SNP VM with more than 64 VPs, if
+ * hyperv_pcpu_input_arg is not NULL, set_memory_decrypted() ->
+ * ... -> cpa_flush()-> ... -> __send_ipi_mask_ex() tries to
+ * use hyperv_pcpu_input_arg as the hypercall input page, which
+ * must be a decrypted page in such a VM, but the page is still
+ * encrypted before set_memory_decrypted() returns. Fix this by
+ * setting *inputarg after the above set_memory_decrypted(): if
+ * hyperv_pcpu_input_arg is NULL, __send_ipi_mask_ex() returns
+ * HV_STATUS_INVALID_PARAMETER immediately, and the function
+ * hv_send_ipi_mask() falls back to orig_apic.send_IPI_mask(),
+ * which may be slightly slower than the hypercall, but still
+ * works correctly in such a VM.
+ */
+ *inputarg = mem;
}
msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX);
@@ -502,6 +532,12 @@ bool __weak hv_isolation_type_snp(void)
}
EXPORT_SYMBOL_GPL(hv_isolation_type_snp);
+bool __weak hv_isolation_type_tdx(void)
+{
+ return false;
+}
+EXPORT_SYMBOL_GPL(hv_isolation_type_tdx);
+
void __weak hv_setup_vmbus_handler(void (*handler)(void))
{
}
@@ -542,3 +578,9 @@ u64 __weak hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_s
return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_ghcb_hypercall);
+
+u64 __weak hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
+{
+ return HV_STATUS_INVALID_PARAMETER;
+}
+EXPORT_SYMBOL_GPL(hv_tdx_hypercall);
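
hv_isolation_type_tdx() and hv_tdx_hypercall() above are __weak fallbacks: common code always links against a stub that reports "unsupported", and an architecture that implements TDX support supplies a strong definition that the linker prefers. The shape of such a stub, with an illustrative name:

#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/types.h>

/* Default answer; overridden by a strong arch-specific definition. */
bool __weak example_feature_supported(void)
{
	return false;
}
EXPORT_SYMBOL_GPL(example_feature_supported);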
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 55f2086841ae..f6b1e710f805 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -124,6 +124,17 @@ struct hv_per_cpu_context {
void *synic_event_page;
/*
+ * The page is only used in hv_post_message() for a TDX VM (with the
+ * paravisor) to post a message to Hyper-V: when such a VM calls
+ * HVCALL_POST_MESSAGE, it can't use the hyperv_pcpu_input_arg (which
+ * is encrypted in such a VM) as the hypercall input page, because
+ * the input page for HVCALL_POST_MESSAGE must be decrypted in such a
+ * VM, so post_msg_page (which is decrypted in hv_synic_alloc()) is
+ * introduced for this purpose. See hyperv_init() for more comments.
+ */
+ void *post_msg_page;
+
+ /*
* Starting with win8, we can take channel interrupts on any CPU;
* we will manage the tasklet that handles events messages on a per CPU
* basis.
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 67f95a29aeca..edbb38f6956b 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2287,7 +2287,8 @@ static int vmbus_acpi_add(struct platform_device *pdev)
* Some ancestor of the vmbus acpi device (Gen1 or Gen2
* firmware) is the VMOD that has the mmio ranges. Get that.
*/
- for (ancestor = acpi_dev_parent(device); ancestor;
+ for (ancestor = acpi_dev_parent(device);
+ ancestor && ancestor->handle != ACPI_ROOT_OBJECT;
ancestor = acpi_dev_parent(ancestor)) {
result = acpi_walk_resources(ancestor->handle, METHOD_NAME__CRS,
vmbus_walk_resources, NULL);
diff --git a/drivers/hwmon/peci/cputemp.c b/drivers/hwmon/peci/cputemp.c
index e5b65a382772..a812c15948d9 100644
--- a/drivers/hwmon/peci/cputemp.c
+++ b/drivers/hwmon/peci/cputemp.c
@@ -363,6 +363,7 @@ static int init_core_mask(struct peci_cputemp *priv)
switch (peci_dev->info.model) {
case INTEL_FAM6_ICELAKE_X:
case INTEL_FAM6_ICELAKE_D:
+ case INTEL_FAM6_SAPPHIRERAPIDS_X:
ret = peci_ep_pci_local_read(peci_dev, 0, reg->bus, reg->dev,
reg->func, reg->offset + 4, &data);
if (ret)
@@ -531,6 +532,13 @@ static struct resolved_cores_reg resolved_cores_reg_icx = {
.offset = 0xd0,
};
+static struct resolved_cores_reg resolved_cores_reg_spr = {
+ .bus = 31,
+ .dev = 30,
+ .func = 6,
+ .offset = 0x80,
+};
+
static const struct cpu_info cpu_hsx = {
.reg = &resolved_cores_reg_hsx,
.min_peci_revision = 0x33,
@@ -549,6 +557,12 @@ static const struct cpu_info cpu_icx = {
.thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree,
};
+static const struct cpu_info cpu_spr = {
+ .reg = &resolved_cores_reg_spr,
+ .min_peci_revision = 0x40,
+ .thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree,
+};
+
static const struct auxiliary_device_id peci_cputemp_ids[] = {
{
.name = "peci_cpu.cputemp.hsx",
@@ -574,6 +588,10 @@ static const struct auxiliary_device_id peci_cputemp_ids[] = {
.name = "peci_cpu.cputemp.icxd",
.driver_data = (kernel_ulong_t)&cpu_icx,
},
+ {
+ .name = "peci_cpu.cputemp.spr",
+ .driver_data = (kernel_ulong_t)&cpu_spr,
+ },
{ }
};
MODULE_DEVICE_TABLE(auxiliary, peci_cputemp_ids);
diff --git a/drivers/hwmon/peci/dimmtemp.c b/drivers/hwmon/peci/dimmtemp.c
index ce89da3937a0..5ca4d04e4b14 100644
--- a/drivers/hwmon/peci/dimmtemp.c
+++ b/drivers/hwmon/peci/dimmtemp.c
@@ -30,6 +30,8 @@
#define DIMM_IDX_MAX_ON_ICX 2
#define CHAN_RANK_MAX_ON_ICXD 4
#define DIMM_IDX_MAX_ON_ICXD 2
+#define CHAN_RANK_MAX_ON_SPR 8
+#define DIMM_IDX_MAX_ON_SPR 2
#define CHAN_RANK_MAX CHAN_RANK_MAX_ON_HSX
#define DIMM_IDX_MAX DIMM_IDX_MAX_ON_HSX
@@ -534,6 +536,43 @@ read_thresholds_icx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u
return 0;
}
+static int
+read_thresholds_spr(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u32 *data)
+{
+ u32 reg_val;
+ u64 offset;
+ int ret;
+ u8 dev;
+
+ ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd4, &reg_val);
+ if (ret || !(reg_val & BIT(31)))
+ return -ENODATA; /* Use default or previous value */
+
+ ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd0, &reg_val);
+ if (ret)
+ return -ENODATA; /* Use default or previous value */
+
+ /*
+ * Device 26, Offset 219a8: IMC 0 channel 0 -> rank 0
+ * Device 26, Offset 299a8: IMC 0 channel 1 -> rank 1
+ * Device 27, Offset 219a8: IMC 1 channel 0 -> rank 2
+ * Device 27, Offset 299a8: IMC 1 channel 1 -> rank 3
+ * Device 28, Offset 219a8: IMC 2 channel 0 -> rank 4
+ * Device 28, Offset 299a8: IMC 2 channel 1 -> rank 5
+ * Device 29, Offset 219a8: IMC 3 channel 0 -> rank 6
+ * Device 29, Offset 299a8: IMC 3 channel 1 -> rank 7
+ */
+ dev = 26 + chan_rank / 2;
+ offset = 0x219a8 + dimm_order * 4 + (chan_rank % 2) * 0x8000;
+
+ ret = peci_mmio_read(priv->peci_dev, 0, GET_CPU_SEG(reg_val), GET_CPU_BUS(reg_val),
+ dev, 0, offset, data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static const struct dimm_info dimm_hsx = {
.chan_rank_max = CHAN_RANK_MAX_ON_HSX,
.dimm_idx_max = DIMM_IDX_MAX_ON_HSX,
@@ -576,6 +615,13 @@ static const struct dimm_info dimm_icxd = {
.read_thresholds = &read_thresholds_icx,
};
+static const struct dimm_info dimm_spr = {
+ .chan_rank_max = CHAN_RANK_MAX_ON_SPR,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_SPR,
+ .min_peci_revision = 0x40,
+ .read_thresholds = &read_thresholds_spr,
+};
+
static const struct auxiliary_device_id peci_dimmtemp_ids[] = {
{
.name = "peci_cpu.dimmtemp.hsx",
@@ -601,6 +647,10 @@ static const struct auxiliary_device_id peci_dimmtemp_ids[] = {
.name = "peci_cpu.dimmtemp.icxd",
.driver_data = (kernel_ulong_t)&dimm_icxd,
},
+ {
+ .name = "peci_cpu.dimmtemp.spr",
+ .driver_data = (kernel_ulong_t)&dimm_spr,
+ },
{ }
};
MODULE_DEVICE_TABLE(auxiliary, peci_dimmtemp_ids);
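
As a worked example of the Sapphire Rapids threshold address computation in read_thresholds_spr() above: for chan_rank 5 and dimm_order 1, dev = 26 + 5/2 = 28 and offset = 0x219a8 + 1 * 4 + (5 % 2) * 0x8000 = 0x299ac, i.e. IMC 2, channel 1, second DIMM, which matches the device/offset table in the comment.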
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index dfe82952671b..a9fd9ca45f2a 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -145,7 +145,7 @@ runtime_err:
return ret;
}
-static int omap_hwspinlock_remove(struct platform_device *pdev)
+static void omap_hwspinlock_remove(struct platform_device *pdev)
{
struct hwspinlock_device *bank = platform_get_drvdata(pdev);
int ret;
@@ -153,12 +153,10 @@ static int omap_hwspinlock_remove(struct platform_device *pdev)
ret = hwspin_lock_unregister(bank);
if (ret) {
dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
- return ret;
+ return;
}
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id omap_hwspinlock_of_match[] = {
@@ -171,7 +169,7 @@ MODULE_DEVICE_TABLE(of, omap_hwspinlock_of_match);
static struct platform_driver omap_hwspinlock_driver = {
.probe = omap_hwspinlock_probe,
- .remove = omap_hwspinlock_remove,
+ .remove_new = omap_hwspinlock_remove,
.driver = {
.name = "omap_hwspinlock",
.of_match_table = omap_hwspinlock_of_match,
diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c
index 9cf186362ae2..a0fd67fd2934 100644
--- a/drivers/hwspinlock/qcom_hwspinlock.c
+++ b/drivers/hwspinlock/qcom_hwspinlock.c
@@ -69,9 +69,18 @@ static const struct hwspinlock_ops qcom_hwspinlock_ops = {
.unlock = qcom_hwspinlock_unlock,
};
+static const struct regmap_config sfpb_mutex_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x100,
+ .fast_io = true,
+};
+
static const struct qcom_hwspinlock_of_data of_sfpb_mutex = {
.offset = 0x4,
.stride = 0x4,
+ .regmap_config = &sfpb_mutex_config,
};
static const struct regmap_config tcsr_msm8226_mutex_config = {
@@ -197,6 +206,8 @@ static int qcom_hwspinlock_probe(struct platform_device *pdev)
bank->lock[i].priv = devm_regmap_field_alloc(&pdev->dev,
regmap, field);
+ if (IS_ERR(bank->lock[i].priv))
+ return PTR_ERR(bank->lock[i].priv);
}
return devm_hwspin_lock_register(&pdev->dev, bank, &qcom_hwspinlock_ops,
diff --git a/drivers/hwspinlock/u8500_hsem.c b/drivers/hwspinlock/u8500_hsem.c
index 67845c0c9701..1edca1092f29 100644
--- a/drivers/hwspinlock/u8500_hsem.c
+++ b/drivers/hwspinlock/u8500_hsem.c
@@ -120,20 +120,18 @@ static int u8500_hsem_probe(struct platform_device *pdev)
pdata->base_id, num_locks);
}
-static int u8500_hsem_remove(struct platform_device *pdev)
+static void u8500_hsem_remove(struct platform_device *pdev)
{
struct hwspinlock_device *bank = platform_get_drvdata(pdev);
void __iomem *io_base = bank->lock[0].priv - HSEM_REGISTER_OFFSET;
/* clear all interrupts */
writel(0xFFFF, io_base + HSEM_ICRALL);
-
- return 0;
}
static struct platform_driver u8500_hsem_driver = {
.probe = u8500_hsem_probe,
- .remove = u8500_hsem_remove,
+ .remove_new = u8500_hsem_remove,
.driver = {
.name = "u8500_hsem",
},
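
The omap and u8500 hwspinlock drivers above are converted to the platform bus's void-returning remove callback: with .remove_new there is no error code to propagate, so anything that fails during teardown can only be logged. A minimal sketch of a driver using it (illustrative names):

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

/* Teardown cannot fail as far as the driver core is concerned. */
static void example_remove(struct platform_device *pdev)
{
}

static struct platform_driver example_driver = {
	.probe		= example_probe,
	.remove_new	= example_remove,
	.driver = {
		.name	= "example",
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");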
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 118fcf27854d..9fabe00a40d6 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -17,7 +17,7 @@
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/coresight.h>
-#include <linux/of_platform.h>
+#include <linux/property.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index 7023ff70cc28..3999d0a2cb60 100644
--- a/drivers/hwtracing/coresight/coresight-cti-core.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -22,7 +22,7 @@
#include "coresight-priv.h"
#include "coresight-cti.h"
-/**
+/*
* CTI devices can be associated with a PE, or be connected to CoreSight
* hardware. We have a list of all CTIs irrespective of CPU bound or
* otherwise.
diff --git a/drivers/hwtracing/coresight/coresight-dummy.c b/drivers/hwtracing/coresight/coresight-dummy.c
index 8035120b70b3..e4deafae7bc2 100644
--- a/drivers/hwtracing/coresight/coresight-dummy.c
+++ b/drivers/hwtracing/coresight/coresight-dummy.c
@@ -147,17 +147,7 @@ static struct platform_driver dummy_driver = {
},
};
-static int __init dummy_init(void)
-{
- return platform_driver_register(&dummy_driver);
-}
-module_init(dummy_init);
-
-static void __exit dummy_exit(void)
-{
- platform_driver_unregister(&dummy_driver);
-}
-module_exit(dummy_exit);
+module_platform_driver(dummy_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CoreSight dummy driver");
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-cfg.c b/drivers/hwtracing/coresight/coresight-etm4x-cfg.c
index d2ea903231b2..c302072b293a 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-cfg.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-cfg.c
@@ -40,7 +40,7 @@
* Invalid offsets will result in fail code return and feature load failure.
*
* @drvdata: driver data to map into.
- * @reg: register to map.
+ * @reg_csdev: register to map.
* @offset: device offset for the register
*/
static int etm4_cfg_map_reg_offset(struct etmv4_drvdata *drvdata,
@@ -132,7 +132,7 @@ static int etm4_cfg_map_reg_offset(struct etmv4_drvdata *drvdata,
* etm4_cfg_load_feature - load a feature into a device instance.
*
* @csdev: An ETMv4 CoreSight device.
- * @feat: The feature to be loaded.
+ * @feat_csdev: The feature to be loaded.
*
* The function will load a feature instance into the device, checking that
* the register definitions are valid for the device.
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 7e307022303a..77b0271ce6eb 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -3,6 +3,7 @@
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
*/
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
@@ -30,6 +31,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
+#include <linux/clk/clk-conf.h>
#include <asm/barrier.h>
#include <asm/sections.h>
@@ -66,7 +68,6 @@ static u64 etm4_get_access_type(struct etmv4_config *config);
static enum cpuhp_state hp_online;
struct etm4_init_arg {
- unsigned int pid;
struct device *dev;
struct csdev_access *csa;
};
@@ -370,9 +371,17 @@ static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
}
static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
- unsigned int id)
+ struct csdev_access *csa)
{
- if (etm4_hisi_match_pid(id))
+ /*
+ * TRCPIDR* registers are not required for ETMs with system
+ * instructions. They must be identified by the MIDR+REVIDRs.
+ * Skip the TRCPID checks for now.
+ */
+ if (!csa->io_mem)
+ return;
+
+ if (etm4_hisi_match_pid(coresight_get_pid(csa)))
set_bit(ETM4_IMPDEF_HISI_CORE_COMMIT, drvdata->arch_features);
}
#else
@@ -385,7 +394,7 @@ static void etm4_disable_arch_specific(struct etmv4_drvdata *drvdata)
}
static void etm4_check_arch_features(struct etmv4_drvdata *drvdata,
- unsigned int id)
+ struct csdev_access *csa)
{
}
#endif /* CONFIG_ETM4X_IMPDEF_FEATURE */
@@ -1066,11 +1075,21 @@ static bool etm4_init_sysreg_access(struct etmv4_drvdata *drvdata,
return true;
}
+static bool is_devtype_cpu_trace(void __iomem *base)
+{
+ u32 devtype = readl(base + TRCDEVTYPE);
+
+ return (devtype == CS_DEVTYPE_PE_TRACE);
+}
+
static bool etm4_init_iomem_access(struct etmv4_drvdata *drvdata,
struct csdev_access *csa)
{
u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);
+ if (!is_coresight_device(drvdata->base) || !is_devtype_cpu_trace(drvdata->base))
+ return false;
+
/*
* All ETMs must implement TRCDEVARCH to indicate that
* the component is an ETMv4. Even though TRCIDR1 also
@@ -1161,7 +1180,7 @@ static void etm4_init_arch_data(void *info)
etm4_os_unlock_csa(drvdata, csa);
etm4_cs_unlock(drvdata, csa);
- etm4_check_arch_features(drvdata, init_arg->pid);
+ etm4_check_arch_features(drvdata, csa);
/* find all capabilities of the tracing unit */
etmidr0 = etm4x_relaxed_read32(csa, TRCIDR0);
@@ -2044,19 +2063,16 @@ static int etm4_add_coresight_dev(struct etm4_init_arg *init_arg)
return 0;
}
-static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
+static int etm4_probe(struct device *dev)
{
- struct etmv4_drvdata *drvdata;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
struct csdev_access access = { 0 };
struct etm4_init_arg init_arg = { 0 };
struct etm4_init_arg *delayed;
- drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
+ if (WARN_ON(!drvdata))
return -ENOMEM;
- dev_set_drvdata(dev, drvdata);
-
if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
pm_save_enable = coresight_loses_context_with_cpu(dev) ?
PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;
@@ -2068,8 +2084,6 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
return -ENOMEM;
}
- drvdata->base = base;
-
spin_lock_init(&drvdata->spinlock);
drvdata->cpu = coresight_get_cpu(dev);
@@ -2078,7 +2092,6 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
init_arg.dev = dev;
init_arg.csa = &access;
- init_arg.pid = etm_pid;
/*
* Serialize against CPUHP callbacks to avoid race condition
@@ -2108,6 +2121,7 @@ static int etm4_probe(struct device *dev, void __iomem *base, u32 etm_pid)
static int etm4_probe_amba(struct amba_device *adev, const struct amba_id *id)
{
+ struct etmv4_drvdata *drvdata;
void __iomem *base;
struct device *dev = &adev->dev;
struct resource *res = &adev->res;
@@ -2118,7 +2132,13 @@ static int etm4_probe_amba(struct amba_device *adev, const struct amba_id *id)
if (IS_ERR(base))
return PTR_ERR(base);
- ret = etm4_probe(dev, base, id->id);
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->base = base;
+ dev_set_drvdata(dev, drvdata);
+ ret = etm4_probe(dev);
if (!ret)
pm_runtime_put(&adev->dev);
@@ -2127,18 +2147,32 @@ static int etm4_probe_amba(struct amba_device *adev, const struct amba_id *id)
static int etm4_probe_platform_dev(struct platform_device *pdev)
{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct etmv4_drvdata *drvdata;
int ret;
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
+ if (IS_ERR(drvdata->pclk))
+ return -ENODEV;
+
+ if (res) {
+ drvdata->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(drvdata->base)) {
+ clk_put(drvdata->pclk);
+ return PTR_ERR(drvdata->base);
+ }
+ }
+
+ dev_set_drvdata(&pdev->dev, drvdata);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- /*
- * System register based devices could match the
- * HW by reading appropriate registers on the HW
- * and thus we could skip the PID.
- */
- ret = etm4_probe(&pdev->dev, NULL, 0);
+ ret = etm4_probe(&pdev->dev);
pm_runtime_put(&pdev->dev);
return ret;
@@ -2178,7 +2212,7 @@ static struct amba_cs_uci_id uci_id_etm4[] = {
/* ETMv4 UCI data */
.devarch = ETM_DEVARCH_ETMv4x_ARCH,
.devarch_mask = ETM_DEVARCH_ID_MASK,
- .devtype = 0x00000013,
+ .devtype = CS_DEVTYPE_PE_TRACE,
}
};
@@ -2234,6 +2268,10 @@ static int __exit etm4_remove_platform_dev(struct platform_device *pdev)
if (drvdata)
etm4_remove_dev(drvdata);
pm_runtime_disable(&pdev->dev);
+
+ if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
+ clk_put(drvdata->pclk);
+
return 0;
}
@@ -2278,19 +2316,55 @@ static struct amba_driver etm4x_amba_driver = {
.id_table = etm4_ids,
};
+#ifdef CONFIG_PM
+static int etm4_runtime_suspend(struct device *dev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata->pclk && !IS_ERR(drvdata->pclk))
+ clk_disable_unprepare(drvdata->pclk);
+
+ return 0;
+}
+
+static int etm4_runtime_resume(struct device *dev)
+{
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata->pclk && !IS_ERR(drvdata->pclk))
+ clk_prepare_enable(drvdata->pclk);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops etm4_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(etm4_runtime_suspend, etm4_runtime_resume, NULL)
+};
+
static const struct of_device_id etm4_sysreg_match[] = {
{ .compatible = "arm,coresight-etm4x-sysreg" },
{ .compatible = "arm,embedded-trace-extension" },
{}
};
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id etm4x_acpi_ids[] = {
+ {"ARMHC500", 0}, /* ARM CoreSight ETM4x */
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, etm4x_acpi_ids);
+#endif
+
static struct platform_driver etm4_platform_driver = {
.probe = etm4_probe_platform_dev,
.remove = etm4_remove_platform_dev,
.driver = {
.name = "coresight-etm4x",
.of_match_table = etm4_sysreg_match,
+ .acpi_match_table = ACPI_PTR(etm4x_acpi_ids),
.suppress_bind_attrs = true,
+ .pm = &etm4_dev_pm_ops,
},
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 27c8a9901868..20e2e4cb7614 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -701,6 +701,8 @@
#define ETM_DEVARCH_ETE_ARCH \
(ETM_DEVARCH_ARCHITECT_ARM | ETM_DEVARCH_ARCHID_ETE | ETM_DEVARCH_PRESENT)
+#define CS_DEVTYPE_PE_TRACE 0x00000013
+
#define TRCSTATR_IDLE_BIT 0
#define TRCSTATR_PMSTABLE_BIT 1
#define ETM_DEFAULT_ADDR_COMP 0
@@ -944,6 +946,7 @@ struct etmv4_save_state {
/**
* struct etm4_drvdata - specifics associated to an ETM component
+ * @pclk: APB clock if present, otherwise NULL
* @base: Memory mapped base address for this component.
* @csdev: Component vitals needed by the framework.
* @spinlock: Only one at a time pls.
@@ -1009,6 +1012,7 @@ struct etmv4_save_state {
* @arch_features: Bitmap of arch features of etmv4 devices.
*/
struct etmv4_drvdata {
+ struct clk *pclk;
void __iomem *base;
struct coresight_device *csdev;
spinlock_t spinlock;
diff --git a/drivers/hwtracing/coresight/coresight-platform.c b/drivers/hwtracing/coresight/coresight-platform.c
index 3e2e135cb8f6..9d550f5697fa 100644
--- a/drivers/hwtracing/coresight/coresight-platform.c
+++ b/drivers/hwtracing/coresight/coresight-platform.c
@@ -9,9 +9,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_address.h>
#include <linux/of_graph.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/coresight.h>
@@ -494,19 +492,18 @@ static inline bool acpi_validate_dsd_graph(const union acpi_object *graph)
/* acpi_get_dsd_graph - Find the _DSD Graph property for the given device. */
static const union acpi_object *
-acpi_get_dsd_graph(struct acpi_device *adev)
+acpi_get_dsd_graph(struct acpi_device *adev, struct acpi_buffer *buf)
{
int i;
- struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
acpi_status status;
const union acpi_object *dsd;
status = acpi_evaluate_object_typed(adev->handle, "_DSD", NULL,
- &buf, ACPI_TYPE_PACKAGE);
+ buf, ACPI_TYPE_PACKAGE);
if (ACPI_FAILURE(status))
return NULL;
- dsd = buf.pointer;
+ dsd = buf->pointer;
/*
* _DSD property consists tuples { Prop_UUID, Package() }
@@ -557,12 +554,12 @@ acpi_validate_coresight_graph(const union acpi_object *cs_graph)
* returns NULL.
*/
static const union acpi_object *
-acpi_get_coresight_graph(struct acpi_device *adev)
+acpi_get_coresight_graph(struct acpi_device *adev, struct acpi_buffer *buf)
{
const union acpi_object *graph_list, *graph;
int i, nr_graphs;
- graph_list = acpi_get_dsd_graph(adev);
+ graph_list = acpi_get_dsd_graph(adev, buf);
if (!graph_list)
return graph_list;
@@ -663,18 +660,24 @@ static int acpi_coresight_parse_graph(struct device *dev,
struct acpi_device *adev,
struct coresight_platform_data *pdata)
{
+ int ret = 0;
int i, nlinks;
const union acpi_object *graph;
struct coresight_connection conn, zero_conn = {};
struct coresight_connection *new_conn;
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
- graph = acpi_get_coresight_graph(adev);
+ graph = acpi_get_coresight_graph(adev, &buf);
+ /*
+ * There may be no graph connections, which is fine for some
+ * components, e.g. ETE.
+ */
if (!graph)
- return -ENOENT;
+ goto free;
nlinks = graph->package.elements[2].integer.value;
if (!nlinks)
- return 0;
+ goto free;
for (i = 0; i < nlinks; i++) {
const union acpi_object *link = &graph->package.elements[3 + i];
@@ -682,17 +685,28 @@ static int acpi_coresight_parse_graph(struct device *dev,
conn = zero_conn;
dir = acpi_coresight_parse_link(adev, link, &conn);
- if (dir < 0)
- return dir;
+ if (dir < 0) {
+ ret = dir;
+ goto free;
+ }
if (dir == ACPI_CORESIGHT_LINK_MASTER) {
new_conn = coresight_add_out_conn(dev, pdata, &conn);
- if (IS_ERR(new_conn))
- return PTR_ERR(new_conn);
+ if (IS_ERR(new_conn)) {
+ ret = PTR_ERR(new_conn);
+ goto free;
+ }
}
}
- return 0;
+free:
+ /*
+ * If ACPI failed to allocate the buffer, it has already freed the
+ * one created via ACPI_ALLOCATE_BUFFER and set the pointer to NULL.
+ * ACPI_FREE() handles NULL pointers, so free it unconditionally.
+ */
+ ACPI_FREE(buf.pointer);
+ return ret;
}
/*
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 79d8c64eac49..7406b65e2cdd 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -452,7 +452,7 @@ static int tmc_set_etf_buffer(struct coresight_device *csdev,
return -EINVAL;
/* wrap head around to the amount of space we have */
- head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
+ head = handle->head & (((unsigned long)buf->nr_pages << PAGE_SHIFT) - 1);
/* find the page to write to */
buf->cur = head / PAGE_SIZE;
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 766325de0e29..66dc5f97a009 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -45,7 +45,8 @@ struct etr_perf_buffer {
};
/* Convert the perf index to an offset within the ETR buffer */
-#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
+#define PERF_IDX2OFF(idx, buf) \
+ ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
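+/* The unsigned long cast keeps the page count shifted by PAGE_SHIFT from overflowing a 32-bit type */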
/* Lower limit for ETR hardware buffer */
#define TMC_ETR_PERF_MIN_BUF_SIZE SZ_1M
@@ -1267,7 +1268,7 @@ alloc_etr_buf(struct tmc_drvdata *drvdata, struct perf_event *event,
* than the size requested via sysfs.
*/
if ((nr_pages << PAGE_SHIFT) > drvdata->size) {
- etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT),
+ etr_buf = tmc_alloc_etr_buf(drvdata, ((ssize_t)nr_pages << PAGE_SHIFT),
0, node, NULL);
if (!IS_ERR(etr_buf))
goto done;
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index b97da39652d2..0ee48c5ba764 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -325,7 +325,7 @@ ssize_t tmc_sg_table_get_data(struct tmc_sg_table *sg_table,
static inline unsigned long
tmc_sg_table_buf_size(struct tmc_sg_table *sg_table)
{
- return sg_table->data_pages.nr_pages << PAGE_SHIFT;
+ return (unsigned long)sg_table->data_pages.nr_pages << PAGE_SHIFT;
}
struct coresight_device *tmc_etr_get_catu_device(struct tmc_drvdata *drvdata);
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
index 7720619909d6..e20c1c6acc73 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.c
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -1225,6 +1225,16 @@ static void arm_trbe_enable_cpu(void *info)
enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE);
}
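+/* Runs on the CPU being disabled: called via smp_call_function_single() or from CPUHP teardown */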
+static void arm_trbe_disable_cpu(void *info)
+{
+ struct trbe_drvdata *drvdata = info;
+ struct trbe_cpudata *cpudata = this_cpu_ptr(drvdata->cpudata);
+
+ disable_percpu_irq(drvdata->irq);
+ trbe_reset_local(cpudata);
+}
+
static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
{
struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
@@ -1244,10 +1254,13 @@ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cp
if (!desc.name)
goto cpu_clear;
+ desc.pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(desc.pdata))
+ goto cpu_clear;
+
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM;
desc.ops = &arm_trbe_cs_ops;
- desc.pdata = dev_get_platdata(dev);
desc.groups = arm_trbe_groups;
desc.dev = dev;
trbe_csdev = coresight_register(&desc);
@@ -1326,18 +1339,12 @@ cpu_clear:
cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
}
-static void arm_trbe_remove_coresight_cpu(void *info)
+static void arm_trbe_remove_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
{
- int cpu = smp_processor_id();
- struct trbe_drvdata *drvdata = info;
- struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
- disable_percpu_irq(drvdata->irq);
- trbe_reset_local(cpudata);
if (trbe_csdev) {
coresight_unregister(trbe_csdev);
- cpudata->drvdata = NULL;
coresight_set_percpu_sink(cpu, NULL);
}
}
@@ -1366,8 +1373,10 @@ static int arm_trbe_remove_coresight(struct trbe_drvdata *drvdata)
{
int cpu;
- for_each_cpu(cpu, &drvdata->supported_cpus)
- smp_call_function_single(cpu, arm_trbe_remove_coresight_cpu, drvdata, 1);
+ for_each_cpu(cpu, &drvdata->supported_cpus) {
+ smp_call_function_single(cpu, arm_trbe_disable_cpu, drvdata, 1);
+ arm_trbe_remove_coresight_cpu(drvdata, cpu);
+ }
free_percpu(drvdata->cpudata);
return 0;
}
@@ -1406,12 +1415,8 @@ static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
- if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
- struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
-
- disable_percpu_irq(drvdata->irq);
- trbe_reset_local(cpudata);
- }
+ if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
+ arm_trbe_disable_cpu(drvdata);
return 0;
}
@@ -1479,7 +1484,6 @@ static void arm_trbe_remove_irq(struct trbe_drvdata *drvdata)
static int arm_trbe_device_probe(struct platform_device *pdev)
{
- struct coresight_platform_data *pdata;
struct trbe_drvdata *drvdata;
struct device *dev = &pdev->dev;
int ret;
@@ -1494,12 +1498,7 @@ static int arm_trbe_device_probe(struct platform_device *pdev)
if (!drvdata)
return -ENOMEM;
- pdata = coresight_get_platform_data(dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
-
dev_set_drvdata(dev, drvdata);
- dev->platform_data = pdata;
drvdata->pdev = pdev;
ret = arm_trbe_probe_irq(pdev, drvdata);
if (ret)
diff --git a/drivers/hwtracing/coresight/coresight-trbe.h b/drivers/hwtracing/coresight/coresight-trbe.h
index 77cbb5c63878..e915e749be55 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.h
+++ b/drivers/hwtracing/coresight/coresight-trbe.h
@@ -23,7 +23,7 @@ static inline bool is_trbe_available(void)
unsigned int trbe = cpuid_feature_extract_unsigned_field(aa64dfr0,
ID_AA64DFR0_EL1_TraceBuffer_SHIFT);
- return trbe >= 0b0001;
+ return trbe >= ID_AA64DFR0_EL1_TraceBuffer_IMP;
}
static inline bool is_trbe_enabled(void)
diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
index ba081b6d2435..49ea1b0f7489 100644
--- a/drivers/hwtracing/ptt/hisi_ptt.c
+++ b/drivers/hwtracing/ptt/hisi_ptt.c
@@ -618,13 +618,13 @@ static int hisi_ptt_notifier_call(struct notifier_block *nb, unsigned long actio
if (!root_port)
return 0;
- port_devid = PCI_DEVID(root_port->bus->number, root_port->devfn);
+ port_devid = pci_dev_id(root_port);
if (port_devid < hisi_ptt->lower_bdf ||
port_devid > hisi_ptt->upper_bdf)
return 0;
info.is_port = pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT;
- info.devid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ info.devid = pci_dev_id(pdev);
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
@@ -664,7 +664,7 @@ static int hisi_ptt_init_filters(struct pci_dev *pdev, void *data)
if (!root_port)
return 0;
- port_devid = PCI_DEVID(root_port->bus->number, root_port->devfn);
+ port_devid = pci_dev_id(root_port);
if (port_devid < hisi_ptt->lower_bdf ||
port_devid > hisi_ptt->upper_bdf)
return 0;
@@ -674,7 +674,7 @@ static int hisi_ptt_init_filters(struct pci_dev *pdev, void *data)
* should be partially initialized and users would know which filter fails
* through the log. Other functions of PTT device are still available.
*/
- filter = hisi_ptt_alloc_add_filter(hisi_ptt, PCI_DEVID(pdev->bus->number, pdev->devfn),
+ filter = hisi_ptt_alloc_add_filter(hisi_ptt, pci_dev_id(pdev),
pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT);
if (!filter)
return -ENOMEM;
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 438905e2a1d0..c6d1a345ea6d 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -71,6 +71,15 @@ config I2C_MUX
source "drivers/i2c/muxes/Kconfig"
+config I2C_ATR
+ tristate "I2C Address Translator (ATR) support"
+ help
+ Enable support for I2C Address Translator (ATR) chips.
+
+ An ATR allows accessing multiple I2C busses from a single
+ physical bus via address translation, rather than the bus
+ selection used by I2C muxes.
+
config I2C_HELPER_AUTO
bool "Autoselect pertinent helper modules"
default y
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index c1d493dc9bac..3f71ce4711e3 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -13,6 +13,7 @@ i2c-core-$(CONFIG_OF) += i2c-core-of.o
obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o
obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
obj-$(CONFIG_I2C_MUX) += i2c-mux.o
+obj-$(CONFIG_I2C_ATR) += i2c-atr.o
obj-y += algos/ busses/ muxes/
obj-$(CONFIG_I2C_STUB) += i2c-stub.o
obj-$(CONFIG_I2C_SLAVE_EEPROM) += i2c-slave-eeprom.o
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 9cfe8fc509d7..169607e80331 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -1384,7 +1384,7 @@ config I2C_ICY
config I2C_MLXCPLD
tristate "Mellanox I2C driver"
- depends on X86_64 || COMPILE_TEST
+ depends on X86_64 || ARM64 || COMPILE_TEST
help
This exposes the Mellanox platform I2C busses to the linux I2C layer
for X86 based systems.
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index cc58feacd082..0231c5be6354 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -165,14 +165,15 @@ static int ali15x3_setup(struct pci_dev *ALI15X3_dev)
}
if(force_addr) {
+ int ret;
+
dev_info(&ALI15X3_dev->dev, "forcing ISA address 0x%04X\n",
ali15x3_smba);
- if (PCIBIOS_SUCCESSFUL != pci_write_config_word(ALI15X3_dev,
- SMBBA,
- ali15x3_smba))
+ ret = pci_write_config_word(ALI15X3_dev, SMBBA, ali15x3_smba);
+ if (ret != PCIBIOS_SUCCESSFUL)
goto error;
- if (PCIBIOS_SUCCESSFUL != pci_read_config_word(ALI15X3_dev,
- SMBBA, &a))
+ ret = pci_read_config_word(ALI15X3_dev, SMBBA, &a);
+ if (ret != PCIBIOS_SUCCESSFUL)
goto error;
if ((a & ~(ALI15X3_SMB_IOSIZE - 1)) != ali15x3_smba) {
/* make sure it works */
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
index 05ad3bc3578a..db45554327ae 100644
--- a/drivers/i2c/busses/i2c-at91-core.c
+++ b/drivers/i2c/busses/i2c-at91-core.c
@@ -19,7 +19,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pinctrl/consumer.h>
@@ -207,19 +206,15 @@ static int at91_twi_probe(struct platform_device *pdev)
dev->dev = &pdev->dev;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem)
- return -ENODEV;
+ dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
phy_addr = mem->start;
dev->pdata = at91_twi_get_driver_data(pdev);
if (!dev->pdata)
return -ENODEV;
- dev->base = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(dev->base))
- return PTR_ERR(dev->base);
-
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq < 0)
return dev->irq;
@@ -227,10 +222,9 @@ static int at91_twi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
dev->clk = devm_clk_get(dev->dev, NULL);
- if (IS_ERR(dev->clk)) {
- dev_err(dev->dev, "no clock defined\n");
- return -ENODEV;
- }
+ if (IS_ERR(dev->clk))
+ return dev_err_probe(dev->dev, PTR_ERR(dev->clk), "no clock defined\n");
+
clk_prepare_enable(dev->clk);
snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index c0c35785a0dc..d311981d3e60 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -23,7 +23,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -832,7 +831,11 @@ static int at91_init_twi_recovery_gpio(struct platform_device *pdev,
struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
rinfo->pinctrl = devm_pinctrl_get(&pdev->dev);
- if (!rinfo->pinctrl || IS_ERR(rinfo->pinctrl)) {
+ if (!rinfo->pinctrl) {
+ dev_info(dev->dev, "pinctrl unavailable, bus recovery not supported\n");
+ return 0;
+ }
+ if (IS_ERR(rinfo->pinctrl)) {
dev_info(dev->dev, "can't get pinctrl, bus recovery not supported\n");
return PTR_ERR(rinfo->pinctrl);
}
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index e66c12ecf270..8e43f25c117e 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -342,7 +342,6 @@ static void i2c_au1550_remove(struct platform_device *pdev)
i2c_au1550_disable(priv);
}
-#ifdef CONFIG_PM
static int i2c_au1550_suspend(struct device *dev)
{
struct i2c_au1550_data *priv = dev_get_drvdata(dev);
@@ -361,21 +360,13 @@ static int i2c_au1550_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops i2c_au1550_pmops = {
- .suspend = i2c_au1550_suspend,
- .resume = i2c_au1550_resume,
-};
-
-#define AU1XPSC_SMBUS_PMOPS (&i2c_au1550_pmops)
-
-#else
-#define AU1XPSC_SMBUS_PMOPS NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(i2c_au1550_pmops,
+ i2c_au1550_suspend, i2c_au1550_resume);
static struct platform_driver au1xpsc_smbus_driver = {
.driver = {
.name = "au1xpsc_smbus",
- .pm = AU1XPSC_SMBUS_PMOPS,
+ .pm = pm_sleep_ptr(&i2c_au1550_pmops),
},
.probe = i2c_au1550_probe,
.remove_new = i2c_au1550_remove,
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 05c80680dff4..51aab662050b 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -7,7 +7,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -1029,7 +1029,6 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
int irq, ret = 0;
struct bcm_iproc_i2c_dev *iproc_i2c;
struct i2c_adapter *adap;
- struct resource *res;
iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c),
GFP_KERNEL);
@@ -1042,15 +1041,12 @@ static int bcm_iproc_i2c_probe(struct platform_device *pdev)
(enum bcm_iproc_i2c_type)of_device_get_match_data(&pdev->dev);
init_completion(&iproc_i2c->done);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iproc_i2c->base = devm_ioremap_resource(iproc_i2c->device, res);
+ iproc_i2c->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(iproc_i2c->base))
return PTR_ERR(iproc_i2c->base);
if (iproc_i2c->type == IPROC_I2C_NIC) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- iproc_i2c->idm_base = devm_ioremap_resource(iproc_i2c->device,
- res);
+ iproc_i2c->idm_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(iproc_i2c->idm_base))
return PTR_ERR(iproc_i2c->idm_base);
@@ -1128,8 +1124,6 @@ static void bcm_iproc_i2c_remove(struct platform_device *pdev)
bcm_iproc_i2c_enable_disable(iproc_i2c, false);
}
-#ifdef CONFIG_PM_SLEEP
-
static int bcm_iproc_i2c_suspend(struct device *dev)
{
struct bcm_iproc_i2c_dev *iproc_i2c = dev_get_drvdata(dev);
@@ -1180,12 +1174,6 @@ static const struct dev_pm_ops bcm_iproc_i2c_pm_ops = {
.resume_early = &bcm_iproc_i2c_resume
};
-#define BCM_IPROC_I2C_PM_OPS (&bcm_iproc_i2c_pm_ops)
-#else
-#define BCM_IPROC_I2C_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
-
-
static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
{
struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(slave->adapter);
@@ -1258,7 +1246,7 @@ static struct platform_driver bcm_iproc_i2c_driver = {
.driver = {
.name = "bcm-iproc-i2c",
.of_match_table = bcm_iproc_i2c_of_match,
- .pm = BCM_IPROC_I2C_PM_OPS,
+ .pm = pm_sleep_ptr(&bcm_iproc_i2c_pm_ops),
},
.probe = bcm_iproc_i2c_probe,
.remove_new = bcm_iproc_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index 8ce6d3f49551..b92de1944221 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -12,7 +12,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -430,10 +430,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
i2c_dev->bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev);
- if (IS_ERR(i2c_dev->bus_clk)) {
- dev_err(&pdev->dev, "Could not register clock\n");
- return PTR_ERR(i2c_dev->bus_clk);
- }
+ if (IS_ERR(i2c_dev->bus_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c_dev->bus_clk),
+ "Could not register clock\n");
ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
&bus_clk_rate);
@@ -444,10 +443,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
}
ret = clk_set_rate_exclusive(i2c_dev->bus_clk, bus_clk_rate);
- if (ret < 0) {
- dev_err(&pdev->dev, "Could not set clock frequency\n");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Could not set clock frequency\n");
ret = clk_prepare_enable(i2c_dev->bus_clk);
if (ret) {
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index cf92cbcb8c86..acee76732544 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -594,11 +594,10 @@ static int bcm2711_release_bsc(struct brcmstb_i2c_dev *dev)
static int brcmstb_i2c_probe(struct platform_device *pdev)
{
- int rc = 0;
struct brcmstb_i2c_dev *dev;
struct i2c_adapter *adap;
- struct resource *iomem;
const char *int_name;
+ int rc;
/* Allocate memory for private data structure */
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
@@ -614,18 +613,15 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
init_completion(&dev->done);
/* Map hardware registers */
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->base = devm_ioremap_resource(dev->device, iomem);
- if (IS_ERR(dev->base)) {
- rc = -ENOMEM;
- goto probe_errorout;
- }
+ dev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dev->base))
+ return PTR_ERR(dev->base);
if (of_device_is_compatible(dev->device->of_node,
"brcm,bcm2711-hdmi-i2c")) {
rc = bcm2711_release_bsc(dev);
if (rc)
- goto probe_errorout;
+ return rc;
}
rc = of_property_read_string(dev->device->of_node, "interrupt-names",
@@ -678,16 +674,13 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
adap->dev.of_node = pdev->dev.of_node;
rc = i2c_add_adapter(adap);
if (rc)
- goto probe_errorout;
+ return rc;
dev_info(dev->device, "%s@%dhz registered in %s mode\n",
int_name ? int_name : " ", dev->clk_freq_hz,
(dev->irq >= 0) ? "interrupt" : "polling");
return 0;
-
-probe_errorout:
- return rc;
}
static void brcmstb_i2c_remove(struct platform_device *pdev)
@@ -697,7 +690,6 @@ static void brcmstb_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&dev->adapter);
}
-#ifdef CONFIG_PM_SLEEP
static int brcmstb_i2c_suspend(struct device *dev)
{
struct brcmstb_i2c_dev *i2c_dev = dev_get_drvdata(dev);
@@ -715,10 +707,9 @@ static int brcmstb_i2c_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(brcmstb_i2c_pm, brcmstb_i2c_suspend,
- brcmstb_i2c_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(brcmstb_i2c_pm, brcmstb_i2c_suspend,
+ brcmstb_i2c_resume);
static const struct of_device_id brcmstb_i2c_of_match[] = {
{.compatible = "brcm,brcmstb-i2c"},
@@ -732,7 +723,7 @@ static struct platform_driver brcmstb_i2c_driver = {
.driver = {
.name = "brcmstb-i2c",
.of_match_table = brcmstb_i2c_of_match,
- .pm = &brcmstb_i2c_pm,
+ .pm = pm_sleep_ptr(&brcmstb_i2c_pm),
},
.probe = brcmstb_i2c_probe,
.remove_new = brcmstb_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 732daf6a932b..9a664abf734d 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -26,10 +26,10 @@
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <sysdev/fsl_soc.h>
#include <asm/cpm.h>
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 71b60778c643..02b3b1160fb0 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_data/i2c-davinci.h>
#include <linux/pm_runtime.h>
@@ -765,7 +765,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return dev_err_probe(&pdev->dev, irq, "can't get irq resource\n");
+ return irq;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -902,7 +902,6 @@ static void davinci_i2c_remove(struct platform_device *pdev)
pm_runtime_disable(dev->dev);
}
-#ifdef CONFIG_PM
static int davinci_i2c_suspend(struct device *dev)
{
struct davinci_i2c_dev *i2c_dev = dev_get_drvdata(dev);
@@ -926,15 +925,10 @@ static int davinci_i2c_resume(struct device *dev)
static const struct dev_pm_ops davinci_i2c_pm = {
.suspend = davinci_i2c_suspend,
.resume = davinci_i2c_resume,
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
-#define davinci_i2c_pm_ops (&davinci_i2c_pm)
-#else
-#define davinci_i2c_pm_ops NULL
-#endif
-
static const struct platform_device_id davinci_i2c_driver_ids[] = {
{ .name = "i2c_davinci", },
{ /* sentinel */ }
@@ -947,7 +941,7 @@ static struct platform_driver davinci_i2c_driver = {
.id_table = davinci_i2c_driver_ids,
.driver = {
.name = "i2c_davinci",
- .pm = davinci_i2c_pm_ops,
+ .pm = pm_sleep_ptr(&davinci_i2c_pm),
.of_match_table = davinci_i2c_of_match,
},
};
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 24bef0025c98..ca1035e010c7 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -17,6 +17,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -917,6 +918,17 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
return PTR_ERR(gpio);
rinfo->sda_gpiod = gpio;
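+ /* Pinctrl is optional for recovery: warn and carry on, only bail out on -EPROBE_DEFER */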
+ rinfo->pinctrl = devm_pinctrl_get(dev->dev);
+ if (IS_ERR(rinfo->pinctrl)) {
+ if (PTR_ERR(rinfo->pinctrl) == -EPROBE_DEFER)
+ return PTR_ERR(rinfo->pinctrl);
+
+ rinfo->pinctrl = NULL;
+ dev_err(dev->dev, "getting pinctrl info failed: bus recovery might not work\n");
+ } else if (!rinfo->pinctrl) {
+ dev_dbg(dev->dev, "pinctrl is disabled, bus recovery might not work\n");
+ }
+
rinfo->recover_bus = i2c_generic_scl_recovery;
rinfo->prepare_recovery = i2c_dw_prepare_recovery;
rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 970c1c3b0402..855b698e99c0 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -418,7 +418,6 @@ static void dw_i2c_plat_remove(struct platform_device *pdev)
reset_control_assert(dev->rst);
}
-#ifdef CONFIG_PM_SLEEP
static int dw_i2c_plat_prepare(struct device *dev)
{
/*
@@ -429,11 +428,7 @@ static int dw_i2c_plat_prepare(struct device *dev)
*/
return !has_acpi_companion(dev);
}
-#else
-#define dw_i2c_plat_prepare NULL
-#endif
-#ifdef CONFIG_PM
static int dw_i2c_plat_runtime_suspend(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
@@ -447,7 +442,7 @@ static int dw_i2c_plat_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused dw_i2c_plat_suspend(struct device *dev)
+static int dw_i2c_plat_suspend(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
@@ -468,7 +463,7 @@ static int dw_i2c_plat_runtime_resume(struct device *dev)
return 0;
}
-static int __maybe_unused dw_i2c_plat_resume(struct device *dev)
+static int dw_i2c_plat_resume(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
@@ -479,16 +474,11 @@ static int __maybe_unused dw_i2c_plat_resume(struct device *dev)
}
static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
- .prepare = dw_i2c_plat_prepare,
- SET_LATE_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
- SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, dw_i2c_plat_runtime_resume, NULL)
+ .prepare = pm_sleep_ptr(dw_i2c_plat_prepare),
+ LATE_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
+ RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend, dw_i2c_plat_runtime_resume, NULL)
};
-#define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
-#else
-#define DW_I2C_DEV_PMOPS NULL
-#endif
-
/* Work with hotplug and coldplug */
MODULE_ALIAS("platform:i2c_designware");
@@ -499,7 +489,7 @@ static struct platform_driver dw_i2c_driver = {
.name = "i2c_designware",
.of_match_table = of_match_ptr(dw_i2c_of_match),
.acpi_match_table = ACPI_PTR(dw_i2c_acpi_match),
- .pm = DW_I2C_DEV_PMOPS,
+ .pm = pm_ptr(&dw_i2c_dev_pm_ops),
},
};
diff --git a/drivers/i2c/busses/i2c-dln2.c b/drivers/i2c/busses/i2c-dln2.c
index 4f02cc2fb567..631109c7a098 100644
--- a/drivers/i2c/busses/i2c-dln2.c
+++ b/drivers/i2c/busses/i2c-dln2.c
@@ -218,10 +218,8 @@ static int dln2_i2c_probe(struct platform_device *pdev)
/* initialize the i2c interface */
ret = dln2_i2c_enable(dln2, true);
- if (ret < 0) {
- dev_err(dev, "failed to initialize adapter: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to initialize adapter\n");
/* and finally attach to i2c layer */
ret = i2c_add_adapter(&dln2->adapter);
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
index 4ba93cd91c0f..557409410445 100644
--- a/drivers/i2c/busses/i2c-emev2.c
+++ b/drivers/i2c/busses/i2c-emev2.c
@@ -16,7 +16,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index f378cd479e55..2b0b9cdffa86 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -18,9 +18,7 @@
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/spinlock.h>
/*
@@ -892,7 +890,6 @@ static void exynos5_i2c_remove(struct platform_device *pdev)
clk_unprepare(i2c->pclk);
}
-#ifdef CONFIG_PM_SLEEP
static int exynos5_i2c_suspend_noirq(struct device *dev)
{
struct exynos5_i2c *i2c = dev_get_drvdata(dev);
@@ -934,11 +931,10 @@ err_pclk:
clk_disable_unprepare(i2c->pclk);
return ret;
}
-#endif
static const struct dev_pm_ops exynos5_i2c_dev_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos5_i2c_suspend_noirq,
- exynos5_i2c_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos5_i2c_suspend_noirq,
+ exynos5_i2c_resume_noirq)
};
static struct platform_driver exynos5_i2c_driver = {
@@ -946,7 +942,7 @@ static struct platform_driver exynos5_i2c_driver = {
.remove_new = exynos5_i2c_remove,
.driver = {
.name = "exynos5-hsi2c",
- .pm = &exynos5_i2c_dev_pm_ops,
+ .pm = pm_sleep_ptr(&exynos5_i2c_dev_pm_ops),
.of_match_table = exynos5_i2c_match,
},
};
diff --git a/drivers/i2c/busses/i2c-gxp.c b/drivers/i2c/busses/i2c-gxp.c
index 70b0de07ed99..efafc0528c44 100644
--- a/drivers/i2c/busses/i2c-gxp.c
+++ b/drivers/i2c/busses/i2c-gxp.c
@@ -4,8 +4,9 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c
index 0980c773cb5b..dfad5bad5075 100644
--- a/drivers/i2c/busses/i2c-hisi.c
+++ b/drivers/i2c/busses/i2c-hisi.c
@@ -470,18 +470,14 @@ static int hisi_i2c_probe(struct platform_device *pdev)
hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL);
ret = devm_request_irq(dev, ctlr->irq, hisi_i2c_irq, 0, "hisi-i2c", ctlr);
- if (ret) {
- dev_err(dev, "failed to request irq handler, ret = %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request irq handler\n");
ctlr->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
if (IS_ERR_OR_NULL(ctlr->clk)) {
ret = device_property_read_u64(dev, "clk_rate", &clk_rate_hz);
- if (ret) {
- dev_err(dev, "failed to get clock frequency, ret = %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get clock frequency\n");
} else {
clk_rate_hz = clk_get_rate(ctlr->clk);
}
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 784a5f56eb76..8e75515c3ca4 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -475,7 +475,6 @@ static void hix5hd2_i2c_remove(struct platform_device *pdev)
pm_runtime_set_suspended(priv->dev);
}
-#ifdef CONFIG_PM
static int hix5hd2_i2c_runtime_suspend(struct device *dev)
{
struct hix5hd2_i2c_priv *priv = dev_get_drvdata(dev);
@@ -494,12 +493,11 @@ static int hix5hd2_i2c_runtime_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops hix5hd2_i2c_pm_ops = {
- SET_RUNTIME_PM_OPS(hix5hd2_i2c_runtime_suspend,
- hix5hd2_i2c_runtime_resume,
- NULL)
+ RUNTIME_PM_OPS(hix5hd2_i2c_runtime_suspend,
+ hix5hd2_i2c_runtime_resume,
+ NULL)
};
static const struct of_device_id hix5hd2_i2c_match[] = {
@@ -513,7 +511,7 @@ static struct platform_driver hix5hd2_i2c_driver = {
.remove_new = hix5hd2_i2c_remove,
.driver = {
.name = "hix5hd2-i2c",
- .pm = &hix5hd2_i2c_pm_ops,
+ .pm = pm_ptr(&hix5hd2_i2c_pm_ops),
.of_match_table = hix5hd2_i2c_match,
},
};
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 943b8e6d026d..73ae06432133 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1808,7 +1808,6 @@ static void i801_shutdown(struct pci_dev *dev)
pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
}
-#ifdef CONFIG_PM_SLEEP
static int i801_suspend(struct device *dev)
{
struct i801_priv *priv = dev_get_drvdata(dev);
@@ -1827,9 +1826,8 @@ static int i801_resume(struct device *dev)
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(i801_pm_ops, i801_suspend, i801_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(i801_pm_ops, i801_suspend, i801_resume);
static struct pci_driver i801_driver = {
.name = DRV_NAME,
@@ -1838,7 +1836,7 @@ static struct pci_driver i801_driver = {
.remove = i801_remove,
.shutdown = i801_shutdown,
.driver = {
- .pm = &i801_pm_ops,
+ .pm = pm_sleep_ptr(&i801_pm_ops),
.probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 1ad9d3b26dd3..408820319ec4 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -37,9 +37,10 @@
#include <asm/irq.h>
#include <linux/io.h>
#include <linux/i2c.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include "i2c-ibm_iic.h"
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 66ba36949ab5..f9d4bfef511c 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -1454,7 +1454,6 @@ static int img_i2c_runtime_resume(struct device *dev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int img_i2c_suspend(struct device *dev)
{
struct img_i2c *i2c = dev_get_drvdata(dev);
@@ -1482,13 +1481,10 @@ static int img_i2c_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops img_i2c_pm = {
- SET_RUNTIME_PM_OPS(img_i2c_runtime_suspend,
- img_i2c_runtime_resume,
- NULL)
- SET_SYSTEM_SLEEP_PM_OPS(img_i2c_suspend, img_i2c_resume)
+ RUNTIME_PM_OPS(img_i2c_runtime_suspend, img_i2c_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(img_i2c_suspend, img_i2c_resume)
};
static const struct of_device_id img_scb_i2c_match[] = {
@@ -1501,7 +1497,7 @@ static struct platform_driver img_scb_i2c_driver = {
.driver = {
.name = "img-i2c-scb",
.of_match_table = img_scb_i2c_match,
- .pm = &img_i2c_pm,
+ .pm = pm_ptr(&img_i2c_pm),
},
.probe = img_i2c_probe,
.remove_new = img_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 150d923ca7f1..678b30e90492 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -517,14 +516,12 @@ static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
temp = readl(lpi2c_imx->base + LPI2C_MSR);
temp &= enabled;
- if (temp & MSR_RDF)
- lpi2c_imx_read_rxfifo(lpi2c_imx);
-
- if (temp & MSR_TDF)
- lpi2c_imx_write_txfifo(lpi2c_imx);
-
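+ /* An NDF (NACK detected) event ends the transfer, so check it before servicing the FIFOs */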
if (temp & MSR_NDF)
complete(&lpi2c_imx->complete);
+ else if (temp & MSR_RDF)
+ lpi2c_imx_read_rxfifo(lpi2c_imx);
+ else if (temp & MSR_TDF)
+ lpi2c_imx_write_txfifo(lpi2c_imx);
return IRQ_HANDLED;
}
@@ -572,10 +569,8 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
sizeof(lpi2c_imx->adapter.name));
ret = devm_clk_bulk_get_all(&pdev->dev, &lpi2c_imx->clks);
- if (ret < 0) {
- dev_err(&pdev->dev, "can't get I2C peripheral clock, ret=%d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "can't get I2C peripheral clock\n");
lpi2c_imx->num_clks = ret;
ret = of_property_read_u32(pdev->dev.of_node,
@@ -585,10 +580,8 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, 0,
pdev->name, lpi2c_imx);
- if (ret) {
- dev_err(&pdev->dev, "can't claim irq %d\n", irq);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "can't claim irq %d\n", irq);
i2c_set_adapdata(&lpi2c_imx->adapter, lpi2c_imx);
platform_set_drvdata(pdev, lpi2c_imx);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 65128a73e8a3..1775a79aeba2 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -41,7 +41,6 @@
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/i2c-imx.h>
@@ -1389,7 +1388,11 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
struct i2c_bus_recovery_info *rinfo = &i2c_imx->rinfo;
i2c_imx->pinctrl = devm_pinctrl_get(&pdev->dev);
- if (!i2c_imx->pinctrl || IS_ERR(i2c_imx->pinctrl)) {
+ if (!i2c_imx->pinctrl) {
+ dev_info(&pdev->dev, "pinctrl unavailable, bus recovery not supported\n");
+ return 0;
+ }
+ if (IS_ERR(i2c_imx->pinctrl)) {
dev_info(&pdev->dev, "can't get pinctrl, bus recovery not supported\n");
return PTR_ERR(i2c_imx->pinctrl);
}
@@ -1506,8 +1509,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
goto rpm_disable;
/* Request IRQ */
- ret = request_threaded_irq(irq, i2c_imx_isr, NULL, IRQF_SHARED,
- pdev->name, i2c_imx);
+ ret = request_irq(irq, i2c_imx_isr, IRQF_SHARED, pdev->name, i2c_imx);
if (ret) {
dev_err(&pdev->dev, "can't claim irq %d\n", irq);
goto rpm_disable;
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 0dfe60399521..55035cca0ae5 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -18,7 +18,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c
index 281058e3ea46..e01d75308288 100644
--- a/drivers/i2c/busses/i2c-kempld.c
+++ b/drivers/i2c/busses/i2c-kempld.c
@@ -350,10 +350,9 @@ static void kempld_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
}
-#ifdef CONFIG_PM
-static int kempld_i2c_suspend(struct platform_device *pdev, pm_message_t state)
+static int kempld_i2c_suspend(struct device *dev)
{
- struct kempld_i2c_data *i2c = platform_get_drvdata(pdev);
+ struct kempld_i2c_data *i2c = dev_get_drvdata(dev);
struct kempld_device_data *pld = i2c->pld;
u8 ctrl;
@@ -366,9 +365,9 @@ static int kempld_i2c_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int kempld_i2c_resume(struct platform_device *pdev)
+static int kempld_i2c_resume(struct device *dev)
{
- struct kempld_i2c_data *i2c = platform_get_drvdata(pdev);
+ struct kempld_i2c_data *i2c = dev_get_drvdata(dev);
struct kempld_device_data *pld = i2c->pld;
kempld_get_mutex(pld);
@@ -377,19 +376,17 @@ static int kempld_i2c_resume(struct platform_device *pdev)
return 0;
}
-#else
-#define kempld_i2c_suspend NULL
-#define kempld_i2c_resume NULL
-#endif
+
+static DEFINE_SIMPLE_DEV_PM_OPS(kempld_i2c_pm_ops,
+ kempld_i2c_suspend, kempld_i2c_resume);
static struct platform_driver kempld_i2c_driver = {
.driver = {
.name = "kempld-i2c",
+ .pm = pm_sleep_ptr(&kempld_i2c_pm_ops),
},
.probe = kempld_i2c_probe,
.remove_new = kempld_i2c_remove,
- .suspend = kempld_i2c_suspend,
- .resume = kempld_i2c_resume,
};
module_platform_driver(kempld_i2c_driver);
diff --git a/drivers/i2c/busses/i2c-lpc2k.c b/drivers/i2c/busses/i2c-lpc2k.c
index 5c6d96554753..e3660333e91c 100644
--- a/drivers/i2c/busses/i2c-lpc2k.c
+++ b/drivers/i2c/busses/i2c-lpc2k.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/time.h>
@@ -431,7 +430,6 @@ static void i2c_lpc2k_remove(struct platform_device *dev)
i2c_del_adapter(&i2c->adap);
}
-#ifdef CONFIG_PM
static int i2c_lpc2k_suspend(struct device *dev)
{
struct lpc2k_i2c *i2c = dev_get_drvdata(dev);
@@ -456,11 +454,6 @@ static const struct dev_pm_ops i2c_lpc2k_dev_pm_ops = {
.resume_noirq = i2c_lpc2k_resume,
};
-#define I2C_LPC2K_DEV_PM_OPS (&i2c_lpc2k_dev_pm_ops)
-#else
-#define I2C_LPC2K_DEV_PM_OPS NULL
-#endif
-
static const struct of_device_id lpc2k_i2c_match[] = {
{ .compatible = "nxp,lpc1788-i2c" },
{},
@@ -472,7 +465,7 @@ static struct platform_driver i2c_lpc2k_driver = {
.remove_new = i2c_lpc2k_remove,
.driver = {
.name = "lpc2k-i2c",
- .pm = I2C_LPC2K_DEV_PM_OPS,
+ .pm = pm_sleep_ptr(&i2c_lpc2k_dev_pm_ops),
.of_match_table = lpc2k_i2c_match,
},
};
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 16026c895bb6..c7b203cc4434 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/types.h>
diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c
index 7f58f7eaabb6..0b0a1c4d17ca 100644
--- a/drivers/i2c/busses/i2c-microchip-corei2c.c
+++ b/drivers/i2c/busses/i2c-microchip-corei2c.c
@@ -378,9 +378,8 @@ static int mchp_corei2c_probe(struct platform_device *pdev)
return PTR_ERR(idev->base);
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return dev_err_probe(&pdev->dev, -ENXIO,
- "invalid IRQ %d for I2C controller\n", irq);
+ if (irq < 0)
+ return irq;
idev->i2c_clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(idev->i2c_clk))
diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
index ae66bdd1b737..b3a73921ab69 100644
--- a/drivers/i2c/busses/i2c-mlxbf.c
+++ b/drivers/i2c/busses/i2c-mlxbf.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/string.h>
@@ -1080,13 +1080,7 @@ static int mlxbf_i2c_init_resource(struct platform_device *pdev,
if (!tmp_res)
return -ENOMEM;
- tmp_res->params = platform_get_resource(pdev, IORESOURCE_MEM, type);
- if (!tmp_res->params) {
- devm_kfree(dev, tmp_res);
- return -EIO;
- }
-
- tmp_res->io = devm_ioremap_resource(dev, tmp_res->params);
+ tmp_res->io = devm_platform_get_and_ioremap_resource(pdev, type, &tmp_res->params);
if (IS_ERR(tmp_res->io)) {
devm_kfree(dev, tmp_res);
return PTR_ERR(tmp_res->io);
@@ -2323,10 +2317,8 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
ret = mlxbf_i2c_init_resource(pdev, &priv->smbus,
MLXBF_I2C_SMBUS_RES);
- if (ret < 0) {
- dev_err(dev, "Cannot fetch smbus resource info");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot fetch smbus resource info");
priv->timer->io = priv->smbus->io;
priv->mst->io = priv->smbus->io + MLXBF_I2C_MST_ADDR_OFFSET;
@@ -2334,39 +2326,29 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
} else {
ret = mlxbf_i2c_init_resource(pdev, &priv->timer,
MLXBF_I2C_SMBUS_TIMER_RES);
- if (ret < 0) {
- dev_err(dev, "Cannot fetch timer resource info");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot fetch timer resource info");
ret = mlxbf_i2c_init_resource(pdev, &priv->mst,
MLXBF_I2C_SMBUS_MST_RES);
- if (ret < 0) {
- dev_err(dev, "Cannot fetch master resource info");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot fetch master resource info");
ret = mlxbf_i2c_init_resource(pdev, &priv->slv,
MLXBF_I2C_SMBUS_SLV_RES);
- if (ret < 0) {
- dev_err(dev, "Cannot fetch slave resource info");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot fetch slave resource info");
}
ret = mlxbf_i2c_init_resource(pdev, &priv->mst_cause,
MLXBF_I2C_MST_CAUSE_RES);
- if (ret < 0) {
- dev_err(dev, "Cannot fetch cause master resource info");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot fetch cause master resource info");
ret = mlxbf_i2c_init_resource(pdev, &priv->slv_cause,
MLXBF_I2C_SLV_CAUSE_RES);
- if (ret < 0) {
- dev_err(dev, "Cannot fetch cause slave resource info");
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot fetch cause slave resource info");
adap = &priv->adap;
adap->owner = THIS_MODULE;
@@ -2397,11 +2379,9 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
* does not really hurt, then keep the code as is.
*/
ret = mlxbf_i2c_init_master(pdev, priv);
- if (ret < 0) {
- dev_err(dev, "failed to initialize smbus master %d",
- priv->bus);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to initialize smbus master %d",
+ priv->bus);
mlxbf_i2c_init_timings(pdev, priv);
@@ -2413,10 +2393,8 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, mlxbf_i2c_irq,
IRQF_SHARED | IRQF_PROBE_SHARED,
dev_name(dev), priv);
- if (ret < 0) {
- dev_err(dev, "Cannot get irq %d\n", irq);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot get irq %d\n", irq);
priv->irq = irq;
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index c42fd4b329e4..6fec64ea67fb 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -22,6 +22,7 @@
#define MLXCPLD_I2C_BUS_NUM 1
#define MLXCPLD_I2C_DATA_REG_SZ 36
#define MLXCPLD_I2C_DATA_SZ_BIT BIT(5)
+#define MLXCPLD_I2C_DATA_EXT2_SZ_BIT BIT(6)
#define MLXCPLD_I2C_DATA_SZ_MASK GENMASK(6, 5)
#define MLXCPLD_I2C_SMBUS_BLK_BIT BIT(7)
#define MLXCPLD_I2C_MAX_ADDR_LEN 4
@@ -466,6 +467,13 @@ static const struct i2c_adapter_quirks mlxcpld_i2c_quirks_ext = {
.max_comb_1st_msg_len = 4,
};
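+/* Second extended-length flavour: up to (MLXCPLD_I2C_DATA_REG_SZ - 4) * 4 = 128 data bytes */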
+static const struct i2c_adapter_quirks mlxcpld_i2c_quirks_ext2 = {
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+ .max_read_len = (MLXCPLD_I2C_DATA_REG_SZ - 4) * 4,
+ .max_write_len = (MLXCPLD_I2C_DATA_REG_SZ - 4) * 4 + MLXCPLD_I2C_MAX_ADDR_LEN,
+ .max_comb_1st_msg_len = 4,
+};
+
static struct i2c_adapter mlxcpld_i2c_adapter = {
.owner = THIS_MODULE,
.name = "i2c-mlxcpld",
@@ -547,6 +555,8 @@ static int mlxcpld_i2c_probe(struct platform_device *pdev)
/* Check support for extended transaction length */
if ((val & MLXCPLD_I2C_DATA_SZ_MASK) == MLXCPLD_I2C_DATA_SZ_BIT)
mlxcpld_i2c_adapter.quirks = &mlxcpld_i2c_quirks_ext;
+ else if ((val & MLXCPLD_I2C_DATA_SZ_MASK) == MLXCPLD_I2C_DATA_EXT2_SZ_BIT)
+ mlxcpld_i2c_adapter.quirks = &mlxcpld_i2c_quirks_ext2;
/* Check support for smbus block transaction */
if (val & MLXCPLD_I2C_SMBUS_BLK_BIT)
priv->smbus_block = true;
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index f460a7fb4eae..e4e4995ab224 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -11,9 +11,10 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/slab.h>
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 7ca3f2221ba6..1a9b5a068ef1 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -19,9 +19,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
@@ -1514,7 +1512,6 @@ static void mtk_i2c_remove(struct platform_device *pdev)
clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks);
}
-#ifdef CONFIG_PM_SLEEP
static int mtk_i2c_suspend_noirq(struct device *dev)
{
struct mtk_i2c *i2c = dev_get_drvdata(dev);
@@ -1544,11 +1541,10 @@ static int mtk_i2c_resume_noirq(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops mtk_i2c_pm = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq,
- mtk_i2c_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq,
+ mtk_i2c_resume_noirq)
};
static struct platform_driver mtk_i2c_driver = {
@@ -1556,7 +1552,7 @@ static struct platform_driver mtk_i2c_driver = {
.remove_new = mtk_i2c_remove,
.driver = {
.name = I2C_DRV_NAME,
- .pm = &mtk_i2c_pm,
+ .pm = pm_sleep_ptr(&mtk_i2c_pm),
.of_match_table = mtk_i2c_of_match,
},
};
diff --git a/drivers/i2c/busses/i2c-mt7621.c b/drivers/i2c/busses/i2c-mt7621.c
index 104bb194e990..81d46169bc1f 100644
--- a/drivers/i2c/busses/i2c-mt7621.c
+++ b/drivers/i2c/busses/i2c-mt7621.c
@@ -16,7 +16,8 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#define REG_SM0CFG2_REG 0x28
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 1d76f1c4dc06..36def0a9c95c 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -22,7 +22,6 @@
#include <linux/io.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dma/mxs-dma.h>
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 777278386f58..38d203d93eee 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -327,8 +327,8 @@ static int nforce2_probe_smb(struct pci_dev *dev, int bar, int alt_reg,
/* Older incarnations of the device used non-standard BARs */
u16 iobase;
- if (pci_read_config_word(dev, alt_reg, &iobase)
- != PCIBIOS_SUCCESSFUL) {
+ error = pci_read_config_word(dev, alt_reg, &iobase);
+ if (error != PCIBIOS_SUCCESSFUL) {
dev_err(&dev->dev, "Error reading PCI config for %s\n",
name);
return -EIO;
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 212f412f1c74..b10574d42b7a 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -873,7 +873,6 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-#ifdef CONFIG_PM_SLEEP
static int nmk_i2c_suspend_late(struct device *dev)
{
int ret;
@@ -890,9 +889,7 @@ static int nmk_i2c_resume_early(struct device *dev)
{
return pm_runtime_force_resume(dev);
}
-#endif
-#ifdef CONFIG_PM
static int nmk_i2c_runtime_suspend(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
@@ -925,13 +922,10 @@ static int nmk_i2c_runtime_resume(struct device *dev)
return ret;
}
-#endif
static const struct dev_pm_ops nmk_i2c_pm = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(nmk_i2c_suspend_late, nmk_i2c_resume_early)
- SET_RUNTIME_PM_OPS(nmk_i2c_runtime_suspend,
- nmk_i2c_runtime_resume,
- NULL)
+ LATE_SYSTEM_SLEEP_PM_OPS(nmk_i2c_suspend_late, nmk_i2c_resume_early)
+ RUNTIME_PM_OPS(nmk_i2c_runtime_suspend, nmk_i2c_runtime_resume, NULL)
};
static unsigned int nmk_i2c_functionality(struct i2c_adapter *adap)
@@ -1078,7 +1072,7 @@ static struct amba_driver nmk_i2c_driver = {
.drv = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
- .pm = &nmk_i2c_pm,
+ .pm = pm_ptr(&nmk_i2c_pm),
},
.id_table = nmk_i2c_ids,
.probe = nmk_i2c_probe,
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 53b65ffb6a64..495a8b5f6a2b 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -17,7 +17,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 4ac77e57bbbf..041a76f71a49 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -743,7 +743,6 @@ static void ocores_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
}
-#ifdef CONFIG_PM_SLEEP
static int ocores_i2c_suspend(struct device *dev)
{
struct ocores_i2c *i2c = dev_get_drvdata(dev);
@@ -772,11 +771,8 @@ static int ocores_i2c_resume(struct device *dev)
return ocores_init(dev, i2c);
}
-static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume);
-#define OCORES_I2C_PM (&ocores_i2c_pm)
-#else
-#define OCORES_I2C_PM NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(ocores_i2c_pm,
+ ocores_i2c_suspend, ocores_i2c_resume);
static struct platform_driver ocores_i2c_driver = {
.probe = ocores_i2c_probe,
@@ -784,7 +780,7 @@ static struct platform_driver ocores_i2c_driver = {
.driver = {
.name = "ocores-i2c",
.of_match_table = ocores_i2c_match,
- .pm = OCORES_I2C_PM,
+ .pm = pm_sleep_ptr(&ocores_i2c_pm),
},
};
diff --git a/drivers/i2c/busses/i2c-owl.c b/drivers/i2c/busses/i2c-owl.c
index 5f0ef8c35141..777f1a0278c7 100644
--- a/drivers/i2c/busses/i2c-owl.c
+++ b/drivers/i2c/busses/i2c-owl.c
@@ -16,7 +16,8 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
/* I2C registers */
#define OWL_I2C_REG_CTL 0x0000
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index d2a9e7b61c1a..b8d5480c54f6 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -22,7 +22,6 @@
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <asm/irq.h>
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 82400057f810..a12525b3186b 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -613,7 +613,6 @@ static const struct i2c_algorithm pnx_algorithm = {
.functionality = i2c_pnx_func,
};
-#ifdef CONFIG_PM_SLEEP
static int i2c_pnx_controller_suspend(struct device *dev)
{
struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev);
@@ -630,12 +629,9 @@ static int i2c_pnx_controller_resume(struct device *dev)
return clk_prepare_enable(alg_data->clk);
}
-static SIMPLE_DEV_PM_OPS(i2c_pnx_pm,
- i2c_pnx_controller_suspend, i2c_pnx_controller_resume);
-#define PNX_I2C_PM (&i2c_pnx_pm)
-#else
-#define PNX_I2C_PM NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(i2c_pnx_pm,
+ i2c_pnx_controller_suspend,
+ i2c_pnx_controller_resume);
static int i2c_pnx_probe(struct platform_device *pdev)
{
@@ -683,8 +679,7 @@ static int i2c_pnx_probe(struct platform_device *pdev)
"%s", pdev->name);
/* Register I/O resource */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- alg_data->ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ alg_data->ioaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(alg_data->ioaddr))
return PTR_ERR(alg_data->ioaddr);
@@ -763,7 +758,7 @@ static struct platform_driver i2c_pnx_driver = {
.driver = {
.name = "pnx-i2c",
.of_match_table = of_match_ptr(i2c_pnx_of_match),
- .pm = PNX_I2C_PM,
+ .pm = pm_sleep_ptr(&i2c_pnx_pm),
},
.probe = i2c_pnx_probe,
.remove_new = i2c_pnx_remove,
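
The pnx probe above also shows the MMIO helper conversion repeated in several drivers below: the platform_get_resource() + devm_ioremap_resource() pair becomes a single devm_platform_get_and_ioremap_resource() call, which optionally hands back the struct resource. A hedged sketch with a hypothetical foo_probe():

#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/*
	 * One call instead of two; pass NULL as the last argument when the
	 * struct resource itself is not needed afterwards (then
	 * devm_platform_ioremap_resource() is the even shorter form).
	 */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}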
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 30e38bc8b6db..08b3229c443d 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -12,7 +12,6 @@
#include <linux/platform_device.h>
#include <linux/platform_data/i2c-pxa.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
#define CE4100_PCI_I2C_DEVS 3
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index 937f7eebe906..29be05af826b 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1362,7 +1362,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
struct i2c_pxa_platform_data *plat = dev_get_platdata(&dev->dev);
enum pxa_i2c_types i2c_type;
struct pxa_i2c *i2c;
- struct resource *res = NULL;
+ struct resource *res;
int ret, irq;
i2c = devm_kzalloc(&dev->dev, sizeof(struct pxa_i2c), GFP_KERNEL);
@@ -1379,8 +1379,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->adap.dev.of_node = dev->dev.of_node;
#endif
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- i2c->reg_base = devm_ioremap_resource(&dev->dev, res);
+ i2c->reg_base = devm_platform_get_and_ioremap_resource(dev, 0, &res);
if (IS_ERR(i2c->reg_base))
return PTR_ERR(i2c->reg_base);
@@ -1404,10 +1403,9 @@ static int i2c_pxa_probe(struct platform_device *dev)
strscpy(i2c->adap.name, "pxa_i2c-i2c", sizeof(i2c->adap.name));
i2c->clk = devm_clk_get(&dev->dev, NULL);
- if (IS_ERR(i2c->clk)) {
- dev_err(&dev->dev, "failed to get the clk: %ld\n", PTR_ERR(i2c->clk));
- return PTR_ERR(i2c->clk);
- }
+ if (IS_ERR(i2c->clk))
+ return dev_err_probe(&dev->dev, PTR_ERR(i2c->clk),
+ "failed to get the clk\n");
i2c->reg_ibmr = i2c->reg_base + pxa_reg_layout[i2c_type].ibmr;
i2c->reg_idbr = i2c->reg_base + pxa_reg_layout[i2c_type].idbr;
@@ -1491,7 +1489,6 @@ static void i2c_pxa_remove(struct platform_device *dev)
clk_disable_unprepare(i2c->clk);
}
-#ifdef CONFIG_PM
static int i2c_pxa_suspend_noirq(struct device *dev)
{
struct pxa_i2c *i2c = dev_get_drvdata(dev);
@@ -1516,17 +1513,12 @@ static const struct dev_pm_ops i2c_pxa_dev_pm_ops = {
.resume_noirq = i2c_pxa_resume_noirq,
};
-#define I2C_PXA_DEV_PM_OPS (&i2c_pxa_dev_pm_ops)
-#else
-#define I2C_PXA_DEV_PM_OPS NULL
-#endif
-
static struct platform_driver i2c_pxa_driver = {
.probe = i2c_pxa_probe,
.remove_new = i2c_pxa_remove,
.driver = {
.name = "pxa2xx-i2c",
- .pm = I2C_PXA_DEV_PM_OPS,
+ .pm = pm_sleep_ptr(&i2c_pxa_dev_pm_ops),
.of_match_table = i2c_pxa_dt_ids,
},
.id_table = i2c_pxa_id_table,
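
The clock lookup above additionally switches to dev_err_probe(), the error-reporting idiom used by most message conversions in this series: it returns the error code, logs only when the error is not -EPROBE_DEFER, and records the deferral reason for the devices_deferred debugfs file, collapsing the old three-line pattern into one statement. Minimal sketch (hypothetical helper):

#include <linux/clk.h>
#include <linux/dev_printk.h>

static int foo_get_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, NULL);
	if (IS_ERR(*clk))
		/* silent for -EPROBE_DEFER, dev_err() otherwise */
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "failed to get the clk\n");

	return 0;
}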
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index 622dc14add9d..414882c57d7f 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -588,10 +588,10 @@ static int cci_probe(struct platform_device *pdev)
/* Clocks */
ret = devm_clk_bulk_get_all(dev, &cci->clocks);
- if (ret < 1) {
- dev_err(dev, "failed to get clocks %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get clocks\n");
+ else if (!ret)
+ return dev_err_probe(dev, -EINVAL, "not enough clocks in DT\n");
cci->nclocks = ret;
/* Retrieve CCI clock rate */
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index b670a67c4fdd..229353e96e09 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -767,7 +767,6 @@ err_tx:
static int geni_i2c_probe(struct platform_device *pdev)
{
struct geni_i2c_dev *gi2c;
- struct resource *res;
u32 proto, tx_depth, fifo_disable;
int ret;
struct device *dev = &pdev->dev;
@@ -779,8 +778,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
gi2c->se.dev = dev;
gi2c->se.wrapper = dev_get_drvdata(dev->parent);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gi2c->se.base = devm_ioremap_resource(dev, res);
+ gi2c->se.base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gi2c->se.base))
return PTR_ERR(gi2c->se.base);
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index ae90170023b0..598102d16677 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1927,7 +1927,6 @@ static void qup_i2c_remove(struct platform_device *pdev)
pm_runtime_set_suspended(qup->dev);
}
-#ifdef CONFIG_PM
static int qup_i2c_pm_suspend_runtime(struct device *device)
{
struct qup_i2c_dev *qup = dev_get_drvdata(device);
@@ -1945,9 +1944,7 @@ static int qup_i2c_pm_resume_runtime(struct device *device)
qup_i2c_enable_clocks(qup);
return 0;
}
-#endif
-#ifdef CONFIG_PM_SLEEP
static int qup_i2c_suspend(struct device *device)
{
if (!pm_runtime_suspended(device))
@@ -1962,16 +1959,11 @@ static int qup_i2c_resume(struct device *device)
pm_request_autosuspend(device);
return 0;
}
-#endif
static const struct dev_pm_ops qup_i2c_qup_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(
- qup_i2c_suspend,
- qup_i2c_resume)
- SET_RUNTIME_PM_OPS(
- qup_i2c_pm_suspend_runtime,
- qup_i2c_pm_resume_runtime,
- NULL)
+ SYSTEM_SLEEP_PM_OPS(qup_i2c_suspend, qup_i2c_resume)
+ RUNTIME_PM_OPS(qup_i2c_pm_suspend_runtime,
+ qup_i2c_pm_resume_runtime, NULL)
};
static const struct of_device_id qup_i2c_dt_match[] = {
@@ -1987,7 +1979,7 @@ static struct platform_driver qup_i2c_driver = {
.remove_new = qup_i2c_remove,
.driver = {
.name = "i2c_qup",
- .pm = &qup_i2c_qup_pm_ops,
+ .pm = pm_ptr(&qup_i2c_qup_pm_ops),
.of_match_table = qup_i2c_dt_match,
.acpi_match_table = ACPI_PTR(qup_i2c_acpi_match),
},
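
Drivers such as qup that mix system-sleep and runtime callbacks in one table use the unconditional SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() initialisers instead, wrapped with pm_ptr(), which drops the table only when CONFIG_PM is disabled entirely. A sketch with illustrative foo_* names:

#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev) { return 0; }
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = pm_ptr(&foo_pm_ops), /* NULL when CONFIG_PM=n */
	},
};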
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 2d9c37410ebd..a32a93f9a60d 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -24,7 +24,7 @@
#include <linux/i2c-smbus.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -1169,7 +1169,6 @@ static void rcar_i2c_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
}
-#ifdef CONFIG_PM_SLEEP
static int rcar_i2c_suspend(struct device *dev)
{
struct rcar_i2c_priv *priv = dev_get_drvdata(dev);
@@ -1187,19 +1186,14 @@ static int rcar_i2c_resume(struct device *dev)
}
static const struct dev_pm_ops rcar_i2c_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rcar_i2c_suspend, rcar_i2c_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(rcar_i2c_suspend, rcar_i2c_resume)
};
-#define DEV_PM_OPS (&rcar_i2c_pm_ops)
-#else
-#define DEV_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
-
static struct platform_driver rcar_i2c_driver = {
.driver = {
.name = "i2c-rcar",
.of_match_table = rcar_i2c_dt_ids,
- .pm = DEV_PM_OPS,
+ .pm = pm_sleep_ptr(&rcar_i2c_pm_ops),
},
.probe = rcar_i2c_probe,
.remove_new = rcar_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 5f8c0bd508d2..f0ee8871d5ae 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -42,7 +42,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 28f0e5c64f32..127eb3805fac 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -24,7 +24,6 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/mfd/syscon.h>
@@ -1034,9 +1033,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "clock source %p\n", i2c->clk);
/* map the registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2c->regs = devm_ioremap_resource(&pdev->dev, res);
-
+ i2c->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(i2c->regs))
return PTR_ERR(i2c->regs);
@@ -1076,7 +1073,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (!(i2c->quirks & QUIRK_POLL)) {
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret < 0) {
- dev_err(&pdev->dev, "cannot find IRQ\n");
clk_unprepare(i2c->clk);
return ret;
}
@@ -1125,7 +1121,6 @@ static void s3c24xx_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
}
-#ifdef CONFIG_PM_SLEEP
static int s3c24xx_i2c_suspend_noirq(struct device *dev)
{
struct s3c24xx_i2c *i2c = dev_get_drvdata(dev);
@@ -1155,26 +1150,19 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev)
return 0;
}
-#endif
-#ifdef CONFIG_PM
static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(s3c24xx_i2c_suspend_noirq,
- s3c24xx_i2c_resume_noirq)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(s3c24xx_i2c_suspend_noirq,
+ s3c24xx_i2c_resume_noirq)
};
-#define S3C24XX_DEV_PM_OPS (&s3c24xx_i2c_dev_pm_ops)
-#else
-#define S3C24XX_DEV_PM_OPS NULL
-#endif
-
static struct platform_driver s3c24xx_i2c_driver = {
.probe = s3c24xx_i2c_probe,
.remove_new = s3c24xx_i2c_remove,
.id_table = s3c24xx_driver_ids,
.driver = {
.name = "s3c-i2c",
- .pm = S3C24XX_DEV_PM_OPS,
+ .pm = pm_sleep_ptr(&s3c24xx_i2c_dev_pm_ops),
.of_match_table = of_match_ptr(s3c24xx_i2c_match),
},
};
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 21717b943a9e..5adbe62cf621 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -20,7 +20,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -871,7 +871,6 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
{
struct sh_mobile_i2c_data *pd;
struct i2c_adapter *adap;
- struct resource *res;
const struct sh_mobile_dt_config *config;
int ret;
u32 bus_speed;
@@ -893,10 +892,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
pd->dev = &dev->dev;
platform_set_drvdata(dev, pd);
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-
- pd->res = res;
- pd->reg = devm_ioremap_resource(&dev->dev, res);
+ pd->reg = devm_platform_get_and_ioremap_resource(dev, 0, &pd->res);
if (IS_ERR(pd->reg))
return PTR_ERR(pd->reg);
@@ -905,7 +901,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
pd->clks_per_count = 1;
/* Newer variants come with two new bits in ICIC */
- if (resource_size(res) > 0x17)
+ if (resource_size(pd->res) > 0x17)
pd->flags |= IIC_FLAG_HAS_ICIC67;
pm_runtime_enable(&dev->dev);
@@ -965,7 +961,6 @@ static void sh_mobile_i2c_remove(struct platform_device *dev)
pm_runtime_disable(&dev->dev);
}
-#ifdef CONFIG_PM_SLEEP
static int sh_mobile_i2c_suspend(struct device *dev)
{
struct sh_mobile_i2c_data *pd = dev_get_drvdata(dev);
@@ -983,20 +978,15 @@ static int sh_mobile_i2c_resume(struct device *dev)
}
static const struct dev_pm_ops sh_mobile_i2c_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sh_mobile_i2c_suspend,
- sh_mobile_i2c_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(sh_mobile_i2c_suspend,
+ sh_mobile_i2c_resume)
};
-#define DEV_PM_OPS (&sh_mobile_i2c_pm_ops)
-#else
-#define DEV_PM_OPS NULL
-#endif /* CONFIG_PM_SLEEP */
-
static struct platform_driver sh_mobile_i2c_driver = {
.driver = {
.name = "i2c-sh_mobile",
.of_match_table = sh_mobile_i2c_dt_ids,
- .pm = DEV_PM_OPS,
+ .pm = pm_sleep_ptr(&sh_mobile_i2c_pm_ops),
},
.probe = sh_mobile_i2c_probe,
.remove_new = sh_mobile_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index c793a5c14cda..486f1e9dfb74 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -175,11 +175,11 @@ static int sis5595_setup(struct pci_dev *SIS5595_dev)
if (force_addr) {
dev_info(&SIS5595_dev->dev, "forcing ISA address 0x%04X\n", sis5595_base);
- if (pci_write_config_word(SIS5595_dev, ACPI_BASE, sis5595_base)
- != PCIBIOS_SUCCESSFUL)
+ retval = pci_write_config_word(SIS5595_dev, ACPI_BASE, sis5595_base);
+ if (retval != PCIBIOS_SUCCESSFUL)
goto error;
- if (pci_read_config_word(SIS5595_dev, ACPI_BASE, &a)
- != PCIBIOS_SUCCESSFUL)
+ retval = pci_read_config_word(SIS5595_dev, ACPI_BASE, &a);
+ if (retval != PCIBIOS_SUCCESSFUL)
goto error;
if ((a & ~(SIS5595_EXTENT - 1)) != sis5595_base) {
/* doesn't work for some chips! */
@@ -188,16 +188,16 @@ static int sis5595_setup(struct pci_dev *SIS5595_dev)
}
}
- if (pci_read_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, &val)
- != PCIBIOS_SUCCESSFUL)
+ retval = pci_read_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, &val);
+ if (retval != PCIBIOS_SUCCESSFUL)
goto error;
if ((val & 0x80) == 0) {
dev_info(&SIS5595_dev->dev, "enabling ACPI\n");
- if (pci_write_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, val | 0x80)
- != PCIBIOS_SUCCESSFUL)
+ retval = pci_write_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, val | 0x80);
+ if (retval != PCIBIOS_SUCCESSFUL)
goto error;
- if (pci_read_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, &val)
- != PCIBIOS_SUCCESSFUL)
+ retval = pci_read_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, &val);
+ if (retval != PCIBIOS_SUCCESSFUL)
goto error;
if ((val & 0x80) == 0) {
/* doesn't work for some chips? */
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index ffc54fbf814d..c52d1bec60b4 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index 25c3521cae0e..ce2333408904 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -812,8 +812,7 @@ static int st_i2c_probe(struct platform_device *pdev)
if (!i2c_dev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2c_dev->base = devm_ioremap_resource(&pdev->dev, res);
+ i2c_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(i2c_dev->base))
return PTR_ERR(i2c_dev->base);
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c
index 6ad06a5a22b4..ecc54792a66f 100644
--- a/drivers/i2c/busses/i2c-stm32f4.c
+++ b/drivers/i2c/busses/i2c-stm32f4.c
@@ -767,8 +767,7 @@ static int stm32f4_i2c_probe(struct platform_device *pdev)
if (!i2c_dev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2c_dev->base = devm_ioremap_resource(&pdev->dev, res);
+ i2c_dev->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(i2c_dev->base))
return PTR_ERR(i2c_dev->base);
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index e897d9101434..579b30581725 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -2121,12 +2121,12 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
phy_addr = (dma_addr_t)res->start;
irq_event = platform_get_irq(pdev, 0);
- if (irq_event <= 0)
- return irq_event ? : -ENOENT;
+ if (irq_event < 0)
+ return irq_event;
irq_error = platform_get_irq(pdev, 1);
- if (irq_error <= 0)
- return irq_error ? : -ENOENT;
+ if (irq_error < 0)
+ return irq_error;
i2c_dev->wakeup_src = of_property_read_bool(pdev->dev.of_node,
"wakeup-source");
diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
index 4cc196ca8f6d..bbea521b05dd 100644
--- a/drivers/i2c/busses/i2c-synquacer.c
+++ b/drivers/i2c/busses/i2c-synquacer.c
@@ -557,20 +557,16 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "clock source %p\n", i2c->pclk);
ret = clk_prepare_enable(i2c->pclk);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable clock (%d)\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to enable clock\n");
i2c->pclkrate = clk_get_rate(i2c->pclk);
}
if (i2c->pclkrate < SYNQUACER_I2C_MIN_CLK_RATE ||
- i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE) {
- dev_err(&pdev->dev, "PCLK missing or out of range (%d)\n",
- i2c->pclkrate);
- return -EINVAL;
- }
+ i2c->pclkrate > SYNQUACER_I2C_MAX_CLK_RATE)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "PCLK missing or out of range (%d)\n",
+ i2c->pclkrate);
i2c->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(i2c->base))
@@ -582,10 +578,8 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, i2c->irq, synquacer_i2c_isr,
0, dev_name(&pdev->dev), i2c);
- if (ret < 0) {
- dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "cannot claim IRQ %d\n", i2c->irq);
i2c->state = STATE_IDLE;
i2c->dev = &pdev->dev;
@@ -605,10 +599,8 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
synquacer_i2c_hw_init(i2c);
ret = i2c_add_numbered_adapter(&i2c->adapter);
- if (ret) {
- dev_err(&pdev->dev, "failed to add bus to i2c core\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to add bus to i2c core\n");
platform_set_drvdata(pdev, i2c);
diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c
index bc3f94561746..b0840fa0f53e 100644
--- a/drivers/i2c/busses/i2c-tegra-bpmp.c
+++ b/drivers/i2c/busses/i2c-tegra-bpmp.c
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 03fc10b45bd6..920d5a8cbf4c 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -22,7 +22,7 @@
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index d1fa9ff5aeab..1bffe36c40ad 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -222,6 +222,10 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface,
int retval = -ENOMEM;
u16 version;
+ if (interface->intf_assoc &&
+ interface->intf_assoc->bFunctionClass != USB_CLASS_VENDOR_SPEC)
+ return -ENODEV;
+
dev_dbg(&interface->dev, "probing usb device\n");
/* allocate memory for our device state and initialize it */
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index 4b9536f50800..c60ae531ba57 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -243,7 +243,6 @@ static struct virtio_device_id id_table[] = {
};
MODULE_DEVICE_TABLE(virtio, id_table);
-#ifdef CONFIG_PM_SLEEP
static int virtio_i2c_freeze(struct virtio_device *vdev)
{
virtio_i2c_del_vqs(vdev);
@@ -254,7 +253,6 @@ static int virtio_i2c_restore(struct virtio_device *vdev)
{
return virtio_i2c_setup_vqs(vdev->priv);
}
-#endif
static const unsigned int features[] = {
VIRTIO_I2C_F_ZERO_LENGTH_REQUEST,
@@ -269,10 +267,8 @@ static struct virtio_driver virtio_i2c_driver = {
.driver = {
.name = "i2c_virtio",
},
-#ifdef CONFIG_PM_SLEEP
- .freeze = virtio_i2c_freeze,
- .restore = virtio_i2c_restore,
-#endif
+ .freeze = pm_sleep_ptr(virtio_i2c_freeze),
+ .restore = pm_sleep_ptr(virtio_i2c_restore),
};
module_virtio_driver(virtio_i2c_driver);
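
As the virtio hunk shows, pm_sleep_ptr() can wrap individual callback pointers, not just dev_pm_ops tables: the pointer becomes NULL when CONFIG_PM_SLEEP is off while the functions stay compiled. Its definition in include/linux/pm.h is roughly the following (paraphrased):

/* (_ptr) when CONFIG_PM_SLEEP=y, NULL otherwise; because this is an
 * expression rather than an #ifdef, the referenced function is still
 * "used" and does not trigger unused-function warnings. */
#define pm_sleep_ptr(_ptr)	PTR_IF(IS_ENABLED(CONFIG_PM_SLEEP), (_ptr))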
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index f59e8c544f36..08a59a920929 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -529,10 +529,8 @@ static int xlp9xx_i2c_probe(struct platform_device *pdev)
err = devm_request_irq(&pdev->dev, priv->irq, xlp9xx_i2c_isr, 0,
pdev->name, priv);
- if (err) {
- dev_err(&pdev->dev, "IRQ request failed!\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "IRQ request failed!\n");
init_completion(&priv->msg_complete);
priv->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/i2c-atr.c b/drivers/i2c/i2c-atr.c
new file mode 100644
index 000000000000..8ca1daadec93
--- /dev/null
+++ b/drivers/i2c/i2c-atr.c
@@ -0,0 +1,710 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * I2C Address Translator
+ *
+ * Copyright (c) 2019,2022 Luca Ceresoli <luca@lucaceresoli.net>
+ * Copyright (c) 2022,2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ *
+ * Originally based on i2c-mux.c
+ */
+
+#include <linux/fwnode.h>
+#include <linux/i2c-atr.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define ATR_MAX_ADAPTERS 100 /* Just a sanity limit */
+#define ATR_MAX_SYMLINK_LEN 11 /* Longest name is 10 chars: "channel-99" */
+
+/**
+ * struct i2c_atr_alias_pair - Holds the alias assigned to a client.
+ * @node: List node
+ * @client: Pointer to the client on the child bus
+ * @alias: I2C alias address assigned by the driver.
+ * This is the address that will be used to issue I2C transactions
+ * on the parent (physical) bus.
+ */
+struct i2c_atr_alias_pair {
+ struct list_head node;
+ const struct i2c_client *client;
+ u16 alias;
+};
+
+/**
+ * struct i2c_atr_chan - Data for a channel.
+ * @adap: The &struct i2c_adapter for the channel
+ * @atr: The parent I2C ATR
+ * @chan_id: The ID of this channel
+ * @alias_list: List of &struct i2c_atr_alias_pair containing the
+ *              assigned aliases
+ * @orig_addrs_lock: Mutex protecting @orig_addrs
+ * @orig_addrs: Buffer used to store the original addresses during transmit
+ * @orig_addrs_size: Size of @orig_addrs
+ */
+struct i2c_atr_chan {
+ struct i2c_adapter adap;
+ struct i2c_atr *atr;
+ u32 chan_id;
+
+ struct list_head alias_list;
+
+ /* Lock orig_addrs during xfer */
+ struct mutex orig_addrs_lock;
+ u16 *orig_addrs;
+ unsigned int orig_addrs_size;
+};
+
+/**
+ * struct i2c_atr - The I2C ATR instance
+ * @parent: The parent &struct i2c_adapter
+ * @dev: The device that owns the I2C ATR instance
+ * @ops: &struct i2c_atr_ops
+ * @priv: Private driver data, set with i2c_atr_set_driver_data()
+ * @algo: The &struct i2c_algorithm for adapters
+ * @lock: Lock for the I2C bus segment (see &struct i2c_lock_operations)
+ * @max_adapters: Maximum number of adapters this I2C ATR can have
+ * @num_aliases: Number of aliases in the aliases array
+ * @aliases: The aliases array
+ * @alias_mask_lock: Lock protecting alias_use_mask
+ * @alias_use_mask: Bitmask for used aliases in aliases array
+ * @i2c_nb: Notifier for remote client add & del events
+ * @adapter: Array of adapters
+ */
+struct i2c_atr {
+ struct i2c_adapter *parent;
+ struct device *dev;
+ const struct i2c_atr_ops *ops;
+
+ void *priv;
+
+ struct i2c_algorithm algo;
+ /* lock for the I2C bus segment (see struct i2c_lock_operations) */
+ struct mutex lock;
+ int max_adapters;
+
+ size_t num_aliases;
+ const u16 *aliases;
+ /* Protects alias_use_mask */
+ spinlock_t alias_mask_lock;
+ unsigned long *alias_use_mask;
+
+ struct notifier_block i2c_nb;
+
+ struct i2c_adapter *adapter[];
+};
+
+static struct i2c_atr_alias_pair *
+i2c_atr_find_mapping_by_client(const struct list_head *list,
+ const struct i2c_client *client)
+{
+ struct i2c_atr_alias_pair *c2a;
+
+ list_for_each_entry(c2a, list, node) {
+ if (c2a->client == client)
+ return c2a;
+ }
+
+ return NULL;
+}
+
+static struct i2c_atr_alias_pair *
+i2c_atr_find_mapping_by_addr(const struct list_head *list, u16 phys_addr)
+{
+ struct i2c_atr_alias_pair *c2a;
+
+ list_for_each_entry(c2a, list, node) {
+ if (c2a->client->addr == phys_addr)
+ return c2a;
+ }
+
+ return NULL;
+}
+
+/*
+ * Replace all message addresses with their aliases, saving the original
+ * addresses.
+ *
+ * This function is internal for use in i2c_atr_master_xfer(). It must be
+ * followed by i2c_atr_unmap_msgs() to restore the original addresses.
+ */
+static int i2c_atr_map_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
+ int num)
+{
+ struct i2c_atr *atr = chan->atr;
+	struct i2c_atr_alias_pair *c2a;
+ int i;
+
+ /* Ensure we have enough room to save the original addresses */
+ if (unlikely(chan->orig_addrs_size < num)) {
+ u16 *new_buf;
+
+ /* We don't care about old data, hence no realloc() */
+ new_buf = kmalloc_array(num, sizeof(*new_buf), GFP_KERNEL);
+ if (!new_buf)
+ return -ENOMEM;
+
+ kfree(chan->orig_addrs);
+ chan->orig_addrs = new_buf;
+ chan->orig_addrs_size = num;
+ }
+
+ for (i = 0; i < num; i++) {
+ chan->orig_addrs[i] = msgs[i].addr;
+
+ c2a = i2c_atr_find_mapping_by_addr(&chan->alias_list,
+ msgs[i].addr);
+ if (!c2a) {
+ dev_err(atr->dev, "client 0x%02x not mapped!\n",
+ msgs[i].addr);
+
+ while (i--)
+ msgs[i].addr = chan->orig_addrs[i];
+
+ return -ENXIO;
+ }
+
+ msgs[i].addr = c2a->alias;
+ }
+
+ return 0;
+}
+
+/*
+ * Restore all message address aliases with the original addresses. This
+ * function is internal for use in i2c_atr_master_xfer() and for this reason it
+ * needs no null and size checks on orig_addr.
+ *
+ * @see i2c_atr_map_msgs()
+ */
+static void i2c_atr_unmap_msgs(struct i2c_atr_chan *chan, struct i2c_msg *msgs,
+ int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ msgs[i].addr = chan->orig_addrs[i];
+}
+
+static int i2c_atr_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ int num)
+{
+ struct i2c_atr_chan *chan = adap->algo_data;
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_adapter *parent = atr->parent;
+ int ret;
+
+ /* Translate addresses */
+ mutex_lock(&chan->orig_addrs_lock);
+
+ ret = i2c_atr_map_msgs(chan, msgs, num);
+ if (ret < 0)
+ goto err_unlock;
+
+ /* Perform the transfer */
+ ret = i2c_transfer(parent, msgs, num);
+
+ /* Restore addresses */
+ i2c_atr_unmap_msgs(chan, msgs, num);
+
+err_unlock:
+ mutex_unlock(&chan->orig_addrs_lock);
+
+ return ret;
+}
+
+static int i2c_atr_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int size, union i2c_smbus_data *data)
+{
+ struct i2c_atr_chan *chan = adap->algo_data;
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_adapter *parent = atr->parent;
+ struct i2c_atr_alias_pair *c2a;
+
+ c2a = i2c_atr_find_mapping_by_addr(&chan->alias_list, addr);
+ if (!c2a) {
+ dev_err(atr->dev, "client 0x%02x not mapped!\n", addr);
+ return -ENXIO;
+ }
+
+ return i2c_smbus_xfer(parent, c2a->alias, flags, read_write, command,
+ size, data);
+}
+
+static u32 i2c_atr_functionality(struct i2c_adapter *adap)
+{
+ struct i2c_atr_chan *chan = adap->algo_data;
+ struct i2c_adapter *parent = chan->atr->parent;
+
+ return parent->algo->functionality(parent);
+}
+
+static void i2c_atr_lock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+ struct i2c_atr_chan *chan = adapter->algo_data;
+ struct i2c_atr *atr = chan->atr;
+
+ mutex_lock(&atr->lock);
+}
+
+static int i2c_atr_trylock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+ struct i2c_atr_chan *chan = adapter->algo_data;
+ struct i2c_atr *atr = chan->atr;
+
+ return mutex_trylock(&atr->lock);
+}
+
+static void i2c_atr_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
+{
+ struct i2c_atr_chan *chan = adapter->algo_data;
+ struct i2c_atr *atr = chan->atr;
+
+ mutex_unlock(&atr->lock);
+}
+
+static const struct i2c_lock_operations i2c_atr_lock_ops = {
+ .lock_bus = i2c_atr_lock_bus,
+ .trylock_bus = i2c_atr_trylock_bus,
+ .unlock_bus = i2c_atr_unlock_bus,
+};
+
+static int i2c_atr_reserve_alias(struct i2c_atr *atr)
+{
+ unsigned long idx;
+
+ spin_lock(&atr->alias_mask_lock);
+
+ idx = find_first_zero_bit(atr->alias_use_mask, atr->num_aliases);
+ if (idx >= atr->num_aliases) {
+ spin_unlock(&atr->alias_mask_lock);
+ dev_err(atr->dev, "failed to find a free alias\n");
+ return -EBUSY;
+ }
+
+ set_bit(idx, atr->alias_use_mask);
+
+ spin_unlock(&atr->alias_mask_lock);
+
+ return atr->aliases[idx];
+}
+
+static void i2c_atr_release_alias(struct i2c_atr *atr, u16 alias)
+{
+ unsigned int idx;
+
+ spin_lock(&atr->alias_mask_lock);
+
+ for (idx = 0; idx < atr->num_aliases; ++idx) {
+ if (atr->aliases[idx] == alias) {
+ clear_bit(idx, atr->alias_use_mask);
+ spin_unlock(&atr->alias_mask_lock);
+ return;
+ }
+ }
+
+ spin_unlock(&atr->alias_mask_lock);
+
+ /* This should never happen */
+ dev_warn(atr->dev, "Unable to find mapped alias\n");
+}
+
+static int i2c_atr_attach_client(struct i2c_adapter *adapter,
+ const struct i2c_client *client)
+{
+ struct i2c_atr_chan *chan = adapter->algo_data;
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_atr_alias_pair *c2a;
+ u16 alias;
+ int ret;
+
+ ret = i2c_atr_reserve_alias(atr);
+ if (ret < 0)
+ return ret;
+
+ alias = ret;
+
+ c2a = kzalloc(sizeof(*c2a), GFP_KERNEL);
+ if (!c2a) {
+ ret = -ENOMEM;
+ goto err_release_alias;
+ }
+
+ ret = atr->ops->attach_client(atr, chan->chan_id, client, alias);
+ if (ret)
+ goto err_free;
+
+ dev_dbg(atr->dev, "chan%u: client 0x%02x mapped at alias 0x%02x (%s)\n",
+ chan->chan_id, client->addr, alias, client->name);
+
+ c2a->client = client;
+ c2a->alias = alias;
+ list_add(&c2a->node, &chan->alias_list);
+
+ return 0;
+
+err_free:
+ kfree(c2a);
+err_release_alias:
+ i2c_atr_release_alias(atr, alias);
+
+ return ret;
+}
+
+static void i2c_atr_detach_client(struct i2c_adapter *adapter,
+ const struct i2c_client *client)
+{
+ struct i2c_atr_chan *chan = adapter->algo_data;
+ struct i2c_atr *atr = chan->atr;
+ struct i2c_atr_alias_pair *c2a;
+
+ atr->ops->detach_client(atr, chan->chan_id, client);
+
+ c2a = i2c_atr_find_mapping_by_client(&chan->alias_list, client);
+ if (!c2a) {
+ /* This should never happen */
+ dev_warn(atr->dev, "Unable to find address mapping\n");
+ return;
+ }
+
+ i2c_atr_release_alias(atr, c2a->alias);
+
+ dev_dbg(atr->dev,
+ "chan%u: client 0x%02x unmapped from alias 0x%02x (%s)\n",
+ chan->chan_id, client->addr, c2a->alias, client->name);
+
+ list_del(&c2a->node);
+ kfree(c2a);
+}
+
+static int i2c_atr_bus_notifier_call(struct notifier_block *nb,
+ unsigned long event, void *device)
+{
+ struct i2c_atr *atr = container_of(nb, struct i2c_atr, i2c_nb);
+ struct device *dev = device;
+ struct i2c_client *client;
+ u32 chan_id;
+ int ret;
+
+ client = i2c_verify_client(dev);
+ if (!client)
+ return NOTIFY_DONE;
+
+ /* Is the client in one of our adapters? */
+ for (chan_id = 0; chan_id < atr->max_adapters; ++chan_id) {
+ if (client->adapter == atr->adapter[chan_id])
+ break;
+ }
+
+ if (chan_id == atr->max_adapters)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ ret = i2c_atr_attach_client(client->adapter, client);
+ if (ret)
+ dev_err(atr->dev,
+ "Failed to attach remote client '%s': %d\n",
+ dev_name(dev), ret);
+ break;
+
+ case BUS_NOTIFY_DEL_DEVICE:
+ i2c_atr_detach_client(client->adapter, client);
+ break;
+
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int i2c_atr_parse_alias_pool(struct i2c_atr *atr)
+{
+ struct device *dev = atr->dev;
+ unsigned long *alias_use_mask;
+ size_t num_aliases;
+ unsigned int i;
+ u32 *aliases32;
+ u16 *aliases16;
+ int ret;
+
+ ret = fwnode_property_count_u32(dev_fwnode(dev), "i2c-alias-pool");
+ if (ret < 0) {
+ dev_err(dev, "Failed to count 'i2c-alias-pool' property: %d\n",
+ ret);
+ return ret;
+ }
+
+ num_aliases = ret;
+
+ if (!num_aliases)
+ return 0;
+
+ aliases32 = kcalloc(num_aliases, sizeof(*aliases32), GFP_KERNEL);
+ if (!aliases32)
+ return -ENOMEM;
+
+ ret = fwnode_property_read_u32_array(dev_fwnode(dev), "i2c-alias-pool",
+ aliases32, num_aliases);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read 'i2c-alias-pool' property: %d\n",
+ ret);
+ goto err_free_aliases32;
+ }
+
+ aliases16 = kcalloc(num_aliases, sizeof(*aliases16), GFP_KERNEL);
+ if (!aliases16) {
+ ret = -ENOMEM;
+ goto err_free_aliases32;
+ }
+
+ for (i = 0; i < num_aliases; i++) {
+ if (!(aliases32[i] & 0xffff0000)) {
+ aliases16[i] = aliases32[i];
+ continue;
+ }
+
+ dev_err(dev, "Failed to parse 'i2c-alias-pool' property: I2C flags are not supported\n");
+ ret = -EINVAL;
+ goto err_free_aliases16;
+ }
+
+ alias_use_mask = bitmap_zalloc(num_aliases, GFP_KERNEL);
+ if (!alias_use_mask) {
+ ret = -ENOMEM;
+ goto err_free_aliases16;
+ }
+
+ kfree(aliases32);
+
+ atr->num_aliases = num_aliases;
+ atr->aliases = aliases16;
+ atr->alias_use_mask = alias_use_mask;
+
+	dev_dbg(dev, "i2c-alias-pool has %zu aliases\n", atr->num_aliases);
+
+ return 0;
+
+err_free_aliases16:
+ kfree(aliases16);
+err_free_aliases32:
+ kfree(aliases32);
+ return ret;
+}
+
+struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
+ const struct i2c_atr_ops *ops, int max_adapters)
+{
+ struct i2c_atr *atr;
+ int ret;
+
+ if (max_adapters > ATR_MAX_ADAPTERS)
+ return ERR_PTR(-EINVAL);
+
+ if (!ops || !ops->attach_client || !ops->detach_client)
+ return ERR_PTR(-EINVAL);
+
+ atr = kzalloc(struct_size(atr, adapter, max_adapters), GFP_KERNEL);
+ if (!atr)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&atr->lock);
+ spin_lock_init(&atr->alias_mask_lock);
+
+ atr->parent = parent;
+ atr->dev = dev;
+ atr->ops = ops;
+ atr->max_adapters = max_adapters;
+
+ if (parent->algo->master_xfer)
+ atr->algo.master_xfer = i2c_atr_master_xfer;
+ if (parent->algo->smbus_xfer)
+ atr->algo.smbus_xfer = i2c_atr_smbus_xfer;
+ atr->algo.functionality = i2c_atr_functionality;
+
+ ret = i2c_atr_parse_alias_pool(atr);
+ if (ret)
+ goto err_destroy_mutex;
+
+ atr->i2c_nb.notifier_call = i2c_atr_bus_notifier_call;
+ ret = bus_register_notifier(&i2c_bus_type, &atr->i2c_nb);
+ if (ret)
+ goto err_free_aliases;
+
+ return atr;
+
+err_free_aliases:
+ bitmap_free(atr->alias_use_mask);
+ kfree(atr->aliases);
+err_destroy_mutex:
+ mutex_destroy(&atr->lock);
+ kfree(atr);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_NS_GPL(i2c_atr_new, I2C_ATR);
+
+void i2c_atr_delete(struct i2c_atr *atr)
+{
+ unsigned int i;
+
+ for (i = 0; i < atr->max_adapters; ++i)
+ WARN_ON(atr->adapter[i]);
+
+ bus_unregister_notifier(&i2c_bus_type, &atr->i2c_nb);
+ bitmap_free(atr->alias_use_mask);
+ kfree(atr->aliases);
+ mutex_destroy(&atr->lock);
+ kfree(atr);
+}
+EXPORT_SYMBOL_NS_GPL(i2c_atr_delete, I2C_ATR);
+
+int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
+ struct device *adapter_parent,
+ struct fwnode_handle *bus_handle)
+{
+ struct i2c_adapter *parent = atr->parent;
+ struct device *dev = atr->dev;
+ struct i2c_atr_chan *chan;
+ char symlink_name[ATR_MAX_SYMLINK_LEN];
+ int ret;
+
+ if (chan_id >= atr->max_adapters) {
+ dev_err(dev, "No room for more i2c-atr adapters\n");
+ return -EINVAL;
+ }
+
+ if (atr->adapter[chan_id]) {
+ dev_err(dev, "Adapter %d already present\n", chan_id);
+ return -EEXIST;
+ }
+
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+
+ if (!adapter_parent)
+ adapter_parent = dev;
+
+ chan->atr = atr;
+ chan->chan_id = chan_id;
+ INIT_LIST_HEAD(&chan->alias_list);
+ mutex_init(&chan->orig_addrs_lock);
+
+ snprintf(chan->adap.name, sizeof(chan->adap.name), "i2c-%d-atr-%d",
+ i2c_adapter_id(parent), chan_id);
+ chan->adap.owner = THIS_MODULE;
+ chan->adap.algo = &atr->algo;
+ chan->adap.algo_data = chan;
+ chan->adap.dev.parent = adapter_parent;
+ chan->adap.retries = parent->retries;
+ chan->adap.timeout = parent->timeout;
+ chan->adap.quirks = parent->quirks;
+ chan->adap.lock_ops = &i2c_atr_lock_ops;
+
+ if (bus_handle) {
+ device_set_node(&chan->adap.dev, fwnode_handle_get(bus_handle));
+ } else {
+ struct fwnode_handle *atr_node;
+ struct fwnode_handle *child;
+ u32 reg;
+
+ atr_node = device_get_named_child_node(dev, "i2c-atr");
+
+ fwnode_for_each_child_node(atr_node, child) {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret)
+ continue;
+ if (chan_id == reg)
+ break;
+ }
+
+ device_set_node(&chan->adap.dev, child);
+ fwnode_handle_put(atr_node);
+ }
+
+ atr->adapter[chan_id] = &chan->adap;
+
+ ret = i2c_add_adapter(&chan->adap);
+ if (ret) {
+ dev_err(dev, "failed to add atr-adapter %u (error=%d)\n",
+ chan_id, ret);
+ goto err_fwnode_put;
+ }
+
+ snprintf(symlink_name, sizeof(symlink_name), "channel-%u",
+ chan->chan_id);
+
+ ret = sysfs_create_link(&chan->adap.dev.kobj, &dev->kobj, "atr_device");
+ if (ret)
+ dev_warn(dev, "can't create symlink to atr device\n");
+ ret = sysfs_create_link(&dev->kobj, &chan->adap.dev.kobj, symlink_name);
+ if (ret)
+ dev_warn(dev, "can't create symlink for channel %u\n", chan_id);
+
+ dev_dbg(dev, "Added ATR child bus %d\n", i2c_adapter_id(&chan->adap));
+
+ return 0;
+
+err_fwnode_put:
+ fwnode_handle_put(dev_fwnode(&chan->adap.dev));
+ mutex_destroy(&chan->orig_addrs_lock);
+ kfree(chan);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(i2c_atr_add_adapter, I2C_ATR);
+
+void i2c_atr_del_adapter(struct i2c_atr *atr, u32 chan_id)
+{
+ char symlink_name[ATR_MAX_SYMLINK_LEN];
+ struct i2c_adapter *adap;
+ struct i2c_atr_chan *chan;
+ struct fwnode_handle *fwnode;
+ struct device *dev = atr->dev;
+
+ adap = atr->adapter[chan_id];
+ if (!adap)
+ return;
+
+ chan = adap->algo_data;
+ fwnode = dev_fwnode(&adap->dev);
+
+ dev_dbg(dev, "Removing ATR child bus %d\n", i2c_adapter_id(adap));
+
+ snprintf(symlink_name, sizeof(symlink_name), "channel-%u",
+ chan->chan_id);
+ sysfs_remove_link(&dev->kobj, symlink_name);
+ sysfs_remove_link(&chan->adap.dev.kobj, "atr_device");
+
+ i2c_del_adapter(adap);
+
+ atr->adapter[chan_id] = NULL;
+
+ fwnode_handle_put(fwnode);
+ mutex_destroy(&chan->orig_addrs_lock);
+ kfree(chan->orig_addrs);
+ kfree(chan);
+}
+EXPORT_SYMBOL_NS_GPL(i2c_atr_del_adapter, I2C_ATR);
+
+void i2c_atr_set_driver_data(struct i2c_atr *atr, void *data)
+{
+ atr->priv = data;
+}
+EXPORT_SYMBOL_NS_GPL(i2c_atr_set_driver_data, I2C_ATR);
+
+void *i2c_atr_get_driver_data(struct i2c_atr *atr)
+{
+ return atr->priv;
+}
+EXPORT_SYMBOL_NS_GPL(i2c_atr_get_driver_data, I2C_ATR);
+
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>");
+MODULE_DESCRIPTION("I2C Address Translator");
+MODULE_LICENSE("GPL");
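
The new i2c-atr core exposes a small consumer API: a host driver (typically a serializer/deserializer whose hardware rewrites I2C addresses) creates a translator with i2c_atr_new(), adds one child adapter per downstream port with i2c_atr_add_adapter(), and supplies attach_client/detach_client callbacks that program the hardware alias table as remote clients come and go. A hedged usage sketch from a hypothetical deserializer driver; the foo_* names and the foo_hw_*() register helpers are illustrative, not part of this patch:

#include <linux/i2c.h>
#include <linux/i2c-atr.h>

struct foo_des {
	struct i2c_atr *atr;
};

/* Placeholders for the device-specific alias-table register accesses. */
static int foo_hw_set_alias(struct foo_des *priv, u32 chan, u16 addr, u16 alias)
{
	return 0;
}

static void foo_hw_clear_alias(struct foo_des *priv, u32 chan, u16 addr)
{
}

static int foo_attach_client(struct i2c_atr *atr, u32 chan_id,
			     const struct i2c_client *client, u16 alias)
{
	struct foo_des *priv = i2c_atr_get_driver_data(atr);

	/* Map transactions for client->addr onto the reserved alias. */
	return foo_hw_set_alias(priv, chan_id, client->addr, alias);
}

static void foo_detach_client(struct i2c_atr *atr, u32 chan_id,
			      const struct i2c_client *client)
{
	struct foo_des *priv = i2c_atr_get_driver_data(atr);

	foo_hw_clear_alias(priv, chan_id, client->addr);
}

static const struct i2c_atr_ops foo_atr_ops = {
	.attach_client = foo_attach_client,
	.detach_client = foo_detach_client,
};

static int foo_init_atr(struct device *dev, struct i2c_adapter *parent,
			struct foo_des *priv)
{
	int ret;

	priv->atr = i2c_atr_new(parent, dev, &foo_atr_ops, 1);
	if (IS_ERR(priv->atr))
		return PTR_ERR(priv->atr);

	i2c_atr_set_driver_data(priv->atr, priv);

	/* One child bus; fwnode looked up from the "i2c-atr" child node. */
	ret = i2c_atr_add_adapter(priv->atr, 0, dev, NULL);
	if (ret)
		i2c_atr_delete(priv->atr);

	return ret;
}

Because the symbols are exported in the I2C_ATR namespace, a real consumer module would also need MODULE_IMPORT_NS(I2C_ATR).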
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index ea838dbae32e..db1b9057612a 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -65,11 +65,11 @@ config I2C_MUX_PCA9541
will be called i2c-mux-pca9541.
config I2C_MUX_PCA954x
- tristate "NXP PCA954x and PCA984x I2C Mux/switches"
+ tristate "NXP PCA954x/PCA984x and Maxim MAX735x/MAX736x I2C Mux/switches"
depends on GPIOLIB || COMPILE_TEST
help
- If you say yes here you get support for the NXP PCA954x
- and PCA984x I2C mux/switch devices.
+ If you say yes here you get support for NXP PCA954x/PCA984x
+ and Maxim MAX735x/MAX736x I2C mux/switch devices.
This driver can also be built as a module. If so, the module
will be called i2c-mux-pca954x.
diff --git a/drivers/i2c/muxes/i2c-mux-gpmux.c b/drivers/i2c/muxes/i2c-mux-gpmux.c
index 0405af0e1510..baccf4bfaf02 100644
--- a/drivers/i2c/muxes/i2c-mux-gpmux.c
+++ b/drivers/i2c/muxes/i2c-mux-gpmux.c
@@ -11,7 +11,7 @@
#include <linux/i2c-mux.h>
#include <linux/module.h>
#include <linux/mux/consumer.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
struct mux {
diff --git a/drivers/i2c/muxes/i2c-mux-ltc4306.c b/drivers/i2c/muxes/i2c-mux-ltc4306.c
index 5a03031519be..23766d853e76 100644
--- a/drivers/i2c/muxes/i2c-mux-ltc4306.c
+++ b/drivers/i2c/muxes/i2c-mux-ltc4306.c
@@ -15,7 +15,6 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -62,7 +61,7 @@ static const struct chip_desc chips[] = {
static bool ltc4306_is_volatile_reg(struct device *dev, unsigned int reg)
{
- return (reg == LTC_REG_CONFIG) ? true : false;
+ return reg == LTC_REG_CONFIG;
}
static const struct regmap_config ltc4306_regmap_config = {
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 0ccee2ae5720..2219062104fb 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -11,6 +11,12 @@
* PCA9540, PCA9542, PCA9543, PCA9544, PCA9545, PCA9546, PCA9547,
* PCA9548, PCA9846, PCA9847, PCA9848 and PCA9849.
*
+ * It is also compatible with Maxim's MAX735x I2C switch chips, which are
+ * controlled like the NXP PCA9548, and with the MAX736x chips that behave
+ * like the PCA9544.
+ *
+ * This includes the MAX7356, MAX7357, MAX7358, MAX7367, MAX7368 and MAX7369.
+ *
* These chips are all controlled via the I2C bus itself, and all have a
* single 8-bit register. The upstream "parent" bus fans out to two,
* four, or eight downstream busses or channels; which of these
@@ -42,6 +48,7 @@
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/property.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <dt-bindings/mux/mux.h>
@@ -51,6 +58,12 @@
#define PCA954X_IRQ_OFFSET 4
enum pca_type {
+ max_7356,
+ max_7357,
+ max_7358,
+ max_7367,
+ max_7368,
+ max_7369,
pca_9540,
pca_9542,
pca_9543,
@@ -88,10 +101,52 @@ struct pca954x {
struct irq_domain *irq;
unsigned int irq_mask;
raw_spinlock_t lock;
+ struct regulator *supply;
};
-/* Provide specs for the PCA954x types we know about */
+/* Provide specs for the MAX735x, PCA954x and PCA984x types we know about */
static const struct chip_desc chips[] = {
+ [max_7356] = {
+ .nchans = 8,
+ .muxtype = pca954x_isswi,
+ .id = { .manufacturer_id = I2C_DEVICE_ID_NONE },
+ },
+ [max_7357] = {
+ .nchans = 8,
+ .muxtype = pca954x_isswi,
+ .id = { .manufacturer_id = I2C_DEVICE_ID_NONE },
+ /*
+ * No interrupt controller support. The interrupt
+ * provides information about stuck channels.
+ */
+ },
+ [max_7358] = {
+ .nchans = 8,
+ .muxtype = pca954x_isswi,
+ .id = { .manufacturer_id = I2C_DEVICE_ID_NONE },
+ /*
+ * No interrupt controller support. The interrupt
+ * provides information about stuck channels.
+ */
+ },
+ [max_7367] = {
+ .nchans = 4,
+ .muxtype = pca954x_isswi,
+ .has_irq = 1,
+ .id = { .manufacturer_id = I2C_DEVICE_ID_NONE },
+ },
+ [max_7368] = {
+ .nchans = 4,
+ .muxtype = pca954x_isswi,
+ .id = { .manufacturer_id = I2C_DEVICE_ID_NONE },
+ },
+ [max_7369] = {
+ .nchans = 4,
+ .enable = 0x4,
+ .muxtype = pca954x_ismux,
+ .has_irq = 1,
+ .id = { .manufacturer_id = I2C_DEVICE_ID_NONE },
+ },
[pca_9540] = {
.nchans = 2,
.enable = 0x4,
@@ -177,6 +232,12 @@ static const struct chip_desc chips[] = {
};
static const struct i2c_device_id pca954x_id[] = {
+ { "max7356", max_7356 },
+ { "max7357", max_7357 },
+ { "max7358", max_7358 },
+ { "max7367", max_7367 },
+ { "max7368", max_7368 },
+ { "max7369", max_7369 },
{ "pca9540", pca_9540 },
{ "pca9542", pca_9542 },
{ "pca9543", pca_9543 },
@@ -194,6 +255,12 @@ static const struct i2c_device_id pca954x_id[] = {
MODULE_DEVICE_TABLE(i2c, pca954x_id);
static const struct of_device_id pca954x_of_match[] = {
+ { .compatible = "maxim,max7356", .data = &chips[max_7356] },
+ { .compatible = "maxim,max7357", .data = &chips[max_7357] },
+ { .compatible = "maxim,max7358", .data = &chips[max_7358] },
+ { .compatible = "maxim,max7367", .data = &chips[max_7367] },
+ { .compatible = "maxim,max7368", .data = &chips[max_7368] },
+ { .compatible = "maxim,max7369", .data = &chips[max_7369] },
{ .compatible = "nxp,pca9540", .data = &chips[pca_9540] },
{ .compatible = "nxp,pca9542", .data = &chips[pca_9542] },
{ .compatible = "nxp,pca9543", .data = &chips[pca_9543] },
@@ -382,6 +449,8 @@ static void pca954x_cleanup(struct i2c_mux_core *muxc)
struct pca954x *data = i2c_mux_priv(muxc);
int c, irq;
+ regulator_disable(data->supply);
+
if (data->irq) {
for (c = 0; c < data->chip->nchans; c++) {
irq = irq_find_mapping(data->irq, c);
@@ -434,10 +503,22 @@ static int pca954x_probe(struct i2c_client *client)
i2c_set_clientdata(client, muxc);
data->client = client;
+ data->supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(data->supply))
+ return dev_err_probe(dev, PTR_ERR(data->supply),
+ "Failed to request regulator\n");
+
+ ret = regulator_enable(data->supply);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to enable vdd supply\n");
+
/* Reset the mux if a reset GPIO is specified. */
gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
+ if (IS_ERR(gpio)) {
+ ret = PTR_ERR(gpio);
+ goto fail_cleanup;
+ }
if (gpio) {
udelay(1);
gpiod_set_value_cansleep(gpio, 0);
@@ -454,7 +535,7 @@ static int pca954x_probe(struct i2c_client *client)
ret = i2c_get_device_id(client, &id);
if (ret && ret != -EOPNOTSUPP)
- return ret;
+ goto fail_cleanup;
if (!ret &&
(id.manufacturer_id != data->chip->id.manufacturer_id ||
@@ -462,7 +543,8 @@ static int pca954x_probe(struct i2c_client *client)
dev_warn(dev, "unexpected device id %03x-%03x-%x\n",
id.manufacturer_id, id.part_id,
id.die_revision);
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail_cleanup;
}
}
@@ -481,7 +563,8 @@ static int pca954x_probe(struct i2c_client *client)
ret = pca954x_init(client, data);
if (ret < 0) {
dev_warn(dev, "probe failed\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail_cleanup;
}
ret = pca954x_irq_setup(muxc);
@@ -530,7 +613,6 @@ static void pca954x_remove(struct i2c_client *client)
pca954x_cleanup(muxc);
}
-#ifdef CONFIG_PM_SLEEP
static int pca954x_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -544,14 +626,13 @@ static int pca954x_resume(struct device *dev)
return ret;
}
-#endif
-static SIMPLE_DEV_PM_OPS(pca954x_pm, NULL, pca954x_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(pca954x_pm, NULL, pca954x_resume);
static struct i2c_driver pca954x_driver = {
.driver = {
.name = "pca954x",
- .pm = &pca954x_pm,
+ .pm = pm_sleep_ptr(&pca954x_pm),
.of_match_table = pca954x_of_match,
},
.probe = pca954x_probe,
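
The pca954x probe now brings up an optional vdd supply before touching the chip; because the supply is enabled manually rather than through a devres action, every later error path has to run the existing pca954x_cleanup() so the enable count stays balanced. The acquisition half of that pattern, reduced to a hedged sketch (hypothetical helper name):

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int foo_enable_vdd(struct device *dev, struct regulator **supply)
{
	int ret;

	*supply = devm_regulator_get(dev, "vdd");
	if (IS_ERR(*supply))
		return dev_err_probe(dev, PTR_ERR(*supply),
				     "Failed to request regulator\n");

	ret = regulator_enable(*supply);
	if (ret)
		return dev_err_probe(dev, ret,
				     "Failed to enable vdd supply\n");

	/*
	 * From here on, failures must disable the supply again (here via
	 * the driver's cleanup path); drivers that never need the handle
	 * afterwards could use devm_regulator_get_enable() instead.
	 */
	return 0;
}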
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 08aeb69a7800..87283e4a4607 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -1308,7 +1308,11 @@ static int i3c_master_get_i3c_addrs(struct i3c_dev_desc *dev)
if (dev->info.static_addr) {
status = i3c_bus_get_addr_slot_status(&master->bus,
dev->info.static_addr);
- if (status != I3C_ADDR_SLOT_FREE)
+ /* Since static address and assigned dynamic address can be
+ * equal, allow this case to pass.
+ */
+ if (status != I3C_ADDR_SLOT_FREE &&
+ dev->info.static_addr != dev->boardinfo->init_dyn_addr)
return -EBUSY;
i3c_bus_set_addr_slot_status(&master->bus,
diff --git a/drivers/i3c/master/ast2600-i3c-master.c b/drivers/i3c/master/ast2600-i3c-master.c
index 09ed19d489e9..01a47d3dd499 100644
--- a/drivers/i3c/master/ast2600-i3c-master.c
+++ b/drivers/i3c/master/ast2600-i3c-master.c
@@ -8,7 +8,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c
index 01610fa5b0cc..49551db71bc9 100644
--- a/drivers/i3c/master/i3c-master-cdns.c
+++ b/drivers/i3c/master/i3c-master-cdns.c
@@ -22,7 +22,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
-#include <linux/of_device.h>
#define DEV_ID 0x0
#define DEV_ID_I3C_MASTER 0x5034
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
index d97c3175e0e2..6a781f89b0e4 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
@@ -339,7 +339,7 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
break;
}
if (RESP_STATUS(xfer[0].response) == RESP_ERR_NACK &&
- RESP_STATUS(xfer[0].response) == 1) {
+ RESP_DATA_LENGTH(xfer->response) == 1) {
ret = 0; /* no more devices to be assigned */
break;
}
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 0d63b732ef0c..8f8295acdadb 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -156,6 +156,7 @@ struct svc_i3c_regs_save {
* @base: I3C master controller
* @dev: Corresponding device
* @regs: Memory mapping
+ * @saved_regs: Volatile values for PM operations
* @free_slots: Bit array of available slots
* @addrs: Array containing the dynamic addresses of each attached device
* @descs: Array of descriptors, one per attached device
@@ -789,6 +790,10 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
*/
break;
} else if (SVC_I3C_MSTATUS_NACKED(reg)) {
+ /* No I3C devices attached */
+ if (dev_nb == 0)
+ break;
+
/*
* A slave device nacked the address, this is
* allowed only once, DAA will be stopped and
@@ -1263,11 +1268,17 @@ static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
{
struct svc_i3c_master *master = to_svc_i3c_master(m);
bool broadcast = cmd->id < 0x80;
+ int ret;
if (broadcast)
- return svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
+ ret = svc_i3c_master_send_bdcast_ccc_cmd(master, cmd);
else
- return svc_i3c_master_send_direct_ccc_cmd(master, cmd);
+ ret = svc_i3c_master_send_direct_ccc_cmd(master, cmd);
+
+ if (ret)
+ cmd->err = I3C_ERROR_M2;
+
+ return ret;
}
static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
@@ -1518,8 +1529,8 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
return PTR_ERR(master->sclk);
master->irq = platform_get_irq(pdev, 0);
- if (master->irq <= 0)
- return -ENOENT;
+ if (master->irq < 0)
+ return master->irq;
master->dev = dev;
diff --git a/drivers/iio/accel/adxl313_i2c.c b/drivers/iio/accel/adxl313_i2c.c
index 524327ea3663..a4cf0cf2c5aa 100644
--- a/drivers/iio/accel/adxl313_i2c.c
+++ b/drivers/iio/accel/adxl313_i2c.c
@@ -40,8 +40,8 @@ static const struct regmap_config adxl31x_i2c_regmap_config[] = {
static const struct i2c_device_id adxl313_i2c_id[] = {
{ .name = "adxl312", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL312] },
- { .name = "adxl313", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL312] },
- { .name = "adxl314", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL312] },
+ { .name = "adxl313", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL313] },
+ { .name = "adxl314", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL314] },
{ }
};
@@ -65,9 +65,7 @@ static int adxl313_i2c_probe(struct i2c_client *client)
* Retrieves device specific data as a pointer to a
* adxl313_chip_info structure
*/
- chip_data = device_get_match_data(&client->dev);
- if (!chip_data)
- chip_data = (const struct adxl313_chip_info *)i2c_match_id(adxl313_i2c_id, client)->driver_data;
+ chip_data = i2c_get_match_data(client);
regmap = devm_regmap_init_i2c(client,
&adxl31x_i2c_regmap_config[chip_data->type]);
diff --git a/drivers/iio/accel/adxl355_i2c.c b/drivers/iio/accel/adxl355_i2c.c
index d5beea61479d..32398cde9608 100644
--- a/drivers/iio/accel/adxl355_i2c.c
+++ b/drivers/iio/accel/adxl355_i2c.c
@@ -24,19 +24,10 @@ static int adxl355_i2c_probe(struct i2c_client *client)
{
struct regmap *regmap;
const struct adxl355_chip_info *chip_data;
- const struct i2c_device_id *adxl355;
- chip_data = device_get_match_data(&client->dev);
- if (!chip_data) {
- adxl355 = to_i2c_driver(client->dev.driver)->id_table;
- if (!adxl355)
- return -EINVAL;
-
- chip_data = (void *)i2c_match_id(adxl355, client)->driver_data;
-
- if (!chip_data)
- return -EINVAL;
- }
+ chip_data = i2c_get_match_data(client);
+ if (!chip_data)
+ return -ENODEV;
regmap = devm_regmap_init_i2c(client, &adxl355_i2c_regmap_config);
if (IS_ERR(regmap)) {
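
The adxl313/adxl355 changes above replace the open-coded fallback chain (device_get_match_data(), then the i2c_device_id table via i2c_match_id()) with i2c_get_match_data(), which performs exactly that lookup. Consuming it in a probe function looks roughly like this (hypothetical chip-info type):

#include <linux/i2c.h>

struct foo_chip_info {
	unsigned int type;
};

static int foo_probe(struct i2c_client *client)
{
	const struct foo_chip_info *info;

	/* OF/ACPI match data when available, else the I2C ID table. */
	info = i2c_get_match_data(client);
	if (!info)
		return -ENODEV;

	return 0;
}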
diff --git a/drivers/iio/accel/adxl372_spi.c b/drivers/iio/accel/adxl372_spi.c
index 2bd267a22f29..75a88f16c6c9 100644
--- a/drivers/iio/accel/adxl372_spi.c
+++ b/drivers/iio/accel/adxl372_spi.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include "adxl372.h"
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index e8ab0d249351..13439f52d26d 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -16,7 +16,6 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/iio/accel/da280.c b/drivers/iio/accel/da280.c
index 2f27a5ded94c..572bfe9694b0 100644
--- a/drivers/iio/accel/da280.c
+++ b/drivers/iio/accel/da280.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * IIO driver for the MiraMEMS DA280 3-axis accelerometer and
+ * IIO driver for the MiraMEMS DA217 and DA280 3-axis accelerometer and
* IIO driver for the MiraMEMS DA226 2-axis accelerometer
*
* Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
@@ -23,7 +23,7 @@
#define DA280_MODE_ENABLE 0x1e
#define DA280_MODE_DISABLE 0x9e
-enum da280_chipset { da226, da280 };
+enum da280_chipset { da217, da226, da280 };
/*
* a value of + or -4096 corresponds to + or - 1G
@@ -134,7 +134,10 @@ static int da280_probe(struct i2c_client *client)
chip = id->driver_data;
}
- if (chip == da226) {
+ if (chip == da217) {
+ indio_dev->name = "da217";
+ indio_dev->num_channels = 3;
+ } else if (chip == da226) {
indio_dev->name = "da226";
indio_dev->num_channels = 2;
} else {
@@ -166,12 +169,14 @@ static int da280_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(da280_pm_ops, da280_suspend, da280_resume);
static const struct acpi_device_id da280_acpi_match[] = {
+ {"NSA2513", da217},
{"MIRAACC", da280},
{},
};
MODULE_DEVICE_TABLE(acpi, da280_acpi_match);
static const struct i2c_device_id da280_i2c_id[] = {
+ { "da217", da217 },
{ "da226", da226 },
{ "da280", da280 },
{}
diff --git a/drivers/iio/accel/kxsd9-spi.c b/drivers/iio/accel/kxsd9-spi.c
index 07f14a9f22c7..1719a9f1d90a 100644
--- a/drivers/iio/accel/kxsd9-spi.c
+++ b/drivers/iio/accel/kxsd9-spi.c
@@ -2,7 +2,6 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 6e7399e72221..f42a88711486 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -28,7 +28,7 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/events.h>
#include <linux/delay.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index dc14bde31ac1..517b3db114b8 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -1116,7 +1116,7 @@ config STMPE_ADC
built-in ADC block (stmpe811).
config SUN4I_GPADC
- tristate "Support for the Allwinner SoCs GPADC"
+ tristate "Allwinner A10/A13/A31 and similar GPADCs driver"
depends on IIO
depends on MFD_SUN4I_GPADC || MACH_SUN8I
depends on THERMAL || !THERMAL_OF
@@ -1134,6 +1134,16 @@ config SUN4I_GPADC
To compile this driver as a module, choose M here: the module will be
called sun4i-gpadc-iio.
+config SUN20I_GPADC
+ tristate "Allwinner D1/T113s/T507/R329 and similar GPADCs driver"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ help
+	  Say yes here to build support for the GPADC found on Allwinner D1,
+	  T113, T507 and R329 SoCs. This ADC provides up to 16 channels.
+
+ To compile this driver as a module, choose M here: the module will be
+ called sun20i-gpadc-iio.
+
config TI_ADC081C
tristate "Texas Instruments ADC081C/ADC101C/ADC121C family"
depends on I2C
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index eb6e891790fb..2facf979327d 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -96,6 +96,7 @@ obj-$(CONFIG_RZG2L_ADC) += rzg2l_adc.o
obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o
obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o
+obj-$(CONFIG_SUN20I_GPADC) += sun20i-gpadc-iio.o
obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o
obj-$(CONFIG_STM32_ADC) += stm32-adc.o
obj-$(CONFIG_STM32_DFSDM_CORE) += stm32-dfsdm-core.o
diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c
index 4fa2126a354b..3b1bdd0b531d 100644
--- a/drivers/iio/adc/ab8500-gpadc.c
+++ b/drivers/iio/adc/ab8500-gpadc.c
@@ -1099,14 +1099,12 @@ static int ab8500_gpadc_probe(struct platform_device *pdev)
gpadc->irq_sw = platform_get_irq_byname(pdev, "SW_CONV_END");
if (gpadc->irq_sw < 0)
- return dev_err_probe(dev, gpadc->irq_sw,
- "failed to get platform sw_conv_end irq\n");
+ return gpadc->irq_sw;
if (is_ab8500(gpadc->ab8500)) {
gpadc->irq_hw = platform_get_irq_byname(pdev, "HW_CONV_END");
if (gpadc->irq_hw < 0)
- return dev_err_probe(dev, gpadc->irq_hw,
- "failed to get platform hw_conv_end irq\n");
+ return gpadc->irq_hw;
} else {
gpadc->irq_hw = 0;
}
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 050a2fbf5c49..b9b206fcd748 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -14,7 +14,7 @@
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 7bc3ebfe8081..69d1103b9508 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -16,7 +16,7 @@
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/delay.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -402,8 +402,8 @@ static int ad7192_setup(struct iio_dev *indio_dev, struct device_node *np)
id &= AD7192_ID_MASK;
if (id != st->chip_info->chip_id)
- dev_warn(&st->sd.spi->dev, "device ID query failed (0x%X)\n",
- id);
+ dev_warn(&st->sd.spi->dev, "device ID query failed (0x%X != 0x%X)\n",
+ id, st->chip_info->chip_id);
st->mode = AD7192_MODE_SEL(AD7192_MODE_IDLE) |
AD7192_MODE_CLKSRC(st->clock_sel) |
@@ -561,9 +561,8 @@ static ssize_t ad7192_show_filter_avail(struct device *dev,
ad7192_get_available_filter_freq(st, freq_avail);
for (i = 0; i < ARRAY_SIZE(freq_avail); i++)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "%d.%d ", freq_avail[i] / 1000,
- freq_avail[i] % 1000);
+ len += sysfs_emit_at(buf, len, "%d.%03d ", freq_avail[i] / 1000,
+ freq_avail[i] % 1000);
buf[len - 1] = '\n';
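
A quick illustration of the formatting change in the hunk above: with "%d.%d" the sub-unit remainder loses its leading zeros, so a value such as 9080 (treated here as millihertz, which is what the divide-by-1000 suggests) prints as "9.80" instead of "9.080". A minimal standalone sketch, not part of the patch, using a made-up value:

#include <stdio.h>

int main(void)
{
	int freq = 9080;	/* assumed to be in mHz, i.e. 9.080 Hz */

	printf("old: %d.%d\n", freq / 1000, freq % 1000);	/* "9.80"  - misleading */
	printf("new: %d.%03d\n", freq / 1000, freq % 1000);	/* "9.080" - zero-padded */
	return 0;
}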
@@ -972,11 +971,6 @@ static void ad7192_reg_disable(void *reg)
regulator_disable(reg);
}
-static void ad7192_clk_disable(void *clk)
-{
- clk_disable_unprepare(clk);
-}
-
static int ad7192_probe(struct spi_device *spi)
{
struct ad7192_state *st;
@@ -1036,7 +1030,9 @@ static int ad7192_probe(struct spi_device *spi)
else
indio_dev->info = &ad7192_info;
- ad_sd_init(&st->sd, indio_dev, spi, &ad7192_sigma_delta_info);
+ ret = ad_sd_init(&st->sd, indio_dev, spi, &ad7192_sigma_delta_info);
+ if (ret)
+ return ret;
ret = devm_ad_sd_setup_buffer_and_trigger(&spi->dev, indio_dev);
if (ret)
@@ -1044,7 +1040,7 @@ static int ad7192_probe(struct spi_device *spi)
st->fclk = AD7192_INT_FREQ_MHZ;
- st->mclk = devm_clk_get_optional(&spi->dev, "mclk");
+ st->mclk = devm_clk_get_optional_enabled(&spi->dev, "mclk");
if (IS_ERR(st->mclk))
return PTR_ERR(st->mclk);
@@ -1052,15 +1048,6 @@ static int ad7192_probe(struct spi_device *spi)
if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
st->clock_sel == AD7192_CLK_EXT_MCLK2) {
- ret = clk_prepare_enable(st->mclk);
- if (ret < 0)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, ad7192_clk_disable,
- st->mclk);
- if (ret)
- return ret;
-
st->fclk = clk_get_rate(st->mclk);
if (!ad7192_valid_external_frequency(st->fclk)) {
dev_err(&spi->dev,
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index 0621cf59d614..39eccc28debe 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -13,7 +13,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index e8a8ea4140f1..aff0532a974a 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -11,8 +11,9 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 366e252ebeb0..de6650f9c4b1 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index 44e1e53ada72..0d6885413a7e 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -540,8 +540,8 @@ static int iproc_adc_probe(struct platform_device *pdev)
}
adc_priv->irqno = platform_get_irq(pdev, 0);
- if (adc_priv->irqno <= 0)
- return -ENODEV;
+ if (adc_priv->irqno < 0)
+ return adc_priv->irqno;
ret = regmap_update_bits(adc_priv->regmap, IPROC_REGCTL2,
IPROC_ADC_AUXIN_SCAN_ENA, 0);
diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c
index 2cde4b44fc6e..a432342348ab 100644
--- a/drivers/iio/adc/cc10001_adc.c
+++ b/drivers/iio/adc/cc10001_adc.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
diff --git a/drivers/iio/adc/imx7d_adc.c b/drivers/iio/adc/imx7d_adc.c
index 22da81bac97f..828d3fea6d43 100644
--- a/drivers/iio/adc/imx7d_adc.c
+++ b/drivers/iio/adc/imx7d_adc.c
@@ -496,7 +496,7 @@ static int imx7d_adc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return dev_err_probe(dev, irq, "Failed getting irq\n");
+ return irq;
info->clk = devm_clk_get(dev, "adc");
if (IS_ERR(info->clk))
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index aea83f369437..9e52207352fb 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -28,7 +28,7 @@
#include <linux/iio/sysfs.h>
#include <linux/kthread.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/sched/task.h>
#include <linux/util_macros.h>
diff --git a/drivers/iio/adc/lpc32xx_adc.c b/drivers/iio/adc/lpc32xx_adc.c
index 732c924a976d..e34ed7dacd89 100644
--- a/drivers/iio/adc/lpc32xx_adc.c
+++ b/drivers/iio/adc/lpc32xx_adc.c
@@ -176,8 +176,8 @@ static int lpc32xx_adc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -ENXIO;
+ if (irq < 0)
+ return irq;
retval = devm_request_irq(&pdev->dev, irq, lpc32xx_adc_isr, 0,
LPC32XXAD_NAME, st);
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
index adc5ceaef8c9..198c7e68e0cf 100644
--- a/drivers/iio/adc/men_z188_adc.c
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -161,7 +161,6 @@ MODULE_DEVICE_TABLE(mcb, men_z188_ids);
static struct mcb_driver men_z188_driver = {
.driver = {
.name = "z188-adc",
- .owner = THIS_MODULE,
},
.probe = men_z188_probe,
.remove = men_z188_remove,
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index eb78a6f17fd0..320e3e7e3d4d 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -17,7 +17,6 @@
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -156,6 +155,10 @@
*/
#define MESON_SAR_ADC_REG11 0x2c
#define MESON_SAR_ADC_REG11_BANDGAP_EN BIT(13)
+ #define MESON_SAR_ADC_REG11_CMV_SEL BIT(6)
+ #define MESON_SAR_ADC_REG11_VREF_VOLTAGE BIT(5)
+ #define MESON_SAR_ADC_REG11_EOC BIT(1)
+ #define MESON_SAR_ADC_REG11_VREF_SEL BIT(0)
#define MESON_SAR_ADC_REG13 0x34
#define MESON_SAR_ADC_REG13_12BIT_CALIBRATION_MASK GENMASK(13, 8)
@@ -163,6 +166,7 @@
#define MESON_SAR_ADC_MAX_FIFO_SIZE 32
#define MESON_SAR_ADC_TIMEOUT 100 /* ms */
#define MESON_SAR_ADC_VOLTAGE_AND_TEMP_CHANNEL 6
+#define MESON_SAR_ADC_VOLTAGE_AND_MUX_CHANNEL 7
#define MESON_SAR_ADC_TEMP_OFFSET 27
/* temperature sensor calibration information in eFuse */
@@ -202,29 +206,22 @@
.datasheet_name = "TEMP_SENSOR", \
}
-static const struct iio_chan_spec meson_sar_adc_iio_channels[] = {
- MESON_SAR_ADC_CHAN(0),
- MESON_SAR_ADC_CHAN(1),
- MESON_SAR_ADC_CHAN(2),
- MESON_SAR_ADC_CHAN(3),
- MESON_SAR_ADC_CHAN(4),
- MESON_SAR_ADC_CHAN(5),
- MESON_SAR_ADC_CHAN(6),
- MESON_SAR_ADC_CHAN(7),
- IIO_CHAN_SOFT_TIMESTAMP(8),
-};
+#define MESON_SAR_ADC_MUX(_chan, _sel) { \
+ .type = IIO_VOLTAGE, \
+ .channel = _chan, \
+ .indexed = 1, \
+ .address = MESON_SAR_ADC_VOLTAGE_AND_MUX_CHANNEL, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_AVERAGE_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_CALIBBIAS) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE), \
+ .datasheet_name = "SAR_ADC_MUX_"#_sel, \
+}
-static const struct iio_chan_spec meson_sar_adc_and_temp_iio_channels[] = {
- MESON_SAR_ADC_CHAN(0),
- MESON_SAR_ADC_CHAN(1),
- MESON_SAR_ADC_CHAN(2),
- MESON_SAR_ADC_CHAN(3),
- MESON_SAR_ADC_CHAN(4),
- MESON_SAR_ADC_CHAN(5),
- MESON_SAR_ADC_CHAN(6),
- MESON_SAR_ADC_CHAN(7),
- MESON_SAR_ADC_TEMP_CHAN(8),
- IIO_CHAN_SOFT_TIMESTAMP(9),
+enum meson_sar_adc_vref_sel {
+ VREF_CALIBATION_VOLTAGE = 0,
+ VREF_VDDA = 1,
};
enum meson_sar_adc_avg_mode {
@@ -249,6 +246,72 @@ enum meson_sar_adc_chan7_mux_sel {
CHAN7_MUX_CH7_INPUT = 0x7,
};
+enum meson_sar_adc_channel_index {
+ NUM_CHAN_0,
+ NUM_CHAN_1,
+ NUM_CHAN_2,
+ NUM_CHAN_3,
+ NUM_CHAN_4,
+ NUM_CHAN_5,
+ NUM_CHAN_6,
+ NUM_CHAN_7,
+ NUM_CHAN_TEMP,
+ NUM_MUX_0_VSS,
+ NUM_MUX_1_VDD_DIV4,
+ NUM_MUX_2_VDD_DIV2,
+ NUM_MUX_3_VDD_MUL3_DIV4,
+ NUM_MUX_4_VDD,
+};
+
+static enum meson_sar_adc_chan7_mux_sel chan7_mux_values[] = {
+ CHAN7_MUX_VSS,
+ CHAN7_MUX_VDD_DIV4,
+ CHAN7_MUX_VDD_DIV2,
+ CHAN7_MUX_VDD_MUL3_DIV4,
+ CHAN7_MUX_VDD,
+};
+
+static const char * const chan7_mux_names[] = {
+ [CHAN7_MUX_VSS] = "gnd",
+ [CHAN7_MUX_VDD_DIV4] = "0.25vdd",
+ [CHAN7_MUX_VDD_DIV2] = "0.5vdd",
+ [CHAN7_MUX_VDD_MUL3_DIV4] = "0.75vdd",
+ [CHAN7_MUX_VDD] = "vdd",
+};
+
+static const struct iio_chan_spec meson_sar_adc_iio_channels[] = {
+ MESON_SAR_ADC_CHAN(NUM_CHAN_0),
+ MESON_SAR_ADC_CHAN(NUM_CHAN_1),
+ MESON_SAR_ADC_CHAN(NUM_CHAN_2),
+ MESON_SAR_ADC_CHAN(NUM_CHAN_3),
+ MESON_SAR_ADC_CHAN(NUM_CHAN_4),
+ MESON_SAR_ADC_CHAN(NUM_CHAN_5),
+ MESON_SAR_ADC_CHAN(NUM_CHAN_6),
+ MESON_SAR_ADC_CHAN(NUM_CHAN_7),
+ MESON_SAR_ADC_MUX(NUM_MUX_0_VSS, 0),
+ MESON_SAR_ADC_MUX(NUM_MUX_1_VDD_DIV4, 1),
+ MESON_SAR_ADC_MUX(NUM_MUX_2_VDD_DIV2, 2),
+ MESON_SAR_ADC_MUX(NUM_MUX_3_VDD_MUL3_DIV4, 3),
+ MESON_SAR_ADC_MUX(NUM_MUX_4_VDD, 4),
+};
+
+static const struct iio_chan_spec meson_sar_adc_and_temp_iio_channels[] = {
+ MESON_SAR_ADC_CHAN(NUM_CHAN_0),
+ MESON_SAR_ADC_CHAN(NUM_CHAN_1),
+ MESON_SAR_ADC_CHAN(NUM_CHAN_2),
+ MESON_SAR_ADC_CHAN(NUM_CHAN_3),
+ MESON_SAR_ADC_CHAN(NUM_CHAN_4),
+ MESON_SAR_ADC_CHAN(NUM_CHAN_5),
+ MESON_SAR_ADC_CHAN(NUM_CHAN_6),
+ MESON_SAR_ADC_CHAN(NUM_CHAN_7),
+ MESON_SAR_ADC_TEMP_CHAN(NUM_CHAN_TEMP),
+ MESON_SAR_ADC_MUX(NUM_MUX_0_VSS, 0),
+ MESON_SAR_ADC_MUX(NUM_MUX_1_VDD_DIV4, 1),
+ MESON_SAR_ADC_MUX(NUM_MUX_2_VDD_DIV2, 2),
+ MESON_SAR_ADC_MUX(NUM_MUX_3_VDD_MUL3_DIV4, 3),
+ MESON_SAR_ADC_MUX(NUM_MUX_4_VDD, 4),
+};
+
struct meson_sar_adc_param {
bool has_bl30_integration;
unsigned long clock_rate;
@@ -258,6 +321,13 @@ struct meson_sar_adc_param {
u8 temperature_trimming_bits;
unsigned int temperature_multiplier;
unsigned int temperature_divider;
+ u8 disable_ring_counter;
+ bool has_reg11;
+ bool has_vref_select;
+ u8 vref_select;
+ u8 cmv_select;
+ u8 adc_eoc;
+ enum meson_sar_adc_vref_sel vref_volatge;
};
struct meson_sar_adc_data {
@@ -285,6 +355,7 @@ struct meson_sar_adc_priv {
bool temperature_sensor_calibrated;
u8 temperature_sensor_coefficient;
u16 temperature_sensor_adc_val;
+ enum meson_sar_adc_chan7_mux_sel chan7_mux_sel;
};
static const struct regmap_config meson_sar_adc_regmap_config_gxbb = {
@@ -301,6 +372,17 @@ static const struct regmap_config meson_sar_adc_regmap_config_meson8 = {
.max_register = MESON_SAR_ADC_DELTA_10,
};
+static const struct iio_chan_spec *
+find_channel_by_num(struct iio_dev *indio_dev, int num)
+{
+ int i;
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ if (indio_dev->channels[i].channel == num)
+ return &indio_dev->channels[i];
+ return NULL;
+}
+
static unsigned int meson_sar_adc_get_fifo_count(struct iio_dev *indio_dev)
{
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
@@ -338,6 +420,21 @@ static int meson_sar_adc_wait_busy_clear(struct iio_dev *indio_dev)
1, 10000);
}
+static void meson_sar_adc_set_chan7_mux(struct iio_dev *indio_dev,
+ enum meson_sar_adc_chan7_mux_sel sel)
+{
+ struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
+ u32 regval;
+
+ regval = FIELD_PREP(MESON_SAR_ADC_REG3_CTRL_CHAN7_MUX_SEL_MASK, sel);
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
+ MESON_SAR_ADC_REG3_CTRL_CHAN7_MUX_SEL_MASK, regval);
+
+ usleep_range(10, 20);
+
+ priv->chan7_mux_sel = sel;
+}
+
static int meson_sar_adc_read_raw_sample(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
int *val)
@@ -431,20 +528,16 @@ static void meson_sar_adc_enable_channel(struct iio_dev *indio_dev,
regmap_update_bits(priv->regmap,
MESON_SAR_ADC_DELTA_10,
MESON_SAR_ADC_DELTA_10_TEMP_SEL, regval);
- }
-}
-
-static void meson_sar_adc_set_chan7_mux(struct iio_dev *indio_dev,
- enum meson_sar_adc_chan7_mux_sel sel)
-{
- struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
- u32 regval;
-
- regval = FIELD_PREP(MESON_SAR_ADC_REG3_CTRL_CHAN7_MUX_SEL_MASK, sel);
- regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
- MESON_SAR_ADC_REG3_CTRL_CHAN7_MUX_SEL_MASK, regval);
+ } else if (chan->address == MESON_SAR_ADC_VOLTAGE_AND_MUX_CHANNEL) {
+ enum meson_sar_adc_chan7_mux_sel sel;
- usleep_range(10, 20);
+ if (chan->channel == NUM_CHAN_7)
+ sel = CHAN7_MUX_CH7_INPUT;
+ else
+ sel = chan7_mux_values[chan->channel - NUM_MUX_0_VSS];
+ if (sel != priv->chan7_mux_sel)
+ meson_sar_adc_set_chan7_mux(indio_dev, sel);
+ }
}
static void meson_sar_adc_start_sample_engine(struct iio_dev *indio_dev)
@@ -821,6 +914,22 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
MESON_SAR_ADC_CHAN_10_SW_CHAN1_MUX_SEL_MASK,
regval);
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_CHAN_10_SW,
+ MESON_SAR_ADC_CHAN_10_SW_CHAN0_XP_DRIVE_SW,
+ MESON_SAR_ADC_CHAN_10_SW_CHAN0_XP_DRIVE_SW);
+
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_CHAN_10_SW,
+ MESON_SAR_ADC_CHAN_10_SW_CHAN0_YP_DRIVE_SW,
+ MESON_SAR_ADC_CHAN_10_SW_CHAN0_YP_DRIVE_SW);
+
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_CHAN_10_SW,
+ MESON_SAR_ADC_CHAN_10_SW_CHAN1_XP_DRIVE_SW,
+ MESON_SAR_ADC_CHAN_10_SW_CHAN1_XP_DRIVE_SW);
+
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_CHAN_10_SW,
+ MESON_SAR_ADC_CHAN_10_SW_CHAN1_YP_DRIVE_SW,
+ MESON_SAR_ADC_CHAN_10_SW_CHAN1_YP_DRIVE_SW);
+
/*
* set up the input channel muxes in MESON_SAR_ADC_AUX_SW
* (2 = SAR_ADC_CH2, 3 = SAR_ADC_CH3, ...) and enable
@@ -873,6 +982,35 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
MESON_SAR_ADC_DELTA_10_TS_REVE0, 0);
}
+ regval = FIELD_PREP(MESON_SAR_ADC_REG3_CTRL_CONT_RING_COUNTER_EN,
+ priv->param->disable_ring_counter);
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
+ MESON_SAR_ADC_REG3_CTRL_CONT_RING_COUNTER_EN,
+ regval);
+
+ if (priv->param->has_reg11) {
+ regval = FIELD_PREP(MESON_SAR_ADC_REG11_EOC, priv->param->adc_eoc);
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
+ MESON_SAR_ADC_REG11_EOC, regval);
+
+ if (priv->param->has_vref_select) {
+ regval = FIELD_PREP(MESON_SAR_ADC_REG11_VREF_SEL,
+ priv->param->vref_select);
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
+ MESON_SAR_ADC_REG11_VREF_SEL, regval);
+ }
+
+ regval = FIELD_PREP(MESON_SAR_ADC_REG11_VREF_VOLTAGE,
+ priv->param->vref_volatge);
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
+ MESON_SAR_ADC_REG11_VREF_VOLTAGE, regval);
+
+ regval = FIELD_PREP(MESON_SAR_ADC_REG11_CMV_SEL,
+ priv->param->cmv_select);
+ regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
+ MESON_SAR_ADC_REG11_CMV_SEL, regval);
+ }
+
ret = clk_set_parent(priv->adc_sel_clk, priv->clkin);
if (ret)
return dev_err_probe(dev, ret, "failed to set adc parent to clkin\n");
@@ -1006,7 +1144,8 @@ static int meson_sar_adc_calib(struct iio_dev *indio_dev)
meson_sar_adc_set_chan7_mux(indio_dev, CHAN7_MUX_VDD_DIV4);
usleep_range(10, 20);
ret = meson_sar_adc_get_sample(indio_dev,
- &indio_dev->channels[7],
+ find_channel_by_num(indio_dev,
+ NUM_MUX_1_VDD_DIV4),
MEAN_AVERAGING, EIGHT_SAMPLES, &value0);
if (ret < 0)
goto out;
@@ -1014,7 +1153,8 @@ static int meson_sar_adc_calib(struct iio_dev *indio_dev)
meson_sar_adc_set_chan7_mux(indio_dev, CHAN7_MUX_VDD_MUL3_DIV4);
usleep_range(10, 20);
ret = meson_sar_adc_get_sample(indio_dev,
- &indio_dev->channels[7],
+ find_channel_by_num(indio_dev,
+ NUM_MUX_3_VDD_MUL3_DIV4),
MEAN_AVERAGING, EIGHT_SAMPLES, &value1);
if (ret < 0)
goto out;
@@ -1035,8 +1175,23 @@ out:
return ret;
}
+static int read_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ char *label)
+{
+ if (chan->type == IIO_TEMP)
+ return sprintf(label, "temp-sensor\n");
+ if (chan->type == IIO_VOLTAGE && chan->channel >= NUM_MUX_0_VSS)
+ return sprintf(label, "%s\n",
+ chan7_mux_names[chan->channel - NUM_MUX_0_VSS]);
+ if (chan->type == IIO_VOLTAGE)
+ return sprintf(label, "channel-%d\n", chan->channel);
+ return 0;
+}
+
static const struct iio_info meson_sar_adc_iio_info = {
.read_raw = meson_sar_adc_iio_info_read_raw,
+ .read_label = read_label,
};
static const struct meson_sar_adc_param meson_sar_adc_meson8_param = {
@@ -1067,6 +1222,9 @@ static const struct meson_sar_adc_param meson_sar_adc_gxbb_param = {
.bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 10,
+ .has_reg11 = true,
+ .vref_volatge = 1,
+ .cmv_select = 1,
};
static const struct meson_sar_adc_param meson_sar_adc_gxl_param = {
@@ -1075,6 +1233,10 @@ static const struct meson_sar_adc_param meson_sar_adc_gxl_param = {
.bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 12,
+ .disable_ring_counter = 1,
+ .has_reg11 = true,
+ .vref_volatge = 1,
+ .cmv_select = 1,
};
static const struct meson_sar_adc_param meson_sar_adc_g12a_param = {
@@ -1083,6 +1245,11 @@ static const struct meson_sar_adc_param meson_sar_adc_g12a_param = {
.bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 12,
+ .disable_ring_counter = 1,
+ .has_reg11 = true,
+ .adc_eoc = 1,
+ .has_vref_select = true,
+ .vref_select = VREF_VDDA,
};
static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
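
A small sketch (not part of the patch) of the labels the new read_label() callback exposes for the added MUX channels. It assumes the CHAN7_MUX_* selector values run 0..4, as the designated initializers of chan7_mux_names[] suggest, that NUM_MUX_0_VSS is 9 per the enum order above, and that the IIO core names the resulting sysfs files in_voltageN_label.

#include <stdio.h>

int main(void)
{
	static const char * const mux_labels[] = {
		"gnd", "0.25vdd", "0.5vdd", "0.75vdd", "vdd",
	};
	int first_mux_chan = 9;	/* NUM_MUX_0_VSS in the enum above */
	int i;

	for (i = 0; i < 5; i++)
		printf("in_voltage%d_label = %s\n", first_mux_chan + i,
		       mux_labels[i]);	/* e.g. in_voltage9_label = gnd */
	return 0;
}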
diff --git a/drivers/iio/adc/npcm_adc.c b/drivers/iio/adc/npcm_adc.c
index ba4cd8f49f66..3d9207c160eb 100644
--- a/drivers/iio/adc/npcm_adc.c
+++ b/drivers/iio/adc/npcm_adc.c
@@ -244,8 +244,8 @@ static int npcm_adc_probe(struct platform_device *pdev)
info->adc_sample_hz = clk_get_rate(info->adc_clk) / ((div + 1) * 2);
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- ret = -EINVAL;
+ if (irq < 0) {
+ ret = irq;
goto err_disable_clk;
}
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 27b2632c1037..e202ea18af10 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -18,7 +18,6 @@
#include <linux/mfd/palmas.h>
#include <linux/completion.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/machine.h>
@@ -916,8 +915,7 @@ static int palmas_gpadc_probe(struct platform_device *pdev)
adc->irq_auto_0 = platform_get_irq(pdev, 1);
if (adc->irq_auto_0 < 0)
- return dev_err_probe(adc->dev, adc->irq_auto_0,
- "get auto0 irq failed\n");
+ return adc->irq_auto_0;
ret = devm_request_threaded_irq(&pdev->dev, adc->irq_auto_0, NULL,
palmas_gpadc_irq_auto, IRQF_ONESHOT,
@@ -929,8 +927,7 @@ static int palmas_gpadc_probe(struct platform_device *pdev)
adc->irq_auto_1 = platform_get_irq(pdev, 2);
if (adc->irq_auto_1 < 0)
- return dev_err_probe(adc->dev, adc->irq_auto_1,
- "get auto1 irq failed\n");
+ return adc->irq_auto_1;
ret = devm_request_threaded_irq(&pdev->dev, adc->irq_auto_1, NULL,
palmas_gpadc_irq_auto, IRQF_ONESHOT,
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
index 0a4fd3a46113..b6b612d733ff 100644
--- a/drivers/iio/adc/qcom-spmi-adc5.c
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
@@ -555,6 +555,8 @@ static const struct adc5_channels adc5_chans_pmic[ADC5_MAX_CHANNEL] = {
SCALE_HW_CALIB_PM5_SMB_TEMP)
[ADC5_GPIO1_100K_PU] = ADC5_CHAN_TEMP("gpio1_100k_pu", 0,
SCALE_HW_CALIB_THERM_100K_PULLUP)
+ [ADC5_GPIO2_100K_PU] = ADC5_CHAN_TEMP("gpio2_100k_pu", 0,
+ SCALE_HW_CALIB_THERM_100K_PULLUP)
[ADC5_GPIO3_100K_PU] = ADC5_CHAN_TEMP("gpio3_100k_pu", 0,
SCALE_HW_CALIB_THERM_100K_PULLUP)
[ADC5_GPIO4_100K_PU] = ADC5_CHAN_TEMP("gpio4_100k_pu", 0,
diff --git a/drivers/iio/adc/qcom-spmi-iadc.c b/drivers/iio/adc/qcom-spmi-iadc.c
index acbda6636dc5..7fb8b2499a1d 100644
--- a/drivers/iio/adc/qcom-spmi-iadc.c
+++ b/drivers/iio/adc/qcom-spmi-iadc.c
@@ -13,7 +13,6 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 4b011f7eddec..dd94667a623b 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -11,7 +11,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -467,7 +466,7 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return dev_err_probe(&pdev->dev, irq, "failed to get irq\n");
+ return irq;
ret = devm_request_irq(&pdev->dev, irq, rockchip_saradc_isr,
0, dev_name(&pdev->dev), info);
diff --git a/drivers/iio/adc/sc27xx_adc.c b/drivers/iio/adc/sc27xx_adc.c
index ff1fc329bb9b..b4a2e057d80f 100644
--- a/drivers/iio/adc/sc27xx_adc.c
+++ b/drivers/iio/adc/sc27xx_adc.c
@@ -7,7 +7,6 @@
#include <linux/mutex.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/iio/adc/spear_adc.c b/drivers/iio/adc/spear_adc.c
index d93e580b3dc5..ad54ef798109 100644
--- a/drivers/iio/adc/spear_adc.c
+++ b/drivers/iio/adc/spear_adc.c
@@ -310,8 +310,8 @@ static int spear_adc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- ret = -EINVAL;
+ if (irq < 0) {
+ ret = irq;
goto errout2;
}
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 48f02dcc81c1..2f082006550f 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -18,6 +18,8 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -723,8 +725,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
priv->nb_adc_max = priv->cfg->num_adcs;
spin_lock_init(&priv->common.lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->common.base = devm_ioremap_resource(&pdev->dev, res);
+ priv->common.base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->common.base))
return PTR_ERR(priv->common.base);
priv->common.phys_base = res->start;
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index a428bdb567d5..b5cc43d12b6f 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -19,7 +19,8 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
index 0362df285a57..0f6ebb3061a0 100644
--- a/drivers/iio/adc/stm32-dfsdm-core.c
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -12,8 +12,10 @@
#include <linux/iio/sysfs.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
index 67518e460e05..8e56def1c9e5 100644
--- a/drivers/iio/adc/stmpe-adc.c
+++ b/drivers/iio/adc/stmpe-adc.c
@@ -14,7 +14,7 @@
#include <linux/kernel.h>
#include <linux/mfd/stmpe.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/device.h>
diff --git a/drivers/iio/adc/sun20i-gpadc-iio.c b/drivers/iio/adc/sun20i-gpadc-iio.c
new file mode 100644
index 000000000000..6a893d484cf7
--- /dev/null
+++ b/drivers/iio/adc/sun20i-gpadc-iio.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * GPADC driver for sunxi platforms (D1, T113-S3 and R329)
+ * Copyright (c) 2023 Maksim Kiselev <bigunclemax@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/reset.h>
+
+#include <linux/iio/iio.h>
+
+#define SUN20I_GPADC_DRIVER_NAME "sun20i-gpadc"
+
+/* Register map definition */
+#define SUN20I_GPADC_SR 0x00
+#define SUN20I_GPADC_CTRL 0x04
+#define SUN20I_GPADC_CS_EN 0x08
+#define SUN20I_GPADC_FIFO_INTC 0x0c
+#define SUN20I_GPADC_FIFO_INTS 0x10
+#define SUN20I_GPADC_FIFO_DATA 0x14
+#define SUN20I_GPADC_CB_DATA 0x18
+#define SUN20I_GPADC_DATAL_INTC 0x20
+#define SUN20I_GPADC_DATAH_INTC 0x24
+#define SUN20I_GPADC_DATA_INTC 0x28
+#define SUN20I_GPADC_DATAL_INTS 0x30
+#define SUN20I_GPADC_DATAH_INTS 0x34
+#define SUN20I_GPADC_DATA_INTS 0x38
+#define SUN20I_GPADC_CH_CMP_DATA(x) (0x40 + (x) * 4)
+#define SUN20I_GPADC_CH_DATA(x) (0x80 + (x) * 4)
+
+#define SUN20I_GPADC_CTRL_ADC_AUTOCALI_EN_MASK BIT(23)
+#define SUN20I_GPADC_CTRL_WORK_MODE_MASK GENMASK(19, 18)
+#define SUN20I_GPADC_CTRL_ADC_EN_MASK BIT(16)
+#define SUN20I_GPADC_CS_EN_ADC_CH(x) BIT(x)
+#define SUN20I_GPADC_DATA_INTC_CH_DATA_IRQ_EN(x) BIT(x)
+
+#define SUN20I_GPADC_WORK_MODE_SINGLE 0
+
+struct sun20i_gpadc_iio {
+ void __iomem *regs;
+ struct completion completion;
+ int last_channel;
+ /*
+ * Lock to protect the device state during a potential concurrent
+ * read access from userspace. Reading a raw value requires a sequence
+ * of register writes, then a wait for a completion callback,
+ * and finally a register read, during which userspace could issue
+ * another read request. This lock protects a read access from
+	 * occurring before another one has finished.
+ */
+ struct mutex lock;
+};
+
+static int sun20i_gpadc_adc_read(struct sun20i_gpadc_iio *info,
+ struct iio_chan_spec const *chan, int *val)
+{
+ u32 ctrl;
+ int ret = IIO_VAL_INT;
+
+ mutex_lock(&info->lock);
+
+ reinit_completion(&info->completion);
+
+ if (info->last_channel != chan->channel) {
+ info->last_channel = chan->channel;
+
+ /* enable the analog input channel */
+ writel(SUN20I_GPADC_CS_EN_ADC_CH(chan->channel),
+ info->regs + SUN20I_GPADC_CS_EN);
+
+ /* enable the data irq for input channel */
+ writel(SUN20I_GPADC_DATA_INTC_CH_DATA_IRQ_EN(chan->channel),
+ info->regs + SUN20I_GPADC_DATA_INTC);
+ }
+
+ /* enable the ADC function */
+ ctrl = readl(info->regs + SUN20I_GPADC_CTRL);
+ ctrl |= FIELD_PREP(SUN20I_GPADC_CTRL_ADC_EN_MASK, 1);
+ writel(ctrl, info->regs + SUN20I_GPADC_CTRL);
+
+ /*
+	 * According to the datasheet, the maximum acquire time (TACQ) can be
+	 * (65535+1)/24MHz and the conversion time (CONV_TIME) is constant and
+	 * equal to 14/24MHz, so (TACQ+CONV_TIME) <= 2.73125ms.
+ * A 10ms delay should be enough to make sure an interrupt occurs in
+ * normal conditions. If it doesn't occur, then there is a timeout.
+ */
+ if (!wait_for_completion_timeout(&info->completion, msecs_to_jiffies(10))) {
+ ret = -ETIMEDOUT;
+ goto err_unlock;
+ }
+
+ /* read the ADC data */
+ *val = readl(info->regs + SUN20I_GPADC_CH_DATA(chan->channel));
+
+err_unlock:
+ mutex_unlock(&info->lock);
+
+ return ret;
+}
+
+static int sun20i_gpadc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct sun20i_gpadc_iio *info = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return sun20i_gpadc_adc_read(info, chan, val);
+ case IIO_CHAN_INFO_SCALE:
+ /* value in mv = 1800mV / 4096 raw */
+ *val = 1800;
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t sun20i_gpadc_irq_handler(int irq, void *data)
+{
+ struct sun20i_gpadc_iio *info = data;
+
+ /* clear data interrupt status register */
+ writel(GENMASK(31, 0), info->regs + SUN20I_GPADC_DATA_INTS);
+
+ complete(&info->completion);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_info sun20i_gpadc_iio_info = {
+ .read_raw = sun20i_gpadc_read_raw,
+};
+
+static void sun20i_gpadc_reset_assert(void *data)
+{
+ struct reset_control *rst = data;
+
+ reset_control_assert(rst);
+}
+
+static int sun20i_gpadc_alloc_channels(struct iio_dev *indio_dev,
+ struct device *dev)
+{
+ unsigned int channel;
+ int num_channels, i, ret;
+ struct iio_chan_spec *channels;
+ struct fwnode_handle *node;
+
+ num_channels = device_get_child_node_count(dev);
+ if (num_channels == 0)
+ return dev_err_probe(dev, -ENODEV, "no channel children\n");
+
+ channels = devm_kcalloc(dev, num_channels, sizeof(*channels),
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ i = 0;
+ device_for_each_child_node(dev, node) {
+ ret = fwnode_property_read_u32(node, "reg", &channel);
+ if (ret) {
+ fwnode_handle_put(node);
+ return dev_err_probe(dev, ret, "invalid channel number\n");
+ }
+
+ channels[i].type = IIO_VOLTAGE;
+ channels[i].indexed = 1;
+ channels[i].channel = channel;
+ channels[i].info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ channels[i].info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
+
+ i++;
+ }
+
+ indio_dev->channels = channels;
+ indio_dev->num_channels = num_channels;
+
+ return 0;
+}
+
+static int sun20i_gpadc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *indio_dev;
+ struct sun20i_gpadc_iio *info;
+ struct reset_control *rst;
+ struct clk *clk;
+ int irq;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*info));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ info = iio_priv(indio_dev);
+ info->last_channel = -1;
+
+ mutex_init(&info->lock);
+ init_completion(&info->completion);
+
+ ret = sun20i_gpadc_alloc_channels(indio_dev, dev);
+ if (ret)
+ return ret;
+
+ indio_dev->info = &sun20i_gpadc_iio_info;
+ indio_dev->name = SUN20I_GPADC_DRIVER_NAME;
+
+ info->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(info->regs))
+ return PTR_ERR(info->regs);
+
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to enable bus clock\n");
+
+ rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(rst))
+ return dev_err_probe(dev, PTR_ERR(rst), "failed to get reset control\n");
+
+ ret = reset_control_deassert(rst);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to deassert reset\n");
+
+ ret = devm_add_action_or_reset(dev, sun20i_gpadc_reset_assert, rst);
+ if (ret)
+ return ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, sun20i_gpadc_irq_handler, 0,
+ dev_name(dev), info);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed requesting irq %d\n", irq);
+
+ writel(FIELD_PREP(SUN20I_GPADC_CTRL_ADC_AUTOCALI_EN_MASK, 1) |
+ FIELD_PREP(SUN20I_GPADC_CTRL_WORK_MODE_MASK, SUN20I_GPADC_WORK_MODE_SINGLE),
+ info->regs + SUN20I_GPADC_CTRL);
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "could not register the device\n");
+
+ return 0;
+}
+
+static const struct of_device_id sun20i_gpadc_of_id[] = {
+ { .compatible = "allwinner,sun20i-d1-gpadc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sun20i_gpadc_of_id);
+
+static struct platform_driver sun20i_gpadc_driver = {
+ .driver = {
+ .name = SUN20I_GPADC_DRIVER_NAME,
+ .of_match_table = sun20i_gpadc_of_id,
+ },
+ .probe = sun20i_gpadc_probe,
+};
+module_platform_driver(sun20i_gpadc_driver);
+
+MODULE_DESCRIPTION("ADC driver for sunxi platforms");
+MODULE_AUTHOR("Maksim Kiselev <bigunclemax@gmail.com>");
+MODULE_LICENSE("GPL");
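
A standalone sketch (not part of the patch) checking the arithmetic behind the new driver's 10 ms completion timeout and the IIO_VAL_FRACTIONAL_LOG2 scale of 1800/2^12; the constants mirror the comment and read_raw() callback above.

#include <stdio.h>

int main(void)
{
	/* worst case: (65535 + 1) acquire cycles + 14 conversion cycles at 24 MHz */
	double max_sample_ms = (65535.0 + 1 + 14) / 24000000.0 * 1000.0;
	/* scale reported to userspace: 1800 mV over 2^12 steps */
	double mv_per_lsb = 1800.0 / 4096.0;

	printf("acquire + convert <= %.5f ms (timeout: 10 ms)\n", max_sample_ms);	/* 2.73125 */
	printf("scale = %.9f mV/LSB\n", mv_per_lsb);					/* 0.439453125 */
	return 0;
}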
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index a5322550c422..25bba96367a8 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -24,7 +24,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
diff --git a/drivers/iio/adc/ti-lmp92064.c b/drivers/iio/adc/ti-lmp92064.c
index c30ed824924f..84ba5c4a0eea 100644
--- a/drivers/iio/adc/ti-lmp92064.c
+++ b/drivers/iio/adc/ti-lmp92064.c
@@ -16,7 +16,10 @@
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
#include <linux/iio/driver.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
#define TI_LMP92064_REG_CONFIG_A 0x0000
#define TI_LMP92064_REG_CONFIG_B 0x0001
@@ -91,6 +94,12 @@ static const struct iio_chan_spec lmp92064_adc_channels[] = {
.address = TI_LMP92064_CHAN_INC,
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = TI_LMP92064_CHAN_INC,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 12,
+ .storagebits = 16,
+ },
.datasheet_name = "INC",
},
{
@@ -98,8 +107,20 @@ static const struct iio_chan_spec lmp92064_adc_channels[] = {
.address = TI_LMP92064_CHAN_INV,
.info_mask_separate =
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = TI_LMP92064_CHAN_INV,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 12,
+ .storagebits = 16,
+ },
.datasheet_name = "INV",
},
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+static const unsigned long lmp92064_scan_masks[] = {
+ BIT(TI_LMP92064_CHAN_INC) | BIT(TI_LMP92064_CHAN_INV),
+ 0
};
static int lmp92064_read_meas(struct lmp92064_adc_priv *priv, u16 *res)
@@ -171,6 +192,32 @@ static int lmp92064_read_raw(struct iio_dev *indio_dev,
}
}
+static irqreturn_t lmp92064_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct lmp92064_adc_priv *priv = iio_priv(indio_dev);
+ struct {
+ u16 values[2];
+ int64_t timestamp __aligned(8);
+ } data;
+ int ret;
+
+ memset(&data, 0, sizeof(data));
+
+ ret = lmp92064_read_meas(priv, data.values);
+ if (ret)
+ goto err;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data,
+ iio_get_time_ns(indio_dev));
+
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
static int lmp92064_reset(struct lmp92064_adc_priv *priv,
struct gpio_desc *gpio_reset)
{
@@ -301,6 +348,12 @@ static int lmp92064_adc_probe(struct spi_device *spi)
indio_dev->channels = lmp92064_adc_channels;
indio_dev->num_channels = ARRAY_SIZE(lmp92064_adc_channels);
indio_dev->info = &lmp92064_adc_info;
+ indio_dev->available_scan_masks = lmp92064_scan_masks;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ lmp92064_trigger_handler, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to setup buffered read\n");
return devm_iio_device_register(dev, indio_dev);
}
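
A minimal sketch (not part of the patch) of the scan layout the new lmp92064 trigger handler pushes to the buffer: two 16-bit storage words holding the 12-bit INC/INV samples, followed by the 8-byte aligned timestamp that iio_push_to_buffers_with_timestamp() fills in. The struct name here is illustrative; the handler uses an anonymous struct.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct lmp92064_scan {
	uint16_t values[2];				/* INC, INV: 12 valid bits each */
	int64_t timestamp __attribute__((aligned(8)));	/* filled by the IIO core helper */
};

int main(void)
{
	printf("timestamp offset = %zu, scan size = %zu bytes\n",
	       offsetof(struct lmp92064_scan, timestamp),	/* 8 */
	       sizeof(struct lmp92064_scan));			/* 16 */
	return 0;
}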
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 642c5c4895e3..8db7a01cb5fb 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -14,7 +14,6 @@
#include <linux/io.h>
#include <linux/iio/iio.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iopoll.h>
diff --git a/drivers/iio/amplifiers/Kconfig b/drivers/iio/amplifiers/Kconfig
index f217a2a1e958..b54fe01734b0 100644
--- a/drivers/iio/amplifiers/Kconfig
+++ b/drivers/iio/amplifiers/Kconfig
@@ -18,6 +18,7 @@ config AD8366
AD8366 Dual-Digital Variable Gain Amplifier (VGA)
ADA4961 BiCMOS RF Digital Gain Amplifier (DGA)
ADL5240 Digitally controlled variable gain amplifier (VGA)
+ HMC792A 0.25 dB LSB GaAs MMIC 6-Bit Digital Attenuator
HMC1119 0.25 dB LSB, 7-Bit, Silicon Digital Attenuator
To compile this driver as a module, choose M here: the
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index 8d8c8ea94258..31564afb13a2 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -5,6 +5,7 @@
* AD8366 Dual-Digital Variable Gain Amplifier (VGA)
* ADA4961 BiCMOS RF Digital Gain Amplifier (DGA)
* ADL5240 Digitally controlled variable gain amplifier (VGA)
+ * HMC792A 0.25 dB LSB GaAs MMIC 6-Bit Digital Attenuator
* HMC1119 0.25 dB LSB, 7-Bit, Silicon Digital Attenuator
*
* Copyright 2012-2019 Analog Devices Inc.
@@ -28,6 +29,7 @@ enum ad8366_type {
ID_AD8366,
ID_ADA4961,
ID_ADL5240,
+ ID_HMC792,
ID_HMC1119,
};
@@ -64,6 +66,10 @@ static struct ad8366_info ad8366_infos[] = {
.gain_min = -11500,
.gain_max = 20000,
},
+ [ID_HMC792] = {
+ .gain_min = -15750,
+ .gain_max = 0,
+ },
[ID_HMC1119] = {
.gain_min = -31750,
.gain_max = 0,
@@ -90,6 +96,7 @@ static int ad8366_write(struct iio_dev *indio_dev,
case ID_ADL5240:
st->data[0] = (ch_a & 0x3F);
break;
+ case ID_HMC792:
case ID_HMC1119:
st->data[0] = ch_a;
break;
@@ -127,6 +134,9 @@ static int ad8366_read_raw(struct iio_dev *indio_dev,
case ID_ADL5240:
gain = 20000 - 31500 + code * 500;
break;
+ case ID_HMC792:
+ gain = -1 * code * 500;
+ break;
case ID_HMC1119:
gain = -1 * code * 250;
break;
@@ -176,6 +186,9 @@ static int ad8366_write_raw(struct iio_dev *indio_dev,
case ID_ADL5240:
code = ((gain - 500 - 20000) / 500) & 0x3F;
break;
+ case ID_HMC792:
+ code = (abs(gain) / 500) & 0x3F;
+ break;
case ID_HMC1119:
code = (abs(gain) / 250) & 0x7F;
break;
@@ -261,6 +274,7 @@ static int ad8366_probe(struct spi_device *spi)
break;
case ID_ADA4961:
case ID_ADL5240:
+ case ID_HMC792:
case ID_HMC1119:
st->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(st->reset_gpio)) {
@@ -314,6 +328,7 @@ static const struct spi_device_id ad8366_id[] = {
{"ad8366", ID_AD8366},
{"ada4961", ID_ADA4961},
{"adl5240", ID_ADL5240},
+ {"hmc792a", ID_HMC792},
{"hmc1119", ID_HMC1119},
{}
};
diff --git a/drivers/iio/cdc/ad7150.c b/drivers/iio/cdc/ad7150.c
index d656d2f12755..4c03b9e834b8 100644
--- a/drivers/iio/cdc/ad7150.c
+++ b/drivers/iio/cdc/ad7150.c
@@ -541,6 +541,7 @@ static int ad7150_probe(struct i2c_client *client)
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct ad7150_chip_info *chip;
struct iio_dev *indio_dev;
+ bool use_irq = true;
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
@@ -561,14 +562,13 @@ static int ad7150_probe(struct i2c_client *client)
chip->interrupts[0] = fwnode_irq_get(dev_fwnode(&client->dev), 0);
if (chip->interrupts[0] < 0)
- return chip->interrupts[0];
- if (id->driver_data == AD7150) {
+ use_irq = false;
+ else if (id->driver_data == AD7150) {
chip->interrupts[1] = fwnode_irq_get(dev_fwnode(&client->dev), 1);
if (chip->interrupts[1] < 0)
- return chip->interrupts[1];
+ use_irq = false;
}
- if (chip->interrupts[0] &&
- (id->driver_data == AD7151 || chip->interrupts[1])) {
+ if (use_irq) {
irq_set_status_flags(chip->interrupts[0], IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(&client->dev,
chip->interrupts[0],
diff --git a/drivers/iio/chemical/scd4x.c b/drivers/iio/chemical/scd4x.c
index a4f22d926400..ca6b20270711 100644
--- a/drivers/iio/chemical/scd4x.c
+++ b/drivers/iio/chemical/scd4x.c
@@ -36,6 +36,8 @@
#define SCD4X_WRITE_BUF_SIZE 5
#define SCD4X_FRC_MIN_PPM 0
#define SCD4X_FRC_MAX_PPM 2000
+#define SCD4X_PRESSURE_COMP_MIN_MBAR 700
+#define SCD4X_PRESSURE_COMP_MAX_MBAR 1200
#define SCD4X_READY_MASK 0x01
/*Commands SCD4X*/
@@ -45,6 +47,8 @@ enum scd4x_cmd {
CMD_STOP_MEAS = 0x3f86,
CMD_SET_TEMP_OFFSET = 0x241d,
CMD_GET_TEMP_OFFSET = 0x2318,
+ CMD_SET_AMB_PRESSURE = 0xe000,
+ CMD_GET_AMB_PRESSURE = 0xe000,
CMD_FRC = 0x362f,
CMD_SET_ASC = 0x2416,
CMD_GET_ASC = 0x2313,
@@ -137,7 +141,8 @@ static int scd4x_read(struct scd4x_state *state, enum scd4x_cmd cmd,
* Measurement needs to be stopped before sending commands.
* Except for reading measurement and data ready command.
*/
- if ((cmd != CMD_GET_DATA_READY) && (cmd != CMD_READ_MEAS)) {
+ if ((cmd != CMD_GET_DATA_READY) && (cmd != CMD_READ_MEAS) &&
+ (cmd != CMD_GET_AMB_PRESSURE)) {
ret = scd4x_send_command(state, CMD_STOP_MEAS);
if (ret)
return ret;
@@ -166,7 +171,8 @@ static int scd4x_read(struct scd4x_state *state, enum scd4x_cmd cmd,
}
/* start measurement */
- if ((cmd != CMD_GET_DATA_READY) && (cmd != CMD_READ_MEAS)) {
+ if ((cmd != CMD_GET_DATA_READY) && (cmd != CMD_READ_MEAS) &&
+ (cmd != CMD_GET_AMB_PRESSURE)) {
ret = scd4x_send_command(state, CMD_START_MEAS);
if (ret)
return ret;
@@ -188,9 +194,11 @@ static int scd4x_write(struct scd4x_state *state, enum scd4x_cmd cmd, uint16_t a
buf[4] = crc;
/* measurement needs to be stopped before sending commands */
- ret = scd4x_send_command(state, CMD_STOP_MEAS);
- if (ret)
- return ret;
+ if (cmd != CMD_SET_AMB_PRESSURE) {
+ ret = scd4x_send_command(state, CMD_STOP_MEAS);
+ if (ret)
+ return ret;
+ }
/* execution time */
msleep_interruptible(500);
@@ -200,7 +208,7 @@ static int scd4x_write(struct scd4x_state *state, enum scd4x_cmd cmd, uint16_t a
return ret;
/* start measurement, except for forced calibration command */
- if (cmd != CMD_FRC) {
+ if ((cmd != CMD_FRC) && (cmd != CMD_SET_AMB_PRESSURE)) {
ret = scd4x_send_command(state, CMD_START_MEAS);
if (ret)
return ret;
@@ -338,6 +346,18 @@ static int scd4x_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (chan->output) {
+ mutex_lock(&state->lock);
+ ret = scd4x_read(state, CMD_GET_AMB_PRESSURE, &tmp, sizeof(tmp));
+ mutex_unlock(&state->lock);
+
+ if (ret)
+ return ret;
+
+ *val = be16_to_cpu(tmp);
+ return IIO_VAL_INT;
+ }
+
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
return ret;
@@ -386,6 +406,25 @@ static int scd4x_read_raw(struct iio_dev *indio_dev,
}
}
+static const int scd4x_pressure_calibbias_available[] = {
+ SCD4X_PRESSURE_COMP_MIN_MBAR, 1, SCD4X_PRESSURE_COMP_MAX_MBAR,
+};
+
+static int scd4x_read_avail(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *vals = scd4x_pressure_calibbias_available;
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_RANGE;
+ }
+
+ return -EINVAL;
+}
+
+
static int scd4x_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
@@ -399,6 +438,21 @@ static int scd4x_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const
mutex_unlock(&state->lock);
return ret;
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ if (val < SCD4X_PRESSURE_COMP_MIN_MBAR ||
+ val > SCD4X_PRESSURE_COMP_MAX_MBAR)
+ return -EINVAL;
+
+ mutex_lock(&state->lock);
+ ret = scd4x_write(state, CMD_SET_AMB_PRESSURE, val);
+ mutex_unlock(&state->lock);
+
+ return ret;
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
@@ -503,10 +557,23 @@ static const struct iio_info scd4x_info = {
.attrs = &scd4x_attr_group,
.read_raw = scd4x_read_raw,
.write_raw = scd4x_write_raw,
+ .read_avail = scd4x_read_avail,
};
static const struct iio_chan_spec scd4x_channels[] = {
{
+ /*
+	 * This channel is special in the sense that we pretend the sensor
+	 * can change the measurement chamber pressure, while in fact we are
+	 * only setting the pressure compensation value.
+ */
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW),
+ .output = 1,
+ .scan_index = -1,
+ },
+ {
.type = IIO_CONCENTRATION,
.channel2 = IIO_MOD_CO2,
.modified = 1,
diff --git a/drivers/iio/common/Kconfig b/drivers/iio/common/Kconfig
index 0334b4954773..1ccb5ccf3706 100644
--- a/drivers/iio/common/Kconfig
+++ b/drivers/iio/common/Kconfig
@@ -5,6 +5,7 @@
source "drivers/iio/common/cros_ec_sensors/Kconfig"
source "drivers/iio/common/hid-sensors/Kconfig"
+source "drivers/iio/common/inv_sensors/Kconfig"
source "drivers/iio/common/ms_sensors/Kconfig"
source "drivers/iio/common/scmi_sensors/Kconfig"
source "drivers/iio/common/ssp_sensors/Kconfig"
diff --git a/drivers/iio/common/Makefile b/drivers/iio/common/Makefile
index fad40e1e1718..d3e952239a62 100644
--- a/drivers/iio/common/Makefile
+++ b/drivers/iio/common/Makefile
@@ -10,6 +10,7 @@
# When adding new entries keep the list in alphabetical order
obj-y += cros_ec_sensors/
obj-y += hid-sensors/
+obj-y += inv_sensors/
obj-y += ms_sensors/
obj-y += scmi_sensors/
obj-y += ssp_sensors/
diff --git a/drivers/iio/common/inv_sensors/Kconfig b/drivers/iio/common/inv_sensors/Kconfig
new file mode 100644
index 000000000000..28815fb43157
--- /dev/null
+++ b/drivers/iio/common/inv_sensors/Kconfig
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# TDK-InvenSense sensors common library
+#
+
+config IIO_INV_SENSORS_TIMESTAMP
+ tristate
diff --git a/drivers/iio/common/inv_sensors/Makefile b/drivers/iio/common/inv_sensors/Makefile
new file mode 100644
index 000000000000..dcf39f249112
--- /dev/null
+++ b/drivers/iio/common/inv_sensors/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for TDK-InvenSense sensors module.
+#
+
+obj-$(CONFIG_IIO_INV_SENSORS_TIMESTAMP) += inv_sensors_timestamp.o
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_timestamp.c b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
index 37cbf08acb3a..03823ee57f59 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_timestamp.c
+++ b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
@@ -3,25 +3,23 @@
* Copyright (C) 2020 Invensense, Inc.
*/
+#include <linux/errno.h>
#include <linux/kernel.h>
-#include <linux/regmap.h>
#include <linux/math64.h>
+#include <linux/module.h>
-#include "inv_icm42600.h"
-#include "inv_icm42600_timestamp.h"
+#include <linux/iio/common/inv_sensors_timestamp.h>
-/* internal chip period is 32kHz, 31250ns */
-#define INV_ICM42600_TIMESTAMP_PERIOD 31250
-/* allow a jitter of +/- 2% */
-#define INV_ICM42600_TIMESTAMP_JITTER 2
-/* compute min and max periods accepted */
-#define INV_ICM42600_TIMESTAMP_MIN_PERIOD(_p) \
- (((_p) * (100 - INV_ICM42600_TIMESTAMP_JITTER)) / 100)
-#define INV_ICM42600_TIMESTAMP_MAX_PERIOD(_p) \
- (((_p) * (100 + INV_ICM42600_TIMESTAMP_JITTER)) / 100)
+/* compute jitter, and min/max periods, with jitter given in per mille */
+#define INV_SENSORS_TIMESTAMP_JITTER(_val, _jitter) \
+ (div_s64((_val) * (_jitter), 1000))
+#define INV_SENSORS_TIMESTAMP_MIN(_val, _jitter) \
+ (((_val) * (1000 - (_jitter))) / 1000)
+#define INV_SENSORS_TIMESTAMP_MAX(_val, _jitter) \
+ (((_val) * (1000 + (_jitter))) / 1000)
/* Add a new value inside an accumulator and update the estimate value */
-static void inv_update_acc(struct inv_icm42600_timestamp_acc *acc, uint32_t val)
+static void inv_update_acc(struct inv_sensors_timestamp_acc *acc, uint32_t val)
{
uint64_t sum = 0;
size_t i;
@@ -40,65 +38,57 @@ static void inv_update_acc(struct inv_icm42600_timestamp_acc *acc, uint32_t val)
acc->val = div_u64(sum, i);
}
-void inv_icm42600_timestamp_init(struct inv_icm42600_timestamp *ts,
- uint32_t period)
+void inv_sensors_timestamp_init(struct inv_sensors_timestamp *ts,
+ const struct inv_sensors_timestamp_chip *chip)
{
- /* initial odr for sensor after reset is 1kHz */
- const uint32_t default_period = 1000000;
+ memset(ts, 0, sizeof(*ts));
+
+ /* save chip parameters and compute min and max clock period */
+ ts->chip = *chip;
+ ts->min_period = INV_SENSORS_TIMESTAMP_MIN(chip->clock_period, chip->jitter);
+ ts->max_period = INV_SENSORS_TIMESTAMP_MAX(chip->clock_period, chip->jitter);
/* current multiplier and period values after reset */
- ts->mult = default_period / INV_ICM42600_TIMESTAMP_PERIOD;
- ts->period = default_period;
- /* new set multiplier is the one from chip initialization */
- ts->new_mult = period / INV_ICM42600_TIMESTAMP_PERIOD;
+ ts->mult = chip->init_period / chip->clock_period;
+ ts->period = chip->init_period;
/* use theoretical value for chip period */
- inv_update_acc(&ts->chip_period, INV_ICM42600_TIMESTAMP_PERIOD);
+ inv_update_acc(&ts->chip_period, chip->clock_period);
}
+EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_init, IIO_INV_SENSORS_TIMESTAMP);
-int inv_icm42600_timestamp_setup(struct inv_icm42600_state *st)
-{
- unsigned int val;
-
- /* enable timestamp register */
- val = INV_ICM42600_TMST_CONFIG_TMST_TO_REGS_EN |
- INV_ICM42600_TMST_CONFIG_TMST_EN;
- return regmap_update_bits(st->map, INV_ICM42600_REG_TMST_CONFIG,
- INV_ICM42600_TMST_CONFIG_MASK, val);
-}
-
-int inv_icm42600_timestamp_update_odr(struct inv_icm42600_timestamp *ts,
- uint32_t period, bool fifo)
+int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts,
+ uint32_t period, bool fifo)
{
/* when FIFO is on, prevent odr change if one is already pending */
if (fifo && ts->new_mult != 0)
return -EAGAIN;
- ts->new_mult = period / INV_ICM42600_TIMESTAMP_PERIOD;
+ ts->new_mult = period / ts->chip.clock_period;
return 0;
}
+EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_update_odr, IIO_INV_SENSORS_TIMESTAMP);
-static bool inv_validate_period(uint32_t period, uint32_t mult)
+static bool inv_validate_period(struct inv_sensors_timestamp *ts, uint32_t period, uint32_t mult)
{
- const uint32_t chip_period = INV_ICM42600_TIMESTAMP_PERIOD;
uint32_t period_min, period_max;
/* check that period is acceptable */
- period_min = INV_ICM42600_TIMESTAMP_MIN_PERIOD(chip_period) * mult;
- period_max = INV_ICM42600_TIMESTAMP_MAX_PERIOD(chip_period) * mult;
+ period_min = ts->min_period * mult;
+ period_max = ts->max_period * mult;
if (period > period_min && period < period_max)
return true;
else
return false;
}
-static bool inv_update_chip_period(struct inv_icm42600_timestamp *ts,
- uint32_t mult, uint32_t period)
+static bool inv_update_chip_period(struct inv_sensors_timestamp *ts,
+ uint32_t mult, uint32_t period)
{
uint32_t new_chip_period;
- if (!inv_validate_period(period, mult))
+ if (!inv_validate_period(ts, period, mult))
return false;
/* update chip internal period estimation */
@@ -109,7 +99,7 @@ static bool inv_update_chip_period(struct inv_icm42600_timestamp *ts,
return true;
}
-static void inv_align_timestamp_it(struct inv_icm42600_timestamp *ts)
+static void inv_align_timestamp_it(struct inv_sensors_timestamp *ts)
{
int64_t delta, jitter;
int64_t adjust;
@@ -118,7 +108,7 @@ static void inv_align_timestamp_it(struct inv_icm42600_timestamp *ts)
delta = ts->it.lo - ts->timestamp;
/* adjust timestamp while respecting jitter */
- jitter = div_s64((int64_t)ts->period * INV_ICM42600_TIMESTAMP_JITTER, 100);
+ jitter = INV_SENSORS_TIMESTAMP_JITTER((int64_t)ts->period, ts->chip.jitter);
if (delta > jitter)
adjust = jitter;
else if (delta < -jitter)
@@ -129,13 +119,13 @@ static void inv_align_timestamp_it(struct inv_icm42600_timestamp *ts)
ts->timestamp += adjust;
}
-void inv_icm42600_timestamp_interrupt(struct inv_icm42600_timestamp *ts,
+void inv_sensors_timestamp_interrupt(struct inv_sensors_timestamp *ts,
uint32_t fifo_period, size_t fifo_nb,
size_t sensor_nb, int64_t timestamp)
{
- struct inv_icm42600_timestamp_interval *it;
+ struct inv_sensors_timestamp_interval *it;
int64_t delta, interval;
- const uint32_t fifo_mult = fifo_period / INV_ICM42600_TIMESTAMP_PERIOD;
+ const uint32_t fifo_mult = fifo_period / ts->chip.clock_period;
uint32_t period = ts->period;
bool valid = false;
@@ -165,10 +155,11 @@ void inv_icm42600_timestamp_interrupt(struct inv_icm42600_timestamp *ts,
if (valid)
inv_align_timestamp_it(ts);
}
+EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_interrupt, IIO_INV_SENSORS_TIMESTAMP);
-void inv_icm42600_timestamp_apply_odr(struct inv_icm42600_timestamp *ts,
- uint32_t fifo_period, size_t fifo_nb,
- unsigned int fifo_no)
+void inv_sensors_timestamp_apply_odr(struct inv_sensors_timestamp *ts,
+ uint32_t fifo_period, size_t fifo_nb,
+ unsigned int fifo_no)
{
int64_t interval;
uint32_t fifo_mult;
@@ -189,10 +180,15 @@ void inv_icm42600_timestamp_apply_odr(struct inv_icm42600_timestamp *ts,
*/
if (ts->timestamp != 0) {
/* compute measured fifo period */
- fifo_mult = fifo_period / INV_ICM42600_TIMESTAMP_PERIOD;
+ fifo_mult = fifo_period / ts->chip.clock_period;
fifo_period = fifo_mult * ts->chip_period.val;
/* computes time interval between interrupt and this sample */
interval = (int64_t)(fifo_nb - fifo_no) * (int64_t)fifo_period;
ts->timestamp = ts->it.up - interval;
}
}
+EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_apply_odr, IIO_INV_SENSORS_TIMESTAMP);
+
+MODULE_AUTHOR("InvenSense, Inc.");
+MODULE_DESCRIPTION("InvenSense sensors timestamp module");
+MODULE_LICENSE("GPL");
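
A standalone sketch (not part of the patch) of the per-mille jitter window the relocated helpers compute. The macro names are stand-ins for the driver's, and the example values (31250 ns clock period, 20 per mille) match the ICM-42600 constants removed above (32 kHz clock, +/- 2% jitter).

#include <stdint.h>
#include <stdio.h>

#define TS_PERIOD_MIN(_val, _jitter)	(((_val) * (1000 - (_jitter))) / 1000)
#define TS_PERIOD_MAX(_val, _jitter)	(((_val) * (1000 + (_jitter))) / 1000)

int main(void)
{
	uint32_t clock_period = 31250;	/* ns, 32 kHz chip clock */
	uint32_t jitter = 20;		/* per mille, i.e. +/- 2% */

	printf("accepted chip period: %u..%u ns\n",
	       TS_PERIOD_MIN(clock_period, jitter),	/* 30625 */
	       TS_PERIOD_MAX(clock_period, jitter));	/* 31875 */
	return 0;
}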
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 3acd9c3f388e..93b8be183de6 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -389,6 +389,17 @@ config MCP4725
To compile this driver as a module, choose M here: the module
will be called mcp4725.
+config MCP4728
+ tristate "MCP4728 DAC driver"
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Microchip
+ MCP4728 quad channel, 12-bit digital-to-analog converter (DAC)
+ with I2C interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mcp4728.
+
config MCP4922
tristate "MCP4902, MCP4912, MCP4922 DAC driver"
depends on SPI
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index addd97a78838..5b2bac900d5a 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -41,6 +41,7 @@ obj-$(CONFIG_MAX517) += max517.o
obj-$(CONFIG_MAX5522) += max5522.o
obj-$(CONFIG_MAX5821) += max5821.o
obj-$(CONFIG_MCP4725) += mcp4725.o
+obj-$(CONFIG_MCP4728) += mcp4728.o
obj-$(CONFIG_MCP4922) += mcp4922.o
obj-$(CONFIG_STM32_DAC_CORE) += stm32-dac-core.o
obj-$(CONFIG_STM32_DAC) += stm32-dac.o
diff --git a/drivers/iio/dac/mcp4728.c b/drivers/iio/dac/mcp4728.c
new file mode 100644
index 000000000000..5113f67ddc31
--- /dev/null
+++ b/drivers/iio/dac/mcp4728.c
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Support for Microchip MCP4728
+ *
+ * Copyright (C) 2023 Andrea Collamati <andrea.collamati@gmail.com>
+ *
+ * Based on mcp4725 by Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * Driver for the Microchip I2C 12-bit, quad-channel digital-to-analog
+ * converter (DAC).
+ *
+ * (7-bit I2C slave address 0x60, the three LSBs can be configured in
+ * hardware)
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#define MCP4728_RESOLUTION 12
+#define MCP4728_N_CHANNELS 4
+
+#define MCP4728_CMD_MASK GENMASK(7, 3)
+#define MCP4728_CHSEL_MASK GENMASK(2, 1)
+#define MCP4728_UDAC_MASK BIT(0)
+
+#define MCP4728_VREF_MASK BIT(7)
+#define MCP4728_PDMODE_MASK GENMASK(6, 5)
+#define MCP4728_GAIN_MASK BIT(4)
+
+#define MCP4728_DAC_H_MASK GENMASK(3, 0)
+#define MCP4728_DAC_L_MASK GENMASK(7, 0)
+
+#define MCP4728_RDY_MASK BIT(7)
+
+#define MCP4728_MW_CMD 0x08 /* Multiwrite Command */
+#define MCP4728_SW_CMD 0x0A /* Sequential Write Command with EEPROM */
+
+#define MCP4728_READ_RESPONSE_LEN (MCP4728_N_CHANNELS * 3 * 2)
+#define MCP4728_WRITE_EEPROM_LEN (1 + MCP4728_N_CHANNELS * 2)
+
+enum vref_mode {
+ MCP4728_VREF_EXTERNAL_VDD = 0,
+ MCP4728_VREF_INTERNAL_2048mV = 1,
+};
+
+enum gain_mode {
+ MCP4728_GAIN_X1 = 0,
+ MCP4728_GAIN_X2 = 1,
+};
+
+enum iio_powerdown_mode {
+ MCP4728_IIO_1K,
+ MCP4728_IIO_100K,
+ MCP4728_IIO_500K,
+};
+
+struct mcp4728_channel_data {
+ enum vref_mode ref_mode;
+ enum iio_powerdown_mode pd_mode;
+ enum gain_mode g_mode;
+ u16 dac_value;
+};
+
+/* MCP4728 full-scale ranges
+ * The available ranges are:
+ * - VREF = VDD:                FSR from 0.0V to VDD
+ * - VREF = internal, gain = 1: FSR from 0.0V to VREF
+ * - VREF = internal, gain = 2: FSR from 0.0V to 2*VREF
+ */
+enum mcp4728_scale {
+ MCP4728_SCALE_VDD,
+ MCP4728_SCALE_VINT_NO_GAIN,
+ MCP4728_SCALE_VINT_GAIN_X2,
+ MCP4728_N_SCALES
+};
+
+struct mcp4728_data {
+ struct i2c_client *client;
+ struct regulator *vdd_reg;
+ bool powerdown;
+ int scales_avail[MCP4728_N_SCALES * 2];
+ struct mcp4728_channel_data chdata[MCP4728_N_CHANNELS];
+};
+
+#define MCP4728_CHAN(chan) { \
+ .type = IIO_VOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .channel = chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = mcp4728_ext_info, \
+}
+
+static int mcp4728_suspend(struct device *dev);
+static int mcp4728_resume(struct device *dev);
+
+static ssize_t mcp4728_store_eeprom(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct mcp4728_data *data = iio_priv(indio_dev);
+ u8 outbuf[MCP4728_WRITE_EEPROM_LEN];
+ int tries = 20;
+ u8 inbuf[3];
+ bool state;
+ int ret;
+ unsigned int i;
+
+ ret = kstrtobool(buf, &state);
+ if (ret < 0)
+ return ret;
+
+ if (!state)
+ return 0;
+
+ outbuf[0] = FIELD_PREP(MCP4728_CMD_MASK, MCP4728_SW_CMD);
+
+ for (i = 0; i < MCP4728_N_CHANNELS; i++) {
+ struct mcp4728_channel_data *ch = &data->chdata[i];
+ int offset = 1 + i * 2;
+
+ outbuf[offset] = FIELD_PREP(MCP4728_VREF_MASK, ch->ref_mode);
+
+ if (data->powerdown) {
+ u8 mcp4728_pd_mode = ch->pd_mode + 1;
+
+ outbuf[offset] |= FIELD_PREP(MCP4728_PDMODE_MASK,
+ mcp4728_pd_mode);
+ }
+
+ outbuf[offset] |= FIELD_PREP(MCP4728_GAIN_MASK, ch->g_mode);
+ outbuf[offset] |=
+ FIELD_PREP(MCP4728_DAC_H_MASK, ch->dac_value >> 8);
+ outbuf[offset + 1] =
+ FIELD_PREP(MCP4728_DAC_L_MASK, ch->dac_value);
+ }
+
+ ret = i2c_master_send(data->client, outbuf, MCP4728_WRITE_EEPROM_LEN);
+ if (ret < 0)
+ return ret;
+ else if (ret != MCP4728_WRITE_EEPROM_LEN)
+ return -EIO;
+
+ /* wait for the RDY bit signalling write completion, takes up to 50ms */
+ while (tries--) {
+ msleep(20);
+ ret = i2c_master_recv(data->client, inbuf, 3);
+ if (ret < 0)
+ return ret;
+ else if (ret != 3)
+ return -EIO;
+
+ if (FIELD_GET(MCP4728_RDY_MASK, inbuf[0]))
+ break;
+ }
+
+ if (tries < 0) {
+ dev_err(&data->client->dev, "%s failed, incomplete\n",
+ __func__);
+ return -EIO;
+ }
+ return len;
+}
+
+static IIO_DEVICE_ATTR(store_eeprom, 0200, NULL, mcp4728_store_eeprom, 0);
+
+static struct attribute *mcp4728_attributes[] = {
+ &iio_dev_attr_store_eeprom.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group mcp4728_attribute_group = {
+ .attrs = mcp4728_attributes,
+};
+
+static int mcp4728_program_channel_cfg(int channel, struct iio_dev *indio_dev)
+{
+ struct mcp4728_data *data = iio_priv(indio_dev);
+ struct mcp4728_channel_data *ch = &data->chdata[channel];
+ u8 outbuf[3];
+ int ret;
+
+ outbuf[0] = FIELD_PREP(MCP4728_CMD_MASK, MCP4728_MW_CMD);
+ outbuf[0] |= FIELD_PREP(MCP4728_CHSEL_MASK, channel);
+ outbuf[0] |= FIELD_PREP(MCP4728_UDAC_MASK, 0);
+
+ outbuf[1] = FIELD_PREP(MCP4728_VREF_MASK, ch->ref_mode);
+
+ if (data->powerdown)
+ outbuf[1] |= FIELD_PREP(MCP4728_PDMODE_MASK, ch->pd_mode + 1);
+
+ outbuf[1] |= FIELD_PREP(MCP4728_GAIN_MASK, ch->g_mode);
+ outbuf[1] |= FIELD_PREP(MCP4728_DAC_H_MASK, ch->dac_value >> 8);
+ outbuf[2] = FIELD_PREP(MCP4728_DAC_L_MASK, ch->dac_value);
+
+ ret = i2c_master_send(data->client, outbuf, 3);
+ if (ret < 0)
+ return ret;
+ else if (ret != 3)
+ return -EIO;
+
+ return 0;
+}
+
+static const char *const mcp4728_powerdown_modes[] = { "1kohm_to_gnd",
+ "100kohm_to_gnd",
+ "500kohm_to_gnd" };
+
+static int mcp4728_get_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct mcp4728_data *data = iio_priv(indio_dev);
+
+ return data->chdata[chan->channel].pd_mode;
+}
+
+static int mcp4728_set_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct mcp4728_data *data = iio_priv(indio_dev);
+
+ data->chdata[chan->channel].pd_mode = mode;
+
+ return 0;
+}
+
+static ssize_t mcp4728_read_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct mcp4728_data *data = iio_priv(indio_dev);
+
+ return sysfs_emit(buf, "%d\n", data->powerdown);
+}
+
+static ssize_t mcp4728_write_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct mcp4728_data *data = iio_priv(indio_dev);
+ bool state;
+ int ret;
+
+ ret = kstrtobool(buf, &state);
+ if (ret)
+ return ret;
+
+ if (state)
+ ret = mcp4728_suspend(&data->client->dev);
+ else
+ ret = mcp4728_resume(&data->client->dev);
+
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+static const struct iio_enum mcp4728_powerdown_mode_enum = {
+ .items = mcp4728_powerdown_modes,
+ .num_items = ARRAY_SIZE(mcp4728_powerdown_modes),
+ .get = mcp4728_get_powerdown_mode,
+ .set = mcp4728_set_powerdown_mode,
+};
+
+static const struct iio_chan_spec_ext_info mcp4728_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = mcp4728_read_powerdown,
+ .write = mcp4728_write_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("powerdown_mode", IIO_SEPARATE, &mcp4728_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE,
+ &mcp4728_powerdown_mode_enum),
+ {},
+};
+
+static const struct iio_chan_spec mcp4728_channels[MCP4728_N_CHANNELS] = {
+ MCP4728_CHAN(0),
+ MCP4728_CHAN(1),
+ MCP4728_CHAN(2),
+ MCP4728_CHAN(3),
+};
+
+static void mcp4728_get_scale_avail(enum mcp4728_scale scale,
+ struct mcp4728_data *data, int *val,
+ int *val2)
+{
+ *val = data->scales_avail[scale * 2];
+ *val2 = data->scales_avail[scale * 2 + 1];
+}
+
+static void mcp4728_get_scale(int channel, struct mcp4728_data *data, int *val,
+ int *val2)
+{
+ int ref_mode = data->chdata[channel].ref_mode;
+ int g_mode = data->chdata[channel].g_mode;
+
+ if (ref_mode == MCP4728_VREF_EXTERNAL_VDD) {
+ mcp4728_get_scale_avail(MCP4728_SCALE_VDD, data, val, val2);
+ } else {
+ if (g_mode == MCP4728_GAIN_X1) {
+ mcp4728_get_scale_avail(MCP4728_SCALE_VINT_NO_GAIN,
+ data, val, val2);
+ } else {
+ mcp4728_get_scale_avail(MCP4728_SCALE_VINT_GAIN_X2,
+ data, val, val2);
+ }
+ }
+}
+
+static int mcp4728_find_matching_scale(struct mcp4728_data *data, int val,
+ int val2)
+{
+ for (int i = 0; i < MCP4728_N_SCALES; i++) {
+ if (data->scales_avail[i * 2] == val &&
+ data->scales_avail[i * 2 + 1] == val2)
+ return i;
+ }
+ return -EINVAL;
+}
+
+static int mcp4728_set_scale(int channel, struct mcp4728_data *data, int val,
+ int val2)
+{
+ int scale = mcp4728_find_matching_scale(data, val, val2);
+
+ if (scale < 0)
+ return scale;
+
+ switch (scale) {
+ case MCP4728_SCALE_VDD:
+ data->chdata[channel].ref_mode = MCP4728_VREF_EXTERNAL_VDD;
+ return 0;
+ case MCP4728_SCALE_VINT_NO_GAIN:
+ data->chdata[channel].ref_mode = MCP4728_VREF_INTERNAL_2048mV;
+ data->chdata[channel].g_mode = MCP4728_GAIN_X1;
+ return 0;
+ case MCP4728_SCALE_VINT_GAIN_X2:
+ data->chdata[channel].ref_mode = MCP4728_VREF_INTERNAL_2048mV;
+ data->chdata[channel].g_mode = MCP4728_GAIN_X2;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mcp4728_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct mcp4728_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *val = data->chdata[chan->channel].dac_value;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ mcp4728_get_scale(chan->channel, data, val, val2);
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+ return -EINVAL;
+}
+
+static int mcp4728_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct mcp4728_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (val < 0 || val > GENMASK(MCP4728_RESOLUTION - 1, 0))
+ return -EINVAL;
+ data->chdata[chan->channel].dac_value = val;
+ return mcp4728_program_channel_cfg(chan->channel, indio_dev);
+ case IIO_CHAN_INFO_SCALE:
+ ret = mcp4728_set_scale(chan->channel, data, val, val2);
+ if (ret)
+ return ret;
+
+ return mcp4728_program_channel_cfg(chan->channel, indio_dev);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void mcp4728_init_scale_avail(enum mcp4728_scale scale, int vref_mv,
+ struct mcp4728_data *data)
+{
+ s64 tmp;
+ int value_micro;
+ int value_int;
+
+ tmp = (s64)vref_mv * 1000000LL >> MCP4728_RESOLUTION;
+ value_int = div_s64_rem(tmp, 1000000LL, &value_micro);
+
+ data->scales_avail[scale * 2] = value_int;
+ data->scales_avail[scale * 2 + 1] = value_micro;
+}
+
+static int mcp4728_init_scales_avail(struct mcp4728_data *data)
+{
+ int ret;
+
+ ret = regulator_get_voltage(data->vdd_reg);
+ if (ret < 0)
+ return ret;
+
+ mcp4728_init_scale_avail(MCP4728_SCALE_VDD, ret / 1000, data);
+ mcp4728_init_scale_avail(MCP4728_SCALE_VINT_NO_GAIN, 2048, data);
+ mcp4728_init_scale_avail(MCP4728_SCALE_VINT_GAIN_X2, 4096, data);
+
+ return 0;
+}
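
A quick worked check of the fixed-point split above (editor's example, not part of the patch; the 3.3V VDD value is only illustrative):

    2048mV internal reference, gain 1: (2048 * 1000000) >> 12 = 500000   -> scale 0.500000 mV/LSB
    4096mV internal reference, gain 2: (4096 * 1000000) >> 12 = 1000000  -> scale 1.000000 mV/LSB
    3300mV VDD reference (assumed):    (3300 * 1000000) >> 12 = 805664   -> scale ~0.805664 mV/LSB

Each pair is stored in scales_avail[] as {integer, micro} and reported with IIO_VAL_INT_PLUS_MICRO.
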
+
+static int mcp4728_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long info)
+{
+ struct mcp4728_data *data = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ *type = IIO_VAL_INT_PLUS_MICRO;
+
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *vals = data->scales_avail;
+ *length = MCP4728_N_SCALES * 2;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info mcp4728_info = {
+ .read_raw = mcp4728_read_raw,
+ .write_raw = mcp4728_write_raw,
+ .read_avail = &mcp4728_read_avail,
+ .attrs = &mcp4728_attribute_group,
+};
+
+static int mcp4728_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct mcp4728_data *data = iio_priv(indio_dev);
+ unsigned int i;
+
+ data->powerdown = true;
+
+ for (i = 0; i < MCP4728_N_CHANNELS; i++) {
+ int err = mcp4728_program_channel_cfg(i, indio_dev);
+
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int mcp4728_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct mcp4728_data *data = iio_priv(indio_dev);
+ int err = 0;
+ unsigned int i;
+
+ data->powerdown = false;
+
+ for (i = 0; i < MCP4728_N_CHANNELS; i++) {
+ int ret = mcp4728_program_channel_cfg(i, indio_dev);
+
+ if (ret)
+ err = ret;
+ }
+ return err;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(mcp4728_pm_ops, mcp4728_suspend,
+ mcp4728_resume);
+
+static int mcp4728_init_channels_data(struct mcp4728_data *data)
+{
+ u8 inbuf[MCP4728_READ_RESPONSE_LEN];
+ int ret;
+ unsigned int i;
+
+ ret = i2c_master_recv(data->client, inbuf, MCP4728_READ_RESPONSE_LEN);
+ if (ret < 0) {
+ return dev_err_probe(&data->client->dev, ret,
+ "failed to read mcp4728 conf.\n");
+ } else if (ret != MCP4728_READ_RESPONSE_LEN) {
+ return dev_err_probe(&data->client->dev, -EIO,
+ "failed to read mcp4728 conf. Wrong Response Len ret=%d\n",
+ ret);
+ }
+
+ for (i = 0; i < MCP4728_N_CHANNELS; i++) {
+ struct mcp4728_channel_data *ch = &data->chdata[i];
+ u8 r2 = inbuf[i * 6 + 1];
+ u8 r3 = inbuf[i * 6 + 2];
+
+ ch->dac_value = FIELD_GET(MCP4728_DAC_H_MASK, r2) << 8 |
+ FIELD_GET(MCP4728_DAC_L_MASK, r3);
+ ch->ref_mode = FIELD_GET(MCP4728_VREF_MASK, r2);
+ ch->pd_mode = FIELD_GET(MCP4728_PDMODE_MASK, r2);
+ ch->g_mode = FIELD_GET(MCP4728_GAIN_MASK, r2);
+ }
+
+ return 0;
+}
+
+static void mcp4728_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
+static int mcp4728_probe(struct i2c_client *client)
+{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
+ struct mcp4728_data *data;
+ struct iio_dev *indio_dev;
+ int err;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ data->vdd_reg = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(data->vdd_reg))
+ return PTR_ERR(data->vdd_reg);
+
+ err = regulator_enable(data->vdd_reg);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(&client->dev, mcp4728_reg_disable,
+ data->vdd_reg);
+ if (err)
+ return err;
+
+ /*
+ * The MCP4728 has an internal EEPROM that stores each channel's boot
+ * configuration. This means the device configuration is unknown to the
+ * driver at kernel boot. mcp4728_init_channels_data() reads back the
+ * DAC settings and stores them in the driver's data structure.
+ */
+ err = mcp4728_init_channels_data(data);
+ if (err) {
+ return dev_err_probe(&client->dev, err,
+ "failed to read mcp4728 current configuration\n");
+ }
+
+ err = mcp4728_init_scales_avail(data);
+ if (err) {
+ return dev_err_probe(&client->dev, err,
+ "failed to init scales\n");
+ }
+
+ indio_dev->name = id->name;
+ indio_dev->info = &mcp4728_info;
+ indio_dev->channels = mcp4728_channels;
+ indio_dev->num_channels = MCP4728_N_CHANNELS;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id mcp4728_id[] = {
+ { "mcp4728", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mcp4728_id);
+
+static const struct of_device_id mcp4728_of_match[] = {
+ { .compatible = "microchip,mcp4728" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mcp4728_of_match);
+
+static struct i2c_driver mcp4728_driver = {
+ .driver = {
+ .name = "mcp4728",
+ .of_match_table = mcp4728_of_match,
+ .pm = pm_sleep_ptr(&mcp4728_pm_ops),
+ },
+ .probe = mcp4728_probe,
+ .id_table = mcp4728_id,
+};
+module_i2c_driver(mcp4728_driver);
+
+MODULE_AUTHOR("Andrea Collamati <andrea.collamati@gmail.com>");
+MODULE_DESCRIPTION("MCP4728 12-bit DAC");
+MODULE_LICENSE("GPL");
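
For orientation, here is a minimal userspace sketch (editor's example, not part of the patch) driving the attributes this driver creates through the standard IIO sysfs interface; the iio:device0 index is a placeholder that depends on probe order.

#include <stdio.h>

int main(void)
{
	const char *dev = "/sys/bus/iio/devices/iio:device0";
	char path[256];
	FILE *f;

	/* set channel 0 to mid-scale (12-bit DAC, raw range 0..4095) */
	snprintf(path, sizeof(path), "%s/out_voltage0_raw", dev);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "2048\n");
	fclose(f);

	/* persist the current channel setup into the on-chip EEPROM */
	snprintf(path, sizeof(path), "%s/store_eeprom", dev);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "1\n");
	fclose(f);

	return 0;
}
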
diff --git a/drivers/iio/filter/admv8818.c b/drivers/iio/filter/admv8818.c
index fe8d46cb7f1d..848baa6e3bbf 100644
--- a/drivers/iio/filter/admv8818.c
+++ b/drivers/iio/filter/admv8818.c
@@ -78,6 +78,7 @@ enum {
enum {
ADMV8818_AUTO_MODE,
ADMV8818_MANUAL_MODE,
+ ADMV8818_BYPASS_MODE,
};
struct admv8818_state {
@@ -114,7 +115,8 @@ static const struct regmap_config admv8818_regmap_config = {
static const char * const admv8818_modes[] = {
[0] = "auto",
- [1] = "manual"
+ [1] = "manual",
+ [2] = "bypass"
};
static int __admv8818_hpf_select(struct admv8818_state *st, u64 freq)
@@ -394,6 +396,36 @@ static int admv8818_reg_access(struct iio_dev *indio_dev,
return regmap_write(st->regmap, reg, write_val);
}
+static int admv8818_filter_bypass(struct admv8818_state *st)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+
+ ret = regmap_update_bits(st->regmap, ADMV8818_REG_WR0_SW,
+ ADMV8818_SW_IN_SET_WR0_MSK |
+ ADMV8818_SW_IN_WR0_MSK |
+ ADMV8818_SW_OUT_SET_WR0_MSK |
+ ADMV8818_SW_OUT_WR0_MSK,
+ FIELD_PREP(ADMV8818_SW_IN_SET_WR0_MSK, 1) |
+ FIELD_PREP(ADMV8818_SW_IN_WR0_MSK, 0) |
+ FIELD_PREP(ADMV8818_SW_OUT_SET_WR0_MSK, 1) |
+ FIELD_PREP(ADMV8818_SW_OUT_WR0_MSK, 0));
+ if (ret)
+ goto exit;
+
+ ret = regmap_update_bits(st->regmap, ADMV8818_REG_WR0_FILTER,
+ ADMV8818_HPF_WR0_MSK |
+ ADMV8818_LPF_WR0_MSK,
+ FIELD_PREP(ADMV8818_HPF_WR0_MSK, 0) |
+ FIELD_PREP(ADMV8818_LPF_WR0_MSK, 0));
+
+exit:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
static int admv8818_get_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
@@ -411,14 +443,22 @@ static int admv8818_set_mode(struct iio_dev *indio_dev,
if (!st->clkin) {
if (mode == ADMV8818_MANUAL_MODE)
- return 0;
+ goto set_mode;
+
+ if (mode == ADMV8818_BYPASS_MODE) {
+ ret = admv8818_filter_bypass(st);
+ if (ret)
+ return ret;
+
+ goto set_mode;
+ }
return -EINVAL;
}
switch (mode) {
case ADMV8818_AUTO_MODE:
- if (!st->filter_mode)
+ if (st->filter_mode == ADMV8818_AUTO_MODE)
return 0;
ret = clk_prepare_enable(st->clkin);
@@ -434,20 +474,27 @@ static int admv8818_set_mode(struct iio_dev *indio_dev,
break;
case ADMV8818_MANUAL_MODE:
- if (st->filter_mode)
- return 0;
+ case ADMV8818_BYPASS_MODE:
+ if (st->filter_mode == ADMV8818_AUTO_MODE) {
+ clk_disable_unprepare(st->clkin);
- clk_disable_unprepare(st->clkin);
+ ret = clk_notifier_unregister(st->clkin, &st->nb);
+ if (ret)
+ return ret;
+ }
- ret = clk_notifier_unregister(st->clkin, &st->nb);
- if (ret)
- return ret;
+ if (mode == ADMV8818_BYPASS_MODE) {
+ ret = admv8818_filter_bypass(st);
+ if (ret)
+ return ret;
+ }
break;
default:
return -EINVAL;
}
+set_mode:
st->filter_mode = mode;
return ret;
diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c
index 8c8e0bbfc99f..6355c1f28423 100644
--- a/drivers/iio/frequency/admv1013.c
+++ b/drivers/iio/frequency/admv1013.c
@@ -382,6 +382,11 @@ static const struct iio_info admv1013_info = {
.debugfs_reg_access = &admv1013_reg_access,
};
+static const char * const admv1013_vcc_regs[] = {
+ "vcc-drv", "vcc2-drv", "vcc-vva", "vcc-amp1", "vcc-amp2",
+ "vcc-env", "vcc-bg", "vcc-bg2", "vcc-mixer", "vcc-quad"
+};
+
static int admv1013_freq_change(struct notifier_block *nb, unsigned long action, void *data)
{
struct admv1013_state *st = container_of(nb, struct admv1013_state, nb);
@@ -557,6 +562,15 @@ static int admv1013_properties_parse(struct admv1013_state *st)
return dev_err_probe(&spi->dev, PTR_ERR(st->reg),
"failed to get the common-mode voltage\n");
+ ret = devm_regulator_bulk_get_enable(&st->spi->dev,
+ ARRAY_SIZE(admv1013_vcc_regs),
+ admv1013_vcc_regs);
+ if (ret) {
+ dev_err_probe(&spi->dev, ret,
+ "Failed to request VCC regulators\n");
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index 3abffb01ba31..17275a53ca2c 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -115,8 +115,6 @@ enum {
ADIS16475_SCAN_ACCEL_Y,
ADIS16475_SCAN_ACCEL_Z,
ADIS16475_SCAN_TEMP,
- ADIS16475_SCAN_DIAG_S_FLAGS,
- ADIS16475_SCAN_CRC_FAILURE,
};
static bool low_rate_allow;
@@ -728,6 +726,7 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.max_dec = 1999,
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .has_burst32 = true,
.adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
},
[ADIS16477_2] = {
@@ -743,6 +742,7 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.max_dec = 1999,
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .has_burst32 = true,
.adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
},
[ADIS16477_3] = {
@@ -758,6 +758,7 @@ static const struct adis16475_chip_info adis16475_chip_info[] = {
.max_dec = 1999,
.sync = adis16475_sync_mode,
.num_sync = ARRAY_SIZE(adis16475_sync_mode),
+ .has_burst32 = true,
.adis_data = ADIS16475_DATA(16477, &adis16475_timeouts),
},
[ADIS16465_1] = {
diff --git a/drivers/iio/imu/inv_icm42600/Kconfig b/drivers/iio/imu/inv_icm42600/Kconfig
index 50cbcfcb6cf1..f56b0816cc4d 100644
--- a/drivers/iio/imu/inv_icm42600/Kconfig
+++ b/drivers/iio/imu/inv_icm42600/Kconfig
@@ -3,6 +3,7 @@
config INV_ICM42600
tristate
select IIO_BUFFER
+ select IIO_INV_SENSORS_TIMESTAMP
config INV_ICM42600_I2C
tristate "InvenSense ICM-426xx I2C driver"
diff --git a/drivers/iio/imu/inv_icm42600/Makefile b/drivers/iio/imu/inv_icm42600/Makefile
index 291714d9aa54..0f49f6df3647 100644
--- a/drivers/iio/imu/inv_icm42600/Makefile
+++ b/drivers/iio/imu/inv_icm42600/Makefile
@@ -6,7 +6,6 @@ inv-icm42600-y += inv_icm42600_gyro.o
inv-icm42600-y += inv_icm42600_accel.o
inv-icm42600-y += inv_icm42600_temp.o
inv-icm42600-y += inv_icm42600_buffer.o
-inv-icm42600-y += inv_icm42600_timestamp.o
obj-$(CONFIG_INV_ICM42600_I2C) += inv-icm42600-i2c.o
inv-icm42600-i2c-y += inv_icm42600_i2c.o
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
index c3f433ad3af6..b1e4fde27d25 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
@@ -10,14 +10,15 @@
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/math64.h>
-#include <linux/iio/iio.h>
+
#include <linux/iio/buffer.h>
+#include <linux/iio/common/inv_sensors_timestamp.h>
+#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
#include "inv_icm42600.h"
#include "inv_icm42600_temp.h"
#include "inv_icm42600_buffer.h"
-#include "inv_icm42600_timestamp.h"
#define INV_ICM42600_ACCEL_CHAN(_modifier, _index, _ext_info) \
{ \
@@ -98,7 +99,7 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
- struct inv_icm42600_timestamp *ts = iio_priv(indio_dev);
+ struct inv_sensors_timestamp *ts = iio_priv(indio_dev);
struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT;
unsigned int fifo_en = 0;
unsigned int sleep_temp = 0;
@@ -126,7 +127,7 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev,
}
/* update data FIFO write */
- inv_icm42600_timestamp_apply_odr(ts, 0, 0, 0);
+ inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
if (ret)
goto out_unlock;
@@ -311,7 +312,7 @@ static int inv_icm42600_accel_write_odr(struct iio_dev *indio_dev,
int val, int val2)
{
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
- struct inv_icm42600_timestamp *ts = iio_priv(indio_dev);
+ struct inv_sensors_timestamp *ts = iio_priv(indio_dev);
struct device *dev = regmap_get_device(st->map);
unsigned int idx;
struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT;
@@ -330,8 +331,8 @@ static int inv_icm42600_accel_write_odr(struct iio_dev *indio_dev,
pm_runtime_get_sync(dev);
mutex_lock(&st->lock);
- ret = inv_icm42600_timestamp_update_odr(ts, inv_icm42600_odr_to_period(conf.odr),
- iio_buffer_enabled(indio_dev));
+ ret = inv_sensors_timestamp_update_odr(ts, inv_icm42600_odr_to_period(conf.odr),
+ iio_buffer_enabled(indio_dev));
if (ret)
goto out_unlock;
@@ -707,7 +708,8 @@ struct iio_dev *inv_icm42600_accel_init(struct inv_icm42600_state *st)
{
struct device *dev = regmap_get_device(st->map);
const char *name;
- struct inv_icm42600_timestamp *ts;
+ struct inv_sensors_timestamp_chip ts_chip;
+ struct inv_sensors_timestamp *ts;
struct iio_dev *indio_dev;
int ret;
@@ -719,8 +721,15 @@ struct iio_dev *inv_icm42600_accel_init(struct inv_icm42600_state *st)
if (!indio_dev)
return ERR_PTR(-ENOMEM);
+ /*
+ * the chip clock runs at 32kHz, i.e. a 31250ns period
+ * jitter is +/- 2% (20 per mille)
+ */
+ ts_chip.clock_period = 31250;
+ ts_chip.jitter = 20;
+ ts_chip.init_period = inv_icm42600_odr_to_period(st->conf.accel.odr);
ts = iio_priv(indio_dev);
- inv_icm42600_timestamp_init(ts, inv_icm42600_odr_to_period(st->conf.accel.odr));
+ inv_sensors_timestamp_init(ts, &ts_chip);
iio_device_set_drvdata(indio_dev, st);
indio_dev->name = name;
@@ -745,7 +754,7 @@ struct iio_dev *inv_icm42600_accel_init(struct inv_icm42600_state *st)
int inv_icm42600_accel_parse_fifo(struct iio_dev *indio_dev)
{
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
- struct inv_icm42600_timestamp *ts = iio_priv(indio_dev);
+ struct inv_sensors_timestamp *ts = iio_priv(indio_dev);
ssize_t i, size;
unsigned int no;
const void *accel, *gyro, *timestamp;
@@ -768,15 +777,15 @@ int inv_icm42600_accel_parse_fifo(struct iio_dev *indio_dev)
/* update odr */
if (odr & INV_ICM42600_SENSOR_ACCEL)
- inv_icm42600_timestamp_apply_odr(ts, st->fifo.period,
- st->fifo.nb.total, no);
+ inv_sensors_timestamp_apply_odr(ts, st->fifo.period,
+ st->fifo.nb.total, no);
/* buffer is copied to userspace, zeroing it to avoid any data leak */
memset(&buffer, 0, sizeof(buffer));
memcpy(&buffer.accel, accel, sizeof(buffer.accel));
/* convert 8 bits FIFO temperature in high resolution format */
buffer.temp = temp ? (*temp * 64) : 0;
- ts_val = inv_icm42600_timestamp_pop(ts);
+ ts_val = inv_sensors_timestamp_pop(ts);
iio_push_to_buffers_with_timestamp(indio_dev, &buffer, ts_val);
}
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
index 32d7f8364230..6ef1df9d60b7 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
@@ -9,11 +9,12 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/delay.h>
-#include <linux/iio/iio.h>
+
#include <linux/iio/buffer.h>
+#include <linux/iio/common/inv_sensors_timestamp.h>
+#include <linux/iio/iio.h>
#include "inv_icm42600.h"
-#include "inv_icm42600_timestamp.h"
#include "inv_icm42600_buffer.h"
/* FIFO header: 1 byte */
@@ -275,12 +276,12 @@ static int inv_icm42600_buffer_preenable(struct iio_dev *indio_dev)
{
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
struct device *dev = regmap_get_device(st->map);
- struct inv_icm42600_timestamp *ts = iio_priv(indio_dev);
+ struct inv_sensors_timestamp *ts = iio_priv(indio_dev);
pm_runtime_get_sync(dev);
mutex_lock(&st->lock);
- inv_icm42600_timestamp_reset(ts);
+ inv_sensors_timestamp_reset(ts);
mutex_unlock(&st->lock);
return 0;
@@ -504,7 +505,7 @@ int inv_icm42600_buffer_fifo_read(struct inv_icm42600_state *st,
int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st)
{
- struct inv_icm42600_timestamp *ts;
+ struct inv_sensors_timestamp *ts;
int ret;
if (st->fifo.nb.total == 0)
@@ -512,8 +513,8 @@ int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st)
/* handle gyroscope timestamp and FIFO data parsing */
ts = iio_priv(st->indio_gyro);
- inv_icm42600_timestamp_interrupt(ts, st->fifo.period, st->fifo.nb.total,
- st->fifo.nb.gyro, st->timestamp.gyro);
+ inv_sensors_timestamp_interrupt(ts, st->fifo.period, st->fifo.nb.total,
+ st->fifo.nb.gyro, st->timestamp.gyro);
if (st->fifo.nb.gyro > 0) {
ret = inv_icm42600_gyro_parse_fifo(st->indio_gyro);
if (ret)
@@ -522,8 +523,8 @@ int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st)
/* handle accelerometer timestamp and FIFO data parsing */
ts = iio_priv(st->indio_accel);
- inv_icm42600_timestamp_interrupt(ts, st->fifo.period, st->fifo.nb.total,
- st->fifo.nb.accel, st->timestamp.accel);
+ inv_sensors_timestamp_interrupt(ts, st->fifo.period, st->fifo.nb.total,
+ st->fifo.nb.accel, st->timestamp.accel);
if (st->fifo.nb.accel > 0) {
ret = inv_icm42600_accel_parse_fifo(st->indio_accel);
if (ret)
@@ -536,7 +537,7 @@ int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st)
int inv_icm42600_buffer_hwfifo_flush(struct inv_icm42600_state *st,
unsigned int count)
{
- struct inv_icm42600_timestamp *ts;
+ struct inv_sensors_timestamp *ts;
int64_t gyro_ts, accel_ts;
int ret;
@@ -552,9 +553,9 @@ int inv_icm42600_buffer_hwfifo_flush(struct inv_icm42600_state *st,
if (st->fifo.nb.gyro > 0) {
ts = iio_priv(st->indio_gyro);
- inv_icm42600_timestamp_interrupt(ts, st->fifo.period,
- st->fifo.nb.total, st->fifo.nb.gyro,
- gyro_ts);
+ inv_sensors_timestamp_interrupt(ts, st->fifo.period,
+ st->fifo.nb.total, st->fifo.nb.gyro,
+ gyro_ts);
ret = inv_icm42600_gyro_parse_fifo(st->indio_gyro);
if (ret)
return ret;
@@ -562,9 +563,9 @@ int inv_icm42600_buffer_hwfifo_flush(struct inv_icm42600_state *st,
if (st->fifo.nb.accel > 0) {
ts = iio_priv(st->indio_accel);
- inv_icm42600_timestamp_interrupt(ts, st->fifo.period,
- st->fifo.nb.total, st->fifo.nb.accel,
- accel_ts);
+ inv_sensors_timestamp_interrupt(ts, st->fifo.period,
+ st->fifo.nb.total, st->fifo.nb.accel,
+ accel_ts);
ret = inv_icm42600_accel_parse_fifo(st->indio_accel);
if (ret)
return ret;
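
To keep the conversion readable, here is the call order into the shared timestamp module as it is used across the accel, gyro and buffer paths above (editor's summary sketch, not part of the patch):

	inv_sensors_timestamp_init(ts, &ts_chip);             /* once, when the iio_dev is created */
	inv_sensors_timestamp_update_odr(ts, period, fifo);   /* whenever the sensor ODR changes */
	inv_sensors_timestamp_reset(ts);                       /* at buffer preenable */
	inv_sensors_timestamp_interrupt(ts, fifo_period, fifo_nb, sensor_nb, it_ts); /* per interrupt or flush */
	inv_sensors_timestamp_apply_odr(ts, fifo_period, fifo_nb, no);               /* while parsing FIFO data */
	ts_val = inv_sensors_timestamp_pop(ts);                /* one timestamp per pushed sample */
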
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
index 7b3a2a0dc2cb..a5e81906e37e 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
@@ -15,11 +15,11 @@
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/regmap.h>
+
#include <linux/iio/iio.h>
#include "inv_icm42600.h"
#include "inv_icm42600_buffer.h"
-#include "inv_icm42600_timestamp.h"
static const struct regmap_range_cfg inv_icm42600_regmap_ranges[] = {
{
@@ -516,6 +516,17 @@ static int inv_icm42600_irq_init(struct inv_icm42600_state *st, int irq,
"inv_icm42600", st);
}
+static int inv_icm42600_timestamp_setup(struct inv_icm42600_state *st)
+{
+ unsigned int val;
+
+ /* enable timestamp register */
+ val = INV_ICM42600_TMST_CONFIG_TMST_TO_REGS_EN |
+ INV_ICM42600_TMST_CONFIG_TMST_EN;
+ return regmap_update_bits(st->map, INV_ICM42600_REG_TMST_CONFIG,
+ INV_ICM42600_TMST_CONFIG_MASK, val);
+}
+
static int inv_icm42600_enable_regulator_vddio(struct inv_icm42600_state *st)
{
int ret;
@@ -788,3 +799,4 @@ EXPORT_NS_GPL_DEV_PM_OPS(inv_icm42600_pm_ops, IIO_ICM42600) = {
MODULE_AUTHOR("InvenSense, Inc.");
MODULE_DESCRIPTION("InvenSense ICM-426xx device driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_INV_SENSORS_TIMESTAMP);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
index 9d94a8518e3c..3bf946e56e1d 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
@@ -10,14 +10,15 @@
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/math64.h>
-#include <linux/iio/iio.h>
+
#include <linux/iio/buffer.h>
+#include <linux/iio/common/inv_sensors_timestamp.h>
+#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
#include "inv_icm42600.h"
#include "inv_icm42600_temp.h"
#include "inv_icm42600_buffer.h"
-#include "inv_icm42600_timestamp.h"
#define INV_ICM42600_GYRO_CHAN(_modifier, _index, _ext_info) \
{ \
@@ -98,7 +99,7 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
- struct inv_icm42600_timestamp *ts = iio_priv(indio_dev);
+ struct inv_sensors_timestamp *ts = iio_priv(indio_dev);
struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT;
unsigned int fifo_en = 0;
unsigned int sleep_gyro = 0;
@@ -126,7 +127,7 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev,
}
/* update data FIFO write */
- inv_icm42600_timestamp_apply_odr(ts, 0, 0, 0);
+ inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
ret = inv_icm42600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
if (ret)
goto out_unlock;
@@ -323,7 +324,7 @@ static int inv_icm42600_gyro_write_odr(struct iio_dev *indio_dev,
int val, int val2)
{
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
- struct inv_icm42600_timestamp *ts = iio_priv(indio_dev);
+ struct inv_sensors_timestamp *ts = iio_priv(indio_dev);
struct device *dev = regmap_get_device(st->map);
unsigned int idx;
struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT;
@@ -342,8 +343,8 @@ static int inv_icm42600_gyro_write_odr(struct iio_dev *indio_dev,
pm_runtime_get_sync(dev);
mutex_lock(&st->lock);
- ret = inv_icm42600_timestamp_update_odr(ts, inv_icm42600_odr_to_period(conf.odr),
- iio_buffer_enabled(indio_dev));
+ ret = inv_sensors_timestamp_update_odr(ts, inv_icm42600_odr_to_period(conf.odr),
+ iio_buffer_enabled(indio_dev));
if (ret)
goto out_unlock;
@@ -718,7 +719,8 @@ struct iio_dev *inv_icm42600_gyro_init(struct inv_icm42600_state *st)
{
struct device *dev = regmap_get_device(st->map);
const char *name;
- struct inv_icm42600_timestamp *ts;
+ struct inv_sensors_timestamp_chip ts_chip;
+ struct inv_sensors_timestamp *ts;
struct iio_dev *indio_dev;
int ret;
@@ -730,8 +732,15 @@ struct iio_dev *inv_icm42600_gyro_init(struct inv_icm42600_state *st)
if (!indio_dev)
return ERR_PTR(-ENOMEM);
+ /*
+ * the chip clock runs at 32kHz, i.e. a 31250ns period
+ * jitter is +/- 2% (20 per mille)
+ */
+ ts_chip.clock_period = 31250;
+ ts_chip.jitter = 20;
+ ts_chip.init_period = inv_icm42600_odr_to_period(st->conf.gyro.odr);
ts = iio_priv(indio_dev);
- inv_icm42600_timestamp_init(ts, inv_icm42600_odr_to_period(st->conf.gyro.odr));
+ inv_sensors_timestamp_init(ts, &ts_chip);
iio_device_set_drvdata(indio_dev, st);
indio_dev->name = name;
@@ -757,7 +766,7 @@ struct iio_dev *inv_icm42600_gyro_init(struct inv_icm42600_state *st)
int inv_icm42600_gyro_parse_fifo(struct iio_dev *indio_dev)
{
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
- struct inv_icm42600_timestamp *ts = iio_priv(indio_dev);
+ struct inv_sensors_timestamp *ts = iio_priv(indio_dev);
ssize_t i, size;
unsigned int no;
const void *accel, *gyro, *timestamp;
@@ -780,15 +789,15 @@ int inv_icm42600_gyro_parse_fifo(struct iio_dev *indio_dev)
/* update odr */
if (odr & INV_ICM42600_SENSOR_GYRO)
- inv_icm42600_timestamp_apply_odr(ts, st->fifo.period,
- st->fifo.nb.total, no);
+ inv_sensors_timestamp_apply_odr(ts, st->fifo.period,
+ st->fifo.nb.total, no);
/* buffer is copied to userspace, zeroing it to avoid any data leak */
memset(&buffer, 0, sizeof(buffer));
memcpy(&buffer.gyro, gyro, sizeof(buffer.gyro));
/* convert 8 bits FIFO temperature in high resolution format */
buffer.temp = temp ? (*temp * 64) : 0;
- ts_val = inv_icm42600_timestamp_pop(ts);
+ ts_val = inv_sensors_timestamp_pop(ts);
iio_push_to_buffers_with_timestamp(indio_dev, &buffer, ts_val);
}
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_timestamp.h b/drivers/iio/imu/inv_icm42600/inv_icm42600_timestamp.h
deleted file mode 100644
index 4e4f331d4fe4..000000000000
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_timestamp.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2020 Invensense, Inc.
- */
-
-#ifndef INV_ICM42600_TIMESTAMP_H_
-#define INV_ICM42600_TIMESTAMP_H_
-
-#include <linux/kernel.h>
-
-struct inv_icm42600_state;
-
-/**
- * struct inv_icm42600_timestamp_interval - timestamps interval
- * @lo: interval lower bound
- * @up: interval upper bound
- */
-struct inv_icm42600_timestamp_interval {
- int64_t lo;
- int64_t up;
-};
-
-/**
- * struct inv_icm42600_timestamp_acc - accumulator for computing an estimation
- * @val: current estimation of the value, the mean of all values
- * @idx: current index of the next free place in values table
- * @values: table of all measured values, use for computing the mean
- */
-struct inv_icm42600_timestamp_acc {
- uint32_t val;
- size_t idx;
- uint32_t values[32];
-};
-
-/**
- * struct inv_icm42600_timestamp - timestamp management states
- * @it: interrupts interval timestamps
- * @timestamp: store last timestamp for computing next data timestamp
- * @mult: current internal period multiplier
- * @new_mult: new set internal period multiplier (not yet effective)
- * @period: measured current period of the sensor
- * @chip_period: accumulator for computing internal chip period
- */
-struct inv_icm42600_timestamp {
- struct inv_icm42600_timestamp_interval it;
- int64_t timestamp;
- uint32_t mult;
- uint32_t new_mult;
- uint32_t period;
- struct inv_icm42600_timestamp_acc chip_period;
-};
-
-void inv_icm42600_timestamp_init(struct inv_icm42600_timestamp *ts,
- uint32_t period);
-
-int inv_icm42600_timestamp_setup(struct inv_icm42600_state *st);
-
-int inv_icm42600_timestamp_update_odr(struct inv_icm42600_timestamp *ts,
- uint32_t period, bool fifo);
-
-void inv_icm42600_timestamp_interrupt(struct inv_icm42600_timestamp *ts,
- uint32_t fifo_period, size_t fifo_nb,
- size_t sensor_nb, int64_t timestamp);
-
-static inline int64_t
-inv_icm42600_timestamp_pop(struct inv_icm42600_timestamp *ts)
-{
- ts->timestamp += ts->period;
- return ts->timestamp;
-}
-
-void inv_icm42600_timestamp_apply_odr(struct inv_icm42600_timestamp *ts,
- uint32_t fifo_period, size_t fifo_nb,
- unsigned int fifo_no);
-
-static inline void
-inv_icm42600_timestamp_reset(struct inv_icm42600_timestamp *ts)
-{
- const struct inv_icm42600_timestamp_interval interval_init = {0LL, 0LL};
-
- ts->it = interval_init;
- ts->timestamp = 0;
-}
-
-#endif
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 64dd73dcc4ba..5f62e4fd475d 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -7,6 +7,7 @@ config INV_MPU6050_IIO
tristate
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+ select IIO_INV_SENSORS_TIMESTAMP
config INV_MPU6050_I2C
tristate "Invensense MPU6050 devices (I2C)"
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 592a6e60b413..29f906c884bd 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -12,12 +12,15 @@
#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/iio/iio.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+
+#include <linux/iio/common/inv_sensors_timestamp.h>
+#include <linux/iio/iio.h>
+
#include "inv_mpu_iio.h"
#include "inv_mpu_magn.h"
@@ -521,6 +524,7 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
int result;
u8 d;
struct inv_mpu6050_state *st = iio_priv(indio_dev);
+ struct inv_sensors_timestamp_chip timestamp;
result = inv_mpu6050_set_gyro_fsr(st, st->chip_config.fsr);
if (result)
@@ -544,12 +548,12 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
if (result)
return result;
- /*
- * Internal chip period is 1ms (1kHz).
- * Let's use at the beginning the theorical value before measuring
- * with interrupt timestamps.
- */
- st->chip_period = NSEC_PER_MSEC;
+ /* clock jitter is +/- 2% */
+ timestamp.clock_period = NSEC_PER_SEC / INV_MPU6050_INTERNAL_FREQ_HZ;
+ timestamp.jitter = 20;
+ timestamp.init_period =
+ NSEC_PER_SEC / INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider);
+ inv_sensors_timestamp_init(&st->timestamp, &timestamp);
/* magn chip init, noop if not present in the chip */
result = inv_mpu_magn_probe(st);
@@ -936,6 +940,8 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int fifo_rate;
+ u32 fifo_period;
+ bool fifo_on;
u8 d;
int result;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -952,12 +958,21 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
d = INV_MPU6050_FIFO_RATE_TO_DIVIDER(fifo_rate);
/* compute back the fifo rate to handle truncation cases */
fifo_rate = INV_MPU6050_DIVIDER_TO_FIFO_RATE(d);
+ fifo_period = NSEC_PER_SEC / fifo_rate;
mutex_lock(&st->lock);
if (d == st->chip_config.divider) {
result = 0;
goto fifo_rate_fail_unlock;
}
+
+ fifo_on = st->chip_config.accl_fifo_enable ||
+ st->chip_config.gyro_fifo_enable ||
+ st->chip_config.magn_fifo_enable;
+ result = inv_sensors_timestamp_update_odr(&st->timestamp, fifo_period, fifo_on);
+ if (result)
+ goto fifo_rate_fail_unlock;
+
result = pm_runtime_resume_and_get(pdev);
if (result)
goto fifo_rate_fail_unlock;
@@ -1330,6 +1345,9 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
st->reg = hw_info[st->chip_type].reg;
memcpy(&st->chip_config, hw_info[st->chip_type].config,
sizeof(st->chip_config));
+ st->data = devm_kzalloc(regmap_get_device(st->map), st->hw->fifo_size, GFP_KERNEL);
+ if (st->data == NULL)
+ return -ENOMEM;
/* check chip self-identification */
result = regmap_read(st->map, INV_MPU6050_REG_WHOAMI, &regval);
@@ -1785,3 +1803,4 @@ EXPORT_NS_GPL_DEV_PM_OPS(inv_mpu_pmops, IIO_MPU6050) = {
MODULE_AUTHOR("Invensense Corporation");
MODULE_DESCRIPTION("Invensense device MPU6050 driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_INV_SENSORS_TIMESTAMP);
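
The numbers fed into the timestamp module follow directly from the chip's 1kHz internal sample rate described in the removed comment (editor's worked example; the 50 Hz FIFO rate is only illustrative):

	clock_period = NSEC_PER_SEC / 1000      = 1000000 ns  (1 ms internal period)
	jitter       = 20 per mille of 1 ms     = +/- 20 us accepted deviation
	fifo_period  = NSEC_PER_SEC / 50 Hz     = 20000000 ns (20 ms between samples)
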
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index b4ab2c397d0f..ed5a96e78df0 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -9,15 +9,17 @@
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/mutex.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/buffer.h>
+#include <linux/platform_data/invensense_mpu6050.h>
#include <linux/regmap.h>
-#include <linux/iio/sysfs.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/common/inv_sensors_timestamp.h>
+#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/trigger.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
-#include <linux/platform_data/invensense_mpu6050.h>
+#include <linux/iio/sysfs.h>
/**
* struct inv_mpu6050_reg_map - Notable registers.
@@ -170,16 +172,14 @@ struct inv_mpu6050_hw {
* @map regmap pointer.
* @irq interrupt number.
* @irq_mask the int_pin_cfg mask to configure interrupt type.
- * @chip_period: chip internal period estimation (~1kHz).
- * @it_timestamp: timestamp from previous interrupt.
- * @data_timestamp: timestamp for next data sample.
+ * @timestamp: timestamping module
* @vdd_supply: VDD voltage regulator for the chip.
* @vddio_supply I/O voltage regulator for the chip.
* @magn_disabled: magnetometer disabled for backward compatibility reason.
* @magn_raw_to_gauss: coefficient to convert mag raw value to Gauss.
* @magn_orient: magnetometer sensor chip orientation if available.
* @suspended_sensors: sensors mask of sensors turned off for suspend
- * @data: dma safe buffer used for bulk reads.
+ * @data: read buffer used for bulk reads.
*/
struct inv_mpu6050_state {
struct mutex lock;
@@ -196,16 +196,14 @@ struct inv_mpu6050_state {
int irq;
u8 irq_mask;
unsigned skip_samples;
- s64 chip_period;
- s64 it_timestamp;
- s64 data_timestamp;
+ struct inv_sensors_timestamp timestamp;
struct regulator *vdd_supply;
struct regulator *vddio_supply;
bool magn_disabled;
s32 magn_raw_to_gauss[3];
struct iio_mount_matrix magn_orient;
unsigned int suspended_sensors;
- u8 data[INV_MPU6050_OUTPUT_DATA_SIZE] __aligned(IIO_DMA_MINALIGN);
+ u8 *data;
};
/*register and associated bit definition*/
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
index 45c37525c2f1..66d4ba088e70 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
@@ -13,81 +13,10 @@
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/math64.h>
-#include "inv_mpu_iio.h"
-
-/**
- * inv_mpu6050_update_period() - Update chip internal period estimation
- *
- * @st: driver state
- * @timestamp: the interrupt timestamp
- * @nb: number of data set in the fifo
- *
- * This function uses interrupt timestamps to estimate the chip period and
- * to choose the data timestamp to come.
- */
-static void inv_mpu6050_update_period(struct inv_mpu6050_state *st,
- s64 timestamp, size_t nb)
-{
- /* Period boundaries for accepting timestamp */
- const s64 period_min =
- (NSEC_PER_MSEC * (100 - INV_MPU6050_TS_PERIOD_JITTER)) / 100;
- const s64 period_max =
- (NSEC_PER_MSEC * (100 + INV_MPU6050_TS_PERIOD_JITTER)) / 100;
- const s32 divider = INV_MPU6050_FREQ_DIVIDER(st);
- s64 delta, interval;
- bool use_it_timestamp = false;
-
- if (st->it_timestamp == 0) {
- /* not initialized, forced to use it_timestamp */
- use_it_timestamp = true;
- } else if (nb == 1) {
- /*
- * Validate the use of it timestamp by checking if interrupt
- * has been delayed.
- * nb > 1 means interrupt was delayed for more than 1 sample,
- * so it's obviously not good.
- * Compute the chip period between 2 interrupts for validating.
- */
- delta = div_s64(timestamp - st->it_timestamp, divider);
- if (delta > period_min && delta < period_max) {
- /* update chip period and use it timestamp */
- st->chip_period = (st->chip_period + delta) / 2;
- use_it_timestamp = true;
- }
- }
- if (use_it_timestamp) {
- /*
- * Manage case of multiple samples in the fifo (nb > 1):
- * compute timestamp corresponding to the first sample using
- * estimated chip period.
- */
- interval = (nb - 1) * st->chip_period * divider;
- st->data_timestamp = timestamp - interval;
- }
+#include <linux/iio/common/inv_sensors_timestamp.h>
- /* save it timestamp */
- st->it_timestamp = timestamp;
-}
-
-/**
- * inv_mpu6050_get_timestamp() - Return the current data timestamp
- *
- * @st: driver state
- * @return: current data timestamp
- *
- * This function returns the current data timestamp and prepares for next one.
- */
-static s64 inv_mpu6050_get_timestamp(struct inv_mpu6050_state *st)
-{
- s64 ts;
-
- /* return current data timestamp and increment */
- ts = st->data_timestamp;
- st->data_timestamp += st->chip_period * INV_MPU6050_FREQ_DIVIDER(st);
-
- return ts;
-}
+#include "inv_mpu_iio.h"
static int inv_reset_fifo(struct iio_dev *indio_dev)
{
@@ -121,7 +50,9 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
size_t bytes_per_datum;
int result;
u16 fifo_count;
+ u32 fifo_period;
s64 timestamp;
+ u8 data[INV_MPU6050_OUTPUT_DATA_SIZE];
int int_status;
size_t i, nb;
@@ -175,21 +106,30 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p)
goto flush_fifo;
}
- /* compute and process all complete datum */
+ /* compute and process only complete data sets */
nb = fifo_count / bytes_per_datum;
- inv_mpu6050_update_period(st, pf->timestamp, nb);
+ fifo_count = nb * bytes_per_datum;
+ /* each FIFO record contains all sensors, so FIFO and sensor sample counts match */
+ fifo_period = NSEC_PER_SEC / INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider);
+ inv_sensors_timestamp_interrupt(&st->timestamp, fifo_period, nb, nb, pf->timestamp);
+ inv_sensors_timestamp_apply_odr(&st->timestamp, fifo_period, nb, 0);
+
+ /* clear the local data buffer to avoid leaking kernel data */
+ memset(data, 0, sizeof(data));
+
+ /* read all data once and process each sample */
+ result = regmap_noinc_read(st->map, st->reg->fifo_r_w, st->data, fifo_count);
+ if (result)
+ goto flush_fifo;
for (i = 0; i < nb; ++i) {
- result = regmap_noinc_read(st->map, st->reg->fifo_r_w,
- st->data, bytes_per_datum);
- if (result)
- goto flush_fifo;
/* skip first samples if needed */
if (st->skip_samples) {
st->skip_samples--;
continue;
}
- timestamp = inv_mpu6050_get_timestamp(st);
- iio_push_to_buffers_with_timestamp(indio_dev, st->data, timestamp);
+ memcpy(data, &st->data[i * bytes_per_datum], bytes_per_datum);
+ timestamp = inv_sensors_timestamp_pop(&st->timestamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, data, timestamp);
}
end_session:
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 882546897255..676704f9151f 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -4,6 +4,9 @@
*/
#include <linux/pm_runtime.h>
+
+#include <linux/iio/common/inv_sensors_timestamp.h>
+
#include "inv_mpu_iio.h"
static unsigned int inv_scan_query_mpu6050(struct iio_dev *indio_dev)
@@ -106,7 +109,8 @@ int inv_mpu6050_prepare_fifo(struct inv_mpu6050_state *st, bool enable)
int ret;
if (enable) {
- st->it_timestamp = 0;
+ /* reset timestamping */
+ inv_sensors_timestamp_reset(&st->timestamp);
/* reset FIFO */
d = st->chip_config.user_ctrl | INV_MPU6050_BIT_FIFO_RST;
ret = regmap_write(st->map, st->reg->user_ctrl, d);
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index adcba832e6fa..d752e9c0499b 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* The industrial I/O core
+/*
+ * The industrial I/O core
*
* Copyright (c) 2008 Jonathan Cameron
*
@@ -183,7 +184,9 @@ static const char * const iio_chan_info_postfix[] = {
* @indio_dev: Device structure whose ID is being queried
*
* The IIO device ID is a unique index used for example for the naming
- * of the character device /dev/iio\:device[ID]
+ * of the character device /dev/iio\:device[ID].
+ *
+ * Returns: Unique ID for the device.
*/
int iio_device_id(struct iio_dev *indio_dev)
{
@@ -196,14 +199,16 @@ EXPORT_SYMBOL_GPL(iio_device_id);
/**
* iio_buffer_enabled() - helper function to test if the buffer is enabled
* @indio_dev: IIO device structure for device
+ *
+ * Returns: True, if the buffer is enabled.
*/
bool iio_buffer_enabled(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- return iio_dev_opaque->currentmode
- & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE |
- INDIO_BUFFER_SOFTWARE);
+ return iio_dev_opaque->currentmode &
+ (INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE |
+ INDIO_BUFFER_TRIGGERED);
}
EXPORT_SYMBOL_GPL(iio_buffer_enabled);
@@ -225,6 +230,9 @@ EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry);
* iio_find_channel_from_si() - get channel from its scan index
* @indio_dev: device
* @si: scan index to match
+ *
+ * Returns:
+ * Constant pointer to iio_chan_spec, if scan index matches, NULL on failure.
*/
const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
@@ -249,7 +257,9 @@ EXPORT_SYMBOL(iio_read_const_attr);
/**
* iio_device_set_clock() - Set current timestamping clock for the device
* @indio_dev: IIO device structure containing the device
- * @clock_id: timestamping clock posix identifier to set.
+ * @clock_id: timestamping clock POSIX identifier to set.
+ *
+ * Returns: 0 on success, or a negative error code.
*/
int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
{
@@ -275,6 +285,8 @@ EXPORT_SYMBOL(iio_device_set_clock);
/**
* iio_device_get_clock() - Retrieve current timestamping clock for the device
* @indio_dev: IIO device structure containing the device
+ *
+ * Returns: Clock ID of the current timestamping clock for the device.
*/
clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
{
@@ -287,6 +299,8 @@ EXPORT_SYMBOL(iio_device_get_clock);
/**
* iio_get_time_ns() - utility function to get a time stamp for events etc
* @indio_dev: device
+ *
+ * Returns: Timestamp of the event in nanoseconds.
*/
s64 iio_get_time_ns(const struct iio_dev *indio_dev)
{
@@ -372,8 +386,8 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
}
iio_dev_opaque->read_buf_len = snprintf(iio_dev_opaque->read_buf,
- sizeof(iio_dev_opaque->read_buf),
- "0x%X\n", val);
+ sizeof(iio_dev_opaque->read_buf),
+ "0x%X\n", val);
return simple_read_from_buffer(userbuf, count, ppos,
iio_dev_opaque->read_buf,
@@ -389,7 +403,7 @@ static ssize_t iio_debugfs_write_reg(struct file *file,
char buf[80];
int ret;
- count = min_t(size_t, count, (sizeof(buf)-1));
+ count = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
@@ -476,8 +490,7 @@ static ssize_t iio_read_channel_ext_info(struct device *dev,
static ssize_t iio_write_channel_ext_info(struct device *dev,
struct device_attribute *attr,
- const char *buf,
- size_t len)
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
@@ -524,7 +537,7 @@ ssize_t iio_enum_read(struct iio_dev *indio_dev,
i = e->get(indio_dev, chan);
if (i < 0)
return i;
- else if (i >= e->num_items || !e->items[i])
+ if (i >= e->num_items || !e->items[i])
return -EINVAL;
return sysfs_emit(buf, "%s\n", e->items[i]);
@@ -569,9 +582,9 @@ static int iio_setup_mount_idmatrix(const struct device *dev,
ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
const struct iio_chan_spec *chan, char *buf)
{
- const struct iio_mount_matrix *mtx = ((iio_get_mount_matrix_t *)
- priv)(indio_dev, chan);
+ const struct iio_mount_matrix *mtx;
+ mtx = ((iio_get_mount_matrix_t *)priv)(indio_dev, chan);
if (IS_ERR(mtx))
return PTR_ERR(mtx);
@@ -594,7 +607,7 @@ EXPORT_SYMBOL_GPL(iio_show_mount_matrix);
* If device is assigned no mounting matrix property, a default 3x3 identity
* matrix will be filled in.
*
- * Return: 0 if success, or a negative error code on failure.
+ * Returns: 0 if success, or a negative error code on failure.
*/
int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix)
{
@@ -692,9 +705,9 @@ static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
* @vals: Pointer to the values, exact meaning depends on the
* type parameter.
*
- * Return: 0 by default, a negative number on failure or the
- * total number of characters written for a type that belongs
- * to the IIO_VAL_* constant.
+ * Returns:
+ * 0 by default, a negative number on failure or the total number of characters
+ * written for a type that belongs to the IIO_VAL_* constant.
*/
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
{
@@ -847,8 +860,8 @@ static ssize_t iio_read_channel_info_avail(struct device *dev,
* @fract: The fractional part of the number
* @scale_db: True if this should parse as dB
*
- * Returns 0 on success, or a negative error code if the string could not be
- * parsed.
+ * Returns:
+ * 0 on success, or a negative error code if the string could not be parsed.
*/
static int __iio_str_to_fixpoint(const char *str, int fract_mult,
int *integer, int *fract, bool scale_db)
@@ -917,8 +930,8 @@ static int __iio_str_to_fixpoint(const char *str, int fract_mult,
* @integer: The integer part of the number
* @fract: The fractional part of the number
*
- * Returns 0 on success, or a negative error code if the string could not be
- * parsed.
+ * Returns:
+ * 0 on success, or a negative error code if the string could not be parsed.
*/
int iio_str_to_fixpoint(const char *str, int fract_mult,
int *integer, int *fract)
@@ -1009,14 +1022,12 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
if (chan->modified && (shared_by == IIO_SEPARATE)) {
if (chan->extend_name)
full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
- iio_modifier_names[chan
- ->channel2],
+ iio_modifier_names[chan->channel2],
chan->extend_name,
postfix);
else
full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
- iio_modifier_names[chan
- ->channel2],
+ iio_modifier_names[chan->channel2],
postfix);
} else {
if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
@@ -1217,7 +1228,7 @@ static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
&iio_dev_opaque->channel_attr_list);
if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
continue;
- else if (ret < 0)
+ if (ret < 0)
return ret;
attrcount++;
}
@@ -1255,7 +1266,7 @@ static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
kfree(avail_postfix);
if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
continue;
- else if (ret < 0)
+ if (ret < 0)
return ret;
attrcount++;
}
@@ -1400,50 +1411,42 @@ static ssize_t label_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(label);
+static const char * const clock_names[] = {
+ [CLOCK_REALTIME] = "realtime",
+ [CLOCK_MONOTONIC] = "monotonic",
+ [CLOCK_PROCESS_CPUTIME_ID] = "process_cputime_id",
+ [CLOCK_THREAD_CPUTIME_ID] = "thread_cputime_id",
+ [CLOCK_MONOTONIC_RAW] = "monotonic_raw",
+ [CLOCK_REALTIME_COARSE] = "realtime_coarse",
+ [CLOCK_MONOTONIC_COARSE] = "monotonic_coarse",
+ [CLOCK_BOOTTIME] = "boottime",
+ [CLOCK_REALTIME_ALARM] = "realtime_alarm",
+ [CLOCK_BOOTTIME_ALARM] = "boottime_alarm",
+ [CLOCK_SGI_CYCLE] = "sgi_cycle",
+ [CLOCK_TAI] = "tai",
+};
+
static ssize_t current_timestamp_clock_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
const clockid_t clk = iio_device_get_clock(indio_dev);
- const char *name;
- ssize_t sz;
switch (clk) {
case CLOCK_REALTIME:
- name = "realtime\n";
- sz = sizeof("realtime\n");
- break;
case CLOCK_MONOTONIC:
- name = "monotonic\n";
- sz = sizeof("monotonic\n");
- break;
case CLOCK_MONOTONIC_RAW:
- name = "monotonic_raw\n";
- sz = sizeof("monotonic_raw\n");
- break;
case CLOCK_REALTIME_COARSE:
- name = "realtime_coarse\n";
- sz = sizeof("realtime_coarse\n");
- break;
case CLOCK_MONOTONIC_COARSE:
- name = "monotonic_coarse\n";
- sz = sizeof("monotonic_coarse\n");
- break;
case CLOCK_BOOTTIME:
- name = "boottime\n";
- sz = sizeof("boottime\n");
- break;
case CLOCK_TAI:
- name = "tai\n";
- sz = sizeof("tai\n");
break;
default:
BUG();
}
- memcpy(buf, name, sz);
- return sz;
+ return sysfs_emit(buf, "%s\n", clock_names[clk]);
}
static ssize_t current_timestamp_clock_store(struct device *dev,
@@ -1453,22 +1456,23 @@ static ssize_t current_timestamp_clock_store(struct device *dev,
clockid_t clk;
int ret;
- if (sysfs_streq(buf, "realtime"))
- clk = CLOCK_REALTIME;
- else if (sysfs_streq(buf, "monotonic"))
- clk = CLOCK_MONOTONIC;
- else if (sysfs_streq(buf, "monotonic_raw"))
- clk = CLOCK_MONOTONIC_RAW;
- else if (sysfs_streq(buf, "realtime_coarse"))
- clk = CLOCK_REALTIME_COARSE;
- else if (sysfs_streq(buf, "monotonic_coarse"))
- clk = CLOCK_MONOTONIC_COARSE;
- else if (sysfs_streq(buf, "boottime"))
- clk = CLOCK_BOOTTIME;
- else if (sysfs_streq(buf, "tai"))
- clk = CLOCK_TAI;
- else
+ ret = sysfs_match_string(clock_names, buf);
+ if (ret < 0)
+ return ret;
+ clk = ret;
+
+ switch (clk) {
+ case CLOCK_REALTIME:
+ case CLOCK_MONOTONIC:
+ case CLOCK_MONOTONIC_RAW:
+ case CLOCK_REALTIME_COARSE:
+ case CLOCK_MONOTONIC_COARSE:
+ case CLOCK_BOOTTIME:
+ case CLOCK_TAI:
+ break;
+ default:
return -EINVAL;
+ }
ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
if (ret)
@@ -1484,7 +1488,7 @@ int iio_device_register_sysfs_group(struct iio_dev *indio_dev,
const struct attribute_group **new, **old = iio_dev_opaque->groups;
unsigned int cnt = iio_dev_opaque->groupcounter;
- new = krealloc(old, sizeof(*new) * (cnt + 2), GFP_KERNEL);
+ new = krealloc_array(old, cnt + 2, sizeof(*new), GFP_KERNEL);
if (!new)
return -ENOMEM;
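krealloc_array() is the overflow-checked counterpart of the open-coded multiplication; assuming the usual helpers from <linux/overflow.h>, the old line is roughly equivalent to krealloc(old, size_mul(cnt + 2, sizeof(*new)), GFP_KERNEL), except that krealloc_array() returns NULL outright when the element-count multiplication would overflow.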
@@ -1621,7 +1625,10 @@ const struct device_type iio_device_type = {
* iio_device_alloc() - allocate an iio_dev from a driver
* @parent: Parent device.
* @sizeof_priv: Space to allocate for private structure.
- **/
+ *
+ * Returns:
+ * Pointer to allocated iio_dev on success, NULL on failure.
+ */
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
{
struct iio_dev_opaque *iio_dev_opaque;
@@ -1677,7 +1684,7 @@ EXPORT_SYMBOL(iio_device_alloc);
/**
* iio_device_free() - free an iio_dev from a driver
* @dev: the iio_dev associated with the device
- **/
+ */
void iio_device_free(struct iio_dev *dev)
{
if (dev)
@@ -1698,7 +1705,7 @@ static void devm_iio_device_release(void *iio_dev)
* Managed iio_device_alloc. iio_dev allocated with this function is
* automatically freed on driver detach.
*
- * RETURNS:
+ * Returns:
* Pointer to allocated iio_dev on success, NULL on failure.
*/
struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv)
@@ -1725,8 +1732,8 @@ EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
* @filp: File structure for iio device used to keep and later access
* private data
*
- * Return: 0 on success or -EBUSY if the device is already opened
- **/
+ * Returns: 0 on success or -EBUSY if the device is already opened
+ */
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
struct iio_dev_opaque *iio_dev_opaque =
@@ -1759,7 +1766,7 @@ static int iio_chrdev_open(struct inode *inode, struct file *filp)
* @inode: Inode structure pointer for the char device
* @filp: File structure pointer for the char device
*
- * Return: 0 for successful release
+ * Returns: 0 for successful release.
*/
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
@@ -1798,7 +1805,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
mutex_lock(&iio_dev_opaque->info_exist_lock);
- /**
+ /*
* The NULL check here is required to prevent crashing when a device
* is being removed while userspace would still have open file handles
* to try to access this device.
@@ -1976,7 +1983,7 @@ EXPORT_SYMBOL(__iio_device_register);
/**
* iio_device_unregister() - unregister a device from the IIO subsystem
* @indio_dev: Device structure representing the device.
- **/
+ */
void iio_device_unregister(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
@@ -2027,7 +2034,7 @@ EXPORT_SYMBOL_GPL(__devm_iio_device_register);
*
* Use with iio_device_release_direct_mode()
*
- * Returns: 0 on success, -EBUSY on failure
+ * Returns: 0 on success, -EBUSY on failure.
*/
int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
{
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index f77ce49d4c36..19f7a91157ee 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -252,6 +252,8 @@ static const char * const iio_ev_info_text[] = {
[IIO_EV_INFO_TIMEOUT] = "timeout",
[IIO_EV_INFO_RESET_TIMEOUT] = "reset_timeout",
[IIO_EV_INFO_TAP2_MIN_DELAY] = "tap2_min_delay",
+ [IIO_EV_INFO_RUNNING_PERIOD] = "runningperiod",
+ [IIO_EV_INFO_RUNNING_COUNT] = "runningcount",
};
static enum iio_event_direction iio_ev_attr_dir(struct iio_dev_attr *attr)
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index f207e36b12cc..18f83158f637 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -313,7 +313,7 @@ int iio_trigger_attach_poll_func(struct iio_trigger *trig,
/* Enable trigger in driver */
if (trig->ops && trig->ops->set_trigger_state && notinuse) {
ret = trig->ops->set_trigger_state(trig, true);
- if (ret < 0)
+ if (ret)
goto out_free_irq;
}
diff --git a/drivers/iio/light/cm3605.c b/drivers/iio/light/cm3605.c
index 0b30db77f78b..e7f0b81b7f5a 100644
--- a/drivers/iio/light/cm3605.c
+++ b/drivers/iio/light/cm3605.c
@@ -227,7 +227,7 @@ static int cm3605_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- ret = dev_err_probe(dev, irq, "failed to get irq\n");
+ ret = irq;
goto out_disable_aset;
}
diff --git a/drivers/iio/light/rohm-bu27008.c b/drivers/iio/light/rohm-bu27008.c
index b50bf8973d9a..6a6d77805091 100644
--- a/drivers/iio/light/rohm-bu27008.c
+++ b/drivers/iio/light/rohm-bu27008.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * BU27008 ROHM Colour Sensor
+ * ROHM Colour Sensor driver for
+ * - BU27008 RGBC sensor
+ * - BU27010 RGBC + Flickering sensor
*
* Copyright (c) 2023, ROHM Semiconductor.
*/
@@ -22,6 +24,25 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
+/*
+ * A word about register address and mask definitions.
+ *
+ * At a quick glance at the data-sheet register tables, the BU27010 has all the
+ * registers that the BU27008 has. On top of that, the BU27010 adds a couple of
+ * new ones.
+ *
+ * So, all BU27008_REG_* definitions also apply to the BU27010, but none of the
+ * BU27010_REG_* definitions are present on the BU27008. This makes sense as the
+ * BU27010 just adds some features (flicker FIFO, more power control) on top of
+ * the BU27008.
+ *
+ * Unfortunately, part of the wheel has been re-invented. Even though the names
+ * of the registers have stayed the same, pretty much all of the functionality
+ * provided by the registers has moved. The contents of all the MODE_CONTROL
+ * registers differ between the BU27008 and the BU27010.
+ *
+ * Chip-specific mapping from register addresses/bits to functionality is done
+ * in bu27_chip_data structures.
+ */
#define BU27008_REG_SYSTEM_CONTROL 0x40
#define BU27008_MASK_SW_RESET BIT(7)
#define BU27008_MASK_PART_ID GENMASK(5, 0)
@@ -52,6 +73,56 @@
#define BU27008_REG_MANUFACTURER_ID 0x92
#define BU27008_REG_MAX BU27008_REG_MANUFACTURER_ID
+/* BU27010 specific definitions */
+
+#define BU27010_MASK_SW_RESET BIT(7)
+#define BU27010_ID 0x1b
+#define BU27010_REG_POWER 0x3e
+#define BU27010_MASK_POWER BIT(0)
+
+#define BU27010_REG_RESET 0x3f
+#define BU27010_MASK_RESET BIT(0)
+#define BU27010_RESET_RELEASE BU27010_MASK_RESET
+
+#define BU27010_MASK_MEAS_EN BIT(1)
+
+#define BU27010_MASK_CHAN_SEL GENMASK(7, 6)
+#define BU27010_MASK_MEAS_MODE GENMASK(5, 4)
+#define BU27010_MASK_RGBC_GAIN GENMASK(3, 0)
+
+#define BU27010_MASK_DATA3_GAIN GENMASK(7, 6)
+#define BU27010_MASK_DATA2_GAIN GENMASK(5, 4)
+#define BU27010_MASK_DATA1_GAIN GENMASK(3, 2)
+#define BU27010_MASK_DATA0_GAIN GENMASK(1, 0)
+
+#define BU27010_MASK_FLC_MODE BIT(7)
+#define BU27010_MASK_FLC_GAIN GENMASK(4, 0)
+
+#define BU27010_REG_MODE_CONTROL4 0x44
+/* If flicker is ever to be supported, the IRQ must be handled as a field */
+#define BU27010_IRQ_DIS_ALL GENMASK(1, 0)
+#define BU27010_DRDY_EN BIT(0)
+#define BU27010_MASK_INT_SEL GENMASK(1, 0)
+
+#define BU27010_REG_MODE_CONTROL5 0x45
+#define BU27010_MASK_RGB_VALID BIT(7)
+#define BU27010_MASK_FLC_VALID BIT(6)
+#define BU27010_MASK_WAIT_EN BIT(3)
+#define BU27010_MASK_FIFO_EN BIT(2)
+#define BU27010_MASK_RGB_EN BIT(1)
+#define BU27010_MASK_FLC_EN BIT(0)
+
+#define BU27010_REG_DATA_FLICKER_LO 0x56
+#define BU27010_MASK_DATA_FLICKER_HI GENMASK(2, 0)
+#define BU27010_REG_FLICKER_COUNT 0x5a
+#define BU27010_REG_FIFO_LEVEL_LO 0x5b
+#define BU27010_MASK_FIFO_LEVEL_HI BIT(0)
+#define BU27010_REG_FIFO_DATA_LO 0x5d
+#define BU27010_REG_FIFO_DATA_HI 0x5e
+#define BU27010_MASK_FIFO_DATA_HI GENMASK(2, 0)
+#define BU27010_REG_MANUFACTURER_ID 0x92
+#define BU27010_REG_MAX BU27010_REG_MANUFACTURER_ID
+
/**
* enum bu27008_chan_type - BU27008 channel types
* @BU27008_RED: Red channel. Always via data0.
@@ -117,6 +188,17 @@ static const unsigned long bu27008_scan_masks[] = {
*/
#define BU27008_SCALE_1X 16
+/*
+ * On the BU27010 the available scales come from HW gains 1x - 4096x and
+ * integration times 55, 100, 200 and 400 ms. The integration time adds a
+ * further gain of 1x, 2x, 4x or 8x.
+ *
+ * => Max total gain is HWGAIN * gain by integration time (4096 * 8)
+ *
+ * Using NANO precision for the scale, we must use a scale of 64x for gain 1x
+ * to avoid precision loss.
+ */
+#define BU27010_SCALE_1X 64
+
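As a quick sanity check on the scale choice (my arithmetic, not taken from the patch): the maximum total gain is 4096 * 8 = 32768. With NANO precision, a 1x scale of 64 gives 64 * 10^9 / 32768 = 1953125 nano-units at full gain, an exact integer; keeping the BU27008 value of 16 would give 488281.25 and lose precision.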
/* See the data sheet for the "Gain Setting" table */
#define BU27008_GSEL_1X 0x00
#define BU27008_GSEL_4X 0x08
@@ -152,10 +234,44 @@ static const struct iio_gain_sel_pair bu27008_gains_ir[] = {
GAIN_SCALE_GAIN(1024, BU27008_GSEL_1024X),
};
+#define BU27010_GSEL_1X 0x00 /* 000000 */
+#define BU27010_GSEL_4X 0x08 /* 001000 */
+#define BU27010_GSEL_16X 0x09 /* 001001 */
+#define BU27010_GSEL_64X 0x0e /* 001110 */
+#define BU27010_GSEL_256X 0x1e /* 011110 */
+#define BU27010_GSEL_1024X 0x2e /* 101110 */
+#define BU27010_GSEL_4096X 0x3f /* 111111 */
+
+static const struct iio_gain_sel_pair bu27010_gains[] = {
+ GAIN_SCALE_GAIN(1, BU27010_GSEL_1X),
+ GAIN_SCALE_GAIN(4, BU27010_GSEL_4X),
+ GAIN_SCALE_GAIN(16, BU27010_GSEL_16X),
+ GAIN_SCALE_GAIN(64, BU27010_GSEL_64X),
+ GAIN_SCALE_GAIN(256, BU27010_GSEL_256X),
+ GAIN_SCALE_GAIN(1024, BU27010_GSEL_1024X),
+ GAIN_SCALE_GAIN(4096, BU27010_GSEL_4096X),
+};
+
+static const struct iio_gain_sel_pair bu27010_gains_ir[] = {
+ GAIN_SCALE_GAIN(2, BU27010_GSEL_1X),
+ GAIN_SCALE_GAIN(4, BU27010_GSEL_4X),
+ GAIN_SCALE_GAIN(16, BU27010_GSEL_16X),
+ GAIN_SCALE_GAIN(64, BU27010_GSEL_64X),
+ GAIN_SCALE_GAIN(256, BU27010_GSEL_256X),
+ GAIN_SCALE_GAIN(1024, BU27010_GSEL_1024X),
+ GAIN_SCALE_GAIN(4096, BU27010_GSEL_4096X),
+};
+
#define BU27008_MEAS_MODE_100MS 0x00
#define BU27008_MEAS_MODE_55MS 0x01
#define BU27008_MEAS_MODE_200MS 0x02
#define BU27008_MEAS_MODE_400MS 0x04
+
+#define BU27010_MEAS_MODE_100MS 0x00
+#define BU27010_MEAS_MODE_55MS 0x03
+#define BU27010_MEAS_MODE_200MS 0x01
+#define BU27010_MEAS_MODE_400MS 0x02
+
#define BU27008_MEAS_TIME_MAX_MS 400
static const struct iio_itime_sel_mul bu27008_itimes[] = {
@@ -165,6 +281,13 @@ static const struct iio_itime_sel_mul bu27008_itimes[] = {
GAIN_SCALE_ITIME_US(55000, BU27008_MEAS_MODE_55MS, 1),
};
+static const struct iio_itime_sel_mul bu27010_itimes[] = {
+ GAIN_SCALE_ITIME_US(400000, BU27010_MEAS_MODE_400MS, 8),
+ GAIN_SCALE_ITIME_US(200000, BU27010_MEAS_MODE_200MS, 4),
+ GAIN_SCALE_ITIME_US(100000, BU27010_MEAS_MODE_100MS, 2),
+ GAIN_SCALE_ITIME_US(55000, BU27010_MEAS_MODE_55MS, 1),
+};
+
/*
* All the RGBC channels share the same gain.
* IR gain can be fine-tuned from the gain set for the RGBC by 2 bit, but this
@@ -211,7 +334,35 @@ static const struct iio_chan_spec bu27008_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(BU27008_NUM_CHANS),
};
+struct bu27008_data;
+
+struct bu27_chip_data {
+ const char *name;
+ int (*chip_init)(struct bu27008_data *data);
+ int (*get_gain_sel)(struct bu27008_data *data, int *sel);
+ int (*write_gain_sel)(struct bu27008_data *data, int sel);
+ const struct regmap_config *regmap_cfg;
+ const struct iio_gain_sel_pair *gains;
+ const struct iio_gain_sel_pair *gains_ir;
+ const struct iio_itime_sel_mul *itimes;
+ int num_gains;
+ int num_gains_ir;
+ int num_itimes;
+ int scale1x;
+
+ int drdy_en_reg;
+ int drdy_en_mask;
+ int meas_en_reg;
+ int meas_en_mask;
+ int valid_reg;
+ int chan_sel_reg;
+ int chan_sel_mask;
+ int int_time_mask;
+ u8 part_id;
+};
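A minimal sketch (hypothetical helper name; the patch does this inline in the trigger and measurement paths) of how the chip data is meant to be consumed, so that callers never touch chip-specific registers directly:

	/* Hedged sketch: enabling the data-ready IRQ via the chip data. */
	static int bu27_drdy_enable(struct bu27008_data *data, bool en)
	{
		if (en)
			return regmap_set_bits(data->regmap, data->cd->drdy_en_reg,
					       data->cd->drdy_en_mask);

		return regmap_clear_bits(data->regmap, data->cd->drdy_en_reg,
					 data->cd->drdy_en_mask);
	}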
+
struct bu27008_data {
+ const struct bu27_chip_data *cd;
struct regmap *regmap;
struct iio_trigger *trig;
struct device *dev;
@@ -240,11 +391,29 @@ static const struct regmap_range bu27008_volatile_ranges[] = {
},
};
+static const struct regmap_range bu27010_volatile_ranges[] = {
+ {
+ .range_min = BU27010_REG_RESET, /* RSTB */
+ .range_max = BU27008_REG_SYSTEM_CONTROL, /* RESET */
+ }, {
+ .range_min = BU27010_REG_MODE_CONTROL5, /* VALID bits */
+ .range_max = BU27010_REG_MODE_CONTROL5,
+ }, {
+ .range_min = BU27008_REG_DATA0_LO,
+ .range_max = BU27010_REG_FIFO_DATA_HI,
+ },
+};
+
static const struct regmap_access_table bu27008_volatile_regs = {
.yes_ranges = &bu27008_volatile_ranges[0],
.n_yes_ranges = ARRAY_SIZE(bu27008_volatile_ranges),
};
+static const struct regmap_access_table bu27010_volatile_regs = {
+ .yes_ranges = &bu27010_volatile_ranges[0],
+ .n_yes_ranges = ARRAY_SIZE(bu27010_volatile_ranges),
+};
+
static const struct regmap_range bu27008_read_only_ranges[] = {
{
.range_min = BU27008_REG_DATA0_LO,
@@ -255,11 +424,26 @@ static const struct regmap_range bu27008_read_only_ranges[] = {
},
};
+static const struct regmap_range bu27010_read_only_ranges[] = {
+ {
+ .range_min = BU27008_REG_DATA0_LO,
+ .range_max = BU27010_REG_FIFO_DATA_HI,
+ }, {
+ .range_min = BU27010_REG_MANUFACTURER_ID,
+ .range_max = BU27010_REG_MANUFACTURER_ID,
+ }
+};
+
static const struct regmap_access_table bu27008_ro_regs = {
.no_ranges = &bu27008_read_only_ranges[0],
.n_no_ranges = ARRAY_SIZE(bu27008_read_only_ranges),
};
+static const struct regmap_access_table bu27010_ro_regs = {
+ .no_ranges = &bu27010_read_only_ranges[0],
+ .n_no_ranges = ARRAY_SIZE(bu27010_read_only_ranges),
+};
+
static const struct regmap_config bu27008_regmap = {
.reg_bits = 8,
.val_bits = 8,
@@ -282,50 +466,16 @@ static const struct regmap_config bu27008_regmap = {
.disable_locking = true,
};
-#define BU27008_MAX_VALID_RESULT_WAIT_US 50000
-#define BU27008_VALID_RESULT_WAIT_QUANTA_US 1000
-
-static int bu27008_chan_read_data(struct bu27008_data *data, int reg, int *val)
-{
- int ret, valid;
- __le16 tmp;
-
- ret = regmap_read_poll_timeout(data->regmap, BU27008_REG_MODE_CONTROL3,
- valid, (valid & BU27008_MASK_VALID),
- BU27008_VALID_RESULT_WAIT_QUANTA_US,
- BU27008_MAX_VALID_RESULT_WAIT_US);
- if (ret)
- return ret;
-
- ret = regmap_bulk_read(data->regmap, reg, &tmp, sizeof(tmp));
- if (ret)
- dev_err(data->dev, "Reading channel data failed\n");
-
- *val = le16_to_cpu(tmp);
-
- return ret;
-}
-
-static int bu27008_get_gain(struct bu27008_data *data, struct iio_gts *gts, int *gain)
-{
- int ret, sel;
-
- ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL2, &sel);
- if (ret)
- return ret;
-
- sel = FIELD_GET(BU27008_MASK_RGBC_GAIN, sel);
+static const struct regmap_config bu27010_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
- ret = iio_gts_find_gain_by_sel(gts, sel);
- if (ret < 0) {
- dev_err(data->dev, "unknown gain value 0x%x\n", sel);
- return ret;
- }
-
- *gain = ret;
-
- return 0;
-}
+ .max_register = BU27010_REG_MAX,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_table = &bu27010_volatile_regs,
+ .wr_table = &bu27010_ro_regs,
+ .disable_locking = true,
+};
static int bu27008_write_gain_sel(struct bu27008_data *data, int sel)
{
@@ -368,6 +518,264 @@ static int bu27008_write_gain_sel(struct bu27008_data *data, int sel)
BU27008_MASK_RGBC_GAIN, regval);
}
+static int bu27010_write_gain_sel(struct bu27008_data *data, int sel)
+{
+ unsigned int regval;
+ int ret, chan_selector;
+
+ /*
+ * The gain 'selector' is composed of two registers. The selector is a
+ * 6-bit value: the 4 high bits are the RGBC gain field in the MODE_CONTROL1
+ * register and the two low bits are the channel specific gain in
+ * MODE_CONTROL2.
+ *
+ * Take the 4 high bits of the whole 6-bit selector and prepare the
+ * MODE_CONTROL1 value (RGBC gain part).
+ regval = FIELD_PREP(BU27010_MASK_RGBC_GAIN, (sel >> 2));
+
+ ret = regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL1,
+ BU27010_MASK_RGBC_GAIN, regval);
+ if (ret)
+ return ret;
+
+ /*
+ * The two low bits of the selector must be written for all 4 channels
+ * in the MODE_CONTROL2 register. Copy these two bits to all channels.
+ */
+ chan_selector = sel & GENMASK(1, 0);
+
+ regval = FIELD_PREP(BU27010_MASK_DATA0_GAIN, chan_selector);
+ regval |= FIELD_PREP(BU27010_MASK_DATA1_GAIN, chan_selector);
+ regval |= FIELD_PREP(BU27010_MASK_DATA2_GAIN, chan_selector);
+ regval |= FIELD_PREP(BU27010_MASK_DATA3_GAIN, chan_selector);
+
+ return regmap_write(data->regmap, BU27008_REG_MODE_CONTROL2, regval);
+}
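To make the split concrete (my example, not from the patch): for BU27010_GSEL_1024X = 0x2e = 0b101110, sel >> 2 = 0b1011 lands in the RGBC gain field of MODE_CONTROL1, and sel & GENMASK(1, 0) = 0b10 is copied into all four DATAx_GAIN fields of MODE_CONTROL2. The read path below recomposes the same 6-bit value from those two registers.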
+
+static int bu27008_get_gain_sel(struct bu27008_data *data, int *sel)
+{
+ int ret;
+
+ /*
+ * If we always "lock" the gain selectors for all channels to prevent
+ * unsupported configs, then it does not matter which channel is used
+ * we can just return selector from any of them.
+ *
+ * This, however is not true if we decide to support only 4X and 16X
+ * and then individual gains for channels. Currently this is not the
+ * case.
+ *
+ * If we some day decide to support individual gains, then we need to
+ * have channel information here.
+ */
+
+ ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL2, sel);
+ if (ret)
+ return ret;
+
+ *sel = FIELD_GET(BU27008_MASK_RGBC_GAIN, *sel);
+
+ return 0;
+}
+
+static int bu27010_get_gain_sel(struct bu27008_data *data, int *sel)
+{
+ int ret, tmp;
+
+ /*
+ * We always "lock" the gain selectors for all channels to prevent
+ * unsupported configs. It does not matter which channel is used;
+ * we can just return the selector from any of them.
+ *
+ * Read the channel0 gain.
+ */
+ ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL2, sel);
+ if (ret)
+ return ret;
+
+ *sel = FIELD_GET(BU27010_MASK_DATA0_GAIN, *sel);
+
+ /* Read the shared gain */
+ ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL1, &tmp);
+ if (ret)
+ return ret;
+
+ /*
+ * The gain selector is made as a combination of common RGBC gain and
+ * the channel specific gain. The channel specific gain forms the low
+ * bits of the selector and the RGBC gain is appended right after it.
+ *
+ * Compose the selector from channel0 gain and shared RGBC gain.
+ */
+ *sel |= FIELD_GET(BU27010_MASK_RGBC_GAIN, tmp) << fls(BU27010_MASK_DATA0_GAIN);
+
+ return ret;
+}
+
+static int bu27008_chip_init(struct bu27008_data *data)
+{
+ int ret;
+
+ ret = regmap_write_bits(data->regmap, BU27008_REG_SYSTEM_CONTROL,
+ BU27008_MASK_SW_RESET, BU27008_MASK_SW_RESET);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "Sensor reset failed\n");
+
+ /*
+ * The data-sheet does not say how long the IC reset takes. However, it
+ * does say the minimum time the IC needs after power is applied before it
+ * can take inputs is 100 us. I'd assume > 1 ms is enough.
+ */
+ msleep(1);
+
+ ret = regmap_reinit_cache(data->regmap, data->cd->regmap_cfg);
+ if (ret)
+ dev_err(data->dev, "Failed to reinit reg cache\n");
+
+ return ret;
+}
+
+static int bu27010_chip_init(struct bu27008_data *data)
+{
+ int ret;
+
+ ret = regmap_write_bits(data->regmap, BU27008_REG_SYSTEM_CONTROL,
+ BU27010_MASK_SW_RESET, BU27010_MASK_SW_RESET);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "Sensor reset failed\n");
+
+ msleep(1);
+
+ /* Power ON */
+ ret = regmap_write_bits(data->regmap, BU27010_REG_POWER,
+ BU27010_MASK_POWER, BU27010_MASK_POWER);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "Sensor power-on failed\n");
+
+ msleep(1);
+
+ /* Release blocks from reset */
+ ret = regmap_write_bits(data->regmap, BU27010_REG_RESET,
+ BU27010_MASK_RESET, BU27010_RESET_RELEASE);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "Sensor powering failed\n");
+
+ msleep(1);
+
+ /*
+ * IRQ enabling on the BU27010 is done in a peculiar way. The IRQ enable
+ * is not a bit mask where individual IRQs could be enabled, but a field
+ * whose values are:
+ * 00 => IRQs disabled
+ * 01 => Data-ready (RGBC/IR)
+ * 10 => Data-ready (flicker)
+ * 11 => Flicker FIFO
+ *
+ * So, only one IRQ can be enabled at a time, and enabling for example the
+ * flicker FIFO would automatically disable the data-ready IRQ.
+ *
+ * Currently the driver does not support flicker. Hence, we can just treat
+ * the RGBC data-ready as a single bit which can be enabled / disabled.
+ * This works as long as the second bit in the field stays zero. Here we
+ * ensure it gets zeroed.
+ */
+ return regmap_clear_bits(data->regmap, BU27010_REG_MODE_CONTROL4,
+ BU27010_IRQ_DIS_ALL);
+}
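A small sketch of the interrupt-select field as described in the comment above (values taken from it; the enum names are hypothetical, the driver keeps using the raw defines):

	/* Hedged sketch of the 2-bit INT_SEL field on the BU27010. */
	enum bu27010_int_sel {
		BU27010_INT_SEL_NONE	= 0x0,	/* IRQs disabled */
		BU27010_INT_SEL_DRDY	= 0x1,	/* data-ready (RGBC/IR) */
		BU27010_INT_SEL_FLC	= 0x2,	/* data-ready (flicker) */
		BU27010_INT_SEL_FIFO	= 0x3,	/* flicker FIFO */
	};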
+
+static const struct bu27_chip_data bu27010_chip = {
+ .name = "bu27010",
+ .chip_init = bu27010_chip_init,
+ .get_gain_sel = bu27010_get_gain_sel,
+ .write_gain_sel = bu27010_write_gain_sel,
+ .regmap_cfg = &bu27010_regmap,
+ .gains = &bu27010_gains[0],
+ .gains_ir = &bu27010_gains_ir[0],
+ .itimes = &bu27010_itimes[0],
+ .num_gains = ARRAY_SIZE(bu27010_gains),
+ .num_gains_ir = ARRAY_SIZE(bu27010_gains_ir),
+ .num_itimes = ARRAY_SIZE(bu27010_itimes),
+ .scale1x = BU27010_SCALE_1X,
+ .drdy_en_reg = BU27010_REG_MODE_CONTROL4,
+ .drdy_en_mask = BU27010_DRDY_EN,
+ .meas_en_reg = BU27010_REG_MODE_CONTROL5,
+ .meas_en_mask = BU27010_MASK_MEAS_EN,
+ .valid_reg = BU27010_REG_MODE_CONTROL5,
+ .chan_sel_reg = BU27008_REG_MODE_CONTROL1,
+ .chan_sel_mask = BU27010_MASK_CHAN_SEL,
+ .int_time_mask = BU27010_MASK_MEAS_MODE,
+ .part_id = BU27010_ID,
+};
+
+static const struct bu27_chip_data bu27008_chip = {
+ .name = "bu27008",
+ .chip_init = bu27008_chip_init,
+ .get_gain_sel = bu27008_get_gain_sel,
+ .write_gain_sel = bu27008_write_gain_sel,
+ .regmap_cfg = &bu27008_regmap,
+ .gains = &bu27008_gains[0],
+ .gains_ir = &bu27008_gains_ir[0],
+ .itimes = &bu27008_itimes[0],
+ .num_gains = ARRAY_SIZE(bu27008_gains),
+ .num_gains_ir = ARRAY_SIZE(bu27008_gains_ir),
+ .num_itimes = ARRAY_SIZE(bu27008_itimes),
+ .scale1x = BU27008_SCALE_1X,
+ .drdy_en_reg = BU27008_REG_MODE_CONTROL3,
+ .drdy_en_mask = BU27008_MASK_INT_EN,
+ .valid_reg = BU27008_REG_MODE_CONTROL3,
+ .meas_en_reg = BU27008_REG_MODE_CONTROL3,
+ .meas_en_mask = BU27008_MASK_MEAS_EN,
+ .chan_sel_reg = BU27008_REG_MODE_CONTROL3,
+ .chan_sel_mask = BU27008_MASK_CHAN_SEL,
+ .int_time_mask = BU27008_MASK_MEAS_MODE,
+ .part_id = BU27008_ID,
+};
+
+#define BU27008_MAX_VALID_RESULT_WAIT_US 50000
+#define BU27008_VALID_RESULT_WAIT_QUANTA_US 1000
+
+static int bu27008_chan_read_data(struct bu27008_data *data, int reg, int *val)
+{
+ int ret, valid;
+ __le16 tmp;
+
+ ret = regmap_read_poll_timeout(data->regmap, data->cd->valid_reg,
+ valid, (valid & BU27008_MASK_VALID),
+ BU27008_VALID_RESULT_WAIT_QUANTA_US,
+ BU27008_MAX_VALID_RESULT_WAIT_US);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, reg, &tmp, sizeof(tmp));
+ if (ret)
+ dev_err(data->dev, "Reading channel data failed\n");
+
+ *val = le16_to_cpu(tmp);
+
+ return ret;
+}
+
+static int bu27008_get_gain(struct bu27008_data *data, struct iio_gts *gts, int *gain)
+{
+ int ret, sel;
+
+ ret = data->cd->get_gain_sel(data, &sel);
+ if (ret)
+ return ret;
+
+ ret = iio_gts_find_gain_by_sel(gts, sel);
+ if (ret < 0) {
+ dev_err(data->dev, "unknown gain value 0x%x\n", sel);
+ return ret;
+ }
+
+ *gain = ret;
+
+ return 0;
+}
+
static int bu27008_set_gain(struct bu27008_data *data, int gain)
{
int ret;
@@ -376,7 +784,7 @@ static int bu27008_set_gain(struct bu27008_data *data, int gain)
if (ret < 0)
return ret;
- return bu27008_write_gain_sel(data, ret);
+ return data->cd->write_gain_sel(data, ret);
}
static int bu27008_get_int_time_sel(struct bu27008_data *data, int *sel)
@@ -384,15 +792,23 @@ static int bu27008_get_int_time_sel(struct bu27008_data *data, int *sel)
int ret, val;
ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL1, &val);
- *sel = FIELD_GET(BU27008_MASK_MEAS_MODE, val);
+ if (ret)
+ return ret;
- return ret;
+ val &= data->cd->int_time_mask;
+ val >>= ffs(data->cd->int_time_mask) - 1;
+
+ *sel = val;
+
+ return 0;
}
static int bu27008_set_int_time_sel(struct bu27008_data *data, int sel)
{
+ sel <<= ffs(data->cd->int_time_mask) - 1;
+
return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL1,
- BU27008_MASK_MEAS_MODE, sel);
+ data->cd->int_time_mask, sel);
}
static int bu27008_get_int_time_us(struct bu27008_data *data)
@@ -448,8 +864,7 @@ static int bu27008_set_int_time(struct bu27008_data *data, int time)
if (ret < 0)
return ret;
- return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL1,
- BU27008_MASK_MEAS_MODE, ret);
+ return bu27008_set_int_time_sel(data, ret);
}
/* Try to change the time so that the scale is maintained */
@@ -527,10 +942,13 @@ unlock_out:
return ret;
}
-static int bu27008_meas_set(struct bu27008_data *data, int state)
+static int bu27008_meas_set(struct bu27008_data *data, bool enable)
{
- return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL3,
- BU27008_MASK_MEAS_EN, state);
+ if (enable)
+ return regmap_set_bits(data->regmap, data->cd->meas_en_reg,
+ data->cd->meas_en_mask);
+ return regmap_clear_bits(data->regmap, data->cd->meas_en_reg,
+ data->cd->meas_en_mask);
}
static int bu27008_chan_cfg(struct bu27008_data *data,
@@ -543,9 +961,15 @@ static int bu27008_chan_cfg(struct bu27008_data *data,
else
chan_sel = BU27008_CLEAR2_IR3;
- chan_sel = FIELD_PREP(BU27008_MASK_CHAN_SEL, chan_sel);
+ /*
+ * Prepare the bitfield for channel select. FIELD_PREP() works only when
+ * the mask is constant. In our case the mask is assigned based on the
+ * chip type, hence the open-coded FIELD_PREP() here. We don't bother
+ * zeroing the irrelevant bits though - update_bits takes care of that.
+ */
+ chan_sel <<= ffs(data->cd->chan_sel_mask) - 1;
- return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL3,
+ return regmap_update_bits(data->regmap, data->cd->chan_sel_reg,
BU27008_MASK_CHAN_SEL, chan_sel);
}
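The open-coded shift is equivalent to FIELD_PREP() for the constant-mask case; a hedged illustration, not part of the patch:

	/*
	 * Hedged illustration: for a runtime-selected mask,
	 *	val << (ffs(mask) - 1)
	 * places val at the lowest set bit of the mask, which is exactly what
	 * FIELD_PREP(mask, val) does when the mask is a compile-time constant.
	 * E.g. mask = GENMASK(7, 6), val = 2: 2 << 6 == FIELD_PREP(GENMASK(7, 6), 2).
	 */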
@@ -558,7 +982,7 @@ static int bu27008_read_one(struct bu27008_data *data, struct iio_dev *idev,
if (ret)
return ret;
- ret = bu27008_meas_set(data, BU27008_MEAS_EN);
+ ret = bu27008_meas_set(data, true);
if (ret)
return ret;
@@ -574,7 +998,7 @@ static int bu27008_read_one(struct bu27008_data *data, struct iio_dev *idev,
if (!ret)
ret = IIO_VAL_INT;
- if (bu27008_meas_set(data, BU27008_MEAS_DIS))
+ if (bu27008_meas_set(data, false))
dev_warn(data->dev, "measurement disabling failed\n");
return ret;
@@ -669,7 +1093,7 @@ static int bu27008_set_scale(struct bu27008_data *data,
goto unlock_out;
}
- ret = bu27008_write_gain_sel(data, gain_sel);
+ ret = data->cd->write_gain_sel(data, gain_sel);
unlock_out:
mutex_unlock(&data->mutex);
@@ -762,10 +1186,10 @@ static int bu27008_update_scan_mode(struct iio_dev *idev,
chan_sel = BU27008_CLEAR2_IR3;
}
- chan_sel = FIELD_PREP(BU27008_MASK_CHAN_SEL, chan_sel);
+ chan_sel <<= ffs(data->cd->chan_sel_mask) - 1;
- return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL3,
- BU27008_MASK_CHAN_SEL, chan_sel);
+ return regmap_update_bits(data->regmap, data->cd->chan_sel_reg,
+ data->cd->chan_sel_mask, chan_sel);
}
static const struct iio_info bu27008_info = {
@@ -777,46 +1201,18 @@ static const struct iio_info bu27008_info = {
.validate_trigger = iio_validate_own_trigger,
};
-static int bu27008_chip_init(struct bu27008_data *data)
-{
- int ret;
-
- ret = regmap_write_bits(data->regmap, BU27008_REG_SYSTEM_CONTROL,
- BU27008_MASK_SW_RESET, BU27008_MASK_SW_RESET);
- if (ret)
- return dev_err_probe(data->dev, ret, "Sensor reset failed\n");
-
- /*
- * The data-sheet does not tell how long performing the IC reset takes.
- * However, the data-sheet says the minimum time it takes the IC to be
- * able to take inputs after power is applied, is 100 uS. I'd assume
- * > 1 mS is enough.
- */
- msleep(1);
-
- ret = regmap_reinit_cache(data->regmap, &bu27008_regmap);
- if (ret)
- dev_err(data->dev, "Failed to reinit reg cache\n");
-
- return ret;
-}
-
-static int bu27008_set_drdy_irq(struct bu27008_data *data, int state)
-{
- return regmap_update_bits(data->regmap, BU27008_REG_MODE_CONTROL3,
- BU27008_MASK_INT_EN, state);
-}
-
-static int bu27008_trigger_set_state(struct iio_trigger *trig,
- bool state)
+static int bu27008_trigger_set_state(struct iio_trigger *trig, bool state)
{
struct bu27008_data *data = iio_trigger_get_drvdata(trig);
int ret;
+
if (state)
- ret = bu27008_set_drdy_irq(data, BU27008_INT_EN);
+ ret = regmap_set_bits(data->regmap, data->cd->drdy_en_reg,
+ data->cd->drdy_en_mask);
else
- ret = bu27008_set_drdy_irq(data, BU27008_INT_DIS);
+ ret = regmap_clear_bits(data->regmap, data->cd->drdy_en_reg,
+ data->cd->drdy_en_mask);
if (ret)
dev_err(data->dev, "Failed to set trigger state\n");
@@ -852,7 +1248,7 @@ static irqreturn_t bu27008_trigger_handler(int irq, void *p)
* After some measurements, it seems reading the
* BU27008_REG_MODE_CONTROL3 debounces the IRQ line
*/
- ret = regmap_read(data->regmap, BU27008_REG_MODE_CONTROL3, &dummy);
+ ret = regmap_read(data->regmap, data->cd->valid_reg, &dummy);
if (ret < 0)
goto err_read;
@@ -872,14 +1268,14 @@ static int bu27008_buffer_preenable(struct iio_dev *idev)
{
struct bu27008_data *data = iio_priv(idev);
- return bu27008_meas_set(data, BU27008_MEAS_EN);
+ return bu27008_meas_set(data, true);
}
static int bu27008_buffer_postdisable(struct iio_dev *idev)
{
struct bu27008_data *data = iio_priv(idev);
- return bu27008_meas_set(data, BU27008_MEAS_DIS);
+ return bu27008_meas_set(data, false);
}
static const struct iio_buffer_setup_ops bu27008_buffer_ops = {
@@ -952,11 +1348,6 @@ static int bu27008_probe(struct i2c_client *i2c)
struct iio_dev *idev;
int ret;
- regmap = devm_regmap_init_i2c(i2c, &bu27008_regmap);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap),
- "Failed to initialize Regmap\n");
-
idev = devm_iio_device_alloc(dev, sizeof(*data));
if (!idev)
return -ENOMEM;
@@ -967,24 +1358,34 @@ static int bu27008_probe(struct i2c_client *i2c)
data = iio_priv(idev);
+ data->cd = device_get_match_data(&i2c->dev);
+ if (!data->cd)
+ return -ENODEV;
+
+ regmap = devm_regmap_init_i2c(i2c, data->cd->regmap_cfg);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to initialize Regmap\n");
+
ret = regmap_read(regmap, BU27008_REG_SYSTEM_CONTROL, &reg);
if (ret)
return dev_err_probe(dev, ret, "Failed to access sensor\n");
part_id = FIELD_GET(BU27008_MASK_PART_ID, reg);
- if (part_id != BU27008_ID)
+ if (part_id != data->cd->part_id)
dev_warn(dev, "unknown device 0x%x\n", part_id);
- ret = devm_iio_init_iio_gts(dev, BU27008_SCALE_1X, 0, bu27008_gains,
- ARRAY_SIZE(bu27008_gains), bu27008_itimes,
- ARRAY_SIZE(bu27008_itimes), &data->gts);
+ ret = devm_iio_init_iio_gts(dev, data->cd->scale1x, 0, data->cd->gains,
+ data->cd->num_gains, data->cd->itimes,
+ data->cd->num_itimes, &data->gts);
if (ret)
return ret;
- ret = devm_iio_init_iio_gts(dev, BU27008_SCALE_1X, 0, bu27008_gains_ir,
- ARRAY_SIZE(bu27008_gains_ir), bu27008_itimes,
- ARRAY_SIZE(bu27008_itimes), &data->gts_ir);
+ ret = devm_iio_init_iio_gts(dev, data->cd->scale1x, 0, data->cd->gains_ir,
+ data->cd->num_gains_ir, data->cd->itimes,
+ data->cd->num_itimes, &data->gts_ir);
if (ret)
return ret;
@@ -995,12 +1396,12 @@ static int bu27008_probe(struct i2c_client *i2c)
idev->channels = bu27008_channels;
idev->num_channels = ARRAY_SIZE(bu27008_channels);
- idev->name = "bu27008";
+ idev->name = data->cd->name;
idev->info = &bu27008_info;
idev->modes = INDIO_DIRECT_MODE;
idev->available_scan_masks = bu27008_scan_masks;
- ret = bu27008_chip_init(data);
+ ret = data->cd->chip_init(data);
if (ret)
return ret;
@@ -1021,7 +1422,8 @@ static int bu27008_probe(struct i2c_client *i2c)
}
static const struct of_device_id bu27008_of_match[] = {
- { .compatible = "rohm,bu27008" },
+ { .compatible = "rohm,bu27008", .data = &bu27008_chip },
+ { .compatible = "rohm,bu27010", .data = &bu27010_chip },
{ }
};
MODULE_DEVICE_TABLE(of, bu27008_of_match);
@@ -1036,7 +1438,7 @@ static struct i2c_driver bu27008_i2c_driver = {
};
module_i2c_driver(bu27008_i2c_driver);
-MODULE_DESCRIPTION("ROHM BU27008 colour sensor driver");
+MODULE_DESCRIPTION("ROHM BU27008 and BU27010 colour sensor driver");
MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(IIO_GTS_HELPER);
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 7c7362e28821..3a52b09c2823 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
+#include <linux/units.h>
#include <linux/iio/buffer.h>
#include <linux/iio/events.h>
@@ -60,11 +61,15 @@
#define VCNL4200_AL_CONF 0x00 /* Ambient light configuration */
#define VCNL4200_PS_CONF1 0x03 /* Proximity configuration */
+#define VCNL4200_PS_CONF3 0x04 /* Proximity configuration */
#define VCNL4040_PS_THDL_LM 0x06 /* Proximity threshold low */
#define VCNL4040_PS_THDH_LM 0x07 /* Proximity threshold high */
+#define VCNL4040_ALS_THDL_LM 0x02 /* Ambient light threshold low */
+#define VCNL4040_ALS_THDH_LM 0x01 /* Ambient light threshold high */
#define VCNL4200_PS_DATA 0x08 /* Proximity data */
#define VCNL4200_AL_DATA 0x09 /* Ambient light data */
#define VCNL4040_INT_FLAGS 0x0b /* Interrupt register */
+#define VCNL4200_INT_FLAGS 0x0d /* Interrupt register */
#define VCNL4200_DEV_ID 0x0e /* Device ID, slave address and version */
#define VCNL4040_DEV_ID 0x0c /* Device ID and version */
@@ -79,11 +84,19 @@
#define VCNL4000_SELF_TIMED_EN BIT(0) /* start self-timed measurement */
#define VCNL4040_ALS_CONF_ALS_SHUTDOWN BIT(0)
+#define VCNL4040_ALS_CONF_IT GENMASK(7, 6) /* Ambient integration time */
+#define VCNL4040_ALS_CONF_INT_EN BIT(1) /* Ambient light Interrupt enable */
+#define VCNL4040_ALS_CONF_PERS GENMASK(3, 2) /* Ambient interrupt persistence setting */
#define VCNL4040_PS_CONF1_PS_SHUTDOWN BIT(0)
#define VCNL4040_PS_CONF2_PS_IT GENMASK(3, 1) /* Proximity integration time */
+#define VCNL4040_CONF1_PS_PERS GENMASK(5, 4) /* Proximity interrupt persistence setting */
#define VCNL4040_PS_CONF2_PS_INT GENMASK(9, 8) /* Proximity interrupt mode */
+#define VCNL4040_PS_CONF3_MPS GENMASK(6, 5) /* Proximity multi pulse number */
+#define VCNL4040_PS_MS_LED_I GENMASK(10, 8) /* Proximity current */
#define VCNL4040_PS_IF_AWAY BIT(8) /* Proximity event cross low threshold */
#define VCNL4040_PS_IF_CLOSE BIT(9) /* Proximity event cross high threshold */
+#define VCNL4040_ALS_RISING BIT(12) /* Ambient Light cross high threshold */
+#define VCNL4040_ALS_FALLING BIT(13) /* Ambient Light cross low threshold */
/* Bit masks for interrupt registers. */
#define VCNL4010_INT_THR_SEL BIT(0) /* Select threshold interrupt source */
@@ -123,6 +136,44 @@ static const int vcnl4040_ps_it_times[][2] = {
{0, 800},
};
+static const int vcnl4200_ps_it_times[][2] = {
+ {0, 96},
+ {0, 144},
+ {0, 192},
+ {0, 384},
+ {0, 768},
+ {0, 864},
+};
+
+static const int vcnl4040_als_it_times[][2] = {
+ {0, 80000},
+ {0, 160000},
+ {0, 320000},
+ {0, 640000},
+};
+
+static const int vcnl4200_als_it_times[][2] = {
+ {0, 50000},
+ {0, 100000},
+ {0, 200000},
+ {0, 400000},
+};
+
+static const int vcnl4040_ps_calibbias_ua[][2] = {
+ {0, 50000},
+ {0, 75000},
+ {0, 100000},
+ {0, 120000},
+ {0, 140000},
+ {0, 160000},
+ {0, 180000},
+ {0, 200000},
+};
+
+static const int vcnl4040_als_persistence[] = {1, 2, 4, 8};
+static const int vcnl4040_ps_persistence[] = {1, 2, 3, 4};
+static const int vcnl4040_ps_oversampling_ratio[] = {1, 2, 4, 8};
+
#define VCNL4000_SLEEP_DELAY_MS 2000 /* before we enter pm_runtime_suspend */
enum vcnl4000_device_ids {
@@ -145,6 +196,7 @@ struct vcnl4000_data {
int rev;
int al_scale;
u8 ps_int; /* proximity interrupt mode */
+ u8 als_int; /* ambient light interrupt mode */
const struct vcnl4000_chip_spec *chip_spec;
struct mutex vcnl4000_lock;
struct vcnl4200_channel vcnl4200_al;
@@ -164,6 +216,13 @@ struct vcnl4000_chip_spec {
int (*set_power_state)(struct vcnl4000_data *data, bool on);
irqreturn_t (*irq_thread)(int irq, void *priv);
irqreturn_t (*trig_buffer_func)(int irq, void *priv);
+
+ u8 int_reg;
+ const int(*ps_it_times)[][2];
+ const int num_ps_it_times;
+ const int(*als_it_times)[][2];
+ const int num_als_it_times;
+ const unsigned int ulux_step;
};
static const struct i2c_device_id vcnl4000_id[] = {
@@ -263,7 +322,7 @@ static int vcnl4200_set_power_state(struct vcnl4000_data *data, bool on)
int ret;
/* Do not power down if interrupts are enabled */
- if (!on && data->ps_int)
+ if (!on && (data->ps_int || data->als_int))
return 0;
ret = vcnl4000_write_als_enable(data, on);
@@ -308,6 +367,7 @@ static int vcnl4200_init(struct vcnl4000_data *data)
data->rev = (ret >> 8) & 0xf;
data->ps_int = 0;
+ data->als_int = 0;
data->vcnl4200_al.reg = VCNL4200_AL_DATA;
data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
@@ -317,16 +377,15 @@ static int vcnl4200_init(struct vcnl4000_data *data)
data->vcnl4200_al.sampling_rate = ktime_set(0, 60000 * 1000);
/* Default wait time is 4.8ms, add 20% tolerance. */
data->vcnl4200_ps.sampling_rate = ktime_set(0, 5760 * 1000);
- data->al_scale = 24000;
break;
case VCNL4040_PROD_ID:
/* Default wait time is 80ms, add 20% tolerance. */
data->vcnl4200_al.sampling_rate = ktime_set(0, 96000 * 1000);
/* Default wait time is 5ms, add 20% tolerance. */
data->vcnl4200_ps.sampling_rate = ktime_set(0, 6000 * 1000);
- data->al_scale = 120000;
break;
}
+ data->al_scale = data->chip_spec->ulux_step;
mutex_init(&data->vcnl4200_al.lock);
mutex_init(&data->vcnl4200_ps.lock);
@@ -496,6 +555,60 @@ static int vcnl4000_set_pm_runtime_state(struct vcnl4000_data *data, bool on)
return ret;
}
+static int vcnl4040_read_als_it(struct vcnl4000_data *data, int *val, int *val2)
+{
+ int ret;
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_AL_CONF);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(VCNL4040_ALS_CONF_IT, ret);
+ if (ret >= data->chip_spec->num_als_it_times)
+ return -EINVAL;
+
+ *val = (*data->chip_spec->als_it_times)[ret][0];
+ *val2 = (*data->chip_spec->als_it_times)[ret][1];
+
+ return 0;
+}
+
+static ssize_t vcnl4040_write_als_it(struct vcnl4000_data *data, int val)
+{
+ unsigned int i;
+ int ret;
+ u16 regval;
+
+ for (i = 0; i < data->chip_spec->num_als_it_times; i++) {
+ if (val == (*data->chip_spec->als_it_times)[i][1])
+ break;
+ }
+
+ if (i == data->chip_spec->num_als_it_times)
+ return -EINVAL;
+
+ data->vcnl4200_al.sampling_rate = ktime_set(0, val * 1200);
+ data->al_scale = div_u64(mul_u32_u32(data->chip_spec->ulux_step,
+ (*data->chip_spec->als_it_times)[0][1]),
+ val);
+
+ mutex_lock(&data->vcnl4000_lock);
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_AL_CONF);
+ if (ret < 0)
+ goto out_unlock;
+
+ regval = FIELD_PREP(VCNL4040_ALS_CONF_IT, i);
+ regval |= (ret & ~VCNL4040_ALS_CONF_IT);
+ ret = i2c_smbus_write_word_data(data->client,
+ VCNL4200_AL_CONF,
+ regval);
+
+out_unlock:
+ mutex_unlock(&data->vcnl4000_lock);
+ return ret;
+}
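A quick worked example of the rescaling above (my numbers, derived from the tables added in this patch): for the VCNL4040, ulux_step is 100000 and the first (default) integration time is 80000 us, so selecting 320000 us yields al_scale = 100000 * 80000 / 320000 = 25000, i.e. the per-count lux value shrinks by the same factor of 4 that the integration time grew.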
+
static int vcnl4040_read_ps_it(struct vcnl4000_data *data, int *val, int *val2)
{
int ret;
@@ -506,11 +619,11 @@ static int vcnl4040_read_ps_it(struct vcnl4000_data *data, int *val, int *val2)
ret = FIELD_GET(VCNL4040_PS_CONF2_PS_IT, ret);
- if (ret >= ARRAY_SIZE(vcnl4040_ps_it_times))
+ if (ret >= data->chip_spec->num_ps_it_times)
return -EINVAL;
- *val = vcnl4040_ps_it_times[ret][0];
- *val2 = vcnl4040_ps_it_times[ret][1];
+ *val = (*data->chip_spec->ps_it_times)[ret][0];
+ *val2 = (*data->chip_spec->ps_it_times)[ret][1];
return 0;
}
@@ -521,8 +634,8 @@ static ssize_t vcnl4040_write_ps_it(struct vcnl4000_data *data, int val)
int ret, index = -1;
u16 regval;
- for (i = 0; i < ARRAY_SIZE(vcnl4040_ps_it_times); i++) {
- if (val == vcnl4040_ps_it_times[i][1]) {
+ for (i = 0; i < data->chip_spec->num_ps_it_times; i++) {
+ if (val == (*data->chip_spec->ps_it_times)[i][1]) {
index = i;
break;
}
@@ -531,6 +644,8 @@ static ssize_t vcnl4040_write_ps_it(struct vcnl4000_data *data, int val)
if (index < 0)
return -EINVAL;
+ data->vcnl4200_ps.sampling_rate = ktime_set(0, val * 60 * NSEC_PER_USEC);
+
mutex_lock(&data->vcnl4000_lock);
ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF1);
@@ -547,6 +662,224 @@ out:
return ret;
}
+static ssize_t vcnl4040_read_als_period(struct vcnl4000_data *data, int *val, int *val2)
+{
+ int ret, ret_pers, it;
+ int64_t val_c;
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_AL_CONF);
+ if (ret < 0)
+ return ret;
+
+ ret_pers = FIELD_GET(VCNL4040_ALS_CONF_PERS, ret);
+ if (ret_pers >= ARRAY_SIZE(vcnl4040_als_persistence))
+ return -EINVAL;
+
+ it = FIELD_GET(VCNL4040_ALS_CONF_IT, ret);
+ if (it >= data->chip_spec->num_als_it_times)
+ return -EINVAL;
+
+ val_c = mul_u32_u32((*data->chip_spec->als_it_times)[it][1],
+ vcnl4040_als_persistence[ret_pers]);
+ *val = div_u64_rem(val_c, MICRO, val2);
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
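For example (my arithmetic): with a 320000 us ALS integration time and a persistence of 4, val_c = 1280000 us, so div_u64_rem() returns *val = 1 and *val2 = 280000, i.e. a 1.28 s event period.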
+
+static ssize_t vcnl4040_write_als_period(struct vcnl4000_data *data, int val, int val2)
+{
+ unsigned int i;
+ int ret, it;
+ u16 regval;
+ u64 val_n = mul_u32_u32(val, MICRO) + val2;
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_AL_CONF);
+ if (ret < 0)
+ return ret;
+
+ it = FIELD_GET(VCNL4040_ALS_CONF_IT, ret);
+ if (it >= data->chip_spec->num_als_it_times)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(vcnl4040_als_persistence) - 1; i++) {
+ if (val_n < mul_u32_u32(vcnl4040_als_persistence[i],
+ (*data->chip_spec->als_it_times)[it][1]))
+ break;
+ }
+
+ mutex_lock(&data->vcnl4000_lock);
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_AL_CONF);
+ if (ret < 0)
+ goto out_unlock;
+
+ regval = FIELD_PREP(VCNL4040_ALS_CONF_PERS, i);
+ regval |= (ret & ~VCNL4040_ALS_CONF_PERS);
+ ret = i2c_smbus_write_word_data(data->client, VCNL4200_AL_CONF,
+ regval);
+
+out_unlock:
+ mutex_unlock(&data->vcnl4000_lock);
+ return ret;
+}
+
+static ssize_t vcnl4040_read_ps_period(struct vcnl4000_data *data, int *val, int *val2)
+{
+ int ret, ret_pers, it;
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF1);
+ if (ret < 0)
+ return ret;
+
+ ret_pers = FIELD_GET(VCNL4040_CONF1_PS_PERS, ret);
+ if (ret_pers >= ARRAY_SIZE(vcnl4040_ps_persistence))
+ return -EINVAL;
+
+ it = FIELD_GET(VCNL4040_PS_CONF2_PS_IT, ret);
+ if (it >= data->chip_spec->num_ps_it_times)
+ return -EINVAL;
+
+ *val = (*data->chip_spec->ps_it_times)[it][0];
+ *val2 = (*data->chip_spec->ps_it_times)[it][1] *
+ vcnl4040_ps_persistence[ret_pers];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static ssize_t vcnl4040_write_ps_period(struct vcnl4000_data *data, int val, int val2)
+{
+ int ret, it, i;
+ u16 regval;
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF1);
+ if (ret < 0)
+ return ret;
+
+ it = FIELD_GET(VCNL4040_PS_CONF2_PS_IT, ret);
+ if (it >= data->chip_spec->num_ps_it_times)
+ return -EINVAL;
+
+ if (val > 0)
+ i = ARRAY_SIZE(vcnl4040_ps_persistence) - 1;
+ else {
+ for (i = 0; i < ARRAY_SIZE(vcnl4040_ps_persistence) - 1; i++) {
+ if (val2 <= vcnl4040_ps_persistence[i] *
+ (*data->chip_spec->ps_it_times)[it][1])
+ break;
+ }
+ }
+
+ mutex_lock(&data->vcnl4000_lock);
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF1);
+ if (ret < 0)
+ goto out_unlock;
+
+ regval = FIELD_PREP(VCNL4040_CONF1_PS_PERS, i);
+ regval |= (ret & ~VCNL4040_CONF1_PS_PERS);
+ ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF1,
+ regval);
+
+out_unlock:
+ mutex_unlock(&data->vcnl4000_lock);
+ return ret;
+}
+
+static ssize_t vcnl4040_read_ps_oversampling_ratio(struct vcnl4000_data *data, int *val)
+{
+ int ret;
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF3);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(VCNL4040_PS_CONF3_MPS, ret);
+ if (ret >= ARRAY_SIZE(vcnl4040_ps_oversampling_ratio))
+ return -EINVAL;
+
+ *val = vcnl4040_ps_oversampling_ratio[ret];
+
+ return ret;
+}
+
+static ssize_t vcnl4040_write_ps_oversampling_ratio(struct vcnl4000_data *data, int val)
+{
+ unsigned int i;
+ int ret;
+ u16 regval;
+
+ for (i = 0; i < ARRAY_SIZE(vcnl4040_ps_oversampling_ratio); i++) {
+ if (val == vcnl4040_ps_oversampling_ratio[i])
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(vcnl4040_ps_oversampling_ratio))
+ return -EINVAL;
+
+ mutex_lock(&data->vcnl4000_lock);
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF3);
+ if (ret < 0)
+ goto out_unlock;
+
+ regval = FIELD_PREP(VCNL4040_PS_CONF3_MPS, i);
+ regval |= (ret & ~VCNL4040_PS_CONF3_MPS);
+ ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF3,
+ regval);
+
+out_unlock:
+ mutex_unlock(&data->vcnl4000_lock);
+ return ret;
+}
+
+static ssize_t vcnl4040_read_ps_calibbias(struct vcnl4000_data *data, int *val, int *val2)
+{
+ int ret;
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF3);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(VCNL4040_PS_MS_LED_I, ret);
+ if (ret >= ARRAY_SIZE(vcnl4040_ps_calibbias_ua))
+ return -EINVAL;
+
+ *val = vcnl4040_ps_calibbias_ua[ret][0];
+ *val2 = vcnl4040_ps_calibbias_ua[ret][1];
+
+ return ret;
+}
+
+static ssize_t vcnl4040_write_ps_calibbias(struct vcnl4000_data *data, int val)
+{
+ unsigned int i;
+ int ret;
+ u16 regval;
+
+ for (i = 0; i < ARRAY_SIZE(vcnl4040_ps_calibbias_ua); i++) {
+ if (val == vcnl4040_ps_calibbias_ua[i][1])
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(vcnl4040_ps_calibbias_ua))
+ return -EINVAL;
+
+ mutex_lock(&data->vcnl4000_lock);
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF3);
+ if (ret < 0)
+ goto out_unlock;
+
+ regval = (ret & ~VCNL4040_PS_MS_LED_I);
+ regval |= FIELD_PREP(VCNL4040_PS_MS_LED_I, i);
+ ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF3,
+ regval);
+
+out_unlock:
+ mutex_unlock(&data->vcnl4000_lock);
+ return ret;
+}
+
static int vcnl4000_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
@@ -584,12 +917,39 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev,
*val2 = data->al_scale;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_INT_TIME:
- if (chan->type != IIO_PROXIMITY)
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = vcnl4040_read_als_it(data, val, val2);
+ break;
+ case IIO_PROXIMITY:
+ ret = vcnl4040_read_ps_it(data, val, val2);
+ break;
+ default:
return -EINVAL;
- ret = vcnl4040_read_ps_it(data, val, val2);
+ }
if (ret < 0)
return ret;
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = vcnl4040_read_ps_oversampling_ratio(data, val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = vcnl4040_read_ps_calibbias(data, val, val2);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
@@ -605,9 +965,28 @@ static int vcnl4040_write_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_INT_TIME:
if (val != 0)
return -EINVAL;
- if (chan->type != IIO_PROXIMITY)
+ switch (chan->type) {
+ case IIO_LIGHT:
+ return vcnl4040_write_als_it(data, val2);
+ case IIO_PROXIMITY:
+ return vcnl4040_write_ps_it(data, val2);
+ default:
return -EINVAL;
- return vcnl4040_write_ps_it(data, val2);
+ }
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return vcnl4040_write_ps_oversampling_ratio(data, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return vcnl4040_write_ps_calibbias(data, val2);
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
@@ -618,12 +997,44 @@ static int vcnl4040_read_avail(struct iio_dev *indio_dev,
const int **vals, int *type, int *length,
long mask)
{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
switch (mask) {
case IIO_CHAN_INFO_INT_TIME:
- *vals = (int *)vcnl4040_ps_it_times;
+ switch (chan->type) {
+ case IIO_LIGHT:
+ *vals = (int *)(*data->chip_spec->als_it_times);
+ *length = 2 * data->chip_spec->num_als_it_times;
+ break;
+ case IIO_PROXIMITY:
+ *vals = (int *)(*data->chip_spec->ps_it_times);
+ *length = 2 * data->chip_spec->num_ps_it_times;
+ break;
+ default:
+ return -EINVAL;
+ }
*type = IIO_VAL_INT_PLUS_MICRO;
- *length = 2 * ARRAY_SIZE(vcnl4040_ps_it_times);
return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ *vals = (int *)vcnl4040_ps_oversampling_ratio;
+ *length = ARRAY_SIZE(vcnl4040_ps_oversampling_ratio);
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ *vals = (int *)vcnl4040_ps_calibbias_ua;
+ *length = 2 * ARRAY_SIZE(vcnl4040_ps_calibbias_ua);
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
@@ -818,24 +1229,58 @@ static int vcnl4040_read_event(struct iio_dev *indio_dev,
int ret;
struct vcnl4000_data *data = iio_priv(indio_dev);
- switch (dir) {
- case IIO_EV_DIR_RISING:
- ret = i2c_smbus_read_word_data(data->client,
- VCNL4040_PS_THDH_LM);
- if (ret < 0)
- return ret;
- *val = ret;
- return IIO_VAL_INT;
- case IIO_EV_DIR_FALLING:
- ret = i2c_smbus_read_word_data(data->client,
- VCNL4040_PS_THDL_LM);
- if (ret < 0)
- return ret;
- *val = ret;
- return IIO_VAL_INT;
+ switch (chan->type) {
+ case IIO_LIGHT:
+ switch (info) {
+ case IIO_EV_INFO_PERIOD:
+ return vcnl4040_read_als_period(data, val, val2);
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = i2c_smbus_read_word_data(data->client,
+ VCNL4040_ALS_THDH_LM);
+ break;
+ case IIO_EV_DIR_FALLING:
+ ret = i2c_smbus_read_word_data(data->client,
+ VCNL4040_ALS_THDL_LM);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_PROXIMITY:
+ switch (info) {
+ case IIO_EV_INFO_PERIOD:
+ return vcnl4040_read_ps_period(data, val, val2);
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = i2c_smbus_read_word_data(data->client,
+ VCNL4040_PS_THDH_LM);
+ break;
+ case IIO_EV_DIR_FALLING:
+ ret = i2c_smbus_read_word_data(data->client,
+ VCNL4040_PS_THDL_LM);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
default:
return -EINVAL;
}
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
}
static int vcnl4040_write_event(struct iio_dev *indio_dev,
@@ -848,22 +1293,61 @@ static int vcnl4040_write_event(struct iio_dev *indio_dev,
int ret;
struct vcnl4000_data *data = iio_priv(indio_dev);
- switch (dir) {
- case IIO_EV_DIR_RISING:
- ret = i2c_smbus_write_word_data(data->client,
- VCNL4040_PS_THDH_LM, val);
- if (ret < 0)
- return ret;
- return IIO_VAL_INT;
- case IIO_EV_DIR_FALLING:
- ret = i2c_smbus_write_word_data(data->client,
- VCNL4040_PS_THDL_LM, val);
- if (ret < 0)
- return ret;
- return IIO_VAL_INT;
+ switch (chan->type) {
+ case IIO_LIGHT:
+ switch (info) {
+ case IIO_EV_INFO_PERIOD:
+ return vcnl4040_write_als_period(data, val, val2);
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = i2c_smbus_write_word_data(data->client,
+ VCNL4040_ALS_THDH_LM,
+ val);
+ break;
+ case IIO_EV_DIR_FALLING:
+ ret = i2c_smbus_write_word_data(data->client,
+ VCNL4040_ALS_THDL_LM,
+ val);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_PROXIMITY:
+ switch (info) {
+ case IIO_EV_INFO_PERIOD:
+ return vcnl4040_write_ps_period(data, val, val2);
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = i2c_smbus_write_word_data(data->client,
+ VCNL4040_PS_THDH_LM,
+ val);
+ break;
+ case IIO_EV_DIR_FALLING:
+ ret = i2c_smbus_write_word_data(data->client,
+ VCNL4040_PS_THDL_LM,
+ val);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
default:
return -EINVAL;
}
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
}
static bool vcnl4010_is_thr_enabled(struct vcnl4000_data *data)
@@ -956,15 +1440,28 @@ static int vcnl4040_read_event_config(struct iio_dev *indio_dev,
int ret;
struct vcnl4000_data *data = iio_priv(indio_dev);
- ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF1);
- if (ret < 0)
- return ret;
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_AL_CONF);
+ if (ret < 0)
+ return ret;
+
+ data->als_int = FIELD_GET(VCNL4040_ALS_CONF_INT_EN, ret);
- data->ps_int = FIELD_GET(VCNL4040_PS_CONF2_PS_INT, ret);
+ return data->als_int;
+ case IIO_PROXIMITY:
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF1);
+ if (ret < 0)
+ return ret;
- return (dir == IIO_EV_DIR_RISING) ?
- FIELD_GET(VCNL4040_PS_IF_AWAY, ret) :
- FIELD_GET(VCNL4040_PS_IF_CLOSE, ret);
+ data->ps_int = FIELD_GET(VCNL4040_PS_CONF2_PS_INT, ret);
+
+ return (dir == IIO_EV_DIR_RISING) ?
+ FIELD_GET(VCNL4040_PS_IF_AWAY, ret) :
+ FIELD_GET(VCNL4040_PS_IF_CLOSE, ret);
+ default:
+ return -EINVAL;
+ }
}
static int vcnl4040_write_event_config(struct iio_dev *indio_dev,
@@ -972,29 +1469,51 @@ static int vcnl4040_write_event_config(struct iio_dev *indio_dev,
enum iio_event_type type,
enum iio_event_direction dir, int state)
{
- int ret;
+ int ret = -EINVAL;
u16 val, mask;
struct vcnl4000_data *data = iio_priv(indio_dev);
mutex_lock(&data->vcnl4000_lock);
- ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF1);
- if (ret < 0)
- goto out;
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_AL_CONF);
+ if (ret < 0)
+ goto out;
- if (dir == IIO_EV_DIR_RISING)
- mask = VCNL4040_PS_IF_AWAY;
- else
- mask = VCNL4040_PS_IF_CLOSE;
+ mask = VCNL4040_ALS_CONF_INT_EN;
+ if (state)
+ val = (ret | mask);
+ else
+ val = (ret & ~mask);
- val = state ? (ret | mask) : (ret & ~mask);
+ data->als_int = FIELD_GET(VCNL4040_ALS_CONF_INT_EN, val);
+ ret = i2c_smbus_write_word_data(data->client, VCNL4200_AL_CONF,
+ val);
+ break;
+ case IIO_PROXIMITY:
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF1);
+ if (ret < 0)
+ goto out;
- data->ps_int = FIELD_GET(VCNL4040_PS_CONF2_PS_INT, val);
- ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF1, val);
+ if (dir == IIO_EV_DIR_RISING)
+ mask = VCNL4040_PS_IF_AWAY;
+ else
+ mask = VCNL4040_PS_IF_CLOSE;
+
+ val = state ? (ret | mask) : (ret & ~mask);
+
+ data->ps_int = FIELD_GET(VCNL4040_PS_CONF2_PS_INT, val);
+ ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF1,
+ val);
+ break;
+ default:
+ break;
+ }
out:
mutex_unlock(&data->vcnl4000_lock);
- data->chip_spec->set_power_state(data, data->ps_int != 0);
+ data->chip_spec->set_power_state(data, data->ps_int || data->als_int);
return ret;
}
@@ -1005,7 +1524,7 @@ static irqreturn_t vcnl4040_irq_thread(int irq, void *p)
struct vcnl4000_data *data = iio_priv(indio_dev);
int ret;
- ret = i2c_smbus_read_word_data(data->client, VCNL4040_INT_FLAGS);
+ ret = i2c_smbus_read_word_data(data->client, data->chip_spec->int_reg);
if (ret < 0)
return IRQ_HANDLED;
@@ -1025,6 +1544,22 @@ static irqreturn_t vcnl4040_irq_thread(int irq, void *p)
iio_get_time_ns(indio_dev));
}
+ if (ret & VCNL4040_ALS_FALLING) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ if (ret & VCNL4040_ALS_RISING) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+ }
+
return IRQ_HANDLED;
}
@@ -1191,6 +1726,22 @@ static const struct iio_event_spec vcnl4000_event_spec[] = {
}
};
+static const struct iio_event_spec vcnl4040_als_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) | BIT(IIO_EV_INFO_PERIOD),
+ },
+};
+
static const struct iio_event_spec vcnl4040_event_spec[] = {
{
.type = IIO_EV_TYPE_THRESH,
@@ -1200,6 +1751,10 @@ static const struct iio_event_spec vcnl4040_event_spec[] = {
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_FALLING,
.mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_PERIOD),
},
};
@@ -1244,12 +1799,20 @@ static const struct iio_chan_spec vcnl4040_channels[] = {
{
.type = IIO_LIGHT,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_INT_TIME),
+ .event_spec = vcnl4040_als_event_spec,
+ .num_event_specs = ARRAY_SIZE(vcnl4040_als_event_spec),
}, {
.type = IIO_PROXIMITY,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_INT_TIME),
- .info_mask_separate_available = BIT(IIO_CHAN_INFO_INT_TIME),
+ BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
.ext_info = vcnl4000_ext_info,
.event_spec = vcnl4040_event_spec,
.num_event_specs = ARRAY_SIZE(vcnl4040_event_spec),
@@ -1314,6 +1877,12 @@ static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = {
.num_channels = ARRAY_SIZE(vcnl4040_channels),
.info = &vcnl4040_info,
.irq_thread = vcnl4040_irq_thread,
+ .int_reg = VCNL4040_INT_FLAGS,
+ .ps_it_times = &vcnl4040_ps_it_times,
+ .num_ps_it_times = ARRAY_SIZE(vcnl4040_ps_it_times),
+ .als_it_times = &vcnl4040_als_it_times,
+ .num_als_it_times = ARRAY_SIZE(vcnl4040_als_it_times),
+ .ulux_step = 100000,
},
[VCNL4200] = {
.prod = "VCNL4200",
@@ -1321,9 +1890,16 @@ static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = {
.measure_light = vcnl4200_measure_light,
.measure_proximity = vcnl4200_measure_proximity,
.set_power_state = vcnl4200_set_power_state,
- .channels = vcnl4000_channels,
+ .channels = vcnl4040_channels,
.num_channels = ARRAY_SIZE(vcnl4000_channels),
- .info = &vcnl4000_info,
+ .info = &vcnl4040_info,
+ .irq_thread = vcnl4040_irq_thread,
+ .int_reg = VCNL4200_INT_FLAGS,
+ .ps_it_times = &vcnl4200_ps_it_times,
+ .num_ps_it_times = ARRAY_SIZE(vcnl4200_ps_it_times),
+ .als_it_times = &vcnl4200_als_it_times,
+ .num_als_it_times = ARRAY_SIZE(vcnl4200_als_it_times),
+ .ulux_step = 24000,
},
};
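For reference, the chip_spec changes above move the interrupt status register, the integration-time tables and the ALS scale into per-chip data. From its name and its use as a scale, ulux_step looks like the illuminance represented by one raw ALS count, in micro-lux; a minimal sketch of applying such a step value, under that assumption (the helper below is illustrative, not taken from the driver):

/* Assumes ulux_step is micro-lux per ALS count; returns milli-lux. */
static unsigned long long als_raw_to_mlux(unsigned int raw, unsigned int ulux_step)
{
	/* e.g. raw = 1234 with ulux_step = 100000 (0.1 lux/count) -> 123400 mlux */
	return (unsigned long long)raw * ulux_step / 1000;
}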
diff --git a/drivers/iio/potentiometer/mcp4018.c b/drivers/iio/potentiometer/mcp4018.c
index 89daecc90305..44678d372126 100644
--- a/drivers/iio/potentiometer/mcp4018.c
+++ b/drivers/iio/potentiometer/mcp4018.c
@@ -99,20 +99,25 @@ static const struct iio_info mcp4018_info = {
.write_raw = mcp4018_write_raw,
};
+#define MCP4018_ID_TABLE(_name, cfg) { \
+ .name = _name, \
+ .driver_data = (kernel_ulong_t)&mcp4018_cfg[cfg], \
+}
+
static const struct i2c_device_id mcp4018_id[] = {
- { "mcp4017-502", MCP4018_502 },
- { "mcp4017-103", MCP4018_103 },
- { "mcp4017-503", MCP4018_503 },
- { "mcp4017-104", MCP4018_104 },
- { "mcp4018-502", MCP4018_502 },
- { "mcp4018-103", MCP4018_103 },
- { "mcp4018-503", MCP4018_503 },
- { "mcp4018-104", MCP4018_104 },
- { "mcp4019-502", MCP4018_502 },
- { "mcp4019-103", MCP4018_103 },
- { "mcp4019-503", MCP4018_503 },
- { "mcp4019-104", MCP4018_104 },
- {}
+ MCP4018_ID_TABLE("mcp4017-502", MCP4018_502),
+ MCP4018_ID_TABLE("mcp4017-103", MCP4018_103),
+ MCP4018_ID_TABLE("mcp4017-503", MCP4018_503),
+ MCP4018_ID_TABLE("mcp4017-104", MCP4018_104),
+ MCP4018_ID_TABLE("mcp4018-502", MCP4018_502),
+ MCP4018_ID_TABLE("mcp4018-103", MCP4018_103),
+ MCP4018_ID_TABLE("mcp4018-503", MCP4018_503),
+ MCP4018_ID_TABLE("mcp4018-104", MCP4018_104),
+ MCP4018_ID_TABLE("mcp4019-502", MCP4018_502),
+ MCP4018_ID_TABLE("mcp4019-103", MCP4018_103),
+ MCP4018_ID_TABLE("mcp4019-503", MCP4018_503),
+ MCP4018_ID_TABLE("mcp4019-104", MCP4018_104),
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, mcp4018_id);
@@ -157,9 +162,7 @@ static int mcp4018_probe(struct i2c_client *client)
i2c_set_clientdata(client, indio_dev);
data->client = client;
- data->cfg = device_get_match_data(dev);
- if (!data->cfg)
- data->cfg = &mcp4018_cfg[i2c_match_id(mcp4018_id, client)->driver_data];
+ data->cfg = i2c_get_match_data(client);
indio_dev->info = &mcp4018_info;
indio_dev->channels = &mcp4018_channel;
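The ID-table rewrite above stores a pointer to the device configuration in driver_data, which is what lets probe collapse to a single i2c_get_match_data() call: that helper returns OF/ACPI match data when available and otherwise falls back to the driver_data of the matching I2C ID table entry. A hedged sketch of the same pattern for a hypothetical driver (all names below are illustrative):

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>

struct foo_cfg { int max_pos; };

static const struct foo_cfg foo_cfgs[] = {
	{ .max_pos = 127 },
	{ .max_pos = 255 },
};

static const struct i2c_device_id foo_id[] = {
	{ "foo-a", (kernel_ulong_t)&foo_cfgs[0] },
	{ "foo-b", (kernel_ulong_t)&foo_cfgs[1] },
	{ /* sentinel */ }
};

static int foo_probe(struct i2c_client *client)
{
	/* Works for OF, ACPI and plain I2C instantiation alike. */
	const struct foo_cfg *cfg = i2c_get_match_data(client);

	return cfg ? 0 : -ENODEV;
}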
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index c513c00c8243..f28880ebd758 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -206,72 +206,77 @@ static const struct iio_info mcp4531_info = {
.write_raw = mcp4531_write_raw,
};
+#define MCP4531_ID_TABLE(_name, cfg) { \
+ .name = _name, \
+ .driver_data = (kernel_ulong_t)&mcp4531_cfg[cfg], \
+}
+
static const struct i2c_device_id mcp4531_id[] = {
- { "mcp4531-502", MCP453x_502 },
- { "mcp4531-103", MCP453x_103 },
- { "mcp4531-503", MCP453x_503 },
- { "mcp4531-104", MCP453x_104 },
- { "mcp4532-502", MCP453x_502 },
- { "mcp4532-103", MCP453x_103 },
- { "mcp4532-503", MCP453x_503 },
- { "mcp4532-104", MCP453x_104 },
- { "mcp4541-502", MCP454x_502 },
- { "mcp4541-103", MCP454x_103 },
- { "mcp4541-503", MCP454x_503 },
- { "mcp4541-104", MCP454x_104 },
- { "mcp4542-502", MCP454x_502 },
- { "mcp4542-103", MCP454x_103 },
- { "mcp4542-503", MCP454x_503 },
- { "mcp4542-104", MCP454x_104 },
- { "mcp4551-502", MCP455x_502 },
- { "mcp4551-103", MCP455x_103 },
- { "mcp4551-503", MCP455x_503 },
- { "mcp4551-104", MCP455x_104 },
- { "mcp4552-502", MCP455x_502 },
- { "mcp4552-103", MCP455x_103 },
- { "mcp4552-503", MCP455x_503 },
- { "mcp4552-104", MCP455x_104 },
- { "mcp4561-502", MCP456x_502 },
- { "mcp4561-103", MCP456x_103 },
- { "mcp4561-503", MCP456x_503 },
- { "mcp4561-104", MCP456x_104 },
- { "mcp4562-502", MCP456x_502 },
- { "mcp4562-103", MCP456x_103 },
- { "mcp4562-503", MCP456x_503 },
- { "mcp4562-104", MCP456x_104 },
- { "mcp4631-502", MCP463x_502 },
- { "mcp4631-103", MCP463x_103 },
- { "mcp4631-503", MCP463x_503 },
- { "mcp4631-104", MCP463x_104 },
- { "mcp4632-502", MCP463x_502 },
- { "mcp4632-103", MCP463x_103 },
- { "mcp4632-503", MCP463x_503 },
- { "mcp4632-104", MCP463x_104 },
- { "mcp4641-502", MCP464x_502 },
- { "mcp4641-103", MCP464x_103 },
- { "mcp4641-503", MCP464x_503 },
- { "mcp4641-104", MCP464x_104 },
- { "mcp4642-502", MCP464x_502 },
- { "mcp4642-103", MCP464x_103 },
- { "mcp4642-503", MCP464x_503 },
- { "mcp4642-104", MCP464x_104 },
- { "mcp4651-502", MCP465x_502 },
- { "mcp4651-103", MCP465x_103 },
- { "mcp4651-503", MCP465x_503 },
- { "mcp4651-104", MCP465x_104 },
- { "mcp4652-502", MCP465x_502 },
- { "mcp4652-103", MCP465x_103 },
- { "mcp4652-503", MCP465x_503 },
- { "mcp4652-104", MCP465x_104 },
- { "mcp4661-502", MCP466x_502 },
- { "mcp4661-103", MCP466x_103 },
- { "mcp4661-503", MCP466x_503 },
- { "mcp4661-104", MCP466x_104 },
- { "mcp4662-502", MCP466x_502 },
- { "mcp4662-103", MCP466x_103 },
- { "mcp4662-503", MCP466x_503 },
- { "mcp4662-104", MCP466x_104 },
- {}
+ MCP4531_ID_TABLE("mcp4531-502", MCP453x_502),
+ MCP4531_ID_TABLE("mcp4531-103", MCP453x_103),
+ MCP4531_ID_TABLE("mcp4531-503", MCP453x_503),
+ MCP4531_ID_TABLE("mcp4531-104", MCP453x_104),
+ MCP4531_ID_TABLE("mcp4532-502", MCP453x_502),
+ MCP4531_ID_TABLE("mcp4532-103", MCP453x_103),
+ MCP4531_ID_TABLE("mcp4532-503", MCP453x_503),
+ MCP4531_ID_TABLE("mcp4532-104", MCP453x_104),
+ MCP4531_ID_TABLE("mcp4541-502", MCP454x_502),
+ MCP4531_ID_TABLE("mcp4541-103", MCP454x_103),
+ MCP4531_ID_TABLE("mcp4541-503", MCP454x_503),
+ MCP4531_ID_TABLE("mcp4541-104", MCP454x_104),
+ MCP4531_ID_TABLE("mcp4542-502", MCP454x_502),
+ MCP4531_ID_TABLE("mcp4542-103", MCP454x_103),
+ MCP4531_ID_TABLE("mcp4542-503", MCP454x_503),
+ MCP4531_ID_TABLE("mcp4542-104", MCP454x_104),
+ MCP4531_ID_TABLE("mcp4551-502", MCP455x_502),
+ MCP4531_ID_TABLE("mcp4551-103", MCP455x_103),
+ MCP4531_ID_TABLE("mcp4551-503", MCP455x_503),
+ MCP4531_ID_TABLE("mcp4551-104", MCP455x_104),
+ MCP4531_ID_TABLE("mcp4552-502", MCP455x_502),
+ MCP4531_ID_TABLE("mcp4552-103", MCP455x_103),
+ MCP4531_ID_TABLE("mcp4552-503", MCP455x_503),
+ MCP4531_ID_TABLE("mcp4552-104", MCP455x_104),
+ MCP4531_ID_TABLE("mcp4561-502", MCP456x_502),
+ MCP4531_ID_TABLE("mcp4561-103", MCP456x_103),
+ MCP4531_ID_TABLE("mcp4561-503", MCP456x_503),
+ MCP4531_ID_TABLE("mcp4561-104", MCP456x_104),
+ MCP4531_ID_TABLE("mcp4562-502", MCP456x_502),
+ MCP4531_ID_TABLE("mcp4562-103", MCP456x_103),
+ MCP4531_ID_TABLE("mcp4562-503", MCP456x_503),
+ MCP4531_ID_TABLE("mcp4562-104", MCP456x_104),
+ MCP4531_ID_TABLE("mcp4631-502", MCP463x_502),
+ MCP4531_ID_TABLE("mcp4631-103", MCP463x_103),
+ MCP4531_ID_TABLE("mcp4631-503", MCP463x_503),
+ MCP4531_ID_TABLE("mcp4631-104", MCP463x_104),
+ MCP4531_ID_TABLE("mcp4632-502", MCP463x_502),
+ MCP4531_ID_TABLE("mcp4632-103", MCP463x_103),
+ MCP4531_ID_TABLE("mcp4632-503", MCP463x_503),
+ MCP4531_ID_TABLE("mcp4632-104", MCP463x_104),
+ MCP4531_ID_TABLE("mcp4641-502", MCP464x_502),
+ MCP4531_ID_TABLE("mcp4641-103", MCP464x_103),
+ MCP4531_ID_TABLE("mcp4641-503", MCP464x_503),
+ MCP4531_ID_TABLE("mcp4641-104", MCP464x_104),
+ MCP4531_ID_TABLE("mcp4642-502", MCP464x_502),
+ MCP4531_ID_TABLE("mcp4642-103", MCP464x_103),
+ MCP4531_ID_TABLE("mcp4642-503", MCP464x_503),
+ MCP4531_ID_TABLE("mcp4642-104", MCP464x_104),
+ MCP4531_ID_TABLE("mcp4651-502", MCP465x_502),
+ MCP4531_ID_TABLE("mcp4651-103", MCP465x_103),
+ MCP4531_ID_TABLE("mcp4651-503", MCP465x_503),
+ MCP4531_ID_TABLE("mcp4651-104", MCP465x_104),
+ MCP4531_ID_TABLE("mcp4652-502", MCP465x_502),
+ MCP4531_ID_TABLE("mcp4652-103", MCP465x_103),
+ MCP4531_ID_TABLE("mcp4652-503", MCP465x_503),
+ MCP4531_ID_TABLE("mcp4652-104", MCP465x_104),
+ MCP4531_ID_TABLE("mcp4661-502", MCP466x_502),
+ MCP4531_ID_TABLE("mcp4661-103", MCP466x_103),
+ MCP4531_ID_TABLE("mcp4661-503", MCP466x_503),
+ MCP4531_ID_TABLE("mcp4661-104", MCP466x_104),
+ MCP4531_ID_TABLE("mcp4662-502", MCP466x_502),
+ MCP4531_ID_TABLE("mcp4662-103", MCP466x_103),
+ MCP4531_ID_TABLE("mcp4662-503", MCP466x_503),
+ MCP4531_ID_TABLE("mcp4662-104", MCP466x_104),
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, mcp4531_id);
@@ -368,9 +373,7 @@ static int mcp4531_probe(struct i2c_client *client)
i2c_set_clientdata(client, indio_dev);
data->client = client;
- data->cfg = device_get_match_data(dev);
- if (!data->cfg)
- data->cfg = &mcp4531_cfg[i2c_match_id(mcp4531_id, client)->driver_data];
+ data->cfg = i2c_get_match_data(client);
indio_dev->info = &mcp4531_info;
indio_dev->channels = mcp4531_channels;
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index 0e5c17530b8b..2ca3b0bc5eba 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -32,6 +32,18 @@ config CROS_EC_MKBP_PROXIMITY
To compile this driver as a module, choose M here: the
module will be called cros_ec_mkbp_proximity.
+config IRSD200
+ tristate "Murata IRS-D200 PIR sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select REGMAP_I2C
+ depends on I2C
+ help
+ Say Y here to build a driver for the Murata IRS-D200 PIR sensor.
+
+ To compile this driver as a module, choose M here: the module will be
+ called irsd200.
+
config ISL29501
tristate "Intersil ISL29501 Time Of Flight sensor"
depends on I2C
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index cc838bb5408a..f36598380446 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -6,6 +6,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AS3935) += as3935.o
obj-$(CONFIG_CROS_EC_MKBP_PROXIMITY) += cros_ec_mkbp_proximity.o
+obj-$(CONFIG_IRSD200) += irsd200.o
obj-$(CONFIG_ISL29501) += isl29501.o
obj-$(CONFIG_LIDAR_LITE_V2) += pulsedlight-lidar-lite-v2.o
obj-$(CONFIG_MB1232) += mb1232.o
diff --git a/drivers/iio/proximity/irsd200.c b/drivers/iio/proximity/irsd200.c
new file mode 100644
index 000000000000..5bd791b46d98
--- /dev/null
+++ b/drivers/iio/proximity/irsd200.c
@@ -0,0 +1,958 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Murata IRS-D200 PIR sensor.
+ *
+ * Copyright (C) 2023 Axis Communications AB
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/types.h>
+
+#define IRS_DRV_NAME "irsd200"
+
+/* Registers. */
+#define IRS_REG_OP 0x00 /* Operation mode. */
+#define IRS_REG_DATA_LO 0x02 /* Sensor data LSB. */
+#define IRS_REG_DATA_HI 0x03 /* Sensor data MSB. */
+#define IRS_REG_STATUS 0x04 /* Interrupt status. */
+#define IRS_REG_COUNT 0x05 /* Count of exceeding threshold. */
+#define IRS_REG_DATA_RATE 0x06 /* Output data rate. */
+#define IRS_REG_FILTER 0x07 /* High-pass and low-pass filter. */
+#define IRS_REG_INTR 0x09 /* Interrupt mode. */
+#define IRS_REG_NR_COUNT 0x0a /* Number of counts before interrupt. */
+#define IRS_REG_THR_HI 0x0b /* Upper threshold. */
+#define IRS_REG_THR_LO 0x0c /* Lower threshold. */
+#define IRS_REG_TIMER_LO 0x0d /* Timer setting LSB. */
+#define IRS_REG_TIMER_HI 0x0e /* Timer setting MSB. */
+
+/* Interrupt status bits. */
+#define IRS_INTR_DATA 0 /* Data update. */
+#define IRS_INTR_TIMER 1 /* Timer expiration. */
+#define IRS_INTR_COUNT_THR_AND 2 /* Count "AND" threshold. */
+#define IRS_INTR_COUNT_THR_OR 3 /* Count "OR" threshold. */
+
+/* Operation states. */
+#define IRS_OP_ACTIVE 0x00
+#define IRS_OP_SLEEP 0x01
+
+/*
+ * Quantization scale value for threshold. Used for conversion from/to register
+ * value.
+ */
+#define IRS_THR_QUANT_SCALE 128
+
+#define IRS_UPPER_COUNT(count) FIELD_GET(GENMASK(7, 4), count)
+#define IRS_LOWER_COUNT(count) FIELD_GET(GENMASK(3, 0), count)
+
+/* Index corresponds to the value of IRS_REG_DATA_RATE register. */
+static const int irsd200_data_rates[] = {
+ 50,
+ 100,
+};
+
+/* Index corresponds to the (field) value of IRS_REG_FILTER register. */
+static const unsigned int irsd200_lp_filter_freq[] = {
+ 10,
+ 7,
+};
+
+/*
+ * Index corresponds to the (field) value of IRS_REG_FILTER register. Note that
+ * this represents a fractional value (e.g. the first value corresponds to 3 / 10
+ * = 0.3 Hz).
+ */
+static const unsigned int irsd200_hp_filter_freq[][2] = {
+ { 3, 10 },
+ { 5, 10 },
+};
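/*
 * Illustrative sketch, not from the driver above: each {numerator,
 * denominator} pair reads back through IIO_VAL_FRACTIONAL, i.e. val / val2.
 * First entry: 3 / 10 = 0.3 Hz; second entry: 5 / 10 = 0.5 Hz.
 */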
+
+/* Register fields. */
+enum irsd200_regfield {
+ /* Data interrupt. */
+ IRS_REGF_INTR_DATA,
+ /* Timer interrupt. */
+ IRS_REGF_INTR_TIMER,
+ /* AND count threshold interrupt. */
+ IRS_REGF_INTR_COUNT_THR_AND,
+ /* OR count threshold interrupt. */
+ IRS_REGF_INTR_COUNT_THR_OR,
+
+ /* Low-pass filter frequency. */
+ IRS_REGF_LP_FILTER,
+ /* High-pass filter frequency. */
+ IRS_REGF_HP_FILTER,
+
+ /* Sentinel value. */
+ IRS_REGF_MAX
+};
+
+static const struct reg_field irsd200_regfields[] = {
+ [IRS_REGF_INTR_DATA] =
+ REG_FIELD(IRS_REG_INTR, IRS_INTR_DATA, IRS_INTR_DATA),
+ [IRS_REGF_INTR_TIMER] =
+ REG_FIELD(IRS_REG_INTR, IRS_INTR_TIMER, IRS_INTR_TIMER),
+ [IRS_REGF_INTR_COUNT_THR_AND] = REG_FIELD(
+ IRS_REG_INTR, IRS_INTR_COUNT_THR_AND, IRS_INTR_COUNT_THR_AND),
+ [IRS_REGF_INTR_COUNT_THR_OR] = REG_FIELD(
+ IRS_REG_INTR, IRS_INTR_COUNT_THR_OR, IRS_INTR_COUNT_THR_OR),
+
+ [IRS_REGF_LP_FILTER] = REG_FIELD(IRS_REG_FILTER, 1, 1),
+ [IRS_REGF_HP_FILTER] = REG_FIELD(IRS_REG_FILTER, 0, 0),
+};
+
+static const struct regmap_config irsd200_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = IRS_REG_TIMER_HI,
+};
+
+struct irsd200_data {
+ struct regmap *regmap;
+ struct regmap_field *regfields[IRS_REGF_MAX];
+ struct device *dev;
+};
+
+static int irsd200_setup(struct irsd200_data *data)
+{
+ unsigned int val;
+ int ret;
+
+ /* Disable all interrupt sources. */
+ ret = regmap_write(data->regmap, IRS_REG_INTR, 0);
+ if (ret) {
+ dev_err(data->dev, "Could not set interrupt sources (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* Set operation to active. */
+ ret = regmap_write(data->regmap, IRS_REG_OP, IRS_OP_ACTIVE);
+ if (ret) {
+ dev_err(data->dev, "Could not set operation mode (%d)\n", ret);
+ return ret;
+ }
+
+ /* Clear threshold count. */
+ ret = regmap_read(data->regmap, IRS_REG_COUNT, &val);
+ if (ret) {
+ dev_err(data->dev, "Could not clear threshold count (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* Clear status. */
+ ret = regmap_write(data->regmap, IRS_REG_STATUS, 0x0f);
+ if (ret) {
+ dev_err(data->dev, "Could not clear status (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int irsd200_read_threshold(struct irsd200_data *data,
+ enum iio_event_direction dir, int *val)
+{
+ unsigned int regval;
+ unsigned int reg;
+ int scale;
+ int ret;
+
+ /* Set quantization scale. */
+ if (dir == IIO_EV_DIR_RISING) {
+ scale = IRS_THR_QUANT_SCALE;
+ reg = IRS_REG_THR_HI;
+ } else if (dir == IIO_EV_DIR_FALLING) {
+ scale = -IRS_THR_QUANT_SCALE;
+ reg = IRS_REG_THR_LO;
+ } else {
+ return -EINVAL;
+ }
+
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret) {
+ dev_err(data->dev, "Could not read threshold (%d)\n", ret);
+ return ret;
+ }
+
+ *val = ((int)regval) * scale;
+
+ return 0;
+}
+
+static int irsd200_write_threshold(struct irsd200_data *data,
+ enum iio_event_direction dir, int val)
+{
+ unsigned int regval;
+ unsigned int reg;
+ int scale;
+ int ret;
+
+ /* Set quantization scale. */
+ if (dir == IIO_EV_DIR_RISING) {
+ if (val < 0)
+ return -ERANGE;
+
+ scale = IRS_THR_QUANT_SCALE;
+ reg = IRS_REG_THR_HI;
+ } else if (dir == IIO_EV_DIR_FALLING) {
+ if (val > 0)
+ return -ERANGE;
+
+ scale = -IRS_THR_QUANT_SCALE;
+ reg = IRS_REG_THR_LO;
+ } else {
+ return -EINVAL;
+ }
+
+ regval = val / scale;
+
+ if (regval >= BIT(8))
+ return -ERANGE;
+
+ ret = regmap_write(data->regmap, reg, regval);
+ if (ret) {
+ dev_err(data->dev, "Could not write threshold (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
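/*
 * Illustrative sketch, not from the driver above: one register step is
 * IRS_THR_QUANT_SCALE = 128 counts of the signed sensor value, and the
 * falling threshold is stored as a magnitude, which is why the sign of the
 * scale flips per direction. Worked conversion, assuming the 8-bit register
 * implied by the BIT(8) check:
 */
static int example_thr_to_regval(int val, bool rising)
{
	int scale = rising ? 128 : -128;
	int regval = val / scale;

	/* rising: 384 / 128 = 3; falling: -256 / -128 = 2; outside 0..255 is an error */
	return (regval < 0 || regval > 255) ? -ERANGE : regval;
}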
+
+static int irsd200_read_data(struct irsd200_data *data, s16 *val)
+{
+ __le16 buf;
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, IRS_REG_DATA_LO, &buf,
+ sizeof(buf));
+ if (ret) {
+ dev_err(data->dev, "Could not bulk read data (%d)\n", ret);
+ return ret;
+ }
+
+ *val = le16_to_cpu(buf);
+
+ return 0;
+}
+
+static int irsd200_read_data_rate(struct irsd200_data *data, int *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(data->regmap, IRS_REG_DATA_RATE, &regval);
+ if (ret) {
+ dev_err(data->dev, "Could not read data rate (%d)\n", ret);
+ return ret;
+ }
+
+ if (regval >= ARRAY_SIZE(irsd200_data_rates))
+ return -ERANGE;
+
+ *val = irsd200_data_rates[regval];
+
+ return 0;
+}
+
+static int irsd200_write_data_rate(struct irsd200_data *data, int val)
+{
+ size_t idx;
+ int ret;
+
+ for (idx = 0; idx < ARRAY_SIZE(irsd200_data_rates); ++idx) {
+ if (irsd200_data_rates[idx] == val)
+ break;
+ }
+
+ if (idx == ARRAY_SIZE(irsd200_data_rates))
+ return -ERANGE;
+
+ ret = regmap_write(data->regmap, IRS_REG_DATA_RATE, idx);
+ if (ret) {
+ dev_err(data->dev, "Could not write data rate (%d)\n", ret);
+ return ret;
+ }
+
+ /*
+ * Data sheet says the device needs 3 seconds of settling time after a
+ * rate change, but it keeps operating normally during this period. The
+ * sleep is therefore a settling guarantee rather than an attempt to
+ * block other user space reads/writes.
+ */
+ ssleep(3);
+
+ return 0;
+}
+
+static int irsd200_read_timer(struct irsd200_data *data, int *val, int *val2)
+{
+ __le16 buf;
+ int ret;
+
+ ret = regmap_bulk_read(data->regmap, IRS_REG_TIMER_LO, &buf,
+ sizeof(buf));
+ if (ret) {
+ dev_err(data->dev, "Could not bulk read timer (%d)\n", ret);
+ return ret;
+ }
+
+ ret = irsd200_read_data_rate(data, val2);
+ if (ret)
+ return ret;
+
+ *val = le16_to_cpu(buf);
+
+ return 0;
+}
+
+static int irsd200_write_timer(struct irsd200_data *data, int val, int val2)
+{
+ unsigned int regval;
+ int data_rate;
+ __le16 buf;
+ int ret;
+
+ if (val < 0 || val2 < 0)
+ return -ERANGE;
+
+ ret = irsd200_read_data_rate(data, &data_rate);
+ if (ret)
+ return ret;
+
+ /* Quantize from seconds. */
+ regval = val * data_rate + (val2 * data_rate) / 1000000;
+
+ /* Value is 10 bits. */
+ if (regval >= BIT(10))
+ return -ERANGE;
+
+ buf = cpu_to_le16((u16)regval);
+
+ ret = regmap_bulk_write(data->regmap, IRS_REG_TIMER_LO, &buf,
+ sizeof(buf));
+ if (ret) {
+ dev_err(data->dev, "Could not bulk write timer (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
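/*
 * Illustrative sketch, not from the driver above: the timer register holds
 * sample periods, i.e. seconds times the output data rate, with val/val2
 * being the usual IIO seconds/microseconds split and a 10-bit limit.
 * Example: 2 s + 500000 us at 100 Hz -> 2 * 100 + 500000 * 100 / 1000000 = 250.
 */
static int example_timer_to_regval(int val, int val2, int data_rate)
{
	long regval = (long)val * data_rate + ((long)val2 * data_rate) / 1000000;

	return (regval < 0 || regval >= 1024) ? -ERANGE : (int)regval;
}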
+
+static int irsd200_read_nr_count(struct irsd200_data *data, int *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(data->regmap, IRS_REG_NR_COUNT, &regval);
+ if (ret) {
+ dev_err(data->dev, "Could not read nr count (%d)\n", ret);
+ return ret;
+ }
+
+ *val = regval;
+
+ return 0;
+}
+
+static int irsd200_write_nr_count(struct irsd200_data *data, int val)
+{
+ unsigned int regval;
+ int ret;
+
+ /* A value of zero means that IRS_REG_STATUS is never set. */
+ if (val <= 0 || val >= 8)
+ return -ERANGE;
+
+ regval = val;
+
+ if (regval >= 2) {
+ /*
+ * According to the data sheet, the timer must also be set in this
+ * case (i.e. be non-zero). Check and enforce that.
+ */
+ ret = irsd200_read_timer(data, &val, &val);
+ if (ret)
+ return ret;
+
+ if (val == 0) {
+ dev_err(data->dev,
+ "Timer must be non-zero when nr count is %u\n",
+ regval);
+ return -EPERM;
+ }
+ }
+
+ ret = regmap_write(data->regmap, IRS_REG_NR_COUNT, regval);
+ if (ret) {
+ dev_err(data->dev, "Could not write nr count (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int irsd200_read_lp_filter(struct irsd200_data *data, int *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_field_read(data->regfields[IRS_REGF_LP_FILTER], &regval);
+ if (ret) {
+ dev_err(data->dev, "Could not read lp filter frequency (%d)\n",
+ ret);
+ return ret;
+ }
+
+ *val = irsd200_lp_filter_freq[regval];
+
+ return 0;
+}
+
+static int irsd200_write_lp_filter(struct irsd200_data *data, int val)
+{
+ size_t idx;
+ int ret;
+
+ for (idx = 0; idx < ARRAY_SIZE(irsd200_lp_filter_freq); ++idx) {
+ if (irsd200_lp_filter_freq[idx] == val)
+ break;
+ }
+
+ if (idx == ARRAY_SIZE(irsd200_lp_filter_freq))
+ return -ERANGE;
+
+ ret = regmap_field_write(data->regfields[IRS_REGF_LP_FILTER], idx);
+ if (ret) {
+ dev_err(data->dev, "Could not write lp filter frequency (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int irsd200_read_hp_filter(struct irsd200_data *data, int *val,
+ int *val2)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_field_read(data->regfields[IRS_REGF_HP_FILTER], &regval);
+ if (ret) {
+ dev_err(data->dev, "Could not read hp filter frequency (%d)\n",
+ ret);
+ return ret;
+ }
+
+ *val = irsd200_hp_filter_freq[regval][0];
+ *val2 = irsd200_hp_filter_freq[regval][1];
+
+ return 0;
+}
+
+static int irsd200_write_hp_filter(struct irsd200_data *data, int val, int val2)
+{
+ size_t idx;
+ int ret;
+
+ /* Truncate fractional part to one digit. */
+ val2 /= 100000;
+
+ for (idx = 0; idx < ARRAY_SIZE(irsd200_hp_filter_freq); ++idx) {
+ if (irsd200_hp_filter_freq[idx][0] == val2)
+ break;
+ }
+
+ if (idx == ARRAY_SIZE(irsd200_hp_filter_freq) || val != 0)
+ return -ERANGE;
+
+ ret = regmap_field_write(data->regfields[IRS_REGF_HP_FILTER], idx);
+ if (ret) {
+ dev_err(data->dev, "Could not write hp filter frequency (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int irsd200_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct irsd200_data *data = iio_priv(indio_dev);
+ int ret;
+ s16 buf;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = irsd200_read_data(data, &buf);
+ if (ret)
+ return ret;
+
+ *val = buf;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = irsd200_read_data_rate(data, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ ret = irsd200_read_lp_filter(data, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ ret = irsd200_read_hp_filter(data, val, val2);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int irsd200_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = irsd200_data_rates;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(irsd200_data_rates);
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ *vals = irsd200_lp_filter_freq;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(irsd200_lp_filter_freq);
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ *vals = (int *)irsd200_hp_filter_freq;
+ *type = IIO_VAL_FRACTIONAL;
+ *length = 2 * ARRAY_SIZE(irsd200_hp_filter_freq);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int irsd200_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct irsd200_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return irsd200_write_data_rate(data, val);
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return irsd200_write_lp_filter(data, val);
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ return irsd200_write_hp_filter(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int irsd200_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct irsd200_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ ret = irsd200_read_threshold(data, dir, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_EV_INFO_RUNNING_PERIOD:
+ ret = irsd200_read_timer(data, val, val2);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_FRACTIONAL;
+ case IIO_EV_INFO_RUNNING_COUNT:
+ ret = irsd200_read_nr_count(data, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int irsd200_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct irsd200_data *data = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ return irsd200_write_threshold(data, dir, val);
+ case IIO_EV_INFO_RUNNING_PERIOD:
+ return irsd200_write_timer(data, val, val2);
+ case IIO_EV_INFO_RUNNING_COUNT:
+ return irsd200_write_nr_count(data, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int irsd200_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct irsd200_data *data = iio_priv(indio_dev);
+ unsigned int val;
+ int ret;
+
+ switch (type) {
+ case IIO_EV_TYPE_THRESH:
+ ret = regmap_field_read(
+ data->regfields[IRS_REGF_INTR_COUNT_THR_OR], &val);
+ if (ret)
+ return ret;
+
+ return val;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int irsd200_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ struct irsd200_data *data = iio_priv(indio_dev);
+ unsigned int tmp;
+ int ret;
+
+ switch (type) {
+ case IIO_EV_TYPE_THRESH:
+ /* Clear the count register (by reading from it). */
+ ret = regmap_read(data->regmap, IRS_REG_COUNT, &tmp);
+ if (ret)
+ return ret;
+
+ return regmap_field_write(
+ data->regfields[IRS_REGF_INTR_COUNT_THR_OR], !!state);
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t irsd200_irq_thread(int irq, void *dev_id)
+{
+ struct iio_dev *indio_dev = dev_id;
+ struct irsd200_data *data = iio_priv(indio_dev);
+ enum iio_event_direction dir;
+ unsigned int lower_count;
+ unsigned int upper_count;
+ unsigned int status = 0;
+ unsigned int source = 0;
+ unsigned int clear = 0;
+ unsigned int count = 0;
+ int ret;
+
+ ret = regmap_read(data->regmap, IRS_REG_INTR, &source);
+ if (ret) {
+ dev_err(data->dev, "Could not read interrupt source (%d)\n",
+ ret);
+ return IRQ_HANDLED;
+ }
+
+ ret = regmap_read(data->regmap, IRS_REG_STATUS, &status);
+ if (ret) {
+ dev_err(data->dev, "Could not acknowledge interrupt (%d)\n",
+ ret);
+ return IRQ_HANDLED;
+ }
+
+ if (status & BIT(IRS_INTR_DATA) && iio_buffer_enabled(indio_dev)) {
+ iio_trigger_poll_nested(indio_dev->trig);
+ clear |= BIT(IRS_INTR_DATA);
+ }
+
+ if (status & BIT(IRS_INTR_COUNT_THR_OR) &&
+ source & BIT(IRS_INTR_COUNT_THR_OR)) {
+ /*
+ * The register value resets to zero after reading. We therefore
+ * need to read once and manually extract the lower and upper
+ * count register fields.
+ */
+ ret = regmap_read(data->regmap, IRS_REG_COUNT, &count);
+ if (ret)
+ dev_err(data->dev, "Could not read count (%d)\n", ret);
+
+ upper_count = IRS_UPPER_COUNT(count);
+ lower_count = IRS_LOWER_COUNT(count);
+
+ /*
+ * We only check the OR mode to be able to push events for
+ * rising and falling thresholds. AND mode is covered when both
+ * upper and lower counts are non-zero, and is signaled with
+ * IIO_EV_DIR_EITHER.
+ */
+ if (upper_count && !lower_count)
+ dir = IIO_EV_DIR_RISING;
+ else if (!upper_count && lower_count)
+ dir = IIO_EV_DIR_FALLING;
+ else
+ dir = IIO_EV_DIR_EITHER;
+
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
+ IIO_EV_TYPE_THRESH, dir),
+ iio_get_time_ns(indio_dev));
+
+ /*
+ * The OR mode will always trigger when the AND mode does, but
+ * not vice versa. However, it seems like the AND bit needs to
+ * be cleared if data capture _and_ threshold count interrupts
+ * are desirable, even though it hasn't explicitly been selected
+ * (with IRS_REG_INTR). Either way, it doesn't hurt...
+ */
+ clear |= BIT(IRS_INTR_COUNT_THR_OR) |
+ BIT(IRS_INTR_COUNT_THR_AND);
+ }
+
+ if (!clear)
+ return IRQ_NONE;
+
+ ret = regmap_write(data->regmap, IRS_REG_STATUS, clear);
+ if (ret)
+ dev_err(data->dev,
+ "Could not clear interrupt status (%d)\n", ret);
+
+ return IRQ_HANDLED;
+}
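/*
 * Illustrative sketch, not from the driver above: the count register packs
 * the upper-threshold count in bits 7:4 and the lower-threshold count in
 * bits 3:0, so a single read is split before choosing the event direction.
 * Example: count = 0x23 -> upper = 2, lower = 3 -> both non-zero -> EITHER.
 */
static void example_split_count(unsigned int count,
				unsigned int *upper, unsigned int *lower)
{
	*upper = (count >> 4) & 0xf;	/* same field as IRS_UPPER_COUNT() */
	*lower = count & 0xf;		/* same field as IRS_LOWER_COUNT() */
}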
+
+static irqreturn_t irsd200_trigger_handler(int irq, void *pollf)
+{
+ struct iio_dev *indio_dev = ((struct iio_poll_func *)pollf)->indio_dev;
+ struct irsd200_data *data = iio_priv(indio_dev);
+ s16 buf = 0;
+ int ret;
+
+ ret = irsd200_read_data(data, &buf);
+ if (ret)
+ goto end;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &buf,
+ iio_get_time_ns(indio_dev));
+
+end:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int irsd200_set_trigger_state(struct iio_trigger *trig, bool state)
+{
+ struct irsd200_data *data = iio_trigger_get_drvdata(trig);
+ int ret;
+
+ ret = regmap_field_write(data->regfields[IRS_REGF_INTR_DATA], state);
+ if (ret) {
+ dev_err(data->dev, "Could not %s data interrupt source (%d)\n",
+ state ? "enable" : "disable", ret);
+ }
+
+ return ret;
+}
+
+static const struct iio_info irsd200_info = {
+ .read_raw = irsd200_read_raw,
+ .read_avail = irsd200_read_avail,
+ .write_raw = irsd200_write_raw,
+ .read_event_value = irsd200_read_event,
+ .write_event_value = irsd200_write_event,
+ .read_event_config = irsd200_read_event_config,
+ .write_event_config = irsd200_write_event_config,
+};
+
+static const struct iio_trigger_ops irsd200_trigger_ops = {
+ .set_trigger_state = irsd200_set_trigger_state,
+ .validate_device = iio_trigger_validate_own_device,
+};
+
+static const struct iio_event_spec irsd200_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate =
+ BIT(IIO_EV_INFO_RUNNING_PERIOD) |
+ BIT(IIO_EV_INFO_RUNNING_COUNT) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec irsd200_channels[] = {
+ {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) |
+ BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY),
+ .info_mask_separate_available =
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) |
+ BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY),
+ .event_spec = irsd200_event_spec,
+ .num_event_specs = ARRAY_SIZE(irsd200_event_spec),
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+};
+
+static int irsd200_probe(struct i2c_client *client)
+{
+ struct iio_trigger *trigger;
+ struct irsd200_data *data;
+ struct iio_dev *indio_dev;
+ size_t i;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return dev_err_probe(&client->dev, -ENOMEM,
+ "Could not allocate iio device\n");
+
+ data = iio_priv(indio_dev);
+ data->dev = &client->dev;
+
+ data->regmap = devm_regmap_init_i2c(client, &irsd200_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(data->dev, PTR_ERR(data->regmap),
+ "Could not initialize regmap\n");
+
+ for (i = 0; i < IRS_REGF_MAX; ++i) {
+ data->regfields[i] = devm_regmap_field_alloc(
+ data->dev, data->regmap, irsd200_regfields[i]);
+ if (IS_ERR(data->regfields[i]))
+ return dev_err_probe(
+ data->dev, PTR_ERR(data->regfields[i]),
+ "Could not allocate register field %zu\n", i);
+ }
+
+ ret = devm_regulator_get_enable(data->dev, "vdd");
+ if (ret)
+ return dev_err_probe(
+ data->dev, ret,
+ "Could not get and enable regulator (%d)\n", ret);
+
+ ret = irsd200_setup(data);
+ if (ret)
+ return ret;
+
+ indio_dev->info = &irsd200_info;
+ indio_dev->name = IRS_DRV_NAME;
+ indio_dev->channels = irsd200_channels;
+ indio_dev->num_channels = ARRAY_SIZE(irsd200_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ if (!client->irq)
+ return dev_err_probe(data->dev, -ENXIO, "No irq available\n");
+
+ ret = devm_iio_triggered_buffer_setup(data->dev, indio_dev, NULL,
+ irsd200_trigger_handler, NULL);
+ if (ret)
+ return dev_err_probe(
+ data->dev, ret,
+ "Could not setup iio triggered buffer (%d)\n", ret);
+
+ ret = devm_request_threaded_irq(data->dev, client->irq, NULL,
+ irsd200_irq_thread,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ NULL, indio_dev);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "Could not request irq (%d)\n", ret);
+
+ trigger = devm_iio_trigger_alloc(data->dev, "%s-dev%d", indio_dev->name,
+ iio_device_id(indio_dev));
+ if (!trigger)
+ return dev_err_probe(data->dev, -ENOMEM,
+ "Could not allocate iio trigger\n");
+
+ trigger->ops = &irsd200_trigger_ops;
+ iio_trigger_set_drvdata(trigger, data);
+
+ ret = devm_iio_trigger_register(data->dev, trigger);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "Could not register iio trigger (%d)\n",
+ ret);
+
+ ret = devm_iio_device_register(data->dev, indio_dev);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "Could not register iio device (%d)\n",
+ ret);
+
+ return 0;
+}
+
+static const struct of_device_id irsd200_of_match[] = {
+ {
+ .compatible = "murata,irsd200",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, irsd200_of_match);
+
+static struct i2c_driver irsd200_driver = {
+ .driver = {
+ .name = IRS_DRV_NAME,
+ .of_match_table = irsd200_of_match,
+ },
+ .probe = irsd200_probe,
+};
+module_i2c_driver(irsd200_driver);
+
+MODULE_AUTHOR("Waqar Hameed <waqar.hameed@axis.com>");
+MODULE_DESCRIPTION("Murata IRS-D200 PIR sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c
index fe45ca35a124..bcebacaf3dab 100644
--- a/drivers/iio/proximity/isl29501.c
+++ b/drivers/iio/proximity/isl29501.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/err.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c
index fb1073c8d9f7..614e65cb9d42 100644
--- a/drivers/iio/proximity/mb1232.c
+++ b/drivers/iio/proximity/mb1232.c
@@ -76,7 +76,7 @@ static s16 mb1232_read_distance(struct mb1232_data *data)
goto error_unlock;
}
- if (data->irqnr >= 0) {
+ if (data->irqnr > 0) {
/* it cannot take more than 100 ms */
ret = wait_for_completion_killable_timeout(&data->ranging,
HZ/10);
@@ -212,10 +212,7 @@ static int mb1232_probe(struct i2c_client *client)
init_completion(&data->ranging);
data->irqnr = fwnode_irq_get(dev_fwnode(&client->dev), 0);
- if (data->irqnr <= 0) {
- /* usage of interrupt is optional */
- data->irqnr = -1;
- } else {
+ if (data->irqnr > 0) {
ret = devm_request_irq(dev, data->irqnr, mb1232_handle_irq,
IRQF_TRIGGER_FALLING, id->name, indio_dev);
if (ret < 0) {
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index 676dc8701924..07bb5df24ab3 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -27,8 +27,8 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/jiffies.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/trigger/stm32-lptimer-trigger.c b/drivers/iio/trigger/stm32-lptimer-trigger.c
index 2e447a3f047d..f1e18913236a 100644
--- a/drivers/iio/trigger/stm32-lptimer-trigger.c
+++ b/drivers/iio/trigger/stm32-lptimer-trigger.c
@@ -73,7 +73,6 @@ static int stm32_lptim_trigger_probe(struct platform_device *pdev)
{
struct stm32_lptim_trigger *priv;
u32 index;
- int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -88,13 +87,7 @@ static int stm32_lptim_trigger_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
priv->trg = stm32_lptim_triggers[index];
- ret = stm32_lptim_setup_trig(priv);
- if (ret)
- return ret;
-
- platform_set_drvdata(pdev, priv);
-
- return 0;
+ return stm32_lptim_setup_trig(priv);
}
static const struct of_device_id stm32_lptim_trig_of_match[] = {
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 73f913cbd146..7acc0f936dad 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1457,6 +1457,17 @@ static int config_non_roce_gid_cache(struct ib_device *device,
i);
goto err;
}
+
+ if (rdma_protocol_iwarp(device, port)) {
+ struct net_device *ndev;
+
+ ndev = ib_device_get_netdev(device, port);
+ if (!ndev)
+ continue;
+ RCU_INIT_POINTER(gid_attr.ndev, ndev);
+ dev_put(ndev);
+ }
+
gid_attr.index = i;
tprops->subnet_prefix =
be64_to_cpu(gid_attr.gid.global.subnet_prefix);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 9891c7dc2af5..c343edf2f664 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -686,30 +686,52 @@ cma_validate_port(struct ib_device *device, u32 port,
struct rdma_id_private *id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ const struct ib_gid_attr *sgid_attr = ERR_PTR(-ENODEV);
int bound_if_index = dev_addr->bound_dev_if;
- const struct ib_gid_attr *sgid_attr;
int dev_type = dev_addr->dev_type;
struct net_device *ndev = NULL;
if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
- return ERR_PTR(-ENODEV);
+ goto out;
if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
- return ERR_PTR(-ENODEV);
+ goto out;
if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
- return ERR_PTR(-ENODEV);
+ goto out;
+
+ /*
+ * For drivers that do not associate more than one net device with
+ * their gid tables, such as iWARP drivers, it is sufficient to
+ * return the first table entry.
+ *
+ * Other driver classes might be included in the future.
+ */
+ if (rdma_protocol_iwarp(device, port)) {
+ sgid_attr = rdma_get_gid_attr(device, port, 0);
+ if (IS_ERR(sgid_attr))
+ goto out;
+
+ rcu_read_lock();
+ ndev = rcu_dereference(sgid_attr->ndev);
+ if (!net_eq(dev_net(ndev), dev_addr->net) ||
+ ndev->ifindex != bound_if_index)
+ sgid_attr = ERR_PTR(-ENODEV);
+ rcu_read_unlock();
+ goto out;
+ }
if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
ndev = dev_get_by_index(dev_addr->net, bound_if_index);
if (!ndev)
- return ERR_PTR(-ENODEV);
+ goto out;
} else {
gid_type = IB_GID_TYPE_IB;
}
sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
dev_put(ndev);
+out:
return sgid_attr;
}
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 358a2db38d23..eecb369898f5 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -307,7 +307,7 @@ get_remote_info_exit:
struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
u8 nl_client, gfp_t gfp)
{
- struct iwpm_nlmsg_request *nlmsg_request = NULL;
+ struct iwpm_nlmsg_request *nlmsg_request;
unsigned long flags;
nlmsg_request = kzalloc(sizeof(struct iwpm_nlmsg_request), gfp);
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 1b2cc9e45ade..ae2db0c70788 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -75,7 +75,7 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op)
if (type >= RDMA_NL_NUM_CLIENTS)
return false;
- return (op < max_num_ops[type]) ? true : false;
+ return op < max_num_ops[type];
}
static const struct rdma_nl_cbs *
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 7c9c79c13941..bf800f8cb3e4 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -72,12 +72,23 @@ enum {
#define IB_UVERBS_BASE_DEV MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)
static dev_t dynamic_uverbs_dev;
-static struct class *uverbs_class;
static DEFINE_IDA(uverbs_ida);
static int ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);
+static char *uverbs_devnode(const struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0666;
+ return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+}
+
+static const struct class uverbs_class = {
+ .name = "infiniband_verbs",
+ .devnode = uverbs_devnode,
+};
+
/*
* Must be called with the ufile->device->disassociate_srcu held, and the lock
* must be held until use of the ucontext is finished.
@@ -1117,7 +1128,7 @@ static int ib_uverbs_add_one(struct ib_device *device)
}
device_initialize(&uverbs_dev->dev);
- uverbs_dev->dev.class = uverbs_class;
+ uverbs_dev->dev.class = &uverbs_class;
uverbs_dev->dev.parent = device->dev.parent;
uverbs_dev->dev.release = ib_uverbs_release_dev;
uverbs_dev->groups[0] = &dev_attr_group;
@@ -1235,13 +1246,6 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
put_device(&uverbs_dev->dev);
}
-static char *uverbs_devnode(const struct device *dev, umode_t *mode)
-{
- if (mode)
- *mode = 0666;
- return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
-}
-
static int __init ib_uverbs_init(void)
{
int ret;
@@ -1262,16 +1266,13 @@ static int __init ib_uverbs_init(void)
goto out_alloc;
}
- uverbs_class = class_create("infiniband_verbs");
- if (IS_ERR(uverbs_class)) {
- ret = PTR_ERR(uverbs_class);
+ ret = class_register(&uverbs_class);
+ if (ret) {
pr_err("user_verbs: couldn't create class infiniband_verbs\n");
goto out_chrdev;
}
- uverbs_class->devnode = uverbs_devnode;
-
- ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
+ ret = class_create_file(&uverbs_class, &class_attr_abi_version.attr);
if (ret) {
pr_err("user_verbs: couldn't create abi_version attribute\n");
goto out_class;
@@ -1286,7 +1287,7 @@ static int __init ib_uverbs_init(void)
return 0;
out_class:
- class_destroy(uverbs_class);
+ class_unregister(&uverbs_class);
out_chrdev:
unregister_chrdev_region(dynamic_uverbs_dev,
@@ -1303,7 +1304,7 @@ out:
static void __exit ib_uverbs_cleanup(void)
{
ib_unregister_client(&uverbs_client);
- class_destroy(uverbs_class);
+ class_unregister(&uverbs_class);
unregister_chrdev_region(IB_UVERBS_BASE_DEV,
IB_UVERBS_NUM_FIXED_MINOR);
unregister_chrdev_region(dynamic_uverbs_dev,
diff --git a/drivers/infiniband/core/uverbs_std_types_counters.c b/drivers/infiniband/core/uverbs_std_types_counters.c
index 999da9c79866..381aa5797641 100644
--- a/drivers/infiniband/core/uverbs_std_types_counters.c
+++ b/drivers/infiniband/core/uverbs_std_types_counters.c
@@ -107,6 +107,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_COUNTERS_READ)(
return ret;
uattr = uverbs_attr_get(attrs, UVERBS_ATTR_READ_COUNTERS_BUFF);
+ if (IS_ERR(uattr))
+ return PTR_ERR(uattr);
read_attr.ncounters = uattr->ptr_attr.len / sizeof(u64);
read_attr.counters_buff = uverbs_zalloc(
attrs, array_size(read_attr.ncounters, sizeof(u64)));
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index b99b3cc283b6..41ff5595c860 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1880,6 +1880,89 @@ int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
+static void ib_get_width_and_speed(u32 netdev_speed, u32 lanes,
+ u16 *speed, u8 *width)
+{
+ if (!lanes) {
+ if (netdev_speed <= SPEED_1000) {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_SDR;
+ } else if (netdev_speed <= SPEED_10000) {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_FDR10;
+ } else if (netdev_speed <= SPEED_20000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_DDR;
+ } else if (netdev_speed <= SPEED_25000) {
+ *width = IB_WIDTH_1X;
+ *speed = IB_SPEED_EDR;
+ } else if (netdev_speed <= SPEED_40000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_FDR10;
+ } else if (netdev_speed <= SPEED_50000) {
+ *width = IB_WIDTH_2X;
+ *speed = IB_SPEED_EDR;
+ } else if (netdev_speed <= SPEED_100000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_EDR;
+ } else if (netdev_speed <= SPEED_200000) {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_HDR;
+ } else {
+ *width = IB_WIDTH_4X;
+ *speed = IB_SPEED_NDR;
+ }
+
+ return;
+ }
+
+ switch (lanes) {
+ case 1:
+ *width = IB_WIDTH_1X;
+ break;
+ case 2:
+ *width = IB_WIDTH_2X;
+ break;
+ case 4:
+ *width = IB_WIDTH_4X;
+ break;
+ case 8:
+ *width = IB_WIDTH_8X;
+ break;
+ case 12:
+ *width = IB_WIDTH_12X;
+ break;
+ default:
+ *width = IB_WIDTH_1X;
+ }
+
+ switch (netdev_speed / lanes) {
+ case SPEED_2500:
+ *speed = IB_SPEED_SDR;
+ break;
+ case SPEED_5000:
+ *speed = IB_SPEED_DDR;
+ break;
+ case SPEED_10000:
+ *speed = IB_SPEED_FDR10;
+ break;
+ case SPEED_14000:
+ *speed = IB_SPEED_FDR;
+ break;
+ case SPEED_25000:
+ *speed = IB_SPEED_EDR;
+ break;
+ case SPEED_50000:
+ *speed = IB_SPEED_HDR;
+ break;
+ case SPEED_100000:
+ *speed = IB_SPEED_NDR;
+ break;
+ default:
+ *speed = IB_SPEED_SDR;
+ }
+}
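/*
 * Illustrative sketch, not from the change above: when ethtool reports a
 * lane count, the IB speed is derived from the per-lane rate (link speed
 * divided by lanes) rather than the aggregate table. Worked values using
 * the mapping above:
 *   100 Gb/s, lanes unknown (0) -> width 4X, speed EDR (aggregate table)
 *   100 Gb/s, 4 lanes           -> 100000 / 4 = 25000 per lane -> EDR, 4X
 *   100 Gb/s, 2 lanes           -> 100000 / 2 = 50000 per lane -> HDR, 2X
 */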
+
int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
{
int rc;
@@ -1904,29 +1987,13 @@ int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u16 *speed, u8 *width)
netdev_speed = lksettings.base.speed;
} else {
netdev_speed = SPEED_1000;
- pr_warn("%s speed is unknown, defaulting to %u\n", netdev->name,
- netdev_speed);
+ if (rc)
+ pr_warn("%s speed is unknown, defaulting to %u\n",
+ netdev->name, netdev_speed);
}
- if (netdev_speed <= SPEED_1000) {
- *width = IB_WIDTH_1X;
- *speed = IB_SPEED_SDR;
- } else if (netdev_speed <= SPEED_10000) {
- *width = IB_WIDTH_1X;
- *speed = IB_SPEED_FDR10;
- } else if (netdev_speed <= SPEED_20000) {
- *width = IB_WIDTH_4X;
- *speed = IB_SPEED_DDR;
- } else if (netdev_speed <= SPEED_25000) {
- *width = IB_WIDTH_1X;
- *speed = IB_SPEED_EDR;
- } else if (netdev_speed <= SPEED_40000) {
- *width = IB_WIDTH_4X;
- *speed = IB_SPEED_FDR10;
- } else {
- *width = IB_WIDTH_4X;
- *speed = IB_SPEED_EDR;
- }
+ ib_get_width_and_speed(netdev_speed, lksettings.lanes,
+ speed, width);
return 0;
}
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ea81b2497511..9fd9849ebdd1 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -112,12 +112,34 @@ struct bnxt_re_gsi_context {
#define BNXT_RE_NQ_IDX 1
#define BNXT_RE_GEN_P5_MAX_VF 64
+struct bnxt_re_pacing {
+ u64 dbr_db_fifo_reg_off;
+ void *dbr_page;
+ u64 dbr_bar_addr;
+ u32 pacing_algo_th;
+ u32 do_pacing_save;
+ u32 dbq_pacing_time; /* ms */
+ u32 dbr_def_do_pacing;
+ bool dbr_pacing;
+ struct mutex dbq_lock; /* synchronize db pacing algo */
+};
+
+#define BNXT_RE_MAX_DBR_DO_PACING 0xFFFF
+#define BNXT_RE_DBR_PACING_TIME 5 /* ms */
+#define BNXT_RE_PACING_ALGO_THRESHOLD 250 /* Entries in DB FIFO */
+#define BNXT_RE_PACING_ALARM_TH_MULTIPLE 2 /* Multiple of pacing algo threshold */
+/* Default do_pacing value when there is no congestion */
+#define BNXT_RE_DBR_DO_PACING_NO_CONGESTION 0x7F /* 1 in 512 probability */
+#define BNXT_RE_DB_FIFO_ROOM_MASK 0x1FFF8000
+#define BNXT_RE_MAX_FIFO_DEPTH 0x2c00
+#define BNXT_RE_DB_FIFO_ROOM_SHIFT 15
+#define BNXT_RE_GRC_FIFO_REG_BASE 0x2000
+
struct bnxt_re_dev {
struct ib_device ibdev;
struct list_head list;
unsigned long flags;
#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
-#define BNXT_RE_FLAG_GOT_MSIX 2
#define BNXT_RE_FLAG_HAVE_L2_REF 3
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
#define BNXT_RE_FLAG_QOS_WORK_REG 5
@@ -152,16 +174,9 @@ struct bnxt_re_dev {
struct bnxt_qplib_res qplib_res;
struct bnxt_qplib_dpi dpi_privileged;
- atomic_t qp_count;
struct mutex qp_lock; /* protect qp list */
struct list_head qp_list;
- atomic_t cq_count;
- atomic_t srq_count;
- atomic_t mr_count;
- atomic_t mw_count;
- atomic_t ah_count;
- atomic_t pd_count;
/* Max of 2 lossless traffic class supported per port */
u16 cosq[2];
@@ -171,6 +186,9 @@ struct bnxt_re_dev {
atomic_t nq_alloc_cnt;
u32 is_virtfn;
u32 num_vfs;
+ struct bnxt_re_pacing pacing;
+ struct work_struct dbq_fifo_check_work;
+ struct delayed_work dbq_pacing_work;
};
#define to_bnxt_re_dev(ptr, member) \
@@ -181,6 +199,7 @@ struct bnxt_re_dev {
#define BNXT_RE_ROCEV2_IPV6_PACKET 3
#define BNXT_RE_CHECK_RC(x) ((x) && ((x) != -ETIMEDOUT))
+void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev);
static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
{
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 825d512799d9..93572405d6fa 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -61,15 +61,29 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
[BNXT_RE_ACTIVE_PD].name = "active_pds",
[BNXT_RE_ACTIVE_AH].name = "active_ahs",
[BNXT_RE_ACTIVE_QP].name = "active_qps",
+ [BNXT_RE_ACTIVE_RC_QP].name = "active_rc_qps",
+ [BNXT_RE_ACTIVE_UD_QP].name = "active_ud_qps",
[BNXT_RE_ACTIVE_SRQ].name = "active_srqs",
[BNXT_RE_ACTIVE_CQ].name = "active_cqs",
[BNXT_RE_ACTIVE_MR].name = "active_mrs",
[BNXT_RE_ACTIVE_MW].name = "active_mws",
+ [BNXT_RE_WATERMARK_PD].name = "watermark_pds",
+ [BNXT_RE_WATERMARK_AH].name = "watermark_ahs",
+ [BNXT_RE_WATERMARK_QP].name = "watermark_qps",
+ [BNXT_RE_WATERMARK_RC_QP].name = "watermark_rc_qps",
+ [BNXT_RE_WATERMARK_UD_QP].name = "watermark_ud_qps",
+ [BNXT_RE_WATERMARK_SRQ].name = "watermark_srqs",
+ [BNXT_RE_WATERMARK_CQ].name = "watermark_cqs",
+ [BNXT_RE_WATERMARK_MR].name = "watermark_mrs",
+ [BNXT_RE_WATERMARK_MW].name = "watermark_mws",
+ [BNXT_RE_RESIZE_CQ_CNT].name = "resize_cq_cnt",
[BNXT_RE_RX_PKTS].name = "rx_pkts",
[BNXT_RE_RX_BYTES].name = "rx_bytes",
[BNXT_RE_TX_PKTS].name = "tx_pkts",
[BNXT_RE_TX_BYTES].name = "tx_bytes",
[BNXT_RE_RECOVERABLE_ERRORS].name = "recoverable_errors",
+ [BNXT_RE_TX_ERRORS].name = "tx_roce_errors",
+ [BNXT_RE_TX_DISCARDS].name = "tx_roce_discards",
[BNXT_RE_RX_ERRORS].name = "rx_roce_errors",
[BNXT_RE_RX_DISCARDS].name = "rx_roce_discards",
[BNXT_RE_TO_RETRANSMITS].name = "to_retransmits",
@@ -117,14 +131,25 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
[BNXT_RE_TX_READ_RES].name = "tx_read_resp",
[BNXT_RE_TX_WRITE_REQ].name = "tx_write_req",
[BNXT_RE_TX_SEND_REQ].name = "tx_send_req",
+ [BNXT_RE_TX_ROCE_PKTS].name = "tx_roce_only_pkts",
+ [BNXT_RE_TX_ROCE_BYTES].name = "tx_roce_only_bytes",
[BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_req",
[BNXT_RE_RX_READ_REQ].name = "rx_read_req",
[BNXT_RE_RX_READ_RESP].name = "rx_read_resp",
[BNXT_RE_RX_WRITE_REQ].name = "rx_write_req",
[BNXT_RE_RX_SEND_REQ].name = "rx_send_req",
+ [BNXT_RE_RX_ROCE_PKTS].name = "rx_roce_only_pkts",
+ [BNXT_RE_RX_ROCE_BYTES].name = "rx_roce_only_bytes",
[BNXT_RE_RX_ROCE_GOOD_PKTS].name = "rx_roce_good_pkts",
[BNXT_RE_RX_ROCE_GOOD_BYTES].name = "rx_roce_good_bytes",
- [BNXT_RE_OOB].name = "rx_out_of_buffer"
+ [BNXT_RE_OOB].name = "rx_out_of_buffer",
+ [BNXT_RE_TX_CNP].name = "tx_cnp_pkts",
+ [BNXT_RE_RX_CNP].name = "rx_cnp_pkts",
+ [BNXT_RE_RX_ECN].name = "rx_ecn_marked_pkts",
+ [BNXT_RE_PACING_RESCHED].name = "pacing_reschedule",
+ [BNXT_RE_PACING_CMPL].name = "pacing_complete",
+ [BNXT_RE_PACING_ALERT].name = "pacing_alerts",
+ [BNXT_RE_DB_FIFO_REG].name = "db_fifo_register",
};
static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
@@ -136,14 +161,22 @@ static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
stats->value[BNXT_RE_TX_READ_RES] = s->tx_read_res;
stats->value[BNXT_RE_TX_WRITE_REQ] = s->tx_write_req;
stats->value[BNXT_RE_TX_SEND_REQ] = s->tx_send_req;
+ stats->value[BNXT_RE_TX_ROCE_PKTS] = s->tx_roce_pkts;
+ stats->value[BNXT_RE_TX_ROCE_BYTES] = s->tx_roce_bytes;
stats->value[BNXT_RE_RX_ATOMIC_REQ] = s->rx_atomic_req;
stats->value[BNXT_RE_RX_READ_REQ] = s->rx_read_req;
stats->value[BNXT_RE_RX_READ_RESP] = s->rx_read_res;
stats->value[BNXT_RE_RX_WRITE_REQ] = s->rx_write_req;
stats->value[BNXT_RE_RX_SEND_REQ] = s->rx_send_req;
+ stats->value[BNXT_RE_RX_ROCE_PKTS] = s->rx_roce_pkts;
+ stats->value[BNXT_RE_RX_ROCE_BYTES] = s->rx_roce_bytes;
stats->value[BNXT_RE_RX_ROCE_GOOD_PKTS] = s->rx_roce_good_pkts;
stats->value[BNXT_RE_RX_ROCE_GOOD_BYTES] = s->rx_roce_good_bytes;
stats->value[BNXT_RE_OOB] = s->rx_out_of_buffer;
+ stats->value[BNXT_RE_TX_CNP] = s->tx_cnp;
+ stats->value[BNXT_RE_RX_CNP] = s->rx_cnp;
+ stats->value[BNXT_RE_RX_ECN] = s->rx_ecn_marked;
+ stats->value[BNXT_RE_OUT_OF_SEQ_ERR] = s->rx_out_of_sequence;
}
static int bnxt_re_get_ext_stat(struct bnxt_re_dev *rdev,
@@ -249,30 +282,59 @@ static void bnxt_re_copy_err_stats(struct bnxt_re_dev *rdev,
err_s->res_oos_drop_count;
}
+static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
+ struct rdma_hw_stats *stats)
+{
+ struct bnxt_re_db_pacing_stats *pacing_s = &rdev->stats.pacing;
+
+ stats->value[BNXT_RE_PACING_RESCHED] = pacing_s->resched;
+ stats->value[BNXT_RE_PACING_CMPL] = pacing_s->complete;
+ stats->value[BNXT_RE_PACING_ALERT] = pacing_s->alerts;
+ stats->value[BNXT_RE_DB_FIFO_REG] =
+ readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
+}
+
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u32 port, int index)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct ctx_hw_stats *hw_stats = NULL;
+ struct bnxt_re_res_cntrs *res_s = &rdev->stats.res;
struct bnxt_qplib_roce_stats *err_s = NULL;
+ struct ctx_hw_stats *hw_stats = NULL;
int rc = 0;
hw_stats = rdev->qplib_ctx.stats.dma;
if (!port || !stats)
return -EINVAL;
- stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&rdev->qp_count);
- stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&rdev->srq_count);
- stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&rdev->cq_count);
- stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&rdev->mr_count);
- stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&rdev->mw_count);
- stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&rdev->pd_count);
- stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&rdev->ah_count);
+ stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&res_s->qp_count);
+ stats->value[BNXT_RE_ACTIVE_RC_QP] = atomic_read(&res_s->rc_qp_count);
+ stats->value[BNXT_RE_ACTIVE_UD_QP] = atomic_read(&res_s->ud_qp_count);
+ stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&res_s->srq_count);
+ stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&res_s->cq_count);
+ stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&res_s->mr_count);
+ stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&res_s->mw_count);
+ stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&res_s->pd_count);
+ stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&res_s->ah_count);
+ stats->value[BNXT_RE_WATERMARK_QP] = res_s->qp_watermark;
+ stats->value[BNXT_RE_WATERMARK_RC_QP] = res_s->rc_qp_watermark;
+ stats->value[BNXT_RE_WATERMARK_UD_QP] = res_s->ud_qp_watermark;
+ stats->value[BNXT_RE_WATERMARK_SRQ] = res_s->srq_watermark;
+ stats->value[BNXT_RE_WATERMARK_CQ] = res_s->cq_watermark;
+ stats->value[BNXT_RE_WATERMARK_MR] = res_s->mr_watermark;
+ stats->value[BNXT_RE_WATERMARK_MW] = res_s->mw_watermark;
+ stats->value[BNXT_RE_WATERMARK_PD] = res_s->pd_watermark;
+ stats->value[BNXT_RE_WATERMARK_AH] = res_s->ah_watermark;
+ stats->value[BNXT_RE_RESIZE_CQ_CNT] = atomic_read(&res_s->resize_count);
if (hw_stats) {
stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
le64_to_cpu(hw_stats->tx_bcast_pkts);
+ stats->value[BNXT_RE_TX_DISCARDS] =
+ le64_to_cpu(hw_stats->tx_discard_pkts);
+ stats->value[BNXT_RE_TX_ERRORS] =
+ le64_to_cpu(hw_stats->tx_error_pkts);
stats->value[BNXT_RE_RX_ERRORS] =
le64_to_cpu(hw_stats->rx_error_pkts);
stats->value[BNXT_RE_RX_DISCARDS] =
@@ -294,6 +356,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
&rdev->flags);
goto done;
}
+ bnxt_re_copy_err_stats(rdev, stats, err_s);
if (_is_ext_stats_supported(rdev->dev_attr.dev_cap_flags) &&
!rdev->is_virtfn) {
rc = bnxt_re_get_ext_stat(rdev, stats);
@@ -303,7 +366,8 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
goto done;
}
}
- bnxt_re_copy_err_stats(rdev, stats, err_s);
+ if (rdev->pacing.dbr_pacing)
+ bnxt_re_copy_db_pacing_stats(rdev, stats);
}
done:
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index 7943b2c393e4..e541b6f8ca9f 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -44,15 +44,29 @@ enum bnxt_re_hw_stats {
BNXT_RE_ACTIVE_PD,
BNXT_RE_ACTIVE_AH,
BNXT_RE_ACTIVE_QP,
+ BNXT_RE_ACTIVE_RC_QP,
+ BNXT_RE_ACTIVE_UD_QP,
BNXT_RE_ACTIVE_SRQ,
BNXT_RE_ACTIVE_CQ,
BNXT_RE_ACTIVE_MR,
BNXT_RE_ACTIVE_MW,
+ BNXT_RE_WATERMARK_PD,
+ BNXT_RE_WATERMARK_AH,
+ BNXT_RE_WATERMARK_QP,
+ BNXT_RE_WATERMARK_RC_QP,
+ BNXT_RE_WATERMARK_UD_QP,
+ BNXT_RE_WATERMARK_SRQ,
+ BNXT_RE_WATERMARK_CQ,
+ BNXT_RE_WATERMARK_MR,
+ BNXT_RE_WATERMARK_MW,
+ BNXT_RE_RESIZE_CQ_CNT,
BNXT_RE_RX_PKTS,
BNXT_RE_RX_BYTES,
BNXT_RE_TX_PKTS,
BNXT_RE_TX_BYTES,
BNXT_RE_RECOVERABLE_ERRORS,
+ BNXT_RE_TX_ERRORS,
+ BNXT_RE_TX_DISCARDS,
BNXT_RE_RX_ERRORS,
BNXT_RE_RX_DISCARDS,
BNXT_RE_TO_RETRANSMITS,
@@ -100,19 +114,58 @@ enum bnxt_re_hw_stats {
BNXT_RE_TX_READ_RES,
BNXT_RE_TX_WRITE_REQ,
BNXT_RE_TX_SEND_REQ,
+ BNXT_RE_TX_ROCE_PKTS,
+ BNXT_RE_TX_ROCE_BYTES,
BNXT_RE_RX_ATOMIC_REQ,
BNXT_RE_RX_READ_REQ,
BNXT_RE_RX_READ_RESP,
BNXT_RE_RX_WRITE_REQ,
BNXT_RE_RX_SEND_REQ,
+ BNXT_RE_RX_ROCE_PKTS,
+ BNXT_RE_RX_ROCE_BYTES,
BNXT_RE_RX_ROCE_GOOD_PKTS,
BNXT_RE_RX_ROCE_GOOD_BYTES,
BNXT_RE_OOB,
+ BNXT_RE_TX_CNP,
+ BNXT_RE_RX_CNP,
+ BNXT_RE_RX_ECN,
+ BNXT_RE_PACING_RESCHED,
+ BNXT_RE_PACING_CMPL,
+ BNXT_RE_PACING_ALERT,
+ BNXT_RE_DB_FIFO_REG,
BNXT_RE_NUM_EXT_COUNTERS
};
#define BNXT_RE_NUM_STD_COUNTERS (BNXT_RE_OUT_OF_SEQ_ERR + 1)
+struct bnxt_re_db_pacing_stats {
+ u64 resched;
+ u64 complete;
+ u64 alerts;
+};
+
+struct bnxt_re_res_cntrs {
+ atomic_t qp_count;
+ atomic_t rc_qp_count;
+ atomic_t ud_qp_count;
+ atomic_t cq_count;
+ atomic_t srq_count;
+ atomic_t mr_count;
+ atomic_t mw_count;
+ atomic_t ah_count;
+ atomic_t pd_count;
+ atomic_t resize_count;
+ u64 qp_watermark;
+ u64 rc_qp_watermark;
+ u64 ud_qp_watermark;
+ u64 cq_watermark;
+ u64 srq_watermark;
+ u64 mr_watermark;
+ u64 mw_watermark;
+ u64 ah_watermark;
+ u64 pd_watermark;
+};
+
struct bnxt_re_rstat {
struct bnxt_qplib_roce_stats errs;
struct bnxt_qplib_ext_stat ext_stat;
@@ -120,6 +173,8 @@ struct bnxt_re_rstat {
struct bnxt_re_stats {
struct bnxt_re_rstat rstat;
+ struct bnxt_re_res_cntrs res;
+ struct bnxt_re_db_pacing_stats pacing;
};
struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
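
The per-resource counters and watermarks declared above are updated with one recurring pattern at the verbs allocation paths later in this series: bump the active count with atomic_inc_return() and record a new high-water mark when the returned value exceeds it. A minimal standalone sketch of that pattern follows; the res_track names are hypothetical (the driver open-codes the same logic per resource type), and the watermark is deliberately a plain u64 since a slightly stale value is acceptable for statistics.

/* Hypothetical helper illustrating the count-plus-watermark pattern;
 * not part of the patch, which open-codes this per resource type.
 */
#include <linux/atomic.h>
#include <linux/types.h>

struct res_track {
	atomic_t count;		/* currently active objects */
	u64 watermark;		/* highest active count observed */
};

static inline void res_track_inc(struct res_track *t)
{
	u32 active = atomic_inc_return(&t->count);

	/* Not atomic with respect to the stats reader; an occasionally
	 * stale watermark is acceptable for counters.
	 */
	if (active > t->watermark)
		t->watermark = active;
}

static inline void res_track_dec(struct res_track *t)
{
	atomic_dec(&t->count);
}
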
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 03cc45a5458d..0848c2c2ffcf 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -284,7 +284,7 @@ int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
int index, union ib_gid *gid)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- int rc = 0;
+ int rc;
/* Ignore port_num */
memset(gid, 0, sizeof(*gid));
@@ -565,6 +565,8 @@ bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
break;
case BNXT_RE_MMAP_UC_DB:
case BNXT_RE_MMAP_WC_DB:
+ case BNXT_RE_MMAP_DBR_BAR:
+ case BNXT_RE_MMAP_DBR_PAGE:
ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
&entry->rdma_entry, PAGE_SIZE);
break;
@@ -600,7 +602,7 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
&rdev->qplib_res.pd_tbl,
&pd->qplib_pd))
- atomic_dec(&rdev->pd_count);
+ atomic_dec(&rdev->stats.res.pd_count);
}
return 0;
}
@@ -613,10 +615,11 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
udata, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_user_mmap_entry *entry = NULL;
+ u32 active_pds;
int rc = 0;
pd->rdev = rdev;
- if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
+ if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
ibdev_err(&rdev->ibdev, "Failed to allocate HW PD");
rc = -ENOMEM;
goto fail;
@@ -663,7 +666,9 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
if (bnxt_re_create_fence_mr(pd))
ibdev_warn(&rdev->ibdev,
"Failed to create Fence-MR\n");
- atomic_inc(&rdev->pd_count);
+ active_pds = atomic_inc_return(&rdev->stats.res.pd_count);
+ if (active_pds > rdev->stats.res.pd_watermark)
+ rdev->stats.res.pd_watermark = active_pds;
return 0;
dbfail:
@@ -679,7 +684,7 @@ int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
struct bnxt_re_dev *rdev = ah->rdev;
bool block = true;
- int rc = 0;
+ int rc;
block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
@@ -689,7 +694,7 @@ int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
else
goto fail;
}
- atomic_dec(&rdev->ah_count);
+ atomic_dec(&rdev->stats.res.ah_count);
fail:
return rc;
}
@@ -723,6 +728,7 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
const struct ib_gid_attr *sgid_attr;
struct bnxt_re_gid_ctx *ctx;
struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
+ u32 active_ahs;
u8 nw_type;
int rc;
@@ -775,7 +781,9 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
wmb(); /* make sure cache is updated. */
spin_unlock_irqrestore(&uctx->sh_lock, flag);
}
- atomic_inc(&rdev->ah_count);
+ active_ahs = atomic_inc_return(&rdev->stats.res.ah_count);
+ if (active_ahs > rdev->stats.res.ah_watermark)
+ rdev->stats.res.ah_watermark = active_ahs;
return 0;
}
@@ -826,7 +834,7 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
struct bnxt_re_qp *gsi_sqp;
struct bnxt_re_ah *gsi_sah;
struct bnxt_re_dev *rdev;
- int rc = 0;
+ int rc;
rdev = qp->rdev;
gsi_sqp = rdev->gsi_ctx.gsi_sqp;
@@ -836,7 +844,7 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
bnxt_qplib_destroy_ah(&rdev->qplib_res,
&gsi_sah->qplib_ah,
true);
- atomic_dec(&rdev->ah_count);
+ atomic_dec(&rdev->stats.res.ah_count);
bnxt_qplib_clean_qp(&qp->qplib_qp);
ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
@@ -851,7 +859,7 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
mutex_lock(&rdev->qp_lock);
list_del(&gsi_sqp->list);
mutex_unlock(&rdev->qp_lock);
- atomic_dec(&rdev->qp_count);
+ atomic_dec(&rdev->stats.res.qp_count);
kfree(rdev->gsi_ctx.sqp_tbl);
kfree(gsi_sah);
@@ -901,7 +909,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
mutex_lock(&rdev->qp_lock);
list_del(&qp->list);
mutex_unlock(&rdev->qp_lock);
- atomic_dec(&rdev->qp_count);
+ atomic_dec(&rdev->stats.res.qp_count);
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
@@ -1095,7 +1103,7 @@ static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
"Failed to allocate HW AH for Shadow QP");
goto fail;
}
- atomic_inc(&rdev->ah_count);
+ atomic_inc(&rdev->stats.res.ah_count);
return ah;
@@ -1163,7 +1171,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
INIT_LIST_HEAD(&qp->list);
mutex_lock(&rdev->qp_lock);
list_add_tail(&qp->list, &rdev->qp_list);
- atomic_inc(&rdev->qp_count);
+ atomic_inc(&rdev->stats.res.qp_count);
mutex_unlock(&rdev->qp_lock);
return qp;
fail:
@@ -1340,8 +1348,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
qplqp->pd = &pd->qplib_pd;
qplqp->qp_handle = (u64)qplqp;
qplqp->max_inline_data = init_attr->cap.max_inline_data;
- qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
- true : false);
+ qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
qptype = bnxt_re_init_qp_type(rdev, init_attr);
if (qptype < 0) {
rc = qptype;
@@ -1446,7 +1453,7 @@ static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
{
struct bnxt_re_dev *rdev;
struct bnxt_qplib_qp *qplqp;
- int rc = 0;
+ int rc;
rdev = qp->rdev;
qplqp = &qp->qplib_qp;
@@ -1497,6 +1504,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ u32 active_qps;
int rc;
rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
@@ -1545,7 +1553,18 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
mutex_lock(&rdev->qp_lock);
list_add_tail(&qp->list, &rdev->qp_list);
mutex_unlock(&rdev->qp_lock);
- atomic_inc(&rdev->qp_count);
+ active_qps = atomic_inc_return(&rdev->stats.res.qp_count);
+ if (active_qps > rdev->stats.res.qp_watermark)
+ rdev->stats.res.qp_watermark = active_qps;
+ if (qp_init_attr->qp_type == IB_QPT_RC) {
+ active_qps = atomic_inc_return(&rdev->stats.res.rc_qp_count);
+ if (active_qps > rdev->stats.res.rc_qp_watermark)
+ rdev->stats.res.rc_qp_watermark = active_qps;
+ } else if (qp_init_attr->qp_type == IB_QPT_UD) {
+ active_qps = atomic_inc_return(&rdev->stats.res.ud_qp_count);
+ if (active_qps > rdev->stats.res.ud_qp_watermark)
+ rdev->stats.res.ud_qp_watermark = active_qps;
+ }
return 0;
qp_destroy:
@@ -1648,7 +1667,7 @@ int bnxt_re_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata)
nq = qplib_srq->cq->nq;
bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
ib_umem_release(srq->umem);
- atomic_dec(&rdev->srq_count);
+ atomic_dec(&rdev->stats.res.srq_count);
if (nq)
nq->budget--;
return 0;
@@ -1696,6 +1715,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
struct bnxt_re_srq *srq;
struct bnxt_re_pd *pd;
struct ib_pd *ib_pd;
+ u32 active_srqs;
int rc, entries;
ib_pd = ib_srq->pd;
@@ -1760,7 +1780,9 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
}
if (nq)
nq->budget++;
- atomic_inc(&rdev->srq_count);
+ active_srqs = atomic_inc_return(&rdev->stats.res.srq_count);
+ if (active_srqs > rdev->stats.res.srq_watermark)
+ rdev->stats.res.srq_watermark = active_srqs;
spin_lock_init(&srq->lock);
return 0;
@@ -1862,7 +1884,7 @@ static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
int qp_attr_mask)
{
struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
- int rc = 0;
+ int rc;
if (qp_attr_mask & IB_QP_STATE) {
qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
@@ -2212,7 +2234,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
u8 ip_version = 0;
u16 vlan_id = 0xFFFF;
void *buf;
- int i, rc = 0;
+ int i, rc;
memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
@@ -2250,7 +2272,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
}
is_eth = true;
- is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
+ is_vlan = vlan_id && (vlan_id < 0x1000);
ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
ip_version, is_udp, 0, &qp->qp1_hdr);
@@ -2787,7 +2809,6 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_qplib_swqe wqe;
int rc = 0;
- memset(&wqe, 0, sizeof(wqe));
while (wr) {
/* House keeping */
memset(&wqe, 0, sizeof(wqe));
@@ -2886,7 +2907,7 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
ib_umem_release(cq->umem);
- atomic_dec(&rdev->cq_count);
+ atomic_dec(&rdev->stats.res.cq_count);
nq->budget--;
kfree(cq->cql);
return 0;
@@ -2902,6 +2923,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
int cqe = attr->cqe;
struct bnxt_qplib_nq *nq = NULL;
unsigned int nq_alloc_cnt;
+ u32 active_cqs;
if (attr->flags)
return -EOPNOTSUPP;
@@ -2970,7 +2992,9 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->cq_period = cq->qplib_cq.period;
nq->budget++;
- atomic_inc(&rdev->cq_count);
+ active_cqs = atomic_inc_return(&rdev->stats.res.cq_count);
+ if (active_cqs > rdev->stats.res.cq_watermark)
+ rdev->stats.res.cq_watermark = active_cqs;
spin_lock_init(&cq->cq_lock);
if (udata) {
@@ -3083,6 +3107,7 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
}
cq->ib_cq.cqe = cq->resize_cqe;
+ atomic_inc(&rdev->stats.res.resize_count);
return 0;
@@ -3319,26 +3344,21 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
struct bnxt_re_dev *rdev = gsi_qp->rdev;
struct bnxt_re_sqp_entries *sqp_entry = NULL;
struct bnxt_re_qp *gsi_sqp = rdev->gsi_ctx.gsi_sqp;
+ dma_addr_t shrq_hdr_buf_map;
+ struct ib_sge s_sge[2] = {};
+ struct ib_sge r_sge[2] = {};
struct bnxt_re_ah *gsi_sah;
+ struct ib_recv_wr rwr = {};
+ dma_addr_t rq_hdr_buf_map;
+ struct ib_ud_wr udwr = {};
struct ib_send_wr *swr;
- struct ib_ud_wr udwr;
- struct ib_recv_wr rwr;
+ u32 skip_bytes = 0;
int pkt_type = 0;
- u32 tbl_idx;
void *rq_hdr_buf;
- dma_addr_t rq_hdr_buf_map;
- dma_addr_t shrq_hdr_buf_map;
u32 offset = 0;
- u32 skip_bytes = 0;
- struct ib_sge s_sge[2];
- struct ib_sge r_sge[2];
+ u32 tbl_idx;
int rc;
- memset(&udwr, 0, sizeof(udwr));
- memset(&rwr, 0, sizeof(rwr));
- memset(&s_sge, 0, sizeof(s_sge));
- memset(&r_sge, 0, sizeof(r_sge));
-
swr = &udwr.wr;
tbl_idx = cqe->wr_id;
@@ -3578,7 +3598,7 @@ static int send_phantom_wqe(struct bnxt_re_qp *qp)
{
struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
unsigned long flags;
- int rc = 0;
+ int rc;
spin_lock_irqsave(&qp->sq_lock, flags);
@@ -3768,6 +3788,7 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_mr *mr;
+ u32 active_mrs;
int rc;
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
@@ -3795,7 +3816,9 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_ATOMIC))
mr->ib_mr.rkey = mr->ib_mr.lkey;
- atomic_inc(&rdev->mr_count);
+ active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
+ if (active_mrs > rdev->stats.res.mr_watermark)
+ rdev->stats.res.mr_watermark = active_mrs;
return &mr->ib_mr;
@@ -3828,7 +3851,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
ib_umem_release(mr->ib_umem);
kfree(mr);
- atomic_dec(&rdev->mr_count);
+ atomic_dec(&rdev->stats.res.mr_count);
return rc;
}
@@ -3858,6 +3881,7 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_mr *mr = NULL;
+ u32 active_mrs;
int rc;
if (type != IB_MR_TYPE_MEM_REG) {
@@ -3896,7 +3920,9 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
goto fail_mr;
}
- atomic_inc(&rdev->mr_count);
+ active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
+ if (active_mrs > rdev->stats.res.mr_watermark)
+ rdev->stats.res.mr_watermark = active_mrs;
return &mr->ib_mr;
fail_mr:
@@ -3914,6 +3940,7 @@ struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_mw *mw;
+ u32 active_mws;
int rc;
mw = kzalloc(sizeof(*mw), GFP_KERNEL);
@@ -3932,7 +3959,9 @@ struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
}
mw->ib_mw.rkey = mw->qplib_mw.rkey;
- atomic_inc(&rdev->mw_count);
+ active_mws = atomic_inc_return(&rdev->stats.res.mw_count);
+ if (active_mws > rdev->stats.res.mw_watermark)
+ rdev->stats.res.mw_watermark = active_mws;
return &mw->ib_mw;
fail:
@@ -3953,21 +3982,19 @@ int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
}
kfree(mw);
- atomic_dec(&rdev->mw_count);
+ atomic_dec(&rdev->stats.res.mw_count);
return rc;
}
-/* uverbs */
-struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
- u64 virt_addr, int mr_access_flags,
- struct ib_udata *udata)
+static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_umem *umem)
{
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev;
- struct bnxt_re_mr *mr;
- struct ib_umem *umem;
unsigned long page_size;
+ struct bnxt_re_mr *mr;
int umem_pgs, rc;
+ u32 active_mrs;
if (length > BNXT_RE_MAX_MR_SIZE) {
ibdev_err(&rdev->ibdev, "MR Size: %lld > Max supported:%lld\n",
@@ -3975,6 +4002,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
return ERR_PTR(-ENOMEM);
}
+ page_size = ib_umem_find_best_pgsz(umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
+ if (!page_size) {
+		ibdev_err(&rdev->ibdev, "umem page size unsupported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
@@ -3986,45 +4019,33 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
- ibdev_err(&rdev->ibdev, "Failed to allocate MR");
+ ibdev_err(&rdev->ibdev, "Failed to allocate MR rc = %d", rc);
+ rc = -EIO;
goto free_mr;
}
/* The fixed portion of the rkey is the same as the lkey */
mr->ib_mr.rkey = mr->qplib_mr.rkey;
-
- umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
- if (IS_ERR(umem)) {
- ibdev_err(&rdev->ibdev, "Failed to get umem");
- rc = -EFAULT;
- goto free_mrw;
- }
mr->ib_umem = umem;
-
mr->qplib_mr.va = virt_addr;
- page_size = ib_umem_find_best_pgsz(
- umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
- if (!page_size) {
- ibdev_err(&rdev->ibdev, "umem page size unsupported!");
- rc = -EFAULT;
- goto free_umem;
- }
mr->qplib_mr.total_size = length;
umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
umem_pgs, page_size);
if (rc) {
- ibdev_err(&rdev->ibdev, "Failed to register user MR");
- goto free_umem;
+ ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
+ rc = -EIO;
+ goto free_mrw;
}
mr->ib_mr.lkey = mr->qplib_mr.lkey;
mr->ib_mr.rkey = mr->qplib_mr.lkey;
- atomic_inc(&rdev->mr_count);
+ active_mrs = atomic_inc_return(&rdev->stats.res.mr_count);
+ if (active_mrs > rdev->stats.res.mr_watermark)
+ rdev->stats.res.mr_watermark = active_mrs;
return &mr->ib_mr;
-free_umem:
- ib_umem_release(umem);
+
free_mrw:
bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
free_mr:
@@ -4032,6 +4053,48 @@ free_mr:
return ERR_PTR(rc);
}
+struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct ib_umem *umem;
+ struct ib_mr *ib_mr;
+
+ umem = ib_umem_get(&rdev->ibdev, start, length, mr_access_flags);
+ if (IS_ERR(umem))
+ return ERR_CAST(umem);
+
+ ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
+ if (IS_ERR(ib_mr))
+ ib_umem_release(umem);
+ return ib_mr;
+}
+
+struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
+ u64 length, u64 virt_addr, int fd,
+ int mr_access_flags, struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct ib_umem *umem;
+ struct ib_mr *ib_mr;
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(&rdev->ibdev, start, length,
+ fd, mr_access_flags);
+ if (IS_ERR(umem_dmabuf))
+ return ERR_CAST(umem_dmabuf);
+
+ umem = &umem_dmabuf->umem;
+
+ ib_mr = __bnxt_re_user_reg_mr(ib_pd, length, virt_addr, mr_access_flags, umem);
+ if (IS_ERR(ib_mr))
+ ib_umem_release(umem);
+ return ib_mr;
+}
+
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
{
struct ib_device *ibdev = ctx->device;
@@ -4087,6 +4150,8 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
goto cfail;
}
uctx->shpage_mmap = &entry->rdma_entry;
+ if (rdev->pacing.dbr_pacing)
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
if (rc) {
@@ -4159,6 +4224,19 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
case BNXT_RE_MMAP_SH_PAGE:
ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
break;
+ case BNXT_RE_MMAP_DBR_BAR:
+ pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
+ ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case BNXT_RE_MMAP_DBR_PAGE:
+		/* User space must not map this page writable */
+ if (vma->vm_flags & VM_WRITE)
+ return -EFAULT;
+ ret = vm_insert_page(vma, vma->vm_start,
+ virt_to_page((void *)bnxt_entry->mem_offset));
+ break;
default:
ret = -EINVAL;
break;
@@ -4178,6 +4256,15 @@ void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
kfree(bnxt_entry);
}
+static int UVERBS_HANDLER(BNXT_RE_METHOD_NOTIFY_DRV)(struct uverbs_attr_bundle *attrs)
+{
+ struct bnxt_re_ucontext *uctx;
+
+ uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
+ bnxt_re_pacing_alert(uctx->rdev);
+ return 0;
+}
+
static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
{
struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
@@ -4190,7 +4277,7 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *
u64 mmap_offset;
u32 length;
u32 dpi;
- u64 dbr;
+ u64 addr;
int err;
uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
@@ -4212,19 +4299,30 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *
return -ENOMEM;
length = PAGE_SIZE;
dpi = uctx->wcdpi.dpi;
- dbr = (u64)uctx->wcdpi.umdbr;
+ addr = (u64)uctx->wcdpi.umdbr;
mmap_flag = BNXT_RE_MMAP_WC_DB;
} else {
return -EINVAL;
}
break;
+ case BNXT_RE_ALLOC_DBR_BAR_PAGE:
+ length = PAGE_SIZE;
+ addr = (u64)rdev->pacing.dbr_bar_addr;
+ mmap_flag = BNXT_RE_MMAP_DBR_BAR;
+ break;
+
+ case BNXT_RE_ALLOC_DBR_PAGE:
+ length = PAGE_SIZE;
+ addr = (u64)rdev->pacing.dbr_page;
+ mmap_flag = BNXT_RE_MMAP_DBR_PAGE;
+ break;
default:
return -EOPNOTSUPP;
}
- entry = bnxt_re_mmap_entry_insert(uctx, dbr, mmap_flag, &mmap_offset);
+ entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mmap_offset);
if (!entry)
return -ENOMEM;
@@ -4264,6 +4362,9 @@ static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
uctx->wcdpi.dbr = NULL;
}
break;
+ case BNXT_RE_MMAP_DBR_BAR:
+ case BNXT_RE_MMAP_DBR_PAGE:
+ break;
default:
goto exit;
}
@@ -4301,7 +4402,13 @@ DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
&UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
&UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));
+DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV);
+
+DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV,
+ &UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV));
+
const struct uapi_definition bnxt_re_uapi_defs[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV),
{}
};
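
The reg_user_mr rework above follows a simple ownership rule: the exported entry points pin the memory (a plain umem or a pinned dmabuf umem), the shared __bnxt_re_user_reg_mr() helper consumes it on success, and the caller releases it only when the helper fails. A reduced sketch of that contract is below; everything except the core ib_umem calls uses illustrative names and the helper body is a placeholder.

/* Sketch of the umem ownership contract used by the MR registration
 * split above.  reg_mr_common()/reg_mr_user() are illustrative names.
 */
#include <linux/err.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

static struct ib_mr *reg_mr_common(struct ib_pd *pd, u64 length, u64 va,
				   int access, struct ib_umem *umem)
{
	/* allocate the HW MR and program its page tables from @umem;
	 * on success the MR keeps the reference to @umem
	 */
	return ERR_PTR(-EOPNOTSUPP);	/* placeholder body */
}

static struct ib_mr *reg_mr_user(struct ib_pd *pd, u64 start, u64 length,
				 u64 va, int access)
{
	struct ib_umem *umem;
	struct ib_mr *mr;

	umem = ib_umem_get(pd->device, start, length, access);
	if (IS_ERR(umem))
		return ERR_CAST(umem);

	mr = reg_mr_common(pd, length, va, access, umem);
	if (IS_ERR(mr))
		ib_umem_release(umem);	/* helper did not take ownership */
	return mr;
}
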
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 32d9e9d09791..84715b7e7a4e 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -146,6 +146,8 @@ enum bnxt_re_mmap_flag {
BNXT_RE_MMAP_SH_PAGE,
BNXT_RE_MMAP_UC_DB,
BNXT_RE_MMAP_WC_DB,
+ BNXT_RE_MMAP_DBR_PAGE,
+ BNXT_RE_MMAP_DBR_BAR,
};
struct bnxt_re_user_mmap_entry {
@@ -227,6 +229,10 @@ int bnxt_re_dealloc_mw(struct ib_mw *mw);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_udata *udata);
+struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
+ u64 length, u64 virt_addr,
+ int fd, int mr_access_flags,
+ struct ib_udata *udata);
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 63e98e2d3596..c9066aade412 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -360,7 +360,7 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev;
- int rc = 0;
+ int rc;
en_dev = rdev->en_dev;
@@ -395,10 +395,9 @@ static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_func_qcfg_output resp = {0};
struct hwrm_func_qcfg_input req = {0};
- struct bnxt_fw_msg fw_msg;
+ struct bnxt_fw_msg fw_msg = {};
int rc;
- memset(&fw_msg, 0, sizeof(fw_msg));
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCFG);
req.fid = cpu_to_le16(0xffff);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
@@ -432,9 +431,219 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
return rc;
cctx->modes.db_push = le32_to_cpu(resp.flags) & FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE;
+ cctx->modes.dbr_pacing =
+ le32_to_cpu(resp.flags_ext2) &
+ FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED;
+ return 0;
+}
+
+static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev)
+{
+ struct hwrm_func_dbr_pacing_qcfg_output resp = {};
+ struct hwrm_func_dbr_pacing_qcfg_input req = {};
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ cctx = rdev->chip_ctx;
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_DBR_PACING_QCFG);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ return rc;
+
+ if ((le32_to_cpu(resp.dbr_stat_db_fifo_reg) &
+ FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK) ==
+ FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC)
+ cctx->dbr_stat_db_fifo =
+ le32_to_cpu(resp.dbr_stat_db_fifo_reg) &
+ ~FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK;
+ return 0;
+}
+
+/* Update the pacing tunable parameters to the default values */
+static void bnxt_re_set_default_pacing_data(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
+
+ pacing_data->do_pacing = rdev->pacing.dbr_def_do_pacing;
+ pacing_data->pacing_th = rdev->pacing.pacing_algo_th;
+ pacing_data->alarm_th =
+ pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE;
+}
+
+static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev)
+{
+ u32 read_val, fifo_occup;
+
+	/* The loop should not run infinitely as the occupancy usually goes
+	 * below the pacing algo threshold as soon as pacing kicks in.
+ */
+ while (1) {
+ read_val = readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
+ fifo_occup = BNXT_RE_MAX_FIFO_DEPTH -
+ ((read_val & BNXT_RE_DB_FIFO_ROOM_MASK) >>
+ BNXT_RE_DB_FIFO_ROOM_SHIFT);
+		/* FIFO occupancy cannot be greater than the max FIFO depth */
+ if (fifo_occup > BNXT_RE_MAX_FIFO_DEPTH)
+ break;
+
+ if (fifo_occup < rdev->qplib_res.pacing_data->pacing_th)
+ break;
+ }
+}
+
+static void bnxt_re_db_fifo_check(struct work_struct *work)
+{
+ struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
+ dbq_fifo_check_work);
+ struct bnxt_qplib_db_pacing_data *pacing_data;
+ u32 pacing_save;
+
+ if (!mutex_trylock(&rdev->pacing.dbq_lock))
+ return;
+ pacing_data = rdev->qplib_res.pacing_data;
+ pacing_save = rdev->pacing.do_pacing_save;
+ __wait_for_fifo_occupancy_below_th(rdev);
+ cancel_delayed_work_sync(&rdev->dbq_pacing_work);
+ if (pacing_save > rdev->pacing.dbr_def_do_pacing) {
+		/* Double the do_pacing value while congestion persists */
+ pacing_save = pacing_save << 1;
+ } else {
+		/*
+		 * When new congestion is detected, increase the do_pacing
+		 * by 8 times and also increase the pacing_th by 4 times. The
+		 * larger pacing_th gives the queue more space to oscillate
+		 * down without getting empty, but also more room to grow
+		 * without causing another alarm.
+		 */
+ pacing_save = pacing_save << 3;
+ pacing_data->pacing_th = rdev->pacing.pacing_algo_th * 4;
+ }
+
+ if (pacing_save > BNXT_RE_MAX_DBR_DO_PACING)
+ pacing_save = BNXT_RE_MAX_DBR_DO_PACING;
+
+ pacing_data->do_pacing = pacing_save;
+ rdev->pacing.do_pacing_save = pacing_data->do_pacing;
+ pacing_data->alarm_th =
+ pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE;
+ schedule_delayed_work(&rdev->dbq_pacing_work,
+ msecs_to_jiffies(rdev->pacing.dbq_pacing_time));
+ rdev->stats.pacing.alerts++;
+ mutex_unlock(&rdev->pacing.dbq_lock);
+}
+
+static void bnxt_re_pacing_timer_exp(struct work_struct *work)
+{
+ struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
+ dbq_pacing_work.work);
+ struct bnxt_qplib_db_pacing_data *pacing_data;
+ u32 read_val, fifo_occup;
+
+ if (!mutex_trylock(&rdev->pacing.dbq_lock))
+ return;
+
+ pacing_data = rdev->qplib_res.pacing_data;
+ read_val = readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
+ fifo_occup = BNXT_RE_MAX_FIFO_DEPTH -
+ ((read_val & BNXT_RE_DB_FIFO_ROOM_MASK) >>
+ BNXT_RE_DB_FIFO_ROOM_SHIFT);
+
+ if (fifo_occup > pacing_data->pacing_th)
+ goto restart_timer;
+
+	/*
+	 * Instead of immediately going back to the default do_pacing,
+	 * reduce it by 1/8 and restart the timer.
+	 */
+ pacing_data->do_pacing = pacing_data->do_pacing - (pacing_data->do_pacing >> 3);
+ pacing_data->do_pacing = max_t(u32, rdev->pacing.dbr_def_do_pacing, pacing_data->do_pacing);
+ if (pacing_data->do_pacing <= rdev->pacing.dbr_def_do_pacing) {
+ bnxt_re_set_default_pacing_data(rdev);
+ rdev->stats.pacing.complete++;
+ goto dbq_unlock;
+ }
+
+restart_timer:
+ schedule_delayed_work(&rdev->dbq_pacing_work,
+ msecs_to_jiffies(rdev->pacing.dbq_pacing_time));
+ rdev->stats.pacing.resched++;
+dbq_unlock:
+ rdev->pacing.do_pacing_save = pacing_data->do_pacing;
+ mutex_unlock(&rdev->pacing.dbq_lock);
+}
+
+void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_db_pacing_data *pacing_data;
+
+ if (!rdev->pacing.dbr_pacing)
+ return;
+ mutex_lock(&rdev->pacing.dbq_lock);
+ pacing_data = rdev->qplib_res.pacing_data;
+
+ /*
+ * Increase the alarm_th to max so that other user lib instances do not
+ * keep alerting the driver.
+ */
+ pacing_data->alarm_th = BNXT_RE_MAX_FIFO_DEPTH;
+ pacing_data->do_pacing = BNXT_RE_MAX_DBR_DO_PACING;
+ cancel_work_sync(&rdev->dbq_fifo_check_work);
+ schedule_work(&rdev->dbq_fifo_check_work);
+ mutex_unlock(&rdev->pacing.dbq_lock);
+}
+
+static int bnxt_re_initialize_dbr_pacing(struct bnxt_re_dev *rdev)
+{
+ if (bnxt_re_hwrm_dbr_pacing_qcfg(rdev))
+ return -EIO;
+
+ /* Allocate a page for app use */
+ rdev->pacing.dbr_page = (void *)__get_free_page(GFP_KERNEL);
+ if (!rdev->pacing.dbr_page)
+ return -ENOMEM;
+
+ memset((u8 *)rdev->pacing.dbr_page, 0, PAGE_SIZE);
+ rdev->qplib_res.pacing_data = (struct bnxt_qplib_db_pacing_data *)rdev->pacing.dbr_page;
+
+	/* Map HW window 2 for reading the DB FIFO depth */
+ writel(rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_BASE_MASK,
+ rdev->en_dev->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
+ rdev->pacing.dbr_db_fifo_reg_off =
+ (rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_OFFSET_MASK) +
+ BNXT_RE_GRC_FIFO_REG_BASE;
+ rdev->pacing.dbr_bar_addr =
+ pci_resource_start(rdev->qplib_res.pdev, 0) + rdev->pacing.dbr_db_fifo_reg_off;
+
+ rdev->pacing.pacing_algo_th = BNXT_RE_PACING_ALGO_THRESHOLD;
+ rdev->pacing.dbq_pacing_time = BNXT_RE_DBR_PACING_TIME;
+ rdev->pacing.dbr_def_do_pacing = BNXT_RE_DBR_DO_PACING_NO_CONGESTION;
+ rdev->pacing.do_pacing_save = rdev->pacing.dbr_def_do_pacing;
+ rdev->qplib_res.pacing_data->fifo_max_depth = BNXT_RE_MAX_FIFO_DEPTH;
+ rdev->qplib_res.pacing_data->fifo_room_mask = BNXT_RE_DB_FIFO_ROOM_MASK;
+ rdev->qplib_res.pacing_data->fifo_room_shift = BNXT_RE_DB_FIFO_ROOM_SHIFT;
+ rdev->qplib_res.pacing_data->grc_reg_offset = rdev->pacing.dbr_db_fifo_reg_off;
+ bnxt_re_set_default_pacing_data(rdev);
+ /* Initialize worker for DBR Pacing */
+ INIT_WORK(&rdev->dbq_fifo_check_work, bnxt_re_db_fifo_check);
+ INIT_DELAYED_WORK(&rdev->dbq_pacing_work, bnxt_re_pacing_timer_exp);
return 0;
}
+static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev)
+{
+ cancel_work_sync(&rdev->dbq_fifo_check_work);
+ cancel_delayed_work_sync(&rdev->dbq_pacing_work);
+ if (rdev->pacing.dbr_page)
+ free_page((u64)rdev->pacing.dbr_page);
+
+ rdev->pacing.dbr_page = NULL;
+ rdev->pacing.dbr_pacing = false;
+}
+
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
u16 fw_ring_id, int type)
{
@@ -652,6 +861,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.query_qp = bnxt_re_query_qp,
.query_srq = bnxt_re_query_srq,
.reg_user_mr = bnxt_re_reg_user_mr,
+ .reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf,
.req_notify_cq = bnxt_re_req_notify_cq,
.resize_cq = bnxt_re_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
@@ -711,13 +921,14 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct bnxt_aux_priv *aux_priv,
rdev->id = rdev->en_dev->pdev->devfn;
INIT_LIST_HEAD(&rdev->qp_list);
mutex_init(&rdev->qp_lock);
- atomic_set(&rdev->qp_count, 0);
- atomic_set(&rdev->cq_count, 0);
- atomic_set(&rdev->srq_count, 0);
- atomic_set(&rdev->mr_count, 0);
- atomic_set(&rdev->mw_count, 0);
- atomic_set(&rdev->ah_count, 0);
- atomic_set(&rdev->pd_count, 0);
+ mutex_init(&rdev->pacing.dbq_lock);
+ atomic_set(&rdev->stats.res.qp_count, 0);
+ atomic_set(&rdev->stats.res.cq_count, 0);
+ atomic_set(&rdev->stats.res.srq_count, 0);
+ atomic_set(&rdev->stats.res.mr_count, 0);
+ atomic_set(&rdev->stats.res.mw_count, 0);
+ atomic_set(&rdev->stats.res.ah_count, 0);
+ atomic_set(&rdev->stats.res.pd_count, 0);
rdev->cosq[0] = 0xFFFF;
rdev->cosq[1] = 0xFFFF;
@@ -759,7 +970,7 @@ static int bnxt_re_handle_unaffi_async_event(struct creq_func_event
static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
struct bnxt_re_qp *qp)
{
- struct ib_event event;
+ struct ib_event event = {};
unsigned int flags;
if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
@@ -769,7 +980,6 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
bnxt_re_unlock_cqs(qp, flags);
}
- memset(&event, 0, sizeof(event));
if (qp->qplib_qp.srq) {
event.device = &qp->rdev->ibdev;
event.element.qp = &qp->ib_qp;
@@ -937,13 +1147,12 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
struct bnxt_re_ring_attr rattr = {};
int num_vec_created = 0;
- int rc = 0, i;
+ int rc, i;
u8 type;
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
- rdev->is_virtfn);
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
if (rc)
goto fail;
@@ -1090,11 +1299,10 @@ static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
{
u32 prio_map = 0, tmp_map = 0;
struct net_device *netdev;
- struct dcb_app app;
+ struct dcb_app app = {};
netdev = rdev->netdev;
- memset(&app, 0, sizeof(app));
app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
app.protocol = ETH_P_IBOE;
tmp_map = dcb_ieee_getapp_mask(netdev, &app);
@@ -1123,8 +1331,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
*/
if ((prio_map == 0 && rdev->qplib_res.prio) ||
(prio_map != 0 && !rdev->qplib_res.prio)) {
- rdev->qplib_res.prio = prio_map ? true : false;
-
+ rdev->qplib_res.prio = prio_map;
bnxt_re_update_gid(rdev);
}
@@ -1138,7 +1345,7 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
struct hwrm_ver_get_input req = {};
struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_fw_msg fw_msg = {};
- int rc = 0;
+ int rc;
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VER_GET);
req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
@@ -1168,7 +1375,7 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
{
- int rc = 0;
+ int rc;
u32 event;
/* Register ib dev */
@@ -1214,8 +1421,11 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
}
- if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags))
- rdev->num_msix = 0;
+
+ rdev->num_msix = 0;
+
+ if (rdev->pacing.dbr_pacing)
+ bnxt_re_deinitialize_dbr_pacing(rdev);
bnxt_re_destroy_chip_ctx(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
@@ -1234,15 +1444,14 @@ static void bnxt_re_worker(struct work_struct *work)
static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
+ struct bnxt_re_ring_attr rattr = {};
struct bnxt_qplib_creq_ctx *creq;
- struct bnxt_re_ring_attr rattr;
u32 db_offt;
int vid;
u8 type;
int rc;
/* Registered a new RoCE device instance to netdev */
- memset(&rattr, 0, sizeof(rattr));
rc = bnxt_re_register_netdev(rdev);
if (rc) {
ibdev_err(&rdev->ibdev,
@@ -1271,7 +1480,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
rdev->en_dev->ulp_tbl->msix_requested);
rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested;
- set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);
bnxt_re_query_hwrm_intf_version(rdev);
@@ -1311,8 +1519,17 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
goto free_ring;
}
- rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr,
- rdev->is_virtfn);
+ if (bnxt_qplib_dbr_pacing_en(rdev->chip_ctx)) {
+ rc = bnxt_re_initialize_dbr_pacing(rdev);
+ if (!rc) {
+ rdev->pacing.dbr_pacing = true;
+ } else {
+ ibdev_err(&rdev->ibdev,
+				  "DBR pacing disabled with error: %d\n", rc);
+ rdev->pacing.dbr_pacing = false;
+ }
+ }
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw, &rdev->dev_attr);
if (rc)
goto disable_rcfw;
@@ -1400,7 +1617,7 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
container_of(adev, struct bnxt_aux_priv, aux_dev);
struct bnxt_en_dev *en_dev;
struct bnxt_re_dev *rdev;
- int rc = 0;
+ int rc;
/* en_dev should never be NULL as long as adev and aux_dev are valid. */
en_dev = aux_priv->edev;
@@ -1646,7 +1863,7 @@ static struct auxiliary_driver bnxt_re_driver = {
static int __init bnxt_re_mod_init(void)
{
- int rc = 0;
+ int rc;
pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);
rc = auxiliary_driver_register(&bnxt_re_driver);
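
The two work items added to main.c implement a small multiplicative-increase/decay control loop around the doorbell FIFO occupancy: an alert doubles do_pacing if congestion is already being paced, otherwise raises it 8x and widens pacing_th 4x, while the periodic timer decays do_pacing by 1/8 per tick until it falls back to the default. The sketch below condenses just that arithmetic; the struct and function names are illustrative, and locking, register reads and work scheduling are omitted.

/* Condensed, illustrative model of the pacing feedback loop above. */
#include <linux/types.h>

struct pacing_model {
	u32 do_pacing;		/* value shared with user libraries */
	u32 pacing_th;		/* occupancy threshold that triggers pacing */
	u32 def_do_pacing;	/* no-congestion default */
	u32 algo_th;		/* default pacing_th */
	u32 max_do_pacing;	/* hard clamp */
};

static void pacing_on_alert(struct pacing_model *p)
{
	if (p->do_pacing > p->def_do_pacing) {
		p->do_pacing <<= 1;		/* congestion persists: double */
	} else {
		p->do_pacing <<= 3;		/* new congestion: 8x */
		p->pacing_th = p->algo_th * 4;	/* widen the band */
	}
	if (p->do_pacing > p->max_do_pacing)
		p->do_pacing = p->max_do_pacing;
}

static void pacing_on_timer(struct pacing_model *p, u32 fifo_occup)
{
	if (fifo_occup > p->pacing_th)
		return;				/* still congested, keep pacing */

	p->do_pacing -= p->do_pacing >> 3;	/* decay by 1/8 per tick */
	if (p->do_pacing <= p->def_do_pacing) {
		p->do_pacing = p->def_do_pacing;
		p->pacing_th = p->algo_th;	/* back to defaults */
	}
}
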
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index a42555623aed..abbabea7f5fa 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -535,7 +535,7 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
cqn_handler_t cqn_handler,
srqn_handler_t srqn_handler)
{
- int rc = -1;
+ int rc;
nq->pdev = pdev;
nq->cqn_handler = cqn_handler;
@@ -727,27 +727,30 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct creq_query_srq_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
- struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct bnxt_qplib_rcfw_sbuf sbuf;
struct creq_query_srq_resp_sb *sb;
struct cmdq_query_srq req = {};
- int rc = 0;
+ int rc;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_SRQ,
sizeof(req));
/* Configure the request */
- sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
- if (!sbuf)
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
return -ENOMEM;
- req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
req.srq_cid = cpu_to_le32(srq->id);
- sb = sbuf->sb;
- bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
+ sb = sbuf.sb;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
srq->threshold = le16_to_cpu(sb->srq_limit);
- bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
return rc;
}
@@ -1365,24 +1368,26 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct creq_query_qp_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
- struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct bnxt_qplib_rcfw_sbuf sbuf;
struct creq_query_qp_resp_sb *sb;
struct cmdq_query_qp req = {};
u32 temp32[4];
- int i, rc = 0;
+ int i, rc;
+
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+ sb = sbuf.sb;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_QP,
sizeof(req));
- sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
- if (!sbuf)
- return -ENOMEM;
- sb = sbuf->sb;
-
req.qp_cid = cpu_to_le32(qp->id);
- req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
- bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
@@ -1391,8 +1396,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->state = sb->en_sqd_async_notify_state &
CREQ_QUERY_QP_RESP_SB_STATE_MASK;
qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
- CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
- true : false;
+ CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
qp->access = sb->access;
qp->pkey_index = le16_to_cpu(sb->pkey);
qp->qkey = le32_to_cpu(sb->qkey);
@@ -1442,7 +1446,8 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
memcpy(qp->smac, sb->src_mac, 6);
qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
bail:
- bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
return rc;
}
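
The query paths above stop heap-allocating a struct bnxt_qplib_rcfw_sbuf and instead keep the bookkeeping on the stack, with only the response buffer in DMA-coherent memory, sized up to a whole number of command-queue-entry units. A trimmed sketch of the converted shape, with the actual request fill and error handling elided, is shown below.

/* Trimmed sketch of the on-stack side-buffer shape used by the
 * converted query functions; request/response handling is elided.
 */
static int query_via_sbuf(struct bnxt_qplib_rcfw *rcfw, size_t sb_bytes)
{
	struct bnxt_qplib_rcfw_sbuf sbuf;
	int rc = 0;

	sbuf.size = ALIGN(sb_bytes, BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;

	/* build the request, set resp_size in CMDQE units, send the
	 * command and parse the side buffer at sbuf.sb here
	 */

	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}
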
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index bc3aea4592b9..c8c4017fe405 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -55,7 +55,7 @@ static void bnxt_qplib_service_creq(struct tasklet_struct *t);
/**
* bnxt_qplib_map_rc - map return type based on opcode
- * @opcode - roce slow path opcode
+ * @opcode: roce slow path opcode
*
* case #1
* Firmware initiated error recovery is a safe state machine and
@@ -98,8 +98,8 @@ static int bnxt_qplib_map_rc(u8 opcode)
/**
* bnxt_re_is_fw_stalled - Check firmware health
- * @rcfw - rcfw channel instance of rdev
- * @cookie - cookie to track the command
+ * @rcfw: rcfw channel instance of rdev
+ * @cookie: cookie to track the command
*
* If firmware has not responded to any rcfw command within
* rcfw->max_timeout, consider the firmware stalled.
@@ -133,8 +133,8 @@ static int bnxt_re_is_fw_stalled(struct bnxt_qplib_rcfw *rcfw,
/**
* __wait_for_resp - Don't hold the cpu context and wait for response
- * @rcfw - rcfw channel instance of rdev
- * @cookie - cookie to track the command
+ * @rcfw: rcfw channel instance of rdev
+ * @cookie: cookie to track the command
*
* Wait for command completion in sleepable context.
*
@@ -179,8 +179,8 @@ static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
/**
* __block_for_resp - hold the cpu context and wait for response
- * @rcfw - rcfw channel instance of rdev
- * @cookie - cookie to track the command
+ * @rcfw: rcfw channel instance of rdev
+ * @cookie: cookie to track the command
*
* This function will hold the cpu (non-sleepable context) and
* wait for command completion. Maximum holding interval is 8 seconds.
@@ -216,8 +216,8 @@ static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
};
/* __send_message_no_waiter - get cookie and post the message.
- * @rcfw - rcfw channel instance of rdev
- * @msg - qplib message internal
+ * @rcfw: rcfw channel instance of rdev
+ * @msg: qplib message internal
*
* This function just posts the command and does not wait for completion.
* Current design of this function is -
@@ -335,7 +335,8 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
cpu_to_le64(sbuf->dma_addr));
__set_cmdq_base_resp_size(msg->req, msg->req_sz,
ALIGN(sbuf->size,
- BNXT_QPLIB_CMDQE_UNITS));
+ BNXT_QPLIB_CMDQE_UNITS) /
+ BNXT_QPLIB_CMDQE_UNITS);
}
preq = (u8 *)msg->req;
@@ -373,8 +374,8 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
/**
* __poll_for_resp - self poll completion for rcfw command
- * @rcfw - rcfw channel instance of rdev
- * @cookie - cookie to track the command
+ * @rcfw: rcfw channel instance of rdev
+ * @cookie: cookie to track the command
*
* It works the same as __wait_for_resp except that this function will
* do self-polling at a short interval since the interrupt is disabled.
@@ -470,8 +471,8 @@ static void __destroy_timedout_ah(struct bnxt_qplib_rcfw *rcfw,
/**
* __bnxt_qplib_rcfw_send_message - qplib interface to send
* and complete rcfw command.
- * @rcfw - rcfw channel instance of rdev
- * @msg - qplib message internal
+ * @rcfw: rcfw channel instance of rdev
+ * @msg: qplib message internal
*
* This function does not account for shadow queue depth. It will send
* all commands unconditionally as long as the send queue is not full.
@@ -487,7 +488,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_crsqe *crsqe;
unsigned long flags;
u16 cookie;
- int rc = 0;
+ int rc;
u8 opcode;
opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz);
@@ -533,8 +534,8 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
/**
* bnxt_qplib_rcfw_send_message - qplib interface to send
* and complete rcfw command.
- * @rcfw - rcfw channel instance of rdev
- * @msg - qplib message internal
+ * @rcfw: rcfw channel instance of rdev
+ * @msg: qplib message internal
*
* Driver interacts with firmware through the rcfw channel/slow path in two ways.
* a. Blocking rcfw command send. In this path, driver cannot hold
@@ -1195,34 +1196,3 @@ int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
return 0;
}
-
-struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
- struct bnxt_qplib_rcfw *rcfw,
- u32 size)
-{
- struct bnxt_qplib_rcfw_sbuf *sbuf;
-
- sbuf = kzalloc(sizeof(*sbuf), GFP_KERNEL);
- if (!sbuf)
- return NULL;
-
- sbuf->size = size;
- sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
- &sbuf->dma_addr, GFP_KERNEL);
- if (!sbuf->sb)
- goto bail;
-
- return sbuf;
-bail:
- kfree(sbuf);
- return NULL;
-}
-
-void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_rcfw_sbuf *sbuf)
-{
- if (sbuf->sb)
- dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
- sbuf->sb, sbuf->dma_addr);
- kfree(sbuf);
-}
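
The __send_message() hunk above also fixes the units of resp_size: firmware expects the response size as a count of command-queue entries, while the old code passed the byte count returned by ALIGN(). A worked example, assuming the driver's 16-byte BNXT_QPLIB_CMDQE_UNITS and a hypothetical 56-byte response structure:

/* Illustration only; the 56-byte response size is hypothetical. */
#include <linux/kernel.h>

#define CMDQE_UNITS	16	/* assumed value of BNXT_QPLIB_CMDQE_UNITS */

static u8 resp_size_in_units(size_t sb_bytes)	/* e.g. sb_bytes = 56 */
{
	/* old: ALIGN(56, 16)       = 64  (bytes, misinterpreted as units) */
	/* new: ALIGN(56, 16) / 16  = 4   (CMDQE units, as firmware expects) */
	return ALIGN(sb_bytes, CMDQE_UNITS) / CMDQE_UNITS;
}
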
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 739d942761d1..157db6b7e119 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -118,11 +118,11 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
else
pages = sginfo->npages;
/* page ptr arrays */
- pbl->pg_arr = vmalloc(pages * sizeof(void *));
+ pbl->pg_arr = vmalloc_array(pages, sizeof(void *));
if (!pbl->pg_arr)
return -ENOMEM;
- pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
+ pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t));
if (!pbl->pg_map_arr) {
vfree(pbl->pg_arr);
pbl->pg_arr = NULL;
@@ -385,7 +385,7 @@ static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_sg_info sginfo = {};
struct bnxt_qplib_tqm_ctx *tqmctx;
- int rc = 0;
+ int rc;
int i;
tqmctx = &ctx->tqm_ctx;
@@ -463,7 +463,7 @@ static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
struct bnxt_qplib_ctx *ctx)
{
- int rc = 0;
+ int rc;
rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
if (rc)
@@ -501,7 +501,7 @@ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
{
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_sg_info sginfo = {};
- int rc = 0;
+ int rc;
if (virt_fn || is_p5)
goto stats_alloc;
@@ -642,31 +642,44 @@ static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
/* PDs */
-int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
+int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res, struct bnxt_qplib_pd *pd)
{
+ struct bnxt_qplib_pd_tbl *pdt = &res->pd_tbl;
u32 bit_num;
+ int rc = 0;
+ mutex_lock(&res->pd_tbl_lock);
bit_num = find_first_bit(pdt->tbl, pdt->max);
- if (bit_num == pdt->max)
- return -ENOMEM;
+ if (bit_num == pdt->max) {
+ rc = -ENOMEM;
+ goto exit;
+ }
/* Found unused PD */
clear_bit(bit_num, pdt->tbl);
pd->id = bit_num;
- return 0;
+exit:
+ mutex_unlock(&res->pd_tbl_lock);
+ return rc;
}
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
struct bnxt_qplib_pd_tbl *pdt,
struct bnxt_qplib_pd *pd)
{
+ int rc = 0;
+
+ mutex_lock(&res->pd_tbl_lock);
if (test_and_set_bit(pd->id, pdt->tbl)) {
dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
pd->id);
- return -EINVAL;
+ rc = -EINVAL;
+ goto exit;
}
pd->id = 0;
- return 0;
+exit:
+ mutex_unlock(&res->pd_tbl_lock);
+ return rc;
}
static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
@@ -691,6 +704,7 @@ static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
pdt->max = max;
memset((u8 *)pdt->tbl, 0xFF, bytes);
+ mutex_init(&res->pd_tbl_lock);
return 0;
}
@@ -877,7 +891,7 @@ int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
struct net_device *netdev,
struct bnxt_qplib_dev_attr *dev_attr)
{
- int rc = 0;
+ int rc;
res->pdev = pdev;
res->netdev = netdev;
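
The __alloc_pbl() change above replaces open-coded vmalloc(pages * sizeof(...)) calls with vmalloc_array(), which returns NULL on multiplication overflow instead of silently allocating a truncated table. A minimal illustration:

/* Minimal illustration of the overflow-safe allocation used above. */
#include <linux/vmalloc.h>

static void **alloc_page_ptr_table(size_t pages)
{
	/* vmalloc(pages * sizeof(void *)) can wrap for a huge 'pages';
	 * vmalloc_array() detects the overflow and fails cleanly.
	 */
	return vmalloc_array(pages, sizeof(void *));
}
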
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index d850a553821e..5949f004f785 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -48,6 +48,7 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
struct bnxt_qplib_drv_modes {
u8 wqe_mode;
bool db_push;
+ bool dbr_pacing;
};
struct bnxt_qplib_chip_ctx {
@@ -58,6 +59,17 @@ struct bnxt_qplib_chip_ctx {
u16 hwrm_cmd_max_timeout;
struct bnxt_qplib_drv_modes modes;
u64 hwrm_intf_ver;
+ u32 dbr_stat_db_fifo;
+};
+
+struct bnxt_qplib_db_pacing_data {
+ u32 do_pacing;
+ u32 pacing_th;
+ u32 alarm_th;
+ u32 fifo_max_depth;
+ u32 fifo_room_mask;
+ u32 fifo_room_shift;
+ u32 grc_reg_offset;
};
#define BNXT_QPLIB_DBR_PF_DB_OFFSET 0x10000
@@ -265,12 +277,15 @@ struct bnxt_qplib_res {
struct net_device *netdev;
struct bnxt_qplib_rcfw *rcfw;
struct bnxt_qplib_pd_tbl pd_tbl;
+ /* To protect the pd table bit map */
+ struct mutex pd_tbl_lock;
struct bnxt_qplib_sgid_tbl sgid_tbl;
struct bnxt_qplib_dpi_tbl dpi_tbl;
/* To protect the dpi table bit map */
struct mutex dpi_tbl_lock;
bool prio;
bool is_vf;
+ struct bnxt_qplib_db_pacing_data *pacing_data;
};
static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
@@ -355,7 +370,7 @@ void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
struct bnxt_qplib_hwq *hwq);
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
struct bnxt_qplib_hwq_attr *hwq_attr);
-int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pd_tbl,
+int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res,
struct bnxt_qplib_pd *pd);
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
struct bnxt_qplib_pd_tbl *pd_tbl,
@@ -467,4 +482,10 @@ static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
return dev_cap_flags &
CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
}
+
+static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return cctx->modes.dbr_pacing;
+}
+
#endif /* __BNXT_QPLIB_RES_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index ab45f9d4bb02..a27b68515164 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -72,7 +72,7 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
struct creq_query_version_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
struct cmdq_query_version req = {};
- int rc = 0;
+ int rc;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_VERSION,
@@ -89,31 +89,29 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
}
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr, bool vf)
+ struct bnxt_qplib_dev_attr *attr)
{
struct creq_query_func_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
struct creq_query_func_resp_sb *sb;
- struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct bnxt_qplib_rcfw_sbuf sbuf;
struct cmdq_query_func req = {};
u8 *tqm_alloc;
- int i, rc = 0;
+ int i, rc;
u32 temp;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_FUNC,
sizeof(req));
- sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
- if (!sbuf) {
- dev_err(&rcfw->pdev->dev,
- "SP: QUERY_FUNC alloc side buffer failed\n");
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
return -ENOMEM;
- }
-
- sb = sbuf->sb;
- req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
- bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
+ sb = sbuf.sb;
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
@@ -121,9 +119,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
/* Extract the context from the side buffer */
attr->max_qp = le32_to_cpu(sb->max_qp);
- /* max_qp value reported by FW for PF doesn't include the QP1 for PF */
- if (!vf)
- attr->max_qp += 1;
+ /* max_qp value reported by FW doesn't include the QP1 */
+ attr->max_qp += 1;
attr->max_qp_rd_atom =
sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
@@ -175,7 +172,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
bail:
- bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
return rc;
}
@@ -186,7 +184,7 @@ int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
struct creq_set_func_resources_resp resp = {};
struct cmdq_set_func_resources req = {};
struct bnxt_qplib_cmdqmsg msg = {};
- int rc = 0;
+ int rc;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES,
@@ -718,23 +716,22 @@ int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
struct creq_query_roce_stats_resp_sb *sb;
struct cmdq_query_roce_stats req = {};
struct bnxt_qplib_cmdqmsg msg = {};
- struct bnxt_qplib_rcfw_sbuf *sbuf;
- int rc = 0;
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ int rc;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_ROCE_STATS,
sizeof(req));
- sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
- if (!sbuf) {
- dev_err(&rcfw->pdev->dev,
- "SP: QUERY_ROCE_STATS alloc side buffer failed\n");
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
return -ENOMEM;
- }
+ sb = sbuf.sb;
- sb = sbuf->sb;
- req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
- bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
@@ -790,7 +787,8 @@ int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
}
bail:
- bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
return rc;
}
@@ -801,49 +799,56 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
struct creq_query_roce_stats_ext_resp_sb *sb;
struct cmdq_query_roce_stats_ext req = {};
struct bnxt_qplib_cmdqmsg msg = {};
- struct bnxt_qplib_rcfw_sbuf *sbuf;
+ struct bnxt_qplib_rcfw_sbuf sbuf;
int rc;
- sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
- if (!sbuf) {
- dev_err(&rcfw->pdev->dev,
- "SP: QUERY_ROCE_STATS_EXT alloc sb failed");
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
return -ENOMEM;
- }
+ sb = sbuf.sb;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS,
sizeof(req));
- req.resp_size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
- req.resp_addr = cpu_to_le64(sbuf->dma_addr);
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ req.resp_addr = cpu_to_le64(sbuf.dma_addr);
req.function_id = cpu_to_le32(fid);
req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);
- bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, sbuf, sizeof(req),
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
goto bail;
- sb = sbuf->sb;
estat->tx_atomic_req = le64_to_cpu(sb->tx_atomic_req_pkts);
estat->tx_read_req = le64_to_cpu(sb->tx_read_req_pkts);
estat->tx_read_res = le64_to_cpu(sb->tx_read_res_pkts);
estat->tx_write_req = le64_to_cpu(sb->tx_write_req_pkts);
estat->tx_send_req = le64_to_cpu(sb->tx_send_req_pkts);
+ estat->tx_roce_pkts = le64_to_cpu(sb->tx_roce_pkts);
+ estat->tx_roce_bytes = le64_to_cpu(sb->tx_roce_bytes);
estat->rx_atomic_req = le64_to_cpu(sb->rx_atomic_req_pkts);
estat->rx_read_req = le64_to_cpu(sb->rx_read_req_pkts);
estat->rx_read_res = le64_to_cpu(sb->rx_read_res_pkts);
estat->rx_write_req = le64_to_cpu(sb->rx_write_req_pkts);
estat->rx_send_req = le64_to_cpu(sb->rx_send_req_pkts);
+ estat->rx_roce_pkts = le64_to_cpu(sb->rx_roce_pkts);
+ estat->rx_roce_bytes = le64_to_cpu(sb->rx_roce_bytes);
estat->rx_roce_good_pkts = le64_to_cpu(sb->rx_roce_good_pkts);
estat->rx_roce_good_bytes = le64_to_cpu(sb->rx_roce_good_bytes);
estat->rx_out_of_buffer = le64_to_cpu(sb->rx_out_of_buffer_pkts);
estat->rx_out_of_sequence = le64_to_cpu(sb->rx_out_of_sequence_pkts);
+ estat->tx_cnp = le64_to_cpu(sb->tx_cnp_pkts);
+ estat->rx_cnp = le64_to_cpu(sb->rx_cnp_pkts);
+ estat->rx_ecn_marked = le64_to_cpu(sb->rx_ecn_marked_pkts);
bail:
- bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
return rc;
}
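The qplib_sp.c hunks above drop the driver's heap-allocated side-buffer helpers (bnxt_qplib_rcfw_alloc_sbuf()/free_sbuf()) in favour of an on-stack struct bnxt_qplib_rcfw_sbuf whose data area comes straight from the DMA-coherent allocator. A minimal sketch of the resulting pattern, reusing the names from the diff (error handling trimmed):

    struct creq_query_roce_stats_resp_sb *sb;	/* response layout, as in the hunk above */
    struct bnxt_qplib_rcfw_sbuf sbuf;

    sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
    sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
                                 &sbuf.dma_addr, GFP_KERNEL);
    if (!sbuf.sb)
        return -ENOMEM;
    sb = sbuf.sb;
    /* ... build the command, send it, read the results out of *sb ... */
    dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);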
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 264ef3cedc45..d33c78b96217 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -322,7 +322,7 @@ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx,
const u8 *smac);
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
- struct bnxt_qplib_dev_attr *attr, bool vf);
+ struct bnxt_qplib_dev_attr *attr);
int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index ffbd9a89981e..d16d8eaa1415 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2466,7 +2466,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
- init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
+ init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
return 0;
}
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index 4e93ef7f84ee..9c65bd27bae0 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -66,6 +66,7 @@ enum efa_admin_get_stats_type {
EFA_ADMIN_GET_STATS_TYPE_BASIC = 0,
EFA_ADMIN_GET_STATS_TYPE_MESSAGES = 1,
EFA_ADMIN_GET_STATS_TYPE_RDMA_READ = 2,
+ EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE = 3,
};
enum efa_admin_get_stats_scope {
@@ -570,6 +571,16 @@ struct efa_admin_rdma_read_stats {
u64 read_resp_bytes;
};
+struct efa_admin_rdma_write_stats {
+ u64 write_wrs;
+
+ u64 write_bytes;
+
+ u64 write_wr_err;
+
+ u64 write_recv_bytes;
+};
+
struct efa_admin_acq_get_stats_resp {
struct efa_admin_acq_common_desc acq_common_desc;
@@ -579,6 +590,8 @@ struct efa_admin_acq_get_stats_resp {
struct efa_admin_messages_stats messages_stats;
struct efa_admin_rdma_read_stats rdma_read_stats;
+
+ struct efa_admin_rdma_write_stats rdma_write_stats;
} u;
};
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index 8f8885e002ba..576811885d59 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
@@ -794,6 +794,12 @@ int efa_com_get_stats(struct efa_com_dev *edev,
result->rdma_read_stats.read_wr_err = resp.u.rdma_read_stats.read_wr_err;
result->rdma_read_stats.read_resp_bytes = resp.u.rdma_read_stats.read_resp_bytes;
break;
+ case EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE:
+ result->rdma_write_stats.write_wrs = resp.u.rdma_write_stats.write_wrs;
+ result->rdma_write_stats.write_bytes = resp.u.rdma_write_stats.write_bytes;
+ result->rdma_write_stats.write_wr_err = resp.u.rdma_write_stats.write_wr_err;
+ result->rdma_write_stats.write_recv_bytes = resp.u.rdma_write_stats.write_recv_bytes;
+ break;
}
return 0;
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index 0898ad5bc340..fc97f37bb39b 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_CMD_H_
@@ -262,10 +262,18 @@ struct efa_com_rdma_read_stats {
u64 read_resp_bytes;
};
+struct efa_com_rdma_write_stats {
+ u64 write_wrs;
+ u64 write_bytes;
+ u64 write_wr_err;
+ u64 write_recv_bytes;
+};
+
union efa_com_get_stats_result {
struct efa_com_basic_stats basic_stats;
struct efa_com_messages_stats messages_stats;
struct efa_com_rdma_read_stats rdma_read_stats;
+ struct efa_com_rdma_write_stats rdma_write_stats;
};
int efa_com_create_qp(struct efa_com_dev *edev,
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 2a195c4b0f17..0f8ca99d0827 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -61,6 +61,10 @@ struct efa_user_mmap_entry {
op(EFA_RDMA_READ_BYTES, "rdma_read_bytes") \
op(EFA_RDMA_READ_WR_ERR, "rdma_read_wr_err") \
op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
+ op(EFA_RDMA_WRITE_WRS, "rdma_write_wrs") \
+ op(EFA_RDMA_WRITE_BYTES, "rdma_write_bytes") \
+ op(EFA_RDMA_WRITE_WR_ERR, "rdma_write_wr_err") \
+ op(EFA_RDMA_WRITE_RECV_BYTES, "rdma_write_recv_bytes") \
#define EFA_STATS_ENUM(ename, name) ename,
#define EFA_STATS_STR(ename, nam) \
@@ -449,12 +453,12 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
ibdev_dbg(&dev->ibdev, "Destroy qp[%u]\n", ibqp->qp_num);
- efa_qp_user_mmap_entries_remove(qp);
-
err = efa_destroy_qp_handle(dev, qp->qp_handle);
if (err)
return err;
+ efa_qp_user_mmap_entries_remove(qp);
+
if (qp->rq_cpu_addr) {
ibdev_dbg(&dev->ibdev,
"qp->cpu_addr[0x%p] freed: size[%lu], dma[%pad]\n",
@@ -1013,8 +1017,8 @@ int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
"Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
- efa_cq_user_mmap_entries_remove(cq);
efa_destroy_cq_idx(dev, cq->cq_idx);
+ efa_cq_user_mmap_entries_remove(cq);
if (cq->eq) {
xa_erase(&dev->cqs_xa, cq->cq_idx);
synchronize_irq(cq->eq->irq.irqn);
@@ -2080,6 +2084,7 @@ static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
{
struct efa_com_get_stats_params params = {};
union efa_com_get_stats_result result;
+ struct efa_com_rdma_write_stats *rws;
struct efa_com_rdma_read_stats *rrs;
struct efa_com_messages_stats *ms;
struct efa_com_basic_stats *bs;
@@ -2121,6 +2126,19 @@ static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;
+ if (EFA_DEV_CAP(dev, RDMA_WRITE)) {
+ params.type = EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE;
+ err = efa_com_get_stats(&dev->edev, &params, &result);
+ if (err)
+ return err;
+
+ rws = &result.rdma_write_stats;
+ stats->value[EFA_RDMA_WRITE_WRS] = rws->write_wrs;
+ stats->value[EFA_RDMA_WRITE_BYTES] = rws->write_bytes;
+ stats->value[EFA_RDMA_WRITE_WR_ERR] = rws->write_wr_err;
+ stats->value[EFA_RDMA_WRITE_RECV_BYTES] = rws->write_recv_bytes;
+ }
+
return ARRAY_SIZE(efa_port_stats_descs);
}
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index a882b57aa118..9d316fdc6f9a 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -228,7 +228,7 @@ struct erdma_cmdq_ext_db_req {
/* create_cq cfg1 */
#define ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK GENMASK(31, 16)
-#define ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK BIT(15)
+#define ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK BIT(15)
#define ERDMA_CMD_CREATE_CQ_MTT_DB_CFG_MASK BIT(11)
#define ERDMA_CMD_CREATE_CQ_EQN_MASK GENMASK(9, 0)
@@ -248,6 +248,7 @@ struct erdma_cmdq_create_cq_req {
/* regmr/deregmr cfg0 */
#define ERDMA_CMD_MR_VALID_MASK BIT(31)
+#define ERDMA_CMD_MR_VERSION_MASK GENMASK(30, 28)
#define ERDMA_CMD_MR_KEY_MASK GENMASK(27, 20)
#define ERDMA_CMD_MR_MPT_IDX_MASK GENMASK(19, 0)
@@ -258,7 +259,8 @@ struct erdma_cmdq_create_cq_req {
/* regmr cfg2 */
#define ERDMA_CMD_REGMR_PAGESIZE_MASK GENMASK(31, 27)
-#define ERDMA_CMD_REGMR_MTT_TYPE_MASK GENMASK(21, 20)
+#define ERDMA_CMD_REGMR_MTT_PAGESIZE_MASK GENMASK(26, 24)
+#define ERDMA_CMD_REGMR_MTT_LEVEL_MASK GENMASK(21, 20)
#define ERDMA_CMD_REGMR_MTT_CNT_MASK GENMASK(19, 0)
struct erdma_cmdq_reg_mr_req {
@@ -268,7 +270,14 @@ struct erdma_cmdq_reg_mr_req {
u64 start_va;
u32 size;
u32 cfg2;
- u64 phy_addr[4];
+ union {
+ u64 phy_addr[4];
+ struct {
+ u64 rsvd;
+ u32 size_h;
+ u32 mtt_cnt_h;
+ };
+ };
};
struct erdma_cmdq_dereg_mr_req {
@@ -309,7 +318,7 @@ struct erdma_cmdq_modify_qp_req {
/* create qp mtt_cfg */
#define ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK GENMASK(31, 12)
#define ERDMA_CMD_CREATE_QP_MTT_CNT_MASK GENMASK(11, 1)
-#define ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK BIT(0)
+#define ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK BIT(0)
/* create qp db cfg */
#define ERDMA_CMD_CREATE_QP_SQDB_CFG_MASK GENMASK(31, 16)
@@ -364,6 +373,7 @@ struct erdma_cmdq_reflush_req {
enum {
ERDMA_DEV_CAP_FLAGS_ATOMIC = 1 << 7,
+ ERDMA_DEV_CAP_FLAGS_MTT_VA = 1 << 5,
ERDMA_DEV_CAP_FLAGS_EXTEND_DB = 1 << 3,
};
diff --git a/drivers/infiniband/hw/erdma/erdma_qp.c b/drivers/infiniband/hw/erdma/erdma_qp.c
index 44923c51a01b..6d0330badd68 100644
--- a/drivers/infiniband/hw/erdma/erdma_qp.c
+++ b/drivers/infiniband/hw/erdma/erdma_qp.c
@@ -410,7 +410,7 @@ static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
/* Copy SGLs to SQE content to accelerate */
memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
qp->attrs.sq_size, SQEBB_SHIFT),
- mr->mem.mtt_buf, MTT_SIZE(mr->mem.mtt_nents));
+ mr->mem.mtt->buf, MTT_SIZE(mr->mem.mtt_nents));
wqe_size = sizeof(struct erdma_reg_mr_sqe) +
MTT_SIZE(mr->mem.mtt_nents);
} else {
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 517676fbb8b1..dcccb6015232 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -19,6 +19,23 @@
#include "erdma_cm.h"
#include "erdma_verbs.h"
+static void assemble_qbuf_mtt_for_cmd(struct erdma_mem *mem, u32 *cfg,
+ u64 *addr0, u64 *addr1)
+{
+ struct erdma_mtt *mtt = mem->mtt;
+
+ if (mem->mtt_nents > ERDMA_MAX_INLINE_MTT_ENTRIES) {
+ *addr0 = mtt->buf_dma;
+ *cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_1LEVEL);
+ } else {
+ *addr0 = mtt->buf[0];
+ memcpy(addr1, mtt->buf + 1, MTT_SIZE(mem->mtt_nents - 1));
+ *cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_0LEVEL);
+ }
+}
+
static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
{
struct erdma_dev *dev = to_edev(qp->ibqp.device);
@@ -53,8 +70,8 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
req.sq_mtt_cfg =
FIELD_PREP(ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK, 0) |
FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK, 1) |
- FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
- ERDMA_MR_INLINE_MTT);
+ FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_0LEVEL);
req.rq_mtt_cfg = req.sq_mtt_cfg;
req.rq_buf_addr = qp->kern_qp.rq_buf_dma_addr;
@@ -67,30 +84,28 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
user_qp = &qp->user_qp;
req.sq_cqn_mtt_cfg = FIELD_PREP(
ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
- ilog2(user_qp->sq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
+ ilog2(user_qp->sq_mem.page_size) - ERDMA_HW_PAGE_SHIFT);
req.sq_cqn_mtt_cfg |=
FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->scq->cqn);
req.rq_cqn_mtt_cfg = FIELD_PREP(
ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK,
- ilog2(user_qp->rq_mtt.page_size) - ERDMA_HW_PAGE_SHIFT);
+ ilog2(user_qp->rq_mem.page_size) - ERDMA_HW_PAGE_SHIFT);
req.rq_cqn_mtt_cfg |=
FIELD_PREP(ERDMA_CMD_CREATE_QP_CQN_MASK, qp->rcq->cqn);
- req.sq_mtt_cfg = user_qp->sq_mtt.page_offset;
+ req.sq_mtt_cfg = user_qp->sq_mem.page_offset;
req.sq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
- user_qp->sq_mtt.mtt_nents) |
- FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
- user_qp->sq_mtt.mtt_type);
+ user_qp->sq_mem.mtt_nents);
- req.rq_mtt_cfg = user_qp->rq_mtt.page_offset;
+ req.rq_mtt_cfg = user_qp->rq_mem.page_offset;
req.rq_mtt_cfg |= FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_CNT_MASK,
- user_qp->rq_mtt.mtt_nents) |
- FIELD_PREP(ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK,
- user_qp->rq_mtt.mtt_type);
+ user_qp->rq_mem.mtt_nents);
- req.sq_buf_addr = user_qp->sq_mtt.mtt_entry[0];
- req.rq_buf_addr = user_qp->rq_mtt.mtt_entry[0];
+ assemble_qbuf_mtt_for_cmd(&user_qp->sq_mem, &req.sq_mtt_cfg,
+ &req.sq_buf_addr, req.sq_mtt_entry);
+ assemble_qbuf_mtt_for_cmd(&user_qp->rq_mem, &req.rq_mtt_cfg,
+ &req.rq_buf_addr, req.rq_mtt_entry);
req.sq_db_info_dma_addr = user_qp->sq_db_info_dma_addr;
req.rq_db_info_dma_addr = user_qp->rq_db_info_dma_addr;
@@ -117,13 +132,27 @@ static int create_qp_cmd(struct erdma_ucontext *uctx, struct erdma_qp *qp)
static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
{
- struct erdma_cmdq_reg_mr_req req;
struct erdma_pd *pd = to_epd(mr->ibmr.pd);
- u64 *phy_addr;
- int i;
+ struct erdma_cmdq_reg_mr_req req;
+ u32 mtt_level;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA, CMDQ_OPCODE_REG_MR);
+ if (mr->type == ERDMA_MR_TYPE_FRMR ||
+ mr->mem.page_cnt > ERDMA_MAX_INLINE_MTT_ENTRIES) {
+ if (mr->mem.mtt->continuous) {
+ req.phy_addr[0] = mr->mem.mtt->buf_dma;
+ mtt_level = ERDMA_MR_MTT_1LEVEL;
+ } else {
+ req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist);
+ mtt_level = mr->mem.mtt->level;
+ }
+ } else {
+ memcpy(req.phy_addr, mr->mem.mtt->buf,
+ MTT_SIZE(mr->mem.page_cnt));
+ mtt_level = ERDMA_MR_MTT_0LEVEL;
+ }
+
req.cfg0 = FIELD_PREP(ERDMA_CMD_MR_VALID_MASK, mr->valid) |
FIELD_PREP(ERDMA_CMD_MR_KEY_MASK, mr->ibmr.lkey & 0xFF) |
FIELD_PREP(ERDMA_CMD_MR_MPT_IDX_MASK, mr->ibmr.lkey >> 8);
@@ -132,7 +161,7 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
FIELD_PREP(ERDMA_CMD_REGMR_RIGHT_MASK, mr->access);
req.cfg2 = FIELD_PREP(ERDMA_CMD_REGMR_PAGESIZE_MASK,
ilog2(mr->mem.page_size)) |
- FIELD_PREP(ERDMA_CMD_REGMR_MTT_TYPE_MASK, mr->mem.mtt_type) |
+ FIELD_PREP(ERDMA_CMD_REGMR_MTT_LEVEL_MASK, mtt_level) |
FIELD_PREP(ERDMA_CMD_REGMR_MTT_CNT_MASK, mr->mem.page_cnt);
if (mr->type == ERDMA_MR_TYPE_DMA)
@@ -143,14 +172,12 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
req.size = mr->mem.len;
}
- if (mr->type == ERDMA_MR_TYPE_FRMR ||
- mr->mem.mtt_type == ERDMA_MR_INDIRECT_MTT) {
- phy_addr = req.phy_addr;
- *phy_addr = mr->mem.mtt_entry[0];
- } else {
- phy_addr = req.phy_addr;
- for (i = 0; i < mr->mem.mtt_nents; i++)
- *phy_addr++ = mr->mem.mtt_entry[i];
+ if (!mr->mem.mtt->continuous && mr->mem.mtt->level > 1) {
+ req.cfg0 |= FIELD_PREP(ERDMA_CMD_MR_VERSION_MASK, 1);
+ req.cfg2 |= FIELD_PREP(ERDMA_CMD_REGMR_MTT_PAGESIZE_MASK,
+ PAGE_SHIFT - ERDMA_HW_PAGE_SHIFT);
+ req.size_h = upper_32_bits(mr->mem.len);
+ req.mtt_cnt_h = mr->mem.page_cnt >> 20;
}
post_cmd:
@@ -161,7 +188,7 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
{
struct erdma_dev *dev = to_edev(cq->ibcq.device);
struct erdma_cmdq_create_cq_req req;
- struct erdma_mem *mtt;
+ struct erdma_mem *mem;
u32 page_size;
erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
@@ -179,30 +206,34 @@ static int create_cq_cmd(struct erdma_ucontext *uctx, struct erdma_cq *cq)
req.qbuf_addr_h = upper_32_bits(cq->kern_cq.qbuf_dma_addr);
req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK, 1) |
- FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK,
- ERDMA_MR_INLINE_MTT);
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_0LEVEL);
req.first_page_offset = 0;
req.cq_db_info_addr =
cq->kern_cq.qbuf_dma_addr + (cq->depth << CQE_SHIFT);
} else {
- mtt = &cq->user_cq.qbuf_mtt;
+ mem = &cq->user_cq.qbuf_mem;
req.cfg0 |=
FIELD_PREP(ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK,
- ilog2(mtt->page_size) - ERDMA_HW_PAGE_SHIFT);
- if (mtt->mtt_nents == 1) {
- req.qbuf_addr_l = lower_32_bits(*(u64 *)mtt->mtt_buf);
- req.qbuf_addr_h = upper_32_bits(*(u64 *)mtt->mtt_buf);
+ ilog2(mem->page_size) - ERDMA_HW_PAGE_SHIFT);
+ if (mem->mtt_nents == 1) {
+ req.qbuf_addr_l = lower_32_bits(mem->mtt->buf[0]);
+ req.qbuf_addr_h = upper_32_bits(mem->mtt->buf[0]);
+ req.cfg1 |=
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_0LEVEL);
} else {
- req.qbuf_addr_l = lower_32_bits(mtt->mtt_entry[0]);
- req.qbuf_addr_h = upper_32_bits(mtt->mtt_entry[0]);
+ req.qbuf_addr_l = lower_32_bits(mem->mtt->buf_dma);
+ req.qbuf_addr_h = upper_32_bits(mem->mtt->buf_dma);
+ req.cfg1 |=
+ FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_LEVEL_MASK,
+ ERDMA_MR_MTT_1LEVEL);
}
req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK,
- mtt->mtt_nents);
- req.cfg1 |= FIELD_PREP(ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK,
- mtt->mtt_type);
+ mem->mtt_nents);
- req.first_page_offset = mtt->page_offset;
+ req.first_page_offset = mem->page_offset;
req.cq_db_info_addr = cq->user_cq.db_info_dma_addr;
if (uctx->ext_db.enable) {
@@ -481,8 +512,8 @@ static int init_kernel_qp(struct erdma_dev *dev, struct erdma_qp *qp,
dev->func_bar + (ERDMA_SDB_SHARED_PAGE_INDEX << PAGE_SHIFT);
kqp->hw_rq_db = dev->func_bar + ERDMA_BAR_RQDB_SPACE_OFFSET;
- kqp->swr_tbl = vmalloc(qp->attrs.sq_size * sizeof(u64));
- kqp->rwr_tbl = vmalloc(qp->attrs.rq_size * sizeof(u64));
+ kqp->swr_tbl = vmalloc_array(qp->attrs.sq_size, sizeof(u64));
+ kqp->rwr_tbl = vmalloc_array(qp->attrs.rq_size, sizeof(u64));
if (!kqp->swr_tbl || !kqp->rwr_tbl)
goto err_out;
@@ -508,12 +539,223 @@ err_out:
return -ENOMEM;
}
+static void erdma_fill_bottom_mtt(struct erdma_dev *dev, struct erdma_mem *mem)
+{
+ struct erdma_mtt *mtt = mem->mtt;
+ struct ib_block_iter biter;
+ u32 idx = 0;
+
+ while (mtt->low_level)
+ mtt = mtt->low_level;
+
+ rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size)
+ mtt->buf[idx++] = rdma_block_iter_dma_address(&biter);
+}
+
+static struct erdma_mtt *erdma_create_cont_mtt(struct erdma_dev *dev,
+ size_t size)
+{
+ struct erdma_mtt *mtt;
+
+ mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
+ if (!mtt)
+ return ERR_PTR(-ENOMEM);
+
+ mtt->size = size;
+ mtt->buf = kzalloc(mtt->size, GFP_KERNEL);
+ if (!mtt->buf)
+ goto err_free_mtt;
+
+ mtt->continuous = true;
+ mtt->buf_dma = dma_map_single(&dev->pdev->dev, mtt->buf, mtt->size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, mtt->buf_dma))
+ goto err_free_mtt_buf;
+
+ return mtt;
+
+err_free_mtt_buf:
+ kfree(mtt->buf);
+
+err_free_mtt:
+ kfree(mtt);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+static void erdma_destroy_mtt_buf_sg(struct erdma_dev *dev,
+ struct erdma_mtt *mtt)
+{
+ dma_unmap_sg(&dev->pdev->dev, mtt->sglist, mtt->nsg, DMA_TO_DEVICE);
+ vfree(mtt->sglist);
+}
+
+static void erdma_destroy_scatter_mtt(struct erdma_dev *dev,
+ struct erdma_mtt *mtt)
+{
+ erdma_destroy_mtt_buf_sg(dev, mtt);
+ vfree(mtt->buf);
+ kfree(mtt);
+}
+
+static void erdma_init_middle_mtt(struct erdma_mtt *mtt,
+ struct erdma_mtt *low_mtt)
+{
+ struct scatterlist *sg;
+ u32 idx = 0, i;
+
+ for_each_sg(low_mtt->sglist, sg, low_mtt->nsg, i)
+ mtt->buf[idx++] = sg_dma_address(sg);
+}
+
+static int erdma_create_mtt_buf_sg(struct erdma_dev *dev, struct erdma_mtt *mtt)
+{
+ struct scatterlist *sglist;
+ void *buf = mtt->buf;
+ u32 npages, i, nsg;
+ struct page *pg;
+
+	/* Fail if buf is not page aligned */
+ if ((uintptr_t)buf & ~PAGE_MASK)
+ return -EINVAL;
+
+ npages = DIV_ROUND_UP(mtt->size, PAGE_SIZE);
+ sglist = vzalloc(npages * sizeof(*sglist));
+ if (!sglist)
+ return -ENOMEM;
+
+ sg_init_table(sglist, npages);
+ for (i = 0; i < npages; i++) {
+ pg = vmalloc_to_page(buf);
+ if (!pg)
+ goto err;
+ sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
+ buf += PAGE_SIZE;
+ }
+
+ nsg = dma_map_sg(&dev->pdev->dev, sglist, npages, DMA_TO_DEVICE);
+ if (!nsg)
+ goto err;
+
+ mtt->sglist = sglist;
+ mtt->nsg = nsg;
+
+ return 0;
+err:
+ vfree(sglist);
+
+ return -ENOMEM;
+}
+
+static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
+ size_t size)
+{
+ struct erdma_mtt *mtt;
+ int ret = -ENOMEM;
+
+ mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
+ if (!mtt)
+ return NULL;
+
+ mtt->size = ALIGN(size, PAGE_SIZE);
+ mtt->buf = vzalloc(mtt->size);
+ mtt->continuous = false;
+ if (!mtt->buf)
+ goto err_free_mtt;
+
+ ret = erdma_create_mtt_buf_sg(dev, mtt);
+ if (ret)
+ goto err_free_mtt_buf;
+
+ ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, nsg:%u\n",
+ mtt->size, mtt->nsg);
+
+ return mtt;
+
+err_free_mtt_buf:
+ vfree(mtt->buf);
+
+err_free_mtt:
+ kfree(mtt);
+
+ return ERR_PTR(ret);
+}
+
+static struct erdma_mtt *erdma_create_mtt(struct erdma_dev *dev, size_t size,
+ bool force_continuous)
+{
+ struct erdma_mtt *mtt, *tmp_mtt;
+ int ret, level = 0;
+
+ ibdev_dbg(&dev->ibdev, "create_mtt, size:%lu, force cont:%d\n", size,
+ force_continuous);
+
+ if (!(dev->attrs.cap_flags & ERDMA_DEV_CAP_FLAGS_MTT_VA))
+ force_continuous = true;
+
+ if (force_continuous)
+ return erdma_create_cont_mtt(dev, size);
+
+ mtt = erdma_create_scatter_mtt(dev, size);
+ if (IS_ERR(mtt))
+ return mtt;
+ level = 1;
+
+	/* Converge the mtt table: build upper levels until the top fits in one sg entry. */
+ while (mtt->nsg != 1 && level <= 3) {
+ tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->nsg));
+ if (IS_ERR(tmp_mtt)) {
+ ret = PTR_ERR(tmp_mtt);
+ goto err_free_mtt;
+ }
+ erdma_init_middle_mtt(tmp_mtt, mtt);
+ tmp_mtt->low_level = mtt;
+ mtt = tmp_mtt;
+ level++;
+ }
+
+ if (level > 3) {
+ ret = -ENOMEM;
+ goto err_free_mtt;
+ }
+
+ mtt->level = level;
+ ibdev_dbg(&dev->ibdev, "top mtt: level:%d, dma_addr 0x%llx\n",
+ mtt->level, mtt->sglist[0].dma_address);
+
+ return mtt;
+err_free_mtt:
+ while (mtt) {
+ tmp_mtt = mtt->low_level;
+ erdma_destroy_scatter_mtt(dev, mtt);
+ mtt = tmp_mtt;
+ }
+
+ return ERR_PTR(ret);
+}
+
+static void erdma_destroy_mtt(struct erdma_dev *dev, struct erdma_mtt *mtt)
+{
+ struct erdma_mtt *tmp_mtt;
+
+ if (mtt->continuous) {
+ dma_unmap_single(&dev->pdev->dev, mtt->buf_dma, mtt->size,
+ DMA_TO_DEVICE);
+ kfree(mtt->buf);
+ kfree(mtt);
+ } else {
+ while (mtt) {
+ tmp_mtt = mtt->low_level;
+ erdma_destroy_scatter_mtt(dev, mtt);
+ mtt = tmp_mtt;
+ }
+ }
+}
+
static int get_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem,
u64 start, u64 len, int access, u64 virt,
- unsigned long req_page_size, u8 force_indirect_mtt)
+ unsigned long req_page_size, bool force_continuous)
{
- struct ib_block_iter biter;
- uint64_t *phy_addr = NULL;
int ret = 0;
mem->umem = ib_umem_get(&dev->ibdev, start, len, access);
@@ -529,38 +771,14 @@ static int get_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem,
mem->page_offset = start & (mem->page_size - 1);
mem->mtt_nents = ib_umem_num_dma_blocks(mem->umem, mem->page_size);
mem->page_cnt = mem->mtt_nents;
-
- if (mem->page_cnt > ERDMA_MAX_INLINE_MTT_ENTRIES ||
- force_indirect_mtt) {
- mem->mtt_type = ERDMA_MR_INDIRECT_MTT;
- mem->mtt_buf =
- alloc_pages_exact(MTT_SIZE(mem->page_cnt), GFP_KERNEL);
- if (!mem->mtt_buf) {
- ret = -ENOMEM;
- goto error_ret;
- }
- phy_addr = mem->mtt_buf;
- } else {
- mem->mtt_type = ERDMA_MR_INLINE_MTT;
- phy_addr = mem->mtt_entry;
+ mem->mtt = erdma_create_mtt(dev, MTT_SIZE(mem->page_cnt),
+ force_continuous);
+ if (IS_ERR(mem->mtt)) {
+ ret = PTR_ERR(mem->mtt);
+ goto error_ret;
}
- rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size) {
- *phy_addr = rdma_block_iter_dma_address(&biter);
- phy_addr++;
- }
-
- if (mem->mtt_type == ERDMA_MR_INDIRECT_MTT) {
- mem->mtt_entry[0] =
- dma_map_single(&dev->pdev->dev, mem->mtt_buf,
- MTT_SIZE(mem->page_cnt), DMA_TO_DEVICE);
- if (dma_mapping_error(&dev->pdev->dev, mem->mtt_entry[0])) {
- free_pages_exact(mem->mtt_buf, MTT_SIZE(mem->page_cnt));
- mem->mtt_buf = NULL;
- ret = -ENOMEM;
- goto error_ret;
- }
- }
+ erdma_fill_bottom_mtt(dev, mem);
return 0;
@@ -575,11 +793,8 @@ error_ret:
static void put_mtt_entries(struct erdma_dev *dev, struct erdma_mem *mem)
{
- if (mem->mtt_buf) {
- dma_unmap_single(&dev->pdev->dev, mem->mtt_entry[0],
- MTT_SIZE(mem->page_cnt), DMA_TO_DEVICE);
- free_pages_exact(mem->mtt_buf, MTT_SIZE(mem->page_cnt));
- }
+ if (mem->mtt)
+ erdma_destroy_mtt(dev, mem->mtt);
if (mem->umem) {
ib_umem_release(mem->umem);
@@ -660,18 +875,18 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
qp->attrs.rq_size * RQE_SIZE))
return -EINVAL;
- ret = get_mtt_entries(qp->dev, &qp->user_qp.sq_mtt, va,
+ ret = get_mtt_entries(qp->dev, &qp->user_qp.sq_mem, va,
qp->attrs.sq_size << SQEBB_SHIFT, 0, va,
- (SZ_1M - SZ_4K), 1);
+ (SZ_1M - SZ_4K), true);
if (ret)
return ret;
rq_offset = ALIGN(qp->attrs.sq_size << SQEBB_SHIFT, ERDMA_HW_PAGE_SIZE);
qp->user_qp.rq_offset = rq_offset;
- ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mtt, va + rq_offset,
+ ret = get_mtt_entries(qp->dev, &qp->user_qp.rq_mem, va + rq_offset,
qp->attrs.rq_size << RQE_SHIFT, 0, va + rq_offset,
- (SZ_1M - SZ_4K), 1);
+ (SZ_1M - SZ_4K), true);
if (ret)
goto put_sq_mtt;
@@ -687,18 +902,18 @@ static int init_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx,
return 0;
put_rq_mtt:
- put_mtt_entries(qp->dev, &qp->user_qp.rq_mtt);
+ put_mtt_entries(qp->dev, &qp->user_qp.rq_mem);
put_sq_mtt:
- put_mtt_entries(qp->dev, &qp->user_qp.sq_mtt);
+ put_mtt_entries(qp->dev, &qp->user_qp.sq_mem);
return ret;
}
static void free_user_qp(struct erdma_qp *qp, struct erdma_ucontext *uctx)
{
- put_mtt_entries(qp->dev, &qp->user_qp.sq_mtt);
- put_mtt_entries(qp->dev, &qp->user_qp.rq_mtt);
+ put_mtt_entries(qp->dev, &qp->user_qp.sq_mem);
+ put_mtt_entries(qp->dev, &qp->user_qp.rq_mem);
erdma_unmap_user_dbrecords(uctx, &qp->user_qp.user_dbr_page);
}
@@ -875,33 +1090,20 @@ struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
mr->mem.page_size = PAGE_SIZE; /* update it later. */
mr->mem.page_cnt = max_num_sg;
- mr->mem.mtt_type = ERDMA_MR_INDIRECT_MTT;
- mr->mem.mtt_buf =
- alloc_pages_exact(MTT_SIZE(mr->mem.page_cnt), GFP_KERNEL);
- if (!mr->mem.mtt_buf) {
- ret = -ENOMEM;
+ mr->mem.mtt = erdma_create_mtt(dev, MTT_SIZE(max_num_sg), true);
+ if (IS_ERR(mr->mem.mtt)) {
+ ret = PTR_ERR(mr->mem.mtt);
goto out_remove_stag;
}
- mr->mem.mtt_entry[0] =
- dma_map_single(&dev->pdev->dev, mr->mem.mtt_buf,
- MTT_SIZE(mr->mem.page_cnt), DMA_TO_DEVICE);
- if (dma_mapping_error(&dev->pdev->dev, mr->mem.mtt_entry[0])) {
- ret = -ENOMEM;
- goto out_free_mtt;
- }
-
ret = regmr_cmd(dev, mr);
if (ret)
- goto out_dma_unmap;
+ goto out_destroy_mtt;
return &mr->ibmr;
-out_dma_unmap:
- dma_unmap_single(&dev->pdev->dev, mr->mem.mtt_entry[0],
- MTT_SIZE(mr->mem.page_cnt), DMA_TO_DEVICE);
-out_free_mtt:
- free_pages_exact(mr->mem.mtt_buf, MTT_SIZE(mr->mem.page_cnt));
+out_destroy_mtt:
+ erdma_destroy_mtt(dev, mr->mem.mtt);
out_remove_stag:
erdma_free_idx(&dev->res_cb[ERDMA_RES_TYPE_STAG_IDX],
@@ -920,7 +1122,7 @@ static int erdma_set_page(struct ib_mr *ibmr, u64 addr)
if (mr->mem.mtt_nents >= mr->mem.page_cnt)
return -1;
- *((u64 *)mr->mem.mtt_buf + mr->mem.mtt_nents) = addr;
+ mr->mem.mtt->buf[mr->mem.mtt_nents] = addr;
mr->mem.mtt_nents++;
return 0;
@@ -956,7 +1158,7 @@ struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
return ERR_PTR(-ENOMEM);
ret = get_mtt_entries(dev, &mr->mem, start, len, access, virt,
- SZ_2G - SZ_4K, 0);
+ SZ_2G - SZ_4K, false);
if (ret)
goto err_out_free;
@@ -1041,7 +1243,7 @@ int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
cq->kern_cq.qbuf, cq->kern_cq.qbuf_dma_addr);
} else {
erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
- put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
}
xa_erase(&dev->cq_xa, cq->cqn);
@@ -1089,8 +1291,8 @@ int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
WARPPED_BUFSIZE(qp->attrs.sq_size << SQEBB_SHIFT),
qp->kern_qp.sq_buf, qp->kern_qp.sq_buf_dma_addr);
} else {
- put_mtt_entries(dev, &qp->user_qp.sq_mtt);
- put_mtt_entries(dev, &qp->user_qp.rq_mtt);
+ put_mtt_entries(dev, &qp->user_qp.sq_mem);
+ put_mtt_entries(dev, &qp->user_qp.rq_mem);
erdma_unmap_user_dbrecords(ctx, &qp->user_qp.user_dbr_page);
}
@@ -1379,9 +1581,9 @@ static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
int ret;
struct erdma_dev *dev = to_edev(cq->ibcq.device);
- ret = get_mtt_entries(dev, &cq->user_cq.qbuf_mtt, ureq->qbuf_va,
+ ret = get_mtt_entries(dev, &cq->user_cq.qbuf_mem, ureq->qbuf_va,
ureq->qbuf_len, 0, ureq->qbuf_va, SZ_64M - SZ_4K,
- 1);
+ true);
if (ret)
return ret;
@@ -1389,7 +1591,7 @@ static int erdma_init_user_cq(struct erdma_ucontext *ctx, struct erdma_cq *cq,
&cq->user_cq.user_dbr_page,
&cq->user_cq.db_info_dma_addr);
if (ret)
- put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
return ret;
}
@@ -1473,7 +1675,7 @@ int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
err_free_res:
if (!rdma_is_kernel_res(&ibcq->res)) {
erdma_unmap_user_dbrecords(ctx, &cq->user_cq.user_dbr_page);
- put_mtt_entries(dev, &cq->user_cq.qbuf_mtt);
+ put_mtt_entries(dev, &cq->user_cq.qbuf_mem);
} else {
dma_free_coherent(&dev->pdev->dev,
WARPPED_BUFSIZE(depth << CQE_SHIFT),
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index 429fc3063f98..eb9c0f92fb6f 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -65,7 +65,7 @@ struct erdma_pd {
* MemoryRegion definition.
*/
#define ERDMA_MAX_INLINE_MTT_ENTRIES 4
-#define MTT_SIZE(mtt_cnt) (mtt_cnt << 3) /* per mtt takes 8 Bytes. */
+#define MTT_SIZE(mtt_cnt) ((mtt_cnt) << 3) /* each mtt entry takes 8 bytes. */
#define ERDMA_MR_MAX_MTT_CNT 524288
#define ERDMA_MTT_ENTRY_SIZE 8
@@ -73,8 +73,8 @@ struct erdma_pd {
#define ERDMA_MR_TYPE_FRMR 1
#define ERDMA_MR_TYPE_DMA 2
-#define ERDMA_MR_INLINE_MTT 0
-#define ERDMA_MR_INDIRECT_MTT 1
+#define ERDMA_MR_MTT_0LEVEL 0
+#define ERDMA_MR_MTT_1LEVEL 1
#define ERDMA_MR_ACC_RA BIT(0)
#define ERDMA_MR_ACC_LR BIT(1)
@@ -90,10 +90,28 @@ static inline u8 to_erdma_access_flags(int access)
(access & IB_ACCESS_REMOTE_ATOMIC ? ERDMA_MR_ACC_RA : 0);
}
+/* Hierarchical storage structure for MTT entries */
+struct erdma_mtt {
+ u64 *buf;
+ size_t size;
+
+ bool continuous;
+ union {
+ dma_addr_t buf_dma;
+ struct {
+ struct scatterlist *sglist;
+ u32 nsg;
+ u32 level;
+ };
+ };
+
+ struct erdma_mtt *low_level;
+};
+
struct erdma_mem {
struct ib_umem *umem;
- void *mtt_buf;
- u32 mtt_type;
+ struct erdma_mtt *mtt;
+
u32 page_size;
u32 page_offset;
u32 page_cnt;
@@ -101,8 +119,6 @@ struct erdma_mem {
u64 va;
u64 len;
-
- u64 mtt_entry[ERDMA_MAX_INLINE_MTT_ENTRIES];
};
struct erdma_mr {
@@ -121,8 +137,8 @@ struct erdma_user_dbrecords_page {
};
struct erdma_uqp {
- struct erdma_mem sq_mtt;
- struct erdma_mem rq_mtt;
+ struct erdma_mem sq_mem;
+ struct erdma_mem rq_mem;
dma_addr_t sq_db_info_dma_addr;
dma_addr_t rq_db_info_dma_addr;
@@ -234,7 +250,7 @@ struct erdma_kcq_info {
};
struct erdma_ucq_info {
- struct erdma_mem qbuf_mtt;
+ struct erdma_mem qbuf_mem;
struct erdma_user_dbrecords_page *user_dbr_page;
dma_addr_t db_info_dma_addr;
};
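The erdma changes above replace the old inline/indirect MTT split with a chain of struct erdma_mtt levels: the bottom level holds the DMA addresses of the data pages, each level above holds the DMA addresses of the sg entries of the level below, and erdma_create_mtt() stacks levels until the top table fits in a single sg entry (at most three levels are accepted). A rough sketch of the level arithmetic, assuming 4 KiB pages and the 8-byte entries defined by MTT_SIZE(); region_len is a placeholder:

    size_t entries = region_len / PAGE_SIZE;	/* bottom-level MTT entries */
    int level = 1;

    /* e.g. 4 GiB: 1M entries (8 MiB) -> 2048 entries (16 KiB) -> 4 entries -> 3 levels */
    while (DIV_ROUND_UP(entries * 8, PAGE_SIZE) > 1) {
        entries = DIV_ROUND_UP(entries * 8, PAGE_SIZE);
        level++;
    }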
diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index 2e89ec10efed..5d977f363684 100644
--- a/drivers/infiniband/hw/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -31,6 +31,7 @@ hfi1-y := \
netdev_rx.o \
opfn.o \
pcie.o \
+ pin_system.o \
pio.o \
pio_copy.o \
platform.o \
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 77ee77d4000f..bbc957c578e1 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -230,11 +230,9 @@ static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
/* It must be called with node_affinity.lock held */
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{
- struct list_head *pos;
struct hfi1_affinity_node *entry;
- list_for_each(pos, &node_affinity.list) {
- entry = list_entry(pos, struct hfi1_affinity_node, list);
+ list_for_each_entry(entry, &node_affinity.list, list) {
if (entry->node == node)
return entry;
}
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index baaa4406d5e6..0814291a0412 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1461,7 +1461,8 @@ static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
ret = write_lcb_csr(dd, csr, data);
if (ret) {
- dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
+ if (!(dd->flags & HFI1_SHUTDOWN))
+ dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
return 0;
}
@@ -6160,7 +6161,7 @@ static int request_host_lcb_access(struct hfi1_devdata *dd)
ret = do_8051_command(dd, HCMD_MISC,
(u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
LOAD_DATA_FIELD_ID_SHIFT, NULL);
- if (ret != HCMD_SUCCESS) {
+ if (ret != HCMD_SUCCESS && !(dd->flags & HFI1_SHUTDOWN)) {
dd_dev_err(dd, "%s: command failed with error %d\n",
__func__, ret);
}
@@ -6241,7 +6242,8 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
if (dd->lcb_access_count == 0) {
ret = request_host_lcb_access(dd);
if (ret) {
- dd_dev_err(dd,
+ if (!(dd->flags & HFI1_SHUTDOWN))
+ dd_dev_err(dd,
"%s: unable to acquire LCB access, err %d\n",
__func__, ret);
goto done;
diff --git a/drivers/infiniband/hw/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c
index 05be0d119f79..b0a00b7aaec5 100644
--- a/drivers/infiniband/hw/hfi1/device.c
+++ b/drivers/infiniband/hw/hfi1/device.c
@@ -10,8 +10,29 @@
#include "hfi.h"
#include "device.h"
-static struct class *class;
-static struct class *user_class;
+static char *hfi1_devnode(const struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0600;
+ return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
+}
+
+static const struct class class = {
+ .name = "hfi1",
+ .devnode = hfi1_devnode,
+};
+
+static char *hfi1_user_devnode(const struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0666;
+ return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
+}
+
+static const struct class user_class = {
+ .name = "hfi1_user",
+ .devnode = hfi1_user_devnode,
+};
static dev_t hfi1_dev;
int hfi1_cdev_init(int minor, const char *name,
@@ -37,9 +58,9 @@ int hfi1_cdev_init(int minor, const char *name,
}
if (user_accessible)
- device = device_create(user_class, NULL, dev, NULL, "%s", name);
+ device = device_create(&user_class, NULL, dev, NULL, "%s", name);
else
- device = device_create(class, NULL, dev, NULL, "%s", name);
+ device = device_create(&class, NULL, dev, NULL, "%s", name);
if (IS_ERR(device)) {
ret = PTR_ERR(device);
@@ -72,26 +93,6 @@ const char *class_name(void)
return hfi1_class_name;
}
-static char *hfi1_devnode(const struct device *dev, umode_t *mode)
-{
- if (mode)
- *mode = 0600;
- return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
-}
-
-static const char *hfi1_class_name_user = "hfi1_user";
-static const char *class_name_user(void)
-{
- return hfi1_class_name_user;
-}
-
-static char *hfi1_user_devnode(const struct device *dev, umode_t *mode)
-{
- if (mode)
- *mode = 0666;
- return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
-}
-
int __init dev_init(void)
{
int ret;
@@ -102,27 +103,21 @@ int __init dev_init(void)
goto done;
}
- class = class_create(class_name());
- if (IS_ERR(class)) {
- ret = PTR_ERR(class);
+ ret = class_register(&class);
+ if (ret) {
pr_err("Could not create device class (err %d)\n", -ret);
unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
goto done;
}
- class->devnode = hfi1_devnode;
- user_class = class_create(class_name_user());
- if (IS_ERR(user_class)) {
- ret = PTR_ERR(user_class);
+ ret = class_register(&user_class);
+ if (ret) {
pr_err("Could not create device class for user accessible files (err %d)\n",
-ret);
- class_destroy(class);
- class = NULL;
- user_class = NULL;
+ class_unregister(&class);
unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
goto done;
}
- user_class->devnode = hfi1_user_devnode;
done:
return ret;
@@ -130,11 +125,8 @@ done:
void dev_cleanup(void)
{
- class_destroy(class);
- class = NULL;
-
- class_destroy(user_class);
- user_class = NULL;
+ class_unregister(&class);
+ class_unregister(&user_class);
unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
}
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 7fa9cd39254f..38772e52d7ed 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
- * Copyright(c) 2020 Cornelis Networks, Inc.
+ * Copyright(c) 2020-2023 Cornelis Networks, Inc.
* Copyright(c) 2015-2020 Intel Corporation.
*/
@@ -1378,8 +1378,6 @@ struct hfi1_devdata {
#define PT_INVALID 3
struct tid_rb_node;
-struct mmu_rb_node;
-struct mmu_rb_handler;
/* Private data for file operations */
struct hfi1_filedata {
diff --git a/drivers/infiniband/hw/hfi1/pin_system.c b/drivers/infiniband/hw/hfi1/pin_system.c
new file mode 100644
index 000000000000..384f722093e0
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/pin_system.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
+/*
+ * Copyright(c) 2023 - Cornelis Networks, Inc.
+ */
+
+#include <linux/types.h>
+
+#include "hfi.h"
+#include "common.h"
+#include "device.h"
+#include "pinning.h"
+#include "mmu_rb.h"
+#include "user_sdma.h"
+#include "trace.h"
+
+struct sdma_mmu_node {
+ struct mmu_rb_node rb;
+ struct hfi1_user_sdma_pkt_q *pq;
+ struct page **pages;
+ unsigned int npages;
+};
+
+static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
+ unsigned long len);
+static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode, void *arg2,
+ bool *stop);
+static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
+
+static struct mmu_rb_ops sdma_rb_ops = {
+ .filter = sdma_rb_filter,
+ .evict = sdma_rb_evict,
+ .remove = sdma_rb_remove,
+};
+
+int hfi1_init_system_pinning(struct hfi1_user_sdma_pkt_q *pq)
+{
+ struct hfi1_devdata *dd = pq->dd;
+ int ret;
+
+ ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
+ &pq->handler);
+ if (ret)
+ dd_dev_err(dd,
+ "[%u:%u] Failed to register system memory DMA support with MMU: %d\n",
+ pq->ctxt, pq->subctxt, ret);
+ return ret;
+}
+
+void hfi1_free_system_pinning(struct hfi1_user_sdma_pkt_q *pq)
+{
+ if (pq->handler)
+ hfi1_mmu_rb_unregister(pq->handler);
+}
+
+static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
+{
+ struct evict_data evict_data;
+
+ evict_data.cleared = 0;
+ evict_data.target = npages;
+ hfi1_mmu_rb_evict(pq->handler, &evict_data);
+ return evict_data.cleared;
+}
+
+static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
+ unsigned int start, unsigned int npages)
+{
+ hfi1_release_user_pages(mm, pages + start, npages, false);
+ kfree(pages);
+}
+
+static inline struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node)
+{
+ return node->rb.handler->mn.mm;
+}
+
+static void free_system_node(struct sdma_mmu_node *node)
+{
+ if (node->npages) {
+ unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
+ node->npages);
+ atomic_sub(node->npages, &node->pq->n_locked);
+ }
+ kfree(node);
+}
+
+/*
+ * kref_get()'s an additional kref on the returned rb_node to prevent rb_node
+ * from being released until after rb_node is assigned to an SDMA descriptor
+ * (struct sdma_desc) under add_system_iovec_to_sdma_packet(), even if the
+ * virtual address range for rb_node is invalidated between now and then.
+ */
+static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
+ unsigned long start,
+ unsigned long end)
+{
+ struct mmu_rb_node *rb_node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&handler->lock, flags);
+ rb_node = hfi1_mmu_rb_get_first(handler, start, (end - start));
+ if (!rb_node) {
+ spin_unlock_irqrestore(&handler->lock, flags);
+ return NULL;
+ }
+
+ /* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
+ kref_get(&rb_node->refcount);
+ spin_unlock_irqrestore(&handler->lock, flags);
+
+ return container_of(rb_node, struct sdma_mmu_node, rb);
+}
+
+static int pin_system_pages(struct user_sdma_request *req,
+ uintptr_t start_address, size_t length,
+ struct sdma_mmu_node *node, int npages)
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ int pinned, cleared;
+ struct page **pages;
+
+ pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+retry:
+ if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked),
+ npages)) {
+ SDMA_DBG(req, "Evicting: nlocked %u npages %u",
+ atomic_read(&pq->n_locked), npages);
+ cleared = sdma_cache_evict(pq, npages);
+ if (cleared >= npages)
+ goto retry;
+ }
+
+ SDMA_DBG(req, "Acquire user pages start_address %lx node->npages %u npages %u",
+ start_address, node->npages, npages);
+ pinned = hfi1_acquire_user_pages(current->mm, start_address, npages, 0,
+ pages);
+
+ if (pinned < 0) {
+ kfree(pages);
+ SDMA_DBG(req, "pinned %d", pinned);
+ return pinned;
+ }
+ if (pinned != npages) {
+ unpin_vector_pages(current->mm, pages, node->npages, pinned);
+ SDMA_DBG(req, "npages %u pinned %d", npages, pinned);
+ return -EFAULT;
+ }
+ node->rb.addr = start_address;
+ node->rb.len = length;
+ node->pages = pages;
+ node->npages = npages;
+ atomic_add(pinned, &pq->n_locked);
+ SDMA_DBG(req, "done. pinned %d", pinned);
+ return 0;
+}
+
+/*
+ * kref refcount on *node_p will be 2 on successful addition: one kref from
+ * kref_init() for mmu_rb_handler and one kref to prevent *node_p from being
+ * released until after *node_p is assigned to an SDMA descriptor (struct
+ * sdma_desc) under add_system_iovec_to_sdma_packet(), even if the virtual
+ * address range for *node_p is invalidated between now and then.
+ */
+static int add_system_pinning(struct user_sdma_request *req,
+ struct sdma_mmu_node **node_p,
+ unsigned long start, unsigned long len)
+
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ struct sdma_mmu_node *node;
+ int ret;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ /* First kref "moves" to mmu_rb_handler */
+ kref_init(&node->rb.refcount);
+
+ /* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
+ kref_get(&node->rb.refcount);
+
+ node->pq = pq;
+ ret = pin_system_pages(req, start, len, node, PFN_DOWN(len));
+ if (ret == 0) {
+ ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
+ if (ret)
+ free_system_node(node);
+ else
+ *node_p = node;
+
+ return ret;
+ }
+
+ kfree(node);
+ return ret;
+}
+
+static int get_system_cache_entry(struct user_sdma_request *req,
+ struct sdma_mmu_node **node_p,
+ size_t req_start, size_t req_len)
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ u64 start = ALIGN_DOWN(req_start, PAGE_SIZE);
+ u64 end = PFN_ALIGN(req_start + req_len);
+ int ret;
+
+ if ((end - start) == 0) {
+ SDMA_DBG(req,
+ "Request for empty cache entry req_start %lx req_len %lx start %llx end %llx",
+ req_start, req_len, start, end);
+ return -EINVAL;
+ }
+
+ SDMA_DBG(req, "req_start %lx req_len %lu", req_start, req_len);
+
+ while (1) {
+ struct sdma_mmu_node *node =
+ find_system_node(pq->handler, start, end);
+ u64 prepend_len = 0;
+
+ SDMA_DBG(req, "node %p start %llx end %llu", node, start, end);
+ if (!node) {
+ ret = add_system_pinning(req, node_p, start,
+ end - start);
+ if (ret == -EEXIST) {
+ /*
+ * Another execution context has inserted a
+				 * conflicting entry first.
+ */
+ continue;
+ }
+ return ret;
+ }
+
+ if (node->rb.addr <= start) {
+ /*
+ * This entry covers at least part of the region. If it doesn't extend
+ * to the end, then this will be called again for the next segment.
+ */
+ *node_p = node;
+ return 0;
+ }
+
+ SDMA_DBG(req, "prepend: node->rb.addr %lx, node->rb.refcount %d",
+ node->rb.addr, kref_read(&node->rb.refcount));
+ prepend_len = node->rb.addr - start;
+
+ /*
+ * This node will not be returned, instead a new node
+ * will be. So release the reference.
+ */
+ kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
+
+ /* Prepend a node to cover the beginning of the allocation */
+ ret = add_system_pinning(req, node_p, start, prepend_len);
+ if (ret == -EEXIST) {
+			/* Another execution context has inserted a conflicting entry first. */
+ continue;
+ }
+ return ret;
+ }
+}
+
+static void sdma_mmu_rb_node_get(void *ctx)
+{
+ struct mmu_rb_node *node = ctx;
+
+ kref_get(&node->refcount);
+}
+
+static void sdma_mmu_rb_node_put(void *ctx)
+{
+ struct sdma_mmu_node *node = ctx;
+
+ kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
+}
+
+static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct sdma_mmu_node *cache_entry,
+ size_t start,
+ size_t from_this_cache_entry)
+{
+ struct hfi1_user_sdma_pkt_q *pq = req->pq;
+ unsigned int page_offset;
+ unsigned int from_this_page;
+ size_t page_index;
+ void *ctx;
+ int ret;
+
+ /*
+ * Because the cache may be more fragmented than the memory that is being accessed,
+ * it's not strictly necessary to have a descriptor per cache entry.
+ */
+
+ while (from_this_cache_entry) {
+ page_index = PFN_DOWN(start - cache_entry->rb.addr);
+
+ if (page_index >= cache_entry->npages) {
+ SDMA_DBG(req,
+ "Request for page_index %zu >= cache_entry->npages %u",
+ page_index, cache_entry->npages);
+ return -EINVAL;
+ }
+
+ page_offset = start - ALIGN_DOWN(start, PAGE_SIZE);
+ from_this_page = PAGE_SIZE - page_offset;
+
+ if (from_this_page < from_this_cache_entry) {
+ ctx = NULL;
+ } else {
+ /*
+ * In the case they are equal the next line has no practical effect,
+ * but it's better to do a register to register copy than a conditional
+ * branch.
+ */
+ from_this_page = from_this_cache_entry;
+ ctx = cache_entry;
+ }
+
+ ret = sdma_txadd_page(pq->dd, &tx->txreq,
+ cache_entry->pages[page_index],
+ page_offset, from_this_page,
+ ctx,
+ sdma_mmu_rb_node_get,
+ sdma_mmu_rb_node_put);
+ if (ret) {
+ /*
+ * When there's a failure, the entire request is freed by
+ * user_sdma_send_pkts().
+ */
+ SDMA_DBG(req,
+ "sdma_txadd_page failed %d page_index %lu page_offset %u from_this_page %u",
+ ret, page_index, page_offset, from_this_page);
+ return ret;
+ }
+ start += from_this_page;
+ from_this_cache_entry -= from_this_page;
+ }
+ return 0;
+}
+
+static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct user_sdma_iovec *iovec,
+ size_t from_this_iovec)
+{
+ while (from_this_iovec > 0) {
+ struct sdma_mmu_node *cache_entry;
+ size_t from_this_cache_entry;
+ size_t start;
+ int ret;
+
+ start = (uintptr_t)iovec->iov.iov_base + iovec->offset;
+ ret = get_system_cache_entry(req, &cache_entry, start,
+ from_this_iovec);
+ if (ret) {
+ SDMA_DBG(req, "pin system segment failed %d", ret);
+ return ret;
+ }
+
+ from_this_cache_entry = cache_entry->rb.len - (start - cache_entry->rb.addr);
+ if (from_this_cache_entry > from_this_iovec)
+ from_this_cache_entry = from_this_iovec;
+
+ ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start,
+ from_this_cache_entry);
+
+ /*
+ * Done adding cache_entry to zero or more sdma_desc. Can
+ * kref_put() the "safety" kref taken under
+ * get_system_cache_entry().
+ */
+ kref_put(&cache_entry->rb.refcount, hfi1_mmu_rb_release);
+
+ if (ret) {
+ SDMA_DBG(req, "add system segment failed %d", ret);
+ return ret;
+ }
+
+ iovec->offset += from_this_cache_entry;
+ from_this_iovec -= from_this_cache_entry;
+ }
+
+ return 0;
+}
+
+/*
+ * Add up to pkt_data_remaining bytes to the txreq, starting at the current
+ * offset in the given iovec entry and continuing until all data has been added
+ * to the iovec or the iovec entry type changes.
+ *
+ * On success, prior to returning, adjust pkt_data_remaining, req->iov_idx, and
+ * the offset value in req->iov[req->iov_idx] to reflect the data that has been
+ * consumed.
+ */
+int hfi1_add_pages_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct user_sdma_iovec *iovec,
+ u32 *pkt_data_remaining)
+{
+ size_t remaining_to_add = *pkt_data_remaining;
+ /*
+ * Walk through iovec entries, ensure the associated pages
+ * are pinned and mapped, add data to the packet until no more
+ * data remains to be added or the iovec entry type changes.
+ */
+ while (remaining_to_add > 0) {
+ struct user_sdma_iovec *cur_iovec;
+ size_t from_this_iovec;
+ int ret;
+
+ cur_iovec = iovec;
+ from_this_iovec = iovec->iov.iov_len - iovec->offset;
+
+ if (from_this_iovec > remaining_to_add) {
+ from_this_iovec = remaining_to_add;
+ } else {
+ /* The current iovec entry will be consumed by this pass. */
+ req->iov_idx++;
+ iovec++;
+ }
+
+ ret = add_system_iovec_to_sdma_packet(req, tx, cur_iovec,
+ from_this_iovec);
+ if (ret)
+ return ret;
+
+ remaining_to_add -= from_this_iovec;
+ }
+ *pkt_data_remaining = remaining_to_add;
+
+ return 0;
+}
+
+static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
+ unsigned long len)
+{
+ return (bool)(node->addr == addr);
+}
+
+/*
+ * Return 1 to remove the node from the rb tree and call the remove op.
+ *
+ * Called with the rb tree lock held.
+ */
+static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
+ void *evict_arg, bool *stop)
+{
+ struct sdma_mmu_node *node =
+ container_of(mnode, struct sdma_mmu_node, rb);
+ struct evict_data *evict_data = evict_arg;
+
+ /* this node will be evicted, add its pages to our count */
+ evict_data->cleared += node->npages;
+
+ /* have enough pages been cleared? */
+ if (evict_data->cleared >= evict_data->target)
+ *stop = true;
+
+ return 1; /* remove this node */
+}
+
+static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
+{
+ struct sdma_mmu_node *node =
+ container_of(mnode, struct sdma_mmu_node, rb);
+
+ free_system_node(node);
+}
diff --git a/drivers/infiniband/hw/hfi1/pinning.h b/drivers/infiniband/hw/hfi1/pinning.h
new file mode 100644
index 000000000000..a814a3aa9654
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/pinning.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/*
+ * Copyright(c) 2023 Cornelis Networks, Inc.
+ */
+#ifndef _HFI1_PINNING_H
+#define _HFI1_PINNING_H
+
+struct hfi1_user_sdma_pkt_q;
+struct user_sdma_request;
+struct user_sdma_txreq;
+struct user_sdma_iovec;
+
+int hfi1_init_system_pinning(struct hfi1_user_sdma_pkt_q *pq);
+void hfi1_free_system_pinning(struct hfi1_user_sdma_pkt_q *pq);
+int hfi1_add_pages_to_sdma_packet(struct user_sdma_request *req,
+ struct user_sdma_txreq *tx,
+ struct user_sdma_iovec *iovec,
+ u32 *pkt_data_remaining);
+
+#endif /* _HFI1_PINNING_H */
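The pin_system.c code added above keeps each pinned-page cache node alive with two references: one owned by the mmu_rb handler and a temporary "safety" reference that the caller drops only once the node has been attached to an SDMA descriptor (which takes its own reference through sdma_mmu_rb_node_get()). A stripped-down sketch of that kref discipline, using the names from the new file:

    kref_init(&node->rb.refcount);	/* ref 1: owned by the mmu_rb handler */
    kref_get(&node->rb.refcount);	/* ref 2: caller's temporary "safety" ref */

    /* ... node is inserted or looked up, then handed to sdma_txadd_page() ... */

    kref_put(&node->rb.refcount, hfi1_mmu_rb_release);	/* drop the safety ref */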
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 62e7dc9bea7b..dfea53e0fdeb 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1893,9 +1893,7 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
vl_scontexts[i] = sc_per_vl + (extra > 0 ? 1 : 0);
}
/* build new map */
- newmap = kzalloc(sizeof(*newmap) +
- roundup_pow_of_two(num_vls) *
- sizeof(struct pio_map_elem *),
+ newmap = kzalloc(struct_size(newmap, map, roundup_pow_of_two(num_vls)),
GFP_KERNEL);
if (!newmap)
goto bail;
@@ -1910,9 +1908,8 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
int sz = roundup_pow_of_two(vl_scontexts[i]);
/* only allocate once */
- newmap->map[i] = kzalloc(sizeof(*newmap->map[i]) +
- sz * sizeof(struct
- send_context *),
+ newmap->map[i] = kzalloc(struct_size(newmap->map[i],
+ ksc, sz),
GFP_KERNEL);
if (!newmap->map[i])
goto bail;
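The pio.c hunks above swap the open-coded flexible-array sizing for struct_size(), which evaluates to sizeof(*p) plus n trailing elements and saturates rather than wrapping on overflow. A small sketch of the idiom; struct vl_map, struct elem and n are illustrative placeholders:

    struct vl_map {
        u32 nelem;
        struct elem *slots[];	/* flexible array member */
    };

    struct vl_map *m;

    /* sizeof(*m) + n * sizeof(m->slots[0]), overflow-checked */
    m = kzalloc(struct_size(m, slots, n), GFP_KERNEL);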
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 02bd62b857b7..29ae7beb9b03 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
- * Copyright(c) 2020 - Cornelis Networks, Inc.
+ * Copyright(c) 2020 - 2023 Cornelis Networks, Inc.
* Copyright(c) 2015 - 2018 Intel Corporation.
*/
@@ -60,22 +60,6 @@ static int defer_packet_queue(
uint seq,
bool pkts_sent);
static void activate_packet_queue(struct iowait *wait, int reason);
-static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
- unsigned long len);
-static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
- void *arg2, bool *stop);
-static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode);
-
-static struct mmu_rb_ops sdma_rb_ops = {
- .filter = sdma_rb_filter,
- .evict = sdma_rb_evict,
- .remove = sdma_rb_remove,
-};
-
-static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
- struct user_sdma_txreq *tx,
- struct user_sdma_iovec *iovec,
- u32 *pkt_remaining);
static int defer_packet_queue(
struct sdma_engine *sde,
@@ -185,12 +169,9 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
cq->nentries = hfi1_sdma_comp_ring_size;
- ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
- &pq->handler);
- if (ret) {
- dd_dev_err(dd, "Failed to register with MMU %d", ret);
+ ret = hfi1_init_system_pinning(pq);
+ if (ret)
goto pq_mmu_fail;
- }
rcu_assign_pointer(fd->pq, pq);
fd->cq = cq;
@@ -249,8 +230,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
pq->wait,
!atomic_read(&pq->n_reqs));
kfree(pq->reqs);
- if (pq->handler)
- hfi1_mmu_rb_unregister(pq->handler);
+ hfi1_free_system_pinning(pq);
bitmap_free(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
flush_pq_iowait(pq);
@@ -821,8 +801,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
req->tidoffset += datalen;
req->sent += datalen;
while (datalen) {
- ret = add_system_pages_to_sdma_packet(req, tx, iovec,
- &datalen);
+ ret = hfi1_add_pages_to_sdma_packet(req, tx, iovec,
+ &datalen);
if (ret)
goto free_txreq;
iovec = &req->iovs[req->iov_idx];
@@ -860,17 +840,6 @@ free_tx:
return ret;
}
-static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
-{
- struct evict_data evict_data;
- struct mmu_rb_handler *handler = pq->handler;
-
- evict_data.cleared = 0;
- evict_data.target = npages;
- hfi1_mmu_rb_evict(handler, &evict_data);
- return evict_data.cleared;
-}
-
static int check_header_template(struct user_sdma_request *req,
struct hfi1_pkt_header *hdr, u32 lrhlen,
u32 datalen)
@@ -1253,401 +1222,3 @@ static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
idx, state, ret);
}
-
-static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
- unsigned int start, unsigned int npages)
-{
- hfi1_release_user_pages(mm, pages + start, npages, false);
- kfree(pages);
-}
-
-static void free_system_node(struct sdma_mmu_node *node)
-{
- if (node->npages) {
- unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
- node->npages);
- atomic_sub(node->npages, &node->pq->n_locked);
- }
- kfree(node);
-}
-
-/*
- * kref_get()'s an additional kref on the returned rb_node to prevent rb_node
- * from being released until after rb_node is assigned to an SDMA descriptor
- * (struct sdma_desc) under add_system_iovec_to_sdma_packet(), even if the
- * virtual address range for rb_node is invalidated between now and then.
- */
-static struct sdma_mmu_node *find_system_node(struct mmu_rb_handler *handler,
- unsigned long start,
- unsigned long end)
-{
- struct mmu_rb_node *rb_node;
- unsigned long flags;
-
- spin_lock_irqsave(&handler->lock, flags);
- rb_node = hfi1_mmu_rb_get_first(handler, start, (end - start));
- if (!rb_node) {
- spin_unlock_irqrestore(&handler->lock, flags);
- return NULL;
- }
-
- /* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
- kref_get(&rb_node->refcount);
- spin_unlock_irqrestore(&handler->lock, flags);
-
- return container_of(rb_node, struct sdma_mmu_node, rb);
-}
-
-static int pin_system_pages(struct user_sdma_request *req,
- uintptr_t start_address, size_t length,
- struct sdma_mmu_node *node, int npages)
-{
- struct hfi1_user_sdma_pkt_q *pq = req->pq;
- int pinned, cleared;
- struct page **pages;
-
- pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
-retry:
- if (!hfi1_can_pin_pages(pq->dd, current->mm, atomic_read(&pq->n_locked),
- npages)) {
- SDMA_DBG(req, "Evicting: nlocked %u npages %u",
- atomic_read(&pq->n_locked), npages);
- cleared = sdma_cache_evict(pq, npages);
- if (cleared >= npages)
- goto retry;
- }
-
- SDMA_DBG(req, "Acquire user pages start_address %lx node->npages %u npages %u",
- start_address, node->npages, npages);
- pinned = hfi1_acquire_user_pages(current->mm, start_address, npages, 0,
- pages);
-
- if (pinned < 0) {
- kfree(pages);
- SDMA_DBG(req, "pinned %d", pinned);
- return pinned;
- }
- if (pinned != npages) {
- unpin_vector_pages(current->mm, pages, node->npages, pinned);
- SDMA_DBG(req, "npages %u pinned %d", npages, pinned);
- return -EFAULT;
- }
- node->rb.addr = start_address;
- node->rb.len = length;
- node->pages = pages;
- node->npages = npages;
- atomic_add(pinned, &pq->n_locked);
- SDMA_DBG(req, "done. pinned %d", pinned);
- return 0;
-}
-
-/*
- * kref refcount on *node_p will be 2 on successful addition: one kref from
- * kref_init() for mmu_rb_handler and one kref to prevent *node_p from being
- * released until after *node_p is assigned to an SDMA descriptor (struct
- * sdma_desc) under add_system_iovec_to_sdma_packet(), even if the virtual
- * address range for *node_p is invalidated between now and then.
- */
-static int add_system_pinning(struct user_sdma_request *req,
- struct sdma_mmu_node **node_p,
- unsigned long start, unsigned long len)
-
-{
- struct hfi1_user_sdma_pkt_q *pq = req->pq;
- struct sdma_mmu_node *node;
- int ret;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
- /* First kref "moves" to mmu_rb_handler */
- kref_init(&node->rb.refcount);
-
- /* "safety" kref to prevent release before add_system_iovec_to_sdma_packet() */
- kref_get(&node->rb.refcount);
-
- node->pq = pq;
- ret = pin_system_pages(req, start, len, node, PFN_DOWN(len));
- if (ret == 0) {
- ret = hfi1_mmu_rb_insert(pq->handler, &node->rb);
- if (ret)
- free_system_node(node);
- else
- *node_p = node;
-
- return ret;
- }
-
- kfree(node);
- return ret;
-}
-
-static int get_system_cache_entry(struct user_sdma_request *req,
- struct sdma_mmu_node **node_p,
- size_t req_start, size_t req_len)
-{
- struct hfi1_user_sdma_pkt_q *pq = req->pq;
- u64 start = ALIGN_DOWN(req_start, PAGE_SIZE);
- u64 end = PFN_ALIGN(req_start + req_len);
- struct mmu_rb_handler *handler = pq->handler;
- int ret;
-
- if ((end - start) == 0) {
- SDMA_DBG(req,
- "Request for empty cache entry req_start %lx req_len %lx start %llx end %llx",
- req_start, req_len, start, end);
- return -EINVAL;
- }
-
- SDMA_DBG(req, "req_start %lx req_len %lu", req_start, req_len);
-
- while (1) {
- struct sdma_mmu_node *node =
- find_system_node(handler, start, end);
- u64 prepend_len = 0;
-
- SDMA_DBG(req, "node %p start %llx end %llu", node, start, end);
- if (!node) {
- ret = add_system_pinning(req, node_p, start,
- end - start);
- if (ret == -EEXIST) {
- /*
- * Another execution context has inserted a
- * conflicting entry first.
- */
- continue;
- }
- return ret;
- }
-
- if (node->rb.addr <= start) {
- /*
- * This entry covers at least part of the region. If it doesn't extend
- * to the end, then this will be called again for the next segment.
- */
- *node_p = node;
- return 0;
- }
-
- SDMA_DBG(req, "prepend: node->rb.addr %lx, node->rb.refcount %d",
- node->rb.addr, kref_read(&node->rb.refcount));
- prepend_len = node->rb.addr - start;
-
- /*
- * This node will not be returned, instead a new node
- * will be. So release the reference.
- */
- kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
-
- /* Prepend a node to cover the beginning of the allocation */
- ret = add_system_pinning(req, node_p, start, prepend_len);
- if (ret == -EEXIST) {
- /* Another execution context has inserted a conflicting entry first. */
- continue;
- }
- return ret;
- }
-}
-
-static void sdma_mmu_rb_node_get(void *ctx)
-{
- struct mmu_rb_node *node = ctx;
-
- kref_get(&node->refcount);
-}
-
-static void sdma_mmu_rb_node_put(void *ctx)
-{
- struct sdma_mmu_node *node = ctx;
-
- kref_put(&node->rb.refcount, hfi1_mmu_rb_release);
-}
-
-static int add_mapping_to_sdma_packet(struct user_sdma_request *req,
- struct user_sdma_txreq *tx,
- struct sdma_mmu_node *cache_entry,
- size_t start,
- size_t from_this_cache_entry)
-{
- struct hfi1_user_sdma_pkt_q *pq = req->pq;
- unsigned int page_offset;
- unsigned int from_this_page;
- size_t page_index;
- void *ctx;
- int ret;
-
- /*
- * Because the cache may be more fragmented than the memory that is being accessed,
- * it's not strictly necessary to have a descriptor per cache entry.
- */
-
- while (from_this_cache_entry) {
- page_index = PFN_DOWN(start - cache_entry->rb.addr);
-
- if (page_index >= cache_entry->npages) {
- SDMA_DBG(req,
- "Request for page_index %zu >= cache_entry->npages %u",
- page_index, cache_entry->npages);
- return -EINVAL;
- }
-
- page_offset = start - ALIGN_DOWN(start, PAGE_SIZE);
- from_this_page = PAGE_SIZE - page_offset;
-
- if (from_this_page < from_this_cache_entry) {
- ctx = NULL;
- } else {
- /*
- * In the case they are equal the next line has no practical effect,
- * but it's better to do a register to register copy than a conditional
- * branch.
- */
- from_this_page = from_this_cache_entry;
- ctx = cache_entry;
- }
-
- ret = sdma_txadd_page(pq->dd, &tx->txreq,
- cache_entry->pages[page_index],
- page_offset, from_this_page,
- ctx,
- sdma_mmu_rb_node_get,
- sdma_mmu_rb_node_put);
- if (ret) {
- /*
- * When there's a failure, the entire request is freed by
- * user_sdma_send_pkts().
- */
- SDMA_DBG(req,
- "sdma_txadd_page failed %d page_index %lu page_offset %u from_this_page %u",
- ret, page_index, page_offset, from_this_page);
- return ret;
- }
- start += from_this_page;
- from_this_cache_entry -= from_this_page;
- }
- return 0;
-}
-
-static int add_system_iovec_to_sdma_packet(struct user_sdma_request *req,
- struct user_sdma_txreq *tx,
- struct user_sdma_iovec *iovec,
- size_t from_this_iovec)
-{
- while (from_this_iovec > 0) {
- struct sdma_mmu_node *cache_entry;
- size_t from_this_cache_entry;
- size_t start;
- int ret;
-
- start = (uintptr_t)iovec->iov.iov_base + iovec->offset;
- ret = get_system_cache_entry(req, &cache_entry, start,
- from_this_iovec);
- if (ret) {
- SDMA_DBG(req, "pin system segment failed %d", ret);
- return ret;
- }
-
- from_this_cache_entry = cache_entry->rb.len - (start - cache_entry->rb.addr);
- if (from_this_cache_entry > from_this_iovec)
- from_this_cache_entry = from_this_iovec;
-
- ret = add_mapping_to_sdma_packet(req, tx, cache_entry, start,
- from_this_cache_entry);
-
- /*
- * Done adding cache_entry to zero or more sdma_desc. Can
- * kref_put() the "safety" kref taken under
- * get_system_cache_entry().
- */
- kref_put(&cache_entry->rb.refcount, hfi1_mmu_rb_release);
-
- if (ret) {
- SDMA_DBG(req, "add system segment failed %d", ret);
- return ret;
- }
-
- iovec->offset += from_this_cache_entry;
- from_this_iovec -= from_this_cache_entry;
- }
-
- return 0;
-}
-
-static int add_system_pages_to_sdma_packet(struct user_sdma_request *req,
- struct user_sdma_txreq *tx,
- struct user_sdma_iovec *iovec,
- u32 *pkt_data_remaining)
-{
- size_t remaining_to_add = *pkt_data_remaining;
- /*
- * Walk through iovec entries, ensure the associated pages
- * are pinned and mapped, add data to the packet until no more
- * data remains to be added.
- */
- while (remaining_to_add > 0) {
- struct user_sdma_iovec *cur_iovec;
- size_t from_this_iovec;
- int ret;
-
- cur_iovec = iovec;
- from_this_iovec = iovec->iov.iov_len - iovec->offset;
-
- if (from_this_iovec > remaining_to_add) {
- from_this_iovec = remaining_to_add;
- } else {
- /* The current iovec entry will be consumed by this pass. */
- req->iov_idx++;
- iovec++;
- }
-
- ret = add_system_iovec_to_sdma_packet(req, tx, cur_iovec,
- from_this_iovec);
- if (ret)
- return ret;
-
- remaining_to_add -= from_this_iovec;
- }
- *pkt_data_remaining = remaining_to_add;
-
- return 0;
-}
-
-static bool sdma_rb_filter(struct mmu_rb_node *node, unsigned long addr,
- unsigned long len)
-{
- return (bool)(node->addr == addr);
-}
-
-/*
- * Return 1 to remove the node from the rb tree and call the remove op.
- *
- * Called with the rb tree lock held.
- */
-static int sdma_rb_evict(void *arg, struct mmu_rb_node *mnode,
- void *evict_arg, bool *stop)
-{
- struct sdma_mmu_node *node =
- container_of(mnode, struct sdma_mmu_node, rb);
- struct evict_data *evict_data = evict_arg;
-
- /* this node will be evicted, add its pages to our count */
- evict_data->cleared += node->npages;
-
- /* have enough pages been cleared? */
- if (evict_data->cleared >= evict_data->target)
- *stop = true;
-
- return 1; /* remove this node */
-}
-
-static void sdma_rb_remove(void *arg, struct mmu_rb_node *mnode)
-{
- struct sdma_mmu_node *node =
- container_of(mnode, struct sdma_mmu_node, rb);
-
- free_system_node(node);
-}
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index 548347d4c5bc..742ec1470cc5 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
- * Copyright(c) 2020 - Cornelis Networks, Inc.
+ * Copyright(c) 2023 - Cornelis Networks, Inc.
* Copyright(c) 2015 - 2018 Intel Corporation.
*/
#ifndef _HFI1_USER_SDMA_H
@@ -13,6 +13,8 @@
#include "iowait.h"
#include "user_exp_rcv.h"
#include "mmu_rb.h"
+#include "pinning.h"
+#include "sdma.h"
/* The maximum number of Data io vectors per message/request */
#define MAX_VECTORS_PER_REQ 8
@@ -101,13 +103,6 @@ struct hfi1_user_sdma_comp_q {
struct hfi1_sdma_comp_entry *comps;
};
-struct sdma_mmu_node {
- struct mmu_rb_node rb;
- struct hfi1_user_sdma_pkt_q *pq;
- struct page **pages;
- unsigned int npages;
-};
-
struct user_sdma_iovec {
struct list_head list;
struct iovec iov;
@@ -203,10 +198,4 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
struct iovec *iovec, unsigned long dim,
unsigned long *count);
-
-static inline struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node)
-{
- return node->rb.handler->mn.mm;
-}
-
#endif /* _HFI1_USER_SDMA_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 84239b907de2..7f0d0288beb1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -97,6 +97,7 @@
#define HNS_ROCE_CQ_BANK_NUM 4
#define CQ_BANKID_SHIFT 2
+#define CQ_BANKID_MASK GENMASK(1, 0)
enum {
SERV_TYPE_RC,
@@ -714,7 +715,6 @@ struct hns_roce_caps {
u32 max_rq_sg;
u32 rsv0;
u32 num_qps;
- u32 num_pi_qps;
u32 reserved_qps;
u32 num_srqs;
u32 max_wqes;
@@ -840,6 +840,32 @@ enum hns_roce_device_state {
HNS_ROCE_DEVICE_STATE_UNINIT,
};
+enum hns_roce_hw_pkt_stat_index {
+ HNS_ROCE_HW_RX_RC_PKT_CNT,
+ HNS_ROCE_HW_RX_UC_PKT_CNT,
+ HNS_ROCE_HW_RX_UD_PKT_CNT,
+ HNS_ROCE_HW_RX_XRC_PKT_CNT,
+ HNS_ROCE_HW_RX_PKT_CNT,
+ HNS_ROCE_HW_RX_ERR_PKT_CNT,
+ HNS_ROCE_HW_RX_CNP_PKT_CNT,
+ HNS_ROCE_HW_TX_RC_PKT_CNT,
+ HNS_ROCE_HW_TX_UC_PKT_CNT,
+ HNS_ROCE_HW_TX_UD_PKT_CNT,
+ HNS_ROCE_HW_TX_XRC_PKT_CNT,
+ HNS_ROCE_HW_TX_PKT_CNT,
+ HNS_ROCE_HW_TX_ERR_PKT_CNT,
+ HNS_ROCE_HW_TX_CNP_PKT_CNT,
+ HNS_ROCE_HW_TRP_GET_MPT_ERR_PKT_CNT,
+ HNS_ROCE_HW_TRP_GET_IRRL_ERR_PKT_CNT,
+ HNS_ROCE_HW_ECN_DB_CNT,
+ HNS_ROCE_HW_RX_BUF_CNT,
+ HNS_ROCE_HW_TRP_RX_SOF_CNT,
+ HNS_ROCE_HW_CQ_CQE_CNT,
+ HNS_ROCE_HW_CQ_POE_CNT,
+ HNS_ROCE_HW_CQ_NOTIFY_CNT,
+ HNS_ROCE_HW_CNT_TOTAL
+};
+
struct hns_roce_hw {
int (*cmq_init)(struct hns_roce_dev *hr_dev);
void (*cmq_exit)(struct hns_roce_dev *hr_dev);
@@ -882,6 +908,8 @@ struct hns_roce_hw {
int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer);
int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
+ int (*query_hw_counter)(struct hns_roce_dev *hr_dev,
+ u64 *stats, u32 port, int *hw_counters);
const struct ib_device_ops *hns_roce_dev_ops;
const struct ib_device_ops *hns_roce_dev_srq_ops;
};
@@ -1112,7 +1140,6 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_srq_table(struct hns_roce_dev *hr_dev);
void hns_roce_init_xrcd_table(struct hns_roce_dev *hr_dev);
-void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev);
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev);
@@ -1161,9 +1188,6 @@ int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
int hns_roce_create_srq(struct ib_srq *srq,
struct ib_srq_init_attr *srq_init_attr,
struct ib_udata *udata);
-int hns_roce_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr,
- enum ib_srq_attr_mask srq_attr_mask,
- struct ib_udata *udata);
int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
int hns_roce_alloc_xrcd(struct ib_xrcd *ib_xrcd, struct ib_udata *udata);
@@ -1206,7 +1230,6 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
-u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 47c0efed1821..c4ac06a33869 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -78,7 +78,7 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
return false;
}
- return hop_num ? true : false;
+ return hop_num;
}
static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 8f7eb11066b4..d82daff2d9bd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -750,7 +750,8 @@ out:
qp->sq.head += nreq;
qp->next_sge = sge_idx;
- if (nreq == 1 && (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
+ if (nreq == 1 && !ret &&
+ (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE))
write_dwqe(hr_dev, qp, wqe);
else
update_sq_db(hr_dev, qp);
@@ -1612,6 +1613,56 @@ static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
return 0;
}
+static int hns_roce_hw_v2_query_counter(struct hns_roce_dev *hr_dev,
+ u64 *stats, u32 port, int *num_counters)
+{
+#define CNT_PER_DESC 3
+ struct hns_roce_cmq_desc *desc;
+ int bd_idx, cnt_idx;
+ __le64 *cnt_data;
+ int desc_num;
+ int ret;
+ int i;
+
+ if (port > hr_dev->caps.num_ports)
+ return -EINVAL;
+
+ desc_num = DIV_ROUND_UP(HNS_ROCE_HW_CNT_TOTAL, CNT_PER_DESC);
+ desc = kcalloc(desc_num, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ for (i = 0; i < desc_num; i++) {
+ hns_roce_cmq_setup_basic_desc(&desc[i],
+ HNS_ROCE_OPC_QUERY_COUNTER, true);
+ if (i != desc_num - 1)
+ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
+ }
+
+ ret = hns_roce_cmq_send(hr_dev, desc, desc_num);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to get counter, ret = %d.\n", ret);
+ goto err_out;
+ }
+
+ for (i = 0; i < HNS_ROCE_HW_CNT_TOTAL && i < *num_counters; i++) {
+ bd_idx = i / CNT_PER_DESC;
+ if (!(desc[bd_idx].flag & HNS_ROCE_CMD_FLAG_NEXT) &&
+ bd_idx != HNS_ROCE_HW_CNT_TOTAL / CNT_PER_DESC)
+ break;
+
+ cnt_data = (__le64 *)&desc[bd_idx].data[0];
+ cnt_idx = i % CNT_PER_DESC;
+ stats[i] = le64_to_cpu(cnt_data[cnt_idx]);
+ }
+ *num_counters = i;
+
+err_out:
+ kfree(desc);
+ return ret;
+}
+
static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc;
@@ -1680,29 +1731,6 @@ static int load_func_res_caps(struct hns_roce_dev *hr_dev, bool is_vf)
return 0;
}
-static int load_ext_cfg_caps(struct hns_roce_dev *hr_dev, bool is_vf)
-{
- struct hns_roce_cmq_desc desc;
- struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
- struct hns_roce_caps *caps = &hr_dev->caps;
- u32 func_num, qp_num;
- int ret;
-
- hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, true);
- ret = hns_roce_cmq_send(hr_dev, &desc, 1);
- if (ret)
- return ret;
-
- func_num = is_vf ? 1 : max_t(u32, 1, hr_dev->func_num);
- qp_num = hr_reg_read(req, EXT_CFG_QP_PI_NUM) / func_num;
- caps->num_pi_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);
-
- qp_num = hr_reg_read(req, EXT_CFG_QP_NUM) / func_num;
- caps->num_qps = round_down(qp_num, HNS_ROCE_QP_BANK_NUM);
-
- return 0;
-}
-
static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
{
struct hns_roce_cmq_desc desc;
@@ -1723,50 +1751,37 @@ static int load_pf_timer_res_caps(struct hns_roce_dev *hr_dev)
return 0;
}
-static int query_func_resource_caps(struct hns_roce_dev *hr_dev, bool is_vf)
+static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
struct device *dev = hr_dev->dev;
int ret;
- ret = load_func_res_caps(hr_dev, is_vf);
+ ret = load_func_res_caps(hr_dev, false);
if (ret) {
- dev_err(dev, "failed to load res caps, ret = %d (%s).\n", ret,
- is_vf ? "vf" : "pf");
+ dev_err(dev, "failed to load pf res caps, ret = %d.\n", ret);
return ret;
}
- if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
- ret = load_ext_cfg_caps(hr_dev, is_vf);
- if (ret)
- dev_err(dev, "failed to load ext cfg, ret = %d (%s).\n",
- ret, is_vf ? "vf" : "pf");
- }
+ ret = load_pf_timer_res_caps(hr_dev);
+ if (ret)
+ dev_err(dev, "failed to load pf timer resource, ret = %d.\n",
+ ret);
return ret;
}
-static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
+static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
{
struct device *dev = hr_dev->dev;
int ret;
- ret = query_func_resource_caps(hr_dev, false);
+ ret = load_func_res_caps(hr_dev, true);
if (ret)
- return ret;
-
- ret = load_pf_timer_res_caps(hr_dev);
- if (ret)
- dev_err(dev, "failed to load pf timer resource, ret = %d.\n",
- ret);
+ dev_err(dev, "failed to load vf res caps, ret = %d.\n", ret);
return ret;
}
-static int hns_roce_query_vf_resource(struct hns_roce_dev *hr_dev)
-{
- return query_func_resource_caps(hr_dev, true);
-}
-
static int __hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
u32 vf_id)
{
@@ -1849,24 +1864,6 @@ static int config_vf_hem_resource(struct hns_roce_dev *hr_dev, int vf_id)
return hns_roce_cmq_send(hr_dev, desc, 2);
}
-static int config_vf_ext_resource(struct hns_roce_dev *hr_dev, u32 vf_id)
-{
- struct hns_roce_cmq_desc desc;
- struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
- struct hns_roce_caps *caps = &hr_dev->caps;
-
- hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_EXT_CFG, false);
-
- hr_reg_write(req, EXT_CFG_VF_ID, vf_id);
-
- hr_reg_write(req, EXT_CFG_QP_PI_NUM, caps->num_pi_qps);
- hr_reg_write(req, EXT_CFG_QP_PI_IDX, vf_id * caps->num_pi_qps);
- hr_reg_write(req, EXT_CFG_QP_NUM, caps->num_qps);
- hr_reg_write(req, EXT_CFG_QP_IDX, vf_id * caps->num_qps);
-
- return hns_roce_cmq_send(hr_dev, &desc, 1);
-}
-
static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
u32 func_num = max_t(u32, 1, hr_dev->func_num);
@@ -1881,16 +1878,6 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
vf_id, ret);
return ret;
}
-
- if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
- ret = config_vf_ext_resource(hr_dev, vf_id);
- if (ret) {
- dev_err(hr_dev->dev,
- "failed to config vf-%u ext res, ret = %d.\n",
- vf_id, ret);
- return ret;
- }
- }
}
return 0;
@@ -2075,9 +2062,6 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
- caps->num_xrcds = HNS_ROCE_V2_MAX_XRCD_NUM;
- caps->reserved_xrcds = HNS_ROCE_V2_RSV_XRCD_NUM;
-
caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
@@ -2200,6 +2184,7 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
caps->num_cqs = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_CQS);
caps->gid_table_len[0] = hr_reg_read(resp_c, PF_CAPS_C_MAX_GID);
caps->max_cqes = 1 << hr_reg_read(resp_c, PF_CAPS_C_CQ_DEPTH);
+ caps->num_xrcds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_XRCDS);
caps->num_mtpts = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_MRWS);
caps->num_qps = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_QPS);
caps->max_qp_init_rdma = hr_reg_read(resp_c, PF_CAPS_C_MAX_ORD);
@@ -2220,6 +2205,7 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
caps->reserved_mrws = hr_reg_read(resp_e, PF_CAPS_E_RSV_MRWS);
caps->chunk_sz = 1 << hr_reg_read(resp_e, PF_CAPS_E_CHUNK_SIZE_SHIFT);
caps->reserved_cqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_CQS);
+ caps->reserved_xrcds = hr_reg_read(resp_e, PF_CAPS_E_RSV_XRCDS);
caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS);
caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS);
@@ -6646,6 +6632,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.query_cqc = hns_roce_v2_query_cqc,
.query_qpc = hns_roce_v2_query_qpc,
.query_mpt = hns_roce_v2_query_mpt,
+ .query_hw_counter = hns_roce_hw_v2_query_counter,
.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};
@@ -6722,14 +6709,14 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
ret = hns_roce_init(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
- goto error_failed_cfg;
+ goto error_failed_roce_init;
}
if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
ret = free_mr_init(hr_dev);
if (ret) {
dev_err(hr_dev->dev, "failed to init free mr!\n");
- goto error_failed_roce_init;
+ goto error_failed_free_mr_init;
}
}
@@ -6737,10 +6724,10 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
return 0;
-error_failed_roce_init:
+error_failed_free_mr_init:
hns_roce_exit(hr_dev);
-error_failed_cfg:
+error_failed_roce_init:
kfree(hr_dev->priv);
error_failed_kzalloc:
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 7033eae2407c..cd97cbee682a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -42,7 +42,6 @@
#define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_XRCD_NUM 0x1000000
-#define HNS_ROCE_V2_RSV_XRCD_NUM 0
#define HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08 10
@@ -199,6 +198,7 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_QUERY_HW_VER = 0x8000,
HNS_ROCE_OPC_CFG_GLOBAL_PARAM = 0x8001,
HNS_ROCE_OPC_ALLOC_PF_RES = 0x8004,
+ HNS_ROCE_OPC_QUERY_COUNTER = 0x8206,
HNS_ROCE_OPC_QUERY_PF_RES = 0x8400,
HNS_ROCE_OPC_ALLOC_VF_RES = 0x8401,
HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403,
@@ -220,7 +220,6 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_QUERY_VF_RES = 0x850e,
HNS_ROCE_OPC_CFG_GMV_TBL = 0x850f,
HNS_ROCE_OPC_CFG_GMV_BT = 0x8510,
- HNS_ROCE_OPC_EXT_CFG = 0x8512,
HNS_ROCE_QUERY_RAM_ECC = 0x8513,
HNS_SWITCH_PARAMETER_CFG = 0x1033,
};
@@ -957,15 +956,6 @@ struct hns_roce_func_clear {
#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL 40
#define HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT 20
-/* Fields of HNS_ROCE_OPC_EXT_CFG */
-#define EXT_CFG_VF_ID CMQ_REQ_FIELD_LOC(31, 0)
-#define EXT_CFG_QP_PI_IDX CMQ_REQ_FIELD_LOC(45, 32)
-#define EXT_CFG_QP_PI_NUM CMQ_REQ_FIELD_LOC(63, 48)
-#define EXT_CFG_QP_NUM CMQ_REQ_FIELD_LOC(87, 64)
-#define EXT_CFG_QP_IDX CMQ_REQ_FIELD_LOC(119, 96)
-#define EXT_CFG_LLM_IDX CMQ_REQ_FIELD_LOC(139, 128)
-#define EXT_CFG_LLM_NUM CMQ_REQ_FIELD_LOC(156, 144)
-
#define CFG_LLM_A_BA_L CMQ_REQ_FIELD_LOC(31, 0)
#define CFG_LLM_A_BA_H CMQ_REQ_FIELD_LOC(63, 32)
#define CFG_LLM_A_DEPTH CMQ_REQ_FIELD_LOC(76, 64)
@@ -1202,6 +1192,7 @@ struct hns_roce_query_pf_caps_c {
#define PF_CAPS_C_NUM_CQS PF_CAPS_C_FIELD_LOC(51, 32)
#define PF_CAPS_C_MAX_GID PF_CAPS_C_FIELD_LOC(60, 52)
#define PF_CAPS_C_CQ_DEPTH PF_CAPS_C_FIELD_LOC(86, 64)
+#define PF_CAPS_C_NUM_XRCDS PF_CAPS_C_FIELD_LOC(91, 87)
#define PF_CAPS_C_NUM_MRWS PF_CAPS_C_FIELD_LOC(115, 96)
#define PF_CAPS_C_NUM_QPS PF_CAPS_C_FIELD_LOC(147, 128)
#define PF_CAPS_C_MAX_ORD PF_CAPS_C_FIELD_LOC(155, 148)
@@ -1260,6 +1251,7 @@ struct hns_roce_query_pf_caps_e {
#define PF_CAPS_E_RSV_MRWS PF_CAPS_E_FIELD_LOC(19, 0)
#define PF_CAPS_E_CHUNK_SIZE_SHIFT PF_CAPS_E_FIELD_LOC(31, 20)
#define PF_CAPS_E_RSV_CQS PF_CAPS_E_FIELD_LOC(51, 32)
+#define PF_CAPS_E_RSV_XRCDS PF_CAPS_E_FIELD_LOC(63, 52)
#define PF_CAPS_E_RSV_SRQS PF_CAPS_E_FIELD_LOC(83, 64)
#define PF_CAPS_E_RSV_LKEYS PF_CAPS_E_FIELD_LOC(115, 96)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 485e110ca433..d9d546cdef52 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -219,6 +219,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
unsigned long flags;
enum ib_mtu mtu;
u32 port;
+ int ret;
port = port_num - 1;
@@ -231,8 +232,10 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
IB_PORT_BOOT_MGMT_SUP;
props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
props->pkey_tbl_len = 1;
- props->active_width = IB_WIDTH_4X;
- props->active_speed = 1;
+ ret = ib_get_eth_speed(ib_dev, port_num, &props->active_speed,
+ &props->active_width);
+ if (ret)
+ ibdev_warn(ib_dev, "failed to get speed, ret = %d.\n", ret);
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
@@ -512,6 +515,83 @@ static void hns_roce_get_fw_ver(struct ib_device *device, char *str)
sub_minor);
}
+#define HNS_ROCE_HW_CNT(ename, cname) \
+ [HNS_ROCE_HW_##ename##_CNT].name = cname
+
+static const struct rdma_stat_desc hns_roce_port_stats_descs[] = {
+ HNS_ROCE_HW_CNT(RX_RC_PKT, "rx_rc_pkt"),
+ HNS_ROCE_HW_CNT(RX_UC_PKT, "rx_uc_pkt"),
+ HNS_ROCE_HW_CNT(RX_UD_PKT, "rx_ud_pkt"),
+ HNS_ROCE_HW_CNT(RX_XRC_PKT, "rx_xrc_pkt"),
+ HNS_ROCE_HW_CNT(RX_PKT, "rx_pkt"),
+ HNS_ROCE_HW_CNT(RX_ERR_PKT, "rx_err_pkt"),
+ HNS_ROCE_HW_CNT(RX_CNP_PKT, "rx_cnp_pkt"),
+ HNS_ROCE_HW_CNT(TX_RC_PKT, "tx_rc_pkt"),
+ HNS_ROCE_HW_CNT(TX_UC_PKT, "tx_uc_pkt"),
+ HNS_ROCE_HW_CNT(TX_UD_PKT, "tx_ud_pkt"),
+ HNS_ROCE_HW_CNT(TX_XRC_PKT, "tx_xrc_pkt"),
+ HNS_ROCE_HW_CNT(TX_PKT, "tx_pkt"),
+ HNS_ROCE_HW_CNT(TX_ERR_PKT, "tx_err_pkt"),
+ HNS_ROCE_HW_CNT(TX_CNP_PKT, "tx_cnp_pkt"),
+ HNS_ROCE_HW_CNT(TRP_GET_MPT_ERR_PKT, "trp_get_mpt_err_pkt"),
+ HNS_ROCE_HW_CNT(TRP_GET_IRRL_ERR_PKT, "trp_get_irrl_err_pkt"),
+ HNS_ROCE_HW_CNT(ECN_DB, "ecn_doorbell"),
+ HNS_ROCE_HW_CNT(RX_BUF, "rx_buffer"),
+ HNS_ROCE_HW_CNT(TRP_RX_SOF, "trp_rx_sof"),
+ HNS_ROCE_HW_CNT(CQ_CQE, "cq_cqe"),
+ HNS_ROCE_HW_CNT(CQ_POE, "cq_poe"),
+ HNS_ROCE_HW_CNT(CQ_NOTIFY, "cq_notify"),
+};
+
+static struct rdma_hw_stats *hns_roce_alloc_hw_port_stats(
+ struct ib_device *device, u32 port_num)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(device);
+ u32 port = port_num - 1;
+
+ if (port > hr_dev->caps.num_ports) {
+ ibdev_err(device, "invalid port num.\n");
+ return NULL;
+ }
+
+ if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 ||
+ hr_dev->is_vf)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(hns_roce_port_stats_descs,
+ ARRAY_SIZE(hns_roce_port_stats_descs),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int hns_roce_get_hw_stats(struct ib_device *device,
+ struct rdma_hw_stats *stats,
+ u32 port, int index)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(device);
+ int num_counters = HNS_ROCE_HW_CNT_TOTAL;
+ int ret;
+
+ if (port == 0)
+ return 0;
+
+ if (port > hr_dev->caps.num_ports)
+ return -EINVAL;
+
+ if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 ||
+ hr_dev->is_vf)
+ return -EOPNOTSUPP;
+
+ ret = hr_dev->hw->query_hw_counter(hr_dev, stats->value, port,
+ &num_counters);
+ if (ret) {
+ ibdev_err(device, "failed to query hw counter, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ return num_counters;
+}
+
static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
@@ -554,6 +634,8 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.query_pkey = hns_roce_query_pkey,
.query_port = hns_roce_query_port,
.reg_user_mr = hns_roce_reg_user_mr,
+ .alloc_hw_port_stats = hns_roce_alloc_hw_port_stats,
+ .get_hw_stats = hns_roce_get_hw_stats,
INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index d855a917f4cf..cdc1c6de43a1 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -170,14 +170,29 @@ static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
}
}
-static u8 get_least_load_bankid_for_qp(struct hns_roce_bank *bank)
+static u8 get_affinity_cq_bank(u8 qp_bank)
{
- u32 least_load = bank[0].inuse;
+ return (qp_bank >> 1) & CQ_BANKID_MASK;
+}
+
+static u8 get_least_load_bankid_for_qp(struct ib_qp_init_attr *init_attr,
+ struct hns_roce_bank *bank)
+{
+#define INVALID_LOAD_QPNUM 0xFFFFFFFF
+ struct ib_cq *scq = init_attr->send_cq;
+ u32 least_load = INVALID_LOAD_QPNUM;
+ unsigned long cqn = 0;
u8 bankid = 0;
u32 bankcnt;
u8 i;
- for (i = 1; i < HNS_ROCE_QP_BANK_NUM; i++) {
+ if (scq)
+ cqn = to_hr_cq(scq)->cqn;
+
+ for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
+ if (scq && (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK)))
+ continue;
+
bankcnt = bank[i].inuse;
if (bankcnt < least_load) {
least_load = bankcnt;
@@ -209,7 +224,8 @@ static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
return 0;
}
-static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ struct ib_qp_init_attr *init_attr)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
unsigned long num = 0;
@@ -220,7 +236,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
num = 1;
} else {
mutex_lock(&qp_table->bank_mutex);
- bankid = get_least_load_bankid_for_qp(qp_table->bank);
+ bankid = get_least_load_bankid_for_qp(init_attr, qp_table->bank);
ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
&num);
@@ -1082,7 +1098,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
goto err_buf;
}
- ret = alloc_qpn(hr_dev, hr_qp);
+ ret = alloc_qpn(hr_dev, hr_qp, init_attr);
if (ret) {
ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
goto err_qpn;
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 989a2af2e938..081a01de3055 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -9,8 +9,6 @@
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"
-#define MAX_ENTRY_NUM 256
-
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
@@ -47,8 +45,6 @@ int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
struct hns_roce_v2_cq_context context;
- u32 data[MAX_ENTRY_NUM] = {};
- int offset = 0;
int ret;
if (!hr_dev->hw->query_cqc)
@@ -58,23 +54,7 @@ int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
if (ret)
return -EINVAL;
- data[offset++] = hr_reg_read(&context, CQC_CQ_ST);
- data[offset++] = hr_reg_read(&context, CQC_SHIFT);
- data[offset++] = hr_reg_read(&context, CQC_CQE_SIZE);
- data[offset++] = hr_reg_read(&context, CQC_CQE_CNT);
- data[offset++] = hr_reg_read(&context, CQC_CQ_PRODUCER_IDX);
- data[offset++] = hr_reg_read(&context, CQC_CQ_CONSUMER_IDX);
- data[offset++] = hr_reg_read(&context, CQC_DB_RECORD_EN);
- data[offset++] = hr_reg_read(&context, CQC_ARM_ST);
- data[offset++] = hr_reg_read(&context, CQC_CMD_SN);
- data[offset++] = hr_reg_read(&context, CQC_CEQN);
- data[offset++] = hr_reg_read(&context, CQC_CQ_MAX_CNT);
- data[offset++] = hr_reg_read(&context, CQC_CQ_PERIOD);
- data[offset++] = hr_reg_read(&context, CQC_CQE_HOP_NUM);
- data[offset++] = hr_reg_read(&context, CQC_CQE_BAR_PG_SZ);
- data[offset++] = hr_reg_read(&context, CQC_CQE_BUF_PG_SZ);
-
- ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
return ret;
}
@@ -118,8 +98,6 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
struct hns_roce_v2_qp_context context;
- u32 data[MAX_ENTRY_NUM] = {};
- int offset = 0;
int ret;
if (!hr_dev->hw->query_qpc)
@@ -129,42 +107,7 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
if (ret)
return -EINVAL;
- data[offset++] = hr_reg_read(&context, QPC_QP_ST);
- data[offset++] = hr_reg_read(&context, QPC_ERR_TYPE);
- data[offset++] = hr_reg_read(&context, QPC_CHECK_FLG);
- data[offset++] = hr_reg_read(&context, QPC_SRQ_EN);
- data[offset++] = hr_reg_read(&context, QPC_SRQN);
- data[offset++] = hr_reg_read(&context, QPC_QKEY_XRCD);
- data[offset++] = hr_reg_read(&context, QPC_TX_CQN);
- data[offset++] = hr_reg_read(&context, QPC_RX_CQN);
- data[offset++] = hr_reg_read(&context, QPC_SQ_PRODUCER_IDX);
- data[offset++] = hr_reg_read(&context, QPC_SQ_CONSUMER_IDX);
- data[offset++] = hr_reg_read(&context, QPC_RQ_RECORD_EN);
- data[offset++] = hr_reg_read(&context, QPC_RQ_PRODUCER_IDX);
- data[offset++] = hr_reg_read(&context, QPC_RQ_CONSUMER_IDX);
- data[offset++] = hr_reg_read(&context, QPC_SQ_SHIFT);
- data[offset++] = hr_reg_read(&context, QPC_RQWS);
- data[offset++] = hr_reg_read(&context, QPC_RQ_SHIFT);
- data[offset++] = hr_reg_read(&context, QPC_SGE_SHIFT);
- data[offset++] = hr_reg_read(&context, QPC_SQ_HOP_NUM);
- data[offset++] = hr_reg_read(&context, QPC_RQ_HOP_NUM);
- data[offset++] = hr_reg_read(&context, QPC_SGE_HOP_NUM);
- data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BA_PG_SZ);
- data[offset++] = hr_reg_read(&context, QPC_WQE_SGE_BUF_PG_SZ);
- data[offset++] = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
- data[offset++] = hr_reg_read(&context, QPC_RETRY_CNT);
- data[offset++] = hr_reg_read(&context, QPC_SQ_CUR_PSN);
- data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_PSN);
- data[offset++] = hr_reg_read(&context, QPC_SQ_FLUSH_IDX);
- data[offset++] = hr_reg_read(&context, QPC_SQ_MAX_IDX);
- data[offset++] = hr_reg_read(&context, QPC_SQ_TX_ERR);
- data[offset++] = hr_reg_read(&context, QPC_SQ_RX_ERR);
- data[offset++] = hr_reg_read(&context, QPC_RQ_RX_ERR);
- data[offset++] = hr_reg_read(&context, QPC_RQ_TX_ERR);
- data[offset++] = hr_reg_read(&context, QPC_RQ_CQE_IDX);
- data[offset++] = hr_reg_read(&context, QPC_RQ_RTY_TX_ERR);
-
- ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
return ret;
}
@@ -204,8 +147,6 @@ int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
struct hns_roce_v2_mpt_entry context;
- u32 data[MAX_ENTRY_NUM] = {};
- int offset = 0;
int ret;
if (!hr_dev->hw->query_mpt)
@@ -215,17 +156,7 @@ int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
if (ret)
return -EINVAL;
- data[offset++] = hr_reg_read(&context, MPT_ST);
- data[offset++] = hr_reg_read(&context, MPT_PD);
- data[offset++] = hr_reg_read(&context, MPT_LKEY);
- data[offset++] = hr_reg_read(&context, MPT_LEN_L);
- data[offset++] = hr_reg_read(&context, MPT_LEN_H);
- data[offset++] = hr_reg_read(&context, MPT_PBL_SIZE);
- data[offset++] = hr_reg_read(&context, MPT_PBL_HOP_NUM);
- data[offset++] = hr_reg_read(&context, MPT_PBL_BA_PG_SZ);
- data[offset++] = hr_reg_read(&context, MPT_PBL_BUF_PG_SZ);
-
- ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+ ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
return ret;
}
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 70009b970e08..42d1e9771066 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -1555,22 +1555,56 @@ static int irdma_del_multiple_qhash(struct irdma_device *iwdev,
return ret;
}
+static u8 irdma_iw_get_vlan_prio(u32 *loc_addr, u8 prio, bool ipv4)
+{
+ struct net_device *ndev = NULL;
+
+ rcu_read_lock();
+ if (ipv4) {
+ ndev = ip_dev_find(&init_net, htonl(loc_addr[0]));
+ } else if (IS_ENABLED(CONFIG_IPV6)) {
+ struct net_device *ip_dev;
+ struct in6_addr laddr6;
+
+ irdma_copy_ip_htonl(laddr6.in6_u.u6_addr32, loc_addr);
+
+ for_each_netdev_rcu (&init_net, ip_dev) {
+ if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
+ ndev = ip_dev;
+ break;
+ }
+ }
+ }
+
+ if (!ndev)
+ goto done;
+ if (is_vlan_dev(ndev))
+ prio = (vlan_dev_get_egress_qos_mask(ndev, prio) & VLAN_PRIO_MASK)
+ >> VLAN_PRIO_SHIFT;
+ if (ipv4)
+ dev_put(ndev);
+
+done:
+ rcu_read_unlock();
+
+ return prio;
+}
+
/**
- * irdma_netdev_vlan_ipv6 - Gets the netdev and mac
+ * irdma_get_vlan_mac_ipv6 - Gets the vlan and mac
* @addr: local IPv6 address
* @vlan_id: vlan id for the given IPv6 address
* @mac: mac address for the given IPv6 address
*
- * Returns the net_device of the IPv6 address and also sets the
- * vlan id and mac for that address.
+ * Returns the vlan id and mac for an IPv6 address.
*/
-struct net_device *irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
+void irdma_get_vlan_mac_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
{
struct net_device *ip_dev = NULL;
struct in6_addr laddr6;
if (!IS_ENABLED(CONFIG_IPV6))
- return NULL;
+ return;
irdma_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
if (vlan_id)
@@ -1589,8 +1623,6 @@ struct net_device *irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
}
}
rcu_read_unlock();
-
- return ip_dev;
}
/**
@@ -1667,6 +1699,12 @@ static int irdma_add_mqh_6(struct irdma_device *iwdev,
ifp->addr.in6_u.u6_addr32);
memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
sizeof(cm_info->loc_addr));
+ if (!iwdev->vsi.dscp_mode)
+ cm_info->user_pri =
+ irdma_iw_get_vlan_prio(child_listen_node->loc_addr,
+ cm_info->user_pri,
+ false);
+
ret = irdma_manage_qhash(iwdev, cm_info,
IRDMA_QHASH_TYPE_TCP_SYN,
IRDMA_QHASH_MANAGE_TYPE_ADD,
@@ -1751,6 +1789,11 @@ static int irdma_add_mqh_4(struct irdma_device *iwdev,
ntohl(ifa->ifa_address);
memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
sizeof(cm_info->loc_addr));
+ if (!iwdev->vsi.dscp_mode)
+ cm_info->user_pri =
+ irdma_iw_get_vlan_prio(child_listen_node->loc_addr,
+ cm_info->user_pri,
+ true);
ret = irdma_manage_qhash(iwdev, cm_info,
IRDMA_QHASH_TYPE_TCP_SYN,
IRDMA_QHASH_MANAGE_TYPE_ADD,
@@ -2219,6 +2262,10 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
} else {
cm_node->tos = max(listener->tos, cm_info->tos);
cm_node->user_pri = rt_tos2priority(cm_node->tos);
+ cm_node->user_pri =
+ irdma_iw_get_vlan_prio(cm_info->loc_addr,
+ cm_node->user_pri,
+ cm_info->ipv4);
}
ibdev_dbg(&iwdev->ibdev,
"DCB: listener: TOS:[%d] UP:[%d]\n", cm_node->tos,
@@ -3577,7 +3624,6 @@ void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp)
iwqp->ietf_mem.size, iwqp->ietf_mem.va,
iwqp->ietf_mem.pa);
iwqp->ietf_mem.va = NULL;
- iwqp->ietf_mem.va = NULL;
}
}
@@ -3617,8 +3663,8 @@ int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->vlan_id = irdma_get_vlan_ipv4(cm_node->loc_addr);
} else {
cm_node->ipv4 = false;
- irdma_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id,
- NULL);
+ irdma_get_vlan_mac_ipv6(cm_node->loc_addr, &cm_node->vlan_id,
+ NULL);
}
ibdev_dbg(&iwdev->ibdev, "CM: Accept vlan_id=%d\n",
cm_node->vlan_id);
@@ -3826,17 +3872,21 @@ int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
raddr6->sin6_addr.in6_u.u6_addr32);
cm_info.loc_port = ntohs(laddr6->sin6_port);
cm_info.rem_port = ntohs(raddr6->sin6_port);
- irdma_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id,
- NULL);
+ irdma_get_vlan_mac_ipv6(cm_info.loc_addr, &cm_info.vlan_id,
+ NULL);
}
cm_info.cm_id = cm_id;
cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
cm_info.tos = cm_id->tos;
- if (iwdev->vsi.dscp_mode)
+ if (iwdev->vsi.dscp_mode) {
cm_info.user_pri =
iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(cm_info.tos)];
- else
+ } else {
cm_info.user_pri = rt_tos2priority(cm_id->tos);
+ cm_info.user_pri = irdma_iw_get_vlan_prio(cm_info.loc_addr,
+ cm_info.user_pri,
+ cm_info.ipv4);
+ }
if (iwqp->sc_qp.dev->ws_add(iwqp->sc_qp.vsi, cm_info.user_pri))
return -ENOMEM;
@@ -3952,8 +4002,8 @@ int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
laddr6->sin6_addr.in6_u.u6_addr32);
cm_info.loc_port = ntohs(laddr6->sin6_port);
if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY) {
- irdma_netdev_vlan_ipv6(cm_info.loc_addr,
- &cm_info.vlan_id, NULL);
+ irdma_get_vlan_mac_ipv6(cm_info.loc_addr,
+ &cm_info.vlan_id, NULL);
} else {
cm_info.vlan_id = 0xFFFF;
wildcard = true;
@@ -3980,7 +4030,7 @@ int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_listen_node->tos = cm_id->tos;
if (iwdev->vsi.dscp_mode)
cm_listen_node->user_pri =
- iwdev->vsi.dscp_map[irdma_tos2dscp(cm_id->tos)];
+ iwdev->vsi.dscp_map[irdma_tos2dscp(cm_id->tos)];
else
cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
cm_info.user_pri = cm_listen_node->user_pri;
@@ -3990,6 +4040,12 @@ int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
if (err)
goto error;
} else {
+ if (!iwdev->vsi.dscp_mode)
+ cm_listen_node->user_pri =
+ irdma_iw_get_vlan_prio(cm_info.loc_addr,
+ cm_info.user_pri,
+ cm_info.ipv4);
+ cm_info.user_pri = cm_listen_node->user_pri;
err = irdma_manage_qhash(iwdev, &cm_info,
IRDMA_QHASH_TYPE_TCP_SYN,
IRDMA_QHASH_MANAGE_TYPE_ADD,
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 45e3344daa04..8a6200e55c54 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -1061,6 +1061,9 @@ static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
u64 hdr;
enum irdma_page_size page_size;
+ if (!info->total_len && !info->all_memory)
+ return -EINVAL;
+
if (info->page_size == 0x40000000)
page_size = IRDMA_PAGE_SIZE_1G;
else if (info->page_size == 0x200000)
@@ -1126,6 +1129,9 @@ static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
u8 addr_type;
enum irdma_page_size page_size;
+ if (!info->total_len && !info->all_memory)
+ return -EINVAL;
+
if (info->page_size == 0x40000000)
page_size = IRDMA_PAGE_SIZE_1G;
else if (info->page_size == 0x200000)
@@ -1301,7 +1307,6 @@ int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
sq_info.wr_id = info->wr_id;
sq_info.signaled = info->signaled;
- sq_info.push_wqe = info->push_wqe;
wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
@@ -1335,7 +1340,6 @@ int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) |
FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, (sq_info.push_wqe ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -1346,13 +1350,9 @@ int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
print_hex_dump_debug("WQE: FAST_REG WQE", DUMP_PREFIX_OFFSET, 16, 8,
wqe, IRDMA_QP_WQE_MIN_SIZE, false);
- if (sq_info.push_wqe) {
- irdma_qp_push_wqe(&qp->qp_uk, wqe, IRDMA_QP_WQE_MIN_QUANTA,
- wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(&qp->qp_uk);
- }
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(&qp->qp_uk);
return 0;
}
@@ -4007,7 +4007,6 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
{
u64 temp, compl_ctx;
__le64 *aeqe;
- u16 wqe_idx;
u8 ae_src;
u8 polarity;
@@ -4027,7 +4026,7 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
aeqe, 16, false);
ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
- wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
+ info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
@@ -4110,7 +4109,6 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
case IRDMA_AE_SOURCE_RQ_0011:
info->qp = true;
info->rq = true;
- info->wqe_idx = wqe_idx;
info->compl_ctx = compl_ctx;
break;
case IRDMA_AE_SOURCE_CQ:
@@ -4124,7 +4122,6 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
case IRDMA_AE_SOURCE_SQ_0111:
info->qp = true;
info->sq = true;
- info->wqe_idx = wqe_idx;
info->compl_ctx = compl_ctx;
break;
case IRDMA_AE_SOURCE_IN_RR_WR:
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 457368e324e1..7cbdd5433dba 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -219,7 +219,6 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
struct irdma_aeqe_info *info = &aeinfo;
int ret;
struct irdma_qp *iwqp = NULL;
- struct irdma_sc_cq *cq = NULL;
struct irdma_cq *iwcq = NULL;
struct irdma_sc_qp *qp = NULL;
struct irdma_qp_host_ctx_info *ctx_info = NULL;
@@ -336,10 +335,18 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
ibdev_err(&iwdev->ibdev,
"Processing an iWARP related AE for CQ misc = 0x%04X\n",
info->ae_id);
- cq = (struct irdma_sc_cq *)(unsigned long)
- info->compl_ctx;
- iwcq = cq->back_cq;
+ spin_lock_irqsave(&rf->cqtable_lock, flags);
+ iwcq = rf->cq_table[info->qp_cq_id];
+ if (!iwcq) {
+ spin_unlock_irqrestore(&rf->cqtable_lock,
+ flags);
+ ibdev_dbg(to_ibdev(dev),
+ "cq_id %d is already freed\n", info->qp_cq_id);
+ continue;
+ }
+ irdma_cq_add_ref(&iwcq->ibcq);
+ spin_unlock_irqrestore(&rf->cqtable_lock, flags);
if (iwcq->ibcq.event_handler) {
struct ib_event ibevent;
@@ -350,6 +357,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
iwcq->ibcq.event_handler(&ibevent,
iwcq->ibcq.cq_context);
}
+ irdma_cq_rem_ref(&iwcq->ibcq);
break;
case IRDMA_AE_RESET_NOT_SENT:
case IRDMA_AE_LLP_DOUBT_REACHABILITY:
@@ -563,12 +571,11 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
/**
* irdma_destroy_cqp - destroy control qp
* @rf: RDMA PCI function
- * @free_hwcqp: 1 if hw cqp should be freed
*
* Issue destroy cqp request and
* free the resources associated with the cqp
*/
-static void irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
+static void irdma_destroy_cqp(struct irdma_pci_f *rf)
{
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_cqp *cqp = &rf->cqp;
@@ -576,8 +583,8 @@ static void irdma_destroy_cqp(struct irdma_pci_f *rf, bool free_hwcqp)
if (rf->cqp_cmpl_wq)
destroy_workqueue(rf->cqp_cmpl_wq);
- if (free_hwcqp)
- status = irdma_sc_cqp_destroy(dev->cqp);
+
+ status = irdma_sc_cqp_destroy(dev->cqp);
if (status)
ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);
@@ -921,8 +928,8 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
if (!cqp->scratch_array) {
- kfree(cqp->cqp_requests);
- return -ENOMEM;
+ status = -ENOMEM;
+ goto err_scratch;
}
dev->cqp = &cqp->sc_cqp;
@@ -932,15 +939,14 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size,
&cqp->sq.pa, GFP_KERNEL);
if (!cqp->sq.va) {
- kfree(cqp->scratch_array);
- kfree(cqp->cqp_requests);
- return -ENOMEM;
+ status = -ENOMEM;
+ goto err_sq;
}
status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
IRDMA_HOST_CTX_ALIGNMENT_M);
if (status)
- goto exit;
+ goto err_ctx;
dev->cqp->host_ctx_pa = mem.pa;
dev->cqp->host_ctx = mem.va;
@@ -966,7 +972,7 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
if (status) {
ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status);
- goto exit;
+ goto err_ctx;
}
spin_lock_init(&cqp->req_lock);
@@ -977,7 +983,7 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
ibdev_dbg(to_ibdev(dev),
"ERR: cqp create failed - status %d maj_err %d min_err %d\n",
status, maj_err, min_err);
- goto exit;
+ goto err_ctx;
}
INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
@@ -991,8 +997,16 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
init_waitqueue_head(&cqp->remove_wq);
return 0;
-exit:
- irdma_destroy_cqp(rf, false);
+err_ctx:
+ dma_free_coherent(dev->hw->device, cqp->sq.size,
+ cqp->sq.va, cqp->sq.pa);
+ cqp->sq.va = NULL;
+err_sq:
+ kfree(cqp->scratch_array);
+ cqp->scratch_array = NULL;
+err_scratch:
+ kfree(cqp->cqp_requests);
+ cqp->cqp_requests = NULL;
return status;
}
@@ -1549,7 +1563,7 @@ static void irdma_del_init_mem(struct irdma_pci_f *rf)
kfree(dev->hmc_info->sd_table.sd_entry);
dev->hmc_info->sd_table.sd_entry = NULL;
- kfree(rf->mem_rsrc);
+ vfree(rf->mem_rsrc);
rf->mem_rsrc = NULL;
dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
rf->obj_mem.pa);
@@ -1747,7 +1761,7 @@ void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
rf->reset, rf->rdma_ver);
fallthrough;
case CQP_CREATED:
- irdma_destroy_cqp(rf, true);
+ irdma_destroy_cqp(rf);
fallthrough;
case INITIAL_STATE:
irdma_del_init_mem(rf);
@@ -1945,10 +1959,12 @@ static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
rf->qp_table = (struct irdma_qp **)
(&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
+ rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]);
spin_lock_init(&rf->rsrc_lock);
spin_lock_init(&rf->arp_lock);
spin_lock_init(&rf->qptable_lock);
+ spin_lock_init(&rf->cqtable_lock);
spin_lock_init(&rf->qh_list_lock);
}
@@ -1969,6 +1985,7 @@ static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
+ rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;
return rsrc_size;
}
@@ -2002,10 +2019,10 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
rf->max_mcg = rf->max_qp;
rsrc_size = irdma_calc_mem_rsrc_size(rf);
- rf->mem_rsrc = kzalloc(rsrc_size, GFP_KERNEL);
+ rf->mem_rsrc = vzalloc(rsrc_size);
if (!rf->mem_rsrc) {
ret = -ENOMEM;
- goto mem_rsrc_kzalloc_fail;
+ goto mem_rsrc_vzalloc_fail;
}
rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
@@ -2033,7 +2050,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
return 0;
-mem_rsrc_kzalloc_fail:
+mem_rsrc_vzalloc_fail:
bitmap_free(rf->allocated_ws_nodes);
rf->allocated_ws_nodes = NULL;
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c
index 37a40fb4d0d7..638d127fb3e0 100644
--- a/drivers/infiniband/hw/irdma/i40iw_hw.c
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.c
@@ -254,5 +254,6 @@ void i40iw_init_hw(struct irdma_sc_dev *dev)
dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_1;
dev->hw_attrs.max_hw_outbound_msg_size = I40IW_MAX_OUTBOUND_MSG_SIZE;
dev->hw_attrs.max_hw_inbound_msg_size = I40IW_MAX_INBOUND_MSG_SIZE;
+ dev->hw_attrs.uk_attrs.min_hw_wq_size = I40IW_MIN_WQ_SIZE;
dev->hw_attrs.max_qp_wr = I40IW_MAX_QP_WRS;
}
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.h b/drivers/infiniband/hw/irdma/i40iw_hw.h
index 1c438b3593ea..10afc165f5ea 100644
--- a/drivers/infiniband/hw/irdma/i40iw_hw.h
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.h
@@ -140,11 +140,11 @@ enum i40iw_device_caps_const {
I40IW_MAX_CQ_SIZE = 1048575,
I40IW_MAX_OUTBOUND_MSG_SIZE = 2147483647,
I40IW_MAX_INBOUND_MSG_SIZE = 2147483647,
+ I40IW_MIN_WQ_SIZE = 4 /* WQEs */,
};
#define I40IW_QP_WQE_MIN_SIZE 32
#define I40IW_QP_WQE_MAX_SIZE 128
-#define I40IW_QP_SW_MIN_WQSIZE 4
#define I40IW_MAX_RQ_WQE_SHIFT 2
#define I40IW_MAX_QUANTA_PER_WR 2
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c
index 298d14905993..10ccf4bc3f2d 100644
--- a/drivers/infiniband/hw/irdma/icrdma_hw.c
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.c
@@ -195,6 +195,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
+ dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE;
dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
IRDMA_FEATURE_CQ_RESIZE;
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.h b/drivers/infiniband/hw/irdma/icrdma_hw.h
index b65c463abf0b..54035a08cc93 100644
--- a/drivers/infiniband/hw/irdma/icrdma_hw.h
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.h
@@ -64,6 +64,7 @@ enum icrdma_device_caps_const {
ICRDMA_MAX_IRD_SIZE = 127,
ICRDMA_MAX_ORD_SIZE = 255,
+ ICRDMA_MIN_WQ_SIZE = 8 /* WQEs */,
};
diff --git a/drivers/infiniband/hw/irdma/irdma.h b/drivers/infiniband/hw/irdma/irdma.h
index 173e2dc2fc35..3237fa64bc8f 100644
--- a/drivers/infiniband/hw/irdma/irdma.h
+++ b/drivers/infiniband/hw/irdma/irdma.h
@@ -119,6 +119,7 @@ struct irdma_uk_attrs {
u32 min_hw_cq_size;
u32 max_hw_cq_size;
u16 max_hw_sq_chunk;
+ u16 min_hw_wq_size;
u8 hw_rev;
};
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index 2323962cdeac..82fc5f5b002c 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -239,7 +239,7 @@ struct irdma_qv_info {
struct irdma_qvlist_info {
u32 num_vectors;
- struct irdma_qv_info qv_info[1];
+ struct irdma_qv_info qv_info[];
};
struct irdma_gen_ops {
@@ -309,7 +309,9 @@ struct irdma_pci_f {
spinlock_t arp_lock; /*protect ARP table access*/
spinlock_t rsrc_lock; /* protect HW resource array access */
spinlock_t qptable_lock; /*protect QP table access*/
+ spinlock_t cqtable_lock; /*protect CQ table access*/
struct irdma_qp **qp_table;
+ struct irdma_cq **cq_table;
spinlock_t qh_list_lock; /* protect mc_qht_list */
struct mc_table_list mc_qht_list;
struct irdma_msix_vector *iw_msixtbl;
@@ -500,6 +502,8 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata);
int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
+void irdma_cq_add_ref(struct ib_cq *ibcq);
+void irdma_cq_rem_ref(struct ib_cq *ibcq);
void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);
@@ -529,7 +533,7 @@ void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
void irdma_copy_ip_ntohl(u32 *dst, __be32 *src);
void irdma_copy_ip_htonl(__be32 *dst, u32 *src);
u16 irdma_get_vlan_ipv4(u32 *addr);
-struct net_device *irdma_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac);
+void irdma_get_vlan_mac_ipv6(u32 *addr, u16 *vlan_id, u8 *mac);
struct ib_mr *irdma_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size,
int acc, u64 *iova_start);
int irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw);
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index a20709577ab0..c84ec4dd8536 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -971,6 +971,7 @@ struct irdma_allocate_stag_info {
bool remote_access:1;
bool use_hmc_fcn_index:1;
bool use_pf_rid:1;
+ bool all_memory:1;
u8 hmc_fcn_index;
};
@@ -998,6 +999,7 @@ struct irdma_reg_ns_stag_info {
bool use_hmc_fcn_index:1;
u8 hmc_fcn_index;
bool use_pf_rid:1;
+ bool all_memory:1;
};
struct irdma_fast_reg_stag_info {
@@ -1017,7 +1019,6 @@ struct irdma_fast_reg_stag_info {
bool local_fence:1;
bool read_fence:1;
bool signaled:1;
- bool push_wqe:1;
bool use_hmc_fcn_index:1;
u8 hmc_fcn_index;
bool use_pf_rid:1;
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index 280d633d4ec4..d8285ca16293 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -127,10 +127,7 @@ void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
if (sw_sq_head != qp->initial_ring.head) {
- if (qp->push_dropped) {
- writel(qp->qp_id, qp->wqe_alloc_db);
- qp->push_dropped = false;
- } else if (sw_sq_head != hw_sq_tail) {
+ if (sw_sq_head != hw_sq_tail) {
if (sw_sq_head > qp->initial_ring.head) {
if (hw_sq_tail >= qp->initial_ring.head &&
hw_sq_tail < sw_sq_head)
@@ -147,38 +144,6 @@ void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
}
/**
- * irdma_qp_ring_push_db - ring qp doorbell
- * @qp: hw qp ptr
- * @wqe_idx: wqe index
- */
-static void irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
-{
- set_32bit_val(qp->push_db, 0,
- FIELD_PREP(IRDMA_WQEALLOC_WQE_DESC_INDEX, wqe_idx >> 3) | qp->qp_id);
- qp->initial_ring.head = qp->sq_ring.head;
- qp->push_mode = true;
- qp->push_dropped = false;
-}
-
-void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
- u32 wqe_idx, bool post_sq)
-{
- __le64 *push;
-
- if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
- IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
- !qp->push_mode) {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- } else {
- push = (__le64 *)((uintptr_t)qp->push_wqe +
- (wqe_idx & 0x7) * 0x20);
- memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
- irdma_qp_ring_push_db(qp, wqe_idx);
- }
-}
-
-/**
* irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
* @qp: hw qp ptr
* @wqe_idx: return wqe index
@@ -192,7 +157,6 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
{
__le64 *wqe;
__le64 *wqe_0 = NULL;
- u32 nop_wqe_idx;
u16 avail_quanta;
u16 i;
@@ -209,14 +173,10 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
return NULL;
- nop_wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
for (i = 0; i < avail_quanta; i++) {
irdma_nop_1(qp);
IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
}
- if (qp->push_db && info->push_wqe)
- irdma_qp_push_wqe(qp, qp->sq_base[nop_wqe_idx].elem,
- avail_quanta, nop_wqe_idx, true);
}
*wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
@@ -282,8 +242,6 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool read_fence = false;
u16 quanta;
- info->push_wqe = qp->push_db ? true : false;
-
op_info = &info->op.rdma_write;
if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
return -EINVAL;
@@ -344,7 +302,6 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -353,12 +310,9 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
dma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -383,8 +337,6 @@ int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
u16 quanta;
u64 hdr;
- info->push_wqe = qp->push_db ? true : false;
-
op_info = &info->op.rdma_read;
if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
return -EINVAL;
@@ -431,7 +383,6 @@ int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
FIELD_PREP(IRDMAQPSQ_OPCODE,
(inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -440,12 +391,9 @@ int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
dma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -468,8 +416,6 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool read_fence = false;
u16 quanta;
- info->push_wqe = qp->push_db ? true : false;
-
op_info = &info->op.send;
if (qp->max_sq_frag_cnt < op_info->num_sges)
return -EINVAL;
@@ -530,7 +476,6 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -541,12 +486,9 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
dma_wmb(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -720,7 +662,6 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
u32 i, total_size = 0;
u16 quanta;
- info->push_wqe = qp->push_db ? true : false;
op_info = &info->op.rdma_write;
if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
@@ -750,7 +691,6 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe ? 1 : 0) |
FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -767,12 +707,8 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -794,7 +730,6 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
u32 i, total_size = 0;
u16 quanta;
- info->push_wqe = qp->push_db ? true : false;
op_info = &info->op.send;
if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
@@ -827,7 +762,6 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
(info->imm_data_valid ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -845,12 +779,8 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -872,7 +802,6 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
bool local_fence = false;
struct ib_sge sge = {};
- info->push_wqe = qp->push_db ? true : false;
op_info = &info->op.inv_local_stag;
local_fence = info->local_fence;
@@ -889,7 +818,6 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
set_64bit_val(wqe, 16, 0);
hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
@@ -899,13 +827,8 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
set_64bit_val(wqe, 24, hdr);
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
- post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
return 0;
}
@@ -1124,7 +1047,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
- info->push_dropped = (bool)FIELD_GET(IRDMACQ_PSHDROP, qword3);
info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
if (info->error) {
info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
@@ -1213,11 +1135,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
return irdma_uk_cq_poll_cmpl(cq, info);
}
}
- /*cease posting push mode on push drop*/
- if (info->push_dropped) {
- qp->push_mode = false;
- qp->push_dropped = true;
- }
if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
if (!info->comp_status)
@@ -1349,10 +1266,12 @@ void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
u32 *sqdepth)
{
+ u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
*sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);
- if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
- *sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
+ if (*sqdepth < min_size)
+ *sqdepth = min_size;
else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
return -EINVAL;
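A minimal standalone sketch of the new lower bound applied above: the minimum depth now comes from the device's min_hw_wq_size (IRDMA_MIN_WQ_SIZE_GEN2 is 8, per the user.h hunk later in this patch) scaled by the WQE shift, rather than the old software constant. The reserved-quanta value and the power-of-two round-up behaviour of irdma_qp_round_up() are assumptions made only for this example.

#include <stdint.h>
#include <stdio.h>

static uint32_t round_up_pow2(uint32_t v)	/* assumed behaviour of irdma_qp_round_up() */
{
	uint32_t r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	uint32_t sq_size = 64;			/* requested send WRs */
	uint8_t shift = 2;			/* quanta per WQE = 1 << shift */
	uint32_t rsvd = 2;			/* IRDMA_SQ_RSVD (value assumed) */
	uint32_t min_hw_wq_size = 8;		/* IRDMA_MIN_WQ_SIZE_GEN2 */
	uint32_t min_size = min_hw_wq_size << shift;
	uint32_t depth = round_up_pow2((sq_size << shift) + rsvd);

	if (depth < min_size)			/* new per-device lower bound */
		depth = min_size;
	printf("sq depth = %u quanta\n", depth);	/* 512 in this example */
	return 0;
}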
@@ -1369,10 +1288,12 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
u32 *rqdepth)
{
+ u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
*rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);
- if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
- *rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
+ if (*rqdepth < min_size)
+ *rqdepth = min_size;
else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
return -EINVAL;
@@ -1415,6 +1336,78 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
}
/**
+ * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
+ * @ukinfo: qp initialization info
+ * @sq_shift: Returns shift of SQ
+ * @rq_shift: Returns shift of RQ
+ */
+void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
+ u8 *rq_shift)
+{
+ bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs,
+ imm_support ? ukinfo->max_sq_frag_cnt + 1 :
+ ukinfo->max_sq_frag_cnt,
+ ukinfo->max_inline_data, sq_shift);
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+ rq_shift);
+
+ if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
+ if (ukinfo->abi_ver > 4)
+ *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ }
+}
+
+/**
+ * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
+ * @ukinfo: qp initialization info
+ * @sq_depth: Returns depth of SQ
+ * @sq_shift: Returns shift of SQ
+ */
+int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *sq_depth, u8 *sq_shift)
+{
+ bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
+ int status;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs,
+ imm_support ? ukinfo->max_sq_frag_cnt + 1 :
+ ukinfo->max_sq_frag_cnt,
+ ukinfo->max_inline_data, sq_shift);
+ status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
+ *sq_shift, sq_depth);
+
+ return status;
+}
+
+/**
+ * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
+ * @ukinfo: qp initialization info
+ * @rq_depth: Returns depth of RQ
+ * @rq_shift: Returns shift of RQ
+ */
+int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *rq_depth, u8 *rq_shift)
+{
+ int status;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+ rq_shift);
+
+ if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
+ if (ukinfo->abi_ver > 4)
+ *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ }
+
+ status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
+ *rq_shift, rq_depth);
+
+ return status;
+}
+
+/**
* irdma_uk_qp_init - initialize shared qp
* @qp: hw qp (user and kernel)
* @info: qp initialization info
@@ -1428,23 +1421,12 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
{
int ret_code = 0;
u32 sq_ring_size;
- u8 sqshift, rqshift;
qp->uk_attrs = info->uk_attrs;
if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
return -EINVAL;
- irdma_get_wqe_shift(qp->uk_attrs, info->max_rq_frag_cnt, 0, &rqshift);
- if (qp->uk_attrs->hw_rev == IRDMA_GEN_1) {
- irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt,
- info->max_inline_data, &sqshift);
- if (info->abi_ver > 4)
- rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
- } else {
- irdma_get_wqe_shift(qp->uk_attrs, info->max_sq_frag_cnt + 1,
- info->max_inline_data, &sqshift);
- }
qp->qp_caps = info->qp_caps;
qp->sq_base = info->sq;
qp->rq_base = info->rq;
@@ -1456,9 +1438,8 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
qp->wqe_alloc_db = info->wqe_alloc_db;
qp->qp_id = info->qp_id;
qp->sq_size = info->sq_size;
- qp->push_mode = false;
qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
- sq_ring_size = qp->sq_size << sqshift;
+ sq_ring_size = qp->sq_size << info->sq_shift;
IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
if (info->first_sq_wq) {
@@ -1473,9 +1454,9 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
qp->rq_size = info->rq_size;
qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
qp->max_inline_data = info->max_inline_data;
- qp->rq_wqe_size = rqshift;
+ qp->rq_wqe_size = info->rq_shift;
IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
- qp->rq_wqe_size_multiplier = 1 << rqshift;
+ qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
qp->wqe_ops = iw_wqe_uk_ops_gen_1;
else
@@ -1554,7 +1535,6 @@ int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
u32 wqe_idx;
struct irdma_post_sq_info info = {};
- info.push_wqe = false;
info.wr_id = wr_id;
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
0, &info);
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index d0cdf609f5e0..36feca57b274 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -85,6 +85,7 @@ enum irdma_device_caps_const {
IRDMA_Q2_BUF_SIZE = 256,
IRDMA_QP_CTX_SIZE = 256,
IRDMA_MAX_PDS = 262144,
+ IRDMA_MIN_WQ_SIZE_GEN2 = 8,
};
enum irdma_addressing_type {
@@ -215,7 +216,6 @@ struct irdma_post_sq_info {
bool local_fence:1;
bool inline_data:1;
bool imm_data_valid:1;
- bool push_wqe:1;
bool report_rtt:1;
bool udp_hdr:1;
bool defer_flag:1;
@@ -247,7 +247,6 @@ struct irdma_cq_poll_info {
u8 op_type;
u8 q_type;
bool stag_invalid_set:1; /* or L_R_Key set */
- bool push_dropped:1;
bool error:1;
bool solicited_event:1;
bool ipv4:1;
@@ -295,6 +294,12 @@ void irdma_uk_cq_init(struct irdma_cq_uk *cq,
struct irdma_cq_uk_init_info *info);
int irdma_uk_qp_init(struct irdma_qp_uk *qp,
struct irdma_qp_uk_init_info *info);
+void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
+ u8 *rq_shift);
+int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *sq_depth, u8 *sq_shift);
+int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *rq_depth, u8 *rq_shift);
struct irdma_sq_uk_wr_trk_info {
u64 wrid;
u32 wr_len;
@@ -314,8 +319,6 @@ struct irdma_qp_uk {
struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
u64 *rq_wrid_array;
__le64 *shadow_area;
- __le32 *push_db;
- __le64 *push_wqe;
struct irdma_ring sq_ring;
struct irdma_ring rq_ring;
struct irdma_ring initial_ring;
@@ -335,8 +338,6 @@ struct irdma_qp_uk {
u8 rq_wqe_size;
u8 rq_wqe_size_multiplier;
bool deferred_flag:1;
- bool push_mode:1; /* whether the last post wqe was pushed */
- bool push_dropped:1;
bool first_sq_wq:1;
bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
@@ -374,8 +375,12 @@ struct irdma_qp_uk_init_info {
u32 max_sq_frag_cnt;
u32 max_rq_frag_cnt;
u32 max_inline_data;
+ u32 sq_depth;
+ u32 rq_depth;
u8 first_sq_wq;
u8 type;
+ u8 sq_shift;
+ u8 rq_shift;
int abi_ver;
bool legacy_mode;
};
@@ -404,7 +409,5 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
u32 *wqdepth);
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
u32 *wqdepth);
-void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
- u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
#endif /* IRDMA_USER_H */
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index eb083f70b09f..6cd5cb85dafe 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -760,6 +760,31 @@ void irdma_qp_rem_ref(struct ib_qp *ibqp)
complete(&iwqp->free_qp);
}
+void irdma_cq_add_ref(struct ib_cq *ibcq)
+{
+ struct irdma_cq *iwcq = to_iwcq(ibcq);
+
+ refcount_inc(&iwcq->refcnt);
+}
+
+void irdma_cq_rem_ref(struct ib_cq *ibcq)
+{
+ struct ib_device *ibdev = ibcq->device;
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_cq *iwcq = to_iwcq(ibcq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwdev->rf->cqtable_lock, flags);
+ if (!refcount_dec_and_test(&iwcq->refcnt)) {
+ spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags);
+ return;
+ }
+
+ iwdev->rf->cq_table[iwcq->cq_num] = NULL;
+ spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags);
+ complete(&iwcq->free_cq);
+}
+
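A minimal userspace model of the reference protocol these helpers introduce, assuming the usual refcount semantics: irdma_create_cq() starts the count at 1, event paths take transient references, and irdma_destroy_cq() drops the initial reference and then waits until the CQ table entry has been cleared.

#include <stdbool.h>
#include <stdio.h>

struct model_cq {
	int refcnt;
	bool freed;	/* stands in for complete(&iwcq->free_cq) */
};

static void cq_add_ref(struct model_cq *cq) { cq->refcnt++; }

static void cq_rem_ref(struct model_cq *cq)
{
	if (--cq->refcnt == 0)
		cq->freed = true;	/* table slot cleared, completion fired */
}

int main(void)
{
	struct model_cq cq = { .refcnt = 1 };	/* refcount_set(&iwcq->refcnt, 1) at create */

	cq_add_ref(&cq);	/* e.g. an in-flight completion event */
	cq_rem_ref(&cq);	/* event done */
	cq_rem_ref(&cq);	/* destroy_cq drops the initial reference ... */
	printf("safe to destroy: %s\n", cq.freed ? "yes" : "no");	/* ... then waits */
	return 0;
}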
struct ib_device *to_ibdev(struct irdma_sc_dev *dev)
{
return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 9c4fe4fa9001..3eb7a7a3a975 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -277,7 +277,7 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
struct irdma_alloc_ucontext_req req = {};
struct irdma_alloc_ucontext_resp uresp = {};
struct irdma_ucontext *ucontext = to_ucontext(uctx);
- struct irdma_uk_attrs *uk_attrs;
+ struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
@@ -292,7 +292,9 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
ucontext->iwdev = iwdev;
ucontext->abi_ver = req.userspace_ver;
- uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
+ if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
+ ucontext->use_raw_attrs = true;
+
/* GEN_1 legacy support with libi40iw */
if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
if (uk_attrs->hw_rev != IRDMA_GEN_1)
@@ -327,6 +329,9 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
uresp.hw_rev = uk_attrs->hw_rev;
+ uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
+ uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size;
+ uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE;
if (ib_copy_to_udata(udata, &uresp,
min(sizeof(uresp), udata->outlen))) {
rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
@@ -567,6 +572,87 @@ static void irdma_setup_virt_qp(struct irdma_device *iwdev,
}
/**
+ * irdma_setup_umode_qp - setup sq and rq size in user mode qp
+ * @udata: udata
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: initialize info to return
+ * @init_attr: Initial QP create attributes
+ */
+static int irdma_setup_umode_qp(struct ib_udata *udata,
+ struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_qp_init_info *info,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata,
+ struct irdma_ucontext, ibucontext);
+ struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
+ struct irdma_create_qp_req req;
+ unsigned long flags;
+ int ret;
+
+ ret = ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen));
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_udata fail\n");
+ return ret;
+ }
+
+ iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
+ iwqp->user_mode = 1;
+ if (req.user_wqe_bufs) {
+ info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
+ &ucontext->qp_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+ if (!iwqp->iwpbl) {
+ ret = -ENODATA;
+ ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
+ return ret;
+ }
+ }
+
+ if (!ucontext->use_raw_attrs) {
+ /**
+ * Maintain backward compat with older ABI which passes sq and
+ * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr.
+ * There is no way to compute the correct value of
+ * iwqp->max_send_wr/max_recv_wr in the kernel.
+ */
+ iwqp->max_send_wr = init_attr->cap.max_send_wr;
+ iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
+ ukinfo->sq_size = init_attr->cap.max_send_wr;
+ ukinfo->rq_size = init_attr->cap.max_recv_wr;
+ irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift,
+ &ukinfo->rq_shift);
+ } else {
+ ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
+ &ukinfo->sq_shift);
+ if (ret)
+ return ret;
+
+ ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
+ &ukinfo->rq_shift);
+ if (ret)
+ return ret;
+
+ iwqp->max_send_wr =
+ (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
+ iwqp->max_recv_wr =
+ (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
+ ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
+ ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
+ }
+
+ irdma_setup_virt_qp(iwdev, iwqp, info);
+
+ return 0;
+}
+
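A small worked example of the raw-attrs arithmetic above, with illustrative depth, shift, and reserved-quanta values (the IRDMA_SQ_RSVD/IRDMA_RQ_RSVD values here are assumptions): depths are in quanta, so the WR counts reported back to the application are the depth minus the reserved quanta, scaled down by the shift.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sq_depth = 512, rq_depth = 128;	/* from irdma_uk_calc_depth_shift_*() */
	uint8_t sq_shift = 2, rq_shift = 0;
	uint32_t sq_rsvd = 2, rq_rsvd = 2;		/* IRDMA_SQ_RSVD / IRDMA_RQ_RSVD (assumed) */

	printf("max_send_wr = %u\n", (sq_depth - sq_rsvd) >> sq_shift);	/* 127 */
	printf("max_recv_wr = %u\n", (rq_depth - rq_rsvd) >> rq_shift);	/* 126 */
	printf("sq_size = %u, rq_size = %u\n",
	       sq_depth >> sq_shift, rq_depth >> rq_shift);		/* 128, 128 */
	return 0;
}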
+/**
* irdma_setup_kmode_qp - setup initialization for kernel mode qp
* @iwdev: iwarp device
* @iwqp: qp ptr (user or kernel)
@@ -579,40 +665,28 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
struct ib_qp_init_attr *init_attr)
{
struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
- u32 sqdepth, rqdepth;
- u8 sqshift, rqshift;
u32 size;
int status;
struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
- struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
- irdma_get_wqe_shift(uk_attrs,
- uk_attrs->hw_rev >= IRDMA_GEN_2 ? ukinfo->max_sq_frag_cnt + 1 :
- ukinfo->max_sq_frag_cnt,
- ukinfo->max_inline_data, &sqshift);
- status = irdma_get_sqdepth(uk_attrs, ukinfo->sq_size, sqshift,
- &sqdepth);
+ status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
+ &ukinfo->sq_shift);
if (status)
return status;
- if (uk_attrs->hw_rev == IRDMA_GEN_1)
- rqshift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
- else
- irdma_get_wqe_shift(uk_attrs, ukinfo->max_rq_frag_cnt, 0,
- &rqshift);
-
- status = irdma_get_rqdepth(uk_attrs, ukinfo->rq_size, rqshift,
- &rqdepth);
+ status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
+ &ukinfo->rq_shift);
if (status)
return status;
iwqp->kqp.sq_wrid_mem =
- kcalloc(sqdepth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
+ kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
if (!iwqp->kqp.sq_wrid_mem)
return -ENOMEM;
iwqp->kqp.rq_wrid_mem =
- kcalloc(rqdepth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
+ kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
+
if (!iwqp->kqp.rq_wrid_mem) {
kfree(iwqp->kqp.sq_wrid_mem);
iwqp->kqp.sq_wrid_mem = NULL;
@@ -622,7 +696,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
- size = (sqdepth + rqdepth) * IRDMA_QP_WQE_MIN_SIZE;
+ size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
size += (IRDMA_SHADOW_AREA_SIZE << 3);
mem->size = ALIGN(size, 256);
@@ -638,16 +712,19 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
ukinfo->sq = mem->va;
info->sq_pa = mem->pa;
- ukinfo->rq = &ukinfo->sq[sqdepth];
- info->rq_pa = info->sq_pa + (sqdepth * IRDMA_QP_WQE_MIN_SIZE);
- ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
- info->shadow_area_pa = info->rq_pa + (rqdepth * IRDMA_QP_WQE_MIN_SIZE);
- ukinfo->sq_size = sqdepth >> sqshift;
- ukinfo->rq_size = rqdepth >> rqshift;
+ ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
+ info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
+ ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
+ info->shadow_area_pa =
+ info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
+ ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
+ ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
ukinfo->qp_id = iwqp->ibqp.qp_num;
- init_attr->cap.max_send_wr = (sqdepth - IRDMA_SQ_RSVD) >> sqshift;
- init_attr->cap.max_recv_wr = (rqdepth - IRDMA_RQ_RSVD) >> rqshift;
+ iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
+ iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
+ init_attr->cap.max_send_wr = iwqp->max_send_wr;
+ init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
return 0;
}
@@ -803,18 +880,14 @@ static int irdma_create_qp(struct ib_qp *ibqp,
struct irdma_device *iwdev = to_iwdev(ibpd->device);
struct irdma_pci_f *rf = iwdev->rf;
struct irdma_qp *iwqp = to_iwqp(ibqp);
- struct irdma_create_qp_req req = {};
struct irdma_create_qp_resp uresp = {};
u32 qp_num = 0;
int err_code;
- int sq_size;
- int rq_size;
struct irdma_sc_qp *qp;
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
struct irdma_qp_init_info init_info = {};
struct irdma_qp_host_ctx_info *ctx_info;
- unsigned long flags;
err_code = irdma_validate_qp_attrs(init_attr, iwdev);
if (err_code)
@@ -824,13 +897,10 @@ static int irdma_create_qp(struct ib_qp *ibqp,
udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
return -EINVAL;
- sq_size = init_attr->cap.max_send_wr;
- rq_size = init_attr->cap.max_recv_wr;
-
init_info.vsi = &iwdev->vsi;
init_info.qp_uk_init_info.uk_attrs = uk_attrs;
- init_info.qp_uk_init_info.sq_size = sq_size;
- init_info.qp_uk_init_info.rq_size = rq_size;
+ init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
+ init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
@@ -880,36 +950,9 @@ static int irdma_create_qp(struct ib_qp *ibqp,
init_waitqueue_head(&iwqp->mod_qp_waitq);
if (udata) {
- err_code = ib_copy_from_udata(&req, udata,
- min(sizeof(req), udata->inlen));
- if (err_code) {
- ibdev_dbg(&iwdev->ibdev,
- "VERBS: ib_copy_from_data fail\n");
- goto error;
- }
-
- iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
- iwqp->user_mode = 1;
- if (req.user_wqe_bufs) {
- struct irdma_ucontext *ucontext =
- rdma_udata_to_drv_context(udata,
- struct irdma_ucontext,
- ibucontext);
-
- init_info.qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
- spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
- iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
- &ucontext->qp_reg_mem_list);
- spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
-
- if (!iwqp->iwpbl) {
- err_code = -ENODATA;
- ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
- goto error;
- }
- }
init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
- irdma_setup_virt_qp(iwdev, iwqp, &init_info);
+ err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info,
+ init_attr);
} else {
INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
@@ -962,10 +1005,8 @@ static int irdma_create_qp(struct ib_qp *ibqp,
refcount_set(&iwqp->refcnt, 1);
spin_lock_init(&iwqp->lock);
spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
- iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+ iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
rf->qp_table[qp_num] = iwqp;
- iwqp->max_send_wr = sq_size;
- iwqp->max_recv_wr = rq_size;
if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
if (dev->ws_add(&iwdev->vsi, 0)) {
@@ -986,8 +1027,8 @@ static int irdma_create_qp(struct ib_qp *ibqp,
if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
uresp.lsmm = 1;
}
- uresp.actual_sq_size = sq_size;
- uresp.actual_rq_size = rq_size;
+ uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
+ uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
uresp.qp_id = qp_num;
uresp.qp_caps = qp->qp_uk.qp_caps;
@@ -1098,6 +1139,24 @@ static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
return 0;
}
+static u8 irdma_roce_get_vlan_prio(const struct ib_gid_attr *attr, u8 prio)
+{
+ struct net_device *ndev;
+
+ rcu_read_lock();
+ ndev = rcu_dereference(attr->ndev);
+ if (!ndev)
+ goto exit;
+ if (is_vlan_dev(ndev)) {
+ u16 vlan_qos = vlan_dev_get_egress_qos_mask(ndev, prio);
+
+ prio = (vlan_qos & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ }
+exit:
+ rcu_read_unlock();
+ return prio;
+}
+
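A tiny sketch of the remap this helper performs: the TOS-derived priority is passed through the vlan device's egress QoS table (vlan_dev_get_egress_qos_mask() in the hunk above) and the 3-bit PCP is pulled back out of the resulting TCI using the standard VLAN_PRIO_MASK (0xe000) and VLAN_PRIO_SHIFT (13).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t vlan_qos = 5u << 13;		/* egress table maps the skb priority to PCP 5 */
	uint8_t prio = (vlan_qos & 0xe000) >> 13;

	printf("user_pri = %u\n", prio);	/* -> 5 */
	return 0;
}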
/**
* irdma_modify_qp_roce - modify qp request
* @ibqp: qp's pointer for modify
@@ -1174,7 +1233,8 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_AV) {
struct irdma_av *av = &iwqp->roce_ah.av;
- const struct ib_gid_attr *sgid_attr;
+ const struct ib_gid_attr *sgid_attr =
+ attr->ah_attr.grh.sgid_attr;
u16 vlan_id = VLAN_N_VID;
u32 local_ip[4];
@@ -1189,17 +1249,22 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
roce_info->dest_qp);
irdma_qp_rem_qos(&iwqp->sc_qp);
dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
- ctx_info->user_pri = rt_tos2priority(udp_info->tos);
- iwqp->sc_qp.user_pri = ctx_info->user_pri;
- if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
- return -ENOMEM;
- irdma_qp_add_qos(&iwqp->sc_qp);
+ if (iwqp->sc_qp.vsi->dscp_mode)
+ ctx_info->user_pri =
+ iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
+ else
+ ctx_info->user_pri = rt_tos2priority(udp_info->tos);
}
- sgid_attr = attr->ah_attr.grh.sgid_attr;
ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
ctx_info->roce_info->mac_addr);
if (ret)
return ret;
+ ctx_info->user_pri = irdma_roce_get_vlan_prio(sgid_attr,
+ ctx_info->user_pri);
+ if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
+ return -ENOMEM;
+ iwqp->sc_qp.user_pri = ctx_info->user_pri;
+ irdma_qp_add_qos(&iwqp->sc_qp);
if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
vlan_id = 0;
@@ -1781,6 +1846,9 @@ static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
irdma_process_resize_list(iwcq, iwdev, NULL);
spin_unlock_irqrestore(&iwcq->lock, flags);
+ irdma_cq_rem_ref(ib_cq);
+ wait_for_completion(&iwcq->free_cq);
+
irdma_cq_wq_destroy(iwdev->rf, cq);
spin_lock_irqsave(&iwceq->ce_lock, flags);
@@ -1990,6 +2058,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
cq = &iwcq->sc_cq;
cq->back_cq = iwcq;
+ refcount_set(&iwcq->refcnt, 1);
spin_lock_init(&iwcq->lock);
INIT_LIST_HEAD(&iwcq->resize_list);
INIT_LIST_HEAD(&iwcq->cmpl_generated);
@@ -2141,6 +2210,9 @@ static int irdma_create_cq(struct ib_cq *ibcq,
goto cq_destroy;
}
}
+ rf->cq_table[cq_num] = iwcq;
+ init_completion(&iwcq->free_cq);
+
return 0;
cq_destroy:
irdma_cq_wq_destroy(rf, cq);
@@ -2552,7 +2624,8 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
struct irdma_mr *iwmr)
{
struct irdma_allocate_stag_info *info;
- struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+ struct ib_pd *pd = iwmr->ibmr.pd;
+ struct irdma_pd *iwpd = to_iwpd(pd);
int status;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
@@ -2568,6 +2641,7 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
info->pd_id = iwpd->sc_pd.pd_id;
info->total_len = iwmr->len;
+ info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
info->remote_access = true;
cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
cqp_info->post_sq = 1;
@@ -2615,6 +2689,8 @@ static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
iwmr->type = IRDMA_MEMREG_TYPE_MEM;
palloc = &iwpbl->pble_alloc;
iwmr->page_cnt = max_num_sg;
+ /* Use system PAGE_SIZE as the sg page sizes are unknown at this point */
+ iwmr->len = max_num_sg * PAGE_SIZE;
err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
false);
if (err_code)
@@ -2694,7 +2770,8 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
{
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
struct irdma_reg_ns_stag_info *stag_info;
- struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+ struct ib_pd *pd = iwmr->ibmr.pd;
+ struct irdma_pd *iwpd = to_iwpd(pd);
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
@@ -2713,6 +2790,7 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
stag_info->total_len = iwmr->len;
stag_info->access_rights = irdma_get_mr_access(access);
stag_info->pd_id = iwpd->sc_pd.pd_id;
+ stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
else
@@ -2794,8 +2872,8 @@ static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
enum irdma_memreg_type reg_type)
{
struct irdma_device *iwdev = to_iwdev(pd->device);
- struct irdma_pbl *iwpbl = NULL;
- struct irdma_mr *iwmr = NULL;
+ struct irdma_pbl *iwpbl;
+ struct irdma_mr *iwmr;
unsigned long pgsz_bitmap;
iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
@@ -3476,8 +3554,7 @@ static void irdma_process_cqe(struct ib_wc *entry,
set_ib_wc_op_sq(cq_poll_info, entry);
} else {
set_ib_wc_op_rq(cq_poll_info, entry,
- qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
- true : false);
+ qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);
if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
cq_poll_info->stag_invalid_set) {
entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
@@ -3963,7 +4040,7 @@ static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
irdma_copy_ip_ntohl(ip_addr,
sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
- irdma_netdev_vlan_ipv6(ip_addr, &vlan_id, NULL);
+ irdma_get_vlan_mac_ipv6(ip_addr, &vlan_id, NULL);
ipv4 = false;
ibdev_dbg(&iwdev->ibdev,
"VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
@@ -4261,9 +4338,12 @@ static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr)
ah_info->vlan_tag = 0;
if (ah_info->vlan_tag < VLAN_N_VID) {
+ u8 prio = rt_tos2priority(ah_info->tc_tos);
+
+ prio = irdma_roce_get_vlan_prio(sgid_attr, prio);
+
+ ah_info->vlan_tag |= (u16)prio << VLAN_PRIO_SHIFT;
ah_info->insert_vlan_tag = true;
- ah_info->vlan_tag |=
- rt_tos2priority(ah_info->tc_tos) << VLAN_PRIO_SHIFT;
}
return 0;
@@ -4424,7 +4504,6 @@ static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
ah_attr->grh.sgid_index = ah->sgid_index;
- ah_attr->grh.sgid_index = ah->sgid_index;
memcpy(&ah_attr->grh.dgid, &ah->dgid,
sizeof(ah_attr->grh.dgid));
}
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
index a536e9fa85eb..5d7b983f47a2 100644
--- a/drivers/infiniband/hw/irdma/verbs.h
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -18,7 +18,8 @@ struct irdma_ucontext {
struct list_head qp_reg_mem_list;
spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
int abi_ver;
- bool legacy_mode;
+ u8 legacy_mode : 1;
+ u8 use_raw_attrs : 1;
};
struct irdma_pd {
@@ -122,6 +123,8 @@ struct irdma_cq {
u32 cq_mem_size;
struct irdma_dma_mem kmem;
struct irdma_dma_mem kmem_shadow;
+ struct completion free_cq;
+ refcount_t refcnt;
spinlock_t lock; /* for poll cq */
struct irdma_pbl *iwpbl;
struct irdma_pbl *iwpbl_shadow;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1f8d0d2c5f17..529db874d67c 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -136,7 +136,7 @@ static struct net_device *mlx4_ib_get_netdev(struct ib_device *device,
continue;
if (mlx4_is_bonded(ibdev->dev)) {
- struct net_device *upper = NULL;
+ struct net_device *upper;
upper = netdev_master_upper_dev_get_rcu(dev);
if (upper) {
@@ -261,7 +261,7 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
int ret = 0;
int hw_update = 0;
int i;
- struct gid_entry *gids = NULL;
+ struct gid_entry *gids;
u16 vlan_id = 0xffff;
u8 mac[ETH_ALEN];
@@ -300,8 +300,7 @@ static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
ret = -ENOMEM;
} else {
*context = port_gid_table->gids[free].ctx;
- memcpy(&port_gid_table->gids[free].gid,
- &attr->gid, sizeof(attr->gid));
+ port_gid_table->gids[free].gid = attr->gid;
port_gid_table->gids[free].gid_type = attr->gid_type;
port_gid_table->gids[free].vlan_id = vlan_id;
port_gid_table->gids[free].ctx->real_index = free;
@@ -352,7 +351,7 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
struct mlx4_port_gid_table *port_gid_table;
int ret = 0;
int hw_update = 0;
- struct gid_entry *gids = NULL;
+ struct gid_entry *gids;
if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
return -EINVAL;
@@ -438,8 +437,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
struct ib_udata *uhw)
{
struct mlx4_ib_dev *dev = to_mdev(ibdev);
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err;
int have_ib_ports;
struct mlx4_uverbs_ex_query_device cmd;
@@ -656,8 +655,8 @@ mlx4_ib_port_link_layer(struct ib_device *device, u32 port_num)
static int ib_link_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props, int netw_view)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int ext_active_speed;
int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
int err = -ENOMEM;
@@ -834,8 +833,8 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid, int netw_view)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
struct mlx4_ib_dev *dev = to_mdev(ibdev);
int clear = 0;
@@ -899,8 +898,8 @@ static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port,
u64 *sl2vl_tbl)
{
union sl2vl_tbl_to_u64 sl2vl64;
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
int err = -ENOMEM;
int jj;
@@ -959,8 +958,8 @@ static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey, int netw_view)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
int err = -ENOMEM;
@@ -1975,8 +1974,8 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
static int init_node_data(struct mlx4_ib_dev *dev)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
int err = -ENOMEM;
@@ -2623,7 +2622,7 @@ static int mlx4_ib_probe(struct auxiliary_device *adev,
int num_req_counters;
int allocated;
u32 counter_index;
- struct counter_index *new_counter_index = NULL;
+ struct counter_index *new_counter_index;
pr_info_once("%s", mlx4_ib_version);
@@ -2946,7 +2945,7 @@ int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
{
int err;
size_t flow_size;
- struct ib_flow_attr *flow = NULL;
+ struct ib_flow_attr *flow;
struct ib_flow_spec_ib *ib_spec;
if (is_attach) {
@@ -2966,11 +2965,11 @@ int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC,
MLX4_FS_REGULAR, &mqp->reg_id);
- } else {
- err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
+ kfree(flow);
+ return err;
}
- kfree(flow);
- return err;
+
+ return __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
}
static void mlx4_ib_remove(struct auxiliary_device *adev)
@@ -3019,7 +3018,7 @@ static void mlx4_ib_remove(struct auxiliary_device *adev)
static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
{
- struct mlx4_ib_demux_work **dm = NULL;
+ struct mlx4_ib_demux_work **dm;
struct mlx4_dev *dev = ibdev->dev;
int i;
unsigned long flags;
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 93257fa5aae8..8300ce622835 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -27,6 +27,7 @@ static const struct mlx5_ib_counter basic_q_cnts[] = {
INIT_Q_COUNTER(rx_write_requests),
INIT_Q_COUNTER(rx_read_requests),
INIT_Q_COUNTER(rx_atomic_requests),
+ INIT_Q_COUNTER(rx_dct_connect),
INIT_Q_COUNTER(out_of_buffer),
};
@@ -46,6 +47,7 @@ static const struct mlx5_ib_counter vport_basic_q_cnts[] = {
INIT_VPORT_Q_COUNTER(rx_write_requests),
INIT_VPORT_Q_COUNTER(rx_read_requests),
INIT_VPORT_Q_COUNTER(rx_atomic_requests),
+ INIT_VPORT_Q_COUNTER(rx_dct_connect),
INIT_VPORT_Q_COUNTER(out_of_buffer),
};
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 9c8a7b206dcf..8102ef113b7e 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -308,8 +308,8 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
u16 packet_error;
@@ -338,8 +338,8 @@ out:
static int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
struct ib_smp *out_mad)
{
- struct ib_smp *in_mad = NULL;
- int err = -ENOMEM;
+ struct ib_smp *in_mad;
+ int err;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
if (!in_mad)
@@ -358,8 +358,8 @@ static int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
__be64 *sys_image_guid)
{
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
+ struct ib_smp *out_mad;
+ int err;
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!out_mad)
@@ -380,8 +380,8 @@ out:
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
u16 *max_pkeys)
{
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
+ struct ib_smp *out_mad;
+ int err;
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!out_mad)
@@ -402,8 +402,8 @@ out:
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
u32 *vendor_id)
{
- struct ib_smp *out_mad = NULL;
- int err = -ENOMEM;
+ struct ib_smp *out_mad;
+ int err;
out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
if (!out_mad)
@@ -423,8 +423,8 @@ out:
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
@@ -448,8 +448,8 @@ out:
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
@@ -474,8 +474,8 @@ out:
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
u16 *pkey)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
@@ -503,8 +503,8 @@ out:
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
union ib_gid *gid)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
@@ -545,8 +545,8 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int ext_active_speed;
int err = -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 2017ede100a6..3e345ef380f1 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1235,7 +1235,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
}
/* The pg_access bit allows setting the access flags
- * in the page list submitted with the command. */
+ * in the page list submitted with the command.
+ */
MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
@@ -1766,6 +1767,11 @@ mlx5_alloc_priv_descs(struct ib_device *device,
int ret;
add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+ if (is_power_of_2(MLX5_UMR_ALIGN) && add_size) {
+ int end = max_t(int, MLX5_UMR_ALIGN, roundup_pow_of_two(size));
+
+ add_size = min_t(int, end - size, add_size);
+ }
mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
if (!mr->descs_alloc)
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index c46df53f26cf..e1325f2927d6 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -53,8 +53,8 @@
static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
struct mthca_dev *mdev = to_mdev(ibdev);
@@ -121,8 +121,8 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
static int mthca_query_port(struct ib_device *ibdev,
u32 port, struct ib_port_attr *props)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
@@ -217,8 +217,8 @@ out:
static int mthca_query_pkey(struct ib_device *ibdev,
u32 port, u16 index, u16 *pkey)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
@@ -246,8 +246,8 @@ static int mthca_query_pkey(struct ib_device *ibdev,
static int mthca_query_gid(struct ib_device *ibdev, u32 port,
int index, union ib_gid *gid)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
@@ -989,8 +989,8 @@ static const struct attribute_group mthca_attr_group = {
static int mthca_init_node_data(struct mthca_dev *dev)
{
- struct ib_smp *in_mad = NULL;
- struct ib_smp *out_mad = NULL;
+ struct ib_smp *in_mad;
+ struct ib_smp *out_mad;
int err = -ENOMEM;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 58f994341e9a..c849fdbd4c99 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1277,7 +1277,7 @@ static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
qp->sq.max_sges = attrs->cap.max_send_sge;
qp->rq.max_sges = attrs->cap.max_recv_sge;
qp->state = OCRDMA_QPS_RST;
- qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
+ qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
}
static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
index 05307c1488b8..859f66a51bd2 100644
--- a/drivers/infiniband/hw/qedr/qedr_roce_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
@@ -354,7 +354,6 @@ int qedr_create_gsi_qp(struct qedr_dev *dev, struct ib_qp_init_attr *attrs,
/* the GSI CQ is handled by the driver so remove it from the FW */
qedr_destroy_gsi_cq(dev, attrs);
dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
- dev->gsi_rqcq->cq_type = QEDR_CQ_TYPE_GSI;
DP_DEBUG(dev, QEDR_MSG_GSI, "created GSI QP %p\n", qp);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index d745ce9dc88a..7887a6786ed4 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1358,7 +1358,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
qp->prev_wqe_size = 0;
- qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
+ qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
qp->dev = dev;
if (qedr_qp_has_sq(qp)) {
qedr_reset_qp_hwq_info(&qp->sq);
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index ef85bc8d9384..152952127f13 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -2250,7 +2250,9 @@ static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from)
return qib_user_sdma_writev(rcd, pq, iter_iov(from), from->nr_segs);
}
-static struct class *qib_class;
+static const struct class qib_class = {
+ .name = "ipath",
+};
static dev_t qib_dev;
int qib_cdev_init(int minor, const char *name,
@@ -2281,7 +2283,7 @@ int qib_cdev_init(int minor, const char *name,
goto err_cdev;
}
- device = device_create(qib_class, NULL, dev, NULL, "%s", name);
+ device = device_create(&qib_class, NULL, dev, NULL, "%s", name);
if (!IS_ERR(device))
goto done;
ret = PTR_ERR(device);
@@ -2325,9 +2327,8 @@ int __init qib_dev_init(void)
goto done;
}
- qib_class = class_create("ipath");
- if (IS_ERR(qib_class)) {
- ret = PTR_ERR(qib_class);
+ ret = class_register(&qib_class);
+ if (ret) {
pr_err("Could not create device class (err %d)\n", -ret);
unregister_chrdev_region(qib_dev, QIB_NMINORS);
}
@@ -2338,10 +2339,8 @@ done:
void qib_dev_cleanup(void)
{
- if (qib_class) {
- class_destroy(qib_class);
- qib_class = NULL;
- }
+ if (class_is_registered(&qib_class))
+ class_unregister(&qib_class);
unregister_chrdev_region(qib_dev, QIB_NMINORS);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 5111735aafae..d0bdc2d8adc8 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -597,6 +597,10 @@ static void flush_send_queue(struct rxe_qp *qp, bool notify)
struct rxe_queue *q = qp->sq.queue;
int err;
+ /* send queue never got created. nothing to do. */
+ if (!qp->sq.queue)
+ return;
+
while ((wqe = queue_head(q, q->type))) {
if (notify) {
err = flush_send_wqe(qp, wqe);
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 666e06a82bc9..4d2a8ef52c85 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -136,12 +136,6 @@ static inline int qp_mtu(struct rxe_qp *qp)
return IB_MTU_4096;
}
-static inline int rcv_wqe_size(int max_sge)
-{
- return sizeof(struct rxe_recv_wqe) +
- max_sge * sizeof(struct ib_sge);
-}
-
void free_rd_atomic_resource(struct resp_res *res);
static inline void rxe_advance_resp_resource(struct rxe_qp *qp)
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index a569b111a9d2..28e379c108bc 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -183,13 +183,63 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
atomic_set(&qp->skb_out, 0);
}
+static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
+ struct ib_udata *udata,
+ struct rxe_create_qp_resp __user *uresp)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ int wqe_size;
+ int err;
+
+ qp->sq.max_wr = init->cap.max_send_wr;
+ wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
+ init->cap.max_inline_data);
+ qp->sq.max_sge = wqe_size / sizeof(struct ib_sge);
+ qp->sq.max_inline = wqe_size;
+ wqe_size += sizeof(struct rxe_send_wqe);
+
+ qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size,
+ QUEUE_TYPE_FROM_CLIENT);
+ if (!qp->sq.queue) {
+ rxe_err_qp(qp, "Unable to allocate send queue");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ /* prepare info for caller to mmap send queue if user space qp */
+ err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
+ qp->sq.queue->buf, qp->sq.queue->buf_size,
+ &qp->sq.queue->ip);
+ if (err) {
+ rxe_err_qp(qp, "do_mmap_info failed, err = %d", err);
+ goto err_free;
+ }
+
+ /* return actual capabilities to caller which may be larger
+ * than requested
+ */
+ init->cap.max_send_wr = qp->sq.max_wr;
+ init->cap.max_send_sge = qp->sq.max_sge;
+ init->cap.max_inline_data = qp->sq.max_inline;
+
+ return 0;
+
+err_free:
+ vfree(qp->sq.queue->buf);
+ kfree(qp->sq.queue);
+ qp->sq.queue = NULL;
+err_out:
+ return err;
+}
+
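A standalone sketch of the send-WQE sizing rule factored into rxe_init_sq() above, assuming the usual 16-byte struct ib_sge layout: the per-WQE payload area must hold the larger of the SGE list and the inline data, and both caps are recomputed from that size before being reported back to the caller.

#include <stdint.h>
#include <stdio.h>

/* stand-in with the same layout as struct ib_sge (addr, length, lkey) */
struct sge { uint64_t addr; uint32_t length; uint32_t lkey; };

int main(void)
{
	uint32_t max_send_sge = 4, max_inline_data = 96;	/* requested caps (example values) */
	size_t wqe_size = max_send_sge * sizeof(struct sge);

	if (max_inline_data > wqe_size)
		wqe_size = max_inline_data;			/* the max_t() in rxe_init_sq() */

	/* caps handed back may be larger than requested */
	printf("max_sge = %zu, max_inline = %zu\n",
	       wqe_size / sizeof(struct sge), wqe_size);	/* 6, 96 */
	return 0;
}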
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
struct ib_qp_init_attr *init, struct ib_udata *udata,
struct rxe_create_qp_resp __user *uresp)
{
int err;
- int wqe_size;
- enum queue_type type;
+
+ /* if we don't finish qp create make sure queue is valid */
+ skb_queue_head_init(&qp->req_pkts);
err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
if (err < 0)
@@ -204,32 +254,10 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
* (0xc000 - 0xffff).
*/
qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
- qp->sq.max_wr = init->cap.max_send_wr;
-
- /* These caps are limited by rxe_qp_chk_cap() done by the caller */
- wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
- init->cap.max_inline_data);
- qp->sq.max_sge = init->cap.max_send_sge =
- wqe_size / sizeof(struct ib_sge);
- qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
- wqe_size += sizeof(struct rxe_send_wqe);
- type = QUEUE_TYPE_FROM_CLIENT;
- qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
- wqe_size, type);
- if (!qp->sq.queue)
- return -ENOMEM;
-
- err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
- qp->sq.queue->buf, qp->sq.queue->buf_size,
- &qp->sq.queue->ip);
-
- if (err) {
- vfree(qp->sq.queue->buf);
- kfree(qp->sq.queue);
- qp->sq.queue = NULL;
+ err = rxe_init_sq(qp, init, udata, uresp);
+ if (err)
return err;
- }
qp->req.wqe_index = queue_get_producer(qp->sq.queue,
QUEUE_TYPE_FROM_CLIENT);
@@ -248,36 +276,65 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
return 0;
}
+static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
+ struct ib_udata *udata,
+ struct rxe_create_qp_resp __user *uresp)
+{
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ int wqe_size;
+ int err;
+
+ qp->rq.max_wr = init->cap.max_recv_wr;
+ qp->rq.max_sge = init->cap.max_recv_sge;
+ wqe_size = sizeof(struct rxe_recv_wqe) +
+ qp->rq.max_sge * sizeof(struct ib_sge);
+
+ qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size,
+ QUEUE_TYPE_FROM_CLIENT);
+ if (!qp->rq.queue) {
+ rxe_err_qp(qp, "Unable to allocate recv queue");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ /* prepare info for caller to mmap recv queue if user space qp */
+ err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
+ qp->rq.queue->buf, qp->rq.queue->buf_size,
+ &qp->rq.queue->ip);
+ if (err) {
+ rxe_err_qp(qp, "do_mmap_info failed, err = %d", err);
+ goto err_free;
+ }
+
+ /* return actual capabilities to caller which may be larger
+ * than requested
+ */
+ init->cap.max_recv_wr = qp->rq.max_wr;
+
+ return 0;
+
+err_free:
+ vfree(qp->rq.queue->buf);
+ kfree(qp->rq.queue);
+ qp->rq.queue = NULL;
+err_out:
+ return err;
+}
+
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
struct ib_qp_init_attr *init,
struct ib_udata *udata,
struct rxe_create_qp_resp __user *uresp)
{
int err;
- int wqe_size;
- enum queue_type type;
+
+ /* if we don't finish qp create make sure queue is valid */
+ skb_queue_head_init(&qp->resp_pkts);
if (!qp->srq) {
- qp->rq.max_wr = init->cap.max_recv_wr;
- qp->rq.max_sge = init->cap.max_recv_sge;
-
- wqe_size = rcv_wqe_size(qp->rq.max_sge);
-
- type = QUEUE_TYPE_FROM_CLIENT;
- qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
- wqe_size, type);
- if (!qp->rq.queue)
- return -ENOMEM;
-
- err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
- qp->rq.queue->buf, qp->rq.queue->buf_size,
- &qp->rq.queue->ip);
- if (err) {
- vfree(qp->rq.queue->buf);
- kfree(qp->rq.queue);
- qp->rq.queue = NULL;
+ err = rxe_init_rq(qp, init, udata, uresp);
+ if (err)
return err;
- }
}
rxe_init_task(&qp->resp.task, qp, rxe_responder);
@@ -307,10 +364,10 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
if (srq)
rxe_get(srq);
- qp->pd = pd;
- qp->rcq = rcq;
- qp->scq = scq;
- qp->srq = srq;
+ qp->pd = pd;
+ qp->rcq = rcq;
+ qp->scq = scq;
+ qp->srq = srq;
atomic_inc(&rcq->num_wq);
atomic_inc(&scq->num_wq);
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 2171f19494bc..d8c41fd626a9 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -578,10 +578,11 @@ static void save_state(struct rxe_send_wqe *wqe,
struct rxe_send_wqe *rollback_wqe,
u32 *rollback_psn)
{
- rollback_wqe->state = wqe->state;
+ rollback_wqe->state = wqe->state;
rollback_wqe->first_psn = wqe->first_psn;
- rollback_wqe->last_psn = wqe->last_psn;
- *rollback_psn = qp->req.psn;
+ rollback_wqe->last_psn = wqe->last_psn;
+ rollback_wqe->dma = wqe->dma;
+ *rollback_psn = qp->req.psn;
}
static void rollback_state(struct rxe_send_wqe *wqe,
@@ -589,10 +590,11 @@ static void rollback_state(struct rxe_send_wqe *wqe,
struct rxe_send_wqe *rollback_wqe,
u32 rollback_psn)
{
- wqe->state = rollback_wqe->state;
+ wqe->state = rollback_wqe->state;
wqe->first_psn = rollback_wqe->first_psn;
- wqe->last_psn = rollback_wqe->last_psn;
- qp->req.psn = rollback_psn;
+ wqe->last_psn = rollback_wqe->last_psn;
+ wqe->dma = rollback_wqe->dma;
+ qp->req.psn = rollback_psn;
}
static void update_state(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
@@ -797,6 +799,9 @@ int rxe_requester(struct rxe_qp *qp)
pkt.mask = rxe_opcode[opcode].mask;
pkt.wqe = wqe;
+ /* save wqe state before we build and send packet */
+ save_state(wqe, qp, &rollback_wqe, &rollback_psn);
+
av = rxe_get_av(&pkt, &ah);
if (unlikely(!av)) {
rxe_dbg_qp(qp, "Failed no address vector\n");
@@ -829,29 +834,29 @@ int rxe_requester(struct rxe_qp *qp)
if (ah)
rxe_put(ah);
- /*
- * To prevent a race on wqe access between requester and completer,
- * wqe members state and psn need to be set before calling
- * rxe_xmit_packet().
- * Otherwise, completer might initiate an unjustified retry flow.
- */
- save_state(wqe, qp, &rollback_wqe, &rollback_psn);
+ /* update wqe state as though we had sent it */
update_wqe_state(qp, wqe, &pkt);
update_wqe_psn(qp, wqe, &pkt, payload);
err = rxe_xmit_packet(qp, &pkt, skb);
if (err) {
- qp->need_req_skb = 1;
+ if (err != -EAGAIN) {
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+ }
+ /* the packet was dropped so reset wqe to the state
+ * before we sent it so we can try to resend
+ */
rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
- if (err == -EAGAIN) {
- rxe_sched_task(&qp->req.task);
- goto exit;
- }
+ /* force a delay until the dropped packet is freed and
+ * the send queue is drained below the low water mark
+ */
+ qp->need_req_skb = 1;
- wqe->status = IB_WC_LOC_QP_OP_ERR;
- goto err;
+ rxe_sched_task(&qp->req.task);
+ goto exit;
}
update_state(qp, &pkt);
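For context, the requester saves the wqe state before building and sending a packet and rolls it back when the packet is dropped (-EAGAIN) so the send can be retried unchanged. A minimal standalone sketch of that snapshot/rollback pattern, using hypothetical names (struct wqe_snapshot, send_with_rollback(), xmit()) rather than the driver's real types:

#include <errno.h>

struct wqe_snapshot {
	int state;
	unsigned int first_psn;
	unsigned int last_psn;
};

static int send_with_rollback(struct wqe_snapshot *wqe,
			      int (*xmit)(struct wqe_snapshot *wqe))
{
	struct wqe_snapshot saved = *wqe;	/* snapshot before the attempt */
	int err = xmit(wqe);			/* xmit may advance state/psn */

	if (err == -EAGAIN)
		*wqe = saved;	/* packet dropped: restore so it can be resent */

	return err;
}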
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 64c64f5f36a8..da470a925efc 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -1469,6 +1469,10 @@ static void flush_recv_queue(struct rxe_qp *qp, bool notify)
return;
}
+ /* recv queue not created. nothing to do. */
+ if (!qp->rq.queue)
+ return;
+
while ((wqe = queue_head(q, q->type))) {
if (notify) {
err = flush_recv_wqe(qp, wqe);
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 27ca82ec0826..3661cb627d28 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -45,40 +45,41 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_init_attr *init, struct ib_udata *udata,
struct rxe_create_srq_resp __user *uresp)
{
- int err;
- int srq_wqe_size;
struct rxe_queue *q;
- enum queue_type type;
+ int wqe_size;
+ int err;
- srq->ibsrq.event_handler = init->event_handler;
- srq->ibsrq.srq_context = init->srq_context;
- srq->limit = init->attr.srq_limit;
- srq->srq_num = srq->elem.index;
- srq->rq.max_wr = init->attr.max_wr;
- srq->rq.max_sge = init->attr.max_sge;
+ srq->ibsrq.event_handler = init->event_handler;
+ srq->ibsrq.srq_context = init->srq_context;
+ srq->limit = init->attr.srq_limit;
+ srq->srq_num = srq->elem.index;
+ srq->rq.max_wr = init->attr.max_wr;
+ srq->rq.max_sge = init->attr.max_sge;
- srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);
+ wqe_size = sizeof(struct rxe_recv_wqe) +
+ srq->rq.max_sge*sizeof(struct ib_sge);
spin_lock_init(&srq->rq.producer_lock);
spin_lock_init(&srq->rq.consumer_lock);
- type = QUEUE_TYPE_FROM_CLIENT;
- q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size, type);
+ q = rxe_queue_init(rxe, &srq->rq.max_wr, wqe_size,
+ QUEUE_TYPE_FROM_CLIENT);
if (!q) {
rxe_dbg_srq(srq, "Unable to allocate queue\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_out;
}
- srq->rq.queue = q;
-
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf,
q->buf_size, &q->ip);
if (err) {
- vfree(q->buf);
- kfree(q);
- return err;
+ rxe_dbg_srq(srq, "Unable to init mmap info for caller\n");
+ goto err_free;
}
+ srq->rq.queue = q;
+ init->attr.max_wr = srq->rq.max_wr;
+
if (uresp) {
if (copy_to_user(&uresp->srq_num, &srq->srq_num,
sizeof(uresp->srq_num))) {
@@ -88,6 +89,12 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
}
return 0;
+
+err_free:
+ vfree(q->buf);
+ kfree(q);
+err_out:
+ return err;
}
int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
@@ -145,9 +152,10 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)
{
- int err;
struct rxe_queue *q = srq->rq.queue;
struct mminfo __user *mi = NULL;
+ int wqe_size;
+ int err;
if (mask & IB_SRQ_MAX_WR) {
/*
@@ -156,12 +164,16 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
*/
mi = u64_to_user_ptr(ucmd->mmap_info_addr);
- err = rxe_queue_resize(q, &attr->max_wr,
- rcv_wqe_size(srq->rq.max_sge), udata, mi,
- &srq->rq.producer_lock,
+ wqe_size = sizeof(struct rxe_recv_wqe) +
+ srq->rq.max_sge*sizeof(struct ib_sge);
+
+ err = rxe_queue_resize(q, &attr->max_wr, wqe_size,
+ udata, mi, &srq->rq.producer_lock,
&srq->rq.consumer_lock);
if (err)
- goto err2;
+ goto err_free;
+
+ srq->rq.max_wr = attr->max_wr;
}
if (mask & IB_SRQ_LIMIT)
@@ -169,7 +181,7 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
return 0;
-err2:
+err_free:
rxe_queue_cleanup(q);
srq->rq.queue = NULL;
return err;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 903f0b71447e..48f86839d36a 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -798,7 +798,6 @@ static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
rxe_err_qp(qp, "unsupported wr opcode %d",
wr->opcode);
return -EINVAL;
- break;
}
}
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 2f3a9cda3850..58dddb143b9f 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -74,6 +74,7 @@ struct siw_device {
u32 vendor_part_id;
int numa_node;
+ char raw_gid[ETH_ALEN];
/* physical port state (only one port per device) */
enum ib_port_state state;
@@ -530,11 +531,12 @@ void siw_qp_llp_data_ready(struct sock *sk);
void siw_qp_llp_write_space(struct sock *sk);
/* QP TX path functions */
+int siw_create_tx_threads(void);
+void siw_stop_tx_threads(void);
int siw_run_sq(void *arg);
int siw_qp_sq_process(struct siw_qp *qp);
int siw_sq_start(struct siw_qp *qp);
int siw_activate_tx(struct siw_qp *qp);
-void siw_stop_tx_thread(int nr_cpu);
int siw_get_tx_cpu(struct siw_device *sdev);
void siw_put_tx_cpu(int cpu);
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index da530c0404da..a2605178f4ed 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1501,7 +1501,6 @@ error:
cep->cm_id = NULL;
id->rem_ref(id);
- siw_cep_put(cep);
qp->cep = NULL;
siw_cep_put(cep);
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 65b5cda5457b..d4b6e0106851 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -75,8 +75,7 @@ static int siw_device_register(struct siw_device *sdev, const char *name)
return rv;
}
- siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr);
-
+ siw_dbg(base_dev, "HWaddr=%pM\n", sdev->raw_gid);
return 0;
}
@@ -88,29 +87,6 @@ static void siw_device_cleanup(struct ib_device *base_dev)
xa_destroy(&sdev->mem_xa);
}
-static int siw_create_tx_threads(void)
-{
- int cpu, assigned = 0;
-
- for_each_online_cpu(cpu) {
- /* Skip HT cores */
- if (cpu % cpumask_weight(topology_sibling_cpumask(cpu)))
- continue;
-
- siw_tx_thread[cpu] =
- kthread_run_on_cpu(siw_run_sq,
- (unsigned long *)(long)cpu,
- cpu, "siw_tx/%u");
- if (IS_ERR(siw_tx_thread[cpu])) {
- siw_tx_thread[cpu] = NULL;
- continue;
- }
-
- assigned++;
- }
- return assigned;
-}
-
static int siw_dev_qualified(struct net_device *netdev)
{
/*
@@ -313,24 +289,19 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
return NULL;
base_dev = &sdev->base_dev;
-
sdev->netdev = netdev;
- if (netdev->type != ARPHRD_LOOPBACK && netdev->type != ARPHRD_NONE) {
- addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
- netdev->dev_addr);
+ if (netdev->addr_len) {
+ memcpy(sdev->raw_gid, netdev->dev_addr,
+ min_t(unsigned int, netdev->addr_len, ETH_ALEN));
} else {
/*
- * This device does not have a HW address,
- * but connection mangagement lib expects gid != 0
+ * This device does not have a HW address, but
+		 * connection management requires a unique gid.
*/
- size_t len = min_t(size_t, strlen(base_dev->name), 6);
- char addr[6] = { };
-
- memcpy(addr, base_dev->name, len);
- addrconf_addr_eui48((unsigned char *)&base_dev->node_guid,
- addr);
+ eth_random_addr(sdev->raw_gid);
}
+ addrconf_addr_eui48((u8 *)&base_dev->node_guid, sdev->raw_gid);
base_dev->uverbs_cmd_mask |= BIT_ULL(IB_USER_VERBS_CMD_POST_SEND);
@@ -535,7 +506,6 @@ static struct rdma_link_ops siw_link_ops = {
static __init int siw_init_module(void)
{
int rv;
- int nr_cpu;
if (SENDPAGE_THRESH < SIW_MAX_INLINE) {
pr_info("siw: sendpage threshold too small: %u\n",
@@ -580,12 +550,8 @@ static __init int siw_init_module(void)
return 0;
out_error:
- for (nr_cpu = 0; nr_cpu < nr_cpu_ids; nr_cpu++) {
- if (siw_tx_thread[nr_cpu]) {
- siw_stop_tx_thread(nr_cpu);
- siw_tx_thread[nr_cpu] = NULL;
- }
- }
+ siw_stop_tx_threads();
+
if (siw_crypto_shash)
crypto_free_shash(siw_crypto_shash);
@@ -599,14 +565,8 @@ out_error:
static void __exit siw_exit_module(void)
{
- int cpu;
+ siw_stop_tx_threads();
- for_each_possible_cpu(cpu) {
- if (siw_tx_thread[cpu]) {
- siw_stop_tx_thread(cpu);
- siw_tx_thread[cpu] = NULL;
- }
- }
unregister_netdevice_notifier(&siw_netdev_nb);
rdma_link_unregister(&siw_link_ops);
ib_unregister_driver(RDMA_DRIVER_SIW);
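For context, siw starts one TX kthread per online CPU and stops them all from a single helper on exit or error unwinding. A minimal, self-contained sketch of that per-CPU kthread lifecycle, assuming hypothetical names (worker_fn(), start_workers(), stop_workers()) and a trivial sleep loop in place of the real send-queue processing:

#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>

static struct task_struct *workers[NR_CPUS];

static int worker_fn(void *arg)
{
	/* placeholder work loop; the real thread drains a send queue */
	while (!kthread_should_stop())
		msleep_interruptible(1000);
	return 0;
}

static int start_workers(void)
{
	int cpu, started = 0;

	for_each_online_cpu(cpu) {
		workers[cpu] = kthread_run_on_cpu(worker_fn, NULL, cpu,
						  "worker/%u");
		if (IS_ERR(workers[cpu])) {
			workers[cpu] = NULL;
			continue;
		}
		started++;
	}
	return started;
}

static void stop_workers(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (workers[cpu]) {
			kthread_stop(workers[cpu]);
			workers[cpu] = NULL;
		}
	}
}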
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 81e9bbd9ebda..47d0197db9a1 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -204,7 +204,7 @@ static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
{
if (irq_size) {
irq_size = roundup_pow_of_two(irq_size);
- qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
+ qp->irq = vcalloc(irq_size, sizeof(struct siw_sqe));
if (!qp->irq) {
qp->attrs.irq_size = 0;
return -ENOMEM;
@@ -212,7 +212,7 @@ static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
}
if (orq_size) {
orq_size = roundup_pow_of_two(orq_size);
- qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
+ qp->orq = vcalloc(orq_size, sizeof(struct siw_sqe));
if (!qp->orq) {
qp->attrs.orq_size = 0;
qp->attrs.irq_size = 0;
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 7c7a51d36d0c..60b6a4135961 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -1208,10 +1208,45 @@ struct tx_task_t {
static DEFINE_PER_CPU(struct tx_task_t, siw_tx_task_g);
-void siw_stop_tx_thread(int nr_cpu)
+int siw_create_tx_threads(void)
{
- kthread_stop(siw_tx_thread[nr_cpu]);
- wake_up(&per_cpu(siw_tx_task_g, nr_cpu).waiting);
+ int cpu, assigned = 0;
+
+ for_each_online_cpu(cpu) {
+ struct tx_task_t *tx_task;
+
+ /* Skip HT cores */
+ if (cpu % cpumask_weight(topology_sibling_cpumask(cpu)))
+ continue;
+
+ tx_task = &per_cpu(siw_tx_task_g, cpu);
+ init_llist_head(&tx_task->active);
+ init_waitqueue_head(&tx_task->waiting);
+
+ siw_tx_thread[cpu] =
+ kthread_run_on_cpu(siw_run_sq,
+ (unsigned long *)(long)cpu,
+ cpu, "siw_tx/%u");
+ if (IS_ERR(siw_tx_thread[cpu])) {
+ siw_tx_thread[cpu] = NULL;
+ continue;
+ }
+ assigned++;
+ }
+ return assigned;
+}
+
+void siw_stop_tx_threads(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (siw_tx_thread[cpu]) {
+ kthread_stop(siw_tx_thread[cpu]);
+ wake_up(&per_cpu(siw_tx_task_g, cpu).waiting);
+ siw_tx_thread[cpu] = NULL;
+ }
+ }
}
int siw_run_sq(void *data)
@@ -1221,9 +1256,6 @@ int siw_run_sq(void *data)
struct siw_qp *qp;
struct tx_task_t *tx_task = &per_cpu(siw_tx_task_g, nr_cpu);
- init_llist_head(&tx_task->active);
- init_waitqueue_head(&tx_task->waiting);
-
while (1) {
struct llist_node *fifo_list = NULL;
@@ -1239,13 +1271,7 @@ int siw_run_sq(void *data)
* llist_del_all returns a list with newest entry first.
* Re-order list for fairness among QP's.
*/
- while (active) {
- struct llist_node *tmp = active;
-
- active = llist_next(active);
- tmp->next = fifo_list;
- fifo_list = tmp;
- }
+ fifo_list = llist_reverse_order(active);
while (fifo_list) {
qp = container_of(fifo_list, struct siw_qp, tx_list);
fifo_list = llist_next(fifo_list);
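For context, llist_del_all() detaches the lock-free list with the newest entry first, and llist_reverse_order() restores submission (FIFO) order so queue pairs are drained fairly. A minimal sketch of that drain pattern, with a hypothetical struct tx_item and helper that are not part of siw:

#include <linux/llist.h>

struct tx_item {
	struct llist_node node;
	/* payload fields would go here */
};

static void drain_tx_list(struct llist_head *head,
			  void (*process)(struct tx_item *item))
{
	/* llist_del_all() detaches the whole list, newest entry first */
	struct llist_node *list = llist_del_all(head);

	/* restore submission (FIFO) order before processing */
	list = llist_reverse_order(list);

	while (list) {
		struct tx_item *item = llist_entry(list, struct tx_item, node);

		list = llist_next(list);
		process(item);
	}
}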
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 398ec13db624..fdbef3254e30 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -157,7 +157,7 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
attr->vendor_part_id = sdev->vendor_part_id;
addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
- sdev->netdev->dev_addr);
+ sdev->raw_gid);
return 0;
}
@@ -218,7 +218,7 @@ int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
/* subnet_prefix == interface_id == 0; */
memset(gid, 0, sizeof(*gid));
- memcpy(&gid->raw[0], sdev->netdev->dev_addr, 6);
+ memcpy(gid->raw, sdev->raw_gid, ETH_ALEN);
return 0;
}
@@ -381,7 +381,7 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
if (udata)
qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
else
- qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
+ qp->sendq = vcalloc(num_sqe, sizeof(struct siw_sqe));
if (qp->sendq == NULL) {
rv = -ENOMEM;
@@ -414,7 +414,7 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
qp->recvq =
vmalloc_user(num_rqe * sizeof(struct siw_rqe));
else
- qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
+ qp->recvq = vcalloc(num_rqe, sizeof(struct siw_rqe));
if (qp->recvq == NULL) {
rv = -ENOMEM;
@@ -1494,7 +1494,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
if (pbl->max_buf < num_sle) {
siw_dbg_mem(mem, "too many SGE's: %d > %d\n",
- mem->pbl->max_buf, num_sle);
+ num_sle, pbl->max_buf);
return -ENOMEM;
}
for_each_sg(sl, slp, num_sle, i) {
@@ -1624,7 +1624,7 @@ int siw_create_srq(struct ib_srq *base_srq,
srq->recvq =
vmalloc_user(srq->num_rqe * sizeof(struct siw_rqe));
else
- srq->recvq = vzalloc(srq->num_rqe * sizeof(struct siw_rqe));
+ srq->recvq = vcalloc(srq->num_rqe, sizeof(struct siw_rqe));
if (srq->recvq == NULL) {
rv = -ENOMEM;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 92e1e7587af8..00a7303c8cc6 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2570,6 +2570,8 @@ static void isert_wait_conn(struct iscsit_conn *conn)
isert_put_unsol_pending_cmds(conn);
isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
+
+ queue_work(isert_release_wq, &isert_conn->release_work);
}
static void isert_free_conn(struct iscsit_conn *conn)
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index b32941dd67cb..b6ee801fd0ff 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -45,7 +45,9 @@ static struct rtrs_rdma_dev_pd dev_pd = {
};
static struct workqueue_struct *rtrs_wq;
-static struct class *rtrs_clt_dev_class;
+static const struct class rtrs_clt_dev_class = {
+ .name = "rtrs-client",
+};
static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)
{
@@ -2698,7 +2700,7 @@ static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
return ERR_PTR(-ENOMEM);
}
- clt->dev.class = rtrs_clt_dev_class;
+ clt->dev.class = &rtrs_clt_dev_class;
clt->dev.release = rtrs_clt_dev_release;
uuid_gen(&clt->paths_uuid);
INIT_LIST_HEAD_RCU(&clt->paths_list);
@@ -3151,16 +3153,17 @@ static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
static int __init rtrs_client_init(void)
{
- rtrs_rdma_dev_pd_init(0, &dev_pd);
+ int ret = 0;
- rtrs_clt_dev_class = class_create("rtrs-client");
- if (IS_ERR(rtrs_clt_dev_class)) {
+ rtrs_rdma_dev_pd_init(0, &dev_pd);
+ ret = class_register(&rtrs_clt_dev_class);
+ if (ret) {
pr_err("Failed to create rtrs-client dev class\n");
- return PTR_ERR(rtrs_clt_dev_class);
+ return ret;
}
rtrs_wq = alloc_workqueue("rtrs_client_wq", 0, 0);
if (!rtrs_wq) {
- class_destroy(rtrs_clt_dev_class);
+ class_unregister(&rtrs_clt_dev_class);
return -ENOMEM;
}
@@ -3170,7 +3173,7 @@ static int __init rtrs_client_init(void)
static void __exit rtrs_client_exit(void)
{
destroy_workqueue(rtrs_wq);
- class_destroy(rtrs_clt_dev_class);
+ class_unregister(&rtrs_clt_dev_class);
rtrs_rdma_dev_pd_deinit(&dev_pd);
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index 5adba0f754b6..3f305e694fe8 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -164,7 +164,7 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_path *srv_pat
*/
goto unlock;
}
- srv->dev.class = rtrs_dev_class;
+ srv->dev.class = &rtrs_dev_class;
err = dev_set_name(&srv->dev, "%s", srv_path->s.sessname);
if (err)
goto unlock;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index c38901e2c8f4..75e56604e462 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -27,7 +27,9 @@ MODULE_LICENSE("GPL");
#define MAX_HDR_SIZE PAGE_SIZE
static struct rtrs_rdma_dev_pd dev_pd;
-struct class *rtrs_dev_class;
+const struct class rtrs_dev_class = {
+ .name = "rtrs-server",
+};
static struct rtrs_srv_ib_ctx ib_ctx;
static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
@@ -2253,11 +2255,10 @@ static int __init rtrs_server_init(void)
err);
return err;
}
- rtrs_dev_class = class_create("rtrs-server");
- if (IS_ERR(rtrs_dev_class)) {
- err = PTR_ERR(rtrs_dev_class);
+ err = class_register(&rtrs_dev_class);
+ if (err)
goto out_err;
- }
+
rtrs_wq = alloc_workqueue("rtrs_server_wq", 0, 0);
if (!rtrs_wq) {
err = -ENOMEM;
@@ -2267,7 +2268,7 @@ static int __init rtrs_server_init(void)
return 0;
out_dev_class:
- class_destroy(rtrs_dev_class);
+ class_unregister(&rtrs_dev_class);
out_err:
return err;
}
@@ -2275,7 +2276,7 @@ out_err:
static void __exit rtrs_server_exit(void)
{
destroy_workqueue(rtrs_wq);
- class_destroy(rtrs_dev_class);
+ class_unregister(&rtrs_dev_class);
rtrs_rdma_dev_pd_deinit(&dev_pd);
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 2f8a638e36fa..5e325b82ff33 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -129,7 +129,7 @@ struct rtrs_srv_ib_ctx {
int ib_dev_count;
};
-extern struct class *rtrs_dev_class;
+extern const struct class rtrs_dev_class;
void close_path(struct rtrs_srv_path *srv_path);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 0e513a7e5ac8..1574218764e0 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1979,12 +1979,8 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
- else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
- scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
- else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
- scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));
srp_free_req(ch, req, scmnd,
be32_to_cpu(rsp->req_lim_delta));
diff --git a/drivers/input/gameport/Kconfig b/drivers/input/gameport/Kconfig
index 5a2c2fb3217d..fe73b26e647a 100644
--- a/drivers/input/gameport/Kconfig
+++ b/drivers/input/gameport/Kconfig
@@ -25,6 +25,7 @@ if GAMEPORT
config GAMEPORT_NS558
tristate "Classic ISA and PnP gameport support"
+ depends on ISA
help
Say Y here if you have an ISA or PnP gameport.
@@ -35,6 +36,7 @@ config GAMEPORT_NS558
config GAMEPORT_L4
tristate "PDPI Lightning 4 gamecard support"
+ depends on ISA
help
Say Y here if you have a PDPI Lightning 4 gamecard.
@@ -53,7 +55,7 @@ config GAMEPORT_EMU10K1
config GAMEPORT_FM801
tristate "ForteMedia FM801 gameport support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
Say Y here if you have ForteMedia FM801 PCI audio controller
(Abit AU10, Genius Sound Maker, HP Workstation zx2000,
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index a1443320b419..34f416a3ebcb 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -519,12 +519,32 @@ EXPORT_SYMBOL(gameport_set_phys);
static void gameport_default_trigger(struct gameport *gameport)
{
+#ifdef CONFIG_HAS_IOPORT
outb(0xff, gameport->io);
+#endif
}
static unsigned char gameport_default_read(struct gameport *gameport)
{
+#ifdef CONFIG_HAS_IOPORT
return inb(gameport->io);
+#else
+ return 0xff;
+#endif
+}
+
+static void gameport_setup_default_handlers(struct gameport *gameport)
+{
+ if ((!gameport->trigger || !gameport->read) &&
+ !IS_ENABLED(CONFIG_HAS_IOPORT))
+ dev_err(&gameport->dev,
+ "I/O port access is required for %s (%s) but is not available\n",
+ gameport->phys, gameport->name);
+
+ if (!gameport->trigger)
+ gameport->trigger = gameport_default_trigger;
+ if (!gameport->read)
+ gameport->read = gameport_default_read;
}
/*
@@ -545,11 +565,7 @@ static void gameport_init_port(struct gameport *gameport)
if (gameport->parent)
gameport->dev.parent = &gameport->parent->dev;
- if (!gameport->trigger)
- gameport->trigger = gameport_default_trigger;
- if (!gameport->read)
- gameport->read = gameport_default_read;
-
+ gameport_setup_default_handlers(gameport);
INIT_LIST_HEAD(&gameport->node);
spin_lock_init(&gameport->timer_lock);
timer_setup(&gameport->poll_timer, gameport_run_poll_handler, 0);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index cdb193317c3b..ede380551e55 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -264,6 +264,7 @@ static const struct xpad_device {
{ 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE },
{ 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x0f0d, 0x00dc, "HORIPAD FPS for Nintendo Switch", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
@@ -365,6 +366,7 @@ static const struct xpad_device {
{ 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 },
{ 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 },
+ { 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 },
{ 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
@@ -499,6 +501,8 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x2f24), /* GameSir controllers */
XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */
XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */
+ XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */
+ XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */
{ }
};
@@ -1720,6 +1724,27 @@ static int xpad_start_input(struct usb_xpad *xpad)
return error;
}
}
+ if (xpad->xtype == XTYPE_XBOX360) {
+ /*
+		 * Some third-party Xbox 360-style controllers
+ * require this message to finish initialization.
+ */
+ u8 dummy[20];
+
+ error = usb_control_msg_recv(xpad->udev, 0,
+ /* bRequest */ 0x01,
+ /* bmRequestType */
+ USB_TYPE_VENDOR | USB_DIR_IN |
+ USB_RECIP_INTERFACE,
+ /* wValue */ 0x100,
+ /* wIndex */ 0x00,
+ dummy, sizeof(dummy),
+ 25, GFP_KERNEL);
+ if (error)
+ dev_warn(&xpad->dev->dev,
+ "unable to receive magic message: %d\n",
+ error);
+ }
return 0;
}
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index 896a5a989ddc..61e8e43e9c2b 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -713,17 +713,11 @@ static int adp5588_fw_parse(struct adp5588_kpad *kpad)
return 0;
}
-static void adp5588_disable_regulator(void *reg)
-{
- regulator_disable(reg);
-}
-
static int adp5588_probe(struct i2c_client *client)
{
struct adp5588_kpad *kpad;
struct input_dev *input;
struct gpio_desc *gpio;
- struct regulator *vcc;
unsigned int revid;
int ret;
int error;
@@ -749,16 +743,7 @@ static int adp5588_probe(struct i2c_client *client)
if (error)
return error;
- vcc = devm_regulator_get(&client->dev, "vcc");
- if (IS_ERR(vcc))
- return PTR_ERR(vcc);
-
- error = regulator_enable(vcc);
- if (error)
- return error;
-
- error = devm_add_action_or_reset(&client->dev,
- adp5588_disable_regulator, vcc);
+ error = devm_regulator_get_enable(&client->dev, "vcc");
if (error)
return error;
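For context, resources acquired through devm_* helpers in probe() are released automatically, in reverse order, on probe failure or device unbind, which makes hand-rolled error unwinding and a remove() callback unnecessary. A minimal probe() sketch of that pattern, using a hypothetical example_probe()/example_irq() rather than any real driver:

#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/regulator/consumer.h>

static irqreturn_t example_irq(int irq, void *dev_id)
{
	/* a real handler would scan the device and report input events */
	return IRQ_HANDLED;
}

static int example_probe(struct i2c_client *client)
{
	struct input_dev *input;
	int error;

	/* enabled regulator is disabled automatically on unbind/failure */
	error = devm_regulator_get_enable(&client->dev, "vcc");
	if (error)
		return error;

	/* input device is freed (or unregistered) automatically as well */
	input = devm_input_allocate_device(&client->dev);
	if (!input)
		return -ENOMEM;

	input->name = "example keypad";
	input->id.bustype = BUS_I2C;
	input_set_capability(input, EV_KEY, KEY_ENTER);

	/* threaded IRQ with no primary handler requires IRQF_ONESHOT */
	error = devm_request_threaded_irq(&client->dev, client->irq,
					  NULL, example_irq, IRQF_ONESHOT,
					  "example-keypad", input);
	if (error)
		return error;

	return input_register_device(input);
}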
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index a20a4e186639..e305c44cd0aa 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -196,7 +196,7 @@ static int __init amikbd_probe(struct platform_device *pdev)
struct input_dev *dev;
int i, err;
- dev = input_allocate_device();
+ dev = devm_input_allocate_device(&pdev->dev);
if (!dev) {
dev_err(&pdev->dev, "Not enough memory for input device\n");
return -ENOMEM;
@@ -208,7 +208,6 @@ static int __init amikbd_probe(struct platform_device *pdev)
dev->id.vendor = 0x0001;
dev->id.product = 0x0001;
dev->id.version = 0x0100;
- dev->dev.parent = &pdev->dev;
dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
@@ -218,35 +217,21 @@ static int __init amikbd_probe(struct platform_device *pdev)
amikbd_init_console_keymaps();
ciaa.cra &= ~0x41; /* serial data in, turn off TA */
- err = request_irq(IRQ_AMIGA_CIAA_SP, amikbd_interrupt, 0, "amikbd",
- dev);
+ err = devm_request_irq(&pdev->dev, IRQ_AMIGA_CIAA_SP, amikbd_interrupt,
+ 0, "amikbd", dev);
if (err)
- goto fail2;
+ return err;
err = input_register_device(dev);
if (err)
- goto fail3;
+ return err;
platform_set_drvdata(pdev, dev);
return 0;
-
- fail3: free_irq(IRQ_AMIGA_CIAA_SP, dev);
- fail2: input_free_device(dev);
- return err;
-}
-
-static int __exit amikbd_remove(struct platform_device *pdev)
-{
- struct input_dev *dev = platform_get_drvdata(pdev);
-
- free_irq(IRQ_AMIGA_CIAA_SP, dev);
- input_unregister_device(dev);
- return 0;
}
static struct platform_driver amikbd_driver = {
- .remove = __exit_p(amikbd_remove),
.driver = {
.name = "amiga-keyboard",
},
diff --git a/drivers/input/keyboard/bcm-keypad.c b/drivers/input/keyboard/bcm-keypad.c
index 56a919ec23b5..f3c3746acd4c 100644
--- a/drivers/input/keyboard/bcm-keypad.c
+++ b/drivers/input/keyboard/bcm-keypad.c
@@ -307,7 +307,6 @@ static int bcm_kp_probe(struct platform_device *pdev)
{
struct bcm_kp *kp;
struct input_dev *input_dev;
- struct resource *res;
int error;
kp = devm_kzalloc(&pdev->dev, sizeof(*kp), GFP_KERNEL);
@@ -353,29 +352,16 @@ static int bcm_kp_probe(struct platform_device *pdev)
return error;
}
- /* Get the KEYPAD base address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Missing keypad base address resource\n");
- return -ENODEV;
- }
-
- kp->base = devm_ioremap_resource(&pdev->dev, res);
+ kp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kp->base))
return PTR_ERR(kp->base);
/* Enable clock */
- kp->clk = devm_clk_get(&pdev->dev, "peri_clk");
+ kp->clk = devm_clk_get_optional(&pdev->dev, "peri_clk");
if (IS_ERR(kp->clk)) {
- error = PTR_ERR(kp->clk);
- if (error != -ENOENT) {
- if (error != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to get clock\n");
- return error;
- }
- dev_dbg(&pdev->dev,
- "No clock specified. Assuming it's enabled\n");
- kp->clk = NULL;
+ return dev_err_probe(&pdev->dev, PTR_ERR(kp->clk), "Failed to get clock\n");
+ } else if (!kp->clk) {
+ dev_dbg(&pdev->dev, "No clock specified. Assuming it's enabled\n");
} else {
unsigned int desired_rate;
long actual_rate;
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index c928829a8b0c..2e7c2c046e67 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -523,18 +523,15 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
NULL, GPIOD_IN, desc);
if (IS_ERR(bdata->gpiod)) {
error = PTR_ERR(bdata->gpiod);
- if (error == -ENOENT) {
- /*
- * GPIO is optional, we may be dealing with
- * purely interrupt-driven setup.
- */
- bdata->gpiod = NULL;
- } else {
- if (error != -EPROBE_DEFER)
- dev_err(dev, "failed to get gpio: %d\n",
- error);
- return error;
- }
+ if (error != -ENOENT)
+ return dev_err_probe(dev, error,
+ "failed to get gpio\n");
+
+ /*
+ * GPIO is optional, we may be dealing with
+ * purely interrupt-driven setup.
+ */
+ bdata->gpiod = NULL;
}
} else if (gpio_is_valid(button->gpio)) {
/*
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index c3937d2fc744..ba00ecfbd343 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -299,13 +299,9 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
NULL, GPIOD_IN,
button->desc);
if (IS_ERR(bdata->gpiod)) {
- error = PTR_ERR(bdata->gpiod);
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "failed to get gpio: %d\n",
- error);
fwnode_handle_put(child);
- return error;
+ return dev_err_probe(dev, PTR_ERR(bdata->gpiod),
+ "failed to get gpio\n");
}
} else if (gpio_is_valid(button->gpio)) {
/*
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 3964f6e0f6af..7bee93e9b0f5 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -556,6 +556,7 @@ static int init_pwm(struct lm8323_chip *lm, int id, struct device *dev,
const char *name)
{
struct lm8323_pwm *pwm;
+ int err;
BUG_ON(id > 3);
@@ -575,9 +576,11 @@ static int init_pwm(struct lm8323_chip *lm, int id, struct device *dev,
pwm->cdev.name = name;
pwm->cdev.brightness_set = lm8323_pwm_set_brightness;
pwm->cdev.groups = lm8323_pwm_groups;
- if (led_classdev_register(dev, &pwm->cdev) < 0) {
- dev_err(dev, "couldn't register PWM %d\n", id);
- return -1;
+
+ err = devm_led_classdev_register(dev, &pwm->cdev);
+ if (err) {
+ dev_err(dev, "couldn't register PWM %d: %d\n", id, err);
+ return err;
}
pwm->enabled = true;
}
@@ -585,8 +588,6 @@ static int init_pwm(struct lm8323_chip *lm, int id, struct device *dev,
return 0;
}
-static struct i2c_driver lm8323_i2c_driver;
-
static ssize_t lm8323_show_disable(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -615,6 +616,12 @@ static ssize_t lm8323_set_disable(struct device *dev,
}
static DEVICE_ATTR(disable_kp, 0644, lm8323_show_disable, lm8323_set_disable);
+static struct attribute *lm8323_attrs[] = {
+ &dev_attr_disable_kp.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(lm8323);
+
static int lm8323_probe(struct i2c_client *client)
{
struct lm8323_platform_data *pdata = dev_get_platdata(&client->dev);
@@ -642,12 +649,13 @@ static int lm8323_probe(struct i2c_client *client)
return -EINVAL;
}
- lm = kzalloc(sizeof *lm, GFP_KERNEL);
- idev = input_allocate_device();
- if (!lm || !idev) {
- err = -ENOMEM;
- goto fail1;
- }
+ lm = devm_kzalloc(&client->dev, sizeof(*lm), GFP_KERNEL);
+ if (!lm)
+ return -ENOMEM;
+
+ idev = devm_input_allocate_device(&client->dev);
+ if (!idev)
+ return -ENOMEM;
lm->client = client;
lm->idev = idev;
@@ -663,8 +671,10 @@ static int lm8323_probe(struct i2c_client *client)
lm8323_reset(lm);
- /* Nothing's set up to service the IRQ yet, so just spin for max.
- * 100ms until we can configure. */
+ /*
+ * Nothing's set up to service the IRQ yet, so just spin for max.
+ * 100ms until we can configure.
+ */
tmo = jiffies + msecs_to_jiffies(100);
while (lm8323_read(lm, LM8323_CMD_READ_INT, data, 1) == 1) {
if (data[0] & INT_NOINIT)
@@ -684,21 +694,17 @@ static int lm8323_probe(struct i2c_client *client)
/* If a true probe check the device */
if (lm8323_read_id(lm, data) != 0) {
dev_err(&client->dev, "device not found\n");
- err = -ENODEV;
- goto fail1;
+ return -ENODEV;
}
for (pwm = 0; pwm < LM8323_NUM_PWMS; pwm++) {
err = init_pwm(lm, pwm + 1, &client->dev,
pdata->pwm_names[pwm]);
- if (err < 0)
- goto fail2;
+ if (err)
+ return err;
}
lm->kp_enabled = true;
- err = device_create_file(&client->dev, &dev_attr_disable_kp);
- if (err < 0)
- goto fail2;
idev->name = pdata->name ? : "LM8323 keypad";
snprintf(lm->phys, sizeof(lm->phys),
@@ -719,14 +725,16 @@ static int lm8323_probe(struct i2c_client *client)
err = input_register_device(idev);
if (err) {
dev_dbg(&client->dev, "error registering input device\n");
- goto fail3;
+ return err;
}
- err = request_threaded_irq(client->irq, NULL, lm8323_irq,
- IRQF_TRIGGER_LOW|IRQF_ONESHOT, "lm8323", lm);
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, lm8323_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "lm8323", lm);
if (err) {
dev_err(&client->dev, "could not get IRQ %d\n", client->irq);
- goto fail4;
+ return err;
}
i2c_set_clientdata(client, lm);
@@ -735,39 +743,6 @@ static int lm8323_probe(struct i2c_client *client)
enable_irq_wake(client->irq);
return 0;
-
-fail4:
- input_unregister_device(idev);
- idev = NULL;
-fail3:
- device_remove_file(&client->dev, &dev_attr_disable_kp);
-fail2:
- while (--pwm >= 0)
- if (lm->pwm[pwm].enabled)
- led_classdev_unregister(&lm->pwm[pwm].cdev);
-fail1:
- input_free_device(idev);
- kfree(lm);
- return err;
-}
-
-static void lm8323_remove(struct i2c_client *client)
-{
- struct lm8323_chip *lm = i2c_get_clientdata(client);
- int i;
-
- disable_irq_wake(client->irq);
- free_irq(client->irq, lm);
-
- input_unregister_device(lm->idev);
-
- device_remove_file(&lm->client->dev, &dev_attr_disable_kp);
-
- for (i = 0; i < 3; i++)
- if (lm->pwm[i].enabled)
- led_classdev_unregister(&lm->pwm[i].cdev);
-
- kfree(lm);
}
/*
@@ -823,11 +798,11 @@ static const struct i2c_device_id lm8323_id[] = {
static struct i2c_driver lm8323_i2c_driver = {
.driver = {
- .name = "lm8323",
- .pm = pm_sleep_ptr(&lm8323_pm_ops),
+ .name = "lm8323",
+ .pm = pm_sleep_ptr(&lm8323_pm_ops),
+ .dev_groups = lm8323_groups,
},
.probe = lm8323_probe,
- .remove = lm8323_remove,
.id_table = lm8323_id,
};
MODULE_DEVICE_TABLE(i2c, lm8323_id);
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index c9f05764e36d..1c070c499c85 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -142,18 +142,18 @@ static int lm8333_probe(struct i2c_client *client)
return -EINVAL;
}
- lm8333 = kzalloc(sizeof(*lm8333), GFP_KERNEL);
- input = input_allocate_device();
- if (!lm8333 || !input) {
- err = -ENOMEM;
- goto free_mem;
- }
+ lm8333 = devm_kzalloc(&client->dev, sizeof(*lm8333), GFP_KERNEL);
+ if (!lm8333)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
lm8333->client = client;
lm8333->input = input;
input->name = client->name;
- input->dev.parent = &client->dev;
input->id.bustype = BUS_I2C;
input_set_capability(input, EV_MSC, MSC_SCAN);
@@ -162,7 +162,7 @@ static int lm8333_probe(struct i2c_client *client)
LM8333_NUM_ROWS, LM8333_NUM_COLS,
lm8333->keycodes, input);
if (err)
- goto free_mem;
+ return err;
if (pdata->debounce_time) {
err = lm8333_write8(lm8333, LM8333_DEBOUNCE,
@@ -178,34 +178,19 @@ static int lm8333_probe(struct i2c_client *client)
dev_warn(&client->dev, "Unable to set active time\n");
}
- err = request_threaded_irq(client->irq, NULL, lm8333_irq_thread,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "lm8333", lm8333);
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, lm8333_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "lm8333", lm8333);
if (err)
- goto free_mem;
+ return err;
err = input_register_device(input);
if (err)
- goto free_irq;
+ return err;
i2c_set_clientdata(client, lm8333);
return 0;
-
- free_irq:
- free_irq(client->irq, lm8333);
- free_mem:
- input_free_device(input);
- kfree(lm8333);
- return err;
-}
-
-static void lm8333_remove(struct i2c_client *client)
-{
- struct lm8333 *lm8333 = i2c_get_clientdata(client);
-
- free_irq(client->irq, lm8333);
- input_unregister_device(lm8333->input);
- kfree(lm8333);
}
static const struct i2c_device_id lm8333_id[] = {
@@ -219,7 +204,6 @@ static struct i2c_driver lm8333_driver = {
.name = "lm8333",
},
.probe = lm8333_probe,
- .remove = lm8333_remove,
.id_table = lm8333_id,
};
module_i2c_driver(lm8333_driver);
diff --git a/drivers/input/keyboard/lpc32xx-keys.c b/drivers/input/keyboard/lpc32xx-keys.c
index 911e1181cd6f..322a87807159 100644
--- a/drivers/input/keyboard/lpc32xx-keys.c
+++ b/drivers/input/keyboard/lpc32xx-keys.c
@@ -160,17 +160,10 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
{
struct lpc32xx_kscan_drv *kscandat;
struct input_dev *input;
- struct resource *res;
size_t keymap_size;
int error;
int irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get platform I/O memory\n");
- return -EINVAL;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -EINVAL;
@@ -221,7 +214,7 @@ static int lpc32xx_kscan_probe(struct platform_device *pdev)
input_set_drvdata(kscandat->input, kscandat);
- kscandat->kscan_base = devm_ioremap_resource(&pdev->dev, res);
+ kscandat->kscan_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kscandat->kscan_base))
return PTR_ERR(kscandat->kscan_base);
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index de312d8eb974..2410f676c7f9 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -92,6 +92,13 @@ static irqreturn_t mcs_touchkey_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void mcs_touchkey_poweroff(void *data)
+{
+ struct mcs_touchkey_data *touchkey = data;
+
+ touchkey->poweron(false);
+}
+
static int mcs_touchkey_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
@@ -109,13 +116,16 @@ static int mcs_touchkey_probe(struct i2c_client *client)
return -EINVAL;
}
- data = kzalloc(struct_size(data, keycodes, pdata->key_maxval + 1),
- GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!data || !input_dev) {
- dev_err(&client->dev, "Failed to allocate memory\n");
- error = -ENOMEM;
- goto err_free_mem;
+ data = devm_kzalloc(&client->dev,
+ struct_size(data, keycodes, pdata->key_maxval + 1),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ input_dev = devm_input_allocate_device(&client->dev);
+ if (!input_dev) {
+ dev_err(&client->dev, "Failed to allocate input device\n");
+ return -ENOMEM;
}
data->client = client;
@@ -136,15 +146,13 @@ static int mcs_touchkey_probe(struct i2c_client *client)
fw_ver = i2c_smbus_read_byte_data(client, fw_reg);
if (fw_ver < 0) {
- error = fw_ver;
- dev_err(&client->dev, "i2c read error[%d]\n", error);
- goto err_free_mem;
+ dev_err(&client->dev, "i2c read error[%d]\n", fw_ver);
+ return fw_ver;
}
dev_info(&client->dev, "Firmware version: %d\n", fw_ver);
input_dev->name = "MELFAS MCS Touchkey";
input_dev->id.bustype = BUS_I2C;
- input_dev->dev.parent = &client->dev;
input_dev->evbit[0] = BIT_MASK(EV_KEY);
if (!pdata->no_autorepeat)
input_dev->evbit[0] |= BIT_MASK(EV_REP);
@@ -169,40 +177,28 @@ static int mcs_touchkey_probe(struct i2c_client *client)
if (pdata->poweron) {
data->poweron = pdata->poweron;
data->poweron(true);
+
+ error = devm_add_action_or_reset(&client->dev,
+ mcs_touchkey_poweroff, data);
+ if (error)
+ return error;
}
- error = request_threaded_irq(client->irq, NULL, mcs_touchkey_interrupt,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- client->dev.driver->name, data);
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, mcs_touchkey_interrupt,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->dev.driver->name, data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
- goto err_free_mem;
+ return error;
}
error = input_register_device(input_dev);
if (error)
- goto err_free_irq;
+ return error;
i2c_set_clientdata(client, data);
return 0;
-
-err_free_irq:
- free_irq(client->irq, data);
-err_free_mem:
- input_free_device(input_dev);
- kfree(data);
- return error;
-}
-
-static void mcs_touchkey_remove(struct i2c_client *client)
-{
- struct mcs_touchkey_data *data = i2c_get_clientdata(client);
-
- free_irq(client->irq, data);
- if (data->poweron)
- data->poweron(false);
- input_unregister_device(data->input_dev);
- kfree(data);
}
static void mcs_touchkey_shutdown(struct i2c_client *client)
@@ -259,7 +255,6 @@ static struct i2c_driver mcs_touchkey_driver = {
.pm = pm_sleep_ptr(&mcs_touchkey_pm_ops),
},
.probe = mcs_touchkey_probe,
- .remove = mcs_touchkey_remove,
.shutdown = mcs_touchkey_shutdown,
.id_table = mcs_touchkey_id,
};
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index 970f2a671c2e..b3ccc97f61e1 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -221,13 +221,20 @@ static irqreturn_t ske_keypad_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void ske_keypad_board_exit(void *data)
+{
+ struct ske_keypad *keypad = data;
+
+ keypad->board->exit();
+}
+
static int __init ske_keypad_probe(struct platform_device *pdev)
{
const struct ske_keypad_platform_data *plat =
dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
struct ske_keypad *keypad;
struct input_dev *input;
- struct resource *res;
int irq;
int error;
@@ -238,20 +245,14 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return -EINVAL;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "missing platform resources\n");
- return -EINVAL;
- }
+ return irq;
- keypad = kzalloc(sizeof(struct ske_keypad), GFP_KERNEL);
- input = input_allocate_device();
+ keypad = devm_kzalloc(dev, sizeof(struct ske_keypad),
+ GFP_KERNEL);
+ input = devm_input_allocate_device(dev);
if (!keypad || !input) {
dev_err(&pdev->dev, "failed to allocate keypad memory\n");
- error = -ENOMEM;
- goto err_free_mem;
+ return -ENOMEM;
}
keypad->irq = irq;
@@ -259,31 +260,20 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
keypad->input = input;
spin_lock_init(&keypad->ske_keypad_lock);
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "failed to request I/O memory\n");
- error = -EBUSY;
- goto err_free_mem;
- }
-
- keypad->reg_base = ioremap(res->start, resource_size(res));
- if (!keypad->reg_base) {
- dev_err(&pdev->dev, "failed to remap I/O memory\n");
- error = -ENXIO;
- goto err_free_mem_region;
- }
+ keypad->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(keypad->reg_base))
+ return PTR_ERR(keypad->reg_base);
- keypad->pclk = clk_get(&pdev->dev, "apb_pclk");
+ keypad->pclk = devm_clk_get_enabled(dev, "apb_pclk");
if (IS_ERR(keypad->pclk)) {
dev_err(&pdev->dev, "failed to get pclk\n");
- error = PTR_ERR(keypad->pclk);
- goto err_iounmap;
+ return PTR_ERR(keypad->pclk);
}
- keypad->clk = clk_get(&pdev->dev, NULL);
+ keypad->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(keypad->clk)) {
dev_err(&pdev->dev, "failed to get clk\n");
- error = PTR_ERR(keypad->clk);
- goto err_pclk;
+ return PTR_ERR(keypad->clk);
}
input->id.bustype = BUS_HOST;
@@ -295,48 +285,43 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
keypad->keymap, input);
if (error) {
dev_err(&pdev->dev, "Failed to build keymap\n");
- goto err_clk;
+ return error;
}
input_set_capability(input, EV_MSC, MSC_SCAN);
if (!plat->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- error = clk_prepare_enable(keypad->pclk);
- if (error) {
- dev_err(&pdev->dev, "Failed to prepare/enable pclk\n");
- goto err_clk;
- }
-
- error = clk_prepare_enable(keypad->clk);
- if (error) {
- dev_err(&pdev->dev, "Failed to prepare/enable clk\n");
- goto err_pclk_disable;
- }
-
-
/* go through board initialization helpers */
if (keypad->board->init)
keypad->board->init();
+ if (keypad->board->exit) {
+ error = devm_add_action_or_reset(dev, ske_keypad_board_exit,
+ keypad);
+ if (error)
+ return error;
+ }
+
error = ske_keypad_chip_init(keypad);
if (error) {
dev_err(&pdev->dev, "unable to init keypad hardware\n");
- goto err_clk_disable;
+ return error;
}
- error = request_threaded_irq(keypad->irq, NULL, ske_keypad_irq,
- IRQF_ONESHOT, "ske-keypad", keypad);
+ error = devm_request_threaded_irq(dev, keypad->irq,
+ NULL, ske_keypad_irq,
+ IRQF_ONESHOT, "ske-keypad", keypad);
if (error) {
dev_err(&pdev->dev, "allocate irq %d failed\n", keypad->irq);
- goto err_clk_disable;
+ return error;
}
error = input_register_device(input);
if (error) {
dev_err(&pdev->dev,
- "unable to register input device: %d\n", error);
- goto err_free_irq;
+ "unable to register input device: %d\n", error);
+ return error;
}
if (plat->wakeup_enable)
@@ -345,47 +330,6 @@ static int __init ske_keypad_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, keypad);
return 0;
-
-err_free_irq:
- free_irq(keypad->irq, keypad);
-err_clk_disable:
- clk_disable_unprepare(keypad->clk);
-err_pclk_disable:
- clk_disable_unprepare(keypad->pclk);
-err_clk:
- clk_put(keypad->clk);
-err_pclk:
- clk_put(keypad->pclk);
-err_iounmap:
- iounmap(keypad->reg_base);
-err_free_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_mem:
- input_free_device(input);
- kfree(keypad);
- return error;
-}
-
-static int ske_keypad_remove(struct platform_device *pdev)
-{
- struct ske_keypad *keypad = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- free_irq(keypad->irq, keypad);
-
- input_unregister_device(keypad->input);
-
- clk_disable_unprepare(keypad->clk);
- clk_put(keypad->clk);
-
- if (keypad->board->exit)
- keypad->board->exit();
-
- iounmap(keypad->reg_base);
- release_mem_region(res->start, resource_size(res));
- kfree(keypad);
-
- return 0;
}
static int ske_keypad_suspend(struct device *dev)
@@ -424,7 +368,6 @@ static struct platform_driver ske_keypad_driver = {
.name = "nmk-ske-keypad",
.pm = pm_sleep_ptr(&ske_keypad_dev_pm_ops),
},
- .remove = ske_keypad_remove,
};
module_platform_driver_probe(ske_keypad_driver, ske_keypad_probe);
diff --git a/drivers/input/keyboard/nspire-keypad.c b/drivers/input/keyboard/nspire-keypad.c
index e9fa1423f136..096c18d7bca1 100644
--- a/drivers/input/keyboard/nspire-keypad.c
+++ b/drivers/input/keyboard/nspire-keypad.c
@@ -186,8 +186,7 @@ static int nspire_keypad_probe(struct platform_device *pdev)
return PTR_ERR(keypad->clk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- keypad->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ keypad->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(keypad->reg_base))
return PTR_ERR(keypad->reg_base);
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 9f085d5679db..773e55eed88b 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -341,17 +341,10 @@ static int omap4_keypad_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct omap4_keypad *keypad_data;
struct input_dev *input_dev;
- struct resource *res;
unsigned int max_keys;
int irq;
int error;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no base address specified\n");
- return -EINVAL;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -370,7 +363,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
if (error)
return error;
- keypad_data->base = devm_ioremap_resource(dev, res);
+ keypad_data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(keypad_data->base))
return PTR_ERR(keypad_data->base);
diff --git a/drivers/input/keyboard/opencores-kbd.c b/drivers/input/keyboard/opencores-kbd.c
index b0ea387414c1..7ffe1a70c856 100644
--- a/drivers/input/keyboard/opencores-kbd.c
+++ b/drivers/input/keyboard/opencores-kbd.c
@@ -39,15 +39,8 @@ static int opencores_kbd_probe(struct platform_device *pdev)
{
struct input_dev *input;
struct opencores_kbd *opencores_kbd;
- struct resource *res;
int irq, i, error;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "missing board memory resource\n");
- return -EINVAL;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return -EINVAL;
@@ -65,7 +58,7 @@ static int opencores_kbd_probe(struct platform_device *pdev)
opencores_kbd->input = input;
- opencores_kbd->addr = devm_ioremap_resource(&pdev->dev, res);
+ opencores_kbd->addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(opencores_kbd->addr))
return PTR_ERR(opencores_kbd->addr);
diff --git a/drivers/input/keyboard/pinephone-keyboard.c b/drivers/input/keyboard/pinephone-keyboard.c
index 038ff3549a7a..147b1f288a33 100644
--- a/drivers/input/keyboard/pinephone-keyboard.c
+++ b/drivers/input/keyboard/pinephone-keyboard.c
@@ -318,40 +318,22 @@ static void ppkb_close(struct input_dev *input)
ppkb_set_scan(client, false);
}
-static void ppkb_regulator_disable(void *regulator)
-{
- regulator_disable(regulator);
-}
-
static int ppkb_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
unsigned int phys_rows, phys_cols;
struct pinephone_keyboard *ppkb;
- struct regulator *vbat_supply;
u8 info[PPKB_MATRIX_SIZE + 1];
struct device_node *i2c_bus;
int ret;
int error;
- vbat_supply = devm_regulator_get(dev, "vbat");
- error = PTR_ERR_OR_ZERO(vbat_supply);
+ error = devm_regulator_get_enable(dev, "vbat");
if (error) {
dev_err(dev, "Failed to get VBAT supply: %d\n", error);
return error;
}
- error = regulator_enable(vbat_supply);
- if (error) {
- dev_err(dev, "Failed to enable VBAT: %d\n", error);
- return error;
- }
-
- error = devm_add_action_or_reset(dev, ppkb_regulator_disable,
- vbat_supply);
- if (error)
- return error;
-
ret = i2c_smbus_read_i2c_block_data(client, 0, sizeof(info), info);
if (ret != sizeof(info)) {
error = ret < 0 ? ret : -EIO;
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 871f858d0ba7..3724363d140e 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -717,7 +717,6 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct pxa27x_keypad *keypad;
struct input_dev *input_dev;
- struct resource *res;
int irq, error;
/* Driver need build keycode from device tree or pdata */
@@ -728,12 +727,6 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
if (irq < 0)
return -ENXIO;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to get I/O memory\n");
- return -ENXIO;
- }
-
keypad = devm_kzalloc(&pdev->dev, sizeof(*keypad),
GFP_KERNEL);
if (!keypad)
@@ -747,7 +740,7 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
keypad->input_dev = input_dev;
keypad->irq = irq;
- keypad->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ keypad->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(keypad->mmio_base))
return PTR_ERR(keypad->mmio_base);
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index 91aaa9fc43a4..9b093b042bf1 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -149,20 +149,20 @@ static int qt1070_probe(struct i2c_client *client)
if (!qt1070_identify(client))
return -ENODEV;
- data = kzalloc(sizeof(struct qt1070_data), GFP_KERNEL);
- input = input_allocate_device();
- if (!data || !input) {
- dev_err(&client->dev, "insufficient memory\n");
- err = -ENOMEM;
- goto err_free_mem;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct qt1070_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
data->client = client;
data->input = input;
data->irq = client->irq;
input->name = "AT42QT1070 QTouch Sensor";
- input->dev.parent = &client->dev;
input->id.bustype = BUS_I2C;
/* Add the keycode */
@@ -185,19 +185,20 @@ static int qt1070_probe(struct i2c_client *client)
qt1070_write(client, RESET, 1);
msleep(QT1070_RESET_TIME);
- err = request_threaded_irq(client->irq, NULL, qt1070_interrupt,
- IRQF_TRIGGER_NONE | IRQF_ONESHOT,
- client->dev.driver->name, data);
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, qt1070_interrupt,
+ IRQF_TRIGGER_NONE | IRQF_ONESHOT,
+ client->dev.driver->name, data);
if (err) {
dev_err(&client->dev, "fail to request irq\n");
- goto err_free_mem;
+ return err;
}
/* Register the input device */
err = input_register_device(data->input);
if (err) {
dev_err(&client->dev, "Failed to register input device\n");
- goto err_free_irq;
+ return err;
}
i2c_set_clientdata(client, data);
@@ -206,24 +207,6 @@ static int qt1070_probe(struct i2c_client *client)
qt1070_read(client, DET_STATUS);
return 0;
-
-err_free_irq:
- free_irq(client->irq, data);
-err_free_mem:
- input_free_device(input);
- kfree(data);
- return err;
-}
-
-static void qt1070_remove(struct i2c_client *client)
-{
- struct qt1070_data *data = i2c_get_clientdata(client);
-
- /* Release IRQ */
- free_irq(client->irq, data);
-
- input_unregister_device(data->input);
- kfree(data);
}
static int qt1070_suspend(struct device *dev)
@@ -272,7 +255,6 @@ static struct i2c_driver qt1070_driver = {
},
.id_table = qt1070_id,
.probe = qt1070_probe,
- .remove = qt1070_remove,
};
module_i2c_driver(qt1070_driver);
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 599ea85cfd30..7e3b09642ab7 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -32,7 +32,7 @@
#define QT2160_NUM_LEDS_X 8
-#define QT2160_CYCLE_INTERVAL (2*HZ)
+#define QT2160_CYCLE_INTERVAL 2000 /* msec - 2 sec */
static unsigned char qt2160_key2code[] = {
KEY_0, KEY_1, KEY_2, KEY_3,
@@ -54,7 +54,6 @@ struct qt2160_led {
struct qt2160_data {
struct i2c_client *client;
struct input_dev *input;
- struct delayed_work dwork;
unsigned short keycodes[ARRAY_SIZE(qt2160_key2code)];
u16 key_matrix;
#ifdef CONFIG_LEDS_CLASS
@@ -155,10 +154,10 @@ static int qt2160_read_block(struct i2c_client *client,
return 0;
}
-static int qt2160_get_key_matrix(struct qt2160_data *qt2160)
+static void qt2160_get_key_matrix(struct input_dev *input)
{
+ struct qt2160_data *qt2160 = input_get_drvdata(input);
struct i2c_client *client = qt2160->client;
- struct input_dev *input = qt2160->input;
u8 regs[6];
u16 old_matrix, new_matrix;
int ret, i, mask;
@@ -173,7 +172,7 @@ static int qt2160_get_key_matrix(struct qt2160_data *qt2160)
if (ret) {
dev_err(&client->dev,
"could not perform chip read.\n");
- return ret;
+ return;
}
old_matrix = qt2160->key_matrix;
@@ -191,37 +190,17 @@ static int qt2160_get_key_matrix(struct qt2160_data *qt2160)
}
input_sync(input);
-
- return 0;
}
-static irqreturn_t qt2160_irq(int irq, void *_qt2160)
+static irqreturn_t qt2160_irq(int irq, void *data)
{
- struct qt2160_data *qt2160 = _qt2160;
+ struct input_dev *input = data;
- mod_delayed_work(system_wq, &qt2160->dwork, 0);
+ qt2160_get_key_matrix(input);
return IRQ_HANDLED;
}
-static void qt2160_schedule_read(struct qt2160_data *qt2160)
-{
- schedule_delayed_work(&qt2160->dwork, QT2160_CYCLE_INTERVAL);
-}
-
-static void qt2160_worker(struct work_struct *work)
-{
- struct qt2160_data *qt2160 =
- container_of(work, struct qt2160_data, dwork.work);
-
- dev_dbg(&qt2160->client->dev, "worker\n");
-
- qt2160_get_key_matrix(qt2160);
-
- /* Avoid device lock up by checking every so often */
- qt2160_schedule_read(qt2160);
-}
-
static int qt2160_read(struct i2c_client *client, u8 reg)
{
int ret;
@@ -260,7 +239,7 @@ static int qt2160_write(struct i2c_client *client, u8 reg, u8 data)
static int qt2160_register_leds(struct qt2160_data *qt2160)
{
struct i2c_client *client = qt2160->client;
- int ret;
+ int error;
int i;
for (i = 0; i < QT2160_NUM_LEDS_X; i++) {
@@ -273,9 +252,9 @@ static int qt2160_register_leds(struct qt2160_data *qt2160)
led->id = i;
led->qt2160 = qt2160;
- ret = led_classdev_register(&client->dev, &led->cdev);
- if (ret < 0)
- return ret;
+ error = devm_led_classdev_register(&client->dev, &led->cdev);
+ if (error)
+ return error;
}
/* Turn off LEDs */
@@ -286,14 +265,6 @@ static int qt2160_register_leds(struct qt2160_data *qt2160)
return 0;
}
-static void qt2160_unregister_leds(struct qt2160_data *qt2160)
-{
- int i;
-
- for (i = 0; i < QT2160_NUM_LEDS_X; i++)
- led_classdev_unregister(&qt2160->leds[i].cdev);
-}
-
#else
static inline int qt2160_register_leds(struct qt2160_data *qt2160)
@@ -301,10 +272,6 @@ static inline int qt2160_register_leds(struct qt2160_data *qt2160)
return 0;
}
-static inline void qt2160_unregister_leds(struct qt2160_data *qt2160)
-{
-}
-
#endif
static bool qt2160_identify(struct i2c_client *client)
@@ -345,12 +312,9 @@ static int qt2160_probe(struct i2c_client *client)
int i;
int error;
- /* Check functionality */
- error = i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_BYTE);
- if (!error) {
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
dev_err(&client->dev, "%s adapter not supported\n",
- dev_driver_string(&client->adapter->dev));
+ dev_driver_string(&client->adapter->dev));
return -ENODEV;
}
@@ -358,17 +322,16 @@ static int qt2160_probe(struct i2c_client *client)
return -ENODEV;
/* Chip is valid and active. Allocate structure */
- qt2160 = kzalloc(sizeof(struct qt2160_data), GFP_KERNEL);
- input = input_allocate_device();
- if (!qt2160 || !input) {
- dev_err(&client->dev, "insufficient memory\n");
- error = -ENOMEM;
- goto err_free_mem;
- }
+ qt2160 = devm_kzalloc(&client->dev, sizeof(*qt2160), GFP_KERNEL);
+ if (!qt2160)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
qt2160->client = client;
qt2160->input = input;
- INIT_DELAYED_WORK(&qt2160->dwork, qt2160_worker);
input->name = "AT42QT2160 Touch Sense Keyboard";
input->id.bustype = BUS_I2C;
@@ -385,66 +348,48 @@ static int qt2160_probe(struct i2c_client *client)
}
__clear_bit(KEY_RESERVED, input->keybit);
+ input_set_drvdata(input, qt2160);
+
/* Calibrate device */
error = qt2160_write(client, QT2160_CMD_CALIBRATE, 1);
if (error) {
dev_err(&client->dev, "failed to calibrate device\n");
- goto err_free_mem;
+ return error;
}
if (client->irq) {
- error = request_irq(client->irq, qt2160_irq,
- IRQF_TRIGGER_FALLING, "qt2160", qt2160);
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, qt2160_irq,
+ IRQF_ONESHOT,
+ "qt2160", input);
if (error) {
dev_err(&client->dev,
"failed to allocate irq %d\n", client->irq);
- goto err_free_mem;
+ return error;
+ }
+ } else {
+ error = input_setup_polling(input, qt2160_get_key_matrix);
+ if (error) {
+ dev_err(&client->dev, "Failed to setup polling\n");
+ return error;
}
+ input_set_poll_interval(input, QT2160_CYCLE_INTERVAL);
}
error = qt2160_register_leds(qt2160);
if (error) {
dev_err(&client->dev, "Failed to register leds\n");
- goto err_free_irq;
+ return error;
}
error = input_register_device(qt2160->input);
if (error) {
dev_err(&client->dev,
"Failed to register input device\n");
- goto err_unregister_leds;
+ return error;
}
- i2c_set_clientdata(client, qt2160);
- qt2160_schedule_read(qt2160);
-
return 0;
-
-err_unregister_leds:
- qt2160_unregister_leds(qt2160);
-err_free_irq:
- if (client->irq)
- free_irq(client->irq, qt2160);
-err_free_mem:
- input_free_device(input);
- kfree(qt2160);
- return error;
-}
-
-static void qt2160_remove(struct i2c_client *client)
-{
- struct qt2160_data *qt2160 = i2c_get_clientdata(client);
-
- qt2160_unregister_leds(qt2160);
-
- /* Release IRQ so no queue will be scheduled */
- if (client->irq)
- free_irq(client->irq, qt2160);
-
- cancel_delayed_work_sync(&qt2160->dwork);
-
- input_unregister_device(qt2160->input);
- kfree(qt2160);
}
static const struct i2c_device_id qt2160_idtable[] = {
@@ -461,7 +406,6 @@ static struct i2c_driver qt2160_driver = {
.id_table = qt2160_idtable,
.probe = qt2160_probe,
- .remove = qt2160_remove,
};
module_i2c_driver(qt2160_driver);
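The qt2160 conversion drops the driver-private delayed work in favour of the polled-input helpers: when no interrupt is wired, input_setup_polling() registers a poll callback that the input core runs on its own workqueue at the spacing set by input_set_poll_interval(), and the very same scan routine doubles as the threaded IRQ handler otherwise. A condensed sketch of that split, assuming a hypothetical bar_scan() that reads the key matrix (names and the interval are illustrative):

#define BAR_POLL_INTERVAL	100	/* msec, assumed for the sketch */

static void bar_scan(struct input_dev *input)
{
	/* Read the key matrix via input_get_drvdata(input) and report it. */
	input_sync(input);
}

static irqreturn_t bar_irq(int irq, void *dev_id)
{
	bar_scan(dev_id);
	return IRQ_HANDLED;
}

static int bar_wire_events(struct i2c_client *client, struct input_dev *input)
{
	int error;

	if (client->irq)
		/* Interrupt-driven: scan from a threaded handler. */
		return devm_request_threaded_irq(&client->dev, client->irq,
						 NULL, bar_irq, IRQF_ONESHOT,
						 "bar-keys", input);

	/* No IRQ wired up: let the input core poll bar_scan() periodically. */
	error = input_setup_polling(input, bar_scan);
	if (error)
		return error;

	input_set_poll_interval(input, BAR_POLL_INTERVAL);
	return 0;
}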
diff --git a/drivers/input/keyboard/sun4i-lradc-keys.c b/drivers/input/keyboard/sun4i-lradc-keys.c
index 15c15c0958b0..f304cab0ebdb 100644
--- a/drivers/input/keyboard/sun4i-lradc-keys.c
+++ b/drivers/input/keyboard/sun4i-lradc-keys.c
@@ -21,10 +21,11 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/slab.h>
@@ -307,8 +308,7 @@ static int sun4i_lradc_probe(struct platform_device *pdev)
input_set_drvdata(lradc->input, lradc);
- lradc->base = devm_ioremap_resource(dev,
- platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ lradc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lradc->base))
return PTR_ERR(lradc->base);
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 2f745cabf4f2..8af59ced1ec2 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -24,6 +24,8 @@
#define TCA6416_INVERT 2
#define TCA6416_DIRECTION 3
+#define TCA6416_POLL_INTERVAL 100 /* msec */
+
static const struct i2c_device_id tca6416_id[] = {
{ "tca6416-keys", 16, },
{ "tca6408-keys", 8, },
@@ -43,7 +45,6 @@ struct tca6416_keypad_chip {
struct i2c_client *client;
struct input_dev *input;
- struct delayed_work dwork;
int io_size;
int irqnum;
u16 pinmask;
@@ -85,9 +86,9 @@ static int tca6416_read_reg(struct tca6416_keypad_chip *chip, int reg, u16 *val)
return 0;
}
-static void tca6416_keys_scan(struct tca6416_keypad_chip *chip)
+static void tca6416_keys_scan(struct input_dev *input)
{
- struct input_dev *input = chip->input;
+ struct tca6416_keypad_chip *chip = input_get_drvdata(input);
u16 reg_val, val;
int error, i, pin_index;
@@ -122,33 +123,20 @@ static void tca6416_keys_scan(struct tca6416_keypad_chip *chip)
*/
static irqreturn_t tca6416_keys_isr(int irq, void *dev_id)
{
- struct tca6416_keypad_chip *chip = dev_id;
-
- tca6416_keys_scan(chip);
+ tca6416_keys_scan(dev_id);
return IRQ_HANDLED;
}
-static void tca6416_keys_work_func(struct work_struct *work)
-{
- struct tca6416_keypad_chip *chip =
- container_of(work, struct tca6416_keypad_chip, dwork.work);
-
- tca6416_keys_scan(chip);
- schedule_delayed_work(&chip->dwork, msecs_to_jiffies(100));
-}
-
static int tca6416_keys_open(struct input_dev *dev)
{
struct tca6416_keypad_chip *chip = input_get_drvdata(dev);
- /* Get initial device state in case it has switches */
- tca6416_keys_scan(chip);
-
- if (chip->use_polling)
- schedule_delayed_work(&chip->dwork, msecs_to_jiffies(100));
- else
- enable_irq(chip->irqnum);
+ if (!chip->use_polling) {
+ /* Get initial device state in case it has switches */
+ tca6416_keys_scan(dev);
+ enable_irq(chip->client->irq);
+ }
return 0;
}
@@ -157,10 +145,8 @@ static void tca6416_keys_close(struct input_dev *dev)
{
struct tca6416_keypad_chip *chip = input_get_drvdata(dev);
- if (chip->use_polling)
- cancel_delayed_work_sync(&chip->dwork);
- else
- disable_irq(chip->irqnum);
+ if (!chip->use_polling)
+ disable_irq(chip->client->irq);
}
static int tca6416_setup_registers(struct tca6416_keypad_chip *chip)
@@ -216,12 +202,15 @@ static int tca6416_keypad_probe(struct i2c_client *client)
return -EINVAL;
}
- chip = kzalloc(struct_size(chip, buttons, pdata->nbuttons), GFP_KERNEL);
- input = input_allocate_device();
- if (!chip || !input) {
- error = -ENOMEM;
- goto fail1;
- }
+ chip = devm_kzalloc(&client->dev,
+ struct_size(chip, buttons, pdata->nbuttons),
+ GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ input = devm_input_allocate_device(&client->dev);
+ if (!input)
+ return -ENOMEM;
chip->client = client;
chip->input = input;
@@ -229,11 +218,8 @@ static int tca6416_keypad_probe(struct i2c_client *client)
chip->pinmask = pdata->pinmask;
chip->use_polling = pdata->use_polling;
- INIT_DELAYED_WORK(&chip->dwork, tca6416_keys_work_func);
-
input->phys = "tca6416-keys/input0";
input->name = client->name;
- input->dev.parent = &client->dev;
input->open = tca6416_keys_open;
input->close = tca6416_keys_close;
@@ -263,24 +249,28 @@ static int tca6416_keypad_probe(struct i2c_client *client)
*/
error = tca6416_setup_registers(chip);
if (error)
- goto fail1;
+ return error;
- if (!chip->use_polling) {
- if (pdata->irq_is_gpio)
- chip->irqnum = gpio_to_irq(client->irq);
- else
- chip->irqnum = client->irq;
-
- error = request_threaded_irq(chip->irqnum, NULL,
- tca6416_keys_isr,
- IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT | IRQF_NO_AUTOEN,
- "tca6416-keypad", chip);
+ if (chip->use_polling) {
+ error = input_setup_polling(input, tca6416_keys_scan);
+ if (error) {
+ dev_err(&client->dev, "Failed to setup polling\n");
+ return error;
+ }
+
+ input_set_poll_interval(input, TCA6416_POLL_INTERVAL);
+ } else {
+ error = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, tca6416_keys_isr,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT |
+ IRQF_NO_AUTOEN,
+ "tca6416-keypad", input);
if (error) {
dev_dbg(&client->dev,
"Unable to claim irq %d; error %d\n",
- chip->irqnum, error);
- goto fail1;
+ client->irq, error);
+ return error;
}
}
@@ -288,70 +278,19 @@ static int tca6416_keypad_probe(struct i2c_client *client)
if (error) {
dev_dbg(&client->dev,
"Unable to register input device, error: %d\n", error);
- goto fail2;
+ return error;
}
i2c_set_clientdata(client, chip);
- device_init_wakeup(&client->dev, 1);
return 0;
-
-fail2:
- if (!chip->use_polling) {
- free_irq(chip->irqnum, chip);
- enable_irq(chip->irqnum);
- }
-fail1:
- input_free_device(input);
- kfree(chip);
- return error;
}
-static void tca6416_keypad_remove(struct i2c_client *client)
-{
- struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
-
- if (!chip->use_polling) {
- free_irq(chip->irqnum, chip);
- enable_irq(chip->irqnum);
- }
-
- input_unregister_device(chip->input);
- kfree(chip);
-}
-
-static int tca6416_keypad_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
-
- if (device_may_wakeup(dev))
- enable_irq_wake(chip->irqnum);
-
- return 0;
-}
-
-static int tca6416_keypad_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct tca6416_keypad_chip *chip = i2c_get_clientdata(client);
-
- if (device_may_wakeup(dev))
- disable_irq_wake(chip->irqnum);
-
- return 0;
-}
-
-static DEFINE_SIMPLE_DEV_PM_OPS(tca6416_keypad_dev_pm_ops,
- tca6416_keypad_suspend, tca6416_keypad_resume);
-
static struct i2c_driver tca6416_keypad_driver = {
.driver = {
.name = "tca6416-keypad",
- .pm = pm_sleep_ptr(&tca6416_keypad_dev_pm_ops),
},
.probe = tca6416_keypad_probe,
- .remove = tca6416_keypad_remove,
.id_table = tca6416_id,
};
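The tca6416 hunks keep the interrupt requested for the whole bind but leave the line masked until userspace opens the device: IRQF_NO_AUTOEN requests it disabled, and the input core's open()/close() callbacks toggle it. A minimal sketch of that idiom with hypothetical baz_* names (the scan body is elided):

static irqreturn_t baz_isr(int irq, void *dev_id)
{
	struct input_dev *input = dev_id;

	/* Scan the hardware and report events here. */
	input_sync(input);
	return IRQ_HANDLED;
}

static int baz_open(struct input_dev *input)
{
	struct i2c_client *client = input_get_drvdata(input);

	/* Requested with IRQF_NO_AUTOEN, so enable the line on first open. */
	enable_irq(client->irq);
	return 0;
}

static void baz_close(struct input_dev *input)
{
	struct i2c_client *client = input_get_drvdata(input);

	disable_irq(client->irq);
}

static int baz_wire_irq(struct i2c_client *client, struct input_dev *input)
{
	input->open = baz_open;
	input->close = baz_close;
	input_set_drvdata(input, client);

	return devm_request_threaded_irq(&client->dev, client->irq,
					 NULL, baz_isr,
					 IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
					 IRQF_NO_AUTOEN,
					 "baz-keypad", input);
}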
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index d5a6c7d8eb25..c9a823ea45d0 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -640,7 +640,7 @@ static int tegra_kbc_probe(struct platform_device *pdev)
timer_setup(&kbc->timer, tegra_kbc_keypress_timer, 0);
- kbc->mmio = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ kbc->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kbc->mmio))
return PTR_ERR(kbc->mmio);
diff --git a/drivers/input/keyboard/tm2-touchkey.c b/drivers/input/keyboard/tm2-touchkey.c
index 75bd3ea51194..0fd761ae052f 100644
--- a/drivers/input/keyboard/tm2-touchkey.c
+++ b/drivers/input/keyboard/tm2-touchkey.c
@@ -19,7 +19,6 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 8a320e6218e3..6ba984d7f0b1 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -791,10 +791,10 @@ config INPUT_IQS626A
module will be called iqs626a.
config INPUT_IQS7222
- tristate "Azoteq IQS7222A/B/C capacitive touch controller"
+ tristate "Azoteq IQS7222A/B/C/D capacitive touch controller"
depends on I2C
help
- Say Y to enable support for the Azoteq IQS7222A/B/C family
+ Say Y to enable support for the Azoteq IQS7222A/B/C/D family
of capacitive touch controllers.
To compile this driver as a module, choose M here: the
diff --git a/drivers/input/misc/cpcap-pwrbutton.c b/drivers/input/misc/cpcap-pwrbutton.c
index 879790bbf9fe..85cddb84717a 100644
--- a/drivers/input/misc/cpcap-pwrbutton.c
+++ b/drivers/input/misc/cpcap-pwrbutton.c
@@ -1,16 +1,8 @@
-/**
+// SPDX-License-Identifier: GPL-2.0-only
+/*
* CPCAP Power Button Input Driver
*
* Copyright (C) 2017 Sebastian Reichel <sre@kernel.org>
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file "COPYING" in the main directory of this
- * archive for more details.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
index b14a389600c9..74808bae326a 100644
--- a/drivers/input/misc/da9063_onkey.c
+++ b/drivers/input/misc/da9063_onkey.c
@@ -10,6 +10,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/workqueue.h>
#include <linux/regmap.h>
#include <linux/of.h>
@@ -251,6 +252,14 @@ static int da9063_onkey_probe(struct platform_device *pdev)
return error;
}
+ error = dev_pm_set_wake_irq(&pdev->dev, irq);
+ if (error)
+ dev_warn(&pdev->dev,
+ "Failed to set IRQ %d as a wake IRQ: %d\n",
+ irq, error);
+ else
+ device_init_wakeup(&pdev->dev, true);
+
error = input_register_device(onkey->input);
if (error) {
dev_err(&pdev->dev,
diff --git a/drivers/input/misc/gpio-vibra.c b/drivers/input/misc/gpio-vibra.c
index 134a1309ba92..ad44b4d18a2a 100644
--- a/drivers/input/misc/gpio-vibra.c
+++ b/drivers/input/misc/gpio-vibra.c
@@ -18,7 +18,7 @@
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
@@ -113,22 +113,14 @@ static int gpio_vibrator_probe(struct platform_device *pdev)
return -ENOMEM;
vibrator->vcc = devm_regulator_get(&pdev->dev, "vcc");
- err = PTR_ERR_OR_ZERO(vibrator->vcc);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to request regulator: %d\n",
- err);
- return err;
- }
+ if (IS_ERR(vibrator->vcc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(vibrator->vcc),
+ "Failed to request regulator\n");
vibrator->gpio = devm_gpiod_get(&pdev->dev, "enable", GPIOD_OUT_LOW);
- err = PTR_ERR_OR_ZERO(vibrator->gpio);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to request main gpio: %d\n",
- err);
- return err;
- }
+ if (IS_ERR(vibrator->gpio))
+ return dev_err_probe(&pdev->dev, PTR_ERR(vibrator->gpio),
+ "Failed to request main gpio\n");
INIT_WORK(&vibrator->play_work, gpio_vibrator_play_work);
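Several probe() paths in this series replace the open-coded "ignore -EPROBE_DEFER, otherwise dev_err()" dance with dev_err_probe(), which logs only when the error is not a deferral, records the deferral reason for the devices_deferred debugfs file, and hands the error code back in one expression. A small illustrative helper showing the resulting shape (the qux_* name and the "vcc" supply are assumptions, not from the patch):

static int qux_get_supply(struct device *dev, struct regulator **vcc)
{
	*vcc = devm_regulator_get(dev, "vcc");
	if (IS_ERR(*vcc))
		/* Replaces: err = PTR_ERR(); if (err != -EPROBE_DEFER) dev_err(); return err; */
		return dev_err_probe(dev, PTR_ERR(*vcc),
				     "Failed to request regulator\n");

	return 0;
}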
diff --git a/drivers/input/misc/iqs269a.c b/drivers/input/misc/iqs269a.c
index 1272ef7b5794..c0a085639870 100644
--- a/drivers/input/misc/iqs269a.c
+++ b/drivers/input/misc/iqs269a.c
@@ -17,9 +17,9 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/input/misc/iqs626a.c b/drivers/input/misc/iqs626a.c
index 50035c25c3f7..0dab54d3a060 100644
--- a/drivers/input/misc/iqs626a.c
+++ b/drivers/input/misc/iqs626a.c
@@ -19,8 +19,8 @@
#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c
index 096b0925f41b..36aeeae77611 100644
--- a/drivers/input/misc/iqs7222.c
+++ b/drivers/input/misc/iqs7222.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Azoteq IQS7222A/B/C Capacitive Touch Controller
+ * Azoteq IQS7222A/B/C/D Capacitive Touch Controller
*
* Copyright (C) 2022 Jeff LaBundy <jeff@labundy.com>
*/
@@ -12,11 +12,12 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/input.h>
+#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
@@ -25,6 +26,7 @@
#define IQS7222_PROD_NUM_A 840
#define IQS7222_PROD_NUM_B 698
#define IQS7222_PROD_NUM_C 863
+#define IQS7222_PROD_NUM_D 1046
#define IQS7222_SYS_STATUS 0x10
#define IQS7222_SYS_STATUS_RESET BIT(3)
@@ -54,6 +56,7 @@
#define IQS7222_EVENT_MASK_ATI BIT(12)
#define IQS7222_EVENT_MASK_SLDR BIT(10)
+#define IQS7222_EVENT_MASK_TPAD IQS7222_EVENT_MASK_SLDR
#define IQS7222_EVENT_MASK_TOUCH BIT(1)
#define IQS7222_EVENT_MASK_PROX BIT(0)
@@ -71,6 +74,7 @@
#define IQS7222_MAX_COLS_CHAN 6
#define IQS7222_MAX_COLS_FILT 2
#define IQS7222_MAX_COLS_SLDR 11
+#define IQS7222_MAX_COLS_TPAD 24
#define IQS7222_MAX_COLS_GPIO 3
#define IQS7222_MAX_COLS_SYS 13
@@ -102,16 +106,18 @@ enum iqs7222_reg_grp_id {
IQS7222_REG_GRP_BTN,
IQS7222_REG_GRP_CHAN,
IQS7222_REG_GRP_SLDR,
+ IQS7222_REG_GRP_TPAD,
IQS7222_REG_GRP_GPIO,
IQS7222_REG_GRP_SYS,
IQS7222_NUM_REG_GRPS
};
static const char * const iqs7222_reg_grp_names[IQS7222_NUM_REG_GRPS] = {
- [IQS7222_REG_GRP_CYCLE] = "cycle",
- [IQS7222_REG_GRP_CHAN] = "channel",
- [IQS7222_REG_GRP_SLDR] = "slider",
- [IQS7222_REG_GRP_GPIO] = "gpio",
+ [IQS7222_REG_GRP_CYCLE] = "cycle-%d",
+ [IQS7222_REG_GRP_CHAN] = "channel-%d",
+ [IQS7222_REG_GRP_SLDR] = "slider-%d",
+ [IQS7222_REG_GRP_TPAD] = "trackpad",
+ [IQS7222_REG_GRP_GPIO] = "gpio-%d",
};
static const unsigned int iqs7222_max_cols[IQS7222_NUM_REG_GRPS] = {
@@ -122,6 +128,7 @@ static const unsigned int iqs7222_max_cols[IQS7222_NUM_REG_GRPS] = {
[IQS7222_REG_GRP_CHAN] = IQS7222_MAX_COLS_CHAN,
[IQS7222_REG_GRP_FILT] = IQS7222_MAX_COLS_FILT,
[IQS7222_REG_GRP_SLDR] = IQS7222_MAX_COLS_SLDR,
+ [IQS7222_REG_GRP_TPAD] = IQS7222_MAX_COLS_TPAD,
[IQS7222_REG_GRP_GPIO] = IQS7222_MAX_COLS_GPIO,
[IQS7222_REG_GRP_SYS] = IQS7222_MAX_COLS_SYS,
};
@@ -130,8 +137,10 @@ static const unsigned int iqs7222_gpio_links[] = { 2, 5, 6, };
struct iqs7222_event_desc {
const char *name;
+ u16 link;
u16 mask;
u16 val;
+ u16 strict;
u16 enable;
enum iqs7222_reg_key_id reg_key;
};
@@ -188,6 +197,93 @@ static const struct iqs7222_event_desc iqs7222_sl_events[] = {
},
};
+static const struct iqs7222_event_desc iqs7222_tp_events[] = {
+ {
+ .name = "event-press",
+ .link = BIT(7),
+ },
+ {
+ .name = "event-tap",
+ .link = BIT(0),
+ .mask = BIT(0),
+ .val = BIT(0),
+ .enable = BIT(0),
+ .reg_key = IQS7222_REG_KEY_TAP,
+ },
+ {
+ .name = "event-swipe-x-pos",
+ .link = BIT(2),
+ .mask = BIT(2) | BIT(1),
+ .val = BIT(2),
+ .strict = BIT(4),
+ .enable = BIT(1),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-swipe-y-pos",
+ .link = BIT(3),
+ .mask = BIT(3) | BIT(1),
+ .val = BIT(3),
+ .strict = BIT(3),
+ .enable = BIT(1),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-swipe-x-neg",
+ .link = BIT(4),
+ .mask = BIT(4) | BIT(1),
+ .val = BIT(4),
+ .strict = BIT(4),
+ .enable = BIT(1),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-swipe-y-neg",
+ .link = BIT(5),
+ .mask = BIT(5) | BIT(1),
+ .val = BIT(5),
+ .strict = BIT(3),
+ .enable = BIT(1),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-flick-x-pos",
+ .link = BIT(2),
+ .mask = BIT(2) | BIT(1),
+ .val = BIT(2) | BIT(1),
+ .strict = BIT(4),
+ .enable = BIT(2),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-flick-y-pos",
+ .link = BIT(3),
+ .mask = BIT(3) | BIT(1),
+ .val = BIT(3) | BIT(1),
+ .strict = BIT(3),
+ .enable = BIT(2),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-flick-x-neg",
+ .link = BIT(4),
+ .mask = BIT(4) | BIT(1),
+ .val = BIT(4) | BIT(1),
+ .strict = BIT(4),
+ .enable = BIT(2),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+ {
+ .name = "event-flick-y-neg",
+ .link = BIT(5),
+ .mask = BIT(5) | BIT(1),
+ .val = BIT(5) | BIT(1),
+ .strict = BIT(3),
+ .enable = BIT(2),
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ },
+};
+
struct iqs7222_reg_grp_desc {
u16 base;
int num_row;
@@ -524,6 +620,62 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
},
},
},
+ {
+ .prod_num = IQS7222_PROD_NUM_D,
+ .fw_major = 0,
+ .fw_minor = 37,
+ .touch_link = 1770,
+ .allow_offset = 9,
+ .event_offset = 10,
+ .comms_offset = 11,
+ .reg_grps = {
+ [IQS7222_REG_GRP_STAT] = {
+ .base = IQS7222_SYS_STATUS,
+ .num_row = 1,
+ .num_col = 7,
+ },
+ [IQS7222_REG_GRP_CYCLE] = {
+ .base = 0x8000,
+ .num_row = 7,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_GLBL] = {
+ .base = 0x8700,
+ .num_row = 1,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_BTN] = {
+ .base = 0x9000,
+ .num_row = 14,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_CHAN] = {
+ .base = 0xA000,
+ .num_row = 14,
+ .num_col = 4,
+ },
+ [IQS7222_REG_GRP_FILT] = {
+ .base = 0xAE00,
+ .num_row = 1,
+ .num_col = 2,
+ },
+ [IQS7222_REG_GRP_TPAD] = {
+ .base = 0xB000,
+ .num_row = 1,
+ .num_col = 24,
+ },
+ [IQS7222_REG_GRP_GPIO] = {
+ .base = 0xC000,
+ .num_row = 3,
+ .num_col = 3,
+ },
+ [IQS7222_REG_GRP_SYS] = {
+ .base = IQS7222_SYS_SETUP,
+ .num_row = 1,
+ .num_col = 12,
+ },
+ },
+ },
};
struct iqs7222_prop_desc {
@@ -1009,6 +1161,123 @@ static const struct iqs7222_prop_desc iqs7222_props[] = {
.label = "maximum gesture time",
},
{
+ .name = "azoteq,num-rows",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_offset = 0,
+ .reg_shift = 4,
+ .reg_width = 4,
+ .val_min = 1,
+ .val_max = 12,
+ .label = "number of rows",
+ },
+ {
+ .name = "azoteq,num-cols",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_offset = 0,
+ .reg_shift = 0,
+ .reg_width = 4,
+ .val_min = 1,
+ .val_max = 12,
+ .label = "number of columns",
+ },
+ {
+ .name = "azoteq,lower-cal-y",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_offset = 1,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "lower vertical calibration",
+ },
+ {
+ .name = "azoteq,lower-cal-x",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_offset = 1,
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "lower horizontal calibration",
+ },
+ {
+ .name = "azoteq,upper-cal-y",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_offset = 2,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "upper vertical calibration",
+ },
+ {
+ .name = "azoteq,upper-cal-x",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_offset = 2,
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "upper horizontal calibration",
+ },
+ {
+ .name = "azoteq,top-speed",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_offset = 3,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_pitch = 4,
+ .label = "top speed",
+ },
+ {
+ .name = "azoteq,bottom-speed",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_offset = 3,
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "bottom speed",
+ },
+ {
+ .name = "azoteq,gesture-min-ms",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_key = IQS7222_REG_KEY_TAP,
+ .reg_offset = 20,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_pitch = 16,
+ .label = "minimum gesture time",
+ },
+ {
+ .name = "azoteq,gesture-max-ms",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ .reg_offset = 21,
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_pitch = 16,
+ .label = "maximum gesture time",
+ },
+ {
+ .name = "azoteq,gesture-max-ms",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_key = IQS7222_REG_KEY_TAP,
+ .reg_offset = 21,
+ .reg_shift = 0,
+ .reg_width = 8,
+ .val_pitch = 16,
+ .label = "maximum gesture time",
+ },
+ {
+ .name = "azoteq,gesture-dist",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_key = IQS7222_REG_KEY_TAP,
+ .reg_offset = 22,
+ .reg_shift = 0,
+ .reg_width = 16,
+ .label = "gesture distance",
+ },
+ {
+ .name = "azoteq,gesture-dist",
+ .reg_grp = IQS7222_REG_GRP_TPAD,
+ .reg_key = IQS7222_REG_KEY_AXIAL,
+ .reg_offset = 23,
+ .reg_shift = 0,
+ .reg_width = 16,
+ .label = "gesture distance",
+ },
+ {
.name = "drive-open-drain",
.reg_grp = IQS7222_REG_GRP_GPIO,
.reg_offset = 0,
@@ -1091,16 +1360,19 @@ struct iqs7222_private {
struct gpio_desc *irq_gpio;
struct i2c_client *client;
struct input_dev *keypad;
+ struct touchscreen_properties prop;
unsigned int kp_type[IQS7222_MAX_CHAN][ARRAY_SIZE(iqs7222_kp_events)];
unsigned int kp_code[IQS7222_MAX_CHAN][ARRAY_SIZE(iqs7222_kp_events)];
unsigned int sl_code[IQS7222_MAX_SLDR][ARRAY_SIZE(iqs7222_sl_events)];
unsigned int sl_axis[IQS7222_MAX_SLDR];
+ unsigned int tp_code[ARRAY_SIZE(iqs7222_tp_events)];
u16 cycle_setup[IQS7222_MAX_CHAN / 2][IQS7222_MAX_COLS_CYCLE];
u16 glbl_setup[IQS7222_MAX_COLS_GLBL];
u16 btn_setup[IQS7222_MAX_CHAN][IQS7222_MAX_COLS_BTN];
u16 chan_setup[IQS7222_MAX_CHAN][IQS7222_MAX_COLS_CHAN];
u16 filt_setup[IQS7222_MAX_COLS_FILT];
u16 sldr_setup[IQS7222_MAX_SLDR][IQS7222_MAX_COLS_SLDR];
+ u16 tpad_setup[IQS7222_MAX_COLS_TPAD];
u16 gpio_setup[ARRAY_SIZE(iqs7222_gpio_links)][IQS7222_MAX_COLS_GPIO];
u16 sys_setup[IQS7222_MAX_COLS_SYS];
};
@@ -1127,6 +1399,9 @@ static u16 *iqs7222_setup(struct iqs7222_private *iqs7222,
case IQS7222_REG_GRP_SLDR:
return iqs7222->sldr_setup[row];
+ case IQS7222_REG_GRP_TPAD:
+ return iqs7222->tpad_setup;
+
case IQS7222_REG_GRP_GPIO:
return iqs7222->gpio_setup[row];
@@ -1381,9 +1656,6 @@ static int iqs7222_ati_trigger(struct iqs7222_private *iqs7222)
if (error)
return error;
- sys_setup &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
- sys_setup &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
-
for (i = 0; i < IQS7222_NUM_RETRIES; i++) {
/*
* Trigger ATI from streaming and normal-power modes so that
@@ -1561,8 +1833,11 @@ static int iqs7222_dev_init(struct iqs7222_private *iqs7222, int dir)
return error;
}
- if (dir == READ)
+ if (dir == READ) {
+ iqs7222->sys_setup[0] &= ~IQS7222_SYS_SETUP_INTF_MODE_MASK;
+ iqs7222->sys_setup[0] &= ~IQS7222_SYS_SETUP_PWR_MODE_MASK;
return 0;
+ }
return iqs7222_ati_trigger(iqs7222);
}
@@ -1936,6 +2211,14 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222,
ref_setup[4] = dev_desc->touch_link;
if (fwnode_property_present(chan_node, "azoteq,use-prox"))
ref_setup[4] -= 2;
+ } else if (dev_desc->reg_grps[IQS7222_REG_GRP_TPAD].num_row &&
+ fwnode_property_present(chan_node,
+ "azoteq,counts-filt-enable")) {
+ /*
+ * In the case of IQS7222D, however, the reference mode field
+ * is partially repurposed as a counts filter enable control.
+ */
+ chan_setup[0] |= IQS7222_CHAN_SETUP_0_REF_MODE_REF;
}
if (fwnode_property_present(chan_node, "azoteq,rx-enable")) {
@@ -2278,6 +2561,136 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222,
IQS7222_REG_KEY_NO_WHEEL);
}
+static int iqs7222_parse_tpad(struct iqs7222_private *iqs7222,
+ struct fwnode_handle *tpad_node, int tpad_index)
+{
+ const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
+ struct touchscreen_properties *prop = &iqs7222->prop;
+ struct i2c_client *client = iqs7222->client;
+ int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
+ int count, error, i;
+ u16 *event_mask = &iqs7222->sys_setup[dev_desc->event_offset];
+ u16 *tpad_setup = iqs7222->tpad_setup;
+ unsigned int chan_sel[12];
+
+ error = iqs7222_parse_props(iqs7222, tpad_node, tpad_index,
+ IQS7222_REG_GRP_TPAD,
+ IQS7222_REG_KEY_NONE);
+ if (error)
+ return error;
+
+ count = fwnode_property_count_u32(tpad_node, "azoteq,channel-select");
+ if (count < 0) {
+ dev_err(&client->dev, "Failed to count %s channels: %d\n",
+ fwnode_get_name(tpad_node), count);
+ return count;
+ } else if (!count || count > ARRAY_SIZE(chan_sel)) {
+ dev_err(&client->dev, "Invalid number of %s channels\n",
+ fwnode_get_name(tpad_node));
+ return -EINVAL;
+ }
+
+ error = fwnode_property_read_u32_array(tpad_node,
+ "azoteq,channel-select",
+ chan_sel, count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read %s channels: %d\n",
+ fwnode_get_name(tpad_node), error);
+ return error;
+ }
+
+ tpad_setup[6] &= ~GENMASK(num_chan - 1, 0);
+
+ for (i = 0; i < ARRAY_SIZE(chan_sel); i++) {
+ tpad_setup[8 + i] = 0;
+ if (i >= count || chan_sel[i] == U8_MAX)
+ continue;
+
+ if (chan_sel[i] >= num_chan) {
+ dev_err(&client->dev, "Invalid %s channel: %u\n",
+ fwnode_get_name(tpad_node), chan_sel[i]);
+ return -EINVAL;
+ }
+
+ /*
+ * The following fields indicate which channels participate in
+ * the trackpad, as well as each channel's relative placement.
+ */
+ tpad_setup[6] |= BIT(chan_sel[i]);
+ tpad_setup[8 + i] = chan_sel[i] * 34 + 1072;
+ }
+
+ tpad_setup[7] = dev_desc->touch_link;
+ if (fwnode_property_present(tpad_node, "azoteq,use-prox"))
+ tpad_setup[7] -= 2;
+
+ for (i = 0; i < ARRAY_SIZE(iqs7222_tp_events); i++)
+ tpad_setup[20] &= ~(iqs7222_tp_events[i].strict |
+ iqs7222_tp_events[i].enable);
+
+ for (i = 0; i < ARRAY_SIZE(iqs7222_tp_events); i++) {
+ const char *event_name = iqs7222_tp_events[i].name;
+ struct fwnode_handle *event_node;
+
+ event_node = fwnode_get_named_child_node(tpad_node, event_name);
+ if (!event_node)
+ continue;
+
+ if (fwnode_property_present(event_node,
+ "azoteq,gesture-angle-tighten"))
+ tpad_setup[20] |= iqs7222_tp_events[i].strict;
+
+ tpad_setup[20] |= iqs7222_tp_events[i].enable;
+
+ error = iqs7222_parse_event(iqs7222, event_node, tpad_index,
+ IQS7222_REG_GRP_TPAD,
+ iqs7222_tp_events[i].reg_key,
+ iqs7222_tp_events[i].link, 1566,
+ NULL,
+ &iqs7222->tp_code[i]);
+ fwnode_handle_put(event_node);
+ if (error)
+ return error;
+
+ if (!dev_desc->event_offset)
+ continue;
+
+ /*
+ * The press/release event is determined based on whether the
+ * coordinate fields report 0xFFFF and solely relies on touch
+ * or proximity interrupts to be unmasked.
+ */
+ if (i)
+ *event_mask |= IQS7222_EVENT_MASK_TPAD;
+ else if (tpad_setup[7] == dev_desc->touch_link)
+ *event_mask |= IQS7222_EVENT_MASK_TOUCH;
+ else
+ *event_mask |= IQS7222_EVENT_MASK_PROX;
+ }
+
+ if (!iqs7222->tp_code[0])
+ return 0;
+
+ input_set_abs_params(iqs7222->keypad, ABS_X,
+ 0, (tpad_setup[4] ? : 1) - 1, 0, 0);
+
+ input_set_abs_params(iqs7222->keypad, ABS_Y,
+ 0, (tpad_setup[5] ? : 1) - 1, 0, 0);
+
+ touchscreen_parse_properties(iqs7222->keypad, false, prop);
+
+ if (prop->max_x >= U16_MAX || prop->max_y >= U16_MAX) {
+ dev_err(&client->dev, "Invalid trackpad size: %u*%u\n",
+ prop->max_x, prop->max_y);
+ return -EINVAL;
+ }
+
+ tpad_setup[4] = prop->max_x + 1;
+ tpad_setup[5] = prop->max_y + 1;
+
+ return 0;
+}
+
static int (*iqs7222_parse_extra[IQS7222_NUM_REG_GRPS])
(struct iqs7222_private *iqs7222,
struct fwnode_handle *reg_grp_node,
@@ -2285,6 +2698,7 @@ static int (*iqs7222_parse_extra[IQS7222_NUM_REG_GRPS])
[IQS7222_REG_GRP_CYCLE] = iqs7222_parse_cycle,
[IQS7222_REG_GRP_CHAN] = iqs7222_parse_chan,
[IQS7222_REG_GRP_SLDR] = iqs7222_parse_sldr,
+ [IQS7222_REG_GRP_TPAD] = iqs7222_parse_tpad,
};
static int iqs7222_parse_reg_grp(struct iqs7222_private *iqs7222,
@@ -2298,7 +2712,7 @@ static int iqs7222_parse_reg_grp(struct iqs7222_private *iqs7222,
if (iqs7222_reg_grp_names[reg_grp]) {
char reg_grp_name[16];
- snprintf(reg_grp_name, sizeof(reg_grp_name), "%s-%d",
+ snprintf(reg_grp_name, sizeof(reg_grp_name),
iqs7222_reg_grp_names[reg_grp], reg_grp_index);
reg_grp_node = device_get_named_child_node(&client->dev,
@@ -2346,8 +2760,8 @@ static int iqs7222_parse_all(struct iqs7222_private *iqs7222)
continue;
/*
- * The IQS7222C exposes multiple GPIO and must be informed
- * as to which GPIO this group represents.
+ * The IQS7222C and IQS7222D expose multiple GPIO and must be
+ * informed as to which GPIO this group represents.
*/
for (j = 0; j < ARRAY_SIZE(iqs7222_gpio_links); j++)
gpio_setup[0] &= ~BIT(iqs7222_gpio_links[j]);
@@ -2480,6 +2894,41 @@ static int iqs7222_report(struct iqs7222_private *iqs7222)
iqs7222->sl_code[i][j], 0);
}
+ for (i = 0; i < dev_desc->reg_grps[IQS7222_REG_GRP_TPAD].num_row; i++) {
+ u16 tpad_pos_x = le16_to_cpu(status[4]);
+ u16 tpad_pos_y = le16_to_cpu(status[5]);
+ u16 state = le16_to_cpu(status[6]);
+
+ input_report_key(iqs7222->keypad, iqs7222->tp_code[0],
+ tpad_pos_x < U16_MAX);
+
+ if (tpad_pos_x < U16_MAX)
+ touchscreen_report_pos(iqs7222->keypad, &iqs7222->prop,
+ tpad_pos_x, tpad_pos_y, false);
+
+ if (!(le16_to_cpu(status[1]) & IQS7222_EVENT_MASK_TPAD))
+ continue;
+
+ /*
+ * Skip the press/release event, as it does not have separate
+ * status fields and is handled separately.
+ */
+ for (j = 1; j < ARRAY_SIZE(iqs7222_tp_events); j++) {
+ u16 mask = iqs7222_tp_events[j].mask;
+ u16 val = iqs7222_tp_events[j].val;
+
+ input_report_key(iqs7222->keypad,
+ iqs7222->tp_code[j],
+ (state & mask) == val);
+ }
+
+ input_sync(iqs7222->keypad);
+
+ for (j = 1; j < ARRAY_SIZE(iqs7222_tp_events); j++)
+ input_report_key(iqs7222->keypad,
+ iqs7222->tp_code[j], 0);
+ }
+
input_sync(iqs7222->keypad);
return 0;
@@ -2584,6 +3033,7 @@ static const struct of_device_id iqs7222_of_match[] = {
{ .compatible = "azoteq,iqs7222a" },
{ .compatible = "azoteq,iqs7222b" },
{ .compatible = "azoteq,iqs7222c" },
+ { .compatible = "azoteq,iqs7222d" },
{ }
};
MODULE_DEVICE_TABLE(of, iqs7222_of_match);
@@ -2598,5 +3048,5 @@ static struct i2c_driver iqs7222_i2c_driver = {
module_i2c_driver(iqs7222_i2c_driver);
MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
-MODULE_DESCRIPTION("Azoteq IQS7222A/B/C Capacitive Touch Controller");
+MODULE_DESCRIPTION("Azoteq IQS7222A/B/C/D Capacitive Touch Controller");
MODULE_LICENSE("GPL");
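For the new IQS7222D trackpad support, the driver treats the pad as a single-touch pointer: ABS_X/ABS_Y ranges come from the configured resolution, touchscreen_parse_properties() layers any touchscreen-size/invert/swap properties on top, and each gesture is matched by comparing the event status word against the per-event mask/val pair in iqs7222_tp_events[]. A rough, stand-alone sketch of that matching step (the two-entry table and the keycodes below are illustrative, not the full table from the patch):

#include <linux/bits.h>
#include <linux/input.h>
#include <linux/input/touchscreen.h>
#include <linux/kernel.h>
#include <linux/limits.h>

struct tp_event {
	unsigned int code;	/* keycode bound from the firmware node */
	u16 mask;		/* status bits that select this gesture */
	u16 val;		/* value those bits must take */
};

/*
 * Illustrative subset: swipe and flick along +X differ only in BIT(1),
 * which is the mask/val scheme the full iqs7222_tp_events[] table encodes.
 */
static const struct tp_event tp_events[] = {
	{ .code = KEY_UP,   .mask = BIT(2) | BIT(1), .val = BIT(2) },
	{ .code = KEY_DOWN, .mask = BIT(2) | BIT(1), .val = BIT(2) | BIT(1) },
};

static void tp_report(struct input_dev *input,
		      const struct touchscreen_properties *prop,
		      u16 x, u16 y, u16 state)
{
	int i;

	/* 0xFFFF in the coordinate fields means "no contact". */
	input_report_key(input, BTN_TOUCH, x < U16_MAX);
	if (x < U16_MAX)
		touchscreen_report_pos(input, prop, x, y, false);

	for (i = 0; i < ARRAY_SIZE(tp_events); i++)
		input_report_key(input, tp_events[i].code,
				 (state & tp_events[i].mask) == tp_events[i].val);

	input_sync(input);
}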
diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c
index 76a190b2220b..662b436d765b 100644
--- a/drivers/input/misc/mma8450.c
+++ b/drivers/input/misc/mma8450.c
@@ -11,7 +11,7 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/input.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#define MMA8450_DRV_NAME "mma8450"
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index 74d77d8aaeff..ba747c5b2b5f 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index 04cb87efd799..5c288fe7accf 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 89fb137e3715..c406a1cca5c4 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -12,7 +12,6 @@
#include <linux/regmap.h>
#include <linux/log2.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#define PON_CNTL_1 0x1C
#define PON_CNTL_PULL_UP BIT(7)
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 3cf1812384e6..1e731d8397c6 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -132,13 +132,8 @@ static int pwm_beeper_probe(struct platform_device *pdev)
return -ENOMEM;
beeper->pwm = devm_pwm_get(dev, NULL);
- if (IS_ERR(beeper->pwm)) {
- error = PTR_ERR(beeper->pwm);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to request PWM device: %d\n",
- error);
- return error;
- }
+ if (IS_ERR(beeper->pwm))
+ return dev_err_probe(dev, PTR_ERR(beeper->pwm), "Failed to request PWM device\n");
/* Sync up PWM state and ensure it is off. */
pwm_init_state(beeper->pwm, &state);
@@ -151,13 +146,9 @@ static int pwm_beeper_probe(struct platform_device *pdev)
}
beeper->amplifier = devm_regulator_get(dev, "amp");
- if (IS_ERR(beeper->amplifier)) {
- error = PTR_ERR(beeper->amplifier);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to get 'amp' regulator: %d\n",
- error);
- return error;
- }
+ if (IS_ERR(beeper->amplifier))
+ return dev_err_probe(dev, PTR_ERR(beeper->amplifier),
+ "Failed to get 'amp' regulator\n");
INIT_WORK(&beeper->work, pwm_beeper_work);
diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c
index 2ba035299db8..acac79c488aa 100644
--- a/drivers/input/misc/pwm-vibra.c
+++ b/drivers/input/misc/pwm-vibra.c
@@ -15,7 +15,7 @@
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/pwm.h>
@@ -140,32 +140,20 @@ static int pwm_vibrator_probe(struct platform_device *pdev)
return -ENOMEM;
vibrator->vcc = devm_regulator_get(&pdev->dev, "vcc");
- err = PTR_ERR_OR_ZERO(vibrator->vcc);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to request regulator: %d\n",
- err);
- return err;
- }
+ if (IS_ERR(vibrator->vcc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(vibrator->vcc),
+ "Failed to request regulator\n");
vibrator->enable_gpio = devm_gpiod_get_optional(&pdev->dev, "enable",
GPIOD_OUT_LOW);
- err = PTR_ERR_OR_ZERO(vibrator->enable_gpio);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to request enable gpio: %d\n",
- err);
- return err;
- }
+ if (IS_ERR(vibrator->enable_gpio))
+ return dev_err_probe(&pdev->dev, PTR_ERR(vibrator->enable_gpio),
+ "Failed to request enable gpio\n");
vibrator->pwm = devm_pwm_get(&pdev->dev, "enable");
- err = PTR_ERR_OR_ZERO(vibrator->pwm);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "Failed to request main pwm: %d\n",
- err);
- return err;
- }
+ if (IS_ERR(vibrator->pwm))
+ return dev_err_probe(&pdev->dev, PTR_ERR(vibrator->pwm),
+ "Failed to request main pwm\n");
INIT_WORK(&vibrator->play_work, pwm_vibrator_play_work);
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 22ec62083065..e94cab8133be 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -236,12 +236,8 @@ static int rotary_encoder_probe(struct platform_device *pdev)
device_property_read_bool(dev, "rotary-encoder,relative-axis");
encoder->gpios = devm_gpiod_get_array(dev, NULL, GPIOD_IN);
- if (IS_ERR(encoder->gpios)) {
- err = PTR_ERR(encoder->gpios);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "unable to get gpios: %d\n", err);
- return err;
- }
+ if (IS_ERR(encoder->gpios))
+ return dev_err_probe(dev, PTR_ERR(encoder->gpios), "unable to get gpios\n");
if (encoder->gpios->ndescs < 2) {
dev_err(dev, "not enough gpios found\n");
return -EINVAL;
@@ -255,7 +251,6 @@ static int rotary_encoder_probe(struct platform_device *pdev)
input->name = pdev->name;
input->id.bustype = BUS_HOST;
- input->dev.parent = dev;
if (encoder->relative_axis)
input_set_capability(input, EV_REL, encoder->axis);
diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c
index cdcb7737c46a..e5dd84725c6e 100644
--- a/drivers/input/misc/sparcspkr.c
+++ b/drivers/input/misc/sparcspkr.c
@@ -9,7 +9,8 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/io.h>
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 0cff742302a9..148a601396f9 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1221,13 +1221,8 @@ static int elan_probe(struct i2c_client *client)
mutex_init(&data->sysfs_mutex);
data->vcc = devm_regulator_get(dev, "vcc");
- if (IS_ERR(data->vcc)) {
- error = PTR_ERR(data->vcc);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to get 'vcc' regulator: %d\n",
- error);
- return error;
- }
+ if (IS_ERR(data->vcc))
+ return dev_err_probe(dev, PTR_ERR(data->vcc), "Failed to get 'vcc' regulator\n");
error = regulator_enable(data->vcc);
if (error) {
diff --git a/drivers/input/mouse/psmouse-smbus.c b/drivers/input/mouse/psmouse-smbus.c
index 2a2459b1b4f2..7b13de979908 100644
--- a/drivers/input/mouse/psmouse-smbus.c
+++ b/drivers/input/mouse/psmouse-smbus.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/libps2.h>
@@ -118,13 +119,18 @@ static psmouse_ret_t psmouse_smbus_process_byte(struct psmouse *psmouse)
return PSMOUSE_FULL_PACKET;
}
-static int psmouse_smbus_reconnect(struct psmouse *psmouse)
+static void psmouse_activate_smbus_mode(struct psmouse_smbus_dev *smbdev)
{
- struct psmouse_smbus_dev *smbdev = psmouse->private;
-
- if (smbdev->need_deactivate)
- psmouse_deactivate(psmouse);
+ if (smbdev->need_deactivate) {
+ psmouse_deactivate(smbdev->psmouse);
+ /* Give the device time to switch into SMBus mode */
+ msleep(30);
+ }
+}
+static int psmouse_smbus_reconnect(struct psmouse *psmouse)
+{
+ psmouse_activate_smbus_mode(psmouse->private);
return 0;
}
@@ -257,8 +263,7 @@ int psmouse_smbus_init(struct psmouse *psmouse,
}
}
- if (need_deactivate)
- psmouse_deactivate(psmouse);
+ psmouse_activate_smbus_mode(smbdev);
psmouse->private = smbdev;
psmouse->protocol_handler = psmouse_smbus_process_byte;
diff --git a/drivers/input/serio/apbps2.c b/drivers/input/serio/apbps2.c
index 513d96e40e0e..3f6866d39b86 100644
--- a/drivers/input/serio/apbps2.c
+++ b/drivers/input/serio/apbps2.c
@@ -14,11 +14,11 @@
* Contributors: Daniel Hellstrom <daniel@gaisler.com>
*/
#include <linux/platform_device.h>
-#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/device.h>
#include <linux/delay.h>
diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h
index 028e45bd050b..1724d6cb8649 100644
--- a/drivers/input/serio/i8042-acpipnpio.h
+++ b/drivers/input/serio/i8042-acpipnpio.h
@@ -1281,6 +1281,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
},
+ /* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "PD5x_7xPNP_PNR_PNN_PNT"),
+ },
+ .driver_data = (void *)(SERIO_QUIRK_NOAUX)
+ },
{
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "X170SM"),
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h
index c712c1fe0605..b68793bf05c8 100644
--- a/drivers/input/serio/i8042-sparcio.h
+++ b/drivers/input/serio/i8042-sparcio.h
@@ -2,7 +2,9 @@
#ifndef _I8042_SPARCIO_H
#define _I8042_SPARCIO_H
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/types.h>
#include <asm/io.h>
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index ce420eb1f51b..e8a9709f32eb 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -101,12 +101,12 @@ static int rpckbd_probe(struct platform_device *dev)
int tx_irq, rx_irq;
rx_irq = platform_get_irq(dev, 0);
- if (rx_irq <= 0)
- return rx_irq < 0 ? rx_irq : -ENXIO;
+ if (rx_irq < 0)
+ return rx_irq;
tx_irq = platform_get_irq(dev, 1);
- if (tx_irq <= 0)
- return tx_irq < 0 ? tx_irq : -ENXIO;
+ if (tx_irq < 0)
+ return tx_irq;
serio = kzalloc(sizeof(struct serio), GFP_KERNEL);
rpckbd = kzalloc(sizeof(*rpckbd), GFP_KERNEL);
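The rpckbd change leans on the current platform_get_irq() contract: it returns either a valid IRQ number or a negative errno (and logs the failure itself, except for probe deferral), but never 0, so the old "<= 0 maps to -ENXIO" fallback is dead code. The resulting idiom, sketched with generic names:

static int quux_get_irqs(struct platform_device *pdev, int *rx_irq, int *tx_irq)
{
	*rx_irq = platform_get_irq(pdev, 0);
	if (*rx_irq < 0)
		return *rx_irq;	/* negative errno, already reported by the core */

	*tx_irq = platform_get_irq(pdev, 1);
	if (*tx_irq < 0)
		return *tx_irq;

	return 0;
}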
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 7f7ef0e3a749..1db3f30011c4 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -103,7 +103,7 @@ static int serport_ldisc_open(struct tty_struct *tty)
static void serport_ldisc_close(struct tty_struct *tty)
{
- struct serport *serport = (struct serport *) tty->disc_data;
+ struct serport *serport = tty->disc_data;
kfree(serport);
}
@@ -114,10 +114,10 @@ static void serport_ldisc_close(struct tty_struct *tty)
* 'interrupt' routine.
*/
-static void serport_ldisc_receive(struct tty_struct *tty,
- const unsigned char *cp, const char *fp, int count)
+static void serport_ldisc_receive(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count)
{
- struct serport *serport = (struct serport*) tty->disc_data;
+ struct serport *serport = tty->disc_data;
unsigned long flags;
unsigned int ch_flags = 0;
int i;
@@ -158,10 +158,10 @@ out:
*/
static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file,
- unsigned char *kbuf, size_t nr,
- void **cookie, unsigned long offset)
+ u8 *kbuf, size_t nr, void **cookie,
+ unsigned long offset)
{
- struct serport *serport = (struct serport*) tty->disc_data;
+ struct serport *serport = tty->disc_data;
struct serio *serio;
if (test_and_set_bit(SERPORT_BUSY, &serport->flags))
@@ -245,7 +245,7 @@ static int serport_ldisc_compat_ioctl(struct tty_struct *tty,
static void serport_ldisc_hangup(struct tty_struct *tty)
{
- struct serport *serport = (struct serport *) tty->disc_data;
+ struct serport *serport = tty->disc_data;
unsigned long flags;
spin_lock_irqsave(&serport->lock, flags);
@@ -257,7 +257,7 @@ static void serport_ldisc_hangup(struct tty_struct *tty)
static void serport_ldisc_write_wakeup(struct tty_struct * tty)
{
- struct serport *serport = (struct serport *) tty->disc_data;
+ struct serport *serport = tty->disc_data;
unsigned long flags;
spin_lock_irqsave(&serport->lock, flags);
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index 960d7601fbc8..f3d28da70b75 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -14,10 +14,10 @@
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#define DRIVER_NAME "xilinx_ps2"
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index c2cbd332af1d..e3e2324547b9 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -655,10 +655,10 @@ config TOUCHSCREEN_MTOUCH
module will be called mtouch.
config TOUCHSCREEN_NOVATEK_NVT_TS
- tristate "Novatek NVT-ts touchscreen support"
+ tristate "Novatek NT11205 touchscreen support"
depends on I2C
help
- Say Y here if you have a Novatek NVT-ts touchscreen.
+ Say Y here if you have a Novatek NT11205 touchscreen.
If unsure, say N.
To compile this driver as a module, choose M here: the
@@ -1365,6 +1365,16 @@ config TOUCHSCREEN_IQS5XX
To compile this driver as a module, choose M here: the
module will be called iqs5xx.
+config TOUCHSCREEN_IQS7211
+ tristate "Azoteq IQS7210A/7211A/E trackpad/touchscreen controller"
+ depends on I2C
+ help
+ Say Y to enable support for the Azoteq IQS7210A/7211A/E
+ family of trackpad/touchscreen controllers.
+
+ To compile this driver as a module, choose M here: the
+ module will be called iqs7211.
+
config TOUCHSCREEN_ZINITIX
tristate "Zinitix touchscreen support"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 159cd5136fdb..62bd24f3ac8e 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -115,5 +115,6 @@ obj-$(CONFIG_TOUCHSCREEN_COLIBRI_VF50) += colibri-vf50-ts.o
obj-$(CONFIG_TOUCHSCREEN_ROHM_BU21023) += rohm_bu21023.o
obj-$(CONFIG_TOUCHSCREEN_RASPBERRYPI_FW) += raspberrypi-ts.o
obj-$(CONFIG_TOUCHSCREEN_IQS5XX) += iqs5xx.o
+obj-$(CONFIG_TOUCHSCREEN_IQS7211) += iqs7211.o
obj-$(CONFIG_TOUCHSCREEN_ZINITIX) += zinitix.o
obj-$(CONFIG_TOUCHSCREEN_HIMAX_HX83112B) += himax_hx83112b.o
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index 85332cfaa29d..652439a79e21 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -410,31 +410,32 @@ static int bu21013_probe(struct i2c_client *client)
struct input_dev *in_dev;
struct input_absinfo *info;
u32 max_x = 0, max_y = 0;
+ struct device *dev = &client->dev;
int error;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA)) {
- dev_err(&client->dev, "i2c smbus byte data not supported\n");
+ dev_err(dev, "i2c smbus byte data not supported\n");
return -EIO;
}
if (!client->irq) {
- dev_err(&client->dev, "No IRQ set up\n");
+ dev_err(dev, "No IRQ set up\n");
return -EINVAL;
}
- ts = devm_kzalloc(&client->dev, sizeof(*ts), GFP_KERNEL);
+ ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL);
if (!ts)
return -ENOMEM;
ts->client = client;
- ts->x_flip = device_property_read_bool(&client->dev, "rohm,flip-x");
- ts->y_flip = device_property_read_bool(&client->dev, "rohm,flip-y");
+ ts->x_flip = device_property_read_bool(dev, "rohm,flip-x");
+ ts->y_flip = device_property_read_bool(dev, "rohm,flip-y");
- in_dev = devm_input_allocate_device(&client->dev);
+ in_dev = devm_input_allocate_device(dev);
if (!in_dev) {
- dev_err(&client->dev, "device memory alloc failed\n");
+ dev_err(dev, "device memory alloc failed\n");
return -ENOMEM;
}
ts->in_dev = in_dev;
@@ -444,8 +445,8 @@ static int bu21013_probe(struct i2c_client *client)
in_dev->name = DRIVER_TP;
in_dev->id.bustype = BUS_I2C;
- device_property_read_u32(&client->dev, "rohm,touch-max-x", &max_x);
- device_property_read_u32(&client->dev, "rohm,touch-max-y", &max_y);
+ device_property_read_u32(dev, "rohm,touch-max-x", &max_x);
+ device_property_read_u32(dev, "rohm,touch-max-y", &max_y);
input_set_abs_params(in_dev, ABS_MT_POSITION_X, 0, max_x, 0, 0);
input_set_abs_params(in_dev, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
@@ -454,14 +455,14 @@ static int bu21013_probe(struct i2c_client *client)
/* Adjust for the legacy "flip" properties, if present */
if (!ts->props.invert_x &&
- device_property_read_bool(&client->dev, "rohm,flip-x")) {
+ device_property_read_bool(dev, "rohm,flip-x")) {
info = &in_dev->absinfo[ABS_MT_POSITION_X];
info->maximum -= info->minimum;
info->minimum = 0;
}
if (!ts->props.invert_y &&
- device_property_read_bool(&client->dev, "rohm,flip-y")) {
+ device_property_read_bool(dev, "rohm,flip-y")) {
info = &in_dev->absinfo[ABS_MT_POSITION_Y];
info->maximum -= info->minimum;
info->minimum = 0;
@@ -471,55 +472,46 @@ static int bu21013_probe(struct i2c_client *client)
INPUT_MT_DIRECT | INPUT_MT_TRACK |
INPUT_MT_DROP_UNUSED);
if (error) {
- dev_err(&client->dev, "failed to initialize MT slots");
+ dev_err(dev, "failed to initialize MT slots");
return error;
}
- ts->regulator = devm_regulator_get(&client->dev, "avdd");
+ ts->regulator = devm_regulator_get(dev, "avdd");
if (IS_ERR(ts->regulator)) {
- dev_err(&client->dev, "regulator_get failed\n");
+ dev_err(dev, "regulator_get failed\n");
return PTR_ERR(ts->regulator);
}
error = regulator_enable(ts->regulator);
if (error) {
- dev_err(&client->dev, "regulator enable failed\n");
+ dev_err(dev, "regulator enable failed\n");
return error;
}
- error = devm_add_action_or_reset(&client->dev, bu21013_power_off, ts);
+ error = devm_add_action_or_reset(dev, bu21013_power_off, ts);
if (error) {
- dev_err(&client->dev, "failed to install power off handler\n");
+ dev_err(dev, "failed to install power off handler\n");
return error;
}
/* Named "CS" on the chip, DT binding is "reset" */
- ts->cs_gpiod = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
- error = PTR_ERR_OR_ZERO(ts->cs_gpiod);
- if (error) {
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev, "failed to get CS GPIO\n");
- return error;
- }
+ ts->cs_gpiod = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ts->cs_gpiod))
+ return dev_err_probe(dev, PTR_ERR(ts->cs_gpiod), "failed to get CS GPIO\n");
+
gpiod_set_consumer_name(ts->cs_gpiod, "BU21013 CS");
- error = devm_add_action_or_reset(&client->dev,
- bu21013_disable_chip, ts);
+ error = devm_add_action_or_reset(dev, bu21013_disable_chip, ts);
if (error) {
- dev_err(&client->dev,
- "failed to install chip disable handler\n");
+ dev_err(dev, "failed to install chip disable handler\n");
return error;
}
/* Named "INT" on the chip, DT binding is "touch" */
- ts->int_gpiod = devm_gpiod_get_optional(&client->dev,
- "touch", GPIOD_IN);
+ ts->int_gpiod = devm_gpiod_get_optional(dev, "touch", GPIOD_IN);
error = PTR_ERR_OR_ZERO(ts->int_gpiod);
- if (error) {
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev, "failed to get INT GPIO\n");
- return error;
- }
+ if (error)
+ return dev_err_probe(dev, error, "failed to get INT GPIO\n");
if (ts->int_gpiod)
gpiod_set_consumer_name(ts->int_gpiod, "BU21013 INT");
@@ -527,22 +519,20 @@ static int bu21013_probe(struct i2c_client *client)
/* configure the touch panel controller */
error = bu21013_init_chip(ts);
if (error) {
- dev_err(&client->dev, "error in bu21013 config\n");
+ dev_err(dev, "error in bu21013 config\n");
return error;
}
- error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, bu21013_gpio_irq,
+ error = devm_request_threaded_irq(dev, client->irq, NULL, bu21013_gpio_irq,
IRQF_ONESHOT, DRIVER_TP, ts);
if (error) {
- dev_err(&client->dev, "request irq %d failed\n",
- client->irq);
+ dev_err(dev, "request irq %d failed\n", client->irq);
return error;
}
error = input_register_device(in_dev);
if (error) {
- dev_err(&client->dev, "failed to register input device\n");
+ dev_err(dev, "failed to register input device\n");
return error;
}
diff --git a/drivers/input/touchscreen/bu21029_ts.c b/drivers/input/touchscreen/bu21029_ts.c
index c8126d2efe95..e1dfbd92ab64 100644
--- a/drivers/input/touchscreen/bu21029_ts.c
+++ b/drivers/input/touchscreen/bu21029_ts.c
@@ -333,6 +333,7 @@ static void bu21029_stop_chip(struct input_dev *dev)
static int bu21029_probe(struct i2c_client *client)
{
+ struct device *dev = &client->dev;
struct bu21029_ts_data *bu21029;
struct input_dev *in_dev;
int error;
@@ -341,45 +342,33 @@ static int bu21029_probe(struct i2c_client *client)
I2C_FUNC_SMBUS_WRITE_BYTE |
I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
- dev_err(&client->dev,
- "i2c functionality support is not sufficient\n");
+ dev_err(dev, "i2c functionality support is not sufficient\n");
return -EIO;
}
- bu21029 = devm_kzalloc(&client->dev, sizeof(*bu21029), GFP_KERNEL);
+ bu21029 = devm_kzalloc(dev, sizeof(*bu21029), GFP_KERNEL);
if (!bu21029)
return -ENOMEM;
- error = device_property_read_u32(&client->dev, "rohm,x-plate-ohms",
- &bu21029->x_plate_ohms);
+ error = device_property_read_u32(dev, "rohm,x-plate-ohms", &bu21029->x_plate_ohms);
if (error) {
- dev_err(&client->dev,
- "invalid 'x-plate-ohms' supplied: %d\n", error);
+ dev_err(dev, "invalid 'x-plate-ohms' supplied: %d\n", error);
return error;
}
- bu21029->vdd = devm_regulator_get(&client->dev, "vdd");
- if (IS_ERR(bu21029->vdd)) {
- error = PTR_ERR(bu21029->vdd);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "failed to acquire 'vdd' supply: %d\n", error);
- return error;
- }
+ bu21029->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(bu21029->vdd))
+ return dev_err_probe(dev, PTR_ERR(bu21029->vdd),
+ "failed to acquire 'vdd' supply\n");
- bu21029->reset_gpios = devm_gpiod_get_optional(&client->dev,
- "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(bu21029->reset_gpios)) {
- error = PTR_ERR(bu21029->reset_gpios);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "failed to acquire 'reset' gpio: %d\n", error);
- return error;
- }
+ bu21029->reset_gpios = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(bu21029->reset_gpios))
+ return dev_err_probe(dev, PTR_ERR(bu21029->reset_gpios),
+ "failed to acquire 'reset' gpio\n");
- in_dev = devm_input_allocate_device(&client->dev);
+ in_dev = devm_input_allocate_device(dev);
if (!in_dev) {
- dev_err(&client->dev, "unable to allocate input device\n");
+ dev_err(dev, "unable to allocate input device\n");
return -ENOMEM;
}
@@ -400,20 +389,18 @@ static int bu21029_probe(struct i2c_client *client)
input_set_drvdata(in_dev, bu21029);
- error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, bu21029_touch_soft_irq,
+ error = devm_request_threaded_irq(dev, client->irq, NULL,
+ bu21029_touch_soft_irq,
IRQF_ONESHOT | IRQF_NO_AUTOEN,
DRIVER_NAME, bu21029);
if (error) {
- dev_err(&client->dev,
- "unable to request touch irq: %d\n", error);
+ dev_err(dev, "unable to request touch irq: %d\n", error);
return error;
}
error = input_register_device(in_dev);
if (error) {
- dev_err(&client->dev,
- "unable to register input device: %d\n", error);
+ dev_err(dev, "unable to register input device: %d\n", error);
return error;
}
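
The common thread in the probe() hunks above, and in most of the hunks that follow, is replacing the open-coded -EPROBE_DEFER filtering around dev_err() with dev_err_probe(), which logs the failure, quietly records the reason when the error is -EPROBE_DEFER, and returns the error code in a single statement. A minimal sketch of the conversion, using a hypothetical foo_probe() and supply name purely for illustration (not part of any driver touched here):

	/* Before: the deferral case has to be special-cased by hand. */
	static int foo_probe(struct i2c_client *client)
	{
		struct device *dev = &client->dev;
		struct regulator *vdd;
		int error;

		vdd = devm_regulator_get(dev, "vdd");
		if (IS_ERR(vdd)) {
			error = PTR_ERR(vdd);
			if (error != -EPROBE_DEFER)
				dev_err(dev, "failed to get 'vdd': %d\n", error);
			return error;
		}

		return 0;
	}

	/* After: dev_err_probe() handles -EPROBE_DEFER and returns the error. */
	static int foo_probe(struct i2c_client *client)
	{
		struct device *dev = &client->dev;
		struct regulator *vdd;

		vdd = devm_regulator_get(dev, "vdd");
		if (IS_ERR(vdd))
			return dev_err_probe(dev, PTR_ERR(vdd),
					     "failed to get 'vdd' supply\n");

		return 0;
	}
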
diff --git a/drivers/input/touchscreen/chipone_icn8318.c b/drivers/input/touchscreen/chipone_icn8318.c
index 9fbeaf17f00b..d6876d10b252 100644
--- a/drivers/input/touchscreen/chipone_icn8318.c
+++ b/drivers/input/touchscreen/chipone_icn8318.c
@@ -191,12 +191,8 @@ static int icn8318_probe(struct i2c_client *client)
return -ENOMEM;
data->wake_gpio = devm_gpiod_get(dev, "wake", GPIOD_OUT_LOW);
- if (IS_ERR(data->wake_gpio)) {
- error = PTR_ERR(data->wake_gpio);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Error getting wake gpio: %d\n", error);
- return error;
- }
+ if (IS_ERR(data->wake_gpio))
+ return dev_err_probe(dev, PTR_ERR(data->wake_gpio), "Error getting wake gpio\n");
input = devm_input_allocate_device(dev);
if (!input)
diff --git a/drivers/input/touchscreen/cy8ctma140.c b/drivers/input/touchscreen/cy8ctma140.c
index 967ecde23e83..ea3895167b82 100644
--- a/drivers/input/touchscreen/cy8ctma140.c
+++ b/drivers/input/touchscreen/cy8ctma140.c
@@ -258,12 +258,8 @@ static int cy8ctma140_probe(struct i2c_client *client)
ts->regulators[1].supply = "vdd";
error = devm_regulator_bulk_get(dev, ARRAY_SIZE(ts->regulators),
ts->regulators);
- if (error) {
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to get regulators %d\n",
- error);
- return error;
- }
+ if (error)
+ return dev_err_probe(dev, error, "Failed to get regulators\n");
error = cy8ctma140_power_up(ts);
if (error)
diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c
index b461ded946fc..db5a885ecd72 100644
--- a/drivers/input/touchscreen/cyttsp5.c
+++ b/drivers/input/touchscreen/cyttsp5.c
@@ -18,8 +18,8 @@
#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <asm/unaligned.h>
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 795c7dad22bf..457d53337fbb 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -1168,13 +1168,9 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client)
tsdata->max_support_points = chip_data->max_support_points;
tsdata->vcc = devm_regulator_get(&client->dev, "vcc");
- if (IS_ERR(tsdata->vcc)) {
- error = PTR_ERR(tsdata->vcc);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "failed to request regulator: %d\n", error);
- return error;
- }
+ if (IS_ERR(tsdata->vcc))
+ return dev_err_probe(&client->dev, PTR_ERR(tsdata->vcc),
+ "failed to request regulator\n");
tsdata->iovcc = devm_regulator_get(&client->dev, "iovcc");
if (IS_ERR(tsdata->iovcc)) {
diff --git a/drivers/input/touchscreen/ektf2127.c b/drivers/input/touchscreen/ektf2127.c
index fd8724a3c19f..cc3103b9cbfb 100644
--- a/drivers/input/touchscreen/ektf2127.c
+++ b/drivers/input/touchscreen/ektf2127.c
@@ -264,12 +264,8 @@ static int ektf2127_probe(struct i2c_client *client)
/* This requests the gpio *and* turns on the touchscreen controller */
ts->power_gpios = devm_gpiod_get(dev, "power", GPIOD_OUT_HIGH);
- if (IS_ERR(ts->power_gpios)) {
- error = PTR_ERR(ts->power_gpios);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Error getting power gpio: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->power_gpios))
+ return dev_err_probe(dev, PTR_ERR(ts->power_gpios), "Error getting power gpio\n");
input = devm_input_allocate_device(dev);
if (!input)
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 2da1db64126d..a1af3de9f310 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1438,24 +1438,14 @@ static int elants_i2c_probe(struct i2c_client *client)
i2c_set_clientdata(client, ts);
ts->vcc33 = devm_regulator_get(&client->dev, "vcc33");
- if (IS_ERR(ts->vcc33)) {
- error = PTR_ERR(ts->vcc33);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "Failed to get 'vcc33' regulator: %d\n",
- error);
- return error;
- }
+ if (IS_ERR(ts->vcc33))
+ return dev_err_probe(&client->dev, PTR_ERR(ts->vcc33),
+ "Failed to get 'vcc33' regulator\n");
ts->vccio = devm_regulator_get(&client->dev, "vccio");
- if (IS_ERR(ts->vccio)) {
- error = PTR_ERR(ts->vccio);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "Failed to get 'vccio' regulator: %d\n",
- error);
- return error;
- }
+ if (IS_ERR(ts->vccio))
+ return dev_err_probe(&client->dev, PTR_ERR(ts->vccio),
+ "Failed to get 'vccio' regulator\n");
ts->reset_gpio = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ts->reset_gpio)) {
diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c
index 4af4c1e5d0da..4c0d99aae9e0 100644
--- a/drivers/input/touchscreen/exc3000.c
+++ b/drivers/input/touchscreen/exc3000.c
@@ -7,6 +7,7 @@
* minimal implementation based on egalax_ts.c and egalax_i2c.c
*/
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -18,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/regulator/consumer.h>
#include <linux/sizes.h>
#include <linux/timer.h>
#include <asm/unaligned.h>
@@ -360,6 +362,12 @@ static int exc3000_probe(struct i2c_client *client)
if (IS_ERR(data->reset))
return PTR_ERR(data->reset);
+ /* For proper reset sequence, enable power while reset asserted */
+ error = devm_regulator_get_enable(&client->dev, "vdd");
+ if (error && error != -ENODEV)
+ return dev_err_probe(&client->dev, error,
+ "failed to request vdd regulator\n");
+
if (data->reset) {
msleep(EXC3000_RESET_MS);
gpiod_set_value_cansleep(data->reset, 0);
@@ -454,10 +462,19 @@ static const struct of_device_id exc3000_of_match[] = {
MODULE_DEVICE_TABLE(of, exc3000_of_match);
#endif
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id exc3000_acpi_match[] = {
+ { "EGA00001", .driver_data = (kernel_ulong_t)&exc3000_info[EETI_EXC80H60] },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, exc3000_acpi_match);
+#endif
+
static struct i2c_driver exc3000_driver = {
.driver = {
.name = "exc3000",
.of_match_table = of_match_ptr(exc3000_of_match),
+ .acpi_match_table = ACPI_PTR(exc3000_acpi_match),
},
.id_table = exc3000_id,
.probe = exc3000_probe,
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index f5aa240739f9..da9954d6df44 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -935,7 +935,6 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
*/
static int goodix_get_gpio_config(struct goodix_ts_data *ts)
{
- int error;
struct device *dev;
struct gpio_desc *gpiod;
bool added_acpi_mappings = false;
@@ -951,33 +950,20 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
ts->gpiod_rst_flags = GPIOD_IN;
ts->avdd28 = devm_regulator_get(dev, "AVDD28");
- if (IS_ERR(ts->avdd28)) {
- error = PTR_ERR(ts->avdd28);
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "Failed to get AVDD28 regulator: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->avdd28))
+ return dev_err_probe(dev, PTR_ERR(ts->avdd28), "Failed to get AVDD28 regulator\n");
ts->vddio = devm_regulator_get(dev, "VDDIO");
- if (IS_ERR(ts->vddio)) {
- error = PTR_ERR(ts->vddio);
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "Failed to get VDDIO regulator: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->vddio))
+ return dev_err_probe(dev, PTR_ERR(ts->vddio), "Failed to get VDDIO regulator\n");
retry_get_irq_gpio:
/* Get the interrupt GPIO pin number */
gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_INT_NAME, GPIOD_IN);
- if (IS_ERR(gpiod)) {
- error = PTR_ERR(gpiod);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to get %s GPIO: %d\n",
- GOODIX_GPIO_INT_NAME, error);
- return error;
- }
+ if (IS_ERR(gpiod))
+ return dev_err_probe(dev, PTR_ERR(gpiod), "Failed to get %s GPIO\n",
+ GOODIX_GPIO_INT_NAME);
+
if (!gpiod && has_acpi_companion(dev) && !added_acpi_mappings) {
added_acpi_mappings = true;
if (goodix_add_acpi_gpio_mappings(ts) == 0)
@@ -988,13 +974,9 @@ retry_get_irq_gpio:
/* Get the reset line GPIO pin number */
gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, ts->gpiod_rst_flags);
- if (IS_ERR(gpiod)) {
- error = PTR_ERR(gpiod);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to get %s GPIO: %d\n",
- GOODIX_GPIO_RST_NAME, error);
- return error;
- }
+ if (IS_ERR(gpiod))
+ return dev_err_probe(dev, PTR_ERR(gpiod), "Failed to get %s GPIO\n",
+ GOODIX_GPIO_RST_NAME);
ts->gpiod_rst = gpiod;
@@ -1517,6 +1499,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
static const struct acpi_device_id goodix_acpi_match[] = {
{ "GDIX1001", 0 },
{ "GDIX1002", 0 },
+ { "GDX9110", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index f7cd773f7292..ad6828e4f2e2 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -8,8 +8,8 @@
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
diff --git a/drivers/input/touchscreen/iqs5xx.c b/drivers/input/touchscreen/iqs5xx.c
index 0aa9d6492df8..b4768b66eb10 100644
--- a/drivers/input/touchscreen/iqs5xx.c
+++ b/drivers/input/touchscreen/iqs5xx.c
@@ -23,8 +23,8 @@
#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
diff --git a/drivers/input/touchscreen/iqs7211.c b/drivers/input/touchscreen/iqs7211.c
new file mode 100644
index 000000000000..dc084f873762
--- /dev/null
+++ b/drivers/input/touchscreen/iqs7211.c
@@ -0,0 +1,2557 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Azoteq IQS7210A/7211A/E Trackpad/Touchscreen Controller
+ *
+ * Copyright (C) 2023 Jeff LaBundy <jeff@labundy.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define IQS7211_PROD_NUM 0x00
+
+#define IQS7211_EVENT_MASK_ALL GENMASK(14, 8)
+#define IQS7211_EVENT_MASK_ALP BIT(13)
+#define IQS7211_EVENT_MASK_BTN BIT(12)
+#define IQS7211_EVENT_MASK_ATI BIT(11)
+#define IQS7211_EVENT_MASK_MOVE BIT(10)
+#define IQS7211_EVENT_MASK_GSTR BIT(9)
+#define IQS7211_EVENT_MODE BIT(8)
+
+#define IQS7211_COMMS_ERROR 0xEEEE
+#define IQS7211_COMMS_RETRY_MS 50
+#define IQS7211_COMMS_SLEEP_US 100
+#define IQS7211_COMMS_TIMEOUT_US (100 * USEC_PER_MSEC)
+#define IQS7211_RESET_TIMEOUT_MS 150
+#define IQS7211_START_TIMEOUT_US (1 * USEC_PER_SEC)
+
+#define IQS7211_NUM_RETRIES 5
+#define IQS7211_NUM_CRX 8
+#define IQS7211_MAX_CTX 13
+
+#define IQS7211_MAX_CONTACTS 2
+#define IQS7211_MAX_CYCLES 21
+
+/*
+ * The following delay is used during instances that must wait for the open-
+ * drain RDY pin to settle. Its value is calculated as 5*R*C, where R and C
+ * represent typical datasheet values of 4.7k and 100 nF, respectively.
+ */
+#define iqs7211_irq_wait() usleep_range(2500, 2600)
+
+enum iqs7211_dev_id {
+ IQS7210A,
+ IQS7211A,
+ IQS7211E,
+};
+
+enum iqs7211_comms_mode {
+ IQS7211_COMMS_MODE_WAIT,
+ IQS7211_COMMS_MODE_FREE,
+ IQS7211_COMMS_MODE_FORCE,
+};
+
+struct iqs7211_reg_field_desc {
+ struct list_head list;
+ u8 addr;
+ u16 mask;
+ u16 val;
+};
+
+enum iqs7211_reg_key_id {
+ IQS7211_REG_KEY_NONE,
+ IQS7211_REG_KEY_PROX,
+ IQS7211_REG_KEY_TOUCH,
+ IQS7211_REG_KEY_TAP,
+ IQS7211_REG_KEY_HOLD,
+ IQS7211_REG_KEY_PALM,
+ IQS7211_REG_KEY_AXIAL_X,
+ IQS7211_REG_KEY_AXIAL_Y,
+ IQS7211_REG_KEY_RESERVED
+};
+
+enum iqs7211_reg_grp_id {
+ IQS7211_REG_GRP_TP,
+ IQS7211_REG_GRP_BTN,
+ IQS7211_REG_GRP_ALP,
+ IQS7211_REG_GRP_SYS,
+ IQS7211_NUM_REG_GRPS
+};
+
+static const char * const iqs7211_reg_grp_names[IQS7211_NUM_REG_GRPS] = {
+ [IQS7211_REG_GRP_TP] = "trackpad",
+ [IQS7211_REG_GRP_BTN] = "button",
+ [IQS7211_REG_GRP_ALP] = "alp",
+};
+
+static const u16 iqs7211_reg_grp_masks[IQS7211_NUM_REG_GRPS] = {
+ [IQS7211_REG_GRP_TP] = IQS7211_EVENT_MASK_GSTR,
+ [IQS7211_REG_GRP_BTN] = IQS7211_EVENT_MASK_BTN,
+ [IQS7211_REG_GRP_ALP] = IQS7211_EVENT_MASK_ALP,
+};
+
+struct iqs7211_event_desc {
+ const char *name;
+ u16 mask;
+ u16 enable;
+ enum iqs7211_reg_grp_id reg_grp;
+ enum iqs7211_reg_key_id reg_key;
+};
+
+static const struct iqs7211_event_desc iqs7210a_kp_events[] = {
+ {
+ .mask = BIT(10),
+ .enable = BIT(13) | BIT(12),
+ .reg_grp = IQS7211_REG_GRP_ALP,
+ },
+ {
+ .name = "event-prox",
+ .mask = BIT(2),
+ .enable = BIT(5) | BIT(4),
+ .reg_grp = IQS7211_REG_GRP_BTN,
+ .reg_key = IQS7211_REG_KEY_PROX,
+ },
+ {
+ .name = "event-touch",
+ .mask = BIT(3),
+ .enable = BIT(5) | BIT(4),
+ .reg_grp = IQS7211_REG_GRP_BTN,
+ .reg_key = IQS7211_REG_KEY_TOUCH,
+ },
+ {
+ .name = "event-tap",
+ .mask = BIT(0),
+ .enable = BIT(0),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_TAP,
+ },
+ {
+ .name = "event-hold",
+ .mask = BIT(1),
+ .enable = BIT(1),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ },
+ {
+ .name = "event-swipe-x-neg",
+ .mask = BIT(2),
+ .enable = BIT(2),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ },
+ {
+ .name = "event-swipe-x-pos",
+ .mask = BIT(3),
+ .enable = BIT(3),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ },
+ {
+ .name = "event-swipe-y-pos",
+ .mask = BIT(4),
+ .enable = BIT(4),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ },
+ {
+ .name = "event-swipe-y-neg",
+ .mask = BIT(5),
+ .enable = BIT(5),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ },
+};
+
+static const struct iqs7211_event_desc iqs7211a_kp_events[] = {
+ {
+ .mask = BIT(14),
+ .reg_grp = IQS7211_REG_GRP_ALP,
+ },
+ {
+ .name = "event-tap",
+ .mask = BIT(0),
+ .enable = BIT(0),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_TAP,
+ },
+ {
+ .name = "event-hold",
+ .mask = BIT(1),
+ .enable = BIT(1),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ },
+ {
+ .name = "event-swipe-x-neg",
+ .mask = BIT(2),
+ .enable = BIT(2),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ },
+ {
+ .name = "event-swipe-x-pos",
+ .mask = BIT(3),
+ .enable = BIT(3),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ },
+ {
+ .name = "event-swipe-y-pos",
+ .mask = BIT(4),
+ .enable = BIT(4),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ },
+ {
+ .name = "event-swipe-y-neg",
+ .mask = BIT(5),
+ .enable = BIT(5),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ },
+};
+
+static const struct iqs7211_event_desc iqs7211e_kp_events[] = {
+ {
+ .mask = BIT(14),
+ .reg_grp = IQS7211_REG_GRP_ALP,
+ },
+ {
+ .name = "event-tap",
+ .mask = BIT(0),
+ .enable = BIT(0),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_TAP,
+ },
+ {
+ .name = "event-tap-double",
+ .mask = BIT(1),
+ .enable = BIT(1),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_TAP,
+ },
+ {
+ .name = "event-tap-triple",
+ .mask = BIT(2),
+ .enable = BIT(2),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_TAP,
+ },
+ {
+ .name = "event-hold",
+ .mask = BIT(3),
+ .enable = BIT(3),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ },
+ {
+ .name = "event-palm",
+ .mask = BIT(4),
+ .enable = BIT(4),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_PALM,
+ },
+ {
+ .name = "event-swipe-x-pos",
+ .mask = BIT(8),
+ .enable = BIT(8),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ },
+ {
+ .name = "event-swipe-x-neg",
+ .mask = BIT(9),
+ .enable = BIT(9),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ },
+ {
+ .name = "event-swipe-y-pos",
+ .mask = BIT(10),
+ .enable = BIT(10),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ },
+ {
+ .name = "event-swipe-y-neg",
+ .mask = BIT(11),
+ .enable = BIT(11),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ },
+ {
+ .name = "event-swipe-x-pos-hold",
+ .mask = BIT(12),
+ .enable = BIT(12),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ },
+ {
+ .name = "event-swipe-x-neg-hold",
+ .mask = BIT(13),
+ .enable = BIT(13),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ },
+ {
+ .name = "event-swipe-y-pos-hold",
+ .mask = BIT(14),
+ .enable = BIT(14),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ },
+ {
+ .name = "event-swipe-y-neg-hold",
+ .mask = BIT(15),
+ .enable = BIT(15),
+ .reg_grp = IQS7211_REG_GRP_TP,
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ },
+};
+
+struct iqs7211_dev_desc {
+ const char *tp_name;
+ const char *kp_name;
+ u16 prod_num;
+ u16 show_reset;
+ u16 ati_error[IQS7211_NUM_REG_GRPS];
+ u16 ati_start[IQS7211_NUM_REG_GRPS];
+ u16 suspend;
+ u16 ack_reset;
+ u16 comms_end;
+ u16 comms_req;
+ int charge_shift;
+ int info_offs;
+ int gesture_offs;
+ int contact_offs;
+ u8 sys_stat;
+ u8 sys_ctrl;
+ u8 alp_config;
+ u8 tp_config;
+ u8 exp_file;
+ u8 kp_enable[IQS7211_NUM_REG_GRPS];
+ u8 gesture_angle;
+ u8 rx_tx_map;
+ u8 cycle_alloc[2];
+ u8 cycle_limit[2];
+ const struct iqs7211_event_desc *kp_events;
+ int num_kp_events;
+ int min_crx_alp;
+ int num_ctx;
+};
+
+static const struct iqs7211_dev_desc iqs7211_devs[] = {
+ [IQS7210A] = {
+ .tp_name = "iqs7210a_trackpad",
+ .kp_name = "iqs7210a_keys",
+ .prod_num = 944,
+ .show_reset = BIT(15),
+ .ati_error = {
+ [IQS7211_REG_GRP_TP] = BIT(12),
+ [IQS7211_REG_GRP_BTN] = BIT(0),
+ [IQS7211_REG_GRP_ALP] = BIT(8),
+ },
+ .ati_start = {
+ [IQS7211_REG_GRP_TP] = BIT(13),
+ [IQS7211_REG_GRP_BTN] = BIT(1),
+ [IQS7211_REG_GRP_ALP] = BIT(9),
+ },
+ .suspend = BIT(11),
+ .ack_reset = BIT(7),
+ .comms_end = BIT(2),
+ .comms_req = BIT(1),
+ .charge_shift = 4,
+ .info_offs = 0,
+ .gesture_offs = 1,
+ .contact_offs = 4,
+ .sys_stat = 0x0A,
+ .sys_ctrl = 0x35,
+ .alp_config = 0x39,
+ .tp_config = 0x4E,
+ .exp_file = 0x57,
+ .kp_enable = {
+ [IQS7211_REG_GRP_TP] = 0x58,
+ [IQS7211_REG_GRP_BTN] = 0x37,
+ [IQS7211_REG_GRP_ALP] = 0x37,
+ },
+ .gesture_angle = 0x5F,
+ .rx_tx_map = 0x60,
+ .cycle_alloc = { 0x66, 0x75, },
+ .cycle_limit = { 10, 6, },
+ .kp_events = iqs7210a_kp_events,
+ .num_kp_events = ARRAY_SIZE(iqs7210a_kp_events),
+ .min_crx_alp = 4,
+ .num_ctx = IQS7211_MAX_CTX - 1,
+ },
+ [IQS7211A] = {
+ .tp_name = "iqs7211a_trackpad",
+ .kp_name = "iqs7211a_keys",
+ .prod_num = 763,
+ .show_reset = BIT(7),
+ .ati_error = {
+ [IQS7211_REG_GRP_TP] = BIT(3),
+ [IQS7211_REG_GRP_ALP] = BIT(5),
+ },
+ .ati_start = {
+ [IQS7211_REG_GRP_TP] = BIT(5),
+ [IQS7211_REG_GRP_ALP] = BIT(6),
+ },
+ .ack_reset = BIT(7),
+ .comms_req = BIT(4),
+ .charge_shift = 0,
+ .info_offs = 0,
+ .gesture_offs = 1,
+ .contact_offs = 4,
+ .sys_stat = 0x10,
+ .sys_ctrl = 0x50,
+ .tp_config = 0x60,
+ .alp_config = 0x72,
+ .exp_file = 0x74,
+ .kp_enable = {
+ [IQS7211_REG_GRP_TP] = 0x80,
+ },
+ .gesture_angle = 0x87,
+ .rx_tx_map = 0x90,
+ .cycle_alloc = { 0xA0, 0xB0, },
+ .cycle_limit = { 10, 8, },
+ .kp_events = iqs7211a_kp_events,
+ .num_kp_events = ARRAY_SIZE(iqs7211a_kp_events),
+ .num_ctx = IQS7211_MAX_CTX - 1,
+ },
+ [IQS7211E] = {
+ .tp_name = "iqs7211e_trackpad",
+ .kp_name = "iqs7211e_keys",
+ .prod_num = 1112,
+ .show_reset = BIT(7),
+ .ati_error = {
+ [IQS7211_REG_GRP_TP] = BIT(3),
+ [IQS7211_REG_GRP_ALP] = BIT(5),
+ },
+ .ati_start = {
+ [IQS7211_REG_GRP_TP] = BIT(5),
+ [IQS7211_REG_GRP_ALP] = BIT(6),
+ },
+ .suspend = BIT(11),
+ .ack_reset = BIT(7),
+ .comms_end = BIT(6),
+ .comms_req = BIT(4),
+ .charge_shift = 0,
+ .info_offs = 1,
+ .gesture_offs = 0,
+ .contact_offs = 2,
+ .sys_stat = 0x0E,
+ .sys_ctrl = 0x33,
+ .tp_config = 0x41,
+ .alp_config = 0x36,
+ .exp_file = 0x4A,
+ .kp_enable = {
+ [IQS7211_REG_GRP_TP] = 0x4B,
+ },
+ .gesture_angle = 0x55,
+ .rx_tx_map = 0x56,
+ .cycle_alloc = { 0x5D, 0x6C, },
+ .cycle_limit = { 10, 11, },
+ .kp_events = iqs7211e_kp_events,
+ .num_kp_events = ARRAY_SIZE(iqs7211e_kp_events),
+ .num_ctx = IQS7211_MAX_CTX,
+ },
+};
+
+struct iqs7211_prop_desc {
+ const char *name;
+ enum iqs7211_reg_key_id reg_key;
+ u8 reg_addr[IQS7211_NUM_REG_GRPS][ARRAY_SIZE(iqs7211_devs)];
+ int reg_shift;
+ int reg_width;
+ int val_pitch;
+ int val_min;
+ int val_max;
+ const char *label;
+};
+
+static const struct iqs7211_prop_desc iqs7211_props[] = {
+ {
+ .name = "azoteq,ati-frac-div-fine",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x1E,
+ [IQS7211A] = 0x30,
+ [IQS7211E] = 0x21,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x22,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x23,
+ [IQS7211A] = 0x36,
+ [IQS7211E] = 0x25,
+ },
+ },
+ .reg_shift = 9,
+ .reg_width = 5,
+ .label = "ATI fine fractional divider",
+ },
+ {
+ .name = "azoteq,ati-frac-mult-coarse",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x1E,
+ [IQS7211A] = 0x30,
+ [IQS7211E] = 0x21,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x22,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x23,
+ [IQS7211A] = 0x36,
+ [IQS7211E] = 0x25,
+ },
+ },
+ .reg_shift = 5,
+ .reg_width = 4,
+ .label = "ATI coarse fractional multiplier",
+ },
+ {
+ .name = "azoteq,ati-frac-div-coarse",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x1E,
+ [IQS7211A] = 0x30,
+ [IQS7211E] = 0x21,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x22,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x23,
+ [IQS7211A] = 0x36,
+ [IQS7211E] = 0x25,
+ },
+ },
+ .reg_shift = 0,
+ .reg_width = 5,
+ .label = "ATI coarse fractional divider",
+ },
+ {
+ .name = "azoteq,ati-comp-div",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x1F,
+ [IQS7211E] = 0x22,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x24,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7211E] = 0x26,
+ },
+ },
+ .reg_shift = 0,
+ .reg_width = 8,
+ .val_max = 31,
+ .label = "ATI compensation divider",
+ },
+ {
+ .name = "azoteq,ati-comp-div",
+ .reg_addr = {
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x24,
+ },
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_max = 31,
+ .label = "ATI compensation divider",
+ },
+ {
+ .name = "azoteq,ati-comp-div",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7211A] = 0x31,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7211A] = 0x37,
+ },
+ },
+ .val_max = 31,
+ .label = "ATI compensation divider",
+ },
+ {
+ .name = "azoteq,ati-target",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x20,
+ [IQS7211A] = 0x32,
+ [IQS7211E] = 0x23,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x27,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x28,
+ [IQS7211A] = 0x38,
+ [IQS7211E] = 0x27,
+ },
+ },
+ .label = "ATI target",
+ },
+ {
+ .name = "azoteq,ati-base",
+ .reg_addr[IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x26,
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_pitch = 8,
+ .label = "ATI base",
+ },
+ {
+ .name = "azoteq,ati-base",
+ .reg_addr[IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x26,
+ },
+ .reg_shift = 0,
+ .reg_width = 8,
+ .val_pitch = 8,
+ .label = "ATI base",
+ },
+ {
+ .name = "azoteq,rate-active-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x29,
+ [IQS7211A] = 0x40,
+ [IQS7211E] = 0x28,
+ },
+ .label = "active mode report rate",
+ },
+ {
+ .name = "azoteq,rate-touch-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x2A,
+ [IQS7211A] = 0x41,
+ [IQS7211E] = 0x29,
+ },
+ .label = "idle-touch mode report rate",
+ },
+ {
+ .name = "azoteq,rate-idle-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x2B,
+ [IQS7211A] = 0x42,
+ [IQS7211E] = 0x2A,
+ },
+ .label = "idle mode report rate",
+ },
+ {
+ .name = "azoteq,rate-lp1-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x2C,
+ [IQS7211A] = 0x43,
+ [IQS7211E] = 0x2B,
+ },
+ .label = "low-power mode 1 report rate",
+ },
+ {
+ .name = "azoteq,rate-lp2-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x2D,
+ [IQS7211A] = 0x44,
+ [IQS7211E] = 0x2C,
+ },
+ .label = "low-power mode 2 report rate",
+ },
+ {
+ .name = "azoteq,timeout-active-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x2E,
+ [IQS7211A] = 0x45,
+ [IQS7211E] = 0x2D,
+ },
+ .val_pitch = 1000,
+ .label = "active mode timeout",
+ },
+ {
+ .name = "azoteq,timeout-touch-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x2F,
+ [IQS7211A] = 0x46,
+ [IQS7211E] = 0x2E,
+ },
+ .val_pitch = 1000,
+ .label = "idle-touch mode timeout",
+ },
+ {
+ .name = "azoteq,timeout-idle-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x30,
+ [IQS7211A] = 0x47,
+ [IQS7211E] = 0x2F,
+ },
+ .val_pitch = 1000,
+ .label = "idle mode timeout",
+ },
+ {
+ .name = "azoteq,timeout-lp1-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x31,
+ [IQS7211A] = 0x48,
+ [IQS7211E] = 0x30,
+ },
+ .val_pitch = 1000,
+ .label = "low-power mode 1 timeout",
+ },
+ {
+ .name = "azoteq,timeout-lp2-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x32,
+ [IQS7211E] = 0x31,
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_pitch = 1000,
+ .val_max = 60000,
+ .label = "trackpad reference value update rate",
+ },
+ {
+ .name = "azoteq,timeout-lp2-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7211A] = 0x49,
+ },
+ .val_pitch = 1000,
+ .val_max = 60000,
+ .label = "trackpad reference value update rate",
+ },
+ {
+ .name = "azoteq,timeout-ati-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x32,
+ [IQS7211E] = 0x31,
+ },
+ .reg_width = 8,
+ .val_pitch = 1000,
+ .val_max = 60000,
+ .label = "ATI error timeout",
+ },
+ {
+ .name = "azoteq,timeout-ati-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7211A] = 0x35,
+ },
+ .val_pitch = 1000,
+ .val_max = 60000,
+ .label = "ATI error timeout",
+ },
+ {
+ .name = "azoteq,timeout-comms-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x33,
+ [IQS7211A] = 0x4A,
+ [IQS7211E] = 0x32,
+ },
+ .label = "communication timeout",
+ },
+ {
+ .name = "azoteq,timeout-press-ms",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x34,
+ },
+ .reg_width = 8,
+ .val_pitch = 1000,
+ .val_max = 60000,
+ .label = "press timeout",
+ },
+ {
+ .name = "azoteq,ati-mode",
+ .reg_addr[IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x37,
+ },
+ .reg_shift = 15,
+ .reg_width = 1,
+ .label = "ATI mode",
+ },
+ {
+ .name = "azoteq,ati-mode",
+ .reg_addr[IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x37,
+ },
+ .reg_shift = 7,
+ .reg_width = 1,
+ .label = "ATI mode",
+ },
+ {
+ .name = "azoteq,sense-mode",
+ .reg_addr[IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x37,
+ [IQS7211A] = 0x72,
+ [IQS7211E] = 0x36,
+ },
+ .reg_shift = 8,
+ .reg_width = 1,
+ .label = "sensing mode",
+ },
+ {
+ .name = "azoteq,sense-mode",
+ .reg_addr[IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x37,
+ },
+ .reg_shift = 0,
+ .reg_width = 2,
+ .val_max = 2,
+ .label = "sensing mode",
+ },
+ {
+ .name = "azoteq,fosc-freq",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x38,
+ [IQS7211A] = 0x52,
+ [IQS7211E] = 0x35,
+ },
+ .reg_shift = 4,
+ .reg_width = 1,
+ .label = "core clock frequency selection",
+ },
+ {
+ .name = "azoteq,fosc-trim",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x38,
+ [IQS7211A] = 0x52,
+ [IQS7211E] = 0x35,
+ },
+ .reg_shift = 0,
+ .reg_width = 4,
+ .label = "core clock frequency trim",
+ },
+ {
+ .name = "azoteq,touch-exit",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x3B,
+ [IQS7211A] = 0x53,
+ [IQS7211E] = 0x38,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x3E,
+ },
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "touch exit factor",
+ },
+ {
+ .name = "azoteq,touch-enter",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x3B,
+ [IQS7211A] = 0x53,
+ [IQS7211E] = 0x38,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x3E,
+ },
+ },
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "touch entrance factor",
+ },
+ {
+ .name = "azoteq,thresh",
+ .reg_addr = {
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x3C,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x3D,
+ [IQS7211A] = 0x54,
+ [IQS7211E] = 0x39,
+ },
+ },
+ .label = "threshold",
+ },
+ {
+ .name = "azoteq,debounce-exit",
+ .reg_addr = {
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x3F,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x40,
+ [IQS7211A] = 0x56,
+ [IQS7211E] = 0x3A,
+ },
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "debounce exit factor",
+ },
+ {
+ .name = "azoteq,debounce-enter",
+ .reg_addr = {
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x3F,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x40,
+ [IQS7211A] = 0x56,
+ [IQS7211E] = 0x3A,
+ },
+ },
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "debounce entrance factor",
+ },
+ {
+ .name = "azoteq,conv-frac",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x48,
+ [IQS7211A] = 0x58,
+ [IQS7211E] = 0x3D,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x49,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x4A,
+ [IQS7211A] = 0x59,
+ [IQS7211E] = 0x3E,
+ },
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "conversion frequency fractional divider",
+ },
+ {
+ .name = "azoteq,conv-period",
+ .reg_addr = {
+ [IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x48,
+ [IQS7211A] = 0x58,
+ [IQS7211E] = 0x3D,
+ },
+ [IQS7211_REG_GRP_BTN] = {
+ [IQS7210A] = 0x49,
+ },
+ [IQS7211_REG_GRP_ALP] = {
+ [IQS7210A] = 0x4A,
+ [IQS7211A] = 0x59,
+ [IQS7211E] = 0x3E,
+ },
+ },
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "conversion period",
+ },
+ {
+ .name = "azoteq,thresh",
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x55,
+ [IQS7211A] = 0x67,
+ [IQS7211E] = 0x48,
+ },
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "threshold",
+ },
+ {
+ .name = "azoteq,contact-split",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x55,
+ [IQS7211A] = 0x67,
+ [IQS7211E] = 0x48,
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "contact split factor",
+ },
+ {
+ .name = "azoteq,trim-x",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x56,
+ [IQS7211E] = 0x49,
+ },
+ .reg_shift = 0,
+ .reg_width = 8,
+ .label = "horizontal trim width",
+ },
+ {
+ .name = "azoteq,trim-x",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7211A] = 0x68,
+ },
+ .label = "horizontal trim width",
+ },
+ {
+ .name = "azoteq,trim-y",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7210A] = 0x56,
+ [IQS7211E] = 0x49,
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .label = "vertical trim height",
+ },
+ {
+ .name = "azoteq,trim-y",
+ .reg_addr[IQS7211_REG_GRP_SYS] = {
+ [IQS7211A] = 0x69,
+ },
+ .label = "vertical trim height",
+ },
+ {
+ .name = "azoteq,gesture-max-ms",
+ .reg_key = IQS7211_REG_KEY_TAP,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x59,
+ [IQS7211A] = 0x81,
+ [IQS7211E] = 0x4C,
+ },
+ .label = "maximum gesture time",
+ },
+ {
+ .name = "azoteq,gesture-mid-ms",
+ .reg_key = IQS7211_REG_KEY_TAP,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7211E] = 0x4D,
+ },
+ .label = "repeated gesture time",
+ },
+ {
+ .name = "azoteq,gesture-dist",
+ .reg_key = IQS7211_REG_KEY_TAP,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x5A,
+ [IQS7211A] = 0x82,
+ [IQS7211E] = 0x4E,
+ },
+ .label = "gesture distance",
+ },
+ {
+ .name = "azoteq,gesture-dist",
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x5A,
+ [IQS7211A] = 0x82,
+ [IQS7211E] = 0x4E,
+ },
+ .label = "gesture distance",
+ },
+ {
+ .name = "azoteq,gesture-min-ms",
+ .reg_key = IQS7211_REG_KEY_HOLD,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x5B,
+ [IQS7211A] = 0x83,
+ [IQS7211E] = 0x4F,
+ },
+ .label = "minimum gesture time",
+ },
+ {
+ .name = "azoteq,gesture-max-ms",
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x5C,
+ [IQS7211A] = 0x84,
+ [IQS7211E] = 0x50,
+ },
+ .label = "maximum gesture time",
+ },
+ {
+ .name = "azoteq,gesture-max-ms",
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x5C,
+ [IQS7211A] = 0x84,
+ [IQS7211E] = 0x50,
+ },
+ .label = "maximum gesture time",
+ },
+ {
+ .name = "azoteq,gesture-dist",
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x5D,
+ [IQS7211A] = 0x85,
+ [IQS7211E] = 0x51,
+ },
+ .label = "gesture distance",
+ },
+ {
+ .name = "azoteq,gesture-dist",
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7210A] = 0x5E,
+ [IQS7211A] = 0x86,
+ [IQS7211E] = 0x52,
+ },
+ .label = "gesture distance",
+ },
+ {
+ .name = "azoteq,gesture-dist-rep",
+ .reg_key = IQS7211_REG_KEY_AXIAL_X,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7211E] = 0x53,
+ },
+ .label = "repeated gesture distance",
+ },
+ {
+ .name = "azoteq,gesture-dist-rep",
+ .reg_key = IQS7211_REG_KEY_AXIAL_Y,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7211E] = 0x54,
+ },
+ .label = "repeated gesture distance",
+ },
+ {
+ .name = "azoteq,thresh",
+ .reg_key = IQS7211_REG_KEY_PALM,
+ .reg_addr[IQS7211_REG_GRP_TP] = {
+ [IQS7211E] = 0x55,
+ },
+ .reg_shift = 8,
+ .reg_width = 8,
+ .val_max = 42,
+ .label = "threshold",
+ },
+};
+
+static const u8 iqs7211_gesture_angle[] = {
+ 0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x06, 0x07, 0x08,
+ 0x09, 0x0A, 0x0B, 0x0C,
+ 0x0E, 0x0F, 0x10, 0x11,
+ 0x12, 0x14, 0x15, 0x16,
+ 0x17, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1E, 0x1F, 0x21,
+ 0x22, 0x23, 0x25, 0x26,
+ 0x28, 0x2A, 0x2B, 0x2D,
+ 0x2E, 0x30, 0x32, 0x34,
+ 0x36, 0x38, 0x3A, 0x3C,
+ 0x3E, 0x40, 0x42, 0x45,
+ 0x47, 0x4A, 0x4C, 0x4F,
+ 0x52, 0x55, 0x58, 0x5B,
+ 0x5F, 0x63, 0x66, 0x6B,
+ 0x6F, 0x73, 0x78, 0x7E,
+ 0x83, 0x89, 0x90, 0x97,
+ 0x9E, 0xA7, 0xB0, 0xBA,
+ 0xC5, 0xD1, 0xDF, 0xEF,
+};
+
+struct iqs7211_ver_info {
+ __le16 prod_num;
+ __le16 major;
+ __le16 minor;
+ __le32 patch;
+} __packed;
+
+struct iqs7211_touch_data {
+ __le16 abs_x;
+ __le16 abs_y;
+ __le16 pressure;
+ __le16 area;
+} __packed;
+
+struct iqs7211_tp_config {
+ u8 tp_settings;
+ u8 total_rx;
+ u8 total_tx;
+ u8 num_contacts;
+ __le16 max_x;
+ __le16 max_y;
+} __packed;
+
+struct iqs7211_private {
+ const struct iqs7211_dev_desc *dev_desc;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *irq_gpio;
+ struct i2c_client *client;
+ struct input_dev *tp_idev;
+ struct input_dev *kp_idev;
+ struct iqs7211_ver_info ver_info;
+ struct iqs7211_tp_config tp_config;
+ struct touchscreen_properties prop;
+ struct list_head reg_field_head;
+ enum iqs7211_comms_mode comms_init;
+ enum iqs7211_comms_mode comms_mode;
+ unsigned int num_contacts;
+ unsigned int kp_code[ARRAY_SIZE(iqs7211e_kp_events)];
+ u8 rx_tx_map[IQS7211_MAX_CTX + 1];
+ u8 cycle_alloc[2][33];
+ u8 exp_file[2];
+ u16 event_mask;
+ u16 ati_start;
+ u16 gesture_cache;
+};
+
+static int iqs7211_irq_poll(struct iqs7211_private *iqs7211, u64 timeout_us)
+{
+ int error, val;
+
+ error = readx_poll_timeout(gpiod_get_value_cansleep, iqs7211->irq_gpio,
+ val, val, IQS7211_COMMS_SLEEP_US, timeout_us);
+
+ return val < 0 ? val : error;
+}
+
+static int iqs7211_hard_reset(struct iqs7211_private *iqs7211)
+{
+ if (!iqs7211->reset_gpio)
+ return 0;
+
+ gpiod_set_value_cansleep(iqs7211->reset_gpio, 1);
+
+ /*
+ * The following delay ensures the shared RDY/MCLR pin is sampled in
+ * between periodic assertions by the device and assumes the default
+ * communication timeout has not been overwritten in OTP memory.
+ */
+ if (iqs7211->reset_gpio == iqs7211->irq_gpio)
+ msleep(IQS7211_RESET_TIMEOUT_MS);
+ else
+ usleep_range(1000, 1100);
+
+ gpiod_set_value_cansleep(iqs7211->reset_gpio, 0);
+ if (iqs7211->reset_gpio == iqs7211->irq_gpio)
+ iqs7211_irq_wait();
+
+ return iqs7211_irq_poll(iqs7211, IQS7211_START_TIMEOUT_US);
+}
+
+static int iqs7211_force_comms(struct iqs7211_private *iqs7211)
+{
+ u8 msg_buf[] = { 0xFF, };
+ int ret;
+
+ switch (iqs7211->comms_mode) {
+ case IQS7211_COMMS_MODE_WAIT:
+ return iqs7211_irq_poll(iqs7211, IQS7211_START_TIMEOUT_US);
+
+ case IQS7211_COMMS_MODE_FREE:
+ return 0;
+
+ case IQS7211_COMMS_MODE_FORCE:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * The device cannot communicate until it asserts its interrupt (RDY)
+ * pin. Attempts to do so while RDY is deasserted return an ACK; how-
+ * ever all write data is ignored, and all read data returns 0xEE.
+ *
+ * Unsolicited communication must be preceded by a special force com-
+ * munication command, after which the device eventually asserts its
+ * RDY pin and agrees to communicate.
+ *
+ * Regardless of whether communication is forced or the result of an
+ * interrupt, the device automatically deasserts its RDY pin once it
+ * detects an I2C stop condition, or a timeout expires.
+ */
+ ret = gpiod_get_value_cansleep(iqs7211->irq_gpio);
+ if (ret < 0)
+ return ret;
+ else if (ret > 0)
+ return 0;
+
+ ret = i2c_master_send(iqs7211->client, msg_buf, sizeof(msg_buf));
+ if (ret < (int)sizeof(msg_buf)) {
+ if (ret >= 0)
+ ret = -EIO;
+
+ msleep(IQS7211_COMMS_RETRY_MS);
+ return ret;
+ }
+
+ iqs7211_irq_wait();
+
+ return iqs7211_irq_poll(iqs7211, IQS7211_COMMS_TIMEOUT_US);
+}
+
+static int iqs7211_read_burst(struct iqs7211_private *iqs7211,
+ u8 reg, void *val, u16 val_len)
+{
+ int ret, i;
+ struct i2c_client *client = iqs7211->client;
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = sizeof(reg),
+ .buf = &reg,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = val_len,
+ .buf = (u8 *)val,
+ },
+ };
+
+ /*
+ * The following loop protects against an edge case in which the RDY
+ * pin is automatically deasserted just as the read is initiated. In
+ * that case, the read must be retried using forced communication.
+ */
+ for (i = 0; i < IQS7211_NUM_RETRIES; i++) {
+ ret = iqs7211_force_comms(iqs7211);
+ if (ret < 0)
+ continue;
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret < (int)ARRAY_SIZE(msg)) {
+ if (ret >= 0)
+ ret = -EIO;
+
+ msleep(IQS7211_COMMS_RETRY_MS);
+ continue;
+ }
+
+ if (get_unaligned_le16(msg[1].buf) == IQS7211_COMMS_ERROR) {
+ ret = -ENODATA;
+ continue;
+ }
+
+ ret = 0;
+ break;
+ }
+
+ iqs7211_irq_wait();
+
+ if (ret < 0)
+ dev_err(&client->dev,
+ "Failed to read from address 0x%02X: %d\n", reg, ret);
+
+ return ret;
+}
+
+static int iqs7211_read_word(struct iqs7211_private *iqs7211, u8 reg, u16 *val)
+{
+ __le16 val_buf;
+ int error;
+
+ error = iqs7211_read_burst(iqs7211, reg, &val_buf, sizeof(val_buf));
+ if (error)
+ return error;
+
+ *val = le16_to_cpu(val_buf);
+
+ return 0;
+}
+
+static int iqs7211_write_burst(struct iqs7211_private *iqs7211,
+ u8 reg, const void *val, u16 val_len)
+{
+ int msg_len = sizeof(reg) + val_len;
+ int ret, i;
+ struct i2c_client *client = iqs7211->client;
+ u8 *msg_buf;
+
+ msg_buf = kzalloc(msg_len, GFP_KERNEL);
+ if (!msg_buf)
+ return -ENOMEM;
+
+ *msg_buf = reg;
+ memcpy(msg_buf + sizeof(reg), val, val_len);
+
+ /*
+ * The following loop protects against an edge case in which the RDY
+ * pin is automatically asserted just before the force communication
+ * command is sent.
+ *
+ * In that case, the subsequent I2C stop condition tricks the device
+ * into preemptively deasserting the RDY pin and the command must be
+ * sent again.
+ */
+ for (i = 0; i < IQS7211_NUM_RETRIES; i++) {
+ ret = iqs7211_force_comms(iqs7211);
+ if (ret < 0)
+ continue;
+
+ ret = i2c_master_send(client, msg_buf, msg_len);
+ if (ret < msg_len) {
+ if (ret >= 0)
+ ret = -EIO;
+
+ msleep(IQS7211_COMMS_RETRY_MS);
+ continue;
+ }
+
+ ret = 0;
+ break;
+ }
+
+ kfree(msg_buf);
+
+ iqs7211_irq_wait();
+
+ if (ret < 0)
+ dev_err(&client->dev,
+ "Failed to write to address 0x%02X: %d\n", reg, ret);
+
+ return ret;
+}
+
+static int iqs7211_write_word(struct iqs7211_private *iqs7211, u8 reg, u16 val)
+{
+ __le16 val_buf = cpu_to_le16(val);
+
+ return iqs7211_write_burst(iqs7211, reg, &val_buf, sizeof(val_buf));
+}
+
+static int iqs7211_start_comms(struct iqs7211_private *iqs7211)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct i2c_client *client = iqs7211->client;
+ bool forced_comms;
+ unsigned int val;
+ u16 comms_setup;
+ int error;
+
+ /*
+ * Until forced communication can be enabled, the host must wait for a
+ * communication window each time it intends to elicit a response from
+ * the device.
+ *
+ * Forced communication is not necessary, however, if the host adapter
+ * can support clock stretching. In that case, the device freely clock
+ * stretches until all pending conversions are complete.
+ */
+ forced_comms = device_property_present(&client->dev,
+ "azoteq,forced-comms");
+
+ error = device_property_read_u32(&client->dev,
+ "azoteq,forced-comms-default", &val);
+ if (error == -EINVAL) {
+ iqs7211->comms_init = IQS7211_COMMS_MODE_WAIT;
+ } else if (error) {
+ dev_err(&client->dev,
+ "Failed to read default communication mode: %d\n",
+ error);
+ return error;
+ } else if (val) {
+ iqs7211->comms_init = forced_comms ? IQS7211_COMMS_MODE_FORCE
+ : IQS7211_COMMS_MODE_WAIT;
+ } else {
+ iqs7211->comms_init = forced_comms ? IQS7211_COMMS_MODE_WAIT
+ : IQS7211_COMMS_MODE_FREE;
+ }
+
+ iqs7211->comms_mode = iqs7211->comms_init;
+
+ error = iqs7211_hard_reset(iqs7211);
+ if (error) {
+ dev_err(&client->dev, "Failed to reset device: %d\n", error);
+ return error;
+ }
+
+ error = iqs7211_read_burst(iqs7211, IQS7211_PROD_NUM,
+ &iqs7211->ver_info,
+ sizeof(iqs7211->ver_info));
+ if (error)
+ return error;
+
+ if (le16_to_cpu(iqs7211->ver_info.prod_num) != dev_desc->prod_num) {
+ dev_err(&client->dev, "Invalid product number: %u\n",
+ le16_to_cpu(iqs7211->ver_info.prod_num));
+ return -EINVAL;
+ }
+
+ error = iqs7211_read_word(iqs7211, dev_desc->sys_ctrl + 1,
+ &comms_setup);
+ if (error)
+ return error;
+
+ if (forced_comms)
+ comms_setup |= dev_desc->comms_req;
+ else
+ comms_setup &= ~dev_desc->comms_req;
+
+ error = iqs7211_write_word(iqs7211, dev_desc->sys_ctrl + 1,
+ comms_setup | dev_desc->comms_end);
+ if (error)
+ return error;
+
+ if (forced_comms)
+ iqs7211->comms_mode = IQS7211_COMMS_MODE_FORCE;
+ else
+ iqs7211->comms_mode = IQS7211_COMMS_MODE_FREE;
+
+ error = iqs7211_read_burst(iqs7211, dev_desc->exp_file,
+ iqs7211->exp_file,
+ sizeof(iqs7211->exp_file));
+ if (error)
+ return error;
+
+ error = iqs7211_read_burst(iqs7211, dev_desc->tp_config,
+ &iqs7211->tp_config,
+ sizeof(iqs7211->tp_config));
+ if (error)
+ return error;
+
+ error = iqs7211_write_word(iqs7211, dev_desc->sys_ctrl + 1,
+ comms_setup);
+ if (error)
+ return error;
+
+ iqs7211->event_mask = comms_setup & ~IQS7211_EVENT_MASK_ALL;
+ iqs7211->event_mask |= (IQS7211_EVENT_MASK_ATI | IQS7211_EVENT_MODE);
+
+ return 0;
+}
+
+static int iqs7211_init_device(struct iqs7211_private *iqs7211)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct iqs7211_reg_field_desc *reg_field;
+ __le16 sys_ctrl[] = {
+ cpu_to_le16(dev_desc->ack_reset),
+ cpu_to_le16(iqs7211->event_mask),
+ };
+ int error, i;
+
+ /*
+ * Acknowledge reset before writing any registers in case the device
+ * suffers a spurious reset during initialization. The communication
+ * mode is configured at this time as well.
+ */
+ error = iqs7211_write_burst(iqs7211, dev_desc->sys_ctrl, sys_ctrl,
+ sizeof(sys_ctrl));
+ if (error)
+ return error;
+
+ if (iqs7211->event_mask & dev_desc->comms_req)
+ iqs7211->comms_mode = IQS7211_COMMS_MODE_FORCE;
+ else
+ iqs7211->comms_mode = IQS7211_COMMS_MODE_FREE;
+
+ /*
+ * Take advantage of the stop-bit disable function, if available, to
+ * save the trouble of having to reopen a communication window after
+ * each read or write.
+ */
+ error = iqs7211_write_word(iqs7211, dev_desc->sys_ctrl + 1,
+ iqs7211->event_mask | dev_desc->comms_end);
+ if (error)
+ return error;
+
+ list_for_each_entry(reg_field, &iqs7211->reg_field_head, list) {
+ u16 new_val = reg_field->val;
+
+ if (reg_field->mask < U16_MAX) {
+ u16 old_val;
+
+ error = iqs7211_read_word(iqs7211, reg_field->addr,
+ &old_val);
+ if (error)
+ return error;
+
+ new_val = old_val & ~reg_field->mask;
+ new_val |= reg_field->val;
+
+ if (new_val == old_val)
+ continue;
+ }
+
+ error = iqs7211_write_word(iqs7211, reg_field->addr, new_val);
+ if (error)
+ return error;
+ }
+
+ error = iqs7211_write_burst(iqs7211, dev_desc->tp_config,
+ &iqs7211->tp_config,
+ sizeof(iqs7211->tp_config));
+ if (error)
+ return error;
+
+ if (**iqs7211->cycle_alloc) {
+ error = iqs7211_write_burst(iqs7211, dev_desc->rx_tx_map,
+ &iqs7211->rx_tx_map,
+ dev_desc->num_ctx);
+ if (error)
+ return error;
+
+ for (i = 0; i < sizeof(dev_desc->cycle_limit); i++) {
+ error = iqs7211_write_burst(iqs7211,
+ dev_desc->cycle_alloc[i],
+ iqs7211->cycle_alloc[i],
+ dev_desc->cycle_limit[i] * 3);
+ if (error)
+ return error;
+ }
+ }
+
+ *sys_ctrl = cpu_to_le16(iqs7211->ati_start);
+
+ return iqs7211_write_burst(iqs7211, dev_desc->sys_ctrl, sys_ctrl,
+ sizeof(sys_ctrl));
+}
+
+static int iqs7211_add_field(struct iqs7211_private *iqs7211,
+ struct iqs7211_reg_field_desc new_field)
+{
+ struct i2c_client *client = iqs7211->client;
+ struct iqs7211_reg_field_desc *reg_field;
+
+ if (!new_field.addr)
+ return 0;
+
+ list_for_each_entry(reg_field, &iqs7211->reg_field_head, list) {
+ if (reg_field->addr != new_field.addr)
+ continue;
+
+ reg_field->mask |= new_field.mask;
+ reg_field->val |= new_field.val;
+ return 0;
+ }
+
+ reg_field = devm_kzalloc(&client->dev, sizeof(*reg_field), GFP_KERNEL);
+ if (!reg_field)
+ return -ENOMEM;
+
+ reg_field->addr = new_field.addr;
+ reg_field->mask = new_field.mask;
+ reg_field->val = new_field.val;
+
+ list_add(&reg_field->list, &iqs7211->reg_field_head);
+
+ return 0;
+}
+
+static int iqs7211_parse_props(struct iqs7211_private *iqs7211,
+ struct fwnode_handle *reg_grp_node,
+ enum iqs7211_reg_grp_id reg_grp,
+ enum iqs7211_reg_key_id reg_key)
+{
+ struct i2c_client *client = iqs7211->client;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(iqs7211_props); i++) {
+ const char *name = iqs7211_props[i].name;
+ u8 reg_addr = iqs7211_props[i].reg_addr[reg_grp]
+ [iqs7211->dev_desc -
+ iqs7211_devs];
+ int reg_shift = iqs7211_props[i].reg_shift;
+ int reg_width = iqs7211_props[i].reg_width ? : 16;
+ int val_pitch = iqs7211_props[i].val_pitch ? : 1;
+ int val_min = iqs7211_props[i].val_min;
+ int val_max = iqs7211_props[i].val_max;
+ const char *label = iqs7211_props[i].label ? : name;
+ struct iqs7211_reg_field_desc reg_field;
+ unsigned int val;
+ int error;
+
+ if (iqs7211_props[i].reg_key != reg_key)
+ continue;
+
+ if (!reg_addr)
+ continue;
+
+ error = fwnode_property_read_u32(reg_grp_node, name, &val);
+ if (error == -EINVAL) {
+ continue;
+ } else if (error) {
+ dev_err(&client->dev, "Failed to read %s %s: %d\n",
+ fwnode_get_name(reg_grp_node), label, error);
+ return error;
+ }
+
+ if (!val_max)
+ val_max = GENMASK(reg_width - 1, 0) * val_pitch;
+
+ if (val < val_min || val > val_max) {
+ dev_err(&client->dev, "Invalid %s: %u\n", label, val);
+ return -EINVAL;
+ }
+
+ reg_field.addr = reg_addr;
+ reg_field.mask = GENMASK(reg_shift + reg_width - 1, reg_shift);
+ reg_field.val = val / val_pitch << reg_shift;
+
+ error = iqs7211_add_field(iqs7211, reg_field);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+static int iqs7211_parse_event(struct iqs7211_private *iqs7211,
+ struct fwnode_handle *event_node,
+ enum iqs7211_reg_grp_id reg_grp,
+ enum iqs7211_reg_key_id reg_key,
+ unsigned int *event_code)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct i2c_client *client = iqs7211->client;
+ struct iqs7211_reg_field_desc reg_field;
+ unsigned int val;
+ int error;
+
+ error = iqs7211_parse_props(iqs7211, event_node, reg_grp, reg_key);
+ if (error)
+ return error;
+
+ if (reg_key == IQS7211_REG_KEY_AXIAL_X ||
+ reg_key == IQS7211_REG_KEY_AXIAL_Y) {
+ error = fwnode_property_read_u32(event_node,
+ "azoteq,gesture-angle", &val);
+ if (!error) {
+ if (val >= ARRAY_SIZE(iqs7211_gesture_angle)) {
+ dev_err(&client->dev,
+ "Invalid %s gesture angle: %u\n",
+ fwnode_get_name(event_node), val);
+ return -EINVAL;
+ }
+
+ reg_field.addr = dev_desc->gesture_angle;
+ reg_field.mask = U8_MAX;
+ reg_field.val = iqs7211_gesture_angle[val];
+
+ error = iqs7211_add_field(iqs7211, reg_field);
+ if (error)
+ return error;
+ } else if (error != -EINVAL) {
+ dev_err(&client->dev,
+ "Failed to read %s gesture angle: %d\n",
+ fwnode_get_name(event_node), error);
+ return error;
+ }
+ }
+
+ error = fwnode_property_read_u32(event_node, "linux,code", event_code);
+ if (error == -EINVAL)
+ error = 0;
+ else if (error)
+ dev_err(&client->dev, "Failed to read %s code: %d\n",
+ fwnode_get_name(event_node), error);
+
+ return error;
+}
+
+static int iqs7211_parse_cycles(struct iqs7211_private *iqs7211,
+ struct fwnode_handle *tp_node)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct i2c_client *client = iqs7211->client;
+ int num_cycles = dev_desc->cycle_limit[0] + dev_desc->cycle_limit[1];
+ int error, count, i, j, k, cycle_start;
+ unsigned int cycle_alloc[IQS7211_MAX_CYCLES][2];
+ u8 total_rx = iqs7211->tp_config.total_rx;
+ u8 total_tx = iqs7211->tp_config.total_tx;
+
+ for (i = 0; i < IQS7211_MAX_CYCLES * 2; i++)
+ *(cycle_alloc[0] + i) = U8_MAX;
+
+ count = fwnode_property_count_u32(tp_node, "azoteq,channel-select");
+ if (count == -EINVAL) {
+ /*
+ * Assign each sensing cycle's slots (0 and 1) to a channel,
+ * defined as the intersection between two CRx and CTx pins.
+ * A channel assignment of 255 means the slot is unused.
+ */
+ for (i = 0, cycle_start = 0; i < total_tx; i++) {
+ int cycle_stop = 0;
+
+ for (j = 0; j < total_rx; j++) {
+ /*
+ * Channels formed by CRx0-3 and CRx4-7 are
+ * bound to slots 0 and 1, respectively.
+ */
+ int slot = iqs7211->rx_tx_map[j] < 4 ? 0 : 1;
+ int chan = i * total_rx + j;
+
+ for (k = cycle_start; k < num_cycles; k++) {
+ if (cycle_alloc[k][slot] < U8_MAX)
+ continue;
+
+ cycle_alloc[k][slot] = chan;
+ break;
+ }
+
+ if (k < num_cycles) {
+ cycle_stop = max(k, cycle_stop);
+ continue;
+ }
+
+ dev_err(&client->dev,
+ "Insufficient number of cycles\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Sensing cycles cannot straddle more than one CTx
+ * pin. As such, the next row's starting cycle must
+ * be greater than the previous row's highest cycle.
+ */
+ cycle_start = cycle_stop + 1;
+ }
+ } else if (count < 0) {
+ dev_err(&client->dev, "Failed to count channels: %d\n", count);
+ return count;
+ } else if (count > num_cycles * 2) {
+ dev_err(&client->dev, "Insufficient number of cycles\n");
+ return -EINVAL;
+ } else if (count > 0) {
+ error = fwnode_property_read_u32_array(tp_node,
+ "azoteq,channel-select",
+ cycle_alloc[0], count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read channels: %d\n",
+ error);
+ return error;
+ }
+
+ for (i = 0; i < count; i++) {
+ int chan = *(cycle_alloc[0] + i);
+
+ if (chan == U8_MAX)
+ continue;
+
+ if (chan >= total_rx * total_tx) {
+ dev_err(&client->dev, "Invalid channel: %d\n",
+ chan);
+ return -EINVAL;
+ }
+
+ for (j = 0; j < count; j++) {
+ if (j == i || *(cycle_alloc[0] + j) != chan)
+ continue;
+
+ dev_err(&client->dev, "Duplicate channel: %d\n",
+ chan);
+ return -EINVAL;
+ }
+ }
+ }
+
+ /*
+ * Once the raw channel assignments have been derived, they must be
+ * packed according to the device's register map.
+ */
+ for (i = 0, cycle_start = 0; i < sizeof(dev_desc->cycle_limit); i++) {
+ int offs = 0;
+
+ for (j = cycle_start;
+ j < cycle_start + dev_desc->cycle_limit[i]; j++) {
+ iqs7211->cycle_alloc[i][offs++] = 0x05;
+ iqs7211->cycle_alloc[i][offs++] = cycle_alloc[j][0];
+ iqs7211->cycle_alloc[i][offs++] = cycle_alloc[j][1];
+ }
+
+ cycle_start += dev_desc->cycle_limit[i];
+ }
+
+ return 0;
+}
+
+static int iqs7211_parse_tp(struct iqs7211_private *iqs7211,
+ struct fwnode_handle *tp_node)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct i2c_client *client = iqs7211->client;
+ unsigned int pins[IQS7211_MAX_CTX];
+ int error, count, i, j;
+
+ count = fwnode_property_count_u32(tp_node, "azoteq,rx-enable");
+ if (count == -EINVAL) {
+ return 0;
+ } else if (count < 0) {
+ dev_err(&client->dev, "Failed to count CRx pins: %d\n", count);
+ return count;
+ } else if (count > IQS7211_NUM_CRX) {
+ dev_err(&client->dev, "Invalid number of CRx pins\n");
+ return -EINVAL;
+ }
+
+ error = fwnode_property_read_u32_array(tp_node, "azoteq,rx-enable",
+ pins, count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read CRx pins: %d\n", error);
+ return error;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (pins[i] >= IQS7211_NUM_CRX) {
+ dev_err(&client->dev, "Invalid CRx pin: %u\n", pins[i]);
+ return -EINVAL;
+ }
+
+ iqs7211->rx_tx_map[i] = pins[i];
+ }
+
+ iqs7211->tp_config.total_rx = count;
+
+ count = fwnode_property_count_u32(tp_node, "azoteq,tx-enable");
+ if (count < 0) {
+ dev_err(&client->dev, "Failed to count CTx pins: %d\n", count);
+ return count;
+ } else if (count > dev_desc->num_ctx) {
+ dev_err(&client->dev, "Invalid number of CTx pins\n");
+ return -EINVAL;
+ }
+
+ error = fwnode_property_read_u32_array(tp_node, "azoteq,tx-enable",
+ pins, count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read CTx pins: %d\n", error);
+ return error;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (pins[i] >= dev_desc->num_ctx) {
+ dev_err(&client->dev, "Invalid CTx pin: %u\n", pins[i]);
+ return -EINVAL;
+ }
+
+ for (j = 0; j < iqs7211->tp_config.total_rx; j++) {
+ if (iqs7211->rx_tx_map[j] != pins[i])
+ continue;
+
+ dev_err(&client->dev, "Conflicting CTx pin: %u\n",
+ pins[i]);
+ return -EINVAL;
+ }
+
+ iqs7211->rx_tx_map[iqs7211->tp_config.total_rx + i] = pins[i];
+ }
+
+ iqs7211->tp_config.total_tx = count;
+
+ return iqs7211_parse_cycles(iqs7211, tp_node);
+}
+
+static int iqs7211_parse_alp(struct iqs7211_private *iqs7211,
+ struct fwnode_handle *alp_node)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct i2c_client *client = iqs7211->client;
+ struct iqs7211_reg_field_desc reg_field;
+ int error, count, i;
+
+ count = fwnode_property_count_u32(alp_node, "azoteq,rx-enable");
+ if (count < 0 && count != -EINVAL) {
+ dev_err(&client->dev, "Failed to count CRx pins: %d\n", count);
+ return count;
+ } else if (count > IQS7211_NUM_CRX) {
+ dev_err(&client->dev, "Invalid number of CRx pins\n");
+ return -EINVAL;
+ } else if (count >= 0) {
+ unsigned int pins[IQS7211_NUM_CRX];
+
+ error = fwnode_property_read_u32_array(alp_node,
+ "azoteq,rx-enable",
+ pins, count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read CRx pins: %d\n",
+ error);
+ return error;
+ }
+
+ reg_field.addr = dev_desc->alp_config;
+ reg_field.mask = GENMASK(IQS7211_NUM_CRX - 1, 0);
+ reg_field.val = 0;
+
+ for (i = 0; i < count; i++) {
+ if (pins[i] < dev_desc->min_crx_alp ||
+ pins[i] >= IQS7211_NUM_CRX) {
+ dev_err(&client->dev, "Invalid CRx pin: %u\n",
+ pins[i]);
+ return -EINVAL;
+ }
+
+ reg_field.val |= BIT(pins[i]);
+ }
+
+ error = iqs7211_add_field(iqs7211, reg_field);
+ if (error)
+ return error;
+ }
+
+ count = fwnode_property_count_u32(alp_node, "azoteq,tx-enable");
+ if (count < 0 && count != -EINVAL) {
+ dev_err(&client->dev, "Failed to count CTx pins: %d\n", count);
+ return count;
+ } else if (count > dev_desc->num_ctx) {
+ dev_err(&client->dev, "Invalid number of CTx pins\n");
+ return -EINVAL;
+ } else if (count >= 0) {
+ unsigned int pins[IQS7211_MAX_CTX];
+
+ error = fwnode_property_read_u32_array(alp_node,
+ "azoteq,tx-enable",
+ pins, count);
+ if (error) {
+ dev_err(&client->dev, "Failed to read CTx pins: %d\n",
+ error);
+ return error;
+ }
+
+ reg_field.addr = dev_desc->alp_config + 1;
+ reg_field.mask = GENMASK(dev_desc->num_ctx - 1, 0);
+ reg_field.val = 0;
+
+ for (i = 0; i < count; i++) {
+ if (pins[i] >= dev_desc->num_ctx) {
+ dev_err(&client->dev, "Invalid CTx pin: %u\n",
+ pins[i]);
+ return -EINVAL;
+ }
+
+ reg_field.val |= BIT(pins[i]);
+ }
+
+ error = iqs7211_add_field(iqs7211, reg_field);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+static int (*iqs7211_parse_extra[IQS7211_NUM_REG_GRPS])
+ (struct iqs7211_private *iqs7211,
+ struct fwnode_handle *reg_grp_node) = {
+ [IQS7211_REG_GRP_TP] = iqs7211_parse_tp,
+ [IQS7211_REG_GRP_ALP] = iqs7211_parse_alp,
+};
+
+static int iqs7211_parse_reg_grp(struct iqs7211_private *iqs7211,
+ struct fwnode_handle *reg_grp_node,
+ enum iqs7211_reg_grp_id reg_grp)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct iqs7211_reg_field_desc reg_field;
+ int error, i;
+
+ error = iqs7211_parse_props(iqs7211, reg_grp_node, reg_grp,
+ IQS7211_REG_KEY_NONE);
+ if (error)
+ return error;
+
+ if (iqs7211_parse_extra[reg_grp]) {
+ error = iqs7211_parse_extra[reg_grp](iqs7211, reg_grp_node);
+ if (error)
+ return error;
+ }
+
+ iqs7211->ati_start |= dev_desc->ati_start[reg_grp];
+
+ reg_field.addr = dev_desc->kp_enable[reg_grp];
+ reg_field.mask = 0;
+ reg_field.val = 0;
+
+ for (i = 0; i < dev_desc->num_kp_events; i++) {
+ const char *event_name = dev_desc->kp_events[i].name;
+ struct fwnode_handle *event_node;
+
+ if (dev_desc->kp_events[i].reg_grp != reg_grp)
+ continue;
+
+ reg_field.mask |= dev_desc->kp_events[i].enable;
+
+ if (event_name)
+ event_node = fwnode_get_named_child_node(reg_grp_node,
+ event_name);
+ else
+ event_node = fwnode_handle_get(reg_grp_node);
+
+ if (!event_node)
+ continue;
+
+ error = iqs7211_parse_event(iqs7211, event_node,
+ dev_desc->kp_events[i].reg_grp,
+ dev_desc->kp_events[i].reg_key,
+ &iqs7211->kp_code[i]);
+ fwnode_handle_put(event_node);
+ if (error)
+ return error;
+
+ reg_field.val |= dev_desc->kp_events[i].enable;
+
+ iqs7211->event_mask |= iqs7211_reg_grp_masks[reg_grp];
+ }
+
+ return iqs7211_add_field(iqs7211, reg_field);
+}
+
+static int iqs7211_register_kp(struct iqs7211_private *iqs7211)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct input_dev *kp_idev = iqs7211->kp_idev;
+ struct i2c_client *client = iqs7211->client;
+ int error, i;
+
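+ /*
+ * Register a keypad only if at least one event was assigned a keycode
+ * during parsing; otherwise there is nothing to report.
+ */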
+ for (i = 0; i < dev_desc->num_kp_events; i++)
+ if (iqs7211->kp_code[i])
+ break;
+
+ if (i == dev_desc->num_kp_events)
+ return 0;
+
+ kp_idev = devm_input_allocate_device(&client->dev);
+ if (!kp_idev)
+ return -ENOMEM;
+
+ iqs7211->kp_idev = kp_idev;
+
+ kp_idev->name = dev_desc->kp_name;
+ kp_idev->id.bustype = BUS_I2C;
+
+ for (i = 0; i < dev_desc->num_kp_events; i++)
+ if (iqs7211->kp_code[i])
+ input_set_capability(iqs7211->kp_idev, EV_KEY,
+ iqs7211->kp_code[i]);
+
+ error = input_register_device(kp_idev);
+ if (error)
+ dev_err(&client->dev, "Failed to register %s: %d\n",
+ kp_idev->name, error);
+
+ return error;
+}
+
+static int iqs7211_register_tp(struct iqs7211_private *iqs7211)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct touchscreen_properties *prop = &iqs7211->prop;
+ struct input_dev *tp_idev = iqs7211->tp_idev;
+ struct i2c_client *client = iqs7211->client;
+ int error;
+
+ error = device_property_read_u32(&client->dev, "azoteq,num-contacts",
+ &iqs7211->num_contacts);
+ if (error == -EINVAL) {
+ return 0;
+ } else if (error) {
+ dev_err(&client->dev, "Failed to read number of contacts: %d\n",
+ error);
+ return error;
+ } else if (iqs7211->num_contacts > IQS7211_MAX_CONTACTS) {
+ dev_err(&client->dev, "Invalid number of contacts: %u\n",
+ iqs7211->num_contacts);
+ return -EINVAL;
+ }
+
+ iqs7211->tp_config.num_contacts = iqs7211->num_contacts ? : 1;
+
+ if (!iqs7211->num_contacts)
+ return 0;
+
+ iqs7211->event_mask |= IQS7211_EVENT_MASK_MOVE;
+
+ tp_idev = devm_input_allocate_device(&client->dev);
+ if (!tp_idev)
+ return -ENOMEM;
+
+ iqs7211->tp_idev = tp_idev;
+
+ tp_idev->name = dev_desc->tp_name;
+ tp_idev->id.bustype = BUS_I2C;
+
+ input_set_abs_params(tp_idev, ABS_MT_POSITION_X,
+ 0, le16_to_cpu(iqs7211->tp_config.max_x), 0, 0);
+
+ input_set_abs_params(tp_idev, ABS_MT_POSITION_Y,
+ 0, le16_to_cpu(iqs7211->tp_config.max_y), 0, 0);
+
+ input_set_abs_params(tp_idev, ABS_MT_PRESSURE, 0, U16_MAX, 0, 0);
+
+ touchscreen_parse_properties(tp_idev, true, prop);
+
+ /*
+ * The device reserves 0xFFFF for coordinates that correspond to slots
+ * that are not in a state of touch.
+ */
+ if (prop->max_x >= U16_MAX || prop->max_y >= U16_MAX) {
+ dev_err(&client->dev, "Invalid trackpad size: %u*%u\n",
+ prop->max_x, prop->max_y);
+ return -EINVAL;
+ }
+
+ iqs7211->tp_config.max_x = cpu_to_le16(prop->max_x);
+ iqs7211->tp_config.max_y = cpu_to_le16(prop->max_y);
+
+ error = input_mt_init_slots(tp_idev, iqs7211->num_contacts,
+ INPUT_MT_DIRECT);
+ if (error) {
+ dev_err(&client->dev, "Failed to initialize slots: %d\n",
+ error);
+ return error;
+ }
+
+ error = input_register_device(tp_idev);
+ if (error)
+ dev_err(&client->dev, "Failed to register %s: %d\n",
+ tp_idev->name, error);
+
+ return error;
+}
+
+static int iqs7211_report(struct iqs7211_private *iqs7211)
+{
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ struct i2c_client *client = iqs7211->client;
+ struct iqs7211_touch_data *touch_data;
+ u16 info_flags, charge_mode, gesture_flags;
+ __le16 status[12];
+ int error, i;
+
+ error = iqs7211_read_burst(iqs7211, dev_desc->sys_stat, status,
+ dev_desc->contact_offs * sizeof(__le16) +
+ iqs7211->num_contacts * sizeof(*touch_data));
+ if (error)
+ return error;
+
+ info_flags = le16_to_cpu(status[dev_desc->info_offs]);
+
+ if (info_flags & dev_desc->show_reset) {
+ dev_err(&client->dev, "Unexpected device reset\n");
+
+ /*
+ * The device may or may not expect forced communication after
+ * it exits hardware reset, so the corresponding state machine
+ * must be reset as well.
+ */
+ iqs7211->comms_mode = iqs7211->comms_init;
+
+ return iqs7211_init_device(iqs7211);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev_desc->ati_error); i++) {
+ if (!(info_flags & dev_desc->ati_error[i]))
+ continue;
+
+ dev_err(&client->dev, "Unexpected %s ATI error\n",
+ iqs7211_reg_grp_names[i]);
+ return 0;
+ }
+
+ for (i = 0; i < iqs7211->num_contacts; i++) {
+ u16 pressure;
+
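+ /* Contact data immediately follows the status fields at contact_offs. */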
+ touch_data = (struct iqs7211_touch_data *)
+ &status[dev_desc->contact_offs] + i;
+ pressure = le16_to_cpu(touch_data->pressure);
+
+ input_mt_slot(iqs7211->tp_idev, i);
+ if (input_mt_report_slot_state(iqs7211->tp_idev, MT_TOOL_FINGER,
+ pressure != 0)) {
+ touchscreen_report_pos(iqs7211->tp_idev, &iqs7211->prop,
+ le16_to_cpu(touch_data->abs_x),
+ le16_to_cpu(touch_data->abs_y),
+ true);
+ input_report_abs(iqs7211->tp_idev, ABS_MT_PRESSURE,
+ pressure);
+ }
+ }
+
+ if (iqs7211->num_contacts) {
+ input_mt_sync_frame(iqs7211->tp_idev);
+ input_sync(iqs7211->tp_idev);
+ }
+
+ if (!iqs7211->kp_idev)
+ return 0;
+
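+ /* The charging mode occupies a 3-bit field within the info flags. */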
+ charge_mode = info_flags & GENMASK(dev_desc->charge_shift + 2,
+ dev_desc->charge_shift);
+ charge_mode >>= dev_desc->charge_shift;
+
+ /*
+ * A charging mode higher than 2 (idle mode) indicates the device last
+ * operated in low-power mode and intends to express an ALP event.
+ */
+ if (info_flags & dev_desc->kp_events->mask && charge_mode > 2) {
+ input_report_key(iqs7211->kp_idev, *iqs7211->kp_code, 1);
+ input_sync(iqs7211->kp_idev);
+
+ input_report_key(iqs7211->kp_idev, *iqs7211->kp_code, 0);
+ }
+
+ for (i = 0; i < dev_desc->num_kp_events; i++) {
+ if (dev_desc->kp_events[i].reg_grp != IQS7211_REG_GRP_BTN)
+ continue;
+
+ input_report_key(iqs7211->kp_idev, iqs7211->kp_code[i],
+ info_flags & dev_desc->kp_events[i].mask);
+ }
+
+ gesture_flags = le16_to_cpu(status[dev_desc->gesture_offs]);
+
+ for (i = 0; i < dev_desc->num_kp_events; i++) {
+ enum iqs7211_reg_key_id reg_key = dev_desc->kp_events[i].reg_key;
+ u16 mask = dev_desc->kp_events[i].mask;
+
+ if (dev_desc->kp_events[i].reg_grp != IQS7211_REG_GRP_TP)
+ continue;
+
+ if ((gesture_flags ^ iqs7211->gesture_cache) & mask)
+ input_report_key(iqs7211->kp_idev, iqs7211->kp_code[i],
+ gesture_flags & mask);
+
+ iqs7211->gesture_cache &= ~mask;
+
+ /*
+ * Hold and palm gestures persist while the contact remains in
+ * place; all others are momentary and hence are followed by a
+ * complementary release event.
+ */
+ if (reg_key == IQS7211_REG_KEY_HOLD ||
+ reg_key == IQS7211_REG_KEY_PALM) {
+ iqs7211->gesture_cache |= gesture_flags & mask;
+ gesture_flags &= ~mask;
+ }
+ }
+
+ if (gesture_flags) {
+ input_sync(iqs7211->kp_idev);
+
+ for (i = 0; i < dev_desc->num_kp_events; i++)
+ if (dev_desc->kp_events[i].reg_grp == IQS7211_REG_GRP_TP &&
+ gesture_flags & dev_desc->kp_events[i].mask)
+ input_report_key(iqs7211->kp_idev,
+ iqs7211->kp_code[i], 0);
+ }
+
+ input_sync(iqs7211->kp_idev);
+
+ return 0;
+}
+
+static irqreturn_t iqs7211_irq(int irq, void *context)
+{
+ struct iqs7211_private *iqs7211 = context;
+
+ return iqs7211_report(iqs7211) ? IRQ_NONE : IRQ_HANDLED;
+}
+
+static int iqs7211_suspend(struct device *dev)
+{
+ struct iqs7211_private *iqs7211 = dev_get_drvdata(dev);
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ int error;
+
+ if (!dev_desc->suspend || device_may_wakeup(dev))
+ return 0;
+
+ /*
+ * I2C communication prompts the device to assert its RDY pin if it is
+ * not already asserted. As such, the interrupt must be disabled so as
+ * to prevent reentrant interrupts.
+ */
+ disable_irq(gpiod_to_irq(iqs7211->irq_gpio));
+
+ error = iqs7211_write_word(iqs7211, dev_desc->sys_ctrl,
+ dev_desc->suspend);
+
+ enable_irq(gpiod_to_irq(iqs7211->irq_gpio));
+
+ return error;
+}
+
+static int iqs7211_resume(struct device *dev)
+{
+ struct iqs7211_private *iqs7211 = dev_get_drvdata(dev);
+ const struct iqs7211_dev_desc *dev_desc = iqs7211->dev_desc;
+ __le16 sys_ctrl[] = {
+ 0,
+ cpu_to_le16(iqs7211->event_mask),
+ };
+ int error;
+
+ if (!dev_desc->suspend || device_may_wakeup(dev))
+ return 0;
+
+ disable_irq(gpiod_to_irq(iqs7211->irq_gpio));
+
+ /*
+ * Forced communication, if in use, must be explicitly enabled as part
+ * of the wake-up command.
+ */
+ error = iqs7211_write_burst(iqs7211, dev_desc->sys_ctrl, sys_ctrl,
+ sizeof(sys_ctrl));
+
+ enable_irq(gpiod_to_irq(iqs7211->irq_gpio));
+
+ return error;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(iqs7211_pm, iqs7211_suspend, iqs7211_resume);
+
+static ssize_t fw_info_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iqs7211_private *iqs7211 = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u.%u.%u.%u:%u.%u\n",
+ le16_to_cpu(iqs7211->ver_info.prod_num),
+ le32_to_cpu(iqs7211->ver_info.patch),
+ le16_to_cpu(iqs7211->ver_info.major),
+ le16_to_cpu(iqs7211->ver_info.minor),
+ iqs7211->exp_file[1], iqs7211->exp_file[0]);
+}
+
+static DEVICE_ATTR_RO(fw_info);
+
+static struct attribute *iqs7211_attrs[] = {
+ &dev_attr_fw_info.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(iqs7211);
+
+static const struct of_device_id iqs7211_of_match[] = {
+ {
+ .compatible = "azoteq,iqs7210a",
+ .data = &iqs7211_devs[IQS7210A],
+ },
+ {
+ .compatible = "azoteq,iqs7211a",
+ .data = &iqs7211_devs[IQS7211A],
+ },
+ {
+ .compatible = "azoteq,iqs7211e",
+ .data = &iqs7211_devs[IQS7211E],
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, iqs7211_of_match);
+
+static int iqs7211_probe(struct i2c_client *client)
+{
+ struct iqs7211_private *iqs7211;
+ enum iqs7211_reg_grp_id reg_grp;
+ unsigned long irq_flags;
+ bool shared_irq;
+ int error, irq;
+
+ iqs7211 = devm_kzalloc(&client->dev, sizeof(*iqs7211), GFP_KERNEL);
+ if (!iqs7211)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, iqs7211);
+ iqs7211->client = client;
+
+ INIT_LIST_HEAD(&iqs7211->reg_field_head);
+
+ iqs7211->dev_desc = device_get_match_data(&client->dev);
+ if (!iqs7211->dev_desc)
+ return -ENODEV;
+
+ shared_irq = iqs7211->dev_desc->num_ctx == IQS7211_MAX_CTX;
+
+ /*
+ * The RDY pin behaves as an interrupt, but must also be polled ahead
+ * of unsolicited I2C communication. As such, it is first opened as a
+ * GPIO and then passed to gpiod_to_irq() to register the interrupt.
+ *
+ * If an extra CTx pin is present, the RDY and MCLR pins are combined
+ * into a single bidirectional pin. In that case, the platform's GPIO
+ * must be configured as an open-drain output.
+ */
+ iqs7211->irq_gpio = devm_gpiod_get(&client->dev, "irq",
+ shared_irq ? GPIOD_OUT_LOW
+ : GPIOD_IN);
+ if (IS_ERR(iqs7211->irq_gpio)) {
+ error = PTR_ERR(iqs7211->irq_gpio);
+ dev_err(&client->dev, "Failed to request IRQ GPIO: %d\n",
+ error);
+ return error;
+ }
+
+ if (shared_irq) {
+ iqs7211->reset_gpio = iqs7211->irq_gpio;
+ } else {
+ iqs7211->reset_gpio = devm_gpiod_get_optional(&client->dev,
+ "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(iqs7211->reset_gpio)) {
+ error = PTR_ERR(iqs7211->reset_gpio);
+ dev_err(&client->dev,
+ "Failed to request reset GPIO: %d\n", error);
+ return error;
+ }
+ }
+
+ error = iqs7211_start_comms(iqs7211);
+ if (error)
+ return error;
+
+ for (reg_grp = 0; reg_grp < IQS7211_NUM_REG_GRPS; reg_grp++) {
+ const char *reg_grp_name = iqs7211_reg_grp_names[reg_grp];
+ struct fwnode_handle *reg_grp_node;
+
+ if (reg_grp_name)
+ reg_grp_node = device_get_named_child_node(&client->dev,
+ reg_grp_name);
+ else
+ reg_grp_node = fwnode_handle_get(dev_fwnode(&client->dev));
+
+ if (!reg_grp_node)
+ continue;
+
+ error = iqs7211_parse_reg_grp(iqs7211, reg_grp_node, reg_grp);
+ fwnode_handle_put(reg_grp_node);
+ if (error)
+ return error;
+ }
+
+ error = iqs7211_register_kp(iqs7211);
+ if (error)
+ return error;
+
+ error = iqs7211_register_tp(iqs7211);
+ if (error)
+ return error;
+
+ error = iqs7211_init_device(iqs7211);
+ if (error)
+ return error;
+
+ irq = gpiod_to_irq(iqs7211->irq_gpio);
+ if (irq < 0)
+ return irq;
+
+ irq_flags = gpiod_is_active_low(iqs7211->irq_gpio) ? IRQF_TRIGGER_LOW
+ : IRQF_TRIGGER_HIGH;
+ irq_flags |= IRQF_ONESHOT;
+
+ error = devm_request_threaded_irq(&client->dev, irq, NULL, iqs7211_irq,
+ irq_flags, client->name, iqs7211);
+ if (error)
+ dev_err(&client->dev, "Failed to request IRQ: %d\n", error);
+
+ return error;
+}
+
+static struct i2c_driver iqs7211_i2c_driver = {
+ .probe = iqs7211_probe,
+ .driver = {
+ .name = "iqs7211",
+ .of_match_table = iqs7211_of_match,
+ .dev_groups = iqs7211_groups,
+ .pm = pm_sleep_ptr(&iqs7211_pm),
+ },
+};
+module_i2c_driver(iqs7211_i2c_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS7210A/7211A/E Trackpad/Touchscreen Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index 15b5cb763526..9bad8b93c039 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -198,54 +198,36 @@ static void lpc32xx_ts_close(struct input_dev *dev)
static int lpc32xx_ts_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct lpc32xx_tsc *tsc;
struct input_dev *input;
- struct resource *res;
- resource_size_t size;
int irq;
int error;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Can't get memory resource\n");
- return -ENOENT;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- tsc = kzalloc(sizeof(*tsc), GFP_KERNEL);
- input = input_allocate_device();
- if (!tsc || !input) {
- dev_err(&pdev->dev, "failed allocating memory\n");
- error = -ENOMEM;
- goto err_free_mem;
- }
+ tsc = devm_kzalloc(dev, sizeof(*tsc), GFP_KERNEL);
+ if (!tsc)
+ return -ENOMEM;
- tsc->dev = input;
tsc->irq = irq;
- size = resource_size(res);
-
- if (!request_mem_region(res->start, size, pdev->name)) {
- dev_err(&pdev->dev, "TSC registers are not free\n");
- error = -EBUSY;
- goto err_free_mem;
- }
+ tsc->tsc_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(tsc->tsc_base))
+ return PTR_ERR(tsc->tsc_base);
- tsc->tsc_base = ioremap(res->start, size);
- if (!tsc->tsc_base) {
- dev_err(&pdev->dev, "Can't map memory\n");
- error = -ENOMEM;
- goto err_release_mem;
- }
-
- tsc->clk = clk_get(&pdev->dev, NULL);
+ tsc->clk = devm_clk_get(dev, NULL);
if (IS_ERR(tsc->clk)) {
dev_err(&pdev->dev, "failed getting clock\n");
- error = PTR_ERR(tsc->clk);
- goto err_unmap;
+ return PTR_ERR(tsc->clk);
+ }
+
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(&pdev->dev, "failed allocating input device\n");
+ return -ENOMEM;
}
input->name = MOD_NAME;
@@ -254,68 +236,33 @@ static int lpc32xx_ts_probe(struct platform_device *pdev)
input->id.vendor = 0x0001;
input->id.product = 0x0002;
input->id.version = 0x0100;
- input->dev.parent = &pdev->dev;
input->open = lpc32xx_ts_open;
input->close = lpc32xx_ts_close;
- input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+ input_set_capability(input, EV_KEY, BTN_TOUCH);
input_set_abs_params(input, ABS_X, LPC32XX_TSC_MIN_XY_VAL,
LPC32XX_TSC_MAX_XY_VAL, 0, 0);
input_set_abs_params(input, ABS_Y, LPC32XX_TSC_MIN_XY_VAL,
LPC32XX_TSC_MAX_XY_VAL, 0, 0);
input_set_drvdata(input, tsc);
+ tsc->dev = input;
- error = request_irq(tsc->irq, lpc32xx_ts_interrupt,
- 0, pdev->name, tsc);
+ error = devm_request_irq(dev, tsc->irq, lpc32xx_ts_interrupt,
+ 0, pdev->name, tsc);
if (error) {
dev_err(&pdev->dev, "failed requesting interrupt\n");
- goto err_put_clock;
+ return error;
}
error = input_register_device(input);
if (error) {
dev_err(&pdev->dev, "failed registering input device\n");
- goto err_free_irq;
+ return error;
}
platform_set_drvdata(pdev, tsc);
- device_init_wakeup(&pdev->dev, 1);
-
- return 0;
-
-err_free_irq:
- free_irq(tsc->irq, tsc);
-err_put_clock:
- clk_put(tsc->clk);
-err_unmap:
- iounmap(tsc->tsc_base);
-err_release_mem:
- release_mem_region(res->start, size);
-err_free_mem:
- input_free_device(input);
- kfree(tsc);
-
- return error;
-}
-
-static int lpc32xx_ts_remove(struct platform_device *pdev)
-{
- struct lpc32xx_tsc *tsc = platform_get_drvdata(pdev);
- struct resource *res;
-
- free_irq(tsc->irq, tsc);
-
- input_unregister_device(tsc->dev);
-
- clk_put(tsc->clk);
-
- iounmap(tsc->tsc_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- kfree(tsc);
+ device_init_wakeup(&pdev->dev, true);
return 0;
}
@@ -384,7 +331,6 @@ MODULE_DEVICE_TABLE(of, lpc32xx_tsc_of_match);
static struct platform_driver lpc32xx_ts_driver = {
.probe = lpc32xx_ts_probe,
- .remove = lpc32xx_ts_remove,
.driver = {
.name = MOD_NAME,
.pm = LPC32XX_TS_PM_OPS,
diff --git a/drivers/input/touchscreen/melfas_mip4.c b/drivers/input/touchscreen/melfas_mip4.c
index 32896e5085bd..2ac4483fbc25 100644
--- a/drivers/input/touchscreen/melfas_mip4.c
+++ b/drivers/input/touchscreen/melfas_mip4.c
@@ -1451,13 +1451,8 @@ static int mip4_probe(struct i2c_client *client)
ts->gpio_ce = devm_gpiod_get_optional(&client->dev,
"ce", GPIOD_OUT_LOW);
- if (IS_ERR(ts->gpio_ce)) {
- error = PTR_ERR(ts->gpio_ce);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "Failed to get gpio: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->gpio_ce))
+ return dev_err_probe(&client->dev, PTR_ERR(ts->gpio_ce), "Failed to get gpio\n");
error = mip4_power_on(ts);
if (error)
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index ac12494c7930..af233b6a16d9 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -7,7 +7,6 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/i2c.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
@@ -43,6 +42,7 @@
/* Touchscreen absolute values */
#define MMS114_MAX_AREA 0xff
+#define MMS114_MAX_TOUCHKEYS 15
#define MMS114_MAX_TOUCH 10
#define MMS114_EVENT_SIZE 8
#define MMS136_EVENT_SIZE 6
@@ -70,6 +70,9 @@ struct mms114_data {
unsigned int contact_threshold;
unsigned int moving_threshold;
+ u32 keycodes[MMS114_MAX_TOUCHKEYS];
+ int num_keycodes;
+
/* Use cache data for mode control register(write only) */
u8 cache_mode_control;
};
@@ -167,11 +170,6 @@ static void mms114_process_mt(struct mms114_data *data, struct mms114_touch *tou
return;
}
- if (touch->type != MMS114_TYPE_TOUCHSCREEN) {
- dev_err(&client->dev, "Wrong touch type (%d)\n", touch->type);
- return;
- }
-
id = touch->id - 1;
x = touch->x_lo | touch->x_hi << 8;
y = touch->y_lo | touch->y_hi << 8;
@@ -191,9 +189,33 @@ static void mms114_process_mt(struct mms114_data *data, struct mms114_touch *tou
}
}
+static void mms114_process_touchkey(struct mms114_data *data,
+ struct mms114_touch *touch)
+{
+ struct i2c_client *client = data->client;
+ struct input_dev *input_dev = data->input_dev;
+ unsigned int keycode_id;
+
+ if (touch->id == 0)
+ return;
+
+ if (touch->id > data->num_keycodes) {
+ dev_err(&client->dev, "Wrong touch id for touchkey (%d)\n",
+ touch->id);
+ return;
+ }
+
+ keycode_id = touch->id - 1;
+ dev_dbg(&client->dev, "keycode id: %d, pressed: %d\n", keycode_id,
+ touch->pressed);
+
+ input_report_key(input_dev, data->keycodes[keycode_id], touch->pressed);
+}
+
static irqreturn_t mms114_interrupt(int irq, void *dev_id)
{
struct mms114_data *data = dev_id;
+ struct i2c_client *client = data->client;
struct input_dev *input_dev = data->input_dev;
struct mms114_touch touch[MMS114_MAX_TOUCH];
int packet_size;
@@ -223,8 +245,22 @@ static irqreturn_t mms114_interrupt(int irq, void *dev_id)
if (error < 0)
goto out;
- for (index = 0; index < touch_size; index++)
- mms114_process_mt(data, touch + index);
+ for (index = 0; index < touch_size; index++) {
+ switch (touch[index].type) {
+ case MMS114_TYPE_TOUCHSCREEN:
+ mms114_process_mt(data, touch + index);
+ break;
+
+ case MMS114_TYPE_TOUCHKEY:
+ mms114_process_touchkey(data, touch + index);
+ break;
+
+ default:
+ dev_err(&client->dev, "Wrong touch type (%d)\n",
+ touch[index].type);
+ break;
+ }
+ }
input_mt_report_pointer_emulation(data->input_dev, true);
input_sync(data->input_dev);
@@ -446,6 +482,7 @@ static int mms114_probe(struct i2c_client *client)
struct input_dev *input_dev;
const void *match_data;
int error;
+ int i;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "Not supported I2C adapter\n");
@@ -469,6 +506,42 @@ static int mms114_probe(struct i2c_client *client)
data->type = (enum mms_type)match_data;
+ data->num_keycodes = device_property_count_u32(&client->dev,
+ "linux,keycodes");
+ if (data->num_keycodes == -EINVAL) {
+ data->num_keycodes = 0;
+ } else if (data->num_keycodes < 0) {
+ dev_err(&client->dev,
+ "Unable to parse linux,keycodes property: %d\n",
+ data->num_keycodes);
+ return data->num_keycodes;
+ } else if (data->num_keycodes > MMS114_MAX_TOUCHKEYS) {
+ dev_warn(&client->dev,
+ "Found %d linux,keycodes but max is %d, ignoring the rest\n",
+ data->num_keycodes, MMS114_MAX_TOUCHKEYS);
+ data->num_keycodes = MMS114_MAX_TOUCHKEYS;
+ }
+
+ if (data->num_keycodes > 0) {
+ error = device_property_read_u32_array(&client->dev,
+ "linux,keycodes",
+ data->keycodes,
+ data->num_keycodes);
+ if (error) {
+ dev_err(&client->dev,
+ "Unable to read linux,keycodes values: %d\n",
+ error);
+ return error;
+ }
+
+ input_dev->keycode = data->keycodes;
+ input_dev->keycodemax = data->num_keycodes;
+ input_dev->keycodesize = sizeof(data->keycodes[0]);
+ for (i = 0; i < data->num_keycodes; i++)
+ input_set_capability(input_dev,
+ EV_KEY, data->keycodes[i]);
+ }
+
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
input_set_abs_params(input_dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
diff --git a/drivers/input/touchscreen/novatek-nvt-ts.c b/drivers/input/touchscreen/novatek-nvt-ts.c
index 7f7d879aac6d..1a797e410a3f 100644
--- a/drivers/input/touchscreen/novatek-nvt-ts.c
+++ b/drivers/input/touchscreen/novatek-nvt-ts.c
@@ -1,9 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Driver for Novatek i2c touchscreen controller as found on
- * the Acer Iconia One 7 B1-750 tablet. The Touchscreen controller
- * model-number is unknown. Android calls this a "NVT-ts" touchscreen,
- * but that may apply to other Novatek controller models too.
+ * Driver for Novatek NT11205 i2c touchscreen controller as found
+ * on the Acer Iconia One 7 B1-750 tablet.
*
* Copyright (c) 2023 Hans de Goede <hdegoede@redhat.com>
*/
@@ -272,7 +270,7 @@ static int nvt_ts_probe(struct i2c_client *client)
error = input_register_device(input);
if (error) {
- dev_err(dev, "failed to request irq: %d\n", error);
+ dev_err(dev, "failed to register input device: %d\n", error);
return error;
}
@@ -296,6 +294,6 @@ static struct i2c_driver nvt_ts_driver = {
module_i2c_driver(nvt_ts_driver);
-MODULE_DESCRIPTION("Novatek NVT-ts touchscreen driver");
+MODULE_DESCRIPTION("Novatek NT11205 touchscreen driver");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 554e179c2e48..4ede0687beb0 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -13,8 +13,8 @@
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
-#include <linux/of_device.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/slab.h>
#define PIXCIR_MAX_SLOTS 5 /* Max fingers supported by driver */
@@ -515,41 +515,27 @@ static int pixcir_i2c_ts_probe(struct i2c_client *client)
input_set_drvdata(input, tsdata);
tsdata->gpio_attb = devm_gpiod_get(dev, "attb", GPIOD_IN);
- if (IS_ERR(tsdata->gpio_attb)) {
- error = PTR_ERR(tsdata->gpio_attb);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to request ATTB gpio: %d\n",
- error);
- return error;
- }
+ if (IS_ERR(tsdata->gpio_attb))
+ return dev_err_probe(dev, PTR_ERR(tsdata->gpio_attb),
+ "Failed to request ATTB gpio\n");
tsdata->gpio_reset = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_LOW);
- if (IS_ERR(tsdata->gpio_reset)) {
- error = PTR_ERR(tsdata->gpio_reset);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to request RESET gpio: %d\n",
- error);
- return error;
- }
+ if (IS_ERR(tsdata->gpio_reset))
+ return dev_err_probe(dev, PTR_ERR(tsdata->gpio_reset),
+ "Failed to request RESET gpio\n");
tsdata->gpio_wake = devm_gpiod_get_optional(dev, "wake",
GPIOD_OUT_HIGH);
- if (IS_ERR(tsdata->gpio_wake)) {
- error = PTR_ERR(tsdata->gpio_wake);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to get wake gpio: %d\n", error);
- return error;
- }
+ if (IS_ERR(tsdata->gpio_wake))
+ return dev_err_probe(dev, PTR_ERR(tsdata->gpio_wake),
+ "Failed to get wake gpio\n");
tsdata->gpio_enable = devm_gpiod_get_optional(dev, "enable",
GPIOD_OUT_HIGH);
- if (IS_ERR(tsdata->gpio_enable)) {
- error = PTR_ERR(tsdata->gpio_enable);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "Failed to get enable gpio: %d\n", error);
- return error;
- }
+ if (IS_ERR(tsdata->gpio_enable))
+ return dev_err_probe(dev, PTR_ERR(tsdata->gpio_enable),
+ "Failed to get enable gpio\n");
if (tsdata->gpio_enable)
msleep(100);
diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
index 76e7d62d5870..78dd3059d585 100644
--- a/drivers/input/touchscreen/raydium_i2c_ts.c
+++ b/drivers/input/touchscreen/raydium_i2c_ts.c
@@ -1087,32 +1087,20 @@ static int raydium_i2c_probe(struct i2c_client *client)
i2c_set_clientdata(client, ts);
ts->avdd = devm_regulator_get(&client->dev, "avdd");
- if (IS_ERR(ts->avdd)) {
- error = PTR_ERR(ts->avdd);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "Failed to get 'avdd' regulator: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->avdd))
+ return dev_err_probe(&client->dev, PTR_ERR(ts->avdd),
+ "Failed to get 'avdd' regulator\n");
ts->vccio = devm_regulator_get(&client->dev, "vccio");
- if (IS_ERR(ts->vccio)) {
- error = PTR_ERR(ts->vccio);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "Failed to get 'vccio' regulator: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->vccio))
+ return dev_err_probe(&client->dev, PTR_ERR(ts->vccio),
+ "Failed to get 'vccio' regulator\n");
ts->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
GPIOD_OUT_LOW);
- if (IS_ERR(ts->reset_gpio)) {
- error = PTR_ERR(ts->reset_gpio);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "failed to get reset gpio: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->reset_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(ts->reset_gpio),
+ "Failed to get reset gpio\n");
error = raydium_i2c_power_on(ts);
if (error)
diff --git a/drivers/input/touchscreen/resistive-adc-touch.c b/drivers/input/touchscreen/resistive-adc-touch.c
index 6f754a8d30b1..7e761ec73273 100644
--- a/drivers/input/touchscreen/resistive-adc-touch.c
+++ b/drivers/input/touchscreen/resistive-adc-touch.c
@@ -210,12 +210,8 @@ static int grts_probe(struct platform_device *pdev)
/* get the channels from IIO device */
st->iio_chans = devm_iio_channel_get_all(dev);
- if (IS_ERR(st->iio_chans)) {
- error = PTR_ERR(st->iio_chans);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "can't get iio channels.\n");
- return error;
- }
+ if (IS_ERR(st->iio_chans))
+ return dev_err_probe(dev, PTR_ERR(st->iio_chans), "can't get iio channels\n");
if (!device_property_present(dev, "io-channel-names"))
return -ENODEV;
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index 9e28f962e059..62f562ad5026 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -706,11 +706,9 @@ static int silead_ts_probe(struct i2c_client *client)
/* Power GPIO pin */
data->gpio_power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
- if (IS_ERR(data->gpio_power)) {
- if (PTR_ERR(data->gpio_power) != -EPROBE_DEFER)
- dev_err(dev, "Shutdown GPIO request failed\n");
- return PTR_ERR(data->gpio_power);
- }
+ if (IS_ERR(data->gpio_power))
+ return dev_err_probe(dev, PTR_ERR(data->gpio_power),
+ "Shutdown GPIO request failed\n");
error = silead_ts_setup(client);
if (error)
diff --git a/drivers/input/touchscreen/sis_i2c.c b/drivers/input/touchscreen/sis_i2c.c
index 426564d0fc39..ed56cb546f39 100644
--- a/drivers/input/touchscreen/sis_i2c.c
+++ b/drivers/input/touchscreen/sis_i2c.c
@@ -310,23 +310,15 @@ static int sis_ts_probe(struct i2c_client *client)
ts->attn_gpio = devm_gpiod_get_optional(&client->dev,
"attn", GPIOD_IN);
- if (IS_ERR(ts->attn_gpio)) {
- error = PTR_ERR(ts->attn_gpio);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "Failed to get attention GPIO: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->attn_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(ts->attn_gpio),
+ "Failed to get attention GPIO\n");
ts->reset_gpio = devm_gpiod_get_optional(&client->dev,
"reset", GPIOD_OUT_LOW);
- if (IS_ERR(ts->reset_gpio)) {
- error = PTR_ERR(ts->reset_gpio);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev,
- "Failed to get reset GPIO: %d\n", error);
- return error;
- }
+ if (IS_ERR(ts->reset_gpio))
+ return dev_err_probe(&client->dev, PTR_ERR(ts->reset_gpio),
+ "Failed to get reset GPIO\n");
sis_ts_reset(ts);
diff --git a/drivers/input/touchscreen/surface3_spi.c b/drivers/input/touchscreen/surface3_spi.c
index 31d140248f2e..7efbcd0fde4f 100644
--- a/drivers/input/touchscreen/surface3_spi.c
+++ b/drivers/input/touchscreen/surface3_spi.c
@@ -221,7 +221,6 @@ static void surface3_spi_power(struct surface3_ts_data *data, bool on)
*/
static int surface3_spi_get_gpio_config(struct surface3_ts_data *data)
{
- int error;
struct device *dev;
struct gpio_desc *gpiod;
int i;
@@ -231,15 +230,9 @@ static int surface3_spi_get_gpio_config(struct surface3_ts_data *data)
/* Get the reset lines GPIO pin number */
for (i = 0; i < 2; i++) {
gpiod = devm_gpiod_get_index(dev, NULL, i, GPIOD_OUT_LOW);
- if (IS_ERR(gpiod)) {
- error = PTR_ERR(gpiod);
- if (error != -EPROBE_DEFER)
- dev_err(dev,
- "Failed to get power GPIO %d: %d\n",
- i,
- error);
- return error;
- }
+ if (IS_ERR(gpiod))
+ return dev_err_probe(dev, PTR_ERR(gpiod),
+ "Failed to get power GPIO %d\n", i);
data->gpiod_rst[i] = gpiod;
}
diff --git a/drivers/input/touchscreen/sx8654.c b/drivers/input/touchscreen/sx8654.c
index 0293c493bc79..f5c5881cef6b 100644
--- a/drivers/input/touchscreen/sx8654.c
+++ b/drivers/input/touchscreen/sx8654.c
@@ -323,13 +323,9 @@ static int sx8654_probe(struct i2c_client *client)
sx8654->gpio_reset = devm_gpiod_get_optional(&client->dev, "reset",
GPIOD_OUT_HIGH);
- if (IS_ERR(sx8654->gpio_reset)) {
- error = PTR_ERR(sx8654->gpio_reset);
- if (error != -EPROBE_DEFER)
- dev_err(&client->dev, "unable to get reset-gpio: %d\n",
- error);
- return error;
- }
+ if (IS_ERR(sx8654->gpio_reset))
+ return dev_err_probe(&client->dev, PTR_ERR(sx8654->gpio_reset),
+ "unable to get reset-gpio\n");
dev_dbg(&client->dev, "got GPIO reset pin\n");
sx8654->data = device_get_match_data(&client->dev);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index decf2d24a115..9aa4e35fb4f5 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -25,7 +25,6 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/sort.h>
#include <linux/pm_wakeirq.h>
diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile
index 5604ce351a9f..d0888babb9a1 100644
--- a/drivers/interconnect/Makefile
+++ b/drivers/interconnect/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS_core.o := -I$(src)
-icc-core-objs := core.o bulk.o
+icc-core-objs := core.o bulk.o debugfs-client.o
obj-$(CONFIG_INTERCONNECT) += icc-core.o
obj-$(CONFIG_INTERCONNECT_IMX) += imx/
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 5fac448c28fd..dfab160ca529 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -28,6 +28,7 @@ static LIST_HEAD(icc_providers);
static int providers_count;
static bool synced_state;
static DEFINE_MUTEX(icc_lock);
+static DEFINE_MUTEX(icc_bw_lock);
static struct dentry *icc_debugfs_dir;
static void icc_summary_show_one(struct seq_file *s, struct icc_node *n)
@@ -147,6 +148,21 @@ static struct icc_node *node_find(const int id)
return idr_find(&icc_idr, id);
}
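+/* Walks the list of providers; the caller is expected to hold icc_lock. */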
+static struct icc_node *node_find_by_name(const char *name)
+{
+ struct icc_provider *provider;
+ struct icc_node *n;
+
+ list_for_each_entry(provider, &icc_providers, provider_list) {
+ list_for_each_entry(n, &provider->nodes, node_list) {
+ if (!strcmp(n->name, name))
+ return n;
+ }
+ }
+
+ return NULL;
+}
+
static struct icc_path *path_init(struct device *dev, struct icc_node *dst,
ssize_t num_nodes)
{
@@ -562,6 +578,54 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
EXPORT_SYMBOL_GPL(of_icc_get);
/**
+ * icc_get() - get a path handle between two endpoints
+ * @dev: device pointer for the consumer device
+ * @src: source node name
+ * @dst: destination node name
+ *
+ * This function will search for a path between two endpoints and return an
+ * icc_path handle on success. Use icc_put() to release constraints when they
+ * are not needed anymore.
+ *
+ * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
+ * when the API is disabled.
+ */
+struct icc_path *icc_get(struct device *dev, const char *src, const char *dst)
+{
+ struct icc_node *src_node, *dst_node;
+ struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+
+ mutex_lock(&icc_lock);
+
+ src_node = node_find_by_name(src);
+ if (!src_node) {
+ dev_err(dev, "%s: invalid src=%s\n", __func__, src);
+ goto out;
+ }
+
+ dst_node = node_find_by_name(dst);
+ if (!dst_node) {
+ dev_err(dev, "%s: invalid dst=%s\n", __func__, dst);
+ goto out;
+ }
+
+ path = path_find(dev, src_node, dst_node);
+ if (IS_ERR(path)) {
+ dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path));
+ goto out;
+ }
+
+ path->name = kasprintf(GFP_KERNEL, "%s-%s", src_node->name, dst_node->name);
+ if (!path->name) {
+ kfree(path);
+ path = ERR_PTR(-ENOMEM);
+ }
+out:
+ mutex_unlock(&icc_lock);
+ return path;
+}
+
+/**
* icc_set_tag() - set an optional tag on a path
* @path: the path we want to tag
* @tag: the tag value
@@ -631,7 +695,7 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
if (WARN_ON(IS_ERR(path) || !path->num_nodes))
return -EINVAL;
- mutex_lock(&icc_lock);
+ mutex_lock(&icc_bw_lock);
old_avg = path->reqs[0].avg_bw;
old_peak = path->reqs[0].peak_bw;
@@ -663,7 +727,7 @@ int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
apply_constraints(path);
}
- mutex_unlock(&icc_lock);
+ mutex_unlock(&icc_bw_lock);
trace_icc_set_bw_end(path, ret);
@@ -872,6 +936,7 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
return;
mutex_lock(&icc_lock);
+ mutex_lock(&icc_bw_lock);
node->provider = provider;
list_add_tail(&node->node_list, &provider->nodes);
@@ -900,6 +965,7 @@ void icc_node_add(struct icc_node *node, struct icc_provider *provider)
node->avg_bw = 0;
node->peak_bw = 0;
+ mutex_unlock(&icc_bw_lock);
mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_node_add);
@@ -1025,6 +1091,7 @@ void icc_sync_state(struct device *dev)
return;
mutex_lock(&icc_lock);
+ mutex_lock(&icc_bw_lock);
synced_state = true;
list_for_each_entry(p, &icc_providers, provider_list) {
dev_dbg(p->dev, "interconnect provider is in synced state\n");
@@ -1037,13 +1104,21 @@ void icc_sync_state(struct device *dev)
}
}
}
+ mutex_unlock(&icc_bw_lock);
mutex_unlock(&icc_lock);
}
EXPORT_SYMBOL_GPL(icc_sync_state);
static int __init icc_init(void)
{
- struct device_node *root = of_find_node_by_path("/");
+ struct device_node *root;
+
+ /* Teach lockdep about lock ordering wrt. shrinker: */
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&icc_bw_lock);
+ fs_reclaim_release(GFP_KERNEL);
+
+ root = of_find_node_by_path("/");
providers_count = of_count_icc_providers(root);
of_node_put(root);
@@ -1053,6 +1128,9 @@ static int __init icc_init(void)
icc_debugfs_dir, NULL, &icc_summary_fops);
debugfs_create_file("interconnect_graph", 0444,
icc_debugfs_dir, NULL, &icc_graph_fops);
+
+ icc_debugfs_client_init(icc_debugfs_dir);
+
return 0;
}
diff --git a/drivers/interconnect/debugfs-client.c b/drivers/interconnect/debugfs-client.c
new file mode 100644
index 000000000000..bc3fd8a7b9eb
--- /dev/null
+++ b/drivers/interconnect/debugfs-client.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/debugfs.h>
+#include <linux/interconnect.h>
+#include <linux/platform_device.h>
+
+#include "internal.h"
+
+/*
+ * This can be dangerous, therefore don't provide any real compile time
+ * configuration option for this feature.
+ * People who want to use this will need to modify the source code directly.
+ */
+#undef INTERCONNECT_ALLOW_WRITE_DEBUGFS
+
+#if defined(INTERCONNECT_ALLOW_WRITE_DEBUGFS) && defined(CONFIG_DEBUG_FS)
+
+static LIST_HEAD(debugfs_paths);
+static DEFINE_MUTEX(debugfs_lock);
+
+static struct platform_device *pdev;
+static struct icc_path *cur_path;
+
+static char *src_node;
+static char *dst_node;
+static u32 avg_bw;
+static u32 peak_bw;
+static u32 tag;
+
+struct debugfs_path {
+ const char *src;
+ const char *dst;
+ struct icc_path *path;
+ struct list_head list;
+};
+
+static struct icc_path *get_path(const char *src, const char *dst)
+{
+ struct debugfs_path *path;
+
+ list_for_each_entry(path, &debugfs_paths, list) {
+ if (!strcmp(path->src, src) && !strcmp(path->dst, dst))
+ return path->path;
+ }
+
+ return NULL;
+}
+
+static int icc_get_set(void *data, u64 val)
+{
+ struct debugfs_path *debugfs_path;
+ char *src, *dst;
+ int ret = 0;
+
+ mutex_lock(&debugfs_lock);
+
+ rcu_read_lock();
+ src = rcu_dereference(src_node);
+ dst = rcu_dereference(dst_node);
+
+ /*
+ * If we've already looked up a path, then use the existing one instead
+ * of calling icc_get() again. This allows for updating previous BW
+ * votes when "get" is written to multiple times for multiple paths.
+ */
+ cur_path = get_path(src, dst);
+ if (cur_path) {
+ rcu_read_unlock();
+ goto out;
+ }
+
+ src = kstrdup(src, GFP_ATOMIC);
+ dst = kstrdup(dst, GFP_ATOMIC);
+ rcu_read_unlock();
+
+ if (!src || !dst) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ cur_path = icc_get(&pdev->dev, src, dst);
+ if (IS_ERR(cur_path)) {
+ ret = PTR_ERR(cur_path);
+ goto err_free;
+ }
+
+ debugfs_path = kzalloc(sizeof(*debugfs_path), GFP_KERNEL);
+ if (!debugfs_path) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
+
+ debugfs_path->path = cur_path;
+ debugfs_path->src = src;
+ debugfs_path->dst = dst;
+ list_add_tail(&debugfs_path->list, &debugfs_paths);
+
+ goto out;
+
+err_put:
+ icc_put(cur_path);
+err_free:
+ kfree(src);
+ kfree(dst);
+out:
+ mutex_unlock(&debugfs_lock);
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(icc_get_fops, NULL, icc_get_set, "%llu\n");
+
+static int icc_commit_set(void *data, u64 val)
+{
+ int ret;
+
+ mutex_lock(&debugfs_lock);
+
+ if (IS_ERR_OR_NULL(cur_path)) {
+ ret = PTR_ERR(cur_path);
+ goto out;
+ }
+
+ icc_set_tag(cur_path, tag);
+ ret = icc_set_bw(cur_path, avg_bw, peak_bw);
+out:
+ mutex_unlock(&debugfs_lock);
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(icc_commit_fops, NULL, icc_commit_set, "%llu\n");
+
+int icc_debugfs_client_init(struct dentry *icc_dir)
+{
+ struct dentry *client_dir;
+ int ret;
+
+ pdev = platform_device_alloc("icc-debugfs-client", PLATFORM_DEVID_NONE);
+ if (!pdev)
+ return -ENOMEM;
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+ pr_err("%s: failed to add platform device: %d\n", __func__, ret);
+ platform_device_put(pdev);
+ return ret;
+ }
+
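+ /*
+ * Typical usage: write node names to src_node and dst_node, write to
+ * "get" to look up the path, then set avg_bw, peak_bw and tag before
+ * writing to "commit" to apply the vote.
+ */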
+ client_dir = debugfs_create_dir("test_client", icc_dir);
+
+ debugfs_create_str("src_node", 0600, client_dir, &src_node);
+ debugfs_create_str("dst_node", 0600, client_dir, &dst_node);
+ debugfs_create_file("get", 0200, client_dir, NULL, &icc_get_fops);
+ debugfs_create_u32("avg_bw", 0600, client_dir, &avg_bw);
+ debugfs_create_u32("peak_bw", 0600, client_dir, &peak_bw);
+ debugfs_create_u32("tag", 0600, client_dir, &tag);
+ debugfs_create_file("commit", 0200, client_dir, NULL, &icc_commit_fops);
+
+ return 0;
+}
+
+#else
+
+int icc_debugfs_client_init(struct dentry *icc_dir)
+{
+ return 0;
+}
+
+#endif
diff --git a/drivers/interconnect/icc-clk.c b/drivers/interconnect/icc-clk.c
index 4d43ebff4257..d787f2ea36d9 100644
--- a/drivers/interconnect/icc-clk.c
+++ b/drivers/interconnect/icc-clk.c
@@ -16,7 +16,7 @@ struct icc_clk_node {
struct icc_clk_provider {
struct icc_provider provider;
int num_clocks;
- struct icc_clk_node clocks[];
+ struct icc_clk_node clocks[] __counted_by(num_clocks);
};
#define to_icc_clk_provider(_provider) \
diff --git a/drivers/interconnect/imx/imx8mp.c b/drivers/interconnect/imx/imx8mp.c
index 8bfaf173f1da..a66ae3638b18 100644
--- a/drivers/interconnect/imx/imx8mp.c
+++ b/drivers/interconnect/imx/imx8mp.c
@@ -7,7 +7,6 @@
*/
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/fsl,imx8mp.h>
diff --git a/drivers/interconnect/internal.h b/drivers/interconnect/internal.h
index f5f82a5c939e..3b9d50589c01 100644
--- a/drivers/interconnect/internal.h
+++ b/drivers/interconnect/internal.h
@@ -38,7 +38,10 @@ struct icc_req {
struct icc_path {
const char *name;
size_t num_nodes;
- struct icc_req reqs[];
+ struct icc_req reqs[] __counted_by(num_nodes);
};
+struct icc_path *icc_get(struct device *dev, const char *src, const char *dst);
+int icc_debugfs_client_init(struct dentry *icc_dir);
+
#endif
diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
index d5f2a6b5376b..a2d437a05a11 100644
--- a/drivers/interconnect/qcom/bcm-voter.c
+++ b/drivers/interconnect/qcom/bcm-voter.c
@@ -58,6 +58,36 @@ static u64 bcm_div(u64 num, u32 base)
return num;
}
+/* BCMs with enable_mask use one-hot-encoding for on/off signaling */
+static void bcm_aggregate_mask(struct qcom_icc_bcm *bcm)
+{
+ struct qcom_icc_node *node;
+ int bucket, i;
+
+ for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
+ bcm->vote_x[bucket] = 0;
+ bcm->vote_y[bucket] = 0;
+
+ for (i = 0; i < bcm->num_nodes; i++) {
+ node = bcm->nodes[i];
+
+ /* If any vote in this bucket exists, keep the BCM enabled */
+ if (node->sum_avg[bucket] || node->max_peak[bucket]) {
+ bcm->vote_x[bucket] = 0;
+ bcm->vote_y[bucket] = bcm->enable_mask;
+ break;
+ }
+ }
+ }
+
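+ /* Keepalive BCMs are kept enabled in the AMC and WAKE buckets. */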
+ if (bcm->keepalive) {
+ bcm->vote_x[QCOM_ICC_BUCKET_AMC] = bcm->enable_mask;
+ bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = bcm->enable_mask;
+ bcm->vote_y[QCOM_ICC_BUCKET_AMC] = bcm->enable_mask;
+ bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = bcm->enable_mask;
+ }
+}
+
static void bcm_aggregate(struct qcom_icc_bcm *bcm)
{
struct qcom_icc_node *node;
@@ -83,11 +113,6 @@ static void bcm_aggregate(struct qcom_icc_bcm *bcm)
temp = agg_peak[bucket] * bcm->vote_scale;
bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);
-
- if (bcm->enable_mask && (bcm->vote_x[bucket] || bcm->vote_y[bucket])) {
- bcm->vote_x[bucket] = 0;
- bcm->vote_y[bucket] = bcm->enable_mask;
- }
}
if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
@@ -260,8 +285,12 @@ int qcom_icc_bcm_voter_commit(struct bcm_voter *voter)
return 0;
mutex_lock(&voter->lock);
- list_for_each_entry(bcm, &voter->commit_list, list)
- bcm_aggregate(bcm);
+ list_for_each_entry(bcm, &voter->commit_list, list) {
+ if (bcm->enable_mask)
+ bcm_aggregate_mask(bcm);
+ else
+ bcm_aggregate(bcm);
+ }
/*
* Pre sort the BCMs based on VCD for ease of generating a command list
diff --git a/drivers/interconnect/qcom/bcm-voter.h b/drivers/interconnect/qcom/bcm-voter.h
index 0f64c0bab2c0..b4d36e349f3c 100644
--- a/drivers/interconnect/qcom/bcm-voter.h
+++ b/drivers/interconnect/qcom/bcm-voter.h
@@ -12,14 +12,6 @@
#include "icc-rpmh.h"
-#define DEFINE_QBCM(_name, _bcmname, _keepalive, ...) \
-static struct qcom_icc_bcm _name = { \
- .name = _bcmname, \
- .keepalive = _keepalive, \
- .num_nodes = ARRAY_SIZE(((struct qcom_icc_node *[]){ __VA_ARGS__ })), \
- .nodes = { __VA_ARGS__ }, \
-}
-
struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name);
void qcom_icc_bcm_voter_add(struct bcm_voter *voter, struct qcom_icc_bcm *bcm);
int qcom_icc_bcm_voter_commit(struct bcm_voter *voter);
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index 3209d8de709b..2c16917ba1fd 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -7,7 +7,7 @@
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -240,7 +240,7 @@ static int qcom_icc_rpm_set(struct qcom_icc_node *qn, u64 *bw)
}
}
- return ret;
+ return 0;
}
/**
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index fdb5e58e408b..b9f27ce3b607 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -7,7 +7,7 @@
#include <linux/interconnect-provider.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/slab.h>
#include "bcm-voter.h"
@@ -185,6 +185,7 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ data->num_nodes = num_nodes;
provider = &qp->provider;
provider->dev = dev;
@@ -228,8 +229,6 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
data->nodes[i] = node;
}
- data->num_nodes = num_nodes;
-
ret = icc_provider_register(provider);
if (ret)
goto err_remove_nodes;
diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h
index 7843d8864d6b..5f0af8b1fc43 100644
--- a/drivers/interconnect/qcom/icc-rpmh.h
+++ b/drivers/interconnect/qcom/icc-rpmh.h
@@ -120,16 +120,6 @@ struct qcom_icc_desc {
size_t num_bcms;
};
-#define DEFINE_QNODE(_name, _id, _channels, _buswidth, ...) \
- static struct qcom_icc_node _name = { \
- .id = _id, \
- .name = #_name, \
- .channels = _channels, \
- .buswidth = _buswidth, \
- .num_links = ARRAY_SIZE(((int[]){ __VA_ARGS__ })), \
- .links = { __VA_ARGS__ }, \
- }
-
int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
int qcom_icc_set(struct icc_node *src, struct icc_node *dst);
diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c
index b567a2b4199c..35148880b3e8 100644
--- a/drivers/interconnect/qcom/msm8916.c
+++ b/drivers/interconnect/qcom/msm8916.c
@@ -8,9 +8,9 @@
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/of_device.h>
#include <dt-bindings/interconnect/qcom,msm8916.h>
diff --git a/drivers/interconnect/qcom/msm8939.c b/drivers/interconnect/qcom/msm8939.c
index 6732eeeb8158..b52c5ac1175c 100644
--- a/drivers/interconnect/qcom/msm8939.c
+++ b/drivers/interconnect/qcom/msm8939.c
@@ -9,9 +9,9 @@
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/of_device.h>
#include <dt-bindings/interconnect/qcom,msm8939.h>
diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c
index 968162213d40..885ca9d6d4ed 100644
--- a/drivers/interconnect/qcom/msm8974.c
+++ b/drivers/interconnect/qcom/msm8974.c
@@ -33,8 +33,7 @@
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -676,6 +675,7 @@ static int msm8974_icc_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!data)
return -ENOMEM;
+ data->num_nodes = num_nodes;
qp->bus_clks = devm_kmemdup(dev, msm8974_icc_bus_clocks,
sizeof(msm8974_icc_bus_clocks), GFP_KERNEL);
@@ -722,7 +722,6 @@ static int msm8974_icc_probe(struct platform_device *pdev)
data->nodes[i] = node;
}
- data->num_nodes = num_nodes;
ret = icc_provider_register(provider);
if (ret)
diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c
index b9695c1931ce..88683dfa468f 100644
--- a/drivers/interconnect/qcom/msm8996.c
+++ b/drivers/interconnect/qcom/msm8996.c
@@ -8,9 +8,8 @@
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index a1f4f918b911..dc321bb86d0b 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -9,7 +9,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,osm-l3.h>
@@ -232,6 +232,7 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ data->num_nodes = num_nodes;
provider = &qp->provider;
provider->dev = &pdev->dev;
@@ -261,7 +262,6 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
data->nodes[i] = node;
}
- data->num_nodes = num_nodes;
ret = icc_provider_register(provider);
if (ret)
diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
index 8fc4acc4220b..5bc4b7516608 100644
--- a/drivers/interconnect/qcom/qcm2290.c
+++ b/drivers/interconnect/qcom/qcm2290.c
@@ -10,9 +10,8 @@
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -1197,6 +1196,7 @@ static const struct qcom_icc_desc qcm2290_bimc = {
.num_nodes = ARRAY_SIZE(qcm2290_bimc_nodes),
.bus_clk_desc = &bimc_clk,
.regmap_cfg = &qcm2290_bimc_regmap_config,
+ .keep_alive = true,
/* M_REG_BASE() in vendor msm_bus_bimc_adhoc driver */
.qos_offset = 0x8000,
};
@@ -1253,6 +1253,7 @@ static const struct qcom_icc_desc qcm2290_cnoc = {
.num_nodes = ARRAY_SIZE(qcm2290_cnoc_nodes),
.bus_clk_desc = &bus_1_clk,
.regmap_cfg = &qcm2290_cnoc_regmap_config,
+ .keep_alive = true,
};
static struct qcom_icc_node * const qcm2290_snoc_nodes[] = {
@@ -1295,6 +1296,7 @@ static const struct qcom_icc_desc qcm2290_snoc = {
.num_nodes = ARRAY_SIZE(qcm2290_snoc_nodes),
.bus_clk_desc = &bus_2_clk,
.regmap_cfg = &qcm2290_snoc_regmap_config,
+ .keep_alive = true,
/* Vendor DT node fab-sys_noc property 'qcom,base-offset' */
.qos_offset = 0x15000,
};
@@ -1309,6 +1311,7 @@ static const struct qcom_icc_desc qcm2290_qup_virt = {
.nodes = qcm2290_qup_virt_nodes,
.num_nodes = ARRAY_SIZE(qcm2290_qup_virt_nodes),
.bus_clk_desc = &qup_clk,
+ .keep_alive = true,
};
static struct qcom_icc_node * const qcm2290_mmnrt_virt_nodes[] = {
@@ -1324,6 +1327,7 @@ static const struct qcom_icc_desc qcm2290_mmnrt_virt = {
.num_nodes = ARRAY_SIZE(qcm2290_mmnrt_virt_nodes),
.bus_clk_desc = &mmaxi_0_clk,
.regmap_cfg = &qcm2290_snoc_regmap_config,
+ .keep_alive = true,
.qos_offset = 0x15000,
};
@@ -1339,6 +1343,7 @@ static const struct qcom_icc_desc qcm2290_mmrt_virt = {
.num_nodes = ARRAY_SIZE(qcm2290_mmrt_virt_nodes),
.bus_clk_desc = &mmaxi_1_clk,
.regmap_cfg = &qcm2290_snoc_regmap_config,
+ .keep_alive = true,
.qos_offset = 0x15000,
};
@@ -1359,6 +1364,7 @@ static struct platform_driver qcm2290_noc_driver = {
.driver = {
.name = "qnoc-qcm2290",
.of_match_table = qcm2290_noc_of_match,
+ .sync_state = icc_sync_state,
},
};
module_platform_driver(qcm2290_noc_driver);
diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c
index 82fe905b74a9..9fa1da70c843 100644
--- a/drivers/interconnect/qcom/qcs404.c
+++ b/drivers/interconnect/qcom/qcs404.c
@@ -8,8 +8,8 @@
#include <linux/interconnect-provider.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
-#include <linux/of_device.h>
#include "icc-rpm.h"
diff --git a/drivers/interconnect/qcom/qdu1000.c b/drivers/interconnect/qcom/qdu1000.c
index a4cf559de2b0..bf800dd7d4ba 100644
--- a/drivers/interconnect/qcom/qdu1000.c
+++ b/drivers/interconnect/qcom/qdu1000.c
@@ -7,8 +7,9 @@
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,qdu1000-rpmh.h>
#include "bcm-voter.h"
diff --git a/drivers/interconnect/qcom/sa8775p.c b/drivers/interconnect/qcom/sa8775p.c
index f56538669de0..ef1b5e326089 100644
--- a/drivers/interconnect/qcom/sa8775p.c
+++ b/drivers/interconnect/qcom/sa8775p.c
@@ -7,8 +7,9 @@
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sa8775p-rpmh.h>
#include "bcm-voter.h"
diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
index ef4e13fb4983..d94ab9b39f3d 100644
--- a/drivers/interconnect/qcom/sc7180.c
+++ b/drivers/interconnect/qcom/sc7180.c
@@ -7,176 +7,1466 @@
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sc7180.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sc7180.h"
-DEFINE_QNODE(qhm_a1noc_cfg, SC7180_MASTER_A1NOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_A1NOC);
-DEFINE_QNODE(qhm_qspi, SC7180_MASTER_QSPI, 1, 4, SC7180_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(qhm_qup_0, SC7180_MASTER_QUP_0, 1, 4, SC7180_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_sdc2, SC7180_MASTER_SDCC_2, 1, 8, SC7180_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_emmc, SC7180_MASTER_EMMC, 1, 8, SC7180_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_ufs_mem, SC7180_MASTER_UFS_MEM, 1, 8, SC7180_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(qhm_a2noc_cfg, SC7180_MASTER_A2NOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_A2NOC);
-DEFINE_QNODE(qhm_qdss_bam, SC7180_MASTER_QDSS_BAM, 1, 4, SC7180_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qhm_qup_1, SC7180_MASTER_QUP_1, 1, 4, SC7180_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qxm_crypto, SC7180_MASTER_CRYPTO, 1, 8, SC7180_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qxm_ipa, SC7180_MASTER_IPA, 1, 8, SC7180_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(xm_qdss_etr, SC7180_MASTER_QDSS_ETR, 1, 8, SC7180_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qhm_usb3, SC7180_MASTER_USB3, 1, 8, SC7180_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qxm_camnoc_hf0_uncomp, SC7180_MASTER_CAMNOC_HF0_UNCOMP, 1, 32, SC7180_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qxm_camnoc_hf1_uncomp, SC7180_MASTER_CAMNOC_HF1_UNCOMP, 1, 32, SC7180_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qxm_camnoc_sf_uncomp, SC7180_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SC7180_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qnm_npu, SC7180_MASTER_NPU, 2, 32, SC7180_SLAVE_CDSP_GEM_NOC);
-DEFINE_QNODE(qxm_npu_dsp, SC7180_MASTER_NPU_PROC, 1, 8, SC7180_SLAVE_CDSP_GEM_NOC);
-DEFINE_QNODE(qnm_snoc, SC7180_MASTER_SNOC_CNOC, 1, 8, SC7180_SLAVE_A1NOC_CFG, SC7180_SLAVE_A2NOC_CFG, SC7180_SLAVE_AHB2PHY_SOUTH, SC7180_SLAVE_AHB2PHY_CENTER, SC7180_SLAVE_AOP, SC7180_SLAVE_AOSS, SC7180_SLAVE_BOOT_ROM, SC7180_SLAVE_CAMERA_CFG, SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG, SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG, SC7180_SLAVE_CLK_CTL, SC7180_SLAVE_RBCPR_CX_CFG, SC7180_SLAVE_RBCPR_MX_CFG, SC7180_SLAVE_CRYPTO_0_CFG, SC7180_SLAVE_DCC_CFG, SC7180_SLAVE_CNOC_DDRSS, SC7180_SLAVE_DISPLAY_CFG, SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG, SC7180_SLAVE_DISPLAY_THROTTLE_CFG, SC7180_SLAVE_EMMC_CFG, SC7180_SLAVE_GLM,
- SC7180_SLAVE_GFX3D_CFG, SC7180_SLAVE_IMEM_CFG, SC7180_SLAVE_IPA_CFG, SC7180_SLAVE_CNOC_MNOC_CFG, SC7180_SLAVE_CNOC_MSS, SC7180_SLAVE_NPU_CFG, SC7180_SLAVE_NPU_DMA_BWMON_CFG, SC7180_SLAVE_NPU_PROC_BWMON_CFG, SC7180_SLAVE_PDM, SC7180_SLAVE_PIMEM_CFG, SC7180_SLAVE_PRNG, SC7180_SLAVE_QDSS_CFG, SC7180_SLAVE_QM_CFG, SC7180_SLAVE_QM_MPU_CFG, SC7180_SLAVE_QSPI_0, SC7180_SLAVE_QUP_0, SC7180_SLAVE_QUP_1, SC7180_SLAVE_SDCC_2, SC7180_SLAVE_SECURITY, SC7180_SLAVE_SNOC_CFG, SC7180_SLAVE_TCSR, SC7180_SLAVE_TLMM_WEST, SC7180_SLAVE_TLMM_NORTH, SC7180_SLAVE_TLMM_SOUTH, SC7180_SLAVE_UFS_MEM_CFG, SC7180_SLAVE_USB3, SC7180_SLAVE_VENUS_CFG, SC7180_SLAVE_VENUS_THROTTLE_CFG, SC7180_SLAVE_VSENSE_CTRL_CFG, SC7180_SLAVE_SERVICE_CNOC);
-DEFINE_QNODE(xm_qdss_dap, SC7180_MASTER_QDSS_DAP, 1, 8, SC7180_SLAVE_A1NOC_CFG, SC7180_SLAVE_A2NOC_CFG, SC7180_SLAVE_AHB2PHY_SOUTH, SC7180_SLAVE_AHB2PHY_CENTER, SC7180_SLAVE_AOP, SC7180_SLAVE_AOSS, SC7180_SLAVE_BOOT_ROM, SC7180_SLAVE_CAMERA_CFG, SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG, SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG, SC7180_SLAVE_CLK_CTL, SC7180_SLAVE_RBCPR_CX_CFG, SC7180_SLAVE_RBCPR_MX_CFG, SC7180_SLAVE_CRYPTO_0_CFG, SC7180_SLAVE_DCC_CFG, SC7180_SLAVE_CNOC_DDRSS, SC7180_SLAVE_DISPLAY_CFG, SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG, SC7180_SLAVE_DISPLAY_THROTTLE_CFG, SC7180_SLAVE_EMMC_CFG, SC7180_SLAVE_GLM, SC7180_SLAVE_GFX3D_CFG, SC7180_SLAVE_IMEM_CFG, SC7180_SLAVE_IPA_CFG, SC7180_SLAVE_CNOC_MNOC_CFG, SC7180_SLAVE_CNOC_MSS, SC7180_SLAVE_NPU_CFG, SC7180_SLAVE_NPU_DMA_BWMON_CFG,
-SC7180_SLAVE_NPU_PROC_BWMON_CFG, SC7180_SLAVE_PDM, SC7180_SLAVE_PIMEM_CFG, SC7180_SLAVE_PRNG, SC7180_SLAVE_QDSS_CFG, SC7180_SLAVE_QM_CFG, SC7180_SLAVE_QM_MPU_CFG, SC7180_SLAVE_QSPI_0, SC7180_SLAVE_QUP_0, SC7180_SLAVE_QUP_1, SC7180_SLAVE_SDCC_2, SC7180_SLAVE_SECURITY, SC7180_SLAVE_SNOC_CFG, SC7180_SLAVE_TCSR, SC7180_SLAVE_TLMM_WEST, SC7180_SLAVE_TLMM_NORTH, SC7180_SLAVE_TLMM_SOUTH, SC7180_SLAVE_UFS_MEM_CFG, SC7180_SLAVE_USB3, SC7180_SLAVE_VENUS_CFG, SC7180_SLAVE_VENUS_THROTTLE_CFG, SC7180_SLAVE_VSENSE_CTRL_CFG, SC7180_SLAVE_SERVICE_CNOC);
-DEFINE_QNODE(qhm_cnoc_dc_noc, SC7180_MASTER_CNOC_DC_NOC, 1, 4, SC7180_SLAVE_GEM_NOC_CFG, SC7180_SLAVE_LLCC_CFG);
-DEFINE_QNODE(acm_apps0, SC7180_MASTER_APPSS_PROC, 1, 16, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
-DEFINE_QNODE(acm_sys_tcu, SC7180_MASTER_SYS_TCU, 1, 8, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
-DEFINE_QNODE(qhm_gemnoc_cfg, SC7180_MASTER_GEM_NOC_CFG, 1, 4, SC7180_SLAVE_MSS_PROC_MS_MPU_CFG, SC7180_SLAVE_SERVICE_GEM_NOC);
-DEFINE_QNODE(qnm_cmpnoc, SC7180_MASTER_COMPUTE_NOC, 1, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
-DEFINE_QNODE(qnm_mnoc_hf, SC7180_MASTER_MNOC_HF_MEM_NOC, 1, 32, SC7180_SLAVE_LLCC);
-DEFINE_QNODE(qnm_mnoc_sf, SC7180_MASTER_MNOC_SF_MEM_NOC, 1, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
-DEFINE_QNODE(qnm_snoc_gc, SC7180_MASTER_SNOC_GC_MEM_NOC, 1, 8, SC7180_SLAVE_LLCC);
-DEFINE_QNODE(qnm_snoc_sf, SC7180_MASTER_SNOC_SF_MEM_NOC, 1, 16, SC7180_SLAVE_LLCC);
-DEFINE_QNODE(qxm_gpu, SC7180_MASTER_GFX3D, 2, 32, SC7180_SLAVE_GEM_NOC_SNOC, SC7180_SLAVE_LLCC);
-DEFINE_QNODE(llcc_mc, SC7180_MASTER_LLCC, 2, 4, SC7180_SLAVE_EBI1);
-DEFINE_QNODE(qhm_mnoc_cfg, SC7180_MASTER_CNOC_MNOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_MNOC);
-DEFINE_QNODE(qxm_camnoc_hf0, SC7180_MASTER_CAMNOC_HF0, 2, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_camnoc_hf1, SC7180_MASTER_CAMNOC_HF1, 2, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_camnoc_sf, SC7180_MASTER_CAMNOC_SF, 1, 32, SC7180_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_mdp0, SC7180_MASTER_MDP0, 1, 32, SC7180_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_rot, SC7180_MASTER_ROTATOR, 1, 16, SC7180_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus0, SC7180_MASTER_VIDEO_P0, 1, 32, SC7180_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus_arm9, SC7180_MASTER_VIDEO_PROC, 1, 8, SC7180_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(amm_npu_sys, SC7180_MASTER_NPU_SYS, 2, 32, SC7180_SLAVE_NPU_COMPUTE_NOC);
-DEFINE_QNODE(qhm_npu_cfg, SC7180_MASTER_NPU_NOC_CFG, 1, 4, SC7180_SLAVE_NPU_CAL_DP0, SC7180_SLAVE_NPU_CP, SC7180_SLAVE_NPU_INT_DMA_BWMON_CFG, SC7180_SLAVE_NPU_DPM, SC7180_SLAVE_ISENSE_CFG, SC7180_SLAVE_NPU_LLM_CFG, SC7180_SLAVE_NPU_TCM, SC7180_SLAVE_SERVICE_NPU_NOC);
-DEFINE_QNODE(qup_core_master_1, SC7180_MASTER_QUP_CORE_0, 1, 4, SC7180_SLAVE_QUP_CORE_0);
-DEFINE_QNODE(qup_core_master_2, SC7180_MASTER_QUP_CORE_1, 1, 4, SC7180_SLAVE_QUP_CORE_1);
-DEFINE_QNODE(qhm_snoc_cfg, SC7180_MASTER_SNOC_CFG, 1, 4, SC7180_SLAVE_SERVICE_SNOC);
-DEFINE_QNODE(qnm_aggre1_noc, SC7180_MASTER_A1NOC_SNOC, 1, 16, SC7180_SLAVE_APPSS, SC7180_SLAVE_SNOC_CNOC, SC7180_SLAVE_SNOC_GEM_NOC_SF, SC7180_SLAVE_IMEM, SC7180_SLAVE_PIMEM, SC7180_SLAVE_QDSS_STM);
-DEFINE_QNODE(qnm_aggre2_noc, SC7180_MASTER_A2NOC_SNOC, 1, 16, SC7180_SLAVE_APPSS, SC7180_SLAVE_SNOC_CNOC, SC7180_SLAVE_SNOC_GEM_NOC_SF, SC7180_SLAVE_IMEM, SC7180_SLAVE_PIMEM, SC7180_SLAVE_QDSS_STM, SC7180_SLAVE_TCU);
-DEFINE_QNODE(qnm_gemnoc, SC7180_MASTER_GEM_NOC_SNOC, 1, 8, SC7180_SLAVE_APPSS, SC7180_SLAVE_SNOC_CNOC, SC7180_SLAVE_IMEM, SC7180_SLAVE_PIMEM, SC7180_SLAVE_QDSS_STM, SC7180_SLAVE_TCU);
-DEFINE_QNODE(qxm_pimem, SC7180_MASTER_PIMEM, 1, 8, SC7180_SLAVE_SNOC_GEM_NOC_GC, SC7180_SLAVE_IMEM);
-DEFINE_QNODE(qns_a1noc_snoc, SC7180_SLAVE_A1NOC_SNOC, 1, 16, SC7180_MASTER_A1NOC_SNOC);
-DEFINE_QNODE(srvc_aggre1_noc, SC7180_SLAVE_SERVICE_A1NOC, 1, 4);
-DEFINE_QNODE(qns_a2noc_snoc, SC7180_SLAVE_A2NOC_SNOC, 1, 16, SC7180_MASTER_A2NOC_SNOC);
-DEFINE_QNODE(srvc_aggre2_noc, SC7180_SLAVE_SERVICE_A2NOC, 1, 4);
-DEFINE_QNODE(qns_camnoc_uncomp, SC7180_SLAVE_CAMNOC_UNCOMP, 1, 32);
-DEFINE_QNODE(qns_cdsp_gemnoc, SC7180_SLAVE_CDSP_GEM_NOC, 1, 32, SC7180_MASTER_COMPUTE_NOC);
-DEFINE_QNODE(qhs_a1_noc_cfg, SC7180_SLAVE_A1NOC_CFG, 1, 4, SC7180_MASTER_A1NOC_CFG);
-DEFINE_QNODE(qhs_a2_noc_cfg, SC7180_SLAVE_A2NOC_CFG, 1, 4, SC7180_MASTER_A2NOC_CFG);
-DEFINE_QNODE(qhs_ahb2phy0, SC7180_SLAVE_AHB2PHY_SOUTH, 1, 4);
-DEFINE_QNODE(qhs_ahb2phy2, SC7180_SLAVE_AHB2PHY_CENTER, 1, 4);
-DEFINE_QNODE(qhs_aop, SC7180_SLAVE_AOP, 1, 4);
-DEFINE_QNODE(qhs_aoss, SC7180_SLAVE_AOSS, 1, 4);
-DEFINE_QNODE(qhs_boot_rom, SC7180_SLAVE_BOOT_ROM, 1, 4);
-DEFINE_QNODE(qhs_camera_cfg, SC7180_SLAVE_CAMERA_CFG, 1, 4);
-DEFINE_QNODE(qhs_camera_nrt_throttle_cfg, SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG, 1, 4);
-DEFINE_QNODE(qhs_camera_rt_throttle_cfg, SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG, 1, 4);
-DEFINE_QNODE(qhs_clk_ctl, SC7180_SLAVE_CLK_CTL, 1, 4);
-DEFINE_QNODE(qhs_cpr_cx, SC7180_SLAVE_RBCPR_CX_CFG, 1, 4);
-DEFINE_QNODE(qhs_cpr_mx, SC7180_SLAVE_RBCPR_MX_CFG, 1, 4);
-DEFINE_QNODE(qhs_crypto0_cfg, SC7180_SLAVE_CRYPTO_0_CFG, 1, 4);
-DEFINE_QNODE(qhs_dcc_cfg, SC7180_SLAVE_DCC_CFG, 1, 4);
-DEFINE_QNODE(qhs_ddrss_cfg, SC7180_SLAVE_CNOC_DDRSS, 1, 4, SC7180_MASTER_CNOC_DC_NOC);
-DEFINE_QNODE(qhs_display_cfg, SC7180_SLAVE_DISPLAY_CFG, 1, 4);
-DEFINE_QNODE(qhs_display_rt_throttle_cfg, SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG, 1, 4);
-DEFINE_QNODE(qhs_display_throttle_cfg, SC7180_SLAVE_DISPLAY_THROTTLE_CFG, 1, 4);
-DEFINE_QNODE(qhs_emmc_cfg, SC7180_SLAVE_EMMC_CFG, 1, 4);
-DEFINE_QNODE(qhs_glm, SC7180_SLAVE_GLM, 1, 4);
-DEFINE_QNODE(qhs_gpuss_cfg, SC7180_SLAVE_GFX3D_CFG, 1, 8);
-DEFINE_QNODE(qhs_imem_cfg, SC7180_SLAVE_IMEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_ipa, SC7180_SLAVE_IPA_CFG, 1, 4);
-DEFINE_QNODE(qhs_mnoc_cfg, SC7180_SLAVE_CNOC_MNOC_CFG, 1, 4, SC7180_MASTER_CNOC_MNOC_CFG);
-DEFINE_QNODE(qhs_mss_cfg, SC7180_SLAVE_CNOC_MSS, 1, 4);
-DEFINE_QNODE(qhs_npu_cfg, SC7180_SLAVE_NPU_CFG, 1, 4, SC7180_MASTER_NPU_NOC_CFG);
-DEFINE_QNODE(qhs_npu_dma_throttle_cfg, SC7180_SLAVE_NPU_DMA_BWMON_CFG, 1, 4);
-DEFINE_QNODE(qhs_npu_dsp_throttle_cfg, SC7180_SLAVE_NPU_PROC_BWMON_CFG, 1, 4);
-DEFINE_QNODE(qhs_pdm, SC7180_SLAVE_PDM, 1, 4);
-DEFINE_QNODE(qhs_pimem_cfg, SC7180_SLAVE_PIMEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_prng, SC7180_SLAVE_PRNG, 1, 4);
-DEFINE_QNODE(qhs_qdss_cfg, SC7180_SLAVE_QDSS_CFG, 1, 4);
-DEFINE_QNODE(qhs_qm_cfg, SC7180_SLAVE_QM_CFG, 1, 4);
-DEFINE_QNODE(qhs_qm_mpu_cfg, SC7180_SLAVE_QM_MPU_CFG, 1, 4);
-DEFINE_QNODE(qhs_qspi, SC7180_SLAVE_QSPI_0, 1, 4);
-DEFINE_QNODE(qhs_qup0, SC7180_SLAVE_QUP_0, 1, 4);
-DEFINE_QNODE(qhs_qup1, SC7180_SLAVE_QUP_1, 1, 4);
-DEFINE_QNODE(qhs_sdc2, SC7180_SLAVE_SDCC_2, 1, 4);
-DEFINE_QNODE(qhs_security, SC7180_SLAVE_SECURITY, 1, 4);
-DEFINE_QNODE(qhs_snoc_cfg, SC7180_SLAVE_SNOC_CFG, 1, 4, SC7180_MASTER_SNOC_CFG);
-DEFINE_QNODE(qhs_tcsr, SC7180_SLAVE_TCSR, 1, 4);
-DEFINE_QNODE(qhs_tlmm_1, SC7180_SLAVE_TLMM_WEST, 1, 4);
-DEFINE_QNODE(qhs_tlmm_2, SC7180_SLAVE_TLMM_NORTH, 1, 4);
-DEFINE_QNODE(qhs_tlmm_3, SC7180_SLAVE_TLMM_SOUTH, 1, 4);
-DEFINE_QNODE(qhs_ufs_mem_cfg, SC7180_SLAVE_UFS_MEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_usb3, SC7180_SLAVE_USB3, 1, 4);
-DEFINE_QNODE(qhs_venus_cfg, SC7180_SLAVE_VENUS_CFG, 1, 4);
-DEFINE_QNODE(qhs_venus_throttle_cfg, SC7180_SLAVE_VENUS_THROTTLE_CFG, 1, 4);
-DEFINE_QNODE(qhs_vsense_ctrl_cfg, SC7180_SLAVE_VSENSE_CTRL_CFG, 1, 4);
-DEFINE_QNODE(srvc_cnoc, SC7180_SLAVE_SERVICE_CNOC, 1, 4);
-DEFINE_QNODE(qhs_gemnoc, SC7180_SLAVE_GEM_NOC_CFG, 1, 4, SC7180_MASTER_GEM_NOC_CFG);
-DEFINE_QNODE(qhs_llcc, SC7180_SLAVE_LLCC_CFG, 1, 4);
-DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SC7180_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
-DEFINE_QNODE(qns_gem_noc_snoc, SC7180_SLAVE_GEM_NOC_SNOC, 1, 8, SC7180_MASTER_GEM_NOC_SNOC);
-DEFINE_QNODE(qns_llcc, SC7180_SLAVE_LLCC, 1, 16, SC7180_MASTER_LLCC);
-DEFINE_QNODE(srvc_gemnoc, SC7180_SLAVE_SERVICE_GEM_NOC, 1, 4);
-DEFINE_QNODE(ebi, SC7180_SLAVE_EBI1, 2, 4);
-DEFINE_QNODE(qns_mem_noc_hf, SC7180_SLAVE_MNOC_HF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qns_mem_noc_sf, SC7180_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SC7180_MASTER_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(srvc_mnoc, SC7180_SLAVE_SERVICE_MNOC, 1, 4);
-DEFINE_QNODE(qhs_cal_dp0, SC7180_SLAVE_NPU_CAL_DP0, 1, 4);
-DEFINE_QNODE(qhs_cp, SC7180_SLAVE_NPU_CP, 1, 4);
-DEFINE_QNODE(qhs_dma_bwmon, SC7180_SLAVE_NPU_INT_DMA_BWMON_CFG, 1, 4);
-DEFINE_QNODE(qhs_dpm, SC7180_SLAVE_NPU_DPM, 1, 4);
-DEFINE_QNODE(qhs_isense, SC7180_SLAVE_ISENSE_CFG, 1, 4);
-DEFINE_QNODE(qhs_llm, SC7180_SLAVE_NPU_LLM_CFG, 1, 4);
-DEFINE_QNODE(qhs_tcm, SC7180_SLAVE_NPU_TCM, 1, 4);
-DEFINE_QNODE(qns_npu_sys, SC7180_SLAVE_NPU_COMPUTE_NOC, 2, 32);
-DEFINE_QNODE(srvc_noc, SC7180_SLAVE_SERVICE_NPU_NOC, 1, 4);
-DEFINE_QNODE(qup_core_slave_1, SC7180_SLAVE_QUP_CORE_0, 1, 4);
-DEFINE_QNODE(qup_core_slave_2, SC7180_SLAVE_QUP_CORE_1, 1, 4);
-DEFINE_QNODE(qhs_apss, SC7180_SLAVE_APPSS, 1, 8);
-DEFINE_QNODE(qns_cnoc, SC7180_SLAVE_SNOC_CNOC, 1, 8, SC7180_MASTER_SNOC_CNOC);
-DEFINE_QNODE(qns_gemnoc_gc, SC7180_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SC7180_MASTER_SNOC_GC_MEM_NOC);
-DEFINE_QNODE(qns_gemnoc_sf, SC7180_SLAVE_SNOC_GEM_NOC_SF, 1, 16, SC7180_MASTER_SNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxs_imem, SC7180_SLAVE_IMEM, 1, 8);
-DEFINE_QNODE(qxs_pimem, SC7180_SLAVE_PIMEM, 1, 8);
-DEFINE_QNODE(srvc_snoc, SC7180_SLAVE_SERVICE_SNOC, 1, 4);
-DEFINE_QNODE(xs_qdss_stm, SC7180_SLAVE_QDSS_STM, 1, 4);
-DEFINE_QNODE(xs_sys_tcu_cfg, SC7180_SLAVE_TCU, 1, 8);
-
-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
-DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
-DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aop, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_rt_throttle_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_npu_dma_throttle_cfg, &qhs_npu_dsp_throttle_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_tlmm_1, &qhs_tlmm_2, &qhs_tlmm_3, &qhs_ufs_mem_cfg, &qhs_usb3, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
-DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qhm_mnoc_cfg, &qxm_mdp0, &qxm_rot, &qxm_venus0, &qxm_venus_arm9);
-DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);
-DEFINE_QBCM(bcm_mm2, "MM2", false, &qns_mem_noc_sf);
-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qup_core_master_1, &qup_core_master_2);
-DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
-DEFINE_QBCM(bcm_sh4, "SH4", false, &acm_apps0);
-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_gemnoc);
-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
-DEFINE_QBCM(bcm_cn1, "CN1", false, &qhm_qspi, &xm_sdc2, &xm_emmc, &qhs_ahb2phy2, &qhs_emmc_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2);
-DEFINE_QBCM(bcm_sn2, "SN2", false, &qxm_pimem, &qns_gemnoc_gc);
-DEFINE_QBCM(bcm_co2, "CO2", false, &qnm_npu);
-DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
-DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_npu_dsp);
-DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
-DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
-DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_aggre2_noc);
-DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gemnoc);
+static struct qcom_icc_node qhm_a1noc_cfg = {
+ .name = "qhm_a1noc_cfg",
+ .id = SC7180_MASTER_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_SERVICE_A1NOC },
+};
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .id = SC7180_MASTER_QSPI,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup_0 = {
+ .name = "qhm_qup_0",
+ .id = SC7180_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SC7180_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_emmc = {
+ .name = "xm_emmc",
+ .id = SC7180_MASTER_EMMC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SC7180_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_a2noc_cfg = {
+ .name = "qhm_a2noc_cfg",
+ .id = SC7180_MASTER_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_SERVICE_A2NOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SC7180_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup_1 = {
+ .name = "qhm_qup_1",
+ .id = SC7180_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SC7180_MASTER_CRYPTO,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SC7180_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+ .name = "xm_qdss_etr",
+ .id = SC7180_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_usb3 = {
+ .name = "qhm_usb3",
+ .id = SC7180_MASTER_USB3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
+ .name = "qxm_camnoc_hf0_uncomp",
+ .id = SC7180_MASTER_CAMNOC_HF0_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf1_uncomp = {
+ .name = "qxm_camnoc_hf1_uncomp",
+ .id = SC7180_MASTER_CAMNOC_HF1_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
+ .name = "qxm_camnoc_sf_uncomp",
+ .id = SC7180_MASTER_CAMNOC_SF_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qnm_npu = {
+ .name = "qnm_npu",
+ .id = SC7180_MASTER_NPU,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_CDSP_GEM_NOC },
+};
+
+static struct qcom_icc_node qxm_npu_dsp = {
+ .name = "qxm_npu_dsp",
+ .id = SC7180_MASTER_NPU_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_CDSP_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_snoc = {
+ .name = "qnm_snoc",
+ .id = SC7180_MASTER_SNOC_CNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 51,
+ .links = { SC7180_SLAVE_A1NOC_CFG,
+ SC7180_SLAVE_A2NOC_CFG,
+ SC7180_SLAVE_AHB2PHY_SOUTH,
+ SC7180_SLAVE_AHB2PHY_CENTER,
+ SC7180_SLAVE_AOP,
+ SC7180_SLAVE_AOSS,
+ SC7180_SLAVE_BOOT_ROM,
+ SC7180_SLAVE_CAMERA_CFG,
+ SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ SC7180_SLAVE_CLK_CTL,
+ SC7180_SLAVE_RBCPR_CX_CFG,
+ SC7180_SLAVE_RBCPR_MX_CFG,
+ SC7180_SLAVE_CRYPTO_0_CFG,
+ SC7180_SLAVE_DCC_CFG,
+ SC7180_SLAVE_CNOC_DDRSS,
+ SC7180_SLAVE_DISPLAY_CFG,
+ SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG,
+ SC7180_SLAVE_DISPLAY_THROTTLE_CFG,
+ SC7180_SLAVE_EMMC_CFG,
+ SC7180_SLAVE_GLM,
+ SC7180_SLAVE_GFX3D_CFG,
+ SC7180_SLAVE_IMEM_CFG,
+ SC7180_SLAVE_IPA_CFG,
+ SC7180_SLAVE_CNOC_MNOC_CFG,
+ SC7180_SLAVE_CNOC_MSS,
+ SC7180_SLAVE_NPU_CFG,
+ SC7180_SLAVE_NPU_DMA_BWMON_CFG,
+ SC7180_SLAVE_NPU_PROC_BWMON_CFG,
+ SC7180_SLAVE_PDM,
+ SC7180_SLAVE_PIMEM_CFG,
+ SC7180_SLAVE_PRNG,
+ SC7180_SLAVE_QDSS_CFG,
+ SC7180_SLAVE_QM_CFG,
+ SC7180_SLAVE_QM_MPU_CFG,
+ SC7180_SLAVE_QSPI_0,
+ SC7180_SLAVE_QUP_0,
+ SC7180_SLAVE_QUP_1,
+ SC7180_SLAVE_SDCC_2,
+ SC7180_SLAVE_SECURITY,
+ SC7180_SLAVE_SNOC_CFG,
+ SC7180_SLAVE_TCSR,
+ SC7180_SLAVE_TLMM_WEST,
+ SC7180_SLAVE_TLMM_NORTH,
+ SC7180_SLAVE_TLMM_SOUTH,
+ SC7180_SLAVE_UFS_MEM_CFG,
+ SC7180_SLAVE_USB3,
+ SC7180_SLAVE_VENUS_CFG,
+ SC7180_SLAVE_VENUS_THROTTLE_CFG,
+ SC7180_SLAVE_VSENSE_CTRL_CFG,
+ SC7180_SLAVE_SERVICE_CNOC
+ },
+};
+
+static struct qcom_icc_node xm_qdss_dap = {
+ .name = "xm_qdss_dap",
+ .id = SC7180_MASTER_QDSS_DAP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 51,
+ .links = { SC7180_SLAVE_A1NOC_CFG,
+ SC7180_SLAVE_A2NOC_CFG,
+ SC7180_SLAVE_AHB2PHY_SOUTH,
+ SC7180_SLAVE_AHB2PHY_CENTER,
+ SC7180_SLAVE_AOP,
+ SC7180_SLAVE_AOSS,
+ SC7180_SLAVE_BOOT_ROM,
+ SC7180_SLAVE_CAMERA_CFG,
+ SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ SC7180_SLAVE_CLK_CTL,
+ SC7180_SLAVE_RBCPR_CX_CFG,
+ SC7180_SLAVE_RBCPR_MX_CFG,
+ SC7180_SLAVE_CRYPTO_0_CFG,
+ SC7180_SLAVE_DCC_CFG,
+ SC7180_SLAVE_CNOC_DDRSS,
+ SC7180_SLAVE_DISPLAY_CFG,
+ SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG,
+ SC7180_SLAVE_DISPLAY_THROTTLE_CFG,
+ SC7180_SLAVE_EMMC_CFG,
+ SC7180_SLAVE_GLM,
+ SC7180_SLAVE_GFX3D_CFG,
+ SC7180_SLAVE_IMEM_CFG,
+ SC7180_SLAVE_IPA_CFG,
+ SC7180_SLAVE_CNOC_MNOC_CFG,
+ SC7180_SLAVE_CNOC_MSS,
+ SC7180_SLAVE_NPU_CFG,
+ SC7180_SLAVE_NPU_DMA_BWMON_CFG,
+ SC7180_SLAVE_NPU_PROC_BWMON_CFG,
+ SC7180_SLAVE_PDM,
+ SC7180_SLAVE_PIMEM_CFG,
+ SC7180_SLAVE_PRNG,
+ SC7180_SLAVE_QDSS_CFG,
+ SC7180_SLAVE_QM_CFG,
+ SC7180_SLAVE_QM_MPU_CFG,
+ SC7180_SLAVE_QSPI_0,
+ SC7180_SLAVE_QUP_0,
+ SC7180_SLAVE_QUP_1,
+ SC7180_SLAVE_SDCC_2,
+ SC7180_SLAVE_SECURITY,
+ SC7180_SLAVE_SNOC_CFG,
+ SC7180_SLAVE_TCSR,
+ SC7180_SLAVE_TLMM_WEST,
+ SC7180_SLAVE_TLMM_NORTH,
+ SC7180_SLAVE_TLMM_SOUTH,
+ SC7180_SLAVE_UFS_MEM_CFG,
+ SC7180_SLAVE_USB3,
+ SC7180_SLAVE_VENUS_CFG,
+ SC7180_SLAVE_VENUS_THROTTLE_CFG,
+ SC7180_SLAVE_VSENSE_CTRL_CFG,
+ SC7180_SLAVE_SERVICE_CNOC
+ },
+};
+
+static struct qcom_icc_node qhm_cnoc_dc_noc = {
+ .name = "qhm_cnoc_dc_noc",
+ .id = SC7180_MASTER_CNOC_DC_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SC7180_SLAVE_GEM_NOC_CFG,
+ SC7180_SLAVE_LLCC_CFG
+ },
+};
+
+static struct qcom_icc_node acm_apps0 = {
+ .name = "acm_apps0",
+ .id = SC7180_MASTER_APPSS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SC7180_SLAVE_GEM_NOC_SNOC,
+ SC7180_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node acm_sys_tcu = {
+ .name = "acm_sys_tcu",
+ .id = SC7180_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC7180_SLAVE_GEM_NOC_SNOC,
+ SC7180_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qhm_gemnoc_cfg = {
+ .name = "qhm_gemnoc_cfg",
+ .id = SC7180_MASTER_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SC7180_SLAVE_MSS_PROC_MS_MPU_CFG,
+ SC7180_SLAVE_SERVICE_GEM_NOC
+ },
+};
+
+static struct qcom_icc_node qnm_cmpnoc = {
+ .name = "qnm_cmpnoc",
+ .id = SC7180_MASTER_COMPUTE_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC7180_SLAVE_GEM_NOC_SNOC,
+ SC7180_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SC7180_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SC7180_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC7180_SLAVE_GEM_NOC_SNOC,
+ SC7180_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SC7180_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SC7180_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qxm_gpu = {
+ .name = "qxm_gpu",
+ .id = SC7180_MASTER_GFX3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SC7180_SLAVE_GEM_NOC_SNOC,
+ SC7180_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SC7180_MASTER_LLCC,
+ .channels = 2,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qhm_mnoc_cfg = {
+ .name = "qhm_mnoc_cfg",
+ .id = SC7180_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf0 = {
+ .name = "qxm_camnoc_hf0",
+ .id = SC7180_MASTER_CAMNOC_HF0,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf1 = {
+ .name = "qxm_camnoc_hf1",
+ .id = SC7180_MASTER_CAMNOC_HF1,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf = {
+ .name = "qxm_camnoc_sf",
+ .id = SC7180_MASTER_CAMNOC_SF,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp0 = {
+ .name = "qxm_mdp0",
+ .id = SC7180_MASTER_MDP0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_rot = {
+ .name = "qxm_rot",
+ .id = SC7180_MASTER_ROTATOR,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus0 = {
+ .name = "qxm_venus0",
+ .id = SC7180_MASTER_VIDEO_P0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus_arm9 = {
+ .name = "qxm_venus_arm9",
+ .id = SC7180_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node amm_npu_sys = {
+ .name = "amm_npu_sys",
+ .id = SC7180_MASTER_NPU_SYS,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_NPU_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qhm_npu_cfg = {
+ .name = "qhm_npu_cfg",
+ .id = SC7180_MASTER_NPU_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 8,
+ .links = { SC7180_SLAVE_NPU_CAL_DP0,
+ SC7180_SLAVE_NPU_CP,
+ SC7180_SLAVE_NPU_INT_DMA_BWMON_CFG,
+ SC7180_SLAVE_NPU_DPM,
+ SC7180_SLAVE_ISENSE_CFG,
+ SC7180_SLAVE_NPU_LLM_CFG,
+ SC7180_SLAVE_NPU_TCM,
+ SC7180_SLAVE_SERVICE_NPU_NOC
+ },
+};
+
+static struct qcom_icc_node qup_core_master_1 = {
+ .name = "qup_core_master_1",
+ .id = SC7180_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup_core_master_2 = {
+ .name = "qup_core_master_2",
+ .id = SC7180_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qhm_snoc_cfg = {
+ .name = "qhm_snoc_cfg",
+ .id = SC7180_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SC7180_MASTER_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 6,
+ .links = { SC7180_SLAVE_APPSS,
+ SC7180_SLAVE_SNOC_CNOC,
+ SC7180_SLAVE_SNOC_GEM_NOC_SF,
+ SC7180_SLAVE_IMEM,
+ SC7180_SLAVE_PIMEM,
+ SC7180_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SC7180_MASTER_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 7,
+ .links = { SC7180_SLAVE_APPSS,
+ SC7180_SLAVE_SNOC_CNOC,
+ SC7180_SLAVE_SNOC_GEM_NOC_SF,
+ SC7180_SLAVE_IMEM,
+ SC7180_SLAVE_PIMEM,
+ SC7180_SLAVE_QDSS_STM,
+ SC7180_SLAVE_TCU
+ },
+};
+
+static struct qcom_icc_node qnm_gemnoc = {
+ .name = "qnm_gemnoc",
+ .id = SC7180_MASTER_GEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 6,
+ .links = { SC7180_SLAVE_APPSS,
+ SC7180_SLAVE_SNOC_CNOC,
+ SC7180_SLAVE_IMEM,
+ SC7180_SLAVE_PIMEM,
+ SC7180_SLAVE_QDSS_STM,
+ SC7180_SLAVE_TCU
+ },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = SC7180_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SC7180_SLAVE_SNOC_GEM_NOC_GC,
+ SC7180_SLAVE_IMEM
+ },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SC7180_SLAVE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC7180_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre1_noc = {
+ .name = "srvc_aggre1_noc",
+ .id = SC7180_SLAVE_SERVICE_A1NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SC7180_SLAVE_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC7180_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre2_noc = {
+ .name = "srvc_aggre2_noc",
+ .id = SC7180_SLAVE_SERVICE_A2NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_camnoc_uncomp = {
+ .name = "qns_camnoc_uncomp",
+ .id = SC7180_SLAVE_CAMNOC_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node qns_cdsp_gemnoc = {
+ .name = "qns_cdsp_gemnoc",
+ .id = SC7180_SLAVE_CDSP_GEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7180_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qhs_a1_noc_cfg = {
+ .name = "qhs_a1_noc_cfg",
+ .id = SC7180_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_MASTER_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_a2_noc_cfg = {
+ .name = "qhs_a2_noc_cfg",
+ .id = SC7180_SLAVE_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_MASTER_A2NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .id = SC7180_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy2 = {
+ .name = "qhs_ahb2phy2",
+ .id = SC7180_SLAVE_AHB2PHY_CENTER,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aop = {
+ .name = "qhs_aop",
+ .id = SC7180_SLAVE_AOP,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SC7180_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_boot_rom = {
+ .name = "qhs_boot_rom",
+ .id = SC7180_SLAVE_BOOT_ROM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SC7180_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_nrt_throttle_cfg = {
+ .name = "qhs_camera_nrt_throttle_cfg",
+ .id = SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_rt_throttle_cfg = {
+ .name = "qhs_camera_rt_throttle_cfg",
+ .id = SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SC7180_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SC7180_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mx = {
+ .name = "qhs_cpr_mx",
+ .id = SC7180_SLAVE_RBCPR_MX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SC7180_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_dcc_cfg = {
+ .name = "qhs_dcc_cfg",
+ .id = SC7180_SLAVE_DCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ddrss_cfg = {
+ .name = "qhs_ddrss_cfg",
+ .id = SC7180_SLAVE_CNOC_DDRSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_MASTER_CNOC_DC_NOC },
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = SC7180_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display_rt_throttle_cfg = {
+ .name = "qhs_display_rt_throttle_cfg",
+ .id = SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display_throttle_cfg = {
+ .name = "qhs_display_throttle_cfg",
+ .id = SC7180_SLAVE_DISPLAY_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_emmc_cfg = {
+ .name = "qhs_emmc_cfg",
+ .id = SC7180_SLAVE_EMMC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_glm = {
+ .name = "qhs_glm",
+ .id = SC7180_SLAVE_GLM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SC7180_SLAVE_GFX3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SC7180_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SC7180_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mnoc_cfg = {
+ .name = "qhs_mnoc_cfg",
+ .id = SC7180_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_mss_cfg = {
+ .name = "qhs_mss_cfg",
+ .id = SC7180_SLAVE_CNOC_MSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_npu_cfg = {
+ .name = "qhs_npu_cfg",
+ .id = SC7180_SLAVE_NPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_MASTER_NPU_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_npu_dma_throttle_cfg = {
+ .name = "qhs_npu_dma_throttle_cfg",
+ .id = SC7180_SLAVE_NPU_DMA_BWMON_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_npu_dsp_throttle_cfg = {
+ .name = "qhs_npu_dsp_throttle_cfg",
+ .id = SC7180_SLAVE_NPU_PROC_BWMON_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SC7180_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SC7180_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SC7180_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SC7180_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qm_cfg = {
+ .name = "qhs_qm_cfg",
+ .id = SC7180_SLAVE_QM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qm_mpu_cfg = {
+ .name = "qhs_qm_mpu_cfg",
+ .id = SC7180_SLAVE_QM_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .id = SC7180_SLAVE_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .id = SC7180_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .id = SC7180_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SC7180_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_security = {
+ .name = "qhs_security",
+ .id = SC7180_SLAVE_SECURITY,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_snoc_cfg = {
+ .name = "qhs_snoc_cfg",
+ .id = SC7180_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SC7180_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_1 = {
+ .name = "qhs_tlmm_1",
+ .id = SC7180_SLAVE_TLMM_WEST,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_2 = {
+ .name = "qhs_tlmm_2",
+ .id = SC7180_SLAVE_TLMM_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_3 = {
+ .name = "qhs_tlmm_3",
+ .id = SC7180_SLAVE_TLMM_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SC7180_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3 = {
+ .name = "qhs_usb3",
+ .id = SC7180_SLAVE_USB3,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SC7180_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_throttle_cfg = {
+ .name = "qhs_venus_throttle_cfg",
+ .id = SC7180_SLAVE_VENUS_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SC7180_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_cnoc = {
+ .name = "srvc_cnoc",
+ .id = SC7180_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gemnoc = {
+ .name = "qhs_gemnoc",
+ .id = SC7180_SLAVE_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SC7180_MASTER_GEM_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_llcc = {
+ .name = "qhs_llcc",
+ .id = SC7180_SLAVE_LLCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
+ .name = "qhs_mdsp_ms_mpu_cfg",
+ .id = SC7180_SLAVE_MSS_PROC_MS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_gem_noc_snoc = {
+ .name = "qns_gem_noc_snoc",
+ .id = SC7180_SLAVE_GEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7180_MASTER_GEM_NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SC7180_SLAVE_LLCC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC7180_MASTER_LLCC },
+};
+
+static struct qcom_icc_node srvc_gemnoc = {
+ .name = "srvc_gemnoc",
+ .id = SC7180_SLAVE_SERVICE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SC7180_SLAVE_EBI1,
+ .channels = 2,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SC7180_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7180_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .id = SC7180_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SC7180_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SC7180_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cal_dp0 = {
+ .name = "qhs_cal_dp0",
+ .id = SC7180_SLAVE_NPU_CAL_DP0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cp = {
+ .name = "qhs_cp",
+ .id = SC7180_SLAVE_NPU_CP,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_dma_bwmon = {
+ .name = "qhs_dma_bwmon",
+ .id = SC7180_SLAVE_NPU_INT_DMA_BWMON_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_dpm = {
+ .name = "qhs_dpm",
+ .id = SC7180_SLAVE_NPU_DPM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_isense = {
+ .name = "qhs_isense",
+ .id = SC7180_SLAVE_ISENSE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_llm = {
+ .name = "qhs_llm",
+ .id = SC7180_SLAVE_NPU_LLM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tcm = {
+ .name = "qhs_tcm",
+ .id = SC7180_SLAVE_NPU_TCM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_npu_sys = {
+ .name = "qns_npu_sys",
+ .id = SC7180_SLAVE_NPU_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node srvc_noc = {
+ .name = "srvc_noc",
+ .id = SC7180_SLAVE_SERVICE_NPU_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup_core_slave_1 = {
+ .name = "qup_core_slave_1",
+ .id = SC7180_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup_core_slave_2 = {
+ .name = "qup_core_slave_2",
+ .id = SC7180_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SC7180_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qns_cnoc = {
+ .name = "qns_cnoc",
+ .id = SC7180_SLAVE_SNOC_CNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7180_MASTER_SNOC_CNOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_gc = {
+ .name = "qns_gemnoc_gc",
+ .id = SC7180_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SC7180_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SC7180_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SC7180_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SC7180_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = SC7180_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SC7180_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SC7180_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SC7180_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 48,
+ .nodes = { &qnm_snoc,
+ &xm_qdss_dap,
+ &qhs_a1_noc_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_ahb2phy0,
+ &qhs_aop,
+ &qhs_aoss,
+ &qhs_boot_rom,
+ &qhs_camera_cfg,
+ &qhs_camera_nrt_throttle_cfg,
+ &qhs_camera_rt_throttle_cfg,
+ &qhs_clk_ctl,
+ &qhs_cpr_cx,
+ &qhs_cpr_mx,
+ &qhs_crypto0_cfg,
+ &qhs_dcc_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_display_cfg,
+ &qhs_display_rt_throttle_cfg,
+ &qhs_display_throttle_cfg,
+ &qhs_glm,
+ &qhs_gpuss_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mnoc_cfg,
+ &qhs_mss_cfg,
+ &qhs_npu_cfg,
+ &qhs_npu_dma_throttle_cfg,
+ &qhs_npu_dsp_throttle_cfg,
+ &qhs_pimem_cfg,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qm_cfg,
+ &qhs_qm_mpu_cfg,
+ &qhs_qup0,
+ &qhs_qup1,
+ &qhs_security,
+ &qhs_snoc_cfg,
+ &qhs_tcsr,
+ &qhs_tlmm_1,
+ &qhs_tlmm_2,
+ &qhs_tlmm_3,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3,
+ &qhs_venus_cfg,
+ &qhs_venus_throttle_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &srvc_cnoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .keepalive = false,
+ .num_nodes = 8,
+ .nodes = { &qxm_camnoc_hf0_uncomp,
+ &qxm_camnoc_hf1_uncomp,
+ &qxm_camnoc_sf_uncomp,
+ &qhm_mnoc_cfg,
+ &qxm_mdp0,
+ &qxm_rot,
+ &qxm_venus0,
+ &qxm_venus_arm9
+ },
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+ .name = "SH2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &acm_sys_tcu },
+};
+
+static struct qcom_icc_bcm bcm_mm2 = {
+ .name = "MM2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qup_core_master_1, &qup_core_master_2 },
+};
+
+static struct qcom_icc_bcm bcm_sh3 = {
+ .name = "SH3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_cmpnoc },
+};
+
+static struct qcom_icc_bcm bcm_sh4 = {
+ .name = "SH4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &acm_apps0 },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_cdsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_imem },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+ .name = "CN1",
+ .keepalive = false,
+ .num_nodes = 8,
+ .nodes = { &qhm_qspi,
+ &xm_sdc2,
+ &xm_emmc,
+ &qhs_ahb2phy2,
+ &qhs_emmc_cfg,
+ &qhs_pdm,
+ &qhs_qspi,
+ &qhs_sdc2
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qxm_pimem, &qns_gemnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_co2 = {
+ .name = "CO2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_npu },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_pimem },
+};
+
+static struct qcom_icc_bcm bcm_co3 = {
+ .name = "CO3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_npu_dsp },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xs_qdss_stm },
+};
+
+static struct qcom_icc_bcm bcm_sn7 = {
+ .name = "SN7",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn9 = {
+ .name = "SN9",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn12 = {
+ .name = "SN12",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_gemnoc },
+};
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_cn1,
diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c
index 971f538bc98a..6592839b4d94 100644
--- a/drivers/interconnect/qcom/sc7280.c
+++ b/drivers/interconnect/qcom/sc7280.c
@@ -7,8 +7,9 @@
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sc7280.h>
#include "bcm-voter.h"
diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
index c76e3a6a98cd..0fb4898dabcf 100644
--- a/drivers/interconnect/qcom/sc8180x.c
+++ b/drivers/interconnect/qcom/sc8180x.c
@@ -7,7 +7,8 @@
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sc8180x.h>
diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c
index e56df893ec3e..b82c5493cbb5 100644
--- a/drivers/interconnect/qcom/sc8280xp.c
+++ b/drivers/interconnect/qcom/sc8280xp.c
@@ -7,8 +7,9 @@
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sc8280xp.h>
#include "bcm-voter.h"
diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c
index e1aed937c86b..36962f7bd7bb 100644
--- a/drivers/interconnect/qcom/sdm660.c
+++ b/drivers/interconnect/qcom/sdm660.c
@@ -8,9 +8,8 @@
#include <linux/device.h>
#include <linux/interconnect-provider.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/interconnect/qcom/sdm670.c b/drivers/interconnect/qcom/sdm670.c
index bda955035518..540a2108b77c 100644
--- a/drivers/interconnect/qcom/sdm670.c
+++ b/drivers/interconnect/qcom/sdm670.c
@@ -6,155 +6,1260 @@
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sdm670-rpmh.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sdm670.h"
-DEFINE_QNODE(qhm_a1noc_cfg, SDM670_MASTER_A1NOC_CFG, 1, 4, SDM670_SLAVE_SERVICE_A1NOC);
-DEFINE_QNODE(qhm_qup1, SDM670_MASTER_BLSP_1, 1, 4, SDM670_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(qhm_tsif, SDM670_MASTER_TSIF, 1, 4, SDM670_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_emmc, SDM670_MASTER_EMMC, 1, 8, SDM670_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_sdc2, SDM670_MASTER_SDCC_2, 1, 8, SDM670_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_sdc4, SDM670_MASTER_SDCC_4, 1, 8, SDM670_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_ufs_mem, SDM670_MASTER_UFS_MEM, 1, 8, SDM670_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(qhm_a2noc_cfg, SDM670_MASTER_A2NOC_CFG, 1, 4, SDM670_SLAVE_SERVICE_A2NOC);
-DEFINE_QNODE(qhm_qdss_bam, SDM670_MASTER_QDSS_BAM, 1, 4, SDM670_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qhm_qup2, SDM670_MASTER_BLSP_2, 1, 4, SDM670_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qnm_cnoc, SDM670_MASTER_CNOC_A2NOC, 1, 8, SDM670_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qxm_crypto, SDM670_MASTER_CRYPTO_CORE_0, 1, 8, SDM670_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qxm_ipa, SDM670_MASTER_IPA, 1, 8, SDM670_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(xm_qdss_etr, SDM670_MASTER_QDSS_ETR, 1, 8, SDM670_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(xm_usb3_0, SDM670_MASTER_USB3, 1, 8, SDM670_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qxm_camnoc_hf0_uncomp, SDM670_MASTER_CAMNOC_HF0_UNCOMP, 1, 32, SDM670_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qxm_camnoc_hf1_uncomp, SDM670_MASTER_CAMNOC_HF1_UNCOMP, 1, 32, SDM670_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qxm_camnoc_sf_uncomp, SDM670_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SDM670_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qhm_spdm, SDM670_MASTER_SPDM, 1, 4, SDM670_SLAVE_CNOC_A2NOC);
-DEFINE_QNODE(qnm_snoc, SDM670_MASTER_SNOC_CNOC, 1, 8, SDM670_SLAVE_TLMM_SOUTH, SDM670_SLAVE_CAMERA_CFG, SDM670_SLAVE_SDCC_4, SDM670_SLAVE_SDCC_2, SDM670_SLAVE_CNOC_MNOC_CFG, SDM670_SLAVE_UFS_MEM_CFG, SDM670_SLAVE_GLM, SDM670_SLAVE_PDM, SDM670_SLAVE_A2NOC_CFG, SDM670_SLAVE_QDSS_CFG, SDM670_SLAVE_DISPLAY_CFG, SDM670_SLAVE_TCSR, SDM670_SLAVE_DCC_CFG, SDM670_SLAVE_CNOC_DDRSS, SDM670_SLAVE_SNOC_CFG, SDM670_SLAVE_SOUTH_PHY_CFG, SDM670_SLAVE_GRAPHICS_3D_CFG, SDM670_SLAVE_VENUS_CFG, SDM670_SLAVE_TSIF, SDM670_SLAVE_CDSP_CFG, SDM670_SLAVE_AOP, SDM670_SLAVE_BLSP_2, SDM670_SLAVE_SERVICE_CNOC, SDM670_SLAVE_USB3, SDM670_SLAVE_IPA_CFG, SDM670_SLAVE_RBCPR_CX_CFG, SDM670_SLAVE_A1NOC_CFG, SDM670_SLAVE_AOSS, SDM670_SLAVE_PRNG, SDM670_SLAVE_VSENSE_CTRL_CFG, SDM670_SLAVE_EMMC_CFG, SDM670_SLAVE_BLSP_1, SDM670_SLAVE_SPDM_WRAPPER, SDM670_SLAVE_CRYPTO_0_CFG, SDM670_SLAVE_PIMEM_CFG, SDM670_SLAVE_TLMM_NORTH, SDM670_SLAVE_CLK_CTL, SDM670_SLAVE_IMEM_CFG);
-DEFINE_QNODE(qhm_cnoc, SDM670_MASTER_CNOC_DC_NOC, 1, 4, SDM670_SLAVE_MEM_NOC_CFG, SDM670_SLAVE_LLCC_CFG);
-DEFINE_QNODE(acm_l3, SDM670_MASTER_AMPSS_M0, 1, 16, SDM670_SLAVE_SERVICE_GNOC, SDM670_SLAVE_GNOC_SNOC, SDM670_SLAVE_GNOC_MEM_NOC);
-DEFINE_QNODE(pm_gnoc_cfg, SDM670_MASTER_GNOC_CFG, 1, 4, SDM670_SLAVE_SERVICE_GNOC);
-DEFINE_QNODE(llcc_mc, SDM670_MASTER_LLCC, 2, 4, SDM670_SLAVE_EBI_CH0);
-DEFINE_QNODE(acm_tcu, SDM670_MASTER_TCU_0, 1, 8, SDM670_SLAVE_MEM_NOC_GNOC, SDM670_SLAVE_LLCC, SDM670_SLAVE_MEM_NOC_SNOC);
-DEFINE_QNODE(qhm_memnoc_cfg, SDM670_MASTER_MEM_NOC_CFG, 1, 4, SDM670_SLAVE_SERVICE_MEM_NOC, SDM670_SLAVE_MSS_PROC_MS_MPU_CFG);
-DEFINE_QNODE(qnm_apps, SDM670_MASTER_GNOC_MEM_NOC, 2, 32, SDM670_SLAVE_LLCC);
-DEFINE_QNODE(qnm_mnoc_hf, SDM670_MASTER_MNOC_HF_MEM_NOC, 2, 32, SDM670_SLAVE_LLCC);
-DEFINE_QNODE(qnm_mnoc_sf, SDM670_MASTER_MNOC_SF_MEM_NOC, 1, 32, SDM670_SLAVE_MEM_NOC_GNOC, SDM670_SLAVE_LLCC, SDM670_SLAVE_MEM_NOC_SNOC);
-DEFINE_QNODE(qnm_snoc_gc, SDM670_MASTER_SNOC_GC_MEM_NOC, 1, 8, SDM670_SLAVE_LLCC);
-DEFINE_QNODE(qnm_snoc_sf, SDM670_MASTER_SNOC_SF_MEM_NOC, 1, 16, SDM670_SLAVE_MEM_NOC_GNOC, SDM670_SLAVE_LLCC);
-DEFINE_QNODE(qxm_gpu, SDM670_MASTER_GRAPHICS_3D, 2, 32, SDM670_SLAVE_MEM_NOC_GNOC, SDM670_SLAVE_LLCC, SDM670_SLAVE_MEM_NOC_SNOC);
-DEFINE_QNODE(qhm_mnoc_cfg, SDM670_MASTER_CNOC_MNOC_CFG, 1, 4, SDM670_SLAVE_SERVICE_MNOC);
-DEFINE_QNODE(qxm_camnoc_hf0, SDM670_MASTER_CAMNOC_HF0, 1, 32, SDM670_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_camnoc_hf1, SDM670_MASTER_CAMNOC_HF1, 1, 32, SDM670_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_camnoc_sf, SDM670_MASTER_CAMNOC_SF, 1, 32, SDM670_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_mdp0, SDM670_MASTER_MDP_PORT0, 1, 32, SDM670_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_mdp1, SDM670_MASTER_MDP_PORT1, 1, 32, SDM670_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_rot, SDM670_MASTER_ROTATOR, 1, 32, SDM670_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus0, SDM670_MASTER_VIDEO_P0, 1, 32, SDM670_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus1, SDM670_MASTER_VIDEO_P1, 1, 32, SDM670_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus_arm9, SDM670_MASTER_VIDEO_PROC, 1, 8, SDM670_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qhm_snoc_cfg, SDM670_MASTER_SNOC_CFG, 1, 4, SDM670_SLAVE_SERVICE_SNOC);
-DEFINE_QNODE(qnm_aggre1_noc, SDM670_MASTER_A1NOC_SNOC, 1, 16, SDM670_SLAVE_PIMEM, SDM670_SLAVE_SNOC_MEM_NOC_SF, SDM670_SLAVE_OCIMEM, SDM670_SLAVE_APPSS, SDM670_SLAVE_SNOC_CNOC, SDM670_SLAVE_QDSS_STM);
-DEFINE_QNODE(qnm_aggre2_noc, SDM670_MASTER_A2NOC_SNOC, 1, 16, SDM670_SLAVE_PIMEM, SDM670_SLAVE_SNOC_MEM_NOC_SF, SDM670_SLAVE_OCIMEM, SDM670_SLAVE_APPSS, SDM670_SLAVE_SNOC_CNOC, SDM670_SLAVE_TCU, SDM670_SLAVE_QDSS_STM);
-DEFINE_QNODE(qnm_gladiator_sodv, SDM670_MASTER_GNOC_SNOC, 1, 8, SDM670_SLAVE_PIMEM, SDM670_SLAVE_OCIMEM, SDM670_SLAVE_APPSS, SDM670_SLAVE_SNOC_CNOC, SDM670_SLAVE_TCU, SDM670_SLAVE_QDSS_STM);
-DEFINE_QNODE(qnm_memnoc, SDM670_MASTER_MEM_NOC_SNOC, 1, 8, SDM670_SLAVE_OCIMEM, SDM670_SLAVE_APPSS, SDM670_SLAVE_PIMEM, SDM670_SLAVE_SNOC_CNOC, SDM670_SLAVE_QDSS_STM);
-DEFINE_QNODE(qxm_pimem, SDM670_MASTER_PIMEM, 1, 8, SDM670_SLAVE_OCIMEM, SDM670_SLAVE_SNOC_MEM_NOC_GC);
-DEFINE_QNODE(xm_gic, SDM670_MASTER_GIC, 1, 8, SDM670_SLAVE_OCIMEM, SDM670_SLAVE_SNOC_MEM_NOC_GC);
-DEFINE_QNODE(qns_a1noc_snoc, SDM670_SLAVE_A1NOC_SNOC, 1, 16, SDM670_MASTER_A1NOC_SNOC);
-DEFINE_QNODE(srvc_aggre1_noc, SDM670_SLAVE_SERVICE_A1NOC, 1, 4);
-DEFINE_QNODE(qns_a2noc_snoc, SDM670_SLAVE_A2NOC_SNOC, 1, 16, SDM670_MASTER_A2NOC_SNOC);
-DEFINE_QNODE(srvc_aggre2_noc, SDM670_SLAVE_SERVICE_A2NOC, 1, 4);
-DEFINE_QNODE(qns_camnoc_uncomp, SDM670_SLAVE_CAMNOC_UNCOMP, 1, 32);
-DEFINE_QNODE(qhs_a1_noc_cfg, SDM670_SLAVE_A1NOC_CFG, 1, 4, SDM670_MASTER_A1NOC_CFG);
-DEFINE_QNODE(qhs_a2_noc_cfg, SDM670_SLAVE_A2NOC_CFG, 1, 4, SDM670_MASTER_A2NOC_CFG);
-DEFINE_QNODE(qhs_aop, SDM670_SLAVE_AOP, 1, 4);
-DEFINE_QNODE(qhs_aoss, SDM670_SLAVE_AOSS, 1, 4);
-DEFINE_QNODE(qhs_camera_cfg, SDM670_SLAVE_CAMERA_CFG, 1, 4);
-DEFINE_QNODE(qhs_clk_ctl, SDM670_SLAVE_CLK_CTL, 1, 4);
-DEFINE_QNODE(qhs_compute_dsp_cfg, SDM670_SLAVE_CDSP_CFG, 1, 4);
-DEFINE_QNODE(qhs_cpr_cx, SDM670_SLAVE_RBCPR_CX_CFG, 1, 4);
-DEFINE_QNODE(qhs_crypto0_cfg, SDM670_SLAVE_CRYPTO_0_CFG, 1, 4);
-DEFINE_QNODE(qhs_dcc_cfg, SDM670_SLAVE_DCC_CFG, 1, 4, SDM670_MASTER_CNOC_DC_NOC);
-DEFINE_QNODE(qhs_ddrss_cfg, SDM670_SLAVE_CNOC_DDRSS, 1, 4);
-DEFINE_QNODE(qhs_display_cfg, SDM670_SLAVE_DISPLAY_CFG, 1, 4);
-DEFINE_QNODE(qhs_emmc_cfg, SDM670_SLAVE_EMMC_CFG, 1, 4);
-DEFINE_QNODE(qhs_glm, SDM670_SLAVE_GLM, 1, 4);
-DEFINE_QNODE(qhs_gpuss_cfg, SDM670_SLAVE_GRAPHICS_3D_CFG, 1, 8);
-DEFINE_QNODE(qhs_imem_cfg, SDM670_SLAVE_IMEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_ipa, SDM670_SLAVE_IPA_CFG, 1, 4);
-DEFINE_QNODE(qhs_mnoc_cfg, SDM670_SLAVE_CNOC_MNOC_CFG, 1, 4, SDM670_MASTER_CNOC_MNOC_CFG);
-DEFINE_QNODE(qhs_pdm, SDM670_SLAVE_PDM, 1, 4);
-DEFINE_QNODE(qhs_phy_refgen_south, SDM670_SLAVE_SOUTH_PHY_CFG, 1, 4);
-DEFINE_QNODE(qhs_pimem_cfg, SDM670_SLAVE_PIMEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_prng, SDM670_SLAVE_PRNG, 1, 4);
-DEFINE_QNODE(qhs_qdss_cfg, SDM670_SLAVE_QDSS_CFG, 1, 4);
-DEFINE_QNODE(qhs_qupv3_north, SDM670_SLAVE_BLSP_2, 1, 4);
-DEFINE_QNODE(qhs_qupv3_south, SDM670_SLAVE_BLSP_1, 1, 4);
-DEFINE_QNODE(qhs_sdc2, SDM670_SLAVE_SDCC_2, 1, 4);
-DEFINE_QNODE(qhs_sdc4, SDM670_SLAVE_SDCC_4, 1, 4);
-DEFINE_QNODE(qhs_snoc_cfg, SDM670_SLAVE_SNOC_CFG, 1, 4, SDM670_MASTER_SNOC_CFG);
-DEFINE_QNODE(qhs_spdm, SDM670_SLAVE_SPDM_WRAPPER, 1, 4);
-DEFINE_QNODE(qhs_tcsr, SDM670_SLAVE_TCSR, 1, 4);
-DEFINE_QNODE(qhs_tlmm_north, SDM670_SLAVE_TLMM_NORTH, 1, 4);
-DEFINE_QNODE(qhs_tlmm_south, SDM670_SLAVE_TLMM_SOUTH, 1, 4);
-DEFINE_QNODE(qhs_tsif, SDM670_SLAVE_TSIF, 1, 4);
-DEFINE_QNODE(qhs_ufs_mem_cfg, SDM670_SLAVE_UFS_MEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_usb3_0, SDM670_SLAVE_USB3, 1, 4);
-DEFINE_QNODE(qhs_venus_cfg, SDM670_SLAVE_VENUS_CFG, 1, 4);
-DEFINE_QNODE(qhs_vsense_ctrl_cfg, SDM670_SLAVE_VSENSE_CTRL_CFG, 1, 4);
-DEFINE_QNODE(qns_cnoc_a2noc, SDM670_SLAVE_CNOC_A2NOC, 1, 8, SDM670_MASTER_CNOC_A2NOC);
-DEFINE_QNODE(srvc_cnoc, SDM670_SLAVE_SERVICE_CNOC, 1, 4);
-DEFINE_QNODE(qhs_llcc, SDM670_SLAVE_LLCC_CFG, 1, 4);
-DEFINE_QNODE(qhs_memnoc, SDM670_SLAVE_MEM_NOC_CFG, 1, 4, SDM670_MASTER_MEM_NOC_CFG);
-DEFINE_QNODE(qns_gladiator_sodv, SDM670_SLAVE_GNOC_SNOC, 1, 8, SDM670_MASTER_GNOC_SNOC);
-DEFINE_QNODE(qns_gnoc_memnoc, SDM670_SLAVE_GNOC_MEM_NOC, 2, 32, SDM670_MASTER_GNOC_MEM_NOC);
-DEFINE_QNODE(srvc_gnoc, SDM670_SLAVE_SERVICE_GNOC, 1, 4);
-DEFINE_QNODE(ebi, SDM670_SLAVE_EBI_CH0, 2, 4);
-DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SDM670_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
-DEFINE_QNODE(qns_apps_io, SDM670_SLAVE_MEM_NOC_GNOC, 1, 32);
-DEFINE_QNODE(qns_llcc, SDM670_SLAVE_LLCC, 2, 16, SDM670_MASTER_LLCC);
-DEFINE_QNODE(qns_memnoc_snoc, SDM670_SLAVE_MEM_NOC_SNOC, 1, 8, SDM670_MASTER_MEM_NOC_SNOC);
-DEFINE_QNODE(srvc_memnoc, SDM670_SLAVE_SERVICE_MEM_NOC, 1, 4);
-DEFINE_QNODE(qns2_mem_noc, SDM670_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SDM670_MASTER_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qns_mem_noc_hf, SDM670_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SDM670_MASTER_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(srvc_mnoc, SDM670_SLAVE_SERVICE_MNOC, 1, 4);
-DEFINE_QNODE(qhs_apss, SDM670_SLAVE_APPSS, 1, 8);
-DEFINE_QNODE(qns_cnoc, SDM670_SLAVE_SNOC_CNOC, 1, 8, SDM670_MASTER_SNOC_CNOC);
-DEFINE_QNODE(qns_memnoc_gc, SDM670_SLAVE_SNOC_MEM_NOC_GC, 1, 8, SDM670_MASTER_SNOC_GC_MEM_NOC);
-DEFINE_QNODE(qns_memnoc_sf, SDM670_SLAVE_SNOC_MEM_NOC_SF, 1, 16, SDM670_MASTER_SNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxs_imem, SDM670_SLAVE_OCIMEM, 1, 8);
-DEFINE_QNODE(qxs_pimem, SDM670_SLAVE_PIMEM, 1, 8);
-DEFINE_QNODE(srvc_snoc, SDM670_SLAVE_SERVICE_SNOC, 1, 4);
-DEFINE_QNODE(xs_qdss_stm, SDM670_SLAVE_QDSS_STM, 1, 4);
-DEFINE_QNODE(xs_sys_tcu_cfg, SDM670_SLAVE_TCU, 1, 8);
-
-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
-DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
-DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_apps_io);
-DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
-DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_memnoc_snoc);
-DEFINE_QBCM(bcm_mm2, "MM2", false, &qns2_mem_noc);
-DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_tcu);
-DEFINE_QBCM(bcm_mm3, "MM3", false, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
-DEFINE_QBCM(bcm_sh5, "SH5", false, &qnm_apps);
-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_memnoc_sf);
-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
-DEFINE_QBCM(bcm_cn0, "CN0", true, &qhm_spdm, &qnm_snoc, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_emmc_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup1, &qhm_qup2);
-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_memnoc_gc);
-DEFINE_QBCM(bcm_sn3, "SN3", false, &qns_cnoc);
-DEFINE_QBCM(bcm_sn4, "SN4", false, &qxm_pimem, &qxs_pimem);
-DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_qdss_stm);
-DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_aggre1_noc, &srvc_aggre1_noc);
-DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_aggre2_noc, &srvc_aggre2_noc);
-DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_gladiator_sodv, &xm_gic);
-DEFINE_QBCM(bcm_sn13, "SN13", false, &qnm_memnoc);
+static struct qcom_icc_node qhm_a1noc_cfg = {
+ .name = "qhm_a1noc_cfg",
+ .id = SDM670_MASTER_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_SERVICE_A1NOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = SDM670_MASTER_BLSP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_tsif = {
+ .name = "qhm_tsif",
+ .id = SDM670_MASTER_TSIF,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_emmc = {
+ .name = "xm_emmc",
+ .id = SDM670_MASTER_EMMC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SDM670_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SDM670_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SDM670_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_a2noc_cfg = {
+ .name = "qhm_a2noc_cfg",
+ .id = SDM670_MASTER_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_SERVICE_A2NOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SDM670_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .id = SDM670_MASTER_BLSP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_cnoc = {
+ .name = "qnm_cnoc",
+ .id = SDM670_MASTER_CNOC_A2NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SDM670_MASTER_CRYPTO_CORE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SDM670_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+ .name = "xm_qdss_etr",
+ .id = SDM670_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SDM670_MASTER_USB3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
+ .name = "qxm_camnoc_hf0_uncomp",
+ .id = SDM670_MASTER_CAMNOC_HF0_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf1_uncomp = {
+ .name = "qxm_camnoc_hf1_uncomp",
+ .id = SDM670_MASTER_CAMNOC_HF1_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
+ .name = "qxm_camnoc_sf_uncomp",
+ .id = SDM670_MASTER_CAMNOC_SF_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qhm_spdm = {
+ .name = "qhm_spdm",
+ .id = SDM670_MASTER_SPDM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_CNOC_A2NOC },
+};
+
+static struct qcom_icc_node qnm_snoc = {
+ .name = "qnm_snoc",
+ .id = SDM670_MASTER_SNOC_CNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 38,
+ .links = { SDM670_SLAVE_TLMM_SOUTH,
+ SDM670_SLAVE_CAMERA_CFG,
+ SDM670_SLAVE_SDCC_4,
+ SDM670_SLAVE_SDCC_2,
+ SDM670_SLAVE_CNOC_MNOC_CFG,
+ SDM670_SLAVE_UFS_MEM_CFG,
+ SDM670_SLAVE_GLM,
+ SDM670_SLAVE_PDM,
+ SDM670_SLAVE_A2NOC_CFG,
+ SDM670_SLAVE_QDSS_CFG,
+ SDM670_SLAVE_DISPLAY_CFG,
+ SDM670_SLAVE_TCSR,
+ SDM670_SLAVE_DCC_CFG,
+ SDM670_SLAVE_CNOC_DDRSS,
+ SDM670_SLAVE_SNOC_CFG,
+ SDM670_SLAVE_SOUTH_PHY_CFG,
+ SDM670_SLAVE_GRAPHICS_3D_CFG,
+ SDM670_SLAVE_VENUS_CFG,
+ SDM670_SLAVE_TSIF,
+ SDM670_SLAVE_CDSP_CFG,
+ SDM670_SLAVE_AOP,
+ SDM670_SLAVE_BLSP_2,
+ SDM670_SLAVE_SERVICE_CNOC,
+ SDM670_SLAVE_USB3,
+ SDM670_SLAVE_IPA_CFG,
+ SDM670_SLAVE_RBCPR_CX_CFG,
+ SDM670_SLAVE_A1NOC_CFG,
+ SDM670_SLAVE_AOSS,
+ SDM670_SLAVE_PRNG,
+ SDM670_SLAVE_VSENSE_CTRL_CFG,
+ SDM670_SLAVE_EMMC_CFG,
+ SDM670_SLAVE_BLSP_1,
+ SDM670_SLAVE_SPDM_WRAPPER,
+ SDM670_SLAVE_CRYPTO_0_CFG,
+ SDM670_SLAVE_PIMEM_CFG,
+ SDM670_SLAVE_TLMM_NORTH,
+ SDM670_SLAVE_CLK_CTL,
+ SDM670_SLAVE_IMEM_CFG
+ },
+};
+
+static struct qcom_icc_node qhm_cnoc = {
+ .name = "qhm_cnoc",
+ .id = SDM670_MASTER_CNOC_DC_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SDM670_SLAVE_MEM_NOC_CFG,
+ SDM670_SLAVE_LLCC_CFG
+ },
+};
+
+static struct qcom_icc_node acm_l3 = {
+ .name = "acm_l3",
+ .id = SDM670_MASTER_AMPSS_M0,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SDM670_SLAVE_SERVICE_GNOC,
+ SDM670_SLAVE_GNOC_SNOC,
+ SDM670_SLAVE_GNOC_MEM_NOC
+ },
+};
+
+static struct qcom_icc_node pm_gnoc_cfg = {
+ .name = "pm_gnoc_cfg",
+ .id = SDM670_MASTER_GNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_SERVICE_GNOC },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SDM670_MASTER_LLCC,
+ .channels = 2,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_EBI_CH0 },
+};
+
+static struct qcom_icc_node acm_tcu = {
+ .name = "acm_tcu",
+ .id = SDM670_MASTER_TCU_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 3,
+ .links = { SDM670_SLAVE_MEM_NOC_GNOC,
+ SDM670_SLAVE_LLCC,
+ SDM670_SLAVE_MEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qhm_memnoc_cfg = {
+ .name = "qhm_memnoc_cfg",
+ .id = SDM670_MASTER_MEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SDM670_SLAVE_SERVICE_MEM_NOC,
+ SDM670_SLAVE_MSS_PROC_MS_MPU_CFG
+ },
+};
+
+static struct qcom_icc_node qnm_apps = {
+ .name = "qnm_apps",
+ .id = SDM670_MASTER_GNOC_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SDM670_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SDM670_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SDM670_SLAVE_MEM_NOC_GNOC,
+ SDM670_SLAVE_LLCC,
+ SDM670_SLAVE_MEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SDM670_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SDM670_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SDM670_SLAVE_MEM_NOC_GNOC,
+ SDM670_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qxm_gpu = {
+ .name = "qxm_gpu",
+ .id = SDM670_MASTER_GRAPHICS_3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SDM670_SLAVE_MEM_NOC_GNOC,
+ SDM670_SLAVE_LLCC,
+ SDM670_SLAVE_MEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qhm_mnoc_cfg = {
+ .name = "qhm_mnoc_cfg",
+ .id = SDM670_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf0 = {
+ .name = "qxm_camnoc_hf0",
+ .id = SDM670_MASTER_CAMNOC_HF0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf1 = {
+ .name = "qxm_camnoc_hf1",
+ .id = SDM670_MASTER_CAMNOC_HF1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf = {
+ .name = "qxm_camnoc_sf",
+ .id = SDM670_MASTER_CAMNOC_SF,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp0 = {
+ .name = "qxm_mdp0",
+ .id = SDM670_MASTER_MDP_PORT0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp1 = {
+ .name = "qxm_mdp1",
+ .id = SDM670_MASTER_MDP_PORT1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_rot = {
+ .name = "qxm_rot",
+ .id = SDM670_MASTER_ROTATOR,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus0 = {
+ .name = "qxm_venus0",
+ .id = SDM670_MASTER_VIDEO_P0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus1 = {
+ .name = "qxm_venus1",
+ .id = SDM670_MASTER_VIDEO_P1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus_arm9 = {
+ .name = "qxm_venus_arm9",
+ .id = SDM670_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qhm_snoc_cfg = {
+ .name = "qhm_snoc_cfg",
+ .id = SDM670_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SDM670_MASTER_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 6,
+ .links = { SDM670_SLAVE_PIMEM,
+ SDM670_SLAVE_SNOC_MEM_NOC_SF,
+ SDM670_SLAVE_OCIMEM,
+ SDM670_SLAVE_APPSS,
+ SDM670_SLAVE_SNOC_CNOC,
+ SDM670_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SDM670_MASTER_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 7,
+ .links = { SDM670_SLAVE_PIMEM,
+ SDM670_SLAVE_SNOC_MEM_NOC_SF,
+ SDM670_SLAVE_OCIMEM,
+ SDM670_SLAVE_APPSS,
+ SDM670_SLAVE_SNOC_CNOC,
+ SDM670_SLAVE_TCU,
+ SDM670_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qnm_gladiator_sodv = {
+ .name = "qnm_gladiator_sodv",
+ .id = SDM670_MASTER_GNOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 6,
+ .links = { SDM670_SLAVE_PIMEM,
+ SDM670_SLAVE_OCIMEM,
+ SDM670_SLAVE_APPSS,
+ SDM670_SLAVE_SNOC_CNOC,
+ SDM670_SLAVE_TCU,
+ SDM670_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qnm_memnoc = {
+ .name = "qnm_memnoc",
+ .id = SDM670_MASTER_MEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 5,
+ .links = { SDM670_SLAVE_OCIMEM,
+ SDM670_SLAVE_APPSS,
+ SDM670_SLAVE_PIMEM,
+ SDM670_SLAVE_SNOC_CNOC,
+ SDM670_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = SDM670_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SDM670_SLAVE_OCIMEM,
+ SDM670_SLAVE_SNOC_MEM_NOC_GC
+ },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SDM670_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SDM670_SLAVE_OCIMEM,
+ SDM670_SLAVE_SNOC_MEM_NOC_GC
+ },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SDM670_SLAVE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDM670_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre1_noc = {
+ .name = "srvc_aggre1_noc",
+ .id = SDM670_SLAVE_SERVICE_A1NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SDM670_SLAVE_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDM670_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre2_noc = {
+ .name = "srvc_aggre2_noc",
+ .id = SDM670_SLAVE_SERVICE_A2NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_camnoc_uncomp = {
+ .name = "qns_camnoc_uncomp",
+ .id = SDM670_SLAVE_CAMNOC_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node qhs_a1_noc_cfg = {
+ .name = "qhs_a1_noc_cfg",
+ .id = SDM670_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_MASTER_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_a2_noc_cfg = {
+ .name = "qhs_a2_noc_cfg",
+ .id = SDM670_SLAVE_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_MASTER_A2NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_aop = {
+ .name = "qhs_aop",
+ .id = SDM670_SLAVE_AOP,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SDM670_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SDM670_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SDM670_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_compute_dsp_cfg = {
+ .name = "qhs_compute_dsp_cfg",
+ .id = SDM670_SLAVE_CDSP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SDM670_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SDM670_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_dcc_cfg = {
+ .name = "qhs_dcc_cfg",
+ .id = SDM670_SLAVE_DCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_MASTER_CNOC_DC_NOC },
+};
+
+static struct qcom_icc_node qhs_ddrss_cfg = {
+ .name = "qhs_ddrss_cfg",
+ .id = SDM670_SLAVE_CNOC_DDRSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = SDM670_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_emmc_cfg = {
+ .name = "qhs_emmc_cfg",
+ .id = SDM670_SLAVE_EMMC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_glm = {
+ .name = "qhs_glm",
+ .id = SDM670_SLAVE_GLM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SDM670_SLAVE_GRAPHICS_3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SDM670_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SDM670_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mnoc_cfg = {
+ .name = "qhs_mnoc_cfg",
+ .id = SDM670_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SDM670_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_phy_refgen_south = {
+ .name = "qhs_phy_refgen_south",
+ .id = SDM670_SLAVE_SOUTH_PHY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SDM670_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SDM670_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SDM670_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qupv3_north = {
+ .name = "qhs_qupv3_north",
+ .id = SDM670_SLAVE_BLSP_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qupv3_south = {
+ .name = "qhs_qupv3_south",
+ .id = SDM670_SLAVE_BLSP_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SDM670_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SDM670_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_snoc_cfg = {
+ .name = "qhs_snoc_cfg",
+ .id = SDM670_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_spdm = {
+ .name = "qhs_spdm",
+ .id = SDM670_SLAVE_SPDM_WRAPPER,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SDM670_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_north = {
+ .name = "qhs_tlmm_north",
+ .id = SDM670_SLAVE_TLMM_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_south = {
+ .name = "qhs_tlmm_south",
+ .id = SDM670_SLAVE_TLMM_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tsif = {
+ .name = "qhs_tsif",
+ .id = SDM670_SLAVE_TSIF,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SDM670_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SDM670_SLAVE_USB3,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SDM670_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SDM670_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_cnoc_a2noc = {
+ .name = "qns_cnoc_a2noc",
+ .id = SDM670_SLAVE_CNOC_A2NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM670_MASTER_CNOC_A2NOC },
+};
+
+static struct qcom_icc_node srvc_cnoc = {
+ .name = "srvc_cnoc",
+ .id = SDM670_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_llcc = {
+ .name = "qhs_llcc",
+ .id = SDM670_SLAVE_LLCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_memnoc = {
+ .name = "qhs_memnoc",
+ .id = SDM670_SLAVE_MEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM670_MASTER_MEM_NOC_CFG },
+};
+
+static struct qcom_icc_node qns_gladiator_sodv = {
+ .name = "qns_gladiator_sodv",
+ .id = SDM670_SLAVE_GNOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM670_MASTER_GNOC_SNOC },
+};
+
+static struct qcom_icc_node qns_gnoc_memnoc = {
+ .name = "qns_gnoc_memnoc",
+ .id = SDM670_SLAVE_GNOC_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM670_MASTER_GNOC_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_gnoc = {
+ .name = "srvc_gnoc",
+ .id = SDM670_SLAVE_SERVICE_GNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SDM670_SLAVE_EBI_CH0,
+ .channels = 2,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
+ .name = "qhs_mdsp_ms_mpu_cfg",
+ .id = SDM670_SLAVE_MSS_PROC_MS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_apps_io = {
+ .name = "qns_apps_io",
+ .id = SDM670_SLAVE_MEM_NOC_GNOC,
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SDM670_SLAVE_LLCC,
+ .channels = 2,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDM670_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_memnoc_snoc = {
+ .name = "qns_memnoc_snoc",
+ .id = SDM670_SLAVE_MEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM670_MASTER_MEM_NOC_SNOC },
+};
+
+static struct qcom_icc_node srvc_memnoc = {
+ .name = "srvc_memnoc",
+ .id = SDM670_SLAVE_SERVICE_MEM_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns2_mem_noc = {
+ .name = "qns2_mem_noc",
+ .id = SDM670_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM670_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SDM670_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM670_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SDM670_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SDM670_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qns_cnoc = {
+ .name = "qns_cnoc",
+ .id = SDM670_SLAVE_SNOC_CNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM670_MASTER_SNOC_CNOC },
+};
+
+static struct qcom_icc_node qns_memnoc_gc = {
+ .name = "qns_memnoc_gc",
+ .id = SDM670_SLAVE_SNOC_MEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM670_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_memnoc_sf = {
+ .name = "qns_memnoc_sf",
+ .id = SDM670_SLAVE_SNOC_MEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDM670_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SDM670_SLAVE_OCIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = SDM670_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SDM670_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SDM670_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SDM670_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+ .name = "SH1",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_apps_io },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .keepalive = true,
+ .num_nodes = 7,
+ .nodes = { &qxm_camnoc_hf0_uncomp,
+ &qxm_camnoc_hf1_uncomp,
+ &qxm_camnoc_sf_uncomp,
+ &qxm_camnoc_hf0,
+ &qxm_camnoc_hf1,
+ &qxm_mdp0,
+ &qxm_mdp1
+ },
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+ .name = "SH2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_memnoc_snoc },
+};
+
+static struct qcom_icc_bcm bcm_mm2 = {
+ .name = "MM2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns2_mem_noc },
+};
+
+static struct qcom_icc_bcm bcm_sh3 = {
+ .name = "SH3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &acm_tcu },
+};
+
+static struct qcom_icc_bcm bcm_mm3 = {
+ .name = "MM3",
+ .keepalive = false,
+ .num_nodes = 5,
+ .nodes = { &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9 },
+};
+
+static struct qcom_icc_bcm bcm_sh5 = {
+ .name = "SH5",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_apps },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_memnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 41,
+ .nodes = { &qhm_spdm,
+ &qnm_snoc,
+ &qhs_a1_noc_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_aop,
+ &qhs_aoss,
+ &qhs_camera_cfg,
+ &qhs_clk_ctl,
+ &qhs_compute_dsp_cfg,
+ &qhs_cpr_cx,
+ &qhs_crypto0_cfg,
+ &qhs_dcc_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_display_cfg,
+ &qhs_emmc_cfg,
+ &qhs_glm,
+ &qhs_gpuss_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mnoc_cfg,
+ &qhs_pdm,
+ &qhs_phy_refgen_south,
+ &qhs_pimem_cfg,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qupv3_north,
+ &qhs_qupv3_south,
+ &qhs_sdc2,
+ &qhs_sdc4,
+ &qhs_snoc_cfg,
+ &qhs_spdm,
+ &qhs_tcsr,
+ &qhs_tlmm_north,
+ &qhs_tlmm_south,
+ &qhs_tsif,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3_0,
+ &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &qns_cnoc_a2noc,
+ &srvc_cnoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qhm_qup1, &qhm_qup2 },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_imem },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_memnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_cnoc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qxm_pimem, &qxs_pimem },
+};
+
+static struct qcom_icc_bcm bcm_sn5 = {
+ .name = "SN5",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xs_qdss_stm },
+};
+
+static struct qcom_icc_bcm bcm_sn8 = {
+ .name = "SN8",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qnm_aggre1_noc, &srvc_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn10 = {
+ .name = "SN10",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qnm_aggre2_noc, &srvc_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn11 = {
+ .name = "SN11",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qnm_gladiator_sodv, &xm_gic },
+};
+
+static struct qcom_icc_bcm bcm_sn13 = {
+ .name = "SN13",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_memnoc },
+};
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_qup0,
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index 954e7bd13fc4..b9243c0aa626 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -7,7 +7,8 @@
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sdm845.h>
@@ -15,165 +16,1502 @@
#include "icc-rpmh.h"
#include "sdm845.h"
-DEFINE_QNODE(qhm_a1noc_cfg, SDM845_MASTER_A1NOC_CFG, 1, 4, SDM845_SLAVE_SERVICE_A1NOC);
-DEFINE_QNODE(qhm_qup1, SDM845_MASTER_BLSP_1, 1, 4, SDM845_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(qhm_tsif, SDM845_MASTER_TSIF, 1, 4, SDM845_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_sdc2, SDM845_MASTER_SDCC_2, 1, 8, SDM845_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_sdc4, SDM845_MASTER_SDCC_4, 1, 8, SDM845_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_ufs_card, SDM845_MASTER_UFS_CARD, 1, 8, SDM845_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_ufs_mem, SDM845_MASTER_UFS_MEM, 1, 8, SDM845_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_pcie_0, SDM845_MASTER_PCIE_0, 1, 8, SDM845_SLAVE_ANOC_PCIE_A1NOC_SNOC);
-DEFINE_QNODE(qhm_a2noc_cfg, SDM845_MASTER_A2NOC_CFG, 1, 4, SDM845_SLAVE_SERVICE_A2NOC);
-DEFINE_QNODE(qhm_qdss_bam, SDM845_MASTER_QDSS_BAM, 1, 4, SDM845_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qhm_qup2, SDM845_MASTER_BLSP_2, 1, 4, SDM845_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qnm_cnoc, SDM845_MASTER_CNOC_A2NOC, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qxm_crypto, SDM845_MASTER_CRYPTO, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qxm_ipa, SDM845_MASTER_IPA, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(xm_pcie3_1, SDM845_MASTER_PCIE_1, 1, 8, SDM845_SLAVE_ANOC_PCIE_SNOC);
-DEFINE_QNODE(xm_qdss_etr, SDM845_MASTER_QDSS_ETR, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(xm_usb3_0, SDM845_MASTER_USB3_0, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(xm_usb3_1, SDM845_MASTER_USB3_1, 1, 8, SDM845_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qxm_camnoc_hf0_uncomp, SDM845_MASTER_CAMNOC_HF0_UNCOMP, 1, 32, SDM845_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qxm_camnoc_hf1_uncomp, SDM845_MASTER_CAMNOC_HF1_UNCOMP, 1, 32, SDM845_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qxm_camnoc_sf_uncomp, SDM845_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SDM845_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qhm_spdm, SDM845_MASTER_SPDM, 1, 4, SDM845_SLAVE_CNOC_A2NOC);
-DEFINE_QNODE(qhm_tic, SDM845_MASTER_TIC, 1, 4, SDM845_SLAVE_A1NOC_CFG, SDM845_SLAVE_A2NOC_CFG, SDM845_SLAVE_AOP, SDM845_SLAVE_AOSS, SDM845_SLAVE_CAMERA_CFG, SDM845_SLAVE_CLK_CTL, SDM845_SLAVE_CDSP_CFG, SDM845_SLAVE_RBCPR_CX_CFG, SDM845_SLAVE_CRYPTO_0_CFG, SDM845_SLAVE_DCC_CFG, SDM845_SLAVE_CNOC_DDRSS, SDM845_SLAVE_DISPLAY_CFG, SDM845_SLAVE_GLM, SDM845_SLAVE_GFX3D_CFG, SDM845_SLAVE_IMEM_CFG, SDM845_SLAVE_IPA_CFG, SDM845_SLAVE_CNOC_MNOC_CFG, SDM845_SLAVE_PCIE_0_CFG, SDM845_SLAVE_PCIE_1_CFG, SDM845_SLAVE_PDM, SDM845_SLAVE_SOUTH_PHY_CFG, SDM845_SLAVE_PIMEM_CFG, SDM845_SLAVE_PRNG, SDM845_SLAVE_QDSS_CFG, SDM845_SLAVE_BLSP_2, SDM845_SLAVE_BLSP_1, SDM845_SLAVE_SDCC_2, SDM845_SLAVE_SDCC_4, SDM845_SLAVE_SNOC_CFG, SDM845_SLAVE_SPDM_WRAPPER, SDM845_SLAVE_SPSS_CFG, SDM845_SLAVE_TCSR, SDM845_SLAVE_TLMM_NORTH, SDM845_SLAVE_TLMM_SOUTH, SDM845_SLAVE_TSIF, SDM845_SLAVE_UFS_CARD_CFG, SDM845_SLAVE_UFS_MEM_CFG, SDM845_SLAVE_USB3_0, SDM845_SLAVE_USB3_1, SDM845_SLAVE_VENUS_CFG, SDM845_SLAVE_VSENSE_CTRL_CFG, SDM845_SLAVE_CNOC_A2NOC, SDM845_SLAVE_SERVICE_CNOC);
-DEFINE_QNODE(qnm_snoc, SDM845_MASTER_SNOC_CNOC, 1, 8, SDM845_SLAVE_A1NOC_CFG, SDM845_SLAVE_A2NOC_CFG, SDM845_SLAVE_AOP, SDM845_SLAVE_AOSS, SDM845_SLAVE_CAMERA_CFG, SDM845_SLAVE_CLK_CTL, SDM845_SLAVE_CDSP_CFG, SDM845_SLAVE_RBCPR_CX_CFG, SDM845_SLAVE_CRYPTO_0_CFG, SDM845_SLAVE_DCC_CFG, SDM845_SLAVE_CNOC_DDRSS, SDM845_SLAVE_DISPLAY_CFG, SDM845_SLAVE_GLM, SDM845_SLAVE_GFX3D_CFG, SDM845_SLAVE_IMEM_CFG, SDM845_SLAVE_IPA_CFG, SDM845_SLAVE_CNOC_MNOC_CFG, SDM845_SLAVE_PCIE_0_CFG, SDM845_SLAVE_PCIE_1_CFG, SDM845_SLAVE_PDM, SDM845_SLAVE_SOUTH_PHY_CFG, SDM845_SLAVE_PIMEM_CFG, SDM845_SLAVE_PRNG, SDM845_SLAVE_QDSS_CFG, SDM845_SLAVE_BLSP_2, SDM845_SLAVE_BLSP_1, SDM845_SLAVE_SDCC_2, SDM845_SLAVE_SDCC_4, SDM845_SLAVE_SNOC_CFG, SDM845_SLAVE_SPDM_WRAPPER, SDM845_SLAVE_SPSS_CFG, SDM845_SLAVE_TCSR, SDM845_SLAVE_TLMM_NORTH, SDM845_SLAVE_TLMM_SOUTH, SDM845_SLAVE_TSIF, SDM845_SLAVE_UFS_CARD_CFG, SDM845_SLAVE_UFS_MEM_CFG, SDM845_SLAVE_USB3_0, SDM845_SLAVE_USB3_1, SDM845_SLAVE_VENUS_CFG, SDM845_SLAVE_VSENSE_CTRL_CFG, SDM845_SLAVE_SERVICE_CNOC);
-DEFINE_QNODE(xm_qdss_dap, SDM845_MASTER_QDSS_DAP, 1, 8, SDM845_SLAVE_A1NOC_CFG, SDM845_SLAVE_A2NOC_CFG, SDM845_SLAVE_AOP, SDM845_SLAVE_AOSS, SDM845_SLAVE_CAMERA_CFG, SDM845_SLAVE_CLK_CTL, SDM845_SLAVE_CDSP_CFG, SDM845_SLAVE_RBCPR_CX_CFG, SDM845_SLAVE_CRYPTO_0_CFG, SDM845_SLAVE_DCC_CFG, SDM845_SLAVE_CNOC_DDRSS, SDM845_SLAVE_DISPLAY_CFG, SDM845_SLAVE_GLM, SDM845_SLAVE_GFX3D_CFG, SDM845_SLAVE_IMEM_CFG, SDM845_SLAVE_IPA_CFG, SDM845_SLAVE_CNOC_MNOC_CFG, SDM845_SLAVE_PCIE_0_CFG, SDM845_SLAVE_PCIE_1_CFG, SDM845_SLAVE_PDM, SDM845_SLAVE_SOUTH_PHY_CFG, SDM845_SLAVE_PIMEM_CFG, SDM845_SLAVE_PRNG, SDM845_SLAVE_QDSS_CFG, SDM845_SLAVE_BLSP_2, SDM845_SLAVE_BLSP_1, SDM845_SLAVE_SDCC_2, SDM845_SLAVE_SDCC_4, SDM845_SLAVE_SNOC_CFG, SDM845_SLAVE_SPDM_WRAPPER, SDM845_SLAVE_SPSS_CFG, SDM845_SLAVE_TCSR, SDM845_SLAVE_TLMM_NORTH, SDM845_SLAVE_TLMM_SOUTH, SDM845_SLAVE_TSIF, SDM845_SLAVE_UFS_CARD_CFG, SDM845_SLAVE_UFS_MEM_CFG, SDM845_SLAVE_USB3_0, SDM845_SLAVE_USB3_1, SDM845_SLAVE_VENUS_CFG, SDM845_SLAVE_VSENSE_CTRL_CFG, SDM845_SLAVE_CNOC_A2NOC, SDM845_SLAVE_SERVICE_CNOC);
-DEFINE_QNODE(qhm_cnoc, SDM845_MASTER_CNOC_DC_NOC, 1, 4, SDM845_SLAVE_LLCC_CFG, SDM845_SLAVE_MEM_NOC_CFG);
-DEFINE_QNODE(acm_l3, SDM845_MASTER_APPSS_PROC, 1, 16, SDM845_SLAVE_GNOC_SNOC, SDM845_SLAVE_GNOC_MEM_NOC, SDM845_SLAVE_SERVICE_GNOC);
-DEFINE_QNODE(pm_gnoc_cfg, SDM845_MASTER_GNOC_CFG, 1, 4, SDM845_SLAVE_SERVICE_GNOC);
-DEFINE_QNODE(llcc_mc, SDM845_MASTER_LLCC, 4, 4, SDM845_SLAVE_EBI1);
-DEFINE_QNODE(acm_tcu, SDM845_MASTER_TCU_0, 1, 8, SDM845_SLAVE_MEM_NOC_GNOC, SDM845_SLAVE_LLCC, SDM845_SLAVE_MEM_NOC_SNOC);
-DEFINE_QNODE(qhm_memnoc_cfg, SDM845_MASTER_MEM_NOC_CFG, 1, 4, SDM845_SLAVE_MSS_PROC_MS_MPU_CFG, SDM845_SLAVE_SERVICE_MEM_NOC);
-DEFINE_QNODE(qnm_apps, SDM845_MASTER_GNOC_MEM_NOC, 2, 32, SDM845_SLAVE_LLCC);
-DEFINE_QNODE(qnm_mnoc_hf, SDM845_MASTER_MNOC_HF_MEM_NOC, 2, 32, SDM845_SLAVE_MEM_NOC_GNOC, SDM845_SLAVE_LLCC);
-DEFINE_QNODE(qnm_mnoc_sf, SDM845_MASTER_MNOC_SF_MEM_NOC, 1, 32, SDM845_SLAVE_MEM_NOC_GNOC, SDM845_SLAVE_LLCC, SDM845_SLAVE_MEM_NOC_SNOC);
-DEFINE_QNODE(qnm_snoc_gc, SDM845_MASTER_SNOC_GC_MEM_NOC, 1, 8, SDM845_SLAVE_LLCC);
-DEFINE_QNODE(qnm_snoc_sf, SDM845_MASTER_SNOC_SF_MEM_NOC, 1, 16, SDM845_SLAVE_MEM_NOC_GNOC, SDM845_SLAVE_LLCC);
-DEFINE_QNODE(qxm_gpu, SDM845_MASTER_GFX3D, 2, 32, SDM845_SLAVE_MEM_NOC_GNOC, SDM845_SLAVE_LLCC, SDM845_SLAVE_MEM_NOC_SNOC);
-DEFINE_QNODE(qhm_mnoc_cfg, SDM845_MASTER_CNOC_MNOC_CFG, 1, 4, SDM845_SLAVE_SERVICE_MNOC);
-DEFINE_QNODE(qxm_camnoc_hf0, SDM845_MASTER_CAMNOC_HF0, 1, 32, SDM845_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_camnoc_hf1, SDM845_MASTER_CAMNOC_HF1, 1, 32, SDM845_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_camnoc_sf, SDM845_MASTER_CAMNOC_SF, 1, 32, SDM845_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_mdp0, SDM845_MASTER_MDP0, 1, 32, SDM845_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_mdp1, SDM845_MASTER_MDP1, 1, 32, SDM845_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_rot, SDM845_MASTER_ROTATOR, 1, 32, SDM845_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus0, SDM845_MASTER_VIDEO_P0, 1, 32, SDM845_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus1, SDM845_MASTER_VIDEO_P1, 1, 32, SDM845_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus_arm9, SDM845_MASTER_VIDEO_PROC, 1, 8, SDM845_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qhm_snoc_cfg, SDM845_MASTER_SNOC_CFG, 1, 4, SDM845_SLAVE_SERVICE_SNOC);
-DEFINE_QNODE(qnm_aggre1_noc, SDM845_MASTER_A1NOC_SNOC, 1, 16, SDM845_SLAVE_APPSS, SDM845_SLAVE_SNOC_CNOC, SDM845_SLAVE_SNOC_MEM_NOC_SF, SDM845_SLAVE_IMEM, SDM845_SLAVE_PIMEM, SDM845_SLAVE_QDSS_STM);
-DEFINE_QNODE(qnm_aggre2_noc, SDM845_MASTER_A2NOC_SNOC, 1, 16, SDM845_SLAVE_APPSS, SDM845_SLAVE_SNOC_CNOC, SDM845_SLAVE_SNOC_MEM_NOC_SF, SDM845_SLAVE_IMEM, SDM845_SLAVE_PCIE_0, SDM845_SLAVE_PCIE_1, SDM845_SLAVE_PIMEM, SDM845_SLAVE_QDSS_STM, SDM845_SLAVE_TCU);
-DEFINE_QNODE(qnm_gladiator_sodv, SDM845_MASTER_GNOC_SNOC, 1, 8, SDM845_SLAVE_APPSS, SDM845_SLAVE_SNOC_CNOC, SDM845_SLAVE_IMEM, SDM845_SLAVE_PCIE_0, SDM845_SLAVE_PCIE_1, SDM845_SLAVE_PIMEM, SDM845_SLAVE_QDSS_STM, SDM845_SLAVE_TCU);
-DEFINE_QNODE(qnm_memnoc, SDM845_MASTER_MEM_NOC_SNOC, 1, 8, SDM845_SLAVE_APPSS, SDM845_SLAVE_SNOC_CNOC, SDM845_SLAVE_IMEM, SDM845_SLAVE_PIMEM, SDM845_SLAVE_QDSS_STM);
-DEFINE_QNODE(qnm_pcie_anoc, SDM845_MASTER_ANOC_PCIE_SNOC, 1, 16, SDM845_SLAVE_APPSS, SDM845_SLAVE_SNOC_CNOC, SDM845_SLAVE_SNOC_MEM_NOC_SF, SDM845_SLAVE_IMEM, SDM845_SLAVE_QDSS_STM);
-DEFINE_QNODE(qxm_pimem, SDM845_MASTER_PIMEM, 1, 8, SDM845_SLAVE_SNOC_MEM_NOC_GC, SDM845_SLAVE_IMEM);
-DEFINE_QNODE(xm_gic, SDM845_MASTER_GIC, 1, 8, SDM845_SLAVE_SNOC_MEM_NOC_GC, SDM845_SLAVE_IMEM);
-DEFINE_QNODE(qns_a1noc_snoc, SDM845_SLAVE_A1NOC_SNOC, 1, 16, SDM845_MASTER_A1NOC_SNOC);
-DEFINE_QNODE(srvc_aggre1_noc, SDM845_SLAVE_SERVICE_A1NOC, 1, 4, 0);
-DEFINE_QNODE(qns_pcie_a1noc_snoc, SDM845_SLAVE_ANOC_PCIE_A1NOC_SNOC, 1, 16, SDM845_MASTER_ANOC_PCIE_SNOC);
-DEFINE_QNODE(qns_a2noc_snoc, SDM845_SLAVE_A2NOC_SNOC, 1, 16, SDM845_MASTER_A2NOC_SNOC);
-DEFINE_QNODE(qns_pcie_snoc, SDM845_SLAVE_ANOC_PCIE_SNOC, 1, 16, SDM845_MASTER_ANOC_PCIE_SNOC);
-DEFINE_QNODE(srvc_aggre2_noc, SDM845_SLAVE_SERVICE_A2NOC, 1, 4);
-DEFINE_QNODE(qns_camnoc_uncomp, SDM845_SLAVE_CAMNOC_UNCOMP, 1, 32);
-DEFINE_QNODE(qhs_a1_noc_cfg, SDM845_SLAVE_A1NOC_CFG, 1, 4, SDM845_MASTER_A1NOC_CFG);
-DEFINE_QNODE(qhs_a2_noc_cfg, SDM845_SLAVE_A2NOC_CFG, 1, 4, SDM845_MASTER_A2NOC_CFG);
-DEFINE_QNODE(qhs_aop, SDM845_SLAVE_AOP, 1, 4);
-DEFINE_QNODE(qhs_aoss, SDM845_SLAVE_AOSS, 1, 4);
-DEFINE_QNODE(qhs_camera_cfg, SDM845_SLAVE_CAMERA_CFG, 1, 4);
-DEFINE_QNODE(qhs_clk_ctl, SDM845_SLAVE_CLK_CTL, 1, 4);
-DEFINE_QNODE(qhs_compute_dsp_cfg, SDM845_SLAVE_CDSP_CFG, 1, 4);
-DEFINE_QNODE(qhs_cpr_cx, SDM845_SLAVE_RBCPR_CX_CFG, 1, 4);
-DEFINE_QNODE(qhs_crypto0_cfg, SDM845_SLAVE_CRYPTO_0_CFG, 1, 4);
-DEFINE_QNODE(qhs_dcc_cfg, SDM845_SLAVE_DCC_CFG, 1, 4, SDM845_MASTER_CNOC_DC_NOC);
-DEFINE_QNODE(qhs_ddrss_cfg, SDM845_SLAVE_CNOC_DDRSS, 1, 4);
-DEFINE_QNODE(qhs_display_cfg, SDM845_SLAVE_DISPLAY_CFG, 1, 4);
-DEFINE_QNODE(qhs_glm, SDM845_SLAVE_GLM, 1, 4);
-DEFINE_QNODE(qhs_gpuss_cfg, SDM845_SLAVE_GFX3D_CFG, 1, 8);
-DEFINE_QNODE(qhs_imem_cfg, SDM845_SLAVE_IMEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_ipa, SDM845_SLAVE_IPA_CFG, 1, 4);
-DEFINE_QNODE(qhs_mnoc_cfg, SDM845_SLAVE_CNOC_MNOC_CFG, 1, 4, SDM845_MASTER_CNOC_MNOC_CFG);
-DEFINE_QNODE(qhs_pcie0_cfg, SDM845_SLAVE_PCIE_0_CFG, 1, 4);
-DEFINE_QNODE(qhs_pcie_gen3_cfg, SDM845_SLAVE_PCIE_1_CFG, 1, 4);
-DEFINE_QNODE(qhs_pdm, SDM845_SLAVE_PDM, 1, 4);
-DEFINE_QNODE(qhs_phy_refgen_south, SDM845_SLAVE_SOUTH_PHY_CFG, 1, 4);
-DEFINE_QNODE(qhs_pimem_cfg, SDM845_SLAVE_PIMEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_prng, SDM845_SLAVE_PRNG, 1, 4);
-DEFINE_QNODE(qhs_qdss_cfg, SDM845_SLAVE_QDSS_CFG, 1, 4);
-DEFINE_QNODE(qhs_qupv3_north, SDM845_SLAVE_BLSP_2, 1, 4);
-DEFINE_QNODE(qhs_qupv3_south, SDM845_SLAVE_BLSP_1, 1, 4);
-DEFINE_QNODE(qhs_sdc2, SDM845_SLAVE_SDCC_2, 1, 4);
-DEFINE_QNODE(qhs_sdc4, SDM845_SLAVE_SDCC_4, 1, 4);
-DEFINE_QNODE(qhs_snoc_cfg, SDM845_SLAVE_SNOC_CFG, 1, 4, SDM845_MASTER_SNOC_CFG);
-DEFINE_QNODE(qhs_spdm, SDM845_SLAVE_SPDM_WRAPPER, 1, 4);
-DEFINE_QNODE(qhs_spss_cfg, SDM845_SLAVE_SPSS_CFG, 1, 4);
-DEFINE_QNODE(qhs_tcsr, SDM845_SLAVE_TCSR, 1, 4);
-DEFINE_QNODE(qhs_tlmm_north, SDM845_SLAVE_TLMM_NORTH, 1, 4);
-DEFINE_QNODE(qhs_tlmm_south, SDM845_SLAVE_TLMM_SOUTH, 1, 4);
-DEFINE_QNODE(qhs_tsif, SDM845_SLAVE_TSIF, 1, 4);
-DEFINE_QNODE(qhs_ufs_card_cfg, SDM845_SLAVE_UFS_CARD_CFG, 1, 4);
-DEFINE_QNODE(qhs_ufs_mem_cfg, SDM845_SLAVE_UFS_MEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_usb3_0, SDM845_SLAVE_USB3_0, 1, 4);
-DEFINE_QNODE(qhs_usb3_1, SDM845_SLAVE_USB3_1, 1, 4);
-DEFINE_QNODE(qhs_venus_cfg, SDM845_SLAVE_VENUS_CFG, 1, 4);
-DEFINE_QNODE(qhs_vsense_ctrl_cfg, SDM845_SLAVE_VSENSE_CTRL_CFG, 1, 4);
-DEFINE_QNODE(qns_cnoc_a2noc, SDM845_SLAVE_CNOC_A2NOC, 1, 8, SDM845_MASTER_CNOC_A2NOC);
-DEFINE_QNODE(srvc_cnoc, SDM845_SLAVE_SERVICE_CNOC, 1, 4);
-DEFINE_QNODE(qhs_llcc, SDM845_SLAVE_LLCC_CFG, 1, 4);
-DEFINE_QNODE(qhs_memnoc, SDM845_SLAVE_MEM_NOC_CFG, 1, 4, SDM845_MASTER_MEM_NOC_CFG);
-DEFINE_QNODE(qns_gladiator_sodv, SDM845_SLAVE_GNOC_SNOC, 1, 8, SDM845_MASTER_GNOC_SNOC);
-DEFINE_QNODE(qns_gnoc_memnoc, SDM845_SLAVE_GNOC_MEM_NOC, 2, 32, SDM845_MASTER_GNOC_MEM_NOC);
-DEFINE_QNODE(srvc_gnoc, SDM845_SLAVE_SERVICE_GNOC, 1, 4);
-DEFINE_QNODE(ebi, SDM845_SLAVE_EBI1, 4, 4);
-DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SDM845_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
-DEFINE_QNODE(qns_apps_io, SDM845_SLAVE_MEM_NOC_GNOC, 1, 32);
-DEFINE_QNODE(qns_llcc, SDM845_SLAVE_LLCC, 4, 16, SDM845_MASTER_LLCC);
-DEFINE_QNODE(qns_memnoc_snoc, SDM845_SLAVE_MEM_NOC_SNOC, 1, 8, SDM845_MASTER_MEM_NOC_SNOC);
-DEFINE_QNODE(srvc_memnoc, SDM845_SLAVE_SERVICE_MEM_NOC, 1, 4);
-DEFINE_QNODE(qns2_mem_noc, SDM845_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SDM845_MASTER_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qns_mem_noc_hf, SDM845_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SDM845_MASTER_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(srvc_mnoc, SDM845_SLAVE_SERVICE_MNOC, 1, 4);
-DEFINE_QNODE(qhs_apss, SDM845_SLAVE_APPSS, 1, 8);
-DEFINE_QNODE(qns_cnoc, SDM845_SLAVE_SNOC_CNOC, 1, 8, SDM845_MASTER_SNOC_CNOC);
-DEFINE_QNODE(qns_memnoc_gc, SDM845_SLAVE_SNOC_MEM_NOC_GC, 1, 8, SDM845_MASTER_SNOC_GC_MEM_NOC);
-DEFINE_QNODE(qns_memnoc_sf, SDM845_SLAVE_SNOC_MEM_NOC_SF, 1, 16, SDM845_MASTER_SNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxs_imem, SDM845_SLAVE_IMEM, 1, 8);
-DEFINE_QNODE(qxs_pcie, SDM845_SLAVE_PCIE_0, 1, 8);
-DEFINE_QNODE(qxs_pcie_gen3, SDM845_SLAVE_PCIE_1, 1, 8);
-DEFINE_QNODE(qxs_pimem, SDM845_SLAVE_PIMEM, 1, 8);
-DEFINE_QNODE(srvc_snoc, SDM845_SLAVE_SERVICE_SNOC, 1, 4);
-DEFINE_QNODE(xs_qdss_stm, SDM845_SLAVE_QDSS_STM, 1, 4);
-DEFINE_QNODE(xs_sys_tcu_cfg, SDM845_SLAVE_TCU, 1, 8);
-
-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
-DEFINE_QBCM(bcm_mm0, "MM0", false, &qns_mem_noc_hf);
-DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_apps_io);
-DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
-DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_memnoc_snoc);
-DEFINE_QBCM(bcm_mm2, "MM2", false, &qns2_mem_noc);
-DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_tcu);
-DEFINE_QBCM(bcm_mm3, "MM3", false, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
-DEFINE_QBCM(bcm_sh5, "SH5", false, &qnm_apps);
-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_memnoc_sf);
-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
-DEFINE_QBCM(bcm_cn0, "CN0", false, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup1, &qhm_qup2);
-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_memnoc_gc);
-DEFINE_QBCM(bcm_sn3, "SN3", false, &qns_cnoc);
-DEFINE_QBCM(bcm_sn4, "SN4", false, &qxm_pimem);
-DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_qdss_stm);
-DEFINE_QBCM(bcm_sn6, "SN6", false, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg);
-DEFINE_QBCM(bcm_sn7, "SN7", false, &qxs_pcie);
-DEFINE_QBCM(bcm_sn8, "SN8", false, &qxs_pcie_gen3);
-DEFINE_QBCM(bcm_sn9, "SN9", false, &srvc_aggre1_noc, &qnm_aggre1_noc);
-DEFINE_QBCM(bcm_sn11, "SN11", false, &srvc_aggre2_noc, &qnm_aggre2_noc);
-DEFINE_QBCM(bcm_sn12, "SN12", false, &qnm_gladiator_sodv, &xm_gic);
-DEFINE_QBCM(bcm_sn14, "SN14", false, &qnm_pcie_anoc);
-DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_memnoc);
+static struct qcom_icc_node qhm_a1noc_cfg = {
+ .name = "qhm_a1noc_cfg",
+ .id = SDM845_MASTER_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_SERVICE_A1NOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = SDM845_MASTER_BLSP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_tsif = {
+ .name = "qhm_tsif",
+ .id = SDM845_MASTER_TSIF,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SDM845_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SDM845_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_card = {
+ .name = "xm_ufs_card",
+ .id = SDM845_MASTER_UFS_CARD,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SDM845_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_pcie_0 = {
+ .name = "xm_pcie_0",
+ .id = SDM845_MASTER_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_ANOC_PCIE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_a2noc_cfg = {
+ .name = "qhm_a2noc_cfg",
+ .id = SDM845_MASTER_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_SERVICE_A2NOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SDM845_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .id = SDM845_MASTER_BLSP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_cnoc = {
+ .name = "qnm_cnoc",
+ .id = SDM845_MASTER_CNOC_A2NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SDM845_MASTER_CRYPTO,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SDM845_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_pcie3_1 = {
+ .name = "xm_pcie3_1",
+ .id = SDM845_MASTER_PCIE_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_ANOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+ .name = "xm_qdss_etr",
+ .id = SDM845_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SDM845_MASTER_USB3_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_1 = {
+ .name = "xm_usb3_1",
+ .id = SDM845_MASTER_USB3_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
+ .name = "qxm_camnoc_hf0_uncomp",
+ .id = SDM845_MASTER_CAMNOC_HF0_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf1_uncomp = {
+ .name = "qxm_camnoc_hf1_uncomp",
+ .id = SDM845_MASTER_CAMNOC_HF1_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
+ .name = "qxm_camnoc_sf_uncomp",
+ .id = SDM845_MASTER_CAMNOC_SF_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qhm_spdm = {
+ .name = "qhm_spdm",
+ .id = SDM845_MASTER_SPDM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_CNOC_A2NOC },
+};
+
+static struct qcom_icc_node qhm_tic = {
+ .name = "qhm_tic",
+ .id = SDM845_MASTER_TIC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 43,
+ .links = { SDM845_SLAVE_A1NOC_CFG,
+ SDM845_SLAVE_A2NOC_CFG,
+ SDM845_SLAVE_AOP,
+ SDM845_SLAVE_AOSS,
+ SDM845_SLAVE_CAMERA_CFG,
+ SDM845_SLAVE_CLK_CTL,
+ SDM845_SLAVE_CDSP_CFG,
+ SDM845_SLAVE_RBCPR_CX_CFG,
+ SDM845_SLAVE_CRYPTO_0_CFG,
+ SDM845_SLAVE_DCC_CFG,
+ SDM845_SLAVE_CNOC_DDRSS,
+ SDM845_SLAVE_DISPLAY_CFG,
+ SDM845_SLAVE_GLM,
+ SDM845_SLAVE_GFX3D_CFG,
+ SDM845_SLAVE_IMEM_CFG,
+ SDM845_SLAVE_IPA_CFG,
+ SDM845_SLAVE_CNOC_MNOC_CFG,
+ SDM845_SLAVE_PCIE_0_CFG,
+ SDM845_SLAVE_PCIE_1_CFG,
+ SDM845_SLAVE_PDM,
+ SDM845_SLAVE_SOUTH_PHY_CFG,
+ SDM845_SLAVE_PIMEM_CFG,
+ SDM845_SLAVE_PRNG,
+ SDM845_SLAVE_QDSS_CFG,
+ SDM845_SLAVE_BLSP_2,
+ SDM845_SLAVE_BLSP_1,
+ SDM845_SLAVE_SDCC_2,
+ SDM845_SLAVE_SDCC_4,
+ SDM845_SLAVE_SNOC_CFG,
+ SDM845_SLAVE_SPDM_WRAPPER,
+ SDM845_SLAVE_SPSS_CFG,
+ SDM845_SLAVE_TCSR,
+ SDM845_SLAVE_TLMM_NORTH,
+ SDM845_SLAVE_TLMM_SOUTH,
+ SDM845_SLAVE_TSIF,
+ SDM845_SLAVE_UFS_CARD_CFG,
+ SDM845_SLAVE_UFS_MEM_CFG,
+ SDM845_SLAVE_USB3_0,
+ SDM845_SLAVE_USB3_1,
+ SDM845_SLAVE_VENUS_CFG,
+ SDM845_SLAVE_VSENSE_CTRL_CFG,
+ SDM845_SLAVE_CNOC_A2NOC,
+ SDM845_SLAVE_SERVICE_CNOC
+ },
+};
+
+static struct qcom_icc_node qnm_snoc = {
+ .name = "qnm_snoc",
+ .id = SDM845_MASTER_SNOC_CNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 42,
+ .links = { SDM845_SLAVE_A1NOC_CFG,
+ SDM845_SLAVE_A2NOC_CFG,
+ SDM845_SLAVE_AOP,
+ SDM845_SLAVE_AOSS,
+ SDM845_SLAVE_CAMERA_CFG,
+ SDM845_SLAVE_CLK_CTL,
+ SDM845_SLAVE_CDSP_CFG,
+ SDM845_SLAVE_RBCPR_CX_CFG,
+ SDM845_SLAVE_CRYPTO_0_CFG,
+ SDM845_SLAVE_DCC_CFG,
+ SDM845_SLAVE_CNOC_DDRSS,
+ SDM845_SLAVE_DISPLAY_CFG,
+ SDM845_SLAVE_GLM,
+ SDM845_SLAVE_GFX3D_CFG,
+ SDM845_SLAVE_IMEM_CFG,
+ SDM845_SLAVE_IPA_CFG,
+ SDM845_SLAVE_CNOC_MNOC_CFG,
+ SDM845_SLAVE_PCIE_0_CFG,
+ SDM845_SLAVE_PCIE_1_CFG,
+ SDM845_SLAVE_PDM,
+ SDM845_SLAVE_SOUTH_PHY_CFG,
+ SDM845_SLAVE_PIMEM_CFG,
+ SDM845_SLAVE_PRNG,
+ SDM845_SLAVE_QDSS_CFG,
+ SDM845_SLAVE_BLSP_2,
+ SDM845_SLAVE_BLSP_1,
+ SDM845_SLAVE_SDCC_2,
+ SDM845_SLAVE_SDCC_4,
+ SDM845_SLAVE_SNOC_CFG,
+ SDM845_SLAVE_SPDM_WRAPPER,
+ SDM845_SLAVE_SPSS_CFG,
+ SDM845_SLAVE_TCSR,
+ SDM845_SLAVE_TLMM_NORTH,
+ SDM845_SLAVE_TLMM_SOUTH,
+ SDM845_SLAVE_TSIF,
+ SDM845_SLAVE_UFS_CARD_CFG,
+ SDM845_SLAVE_UFS_MEM_CFG,
+ SDM845_SLAVE_USB3_0,
+ SDM845_SLAVE_USB3_1,
+ SDM845_SLAVE_VENUS_CFG,
+ SDM845_SLAVE_VSENSE_CTRL_CFG,
+ SDM845_SLAVE_SERVICE_CNOC
+ },
+};
+
+static struct qcom_icc_node xm_qdss_dap = {
+ .name = "xm_qdss_dap",
+ .id = SDM845_MASTER_QDSS_DAP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 43,
+ .links = { SDM845_SLAVE_A1NOC_CFG,
+ SDM845_SLAVE_A2NOC_CFG,
+ SDM845_SLAVE_AOP,
+ SDM845_SLAVE_AOSS,
+ SDM845_SLAVE_CAMERA_CFG,
+ SDM845_SLAVE_CLK_CTL,
+ SDM845_SLAVE_CDSP_CFG,
+ SDM845_SLAVE_RBCPR_CX_CFG,
+ SDM845_SLAVE_CRYPTO_0_CFG,
+ SDM845_SLAVE_DCC_CFG,
+ SDM845_SLAVE_CNOC_DDRSS,
+ SDM845_SLAVE_DISPLAY_CFG,
+ SDM845_SLAVE_GLM,
+ SDM845_SLAVE_GFX3D_CFG,
+ SDM845_SLAVE_IMEM_CFG,
+ SDM845_SLAVE_IPA_CFG,
+ SDM845_SLAVE_CNOC_MNOC_CFG,
+ SDM845_SLAVE_PCIE_0_CFG,
+ SDM845_SLAVE_PCIE_1_CFG,
+ SDM845_SLAVE_PDM,
+ SDM845_SLAVE_SOUTH_PHY_CFG,
+ SDM845_SLAVE_PIMEM_CFG,
+ SDM845_SLAVE_PRNG,
+ SDM845_SLAVE_QDSS_CFG,
+ SDM845_SLAVE_BLSP_2,
+ SDM845_SLAVE_BLSP_1,
+ SDM845_SLAVE_SDCC_2,
+ SDM845_SLAVE_SDCC_4,
+ SDM845_SLAVE_SNOC_CFG,
+ SDM845_SLAVE_SPDM_WRAPPER,
+ SDM845_SLAVE_SPSS_CFG,
+ SDM845_SLAVE_TCSR,
+ SDM845_SLAVE_TLMM_NORTH,
+ SDM845_SLAVE_TLMM_SOUTH,
+ SDM845_SLAVE_TSIF,
+ SDM845_SLAVE_UFS_CARD_CFG,
+ SDM845_SLAVE_UFS_MEM_CFG,
+ SDM845_SLAVE_USB3_0,
+ SDM845_SLAVE_USB3_1,
+ SDM845_SLAVE_VENUS_CFG,
+ SDM845_SLAVE_VSENSE_CTRL_CFG,
+ SDM845_SLAVE_CNOC_A2NOC,
+ SDM845_SLAVE_SERVICE_CNOC
+ },
+};
+
+static struct qcom_icc_node qhm_cnoc = {
+ .name = "qhm_cnoc",
+ .id = SDM845_MASTER_CNOC_DC_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SDM845_SLAVE_LLCC_CFG,
+ SDM845_SLAVE_MEM_NOC_CFG
+ },
+};
+
+static struct qcom_icc_node acm_l3 = {
+ .name = "acm_l3",
+ .id = SDM845_MASTER_APPSS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SDM845_SLAVE_GNOC_SNOC,
+ SDM845_SLAVE_GNOC_MEM_NOC,
+ SDM845_SLAVE_SERVICE_GNOC
+ },
+};
+
+static struct qcom_icc_node pm_gnoc_cfg = {
+ .name = "pm_gnoc_cfg",
+ .id = SDM845_MASTER_GNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_SERVICE_GNOC },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SDM845_MASTER_LLCC,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node acm_tcu = {
+ .name = "acm_tcu",
+ .id = SDM845_MASTER_TCU_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 3,
+ .links = { SDM845_SLAVE_MEM_NOC_GNOC,
+ SDM845_SLAVE_LLCC,
+ SDM845_SLAVE_MEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qhm_memnoc_cfg = {
+ .name = "qhm_memnoc_cfg",
+ .id = SDM845_MASTER_MEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SDM845_SLAVE_MSS_PROC_MS_MPU_CFG,
+ SDM845_SLAVE_SERVICE_MEM_NOC
+ },
+};
+
+static struct qcom_icc_node qnm_apps = {
+ .name = "qnm_apps",
+ .id = SDM845_MASTER_GNOC_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SDM845_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SDM845_SLAVE_MEM_NOC_GNOC,
+ SDM845_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SDM845_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SDM845_SLAVE_MEM_NOC_GNOC,
+ SDM845_SLAVE_LLCC,
+ SDM845_SLAVE_MEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SDM845_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SDM845_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SDM845_SLAVE_MEM_NOC_GNOC,
+ SDM845_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qxm_gpu = {
+ .name = "qxm_gpu",
+ .id = SDM845_MASTER_GFX3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SDM845_SLAVE_MEM_NOC_GNOC,
+ SDM845_SLAVE_LLCC,
+ SDM845_SLAVE_MEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qhm_mnoc_cfg = {
+ .name = "qhm_mnoc_cfg",
+ .id = SDM845_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf0 = {
+ .name = "qxm_camnoc_hf0",
+ .id = SDM845_MASTER_CAMNOC_HF0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf1 = {
+ .name = "qxm_camnoc_hf1",
+ .id = SDM845_MASTER_CAMNOC_HF1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf = {
+ .name = "qxm_camnoc_sf",
+ .id = SDM845_MASTER_CAMNOC_SF,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp0 = {
+ .name = "qxm_mdp0",
+ .id = SDM845_MASTER_MDP0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp1 = {
+ .name = "qxm_mdp1",
+ .id = SDM845_MASTER_MDP1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_rot = {
+ .name = "qxm_rot",
+ .id = SDM845_MASTER_ROTATOR,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus0 = {
+ .name = "qxm_venus0",
+ .id = SDM845_MASTER_VIDEO_P0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus1 = {
+ .name = "qxm_venus1",
+ .id = SDM845_MASTER_VIDEO_P1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus_arm9 = {
+ .name = "qxm_venus_arm9",
+ .id = SDM845_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qhm_snoc_cfg = {
+ .name = "qhm_snoc_cfg",
+ .id = SDM845_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SDM845_MASTER_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 6,
+ .links = { SDM845_SLAVE_APPSS,
+ SDM845_SLAVE_SNOC_CNOC,
+ SDM845_SLAVE_SNOC_MEM_NOC_SF,
+ SDM845_SLAVE_IMEM,
+ SDM845_SLAVE_PIMEM,
+ SDM845_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SDM845_MASTER_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 9,
+ .links = { SDM845_SLAVE_APPSS,
+ SDM845_SLAVE_SNOC_CNOC,
+ SDM845_SLAVE_SNOC_MEM_NOC_SF,
+ SDM845_SLAVE_IMEM,
+ SDM845_SLAVE_PCIE_0,
+ SDM845_SLAVE_PCIE_1,
+ SDM845_SLAVE_PIMEM,
+ SDM845_SLAVE_QDSS_STM,
+ SDM845_SLAVE_TCU
+ },
+};
+
+static struct qcom_icc_node qnm_gladiator_sodv = {
+ .name = "qnm_gladiator_sodv",
+ .id = SDM845_MASTER_GNOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 8,
+ .links = { SDM845_SLAVE_APPSS,
+ SDM845_SLAVE_SNOC_CNOC,
+ SDM845_SLAVE_IMEM,
+ SDM845_SLAVE_PCIE_0,
+ SDM845_SLAVE_PCIE_1,
+ SDM845_SLAVE_PIMEM,
+ SDM845_SLAVE_QDSS_STM,
+ SDM845_SLAVE_TCU
+ },
+};
+
+static struct qcom_icc_node qnm_memnoc = {
+ .name = "qnm_memnoc",
+ .id = SDM845_MASTER_MEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 5,
+ .links = { SDM845_SLAVE_APPSS,
+ SDM845_SLAVE_SNOC_CNOC,
+ SDM845_SLAVE_IMEM,
+ SDM845_SLAVE_PIMEM,
+ SDM845_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qnm_pcie_anoc = {
+ .name = "qnm_pcie_anoc",
+ .id = SDM845_MASTER_ANOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 5,
+ .links = { SDM845_SLAVE_APPSS,
+ SDM845_SLAVE_SNOC_CNOC,
+ SDM845_SLAVE_SNOC_MEM_NOC_SF,
+ SDM845_SLAVE_IMEM,
+ SDM845_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = SDM845_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SDM845_SLAVE_SNOC_MEM_NOC_GC,
+ SDM845_SLAVE_IMEM
+ },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SDM845_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SDM845_SLAVE_SNOC_MEM_NOC_GC,
+ SDM845_SLAVE_IMEM
+ },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SDM845_SLAVE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDM845_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre1_noc = {
+ .name = "srvc_aggre1_noc",
+ .id = SDM845_SLAVE_SERVICE_A1NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { 0 },
+};
+
+static struct qcom_icc_node qns_pcie_a1noc_snoc = {
+ .name = "qns_pcie_a1noc_snoc",
+ .id = SDM845_SLAVE_ANOC_PCIE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDM845_MASTER_ANOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SDM845_SLAVE_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDM845_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_pcie_snoc = {
+ .name = "qns_pcie_snoc",
+ .id = SDM845_SLAVE_ANOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDM845_MASTER_ANOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre2_noc = {
+ .name = "srvc_aggre2_noc",
+ .id = SDM845_SLAVE_SERVICE_A2NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_camnoc_uncomp = {
+ .name = "qns_camnoc_uncomp",
+ .id = SDM845_SLAVE_CAMNOC_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node qhs_a1_noc_cfg = {
+ .name = "qhs_a1_noc_cfg",
+ .id = SDM845_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_MASTER_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_a2_noc_cfg = {
+ .name = "qhs_a2_noc_cfg",
+ .id = SDM845_SLAVE_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_MASTER_A2NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_aop = {
+ .name = "qhs_aop",
+ .id = SDM845_SLAVE_AOP,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SDM845_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SDM845_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SDM845_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_compute_dsp_cfg = {
+ .name = "qhs_compute_dsp_cfg",
+ .id = SDM845_SLAVE_CDSP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SDM845_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SDM845_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_dcc_cfg = {
+ .name = "qhs_dcc_cfg",
+ .id = SDM845_SLAVE_DCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_MASTER_CNOC_DC_NOC },
+};
+
+static struct qcom_icc_node qhs_ddrss_cfg = {
+ .name = "qhs_ddrss_cfg",
+ .id = SDM845_SLAVE_CNOC_DDRSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = SDM845_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_glm = {
+ .name = "qhs_glm",
+ .id = SDM845_SLAVE_GLM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SDM845_SLAVE_GFX3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SDM845_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SDM845_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mnoc_cfg = {
+ .name = "qhs_mnoc_cfg",
+ .id = SDM845_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .id = SDM845_SLAVE_PCIE_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie_gen3_cfg = {
+ .name = "qhs_pcie_gen3_cfg",
+ .id = SDM845_SLAVE_PCIE_1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SDM845_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_phy_refgen_south = {
+ .name = "qhs_phy_refgen_south",
+ .id = SDM845_SLAVE_SOUTH_PHY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SDM845_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SDM845_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SDM845_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qupv3_north = {
+ .name = "qhs_qupv3_north",
+ .id = SDM845_SLAVE_BLSP_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qupv3_south = {
+ .name = "qhs_qupv3_south",
+ .id = SDM845_SLAVE_BLSP_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SDM845_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SDM845_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_snoc_cfg = {
+ .name = "qhs_snoc_cfg",
+ .id = SDM845_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_spdm = {
+ .name = "qhs_spdm",
+ .id = SDM845_SLAVE_SPDM_WRAPPER,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_spss_cfg = {
+ .name = "qhs_spss_cfg",
+ .id = SDM845_SLAVE_SPSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SDM845_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_north = {
+ .name = "qhs_tlmm_north",
+ .id = SDM845_SLAVE_TLMM_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_south = {
+ .name = "qhs_tlmm_south",
+ .id = SDM845_SLAVE_TLMM_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tsif = {
+ .name = "qhs_tsif",
+ .id = SDM845_SLAVE_TSIF,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_card_cfg = {
+ .name = "qhs_ufs_card_cfg",
+ .id = SDM845_SLAVE_UFS_CARD_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SDM845_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SDM845_SLAVE_USB3_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_1 = {
+ .name = "qhs_usb3_1",
+ .id = SDM845_SLAVE_USB3_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SDM845_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SDM845_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_cnoc_a2noc = {
+ .name = "qns_cnoc_a2noc",
+ .id = SDM845_SLAVE_CNOC_A2NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_MASTER_CNOC_A2NOC },
+};
+
+static struct qcom_icc_node srvc_cnoc = {
+ .name = "srvc_cnoc",
+ .id = SDM845_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_llcc = {
+ .name = "qhs_llcc",
+ .id = SDM845_SLAVE_LLCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_memnoc = {
+ .name = "qhs_memnoc",
+ .id = SDM845_SLAVE_MEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDM845_MASTER_MEM_NOC_CFG },
+};
+
+static struct qcom_icc_node qns_gladiator_sodv = {
+ .name = "qns_gladiator_sodv",
+ .id = SDM845_SLAVE_GNOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_MASTER_GNOC_SNOC },
+};
+
+static struct qcom_icc_node qns_gnoc_memnoc = {
+ .name = "qns_gnoc_memnoc",
+ .id = SDM845_SLAVE_GNOC_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM845_MASTER_GNOC_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_gnoc = {
+ .name = "srvc_gnoc",
+ .id = SDM845_SLAVE_SERVICE_GNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SDM845_SLAVE_EBI1,
+ .channels = 4,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
+ .name = "qhs_mdsp_ms_mpu_cfg",
+ .id = SDM845_SLAVE_MSS_PROC_MS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_apps_io = {
+ .name = "qns_apps_io",
+ .id = SDM845_SLAVE_MEM_NOC_GNOC,
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SDM845_SLAVE_LLCC,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDM845_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_memnoc_snoc = {
+ .name = "qns_memnoc_snoc",
+ .id = SDM845_SLAVE_MEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_MASTER_MEM_NOC_SNOC },
+};
+
+static struct qcom_icc_node srvc_memnoc = {
+ .name = "srvc_memnoc",
+ .id = SDM845_SLAVE_SERVICE_MEM_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns2_mem_noc = {
+ .name = "qns2_mem_noc",
+ .id = SDM845_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM845_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SDM845_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SDM845_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SDM845_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SDM845_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qns_cnoc = {
+ .name = "qns_cnoc",
+ .id = SDM845_SLAVE_SNOC_CNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_MASTER_SNOC_CNOC },
+};
+
+static struct qcom_icc_node qns_memnoc_gc = {
+ .name = "qns_memnoc_gc",
+ .id = SDM845_SLAVE_SNOC_MEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDM845_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_memnoc_sf = {
+ .name = "qns_memnoc_sf",
+ .id = SDM845_SLAVE_SNOC_MEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDM845_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SDM845_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_pcie = {
+ .name = "qxs_pcie",
+ .id = SDM845_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_pcie_gen3 = {
+ .name = "qxs_pcie_gen3",
+ .id = SDM845_SLAVE_PCIE_1,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = SDM845_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SDM845_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SDM845_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SDM845_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+ .name = "SH1",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_apps_io },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .keepalive = true,
+ .num_nodes = 7,
+ .nodes = { &qxm_camnoc_hf0_uncomp,
+ &qxm_camnoc_hf1_uncomp,
+ &qxm_camnoc_sf_uncomp,
+ &qxm_camnoc_hf0,
+ &qxm_camnoc_hf1,
+ &qxm_mdp0,
+ &qxm_mdp1
+ },
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+ .name = "SH2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_memnoc_snoc },
+};
+
+static struct qcom_icc_bcm bcm_mm2 = {
+ .name = "MM2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns2_mem_noc },
+};
+
+static struct qcom_icc_bcm bcm_sh3 = {
+ .name = "SH3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &acm_tcu },
+};
+
+static struct qcom_icc_bcm bcm_mm3 = {
+ .name = "MM3",
+ .keepalive = false,
+ .num_nodes = 5,
+ .nodes = { &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9 },
+};
+
+static struct qcom_icc_bcm bcm_sh5 = {
+ .name = "SH5",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_apps },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_memnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = false,
+ .num_nodes = 47,
+ .nodes = { &qhm_spdm,
+ &qhm_tic,
+ &qnm_snoc,
+ &xm_qdss_dap,
+ &qhs_a1_noc_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_aop,
+ &qhs_aoss,
+ &qhs_camera_cfg,
+ &qhs_clk_ctl,
+ &qhs_compute_dsp_cfg,
+ &qhs_cpr_cx,
+ &qhs_crypto0_cfg,
+ &qhs_dcc_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_display_cfg,
+ &qhs_glm,
+ &qhs_gpuss_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mnoc_cfg,
+ &qhs_pcie0_cfg,
+ &qhs_pcie_gen3_cfg,
+ &qhs_pdm,
+ &qhs_phy_refgen_south,
+ &qhs_pimem_cfg,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qupv3_north,
+ &qhs_qupv3_south,
+ &qhs_sdc2,
+ &qhs_sdc4,
+ &qhs_snoc_cfg,
+ &qhs_spdm,
+ &qhs_spss_cfg,
+ &qhs_tcsr,
+ &qhs_tlmm_north,
+ &qhs_tlmm_south,
+ &qhs_tsif,
+ &qhs_ufs_card_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3_0,
+ &qhs_usb3_1,
+ &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &qns_cnoc_a2noc,
+ &srvc_cnoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qhm_qup1, &qhm_qup2 },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_imem },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_memnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_cnoc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_pimem },
+};
+
+static struct qcom_icc_bcm bcm_sn5 = {
+ .name = "SN5",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xs_qdss_stm },
+};
+
+static struct qcom_icc_bcm bcm_sn6 = {
+ .name = "SN6",
+ .keepalive = false,
+ .num_nodes = 3,
+ .nodes = { &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg },
+};
+
+static struct qcom_icc_bcm bcm_sn7 = {
+ .name = "SN7",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn8 = {
+ .name = "SN8",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_pcie_gen3 },
+};
+
+static struct qcom_icc_bcm bcm_sn9 = {
+ .name = "SN9",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &srvc_aggre1_noc, &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn11 = {
+ .name = "SN11",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &srvc_aggre2_noc, &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn12 = {
+ .name = "SN12",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qnm_gladiator_sodv, &xm_gic },
+};
+
+static struct qcom_icc_bcm bcm_sn14 = {
+ .name = "SN14",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_pcie_anoc },
+};
+
+static struct qcom_icc_bcm bcm_sn15 = {
+ .name = "SN15",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_memnoc },
+};
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_sn9,
diff --git a/drivers/interconnect/qcom/sdx55.c b/drivers/interconnect/qcom/sdx55.c
index 130a828c3873..4117db046fa0 100644
--- a/drivers/interconnect/qcom/sdx55.c
+++ b/drivers/interconnect/qcom/sdx55.c
@@ -10,94 +10,778 @@
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sdx55.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sdx55.h"
-DEFINE_QNODE(llcc_mc, SDX55_MASTER_LLCC, 4, 4, SDX55_SLAVE_EBI_CH0);
-DEFINE_QNODE(acm_tcu, SDX55_MASTER_TCU_0, 1, 8, SDX55_SLAVE_LLCC, SDX55_SLAVE_MEM_NOC_SNOC, SDX55_SLAVE_MEM_NOC_PCIE_SNOC);
-DEFINE_QNODE(qnm_snoc_gc, SDX55_MASTER_SNOC_GC_MEM_NOC, 1, 8, SDX55_SLAVE_LLCC);
-DEFINE_QNODE(xm_apps_rdwr, SDX55_MASTER_AMPSS_M0, 1, 16, SDX55_SLAVE_LLCC, SDX55_SLAVE_MEM_NOC_SNOC, SDX55_SLAVE_MEM_NOC_PCIE_SNOC);
-DEFINE_QNODE(qhm_audio, SDX55_MASTER_AUDIO, 1, 4, SDX55_SLAVE_ANOC_SNOC);
-DEFINE_QNODE(qhm_blsp1, SDX55_MASTER_BLSP_1, 1, 4, SDX55_SLAVE_ANOC_SNOC);
-DEFINE_QNODE(qhm_qdss_bam, SDX55_MASTER_QDSS_BAM, 1, 4, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_TLMM, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
-DEFINE_QNODE(qhm_qpic, SDX55_MASTER_QPIC, 1, 4, SDX55_SLAVE_AOSS, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP, SDX55_SLAVE_AUDIO);
-DEFINE_QNODE(qhm_snoc_cfg, SDX55_MASTER_SNOC_CFG, 1, 4, SDX55_SLAVE_SERVICE_SNOC);
-DEFINE_QNODE(qhm_spmi_fetcher1, SDX55_MASTER_SPMI_FETCHER, 1, 4, SDX55_SLAVE_AOSS, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP);
-DEFINE_QNODE(qnm_aggre_noc, SDX55_MASTER_ANOC_SNOC, 1, 8, SDX55_SLAVE_PCIE_0, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_TLMM, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QDSS_STM, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_USB3, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_APPSS, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
-DEFINE_QNODE(qnm_ipa, SDX55_MASTER_IPA, 1, 8, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_AOSS, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_QDSS_STM, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_AUDIO, SDX55_SLAVE_TLMM, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
-DEFINE_QNODE(qnm_memnoc, SDX55_MASTER_MEM_NOC_SNOC, 1, 8, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_TLMM, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QDSS_STM, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_APPSS, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
-DEFINE_QNODE(qnm_memnoc_pcie, SDX55_MASTER_MEM_NOC_PCIE_SNOC, 1, 8, SDX55_SLAVE_PCIE_0);
-DEFINE_QNODE(qxm_crypto, SDX55_MASTER_CRYPTO_CORE_0, 1, 8, SDX55_SLAVE_AOSS, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP);
-DEFINE_QNODE(xm_emac, SDX55_MASTER_EMAC, 1, 8, SDX55_SLAVE_ANOC_SNOC);
-DEFINE_QNODE(xm_ipa2pcie_slv, SDX55_MASTER_IPA_PCIE, 1, 8, SDX55_SLAVE_PCIE_0);
-DEFINE_QNODE(xm_pcie, SDX55_MASTER_PCIE, 1, 8, SDX55_SLAVE_ANOC_SNOC);
-DEFINE_QNODE(xm_qdss_etr, SDX55_MASTER_QDSS_ETR, 1, 8, SDX55_SLAVE_SNOC_CFG, SDX55_SLAVE_EMAC_CFG, SDX55_SLAVE_USB3, SDX55_SLAVE_AOSS, SDX55_SLAVE_SPMI_FETCHER, SDX55_SLAVE_QDSS_CFG, SDX55_SLAVE_PDM, SDX55_SLAVE_SNOC_MEM_NOC_GC, SDX55_SLAVE_TCSR, SDX55_SLAVE_CNOC_DDRSS, SDX55_SLAVE_SPMI_VGI_COEX, SDX55_SLAVE_QPIC, SDX55_SLAVE_OCIMEM, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_USB3_PHY_CFG, SDX55_SLAVE_AOP, SDX55_SLAVE_BLSP_1, SDX55_SLAVE_SDCC_1, SDX55_SLAVE_CNOC_MSS, SDX55_SLAVE_PCIE_PARF, SDX55_SLAVE_ECC_CFG, SDX55_SLAVE_AUDIO, SDX55_SLAVE_AOSS, SDX55_SLAVE_PRNG, SDX55_SLAVE_CRYPTO_0_CFG, SDX55_SLAVE_TCU, SDX55_SLAVE_CLK_CTL, SDX55_SLAVE_IMEM_CFG);
-DEFINE_QNODE(xm_sdc1, SDX55_MASTER_SDCC_1, 1, 8, SDX55_SLAVE_AOSS, SDX55_SLAVE_IPA_CFG, SDX55_SLAVE_ANOC_SNOC, SDX55_SLAVE_AOP, SDX55_SLAVE_AUDIO);
-DEFINE_QNODE(xm_usb3, SDX55_MASTER_USB3, 1, 8, SDX55_SLAVE_ANOC_SNOC);
-DEFINE_QNODE(ebi, SDX55_SLAVE_EBI_CH0, 1, 4);
-DEFINE_QNODE(qns_llcc, SDX55_SLAVE_LLCC, 1, 16, SDX55_SLAVE_EBI_CH0);
-DEFINE_QNODE(qns_memnoc_snoc, SDX55_SLAVE_MEM_NOC_SNOC, 1, 8, SDX55_MASTER_MEM_NOC_SNOC);
-DEFINE_QNODE(qns_sys_pcie, SDX55_SLAVE_MEM_NOC_PCIE_SNOC, 1, 8, SDX55_MASTER_MEM_NOC_PCIE_SNOC);
-DEFINE_QNODE(qhs_aop, SDX55_SLAVE_AOP, 1, 4);
-DEFINE_QNODE(qhs_aoss, SDX55_SLAVE_AOSS, 1, 4);
-DEFINE_QNODE(qhs_apss, SDX55_SLAVE_APPSS, 1, 4);
-DEFINE_QNODE(qhs_audio, SDX55_SLAVE_AUDIO, 1, 4);
-DEFINE_QNODE(qhs_blsp1, SDX55_SLAVE_BLSP_1, 1, 4);
-DEFINE_QNODE(qhs_clk_ctl, SDX55_SLAVE_CLK_CTL, 1, 4);
-DEFINE_QNODE(qhs_crypto0_cfg, SDX55_SLAVE_CRYPTO_0_CFG, 1, 4);
-DEFINE_QNODE(qhs_ddrss_cfg, SDX55_SLAVE_CNOC_DDRSS, 1, 4);
-DEFINE_QNODE(qhs_ecc_cfg, SDX55_SLAVE_ECC_CFG, 1, 4);
-DEFINE_QNODE(qhs_emac_cfg, SDX55_SLAVE_EMAC_CFG, 1, 4);
-DEFINE_QNODE(qhs_imem_cfg, SDX55_SLAVE_IMEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_ipa, SDX55_SLAVE_IPA_CFG, 1, 4);
-DEFINE_QNODE(qhs_mss_cfg, SDX55_SLAVE_CNOC_MSS, 1, 4);
-DEFINE_QNODE(qhs_pcie_parf, SDX55_SLAVE_PCIE_PARF, 1, 4);
-DEFINE_QNODE(qhs_pdm, SDX55_SLAVE_PDM, 1, 4);
-DEFINE_QNODE(qhs_prng, SDX55_SLAVE_PRNG, 1, 4);
-DEFINE_QNODE(qhs_qdss_cfg, SDX55_SLAVE_QDSS_CFG, 1, 4);
-DEFINE_QNODE(qhs_qpic, SDX55_SLAVE_QPIC, 1, 4);
-DEFINE_QNODE(qhs_sdc1, SDX55_SLAVE_SDCC_1, 1, 4);
-DEFINE_QNODE(qhs_snoc_cfg, SDX55_SLAVE_SNOC_CFG, 1, 4, SDX55_MASTER_SNOC_CFG);
-DEFINE_QNODE(qhs_spmi_fetcher, SDX55_SLAVE_SPMI_FETCHER, 1, 4);
-DEFINE_QNODE(qhs_spmi_vgi_coex, SDX55_SLAVE_SPMI_VGI_COEX, 1, 4);
-DEFINE_QNODE(qhs_tcsr, SDX55_SLAVE_TCSR, 1, 4);
-DEFINE_QNODE(qhs_tlmm, SDX55_SLAVE_TLMM, 1, 4);
-DEFINE_QNODE(qhs_usb3, SDX55_SLAVE_USB3, 1, 4);
-DEFINE_QNODE(qhs_usb3_phy, SDX55_SLAVE_USB3_PHY_CFG, 1, 4);
-DEFINE_QNODE(qns_aggre_noc, SDX55_SLAVE_ANOC_SNOC, 1, 8, SDX55_MASTER_ANOC_SNOC);
-DEFINE_QNODE(qns_snoc_memnoc, SDX55_SLAVE_SNOC_MEM_NOC_GC, 1, 8, SDX55_MASTER_SNOC_GC_MEM_NOC);
-DEFINE_QNODE(qxs_imem, SDX55_SLAVE_OCIMEM, 1, 8);
-DEFINE_QNODE(srvc_snoc, SDX55_SLAVE_SERVICE_SNOC, 1, 4);
-DEFINE_QNODE(xs_pcie, SDX55_SLAVE_PCIE_0, 1, 8);
-DEFINE_QNODE(xs_qdss_stm, SDX55_SLAVE_QDSS_STM, 1, 4);
-DEFINE_QNODE(xs_sys_tcu_cfg, SDX55_SLAVE_TCU, 1, 8);
-
-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
-DEFINE_QBCM(bcm_pn0, "PN0", false, &qhm_snoc_cfg);
-DEFINE_QBCM(bcm_sh3, "SH3", false, &xm_apps_rdwr);
-DEFINE_QBCM(bcm_sh4, "SH4", false, &qns_memnoc_snoc, &qns_sys_pcie);
-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_snoc_memnoc);
-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
-DEFINE_QBCM(bcm_pn1, "PN1", false, &xm_sdc1);
-DEFINE_QBCM(bcm_pn2, "PN2", false, &qhm_audio, &qhm_spmi_fetcher1);
-DEFINE_QBCM(bcm_sn3, "SN3", false, &xs_qdss_stm);
-DEFINE_QBCM(bcm_pn3, "PN3", false, &qhm_blsp1, &qhm_qpic);
-DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_sys_tcu_cfg);
-DEFINE_QBCM(bcm_pn5, "PN5", false, &qxm_crypto);
-DEFINE_QBCM(bcm_sn6, "SN6", false, &xs_pcie);
-DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre_noc, &xm_emac, &xm_emac, &xm_usb3,
- &qns_aggre_noc);
-DEFINE_QBCM(bcm_sn8, "SN8", false, &qhm_qdss_bam, &xm_qdss_etr);
-DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_memnoc);
-DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_memnoc_pcie);
-DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_ipa, &xm_ipa2pcie_slv);
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SDX55_MASTER_LLCC,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX55_SLAVE_EBI_CH0 },
+};
+
+static struct qcom_icc_node acm_tcu = {
+ .name = "acm_tcu",
+ .id = SDX55_MASTER_TCU_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 3,
+ .links = { SDX55_SLAVE_LLCC,
+ SDX55_SLAVE_MEM_NOC_SNOC,
+ SDX55_SLAVE_MEM_NOC_PCIE_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SDX55_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX55_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node xm_apps_rdwr = {
+ .name = "xm_apps_rdwr",
+ .id = SDX55_MASTER_AMPSS_M0,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SDX55_SLAVE_LLCC,
+ SDX55_SLAVE_MEM_NOC_SNOC,
+ SDX55_SLAVE_MEM_NOC_PCIE_SNOC
+ },
+};
+
+static struct qcom_icc_node qhm_audio = {
+ .name = "qhm_audio",
+ .id = SDX55_MASTER_AUDIO,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX55_SLAVE_ANOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_blsp1 = {
+ .name = "qhm_blsp1",
+ .id = SDX55_MASTER_BLSP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX55_SLAVE_ANOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SDX55_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 28,
+ .links = { SDX55_SLAVE_SNOC_CFG,
+ SDX55_SLAVE_EMAC_CFG,
+ SDX55_SLAVE_USB3,
+ SDX55_SLAVE_TLMM,
+ SDX55_SLAVE_SPMI_FETCHER,
+ SDX55_SLAVE_QDSS_CFG,
+ SDX55_SLAVE_PDM,
+ SDX55_SLAVE_SNOC_MEM_NOC_GC,
+ SDX55_SLAVE_TCSR,
+ SDX55_SLAVE_CNOC_DDRSS,
+ SDX55_SLAVE_SPMI_VGI_COEX,
+ SDX55_SLAVE_QPIC,
+ SDX55_SLAVE_OCIMEM,
+ SDX55_SLAVE_IPA_CFG,
+ SDX55_SLAVE_USB3_PHY_CFG,
+ SDX55_SLAVE_AOP,
+ SDX55_SLAVE_BLSP_1,
+ SDX55_SLAVE_SDCC_1,
+ SDX55_SLAVE_CNOC_MSS,
+ SDX55_SLAVE_PCIE_PARF,
+ SDX55_SLAVE_ECC_CFG,
+ SDX55_SLAVE_AUDIO,
+ SDX55_SLAVE_AOSS,
+ SDX55_SLAVE_PRNG,
+ SDX55_SLAVE_CRYPTO_0_CFG,
+ SDX55_SLAVE_TCU,
+ SDX55_SLAVE_CLK_CTL,
+ SDX55_SLAVE_IMEM_CFG
+ },
+};
+
+static struct qcom_icc_node qhm_qpic = {
+ .name = "qhm_qpic",
+ .id = SDX55_MASTER_QPIC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 5,
+ .links = { SDX55_SLAVE_AOSS,
+ SDX55_SLAVE_IPA_CFG,
+ SDX55_SLAVE_ANOC_SNOC,
+ SDX55_SLAVE_AOP,
+ SDX55_SLAVE_AUDIO
+ },
+};
+
+static struct qcom_icc_node qhm_snoc_cfg = {
+ .name = "qhm_snoc_cfg",
+ .id = SDX55_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX55_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qhm_spmi_fetcher1 = {
+ .name = "qhm_spmi_fetcher1",
+ .id = SDX55_MASTER_SPMI_FETCHER,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 3,
+ .links = { SDX55_SLAVE_AOSS,
+ SDX55_SLAVE_ANOC_SNOC,
+ SDX55_SLAVE_AOP
+ },
+};
+
+static struct qcom_icc_node qnm_aggre_noc = {
+ .name = "qnm_aggre_noc",
+ .id = SDX55_MASTER_ANOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 30,
+ .links = { SDX55_SLAVE_PCIE_0,
+ SDX55_SLAVE_SNOC_CFG,
+ SDX55_SLAVE_SDCC_1,
+ SDX55_SLAVE_TLMM,
+ SDX55_SLAVE_SPMI_FETCHER,
+ SDX55_SLAVE_QDSS_CFG,
+ SDX55_SLAVE_PDM,
+ SDX55_SLAVE_SNOC_MEM_NOC_GC,
+ SDX55_SLAVE_TCSR,
+ SDX55_SLAVE_CNOC_DDRSS,
+ SDX55_SLAVE_SPMI_VGI_COEX,
+ SDX55_SLAVE_QDSS_STM,
+ SDX55_SLAVE_QPIC,
+ SDX55_SLAVE_OCIMEM,
+ SDX55_SLAVE_IPA_CFG,
+ SDX55_SLAVE_USB3_PHY_CFG,
+ SDX55_SLAVE_AOP,
+ SDX55_SLAVE_BLSP_1,
+ SDX55_SLAVE_USB3,
+ SDX55_SLAVE_CNOC_MSS,
+ SDX55_SLAVE_PCIE_PARF,
+ SDX55_SLAVE_ECC_CFG,
+ SDX55_SLAVE_APPSS,
+ SDX55_SLAVE_AUDIO,
+ SDX55_SLAVE_AOSS,
+ SDX55_SLAVE_PRNG,
+ SDX55_SLAVE_CRYPTO_0_CFG,
+ SDX55_SLAVE_TCU,
+ SDX55_SLAVE_CLK_CTL,
+ SDX55_SLAVE_IMEM_CFG
+ },
+};
+
+static struct qcom_icc_node qnm_ipa = {
+ .name = "qnm_ipa",
+ .id = SDX55_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 27,
+ .links = { SDX55_SLAVE_SNOC_CFG,
+ SDX55_SLAVE_EMAC_CFG,
+ SDX55_SLAVE_USB3,
+ SDX55_SLAVE_AOSS,
+ SDX55_SLAVE_SPMI_FETCHER,
+ SDX55_SLAVE_QDSS_CFG,
+ SDX55_SLAVE_PDM,
+ SDX55_SLAVE_SNOC_MEM_NOC_GC,
+ SDX55_SLAVE_TCSR,
+ SDX55_SLAVE_CNOC_DDRSS,
+ SDX55_SLAVE_QDSS_STM,
+ SDX55_SLAVE_QPIC,
+ SDX55_SLAVE_OCIMEM,
+ SDX55_SLAVE_IPA_CFG,
+ SDX55_SLAVE_USB3_PHY_CFG,
+ SDX55_SLAVE_AOP,
+ SDX55_SLAVE_BLSP_1,
+ SDX55_SLAVE_SDCC_1,
+ SDX55_SLAVE_CNOC_MSS,
+ SDX55_SLAVE_PCIE_PARF,
+ SDX55_SLAVE_ECC_CFG,
+ SDX55_SLAVE_AUDIO,
+ SDX55_SLAVE_TLMM,
+ SDX55_SLAVE_PRNG,
+ SDX55_SLAVE_CRYPTO_0_CFG,
+ SDX55_SLAVE_CLK_CTL,
+ SDX55_SLAVE_IMEM_CFG
+ },
+};
+
+static struct qcom_icc_node qnm_memnoc = {
+ .name = "qnm_memnoc",
+ .id = SDX55_MASTER_MEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 29,
+ .links = { SDX55_SLAVE_SNOC_CFG,
+ SDX55_SLAVE_EMAC_CFG,
+ SDX55_SLAVE_USB3,
+ SDX55_SLAVE_TLMM,
+ SDX55_SLAVE_SPMI_FETCHER,
+ SDX55_SLAVE_QDSS_CFG,
+ SDX55_SLAVE_PDM,
+ SDX55_SLAVE_TCSR,
+ SDX55_SLAVE_CNOC_DDRSS,
+ SDX55_SLAVE_SPMI_VGI_COEX,
+ SDX55_SLAVE_QDSS_STM,
+ SDX55_SLAVE_QPIC,
+ SDX55_SLAVE_OCIMEM,
+ SDX55_SLAVE_IPA_CFG,
+ SDX55_SLAVE_USB3_PHY_CFG,
+ SDX55_SLAVE_AOP,
+ SDX55_SLAVE_BLSP_1,
+ SDX55_SLAVE_SDCC_1,
+ SDX55_SLAVE_CNOC_MSS,
+ SDX55_SLAVE_PCIE_PARF,
+ SDX55_SLAVE_ECC_CFG,
+ SDX55_SLAVE_APPSS,
+ SDX55_SLAVE_AUDIO,
+ SDX55_SLAVE_AOSS,
+ SDX55_SLAVE_PRNG,
+ SDX55_SLAVE_CRYPTO_0_CFG,
+ SDX55_SLAVE_TCU,
+ SDX55_SLAVE_CLK_CTL,
+ SDX55_SLAVE_IMEM_CFG
+ },
+};
+
+static struct qcom_icc_node qnm_memnoc_pcie = {
+ .name = "qnm_memnoc_pcie",
+ .id = SDX55_MASTER_MEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX55_SLAVE_PCIE_0 },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SDX55_MASTER_CRYPTO_CORE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 3,
+ .links = { SDX55_SLAVE_AOSS,
+ SDX55_SLAVE_ANOC_SNOC,
+ SDX55_SLAVE_AOP
+ },
+};
+
+static struct qcom_icc_node xm_emac = {
+ .name = "xm_emac",
+ .id = SDX55_MASTER_EMAC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX55_SLAVE_ANOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ipa2pcie_slv = {
+ .name = "xm_ipa2pcie_slv",
+ .id = SDX55_MASTER_IPA_PCIE,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX55_SLAVE_PCIE_0 },
+};
+
+static struct qcom_icc_node xm_pcie = {
+ .name = "xm_pcie",
+ .id = SDX55_MASTER_PCIE,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX55_SLAVE_ANOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+ .name = "xm_qdss_etr",
+ .id = SDX55_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 28,
+ .links = { SDX55_SLAVE_SNOC_CFG,
+ SDX55_SLAVE_EMAC_CFG,
+ SDX55_SLAVE_USB3,
+ SDX55_SLAVE_AOSS,
+ SDX55_SLAVE_SPMI_FETCHER,
+ SDX55_SLAVE_QDSS_CFG,
+ SDX55_SLAVE_PDM,
+ SDX55_SLAVE_SNOC_MEM_NOC_GC,
+ SDX55_SLAVE_TCSR,
+ SDX55_SLAVE_CNOC_DDRSS,
+ SDX55_SLAVE_SPMI_VGI_COEX,
+ SDX55_SLAVE_QPIC,
+ SDX55_SLAVE_OCIMEM,
+ SDX55_SLAVE_IPA_CFG,
+ SDX55_SLAVE_USB3_PHY_CFG,
+ SDX55_SLAVE_AOP,
+ SDX55_SLAVE_BLSP_1,
+ SDX55_SLAVE_SDCC_1,
+ SDX55_SLAVE_CNOC_MSS,
+ SDX55_SLAVE_PCIE_PARF,
+ SDX55_SLAVE_ECC_CFG,
+ SDX55_SLAVE_AUDIO,
+ SDX55_SLAVE_AOSS,
+ SDX55_SLAVE_PRNG,
+ SDX55_SLAVE_CRYPTO_0_CFG,
+ SDX55_SLAVE_TCU,
+ SDX55_SLAVE_CLK_CTL,
+ SDX55_SLAVE_IMEM_CFG
+ },
+};
+
+static struct qcom_icc_node xm_sdc1 = {
+ .name = "xm_sdc1",
+ .id = SDX55_MASTER_SDCC_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 5,
+ .links = { SDX55_SLAVE_AOSS,
+ SDX55_SLAVE_IPA_CFG,
+ SDX55_SLAVE_ANOC_SNOC,
+ SDX55_SLAVE_AOP,
+ SDX55_SLAVE_AUDIO
+ },
+};
+
+static struct qcom_icc_node xm_usb3 = {
+ .name = "xm_usb3",
+ .id = SDX55_MASTER_USB3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX55_SLAVE_ANOC_SNOC },
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SDX55_SLAVE_EBI_CH0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SDX55_SLAVE_LLCC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDX55_SLAVE_EBI_CH0 },
+};
+
+static struct qcom_icc_node qns_memnoc_snoc = {
+ .name = "qns_memnoc_snoc",
+ .id = SDX55_SLAVE_MEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX55_MASTER_MEM_NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_sys_pcie = {
+ .name = "qns_sys_pcie",
+ .id = SDX55_SLAVE_MEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX55_MASTER_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qhs_aop = {
+ .name = "qhs_aop",
+ .id = SDX55_SLAVE_AOP,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SDX55_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SDX55_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_audio = {
+ .name = "qhs_audio",
+ .id = SDX55_SLAVE_AUDIO,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_blsp1 = {
+ .name = "qhs_blsp1",
+ .id = SDX55_SLAVE_BLSP_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SDX55_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SDX55_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ddrss_cfg = {
+ .name = "qhs_ddrss_cfg",
+ .id = SDX55_SLAVE_CNOC_DDRSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ecc_cfg = {
+ .name = "qhs_ecc_cfg",
+ .id = SDX55_SLAVE_ECC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_emac_cfg = {
+ .name = "qhs_emac_cfg",
+ .id = SDX55_SLAVE_EMAC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SDX55_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SDX55_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mss_cfg = {
+ .name = "qhs_mss_cfg",
+ .id = SDX55_SLAVE_CNOC_MSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie_parf = {
+ .name = "qhs_pcie_parf",
+ .id = SDX55_SLAVE_PCIE_PARF,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SDX55_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SDX55_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SDX55_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qpic = {
+ .name = "qhs_qpic",
+ .id = SDX55_SLAVE_QPIC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc1 = {
+ .name = "qhs_sdc1",
+ .id = SDX55_SLAVE_SDCC_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_snoc_cfg = {
+ .name = "qhs_snoc_cfg",
+ .id = SDX55_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX55_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_spmi_fetcher = {
+ .name = "qhs_spmi_fetcher",
+ .id = SDX55_SLAVE_SPMI_FETCHER,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_spmi_vgi_coex = {
+ .name = "qhs_spmi_vgi_coex",
+ .id = SDX55_SLAVE_SPMI_VGI_COEX,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SDX55_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = SDX55_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3 = {
+ .name = "qhs_usb3",
+ .id = SDX55_SLAVE_USB3,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_phy = {
+ .name = "qhs_usb3_phy",
+ .id = SDX55_SLAVE_USB3_PHY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_aggre_noc = {
+ .name = "qns_aggre_noc",
+ .id = SDX55_SLAVE_ANOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX55_MASTER_ANOC_SNOC },
+};
+
+static struct qcom_icc_node qns_snoc_memnoc = {
+ .name = "qns_snoc_memnoc",
+ .id = SDX55_SLAVE_SNOC_MEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX55_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SDX55_SLAVE_OCIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SDX55_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_pcie = {
+ .name = "xs_pcie",
+ .id = SDX55_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SDX55_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SDX55_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_pn0 = {
+ .name = "PN0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qhm_snoc_cfg },
+};
+
+static struct qcom_icc_bcm bcm_sh3 = {
+ .name = "SH3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xm_apps_rdwr },
+};
+
+static struct qcom_icc_bcm bcm_sh4 = {
+ .name = "SH4",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qns_memnoc_snoc, &qns_sys_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_snoc_memnoc },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_imem },
+};
+
+static struct qcom_icc_bcm bcm_pn1 = {
+ .name = "PN1",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xm_sdc1 },
+};
+
+static struct qcom_icc_bcm bcm_pn2 = {
+ .name = "PN2",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qhm_audio, &qhm_spmi_fetcher1 },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xs_qdss_stm },
+};
+
+static struct qcom_icc_bcm bcm_pn3 = {
+ .name = "PN3",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qhm_blsp1, &qhm_qpic },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xs_sys_tcu_cfg },
+};
+
+static struct qcom_icc_bcm bcm_pn5 = {
+ .name = "PN5",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_sn6 = {
+ .name = "SN6",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xs_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn7 = {
+ .name = "SN7",
+ .keepalive = false,
+ .num_nodes = 5,
+ .nodes = { &qnm_aggre_noc, &xm_emac, &xm_emac, &xm_usb3, &qns_aggre_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn8 = {
+ .name = "SN8",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qhm_qdss_bam, &xm_qdss_etr },
+};
+
+static struct qcom_icc_bcm bcm_sn9 = {
+ .name = "SN9",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_memnoc },
+};
+
+static struct qcom_icc_bcm bcm_sn10 = {
+ .name = "SN10",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_memnoc_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn11 = {
+ .name = "SN11",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qnm_ipa, &xm_ipa2pcie_slv },
+};
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_mc0,
diff --git a/drivers/interconnect/qcom/sdx65.c b/drivers/interconnect/qcom/sdx65.c
index b16d31d53e9b..d3a6c6c148e5 100644
--- a/drivers/interconnect/qcom/sdx65.c
+++ b/drivers/interconnect/qcom/sdx65.c
@@ -6,90 +6,769 @@
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sdx65.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sdx65.h"
-DEFINE_QNODE(llcc_mc, SDX65_MASTER_LLCC, 1, 4, SDX65_SLAVE_EBI1);
-DEFINE_QNODE(acm_tcu, SDX65_MASTER_TCU_0, 1, 8, SDX65_SLAVE_LLCC, SDX65_SLAVE_MEM_NOC_SNOC, SDX65_SLAVE_MEM_NOC_PCIE_SNOC);
-DEFINE_QNODE(qnm_snoc_gc, SDX65_MASTER_SNOC_GC_MEM_NOC, 1, 16, SDX65_SLAVE_LLCC);
-DEFINE_QNODE(xm_apps_rdwr, SDX65_MASTER_APPSS_PROC, 1, 16, SDX65_SLAVE_LLCC, SDX65_SLAVE_MEM_NOC_SNOC, SDX65_SLAVE_MEM_NOC_PCIE_SNOC);
-DEFINE_QNODE(qhm_audio, SDX65_MASTER_AUDIO, 1, 4, SDX65_SLAVE_ANOC_SNOC);
-DEFINE_QNODE(qhm_blsp1, SDX65_MASTER_BLSP_1, 1, 4, SDX65_SLAVE_ANOC_SNOC);
-DEFINE_QNODE(qhm_qdss_bam, SDX65_MASTER_QDSS_BAM, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_TCU);
-DEFINE_QNODE(qhm_qpic, SDX65_MASTER_QPIC, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_ANOC_SNOC);
-DEFINE_QNODE(qhm_snoc_cfg, SDX65_MASTER_SNOC_CFG, 1, 4, SDX65_SLAVE_SERVICE_SNOC);
-DEFINE_QNODE(qhm_spmi_fetcher1, SDX65_MASTER_SPMI_FETCHER, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_ANOC_SNOC);
-DEFINE_QNODE(qnm_aggre_noc, SDX65_MASTER_ANOC_SNOC, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_APPSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_PCIE_0, SDX65_SLAVE_QDSS_STM, SDX65_SLAVE_TCU);
-DEFINE_QNODE(qnm_ipa, SDX65_MASTER_IPA, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_PCIE_0, SDX65_SLAVE_QDSS_STM);
-DEFINE_QNODE(qnm_memnoc, SDX65_MASTER_MEM_NOC_SNOC, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_APPSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_IMEM, SDX65_SLAVE_QDSS_STM, SDX65_SLAVE_TCU);
-DEFINE_QNODE(qnm_memnoc_pcie, SDX65_MASTER_MEM_NOC_PCIE_SNOC, 1, 8, SDX65_SLAVE_PCIE_0);
-DEFINE_QNODE(qxm_crypto, SDX65_MASTER_CRYPTO, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_ANOC_SNOC);
-DEFINE_QNODE(xm_ipa2pcie_slv, SDX65_MASTER_IPA_PCIE, 1, 8, SDX65_SLAVE_PCIE_0);
-DEFINE_QNODE(xm_pcie, SDX65_MASTER_PCIE_0, 1, 8, SDX65_SLAVE_ANOC_SNOC);
-DEFINE_QNODE(xm_qdss_etr, SDX65_MASTER_QDSS_ETR, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_TCU);
-DEFINE_QNODE(xm_sdc1, SDX65_MASTER_SDCC_1, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_ANOC_SNOC);
-DEFINE_QNODE(xm_usb3, SDX65_MASTER_USB3, 1, 8, SDX65_SLAVE_ANOC_SNOC);
-DEFINE_QNODE(ebi, SDX65_SLAVE_EBI1, 1, 4);
-DEFINE_QNODE(qns_llcc, SDX65_SLAVE_LLCC, 1, 16, SDX65_MASTER_LLCC);
-DEFINE_QNODE(qns_memnoc_snoc, SDX65_SLAVE_MEM_NOC_SNOC, 1, 8, SDX65_MASTER_MEM_NOC_SNOC);
-DEFINE_QNODE(qns_sys_pcie, SDX65_SLAVE_MEM_NOC_PCIE_SNOC, 1, 8, SDX65_MASTER_MEM_NOC_PCIE_SNOC);
-DEFINE_QNODE(qhs_aoss, SDX65_SLAVE_AOSS, 1, 4);
-DEFINE_QNODE(qhs_apss, SDX65_SLAVE_APPSS, 1, 4);
-DEFINE_QNODE(qhs_audio, SDX65_SLAVE_AUDIO, 1, 4);
-DEFINE_QNODE(qhs_blsp1, SDX65_SLAVE_BLSP_1, 1, 4);
-DEFINE_QNODE(qhs_clk_ctl, SDX65_SLAVE_CLK_CTL, 1, 4);
-DEFINE_QNODE(qhs_crypto0_cfg, SDX65_SLAVE_CRYPTO_0_CFG, 1, 4);
-DEFINE_QNODE(qhs_ddrss_cfg, SDX65_SLAVE_CNOC_DDRSS, 1, 4);
-DEFINE_QNODE(qhs_ecc_cfg, SDX65_SLAVE_ECC_CFG, 1, 4);
-DEFINE_QNODE(qhs_imem_cfg, SDX65_SLAVE_IMEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_ipa, SDX65_SLAVE_IPA_CFG, 1, 4);
-DEFINE_QNODE(qhs_mss_cfg, SDX65_SLAVE_CNOC_MSS, 1, 4);
-DEFINE_QNODE(qhs_pcie_parf, SDX65_SLAVE_PCIE_PARF, 1, 4);
-DEFINE_QNODE(qhs_pdm, SDX65_SLAVE_PDM, 1, 4);
-DEFINE_QNODE(qhs_prng, SDX65_SLAVE_PRNG, 1, 4);
-DEFINE_QNODE(qhs_qdss_cfg, SDX65_SLAVE_QDSS_CFG, 1, 4);
-DEFINE_QNODE(qhs_qpic, SDX65_SLAVE_QPIC, 1, 4);
-DEFINE_QNODE(qhs_sdc1, SDX65_SLAVE_SDCC_1, 1, 4);
-DEFINE_QNODE(qhs_snoc_cfg, SDX65_SLAVE_SNOC_CFG, 1, 4, SDX65_MASTER_SNOC_CFG);
-DEFINE_QNODE(qhs_spmi_fetcher, SDX65_SLAVE_SPMI_FETCHER, 1, 4);
-DEFINE_QNODE(qhs_spmi_vgi_coex, SDX65_SLAVE_SPMI_VGI_COEX, 1, 4);
-DEFINE_QNODE(qhs_tcsr, SDX65_SLAVE_TCSR, 1, 4);
-DEFINE_QNODE(qhs_tlmm, SDX65_SLAVE_TLMM, 1, 4);
-DEFINE_QNODE(qhs_usb3, SDX65_SLAVE_USB3, 1, 4);
-DEFINE_QNODE(qhs_usb3_phy, SDX65_SLAVE_USB3_PHY_CFG, 1, 4);
-DEFINE_QNODE(qns_aggre_noc, SDX65_SLAVE_ANOC_SNOC, 1, 8, SDX65_MASTER_ANOC_SNOC);
-DEFINE_QNODE(qns_snoc_memnoc, SDX65_SLAVE_SNOC_MEM_NOC_GC, 1, 16, SDX65_MASTER_SNOC_GC_MEM_NOC);
-DEFINE_QNODE(qxs_imem, SDX65_SLAVE_IMEM, 1, 8);
-DEFINE_QNODE(srvc_snoc, SDX65_SLAVE_SERVICE_SNOC, 1, 4);
-DEFINE_QNODE(xs_pcie, SDX65_SLAVE_PCIE_0, 1, 8);
-DEFINE_QNODE(xs_qdss_stm, SDX65_SLAVE_QDSS_STM, 1, 4);
-DEFINE_QNODE(xs_sys_tcu_cfg, SDX65_SLAVE_TCU, 1, 8);
-
-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
-DEFINE_QBCM(bcm_pn0, "PN0", true, &qhm_snoc_cfg, &qhs_aoss, &qhs_apss, &qhs_audio, &qhs_blsp1, &qhs_clk_ctl, &qhs_crypto0_cfg, &qhs_ddrss_cfg, &qhs_ecc_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mss_cfg, &qhs_pcie_parf, &qhs_pdm, &qhs_prng, &qhs_qdss_cfg, &qhs_qpic, &qhs_sdc1, &qhs_snoc_cfg, &qhs_spmi_fetcher, &qhs_spmi_vgi_coex, &qhs_tcsr, &qhs_tlmm, &qhs_usb3, &qhs_usb3_phy, &srvc_snoc);
-DEFINE_QBCM(bcm_pn1, "PN1", false, &xm_sdc1);
-DEFINE_QBCM(bcm_pn2, "PN2", false, &qhm_audio, &qhm_spmi_fetcher1);
-DEFINE_QBCM(bcm_pn3, "PN3", false, &qhm_blsp1, &qhm_qpic);
-DEFINE_QBCM(bcm_pn4, "PN4", false, &qxm_crypto);
-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
-DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_memnoc_snoc);
-DEFINE_QBCM(bcm_sh3, "SH3", false, &xm_apps_rdwr);
-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_snoc_memnoc);
-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
-DEFINE_QBCM(bcm_sn2, "SN2", false, &xs_qdss_stm);
-DEFINE_QBCM(bcm_sn3, "SN3", false, &xs_sys_tcu_cfg);
-DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_pcie);
-DEFINE_QBCM(bcm_sn6, "SN6", false, &qhm_qdss_bam, &xm_qdss_etr);
-DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre_noc, &xm_pcie, &xm_usb3, &qns_aggre_noc);
-DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_memnoc);
-DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_memnoc_pcie);
-DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_ipa, &xm_ipa2pcie_slv);
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SDX65_MASTER_LLCC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX65_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node acm_tcu = {
+ .name = "acm_tcu",
+ .id = SDX65_MASTER_TCU_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 3,
+ .links = { SDX65_SLAVE_LLCC,
+ SDX65_SLAVE_MEM_NOC_SNOC,
+ SDX65_SLAVE_MEM_NOC_PCIE_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SDX65_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDX65_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node xm_apps_rdwr = {
+ .name = "xm_apps_rdwr",
+ .id = SDX65_MASTER_APPSS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SDX65_SLAVE_LLCC,
+ SDX65_SLAVE_MEM_NOC_SNOC,
+ SDX65_SLAVE_MEM_NOC_PCIE_SNOC
+ },
+};
+
+static struct qcom_icc_node qhm_audio = {
+ .name = "qhm_audio",
+ .id = SDX65_MASTER_AUDIO,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX65_SLAVE_ANOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_blsp1 = {
+ .name = "qhm_blsp1",
+ .id = SDX65_MASTER_BLSP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX65_SLAVE_ANOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SDX65_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 26,
+ .links = { SDX65_SLAVE_AOSS,
+ SDX65_SLAVE_AUDIO,
+ SDX65_SLAVE_BLSP_1,
+ SDX65_SLAVE_CLK_CTL,
+ SDX65_SLAVE_CRYPTO_0_CFG,
+ SDX65_SLAVE_CNOC_DDRSS,
+ SDX65_SLAVE_ECC_CFG,
+ SDX65_SLAVE_IMEM_CFG,
+ SDX65_SLAVE_IPA_CFG,
+ SDX65_SLAVE_CNOC_MSS,
+ SDX65_SLAVE_PCIE_PARF,
+ SDX65_SLAVE_PDM,
+ SDX65_SLAVE_PRNG,
+ SDX65_SLAVE_QDSS_CFG,
+ SDX65_SLAVE_QPIC,
+ SDX65_SLAVE_SDCC_1,
+ SDX65_SLAVE_SNOC_CFG,
+ SDX65_SLAVE_SPMI_FETCHER,
+ SDX65_SLAVE_SPMI_VGI_COEX,
+ SDX65_SLAVE_TCSR,
+ SDX65_SLAVE_TLMM,
+ SDX65_SLAVE_USB3,
+ SDX65_SLAVE_USB3_PHY_CFG,
+ SDX65_SLAVE_SNOC_MEM_NOC_GC,
+ SDX65_SLAVE_IMEM,
+ SDX65_SLAVE_TCU
+ },
+};
+
+static struct qcom_icc_node qhm_qpic = {
+ .name = "qhm_qpic",
+ .id = SDX65_MASTER_QPIC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 4,
+ .links = { SDX65_SLAVE_AOSS,
+ SDX65_SLAVE_AUDIO,
+ SDX65_SLAVE_IPA_CFG,
+ SDX65_SLAVE_ANOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qhm_snoc_cfg = {
+ .name = "qhm_snoc_cfg",
+ .id = SDX65_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX65_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qhm_spmi_fetcher1 = {
+ .name = "qhm_spmi_fetcher1",
+ .id = SDX65_MASTER_SPMI_FETCHER,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SDX65_SLAVE_AOSS,
+ SDX65_SLAVE_ANOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_aggre_noc = {
+ .name = "qnm_aggre_noc",
+ .id = SDX65_MASTER_ANOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 29,
+ .links = { SDX65_SLAVE_AOSS,
+ SDX65_SLAVE_APPSS,
+ SDX65_SLAVE_AUDIO,
+ SDX65_SLAVE_BLSP_1,
+ SDX65_SLAVE_CLK_CTL,
+ SDX65_SLAVE_CRYPTO_0_CFG,
+ SDX65_SLAVE_CNOC_DDRSS,
+ SDX65_SLAVE_ECC_CFG,
+ SDX65_SLAVE_IMEM_CFG,
+ SDX65_SLAVE_IPA_CFG,
+ SDX65_SLAVE_CNOC_MSS,
+ SDX65_SLAVE_PCIE_PARF,
+ SDX65_SLAVE_PDM,
+ SDX65_SLAVE_PRNG,
+ SDX65_SLAVE_QDSS_CFG,
+ SDX65_SLAVE_QPIC,
+ SDX65_SLAVE_SDCC_1,
+ SDX65_SLAVE_SNOC_CFG,
+ SDX65_SLAVE_SPMI_FETCHER,
+ SDX65_SLAVE_SPMI_VGI_COEX,
+ SDX65_SLAVE_TCSR,
+ SDX65_SLAVE_TLMM,
+ SDX65_SLAVE_USB3,
+ SDX65_SLAVE_USB3_PHY_CFG,
+ SDX65_SLAVE_SNOC_MEM_NOC_GC,
+ SDX65_SLAVE_IMEM,
+ SDX65_SLAVE_PCIE_0,
+ SDX65_SLAVE_QDSS_STM,
+ SDX65_SLAVE_TCU
+ },
+};
+
+static struct qcom_icc_node qnm_ipa = {
+ .name = "qnm_ipa",
+ .id = SDX65_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 26,
+ .links = { SDX65_SLAVE_AOSS,
+ SDX65_SLAVE_AUDIO,
+ SDX65_SLAVE_BLSP_1,
+ SDX65_SLAVE_CLK_CTL,
+ SDX65_SLAVE_CRYPTO_0_CFG,
+ SDX65_SLAVE_CNOC_DDRSS,
+ SDX65_SLAVE_ECC_CFG,
+ SDX65_SLAVE_IMEM_CFG,
+ SDX65_SLAVE_IPA_CFG,
+ SDX65_SLAVE_CNOC_MSS,
+ SDX65_SLAVE_PCIE_PARF,
+ SDX65_SLAVE_PDM,
+ SDX65_SLAVE_PRNG,
+ SDX65_SLAVE_QDSS_CFG,
+ SDX65_SLAVE_QPIC,
+ SDX65_SLAVE_SDCC_1,
+ SDX65_SLAVE_SNOC_CFG,
+ SDX65_SLAVE_SPMI_FETCHER,
+ SDX65_SLAVE_TCSR,
+ SDX65_SLAVE_TLMM,
+ SDX65_SLAVE_USB3,
+ SDX65_SLAVE_USB3_PHY_CFG,
+ SDX65_SLAVE_SNOC_MEM_NOC_GC,
+ SDX65_SLAVE_IMEM,
+ SDX65_SLAVE_PCIE_0,
+ SDX65_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qnm_memnoc = {
+ .name = "qnm_memnoc",
+ .id = SDX65_MASTER_MEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 27,
+ .links = { SDX65_SLAVE_AOSS,
+ SDX65_SLAVE_APPSS,
+ SDX65_SLAVE_AUDIO,
+ SDX65_SLAVE_BLSP_1,
+ SDX65_SLAVE_CLK_CTL,
+ SDX65_SLAVE_CRYPTO_0_CFG,
+ SDX65_SLAVE_CNOC_DDRSS,
+ SDX65_SLAVE_ECC_CFG,
+ SDX65_SLAVE_IMEM_CFG,
+ SDX65_SLAVE_IPA_CFG,
+ SDX65_SLAVE_CNOC_MSS,
+ SDX65_SLAVE_PCIE_PARF,
+ SDX65_SLAVE_PDM,
+ SDX65_SLAVE_PRNG,
+ SDX65_SLAVE_QDSS_CFG,
+ SDX65_SLAVE_QPIC,
+ SDX65_SLAVE_SDCC_1,
+ SDX65_SLAVE_SNOC_CFG,
+ SDX65_SLAVE_SPMI_FETCHER,
+ SDX65_SLAVE_SPMI_VGI_COEX,
+ SDX65_SLAVE_TCSR,
+ SDX65_SLAVE_TLMM,
+ SDX65_SLAVE_USB3,
+ SDX65_SLAVE_USB3_PHY_CFG,
+ SDX65_SLAVE_IMEM,
+ SDX65_SLAVE_QDSS_STM,
+ SDX65_SLAVE_TCU
+ },
+};
+
+static struct qcom_icc_node qnm_memnoc_pcie = {
+ .name = "qnm_memnoc_pcie",
+ .id = SDX65_MASTER_MEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX65_SLAVE_PCIE_0 },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SDX65_MASTER_CRYPTO,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SDX65_SLAVE_AOSS,
+ SDX65_SLAVE_ANOC_SNOC
+ },
+};
+
+static struct qcom_icc_node xm_ipa2pcie_slv = {
+ .name = "xm_ipa2pcie_slv",
+ .id = SDX65_MASTER_IPA_PCIE,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX65_SLAVE_PCIE_0 },
+};
+
+static struct qcom_icc_node xm_pcie = {
+ .name = "xm_pcie",
+ .id = SDX65_MASTER_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX65_SLAVE_ANOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+ .name = "xm_qdss_etr",
+ .id = SDX65_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 26,
+ .links = { SDX65_SLAVE_AOSS,
+ SDX65_SLAVE_AUDIO,
+ SDX65_SLAVE_BLSP_1,
+ SDX65_SLAVE_CLK_CTL,
+ SDX65_SLAVE_CRYPTO_0_CFG,
+ SDX65_SLAVE_CNOC_DDRSS,
+ SDX65_SLAVE_ECC_CFG,
+ SDX65_SLAVE_IMEM_CFG,
+ SDX65_SLAVE_IPA_CFG,
+ SDX65_SLAVE_CNOC_MSS,
+ SDX65_SLAVE_PCIE_PARF,
+ SDX65_SLAVE_PDM,
+ SDX65_SLAVE_PRNG,
+ SDX65_SLAVE_QDSS_CFG,
+ SDX65_SLAVE_QPIC,
+ SDX65_SLAVE_SDCC_1,
+ SDX65_SLAVE_SNOC_CFG,
+ SDX65_SLAVE_SPMI_FETCHER,
+ SDX65_SLAVE_SPMI_VGI_COEX,
+ SDX65_SLAVE_TCSR,
+ SDX65_SLAVE_TLMM,
+ SDX65_SLAVE_USB3,
+ SDX65_SLAVE_USB3_PHY_CFG,
+ SDX65_SLAVE_SNOC_MEM_NOC_GC,
+ SDX65_SLAVE_IMEM,
+ SDX65_SLAVE_TCU
+ },
+};
+
+static struct qcom_icc_node xm_sdc1 = {
+ .name = "xm_sdc1",
+ .id = SDX65_MASTER_SDCC_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 4,
+ .links = { SDX65_SLAVE_AOSS,
+ SDX65_SLAVE_AUDIO,
+ SDX65_SLAVE_IPA_CFG,
+ SDX65_SLAVE_ANOC_SNOC
+ },
+};
+
+static struct qcom_icc_node xm_usb3 = {
+ .name = "xm_usb3",
+ .id = SDX65_MASTER_USB3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX65_SLAVE_ANOC_SNOC },
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SDX65_SLAVE_EBI1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SDX65_SLAVE_LLCC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDX65_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_memnoc_snoc = {
+ .name = "qns_memnoc_snoc",
+ .id = SDX65_SLAVE_MEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX65_MASTER_MEM_NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_sys_pcie = {
+ .name = "qns_sys_pcie",
+ .id = SDX65_SLAVE_MEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX65_MASTER_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SDX65_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SDX65_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_audio = {
+ .name = "qhs_audio",
+ .id = SDX65_SLAVE_AUDIO,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_blsp1 = {
+ .name = "qhs_blsp1",
+ .id = SDX65_SLAVE_BLSP_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SDX65_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SDX65_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ddrss_cfg = {
+ .name = "qhs_ddrss_cfg",
+ .id = SDX65_SLAVE_CNOC_DDRSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ecc_cfg = {
+ .name = "qhs_ecc_cfg",
+ .id = SDX65_SLAVE_ECC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SDX65_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SDX65_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mss_cfg = {
+ .name = "qhs_mss_cfg",
+ .id = SDX65_SLAVE_CNOC_MSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie_parf = {
+ .name = "qhs_pcie_parf",
+ .id = SDX65_SLAVE_PCIE_PARF,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SDX65_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SDX65_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SDX65_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qpic = {
+ .name = "qhs_qpic",
+ .id = SDX65_SLAVE_QPIC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc1 = {
+ .name = "qhs_sdc1",
+ .id = SDX65_SLAVE_SDCC_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_snoc_cfg = {
+ .name = "qhs_snoc_cfg",
+ .id = SDX65_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SDX65_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_spmi_fetcher = {
+ .name = "qhs_spmi_fetcher",
+ .id = SDX65_SLAVE_SPMI_FETCHER,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_spmi_vgi_coex = {
+ .name = "qhs_spmi_vgi_coex",
+ .id = SDX65_SLAVE_SPMI_VGI_COEX,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SDX65_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = SDX65_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3 = {
+ .name = "qhs_usb3",
+ .id = SDX65_SLAVE_USB3,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_phy = {
+ .name = "qhs_usb3_phy",
+ .id = SDX65_SLAVE_USB3_PHY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_aggre_noc = {
+ .name = "qns_aggre_noc",
+ .id = SDX65_SLAVE_ANOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SDX65_MASTER_ANOC_SNOC },
+};
+
+static struct qcom_icc_node qns_snoc_memnoc = {
+ .name = "qns_snoc_memnoc",
+ .id = SDX65_SLAVE_SNOC_MEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SDX65_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SDX65_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SDX65_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_pcie = {
+ .name = "xs_pcie",
+ .id = SDX65_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SDX65_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SDX65_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_pn0 = {
+ .name = "PN0",
+ .keepalive = true,
+ .num_nodes = 26,
+ .nodes = { &qhm_snoc_cfg,
+ &qhs_aoss,
+ &qhs_apss,
+ &qhs_audio,
+ &qhs_blsp1,
+ &qhs_clk_ctl,
+ &qhs_crypto0_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_ecc_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mss_cfg,
+ &qhs_pcie_parf,
+ &qhs_pdm,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qpic,
+ &qhs_sdc1,
+ &qhs_snoc_cfg,
+ &qhs_spmi_fetcher,
+ &qhs_spmi_vgi_coex,
+ &qhs_tcsr,
+ &qhs_tlmm,
+ &qhs_usb3,
+ &qhs_usb3_phy,
+ &srvc_snoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_pn1 = {
+ .name = "PN1",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xm_sdc1 },
+};
+
+static struct qcom_icc_bcm bcm_pn2 = {
+ .name = "PN2",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qhm_audio, &qhm_spmi_fetcher1 },
+};
+
+static struct qcom_icc_bcm bcm_pn3 = {
+ .name = "PN3",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qhm_blsp1, &qhm_qpic },
+};
+
+static struct qcom_icc_bcm bcm_pn4 = {
+ .name = "PN4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+ .name = "SH1",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_memnoc_snoc },
+};
+
+static struct qcom_icc_bcm bcm_sh3 = {
+ .name = "SH3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xm_apps_rdwr },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_snoc_memnoc },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_imem },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xs_qdss_stm },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xs_sys_tcu_cfg },
+};
+
+static struct qcom_icc_bcm bcm_sn5 = {
+ .name = "SN5",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xs_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn6 = {
+ .name = "SN6",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qhm_qdss_bam, &xm_qdss_etr },
+};
+
+static struct qcom_icc_bcm bcm_sn7 = {
+ .name = "SN7",
+ .keepalive = false,
+ .num_nodes = 4,
+ .nodes = { &qnm_aggre_noc, &xm_pcie, &xm_usb3, &qns_aggre_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn8 = {
+ .name = "SN8",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_memnoc },
+};
+
+static struct qcom_icc_bcm bcm_sn9 = {
+ .name = "SN9",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_memnoc_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn10 = {
+ .name = "SN10",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qnm_ipa, &xm_ipa2pcie_slv },
+};
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_mc0,
diff --git a/drivers/interconnect/qcom/sm6350.c b/drivers/interconnect/qcom/sm6350.c
index a3d46e59444e..49aed492e9b8 100644
--- a/drivers/interconnect/qcom/sm6350.c
+++ b/drivers/interconnect/qcom/sm6350.c
@@ -6,167 +6,1388 @@
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sm6350.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sm6350.h"
-DEFINE_QNODE(qhm_a1noc_cfg, SM6350_MASTER_A1NOC_CFG, 1, 4, SM6350_SLAVE_SERVICE_A1NOC);
-DEFINE_QNODE(qhm_qup_0, SM6350_MASTER_QUP_0, 1, 4, SM6350_A1NOC_SNOC_SLV);
-DEFINE_QNODE(xm_emmc, SM6350_MASTER_EMMC, 1, 8, SM6350_A1NOC_SNOC_SLV);
-DEFINE_QNODE(xm_ufs_mem, SM6350_MASTER_UFS_MEM, 1, 8, SM6350_A1NOC_SNOC_SLV);
-DEFINE_QNODE(qhm_a2noc_cfg, SM6350_MASTER_A2NOC_CFG, 1, 4, SM6350_SLAVE_SERVICE_A2NOC);
-DEFINE_QNODE(qhm_qdss_bam, SM6350_MASTER_QDSS_BAM, 1, 4, SM6350_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qhm_qup_1, SM6350_MASTER_QUP_1, 1, 4, SM6350_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qxm_crypto, SM6350_MASTER_CRYPTO_CORE_0, 1, 8, SM6350_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qxm_ipa, SM6350_MASTER_IPA, 1, 8, SM6350_A2NOC_SNOC_SLV);
-DEFINE_QNODE(xm_qdss_etr, SM6350_MASTER_QDSS_ETR, 1, 8, SM6350_A2NOC_SNOC_SLV);
-DEFINE_QNODE(xm_sdc2, SM6350_MASTER_SDCC_2, 1, 8, SM6350_A2NOC_SNOC_SLV);
-DEFINE_QNODE(xm_usb3_0, SM6350_MASTER_USB3, 1, 8, SM6350_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qxm_camnoc_hf0_uncomp, SM6350_MASTER_CAMNOC_HF0_UNCOMP, 2, 32, SM6350_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qxm_camnoc_icp_uncomp, SM6350_MASTER_CAMNOC_ICP_UNCOMP, 1, 32, SM6350_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qxm_camnoc_sf_uncomp, SM6350_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SM6350_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qup0_core_master, SM6350_MASTER_QUP_CORE_0, 1, 4, SM6350_SLAVE_QUP_CORE_0);
-DEFINE_QNODE(qup1_core_master, SM6350_MASTER_QUP_CORE_1, 1, 4, SM6350_SLAVE_QUP_CORE_1);
-DEFINE_QNODE(qnm_npu, SM6350_MASTER_NPU, 2, 32, SM6350_SLAVE_CDSP_GEM_NOC);
-DEFINE_QNODE(qxm_npu_dsp, SM6350_MASTER_NPU_PROC, 1, 8, SM6350_SLAVE_CDSP_GEM_NOC);
-DEFINE_QNODE(qnm_snoc, SM6350_SNOC_CNOC_MAS, 1, 8, SM6350_SLAVE_CAMERA_CFG, SM6350_SLAVE_SDCC_2, SM6350_SLAVE_CNOC_MNOC_CFG, SM6350_SLAVE_UFS_MEM_CFG, SM6350_SLAVE_QM_CFG, SM6350_SLAVE_SNOC_CFG, SM6350_SLAVE_QM_MPU_CFG, SM6350_SLAVE_GLM, SM6350_SLAVE_PDM, SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG, SM6350_SLAVE_A2NOC_CFG, SM6350_SLAVE_QDSS_CFG, SM6350_SLAVE_VSENSE_CTRL_CFG, SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG, SM6350_SLAVE_DISPLAY_CFG, SM6350_SLAVE_TCSR, SM6350_SLAVE_DCC_CFG, SM6350_SLAVE_CNOC_DDRSS, SM6350_SLAVE_DISPLAY_THROTTLE_CFG, SM6350_SLAVE_NPU_CFG, SM6350_SLAVE_AHB2PHY, SM6350_SLAVE_GRAPHICS_3D_CFG, SM6350_SLAVE_BOOT_ROM, SM6350_SLAVE_VENUS_CFG, SM6350_SLAVE_IPA_CFG, SM6350_SLAVE_SECURITY, SM6350_SLAVE_IMEM_CFG, SM6350_SLAVE_CNOC_MSS, SM6350_SLAVE_SERVICE_CNOC, SM6350_SLAVE_USB3, SM6350_SLAVE_VENUS_THROTTLE_CFG, SM6350_SLAVE_RBCPR_CX_CFG, SM6350_SLAVE_A1NOC_CFG, SM6350_SLAVE_AOSS, SM6350_SLAVE_PRNG, SM6350_SLAVE_EMMC_CFG, SM6350_SLAVE_CRYPTO_0_CFG, SM6350_SLAVE_PIMEM_CFG, SM6350_SLAVE_RBCPR_MX_CFG, SM6350_SLAVE_QUP_0, SM6350_SLAVE_QUP_1, SM6350_SLAVE_CLK_CTL);
-DEFINE_QNODE(xm_qdss_dap, SM6350_MASTER_QDSS_DAP, 1, 8, SM6350_SLAVE_CAMERA_CFG, SM6350_SLAVE_SDCC_2, SM6350_SLAVE_CNOC_MNOC_CFG, SM6350_SLAVE_UFS_MEM_CFG, SM6350_SLAVE_QM_CFG, SM6350_SLAVE_SNOC_CFG, SM6350_SLAVE_QM_MPU_CFG, SM6350_SLAVE_GLM, SM6350_SLAVE_PDM, SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG, SM6350_SLAVE_A2NOC_CFG, SM6350_SLAVE_QDSS_CFG, SM6350_SLAVE_VSENSE_CTRL_CFG, SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG, SM6350_SLAVE_DISPLAY_CFG, SM6350_SLAVE_TCSR, SM6350_SLAVE_DCC_CFG, SM6350_SLAVE_CNOC_DDRSS, SM6350_SLAVE_DISPLAY_THROTTLE_CFG, SM6350_SLAVE_NPU_CFG, SM6350_SLAVE_AHB2PHY, SM6350_SLAVE_GRAPHICS_3D_CFG, SM6350_SLAVE_BOOT_ROM, SM6350_SLAVE_VENUS_CFG, SM6350_SLAVE_IPA_CFG, SM6350_SLAVE_SECURITY, SM6350_SLAVE_IMEM_CFG, SM6350_SLAVE_CNOC_MSS, SM6350_SLAVE_SERVICE_CNOC, SM6350_SLAVE_USB3, SM6350_SLAVE_VENUS_THROTTLE_CFG, SM6350_SLAVE_RBCPR_CX_CFG, SM6350_SLAVE_A1NOC_CFG, SM6350_SLAVE_AOSS, SM6350_SLAVE_PRNG, SM6350_SLAVE_EMMC_CFG, SM6350_SLAVE_CRYPTO_0_CFG, SM6350_SLAVE_PIMEM_CFG, SM6350_SLAVE_RBCPR_MX_CFG, SM6350_SLAVE_QUP_0, SM6350_SLAVE_QUP_1, SM6350_SLAVE_CLK_CTL);
-DEFINE_QNODE(qhm_cnoc_dc_noc, SM6350_MASTER_CNOC_DC_NOC, 1, 4, SM6350_SLAVE_LLCC_CFG, SM6350_SLAVE_GEM_NOC_CFG);
-DEFINE_QNODE(acm_apps, SM6350_MASTER_AMPSS_M0, 1, 16, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(acm_sys_tcu, SM6350_MASTER_SYS_TCU, 1, 8, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(qhm_gemnoc_cfg, SM6350_MASTER_GEM_NOC_CFG, 1, 4, SM6350_SLAVE_MCDMA_MS_MPU_CFG, SM6350_SLAVE_SERVICE_GEM_NOC, SM6350_SLAVE_MSS_PROC_MS_MPU_CFG);
-DEFINE_QNODE(qnm_cmpnoc, SM6350_MASTER_COMPUTE_NOC, 1, 32, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(qnm_mnoc_hf, SM6350_MASTER_MNOC_HF_MEM_NOC, 1, 32, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(qnm_mnoc_sf, SM6350_MASTER_MNOC_SF_MEM_NOC, 1, 32, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(qnm_snoc_gc, SM6350_MASTER_SNOC_GC_MEM_NOC, 1, 8, SM6350_SLAVE_LLCC);
-DEFINE_QNODE(qnm_snoc_sf, SM6350_MASTER_SNOC_SF_MEM_NOC, 1, 16, SM6350_SLAVE_LLCC);
-DEFINE_QNODE(qxm_gpu, SM6350_MASTER_GRAPHICS_3D, 2, 32, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(llcc_mc, SM6350_MASTER_LLCC, 2, 4, SM6350_SLAVE_EBI_CH0);
-DEFINE_QNODE(qhm_mnoc_cfg, SM6350_MASTER_CNOC_MNOC_CFG, 1, 4, SM6350_SLAVE_SERVICE_MNOC);
-DEFINE_QNODE(qnm_video0, SM6350_MASTER_VIDEO_P0, 1, 32, SM6350_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qnm_video_cvp, SM6350_MASTER_VIDEO_PROC, 1, 8, SM6350_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_camnoc_hf, SM6350_MASTER_CAMNOC_HF, 2, 32, SM6350_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_camnoc_icp, SM6350_MASTER_CAMNOC_ICP, 1, 8, SM6350_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_camnoc_sf, SM6350_MASTER_CAMNOC_SF, 1, 32, SM6350_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_mdp0, SM6350_MASTER_MDP_PORT0, 1, 32, SM6350_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(amm_npu_sys, SM6350_MASTER_NPU_SYS, 2, 32, SM6350_SLAVE_NPU_COMPUTE_NOC);
-DEFINE_QNODE(qhm_npu_cfg, SM6350_MASTER_NPU_NOC_CFG, 1, 4, SM6350_SLAVE_SERVICE_NPU_NOC, SM6350_SLAVE_ISENSE_CFG, SM6350_SLAVE_NPU_LLM_CFG, SM6350_SLAVE_NPU_INT_DMA_BWMON_CFG, SM6350_SLAVE_NPU_CP, SM6350_SLAVE_NPU_TCM, SM6350_SLAVE_NPU_CAL_DP0, SM6350_SLAVE_NPU_DPM);
-DEFINE_QNODE(qhm_snoc_cfg, SM6350_MASTER_SNOC_CFG, 1, 4, SM6350_SLAVE_SERVICE_SNOC);
-DEFINE_QNODE(qnm_aggre1_noc, SM6350_A1NOC_SNOC_MAS, 1, 16, SM6350_SLAVE_SNOC_GEM_NOC_SF, SM6350_SLAVE_PIMEM, SM6350_SLAVE_OCIMEM, SM6350_SLAVE_APPSS, SM6350_SNOC_CNOC_SLV, SM6350_SLAVE_QDSS_STM);
-DEFINE_QNODE(qnm_aggre2_noc, SM6350_A2NOC_SNOC_MAS, 1, 16, SM6350_SLAVE_SNOC_GEM_NOC_SF, SM6350_SLAVE_PIMEM, SM6350_SLAVE_OCIMEM, SM6350_SLAVE_APPSS, SM6350_SNOC_CNOC_SLV, SM6350_SLAVE_TCU, SM6350_SLAVE_QDSS_STM);
-DEFINE_QNODE(qnm_gemnoc, SM6350_MASTER_GEM_NOC_SNOC, 1, 8, SM6350_SLAVE_PIMEM, SM6350_SLAVE_OCIMEM, SM6350_SLAVE_APPSS, SM6350_SNOC_CNOC_SLV, SM6350_SLAVE_TCU, SM6350_SLAVE_QDSS_STM);
-DEFINE_QNODE(qxm_pimem, SM6350_MASTER_PIMEM, 1, 8, SM6350_SLAVE_SNOC_GEM_NOC_GC, SM6350_SLAVE_OCIMEM);
-DEFINE_QNODE(xm_gic, SM6350_MASTER_GIC, 1, 8, SM6350_SLAVE_SNOC_GEM_NOC_GC);
-DEFINE_QNODE(qns_a1noc_snoc, SM6350_A1NOC_SNOC_SLV, 1, 16, SM6350_A1NOC_SNOC_MAS);
-DEFINE_QNODE(srvc_aggre1_noc, SM6350_SLAVE_SERVICE_A1NOC, 1, 4);
-DEFINE_QNODE(qns_a2noc_snoc, SM6350_A2NOC_SNOC_SLV, 1, 16, SM6350_A2NOC_SNOC_MAS);
-DEFINE_QNODE(srvc_aggre2_noc, SM6350_SLAVE_SERVICE_A2NOC, 1, 4);
-DEFINE_QNODE(qns_camnoc_uncomp, SM6350_SLAVE_CAMNOC_UNCOMP, 1, 32);
-DEFINE_QNODE(qup0_core_slave, SM6350_SLAVE_QUP_CORE_0, 1, 4);
-DEFINE_QNODE(qup1_core_slave, SM6350_SLAVE_QUP_CORE_1, 1, 4);
-DEFINE_QNODE(qns_cdsp_gemnoc, SM6350_SLAVE_CDSP_GEM_NOC, 1, 32, SM6350_MASTER_COMPUTE_NOC);
-DEFINE_QNODE(qhs_a1_noc_cfg, SM6350_SLAVE_A1NOC_CFG, 1, 4, SM6350_MASTER_A1NOC_CFG);
-DEFINE_QNODE(qhs_a2_noc_cfg, SM6350_SLAVE_A2NOC_CFG, 1, 4, SM6350_MASTER_A2NOC_CFG);
-DEFINE_QNODE(qhs_ahb2phy0, SM6350_SLAVE_AHB2PHY, 1, 4);
-DEFINE_QNODE(qhs_ahb2phy2, SM6350_SLAVE_AHB2PHY_2, 1, 4);
-DEFINE_QNODE(qhs_aoss, SM6350_SLAVE_AOSS, 1, 4);
-DEFINE_QNODE(qhs_boot_rom, SM6350_SLAVE_BOOT_ROM, 1, 4);
-DEFINE_QNODE(qhs_camera_cfg, SM6350_SLAVE_CAMERA_CFG, 1, 4);
-DEFINE_QNODE(qhs_camera_nrt_thrott_cfg, SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG, 1, 4);
-DEFINE_QNODE(qhs_camera_rt_throttle_cfg, SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG, 1, 4);
-DEFINE_QNODE(qhs_clk_ctl, SM6350_SLAVE_CLK_CTL, 1, 4);
-DEFINE_QNODE(qhs_cpr_cx, SM6350_SLAVE_RBCPR_CX_CFG, 1, 4);
-DEFINE_QNODE(qhs_cpr_mx, SM6350_SLAVE_RBCPR_MX_CFG, 1, 4);
-DEFINE_QNODE(qhs_crypto0_cfg, SM6350_SLAVE_CRYPTO_0_CFG, 1, 4);
-DEFINE_QNODE(qhs_dcc_cfg, SM6350_SLAVE_DCC_CFG, 1, 4);
-DEFINE_QNODE(qhs_ddrss_cfg, SM6350_SLAVE_CNOC_DDRSS, 1, 4, SM6350_MASTER_CNOC_DC_NOC);
-DEFINE_QNODE(qhs_display_cfg, SM6350_SLAVE_DISPLAY_CFG, 1, 4);
-DEFINE_QNODE(qhs_display_throttle_cfg, SM6350_SLAVE_DISPLAY_THROTTLE_CFG, 1, 4);
-DEFINE_QNODE(qhs_emmc_cfg, SM6350_SLAVE_EMMC_CFG, 1, 4);
-DEFINE_QNODE(qhs_glm, SM6350_SLAVE_GLM, 1, 4);
-DEFINE_QNODE(qhs_gpuss_cfg, SM6350_SLAVE_GRAPHICS_3D_CFG, 1, 8);
-DEFINE_QNODE(qhs_imem_cfg, SM6350_SLAVE_IMEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_ipa, SM6350_SLAVE_IPA_CFG, 1, 4);
-DEFINE_QNODE(qhs_mnoc_cfg, SM6350_SLAVE_CNOC_MNOC_CFG, 1, 4, SM6350_MASTER_CNOC_MNOC_CFG);
-DEFINE_QNODE(qhs_mss_cfg, SM6350_SLAVE_CNOC_MSS, 1, 4);
-DEFINE_QNODE(qhs_npu_cfg, SM6350_SLAVE_NPU_CFG, 1, 4, SM6350_MASTER_NPU_NOC_CFG);
-DEFINE_QNODE(qhs_pdm, SM6350_SLAVE_PDM, 1, 4);
-DEFINE_QNODE(qhs_pimem_cfg, SM6350_SLAVE_PIMEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_prng, SM6350_SLAVE_PRNG, 1, 4);
-DEFINE_QNODE(qhs_qdss_cfg, SM6350_SLAVE_QDSS_CFG, 1, 4);
-DEFINE_QNODE(qhs_qm_cfg, SM6350_SLAVE_QM_CFG, 1, 4);
-DEFINE_QNODE(qhs_qm_mpu_cfg, SM6350_SLAVE_QM_MPU_CFG, 1, 4);
-DEFINE_QNODE(qhs_qup0, SM6350_SLAVE_QUP_0, 1, 4);
-DEFINE_QNODE(qhs_qup1, SM6350_SLAVE_QUP_1, 1, 4);
-DEFINE_QNODE(qhs_sdc2, SM6350_SLAVE_SDCC_2, 1, 4);
-DEFINE_QNODE(qhs_security, SM6350_SLAVE_SECURITY, 1, 4);
-DEFINE_QNODE(qhs_snoc_cfg, SM6350_SLAVE_SNOC_CFG, 1, 4, SM6350_MASTER_SNOC_CFG);
-DEFINE_QNODE(qhs_tcsr, SM6350_SLAVE_TCSR, 1, 4);
-DEFINE_QNODE(qhs_ufs_mem_cfg, SM6350_SLAVE_UFS_MEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_usb3_0, SM6350_SLAVE_USB3, 1, 4);
-DEFINE_QNODE(qhs_venus_cfg, SM6350_SLAVE_VENUS_CFG, 1, 4);
-DEFINE_QNODE(qhs_venus_throttle_cfg, SM6350_SLAVE_VENUS_THROTTLE_CFG, 1, 4);
-DEFINE_QNODE(qhs_vsense_ctrl_cfg, SM6350_SLAVE_VSENSE_CTRL_CFG, 1, 4);
-DEFINE_QNODE(srvc_cnoc, SM6350_SLAVE_SERVICE_CNOC, 1, 4);
-DEFINE_QNODE(qhs_gemnoc, SM6350_SLAVE_GEM_NOC_CFG, 1, 4, SM6350_MASTER_GEM_NOC_CFG);
-DEFINE_QNODE(qhs_llcc, SM6350_SLAVE_LLCC_CFG, 1, 4);
-DEFINE_QNODE(qhs_mcdma_ms_mpu_cfg, SM6350_SLAVE_MCDMA_MS_MPU_CFG, 1, 4);
-DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SM6350_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
-DEFINE_QNODE(qns_gem_noc_snoc, SM6350_SLAVE_GEM_NOC_SNOC, 1, 8, SM6350_MASTER_GEM_NOC_SNOC);
-DEFINE_QNODE(qns_llcc, SM6350_SLAVE_LLCC, 1, 16, SM6350_MASTER_LLCC);
-DEFINE_QNODE(srvc_gemnoc, SM6350_SLAVE_SERVICE_GEM_NOC, 1, 4);
-DEFINE_QNODE(ebi, SM6350_SLAVE_EBI_CH0, 2, 4);
-DEFINE_QNODE(qns_mem_noc_hf, SM6350_SLAVE_MNOC_HF_MEM_NOC, 1, 32, SM6350_MASTER_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qns_mem_noc_sf, SM6350_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SM6350_MASTER_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(srvc_mnoc, SM6350_SLAVE_SERVICE_MNOC, 1, 4);
-DEFINE_QNODE(qhs_cal_dp0, SM6350_SLAVE_NPU_CAL_DP0, 1, 4);
-DEFINE_QNODE(qhs_cp, SM6350_SLAVE_NPU_CP, 1, 4);
-DEFINE_QNODE(qhs_dma_bwmon, SM6350_SLAVE_NPU_INT_DMA_BWMON_CFG, 1, 4);
-DEFINE_QNODE(qhs_dpm, SM6350_SLAVE_NPU_DPM, 1, 4);
-DEFINE_QNODE(qhs_isense, SM6350_SLAVE_ISENSE_CFG, 1, 4);
-DEFINE_QNODE(qhs_llm, SM6350_SLAVE_NPU_LLM_CFG, 1, 4);
-DEFINE_QNODE(qhs_tcm, SM6350_SLAVE_NPU_TCM, 1, 4);
-DEFINE_QNODE(qns_npu_sys, SM6350_SLAVE_NPU_COMPUTE_NOC, 2, 32);
-DEFINE_QNODE(srvc_noc, SM6350_SLAVE_SERVICE_NPU_NOC, 1, 4);
-DEFINE_QNODE(qhs_apss, SM6350_SLAVE_APPSS, 1, 8);
-DEFINE_QNODE(qns_cnoc, SM6350_SNOC_CNOC_SLV, 1, 8, SM6350_SNOC_CNOC_MAS);
-DEFINE_QNODE(qns_gemnoc_gc, SM6350_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SM6350_MASTER_SNOC_GC_MEM_NOC);
-DEFINE_QNODE(qns_gemnoc_sf, SM6350_SLAVE_SNOC_GEM_NOC_SF, 1, 16, SM6350_MASTER_SNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxs_imem, SM6350_SLAVE_OCIMEM, 1, 8);
-DEFINE_QNODE(qxs_pimem, SM6350_SLAVE_PIMEM, 1, 8);
-DEFINE_QNODE(srvc_snoc, SM6350_SLAVE_SERVICE_SNOC, 1, 4);
-DEFINE_QNODE(xs_qdss_stm, SM6350_SLAVE_QDSS_STM, 1, 4);
-DEFINE_QNODE(xs_sys_tcu_cfg, SM6350_SLAVE_TCU, 1, 8);
-
-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
-DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_thrott_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
-DEFINE_QBCM(bcm_cn1, "CN1", false, &xm_emmc, &xm_sdc2, &qhs_ahb2phy2, &qhs_emmc_cfg, &qhs_pdm, &qhs_sdc2);
-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_gemnoc);
-DEFINE_QBCM(bcm_co2, "CO2", false, &qnm_npu);
-DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_npu_dsp);
-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
-DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
-DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_icp_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf, &qxm_mdp0);
-DEFINE_QBCM(bcm_mm2, "MM2", false, &qns_mem_noc_sf);
-DEFINE_QBCM(bcm_mm3, "MM3", false, &qhm_mnoc_cfg, &qnm_video0, &qnm_video_cvp, &qxm_camnoc_sf);
-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qup0_core_master, &qup1_core_master, &qup0_core_slave, &qup1_core_slave);
-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
-DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);
-DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
-DEFINE_QBCM(bcm_sh4, "SH4", false, &acm_apps);
-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
-DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
-DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
-DEFINE_QBCM(bcm_sn5, "SN5", false, &qnm_aggre1_noc);
-DEFINE_QBCM(bcm_sn6, "SN6", false, &qnm_aggre2_noc);
-DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_gemnoc);
+static struct qcom_icc_node qhm_a1noc_cfg = {
+ .name = "qhm_a1noc_cfg",
+ .id = SM6350_MASTER_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_SERVICE_A1NOC },
+};
+
+static struct qcom_icc_node qhm_qup_0 = {
+ .name = "qhm_qup_0",
+ .id = SM6350_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_emmc = {
+ .name = "xm_emmc",
+ .id = SM6350_MASTER_EMMC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM6350_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SM6350_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM6350_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_a2noc_cfg = {
+ .name = "qhm_a2noc_cfg",
+ .id = SM6350_MASTER_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_SERVICE_A2NOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SM6350_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_qup_1 = {
+ .name = "qhm_qup_1",
+ .id = SM6350_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SM6350_MASTER_CRYPTO_CORE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM6350_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SM6350_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM6350_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+ .name = "xm_qdss_etr",
+ .id = SM6350_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM6350_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SM6350_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM6350_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SM6350_MASTER_USB3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM6350_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
+ .name = "qxm_camnoc_hf0_uncomp",
+ .id = SM6350_MASTER_CAMNOC_HF0_UNCOMP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qxm_camnoc_icp_uncomp = {
+ .name = "qxm_camnoc_icp_uncomp",
+ .id = SM6350_MASTER_CAMNOC_ICP_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
+ .name = "qxm_camnoc_sf_uncomp",
+ .id = SM6350_MASTER_CAMNOC_SF_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .id = SM6350_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .id = SM6350_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qnm_npu = {
+ .name = "qnm_npu",
+ .id = SM6350_MASTER_NPU,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_CDSP_GEM_NOC },
+};
+
+static struct qcom_icc_node qxm_npu_dsp = {
+ .name = "qxm_npu_dsp",
+ .id = SM6350_MASTER_NPU_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_CDSP_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_snoc = {
+ .name = "qnm_snoc",
+ .id = SM6350_SNOC_CNOC_MAS,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 42,
+ .links = { SM6350_SLAVE_CAMERA_CFG,
+ SM6350_SLAVE_SDCC_2,
+ SM6350_SLAVE_CNOC_MNOC_CFG,
+ SM6350_SLAVE_UFS_MEM_CFG,
+ SM6350_SLAVE_QM_CFG,
+ SM6350_SLAVE_SNOC_CFG,
+ SM6350_SLAVE_QM_MPU_CFG,
+ SM6350_SLAVE_GLM,
+ SM6350_SLAVE_PDM,
+ SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ SM6350_SLAVE_A2NOC_CFG,
+ SM6350_SLAVE_QDSS_CFG,
+ SM6350_SLAVE_VSENSE_CTRL_CFG,
+ SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ SM6350_SLAVE_DISPLAY_CFG,
+ SM6350_SLAVE_TCSR,
+ SM6350_SLAVE_DCC_CFG,
+ SM6350_SLAVE_CNOC_DDRSS,
+ SM6350_SLAVE_DISPLAY_THROTTLE_CFG,
+ SM6350_SLAVE_NPU_CFG,
+ SM6350_SLAVE_AHB2PHY,
+ SM6350_SLAVE_GRAPHICS_3D_CFG,
+ SM6350_SLAVE_BOOT_ROM,
+ SM6350_SLAVE_VENUS_CFG,
+ SM6350_SLAVE_IPA_CFG,
+ SM6350_SLAVE_SECURITY,
+ SM6350_SLAVE_IMEM_CFG,
+ SM6350_SLAVE_CNOC_MSS,
+ SM6350_SLAVE_SERVICE_CNOC,
+ SM6350_SLAVE_USB3,
+ SM6350_SLAVE_VENUS_THROTTLE_CFG,
+ SM6350_SLAVE_RBCPR_CX_CFG,
+ SM6350_SLAVE_A1NOC_CFG,
+ SM6350_SLAVE_AOSS,
+ SM6350_SLAVE_PRNG,
+ SM6350_SLAVE_EMMC_CFG,
+ SM6350_SLAVE_CRYPTO_0_CFG,
+ SM6350_SLAVE_PIMEM_CFG,
+ SM6350_SLAVE_RBCPR_MX_CFG,
+ SM6350_SLAVE_QUP_0,
+ SM6350_SLAVE_QUP_1,
+ SM6350_SLAVE_CLK_CTL
+ },
+};
+
+static struct qcom_icc_node xm_qdss_dap = {
+ .name = "xm_qdss_dap",
+ .id = SM6350_MASTER_QDSS_DAP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 42,
+ .links = { SM6350_SLAVE_CAMERA_CFG,
+ SM6350_SLAVE_SDCC_2,
+ SM6350_SLAVE_CNOC_MNOC_CFG,
+ SM6350_SLAVE_UFS_MEM_CFG,
+ SM6350_SLAVE_QM_CFG,
+ SM6350_SLAVE_SNOC_CFG,
+ SM6350_SLAVE_QM_MPU_CFG,
+ SM6350_SLAVE_GLM,
+ SM6350_SLAVE_PDM,
+ SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ SM6350_SLAVE_A2NOC_CFG,
+ SM6350_SLAVE_QDSS_CFG,
+ SM6350_SLAVE_VSENSE_CTRL_CFG,
+ SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ SM6350_SLAVE_DISPLAY_CFG,
+ SM6350_SLAVE_TCSR,
+ SM6350_SLAVE_DCC_CFG,
+ SM6350_SLAVE_CNOC_DDRSS,
+ SM6350_SLAVE_DISPLAY_THROTTLE_CFG,
+ SM6350_SLAVE_NPU_CFG,
+ SM6350_SLAVE_AHB2PHY,
+ SM6350_SLAVE_GRAPHICS_3D_CFG,
+ SM6350_SLAVE_BOOT_ROM,
+ SM6350_SLAVE_VENUS_CFG,
+ SM6350_SLAVE_IPA_CFG,
+ SM6350_SLAVE_SECURITY,
+ SM6350_SLAVE_IMEM_CFG,
+ SM6350_SLAVE_CNOC_MSS,
+ SM6350_SLAVE_SERVICE_CNOC,
+ SM6350_SLAVE_USB3,
+ SM6350_SLAVE_VENUS_THROTTLE_CFG,
+ SM6350_SLAVE_RBCPR_CX_CFG,
+ SM6350_SLAVE_A1NOC_CFG,
+ SM6350_SLAVE_AOSS,
+ SM6350_SLAVE_PRNG,
+ SM6350_SLAVE_EMMC_CFG,
+ SM6350_SLAVE_CRYPTO_0_CFG,
+ SM6350_SLAVE_PIMEM_CFG,
+ SM6350_SLAVE_RBCPR_MX_CFG,
+ SM6350_SLAVE_QUP_0,
+ SM6350_SLAVE_QUP_1,
+ SM6350_SLAVE_CLK_CTL
+ },
+};
+
+static struct qcom_icc_node qhm_cnoc_dc_noc = {
+ .name = "qhm_cnoc_dc_noc",
+ .id = SM6350_MASTER_CNOC_DC_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SM6350_SLAVE_LLCC_CFG,
+ SM6350_SLAVE_GEM_NOC_CFG
+ },
+};
+
+static struct qcom_icc_node acm_apps = {
+ .name = "acm_apps",
+ .id = SM6350_MASTER_AMPSS_M0,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SM6350_SLAVE_LLCC,
+ SM6350_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node acm_sys_tcu = {
+ .name = "acm_sys_tcu",
+ .id = SM6350_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM6350_SLAVE_LLCC,
+ SM6350_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qhm_gemnoc_cfg = {
+ .name = "qhm_gemnoc_cfg",
+ .id = SM6350_MASTER_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 3,
+ .links = { SM6350_SLAVE_MCDMA_MS_MPU_CFG,
+ SM6350_SLAVE_SERVICE_GEM_NOC,
+ SM6350_SLAVE_MSS_PROC_MS_MPU_CFG
+ },
+};
+
+static struct qcom_icc_node qnm_cmpnoc = {
+ .name = "qnm_cmpnoc",
+ .id = SM6350_MASTER_COMPUTE_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM6350_SLAVE_LLCC,
+ SM6350_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SM6350_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM6350_SLAVE_LLCC,
+ SM6350_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SM6350_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM6350_SLAVE_LLCC,
+ SM6350_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SM6350_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SM6350_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qxm_gpu = {
+ .name = "qxm_gpu",
+ .id = SM6350_MASTER_GRAPHICS_3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM6350_SLAVE_LLCC,
+ SM6350_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SM6350_MASTER_LLCC,
+ .channels = 2,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_EBI_CH0 },
+};
+
+static struct qcom_icc_node qhm_mnoc_cfg = {
+ .name = "qhm_mnoc_cfg",
+ .id = SM6350_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qnm_video0 = {
+ .name = "qnm_video0",
+ .id = SM6350_MASTER_VIDEO_P0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cvp = {
+ .name = "qnm_video_cvp",
+ .id = SM6350_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf = {
+ .name = "qxm_camnoc_hf",
+ .id = SM6350_MASTER_CAMNOC_HF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_icp = {
+ .name = "qxm_camnoc_icp",
+ .id = SM6350_MASTER_CAMNOC_ICP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf = {
+ .name = "qxm_camnoc_sf",
+ .id = SM6350_MASTER_CAMNOC_SF,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp0 = {
+ .name = "qxm_mdp0",
+ .id = SM6350_MASTER_MDP_PORT0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node amm_npu_sys = {
+ .name = "amm_npu_sys",
+ .id = SM6350_MASTER_NPU_SYS,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_NPU_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qhm_npu_cfg = {
+ .name = "qhm_npu_cfg",
+ .id = SM6350_MASTER_NPU_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 8,
+ .links = { SM6350_SLAVE_SERVICE_NPU_NOC,
+ SM6350_SLAVE_ISENSE_CFG,
+ SM6350_SLAVE_NPU_LLM_CFG,
+ SM6350_SLAVE_NPU_INT_DMA_BWMON_CFG,
+ SM6350_SLAVE_NPU_CP,
+ SM6350_SLAVE_NPU_TCM,
+ SM6350_SLAVE_NPU_CAL_DP0,
+ SM6350_SLAVE_NPU_DPM
+ },
+};
+
+static struct qcom_icc_node qhm_snoc_cfg = {
+ .name = "qhm_snoc_cfg",
+ .id = SM6350_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SM6350_A1NOC_SNOC_MAS,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 6,
+ .links = { SM6350_SLAVE_SNOC_GEM_NOC_SF,
+ SM6350_SLAVE_PIMEM,
+ SM6350_SLAVE_OCIMEM,
+ SM6350_SLAVE_APPSS,
+ SM6350_SNOC_CNOC_SLV,
+ SM6350_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SM6350_A2NOC_SNOC_MAS,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 7,
+ .links = { SM6350_SLAVE_SNOC_GEM_NOC_SF,
+ SM6350_SLAVE_PIMEM,
+ SM6350_SLAVE_OCIMEM,
+ SM6350_SLAVE_APPSS,
+ SM6350_SNOC_CNOC_SLV,
+ SM6350_SLAVE_TCU,
+ SM6350_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qnm_gemnoc = {
+ .name = "qnm_gemnoc",
+ .id = SM6350_MASTER_GEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 6,
+ .links = { SM6350_SLAVE_PIMEM,
+ SM6350_SLAVE_OCIMEM,
+ SM6350_SLAVE_APPSS,
+ SM6350_SNOC_CNOC_SLV,
+ SM6350_SLAVE_TCU,
+ SM6350_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = SM6350_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM6350_SLAVE_SNOC_GEM_NOC_GC,
+ SM6350_SLAVE_OCIMEM
+ },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SM6350_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM6350_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SM6350_A1NOC_SNOC_SLV,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM6350_A1NOC_SNOC_MAS },
+};
+
+static struct qcom_icc_node srvc_aggre1_noc = {
+ .name = "srvc_aggre1_noc",
+ .id = SM6350_SLAVE_SERVICE_A1NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SM6350_A2NOC_SNOC_SLV,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM6350_A2NOC_SNOC_MAS },
+};
+
+static struct qcom_icc_node srvc_aggre2_noc = {
+ .name = "srvc_aggre2_noc",
+ .id = SM6350_SLAVE_SERVICE_A2NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_camnoc_uncomp = {
+ .name = "qns_camnoc_uncomp",
+ .id = SM6350_SLAVE_CAMNOC_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .id = SM6350_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .id = SM6350_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_cdsp_gemnoc = {
+ .name = "qns_cdsp_gemnoc",
+ .id = SM6350_SLAVE_CDSP_GEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM6350_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qhs_a1_noc_cfg = {
+ .name = "qhs_a1_noc_cfg",
+ .id = SM6350_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_MASTER_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_a2_noc_cfg = {
+ .name = "qhs_a2_noc_cfg",
+ .id = SM6350_SLAVE_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_MASTER_A2NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .id = SM6350_SLAVE_AHB2PHY,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy2 = {
+ .name = "qhs_ahb2phy2",
+ .id = SM6350_SLAVE_AHB2PHY_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SM6350_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_boot_rom = {
+ .name = "qhs_boot_rom",
+ .id = SM6350_SLAVE_BOOT_ROM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SM6350_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_nrt_thrott_cfg = {
+ .name = "qhs_camera_nrt_thrott_cfg",
+ .id = SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_rt_throttle_cfg = {
+ .name = "qhs_camera_rt_throttle_cfg",
+ .id = SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SM6350_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SM6350_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mx = {
+ .name = "qhs_cpr_mx",
+ .id = SM6350_SLAVE_RBCPR_MX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SM6350_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_dcc_cfg = {
+ .name = "qhs_dcc_cfg",
+ .id = SM6350_SLAVE_DCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ddrss_cfg = {
+ .name = "qhs_ddrss_cfg",
+ .id = SM6350_SLAVE_CNOC_DDRSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_MASTER_CNOC_DC_NOC },
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = SM6350_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display_throttle_cfg = {
+ .name = "qhs_display_throttle_cfg",
+ .id = SM6350_SLAVE_DISPLAY_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_emmc_cfg = {
+ .name = "qhs_emmc_cfg",
+ .id = SM6350_SLAVE_EMMC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_glm = {
+ .name = "qhs_glm",
+ .id = SM6350_SLAVE_GLM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SM6350_SLAVE_GRAPHICS_3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SM6350_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SM6350_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mnoc_cfg = {
+ .name = "qhs_mnoc_cfg",
+ .id = SM6350_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_mss_cfg = {
+ .name = "qhs_mss_cfg",
+ .id = SM6350_SLAVE_CNOC_MSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_npu_cfg = {
+ .name = "qhs_npu_cfg",
+ .id = SM6350_SLAVE_NPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_MASTER_NPU_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SM6350_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SM6350_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SM6350_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SM6350_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qm_cfg = {
+ .name = "qhs_qm_cfg",
+ .id = SM6350_SLAVE_QM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qm_mpu_cfg = {
+ .name = "qhs_qm_mpu_cfg",
+ .id = SM6350_SLAVE_QM_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .id = SM6350_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .id = SM6350_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SM6350_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_security = {
+ .name = "qhs_security",
+ .id = SM6350_SLAVE_SECURITY,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_snoc_cfg = {
+ .name = "qhs_snoc_cfg",
+ .id = SM6350_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SM6350_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SM6350_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SM6350_SLAVE_USB3,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SM6350_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_throttle_cfg = {
+ .name = "qhs_venus_throttle_cfg",
+ .id = SM6350_SLAVE_VENUS_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SM6350_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_cnoc = {
+ .name = "srvc_cnoc",
+ .id = SM6350_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gemnoc = {
+ .name = "qhs_gemnoc",
+ .id = SM6350_SLAVE_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM6350_MASTER_GEM_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_llcc = {
+ .name = "qhs_llcc",
+ .id = SM6350_SLAVE_LLCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mcdma_ms_mpu_cfg = {
+ .name = "qhs_mcdma_ms_mpu_cfg",
+ .id = SM6350_SLAVE_MCDMA_MS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
+ .name = "qhs_mdsp_ms_mpu_cfg",
+ .id = SM6350_SLAVE_MSS_PROC_MS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_gem_noc_snoc = {
+ .name = "qns_gem_noc_snoc",
+ .id = SM6350_SLAVE_GEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM6350_MASTER_GEM_NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SM6350_SLAVE_LLCC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM6350_MASTER_LLCC },
+};
+
+static struct qcom_icc_node srvc_gemnoc = {
+ .name = "srvc_gemnoc",
+ .id = SM6350_SLAVE_SERVICE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SM6350_SLAVE_EBI_CH0,
+ .channels = 2,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SM6350_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM6350_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .id = SM6350_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM6350_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SM6350_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cal_dp0 = {
+ .name = "qhs_cal_dp0",
+ .id = SM6350_SLAVE_NPU_CAL_DP0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cp = {
+ .name = "qhs_cp",
+ .id = SM6350_SLAVE_NPU_CP,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_dma_bwmon = {
+ .name = "qhs_dma_bwmon",
+ .id = SM6350_SLAVE_NPU_INT_DMA_BWMON_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_dpm = {
+ .name = "qhs_dpm",
+ .id = SM6350_SLAVE_NPU_DPM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_isense = {
+ .name = "qhs_isense",
+ .id = SM6350_SLAVE_ISENSE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_llm = {
+ .name = "qhs_llm",
+ .id = SM6350_SLAVE_NPU_LLM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tcm = {
+ .name = "qhs_tcm",
+ .id = SM6350_SLAVE_NPU_TCM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_npu_sys = {
+ .name = "qns_npu_sys",
+ .id = SM6350_SLAVE_NPU_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node srvc_noc = {
+ .name = "srvc_noc",
+ .id = SM6350_SLAVE_SERVICE_NPU_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SM6350_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qns_cnoc = {
+ .name = "qns_cnoc",
+ .id = SM6350_SNOC_CNOC_SLV,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM6350_SNOC_CNOC_MAS },
+};
+
+static struct qcom_icc_node qns_gemnoc_gc = {
+ .name = "qns_gemnoc_gc",
+ .id = SM6350_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM6350_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SM6350_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM6350_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SM6350_SLAVE_OCIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = SM6350_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SM6350_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SM6350_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SM6350_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 41,
+ .nodes = { &qnm_snoc,
+ &xm_qdss_dap,
+ &qhs_a1_noc_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_ahb2phy0,
+ &qhs_aoss,
+ &qhs_boot_rom,
+ &qhs_camera_cfg,
+ &qhs_camera_nrt_thrott_cfg,
+ &qhs_camera_rt_throttle_cfg,
+ &qhs_clk_ctl,
+ &qhs_cpr_cx,
+ &qhs_cpr_mx,
+ &qhs_crypto0_cfg,
+ &qhs_dcc_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_display_cfg,
+ &qhs_display_throttle_cfg,
+ &qhs_glm,
+ &qhs_gpuss_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mnoc_cfg,
+ &qhs_mss_cfg,
+ &qhs_npu_cfg,
+ &qhs_pimem_cfg,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qm_cfg,
+ &qhs_qm_mpu_cfg,
+ &qhs_qup0,
+ &qhs_qup1,
+ &qhs_security,
+ &qhs_snoc_cfg,
+ &qhs_tcsr,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3_0,
+ &qhs_venus_cfg,
+ &qhs_venus_throttle_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &srvc_cnoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+ .name = "CN1",
+ .keepalive = false,
+ .num_nodes = 6,
+ .nodes = { &xm_emmc,
+ &xm_sdc2,
+ &qhs_ahb2phy2,
+ &qhs_emmc_cfg,
+ &qhs_pdm,
+ &qhs_sdc2
+ },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_cdsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_co2 = {
+ .name = "CO2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_npu },
+};
+
+static struct qcom_icc_bcm bcm_co3 = {
+ .name = "CO3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_npu_dsp },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .keepalive = true,
+ .num_nodes = 5,
+ .nodes = { &qxm_camnoc_hf0_uncomp,
+ &qxm_camnoc_icp_uncomp,
+ &qxm_camnoc_sf_uncomp,
+ &qxm_camnoc_hf,
+ &qxm_mdp0
+ },
+};
+
+static struct qcom_icc_bcm bcm_mm2 = {
+ .name = "MM2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_mm3 = {
+ .name = "MM3",
+ .keepalive = false,
+ .num_nodes = 4,
+ .nodes = { &qhm_mnoc_cfg, &qnm_video0, &qnm_video_cvp, &qxm_camnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = false,
+ .num_nodes = 4,
+ .nodes = { &qup0_core_master, &qup1_core_master, &qup0_core_slave, &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+ .name = "SH2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &acm_sys_tcu },
+};
+
+static struct qcom_icc_bcm bcm_sh3 = {
+ .name = "SH3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_cmpnoc },
+};
+
+static struct qcom_icc_bcm bcm_sh4 = {
+ .name = "SH4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &acm_apps },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_imem },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_pimem },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xs_qdss_stm },
+};
+
+static struct qcom_icc_bcm bcm_sn5 = {
+ .name = "SN5",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn6 = {
+ .name = "SN6",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn10 = {
+ .name = "SN10",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_gemnoc },
+};
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_cn1,
diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
index c5ab29322164..c7c9cf7f746b 100644
--- a/drivers/interconnect/qcom/sm8150.c
+++ b/drivers/interconnect/qcom/sm8150.c
@@ -7,181 +7,1534 @@
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sm8150.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sm8150.h"
-DEFINE_QNODE(qhm_a1noc_cfg, SM8150_MASTER_A1NOC_CFG, 1, 4, SM8150_SLAVE_SERVICE_A1NOC);
-DEFINE_QNODE(qhm_qup0, SM8150_MASTER_QUP_0, 1, 4, SM8150_A1NOC_SNOC_SLV);
-DEFINE_QNODE(xm_emac, SM8150_MASTER_EMAC, 1, 8, SM8150_A1NOC_SNOC_SLV);
-DEFINE_QNODE(xm_ufs_mem, SM8150_MASTER_UFS_MEM, 1, 8, SM8150_A1NOC_SNOC_SLV);
-DEFINE_QNODE(xm_usb3_0, SM8150_MASTER_USB3, 1, 8, SM8150_A1NOC_SNOC_SLV);
-DEFINE_QNODE(xm_usb3_1, SM8150_MASTER_USB3_1, 1, 8, SM8150_A1NOC_SNOC_SLV);
-DEFINE_QNODE(qhm_a2noc_cfg, SM8150_MASTER_A2NOC_CFG, 1, 4, SM8150_SLAVE_SERVICE_A2NOC);
-DEFINE_QNODE(qhm_qdss_bam, SM8150_MASTER_QDSS_BAM, 1, 4, SM8150_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qhm_qspi, SM8150_MASTER_QSPI, 1, 4, SM8150_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qhm_qup1, SM8150_MASTER_QUP_1, 1, 4, SM8150_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qhm_qup2, SM8150_MASTER_QUP_2, 1, 4, SM8150_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qhm_sensorss_ahb, SM8150_MASTER_SENSORS_AHB, 1, 4, SM8150_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qhm_tsif, SM8150_MASTER_TSIF, 1, 4, SM8150_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qnm_cnoc, SM8150_MASTER_CNOC_A2NOC, 1, 8, SM8150_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qxm_crypto, SM8150_MASTER_CRYPTO_CORE_0, 1, 8, SM8150_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qxm_ipa, SM8150_MASTER_IPA, 1, 8, SM8150_A2NOC_SNOC_SLV);
-DEFINE_QNODE(xm_pcie3_0, SM8150_MASTER_PCIE, 1, 8, SM8150_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(xm_pcie3_1, SM8150_MASTER_PCIE_1, 1, 8, SM8150_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(xm_qdss_etr, SM8150_MASTER_QDSS_ETR, 1, 8, SM8150_A2NOC_SNOC_SLV);
-DEFINE_QNODE(xm_sdc2, SM8150_MASTER_SDCC_2, 1, 8, SM8150_A2NOC_SNOC_SLV);
-DEFINE_QNODE(xm_sdc4, SM8150_MASTER_SDCC_4, 1, 8, SM8150_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qxm_camnoc_hf0_uncomp, SM8150_MASTER_CAMNOC_HF0_UNCOMP, 1, 32, SM8150_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qxm_camnoc_hf1_uncomp, SM8150_MASTER_CAMNOC_HF1_UNCOMP, 1, 32, SM8150_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qxm_camnoc_sf_uncomp, SM8150_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SM8150_SLAVE_CAMNOC_UNCOMP);
-DEFINE_QNODE(qnm_npu, SM8150_MASTER_NPU, 1, 32, SM8150_SLAVE_CDSP_MEM_NOC);
-DEFINE_QNODE(qhm_spdm, SM8150_MASTER_SPDM, 1, 4, SM8150_SLAVE_CNOC_A2NOC);
-DEFINE_QNODE(qnm_snoc, SM8150_SNOC_CNOC_MAS, 1, 8, SM8150_SLAVE_TLMM_SOUTH, SM8150_SLAVE_CDSP_CFG, SM8150_SLAVE_SPSS_CFG, SM8150_SLAVE_CAMERA_CFG, SM8150_SLAVE_SDCC_4, SM8150_SLAVE_SDCC_2, SM8150_SLAVE_CNOC_MNOC_CFG, SM8150_SLAVE_EMAC_CFG, SM8150_SLAVE_UFS_MEM_CFG, SM8150_SLAVE_TLMM_EAST, SM8150_SLAVE_SSC_CFG, SM8150_SLAVE_SNOC_CFG, SM8150_SLAVE_NORTH_PHY_CFG, SM8150_SLAVE_QUP_0, SM8150_SLAVE_GLM, SM8150_SLAVE_PCIE_1_CFG, SM8150_SLAVE_A2NOC_CFG, SM8150_SLAVE_QDSS_CFG, SM8150_SLAVE_DISPLAY_CFG, SM8150_SLAVE_TCSR, SM8150_SLAVE_CNOC_DDRSS, SM8150_SLAVE_RBCPR_MMCX_CFG, SM8150_SLAVE_NPU_CFG, SM8150_SLAVE_PCIE_0_CFG, SM8150_SLAVE_GRAPHICS_3D_CFG, SM8150_SLAVE_VENUS_CFG, SM8150_SLAVE_TSIF, SM8150_SLAVE_IPA_CFG, SM8150_SLAVE_CLK_CTL, SM8150_SLAVE_AOP, SM8150_SLAVE_QUP_1, SM8150_SLAVE_AHB2PHY_SOUTH, SM8150_SLAVE_USB3_1, SM8150_SLAVE_SERVICE_CNOC, SM8150_SLAVE_UFS_CARD_CFG, SM8150_SLAVE_QUP_2, SM8150_SLAVE_RBCPR_CX_CFG, SM8150_SLAVE_TLMM_WEST, SM8150_SLAVE_A1NOC_CFG, SM8150_SLAVE_AOSS, SM8150_SLAVE_PRNG, SM8150_SLAVE_VSENSE_CTRL_CFG, SM8150_SLAVE_QSPI, SM8150_SLAVE_USB3, SM8150_SLAVE_SPDM_WRAPPER, SM8150_SLAVE_CRYPTO_0_CFG, SM8150_SLAVE_PIMEM_CFG, SM8150_SLAVE_TLMM_NORTH, SM8150_SLAVE_RBCPR_MX_CFG, SM8150_SLAVE_IMEM_CFG);
-DEFINE_QNODE(xm_qdss_dap, SM8150_MASTER_QDSS_DAP, 1, 8, SM8150_SLAVE_TLMM_SOUTH, SM8150_SLAVE_CDSP_CFG, SM8150_SLAVE_SPSS_CFG, SM8150_SLAVE_CAMERA_CFG, SM8150_SLAVE_SDCC_4, SM8150_SLAVE_SDCC_2, SM8150_SLAVE_CNOC_MNOC_CFG, SM8150_SLAVE_EMAC_CFG, SM8150_SLAVE_UFS_MEM_CFG, SM8150_SLAVE_TLMM_EAST, SM8150_SLAVE_SSC_CFG, SM8150_SLAVE_SNOC_CFG, SM8150_SLAVE_NORTH_PHY_CFG, SM8150_SLAVE_QUP_0, SM8150_SLAVE_GLM, SM8150_SLAVE_PCIE_1_CFG, SM8150_SLAVE_A2NOC_CFG, SM8150_SLAVE_QDSS_CFG, SM8150_SLAVE_DISPLAY_CFG, SM8150_SLAVE_TCSR, SM8150_SLAVE_CNOC_DDRSS, SM8150_SLAVE_CNOC_A2NOC, SM8150_SLAVE_RBCPR_MMCX_CFG, SM8150_SLAVE_NPU_CFG, SM8150_SLAVE_PCIE_0_CFG, SM8150_SLAVE_GRAPHICS_3D_CFG, SM8150_SLAVE_VENUS_CFG, SM8150_SLAVE_TSIF, SM8150_SLAVE_IPA_CFG, SM8150_SLAVE_CLK_CTL, SM8150_SLAVE_AOP, SM8150_SLAVE_QUP_1, SM8150_SLAVE_AHB2PHY_SOUTH, SM8150_SLAVE_USB3_1, SM8150_SLAVE_SERVICE_CNOC, SM8150_SLAVE_UFS_CARD_CFG, SM8150_SLAVE_QUP_2, SM8150_SLAVE_RBCPR_CX_CFG, SM8150_SLAVE_TLMM_WEST, SM8150_SLAVE_A1NOC_CFG, SM8150_SLAVE_AOSS, SM8150_SLAVE_PRNG, SM8150_SLAVE_VSENSE_CTRL_CFG, SM8150_SLAVE_QSPI, SM8150_SLAVE_USB3, SM8150_SLAVE_SPDM_WRAPPER, SM8150_SLAVE_CRYPTO_0_CFG, SM8150_SLAVE_PIMEM_CFG, SM8150_SLAVE_TLMM_NORTH, SM8150_SLAVE_RBCPR_MX_CFG, SM8150_SLAVE_IMEM_CFG);
-DEFINE_QNODE(qhm_cnoc_dc_noc, SM8150_MASTER_CNOC_DC_NOC, 1, 4, SM8150_SLAVE_GEM_NOC_CFG, SM8150_SLAVE_LLCC_CFG);
-DEFINE_QNODE(acm_apps, SM8150_MASTER_AMPSS_M0, 2, 32, SM8150_SLAVE_ECC, SM8150_SLAVE_LLCC, SM8150_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(acm_gpu_tcu, SM8150_MASTER_GPU_TCU, 1, 8, SM8150_SLAVE_LLCC, SM8150_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(acm_sys_tcu, SM8150_MASTER_SYS_TCU, 1, 8, SM8150_SLAVE_LLCC, SM8150_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(qhm_gemnoc_cfg, SM8150_MASTER_GEM_NOC_CFG, 1, 4, SM8150_SLAVE_SERVICE_GEM_NOC, SM8150_SLAVE_MSS_PROC_MS_MPU_CFG);
-DEFINE_QNODE(qnm_cmpnoc, SM8150_MASTER_COMPUTE_NOC, 2, 32, SM8150_SLAVE_ECC, SM8150_SLAVE_LLCC, SM8150_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(qnm_gpu, SM8150_MASTER_GRAPHICS_3D, 2, 32, SM8150_SLAVE_LLCC, SM8150_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(qnm_mnoc_hf, SM8150_MASTER_MNOC_HF_MEM_NOC, 2, 32, SM8150_SLAVE_LLCC);
-DEFINE_QNODE(qnm_mnoc_sf, SM8150_MASTER_MNOC_SF_MEM_NOC, 1, 32, SM8150_SLAVE_LLCC, SM8150_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(qnm_pcie, SM8150_MASTER_GEM_NOC_PCIE_SNOC, 1, 16, SM8150_SLAVE_LLCC, SM8150_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(qnm_snoc_gc, SM8150_MASTER_SNOC_GC_MEM_NOC, 1, 8, SM8150_SLAVE_LLCC);
-DEFINE_QNODE(qnm_snoc_sf, SM8150_MASTER_SNOC_SF_MEM_NOC, 1, 16, SM8150_SLAVE_LLCC);
-DEFINE_QNODE(qxm_ecc, SM8150_MASTER_ECC, 2, 32, SM8150_SLAVE_LLCC);
-DEFINE_QNODE(llcc_mc, SM8150_MASTER_LLCC, 4, 4, SM8150_SLAVE_EBI_CH0);
-DEFINE_QNODE(qhm_mnoc_cfg, SM8150_MASTER_CNOC_MNOC_CFG, 1, 4, SM8150_SLAVE_SERVICE_MNOC);
-DEFINE_QNODE(qxm_camnoc_hf0, SM8150_MASTER_CAMNOC_HF0, 1, 32, SM8150_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_camnoc_hf1, SM8150_MASTER_CAMNOC_HF1, 1, 32, SM8150_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_camnoc_sf, SM8150_MASTER_CAMNOC_SF, 1, 32, SM8150_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_mdp0, SM8150_MASTER_MDP_PORT0, 1, 32, SM8150_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_mdp1, SM8150_MASTER_MDP_PORT1, 1, 32, SM8150_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_rot, SM8150_MASTER_ROTATOR, 1, 32, SM8150_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus0, SM8150_MASTER_VIDEO_P0, 1, 32, SM8150_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus1, SM8150_MASTER_VIDEO_P1, 1, 32, SM8150_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_venus_arm9, SM8150_MASTER_VIDEO_PROC, 1, 8, SM8150_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qhm_snoc_cfg, SM8150_MASTER_SNOC_CFG, 1, 4, SM8150_SLAVE_SERVICE_SNOC);
-DEFINE_QNODE(qnm_aggre1_noc, SM8150_A1NOC_SNOC_MAS, 1, 16, SM8150_SLAVE_SNOC_GEM_NOC_SF, SM8150_SLAVE_PIMEM, SM8150_SLAVE_OCIMEM, SM8150_SLAVE_APPSS, SM8150_SNOC_CNOC_SLV, SM8150_SLAVE_QDSS_STM);
-DEFINE_QNODE(qnm_aggre2_noc, SM8150_A2NOC_SNOC_MAS, 1, 16, SM8150_SLAVE_SNOC_GEM_NOC_SF, SM8150_SLAVE_PIMEM, SM8150_SLAVE_OCIMEM, SM8150_SLAVE_APPSS, SM8150_SNOC_CNOC_SLV, SM8150_SLAVE_PCIE_0, SM8150_SLAVE_PCIE_1, SM8150_SLAVE_TCU, SM8150_SLAVE_QDSS_STM);
-DEFINE_QNODE(qnm_gemnoc, SM8150_MASTER_GEM_NOC_SNOC, 1, 8, SM8150_SLAVE_PIMEM, SM8150_SLAVE_OCIMEM, SM8150_SLAVE_APPSS, SM8150_SNOC_CNOC_SLV, SM8150_SLAVE_TCU, SM8150_SLAVE_QDSS_STM);
-DEFINE_QNODE(qxm_pimem, SM8150_MASTER_PIMEM, 1, 8, SM8150_SLAVE_SNOC_GEM_NOC_GC, SM8150_SLAVE_OCIMEM);
-DEFINE_QNODE(xm_gic, SM8150_MASTER_GIC, 1, 8, SM8150_SLAVE_SNOC_GEM_NOC_GC, SM8150_SLAVE_OCIMEM);
-DEFINE_QNODE(qns_a1noc_snoc, SM8150_A1NOC_SNOC_SLV, 1, 16, SM8150_A1NOC_SNOC_MAS);
-DEFINE_QNODE(srvc_aggre1_noc, SM8150_SLAVE_SERVICE_A1NOC, 1, 4);
-DEFINE_QNODE(qns_a2noc_snoc, SM8150_A2NOC_SNOC_SLV, 1, 16, SM8150_A2NOC_SNOC_MAS);
-DEFINE_QNODE(qns_pcie_mem_noc, SM8150_SLAVE_ANOC_PCIE_GEM_NOC, 1, 16, SM8150_MASTER_GEM_NOC_PCIE_SNOC);
-DEFINE_QNODE(srvc_aggre2_noc, SM8150_SLAVE_SERVICE_A2NOC, 1, 4);
-DEFINE_QNODE(qns_camnoc_uncomp, SM8150_SLAVE_CAMNOC_UNCOMP, 1, 32);
-DEFINE_QNODE(qns_cdsp_mem_noc, SM8150_SLAVE_CDSP_MEM_NOC, 2, 32, SM8150_MASTER_COMPUTE_NOC);
-DEFINE_QNODE(qhs_a1_noc_cfg, SM8150_SLAVE_A1NOC_CFG, 1, 4, SM8150_MASTER_A1NOC_CFG);
-DEFINE_QNODE(qhs_a2_noc_cfg, SM8150_SLAVE_A2NOC_CFG, 1, 4, SM8150_MASTER_A2NOC_CFG);
-DEFINE_QNODE(qhs_ahb2phy_south, SM8150_SLAVE_AHB2PHY_SOUTH, 1, 4);
-DEFINE_QNODE(qhs_aop, SM8150_SLAVE_AOP, 1, 4);
-DEFINE_QNODE(qhs_aoss, SM8150_SLAVE_AOSS, 1, 4);
-DEFINE_QNODE(qhs_camera_cfg, SM8150_SLAVE_CAMERA_CFG, 1, 4);
-DEFINE_QNODE(qhs_clk_ctl, SM8150_SLAVE_CLK_CTL, 1, 4);
-DEFINE_QNODE(qhs_compute_dsp, SM8150_SLAVE_CDSP_CFG, 1, 4);
-DEFINE_QNODE(qhs_cpr_cx, SM8150_SLAVE_RBCPR_CX_CFG, 1, 4);
-DEFINE_QNODE(qhs_cpr_mmcx, SM8150_SLAVE_RBCPR_MMCX_CFG, 1, 4);
-DEFINE_QNODE(qhs_cpr_mx, SM8150_SLAVE_RBCPR_MX_CFG, 1, 4);
-DEFINE_QNODE(qhs_crypto0_cfg, SM8150_SLAVE_CRYPTO_0_CFG, 1, 4);
-DEFINE_QNODE(qhs_ddrss_cfg, SM8150_SLAVE_CNOC_DDRSS, 1, 4, SM8150_MASTER_CNOC_DC_NOC);
-DEFINE_QNODE(qhs_display_cfg, SM8150_SLAVE_DISPLAY_CFG, 1, 4);
-DEFINE_QNODE(qhs_emac_cfg, SM8150_SLAVE_EMAC_CFG, 1, 4);
-DEFINE_QNODE(qhs_glm, SM8150_SLAVE_GLM, 1, 4);
-DEFINE_QNODE(qhs_gpuss_cfg, SM8150_SLAVE_GRAPHICS_3D_CFG, 1, 8);
-DEFINE_QNODE(qhs_imem_cfg, SM8150_SLAVE_IMEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_ipa, SM8150_SLAVE_IPA_CFG, 1, 4);
-DEFINE_QNODE(qhs_mnoc_cfg, SM8150_SLAVE_CNOC_MNOC_CFG, 1, 4, SM8150_MASTER_CNOC_MNOC_CFG);
-DEFINE_QNODE(qhs_npu_cfg, SM8150_SLAVE_NPU_CFG, 1, 4);
-DEFINE_QNODE(qhs_pcie0_cfg, SM8150_SLAVE_PCIE_0_CFG, 1, 4);
-DEFINE_QNODE(qhs_pcie1_cfg, SM8150_SLAVE_PCIE_1_CFG, 1, 4);
-DEFINE_QNODE(qhs_phy_refgen_north, SM8150_SLAVE_NORTH_PHY_CFG, 1, 4);
-DEFINE_QNODE(qhs_pimem_cfg, SM8150_SLAVE_PIMEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_prng, SM8150_SLAVE_PRNG, 1, 4);
-DEFINE_QNODE(qhs_qdss_cfg, SM8150_SLAVE_QDSS_CFG, 1, 4);
-DEFINE_QNODE(qhs_qspi, SM8150_SLAVE_QSPI, 1, 4);
-DEFINE_QNODE(qhs_qupv3_east, SM8150_SLAVE_QUP_2, 1, 4);
-DEFINE_QNODE(qhs_qupv3_north, SM8150_SLAVE_QUP_1, 1, 4);
-DEFINE_QNODE(qhs_qupv3_south, SM8150_SLAVE_QUP_0, 1, 4);
-DEFINE_QNODE(qhs_sdc2, SM8150_SLAVE_SDCC_2, 1, 4);
-DEFINE_QNODE(qhs_sdc4, SM8150_SLAVE_SDCC_4, 1, 4);
-DEFINE_QNODE(qhs_snoc_cfg, SM8150_SLAVE_SNOC_CFG, 1, 4, SM8150_MASTER_SNOC_CFG);
-DEFINE_QNODE(qhs_spdm, SM8150_SLAVE_SPDM_WRAPPER, 1, 4);
-DEFINE_QNODE(qhs_spss_cfg, SM8150_SLAVE_SPSS_CFG, 1, 4);
-DEFINE_QNODE(qhs_ssc_cfg, SM8150_SLAVE_SSC_CFG, 1, 4);
-DEFINE_QNODE(qhs_tcsr, SM8150_SLAVE_TCSR, 1, 4);
-DEFINE_QNODE(qhs_tlmm_east, SM8150_SLAVE_TLMM_EAST, 1, 4);
-DEFINE_QNODE(qhs_tlmm_north, SM8150_SLAVE_TLMM_NORTH, 1, 4);
-DEFINE_QNODE(qhs_tlmm_south, SM8150_SLAVE_TLMM_SOUTH, 1, 4);
-DEFINE_QNODE(qhs_tlmm_west, SM8150_SLAVE_TLMM_WEST, 1, 4);
-DEFINE_QNODE(qhs_tsif, SM8150_SLAVE_TSIF, 1, 4);
-DEFINE_QNODE(qhs_ufs_card_cfg, SM8150_SLAVE_UFS_CARD_CFG, 1, 4);
-DEFINE_QNODE(qhs_ufs_mem_cfg, SM8150_SLAVE_UFS_MEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_usb3_0, SM8150_SLAVE_USB3, 1, 4);
-DEFINE_QNODE(qhs_usb3_1, SM8150_SLAVE_USB3_1, 1, 4);
-DEFINE_QNODE(qhs_venus_cfg, SM8150_SLAVE_VENUS_CFG, 1, 4);
-DEFINE_QNODE(qhs_vsense_ctrl_cfg, SM8150_SLAVE_VSENSE_CTRL_CFG, 1, 4);
-DEFINE_QNODE(qns_cnoc_a2noc, SM8150_SLAVE_CNOC_A2NOC, 1, 8, SM8150_MASTER_CNOC_A2NOC);
-DEFINE_QNODE(srvc_cnoc, SM8150_SLAVE_SERVICE_CNOC, 1, 4);
-DEFINE_QNODE(qhs_llcc, SM8150_SLAVE_LLCC_CFG, 1, 4);
-DEFINE_QNODE(qhs_memnoc, SM8150_SLAVE_GEM_NOC_CFG, 1, 4, SM8150_MASTER_GEM_NOC_CFG);
-DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SM8150_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
-DEFINE_QNODE(qns_ecc, SM8150_SLAVE_ECC, 1, 32);
-DEFINE_QNODE(qns_gem_noc_snoc, SM8150_SLAVE_GEM_NOC_SNOC, 1, 8, SM8150_MASTER_GEM_NOC_SNOC);
-DEFINE_QNODE(qns_llcc, SM8150_SLAVE_LLCC, 4, 16, SM8150_MASTER_LLCC);
-DEFINE_QNODE(srvc_gemnoc, SM8150_SLAVE_SERVICE_GEM_NOC, 1, 4);
-DEFINE_QNODE(ebi, SM8150_SLAVE_EBI_CH0, 4, 4);
-DEFINE_QNODE(qns2_mem_noc, SM8150_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SM8150_MASTER_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qns_mem_noc_hf, SM8150_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SM8150_MASTER_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(srvc_mnoc, SM8150_SLAVE_SERVICE_MNOC, 1, 4);
-DEFINE_QNODE(qhs_apss, SM8150_SLAVE_APPSS, 1, 8);
-DEFINE_QNODE(qns_cnoc, SM8150_SNOC_CNOC_SLV, 1, 8, SM8150_SNOC_CNOC_MAS);
-DEFINE_QNODE(qns_gemnoc_gc, SM8150_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SM8150_MASTER_SNOC_GC_MEM_NOC);
-DEFINE_QNODE(qns_gemnoc_sf, SM8150_SLAVE_SNOC_GEM_NOC_SF, 1, 16, SM8150_MASTER_SNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxs_imem, SM8150_SLAVE_OCIMEM, 1, 8);
-DEFINE_QNODE(qxs_pimem, SM8150_SLAVE_PIMEM, 1, 8);
-DEFINE_QNODE(srvc_snoc, SM8150_SLAVE_SERVICE_SNOC, 1, 4);
-DEFINE_QNODE(xs_pcie_0, SM8150_SLAVE_PCIE_0, 1, 8);
-DEFINE_QNODE(xs_pcie_1, SM8150_SLAVE_PCIE_1, 1, 8);
-DEFINE_QNODE(xs_qdss_stm, SM8150_SLAVE_QDSS_STM, 1, 4);
-DEFINE_QNODE(xs_sys_tcu_cfg, SM8150_SLAVE_TCU, 1, 8);
-
-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
-DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
-DEFINE_QBCM(bcm_mm1, "MM1", false, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
-DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_gem_noc_snoc);
-DEFINE_QBCM(bcm_mm2, "MM2", false, &qxm_camnoc_sf, &qns2_mem_noc);
-DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_gpu_tcu, &acm_sys_tcu);
-DEFINE_QBCM(bcm_mm3, "MM3", false, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
-DEFINE_QBCM(bcm_sh4, "SH4", false, &qnm_cmpnoc);
-DEFINE_QBCM(bcm_sh5, "SH5", false, &acm_apps);
-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_mem_noc);
-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
-DEFINE_QBCM(bcm_co1, "CO1", false, &qnm_npu);
-DEFINE_QBCM(bcm_cn0, "CN0", true, &qhm_spdm, &qnm_snoc, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy_south, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp, &qhs_cpr_cx, &qhs_cpr_mmcx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_emac_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_npu_cfg, &qhs_pcie0_cfg, &qhs_pcie1_cfg, &qhs_phy_refgen_north, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qspi, &qhs_qupv3_east, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_ssc_cfg, &qhs_tcsr, &qhs_tlmm_east, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tlmm_west, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup0, &qhm_qup1, &qhm_qup2);
-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
-DEFINE_QBCM(bcm_sn3, "SN3", false, &srvc_aggre1_noc, &srvc_aggre2_noc, &qns_cnoc);
-DEFINE_QBCM(bcm_sn4, "SN4", false, &qxs_pimem);
-DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_qdss_stm);
-DEFINE_QBCM(bcm_sn8, "SN8", false, &xs_pcie_0, &xs_pcie_1);
-DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_aggre1_noc);
-DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_aggre2_noc);
-DEFINE_QBCM(bcm_sn12, "SN12", false, &qxm_pimem, &xm_gic);
-DEFINE_QBCM(bcm_sn14, "SN14", false, &qns_pcie_mem_noc);
-DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_gemnoc);
+static struct qcom_icc_node qhm_a1noc_cfg = {
+ .name = "qhm_a1noc_cfg",
+ .id = SM8150_MASTER_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_SERVICE_A1NOC },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+ .name = "qhm_qup0",
+ .id = SM8150_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_emac = {
+ .name = "xm_emac",
+ .id = SM8150_MASTER_EMAC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SM8150_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SM8150_MASTER_USB3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_usb3_1 = {
+ .name = "xm_usb3_1",
+ .id = SM8150_MASTER_USB3_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_a2noc_cfg = {
+ .name = "qhm_a2noc_cfg",
+ .id = SM8150_MASTER_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_SERVICE_A2NOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SM8150_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .id = SM8150_MASTER_QSPI,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = SM8150_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .id = SM8150_MASTER_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_sensorss_ahb = {
+ .name = "qhm_sensorss_ahb",
+ .id = SM8150_MASTER_SENSORS_AHB,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_tsif = {
+ .name = "qhm_tsif",
+ .id = SM8150_MASTER_TSIF,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qnm_cnoc = {
+ .name = "qnm_cnoc",
+ .id = SM8150_MASTER_CNOC_A2NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SM8150_MASTER_CRYPTO_CORE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SM8150_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_pcie3_0 = {
+ .name = "xm_pcie3_0",
+ .id = SM8150_MASTER_PCIE,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_1 = {
+ .name = "xm_pcie3_1",
+ .id = SM8150_MASTER_PCIE_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+ .name = "xm_qdss_etr",
+ .id = SM8150_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SM8150_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SM8150_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
+ .name = "qxm_camnoc_hf0_uncomp",
+ .id = SM8150_MASTER_CAMNOC_HF0_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf1_uncomp = {
+ .name = "qxm_camnoc_hf1_uncomp",
+ .id = SM8150_MASTER_CAMNOC_HF1_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
+ .name = "qxm_camnoc_sf_uncomp",
+ .id = SM8150_MASTER_CAMNOC_SF_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qnm_npu = {
+ .name = "qnm_npu",
+ .id = SM8150_MASTER_NPU,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_CDSP_MEM_NOC },
+};
+
+static struct qcom_icc_node qhm_spdm = {
+ .name = "qhm_spdm",
+ .id = SM8150_MASTER_SPDM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_CNOC_A2NOC },
+};
+
+static struct qcom_icc_node qnm_snoc = {
+ .name = "qnm_snoc",
+ .id = SM8150_SNOC_CNOC_MAS,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 50,
+ .links = { SM8150_SLAVE_TLMM_SOUTH,
+ SM8150_SLAVE_CDSP_CFG,
+ SM8150_SLAVE_SPSS_CFG,
+ SM8150_SLAVE_CAMERA_CFG,
+ SM8150_SLAVE_SDCC_4,
+ SM8150_SLAVE_SDCC_2,
+ SM8150_SLAVE_CNOC_MNOC_CFG,
+ SM8150_SLAVE_EMAC_CFG,
+ SM8150_SLAVE_UFS_MEM_CFG,
+ SM8150_SLAVE_TLMM_EAST,
+ SM8150_SLAVE_SSC_CFG,
+ SM8150_SLAVE_SNOC_CFG,
+ SM8150_SLAVE_NORTH_PHY_CFG,
+ SM8150_SLAVE_QUP_0,
+ SM8150_SLAVE_GLM,
+ SM8150_SLAVE_PCIE_1_CFG,
+ SM8150_SLAVE_A2NOC_CFG,
+ SM8150_SLAVE_QDSS_CFG,
+ SM8150_SLAVE_DISPLAY_CFG,
+ SM8150_SLAVE_TCSR,
+ SM8150_SLAVE_CNOC_DDRSS,
+ SM8150_SLAVE_RBCPR_MMCX_CFG,
+ SM8150_SLAVE_NPU_CFG,
+ SM8150_SLAVE_PCIE_0_CFG,
+ SM8150_SLAVE_GRAPHICS_3D_CFG,
+ SM8150_SLAVE_VENUS_CFG,
+ SM8150_SLAVE_TSIF,
+ SM8150_SLAVE_IPA_CFG,
+ SM8150_SLAVE_CLK_CTL,
+ SM8150_SLAVE_AOP,
+ SM8150_SLAVE_QUP_1,
+ SM8150_SLAVE_AHB2PHY_SOUTH,
+ SM8150_SLAVE_USB3_1,
+ SM8150_SLAVE_SERVICE_CNOC,
+ SM8150_SLAVE_UFS_CARD_CFG,
+ SM8150_SLAVE_QUP_2,
+ SM8150_SLAVE_RBCPR_CX_CFG,
+ SM8150_SLAVE_TLMM_WEST,
+ SM8150_SLAVE_A1NOC_CFG,
+ SM8150_SLAVE_AOSS,
+ SM8150_SLAVE_PRNG,
+ SM8150_SLAVE_VSENSE_CTRL_CFG,
+ SM8150_SLAVE_QSPI,
+ SM8150_SLAVE_USB3,
+ SM8150_SLAVE_SPDM_WRAPPER,
+ SM8150_SLAVE_CRYPTO_0_CFG,
+ SM8150_SLAVE_PIMEM_CFG,
+ SM8150_SLAVE_TLMM_NORTH,
+ SM8150_SLAVE_RBCPR_MX_CFG,
+ SM8150_SLAVE_IMEM_CFG
+ },
+};
+
+static struct qcom_icc_node xm_qdss_dap = {
+ .name = "xm_qdss_dap",
+ .id = SM8150_MASTER_QDSS_DAP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 51,
+ .links = { SM8150_SLAVE_TLMM_SOUTH,
+ SM8150_SLAVE_CDSP_CFG,
+ SM8150_SLAVE_SPSS_CFG,
+ SM8150_SLAVE_CAMERA_CFG,
+ SM8150_SLAVE_SDCC_4,
+ SM8150_SLAVE_SDCC_2,
+ SM8150_SLAVE_CNOC_MNOC_CFG,
+ SM8150_SLAVE_EMAC_CFG,
+ SM8150_SLAVE_UFS_MEM_CFG,
+ SM8150_SLAVE_TLMM_EAST,
+ SM8150_SLAVE_SSC_CFG,
+ SM8150_SLAVE_SNOC_CFG,
+ SM8150_SLAVE_NORTH_PHY_CFG,
+ SM8150_SLAVE_QUP_0,
+ SM8150_SLAVE_GLM,
+ SM8150_SLAVE_PCIE_1_CFG,
+ SM8150_SLAVE_A2NOC_CFG,
+ SM8150_SLAVE_QDSS_CFG,
+ SM8150_SLAVE_DISPLAY_CFG,
+ SM8150_SLAVE_TCSR,
+ SM8150_SLAVE_CNOC_DDRSS,
+ SM8150_SLAVE_CNOC_A2NOC,
+ SM8150_SLAVE_RBCPR_MMCX_CFG,
+ SM8150_SLAVE_NPU_CFG,
+ SM8150_SLAVE_PCIE_0_CFG,
+ SM8150_SLAVE_GRAPHICS_3D_CFG,
+ SM8150_SLAVE_VENUS_CFG,
+ SM8150_SLAVE_TSIF,
+ SM8150_SLAVE_IPA_CFG,
+ SM8150_SLAVE_CLK_CTL,
+ SM8150_SLAVE_AOP,
+ SM8150_SLAVE_QUP_1,
+ SM8150_SLAVE_AHB2PHY_SOUTH,
+ SM8150_SLAVE_USB3_1,
+ SM8150_SLAVE_SERVICE_CNOC,
+ SM8150_SLAVE_UFS_CARD_CFG,
+ SM8150_SLAVE_QUP_2,
+ SM8150_SLAVE_RBCPR_CX_CFG,
+ SM8150_SLAVE_TLMM_WEST,
+ SM8150_SLAVE_A1NOC_CFG,
+ SM8150_SLAVE_AOSS,
+ SM8150_SLAVE_PRNG,
+ SM8150_SLAVE_VSENSE_CTRL_CFG,
+ SM8150_SLAVE_QSPI,
+ SM8150_SLAVE_USB3,
+ SM8150_SLAVE_SPDM_WRAPPER,
+ SM8150_SLAVE_CRYPTO_0_CFG,
+ SM8150_SLAVE_PIMEM_CFG,
+ SM8150_SLAVE_TLMM_NORTH,
+ SM8150_SLAVE_RBCPR_MX_CFG,
+ SM8150_SLAVE_IMEM_CFG
+ },
+};
+
+static struct qcom_icc_node qhm_cnoc_dc_noc = {
+ .name = "qhm_cnoc_dc_noc",
+ .id = SM8150_MASTER_CNOC_DC_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SM8150_SLAVE_GEM_NOC_CFG,
+ SM8150_SLAVE_LLCC_CFG
+ },
+};
+
+static struct qcom_icc_node acm_apps = {
+ .name = "acm_apps",
+ .id = SM8150_MASTER_AMPSS_M0,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SM8150_SLAVE_ECC,
+ SM8150_SLAVE_LLCC,
+ SM8150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node acm_gpu_tcu = {
+ .name = "acm_gpu_tcu",
+ .id = SM8150_MASTER_GPU_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8150_SLAVE_LLCC,
+ SM8150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node acm_sys_tcu = {
+ .name = "acm_sys_tcu",
+ .id = SM8150_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8150_SLAVE_LLCC,
+ SM8150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qhm_gemnoc_cfg = {
+ .name = "qhm_gemnoc_cfg",
+ .id = SM8150_MASTER_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SM8150_SLAVE_SERVICE_GEM_NOC,
+ SM8150_SLAVE_MSS_PROC_MS_MPU_CFG
+ },
+};
+
+static struct qcom_icc_node qnm_cmpnoc = {
+ .name = "qnm_cmpnoc",
+ .id = SM8150_MASTER_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SM8150_SLAVE_ECC,
+ SM8150_SLAVE_LLCC,
+ SM8150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .id = SM8150_MASTER_GRAPHICS_3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8150_SLAVE_LLCC,
+ SM8150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SM8150_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SM8150_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8150_SLAVE_LLCC,
+ SM8150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = SM8150_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SM8150_SLAVE_LLCC,
+ SM8150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SM8150_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SM8150_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qxm_ecc = {
+ .name = "qxm_ecc",
+ .id = SM8150_MASTER_ECC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SM8150_MASTER_LLCC,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_EBI_CH0 },
+};
+
+static struct qcom_icc_node qhm_mnoc_cfg = {
+ .name = "qhm_mnoc_cfg",
+ .id = SM8150_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf0 = {
+ .name = "qxm_camnoc_hf0",
+ .id = SM8150_MASTER_CAMNOC_HF0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf1 = {
+ .name = "qxm_camnoc_hf1",
+ .id = SM8150_MASTER_CAMNOC_HF1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf = {
+ .name = "qxm_camnoc_sf",
+ .id = SM8150_MASTER_CAMNOC_SF,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp0 = {
+ .name = "qxm_mdp0",
+ .id = SM8150_MASTER_MDP_PORT0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp1 = {
+ .name = "qxm_mdp1",
+ .id = SM8150_MASTER_MDP_PORT1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_rot = {
+ .name = "qxm_rot",
+ .id = SM8150_MASTER_ROTATOR,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus0 = {
+ .name = "qxm_venus0",
+ .id = SM8150_MASTER_VIDEO_P0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus1 = {
+ .name = "qxm_venus1",
+ .id = SM8150_MASTER_VIDEO_P1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus_arm9 = {
+ .name = "qxm_venus_arm9",
+ .id = SM8150_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qhm_snoc_cfg = {
+ .name = "qhm_snoc_cfg",
+ .id = SM8150_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SM8150_A1NOC_SNOC_MAS,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 6,
+ .links = { SM8150_SLAVE_SNOC_GEM_NOC_SF,
+ SM8150_SLAVE_PIMEM,
+ SM8150_SLAVE_OCIMEM,
+ SM8150_SLAVE_APPSS,
+ SM8150_SNOC_CNOC_SLV,
+ SM8150_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SM8150_A2NOC_SNOC_MAS,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 9,
+ .links = { SM8150_SLAVE_SNOC_GEM_NOC_SF,
+ SM8150_SLAVE_PIMEM,
+ SM8150_SLAVE_OCIMEM,
+ SM8150_SLAVE_APPSS,
+ SM8150_SNOC_CNOC_SLV,
+ SM8150_SLAVE_PCIE_0,
+ SM8150_SLAVE_PCIE_1,
+ SM8150_SLAVE_TCU,
+ SM8150_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qnm_gemnoc = {
+ .name = "qnm_gemnoc",
+ .id = SM8150_MASTER_GEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 6,
+ .links = { SM8150_SLAVE_PIMEM,
+ SM8150_SLAVE_OCIMEM,
+ SM8150_SLAVE_APPSS,
+ SM8150_SNOC_CNOC_SLV,
+ SM8150_SLAVE_TCU,
+ SM8150_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = SM8150_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8150_SLAVE_SNOC_GEM_NOC_GC,
+ SM8150_SLAVE_OCIMEM
+ },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SM8150_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8150_SLAVE_SNOC_GEM_NOC_GC,
+ SM8150_SLAVE_OCIMEM
+ },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SM8150_A1NOC_SNOC_SLV,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8150_A1NOC_SNOC_MAS },
+};
+
+static struct qcom_icc_node srvc_aggre1_noc = {
+ .name = "srvc_aggre1_noc",
+ .id = SM8150_SLAVE_SERVICE_A1NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SM8150_A2NOC_SNOC_SLV,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8150_A2NOC_SNOC_MAS },
+};
+
+static struct qcom_icc_node qns_pcie_mem_noc = {
+ .name = "qns_pcie_mem_noc",
+ .id = SM8150_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8150_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre2_noc = {
+ .name = "srvc_aggre2_noc",
+ .id = SM8150_SLAVE_SERVICE_A2NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_camnoc_uncomp = {
+ .name = "qns_camnoc_uncomp",
+ .id = SM8150_SLAVE_CAMNOC_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node qns_cdsp_mem_noc = {
+ .name = "qns_cdsp_mem_noc",
+ .id = SM8150_SLAVE_CDSP_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qhs_a1_noc_cfg = {
+ .name = "qhs_a1_noc_cfg",
+ .id = SM8150_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_MASTER_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_a2_noc_cfg = {
+ .name = "qhs_a2_noc_cfg",
+ .id = SM8150_SLAVE_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_MASTER_A2NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_ahb2phy_south = {
+ .name = "qhs_ahb2phy_south",
+ .id = SM8150_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aop = {
+ .name = "qhs_aop",
+ .id = SM8150_SLAVE_AOP,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SM8150_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SM8150_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SM8150_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_compute_dsp = {
+ .name = "qhs_compute_dsp",
+ .id = SM8150_SLAVE_CDSP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SM8150_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mmcx = {
+ .name = "qhs_cpr_mmcx",
+ .id = SM8150_SLAVE_RBCPR_MMCX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mx = {
+ .name = "qhs_cpr_mx",
+ .id = SM8150_SLAVE_RBCPR_MX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SM8150_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ddrss_cfg = {
+ .name = "qhs_ddrss_cfg",
+ .id = SM8150_SLAVE_CNOC_DDRSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_MASTER_CNOC_DC_NOC },
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = SM8150_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_emac_cfg = {
+ .name = "qhs_emac_cfg",
+ .id = SM8150_SLAVE_EMAC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_glm = {
+ .name = "qhs_glm",
+ .id = SM8150_SLAVE_GLM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SM8150_SLAVE_GRAPHICS_3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SM8150_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SM8150_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mnoc_cfg = {
+ .name = "qhs_mnoc_cfg",
+ .id = SM8150_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_npu_cfg = {
+ .name = "qhs_npu_cfg",
+ .id = SM8150_SLAVE_NPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .id = SM8150_SLAVE_PCIE_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+ .name = "qhs_pcie1_cfg",
+ .id = SM8150_SLAVE_PCIE_1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_phy_refgen_north = {
+ .name = "qhs_phy_refgen_north",
+ .id = SM8150_SLAVE_NORTH_PHY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SM8150_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SM8150_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SM8150_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .id = SM8150_SLAVE_QSPI,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qupv3_east = {
+ .name = "qhs_qupv3_east",
+ .id = SM8150_SLAVE_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qupv3_north = {
+ .name = "qhs_qupv3_north",
+ .id = SM8150_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qupv3_south = {
+ .name = "qhs_qupv3_south",
+ .id = SM8150_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SM8150_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SM8150_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_snoc_cfg = {
+ .name = "qhs_snoc_cfg",
+ .id = SM8150_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_spdm = {
+ .name = "qhs_spdm",
+ .id = SM8150_SLAVE_SPDM_WRAPPER,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_spss_cfg = {
+ .name = "qhs_spss_cfg",
+ .id = SM8150_SLAVE_SPSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ssc_cfg = {
+ .name = "qhs_ssc_cfg",
+ .id = SM8150_SLAVE_SSC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SM8150_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_east = {
+ .name = "qhs_tlmm_east",
+ .id = SM8150_SLAVE_TLMM_EAST,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_north = {
+ .name = "qhs_tlmm_north",
+ .id = SM8150_SLAVE_TLMM_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_south = {
+ .name = "qhs_tlmm_south",
+ .id = SM8150_SLAVE_TLMM_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_west = {
+ .name = "qhs_tlmm_west",
+ .id = SM8150_SLAVE_TLMM_WEST,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tsif = {
+ .name = "qhs_tsif",
+ .id = SM8150_SLAVE_TSIF,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_card_cfg = {
+ .name = "qhs_ufs_card_cfg",
+ .id = SM8150_SLAVE_UFS_CARD_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SM8150_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SM8150_SLAVE_USB3,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_1 = {
+ .name = "qhs_usb3_1",
+ .id = SM8150_SLAVE_USB3_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SM8150_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SM8150_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_cnoc_a2noc = {
+ .name = "qns_cnoc_a2noc",
+ .id = SM8150_SLAVE_CNOC_A2NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_MASTER_CNOC_A2NOC },
+};
+
+static struct qcom_icc_node srvc_cnoc = {
+ .name = "srvc_cnoc",
+ .id = SM8150_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_llcc = {
+ .name = "qhs_llcc",
+ .id = SM8150_SLAVE_LLCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_memnoc = {
+ .name = "qhs_memnoc",
+ .id = SM8150_SLAVE_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8150_MASTER_GEM_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
+ .name = "qhs_mdsp_ms_mpu_cfg",
+ .id = SM8150_SLAVE_MSS_PROC_MS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_ecc = {
+ .name = "qns_ecc",
+ .id = SM8150_SLAVE_ECC,
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node qns_gem_noc_snoc = {
+ .name = "qns_gem_noc_snoc",
+ .id = SM8150_SLAVE_GEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_MASTER_GEM_NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SM8150_SLAVE_LLCC,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8150_MASTER_LLCC },
+};
+
+static struct qcom_icc_node srvc_gemnoc = {
+ .name = "srvc_gemnoc",
+ .id = SM8150_SLAVE_SERVICE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SM8150_SLAVE_EBI_CH0,
+ .channels = 4,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns2_mem_noc = {
+ .name = "qns2_mem_noc",
+ .id = SM8150_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SM8150_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8150_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SM8150_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SM8150_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qns_cnoc = {
+ .name = "qns_cnoc",
+ .id = SM8150_SNOC_CNOC_SLV,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_SNOC_CNOC_MAS },
+};
+
+static struct qcom_icc_node qns_gemnoc_gc = {
+ .name = "qns_gemnoc_gc",
+ .id = SM8150_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8150_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SM8150_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8150_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SM8150_SLAVE_OCIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = SM8150_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SM8150_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+ .name = "xs_pcie_0",
+ .id = SM8150_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+ .name = "xs_pcie_1",
+ .id = SM8150_SLAVE_PCIE_1,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SM8150_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SM8150_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .keepalive = false,
+ .num_nodes = 7,
+ .nodes = { &qxm_camnoc_hf0_uncomp,
+ &qxm_camnoc_hf1_uncomp,
+ &qxm_camnoc_sf_uncomp,
+ &qxm_camnoc_hf0,
+ &qxm_camnoc_hf1,
+ &qxm_mdp0,
+ &qxm_mdp1
+ },
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+ .name = "SH2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_gem_noc_snoc },
+};
+
+static struct qcom_icc_bcm bcm_mm2 = {
+ .name = "MM2",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qxm_camnoc_sf, &qns2_mem_noc },
+};
+
+static struct qcom_icc_bcm bcm_sh3 = {
+ .name = "SH3",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &acm_gpu_tcu, &acm_sys_tcu },
+};
+
+static struct qcom_icc_bcm bcm_mm3 = {
+ .name = "MM3",
+ .keepalive = false,
+ .num_nodes = 4,
+ .nodes = { &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9 },
+};
+
+static struct qcom_icc_bcm bcm_sh4 = {
+ .name = "SH4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_cmpnoc },
+};
+
+static struct qcom_icc_bcm bcm_sh5 = {
+ .name = "SH5",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &acm_apps },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_cdsp_mem_noc },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_imem },
+};
+
+static struct qcom_icc_bcm bcm_co1 = {
+ .name = "CO1",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_npu },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 53,
+ .nodes = { &qhm_spdm,
+ &qnm_snoc,
+ &qhs_a1_noc_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_ahb2phy_south,
+ &qhs_aop,
+ &qhs_aoss,
+ &qhs_camera_cfg,
+ &qhs_clk_ctl,
+ &qhs_compute_dsp,
+ &qhs_cpr_cx,
+ &qhs_cpr_mmcx,
+ &qhs_cpr_mx,
+ &qhs_crypto0_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_display_cfg,
+ &qhs_emac_cfg,
+ &qhs_glm,
+ &qhs_gpuss_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mnoc_cfg,
+ &qhs_npu_cfg,
+ &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg,
+ &qhs_phy_refgen_north,
+ &qhs_pimem_cfg,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qspi,
+ &qhs_qupv3_east,
+ &qhs_qupv3_north,
+ &qhs_qupv3_south,
+ &qhs_sdc2,
+ &qhs_sdc4,
+ &qhs_snoc_cfg,
+ &qhs_spdm,
+ &qhs_spss_cfg,
+ &qhs_ssc_cfg,
+ &qhs_tcsr,
+ &qhs_tlmm_east,
+ &qhs_tlmm_north,
+ &qhs_tlmm_south,
+ &qhs_tlmm_west,
+ &qhs_tsif,
+ &qhs_ufs_card_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3_0,
+ &qhs_usb3_1,
+ &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &qns_cnoc_a2noc,
+ &srvc_cnoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = false,
+ .num_nodes = 3,
+ .nodes = { &qhm_qup0, &qhm_qup1, &qhm_qup2 },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .keepalive = false,
+ .num_nodes = 3,
+ .nodes = { &srvc_aggre1_noc, &srvc_aggre2_noc, &qns_cnoc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_pimem },
+};
+
+static struct qcom_icc_bcm bcm_sn5 = {
+ .name = "SN5",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xs_qdss_stm },
+};
+
+static struct qcom_icc_bcm bcm_sn8 = {
+ .name = "SN8",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &xs_pcie_0, &xs_pcie_1 },
+};
+
+static struct qcom_icc_bcm bcm_sn9 = {
+ .name = "SN9",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn11 = {
+ .name = "SN11",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn12 = {
+ .name = "SN12",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qxm_pimem, &xm_gic },
+};
+
+static struct qcom_icc_bcm bcm_sn14 = {
+ .name = "SN14",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_mem_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn15 = {
+ .name = "SN15",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_gemnoc },
+};
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_qup0,
diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
index e3bb008cb219..d4a4ecef11f0 100644
--- a/drivers/interconnect/qcom/sm8250.c
+++ b/drivers/interconnect/qcom/sm8250.c
@@ -7,193 +7,1636 @@
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sm8250.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sm8250.h"
-DEFINE_QNODE(qhm_a1noc_cfg, SM8250_MASTER_A1NOC_CFG, 1, 4, SM8250_SLAVE_SERVICE_A1NOC);
-DEFINE_QNODE(qhm_qspi, SM8250_MASTER_QSPI_0, 1, 4, SM8250_A1NOC_SNOC_SLV);
-DEFINE_QNODE(qhm_qup1, SM8250_MASTER_QUP_1, 1, 4, SM8250_A1NOC_SNOC_SLV);
-DEFINE_QNODE(qhm_qup2, SM8250_MASTER_QUP_2, 1, 4, SM8250_A1NOC_SNOC_SLV);
-DEFINE_QNODE(qhm_tsif, SM8250_MASTER_TSIF, 1, 4, SM8250_A1NOC_SNOC_SLV);
-DEFINE_QNODE(xm_pcie3_modem, SM8250_MASTER_PCIE_2, 1, 8, SM8250_SLAVE_ANOC_PCIE_GEM_NOC_1);
-DEFINE_QNODE(xm_sdc4, SM8250_MASTER_SDCC_4, 1, 8, SM8250_A1NOC_SNOC_SLV);
-DEFINE_QNODE(xm_ufs_mem, SM8250_MASTER_UFS_MEM, 1, 8, SM8250_A1NOC_SNOC_SLV);
-DEFINE_QNODE(xm_usb3_0, SM8250_MASTER_USB3, 1, 8, SM8250_A1NOC_SNOC_SLV);
-DEFINE_QNODE(xm_usb3_1, SM8250_MASTER_USB3_1, 1, 8, SM8250_A1NOC_SNOC_SLV);
-DEFINE_QNODE(qhm_a2noc_cfg, SM8250_MASTER_A2NOC_CFG, 1, 4, SM8250_SLAVE_SERVICE_A2NOC);
-DEFINE_QNODE(qhm_qdss_bam, SM8250_MASTER_QDSS_BAM, 1, 4, SM8250_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qhm_qup0, SM8250_MASTER_QUP_0, 1, 4, SM8250_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qnm_cnoc, SM8250_MASTER_CNOC_A2NOC, 1, 8, SM8250_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qxm_crypto, SM8250_MASTER_CRYPTO_CORE_0, 1, 8, SM8250_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qxm_ipa, SM8250_MASTER_IPA, 1, 8, SM8250_A2NOC_SNOC_SLV);
-DEFINE_QNODE(xm_pcie3_0, SM8250_MASTER_PCIE, 1, 8, SM8250_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(xm_pcie3_1, SM8250_MASTER_PCIE_1, 1, 8, SM8250_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(xm_qdss_etr, SM8250_MASTER_QDSS_ETR, 1, 8, SM8250_A2NOC_SNOC_SLV);
-DEFINE_QNODE(xm_sdc2, SM8250_MASTER_SDCC_2, 1, 8, SM8250_A2NOC_SNOC_SLV);
-DEFINE_QNODE(xm_ufs_card, SM8250_MASTER_UFS_CARD, 1, 8, SM8250_A2NOC_SNOC_SLV);
-DEFINE_QNODE(qnm_npu, SM8250_MASTER_NPU, 2, 32, SM8250_SLAVE_CDSP_MEM_NOC);
-DEFINE_QNODE(qnm_snoc, SM8250_SNOC_CNOC_MAS, 1, 8, SM8250_SLAVE_CDSP_CFG, SM8250_SLAVE_CAMERA_CFG, SM8250_SLAVE_TLMM_SOUTH, SM8250_SLAVE_TLMM_NORTH, SM8250_SLAVE_SDCC_4, SM8250_SLAVE_TLMM_WEST, SM8250_SLAVE_SDCC_2, SM8250_SLAVE_CNOC_MNOC_CFG, SM8250_SLAVE_UFS_MEM_CFG, SM8250_SLAVE_SNOC_CFG, SM8250_SLAVE_PDM, SM8250_SLAVE_CX_RDPM, SM8250_SLAVE_PCIE_1_CFG, SM8250_SLAVE_A2NOC_CFG, SM8250_SLAVE_QDSS_CFG, SM8250_SLAVE_DISPLAY_CFG, SM8250_SLAVE_PCIE_2_CFG, SM8250_SLAVE_TCSR, SM8250_SLAVE_DCC_CFG, SM8250_SLAVE_CNOC_DDRSS, SM8250_SLAVE_IPC_ROUTER_CFG, SM8250_SLAVE_PCIE_0_CFG, SM8250_SLAVE_RBCPR_MMCX_CFG, SM8250_SLAVE_NPU_CFG, SM8250_SLAVE_AHB2PHY_SOUTH, SM8250_SLAVE_AHB2PHY_NORTH, SM8250_SLAVE_GRAPHICS_3D_CFG, SM8250_SLAVE_VENUS_CFG, SM8250_SLAVE_TSIF, SM8250_SLAVE_IPA_CFG, SM8250_SLAVE_IMEM_CFG, SM8250_SLAVE_USB3, SM8250_SLAVE_SERVICE_CNOC, SM8250_SLAVE_UFS_CARD_CFG, SM8250_SLAVE_USB3_1, SM8250_SLAVE_LPASS, SM8250_SLAVE_RBCPR_CX_CFG, SM8250_SLAVE_A1NOC_CFG, SM8250_SLAVE_AOSS, SM8250_SLAVE_PRNG, SM8250_SLAVE_VSENSE_CTRL_CFG, SM8250_SLAVE_QSPI_0, SM8250_SLAVE_CRYPTO_0_CFG, SM8250_SLAVE_PIMEM_CFG, SM8250_SLAVE_RBCPR_MX_CFG, SM8250_SLAVE_QUP_0, SM8250_SLAVE_QUP_1, SM8250_SLAVE_QUP_2, SM8250_SLAVE_CLK_CTL);
-DEFINE_QNODE(xm_qdss_dap, SM8250_MASTER_QDSS_DAP, 1, 8, SM8250_SLAVE_CDSP_CFG, SM8250_SLAVE_CAMERA_CFG, SM8250_SLAVE_TLMM_SOUTH, SM8250_SLAVE_TLMM_NORTH, SM8250_SLAVE_SDCC_4, SM8250_SLAVE_TLMM_WEST, SM8250_SLAVE_SDCC_2, SM8250_SLAVE_CNOC_MNOC_CFG, SM8250_SLAVE_UFS_MEM_CFG, SM8250_SLAVE_SNOC_CFG, SM8250_SLAVE_PDM, SM8250_SLAVE_CX_RDPM, SM8250_SLAVE_PCIE_1_CFG, SM8250_SLAVE_A2NOC_CFG, SM8250_SLAVE_QDSS_CFG, SM8250_SLAVE_DISPLAY_CFG, SM8250_SLAVE_PCIE_2_CFG, SM8250_SLAVE_TCSR, SM8250_SLAVE_DCC_CFG, SM8250_SLAVE_CNOC_DDRSS, SM8250_SLAVE_IPC_ROUTER_CFG, SM8250_SLAVE_CNOC_A2NOC, SM8250_SLAVE_PCIE_0_CFG, SM8250_SLAVE_RBCPR_MMCX_CFG, SM8250_SLAVE_NPU_CFG, SM8250_SLAVE_AHB2PHY_SOUTH, SM8250_SLAVE_AHB2PHY_NORTH, SM8250_SLAVE_GRAPHICS_3D_CFG, SM8250_SLAVE_VENUS_CFG, SM8250_SLAVE_TSIF, SM8250_SLAVE_IPA_CFG, SM8250_SLAVE_IMEM_CFG, SM8250_SLAVE_USB3, SM8250_SLAVE_SERVICE_CNOC, SM8250_SLAVE_UFS_CARD_CFG, SM8250_SLAVE_USB3_1, SM8250_SLAVE_LPASS, SM8250_SLAVE_RBCPR_CX_CFG, SM8250_SLAVE_A1NOC_CFG, SM8250_SLAVE_AOSS, SM8250_SLAVE_PRNG, SM8250_SLAVE_VSENSE_CTRL_CFG, SM8250_SLAVE_QSPI_0, SM8250_SLAVE_CRYPTO_0_CFG, SM8250_SLAVE_PIMEM_CFG, SM8250_SLAVE_RBCPR_MX_CFG, SM8250_SLAVE_QUP_0, SM8250_SLAVE_QUP_1, SM8250_SLAVE_QUP_2, SM8250_SLAVE_CLK_CTL);
-DEFINE_QNODE(qhm_cnoc_dc_noc, SM8250_MASTER_CNOC_DC_NOC, 1, 4, SM8250_SLAVE_GEM_NOC_CFG, SM8250_SLAVE_LLCC_CFG);
-DEFINE_QNODE(alm_gpu_tcu, SM8250_MASTER_GPU_TCU, 1, 8, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(alm_sys_tcu, SM8250_MASTER_SYS_TCU, 1, 8, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(chm_apps, SM8250_MASTER_AMPSS_M0, 2, 32, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC, SM8250_SLAVE_MEM_NOC_PCIE_SNOC);
-DEFINE_QNODE(qhm_gemnoc_cfg, SM8250_MASTER_GEM_NOC_CFG, 1, 4, SM8250_SLAVE_SERVICE_GEM_NOC_2, SM8250_SLAVE_SERVICE_GEM_NOC_1, SM8250_SLAVE_SERVICE_GEM_NOC);
-DEFINE_QNODE(qnm_cmpnoc, SM8250_MASTER_COMPUTE_NOC, 2, 32, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(qnm_gpu, SM8250_MASTER_GRAPHICS_3D, 2, 32, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(qnm_mnoc_hf, SM8250_MASTER_MNOC_HF_MEM_NOC, 2, 32, SM8250_SLAVE_LLCC);
-DEFINE_QNODE(qnm_mnoc_sf, SM8250_MASTER_MNOC_SF_MEM_NOC, 2, 32, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(qnm_pcie, SM8250_MASTER_ANOC_PCIE_GEM_NOC, 1, 16, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC);
-DEFINE_QNODE(qnm_snoc_gc, SM8250_MASTER_SNOC_GC_MEM_NOC, 1, 8, SM8250_SLAVE_LLCC);
-DEFINE_QNODE(qnm_snoc_sf, SM8250_MASTER_SNOC_SF_MEM_NOC, 1, 16, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC, SM8250_SLAVE_MEM_NOC_PCIE_SNOC);
-DEFINE_QNODE(llcc_mc, SM8250_MASTER_LLCC, 4, 4, SM8250_SLAVE_EBI_CH0);
-DEFINE_QNODE(qhm_mnoc_cfg, SM8250_MASTER_CNOC_MNOC_CFG, 1, 4, SM8250_SLAVE_SERVICE_MNOC);
-DEFINE_QNODE(qnm_camnoc_hf, SM8250_MASTER_CAMNOC_HF, 2, 32, SM8250_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qnm_camnoc_icp, SM8250_MASTER_CAMNOC_ICP, 1, 8, SM8250_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qnm_camnoc_sf, SM8250_MASTER_CAMNOC_SF, 2, 32, SM8250_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qnm_video0, SM8250_MASTER_VIDEO_P0, 1, 32, SM8250_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qnm_video1, SM8250_MASTER_VIDEO_P1, 1, 32, SM8250_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qnm_video_cvp, SM8250_MASTER_VIDEO_PROC, 1, 32, SM8250_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_mdp0, SM8250_MASTER_MDP_PORT0, 1, 32, SM8250_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_mdp1, SM8250_MASTER_MDP_PORT1, 1, 32, SM8250_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_rot, SM8250_MASTER_ROTATOR, 1, 32, SM8250_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(amm_npu_sys, SM8250_MASTER_NPU_SYS, 4, 32, SM8250_SLAVE_NPU_COMPUTE_NOC);
-DEFINE_QNODE(amm_npu_sys_cdp_w, SM8250_MASTER_NPU_CDP, 2, 16, SM8250_SLAVE_NPU_COMPUTE_NOC);
-DEFINE_QNODE(qhm_cfg, SM8250_MASTER_NPU_NOC_CFG, 1, 4, SM8250_SLAVE_SERVICE_NPU_NOC, SM8250_SLAVE_ISENSE_CFG, SM8250_SLAVE_NPU_LLM_CFG, SM8250_SLAVE_NPU_INT_DMA_BWMON_CFG, SM8250_SLAVE_NPU_CP, SM8250_SLAVE_NPU_TCM, SM8250_SLAVE_NPU_CAL_DP0, SM8250_SLAVE_NPU_CAL_DP1, SM8250_SLAVE_NPU_DPM);
-DEFINE_QNODE(qhm_snoc_cfg, SM8250_MASTER_SNOC_CFG, 1, 4, SM8250_SLAVE_SERVICE_SNOC);
-DEFINE_QNODE(qnm_aggre1_noc, SM8250_A1NOC_SNOC_MAS, 1, 16, SM8250_SLAVE_SNOC_GEM_NOC_SF);
-DEFINE_QNODE(qnm_aggre2_noc, SM8250_A2NOC_SNOC_MAS, 1, 16, SM8250_SLAVE_SNOC_GEM_NOC_SF);
-DEFINE_QNODE(qnm_gemnoc, SM8250_MASTER_GEM_NOC_SNOC, 1, 16, SM8250_SLAVE_PIMEM, SM8250_SLAVE_OCIMEM, SM8250_SLAVE_APPSS, SM8250_SNOC_CNOC_SLV, SM8250_SLAVE_TCU, SM8250_SLAVE_QDSS_STM);
-DEFINE_QNODE(qnm_gemnoc_pcie, SM8250_MASTER_GEM_NOC_PCIE_SNOC, 1, 8, SM8250_SLAVE_PCIE_2, SM8250_SLAVE_PCIE_0, SM8250_SLAVE_PCIE_1);
-DEFINE_QNODE(qxm_pimem, SM8250_MASTER_PIMEM, 1, 8, SM8250_SLAVE_SNOC_GEM_NOC_GC);
-DEFINE_QNODE(xm_gic, SM8250_MASTER_GIC, 1, 8, SM8250_SLAVE_SNOC_GEM_NOC_GC);
-DEFINE_QNODE(qns_a1noc_snoc, SM8250_A1NOC_SNOC_SLV, 1, 16, SM8250_A1NOC_SNOC_MAS);
-DEFINE_QNODE(qns_pcie_modem_mem_noc, SM8250_SLAVE_ANOC_PCIE_GEM_NOC_1, 1, 16, SM8250_MASTER_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(srvc_aggre1_noc, SM8250_SLAVE_SERVICE_A1NOC, 1, 4);
-DEFINE_QNODE(qns_a2noc_snoc, SM8250_A2NOC_SNOC_SLV, 1, 16, SM8250_A2NOC_SNOC_MAS);
-DEFINE_QNODE(qns_pcie_mem_noc, SM8250_SLAVE_ANOC_PCIE_GEM_NOC, 1, 16, SM8250_MASTER_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(srvc_aggre2_noc, SM8250_SLAVE_SERVICE_A2NOC, 1, 4);
-DEFINE_QNODE(qns_cdsp_mem_noc, SM8250_SLAVE_CDSP_MEM_NOC, 2, 32, SM8250_MASTER_COMPUTE_NOC);
-DEFINE_QNODE(qhs_a1_noc_cfg, SM8250_SLAVE_A1NOC_CFG, 1, 4, SM8250_MASTER_A1NOC_CFG);
-DEFINE_QNODE(qhs_a2_noc_cfg, SM8250_SLAVE_A2NOC_CFG, 1, 4, SM8250_MASTER_A2NOC_CFG);
-DEFINE_QNODE(qhs_ahb2phy0, SM8250_SLAVE_AHB2PHY_SOUTH, 1, 4);
-DEFINE_QNODE(qhs_ahb2phy1, SM8250_SLAVE_AHB2PHY_NORTH, 1, 4);
-DEFINE_QNODE(qhs_aoss, SM8250_SLAVE_AOSS, 1, 4);
-DEFINE_QNODE(qhs_camera_cfg, SM8250_SLAVE_CAMERA_CFG, 1, 4);
-DEFINE_QNODE(qhs_clk_ctl, SM8250_SLAVE_CLK_CTL, 1, 4);
-DEFINE_QNODE(qhs_compute_dsp, SM8250_SLAVE_CDSP_CFG, 1, 4);
-DEFINE_QNODE(qhs_cpr_cx, SM8250_SLAVE_RBCPR_CX_CFG, 1, 4);
-DEFINE_QNODE(qhs_cpr_mmcx, SM8250_SLAVE_RBCPR_MMCX_CFG, 1, 4);
-DEFINE_QNODE(qhs_cpr_mx, SM8250_SLAVE_RBCPR_MX_CFG, 1, 4);
-DEFINE_QNODE(qhs_crypto0_cfg, SM8250_SLAVE_CRYPTO_0_CFG, 1, 4);
-DEFINE_QNODE(qhs_cx_rdpm, SM8250_SLAVE_CX_RDPM, 1, 4);
-DEFINE_QNODE(qhs_dcc_cfg, SM8250_SLAVE_DCC_CFG, 1, 4);
-DEFINE_QNODE(qhs_ddrss_cfg, SM8250_SLAVE_CNOC_DDRSS, 1, 4, SM8250_MASTER_CNOC_DC_NOC);
-DEFINE_QNODE(qhs_display_cfg, SM8250_SLAVE_DISPLAY_CFG, 1, 4);
-DEFINE_QNODE(qhs_gpuss_cfg, SM8250_SLAVE_GRAPHICS_3D_CFG, 1, 8);
-DEFINE_QNODE(qhs_imem_cfg, SM8250_SLAVE_IMEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_ipa, SM8250_SLAVE_IPA_CFG, 1, 4);
-DEFINE_QNODE(qhs_ipc_router, SM8250_SLAVE_IPC_ROUTER_CFG, 1, 4);
-DEFINE_QNODE(qhs_lpass_cfg, SM8250_SLAVE_LPASS, 1, 4);
-DEFINE_QNODE(qhs_mnoc_cfg, SM8250_SLAVE_CNOC_MNOC_CFG, 1, 4, SM8250_MASTER_CNOC_MNOC_CFG);
-DEFINE_QNODE(qhs_npu_cfg, SM8250_SLAVE_NPU_CFG, 1, 4, SM8250_MASTER_NPU_NOC_CFG);
-DEFINE_QNODE(qhs_pcie0_cfg, SM8250_SLAVE_PCIE_0_CFG, 1, 4);
-DEFINE_QNODE(qhs_pcie1_cfg, SM8250_SLAVE_PCIE_1_CFG, 1, 4);
-DEFINE_QNODE(qhs_pcie_modem_cfg, SM8250_SLAVE_PCIE_2_CFG, 1, 4);
-DEFINE_QNODE(qhs_pdm, SM8250_SLAVE_PDM, 1, 4);
-DEFINE_QNODE(qhs_pimem_cfg, SM8250_SLAVE_PIMEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_prng, SM8250_SLAVE_PRNG, 1, 4);
-DEFINE_QNODE(qhs_qdss_cfg, SM8250_SLAVE_QDSS_CFG, 1, 4);
-DEFINE_QNODE(qhs_qspi, SM8250_SLAVE_QSPI_0, 1, 4);
-DEFINE_QNODE(qhs_qup0, SM8250_SLAVE_QUP_0, 1, 4);
-DEFINE_QNODE(qhs_qup1, SM8250_SLAVE_QUP_1, 1, 4);
-DEFINE_QNODE(qhs_qup2, SM8250_SLAVE_QUP_2, 1, 4);
-DEFINE_QNODE(qhs_sdc2, SM8250_SLAVE_SDCC_2, 1, 4);
-DEFINE_QNODE(qhs_sdc4, SM8250_SLAVE_SDCC_4, 1, 4);
-DEFINE_QNODE(qhs_snoc_cfg, SM8250_SLAVE_SNOC_CFG, 1, 4, SM8250_MASTER_SNOC_CFG);
-DEFINE_QNODE(qhs_tcsr, SM8250_SLAVE_TCSR, 1, 4);
-DEFINE_QNODE(qhs_tlmm0, SM8250_SLAVE_TLMM_NORTH, 1, 4);
-DEFINE_QNODE(qhs_tlmm1, SM8250_SLAVE_TLMM_SOUTH, 1, 4);
-DEFINE_QNODE(qhs_tlmm2, SM8250_SLAVE_TLMM_WEST, 1, 4);
-DEFINE_QNODE(qhs_tsif, SM8250_SLAVE_TSIF, 1, 4);
-DEFINE_QNODE(qhs_ufs_card_cfg, SM8250_SLAVE_UFS_CARD_CFG, 1, 4);
-DEFINE_QNODE(qhs_ufs_mem_cfg, SM8250_SLAVE_UFS_MEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_usb3_0, SM8250_SLAVE_USB3, 1, 4);
-DEFINE_QNODE(qhs_usb3_1, SM8250_SLAVE_USB3_1, 1, 4);
-DEFINE_QNODE(qhs_venus_cfg, SM8250_SLAVE_VENUS_CFG, 1, 4);
-DEFINE_QNODE(qhs_vsense_ctrl_cfg, SM8250_SLAVE_VSENSE_CTRL_CFG, 1, 4);
-DEFINE_QNODE(qns_cnoc_a2noc, SM8250_SLAVE_CNOC_A2NOC, 1, 8, SM8250_MASTER_CNOC_A2NOC);
-DEFINE_QNODE(srvc_cnoc, SM8250_SLAVE_SERVICE_CNOC, 1, 4);
-DEFINE_QNODE(qhs_llcc, SM8250_SLAVE_LLCC_CFG, 1, 4);
-DEFINE_QNODE(qhs_memnoc, SM8250_SLAVE_GEM_NOC_CFG, 1, 4, SM8250_MASTER_GEM_NOC_CFG);
-DEFINE_QNODE(qns_gem_noc_snoc, SM8250_SLAVE_GEM_NOC_SNOC, 1, 16, SM8250_MASTER_GEM_NOC_SNOC);
-DEFINE_QNODE(qns_llcc, SM8250_SLAVE_LLCC, 4, 16, SM8250_MASTER_LLCC);
-DEFINE_QNODE(qns_sys_pcie, SM8250_SLAVE_MEM_NOC_PCIE_SNOC, 1, 8, SM8250_MASTER_GEM_NOC_PCIE_SNOC);
-DEFINE_QNODE(srvc_even_gemnoc, SM8250_SLAVE_SERVICE_GEM_NOC_1, 1, 4);
-DEFINE_QNODE(srvc_odd_gemnoc, SM8250_SLAVE_SERVICE_GEM_NOC_2, 1, 4);
-DEFINE_QNODE(srvc_sys_gemnoc, SM8250_SLAVE_SERVICE_GEM_NOC, 1, 4);
-DEFINE_QNODE(ebi, SM8250_SLAVE_EBI_CH0, 4, 4);
-DEFINE_QNODE(qns_mem_noc_hf, SM8250_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SM8250_MASTER_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qns_mem_noc_sf, SM8250_SLAVE_MNOC_SF_MEM_NOC, 2, 32, SM8250_MASTER_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(srvc_mnoc, SM8250_SLAVE_SERVICE_MNOC, 1, 4);
-DEFINE_QNODE(qhs_cal_dp0, SM8250_SLAVE_NPU_CAL_DP0, 1, 4);
-DEFINE_QNODE(qhs_cal_dp1, SM8250_SLAVE_NPU_CAL_DP1, 1, 4);
-DEFINE_QNODE(qhs_cp, SM8250_SLAVE_NPU_CP, 1, 4);
-DEFINE_QNODE(qhs_dma_bwmon, SM8250_SLAVE_NPU_INT_DMA_BWMON_CFG, 1, 4);
-DEFINE_QNODE(qhs_dpm, SM8250_SLAVE_NPU_DPM, 1, 4);
-DEFINE_QNODE(qhs_isense, SM8250_SLAVE_ISENSE_CFG, 1, 4);
-DEFINE_QNODE(qhs_llm, SM8250_SLAVE_NPU_LLM_CFG, 1, 4);
-DEFINE_QNODE(qhs_tcm, SM8250_SLAVE_NPU_TCM, 1, 4);
-DEFINE_QNODE(qns_npu_sys, SM8250_SLAVE_NPU_COMPUTE_NOC, 2, 32);
-DEFINE_QNODE(srvc_noc, SM8250_SLAVE_SERVICE_NPU_NOC, 1, 4);
-DEFINE_QNODE(qhs_apss, SM8250_SLAVE_APPSS, 1, 8);
-DEFINE_QNODE(qns_cnoc, SM8250_SNOC_CNOC_SLV, 1, 8, SM8250_SNOC_CNOC_MAS);
-DEFINE_QNODE(qns_gemnoc_gc, SM8250_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SM8250_MASTER_SNOC_GC_MEM_NOC);
-DEFINE_QNODE(qns_gemnoc_sf, SM8250_SLAVE_SNOC_GEM_NOC_SF, 1, 16, SM8250_MASTER_SNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxs_imem, SM8250_SLAVE_OCIMEM, 1, 8);
-DEFINE_QNODE(qxs_pimem, SM8250_SLAVE_PIMEM, 1, 8);
-DEFINE_QNODE(srvc_snoc, SM8250_SLAVE_SERVICE_SNOC, 1, 4);
-DEFINE_QNODE(xs_pcie_0, SM8250_SLAVE_PCIE_0, 1, 8);
-DEFINE_QNODE(xs_pcie_1, SM8250_SLAVE_PCIE_1, 1, 8);
-DEFINE_QNODE(xs_pcie_modem, SM8250_SLAVE_PCIE_2, 1, 8);
-DEFINE_QNODE(xs_qdss_stm, SM8250_SLAVE_QDSS_STM, 1, 4);
-DEFINE_QNODE(xs_sys_tcu_cfg, SM8250_SLAVE_TCU, 1, 8);
-
-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
-DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
-DEFINE_QBCM(bcm_mm1, "MM1", false, &qnm_camnoc_hf, &qxm_mdp0, &qxm_mdp1);
-DEFINE_QBCM(bcm_sh2, "SH2", false, &alm_gpu_tcu, &alm_sys_tcu);
-DEFINE_QBCM(bcm_mm2, "MM2", false, &qns_mem_noc_sf);
-DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup1, &qhm_qup2, &qhm_qup0);
-DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
-DEFINE_QBCM(bcm_mm3, "MM3", false, &qnm_camnoc_icp, &qnm_camnoc_sf, &qnm_video0, &qnm_video1, &qnm_video_cvp);
-DEFINE_QBCM(bcm_sh4, "SH4", false, &chm_apps);
-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_mem_noc);
-DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_ahb2phy1, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp, &qhs_cpr_cx, &qhs_cpr_mmcx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_cx_rdpm, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_ipc_router, &qhs_lpass_cfg, &qhs_mnoc_cfg, &qhs_npu_cfg, &qhs_pcie0_cfg, &qhs_pcie1_cfg, &qhs_pcie_modem_cfg, &qhs_pdm, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qspi, &qhs_qup0, &qhs_qup1, &qhs_qup2, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_tcsr, &qhs_tlmm0, &qhs_tlmm1, &qhs_tlmm2, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
-DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
-DEFINE_QBCM(bcm_co2, "CO2", false, &qnm_npu);
-DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
-DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
-DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_pcie_modem);
-DEFINE_QBCM(bcm_sn6, "SN6", false, &xs_pcie_0, &xs_pcie_1);
-DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
-DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_aggre2_noc);
-DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_gemnoc_pcie);
-DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_gemnoc);
-DEFINE_QBCM(bcm_sn12, "SN12", false, &qns_pcie_modem_mem_noc, &qns_pcie_mem_noc);
+static struct qcom_icc_node qhm_a1noc_cfg = {
+ .name = "qhm_a1noc_cfg",
+ .id = SM8250_MASTER_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_SERVICE_A1NOC },
+};
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .id = SM8250_MASTER_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = SM8250_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .id = SM8250_MASTER_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_tsif = {
+ .name = "qhm_tsif",
+ .id = SM8250_MASTER_TSIF,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_pcie3_modem = {
+ .name = "xm_pcie3_modem",
+ .id = SM8250_MASTER_PCIE_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_ANOC_PCIE_GEM_NOC_1 },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SM8250_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SM8250_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SM8250_MASTER_USB3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_usb3_1 = {
+ .name = "xm_usb3_1",
+ .id = SM8250_MASTER_USB3_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_a2noc_cfg = {
+ .name = "qhm_a2noc_cfg",
+ .id = SM8250_MASTER_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_SERVICE_A2NOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SM8250_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+ .name = "qhm_qup0",
+ .id = SM8250_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qnm_cnoc = {
+ .name = "qnm_cnoc",
+ .id = SM8250_MASTER_CNOC_A2NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SM8250_MASTER_CRYPTO_CORE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SM8250_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_pcie3_0 = {
+ .name = "xm_pcie3_0",
+ .id = SM8250_MASTER_PCIE,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_1 = {
+ .name = "xm_pcie3_1",
+ .id = SM8250_MASTER_PCIE_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+ .name = "xm_qdss_etr",
+ .id = SM8250_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SM8250_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_ufs_card = {
+ .name = "xm_ufs_card",
+ .id = SM8250_MASTER_UFS_CARD,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qnm_npu = {
+ .name = "qnm_npu",
+ .id = SM8250_MASTER_NPU,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_CDSP_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_snoc = {
+ .name = "qnm_snoc",
+ .id = SM8250_SNOC_CNOC_MAS,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 49,
+ .links = { SM8250_SLAVE_CDSP_CFG,
+ SM8250_SLAVE_CAMERA_CFG,
+ SM8250_SLAVE_TLMM_SOUTH,
+ SM8250_SLAVE_TLMM_NORTH,
+ SM8250_SLAVE_SDCC_4,
+ SM8250_SLAVE_TLMM_WEST,
+ SM8250_SLAVE_SDCC_2,
+ SM8250_SLAVE_CNOC_MNOC_CFG,
+ SM8250_SLAVE_UFS_MEM_CFG,
+ SM8250_SLAVE_SNOC_CFG,
+ SM8250_SLAVE_PDM,
+ SM8250_SLAVE_CX_RDPM,
+ SM8250_SLAVE_PCIE_1_CFG,
+ SM8250_SLAVE_A2NOC_CFG,
+ SM8250_SLAVE_QDSS_CFG,
+ SM8250_SLAVE_DISPLAY_CFG,
+ SM8250_SLAVE_PCIE_2_CFG,
+ SM8250_SLAVE_TCSR,
+ SM8250_SLAVE_DCC_CFG,
+ SM8250_SLAVE_CNOC_DDRSS,
+ SM8250_SLAVE_IPC_ROUTER_CFG,
+ SM8250_SLAVE_PCIE_0_CFG,
+ SM8250_SLAVE_RBCPR_MMCX_CFG,
+ SM8250_SLAVE_NPU_CFG,
+ SM8250_SLAVE_AHB2PHY_SOUTH,
+ SM8250_SLAVE_AHB2PHY_NORTH,
+ SM8250_SLAVE_GRAPHICS_3D_CFG,
+ SM8250_SLAVE_VENUS_CFG,
+ SM8250_SLAVE_TSIF,
+ SM8250_SLAVE_IPA_CFG,
+ SM8250_SLAVE_IMEM_CFG,
+ SM8250_SLAVE_USB3,
+ SM8250_SLAVE_SERVICE_CNOC,
+ SM8250_SLAVE_UFS_CARD_CFG,
+ SM8250_SLAVE_USB3_1,
+ SM8250_SLAVE_LPASS,
+ SM8250_SLAVE_RBCPR_CX_CFG,
+ SM8250_SLAVE_A1NOC_CFG,
+ SM8250_SLAVE_AOSS,
+ SM8250_SLAVE_PRNG,
+ SM8250_SLAVE_VSENSE_CTRL_CFG,
+ SM8250_SLAVE_QSPI_0,
+ SM8250_SLAVE_CRYPTO_0_CFG,
+ SM8250_SLAVE_PIMEM_CFG,
+ SM8250_SLAVE_RBCPR_MX_CFG,
+ SM8250_SLAVE_QUP_0,
+ SM8250_SLAVE_QUP_1,
+ SM8250_SLAVE_QUP_2,
+ SM8250_SLAVE_CLK_CTL
+ },
+};
+
+static struct qcom_icc_node xm_qdss_dap = {
+ .name = "xm_qdss_dap",
+ .id = SM8250_MASTER_QDSS_DAP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 50,
+ .links = { SM8250_SLAVE_CDSP_CFG,
+ SM8250_SLAVE_CAMERA_CFG,
+ SM8250_SLAVE_TLMM_SOUTH,
+ SM8250_SLAVE_TLMM_NORTH,
+ SM8250_SLAVE_SDCC_4,
+ SM8250_SLAVE_TLMM_WEST,
+ SM8250_SLAVE_SDCC_2,
+ SM8250_SLAVE_CNOC_MNOC_CFG,
+ SM8250_SLAVE_UFS_MEM_CFG,
+ SM8250_SLAVE_SNOC_CFG,
+ SM8250_SLAVE_PDM,
+ SM8250_SLAVE_CX_RDPM,
+ SM8250_SLAVE_PCIE_1_CFG,
+ SM8250_SLAVE_A2NOC_CFG,
+ SM8250_SLAVE_QDSS_CFG,
+ SM8250_SLAVE_DISPLAY_CFG,
+ SM8250_SLAVE_PCIE_2_CFG,
+ SM8250_SLAVE_TCSR,
+ SM8250_SLAVE_DCC_CFG,
+ SM8250_SLAVE_CNOC_DDRSS,
+ SM8250_SLAVE_IPC_ROUTER_CFG,
+ SM8250_SLAVE_CNOC_A2NOC,
+ SM8250_SLAVE_PCIE_0_CFG,
+ SM8250_SLAVE_RBCPR_MMCX_CFG,
+ SM8250_SLAVE_NPU_CFG,
+ SM8250_SLAVE_AHB2PHY_SOUTH,
+ SM8250_SLAVE_AHB2PHY_NORTH,
+ SM8250_SLAVE_GRAPHICS_3D_CFG,
+ SM8250_SLAVE_VENUS_CFG,
+ SM8250_SLAVE_TSIF,
+ SM8250_SLAVE_IPA_CFG,
+ SM8250_SLAVE_IMEM_CFG,
+ SM8250_SLAVE_USB3,
+ SM8250_SLAVE_SERVICE_CNOC,
+ SM8250_SLAVE_UFS_CARD_CFG,
+ SM8250_SLAVE_USB3_1,
+ SM8250_SLAVE_LPASS,
+ SM8250_SLAVE_RBCPR_CX_CFG,
+ SM8250_SLAVE_A1NOC_CFG,
+ SM8250_SLAVE_AOSS,
+ SM8250_SLAVE_PRNG,
+ SM8250_SLAVE_VSENSE_CTRL_CFG,
+ SM8250_SLAVE_QSPI_0,
+ SM8250_SLAVE_CRYPTO_0_CFG,
+ SM8250_SLAVE_PIMEM_CFG,
+ SM8250_SLAVE_RBCPR_MX_CFG,
+ SM8250_SLAVE_QUP_0,
+ SM8250_SLAVE_QUP_1,
+ SM8250_SLAVE_QUP_2,
+ SM8250_SLAVE_CLK_CTL
+ },
+};
+
+static struct qcom_icc_node qhm_cnoc_dc_noc = {
+ .name = "qhm_cnoc_dc_noc",
+ .id = SM8250_MASTER_CNOC_DC_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SM8250_SLAVE_GEM_NOC_CFG,
+ SM8250_SLAVE_LLCC_CFG
+ },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .id = SM8250_MASTER_GPU_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8250_SLAVE_LLCC,
+ SM8250_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .id = SM8250_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8250_SLAVE_LLCC,
+ SM8250_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .id = SM8250_MASTER_AMPSS_M0,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SM8250_SLAVE_LLCC,
+ SM8250_SLAVE_GEM_NOC_SNOC,
+ SM8250_SLAVE_MEM_NOC_PCIE_SNOC
+ },
+};
+
+static struct qcom_icc_node qhm_gemnoc_cfg = {
+ .name = "qhm_gemnoc_cfg",
+ .id = SM8250_MASTER_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 3,
+ .links = { SM8250_SLAVE_SERVICE_GEM_NOC_2,
+ SM8250_SLAVE_SERVICE_GEM_NOC_1,
+ SM8250_SLAVE_SERVICE_GEM_NOC
+ },
+};
+
+static struct qcom_icc_node qnm_cmpnoc = {
+ .name = "qnm_cmpnoc",
+ .id = SM8250_MASTER_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8250_SLAVE_LLCC,
+ SM8250_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .id = SM8250_MASTER_GRAPHICS_3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8250_SLAVE_LLCC,
+ SM8250_SLAVE_GEM_NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SM8250_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SM8250_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8250_SLAVE_LLCC,
+ SM8250_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = SM8250_MASTER_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SM8250_SLAVE_LLCC,
+ SM8250_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SM8250_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SM8250_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8250_SLAVE_LLCC,
+ SM8250_SLAVE_GEM_NOC_SNOC,
+ SM8250_SLAVE_MEM_NOC_PCIE_SNOC
+ },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SM8250_MASTER_LLCC,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_EBI_CH0 },
+};
+
+static struct qcom_icc_node qhm_mnoc_cfg = {
+ .name = "qhm_mnoc_cfg",
+ .id = SM8250_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+ .name = "qnm_camnoc_hf",
+ .id = SM8250_MASTER_CAMNOC_HF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_icp = {
+ .name = "qnm_camnoc_icp",
+ .id = SM8250_MASTER_CAMNOC_ICP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf = {
+ .name = "qnm_camnoc_sf",
+ .id = SM8250_MASTER_CAMNOC_SF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video0 = {
+ .name = "qnm_video0",
+ .id = SM8250_MASTER_VIDEO_P0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video1 = {
+ .name = "qnm_video1",
+ .id = SM8250_MASTER_VIDEO_P1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cvp = {
+ .name = "qnm_video_cvp",
+ .id = SM8250_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp0 = {
+ .name = "qxm_mdp0",
+ .id = SM8250_MASTER_MDP_PORT0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp1 = {
+ .name = "qxm_mdp1",
+ .id = SM8250_MASTER_MDP_PORT1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_rot = {
+ .name = "qxm_rot",
+ .id = SM8250_MASTER_ROTATOR,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node amm_npu_sys = {
+ .name = "amm_npu_sys",
+ .id = SM8250_MASTER_NPU_SYS,
+ .channels = 4,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_NPU_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node amm_npu_sys_cdp_w = {
+ .name = "amm_npu_sys_cdp_w",
+ .id = SM8250_MASTER_NPU_CDP,
+ .channels = 2,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_NPU_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qhm_cfg = {
+ .name = "qhm_cfg",
+ .id = SM8250_MASTER_NPU_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 9,
+ .links = { SM8250_SLAVE_SERVICE_NPU_NOC,
+ SM8250_SLAVE_ISENSE_CFG,
+ SM8250_SLAVE_NPU_LLM_CFG,
+ SM8250_SLAVE_NPU_INT_DMA_BWMON_CFG,
+ SM8250_SLAVE_NPU_CP,
+ SM8250_SLAVE_NPU_TCM,
+ SM8250_SLAVE_NPU_CAL_DP0,
+ SM8250_SLAVE_NPU_CAL_DP1,
+ SM8250_SLAVE_NPU_DPM
+ },
+};
+
+static struct qcom_icc_node qhm_snoc_cfg = {
+ .name = "qhm_snoc_cfg",
+ .id = SM8250_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SM8250_A1NOC_SNOC_MAS,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SM8250_A2NOC_SNOC_MAS,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_gemnoc = {
+ .name = "qnm_gemnoc",
+ .id = SM8250_MASTER_GEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 6,
+ .links = { SM8250_SLAVE_PIMEM,
+ SM8250_SLAVE_OCIMEM,
+ SM8250_SLAVE_APPSS,
+ SM8250_SNOC_CNOC_SLV,
+ SM8250_SLAVE_TCU,
+ SM8250_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+ .name = "qnm_gemnoc_pcie",
+ .id = SM8250_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 3,
+ .links = { SM8250_SLAVE_PCIE_2,
+ SM8250_SLAVE_PCIE_0,
+ SM8250_SLAVE_PCIE_1
+ },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = SM8250_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SM8250_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SM8250_A1NOC_SNOC_SLV,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8250_A1NOC_SNOC_MAS },
+};
+
+static struct qcom_icc_node qns_pcie_modem_mem_noc = {
+ .name = "qns_pcie_modem_mem_noc",
+ .id = SM8250_SLAVE_ANOC_PCIE_GEM_NOC_1,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8250_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node srvc_aggre1_noc = {
+ .name = "srvc_aggre1_noc",
+ .id = SM8250_SLAVE_SERVICE_A1NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SM8250_A2NOC_SNOC_SLV,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8250_A2NOC_SNOC_MAS },
+};
+
+static struct qcom_icc_node qns_pcie_mem_noc = {
+ .name = "qns_pcie_mem_noc",
+ .id = SM8250_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8250_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node srvc_aggre2_noc = {
+ .name = "srvc_aggre2_noc",
+ .id = SM8250_SLAVE_SERVICE_A2NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_cdsp_mem_noc = {
+ .name = "qns_cdsp_mem_noc",
+ .id = SM8250_SLAVE_CDSP_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8250_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qhs_a1_noc_cfg = {
+ .name = "qhs_a1_noc_cfg",
+ .id = SM8250_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_MASTER_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_a2_noc_cfg = {
+ .name = "qhs_a2_noc_cfg",
+ .id = SM8250_SLAVE_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_MASTER_A2NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .id = SM8250_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .id = SM8250_SLAVE_AHB2PHY_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SM8250_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SM8250_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SM8250_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_compute_dsp = {
+ .name = "qhs_compute_dsp",
+ .id = SM8250_SLAVE_CDSP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SM8250_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mmcx = {
+ .name = "qhs_cpr_mmcx",
+ .id = SM8250_SLAVE_RBCPR_MMCX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mx = {
+ .name = "qhs_cpr_mx",
+ .id = SM8250_SLAVE_RBCPR_MX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SM8250_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cx_rdpm = {
+ .name = "qhs_cx_rdpm",
+ .id = SM8250_SLAVE_CX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_dcc_cfg = {
+ .name = "qhs_dcc_cfg",
+ .id = SM8250_SLAVE_DCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ddrss_cfg = {
+ .name = "qhs_ddrss_cfg",
+ .id = SM8250_SLAVE_CNOC_DDRSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_MASTER_CNOC_DC_NOC },
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = SM8250_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SM8250_SLAVE_GRAPHICS_3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SM8250_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SM8250_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .id = SM8250_SLAVE_IPC_ROUTER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_cfg = {
+ .name = "qhs_lpass_cfg",
+ .id = SM8250_SLAVE_LPASS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mnoc_cfg = {
+ .name = "qhs_mnoc_cfg",
+ .id = SM8250_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_npu_cfg = {
+ .name = "qhs_npu_cfg",
+ .id = SM8250_SLAVE_NPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_MASTER_NPU_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .id = SM8250_SLAVE_PCIE_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+ .name = "qhs_pcie1_cfg",
+ .id = SM8250_SLAVE_PCIE_1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie_modem_cfg = {
+ .name = "qhs_pcie_modem_cfg",
+ .id = SM8250_SLAVE_PCIE_2_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SM8250_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SM8250_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SM8250_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SM8250_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .id = SM8250_SLAVE_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .id = SM8250_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .id = SM8250_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+ .name = "qhs_qup2",
+ .id = SM8250_SLAVE_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SM8250_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SM8250_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_snoc_cfg = {
+ .name = "qhs_snoc_cfg",
+ .id = SM8250_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SM8250_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm0 = {
+ .name = "qhs_tlmm0",
+ .id = SM8250_SLAVE_TLMM_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm1 = {
+ .name = "qhs_tlmm1",
+ .id = SM8250_SLAVE_TLMM_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm2 = {
+ .name = "qhs_tlmm2",
+ .id = SM8250_SLAVE_TLMM_WEST,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tsif = {
+ .name = "qhs_tsif",
+ .id = SM8250_SLAVE_TSIF,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_card_cfg = {
+ .name = "qhs_ufs_card_cfg",
+ .id = SM8250_SLAVE_UFS_CARD_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SM8250_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SM8250_SLAVE_USB3,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_1 = {
+ .name = "qhs_usb3_1",
+ .id = SM8250_SLAVE_USB3_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SM8250_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SM8250_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_cnoc_a2noc = {
+ .name = "qns_cnoc_a2noc",
+ .id = SM8250_SLAVE_CNOC_A2NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_MASTER_CNOC_A2NOC },
+};
+
+static struct qcom_icc_node srvc_cnoc = {
+ .name = "srvc_cnoc",
+ .id = SM8250_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_llcc = {
+ .name = "qhs_llcc",
+ .id = SM8250_SLAVE_LLCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_memnoc = {
+ .name = "qhs_memnoc",
+ .id = SM8250_SLAVE_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_MASTER_GEM_NOC_CFG },
+};
+
+static struct qcom_icc_node qns_gem_noc_snoc = {
+ .name = "qns_gem_noc_snoc",
+ .id = SM8250_SLAVE_GEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8250_MASTER_GEM_NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SM8250_SLAVE_LLCC,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8250_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_sys_pcie = {
+ .name = "qns_sys_pcie",
+ .id = SM8250_SLAVE_MEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node srvc_even_gemnoc = {
+ .name = "srvc_even_gemnoc",
+ .id = SM8250_SLAVE_SERVICE_GEM_NOC_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_odd_gemnoc = {
+ .name = "srvc_odd_gemnoc",
+ .id = SM8250_SLAVE_SERVICE_GEM_NOC_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_sys_gemnoc = {
+ .name = "srvc_sys_gemnoc",
+ .id = SM8250_SLAVE_SERVICE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SM8250_SLAVE_EBI_CH0,
+ .channels = 4,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SM8250_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8250_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .id = SM8250_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8250_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SM8250_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cal_dp0 = {
+ .name = "qhs_cal_dp0",
+ .id = SM8250_SLAVE_NPU_CAL_DP0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cal_dp1 = {
+ .name = "qhs_cal_dp1",
+ .id = SM8250_SLAVE_NPU_CAL_DP1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cp = {
+ .name = "qhs_cp",
+ .id = SM8250_SLAVE_NPU_CP,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_dma_bwmon = {
+ .name = "qhs_dma_bwmon",
+ .id = SM8250_SLAVE_NPU_INT_DMA_BWMON_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_dpm = {
+ .name = "qhs_dpm",
+ .id = SM8250_SLAVE_NPU_DPM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_isense = {
+ .name = "qhs_isense",
+ .id = SM8250_SLAVE_ISENSE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_llm = {
+ .name = "qhs_llm",
+ .id = SM8250_SLAVE_NPU_LLM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tcm = {
+ .name = "qhs_tcm",
+ .id = SM8250_SLAVE_NPU_TCM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_npu_sys = {
+ .name = "qns_npu_sys",
+ .id = SM8250_SLAVE_NPU_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node srvc_noc = {
+ .name = "srvc_noc",
+ .id = SM8250_SLAVE_SERVICE_NPU_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SM8250_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qns_cnoc = {
+ .name = "qns_cnoc",
+ .id = SM8250_SNOC_CNOC_SLV,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_SNOC_CNOC_MAS },
+};
+
+static struct qcom_icc_node qns_gemnoc_gc = {
+ .name = "qns_gemnoc_gc",
+ .id = SM8250_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8250_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SM8250_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8250_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SM8250_SLAVE_OCIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = SM8250_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SM8250_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+ .name = "xs_pcie_0",
+ .id = SM8250_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+ .name = "xs_pcie_1",
+ .id = SM8250_SLAVE_PCIE_1,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_pcie_modem = {
+ .name = "xs_pcie_modem",
+ .id = SM8250_SLAVE_PCIE_2,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SM8250_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SM8250_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .id = SM8250_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .id = SM8250_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+ .name = "qup2_core_master",
+ .id = SM8250_MASTER_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8250_SLAVE_QUP_CORE_2 },
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .id = SM8250_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .id = SM8250_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+ .name = "qup2_core_slave",
+ .id = SM8250_SLAVE_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .keepalive = false,
+ .num_nodes = 3,
+ .nodes = { &qnm_camnoc_hf, &qxm_mdp0, &qxm_mdp1 },
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+ .name = "SH2",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &alm_gpu_tcu, &alm_sys_tcu },
+};
+
+static struct qcom_icc_bcm bcm_mm2 = {
+ .name = "MM2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = false,
+ .num_nodes = 3,
+ .nodes = { &qup0_core_master, &qup1_core_master, &qup2_core_master },
+};
+
+static struct qcom_icc_bcm bcm_sh3 = {
+ .name = "SH3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_cmpnoc },
+};
+
+static struct qcom_icc_bcm bcm_mm3 = {
+ .name = "MM3",
+ .keepalive = false,
+ .num_nodes = 5,
+ .nodes = { &qnm_camnoc_icp, &qnm_camnoc_sf, &qnm_video0, &qnm_video1, &qnm_video_cvp },
+};
+
+static struct qcom_icc_bcm bcm_sh4 = {
+ .name = "SH4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &chm_apps },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_cdsp_mem_noc },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 52,
+ .nodes = { &qnm_snoc,
+ &xm_qdss_dap,
+ &qhs_a1_noc_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_ahb2phy0,
+ &qhs_ahb2phy1,
+ &qhs_aoss,
+ &qhs_camera_cfg,
+ &qhs_clk_ctl,
+ &qhs_compute_dsp,
+ &qhs_cpr_cx,
+ &qhs_cpr_mmcx,
+ &qhs_cpr_mx,
+ &qhs_crypto0_cfg,
+ &qhs_cx_rdpm,
+ &qhs_dcc_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_display_cfg,
+ &qhs_gpuss_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_ipc_router,
+ &qhs_lpass_cfg,
+ &qhs_mnoc_cfg,
+ &qhs_npu_cfg,
+ &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg,
+ &qhs_pcie_modem_cfg,
+ &qhs_pdm,
+ &qhs_pimem_cfg,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qspi,
+ &qhs_qup0,
+ &qhs_qup1,
+ &qhs_qup2,
+ &qhs_sdc2,
+ &qhs_sdc4,
+ &qhs_snoc_cfg,
+ &qhs_tcsr,
+ &qhs_tlmm0,
+ &qhs_tlmm1,
+ &qhs_tlmm2,
+ &qhs_tsif,
+ &qhs_ufs_card_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3_0,
+ &qhs_usb3_1,
+ &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &qns_cnoc_a2noc,
+ &srvc_cnoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_imem },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_co2 = {
+ .name = "CO2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_npu },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_pimem },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xs_qdss_stm },
+};
+
+static struct qcom_icc_bcm bcm_sn5 = {
+ .name = "SN5",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xs_pcie_modem },
+};
+
+static struct qcom_icc_bcm bcm_sn6 = {
+ .name = "SN6",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &xs_pcie_0, &xs_pcie_1 },
+};
+
+static struct qcom_icc_bcm bcm_sn7 = {
+ .name = "SN7",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn8 = {
+ .name = "SN8",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn9 = {
+ .name = "SN9",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_gemnoc_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn11 = {
+ .name = "SN11",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_sn12 = {
+ .name = "SN12",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qns_pcie_modem_mem_noc, &qns_pcie_mem_noc },
+};
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
- &bcm_qup0,
&bcm_sn12,
};
@@ -222,10 +1665,29 @@ static const struct qcom_icc_desc sm8250_aggre1_noc = {
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
- &bcm_qup0,
&bcm_sn12,
};
+static struct qcom_icc_bcm * const qup_virt_bcms[] = {
+ &bcm_qup0,
+};
+
+static struct qcom_icc_node *qup_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [MASTER_QUP_CORE_2] = &qup2_core_master,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+ [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+};
+
+static const struct qcom_icc_desc sm8250_qup_virt = {
+ .nodes = qup_virt_nodes,
+ .num_nodes = ARRAY_SIZE(qup_virt_nodes),
+ .bcms = qup_virt_bcms,
+ .num_bcms = ARRAY_SIZE(qup_virt_bcms),
+};
+
static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
@@ -518,6 +1980,8 @@ static const struct of_device_id qnoc_of_match[] = {
.data = &sm8250_mmss_noc},
{ .compatible = "qcom,sm8250-npu-noc",
.data = &sm8250_npu_noc},
+ { .compatible = "qcom,sm8250-qup-virt",
+ .data = &sm8250_qup_virt },
{ .compatible = "qcom,sm8250-system-noc",
.data = &sm8250_system_noc},
{ }
diff --git a/drivers/interconnect/qcom/sm8250.h b/drivers/interconnect/qcom/sm8250.h
index 209ab195f21f..032665093c5b 100644
--- a/drivers/interconnect/qcom/sm8250.h
+++ b/drivers/interconnect/qcom/sm8250.h
@@ -158,5 +158,11 @@
#define SM8250_SLAVE_VSENSE_CTRL_CFG 147
#define SM8250_SNOC_CNOC_MAS 148
#define SM8250_SNOC_CNOC_SLV 149
+#define SM8250_MASTER_QUP_CORE_0 150
+#define SM8250_MASTER_QUP_CORE_1 151
+#define SM8250_MASTER_QUP_CORE_2 152
+#define SM8250_SLAVE_QUP_CORE_0 153
+#define SM8250_SLAVE_QUP_CORE_1 154
+#define SM8250_SLAVE_QUP_CORE_2 155
#endif
diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
index 5398e7c8d826..bdf75839e6d1 100644
--- a/drivers/interconnect/qcom/sm8350.c
+++ b/drivers/interconnect/qcom/sm8350.c
@@ -7,196 +7,1629 @@
#include <linux/interconnect-provider.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include <dt-bindings/interconnect/qcom,sm8350.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sm8350.h"
-DEFINE_QNODE(qhm_qspi, SM8350_MASTER_QSPI_0, 1, 4, SM8350_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(qhm_qup0, SM8350_MASTER_QUP_0, 1, 4, SM8350_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qhm_qup1, SM8350_MASTER_QUP_1, 1, 4, SM8350_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(qhm_qup2, SM8350_MASTER_QUP_2, 1, 4, SM8350_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qnm_a1noc_cfg, SM8350_MASTER_A1NOC_CFG, 1, 4, SM8350_SLAVE_SERVICE_A1NOC);
-DEFINE_QNODE(xm_sdc4, SM8350_MASTER_SDCC_4, 1, 8, SM8350_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_ufs_mem, SM8350_MASTER_UFS_MEM, 1, 8, SM8350_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_usb3_0, SM8350_MASTER_USB3_0, 1, 8, SM8350_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(xm_usb3_1, SM8350_MASTER_USB3_1, 1, 8, SM8350_SLAVE_A1NOC_SNOC);
-DEFINE_QNODE(qhm_qdss_bam, SM8350_MASTER_QDSS_BAM, 1, 4, SM8350_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qnm_a2noc_cfg, SM8350_MASTER_A2NOC_CFG, 1, 4, SM8350_SLAVE_SERVICE_A2NOC);
-DEFINE_QNODE(qxm_crypto, SM8350_MASTER_CRYPTO, 1, 8, SM8350_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qxm_ipa, SM8350_MASTER_IPA, 1, 8, SM8350_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(xm_pcie3_0, SM8350_MASTER_PCIE_0, 1, 8, SM8350_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(xm_pcie3_1, SM8350_MASTER_PCIE_1, 1, 8, SM8350_SLAVE_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(xm_qdss_etr, SM8350_MASTER_QDSS_ETR, 1, 8, SM8350_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(xm_sdc2, SM8350_MASTER_SDCC_2, 1, 8, SM8350_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(xm_ufs_card, SM8350_MASTER_UFS_CARD, 1, 8, SM8350_SLAVE_A2NOC_SNOC);
-DEFINE_QNODE(qnm_gemnoc_cnoc, SM8350_MASTER_GEM_NOC_CNOC, 1, 16, SM8350_SLAVE_AHB2PHY_SOUTH, SM8350_SLAVE_AHB2PHY_NORTH, SM8350_SLAVE_AOSS, SM8350_SLAVE_APPSS, SM8350_SLAVE_CAMERA_CFG, SM8350_SLAVE_CLK_CTL, SM8350_SLAVE_CDSP_CFG, SM8350_SLAVE_RBCPR_CX_CFG, SM8350_SLAVE_RBCPR_MMCX_CFG, SM8350_SLAVE_RBCPR_MX_CFG, SM8350_SLAVE_CRYPTO_0_CFG, SM8350_SLAVE_CX_RDPM, SM8350_SLAVE_DCC_CFG, SM8350_SLAVE_DISPLAY_CFG, SM8350_SLAVE_GFX3D_CFG, SM8350_SLAVE_HWKM, SM8350_SLAVE_IMEM_CFG, SM8350_SLAVE_IPA_CFG, SM8350_SLAVE_IPC_ROUTER_CFG, SM8350_SLAVE_LPASS, SM8350_SLAVE_CNOC_MSS, SM8350_SLAVE_MX_RDPM, SM8350_SLAVE_PCIE_0_CFG, SM8350_SLAVE_PCIE_1_CFG, SM8350_SLAVE_PDM, SM8350_SLAVE_PIMEM_CFG, SM8350_SLAVE_PKA_WRAPPER_CFG, SM8350_SLAVE_PMU_WRAPPER_CFG, SM8350_SLAVE_QDSS_CFG, SM8350_SLAVE_QSPI_0, SM8350_SLAVE_QUP_0, SM8350_SLAVE_QUP_1, SM8350_SLAVE_QUP_2, SM8350_SLAVE_SDCC_2, SM8350_SLAVE_SDCC_4, SM8350_SLAVE_SECURITY, SM8350_SLAVE_SPSS_CFG, SM8350_SLAVE_TCSR, SM8350_SLAVE_TLMM, SM8350_SLAVE_UFS_CARD_CFG, SM8350_SLAVE_UFS_MEM_CFG, SM8350_SLAVE_USB3_0, SM8350_SLAVE_USB3_1, SM8350_SLAVE_VENUS_CFG, SM8350_SLAVE_VSENSE_CTRL_CFG, SM8350_SLAVE_A1NOC_CFG, SM8350_SLAVE_A2NOC_CFG, SM8350_SLAVE_DDRSS_CFG, SM8350_SLAVE_CNOC_MNOC_CFG, SM8350_SLAVE_SNOC_CFG, SM8350_SLAVE_BOOT_IMEM, SM8350_SLAVE_IMEM, SM8350_SLAVE_PIMEM, SM8350_SLAVE_SERVICE_CNOC, SM8350_SLAVE_QDSS_STM, SM8350_SLAVE_TCU);
-DEFINE_QNODE(qnm_gemnoc_pcie, SM8350_MASTER_GEM_NOC_PCIE_SNOC, 1, 8, SM8350_SLAVE_PCIE_0, SM8350_SLAVE_PCIE_1);
-DEFINE_QNODE(xm_qdss_dap, SM8350_MASTER_QDSS_DAP, 1, 8, SM8350_SLAVE_AHB2PHY_SOUTH, SM8350_SLAVE_AHB2PHY_NORTH, SM8350_SLAVE_AOSS, SM8350_SLAVE_APPSS, SM8350_SLAVE_CAMERA_CFG, SM8350_SLAVE_CLK_CTL, SM8350_SLAVE_CDSP_CFG, SM8350_SLAVE_RBCPR_CX_CFG, SM8350_SLAVE_RBCPR_MMCX_CFG, SM8350_SLAVE_RBCPR_MX_CFG, SM8350_SLAVE_CRYPTO_0_CFG, SM8350_SLAVE_CX_RDPM, SM8350_SLAVE_DCC_CFG, SM8350_SLAVE_DISPLAY_CFG, SM8350_SLAVE_GFX3D_CFG, SM8350_SLAVE_HWKM, SM8350_SLAVE_IMEM_CFG, SM8350_SLAVE_IPA_CFG, SM8350_SLAVE_IPC_ROUTER_CFG, SM8350_SLAVE_LPASS, SM8350_SLAVE_CNOC_MSS, SM8350_SLAVE_MX_RDPM, SM8350_SLAVE_PCIE_0_CFG, SM8350_SLAVE_PCIE_1_CFG, SM8350_SLAVE_PDM, SM8350_SLAVE_PIMEM_CFG, SM8350_SLAVE_PKA_WRAPPER_CFG, SM8350_SLAVE_PMU_WRAPPER_CFG, SM8350_SLAVE_QDSS_CFG, SM8350_SLAVE_QSPI_0, SM8350_SLAVE_QUP_0, SM8350_SLAVE_QUP_1, SM8350_SLAVE_QUP_2, SM8350_SLAVE_SDCC_2, SM8350_SLAVE_SDCC_4, SM8350_SLAVE_SECURITY, SM8350_SLAVE_SPSS_CFG, SM8350_SLAVE_TCSR, SM8350_SLAVE_TLMM, SM8350_SLAVE_UFS_CARD_CFG, SM8350_SLAVE_UFS_MEM_CFG, SM8350_SLAVE_USB3_0, SM8350_SLAVE_USB3_1, SM8350_SLAVE_VENUS_CFG, SM8350_SLAVE_VSENSE_CTRL_CFG, SM8350_SLAVE_A1NOC_CFG, SM8350_SLAVE_A2NOC_CFG, SM8350_SLAVE_DDRSS_CFG, SM8350_SLAVE_CNOC_MNOC_CFG, SM8350_SLAVE_SNOC_CFG, SM8350_SLAVE_BOOT_IMEM, SM8350_SLAVE_IMEM, SM8350_SLAVE_PIMEM, SM8350_SLAVE_SERVICE_CNOC, SM8350_SLAVE_QDSS_STM, SM8350_SLAVE_TCU);
-DEFINE_QNODE(qnm_cnoc_dc_noc, SM8350_MASTER_CNOC_DC_NOC, 1, 4, SM8350_SLAVE_LLCC_CFG, SM8350_SLAVE_GEM_NOC_CFG);
-DEFINE_QNODE(alm_gpu_tcu, SM8350_MASTER_GPU_TCU, 1, 8, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
-DEFINE_QNODE(alm_sys_tcu, SM8350_MASTER_SYS_TCU, 1, 8, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
-DEFINE_QNODE(chm_apps, SM8350_MASTER_APPSS_PROC, 2, 32, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC, SM8350_SLAVE_MEM_NOC_PCIE_SNOC);
-DEFINE_QNODE(qnm_cmpnoc, SM8350_MASTER_COMPUTE_NOC, 2, 32, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
-DEFINE_QNODE(qnm_gemnoc_cfg, SM8350_MASTER_GEM_NOC_CFG, 1, 4, SM8350_SLAVE_MSS_PROC_MS_MPU_CFG, SM8350_SLAVE_MCDMA_MS_MPU_CFG, SM8350_SLAVE_SERVICE_GEM_NOC_1, SM8350_SLAVE_SERVICE_GEM_NOC_2, SM8350_SLAVE_SERVICE_GEM_NOC);
-DEFINE_QNODE(qnm_gpu, SM8350_MASTER_GFX3D, 2, 32, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
-DEFINE_QNODE(qnm_mnoc_hf, SM8350_MASTER_MNOC_HF_MEM_NOC, 2, 32, SM8350_SLAVE_LLCC);
-DEFINE_QNODE(qnm_mnoc_sf, SM8350_MASTER_MNOC_SF_MEM_NOC, 2, 32, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
-DEFINE_QNODE(qnm_pcie, SM8350_MASTER_ANOC_PCIE_GEM_NOC, 1, 16, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC);
-DEFINE_QNODE(qnm_snoc_gc, SM8350_MASTER_SNOC_GC_MEM_NOC, 1, 8, SM8350_SLAVE_LLCC);
-DEFINE_QNODE(qnm_snoc_sf, SM8350_MASTER_SNOC_SF_MEM_NOC, 1, 16, SM8350_SLAVE_GEM_NOC_CNOC, SM8350_SLAVE_LLCC, SM8350_SLAVE_MEM_NOC_PCIE_SNOC);
-DEFINE_QNODE(qhm_config_noc, SM8350_MASTER_CNOC_LPASS_AG_NOC, 1, 4, SM8350_SLAVE_LPASS_CORE_CFG, SM8350_SLAVE_LPASS_LPI_CFG, SM8350_SLAVE_LPASS_MPU_CFG, SM8350_SLAVE_LPASS_TOP_CFG, SM8350_SLAVE_SERVICES_LPASS_AML_NOC, SM8350_SLAVE_SERVICE_LPASS_AG_NOC);
-DEFINE_QNODE(llcc_mc, SM8350_MASTER_LLCC, 4, 4, SM8350_SLAVE_EBI1);
-DEFINE_QNODE(qnm_camnoc_hf, SM8350_MASTER_CAMNOC_HF, 2, 32, SM8350_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qnm_camnoc_icp, SM8350_MASTER_CAMNOC_ICP, 1, 8, SM8350_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qnm_camnoc_sf, SM8350_MASTER_CAMNOC_SF, 2, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qnm_mnoc_cfg, SM8350_MASTER_CNOC_MNOC_CFG, 1, 4, SM8350_SLAVE_SERVICE_MNOC);
-DEFINE_QNODE(qnm_video0, SM8350_MASTER_VIDEO_P0, 1, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qnm_video1, SM8350_MASTER_VIDEO_P1, 1, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qnm_video_cvp, SM8350_MASTER_VIDEO_PROC, 1, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qxm_mdp0, SM8350_MASTER_MDP0, 1, 32, SM8350_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_mdp1, SM8350_MASTER_MDP1, 1, 32, SM8350_SLAVE_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qxm_rot, SM8350_MASTER_ROTATOR, 1, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(qhm_nsp_noc_config, SM8350_MASTER_CDSP_NOC_CFG, 1, 4, SM8350_SLAVE_SERVICE_NSP_NOC);
-DEFINE_QNODE(qxm_nsp, SM8350_MASTER_CDSP_PROC, 2, 32, SM8350_SLAVE_CDSP_MEM_NOC);
-DEFINE_QNODE(qnm_aggre1_noc, SM8350_MASTER_A1NOC_SNOC, 1, 16, SM8350_SLAVE_SNOC_GEM_NOC_SF);
-DEFINE_QNODE(qnm_aggre2_noc, SM8350_MASTER_A2NOC_SNOC, 1, 16, SM8350_SLAVE_SNOC_GEM_NOC_SF);
-DEFINE_QNODE(qnm_snoc_cfg, SM8350_MASTER_SNOC_CFG, 1, 4, SM8350_SLAVE_SERVICE_SNOC);
-DEFINE_QNODE(qxm_pimem, SM8350_MASTER_PIMEM, 1, 8, SM8350_SLAVE_SNOC_GEM_NOC_GC);
-DEFINE_QNODE(xm_gic, SM8350_MASTER_GIC, 1, 8, SM8350_SLAVE_SNOC_GEM_NOC_GC);
-DEFINE_QNODE(qnm_mnoc_hf_disp, SM8350_MASTER_MNOC_HF_MEM_NOC_DISP, 2, 32, SM8350_SLAVE_LLCC_DISP);
-DEFINE_QNODE(qnm_mnoc_sf_disp, SM8350_MASTER_MNOC_SF_MEM_NOC_DISP, 2, 32, SM8350_SLAVE_LLCC_DISP);
-DEFINE_QNODE(llcc_mc_disp, SM8350_MASTER_LLCC_DISP, 4, 4, SM8350_SLAVE_EBI1_DISP);
-DEFINE_QNODE(qxm_mdp0_disp, SM8350_MASTER_MDP0_DISP, 1, 32, SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP);
-DEFINE_QNODE(qxm_mdp1_disp, SM8350_MASTER_MDP1_DISP, 1, 32, SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP);
-DEFINE_QNODE(qxm_rot_disp, SM8350_MASTER_ROTATOR_DISP, 1, 32, SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP);
-DEFINE_QNODE(qns_a1noc_snoc, SM8350_SLAVE_A1NOC_SNOC, 1, 16, SM8350_MASTER_A1NOC_SNOC);
-DEFINE_QNODE(srvc_aggre1_noc, SM8350_SLAVE_SERVICE_A1NOC, 1, 4);
-DEFINE_QNODE(qns_a2noc_snoc, SM8350_SLAVE_A2NOC_SNOC, 1, 16, SM8350_MASTER_A2NOC_SNOC);
-DEFINE_QNODE(qns_pcie_mem_noc, SM8350_SLAVE_ANOC_PCIE_GEM_NOC, 1, 16, SM8350_MASTER_ANOC_PCIE_GEM_NOC);
-DEFINE_QNODE(srvc_aggre2_noc, SM8350_SLAVE_SERVICE_A2NOC, 1, 4);
-DEFINE_QNODE(qhs_ahb2phy0, SM8350_SLAVE_AHB2PHY_SOUTH, 1, 4);
-DEFINE_QNODE(qhs_ahb2phy1, SM8350_SLAVE_AHB2PHY_NORTH, 1, 4);
-DEFINE_QNODE(qhs_aoss, SM8350_SLAVE_AOSS, 1, 4);
-DEFINE_QNODE(qhs_apss, SM8350_SLAVE_APPSS, 1, 8);
-DEFINE_QNODE(qhs_camera_cfg, SM8350_SLAVE_CAMERA_CFG, 1, 4);
-DEFINE_QNODE(qhs_clk_ctl, SM8350_SLAVE_CLK_CTL, 1, 4);
-DEFINE_QNODE(qhs_compute_cfg, SM8350_SLAVE_CDSP_CFG, 1, 4);
-DEFINE_QNODE(qhs_cpr_cx, SM8350_SLAVE_RBCPR_CX_CFG, 1, 4);
-DEFINE_QNODE(qhs_cpr_mmcx, SM8350_SLAVE_RBCPR_MMCX_CFG, 1, 4);
-DEFINE_QNODE(qhs_cpr_mx, SM8350_SLAVE_RBCPR_MX_CFG, 1, 4);
-DEFINE_QNODE(qhs_crypto0_cfg, SM8350_SLAVE_CRYPTO_0_CFG, 1, 4);
-DEFINE_QNODE(qhs_cx_rdpm, SM8350_SLAVE_CX_RDPM, 1, 4);
-DEFINE_QNODE(qhs_dcc_cfg, SM8350_SLAVE_DCC_CFG, 1, 4);
-DEFINE_QNODE(qhs_display_cfg, SM8350_SLAVE_DISPLAY_CFG, 1, 4);
-DEFINE_QNODE(qhs_gpuss_cfg, SM8350_SLAVE_GFX3D_CFG, 1, 8);
-DEFINE_QNODE(qhs_hwkm, SM8350_SLAVE_HWKM, 1, 4);
-DEFINE_QNODE(qhs_imem_cfg, SM8350_SLAVE_IMEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_ipa, SM8350_SLAVE_IPA_CFG, 1, 4);
-DEFINE_QNODE(qhs_ipc_router, SM8350_SLAVE_IPC_ROUTER_CFG, 1, 4);
-DEFINE_QNODE(qhs_lpass_cfg, SM8350_SLAVE_LPASS, 1, 4, SM8350_MASTER_CNOC_LPASS_AG_NOC);
-DEFINE_QNODE(qhs_mss_cfg, SM8350_SLAVE_CNOC_MSS, 1, 4);
-DEFINE_QNODE(qhs_mx_rdpm, SM8350_SLAVE_MX_RDPM, 1, 4);
-DEFINE_QNODE(qhs_pcie0_cfg, SM8350_SLAVE_PCIE_0_CFG, 1, 4);
-DEFINE_QNODE(qhs_pcie1_cfg, SM8350_SLAVE_PCIE_1_CFG, 1, 4);
-DEFINE_QNODE(qhs_pdm, SM8350_SLAVE_PDM, 1, 4);
-DEFINE_QNODE(qhs_pimem_cfg, SM8350_SLAVE_PIMEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_pka_wrapper_cfg, SM8350_SLAVE_PKA_WRAPPER_CFG, 1, 4);
-DEFINE_QNODE(qhs_pmu_wrapper_cfg, SM8350_SLAVE_PMU_WRAPPER_CFG, 1, 4);
-DEFINE_QNODE(qhs_qdss_cfg, SM8350_SLAVE_QDSS_CFG, 1, 4);
-DEFINE_QNODE(qhs_qspi, SM8350_SLAVE_QSPI_0, 1, 4);
-DEFINE_QNODE(qhs_qup0, SM8350_SLAVE_QUP_0, 1, 4);
-DEFINE_QNODE(qhs_qup1, SM8350_SLAVE_QUP_1, 1, 4);
-DEFINE_QNODE(qhs_qup2, SM8350_SLAVE_QUP_2, 1, 4);
-DEFINE_QNODE(qhs_sdc2, SM8350_SLAVE_SDCC_2, 1, 4);
-DEFINE_QNODE(qhs_sdc4, SM8350_SLAVE_SDCC_4, 1, 4);
-DEFINE_QNODE(qhs_security, SM8350_SLAVE_SECURITY, 1, 4);
-DEFINE_QNODE(qhs_spss_cfg, SM8350_SLAVE_SPSS_CFG, 1, 4);
-DEFINE_QNODE(qhs_tcsr, SM8350_SLAVE_TCSR, 1, 4);
-DEFINE_QNODE(qhs_tlmm, SM8350_SLAVE_TLMM, 1, 4);
-DEFINE_QNODE(qhs_ufs_card_cfg, SM8350_SLAVE_UFS_CARD_CFG, 1, 4);
-DEFINE_QNODE(qhs_ufs_mem_cfg, SM8350_SLAVE_UFS_MEM_CFG, 1, 4);
-DEFINE_QNODE(qhs_usb3_0, SM8350_SLAVE_USB3_0, 1, 4);
-DEFINE_QNODE(qhs_usb3_1, SM8350_SLAVE_USB3_1, 1, 4);
-DEFINE_QNODE(qhs_venus_cfg, SM8350_SLAVE_VENUS_CFG, 1, 4);
-DEFINE_QNODE(qhs_vsense_ctrl_cfg, SM8350_SLAVE_VSENSE_CTRL_CFG, 1, 4);
-DEFINE_QNODE(qns_a1_noc_cfg, SM8350_SLAVE_A1NOC_CFG, 1, 4);
-DEFINE_QNODE(qns_a2_noc_cfg, SM8350_SLAVE_A2NOC_CFG, 1, 4);
-DEFINE_QNODE(qns_ddrss_cfg, SM8350_SLAVE_DDRSS_CFG, 1, 4);
-DEFINE_QNODE(qns_mnoc_cfg, SM8350_SLAVE_CNOC_MNOC_CFG, 1, 4);
-DEFINE_QNODE(qns_snoc_cfg, SM8350_SLAVE_SNOC_CFG, 1, 4);
-DEFINE_QNODE(qxs_boot_imem, SM8350_SLAVE_BOOT_IMEM, 1, 8);
-DEFINE_QNODE(qxs_imem, SM8350_SLAVE_IMEM, 1, 8);
-DEFINE_QNODE(qxs_pimem, SM8350_SLAVE_PIMEM, 1, 8);
-DEFINE_QNODE(srvc_cnoc, SM8350_SLAVE_SERVICE_CNOC, 1, 4);
-DEFINE_QNODE(xs_pcie_0, SM8350_SLAVE_PCIE_0, 1, 8);
-DEFINE_QNODE(xs_pcie_1, SM8350_SLAVE_PCIE_1, 1, 8);
-DEFINE_QNODE(xs_qdss_stm, SM8350_SLAVE_QDSS_STM, 1, 4);
-DEFINE_QNODE(xs_sys_tcu_cfg, SM8350_SLAVE_TCU, 1, 8);
-DEFINE_QNODE(qhs_llcc, SM8350_SLAVE_LLCC_CFG, 1, 4);
-DEFINE_QNODE(qns_gemnoc, SM8350_SLAVE_GEM_NOC_CFG, 1, 4);
-DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SM8350_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
-DEFINE_QNODE(qhs_modem_ms_mpu_cfg, SM8350_SLAVE_MCDMA_MS_MPU_CFG, 1, 4);
-DEFINE_QNODE(qns_gem_noc_cnoc, SM8350_SLAVE_GEM_NOC_CNOC, 1, 16, SM8350_MASTER_GEM_NOC_CNOC);
-DEFINE_QNODE(qns_llcc, SM8350_SLAVE_LLCC, 4, 16, SM8350_MASTER_LLCC);
-DEFINE_QNODE(qns_pcie, SM8350_SLAVE_MEM_NOC_PCIE_SNOC, 1, 8);
-DEFINE_QNODE(srvc_even_gemnoc, SM8350_SLAVE_SERVICE_GEM_NOC_1, 1, 4);
-DEFINE_QNODE(srvc_odd_gemnoc, SM8350_SLAVE_SERVICE_GEM_NOC_2, 1, 4);
-DEFINE_QNODE(srvc_sys_gemnoc, SM8350_SLAVE_SERVICE_GEM_NOC, 1, 4);
-DEFINE_QNODE(qhs_lpass_core, SM8350_SLAVE_LPASS_CORE_CFG, 1, 4);
-DEFINE_QNODE(qhs_lpass_lpi, SM8350_SLAVE_LPASS_LPI_CFG, 1, 4);
-DEFINE_QNODE(qhs_lpass_mpu, SM8350_SLAVE_LPASS_MPU_CFG, 1, 4);
-DEFINE_QNODE(qhs_lpass_top, SM8350_SLAVE_LPASS_TOP_CFG, 1, 4);
-DEFINE_QNODE(srvc_niu_aml_noc, SM8350_SLAVE_SERVICES_LPASS_AML_NOC, 1, 4);
-DEFINE_QNODE(srvc_niu_lpass_agnoc, SM8350_SLAVE_SERVICE_LPASS_AG_NOC, 1, 4);
-DEFINE_QNODE(ebi, SM8350_SLAVE_EBI1, 4, 4);
-DEFINE_QNODE(qns_mem_noc_hf, SM8350_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SM8350_MASTER_MNOC_HF_MEM_NOC);
-DEFINE_QNODE(qns_mem_noc_sf, SM8350_SLAVE_MNOC_SF_MEM_NOC, 2, 32, SM8350_MASTER_MNOC_SF_MEM_NOC);
-DEFINE_QNODE(srvc_mnoc, SM8350_SLAVE_SERVICE_MNOC, 1, 4);
-DEFINE_QNODE(qns_nsp_gemnoc, SM8350_SLAVE_CDSP_MEM_NOC, 2, 32, SM8350_MASTER_COMPUTE_NOC);
-DEFINE_QNODE(service_nsp_noc, SM8350_SLAVE_SERVICE_NSP_NOC, 1, 4);
-DEFINE_QNODE(qns_gemnoc_gc, SM8350_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SM8350_MASTER_SNOC_GC_MEM_NOC);
-DEFINE_QNODE(qns_gemnoc_sf, SM8350_SLAVE_SNOC_GEM_NOC_SF, 1, 16, SM8350_MASTER_SNOC_SF_MEM_NOC);
-DEFINE_QNODE(srvc_snoc, SM8350_SLAVE_SERVICE_SNOC, 1, 4);
-DEFINE_QNODE(qns_llcc_disp, SM8350_SLAVE_LLCC_DISP, 4, 16, SM8350_MASTER_LLCC_DISP);
-DEFINE_QNODE(ebi_disp, SM8350_SLAVE_EBI1_DISP, 4, 4);
-DEFINE_QNODE(qns_mem_noc_hf_disp, SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP, 2, 32, SM8350_MASTER_MNOC_HF_MEM_NOC_DISP);
-DEFINE_QNODE(qns_mem_noc_sf_disp, SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP, 2, 32, SM8350_MASTER_MNOC_SF_MEM_NOC_DISP);
-
-DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
-DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
-DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie);
-DEFINE_QBCM(bcm_cn1, "CN1", false, &xm_qdss_dap, &qhs_ahb2phy0, &qhs_ahb2phy1, &qhs_aoss, &qhs_apss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_cfg, &qhs_cpr_cx, &qhs_cpr_mmcx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_cx_rdpm, &qhs_dcc_cfg, &qhs_display_cfg, &qhs_gpuss_cfg, &qhs_hwkm, &qhs_imem_cfg, &qhs_ipa, &qhs_ipc_router, &qhs_mss_cfg, &qhs_mx_rdpm, &qhs_pcie0_cfg, &qhs_pcie1_cfg, &qhs_pimem_cfg, &qhs_pka_wrapper_cfg, &qhs_pmu_wrapper_cfg, &qhs_qdss_cfg, &qhs_qup0, &qhs_qup1, &qhs_qup2, &qhs_security, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_a1_noc_cfg, &qns_a2_noc_cfg, &qns_ddrss_cfg, &qns_mnoc_cfg, &qns_snoc_cfg, &srvc_cnoc);
-DEFINE_QBCM(bcm_cn2, "CN2", false, &qhs_lpass_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2, &qhs_sdc4);
-DEFINE_QBCM(bcm_co0, "CO0", false, &qns_nsp_gemnoc);
-DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_nsp);
-DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
-DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
-DEFINE_QBCM(bcm_mm1, "MM1", false, &qnm_camnoc_hf, &qxm_mdp0, &qxm_mdp1);
-DEFINE_QBCM(bcm_mm4, "MM4", false, &qns_mem_noc_sf);
-DEFINE_QBCM(bcm_mm5, "MM5", false, &qnm_camnoc_icp, &qnm_camnoc_sf, &qnm_video0, &qnm_video1, &qnm_video_cvp, &qxm_rot);
-DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
-DEFINE_QBCM(bcm_sh2, "SH2", false, &alm_gpu_tcu, &alm_sys_tcu);
-DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
-DEFINE_QBCM(bcm_sh4, "SH4", false, &chm_apps);
-DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
-DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
-DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
-DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
-DEFINE_QBCM(bcm_sn5, "SN5", false, &xm_pcie3_0);
-DEFINE_QBCM(bcm_sn6, "SN6", false, &xm_pcie3_1);
-DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre1_noc);
-DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_aggre2_noc);
-DEFINE_QBCM(bcm_sn14, "SN14", false, &qns_pcie_mem_noc);
-DEFINE_QBCM(bcm_acv_disp, "ACV", false, &ebi_disp);
-DEFINE_QBCM(bcm_mc0_disp, "MC0", false, &ebi_disp);
-DEFINE_QBCM(bcm_mm0_disp, "MM0", false, &qns_mem_noc_hf_disp);
-DEFINE_QBCM(bcm_mm1_disp, "MM1", false, &qxm_mdp0_disp, &qxm_mdp1_disp);
-DEFINE_QBCM(bcm_mm4_disp, "MM4", false, &qns_mem_noc_sf_disp);
-DEFINE_QBCM(bcm_mm5_disp, "MM5", false, &qxm_rot_disp);
-DEFINE_QBCM(bcm_sh0_disp, "SH0", false, &qns_llcc_disp);
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .id = SM8350_MASTER_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+ .name = "qhm_qup0",
+ .id = SM8350_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = SM8350_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .id = SM8350_MASTER_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_a1noc_cfg = {
+ .name = "qnm_a1noc_cfg",
+ .id = SM8350_MASTER_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_SERVICE_A1NOC },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SM8350_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SM8350_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SM8350_MASTER_USB3_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_1 = {
+ .name = "xm_usb3_1",
+ .id = SM8350_MASTER_USB3_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SM8350_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_a2noc_cfg = {
+ .name = "qnm_a2noc_cfg",
+ .id = SM8350_MASTER_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_SERVICE_A2NOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SM8350_MASTER_CRYPTO,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SM8350_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_pcie3_0 = {
+ .name = "xm_pcie3_0",
+ .id = SM8350_MASTER_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_1 = {
+ .name = "xm_pcie3_1",
+ .id = SM8350_MASTER_PCIE_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+ .name = "xm_qdss_etr",
+ .id = SM8350_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SM8350_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_card = {
+ .name = "xm_ufs_card",
+ .id = SM8350_MASTER_UFS_CARD,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+ .name = "qnm_gemnoc_cnoc",
+ .id = SM8350_MASTER_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 56,
+ .links = { SM8350_SLAVE_AHB2PHY_SOUTH,
+ SM8350_SLAVE_AHB2PHY_NORTH,
+ SM8350_SLAVE_AOSS,
+ SM8350_SLAVE_APPSS,
+ SM8350_SLAVE_CAMERA_CFG,
+ SM8350_SLAVE_CLK_CTL,
+ SM8350_SLAVE_CDSP_CFG,
+ SM8350_SLAVE_RBCPR_CX_CFG,
+ SM8350_SLAVE_RBCPR_MMCX_CFG,
+ SM8350_SLAVE_RBCPR_MX_CFG,
+ SM8350_SLAVE_CRYPTO_0_CFG,
+ SM8350_SLAVE_CX_RDPM,
+ SM8350_SLAVE_DCC_CFG,
+ SM8350_SLAVE_DISPLAY_CFG,
+ SM8350_SLAVE_GFX3D_CFG,
+ SM8350_SLAVE_HWKM,
+ SM8350_SLAVE_IMEM_CFG,
+ SM8350_SLAVE_IPA_CFG,
+ SM8350_SLAVE_IPC_ROUTER_CFG,
+ SM8350_SLAVE_LPASS,
+ SM8350_SLAVE_CNOC_MSS,
+ SM8350_SLAVE_MX_RDPM,
+ SM8350_SLAVE_PCIE_0_CFG,
+ SM8350_SLAVE_PCIE_1_CFG,
+ SM8350_SLAVE_PDM,
+ SM8350_SLAVE_PIMEM_CFG,
+ SM8350_SLAVE_PKA_WRAPPER_CFG,
+ SM8350_SLAVE_PMU_WRAPPER_CFG,
+ SM8350_SLAVE_QDSS_CFG,
+ SM8350_SLAVE_QSPI_0,
+ SM8350_SLAVE_QUP_0,
+ SM8350_SLAVE_QUP_1,
+ SM8350_SLAVE_QUP_2,
+ SM8350_SLAVE_SDCC_2,
+ SM8350_SLAVE_SDCC_4,
+ SM8350_SLAVE_SECURITY,
+ SM8350_SLAVE_SPSS_CFG,
+ SM8350_SLAVE_TCSR,
+ SM8350_SLAVE_TLMM,
+ SM8350_SLAVE_UFS_CARD_CFG,
+ SM8350_SLAVE_UFS_MEM_CFG,
+ SM8350_SLAVE_USB3_0,
+ SM8350_SLAVE_USB3_1,
+ SM8350_SLAVE_VENUS_CFG,
+ SM8350_SLAVE_VSENSE_CTRL_CFG,
+ SM8350_SLAVE_A1NOC_CFG,
+ SM8350_SLAVE_A2NOC_CFG,
+ SM8350_SLAVE_DDRSS_CFG,
+ SM8350_SLAVE_CNOC_MNOC_CFG,
+ SM8350_SLAVE_SNOC_CFG,
+ SM8350_SLAVE_BOOT_IMEM,
+ SM8350_SLAVE_IMEM,
+ SM8350_SLAVE_PIMEM,
+ SM8350_SLAVE_SERVICE_CNOC,
+ SM8350_SLAVE_QDSS_STM,
+ SM8350_SLAVE_TCU
+ },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+ .name = "qnm_gemnoc_pcie",
+ .id = SM8350_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8350_SLAVE_PCIE_0,
+ SM8350_SLAVE_PCIE_1
+ },
+};
+
+static struct qcom_icc_node xm_qdss_dap = {
+ .name = "xm_qdss_dap",
+ .id = SM8350_MASTER_QDSS_DAP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 56,
+ .links = { SM8350_SLAVE_AHB2PHY_SOUTH,
+ SM8350_SLAVE_AHB2PHY_NORTH,
+ SM8350_SLAVE_AOSS,
+ SM8350_SLAVE_APPSS,
+ SM8350_SLAVE_CAMERA_CFG,
+ SM8350_SLAVE_CLK_CTL,
+ SM8350_SLAVE_CDSP_CFG,
+ SM8350_SLAVE_RBCPR_CX_CFG,
+ SM8350_SLAVE_RBCPR_MMCX_CFG,
+ SM8350_SLAVE_RBCPR_MX_CFG,
+ SM8350_SLAVE_CRYPTO_0_CFG,
+ SM8350_SLAVE_CX_RDPM,
+ SM8350_SLAVE_DCC_CFG,
+ SM8350_SLAVE_DISPLAY_CFG,
+ SM8350_SLAVE_GFX3D_CFG,
+ SM8350_SLAVE_HWKM,
+ SM8350_SLAVE_IMEM_CFG,
+ SM8350_SLAVE_IPA_CFG,
+ SM8350_SLAVE_IPC_ROUTER_CFG,
+ SM8350_SLAVE_LPASS,
+ SM8350_SLAVE_CNOC_MSS,
+ SM8350_SLAVE_MX_RDPM,
+ SM8350_SLAVE_PCIE_0_CFG,
+ SM8350_SLAVE_PCIE_1_CFG,
+ SM8350_SLAVE_PDM,
+ SM8350_SLAVE_PIMEM_CFG,
+ SM8350_SLAVE_PKA_WRAPPER_CFG,
+ SM8350_SLAVE_PMU_WRAPPER_CFG,
+ SM8350_SLAVE_QDSS_CFG,
+ SM8350_SLAVE_QSPI_0,
+ SM8350_SLAVE_QUP_0,
+ SM8350_SLAVE_QUP_1,
+ SM8350_SLAVE_QUP_2,
+ SM8350_SLAVE_SDCC_2,
+ SM8350_SLAVE_SDCC_4,
+ SM8350_SLAVE_SECURITY,
+ SM8350_SLAVE_SPSS_CFG,
+ SM8350_SLAVE_TCSR,
+ SM8350_SLAVE_TLMM,
+ SM8350_SLAVE_UFS_CARD_CFG,
+ SM8350_SLAVE_UFS_MEM_CFG,
+ SM8350_SLAVE_USB3_0,
+ SM8350_SLAVE_USB3_1,
+ SM8350_SLAVE_VENUS_CFG,
+ SM8350_SLAVE_VSENSE_CTRL_CFG,
+ SM8350_SLAVE_A1NOC_CFG,
+ SM8350_SLAVE_A2NOC_CFG,
+ SM8350_SLAVE_DDRSS_CFG,
+ SM8350_SLAVE_CNOC_MNOC_CFG,
+ SM8350_SLAVE_SNOC_CFG,
+ SM8350_SLAVE_BOOT_IMEM,
+ SM8350_SLAVE_IMEM,
+ SM8350_SLAVE_PIMEM,
+ SM8350_SLAVE_SERVICE_CNOC,
+ SM8350_SLAVE_QDSS_STM,
+ SM8350_SLAVE_TCU
+ },
+};
+
+static struct qcom_icc_node qnm_cnoc_dc_noc = {
+ .name = "qnm_cnoc_dc_noc",
+ .id = SM8350_MASTER_CNOC_DC_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SM8350_SLAVE_LLCC_CFG,
+ SM8350_SLAVE_GEM_NOC_CFG
+ },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .id = SM8350_MASTER_GPU_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8350_SLAVE_GEM_NOC_CNOC,
+ SM8350_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .id = SM8350_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8350_SLAVE_GEM_NOC_CNOC,
+ SM8350_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .id = SM8350_MASTER_APPSS_PROC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SM8350_SLAVE_GEM_NOC_CNOC,
+ SM8350_SLAVE_LLCC,
+ SM8350_SLAVE_MEM_NOC_PCIE_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_cmpnoc = {
+ .name = "qnm_cmpnoc",
+ .id = SM8350_MASTER_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8350_SLAVE_GEM_NOC_CNOC,
+ SM8350_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cfg = {
+ .name = "qnm_gemnoc_cfg",
+ .id = SM8350_MASTER_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 5,
+ .links = { SM8350_SLAVE_MSS_PROC_MS_MPU_CFG,
+ SM8350_SLAVE_MCDMA_MS_MPU_CFG,
+ SM8350_SLAVE_SERVICE_GEM_NOC_1,
+ SM8350_SLAVE_SERVICE_GEM_NOC_2,
+ SM8350_SLAVE_SERVICE_GEM_NOC
+ },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .id = SM8350_MASTER_GFX3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8350_SLAVE_GEM_NOC_CNOC,
+ SM8350_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SM8350_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SM8350_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8350_SLAVE_GEM_NOC_CNOC,
+ SM8350_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = SM8350_MASTER_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SM8350_SLAVE_GEM_NOC_CNOC,
+ SM8350_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SM8350_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SM8350_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8350_SLAVE_GEM_NOC_CNOC,
+ SM8350_SLAVE_LLCC,
+ SM8350_SLAVE_MEM_NOC_PCIE_SNOC
+ },
+};
+
+static struct qcom_icc_node qhm_config_noc = {
+ .name = "qhm_config_noc",
+ .id = SM8350_MASTER_CNOC_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 6,
+ .links = { SM8350_SLAVE_LPASS_CORE_CFG,
+ SM8350_SLAVE_LPASS_LPI_CFG,
+ SM8350_SLAVE_LPASS_MPU_CFG,
+ SM8350_SLAVE_LPASS_TOP_CFG,
+ SM8350_SLAVE_SERVICES_LPASS_AML_NOC,
+ SM8350_SLAVE_SERVICE_LPASS_AG_NOC
+ },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SM8350_MASTER_LLCC,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+ .name = "qnm_camnoc_hf",
+ .id = SM8350_MASTER_CAMNOC_HF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_icp = {
+ .name = "qnm_camnoc_icp",
+ .id = SM8350_MASTER_CAMNOC_ICP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf = {
+ .name = "qnm_camnoc_sf",
+ .id = SM8350_MASTER_CAMNOC_SF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_cfg = {
+ .name = "qnm_mnoc_cfg",
+ .id = SM8350_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qnm_video0 = {
+ .name = "qnm_video0",
+ .id = SM8350_MASTER_VIDEO_P0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video1 = {
+ .name = "qnm_video1",
+ .id = SM8350_MASTER_VIDEO_P1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cvp = {
+ .name = "qnm_video_cvp",
+ .id = SM8350_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp0 = {
+ .name = "qxm_mdp0",
+ .id = SM8350_MASTER_MDP0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp1 = {
+ .name = "qxm_mdp1",
+ .id = SM8350_MASTER_MDP1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_rot = {
+ .name = "qxm_rot",
+ .id = SM8350_MASTER_ROTATOR,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qhm_nsp_noc_config = {
+ .name = "qhm_nsp_noc_config",
+ .id = SM8350_MASTER_CDSP_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_SERVICE_NSP_NOC },
+};
+
+static struct qcom_icc_node qxm_nsp = {
+ .name = "qxm_nsp",
+ .id = SM8350_MASTER_CDSP_PROC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_CDSP_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SM8350_MASTER_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SM8350_MASTER_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_snoc_cfg = {
+ .name = "qnm_snoc_cfg",
+ .id = SM8350_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = SM8350_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SM8350_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf_disp = {
+ .name = "qnm_mnoc_hf_disp",
+ .id = SM8350_MASTER_MNOC_HF_MEM_NOC_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_LLCC_DISP },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf_disp = {
+ .name = "qnm_mnoc_sf_disp",
+ .id = SM8350_MASTER_MNOC_SF_MEM_NOC_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_LLCC_DISP },
+};
+
+static struct qcom_icc_node llcc_mc_disp = {
+ .name = "llcc_mc_disp",
+ .id = SM8350_MASTER_LLCC_DISP,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_EBI1_DISP },
+};
+
+static struct qcom_icc_node qxm_mdp0_disp = {
+ .name = "qxm_mdp0_disp",
+ .id = SM8350_MASTER_MDP0_DISP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_node qxm_mdp1_disp = {
+ .name = "qxm_mdp1_disp",
+ .id = SM8350_MASTER_MDP1_DISP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_node qxm_rot_disp = {
+ .name = "qxm_rot_disp",
+ .id = SM8350_MASTER_ROTATOR_DISP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SM8350_SLAVE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8350_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre1_noc = {
+ .name = "srvc_aggre1_noc",
+ .id = SM8350_SLAVE_SERVICE_A1NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SM8350_SLAVE_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8350_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_pcie_mem_noc = {
+ .name = "qns_pcie_mem_noc",
+ .id = SM8350_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8350_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node srvc_aggre2_noc = {
+ .name = "srvc_aggre2_noc",
+ .id = SM8350_SLAVE_SERVICE_A2NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .id = SM8350_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .id = SM8350_SLAVE_AHB2PHY_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SM8350_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SM8350_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SM8350_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SM8350_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_compute_cfg = {
+ .name = "qhs_compute_cfg",
+ .id = SM8350_SLAVE_CDSP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SM8350_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mmcx = {
+ .name = "qhs_cpr_mmcx",
+ .id = SM8350_SLAVE_RBCPR_MMCX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mx = {
+ .name = "qhs_cpr_mx",
+ .id = SM8350_SLAVE_RBCPR_MX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SM8350_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cx_rdpm = {
+ .name = "qhs_cx_rdpm",
+ .id = SM8350_SLAVE_CX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_dcc_cfg = {
+ .name = "qhs_dcc_cfg",
+ .id = SM8350_SLAVE_DCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = SM8350_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SM8350_SLAVE_GFX3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_hwkm = {
+ .name = "qhs_hwkm",
+ .id = SM8350_SLAVE_HWKM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SM8350_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SM8350_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .id = SM8350_SLAVE_IPC_ROUTER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_cfg = {
+ .name = "qhs_lpass_cfg",
+ .id = SM8350_SLAVE_LPASS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8350_MASTER_CNOC_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qhs_mss_cfg = {
+ .name = "qhs_mss_cfg",
+ .id = SM8350_SLAVE_CNOC_MSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mx_rdpm = {
+ .name = "qhs_mx_rdpm",
+ .id = SM8350_SLAVE_MX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .id = SM8350_SLAVE_PCIE_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+ .name = "qhs_pcie1_cfg",
+ .id = SM8350_SLAVE_PCIE_1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SM8350_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SM8350_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pka_wrapper_cfg = {
+ .name = "qhs_pka_wrapper_cfg",
+ .id = SM8350_SLAVE_PKA_WRAPPER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pmu_wrapper_cfg = {
+ .name = "qhs_pmu_wrapper_cfg",
+ .id = SM8350_SLAVE_PMU_WRAPPER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SM8350_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .id = SM8350_SLAVE_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .id = SM8350_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .id = SM8350_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+ .name = "qhs_qup2",
+ .id = SM8350_SLAVE_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SM8350_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SM8350_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_security = {
+ .name = "qhs_security",
+ .id = SM8350_SLAVE_SECURITY,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_spss_cfg = {
+ .name = "qhs_spss_cfg",
+ .id = SM8350_SLAVE_SPSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SM8350_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = SM8350_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_card_cfg = {
+ .name = "qhs_ufs_card_cfg",
+ .id = SM8350_SLAVE_UFS_CARD_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SM8350_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SM8350_SLAVE_USB3_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_1 = {
+ .name = "qhs_usb3_1",
+ .id = SM8350_SLAVE_USB3_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SM8350_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SM8350_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_a1_noc_cfg = {
+ .name = "qns_a1_noc_cfg",
+ .id = SM8350_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_a2_noc_cfg = {
+ .name = "qns_a2_noc_cfg",
+ .id = SM8350_SLAVE_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_ddrss_cfg = {
+ .name = "qns_ddrss_cfg",
+ .id = SM8350_SLAVE_DDRSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_mnoc_cfg = {
+ .name = "qns_mnoc_cfg",
+ .id = SM8350_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_snoc_cfg = {
+ .name = "qns_snoc_cfg",
+ .id = SM8350_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qxs_boot_imem = {
+ .name = "qxs_boot_imem",
+ .id = SM8350_SLAVE_BOOT_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SM8350_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = SM8350_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node srvc_cnoc = {
+ .name = "srvc_cnoc",
+ .id = SM8350_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+ .name = "xs_pcie_0",
+ .id = SM8350_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+ .name = "xs_pcie_1",
+ .id = SM8350_SLAVE_PCIE_1,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SM8350_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SM8350_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_llcc = {
+ .name = "qhs_llcc",
+ .id = SM8350_SLAVE_LLCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_gemnoc = {
+ .name = "qns_gemnoc",
+ .id = SM8350_SLAVE_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
+ .name = "qhs_mdsp_ms_mpu_cfg",
+ .id = SM8350_SLAVE_MSS_PROC_MS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_modem_ms_mpu_cfg = {
+ .name = "qhs_modem_ms_mpu_cfg",
+ .id = SM8350_SLAVE_MCDMA_MS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+ .name = "qns_gem_noc_cnoc",
+ .id = SM8350_SLAVE_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8350_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SM8350_SLAVE_LLCC,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8350_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+ .name = "qns_pcie",
+ .id = SM8350_SLAVE_MEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node srvc_even_gemnoc = {
+ .name = "srvc_even_gemnoc",
+ .id = SM8350_SLAVE_SERVICE_GEM_NOC_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_odd_gemnoc = {
+ .name = "srvc_odd_gemnoc",
+ .id = SM8350_SLAVE_SERVICE_GEM_NOC_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_sys_gemnoc = {
+ .name = "srvc_sys_gemnoc",
+ .id = SM8350_SLAVE_SERVICE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_core = {
+ .name = "qhs_lpass_core",
+ .id = SM8350_SLAVE_LPASS_CORE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_lpi = {
+ .name = "qhs_lpass_lpi",
+ .id = SM8350_SLAVE_LPASS_LPI_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_mpu = {
+ .name = "qhs_lpass_mpu",
+ .id = SM8350_SLAVE_LPASS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_top = {
+ .name = "qhs_lpass_top",
+ .id = SM8350_SLAVE_LPASS_TOP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_niu_aml_noc = {
+ .name = "srvc_niu_aml_noc",
+ .id = SM8350_SLAVE_SERVICES_LPASS_AML_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_niu_lpass_agnoc = {
+ .name = "srvc_niu_lpass_agnoc",
+ .id = SM8350_SLAVE_SERVICE_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SM8350_SLAVE_EBI1,
+ .channels = 4,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SM8350_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .id = SM8350_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SM8350_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_nsp_gemnoc = {
+ .name = "qns_nsp_gemnoc",
+ .id = SM8350_SLAVE_CDSP_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node service_nsp_noc = {
+ .name = "service_nsp_noc",
+ .id = SM8350_SLAVE_SERVICE_NSP_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_gemnoc_gc = {
+ .name = "qns_gemnoc_gc",
+ .id = SM8350_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8350_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SM8350_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8350_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SM8350_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_llcc_disp = {
+ .name = "qns_llcc_disp",
+ .id = SM8350_SLAVE_LLCC_DISP,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8350_MASTER_LLCC_DISP },
+};
+
+static struct qcom_icc_node ebi_disp = {
+ .name = "ebi_disp",
+ .id = SM8350_SLAVE_EBI1_DISP,
+ .channels = 4,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf_disp = {
+ .name = "qns_mem_noc_hf_disp",
+ .id = SM8350_SLAVE_MNOC_HF_MEM_NOC_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_MASTER_MNOC_HF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf_disp = {
+ .name = "qns_mem_noc_sf_disp",
+ .id = SM8350_SLAVE_MNOC_SF_MEM_NOC_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8350_MASTER_MNOC_SF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 2,
+ .nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+ .name = "CN1",
+ .keepalive = false,
+ .num_nodes = 47,
+ .nodes = { &xm_qdss_dap,
+ &qhs_ahb2phy0,
+ &qhs_ahb2phy1,
+ &qhs_aoss,
+ &qhs_apss,
+ &qhs_camera_cfg,
+ &qhs_clk_ctl,
+ &qhs_compute_cfg,
+ &qhs_cpr_cx,
+ &qhs_cpr_mmcx,
+ &qhs_cpr_mx,
+ &qhs_crypto0_cfg,
+ &qhs_cx_rdpm,
+ &qhs_dcc_cfg,
+ &qhs_display_cfg,
+ &qhs_gpuss_cfg,
+ &qhs_hwkm,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_ipc_router,
+ &qhs_mss_cfg,
+ &qhs_mx_rdpm,
+ &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg,
+ &qhs_pimem_cfg,
+ &qhs_pka_wrapper_cfg,
+ &qhs_pmu_wrapper_cfg,
+ &qhs_qdss_cfg,
+ &qhs_qup0,
+ &qhs_qup1,
+ &qhs_qup2,
+ &qhs_security,
+ &qhs_spss_cfg,
+ &qhs_tcsr,
+ &qhs_tlmm,
+ &qhs_ufs_card_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3_0,
+ &qhs_usb3_1,
+ &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &qns_a1_noc_cfg,
+ &qns_a2_noc_cfg,
+ &qns_ddrss_cfg,
+ &qns_mnoc_cfg,
+ &qns_snoc_cfg,
+ &srvc_cnoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_cn2 = {
+ .name = "CN2",
+ .keepalive = false,
+ .num_nodes = 5,
+ .nodes = { &qhs_lpass_cfg, &qhs_pdm, &qhs_qspi, &qhs_sdc2, &qhs_sdc4 },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_nsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_co3 = {
+ .name = "CO3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_nsp },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .keepalive = false,
+ .num_nodes = 3,
+ .nodes = { &qnm_camnoc_hf, &qxm_mdp0, &qxm_mdp1 },
+};
+
+static struct qcom_icc_bcm bcm_mm4 = {
+ .name = "MM4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_mm5 = {
+ .name = "MM5",
+ .keepalive = false,
+ .num_nodes = 6,
+ .nodes = { &qnm_camnoc_icp,
+ &qnm_camnoc_sf,
+ &qnm_video0,
+ &qnm_video1,
+ &qnm_video_cvp,
+ &qxm_rot
+ },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+ .name = "SH2",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &alm_gpu_tcu, &alm_sys_tcu },
+};
+
+static struct qcom_icc_bcm bcm_sh3 = {
+ .name = "SH3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_cmpnoc },
+};
+
+static struct qcom_icc_bcm bcm_sh4 = {
+ .name = "SH4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &chm_apps },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_pimem },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xs_qdss_stm },
+};
+
+static struct qcom_icc_bcm bcm_sn5 = {
+ .name = "SN5",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xm_pcie3_0 },
+};
+
+static struct qcom_icc_bcm bcm_sn6 = {
+ .name = "SN6",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &xm_pcie3_1 },
+};
+
+static struct qcom_icc_bcm bcm_sn7 = {
+ .name = "SN7",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn8 = {
+ .name = "SN8",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn14 = {
+ .name = "SN14",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_mem_noc },
+};
+
+static struct qcom_icc_bcm bcm_acv_disp = {
+ .name = "ACV",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi_disp },
+};
+
+static struct qcom_icc_bcm bcm_mc0_disp = {
+ .name = "MC0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi_disp },
+};
+
+static struct qcom_icc_bcm bcm_mm0_disp = {
+ .name = "MM0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf_disp },
+};
+
+static struct qcom_icc_bcm bcm_mm1_disp = {
+ .name = "MM1",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qxm_mdp0_disp, &qxm_mdp1_disp },
+};
+
+static struct qcom_icc_bcm bcm_mm4_disp = {
+ .name = "MM4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_sf_disp },
+};
+
+static struct qcom_icc_bcm bcm_mm5_disp = {
+ .name = "MM5",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_rot_disp },
+};
+
+static struct qcom_icc_bcm bcm_sh0_disp = {
+ .name = "SH0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc_disp },
+};
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
};
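
The sm8350 hunks above replace positional DEFINE_QNODE()/DEFINE_QBCM() invocations with explicit designated initializers, so each node spells out its .id, .channels, .buswidth and .links, and each BCM its .name, .keepalive and .nodes. As a rough sketch of how the removed macros can be read — an assumed, simplified form, not the kernel's exact definition, and it ignores invocations that pass no link arguments — the mapping is:

/*
 * Simplified, assumed expansions for illustration only; the real kernel
 * macros differ in detail (e.g. handling of the zero-link case).
 */
#define SKETCH_ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

#define DEFINE_QNODE(_name, _id, _channels, _buswidth, ...)		\
	static struct qcom_icc_node _name = {				\
		.name = #_name,						\
		.id = _id,						\
		.channels = _channels,					\
		.buswidth = _buswidth,					\
		.num_links = SKETCH_ARRAY_SIZE(((u16[]){ __VA_ARGS__ })), \
		.links = { __VA_ARGS__ },				\
	}

#define DEFINE_QBCM(_name, _bcmname, _keepalive, ...)			\
	static struct qcom_icc_bcm _name = {				\
		.name = _bcmname,					\
		.keepalive = _keepalive,				\
		.num_nodes = SKETCH_ARRAY_SIZE(((struct qcom_icc_node *[]){ __VA_ARGS__ })), \
		.nodes = { __VA_ARGS__ },				\
	}

/*
 * For example, the removed line
 *	DEFINE_QNODE(qhm_qspi, SM8350_MASTER_QSPI_0, 1, 4, SM8350_SLAVE_A1NOC_SNOC);
 * expands to the same initializer as the explicit qhm_qspi definition above.
 */
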
diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
index e64c214b4020..eb7e17df32ba 100644
--- a/drivers/interconnect/qcom/sm8450.c
+++ b/drivers/interconnect/qcom/sm8450.c
@@ -8,7 +8,9 @@
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <dt-bindings/interconnect/qcom,sm8450.h>
#include "bcm-voter.h"
@@ -1886,6 +1888,7 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8450",
.of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
},
};
diff --git a/drivers/interconnect/qcom/sm8550.c b/drivers/interconnect/qcom/sm8550.c
index 0864ed285375..a10c8b6549ee 100644
--- a/drivers/interconnect/qcom/sm8550.c
+++ b/drivers/interconnect/qcom/sm8550.c
@@ -10,7 +10,9 @@
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
#include <dt-bindings/interconnect/qcom,sm8550-rpmh.h>
#include "bcm-voter.h"
diff --git a/drivers/interconnect/qcom/smd-rpm.c b/drivers/interconnect/qcom/smd-rpm.c
index 24bc994e1a12..16a145a3c914 100644
--- a/drivers/interconnect/qcom/smd-rpm.c
+++ b/drivers/interconnect/qcom/smd-rpm.c
@@ -8,8 +8,6 @@
#include <linux/interconnect-provider.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/soc/qcom/smd-rpm.h>
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 0c35018239ce..e2857109e966 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -12,13 +12,14 @@
#include "amd_iommu_types.h"
irqreturn_t amd_iommu_int_thread(int irq, void *data);
+irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data);
+irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);
+irqreturn_t amd_iommu_int_thread_galog(int irq, void *data);
irqreturn_t amd_iommu_int_handler(int irq, void *data);
void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
-int amd_iommu_init_devices(void);
-void amd_iommu_uninit_devices(void);
-void amd_iommu_init_notifier(void);
+void amd_iommu_restart_ppr_log(struct amd_iommu *iommu);
void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index dc1db6167927..7dc30c2b56b3 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -120,10 +120,13 @@
#define PASID_MASK 0x0000ffff
/* MMIO status bits */
-#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK BIT(0)
+#define MMIO_STATUS_EVT_OVERFLOW_MASK BIT(0)
#define MMIO_STATUS_EVT_INT_MASK BIT(1)
#define MMIO_STATUS_COM_WAIT_INT_MASK BIT(2)
+#define MMIO_STATUS_EVT_RUN_MASK BIT(3)
+#define MMIO_STATUS_PPR_OVERFLOW_MASK BIT(5)
#define MMIO_STATUS_PPR_INT_MASK BIT(6)
+#define MMIO_STATUS_PPR_RUN_MASK BIT(7)
#define MMIO_STATUS_GALOG_RUN_MASK BIT(8)
#define MMIO_STATUS_GALOG_OVERFLOW_MASK BIT(9)
#define MMIO_STATUS_GALOG_INT_MASK BIT(10)
@@ -381,15 +384,15 @@
*/
#define DTE_FLAG_V BIT_ULL(0)
#define DTE_FLAG_TV BIT_ULL(1)
+#define DTE_FLAG_GIOV BIT_ULL(54)
+#define DTE_FLAG_GV BIT_ULL(55)
+#define DTE_GLX_SHIFT (56)
+#define DTE_GLX_MASK (3)
#define DTE_FLAG_IR BIT_ULL(61)
#define DTE_FLAG_IW BIT_ULL(62)
#define DTE_FLAG_IOTLB BIT_ULL(32)
-#define DTE_FLAG_GIOV BIT_ULL(54)
-#define DTE_FLAG_GV BIT_ULL(55)
#define DTE_FLAG_MASK (0x3ffULL << 32)
-#define DTE_GLX_SHIFT (56)
-#define DTE_GLX_MASK (3)
#define DEV_DOMID_MASK 0xffffULL
#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL)
@@ -702,12 +705,21 @@ struct amd_iommu {
/* event buffer virtual address */
u8 *evt_buf;
+ /* Name for event log interrupt */
+ unsigned char evt_irq_name[16];
+
/* Base of the PPR log, if present */
u8 *ppr_log;
+ /* Name for PPR log interrupt */
+ unsigned char ppr_irq_name[16];
+
/* Base of the GA log, if present */
u8 *ga_log;
+ /* Name for GA log interrupt */
+ unsigned char ga_irq_name[16];
+
/* Tail of the GA log, if present */
u8 *ga_log_tail;
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index ea0f1ab94178..45efb7e5d725 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -483,6 +483,10 @@ static void iommu_disable(struct amd_iommu *iommu)
iommu_feature_disable(iommu, CONTROL_GALOG_EN);
iommu_feature_disable(iommu, CONTROL_GAINT_EN);
+ /* Disable IOMMU PPR logging */
+ iommu_feature_disable(iommu, CONTROL_PPRLOG_EN);
+ iommu_feature_disable(iommu, CONTROL_PPRINT_EN);
+
/* Disable IOMMU hardware itself */
iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
@@ -753,37 +757,61 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu)
}
/*
+ * Interrupt handler has processed all pending events and adjusted head
+ * and tail pointer. Reset overflow mask and restart logging again.
+ */
+static void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
+ u8 cntrl_intr, u8 cntrl_log,
+ u32 status_run_mask, u32 status_overflow_mask)
+{
+ u32 status;
+
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (status & status_run_mask)
+ return;
+
+ pr_info_ratelimited("IOMMU %s log restarting\n", evt_type);
+
+ iommu_feature_disable(iommu, cntrl_log);
+ iommu_feature_disable(iommu, cntrl_intr);
+
+ writel(status_overflow_mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+ iommu_feature_enable(iommu, cntrl_intr);
+ iommu_feature_enable(iommu, cntrl_log);
+}
+
+/*
* This function restarts event logging in case the IOMMU experienced
* an event log buffer overflow.
*/
void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
{
- iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
- iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+ amd_iommu_restart_log(iommu, "Event", CONTROL_EVT_INT_EN,
+ CONTROL_EVT_LOG_EN, MMIO_STATUS_EVT_RUN_MASK,
+ MMIO_STATUS_EVT_OVERFLOW_MASK);
}
/*
* This function restarts event logging in case the IOMMU experienced
- * an GA log overflow.
+ * GA log overflow.
*/
void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
{
- u32 status;
-
- status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
- if (status & MMIO_STATUS_GALOG_RUN_MASK)
- return;
-
- pr_info_ratelimited("IOMMU GA Log restarting\n");
-
- iommu_feature_disable(iommu, CONTROL_GALOG_EN);
- iommu_feature_disable(iommu, CONTROL_GAINT_EN);
-
- writel(MMIO_STATUS_GALOG_OVERFLOW_MASK,
- iommu->mmio_base + MMIO_STATUS_OFFSET);
+ amd_iommu_restart_log(iommu, "GA", CONTROL_GAINT_EN,
+ CONTROL_GALOG_EN, MMIO_STATUS_GALOG_RUN_MASK,
+ MMIO_STATUS_GALOG_OVERFLOW_MASK);
+}
- iommu_feature_enable(iommu, CONTROL_GAINT_EN);
- iommu_feature_enable(iommu, CONTROL_GALOG_EN);
+/*
+ * This function restarts ppr logging in case the IOMMU experienced
+ * PPR log overflow.
+ */
+void amd_iommu_restart_ppr_log(struct amd_iommu *iommu)
+{
+ amd_iommu_restart_log(iommu, "PPR", CONTROL_PPRINT_EN,
+ CONTROL_PPRLOG_EN, MMIO_STATUS_PPR_RUN_MASK,
+ MMIO_STATUS_PPR_OVERFLOW_MASK);
}
/*
@@ -906,6 +934,8 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
if (iommu->ppr_log == NULL)
return;
+ iommu_feature_enable(iommu, CONTROL_PPR_EN);
+
entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
@@ -916,7 +946,7 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu)
writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
iommu_feature_enable(iommu, CONTROL_PPRLOG_EN);
- iommu_feature_enable(iommu, CONTROL_PPR_EN);
+ iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
}
static void __init free_ppr_log(struct amd_iommu *iommu)
@@ -2311,6 +2341,7 @@ static int intcapxt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq
struct irq_data *irqd = irq_domain_get_irq_data(domain, i);
irqd->chip = &intcapxt_controller;
+ irqd->hwirq = info->hwirq;
irqd->chip_data = info->data;
__irq_set_handler(i, handle_edge_irq, 0, "edge");
}
@@ -2337,22 +2368,14 @@ static void intcapxt_unmask_irq(struct irq_data *irqd)
xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
xt.destid_24_31 = cfg->dest_apicid >> 24;
- /**
- * Current IOMMU implementation uses the same IRQ for all
- * 3 IOMMU interrupts.
- */
- writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
- writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
- writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+ writeq(xt.capxt, iommu->mmio_base + irqd->hwirq);
}
static void intcapxt_mask_irq(struct irq_data *irqd)
{
struct amd_iommu *iommu = irqd->chip_data;
- writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
- writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
- writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+ writeq(0, iommu->mmio_base + irqd->hwirq);
}
@@ -2415,7 +2438,8 @@ static struct irq_domain *iommu_get_irqdomain(void)
return iommu_irqdomain;
}
-static int iommu_setup_intcapxt(struct amd_iommu *iommu)
+static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname,
+ int hwirq, irq_handler_t thread_fn)
{
struct irq_domain *domain;
struct irq_alloc_info info;
@@ -2429,6 +2453,7 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu)
init_irq_alloc_info(&info, NULL);
info.type = X86_IRQ_ALLOC_TYPE_AMDVI;
info.data = iommu;
+ info.hwirq = hwirq;
irq = irq_domain_alloc_irqs(domain, 1, node, &info);
if (irq < 0) {
@@ -2437,7 +2462,7 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu)
}
ret = request_threaded_irq(irq, amd_iommu_int_handler,
- amd_iommu_int_thread, 0, "AMD-Vi", iommu);
+ thread_fn, 0, devname, iommu);
if (ret) {
irq_domain_free_irqs(irq, 1);
irq_domain_remove(domain);
@@ -2447,6 +2472,37 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu)
return 0;
}
+static int iommu_setup_intcapxt(struct amd_iommu *iommu)
+{
+ int ret;
+
+ snprintf(iommu->evt_irq_name, sizeof(iommu->evt_irq_name),
+ "AMD-Vi%d-Evt", iommu->index);
+ ret = __iommu_setup_intcapxt(iommu, iommu->evt_irq_name,
+ MMIO_INTCAPXT_EVT_OFFSET,
+ amd_iommu_int_thread_evtlog);
+ if (ret)
+ return ret;
+
+ snprintf(iommu->ppr_irq_name, sizeof(iommu->ppr_irq_name),
+ "AMD-Vi%d-PPR", iommu->index);
+ ret = __iommu_setup_intcapxt(iommu, iommu->ppr_irq_name,
+ MMIO_INTCAPXT_PPR_OFFSET,
+ amd_iommu_int_thread_pprlog);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_IRQ_REMAP
+ snprintf(iommu->ga_irq_name, sizeof(iommu->ga_irq_name),
+ "AMD-Vi%d-GA", iommu->index);
+ ret = __iommu_setup_intcapxt(iommu, iommu->ga_irq_name,
+ MMIO_INTCAPXT_GALOG_OFFSET,
+ amd_iommu_int_thread_galog);
+#endif
+
+ return ret;
+}
+
static int iommu_init_irq(struct amd_iommu *iommu)
{
int ret;
@@ -2472,8 +2528,6 @@ enable_faults:
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
- if (iommu->ppr_log != NULL)
- iommu_feature_enable(iommu, CONTROL_PPRINT_EN);
return 0;
}
@@ -2889,8 +2943,6 @@ static void enable_iommus_vapic(void)
static void enable_iommus(void)
{
early_enable_iommus();
- enable_iommus_vapic();
- enable_iommus_v2();
}
static void disable_iommus(void)
@@ -3154,6 +3206,13 @@ static int amd_iommu_enable_interrupts(void)
goto out;
}
+ /*
+ * Interrupt handler is ready to process interrupts. Enable
+ * PPR and GA log interrupt for all IOMMUs.
+ */
+ enable_iommus_vapic();
+ enable_iommus_v2();
+
out:
return ret;
}
@@ -3233,8 +3292,6 @@ static int __init state_next(void)
register_syscore_ops(&amd_iommu_syscore_ops);
ret = amd_iommu_init_pci();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
- enable_iommus_vapic();
- enable_iommus_v2();
break;
case IOMMU_PCI_INIT:
ret = amd_iommu_enable_interrupts();
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 7d7d3799199a..95bd7c25ba6f 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -841,50 +841,27 @@ static inline void
amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
#endif /* !CONFIG_IRQ_REMAP */
-#define AMD_IOMMU_INT_MASK \
- (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
- MMIO_STATUS_EVT_INT_MASK | \
- MMIO_STATUS_PPR_INT_MASK | \
- MMIO_STATUS_GALOG_OVERFLOW_MASK | \
- MMIO_STATUS_GALOG_INT_MASK)
-
-irqreturn_t amd_iommu_int_thread(int irq, void *data)
+static void amd_iommu_handle_irq(void *data, const char *evt_type,
+ u32 int_mask, u32 overflow_mask,
+ void (*int_handler)(struct amd_iommu *),
+ void (*overflow_handler)(struct amd_iommu *))
{
struct amd_iommu *iommu = (struct amd_iommu *) data;
u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ u32 mask = int_mask | overflow_mask;
- while (status & AMD_IOMMU_INT_MASK) {
+ while (status & mask) {
/* Enable interrupt sources again */
- writel(AMD_IOMMU_INT_MASK,
- iommu->mmio_base + MMIO_STATUS_OFFSET);
+ writel(mask, iommu->mmio_base + MMIO_STATUS_OFFSET);
- if (status & MMIO_STATUS_EVT_INT_MASK) {
- pr_devel("Processing IOMMU Event Log\n");
- iommu_poll_events(iommu);
+ if (int_handler) {
+ pr_devel("Processing IOMMU (ivhd%d) %s Log\n",
+ iommu->index, evt_type);
+ int_handler(iommu);
}
- if (status & MMIO_STATUS_PPR_INT_MASK) {
- pr_devel("Processing IOMMU PPR Log\n");
- iommu_poll_ppr_log(iommu);
- }
-
-#ifdef CONFIG_IRQ_REMAP
- if (status & (MMIO_STATUS_GALOG_INT_MASK |
- MMIO_STATUS_GALOG_OVERFLOW_MASK)) {
- pr_devel("Processing IOMMU GA Log\n");
- iommu_poll_ga_log(iommu);
- }
-
- if (status & MMIO_STATUS_GALOG_OVERFLOW_MASK) {
- pr_info_ratelimited("IOMMU GA Log overflow\n");
- amd_iommu_restart_ga_log(iommu);
- }
-#endif
-
- if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
- pr_info_ratelimited("IOMMU event log overflow\n");
- amd_iommu_restart_event_logging(iommu);
- }
+ if ((status & overflow_mask) && overflow_handler)
+ overflow_handler(iommu);
/*
* Hardware bug: ERBT1312
@@ -901,6 +878,43 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
*/
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
}
+}
+
+irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data)
+{
+ amd_iommu_handle_irq(data, "Evt", MMIO_STATUS_EVT_INT_MASK,
+ MMIO_STATUS_EVT_OVERFLOW_MASK,
+ iommu_poll_events, amd_iommu_restart_event_logging);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data)
+{
+ amd_iommu_handle_irq(data, "PPR", MMIO_STATUS_PPR_INT_MASK,
+ MMIO_STATUS_PPR_OVERFLOW_MASK,
+ iommu_poll_ppr_log, amd_iommu_restart_ppr_log);
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t amd_iommu_int_thread_galog(int irq, void *data)
+{
+#ifdef CONFIG_IRQ_REMAP
+ amd_iommu_handle_irq(data, "GA", MMIO_STATUS_GALOG_INT_MASK,
+ MMIO_STATUS_GALOG_OVERFLOW_MASK,
+ iommu_poll_ga_log, amd_iommu_restart_ga_log);
+#endif
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t amd_iommu_int_thread(int irq, void *data)
+{
+ amd_iommu_int_thread_evtlog(irq, data);
+ amd_iommu_int_thread_pprlog(irq, data);
+ amd_iommu_int_thread_galog(irq, data);
+
return IRQ_HANDLED;
}
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index 2596466cd5a6..57c2fb1146e2 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -262,8 +262,8 @@ static void put_pasid_state(struct pasid_state *pasid_state)
static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
- refcount_dec(&pasid_state->count);
- wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
+ if (!refcount_dec_and_test(&pasid_state->count))
+ wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
free_pasid_state(pasid_state);
}
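
The put_pasid_state_wait() hunk above swaps an unconditional refcount_dec() for refcount_dec_and_test(). The point, under the usual refcount_t semantics (refcount_dec() is generally expected to warn when it drops the counter to zero, since the caller can no longer hold a valid reference), is that the waiter may itself hold the final reference. The patched body reads as follows, with annotations added here:

static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
	/*
	 * Drop our reference first.  refcount_dec_and_test() returns true
	 * only when it takes the count to zero, so the last holder skips
	 * the wait instead of tripping the "dropped to zero" warning that
	 * a bare refcount_dec() would raise; everyone else sleeps until
	 * the remaining holders are gone.
	 */
	if (!refcount_dec_and_test(&pasid_state->count))
		wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));

	free_pasid_state(pasid_state);
}
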
@@ -327,6 +327,9 @@ static void free_pasid_states(struct device_state *dev_state)
put_pasid_state(pasid_state);
+ /* Clear the pasid state so that the pasid can be re-used */
+ clear_pasid_state(dev_state, pasid_state->pasid);
+
/*
* This will call the mn_release function and
* unbind the PASID
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 8af64b57f048..2082081402d3 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -1276,7 +1276,7 @@ static __maybe_unused int apple_dart_resume(struct device *dev)
return 0;
}
-DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dart_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dart_resume);
static const struct of_device_id apple_dart_of_match[] = {
{ .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index dbc812a0e57e..4d83edc2be99 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -80,7 +80,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
* be some overlap between use of both ASIDs, until we invalidate the
* TLB.
*/
- arm_smmu_write_ctx_desc(smmu_domain, 0, cd);
+ arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);
/* Invalidate TLB entries previously associated with that context */
arm_smmu_tlb_inv_asid(smmu, asid);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 9b0dc3505601..e82bf1c449a3 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1059,7 +1059,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
/*
* This function handles the following cases:
*
- * (1) Install primary CD, for normal DMA traffic (SSID = 0).
+ * (1) Install primary CD, for normal DMA traffic (SSID = IOMMU_NO_PASID = 0).
* (2) Install a secondary CD, for SID+SSID traffic.
* (3) Update ASID of a CD. Atomically write the first 64 bits of the
* CD, then invalidate the old entry and mappings.
@@ -1607,7 +1607,7 @@ static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
sid = FIELD_GET(PRIQ_0_SID, evt[0]);
ssv = FIELD_GET(PRIQ_0_SSID_V, evt[0]);
- ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : 0;
+ ssid = ssv ? FIELD_GET(PRIQ_0_SSID, evt[0]) : IOMMU_NO_PASID;
last = FIELD_GET(PRIQ_0_PRG_LAST, evt[0]);
grpid = FIELD_GET(PRIQ_1_PRG_IDX, evt[1]);
@@ -1748,7 +1748,7 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
*/
*cmd = (struct arm_smmu_cmdq_ent) {
.opcode = CMDQ_OP_ATC_INV,
- .substream_valid = !!ssid,
+ .substream_valid = (ssid != IOMMU_NO_PASID),
.atc.ssid = ssid,
};
@@ -1795,7 +1795,7 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
struct arm_smmu_cmdq_ent cmd;
struct arm_smmu_cmdq_batch cmds;
- arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
+ arm_smmu_atc_inv_to_cmd(IOMMU_NO_PASID, 0, 0, &cmd);
cmds.num = 0;
for (i = 0; i < master->num_streams; i++) {
@@ -1875,7 +1875,7 @@ static void arm_smmu_tlb_inv_context(void *cookie)
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
}
- arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
+ arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
}
static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
@@ -1968,7 +1968,7 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
* Unfortunately, this can't be leaf-only since we may have
* zapped an entire table.
*/
- arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size);
+ arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, iova, size);
}
void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
@@ -2055,24 +2055,6 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
return &smmu_domain->domain;
}
-static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
-{
- int idx, size = 1 << span;
-
- do {
- idx = find_first_zero_bit(map, size);
- if (idx == size)
- return -ENOSPC;
- } while (test_and_set_bit(idx, map));
-
- return idx;
-}
-
-static void arm_smmu_bitmap_free(unsigned long *map, int idx)
-{
- clear_bit(idx, map);
-}
-
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -2093,7 +2075,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
} else {
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
if (cfg->vmid)
- arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
+ ida_free(&smmu->vmid_map, cfg->vmid);
}
kfree(smmu_domain);
@@ -2142,7 +2124,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
* the master has been added to the devices list for this domain.
* This isn't an issue because the STE hasn't been installed yet.
*/
- ret = arm_smmu_write_ctx_desc(smmu_domain, 0, &cfg->cd);
+ ret = arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, &cfg->cd);
if (ret)
goto out_free_cd_tables;
@@ -2167,7 +2149,9 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
typeof(&pgtbl_cfg->arm_lpae_s2_cfg.vtcr) vtcr;
- vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
+ /* Reserve VMID 0 for stage-2 bypass STEs */
+ vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1,
+ GFP_KERNEL);
if (vmid < 0)
return vmid;
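Replacing the fixed DECLARE_BITMAP() allocator with an IDA drops the statically sized vmid_map while keeping VMID 0 reserved for stage-2 bypass STEs by starting the range at 1. A minimal, self-contained sketch of the IDA calls used here (example names, not driver code):

#include <linux/idr.h>

static DEFINE_IDA(example_vmid_ida);

/* Allocate from [1, 2^bits - 1], keeping ID 0 reserved as the hunk does. */
static int example_vmid_alloc(unsigned int bits)
{
	return ida_alloc_range(&example_vmid_ida, 1, (1 << bits) - 1,
			       GFP_KERNEL);
}

static void example_vmid_free(int id)
{
	ida_free(&example_vmid_ida, id);
}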
@@ -2328,7 +2312,7 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master)
pdev = to_pci_dev(master->dev);
atomic_inc(&smmu_domain->nr_ats_masters);
- arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
+ arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
if (pci_enable_ats(pdev, stu))
dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
}
@@ -3098,8 +3082,8 @@ static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
reg |= STRTAB_BASE_RA;
smmu->strtab_cfg.strtab_base = reg;
- /* Allocate the first VMID for stage-2 bypass STEs */
- set_bit(0, smmu->vmid_map);
+ ida_init(&smmu->vmid_map);
+
return 0;
}
@@ -3923,6 +3907,7 @@ static void arm_smmu_device_remove(struct platform_device *pdev)
iommu_device_sysfs_remove(&smmu->iommu);
arm_smmu_device_disable(smmu);
iopf_queue_free(smmu->evtq.iopf);
+ ida_destroy(&smmu->vmid_map);
}
static void arm_smmu_device_shutdown(struct platform_device *pdev)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index dcab85698a4e..9915850dd4db 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -670,7 +670,7 @@ struct arm_smmu_device {
#define ARM_SMMU_MAX_VMIDS (1 << 16)
unsigned int vmid_bits;
- DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);
+ struct ida vmid_map;
unsigned int ssid_bits;
unsigned int sid_bits;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
index b5b14108e086..bb89d49adf8d 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom-debug.c
@@ -3,7 +3,7 @@
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
-#include <linux/of_device.h>
+#include <linux/device.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/ratelimit.h>
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index c71afda79d64..7f52ac67495f 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -251,10 +251,12 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sc7280-mss-pil" },
{ .compatible = "qcom,sc8180x-mdss" },
{ .compatible = "qcom,sc8280xp-mdss" },
- { .compatible = "qcom,sm8150-mdss" },
- { .compatible = "qcom,sm8250-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sdm845-mss-pil" },
+ { .compatible = "qcom,sm6350-mdss" },
+ { .compatible = "qcom,sm6375-mdss" },
+ { .compatible = "qcom,sm8150-mdss" },
+ { .compatible = "qcom,sm8250-mdss" },
{ }
};
@@ -528,6 +530,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,sm6125-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sm6350-smmu-v2", .data = &qcom_smmu_v2_data },
{ .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data },
+ { .compatible = "qcom,sm6375-smmu-v2", .data = &qcom_smmu_v2_data },
{ .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data },
{ .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data },
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index a86acd76c1df..d6d1a2a55cc0 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -29,7 +29,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index a503ed758ec3..775a3cbaff4e 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -22,8 +22,7 @@
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -51,14 +50,15 @@ struct qcom_iommu_dev {
struct clk_bulk_data clks[CLK_NUM];
void __iomem *local_base;
u32 sec_id;
- u8 num_ctxs;
- struct qcom_iommu_ctx *ctxs[]; /* indexed by asid-1 */
+ u8 max_asid;
+ struct qcom_iommu_ctx *ctxs[]; /* indexed by asid */
};
struct qcom_iommu_ctx {
struct device *dev;
void __iomem *base;
bool secure_init;
+ bool secured_ctx;
u8 asid; /* asid and ctx bank # are 1:1 */
struct iommu_domain *domain;
};
@@ -94,7 +94,7 @@ static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid
struct qcom_iommu_dev *qcom_iommu = d->iommu;
if (!qcom_iommu)
return NULL;
- return qcom_iommu->ctxs[asid - 1];
+ return qcom_iommu->ctxs[asid];
}
static inline void
@@ -273,6 +273,19 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
ctx->secure_init = true;
}
+ /* Secured QSMMU-500/QSMMU-v2 contexts cannot be programmed */
+ if (ctx->secured_ctx) {
+ ctx->domain = domain;
+ continue;
+ }
+
+ /* Disable context bank before programming */
+ iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
+
+ /* Clear context bank fault address and fault status registers */

+ iommu_writel(ctx, ARM_SMMU_CB_FAR, 0);
+ iommu_writel(ctx, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT);
+
/* TTBRs */
iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
pgtbl_cfg.arm_lpae_s1_cfg.ttbr |
@@ -527,11 +540,10 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
qcom_iommu = platform_get_drvdata(iommu_pdev);
/* make sure the asid specified in dt is valid, so we don't have
- * to sanity check this elsewhere, since 'asid - 1' is used to
- * index into qcom_iommu->ctxs:
+ * to sanity check this elsewhere:
*/
- if (WARN_ON(asid < 1) ||
- WARN_ON(asid > qcom_iommu->num_ctxs)) {
+ if (WARN_ON(asid > qcom_iommu->max_asid) ||
+ WARN_ON(qcom_iommu->ctxs[asid] == NULL)) {
put_device(&iommu_pdev->dev);
return -EINVAL;
}
@@ -617,7 +629,8 @@ free_mem:
static int get_asid(const struct device_node *np)
{
- u32 reg;
+ u32 reg, val;
+ int asid;
/* read the "reg" property directly to get the relative address
* of the context bank, and calculate the asid from that:
@@ -625,7 +638,17 @@ static int get_asid(const struct device_node *np)
if (of_property_read_u32_index(np, "reg", 0, &reg))
return -ENODEV;
- return reg / 0x1000; /* context banks are 0x1000 apart */
+ /*
+ * Context banks are 0x1000 apart but, in some cases, the ASID
+ * number doesn't match this logic and needs to be passed
+ * from the DT configuration explicitly.
+ */
+ if (!of_property_read_u32(np, "qcom,ctx-asid", &val))
+ asid = val;
+ else
+ asid = reg / 0x1000;
+
+ return asid;
}
static int qcom_iommu_ctx_probe(struct platform_device *pdev)
@@ -633,7 +656,6 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev)
struct qcom_iommu_ctx *ctx;
struct device *dev = &pdev->dev;
struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev->parent);
- struct resource *res;
int ret, irq;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
@@ -643,19 +665,22 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev)
ctx->dev = dev;
platform_set_drvdata(pdev, ctx);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->base = devm_ioremap_resource(dev, res);
+ ctx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->base))
return PTR_ERR(ctx->base);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return -ENODEV;
+ return irq;
+
+ if (of_device_is_compatible(dev->of_node, "qcom,msm-iommu-v2-sec"))
+ ctx->secured_ctx = true;
/* clear IRQs before registering fault handler, just in case the
* boot-loader left us a surprise:
*/
- iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));
+ if (!ctx->secured_ctx)
+ iommu_writel(ctx, ARM_SMMU_CB_FSR, iommu_readl(ctx, ARM_SMMU_CB_FSR));
ret = devm_request_irq(dev, irq,
qcom_iommu_fault,
@@ -677,7 +702,7 @@ static int qcom_iommu_ctx_probe(struct platform_device *pdev)
dev_dbg(dev, "found asid %u\n", ctx->asid);
- qcom_iommu->ctxs[ctx->asid - 1] = ctx;
+ qcom_iommu->ctxs[ctx->asid] = ctx;
return 0;
}
@@ -689,12 +714,14 @@ static void qcom_iommu_ctx_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- qcom_iommu->ctxs[ctx->asid - 1] = NULL;
+ qcom_iommu->ctxs[ctx->asid] = NULL;
}
static const struct of_device_id ctx_of_match[] = {
{ .compatible = "qcom,msm-iommu-v1-ns" },
{ .compatible = "qcom,msm-iommu-v1-sec" },
+ { .compatible = "qcom,msm-iommu-v2-ns" },
+ { .compatible = "qcom,msm-iommu-v2-sec" },
{ /* sentinel */ }
};
@@ -712,7 +739,8 @@ static bool qcom_iommu_has_secure_context(struct qcom_iommu_dev *qcom_iommu)
struct device_node *child;
for_each_child_of_node(qcom_iommu->dev->of_node, child) {
- if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec")) {
+ if (of_device_is_compatible(child, "qcom,msm-iommu-v1-sec") ||
+ of_device_is_compatible(child, "qcom,msm-iommu-v2-sec")) {
of_node_put(child);
return true;
}
@@ -736,11 +764,11 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
for_each_child_of_node(dev->of_node, child)
max_asid = max(max_asid, get_asid(child));
- qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid),
+ qcom_iommu = devm_kzalloc(dev, struct_size(qcom_iommu, ctxs, max_asid + 1),
GFP_KERNEL);
if (!qcom_iommu)
return -ENOMEM;
- qcom_iommu->num_ctxs = max_asid;
+ qcom_iommu->max_asid = max_asid;
qcom_iommu->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -856,6 +884,7 @@ static const struct dev_pm_ops qcom_iommu_pm_ops = {
static const struct of_device_id qcom_iommu_of_match[] = {
{ .compatible = "qcom,msm-iommu-v1" },
+ { .compatible = "qcom,msm-iommu-v2" },
{ /* sentinel */ }
};
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index e57724163835..4b1a88f514c9 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -660,7 +660,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
- unsigned long shift, iova_len, iova = 0;
+ unsigned long shift, iova_len, iova;
if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
cookie->msi_iova += size;
@@ -675,15 +675,29 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
- /* Try to get PCI devices a SAC address */
- if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
+ /*
+ * Try to use all the 32-bit PCI addresses first. The original SAC vs.
+ * DAC reasoning loses relevance with PCIe, but enough hardware and
+ * firmware bugs are still lurking out there that it's safest not to
+ * venture into the 64-bit space until necessary.
+ *
+ * If your device goes wrong after seeing the notice then likely either
+ * its driver is not setting DMA masks accurately, the hardware has
+ * some inherent bug in handling >32-bit addresses, or not all the
+ * expected address bits are wired up between the device and the IOMMU.
+ */
+ if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) {
iova = alloc_iova_fast(iovad, iova_len,
DMA_BIT_MASK(32) >> shift, false);
+ if (iova)
+ goto done;
- if (!iova)
- iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
- true);
+ dev->iommu->pci_32bit_workaround = false;
+ dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit));
+ }
+ iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
+done:
return (dma_addr_t)iova << shift;
}
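The long comment above explains the policy: prefer 32-bit IOVA space while the per-device workaround flag is set, and on the first failure clear the flag, print the notice, and fall back to the full DMA mask from then on. A simplified sketch of that fallback shape (not the kernel function; names are assumptions):

static unsigned long example_alloc_iova_pref32(struct iova_domain *iovad,
					       unsigned long len, u64 dma_limit,
					       unsigned long shift, bool *pref32)
{
	unsigned long iova;

	if (dma_limit > DMA_BIT_MASK(32) && *pref32) {
		iova = alloc_iova_fast(iovad, len,
				       DMA_BIT_MASK(32) >> shift, false);
		if (iova)
			return iova;
		*pref32 = false;	/* stop retrying the 32-bit space */
	}
	return alloc_iova_fast(iovad, len, dma_limit >> shift, true);
}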
diff --git a/drivers/iommu/dma-iommu.h b/drivers/iommu/dma-iommu.h
index 942790009292..c829f1f82a99 100644
--- a/drivers/iommu/dma-iommu.h
+++ b/drivers/iommu/dma-iommu.h
@@ -17,6 +17,10 @@ int iommu_dma_init_fq(struct iommu_domain *domain);
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
extern bool iommu_dma_forcedac;
+static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev)
+{
+ dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;
+}
#else /* CONFIG_IOMMU_DMA */
@@ -38,5 +42,9 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he
{
}
+static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev)
+{
+}
+
#endif /* CONFIG_IOMMU_DMA */
#endif /* __DMA_IOMMU_H */
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 9e6f78830ece..5db283c17e0d 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -114,13 +114,17 @@ static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
are never going to work. */
-static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
+static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
{
return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}
+static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
+{
+ return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
+}
static inline unsigned long page_to_dma_pfn(struct page *pg)
{
- return mm_to_dma_pfn(page_to_pfn(pg));
+ return mm_to_dma_pfn_start(page_to_pfn(pg));
}
static inline unsigned long virt_to_dma_pfn(void *p)
{
@@ -878,7 +882,7 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
}
/* For request-without-pasid, get the pasid from context entry */
if (intel_iommu_sm && pasid == IOMMU_PASID_INVALID)
- pasid = PASID_RID2PASID;
+ pasid = IOMMU_NO_PASID;
dir_index = pasid >> PASID_PDE_SHIFT;
pde = &dir[dir_index];
@@ -1360,6 +1364,7 @@ domain_lookup_dev_info(struct dmar_domain *domain,
static void domain_update_iotlb(struct dmar_domain *domain)
{
+ struct dev_pasid_info *dev_pasid;
struct device_domain_info *info;
bool has_iotlb_device = false;
unsigned long flags;
@@ -1371,6 +1376,14 @@ static void domain_update_iotlb(struct dmar_domain *domain)
break;
}
}
+
+ list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
+ info = dev_iommu_priv_get(dev_pasid->dev);
+ if (info->ats_enabled) {
+ has_iotlb_device = true;
+ break;
+ }
+ }
domain->has_iotlb_device = has_iotlb_device;
spin_unlock_irqrestore(&domain->lock, flags);
}
@@ -1450,12 +1463,13 @@ static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
qdep = info->ats_qdep;
qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
qdep, addr, mask);
- quirk_extra_dev_tlb_flush(info, addr, mask, PASID_RID2PASID, qdep);
+ quirk_extra_dev_tlb_flush(info, addr, mask, IOMMU_NO_PASID, qdep);
}
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
+ struct dev_pasid_info *dev_pasid;
struct device_domain_info *info;
unsigned long flags;
@@ -1465,6 +1479,36 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link)
__iommu_flush_dev_iotlb(info, addr, mask);
+
+ list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
+ info = dev_iommu_priv_get(dev_pasid->dev);
+
+ if (!info->ats_enabled)
+ continue;
+
+ qi_flush_dev_iotlb_pasid(info->iommu,
+ PCI_DEVID(info->bus, info->devfn),
+ info->pfsid, dev_pasid->pasid,
+ info->ats_qdep, addr,
+ mask);
+ }
+ spin_unlock_irqrestore(&domain->lock, flags);
+}
+
+static void domain_flush_pasid_iotlb(struct intel_iommu *iommu,
+ struct dmar_domain *domain, u64 addr,
+ unsigned long npages, bool ih)
+{
+ u16 did = domain_id_iommu(domain, iommu);
+ struct dev_pasid_info *dev_pasid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain)
+ qi_flush_piotlb(iommu, did, dev_pasid->pasid, addr, npages, ih);
+
+ if (!list_empty(&domain->devices))
+ qi_flush_piotlb(iommu, did, IOMMU_NO_PASID, addr, npages, ih);
spin_unlock_irqrestore(&domain->lock, flags);
}
@@ -1485,7 +1529,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
ih = 1 << 6;
if (domain->use_first_level) {
- qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih);
+ domain_flush_pasid_iotlb(iommu, domain, addr, pages, ih);
} else {
unsigned long bitmask = aligned_pages - 1;
@@ -1555,7 +1599,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
u16 did = domain_id_iommu(dmar_domain, iommu);
if (dmar_domain->use_first_level)
- qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0);
+ domain_flush_pasid_iotlb(iommu, dmar_domain, 0, -1, 0);
else
iommu->flush.flush_iotlb(iommu, did, 0, 0,
DMA_TLB_DSI_FLUSH);
@@ -1727,6 +1771,7 @@ static struct dmar_domain *alloc_domain(unsigned int type)
domain->use_first_level = true;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
+ INIT_LIST_HEAD(&domain->dev_pasids);
spin_lock_init(&domain->lock);
xa_init(&domain->iommu_array);
@@ -1941,7 +1986,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
context_pdts(pds);
/* Setup the RID_PASID field: */
- context_set_sm_rid2pasid(context, PASID_RID2PASID);
+ context_set_sm_rid2pasid(context, IOMMU_NO_PASID);
/*
* Setup the Device-TLB enable bit and Page request
@@ -2363,8 +2408,8 @@ static int __init si_domain_init(int hw)
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
ret = iommu_domain_identity_map(si_domain,
- mm_to_dma_pfn(start_pfn),
- mm_to_dma_pfn(end_pfn));
+ mm_to_dma_pfn_start(start_pfn),
+ mm_to_dma_pfn_end(end_pfn));
if (ret)
return ret;
}
@@ -2385,8 +2430,8 @@ static int __init si_domain_init(int hw)
continue;
ret = iommu_domain_identity_map(si_domain,
- mm_to_dma_pfn(start >> PAGE_SHIFT),
- mm_to_dma_pfn(end >> PAGE_SHIFT));
+ mm_to_dma_pfn_start(start >> PAGE_SHIFT),
+ mm_to_dma_pfn_end(end >> PAGE_SHIFT));
if (ret)
return ret;
}
@@ -2421,13 +2466,13 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
/* Setup the PASID entry for requests without PASID: */
if (hw_pass_through && domain_type_is_si(domain))
ret = intel_pasid_setup_pass_through(iommu, domain,
- dev, PASID_RID2PASID);
+ dev, IOMMU_NO_PASID);
else if (domain->use_first_level)
ret = domain_setup_first_level(iommu, domain, dev,
- PASID_RID2PASID);
+ IOMMU_NO_PASID);
else
ret = intel_pasid_setup_second_level(iommu, domain,
- dev, PASID_RID2PASID);
+ dev, IOMMU_NO_PASID);
if (ret) {
dev_err(dev, "Setup RID2PASID failed\n");
device_block_translation(dev);
@@ -2447,30 +2492,6 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
return 0;
}
-static bool device_has_rmrr(struct device *dev)
-{
- struct dmar_rmrr_unit *rmrr;
- struct device *tmp;
- int i;
-
- rcu_read_lock();
- for_each_rmrr_units(rmrr) {
- /*
- * Return TRUE if this RMRR contains the device that
- * is passed in.
- */
- for_each_active_dev_scope(rmrr->devices,
- rmrr->devices_cnt, i, tmp)
- if (tmp == dev ||
- is_downstream_to_pci_bridge(dev, tmp)) {
- rcu_read_unlock();
- return true;
- }
- }
- rcu_read_unlock();
- return false;
-}
-
/**
* device_rmrr_is_relaxable - Test whether the RMRR of this device
* is relaxable (ie. is allowed to be not enforced under some conditions)
@@ -2501,34 +2522,6 @@ static bool device_rmrr_is_relaxable(struct device *dev)
}
/*
- * There are a couple cases where we need to restrict the functionality of
- * devices associated with RMRRs. The first is when evaluating a device for
- * identity mapping because problems exist when devices are moved in and out
- * of domains and their respective RMRR information is lost. This means that
- * a device with associated RMRRs will never be in a "passthrough" domain.
- * The second is use of the device through the IOMMU API. This interface
- * expects to have full control of the IOVA space for the device. We cannot
- * satisfy both the requirement that RMRR access is maintained and have an
- * unencumbered IOVA space. We also have no ability to quiesce the device's
- * use of the RMRR space or even inform the IOMMU API user of the restriction.
- * We therefore prevent devices associated with an RMRR from participating in
- * the IOMMU API, which eliminates them from device assignment.
- *
- * In both cases, devices which have relaxable RMRRs are not concerned by this
- * restriction. See device_rmrr_is_relaxable comment.
- */
-static bool device_is_rmrr_locked(struct device *dev)
-{
- if (!device_has_rmrr(dev))
- return false;
-
- if (device_rmrr_is_relaxable(dev))
- return false;
-
- return true;
-}
-
-/*
* Return the required default domain type for a specific device.
*
* @dev: the device in query
@@ -3561,8 +3554,8 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
unsigned long val, void *v)
{
struct memory_notify *mhp = v;
- unsigned long start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
- unsigned long last_vpfn = mm_to_dma_pfn(mhp->start_pfn +
+ unsigned long start_vpfn = mm_to_dma_pfn_start(mhp->start_pfn);
+ unsigned long last_vpfn = mm_to_dma_pfn_end(mhp->start_pfn +
mhp->nr_pages - 1);
switch (val) {
@@ -3757,7 +3750,6 @@ static int __init probe_acpi_namespace_devices(void)
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, dev) {
struct acpi_device_physical_node *pn;
- struct iommu_group *group;
struct acpi_device *adev;
if (dev->bus != &acpi_bus_type)
@@ -3767,12 +3759,6 @@ static int __init probe_acpi_namespace_devices(void)
mutex_lock(&adev->physical_node_lock);
list_for_each_entry(pn,
&adev->physical_node_list, node) {
- group = iommu_group_get(pn->dev);
- if (group) {
- iommu_group_put(group);
- continue;
- }
-
ret = iommu_probe_device(pn->dev);
if (ret)
break;
@@ -3969,7 +3955,7 @@ static void dmar_remove_one_dev_info(struct device *dev)
if (!dev_is_real_dma_subdevice(info->dev)) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, info->dev,
- PASID_RID2PASID, false);
+ IOMMU_NO_PASID, false);
iommu_disable_pci_caps(info);
domain_context_clear(info);
@@ -3998,7 +3984,7 @@ static void device_block_translation(struct device *dev)
if (!dev_is_real_dma_subdevice(dev)) {
if (sm_supported(iommu))
intel_pasid_tear_down_entry(iommu, dev,
- PASID_RID2PASID, false);
+ IOMMU_NO_PASID, false);
else
domain_context_clear(info);
}
@@ -4140,12 +4126,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device_domain_info *info = dev_iommu_priv_get(dev);
int ret;
- if (domain->type == IOMMU_DOMAIN_UNMANAGED &&
- device_is_rmrr_locked(dev)) {
- dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
- return -EPERM;
- }
-
if (info->domain)
device_block_translation(dev);
@@ -4272,7 +4252,7 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
unsigned long i;
nrpages = aligned_nrpages(gather->start, size);
- start_pfn = mm_to_dma_pfn(iova_pfn);
+ start_pfn = mm_to_dma_pfn_start(iova_pfn);
xa_for_each(&dmar_domain->iommu_array, i, info)
iommu_flush_iotlb_psi(info->iommu, dmar_domain,
@@ -4332,7 +4312,7 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
list_for_each_entry(info, &domain->devices, link)
intel_pasid_setup_page_snoop_control(info->iommu, info->dev,
- PASID_RID2PASID);
+ IOMMU_NO_PASID);
}
static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
@@ -4714,23 +4694,96 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
{
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
+ struct dev_pasid_info *curr, *dev_pasid = NULL;
+ struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
+ unsigned long flags;
- /* Domain type specific cleanup: */
domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0);
- if (domain) {
- switch (domain->type) {
- case IOMMU_DOMAIN_SVA:
- intel_svm_remove_dev_pasid(dev, pasid);
- break;
- default:
- /* should never reach here */
- WARN_ON(1);
+ if (WARN_ON_ONCE(!domain))
+ goto out_tear_down;
+
+ /*
+ * The SVA implementation needs to handle its own stuff, like the mm
+ * notification. Before consolidating that code into iommu core, let
+ * the intel sva code handle it.
+ */
+ if (domain->type == IOMMU_DOMAIN_SVA) {
+ intel_svm_remove_dev_pasid(dev, pasid);
+ goto out_tear_down;
+ }
+
+ dmar_domain = to_dmar_domain(domain);
+ spin_lock_irqsave(&dmar_domain->lock, flags);
+ list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) {
+ if (curr->dev == dev && curr->pasid == pasid) {
+ list_del(&curr->link_domain);
+ dev_pasid = curr;
break;
}
}
+ WARN_ON_ONCE(!dev_pasid);
+ spin_unlock_irqrestore(&dmar_domain->lock, flags);
+ domain_detach_iommu(dmar_domain, iommu);
+ kfree(dev_pasid);
+out_tear_down:
intel_pasid_tear_down_entry(iommu, dev, pasid, false);
+ intel_drain_pasid_prq(dev, pasid);
+}
+
+static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct intel_iommu *iommu = info->iommu;
+ struct dev_pasid_info *dev_pasid;
+ unsigned long flags;
+ int ret;
+
+ if (!pasid_supported(iommu) || dev_is_real_dma_subdevice(dev))
+ return -EOPNOTSUPP;
+
+ if (context_copied(iommu, info->bus, info->devfn))
+ return -EBUSY;
+
+ ret = prepare_domain_attach_device(domain, dev);
+ if (ret)
+ return ret;
+
+ dev_pasid = kzalloc(sizeof(*dev_pasid), GFP_KERNEL);
+ if (!dev_pasid)
+ return -ENOMEM;
+
+ ret = domain_attach_iommu(dmar_domain, iommu);
+ if (ret)
+ goto out_free;
+
+ if (domain_type_is_si(dmar_domain))
+ ret = intel_pasid_setup_pass_through(iommu, dmar_domain,
+ dev, pasid);
+ else if (dmar_domain->use_first_level)
+ ret = domain_setup_first_level(iommu, dmar_domain,
+ dev, pasid);
+ else
+ ret = intel_pasid_setup_second_level(iommu, dmar_domain,
+ dev, pasid);
+ if (ret)
+ goto out_detach_iommu;
+
+ dev_pasid->dev = dev;
+ dev_pasid->pasid = pasid;
+ spin_lock_irqsave(&dmar_domain->lock, flags);
+ list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids);
+ spin_unlock_irqrestore(&dmar_domain->lock, flags);
+
+ return 0;
+out_detach_iommu:
+ domain_detach_iommu(dmar_domain, iommu);
+out_free:
+ kfree(dev_pasid);
+ return ret;
}
static void *intel_iommu_hw_info(struct device *dev, u32 *length, u32 *type)
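The new set_dev_pasid callback above is what the IOMMU core invokes when a client attaches a domain to a specific PASID. A hedged usage sketch from a hypothetical caller (not from this patch; the PASID value is an arbitrary assumption):

static int example_attach_pasid(struct device *dev, struct iommu_domain *dom)
{
	ioasid_t pasid = 10;	/* assumed, driver-chosen value */
	int ret;

	ret = iommu_attach_device_pasid(dom, dev, pasid);
	if (ret)
		return ret;

	/* DMA tagged with this PASID now translates through 'dom'. */

	iommu_detach_device_pasid(dom, dev, pasid);
	return 0;
}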
@@ -4770,6 +4823,7 @@ const struct iommu_ops intel_iommu_ops = {
#endif
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = intel_iommu_attach_device,
+ .set_dev_pasid = intel_iommu_set_dev_pasid,
.map_pages = intel_iommu_map_pages,
.unmap_pages = intel_iommu_unmap_pages,
.iotlb_sync_map = intel_iommu_iotlb_sync_map,
@@ -5006,7 +5060,7 @@ void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
return;
sid = PCI_DEVID(info->bus, info->devfn);
- if (pasid == PASID_RID2PASID) {
+ if (pasid == IOMMU_NO_PASID) {
qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
qdep, address, mask);
} else {
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 1c5e1d88862b..c18fb699c87a 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -595,6 +595,7 @@ struct dmar_domain {
spinlock_t lock; /* Protect device tracking lists */
struct list_head devices; /* all devices' list */
+ struct list_head dev_pasids; /* all attached pasids */
struct dma_pte *pgd; /* virtual address */
int gaw; /* max guest address width */
@@ -717,6 +718,12 @@ struct device_domain_info {
struct pasid_table *pasid_table; /* pasid table */
};
+struct dev_pasid_info {
+ struct list_head link_domain; /* link to domain siblings */
+ struct device *dev;
+ ioasid_t pasid;
+};
+
static inline void __iommu_flush_cache(
struct intel_iommu *iommu, void *addr, int size)
{
@@ -844,6 +851,7 @@ int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
struct iommu_page_response *msg);
struct iommu_domain *intel_svm_domain_alloc(void);
void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);
+void intel_drain_pasid_prq(struct device *dev, u32 pasid);
struct intel_svm_dev {
struct list_head list;
@@ -862,6 +870,7 @@ struct intel_svm {
};
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
+static inline void intel_drain_pasid_prq(struct device *dev, u32 pasid) {}
static inline struct iommu_domain *intel_svm_domain_alloc(void)
{
return NULL;
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index c5d479770e12..8f92b92f3d2a 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -129,7 +129,7 @@ int intel_pasid_alloc_table(struct device *dev)
info->pasid_table = pasid_table;
if (!ecap_coherent(info->iommu->ecap))
- clflush_cache_range(pasid_table->table, size);
+ clflush_cache_range(pasid_table->table, (1 << order) * PAGE_SIZE);
return 0;
}
@@ -438,7 +438,7 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
* SVA usage, device could do DMA with multiple PASIDs. It is more
* efficient to flush devTLB specific to the PASID.
*/
- if (pasid == PASID_RID2PASID)
+ if (pasid == IOMMU_NO_PASID)
qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
else
qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index d6b7d21244b1..4e9e68c3c388 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -10,8 +10,6 @@
#ifndef __INTEL_PASID_H
#define __INTEL_PASID_H
-#define PASID_RID2PASID 0x0
-#define PASID_MIN 0x1
#define PASID_MAX 0x100000
#define PASID_PTE_MASK 0x3F
#define PASID_PTE_PRESENT 1
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 8f6d68006ab6..50a481c895b8 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -26,8 +26,6 @@
#include "trace.h"
static irqreturn_t prq_event_thread(int irq, void *d);
-static void intel_svm_drain_prq(struct device *dev, u32 pasid);
-#define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)
static DEFINE_XARRAY_ALLOC(pasid_private_array);
static int pasid_private_add(ioasid_t pasid, void *priv)
@@ -259,8 +257,6 @@ static const struct mmu_notifier_ops intel_mmuops = {
.arch_invalidate_secondary_tlbs = intel_arch_invalidate_secondary_tlbs,
};
-static DEFINE_MUTEX(pasid_mutex);
-
static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
struct intel_svm **rsvm,
struct intel_svm_dev **rsdev)
@@ -268,10 +264,6 @@ static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
struct intel_svm_dev *sdev = NULL;
struct intel_svm *svm;
- /* The caller should hold the pasid_mutex lock */
- if (WARN_ON(!mutex_is_locked(&pasid_mutex)))
- return -EINVAL;
-
if (pasid == IOMMU_PASID_INVALID || pasid >= PASID_MAX)
return -EINVAL;
@@ -371,37 +363,23 @@ free_svm:
return ret;
}
-/* Caller must hold pasid_mutex */
-static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
+void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
{
struct intel_svm_dev *sdev;
struct intel_iommu *iommu;
struct intel_svm *svm;
struct mm_struct *mm;
- int ret = -EINVAL;
iommu = device_to_iommu(dev, NULL, NULL);
if (!iommu)
- goto out;
+ return;
- ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
- if (ret)
- goto out;
+ if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev))
+ return;
mm = svm->mm;
if (sdev) {
list_del_rcu(&sdev->list);
- /*
- * Flush the PASID cache and IOTLB for this device.
- * Note that we do depend on the hardware *not* using
- * the PASID any more. Just as we depend on other
- * devices never using PASIDs that they have no right
- * to use. We have a *shared* PASID table, because it's
- * large and has to be physically contiguous. So it's
- * hard to be as defensive as we might like.
- */
- intel_pasid_tear_down_entry(iommu, dev, svm->pasid, false);
- intel_svm_drain_prq(dev, svm->pasid);
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
@@ -418,8 +396,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
kfree(svm);
}
}
-out:
- return ret;
}
/* Page request queue descriptor */
@@ -460,7 +436,7 @@ static bool is_canonical_address(u64 addr)
}
/**
- * intel_svm_drain_prq - Drain page requests and responses for a pasid
+ * intel_drain_pasid_prq - Drain page requests and responses for a pasid
* @dev: target device
* @pasid: pasid for draining
*
@@ -474,7 +450,7 @@ static bool is_canonical_address(u64 addr)
* described in VT-d spec CH7.10 to drain all page requests and page
* responses pending in the hardware.
*/
-static void intel_svm_drain_prq(struct device *dev, u32 pasid)
+void intel_drain_pasid_prq(struct device *dev, u32 pasid)
{
struct device_domain_info *info;
struct dmar_domain *domain;
@@ -520,19 +496,7 @@ prq_retry:
goto prq_retry;
}
- /*
- * A work in IO page fault workqueue may try to lock pasid_mutex now.
- * Holding pasid_mutex while waiting in iopf_queue_flush_dev() for
- * all works in the workqueue to finish may cause deadlock.
- *
- * It's unnecessary to hold pasid_mutex in iopf_queue_flush_dev().
- * Unlock it to allow the works to be handled while waiting for
- * them to finish.
- */
- lockdep_assert_held(&pasid_mutex);
- mutex_unlock(&pasid_mutex);
iopf_queue_flush_dev(dev);
- mutex_lock(&pasid_mutex);
/*
* Perform steps described in VT-d spec CH7.10 to drain page
@@ -827,26 +791,14 @@ out:
return ret;
}
-void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid)
-{
- mutex_lock(&pasid_mutex);
- intel_svm_unbind_mm(dev, pasid);
- mutex_unlock(&pasid_mutex);
-}
-
static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct mm_struct *mm = domain->mm;
- int ret;
- mutex_lock(&pasid_mutex);
- ret = intel_svm_bind_mm(iommu, dev, mm);
- mutex_unlock(&pasid_mutex);
-
- return ret;
+ return intel_svm_bind_mm(iommu, dev, mm);
}
static void intel_svm_domain_free(struct iommu_domain *domain)
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index 05c0fb2acbc4..b78671a8a914 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -10,34 +10,30 @@
#include "iommu-sva.h"
static DEFINE_MUTEX(iommu_sva_lock);
-static DEFINE_IDA(iommu_global_pasid_ida);
/* Allocate a PASID for the mm within range (inclusive) */
-static int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
+static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)
{
+ ioasid_t pasid;
int ret = 0;
- if (min == IOMMU_PASID_INVALID ||
- max == IOMMU_PASID_INVALID ||
- min == 0 || max < min)
- return -EINVAL;
-
if (!arch_pgtable_dma_compat(mm))
return -EBUSY;
mutex_lock(&iommu_sva_lock);
/* Is a PASID already associated with this mm? */
if (mm_valid_pasid(mm)) {
- if (mm->pasid < min || mm->pasid > max)
+ if (mm->pasid >= dev->iommu->max_pasids)
ret = -EOVERFLOW;
goto out;
}
- ret = ida_alloc_range(&iommu_global_pasid_ida, min, max, GFP_KERNEL);
- if (ret < 0)
+ pasid = iommu_alloc_global_pasid(dev);
+ if (pasid == IOMMU_PASID_INVALID) {
+ ret = -ENOSPC;
goto out;
-
- mm->pasid = ret;
+ }
+ mm->pasid = pasid;
ret = 0;
out:
mutex_unlock(&iommu_sva_lock);
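With the rework above, the PASID for an mm comes from iommu_alloc_global_pasid() bounded by the device's max_pasids rather than from a local IDA. A hedged sketch of the consumer-facing flow (hypothetical driver code, not from this patch); the allocation shown in the hunk happens underneath iommu_sva_bind_device():

static int example_bind(struct device *dev, struct mm_struct *mm)
{
	struct iommu_sva *handle;

	handle = iommu_sva_bind_device(dev, mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* mm->pasid now holds the globally allocated PASID for this mm. */

	iommu_sva_unbind_device(handle);
	return 0;
}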
@@ -64,15 +60,10 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
{
struct iommu_domain *domain;
struct iommu_sva *handle;
- ioasid_t max_pasids;
int ret;
- max_pasids = dev->iommu->max_pasids;
- if (!max_pasids)
- return ERR_PTR(-EOPNOTSUPP);
-
/* Allocate mm->pasid if necessary. */
- ret = iommu_sva_alloc_pasid(mm, 1, max_pasids - 1);
+ ret = iommu_sva_alloc_pasid(mm, dev);
if (ret)
return ERR_PTR(ret);
@@ -217,5 +208,5 @@ void mm_pasid_drop(struct mm_struct *mm)
if (likely(!mm_valid_pasid(mm)))
return;
- ida_free(&iommu_global_pasid_ida, mm->pasid);
+ iommu_free_global_pasid(mm->pasid);
}
diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c
index 99869217fbec..cbe378c34ba3 100644
--- a/drivers/iommu/iommu-sysfs.c
+++ b/drivers/iommu/iommu-sysfs.c
@@ -107,9 +107,6 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
{
int ret;
- if (!iommu || IS_ERR(iommu))
- return -ENODEV;
-
ret = sysfs_add_link_to_group(&iommu->dev->kobj, "devices",
&link->kobj, dev_name(link));
if (ret)
@@ -122,14 +119,9 @@ int iommu_device_link(struct iommu_device *iommu, struct device *link)
return ret;
}
-EXPORT_SYMBOL_GPL(iommu_device_link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
{
- if (!iommu || IS_ERR(iommu))
- return;
-
sysfs_remove_link(&link->kobj, "iommu");
sysfs_remove_link_from_group(&iommu->dev->kobj, "devices", dev_name(link));
}
-EXPORT_SYMBOL_GPL(iommu_device_unlink);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index c9c370de9d3d..3bfc56df4f78 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -41,6 +41,7 @@
static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
+static DEFINE_IDA(iommu_global_pasid_ida);
static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
@@ -129,9 +130,12 @@ static int iommu_setup_default_domain(struct iommu_group *group,
int target_type);
static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
struct device *dev);
-static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
static ssize_t iommu_group_store_type(struct iommu_group *group,
const char *buf, size_t count);
+static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
+ struct device *dev);
+static void __iommu_group_free_device(struct iommu_group *group,
+ struct group_device *grp_dev);
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \
@@ -377,28 +381,18 @@ static u32 dev_iommu_get_max_pasids(struct device *dev)
return min_t(u32, max_pasids, dev->iommu->iommu_dev->max_pasids);
}
-static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
+/*
+ * Init the dev->iommu and dev->iommu_group in the struct device and get the
+ * driver probed
+ */
+static int iommu_init_device(struct device *dev, const struct iommu_ops *ops)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
struct iommu_device *iommu_dev;
struct iommu_group *group;
- static DEFINE_MUTEX(iommu_probe_device_lock);
int ret;
- if (!ops)
- return -ENODEV;
- /*
- * Serialise to avoid races between IOMMU drivers registering in
- * parallel and/or the "replay" calls from ACPI/OF code via client
- * driver probe. Once the latter have been cleaned up we should
- * probably be able to use device_lock() here to minimise the scope,
- * but for now enforcing a simple global ordering is fine.
- */
- mutex_lock(&iommu_probe_device_lock);
- if (!dev_iommu_get(dev)) {
- ret = -ENOMEM;
- goto err_unlock;
- }
+ if (!dev_iommu_get(dev))
+ return -ENOMEM;
if (!try_module_get(ops->owner)) {
ret = -EINVAL;
@@ -408,124 +402,184 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
iommu_dev = ops->probe_device(dev);
if (IS_ERR(iommu_dev)) {
ret = PTR_ERR(iommu_dev);
- goto out_module_put;
+ goto err_module_put;
}
- dev->iommu->iommu_dev = iommu_dev;
- dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
- if (ops->is_attach_deferred)
- dev->iommu->attach_deferred = ops->is_attach_deferred(dev);
+ ret = iommu_device_link(iommu_dev, dev);
+ if (ret)
+ goto err_release;
- group = iommu_group_get_for_dev(dev);
+ group = ops->device_group(dev);
+ if (WARN_ON_ONCE(group == NULL))
+ group = ERR_PTR(-EINVAL);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
- goto out_release;
+ goto err_unlink;
}
+ dev->iommu_group = group;
- mutex_lock(&group->mutex);
- if (group_list && !group->default_domain && list_empty(&group->entry))
- list_add_tail(&group->entry, group_list);
- mutex_unlock(&group->mutex);
- iommu_group_put(group);
-
- mutex_unlock(&iommu_probe_device_lock);
- iommu_device_link(iommu_dev, dev);
-
+ dev->iommu->iommu_dev = iommu_dev;
+ dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
+ if (ops->is_attach_deferred)
+ dev->iommu->attach_deferred = ops->is_attach_deferred(dev);
return 0;
-out_release:
+err_unlink:
+ iommu_device_unlink(iommu_dev, dev);
+err_release:
if (ops->release_device)
ops->release_device(dev);
-
-out_module_put:
+err_module_put:
module_put(ops->owner);
-
err_free:
dev_iommu_free(dev);
+ return ret;
+}
-err_unlock:
- mutex_unlock(&iommu_probe_device_lock);
+static void iommu_deinit_device(struct device *dev)
+{
+ struct iommu_group *group = dev->iommu_group;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- return ret;
+ lockdep_assert_held(&group->mutex);
+
+ iommu_device_unlink(dev->iommu->iommu_dev, dev);
+
+ /*
+ * release_device() must stop using any attached domain on the device.
+ * If there are still other devices in the group they are not affected
+ * by this callback.
+ *
+ * The IOMMU driver must set the device to either an identity or
+ * blocking translation and stop using any domain pointer, as it is
+ * going to be freed.
+ */
+ if (ops->release_device)
+ ops->release_device(dev);
+
+ /*
+ * If this is the last driver to use the group then we must free the
+ * domains before we do the module_put().
+ */
+ if (list_empty(&group->devices)) {
+ if (group->default_domain) {
+ iommu_domain_free(group->default_domain);
+ group->default_domain = NULL;
+ }
+ if (group->blocking_domain) {
+ iommu_domain_free(group->blocking_domain);
+ group->blocking_domain = NULL;
+ }
+ group->domain = NULL;
+ }
+
+ /* Caller must put iommu_group */
+ dev->iommu_group = NULL;
+ module_put(ops->owner);
+ dev_iommu_free(dev);
}
-int iommu_probe_device(struct device *dev)
+static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
- const struct iommu_ops *ops;
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
struct iommu_group *group;
+ static DEFINE_MUTEX(iommu_probe_device_lock);
+ struct group_device *gdev;
int ret;
- ret = __iommu_probe_device(dev, NULL);
- if (ret)
- goto err_out;
+ if (!ops)
+ return -ENODEV;
+ /*
+ * Serialise to avoid races between IOMMU drivers registering in
+ * parallel and/or the "replay" calls from ACPI/OF code via client
+ * driver probe. Once the latter have been cleaned up we should
+ * probably be able to use device_lock() here to minimise the scope,
+ * but for now enforcing a simple global ordering is fine.
+ */
+ mutex_lock(&iommu_probe_device_lock);
- group = iommu_group_get(dev);
- if (!group) {
- ret = -ENODEV;
- goto err_release;
+ /* Device is probed already if in a group */
+ if (dev->iommu_group) {
+ ret = 0;
+ goto out_unlock;
}
+ ret = iommu_init_device(dev, ops);
+ if (ret)
+ goto out_unlock;
+
+ group = dev->iommu_group;
+ gdev = iommu_group_alloc_device(group, dev);
mutex_lock(&group->mutex);
+ if (IS_ERR(gdev)) {
+ ret = PTR_ERR(gdev);
+ goto err_put_group;
+ }
+ /*
+ * The gdev must be in the list before calling
+ * iommu_setup_default_domain()
+ */
+ list_add_tail(&gdev->list, &group->devices);
+ WARN_ON(group->default_domain && !group->domain);
if (group->default_domain)
iommu_create_device_direct_mappings(group->default_domain, dev);
-
if (group->domain) {
ret = __iommu_device_set_domain(group, dev, group->domain, 0);
if (ret)
- goto err_unlock;
- } else if (!group->default_domain) {
+ goto err_remove_gdev;
+ } else if (!group->default_domain && !group_list) {
ret = iommu_setup_default_domain(group, 0);
if (ret)
- goto err_unlock;
+ goto err_remove_gdev;
+ } else if (!group->default_domain) {
+ /*
+ * With a group_list argument we defer the default_domain setup
+ * to the caller by providing a de-duplicated list of groups
+ * that need further setup.
+ */
+ if (list_empty(&group->entry))
+ list_add_tail(&group->entry, group_list);
}
-
mutex_unlock(&group->mutex);
- iommu_group_put(group);
+ mutex_unlock(&iommu_probe_device_lock);
- ops = dev_iommu_ops(dev);
- if (ops->probe_finalize)
- ops->probe_finalize(dev);
+ if (dev_is_pci(dev))
+ iommu_dma_set_pci_32bit_workaround(dev);
return 0;
-err_unlock:
+err_remove_gdev:
+ list_del(&gdev->list);
+ __iommu_group_free_device(group, gdev);
+err_put_group:
+ iommu_deinit_device(dev);
mutex_unlock(&group->mutex);
iommu_group_put(group);
-err_release:
- iommu_release_device(dev);
+out_unlock:
+ mutex_unlock(&iommu_probe_device_lock);
-err_out:
return ret;
-
}
-/*
- * Remove a device from a group's device list and return the group device
- * if successful.
- */
-static struct group_device *
-__iommu_group_remove_device(struct iommu_group *group, struct device *dev)
+int iommu_probe_device(struct device *dev)
{
- struct group_device *device;
+ const struct iommu_ops *ops;
+ int ret;
- lockdep_assert_held(&group->mutex);
- for_each_group_device(group, device) {
- if (device->dev == dev) {
- list_del(&device->list);
- return device;
- }
- }
+ ret = __iommu_probe_device(dev, NULL);
+ if (ret)
+ return ret;
- return NULL;
+ ops = dev_iommu_ops(dev);
+ if (ops->probe_finalize)
+ ops->probe_finalize(dev);
+
+ return 0;
}
-/*
- * Release a device from its group and decrements the iommu group reference
- * count.
- */
-static void __iommu_group_release_device(struct iommu_group *group,
- struct group_device *grp_dev)
+static void __iommu_group_free_device(struct iommu_group *group,
+ struct group_device *grp_dev)
{
struct device *dev = grp_dev->dev;
@@ -534,54 +588,57 @@ static void __iommu_group_release_device(struct iommu_group *group,
trace_remove_device_from_group(group->id, dev);
+ /*
+ * If the group has become empty then ownership must have been
+ * released, and the current domain must be set back to NULL or
+ * the default domain.
+ */
+ if (list_empty(&group->devices))
+ WARN_ON(group->owner_cnt ||
+ group->domain != group->default_domain);
+
kfree(grp_dev->name);
kfree(grp_dev);
- dev->iommu_group = NULL;
- kobject_put(group->devices_kobj);
}
-static void iommu_release_device(struct device *dev)
+/* Remove the iommu_group from the struct device. */
+static void __iommu_group_remove_device(struct device *dev)
{
struct iommu_group *group = dev->iommu_group;
struct group_device *device;
- const struct iommu_ops *ops;
-
- if (!dev->iommu || !group)
- return;
-
- iommu_device_unlink(dev->iommu->iommu_dev, dev);
mutex_lock(&group->mutex);
- device = __iommu_group_remove_device(group, dev);
+ for_each_group_device(group, device) {
+ if (device->dev != dev)
+ continue;
- /*
- * If the group has become empty then ownership must have been released,
- * and the current domain must be set back to NULL or the default
- * domain.
- */
- if (list_empty(&group->devices))
- WARN_ON(group->owner_cnt ||
- group->domain != group->default_domain);
+ list_del(&device->list);
+ __iommu_group_free_device(group, device);
+ if (dev->iommu && dev->iommu->iommu_dev)
+ iommu_deinit_device(dev);
+ else
+ dev->iommu_group = NULL;
+ break;
+ }
+ mutex_unlock(&group->mutex);
/*
- * release_device() must stop using any attached domain on the device.
- * If there are still other devices in the group they are not effected
- * by this callback.
- *
- * The IOMMU driver must set the device to either an identity or
- * blocking translation and stop using any domain pointer, as it is
- * going to be freed.
+ * Pairs with the get in iommu_init_device() or
+ * iommu_group_add_device()
*/
- ops = dev_iommu_ops(dev);
- if (ops->release_device)
- ops->release_device(dev);
- mutex_unlock(&group->mutex);
+ iommu_group_put(group);
+}
- if (device)
- __iommu_group_release_device(group, device);
+static void iommu_release_device(struct device *dev)
+{
+ struct iommu_group *group = dev->iommu_group;
- module_put(ops->owner);
- dev_iommu_free(dev);
+ if (group)
+ __iommu_group_remove_device(dev);
+
+ /* Free any fwspec if no iommu_driver was ever attached */
+ if (dev->iommu)
+ dev_iommu_free(dev);
}
static int __init iommu_set_def_domain_type(char *str)
@@ -842,10 +899,9 @@ static void iommu_group_release(struct kobject *kobj)
ida_free(&iommu_group_ida, group->id);
- if (group->default_domain)
- iommu_domain_free(group->default_domain);
- if (group->blocking_domain)
- iommu_domain_free(group->blocking_domain);
+ /* Domains are free'd by iommu_deinit_device() */
+ WARN_ON(group->default_domain);
+ WARN_ON(group->blocking_domain);
kfree(group->name);
kfree(group);
@@ -1003,14 +1059,12 @@ static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
unsigned long pg_size;
int ret = 0;
- if (!iommu_is_dma_domain(domain))
- return 0;
-
- BUG_ON(!domain->pgsize_bitmap);
-
- pg_size = 1UL << __ffs(domain->pgsize_bitmap);
+ pg_size = domain->pgsize_bitmap ? 1UL << __ffs(domain->pgsize_bitmap) : 0;
INIT_LIST_HEAD(&mappings);
+ if (WARN_ON_ONCE(iommu_is_dma_domain(domain) && !pg_size))
+ return -EINVAL;
+
iommu_get_resv_regions(dev, &mappings);
/* We need to consider overlapping regions for different devices */
@@ -1018,13 +1072,17 @@ static int iommu_create_device_direct_mappings(struct iommu_domain *domain,
dma_addr_t start, end, addr;
size_t map_size = 0;
- start = ALIGN(entry->start, pg_size);
- end = ALIGN(entry->start + entry->length, pg_size);
+ if (entry->type == IOMMU_RESV_DIRECT)
+ dev->iommu->require_direct = 1;
- if (entry->type != IOMMU_RESV_DIRECT &&
- entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
+ if ((entry->type != IOMMU_RESV_DIRECT &&
+ entry->type != IOMMU_RESV_DIRECT_RELAXABLE) ||
+ !iommu_is_dma_domain(domain))
continue;
+ start = ALIGN(entry->start, pg_size);
+ end = ALIGN(entry->start + entry->length, pg_size);
+
for (addr = start; addr <= end; addr += pg_size) {
phys_addr_t phys_addr;
@@ -1058,22 +1116,16 @@ out:
return ret;
}
-/**
- * iommu_group_add_device - add a device to an iommu group
- * @group: the group into which to add the device (reference should be held)
- * @dev: the device
- *
- * This function is called by an iommu driver to add a device into a
- * group. Adding a device increments the group reference count.
- */
-int iommu_group_add_device(struct iommu_group *group, struct device *dev)
+/* This is undone by __iommu_group_free_device() */
+static struct group_device *iommu_group_alloc_device(struct iommu_group *group,
+ struct device *dev)
{
int ret, i = 0;
struct group_device *device;
device = kzalloc(sizeof(*device), GFP_KERNEL);
if (!device)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
device->dev = dev;
@@ -1104,18 +1156,11 @@ rename:
goto err_free_name;
}
- kobject_get(group->devices_kobj);
-
- dev->iommu_group = group;
-
- mutex_lock(&group->mutex);
- list_add_tail(&device->list, &group->devices);
- mutex_unlock(&group->mutex);
trace_add_device_to_group(group->id, dev);
dev_info(dev, "Adding to iommu group %d\n", group->id);
- return 0;
+ return device;
err_free_name:
kfree(device->name);
@@ -1124,7 +1169,32 @@ err_remove_link:
err_free_device:
kfree(device);
dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
- return ret;
+ return ERR_PTR(ret);
+}
+
+/**
+ * iommu_group_add_device - add a device to an iommu group
+ * @group: the group into which to add the device (reference should be held)
+ * @dev: the device
+ *
+ * This function is called by an iommu driver to add a device into a
+ * group. Adding a device increments the group reference count.
+ */
+int iommu_group_add_device(struct iommu_group *group, struct device *dev)
+{
+ struct group_device *gdev;
+
+ gdev = iommu_group_alloc_device(group, dev);
+ if (IS_ERR(gdev))
+ return PTR_ERR(gdev);
+
+ iommu_group_ref_get(group);
+ dev->iommu_group = group;
+
+ mutex_lock(&group->mutex);
+ list_add_tail(&gdev->list, &group->devices);
+ mutex_unlock(&group->mutex);
+ return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
@@ -1138,19 +1208,13 @@ EXPORT_SYMBOL_GPL(iommu_group_add_device);
void iommu_group_remove_device(struct device *dev)
{
struct iommu_group *group = dev->iommu_group;
- struct group_device *device;
if (!group)
return;
dev_info(dev, "Removing from iommu group %d\n", group->id);
- mutex_lock(&group->mutex);
- device = __iommu_group_remove_device(group, dev);
- mutex_unlock(&group->mutex);
-
- if (device)
- __iommu_group_release_device(group, device);
+ __iommu_group_remove_device(dev);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);
@@ -1708,45 +1772,6 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
return dom;
}
-/**
- * iommu_group_get_for_dev - Find or create the IOMMU group for a device
- * @dev: target device
- *
- * This function is intended to be called by IOMMU drivers and extended to
- * support common, bus-defined algorithms when determining or creating the
- * IOMMU group for a device. On success, the caller will hold a reference
- * to the returned IOMMU group, which will already include the provided
- * device. The reference should be released with iommu_group_put().
- */
-static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
-{
- const struct iommu_ops *ops = dev_iommu_ops(dev);
- struct iommu_group *group;
- int ret;
-
- group = iommu_group_get(dev);
- if (group)
- return group;
-
- group = ops->device_group(dev);
- if (WARN_ON_ONCE(group == NULL))
- return ERR_PTR(-EINVAL);
-
- if (IS_ERR(group))
- return group;
-
- ret = iommu_group_add_device(group, dev);
- if (ret)
- goto out_put_group;
-
- return group;
-
-out_put_group:
- iommu_group_put(group);
-
- return ERR_PTR(ret);
-}
-
struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
return group->default_domain;
@@ -1755,16 +1780,8 @@ struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
static int probe_iommu_group(struct device *dev, void *data)
{
struct list_head *group_list = data;
- struct iommu_group *group;
int ret;
- /* Device is probed already if in a group */
- group = iommu_group_get(dev);
- if (group) {
- iommu_group_put(group);
- return 0;
- }
-
ret = __iommu_probe_device(dev, group_list);
if (ret == -ENODEV)
ret = 0;
@@ -1840,11 +1857,6 @@ int bus_iommu_probe(const struct bus_type *bus)
LIST_HEAD(group_list);
int ret;
- /*
- * This code-path does not allocate the default domain when
- * creating the iommu group, so do it after the groups are
- * created.
- */
ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
if (ret)
return ret;
@@ -1857,6 +1869,11 @@ int bus_iommu_probe(const struct bus_type *bus)
/* Remove item from the list */
list_del_init(&group->entry);
+ /*
+ * We go to the trouble of deferred default domain creation so
+ * that the cross-group default domain type and the setup of the
+ * IOMMU_RESV_DIRECT will work correctly in non-hotplug scenarios.
+ */
ret = iommu_setup_default_domain(group, 0);
if (ret) {
mutex_unlock(&group->mutex);
@@ -2191,6 +2208,21 @@ static int __iommu_device_set_domain(struct iommu_group *group,
{
int ret;
+ /*
+ * If the device requires IOMMU_RESV_DIRECT then we cannot allow
+ * the blocking domain to be attached as it does not contain the
+ * required 1:1 mapping. This test effectively excludes the device
+ * being used with iommu_group_claim_dma_owner(), which will block
+ * vfio and iommufd as well.
+ */
+ if (dev->iommu->require_direct &&
+ (new_domain->type == IOMMU_DOMAIN_BLOCKED ||
+ new_domain == group->blocking_domain)) {
+ dev_warn(dev,
+ "Firmware has requested this device have a 1:1 IOMMU mapping, rejecting configuring the device without a 1:1 mapping. Contact your platform vendor.\n");
+ return -EINVAL;
+ }
+
if (dev->iommu->attach_deferred) {
if (new_domain == group->default_domain)
return 0;
@@ -3282,7 +3314,7 @@ static void __iommu_release_dma_ownership(struct iommu_group *group)
/**
* iommu_group_release_dma_owner() - Release DMA ownership of a group
- * @dev: The device
+ * @group: The group
*
* Release the DMA ownership claimed by iommu_group_claim_dma_owner().
*/
@@ -3296,7 +3328,7 @@ EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);
/**
* iommu_device_release_dma_owner() - Release DMA ownership of a device
- * @group: The device.
+ * @dev: The device.
*
* Release the DMA ownership claimed by iommu_device_claim_dma_owner().
*/
@@ -3479,3 +3511,30 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
return domain;
}
+
+ioasid_t iommu_alloc_global_pasid(struct device *dev)
+{
+ int ret;
+
+ /* max_pasids == 0 means that the device does not support PASID */
+ if (!dev->iommu->max_pasids)
+ return IOMMU_PASID_INVALID;
+
+ /*
+ * max_pasids is set up by the vendor driver based on the number of
+ * PASID bits supported, but the IDA allocation is inclusive.
+ */
+ ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_FIRST_GLOBAL_PASID,
+ dev->iommu->max_pasids - 1, GFP_KERNEL);
+ return ret < 0 ? IOMMU_PASID_INVALID : ret;
+}
+EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid);
+
+void iommu_free_global_pasid(ioasid_t pasid)
+{
+ if (WARN_ON(pasid == IOMMU_PASID_INVALID))
+ return;
+
+ ida_free(&iommu_global_pasid_ida, pasid);
+}
+EXPORT_SYMBOL_GPL(iommu_free_global_pasid);
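/*
 * Editorial usage sketch (not part of the patch): how a caller might pair the
 * two new helpers above. The names "my_ctx", "my_ctx_init" and "my_ctx_exit"
 * are hypothetical and used only for illustration.
 */
#include <linux/iommu.h>

struct my_ctx {
	ioasid_t pasid;
};

static int my_ctx_init(struct my_ctx *ctx, struct device *dev)
{
	ctx->pasid = iommu_alloc_global_pasid(dev);
	if (ctx->pasid == IOMMU_PASID_INVALID)
		return -ENOSPC;	/* no PASID support or PASID range exhausted */
	return 0;
}

static void my_ctx_exit(struct my_ctx *ctx)
{
	iommu_free_global_pasid(ctx->pasid);
	ctx->pasid = IOMMU_PASID_INVALID;
}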
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 9f64c5c9f5b9..65ff69477c43 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -14,11 +14,12 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -253,17 +254,13 @@ static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
- unsigned int count = 0;
+ u32 val;
- while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
- cpu_relax();
- if (++count == TLB_LOOP_TIMEOUT) {
- dev_err_ratelimited(domain->mmu->dev,
+ if (read_poll_timeout_atomic(ipmmu_ctx_read_root, val,
+ !(val & IMCTR_FLUSH), 1, TLB_LOOP_TIMEOUT,
+ false, domain, IMCTR))
+ dev_err_ratelimited(domain->mmu->dev,
"TLB sync timed out -- MMU may be deadlocked\n");
- return;
- }
- udelay(1);
- }
}
static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
@@ -723,6 +720,10 @@ static bool ipmmu_device_is_allowed(struct device *dev)
if (soc_device_match(soc_denylist))
return false;
+ /* Check whether this device is a PCI device */
+ if (dev_is_pci(dev))
+ return true;
+
/* Check whether this device can work with the IPMMU */
for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) {
if (!strcmp(dev_name(dev), devices_allowlist[i]))
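/*
 * Editorial sketch (not part of the patch): the read_poll_timeout_atomic()
 * call in ipmmu_tlb_sync() above behaves roughly like the open-coded loop it
 * replaces, i.e. something along these lines (simplified, not the exact
 * <linux/iopoll.h> expansion):
 */
static int ipmmu_tlb_sync_sketch(struct ipmmu_vmsa_domain *domain)
{
	unsigned int elapsed_us = 0;
	u32 val;

	for (;;) {
		val = ipmmu_ctx_read_root(domain, IMCTR);
		if (!(val & IMCTR_FLUSH))
			return 0;
		if (elapsed_us++ >= TLB_LOOP_TIMEOUT)
			return -ETIMEDOUT;
		udelay(1);
	}
}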
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index e93906d6e112..640275873a27 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -3,6 +3,7 @@
* Copyright (c) 2015-2016 MediaTek Inc.
* Author: Yong Wu <yong.wu@mediatek.com>
*/
+#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/clk.h>
@@ -27,6 +28,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/soc/mediatek/infracfg.h>
+#include <linux/soc/mediatek/mtk_sip_svc.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
@@ -143,6 +145,7 @@
#define PGTABLE_PA_35_EN BIT(17)
#define TF_PORT_TO_ADDR_MT8173 BIT(18)
#define INT_ID_PORT_WIDTH_6 BIT(19)
+#define CFG_IFA_MASTER_IN_ATF BIT(20)
#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \
((((pdata)->flags) & (mask)) == (_x))
@@ -167,6 +170,7 @@ enum mtk_iommu_plat {
M4U_MT8173,
M4U_MT8183,
M4U_MT8186,
+ M4U_MT8188,
M4U_MT8192,
M4U_MT8195,
M4U_MT8365,
@@ -258,6 +262,8 @@ struct mtk_iommu_data {
struct device *smicomm_dev;
struct mtk_iommu_bank_data *bank;
+ struct mtk_iommu_domain *share_dom; /* For 2 HWs share pgtable */
+
struct regmap *pericfg;
struct mutex mutex; /* Protect m4u_group/m4u_dom above */
@@ -577,41 +583,55 @@ static int mtk_iommu_config(struct mtk_iommu_data *data, struct device *dev,
unsigned int larbid, portid;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
const struct mtk_iommu_iova_region *region;
- u32 peri_mmuen, peri_mmuen_msk;
+ unsigned long portid_msk = 0;
+ struct arm_smccc_res res;
int i, ret = 0;
for (i = 0; i < fwspec->num_ids; ++i) {
- larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
+ portid_msk |= BIT(portid);
+ }
- if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
- larb_mmu = &data->larb_imu[larbid];
+ if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
+ /* All ports should be in the same larb. Just use 0 here */
+ larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
+ larb_mmu = &data->larb_imu[larbid];
+ region = data->plat_data->iova_region + regionid;
- region = data->plat_data->iova_region + regionid;
+ for_each_set_bit(portid, &portid_msk, 32)
larb_mmu->bank[portid] = upper_32_bits(region->iova_base);
- dev_dbg(dev, "%s iommu for larb(%s) port %d region %d rgn-bank %d.\n",
- enable ? "enable" : "disable", dev_name(larb_mmu->dev),
- portid, regionid, larb_mmu->bank[portid]);
+ dev_dbg(dev, "%s iommu for larb(%s) port 0x%lx region %d rgn-bank %d.\n",
+ enable ? "enable" : "disable", dev_name(larb_mmu->dev),
+ portid_msk, regionid, upper_32_bits(region->iova_base));
- if (enable)
- larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
- else
- larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
- } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
- peri_mmuen_msk = BIT(portid);
+ if (enable)
+ larb_mmu->mmu |= portid_msk;
+ else
+ larb_mmu->mmu &= ~portid_msk;
+ } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, CFG_IFA_MASTER_IN_ATF)) {
+ arm_smccc_smc(MTK_SIP_KERNEL_IOMMU_CONTROL,
+ IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU,
+ portid_msk, enable, 0, 0, 0, 0, &res);
+ ret = res.a0;
+ } else {
/* PCI dev has only one output id, enable the next writing bit for PCIe */
- if (dev_is_pci(dev))
- peri_mmuen_msk |= BIT(portid + 1);
+ if (dev_is_pci(dev)) {
+ if (fwspec->num_ids != 1) {
+ dev_err(dev, "PCI dev can only have one port.\n");
+ return -ENODEV;
+ }
+ portid_msk |= BIT(portid + 1);
+ }
- peri_mmuen = enable ? peri_mmuen_msk : 0;
ret = regmap_update_bits(data->pericfg, PERICFG_IOMMU_1,
- peri_mmuen_msk, peri_mmuen);
- if (ret)
- dev_err(dev, "%s iommu(%s) inframaster 0x%x fail(%d).\n",
- enable ? "enable" : "disable",
- dev_name(data->dev), peri_mmuen_msk, ret);
+ (u32)portid_msk, enable ? (u32)portid_msk : 0);
}
+ if (ret)
+ dev_err(dev, "%s iommu(%s) inframaster 0x%lx fail(%d).\n",
+ enable ? "enable" : "disable",
+ dev_name(data->dev), portid_msk, ret);
}
return ret;
}
@@ -620,15 +640,14 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
struct mtk_iommu_data *data,
unsigned int region_id)
{
+ struct mtk_iommu_domain *share_dom = data->share_dom;
const struct mtk_iommu_iova_region *region;
- struct mtk_iommu_domain *m4u_dom;
-
- /* Always use bank0 in sharing pgtable case */
- m4u_dom = data->bank[0].m4u_dom;
- if (m4u_dom) {
- dom->iop = m4u_dom->iop;
- dom->cfg = m4u_dom->cfg;
- dom->domain.pgsize_bitmap = m4u_dom->cfg.pgsize_bitmap;
+
+ /* Always use share domain in sharing pgtable case */
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE) && share_dom) {
+ dom->iop = share_dom->iop;
+ dom->cfg = share_dom->cfg;
+ dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap;
goto update_iova_region;
}
@@ -658,6 +677,9 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
/* Update our support page sizes bitmap */
dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE))
+ data->share_dom = dom;
+
update_iova_region:
/* Update the iova region for this domain */
region = data->plat_data->iova_region + region_id;
@@ -708,7 +730,9 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
/* Data is in the frstdata in sharing pgtable case. */
frstdata = mtk_iommu_get_frst_data(hw_list);
+ mutex_lock(&frstdata->mutex);
ret = mtk_iommu_domain_finalise(dom, frstdata, region_id);
+ mutex_unlock(&frstdata->mutex);
if (ret) {
mutex_unlock(&dom->mutex);
return ret;
@@ -1318,7 +1342,8 @@ static int mtk_iommu_probe(struct platform_device *pdev)
dev_err_probe(dev, ret, "mm dts parse fail\n");
goto out_runtime_disable;
}
- } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) {
+ } else if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
+ !MTK_IOMMU_HAS_FLAG(data->plat_data, CFG_IFA_MASTER_IN_ATF)) {
p = data->plat_data->pericfg_comp_str;
data->pericfg = syscon_regmap_lookup_by_compatible(p);
if (IS_ERR(data->pericfg)) {
@@ -1570,6 +1595,67 @@ static const struct mtk_iommu_plat_data mt8186_data_mm = {
.iova_region_larb_msk = mt8186_larb_region_msk,
};
+static const struct mtk_iommu_plat_data mt8188_data_infra = {
+ .m4u_plat = M4U_MT8188,
+ .flags = WR_THROT_EN | DCM_DISABLE | STD_AXI_MODE | PM_CLK_AO |
+ MTK_IOMMU_TYPE_INFRA | IFA_IOMMU_PCIE_SUPPORT |
+ PGTABLE_PA_35_EN | CFG_IFA_MASTER_IN_ATF,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = single_domain,
+ .iova_region_nr = ARRAY_SIZE(single_domain),
+};
+
+static const u32 mt8188_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
+ [0] = {~0, ~0, ~0, ~0}, /* Region0: all ports for larb0/1/2/3 */
+ [1] = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, ~0, ~0, ~0}, /* Region1: larb19(21)/21(22)/23 */
+ [2] = {0, 0, 0, 0, ~0, ~0, ~0, ~0, /* Region2: the other larbs. */
+ ~0, ~0, ~0, ~0, ~0, ~0, ~0, ~0,
+ ~0, ~0, ~0, ~0, ~0, 0, 0, 0,
+ 0, ~0},
+ [3] = {0},
+ [4] = {[24] = BIT(0) | BIT(1)}, /* Only larb27(24) port0/1 */
+ [5] = {[24] = BIT(2) | BIT(3)}, /* Only larb27(24) port2/3 */
+};
+
+static const struct mtk_iommu_plat_data mt8188_data_vdo = {
+ .m4u_plat = M4U_MT8188,
+ .flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN |
+ WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE |
+ PGTABLE_PA_35_EN | MTK_IOMMU_TYPE_MM,
+ .hw_list = &m4ulist,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = mt8192_multi_dom,
+ .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+ .iova_region_larb_msk = mt8188_larb_region_msk,
+ .larbid_remap = {{2}, {0}, {21}, {0}, {19}, {9, 10,
+ 11 /* 11a */, 25 /* 11c */},
+ {13, 0, 29 /* 16b */, 30 /* 17b */, 0}, {5}},
+};
+
+static const struct mtk_iommu_plat_data mt8188_data_vpp = {
+ .m4u_plat = M4U_MT8188,
+ .flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN |
+ WR_THROT_EN | IOVA_34_EN | SHARE_PGTABLE |
+ PGTABLE_PA_35_EN | MTK_IOMMU_TYPE_MM,
+ .hw_list = &m4ulist,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = mt8192_multi_dom,
+ .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+ .iova_region_larb_msk = mt8188_larb_region_msk,
+ .larbid_remap = {{1}, {3}, {23}, {7}, {MTK_INVALID_LARBID},
+ {12, 15, 24 /* 11b */}, {14, MTK_INVALID_LARBID,
+ 16 /* 16a */, 17 /* 17a */, MTK_INVALID_LARBID,
+ 27, 28 /* ccu0 */, MTK_INVALID_LARBID}, {4, 6}},
+};
+
static const unsigned int mt8192_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
[0] = {~0, ~0}, /* Region0: larb0/1 */
[1] = {0, 0, 0, 0, ~0, ~0, 0, ~0}, /* Region1: larb4/5/7 */
@@ -1678,6 +1764,9 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data},
{ .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data},
{ .compatible = "mediatek,mt8186-iommu-mm", .data = &mt8186_data_mm}, /* mm: m4u */
+ { .compatible = "mediatek,mt8188-iommu-infra", .data = &mt8188_data_infra},
+ { .compatible = "mediatek,mt8188-iommu-vdo", .data = &mt8188_data_vdo},
+ { .compatible = "mediatek,mt8188-iommu-vpp", .data = &mt8188_data_vpp},
{ .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data},
{ .compatible = "mediatek,mt8195-iommu-infra", .data = &mt8195_data_infra},
{ .compatible = "mediatek,mt8195-iommu-vdo", .data = &mt8195_data_vdo},
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 40f57d293a79..157b286e36bf 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -159,7 +159,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
* If we have reason to believe the IOMMU driver missed the initial
* probe for dev, replay it to get things in order.
*/
- if (!err && dev->bus && !device_iommu_mapped(dev))
+ if (!err && dev->bus)
err = iommu_probe_device(dev);
/* Ignore all other errors apart from EPROBE_DEFER */
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 4054030c3237..8ff69fbf9f65 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -98,9 +98,8 @@ struct rk_iommu_ops {
phys_addr_t (*pt_address)(u32 dte);
u32 (*mk_dtentries)(dma_addr_t pt_dma);
u32 (*mk_ptentries)(phys_addr_t page, int prot);
- phys_addr_t (*dte_addr_phys)(u32 addr);
- u32 (*dma_addr_dte)(dma_addr_t dt_dma);
u64 dma_bit_mask;
+ gfp_t gfp_flags;
};
struct rk_iommu {
@@ -278,8 +277,8 @@ static u32 rk_mk_pte(phys_addr_t page, int prot)
/*
* In v2:
* 31:12 - Page address bit 31:0
- * 11:9 - Page address bit 34:32
- * 8:4 - Page address bit 39:35
+ * 11: 8 - Page address bit 35:32
+ * 7: 4 - Page address bit 39:36
* 3 - Security
* 2 - Writable
* 1 - Readable
@@ -506,7 +505,7 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
/*
* Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
- * and verifying that upper 5 nybbles are read back.
+ * and verifying that upper 5 (v1) or 7 (v2) nybbles are read back.
*/
for (i = 0; i < iommu->num_mmu; i++) {
dte_addr = rk_ops->pt_address(DTE_ADDR_DUMMY);
@@ -531,33 +530,6 @@ static int rk_iommu_force_reset(struct rk_iommu *iommu)
return 0;
}
-static inline phys_addr_t rk_dte_addr_phys(u32 addr)
-{
- return (phys_addr_t)addr;
-}
-
-static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma)
-{
- return dt_dma;
-}
-
-#define DT_HI_MASK GENMASK_ULL(39, 32)
-#define DTE_BASE_HI_MASK GENMASK(11, 4)
-#define DT_SHIFT 28
-
-static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr)
-{
- u64 addr64 = addr;
- return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) |
- ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT);
-}
-
-static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma)
-{
- return (dt_dma & RK_DTE_PT_ADDRESS_MASK) |
- ((dt_dma & DT_HI_MASK) >> DT_SHIFT);
-}
-
static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
void __iomem *base = iommu->bases[index];
@@ -577,7 +549,7 @@ static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
page_offset = rk_iova_page_offset(iova);
mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
- mmu_dte_addr_phys = rk_ops->dte_addr_phys(mmu_dte_addr);
+ mmu_dte_addr_phys = rk_ops->pt_address(mmu_dte_addr);
dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
dte_addr = phys_to_virt(dte_addr_phys);
@@ -756,7 +728,7 @@ static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
if (rk_dte_is_pt_valid(dte))
goto done;
- page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | rk_ops->gfp_flags);
if (!page_table)
return ERR_PTR(-ENOMEM);
@@ -967,7 +939,7 @@ static int rk_iommu_enable(struct rk_iommu *iommu)
for (i = 0; i < iommu->num_mmu; i++) {
rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
- rk_ops->dma_addr_dte(rk_domain->dt_dma));
+ rk_ops->mk_dtentries(rk_domain->dt_dma));
rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
}
@@ -1105,7 +1077,7 @@ static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
* Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
* Allocate one 4 KiB page for each table.
*/
- rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
+ rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | rk_ops->gfp_flags);
if (!rk_domain->dt)
goto err_free_domain;
@@ -1405,18 +1377,16 @@ static struct rk_iommu_ops iommu_data_ops_v1 = {
.pt_address = &rk_dte_pt_address,
.mk_dtentries = &rk_mk_dte,
.mk_ptentries = &rk_mk_pte,
- .dte_addr_phys = &rk_dte_addr_phys,
- .dma_addr_dte = &rk_dma_addr_dte,
.dma_bit_mask = DMA_BIT_MASK(32),
+ .gfp_flags = GFP_DMA32,
};
static struct rk_iommu_ops iommu_data_ops_v2 = {
.pt_address = &rk_dte_pt_address_v2,
.mk_dtentries = &rk_mk_dte_v2,
.mk_ptentries = &rk_mk_pte_v2,
- .dte_addr_phys = &rk_dte_addr_phys_v2,
- .dma_addr_dte = &rk_dma_addr_dte_v2,
.dma_bit_mask = DMA_BIT_MASK(40),
+ .gfp_flags = 0,
};
static const struct of_device_id rk_iommu_dt_ids[] = {
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 39e34fdeccda..2fa9afebd4f5 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -14,6 +14,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -148,6 +149,7 @@ static struct iommu_domain *sprd_iommu_domain_alloc(unsigned int domain_type)
dom->domain.geometry.aperture_start = 0;
dom->domain.geometry.aperture_end = SZ_256M - 1;
+ dom->domain.geometry.force_aperture = true;
return &dom->domain;
}
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 1cbf063ccf14..e445f80d0226 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -9,7 +9,7 @@
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 3551ed057774..17dcd826f5c2 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -13,7 +13,7 @@
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index a01c15812b70..da308be6c487 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -437,8 +437,7 @@ err_put_driver:
}
static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel,
- const unsigned char *buf,
- int count)
+ const u8 *buf, int count)
{
unsigned long flags;
int i;
@@ -459,8 +458,8 @@ static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel,
return i;
}
-static int ipoctal_write_tty(struct tty_struct *tty,
- const unsigned char *buf, int count)
+static ssize_t ipoctal_write_tty(struct tty_struct *tty, const u8 *buf,
+ size_t count)
{
struct ipoctal_channel *channel = tty->driver_data;
unsigned int char_copied;
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 45a4043c5042..2f3789515445 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -1077,13 +1077,13 @@ static void capinc_tty_close(struct tty_struct *tty, struct file *filp)
tty_port_close(&mp->port, tty, filp);
}
-static int capinc_tty_write(struct tty_struct *tty,
- const unsigned char *buf, int count)
+static ssize_t capinc_tty_write(struct tty_struct *tty, const u8 *buf,
+ size_t count)
{
struct capiminor *mp = tty->driver_data;
struct sk_buff *skb;
- pr_debug("capinc_tty_write(count=%d)\n", count);
+ pr_debug("capinc_tty_write(count=%zu)\n", count);
spin_lock_bh(&mp->outlock);
skb = mp->outskb;
@@ -1112,7 +1112,7 @@ static int capinc_tty_write(struct tty_struct *tty,
return count;
}
-static int capinc_tty_put_char(struct tty_struct *tty, unsigned char ch)
+static int capinc_tty_put_char(struct tty_struct *tty, u8 ch)
{
struct capiminor *mp = tty->driver_data;
bool invoke_send = false;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 6046dfeca16f..b92208eccdea 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -521,6 +521,15 @@ config LEDS_PCA963X
LED driver chip accessed via the I2C bus. Supported
devices include PCA9633 and PCA9634
+config LEDS_PCA995X
+ tristate "LED Support for PCA995x I2C chips"
+ depends on LEDS_CLASS
+ depends on I2C
+ help
+ This option enables support for LEDs connected to PCA995x
+ LED driver chips accessed via the I2C bus. Supported
+ devices include PCA9955BTW, PCA9952TW and PCA9955TW.
+
config LEDS_WM831X_STATUS
tristate "LED support for status LEDs on WM831x PMICs"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index d71f1226540c..d7348e8bc019 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -72,6 +72,7 @@ obj-$(CONFIG_LEDS_OT200) += leds-ot200.o
obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
obj-$(CONFIG_LEDS_PCA963X) += leds-pca963x.o
+obj-$(CONFIG_LEDS_PCA995X) += leds-pca995x.o
obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o
obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
diff --git a/drivers/leds/blink/Kconfig b/drivers/leds/blink/Kconfig
index 945c84286a4e..bdcb7377cd4e 100644
--- a/drivers/leds/blink/Kconfig
+++ b/drivers/leds/blink/Kconfig
@@ -1,10 +1,10 @@
config LEDS_BCM63138
tristate "LED Support for Broadcom BCM63138 SoC"
depends on LEDS_CLASS
- depends on ARCH_BCM4908 || ARCH_BCM_5301X || BCM63XX || COMPILE_TEST
+ depends on ARCH_BCMBCA || ARCH_BCM_5301X || BCM63XX || COMPILE_TEST
depends on HAS_IOMEM
depends on OF
- default ARCH_BCM4908
+ default ARCH_BCMBCA
help
This option enables support for LED controller that is part of
BCM63138 SoC. The same hardware block is known to be also used
diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig
index 4ed2efc65434..4e08dbc05709 100644
--- a/drivers/leds/flash/Kconfig
+++ b/drivers/leds/flash/Kconfig
@@ -89,6 +89,8 @@ config LEDS_QCOM_FLASH
the total LED current will be split symmetrically on each channel and
they will be enabled/disabled at the same time.
+ This driver can be built as a module; it will be called "leds-qcom-flash".
+
config LEDS_RT4505
tristate "LED support for RT4505 flashlight controller"
depends on I2C && OF
diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c
index b089ca1a1901..a73d3ea5c97a 100644
--- a/drivers/leds/flash/leds-qcom-flash.c
+++ b/drivers/leds/flash/leds-qcom-flash.c
@@ -309,6 +309,10 @@ static int qcom_flash_strobe_set(struct led_classdev_flash *fled_cdev, bool stat
struct qcom_flash_led *led = flcdev_to_qcom_fled(fled_cdev);
int rc;
+ rc = set_flash_strobe(led, SW_STROBE, false);
+ if (rc)
+ return rc;
+
rc = set_flash_current(led, led->flash_current_ma, FLASH_MODE);
if (rc)
return rc;
@@ -745,6 +749,7 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
return 0;
release:
+ fwnode_handle_put(child);
while (flash_data->v4l2_flash[flash_data->leds_count] && flash_data->leds_count)
v4l2_flash_release(flash_data->v4l2_flash[flash_data->leds_count--]);
return rc;
diff --git a/drivers/leds/led-class-multicolor.c b/drivers/leds/led-class-multicolor.c
index e317408583df..ec62a4811613 100644
--- a/drivers/leds/led-class-multicolor.c
+++ b/drivers/leds/led-class-multicolor.c
@@ -6,6 +6,7 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/led-class-multicolor.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
@@ -19,9 +20,10 @@ int led_mc_calc_color_components(struct led_classdev_mc *mcled_cdev,
int i;
for (i = 0; i < mcled_cdev->num_colors; i++)
- mcled_cdev->subled_info[i].brightness = brightness *
- mcled_cdev->subled_info[i].intensity /
- led_cdev->max_brightness;
+ mcled_cdev->subled_info[i].brightness =
+ DIV_ROUND_CLOSEST(brightness *
+ mcled_cdev->subled_info[i].intensity,
+ led_cdev->max_brightness);
return 0;
}
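/*
 * Editorial note (not part of the patch), illustrative numbers: with
 * brightness = 1, intensity = 128 and max_brightness = 255, the old
 * truncating division gives 1 * 128 / 255 = 0 and the sub-LED goes dark,
 * while DIV_ROUND_CLOSEST(1 * 128, 255) = (128 + 127) / 255 = 1 keeps it
 * lit, which is the point of this change.
 */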
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 6dae56b914fe..974b84f6bd6a 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -22,7 +22,6 @@
#include <linux/of.h>
#include "leds.h"
-static struct class *leds_class;
static DEFINE_MUTEX(leds_lookup_lock);
static LIST_HEAD(leds_lookup_list);
@@ -76,6 +75,19 @@ static ssize_t max_brightness_show(struct device *dev,
}
static DEVICE_ATTR_RO(max_brightness);
+static ssize_t color_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *color_text = "invalid";
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+ if (led_cdev->color < LED_COLOR_ID_MAX)
+ color_text = led_colors[led_cdev->color];
+
+ return sysfs_emit(buf, "%s\n", color_text);
+}
+static DEVICE_ATTR_RO(color);
+
#ifdef CONFIG_LEDS_TRIGGERS
static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
static struct bin_attribute *led_trigger_bin_attrs[] = {
@@ -90,6 +102,7 @@ static const struct attribute_group led_trigger_group = {
static struct attribute *led_class_attrs[] = {
&dev_attr_brightness.attr,
&dev_attr_max_brightness.attr,
+ &dev_attr_color.attr,
NULL,
};
@@ -234,6 +247,12 @@ static struct led_classdev *led_module_get(struct device *led_dev)
return led_cdev;
}
+static const struct class leds_class = {
+ .name = "leds",
+ .dev_groups = led_groups,
+ .pm = &leds_class_dev_pm_ops,
+};
+
/**
* of_led_get() - request a LED device via the LED framework
* @np: device node to get the LED device from
@@ -251,7 +270,7 @@ struct led_classdev *of_led_get(struct device_node *np, int index)
if (!led_node)
return ERR_PTR(-ENOENT);
- led_dev = class_find_device_by_of_node(leds_class, led_node);
+ led_dev = class_find_device_by_of_node(&leds_class, led_node);
of_node_put(led_node);
put_device(led_dev);
@@ -346,7 +365,7 @@ struct led_classdev *led_get(struct device *dev, char *con_id)
if (!provider)
return ERR_PTR(-ENOENT);
- led_dev = class_find_device_by_name(leds_class, provider);
+ led_dev = class_find_device_by_name(&leds_class, provider);
kfree_const(provider);
return led_module_get(led_dev);
@@ -402,6 +421,31 @@ void led_remove_lookup(struct led_lookup_data *led_lookup)
}
EXPORT_SYMBOL_GPL(led_remove_lookup);
+/**
+ * devm_of_led_get_optional - Resource-managed request of an optional LED device
+ * @dev: LED consumer
+ * @index: index of the LED to obtain in the consumer
+ *
+ * The device node of the device is parsed to find the requested LED device.
+ * The LED device returned from this function is automatically released
+ * on driver detach.
+ *
+ * @return a pointer to a LED device, ERR_PTR(errno) on failure and NULL if the
+ * LED was not found.
+ */
+struct led_classdev *__must_check devm_of_led_get_optional(struct device *dev,
+ int index)
+{
+ struct led_classdev *led;
+
+ led = devm_of_led_get(dev, index);
+ if (IS_ERR(led) && PTR_ERR(led) == -ENOENT)
+ return NULL;
+
+ return led;
+}
+EXPORT_SYMBOL_GPL(devm_of_led_get_optional);
+
static int led_classdev_next_name(const char *init_name, char *name,
size_t len)
{
@@ -412,7 +456,7 @@ static int led_classdev_next_name(const char *init_name, char *name,
strscpy(name, init_name, len);
while ((ret < len) &&
- (dev = class_find_device_by_name(leds_class, name))) {
+ (dev = class_find_device_by_name(&leds_class, name))) {
put_device(dev);
ret = snprintf(name, len, "%s_%u", init_name, ++i);
}
@@ -457,6 +501,14 @@ int led_classdev_register_ext(struct device *parent,
if (fwnode_property_present(init_data->fwnode,
"retain-state-shutdown"))
led_cdev->flags |= LED_RETAIN_AT_SHUTDOWN;
+
+ fwnode_property_read_u32(init_data->fwnode,
+ "max-brightness",
+ &led_cdev->max_brightness);
+
+ if (fwnode_property_present(init_data->fwnode, "color"))
+ fwnode_property_read_u32(init_data->fwnode, "color",
+ &led_cdev->color);
}
} else {
proposed_name = led_cdev->name;
@@ -466,10 +518,13 @@ int led_classdev_register_ext(struct device *parent,
if (ret < 0)
return ret;
+ if (led_cdev->color >= LED_COLOR_ID_MAX)
+ dev_warn(parent, "LED %s color identifier out of range\n", final_name);
+
mutex_init(&led_cdev->led_access);
mutex_lock(&led_cdev->led_access);
- led_cdev->dev = device_create_with_groups(leds_class, parent, 0,
- led_cdev, led_cdev->groups, "%s", final_name);
+ led_cdev->dev = device_create_with_groups(&leds_class, parent, 0,
+ led_cdev, led_cdev->groups, "%s", final_name);
if (IS_ERR(led_cdev->dev)) {
mutex_unlock(&led_cdev->led_access);
return PTR_ERR(led_cdev->dev);
@@ -626,17 +681,12 @@ EXPORT_SYMBOL_GPL(devm_led_classdev_unregister);
static int __init leds_init(void)
{
- leds_class = class_create("leds");
- if (IS_ERR(leds_class))
- return PTR_ERR(leds_class);
- leds_class->pm = &leds_class_dev_pm_ops;
- leds_class->dev_groups = led_groups;
- return 0;
+ return class_register(&leds_class);
}
static void __exit leds_exit(void)
{
- class_destroy(leds_class);
+ class_unregister(&leds_class);
}
subsys_initcall(leds_init);
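/*
 * Editorial sketch (not part of the patch): the led-class hunks above follow
 * the generic "statically defined struct class" pattern shown below. The
 * names foo_class, foo_groups and foo_pm_ops are hypothetical placeholders.
 */
static const struct class foo_class = {
	.name		= "foo",
	.dev_groups	= foo_groups,
	.pm		= &foo_pm_ops,
};

static int __init foo_init(void)
{
	return class_register(&foo_class);
}

static void __exit foo_exit(void)
{
	class_unregister(&foo_class);
}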
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index b9b1295833c9..04f9ea675f2c 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -474,15 +474,15 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
struct fwnode_handle *fwnode = init_data->fwnode;
const char *devicename = init_data->devicename;
- /* We want to label LEDs that can produce full range of colors
- * as RGB, not multicolor */
- BUG_ON(props.color == LED_COLOR_ID_MULTI);
-
if (!led_classdev_name)
return -EINVAL;
led_parse_fwnode_props(dev, fwnode, &props);
+ /* We want to label LEDs that can produce full range of colors
+ * as RGB, not multicolor */
+ BUG_ON(props.color == LED_COLOR_ID_MULTI);
+
if (props.label) {
/*
* If init_data.devicename is NULL, then it indicates that
diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
index 24b1041213c2..0216afed3b6e 100644
--- a/drivers/leds/leds-an30259a.c
+++ b/drivers/leds/leds-an30259a.c
@@ -344,7 +344,7 @@ MODULE_DEVICE_TABLE(i2c, an30259a_id);
static struct i2c_driver an30259a_driver = {
.driver = {
.name = "leds-an30259a",
- .of_match_table = of_match_ptr(an30259a_match_table),
+ .of_match_table = an30259a_match_table,
},
.probe = an30259a_probe,
.remove = an30259a_remove,
diff --git a/drivers/leds/leds-ariel.c b/drivers/leds/leds-ariel.c
index 49e1bddaa15e..dd319c7e385f 100644
--- a/drivers/leds/leds-ariel.c
+++ b/drivers/leds/leds-ariel.c
@@ -7,8 +7,8 @@
#include <linux/module.h>
#include <linux/leds.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/of_platform.h>
enum ec_index {
EC_BLUE_LED = 0x01,
diff --git a/drivers/leds/leds-aw200xx.c b/drivers/leds/leds-aw200xx.c
index 96979b8e09b7..691a743cc9b0 100644
--- a/drivers/leds/leds-aw200xx.c
+++ b/drivers/leds/leds-aw200xx.c
@@ -368,7 +368,7 @@ static int aw200xx_probe_fw(struct device *dev, struct aw200xx *chip)
if (!chip->display_rows ||
chip->display_rows > chip->cdef->display_size_rows_max) {
- return dev_err_probe(dev, ret,
+ return dev_err_probe(dev, -EINVAL,
"Invalid leds display size %u\n",
chip->display_rows);
}
@@ -583,7 +583,7 @@ static struct i2c_driver aw200xx_driver = {
.name = "aw200xx",
.of_match_table = aw200xx_match_table,
},
- .probe_new = aw200xx_probe,
+ .probe = aw200xx_probe,
.remove = aw200xx_remove,
.id_table = aw200xx_id,
};
diff --git a/drivers/leds/leds-aw2013.c b/drivers/leds/leds-aw2013.c
index 59765640b70f..91f44b23cb11 100644
--- a/drivers/leds/leds-aw2013.c
+++ b/drivers/leds/leds-aw2013.c
@@ -62,7 +62,7 @@ struct aw2013_led {
struct aw2013 {
struct mutex mutex; /* held when writing to registers */
- struct regulator *vcc_regulator;
+ struct regulator_bulk_data regulators[2];
struct i2c_client *client;
struct aw2013_led leds[AW2013_MAX_LEDS];
struct regmap *regmap;
@@ -106,10 +106,11 @@ static void aw2013_chip_disable(struct aw2013 *chip)
regmap_write(chip->regmap, AW2013_GCR, 0);
- ret = regulator_disable(chip->vcc_regulator);
+ ret = regulator_bulk_disable(ARRAY_SIZE(chip->regulators),
+ chip->regulators);
if (ret) {
dev_err(&chip->client->dev,
- "Failed to disable regulator: %d\n", ret);
+ "Failed to disable regulators: %d\n", ret);
return;
}
@@ -123,10 +124,11 @@ static int aw2013_chip_enable(struct aw2013 *chip)
if (chip->enabled)
return 0;
- ret = regulator_enable(chip->vcc_regulator);
+ ret = regulator_bulk_enable(ARRAY_SIZE(chip->regulators),
+ chip->regulators);
if (ret) {
dev_err(&chip->client->dev,
- "Failed to enable regulator: %d\n", ret);
+ "Failed to enable regulators: %d\n", ret);
return ret;
}
chip->enabled = true;
@@ -348,19 +350,23 @@ static int aw2013_probe(struct i2c_client *client)
goto error;
}
- chip->vcc_regulator = devm_regulator_get(&client->dev, "vcc");
- ret = PTR_ERR_OR_ZERO(chip->vcc_regulator);
- if (ret) {
+ chip->regulators[0].supply = "vcc";
+ chip->regulators[1].supply = "vio";
+ ret = devm_regulator_bulk_get(&client->dev,
+ ARRAY_SIZE(chip->regulators),
+ chip->regulators);
+ if (ret < 0) {
if (ret != -EPROBE_DEFER)
dev_err(&client->dev,
- "Failed to request regulator: %d\n", ret);
+ "Failed to request regulators: %d\n", ret);
goto error;
}
- ret = regulator_enable(chip->vcc_regulator);
+ ret = regulator_bulk_enable(ARRAY_SIZE(chip->regulators),
+ chip->regulators);
if (ret) {
dev_err(&client->dev,
- "Failed to enable regulator: %d\n", ret);
+ "Failed to enable regulators: %d\n", ret);
goto error;
}
@@ -382,10 +388,11 @@ static int aw2013_probe(struct i2c_client *client)
if (ret < 0)
goto error_reg;
- ret = regulator_disable(chip->vcc_regulator);
+ ret = regulator_bulk_disable(ARRAY_SIZE(chip->regulators),
+ chip->regulators);
if (ret) {
dev_err(&client->dev,
- "Failed to disable regulator: %d\n", ret);
+ "Failed to disable regulators: %d\n", ret);
goto error;
}
@@ -394,7 +401,8 @@ static int aw2013_probe(struct i2c_client *client)
return 0;
error_reg:
- regulator_disable(chip->vcc_regulator);
+ regulator_bulk_disable(ARRAY_SIZE(chip->regulators),
+ chip->regulators);
error:
mutex_destroy(&chip->mutex);
@@ -420,7 +428,7 @@ MODULE_DEVICE_TABLE(of, aw2013_match_table);
static struct i2c_driver aw2013_driver = {
.driver = {
.name = "leds-aw2013",
- .of_match_table = of_match_ptr(aw2013_match_table),
+ .of_match_table = aw2013_match_table,
},
.probe = aw2013_probe,
.remove = aw2013_remove,
diff --git a/drivers/leds/leds-cpcap.c b/drivers/leds/leds-cpcap.c
index 7d41ce8c9bb1..87354f17644b 100644
--- a/drivers/leds/leds-cpcap.c
+++ b/drivers/leds/leds-cpcap.c
@@ -7,7 +7,7 @@
#include <linux/mfd/motorola-cpcap.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/leds/leds-cr0014114.c b/drivers/leds/leds-cr0014114.c
index c87686bd7c18..b33bca397ea6 100644
--- a/drivers/leds/leds-cr0014114.c
+++ b/drivers/leds/leds-cr0014114.c
@@ -4,8 +4,8 @@
#include <linux/delay.h>
#include <linux/leds.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
diff --git a/drivers/leds/leds-ip30.c b/drivers/leds/leds-ip30.c
index 1f952bad0fe8..2df24c303366 100644
--- a/drivers/leds/leds-ip30.c
+++ b/drivers/leds/leds-ip30.c
@@ -27,22 +27,16 @@ static void ip30led_set(struct led_classdev *led_cdev,
static int ip30led_create(struct platform_device *pdev, int num)
{
- struct resource *res;
struct ip30_led *data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, num);
- if (!res)
- return -EBUSY;
-
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->reg = devm_ioremap_resource(&pdev->dev, res);
+ data->reg = devm_platform_ioremap_resource(pdev, num);
if (IS_ERR(data->reg))
return PTR_ERR(data->reg);
-
switch (num) {
case IP30_LED_SYSTEM:
data->cdev.name = "white:power";
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index 72cb56d305c4..b0a0be77bb33 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -15,7 +15,6 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
/* Used to indicate a device has no such register */
#define IS31FL32XX_REG_NONE 0xFF
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 030c040fdf6d..2ef19ad23b1d 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -594,18 +594,17 @@ static const struct i2c_device_id lp5521_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lp5521_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_lp5521_leds_match[] = {
{ .compatible = "national,lp5521", },
{},
};
MODULE_DEVICE_TABLE(of, of_lp5521_leds_match);
-#endif
+
static struct i2c_driver lp5521_driver = {
.driver = {
.name = "lp5521",
- .of_match_table = of_match_ptr(of_lp5521_leds_match),
+ .of_match_table = of_lp5521_leds_match,
},
.probe = lp5521_probe,
.remove = lp5521_remove,
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index daa6a165fba6..38de853f9939 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -972,7 +972,6 @@ static const struct i2c_device_id lp5523_id[] = {
MODULE_DEVICE_TABLE(i2c, lp5523_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_lp5523_leds_match[] = {
{ .compatible = "national,lp5523", },
{ .compatible = "ti,lp55231", },
@@ -980,12 +979,11 @@ static const struct of_device_id of_lp5523_leds_match[] = {
};
MODULE_DEVICE_TABLE(of, of_lp5523_leds_match);
-#endif
static struct i2c_driver lp5523_driver = {
.driver = {
.name = "lp5523x",
- .of_match_table = of_match_ptr(of_lp5523_leds_match),
+ .of_match_table = of_lp5523_leds_match,
},
.probe = lp5523_probe,
.remove = lp5523_remove,
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index 4565cc12cea8..39db9aeb67c5 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -589,19 +589,17 @@ static const struct i2c_device_id lp5562_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lp5562_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_lp5562_leds_match[] = {
{ .compatible = "ti,lp5562", },
{},
};
MODULE_DEVICE_TABLE(of, of_lp5562_leds_match);
-#endif
static struct i2c_driver lp5562_driver = {
.driver = {
.name = "lp5562",
- .of_match_table = of_match_ptr(of_lp5562_leds_match),
+ .of_match_table = of_lp5562_leds_match,
},
.probe = lp5562_probe,
.remove = lp5562_remove,
diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c
index f11886aa8965..ac50aa88939a 100644
--- a/drivers/leds/leds-lp8501.c
+++ b/drivers/leds/leds-lp8501.c
@@ -380,19 +380,17 @@ static const struct i2c_device_id lp8501_id[] = {
};
MODULE_DEVICE_TABLE(i2c, lp8501_id);
-#ifdef CONFIG_OF
static const struct of_device_id of_lp8501_leds_match[] = {
{ .compatible = "ti,lp8501", },
{},
};
MODULE_DEVICE_TABLE(of, of_lp8501_leds_match);
-#endif
static struct i2c_driver lp8501_driver = {
.driver = {
.name = "lp8501",
- .of_match_table = of_match_ptr(of_lp8501_leds_match),
+ .of_match_table = of_lp8501_leds_match,
},
.probe = lp8501_probe,
.remove = lp8501_remove,
diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c
index b7855c93bd72..39210653acf7 100644
--- a/drivers/leds/leds-mlxreg.c
+++ b/drivers/leds/leds-mlxreg.c
@@ -8,7 +8,6 @@
#include <linux/io.h>
#include <linux/leds.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 1677d66d8b0e..f3010c472bbd 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -247,7 +247,7 @@ static int ns2_led_probe(struct platform_device *pdev)
if (!count)
return -ENODEV;
- leds = devm_kzalloc(dev, array_size(sizeof(*leds), count), GFP_KERNEL);
+ leds = devm_kcalloc(dev, count, sizeof(*leds), GFP_KERNEL);
if (!leds)
return -ENOMEM;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 8b5c62083e50..bf8bb8fc007c 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -18,7 +18,6 @@
#include <linux/leds-pca9532.h>
#include <linux/gpio/driver.h>
#include <linux/of.h>
-#include <linux/of_device.h>
/* m = num_leds*/
#define PCA9532_REG_INPUT(i) ((i) >> 3)
diff --git a/drivers/leds/leds-pca995x.c b/drivers/leds/leds-pca995x.c
new file mode 100644
index 000000000000..78215dff1499
--- /dev/null
+++ b/drivers/leds/leds-pca995x.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * LED driver for PCA995x I2C LED drivers
+ *
+ * Copyright 2011 bct electronic GmbH
+ * Copyright 2013 Qtechnology/AS
+ * Copyright 2022 NXP
+ * Copyright 2023 Marek Vasut
+ */
+
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+/* Register definition */
+#define PCA995X_MODE1 0x00
+#define PCA995X_MODE2 0x01
+#define PCA995X_LEDOUT0 0x02
+#define PCA9955B_PWM0 0x08
+#define PCA9952_PWM0 0x0A
+#define PCA9952_IREFALL 0x43
+#define PCA9955B_IREFALL 0x45
+
+/* Auto-increment disabled. Normal mode */
+#define PCA995X_MODE1_CFG 0x00
+
+/* LED select registers determine the source that drives LED outputs */
+#define PCA995X_LED_OFF 0x0
+#define PCA995X_LED_ON 0x1
+#define PCA995X_LED_PWM_MODE 0x2
+#define PCA995X_LDRX_MASK 0x3
+#define PCA995X_LDRX_BITS 2
+
+#define PCA995X_MAX_OUTPUTS 16
+#define PCA995X_OUTPUTS_PER_REG 4
+
+#define PCA995X_IREFALL_FULL_CFG 0xFF
+#define PCA995X_IREFALL_HALF_CFG (PCA995X_IREFALL_FULL_CFG / 2)
+
+#define PCA995X_TYPE_NON_B 0
+#define PCA995X_TYPE_B 1
+
+#define ldev_to_led(c) container_of(c, struct pca995x_led, ldev)
+
+struct pca995x_led {
+ unsigned int led_no;
+ struct led_classdev ldev;
+ struct pca995x_chip *chip;
+};
+
+struct pca995x_chip {
+ struct regmap *regmap;
+ struct pca995x_led leds[PCA995X_MAX_OUTPUTS];
+ int btype;
+};
+
+static int pca995x_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct pca995x_led *led = ldev_to_led(led_cdev);
+ struct pca995x_chip *chip = led->chip;
+ u8 ledout_addr, pwmout_addr;
+ int shift, ret;
+
+ pwmout_addr = (chip->btype ? PCA9955B_PWM0 : PCA9952_PWM0) + led->led_no;
+ ledout_addr = PCA995X_LEDOUT0 + (led->led_no / PCA995X_OUTPUTS_PER_REG);
+ shift = PCA995X_LDRX_BITS * (led->led_no % PCA995X_OUTPUTS_PER_REG);
+
+ switch (brightness) {
+ case LED_FULL:
+ return regmap_update_bits(chip->regmap, ledout_addr,
+ PCA995X_LDRX_MASK << shift,
+ PCA995X_LED_ON << shift);
+ case LED_OFF:
+ return regmap_update_bits(chip->regmap, ledout_addr,
+ PCA995X_LDRX_MASK << shift, 0);
+ default:
+ /* Adjust brightness as per user input by changing individual PWM */
+ ret = regmap_write(chip->regmap, pwmout_addr, brightness);
+ if (ret)
+ return ret;
+
+ /*
+ * Change LDRx configuration to individual brightness via PWM.
+ * If the LED is currently blinking, this stops it.
+ */
+ return regmap_update_bits(chip->regmap, ledout_addr,
+ PCA995X_LDRX_MASK << shift,
+ PCA995X_LED_PWM_MODE << shift);
+ }
+}
+
+static const struct regmap_config pca995x_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x49,
+};
+
+static int pca995x_probe(struct i2c_client *client)
+{
+ struct fwnode_handle *led_fwnodes[PCA995X_MAX_OUTPUTS] = { 0 };
+ struct fwnode_handle *np, *child;
+ struct device *dev = &client->dev;
+ struct pca995x_chip *chip;
+ struct pca995x_led *led;
+ int i, btype, reg, ret;
+
+ btype = (unsigned long)device_get_match_data(&client->dev);
+
+ np = dev_fwnode(dev);
+ if (!np)
+ return -ENODEV;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->btype = btype;
+ chip->regmap = devm_regmap_init_i2c(client, &pca995x_regmap);
+ if (IS_ERR(chip->regmap))
+ return PTR_ERR(chip->regmap);
+
+ i2c_set_clientdata(client, chip);
+
+ fwnode_for_each_available_child_node(np, child) {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret) {
+ fwnode_handle_put(child);
+ return ret;
+ }
+
+ if (reg < 0 || reg >= PCA995X_MAX_OUTPUTS || led_fwnodes[reg]) {
+ fwnode_handle_put(child);
+ return -EINVAL;
+ }
+
+ led = &chip->leds[reg];
+ led_fwnodes[reg] = child;
+ led->chip = chip;
+ led->led_no = reg;
+ led->ldev.brightness_set_blocking = pca995x_brightness_set;
+ led->ldev.max_brightness = 255;
+ }
+
+ for (i = 0; i < PCA995X_MAX_OUTPUTS; i++) {
+ struct led_init_data init_data = {};
+
+ if (!led_fwnodes[i])
+ continue;
+
+ init_data.fwnode = led_fwnodes[i];
+
+ ret = devm_led_classdev_register_ext(dev,
+ &chip->leds[i].ldev,
+ &init_data);
+ if (ret < 0) {
+ fwnode_handle_put(child);
+ return dev_err_probe(dev, ret,
+ "Could not register LED %s\n",
+ chip->leds[i].ldev.name);
+ }
+ }
+
+ /* Disable LED all-call address and set normal mode */
+ ret = regmap_write(chip->regmap, PCA995X_MODE1, PCA995X_MODE1_CFG);
+ if (ret)
+ return ret;
+
+ /* IREF Output current value for all LEDn outputs */
+ return regmap_write(chip->regmap,
+ btype ? PCA9955B_IREFALL : PCA9952_IREFALL,
+ PCA995X_IREFALL_HALF_CFG);
+}
+
+static const struct i2c_device_id pca995x_id[] = {
+ { "pca9952", .driver_data = (kernel_ulong_t)PCA995X_TYPE_NON_B },
+ { "pca9955b", .driver_data = (kernel_ulong_t)PCA995X_TYPE_B },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pca995x_id);
+
+static const struct of_device_id pca995x_of_match[] = {
+ { .compatible = "nxp,pca9952", .data = (void *)PCA995X_TYPE_NON_B },
+ { .compatible = "nxp,pca9955b", .data = (void *)PCA995X_TYPE_B },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pca995x_of_match);
+
+static struct i2c_driver pca995x_driver = {
+ .driver = {
+ .name = "leds-pca995x",
+ .of_match_table = pca995x_of_match,
+ },
+ .probe = pca995x_probe,
+ .id_table = pca995x_id,
+};
+module_i2c_driver(pca995x_driver);
+
+MODULE_AUTHOR("Isai Gaspar <isaiezequiel.gaspar@nxp.com>");
+MODULE_DESCRIPTION("PCA995x LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-pm8058.c b/drivers/leds/leds-pm8058.c
index b9233f14b646..3f49a5181892 100644
--- a/drivers/leds/leds-pm8058.c
+++ b/drivers/leds/leds-pm8058.c
@@ -4,7 +4,6 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/regmap.h>
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 29194cc382af..419b710984ab 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/pwm.h>
@@ -146,7 +146,7 @@ static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv)
led.name = to_of_node(fwnode)->name;
if (!led.name) {
- ret = EINVAL;
+ ret = -EINVAL;
goto err_child_out;
}
diff --git a/drivers/leds/leds-spi-byte.c b/drivers/leds/leds-spi-byte.c
index 2c7ffc3c78e6..9d91f21842f2 100644
--- a/drivers/leds/leds-spi-byte.c
+++ b/drivers/leds/leds-spi-byte.c
@@ -30,7 +30,7 @@
#include <linux/leds.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/spi/spi.h>
#include <linux/mutex.h>
#include <uapi/linux/uleds.h>
diff --git a/drivers/leds/leds-syscon.c b/drivers/leds/leds-syscon.c
index e38abb5e60c1..360a376fa738 100644
--- a/drivers/leds/leds-syscon.c
+++ b/drivers/leds/leds-syscon.c
@@ -7,8 +7,7 @@
*/
#include <linux/io.h>
#include <linux/init.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/stat.h>
#include <linux/slab.h>
diff --git a/drivers/leds/leds-ti-lmu-common.c b/drivers/leds/leds-ti-lmu-common.c
index d7f10ad721ba..b2491666b5dc 100644
--- a/drivers/leds/leds-ti-lmu-common.c
+++ b/drivers/leds/leds-ti-lmu-common.c
@@ -7,7 +7,7 @@
#include <linux/bitops.h>
#include <linux/err.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/leds-ti-lmu-common.h>
diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
index dfc6fb2b3e52..945e831ef4ac 100644
--- a/drivers/leds/leds-tlc591xx.c
+++ b/drivers/leds/leds-tlc591xx.c
@@ -8,7 +8,6 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index 64b2d7b6d3f3..b8a95a917cfa 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -156,24 +156,20 @@ static ssize_t brightness_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
- struct omnia_leds *leds = i2c_get_clientdata(client);
int ret;
- mutex_lock(&leds->lock);
ret = i2c_smbus_read_byte_data(client, CMD_LED_GET_BRIGHTNESS);
- mutex_unlock(&leds->lock);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
- struct omnia_leds *leds = i2c_get_clientdata(client);
unsigned long brightness;
int ret;
@@ -183,15 +179,10 @@ static ssize_t brightness_store(struct device *dev, struct device_attribute *a,
if (brightness > 100)
return -EINVAL;
- mutex_lock(&leds->lock);
ret = i2c_smbus_write_byte_data(client, CMD_LED_SET_BRIGHTNESS,
(u8)brightness);
- mutex_unlock(&leds->lock);
-
- if (ret < 0)
- return ret;
- return count;
+ return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(brightness);
diff --git a/drivers/leds/rgb/Kconfig b/drivers/leds/rgb/Kconfig
index 360c8679c6e2..183bccc06cf3 100644
--- a/drivers/leds/rgb/Kconfig
+++ b/drivers/leds/rgb/Kconfig
@@ -2,6 +2,18 @@
if LEDS_CLASS_MULTICOLOR
+config LEDS_GROUP_MULTICOLOR
+ tristate "LEDs group multi-color support"
+ depends on OF || COMPILE_TEST
+ help
+ This option enables support for monochrome LEDs that are grouped
+ into multicolor LEDs, which is useful when LEDs of
+ different colors are physically grouped in a single multi-color LED
+ and driven by a controller that doesn't have multi-color support.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-group-multicolor.
+
config LEDS_PWM_MULTICOLOR
tristate "PWM driven multi-color LED Support"
depends on PWM
diff --git a/drivers/leds/rgb/Makefile b/drivers/leds/rgb/Makefile
index 8c01daf63f61..c11cc56384e7 100644
--- a/drivers/leds/rgb/Makefile
+++ b/drivers/leds/rgb/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_LEDS_GROUP_MULTICOLOR) += leds-group-multicolor.o
obj-$(CONFIG_LEDS_PWM_MULTICOLOR) += leds-pwm-multicolor.o
obj-$(CONFIG_LEDS_QCOM_LPG) += leds-qcom-lpg.o
obj-$(CONFIG_LEDS_MT6370_RGB) += leds-mt6370-rgb.o
diff --git a/drivers/leds/rgb/leds-group-multicolor.c b/drivers/leds/rgb/leds-group-multicolor.c
new file mode 100644
index 000000000000..39f58be32af5
--- /dev/null
+++ b/drivers/leds/rgb/leds-group-multicolor.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Multi-color LED built with monochromatic LED devices
+ *
+ * This driver groups several monochromatic LED devices into a single multicolor LED device.
+ *
+ * Compared to handling this grouping in user-space, the benefits are:
+ * - The states of the monochromatic LEDs relative to each other are always consistent.
+ * - The sysfs interface of the LEDs can be used for the group as a whole.
+ *
+ * Copyright 2023 Jean-Jacques Hiblot <jjhiblot@traphandler.com>
+ */
+
+#include <linux/err.h>
+#include <linux/leds.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+struct leds_multicolor {
+ struct led_classdev_mc mc_cdev;
+ struct led_classdev **monochromatics;
+};
+
+static int leds_gmc_set(struct led_classdev *cdev, enum led_brightness brightness)
+{
+ struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev);
+ struct leds_multicolor *priv = container_of(mc_cdev, struct leds_multicolor, mc_cdev);
+ const unsigned int group_max_brightness = mc_cdev->led_cdev.max_brightness;
+ int i;
+
+ for (i = 0; i < mc_cdev->num_colors; i++) {
+ struct led_classdev *mono = priv->monochromatics[i];
+ const unsigned int mono_max_brightness = mono->max_brightness;
+ unsigned int intensity = mc_cdev->subled_info[i].intensity;
+ int mono_brightness;
+
+ /*
+ * Scale the brightness according to relative intensity of the
+ * color AND the max brightness of the monochromatic LED.
+ */
+ mono_brightness = DIV_ROUND_CLOSEST(brightness * intensity * mono_max_brightness,
+ group_max_brightness * group_max_brightness);
+
+ led_set_brightness(mono, mono_brightness);
+ }
+
+ return 0;
+}
+
+static void restore_sysfs_write_access(void *data)
+{
+ struct led_classdev *led_cdev = data;
+
+ /* Restore the write access to the LED */
+ mutex_lock(&led_cdev->led_access);
+ led_sysfs_enable(led_cdev);
+ mutex_unlock(&led_cdev->led_access);
+}
+
+static int leds_gmc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct led_init_data init_data = {};
+ struct led_classdev *cdev;
+ struct mc_subled *subled;
+ struct leds_multicolor *priv;
+ unsigned int max_brightness = 0;
+ int i, ret, count = 0;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ for (;;) {
+ struct led_classdev *led_cdev;
+
+ led_cdev = devm_of_led_get_optional(dev, count);
+ if (IS_ERR(led_cdev))
+ return dev_err_probe(dev, PTR_ERR(led_cdev), "Unable to get LED #%d",
+ count);
+ if (!led_cdev)
+ break;
+
+ priv->monochromatics = devm_krealloc_array(dev, priv->monochromatics,
+ count + 1, sizeof(*priv->monochromatics),
+ GFP_KERNEL);
+ if (!priv->monochromatics)
+ return -ENOMEM;
+
+ priv->monochromatics[count] = led_cdev;
+
+ max_brightness = max(max_brightness, led_cdev->max_brightness);
+
+ count++;
+ }
+
+ subled = devm_kcalloc(dev, count, sizeof(*subled), GFP_KERNEL);
+ if (!subled)
+ return -ENOMEM;
+ priv->mc_cdev.subled_info = subled;
+
+ for (i = 0; i < count; i++) {
+ struct led_classdev *led_cdev = priv->monochromatics[i];
+
+ subled[i].color_index = led_cdev->color;
+
+ /* Configure the LED intensity to its maximum */
+ subled[i].intensity = max_brightness;
+ }
+
+ /* Initialise the multicolor LED class device */
+ cdev = &priv->mc_cdev.led_cdev;
+ cdev->flags = LED_CORE_SUSPENDRESUME;
+ cdev->brightness_set_blocking = leds_gmc_set;
+ cdev->max_brightness = max_brightness;
+ cdev->color = LED_COLOR_ID_MULTI;
+ priv->mc_cdev.num_colors = count;
+
+ init_data.fwnode = dev_fwnode(dev);
+ ret = devm_led_classdev_multicolor_register_ext(dev, &priv->mc_cdev, &init_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register multicolor LED for %s.\n",
+ cdev->name);
+
+ ret = leds_gmc_set(cdev, cdev->brightness);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to set LED value for %s.", cdev->name);
+
+ for (i = 0; i < count; i++) {
+ struct led_classdev *led_cdev = priv->monochromatics[i];
+
+ /*
+ * Make the individual LED sysfs interface read-only to prevent the user
+ * from changing the brightness of the individual LEDs of the group.
+ */
+ mutex_lock(&led_cdev->led_access);
+ led_sysfs_disable(led_cdev);
+ mutex_unlock(&led_cdev->led_access);
+
+ /* Restore the write access to the LED sysfs when the group is destroyed */
+ devm_add_action_or_reset(dev, restore_sysfs_write_access, led_cdev);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id of_leds_group_multicolor_match[] = {
+ { .compatible = "leds-group-multicolor" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_leds_group_multicolor_match);
+
+static struct platform_driver leds_group_multicolor_driver = {
+ .probe = leds_gmc_probe,
+ .driver = {
+ .name = "leds_group_multicolor",
+ .of_match_table = of_leds_group_multicolor_match,
+ }
+};
+module_platform_driver(leds_group_multicolor_driver);
+
+MODULE_AUTHOR("Jean-Jacques Hiblot <jjhiblot@traphandler.com>");
+MODULE_DESCRIPTION("LEDs group multicolor driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-group-multicolor");
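The scaling in leds_gmc_set() above folds three factors into one value: the requested group brightness, the per-color intensity, and the monochromatic LED's own maximum brightness. A standalone sketch of the same arithmetic with made-up numbers (the concrete values are illustrative only, and DIV_ROUND_CLOSEST is re-defined in its unsigned form so the snippet builds in user space):

#include <stdio.h>

/* Unsigned form of the kernel's DIV_ROUND_CLOSEST(). */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int group_max_brightness = 255;	/* group range          */
	unsigned int mono_max_brightness = 100;		/* one red LED's range  */
	unsigned int intensity = 255;			/* full red intensity   */
	unsigned int brightness = 128;			/* requested group level */
	unsigned int mono_brightness;

	mono_brightness = DIV_ROUND_CLOSEST(brightness * intensity * mono_max_brightness,
					    group_max_brightness * group_max_brightness);

	/*
	 * 128 * 255 * 100 / (255 * 255) rounds to 50, i.e. half of the red
	 * LED's own range, matching the half group brightness requested.
	 */
	printf("mono_brightness = %u\n", mono_brightness);
	return 0;
}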
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
index 59581b3e25ca..df469aaa7e6e 100644
--- a/drivers/leds/rgb/leds-qcom-lpg.c
+++ b/drivers/leds/rgb/leds-qcom-lpg.c
@@ -9,7 +9,6 @@
#include <linux/led-class-multicolor.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
@@ -1093,7 +1092,6 @@ static int lpg_add_pwm(struct lpg *lpg)
{
int ret;
- lpg->pwm.base = -1;
lpg->pwm.dev = lpg->dev;
lpg->pwm.npwm = lpg->num_channels;
lpg->pwm.ops = &lpg_pwm_ops;
diff --git a/drivers/leds/simple/Kconfig b/drivers/leds/simple/Kconfig
index 44fa0f93cb3b..e616cc6d6051 100644
--- a/drivers/leds/simple/Kconfig
+++ b/drivers/leds/simple/Kconfig
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config LEDS_SIEMENS_SIMATIC_IPC
tristate "LED driver for Siemens Simatic IPCs"
+ depends on LEDS_CLASS
depends on SIEMENS_SIMATIC_IPC
+ default y
help
This option enables support for the LEDs of several Industrial PCs
from Siemens.
@@ -34,3 +36,16 @@ config LEDS_SIEMENS_SIMATIC_IPC_F7188X
To compile this driver as a module, choose M here: the module
will be called simatic-ipc-leds-gpio-f7188x.
+
+config LEDS_SIEMENS_SIMATIC_IPC_ELKHARTLAKE
+ tristate "LED driver for Siemens Simatic IPCs based on Intel Elkhart Lake GPIO"
+ depends on LEDS_GPIO
+ depends on PINCTRL_ELKHARTLAKE
+ depends on SIEMENS_SIMATIC_IPC
+ default LEDS_SIEMENS_SIMATIC_IPC
+ help
+ This option enables support for the LEDs of several Industrial PCs
+ from Siemens based on Elkhart Lake GPIO, i.e. the BX-21A.
+
+ To compile this driver as a module, choose M here: the module
+ will be called simatic-ipc-leds-gpio-elkhartlake.
diff --git a/drivers/leds/simple/Makefile b/drivers/leds/simple/Makefile
index e3e840cea275..783578f11bb0 100644
--- a/drivers/leds/simple/Makefile
+++ b/drivers/leds/simple/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC) += simatic-ipc-leds.o
obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC_APOLLOLAKE) += simatic-ipc-leds-gpio-core.o simatic-ipc-leds-gpio-apollolake.o
obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC_F7188X) += simatic-ipc-leds-gpio-core.o simatic-ipc-leds-gpio-f7188x.o
+obj-$(CONFIG_LEDS_SIEMENS_SIMATIC_IPC_ELKHARTLAKE) += simatic-ipc-leds-gpio-core.o simatic-ipc-leds-gpio-elkhartlake.o
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-core.c b/drivers/leds/simple/simatic-ipc-leds-gpio-core.c
index 2a21b663df87..c552ea73ed9d 100644
--- a/drivers/leds/simple/simatic-ipc-leds-gpio-core.c
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio-core.c
@@ -57,6 +57,7 @@ int simatic_ipc_leds_gpio_probe(struct platform_device *pdev,
switch (plat->devmode) {
case SIMATIC_IPC_DEVICE_127E:
case SIMATIC_IPC_DEVICE_227G:
+ case SIMATIC_IPC_DEVICE_BX_21A:
break;
default:
return -ENODEV;
@@ -72,6 +73,9 @@ int simatic_ipc_leds_gpio_probe(struct platform_device *pdev,
goto out;
}
+ if (!table_extra)
+ return 0;
+
table_extra->dev_id = dev_name(dev);
gpiod_add_lookup_table(table_extra);
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c b/drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c
new file mode 100644
index 000000000000..6ba21dbb3ba0
--- /dev/null
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio-elkhartlake.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC driver for GPIO based LEDs
+ *
+ * Copyright (c) Siemens AG, 2023
+ *
+ * Author:
+ * Henning Schild <henning.schild@siemens.com>
+ */
+
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/x86/simatic-ipc-base.h>
+
+#include "simatic-ipc-leds-gpio.h"
+
+static struct gpiod_lookup_table simatic_ipc_led_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX("INTC1020:04", 72, NULL, 0, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("INTC1020:04", 77, NULL, 1, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("INTC1020:04", 78, NULL, 2, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("INTC1020:04", 58, NULL, 3, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("INTC1020:04", 60, NULL, 4, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("INTC1020:04", 62, NULL, 5, GPIO_ACTIVE_HIGH),
+ {} /* Terminating entry */
+ },
+};
+
+static int simatic_ipc_leds_gpio_elkhartlake_probe(struct platform_device *pdev)
+{
+ return simatic_ipc_leds_gpio_probe(pdev, &simatic_ipc_led_gpio_table,
+ NULL);
+}
+
+static int simatic_ipc_leds_gpio_elkhartlake_remove(struct platform_device *pdev)
+{
+ return simatic_ipc_leds_gpio_remove(pdev, &simatic_ipc_led_gpio_table,
+ NULL);
+}
+
+static struct platform_driver simatic_ipc_led_gpio_elkhartlake_driver = {
+ .probe = simatic_ipc_leds_gpio_elkhartlake_probe,
+ .remove = simatic_ipc_leds_gpio_elkhartlake_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+module_platform_driver(simatic_ipc_led_gpio_elkhartlake_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_SOFTDEP("pre: simatic-ipc-leds-gpio-core platform:elkhartlake-pinctrl");
+MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio.h b/drivers/leds/simple/simatic-ipc-leds-gpio.h
index bf258c32f83d..3d4877aa4e0c 100644
--- a/drivers/leds/simple/simatic-ipc-leds-gpio.h
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Siemens SIMATIC IPC driver for GPIO based LEDs
*
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index cc3261543a5e..58f3352539e8 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -609,18 +609,7 @@ static struct led_trigger netdev_led_trigger = {
.groups = netdev_trig_groups,
};
-static int __init netdev_trig_init(void)
-{
- return led_trigger_register(&netdev_led_trigger);
-}
-
-static void __exit netdev_trig_exit(void)
-{
- led_trigger_unregister(&netdev_led_trigger);
-}
-
-module_init(netdev_trig_init);
-module_exit(netdev_trig_exit);
+module_led_trigger(netdev_led_trigger);
MODULE_AUTHOR("Ben Whitten <ben.whitten@gmail.com>");
MODULE_AUTHOR("Oliver Jowett <oliver@opencloud.com>");
diff --git a/drivers/leds/trigger/ledtrig-tty.c b/drivers/leds/trigger/ledtrig-tty.c
index f62db7e520b5..8ae0d2d284af 100644
--- a/drivers/leds/trigger/ledtrig-tty.c
+++ b/drivers/leds/trigger/ledtrig-tty.c
@@ -7,6 +7,8 @@
#include <linux/tty.h>
#include <uapi/linux/serial.h>
+#define LEDTRIG_TTY_INTERVAL 50
+
struct ledtrig_tty_data {
struct led_classdev *led_cdev;
struct delayed_work dwork;
@@ -122,17 +124,19 @@ static void ledtrig_tty_work(struct work_struct *work)
if (icount.rx != trigger_data->rx ||
icount.tx != trigger_data->tx) {
- led_set_brightness_sync(trigger_data->led_cdev, LED_ON);
+ unsigned long interval = LEDTRIG_TTY_INTERVAL;
+
+ led_blink_set_oneshot(trigger_data->led_cdev, &interval,
+ &interval, 0);
trigger_data->rx = icount.rx;
trigger_data->tx = icount.tx;
- } else {
- led_set_brightness_sync(trigger_data->led_cdev, LED_OFF);
}
out:
mutex_unlock(&trigger_data->mutex);
- schedule_delayed_work(&trigger_data->dwork, msecs_to_jiffies(100));
+ schedule_delayed_work(&trigger_data->dwork,
+ msecs_to_jiffies(LEDTRIG_TTY_INTERVAL * 2));
}
static struct attribute *ledtrig_tty_attrs[] = {
diff --git a/drivers/leds/uleds.c b/drivers/leds/uleds.c
index 7320337b22d2..3d361c920030 100644
--- a/drivers/leds/uleds.c
+++ b/drivers/leds/uleds.c
@@ -209,17 +209,7 @@ static struct miscdevice uleds_misc = {
.name = ULEDS_NAME,
};
-static int __init uleds_init(void)
-{
- return misc_register(&uleds_misc);
-}
-module_init(uleds_init);
-
-static void __exit uleds_exit(void)
-{
- misc_deregister(&uleds_misc);
-}
-module_exit(uleds_exit);
+module_misc_device(uleds_misc);
MODULE_AUTHOR("David Lechner <david@lechnology.com>");
MODULE_DESCRIPTION("Userspace driver for the LED subsystem");
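The netdev trigger and uleds hunks above remove the same kind of boilerplate: module_led_trigger() and module_misc_device() are thin wrappers built on module_driver(), which emits the __init/__exit pair on the module's behalf. A rough sketch of what module_misc_device(uleds_misc) amounts to for this driver (the function names are what the macro would generate and are shown only for illustration, not copied from the tree):

#include <linux/miscdevice.h>
#include <linux/module.h>

static int __init uleds_misc_init(void)
{
	return misc_register(&uleds_misc);
}
module_init(uleds_misc_init);

static void __exit uleds_misc_exit(void)
{
	misc_deregister(&uleds_misc);
}
module_exit(uleds_misc_exit);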
diff --git a/drivers/mailbox/arm_mhu.c b/drivers/mailbox/arm_mhu.c
index 22243cabe056..537f7bfb7b06 100644
--- a/drivers/mailbox/arm_mhu.c
+++ b/drivers/mailbox/arm_mhu.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
+#include <linux/of.h>
#define INTR_STAT_OFS 0x0
#define INTR_SET_OFS 0x8
diff --git a/drivers/mailbox/arm_mhu_db.c b/drivers/mailbox/arm_mhu_db.c
index aa0a4d83880f..27a510d46908 100644
--- a/drivers/mailbox/arm_mhu_db.c
+++ b/drivers/mailbox/arm_mhu_db.c
@@ -15,7 +15,6 @@
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#define INTR_STAT_OFS 0x0
#define INTR_SET_OFS 0x8
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index bf6e86b0ed09..a2b8839d4e7c 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1501,16 +1501,12 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
mbox->dev = dev;
platform_set_drvdata(pdev, mbox);
- /* Get resource for registers */
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* Get resource for registers and map registers of all rings */
+ mbox->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &iomem);
if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) {
ret = -ENODEV;
goto fail;
- }
-
- /* Map registers of all rings */
- mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
- if (IS_ERR(mbox->regs)) {
+ } else if (IS_ERR(mbox->regs)) {
ret = PTR_ERR(mbox->regs);
goto fail;
}
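This flexrm hunk, and several below (bcm-pdc, mailbox-test, rockchip, tegra-hsp, ti-msgmgr), replace the platform_get_resource() plus devm_ioremap_resource() pair with devm_platform_get_and_ioremap_resource(). A sketch of what the helper amounts to, assuming only the optional resource out-parameter differs from the open-coded form; the name example_get_and_ioremap() is made up for illustration:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Illustrative stand-in for devm_platform_get_and_ioremap_resource(). */
static void __iomem *example_get_and_ioremap(struct platform_device *pdev,
					     unsigned int index,
					     struct resource **res)
{
	struct resource *r;

	/* Look up the index-th MEM resource of the platform device. */
	r = platform_get_resource(pdev, IORESOURCE_MEM, index);
	if (res)
		*res = r;	/* callers that need start/size get the raw resource */

	/* Map it with device-managed lifetime; returns an ERR_PTR on failure. */
	return devm_ioremap_resource(&pdev->dev, r);
}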
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index 8c95e3ce295f..d67db63b482d 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -694,7 +694,7 @@ pdc_receive(struct pdc_state *pdcs)
* pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
* descriptors for a given SPU. The scatterlist buffers contain the data for a
* SPU request message.
- * @spu_idx: The index of the SPU to submit the request to, [0, max_spu)
+ * @pdcs: PDC state for the SPU that will process this request
* @sg: Scatterlist whose buffers contain part of the SPU request
*
* If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
@@ -861,7 +861,7 @@ static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
* pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
* descriptors for a given SPU. The caller must have already DMA mapped the
* scatterlist.
- * @spu_idx: Indicates which SPU the buffers are for
+ * @pdcs: PDC state for the SPU that will process this request
* @sg: Scatterlist whose buffers are added to the receive ring
*
* If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
@@ -960,7 +960,7 @@ static irqreturn_t pdc_irq_handler(int irq, void *data)
/**
* pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
* a DMA receive interrupt. Reenables the receive interrupt.
- * @data: PDC state structure
+ * @t: Pointer to the tasklet structure embedded in the PDC state
*/
static void pdc_tasklet_cb(struct tasklet_struct *t)
{
@@ -1566,19 +1566,13 @@ static int pdc_probe(struct platform_device *pdev)
if (err)
goto cleanup_ring_pool;
- pdc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!pdc_regs) {
- err = -ENODEV;
- goto cleanup_ring_pool;
- }
- dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
- &pdc_regs->start, &pdc_regs->end);
-
- pdcs->pdc_reg_vbase = devm_ioremap_resource(&pdev->dev, pdc_regs);
+ pdcs->pdc_reg_vbase = devm_platform_get_and_ioremap_resource(pdev, 0, &pdc_regs);
if (IS_ERR(pdcs->pdc_reg_vbase)) {
err = PTR_ERR(pdcs->pdc_reg_vbase);
goto cleanup_ring_pool;
}
+ dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
+ &pdc_regs->start, &pdc_regs->end);
/* create rx buffer pool after dt read to know how big buffers are */
err = pdc_rx_buf_pool_create(pdcs);
diff --git a/drivers/mailbox/hi3660-mailbox.c b/drivers/mailbox/hi3660-mailbox.c
index ab24e731a782..17c29e960fbf 100644
--- a/drivers/mailbox/hi3660-mailbox.c
+++ b/drivers/mailbox/hi3660-mailbox.c
@@ -11,6 +11,7 @@
#include <linux/iopoll.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c
index 1c73c63598f5..f77741ce42e7 100644
--- a/drivers/mailbox/hi6220-mailbox.c
+++ b/drivers/mailbox/hi6220-mailbox.c
@@ -15,6 +15,7 @@
#include <linux/kfifo.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 20f2ec880ad6..3ef4dd8adf5d 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -14,7 +14,8 @@
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/slab.h>
diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
index 162df49654fb..20ee283a04cc 100644
--- a/drivers/mailbox/mailbox-mpfs.c
+++ b/drivers/mailbox/mailbox-mpfs.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <soc/microchip/mpfs.h>
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index fc6a12a51b40..22d6018ceec3 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -367,8 +367,7 @@ static int mbox_test_probe(struct platform_device *pdev)
return -ENOMEM;
/* It's okay for MMIO to be NULL */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tdev->tx_mmio = devm_ioremap_resource(&pdev->dev, res);
+ tdev->tx_mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (PTR_ERR(tdev->tx_mmio) == -EBUSY) {
/* if reserved area in SRAM, try just ioremap */
size = resource_size(res);
@@ -378,8 +377,7 @@ static int mbox_test_probe(struct platform_device *pdev)
}
/* If specified, second reg entry is Rx MMIO */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- tdev->rx_mmio = devm_ioremap_resource(&pdev->dev, res);
+ tdev->rx_mmio = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
if (PTR_ERR(tdev->rx_mmio) == -EBUSY) {
size = resource_size(res);
tdev->rx_mmio = devm_ioremap(&pdev->dev, res->start, size);
@@ -390,7 +388,7 @@ static int mbox_test_probe(struct platform_device *pdev)
tdev->tx_channel = mbox_test_request_channel(pdev, "tx");
tdev->rx_channel = mbox_test_request_channel(pdev, "rx");
- if (!tdev->tx_channel && !tdev->rx_channel)
+ if (IS_ERR_OR_NULL(tdev->tx_channel) && IS_ERR_OR_NULL(tdev->rx_channel))
return -EPROBE_DEFER;
/* If Rx is not specified but has Rx MMIO, then Rx = Tx */
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index adf36c05fa43..ebff3baf3045 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -17,6 +17,7 @@
#include <linux/bitops.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
+#include <linux/of.h>
#include "mailbox.h"
diff --git a/drivers/mailbox/mtk-adsp-mailbox.c b/drivers/mailbox/mtk-adsp-mailbox.c
index 14bc0057de81..91487aa4d7da 100644
--- a/drivers/mailbox/mtk-adsp-mailbox.c
+++ b/drivers/mailbox/mtk-adsp-mailbox.c
@@ -10,7 +10,8 @@
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
struct mtk_adsp_mbox_priv {
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index b18d47ea13a0..4d62b07c1411 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index fa2ce3246b70..792bcaebbc9b 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -16,7 +16,7 @@
#include <linux/kfifo.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/omap-mailbox.h>
diff --git a/drivers/mailbox/platform_mhu.c b/drivers/mailbox/platform_mhu.c
index a5922ac0b0bf..834aecd720ac 100644
--- a/drivers/mailbox/platform_mhu.c
+++ b/drivers/mailbox/platform_mhu.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
@@ -135,10 +136,8 @@ static int platform_mhu_probe(struct platform_device *pdev)
for (i = 0; i < MHU_CHANS; i++) {
mhu->chan[i].con_priv = &mhu->mlink[i];
mhu->mlink[i].irq = platform_get_irq(pdev, i);
- if (mhu->mlink[i].irq < 0) {
- dev_err(dev, "failed to get irq%d\n", i);
+ if (mhu->mlink[i].irq < 0)
return mhu->mlink[i].irq;
- }
mhu->mlink[i].rx_reg = mhu->base + platform_mhu_reg[i];
mhu->mlink[i].tx_reg = mhu->mlink[i].rx_reg + TX_REG_OFFSET;
}
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index 7e27acf6c0cc..f597a1bd5684 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -227,10 +227,8 @@ static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc,
ret = of_parse_phandle_with_args(client_dn, "mboxes",
"#mbox-cells", j, &curr_ph);
of_node_put(curr_ph.np);
- if (!ret && curr_ph.np == controller_dn) {
+ if (!ret && curr_ph.np == controller_dn)
ipcc->num_chans++;
- break;
- }
}
}
diff --git a/drivers/mailbox/rockchip-mailbox.c b/drivers/mailbox/rockchip-mailbox.c
index 116286ecc5a0..8ffad059e898 100644
--- a/drivers/mailbox/rockchip-mailbox.c
+++ b/drivers/mailbox/rockchip-mailbox.c
@@ -8,8 +8,8 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
+#include <linux/of.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#define MAILBOX_A2B_INTEN 0x00
@@ -194,11 +194,7 @@ static int rockchip_mbox_probe(struct platform_device *pdev)
mb->mbox.ops = &rockchip_mbox_chan_ops;
mb->mbox.txdone_irq = true;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- mb->mbox_base = devm_ioremap_resource(&pdev->dev, res);
+ mb->mbox_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(mb->mbox_base))
return PTR_ERR(mb->mbox_base);
diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
index e3c899abeed8..9ae57de77d4d 100644
--- a/drivers/mailbox/sprd-mailbox.c
+++ b/drivers/mailbox/sprd-mailbox.c
@@ -11,7 +11,7 @@
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index 15d538fe2113..4ad3653f3866 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
index 7f98e7436d94..fe29fc2ca526 100644
--- a/drivers/mailbox/tegra-hsp.c
+++ b/drivers/mailbox/tegra-hsp.c
@@ -8,7 +8,6 @@
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
@@ -728,7 +727,6 @@ static int tegra_hsp_request_shared_irq(struct tegra_hsp *hsp)
static int tegra_hsp_probe(struct platform_device *pdev)
{
struct tegra_hsp *hsp;
- struct resource *res;
unsigned int i;
u32 value;
int err;
@@ -742,8 +740,7 @@ static int tegra_hsp_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&hsp->doorbells);
spin_lock_init(&hsp->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hsp->regs = devm_ioremap_resource(&pdev->dev, res);
+ hsp->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hsp->regs))
return PTR_ERR(hsp->regs);
diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c
index 03048cbda525..a94577f16a47 100644
--- a/drivers/mailbox/ti-msgmgr.c
+++ b/drivers/mailbox/ti-msgmgr.c
@@ -812,7 +812,6 @@ static int ti_msgmgr_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
struct device_node *np;
- struct resource *res;
const struct ti_msgmgr_desc *desc;
struct ti_msgmgr_inst *inst;
struct ti_queue_inst *qinst;
@@ -843,22 +842,19 @@ static int ti_msgmgr_probe(struct platform_device *pdev)
inst->dev = dev;
inst->desc = desc;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- desc->data_region_name);
- inst->queue_proxy_region = devm_ioremap_resource(dev, res);
+ inst->queue_proxy_region =
+ devm_platform_ioremap_resource_byname(pdev, desc->data_region_name);
if (IS_ERR(inst->queue_proxy_region))
return PTR_ERR(inst->queue_proxy_region);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- desc->status_region_name);
- inst->queue_state_debug_region = devm_ioremap_resource(dev, res);
+ inst->queue_state_debug_region =
+ devm_platform_ioremap_resource_byname(pdev, desc->status_region_name);
if (IS_ERR(inst->queue_state_debug_region))
return PTR_ERR(inst->queue_state_debug_region);
if (desc->is_sproxy) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- desc->ctrl_region_name);
- inst->queue_ctrl_region = devm_ioremap_resource(dev, res);
+ inst->queue_ctrl_region =
+ devm_platform_ioremap_resource_byname(pdev, desc->ctrl_region_name);
if (IS_ERR(inst->queue_ctrl_region))
return PTR_ERR(inst->queue_ctrl_region);
}
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index d097f45b0e5f..e4fcac97dbfa 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -16,8 +16,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
/* IPI agent ID any */
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index 241b1621b197..09ca83c23329 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -385,8 +385,8 @@ static void cec_data_cancel(struct cec_data *data, u8 tx_status, u8 rx_status)
cec_queue_msg_monitor(adap, &data->msg, 1);
if (!data->blocking && data->msg.sequence)
- /* Allow drivers to process the message first */
- call_op(adap, received, &data->msg);
+ /* Allow drivers to react to a canceled transmit */
+ call_void_op(adap, adap_nb_transmit_canceled, &data->msg);
cec_data_completed(data);
}
@@ -1348,7 +1348,7 @@ static void cec_adap_unconfigure(struct cec_adapter *adap)
cec_flush(adap);
wake_up_interruptible(&adap->kthread_waitq);
cec_post_state_event(adap);
- call_void_op(adap, adap_configured, false);
+ call_void_op(adap, adap_unconfigured);
}
/*
@@ -1539,7 +1539,7 @@ configured:
adap->kthread_config = NULL;
complete(&adap->config_completion);
mutex_unlock(&adap->lock);
- call_void_op(adap, adap_configured, true);
+ call_void_op(adap, configured);
return 0;
unconfigure:
diff --git a/drivers/media/cec/core/cec-notifier.c b/drivers/media/cec/core/cec-notifier.c
index 389dc664b211..a41f24172b11 100644
--- a/drivers/media/cec/core/cec-notifier.c
+++ b/drivers/media/cec/core/cec-notifier.c
@@ -7,6 +7,7 @@
*/
#include <linux/export.h>
+#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/i2c.h>
diff --git a/drivers/media/cec/core/cec-pin-priv.h b/drivers/media/cec/core/cec-pin-priv.h
index 8eb5819e6ccb..156a9f81be94 100644
--- a/drivers/media/cec/core/cec-pin-priv.h
+++ b/drivers/media/cec/core/cec-pin-priv.h
@@ -183,6 +183,7 @@ struct cec_pin {
u16 la_mask;
bool monitor_all;
bool rx_eom;
+ bool enabled_irq;
bool enable_irq_failed;
enum cec_pin_state state;
struct cec_msg tx_msg;
diff --git a/drivers/media/cec/core/cec-pin.c b/drivers/media/cec/core/cec-pin.c
index 68353c5dc501..330d5d5d86ab 100644
--- a/drivers/media/cec/core/cec-pin.c
+++ b/drivers/media/cec/core/cec-pin.c
@@ -982,7 +982,7 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
}
if (pin->state != CEC_ST_IDLE || pin->ops->enable_irq == NULL ||
pin->enable_irq_failed || adap->is_configuring ||
- adap->is_configured || adap->monitor_all_cnt)
+ adap->is_configured || adap->monitor_all_cnt || !adap->monitor_pin_cnt)
break;
/* Switch to interrupt mode */
atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_ENABLE);
@@ -1033,8 +1033,9 @@ static int cec_pin_thread_func(void *_adap)
{
struct cec_adapter *adap = _adap;
struct cec_pin *pin = adap->pin;
- bool irq_enabled = false;
+ pin->enabled_irq = false;
+ pin->enable_irq_failed = false;
for (;;) {
wait_event_interruptible(pin->kthread_waitq,
kthread_should_stop() ||
@@ -1088,9 +1089,10 @@ static int cec_pin_thread_func(void *_adap)
switch (atomic_xchg(&pin->work_irq_change,
CEC_PIN_IRQ_UNCHANGED)) {
case CEC_PIN_IRQ_DISABLE:
- if (irq_enabled) {
- call_void_pin_op(pin, disable_irq);
- irq_enabled = false;
+ if (pin->enabled_irq) {
+ pin->ops->disable_irq(adap);
+ pin->enabled_irq = false;
+ pin->enable_irq_failed = false;
}
cec_pin_high(pin);
if (pin->state == CEC_ST_OFF)
@@ -1100,21 +1102,29 @@ static int cec_pin_thread_func(void *_adap)
HRTIMER_MODE_REL);
break;
case CEC_PIN_IRQ_ENABLE:
- if (irq_enabled)
+ if (pin->enabled_irq || !pin->ops->enable_irq ||
+ pin->adap->devnode.unregistered)
break;
- pin->enable_irq_failed = !call_pin_op(pin, enable_irq);
+ pin->enable_irq_failed = !pin->ops->enable_irq(adap);
if (pin->enable_irq_failed) {
cec_pin_to_idle(pin);
hrtimer_start(&pin->timer, ns_to_ktime(0),
HRTIMER_MODE_REL);
} else {
- irq_enabled = true;
+ pin->enabled_irq = true;
}
break;
default:
break;
}
}
+
+ if (pin->enabled_irq) {
+ pin->ops->disable_irq(pin->adap);
+ pin->enabled_irq = false;
+ pin->enable_irq_failed = false;
+ cec_pin_high(pin);
+ }
return 0;
}
@@ -1215,7 +1225,9 @@ static void cec_pin_adap_status(struct cec_adapter *adap,
seq_printf(file, "cec pin: %d\n", call_pin_op(pin, read));
seq_printf(file, "cec pin events dropped: %u\n",
pin->work_pin_events_dropped_cnt);
- seq_printf(file, "irq failed: %d\n", pin->enable_irq_failed);
+ if (pin->ops->enable_irq)
+ seq_printf(file, "irq %s\n", pin->enabled_irq ? "enabled" :
+ (pin->enable_irq_failed ? "failed" : "disabled"));
if (pin->timer_100us_overruns) {
seq_printf(file, "timer overruns > 100us: %u of %u\n",
pin->timer_100us_overruns, pin->timer_cnt);
@@ -1305,7 +1317,7 @@ void cec_pin_changed(struct cec_adapter *adap, bool value)
cec_pin_update(pin, value, false);
if (!value && (adap->is_configuring || adap->is_configured ||
- adap->monitor_all_cnt))
+ adap->monitor_all_cnt || !adap->monitor_pin_cnt))
atomic_set(&pin->work_irq_change, CEC_PIN_IRQ_DISABLE);
}
EXPORT_SYMBOL_GPL(cec_pin_changed);
diff --git a/drivers/media/cec/i2c/ch7322.c b/drivers/media/cec/i2c/ch7322.c
index 439c15bc9e44..b8755337b394 100644
--- a/drivers/media/cec/i2c/ch7322.c
+++ b/drivers/media/cec/i2c/ch7322.c
@@ -589,7 +589,7 @@ MODULE_DEVICE_TABLE(of, ch7322_of_match);
static struct i2c_driver ch7322_i2c_driver = {
.driver = {
.name = "ch7322",
- .of_match_table = of_match_ptr(ch7322_of_match),
+ .of_match_table = ch7322_of_match,
},
.probe = ch7322_probe,
.remove = ch7322_remove,
diff --git a/drivers/media/cec/platform/cec-gpio/cec-gpio.c b/drivers/media/cec/platform/cec-gpio/cec-gpio.c
index ff34490fd869..98dacb0919b6 100644
--- a/drivers/media/cec/platform/cec-gpio/cec-gpio.c
+++ b/drivers/media/cec/platform/cec-gpio/cec-gpio.c
@@ -159,11 +159,6 @@ static int cec_gpio_read_5v(struct cec_adapter *adap)
return gpiod_get_value(cec->v5_gpio);
}
-static void cec_gpio_free(struct cec_adapter *adap)
-{
- cec_gpio_disable_irq(adap);
-}
-
static const struct cec_pin_ops cec_gpio_pin_ops = {
.read = cec_gpio_read,
.low = cec_gpio_low,
@@ -171,7 +166,6 @@ static const struct cec_pin_ops cec_gpio_pin_ops = {
.enable_irq = cec_gpio_enable_irq,
.disable_irq = cec_gpio_disable_irq,
.status = cec_gpio_status,
- .free = cec_gpio_free,
.read_hpd = cec_gpio_read_hpd,
.read_5v = cec_gpio_read_5v,
};
@@ -215,13 +209,11 @@ static int cec_gpio_probe(struct platform_device *pdev)
return PTR_ERR(cec->adap);
ret = devm_request_irq(dev, cec->cec_irq, cec_gpio_irq_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN,
cec->adap->name, cec);
if (ret)
goto del_adap;
- cec_gpio_disable_irq(cec->adap);
-
if (cec->hpd_gpio) {
cec->hpd_irq = gpiod_to_irq(cec->hpd_gpio);
ret = devm_request_threaded_irq(dev, cec->hpd_irq,
diff --git a/drivers/media/cec/platform/meson/ao-cec.c b/drivers/media/cec/platform/meson/ao-cec.c
index f6f51a34f7bd..494738daf09a 100644
--- a/drivers/media/cec/platform/meson/ao-cec.c
+++ b/drivers/media/cec/platform/meson/ao-cec.c
@@ -717,7 +717,7 @@ static struct platform_driver meson_ao_cec_driver = {
.remove_new = meson_ao_cec_remove,
.driver = {
.name = "meson-ao-cec",
- .of_match_table = of_match_ptr(meson_ao_cec_of_match),
+ .of_match_table = meson_ao_cec_of_match,
},
};
diff --git a/drivers/media/cec/platform/stm32/stm32-cec.c b/drivers/media/cec/platform/stm32/stm32-cec.c
index ada3d153362a..bda9d254041a 100644
--- a/drivers/media/cec/platform/stm32/stm32-cec.c
+++ b/drivers/media/cec/platform/stm32/stm32-cec.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/media/cec/platform/tegra/tegra_cec.c b/drivers/media/cec/platform/tegra/tegra_cec.c
index 04dc06e3c42a..7c1022cee1e8 100644
--- a/drivers/media/cec/platform/tegra/tegra_cec.c
+++ b/drivers/media/cec/platform/tegra/tegra_cec.c
@@ -348,8 +348,8 @@ static int tegra_cec_probe(struct platform_device *pdev)
cec->tegra_cec_irq = platform_get_irq(pdev, 0);
- if (cec->tegra_cec_irq <= 0)
- return -EBUSY;
+ if (cec->tegra_cec_irq < 0)
+ return cec->tegra_cec_irq;
cec->cec_base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
@@ -462,7 +462,7 @@ static const struct of_device_id tegra_cec_of_match[] = {
static struct platform_driver tegra_cec_driver = {
.driver = {
.name = TEGRA_CEC_NAME,
- .of_match_table = of_match_ptr(tegra_cec_of_match),
+ .of_match_table = tegra_cec_of_match,
},
.probe = tegra_cec_probe,
.remove_new = tegra_cec_remove,
diff --git a/drivers/media/common/siano/smsdvb-debugfs.c b/drivers/media/common/siano/smsdvb-debugfs.c
index 8916bb644756..e0beefd80d7b 100644
--- a/drivers/media/common/siano/smsdvb-debugfs.c
+++ b/drivers/media/common/siano/smsdvb-debugfs.c
@@ -45,89 +45,48 @@ static void smsdvb_print_dvb_stats(struct smsdvb_debugfs *debug_data,
buf = debug_data->stats_data;
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "is_rf_locked = %d\n", p->is_rf_locked);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "is_demod_locked = %d\n", p->is_demod_locked);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "is_external_lna_on = %d\n", p->is_external_lna_on);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "SNR = %d\n", p->SNR);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "ber = %d\n", p->ber);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "FIB_CRC = %d\n", p->FIB_CRC);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "ts_per = %d\n", p->ts_per);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "MFER = %d\n", p->MFER);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "RSSI = %d\n", p->RSSI);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "in_band_pwr = %d\n", p->in_band_pwr);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "carrier_offset = %d\n", p->carrier_offset);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "modem_state = %d\n", p->modem_state);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "frequency = %d\n", p->frequency);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "bandwidth = %d\n", p->bandwidth);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "transmission_mode = %d\n", p->transmission_mode);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "modem_state = %d\n", p->modem_state);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "guard_interval = %d\n", p->guard_interval);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "code_rate = %d\n", p->code_rate);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "lp_code_rate = %d\n", p->lp_code_rate);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "hierarchy = %d\n", p->hierarchy);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "constellation = %d\n", p->constellation);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "burst_size = %d\n", p->burst_size);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "burst_duration = %d\n", p->burst_duration);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "burst_cycle_time = %d\n", p->burst_cycle_time);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "calc_burst_cycle_time = %d\n",
- p->calc_burst_cycle_time);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "num_of_rows = %d\n", p->num_of_rows);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "num_of_padd_cols = %d\n", p->num_of_padd_cols);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "num_of_punct_cols = %d\n", p->num_of_punct_cols);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "error_ts_packets = %d\n", p->error_ts_packets);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "total_ts_packets = %d\n", p->total_ts_packets);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "num_of_valid_mpe_tlbs = %d\n", p->num_of_valid_mpe_tlbs);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "num_of_invalid_mpe_tlbs = %d\n", p->num_of_invalid_mpe_tlbs);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "num_of_corrected_mpe_tlbs = %d\n", p->num_of_corrected_mpe_tlbs);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "ber_error_count = %d\n", p->ber_error_count);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "ber_bit_count = %d\n", p->ber_bit_count);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "sms_to_host_tx_errors = %d\n", p->sms_to_host_tx_errors);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "pre_ber = %d\n", p->pre_ber);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "cell_id = %d\n", p->cell_id);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "dvbh_srv_ind_hp = %d\n", p->dvbh_srv_ind_hp);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "dvbh_srv_ind_lp = %d\n", p->dvbh_srv_ind_lp);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "num_mpe_received = %d\n", p->num_mpe_received);
+ n += sysfs_emit_at(buf, n, "is_rf_locked = %d\n", p->is_rf_locked);
+ n += sysfs_emit_at(buf, n, "is_demod_locked = %d\n", p->is_demod_locked);
+ n += sysfs_emit_at(buf, n, "is_external_lna_on = %d\n", p->is_external_lna_on);
+ n += sysfs_emit_at(buf, n, "SNR = %d\n", p->SNR);
+ n += sysfs_emit_at(buf, n, "ber = %d\n", p->ber);
+ n += sysfs_emit_at(buf, n, "FIB_CRC = %d\n", p->FIB_CRC);
+ n += sysfs_emit_at(buf, n, "ts_per = %d\n", p->ts_per);
+ n += sysfs_emit_at(buf, n, "MFER = %d\n", p->MFER);
+ n += sysfs_emit_at(buf, n, "RSSI = %d\n", p->RSSI);
+ n += sysfs_emit_at(buf, n, "in_band_pwr = %d\n", p->in_band_pwr);
+ n += sysfs_emit_at(buf, n, "carrier_offset = %d\n", p->carrier_offset);
+ n += sysfs_emit_at(buf, n, "modem_state = %d\n", p->modem_state);
+ n += sysfs_emit_at(buf, n, "frequency = %d\n", p->frequency);
+ n += sysfs_emit_at(buf, n, "bandwidth = %d\n", p->bandwidth);
+ n += sysfs_emit_at(buf, n, "transmission_mode = %d\n", p->transmission_mode);
+ n += sysfs_emit_at(buf, n, "modem_state = %d\n", p->modem_state);
+ n += sysfs_emit_at(buf, n, "guard_interval = %d\n", p->guard_interval);
+ n += sysfs_emit_at(buf, n, "code_rate = %d\n", p->code_rate);
+ n += sysfs_emit_at(buf, n, "lp_code_rate = %d\n", p->lp_code_rate);
+ n += sysfs_emit_at(buf, n, "hierarchy = %d\n", p->hierarchy);
+ n += sysfs_emit_at(buf, n, "constellation = %d\n", p->constellation);
+ n += sysfs_emit_at(buf, n, "burst_size = %d\n", p->burst_size);
+ n += sysfs_emit_at(buf, n, "burst_duration = %d\n", p->burst_duration);
+ n += sysfs_emit_at(buf, n, "burst_cycle_time = %d\n", p->burst_cycle_time);
+ n += sysfs_emit_at(buf, n, "calc_burst_cycle_time = %d\n", p->calc_burst_cycle_time);
+ n += sysfs_emit_at(buf, n, "num_of_rows = %d\n", p->num_of_rows);
+ n += sysfs_emit_at(buf, n, "num_of_padd_cols = %d\n", p->num_of_padd_cols);
+ n += sysfs_emit_at(buf, n, "num_of_punct_cols = %d\n", p->num_of_punct_cols);
+ n += sysfs_emit_at(buf, n, "error_ts_packets = %d\n", p->error_ts_packets);
+ n += sysfs_emit_at(buf, n, "total_ts_packets = %d\n", p->total_ts_packets);
+ n += sysfs_emit_at(buf, n, "num_of_valid_mpe_tlbs = %d\n", p->num_of_valid_mpe_tlbs);
+ n += sysfs_emit_at(buf, n, "num_of_invalid_mpe_tlbs = %d\n", p->num_of_invalid_mpe_tlbs);
+ n += sysfs_emit_at(buf, n, "num_of_corrected_mpe_tlbs = %d\n",
+ p->num_of_corrected_mpe_tlbs);
+ n += sysfs_emit_at(buf, n, "ber_error_count = %d\n", p->ber_error_count);
+ n += sysfs_emit_at(buf, n, "ber_bit_count = %d\n", p->ber_bit_count);
+ n += sysfs_emit_at(buf, n, "sms_to_host_tx_errors = %d\n", p->sms_to_host_tx_errors);
+ n += sysfs_emit_at(buf, n, "pre_ber = %d\n", p->pre_ber);
+ n += sysfs_emit_at(buf, n, "cell_id = %d\n", p->cell_id);
+ n += sysfs_emit_at(buf, n, "dvbh_srv_ind_hp = %d\n", p->dvbh_srv_ind_hp);
+ n += sysfs_emit_at(buf, n, "dvbh_srv_ind_lp = %d\n", p->dvbh_srv_ind_lp);
+ n += sysfs_emit_at(buf, n, "num_mpe_received = %d\n", p->num_mpe_received);
debug_data->stats_count = n;
spin_unlock(&debug_data->lock);
@@ -148,78 +107,49 @@ static void smsdvb_print_isdb_stats(struct smsdvb_debugfs *debug_data,
buf = debug_data->stats_data;
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "statistics_type = %d\t", p->statistics_type);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "full_size = %d\n", p->full_size);
-
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "is_rf_locked = %d\t\t", p->is_rf_locked);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "is_demod_locked = %d\t", p->is_demod_locked);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "is_external_lna_on = %d\n", p->is_external_lna_on);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "SNR = %d dB\t\t", p->SNR);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "RSSI = %d dBm\t\t", p->RSSI);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "in_band_pwr = %d dBm\n", p->in_band_pwr);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "carrier_offset = %d\t", p->carrier_offset);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "bandwidth = %d\t\t", p->bandwidth);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "frequency = %d Hz\n", p->frequency);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "transmission_mode = %d\t", p->transmission_mode);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "modem_state = %d\t\t", p->modem_state);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "guard_interval = %d\n", p->guard_interval);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "system_type = %d\t\t", p->system_type);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "partial_reception = %d\t", p->partial_reception);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "num_of_layers = %d\n", p->num_of_layers);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "sms_to_host_tx_errors = %d\n", p->sms_to_host_tx_errors);
+ n += sysfs_emit_at(buf, n, "statistics_type = %d\t", p->statistics_type);
+ n += sysfs_emit_at(buf, n, "full_size = %d\n", p->full_size);
+
+ n += sysfs_emit_at(buf, n, "is_rf_locked = %d\t\t", p->is_rf_locked);
+ n += sysfs_emit_at(buf, n, "is_demod_locked = %d\t", p->is_demod_locked);
+ n += sysfs_emit_at(buf, n, "is_external_lna_on = %d\n", p->is_external_lna_on);
+ n += sysfs_emit_at(buf, n, "SNR = %d dB\t\t", p->SNR);
+ n += sysfs_emit_at(buf, n, "RSSI = %d dBm\t\t", p->RSSI);
+ n += sysfs_emit_at(buf, n, "in_band_pwr = %d dBm\n", p->in_band_pwr);
+ n += sysfs_emit_at(buf, n, "carrier_offset = %d\t", p->carrier_offset);
+ n += sysfs_emit_at(buf, n, "bandwidth = %d\t\t", p->bandwidth);
+ n += sysfs_emit_at(buf, n, "frequency = %d Hz\n", p->frequency);
+ n += sysfs_emit_at(buf, n, "transmission_mode = %d\t", p->transmission_mode);
+ n += sysfs_emit_at(buf, n, "modem_state = %d\t\t", p->modem_state);
+ n += sysfs_emit_at(buf, n, "guard_interval = %d\n", p->guard_interval);
+ n += sysfs_emit_at(buf, n, "system_type = %d\t\t", p->system_type);
+ n += sysfs_emit_at(buf, n, "partial_reception = %d\t", p->partial_reception);
+ n += sysfs_emit_at(buf, n, "num_of_layers = %d\n", p->num_of_layers);
+ n += sysfs_emit_at(buf, n, "sms_to_host_tx_errors = %d\n", p->sms_to_host_tx_errors);
for (i = 0; i < 3; i++) {
if (p->layer_info[i].number_of_segments < 1 ||
p->layer_info[i].number_of_segments > 13)
continue;
- n += scnprintf(&buf[n], PAGE_SIZE - n, "\nLayer %d\n", i);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "\tcode_rate = %d\t",
- p->layer_info[i].code_rate);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "constellation = %d\n",
- p->layer_info[i].constellation);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "\tber = %-5d\t",
- p->layer_info[i].ber);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "\tber_error_count = %-5d\t",
- p->layer_info[i].ber_error_count);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "ber_bit_count = %-5d\n",
- p->layer_info[i].ber_bit_count);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "\tpre_ber = %-5d\t",
- p->layer_info[i].pre_ber);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "\tts_per = %-5d\n",
- p->layer_info[i].ts_per);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "\terror_ts_packets = %-5d\t",
- p->layer_info[i].error_ts_packets);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "total_ts_packets = %-5d\t",
- p->layer_info[i].total_ts_packets);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "ti_ldepth_i = %d\n",
- p->layer_info[i].ti_ldepth_i);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "\tnumber_of_segments = %d\t",
- p->layer_info[i].number_of_segments);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "tmcc_errors = %d\n",
- p->layer_info[i].tmcc_errors);
+ n += sysfs_emit_at(buf, n, "\nLayer %d\n", i);
+ n += sysfs_emit_at(buf, n, "\tcode_rate = %d\t", p->layer_info[i].code_rate);
+ n += sysfs_emit_at(buf, n, "constellation = %d\n", p->layer_info[i].constellation);
+ n += sysfs_emit_at(buf, n, "\tber = %-5d\t", p->layer_info[i].ber);
+ n += sysfs_emit_at(buf, n, "\tber_error_count = %-5d\t",
+ p->layer_info[i].ber_error_count);
+ n += sysfs_emit_at(buf, n, "ber_bit_count = %-5d\n",
+ p->layer_info[i].ber_bit_count);
+ n += sysfs_emit_at(buf, n, "\tpre_ber = %-5d\t", p->layer_info[i].pre_ber);
+ n += sysfs_emit_at(buf, n, "\tts_per = %-5d\n", p->layer_info[i].ts_per);
+ n += sysfs_emit_at(buf, n, "\terror_ts_packets = %-5d\t",
+ p->layer_info[i].error_ts_packets);
+ n += sysfs_emit_at(buf, n, "total_ts_packets = %-5d\t",
+ p->layer_info[i].total_ts_packets);
+ n += sysfs_emit_at(buf, n, "ti_ldepth_i = %d\n", p->layer_info[i].ti_ldepth_i);
+ n += sysfs_emit_at(buf, n, "\tnumber_of_segments = %d\t",
+ p->layer_info[i].number_of_segments);
+ n += sysfs_emit_at(buf, n, "tmcc_errors = %d\n", p->layer_info[i].tmcc_errors);
}
debug_data->stats_count = n;
@@ -241,80 +171,50 @@ static void smsdvb_print_isdb_stats_ex(struct smsdvb_debugfs *debug_data,
buf = debug_data->stats_data;
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "statistics_type = %d\t", p->statistics_type);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "full_size = %d\n", p->full_size);
-
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "is_rf_locked = %d\t\t", p->is_rf_locked);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "is_demod_locked = %d\t", p->is_demod_locked);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "is_external_lna_on = %d\n", p->is_external_lna_on);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "SNR = %d dB\t\t", p->SNR);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "RSSI = %d dBm\t\t", p->RSSI);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "in_band_pwr = %d dBm\n", p->in_band_pwr);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "carrier_offset = %d\t", p->carrier_offset);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "bandwidth = %d\t\t", p->bandwidth);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "frequency = %d Hz\n", p->frequency);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "transmission_mode = %d\t", p->transmission_mode);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "modem_state = %d\t\t", p->modem_state);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "guard_interval = %d\n", p->guard_interval);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "system_type = %d\t\t", p->system_type);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "partial_reception = %d\t", p->partial_reception);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "num_of_layers = %d\n", p->num_of_layers);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "segment_number = %d\t",
- p->segment_number);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "tune_bw = %d\n",
- p->tune_bw);
+ n += sysfs_emit_at(buf, n, "statistics_type = %d\t", p->statistics_type);
+ n += sysfs_emit_at(buf, n, "full_size = %d\n", p->full_size);
+
+ n += sysfs_emit_at(buf, n, "is_rf_locked = %d\t\t", p->is_rf_locked);
+ n += sysfs_emit_at(buf, n, "is_demod_locked = %d\t", p->is_demod_locked);
+ n += sysfs_emit_at(buf, n, "is_external_lna_on = %d\n", p->is_external_lna_on);
+ n += sysfs_emit_at(buf, n, "SNR = %d dB\t\t", p->SNR);
+ n += sysfs_emit_at(buf, n, "RSSI = %d dBm\t\t", p->RSSI);
+ n += sysfs_emit_at(buf, n, "in_band_pwr = %d dBm\n", p->in_band_pwr);
+ n += sysfs_emit_at(buf, n, "carrier_offset = %d\t", p->carrier_offset);
+ n += sysfs_emit_at(buf, n, "bandwidth = %d\t\t", p->bandwidth);
+ n += sysfs_emit_at(buf, n, "frequency = %d Hz\n", p->frequency);
+ n += sysfs_emit_at(buf, n, "transmission_mode = %d\t", p->transmission_mode);
+ n += sysfs_emit_at(buf, n, "modem_state = %d\t\t", p->modem_state);
+ n += sysfs_emit_at(buf, n, "guard_interval = %d\n", p->guard_interval);
+ n += sysfs_emit_at(buf, n, "system_type = %d\t\t", p->system_type);
+ n += sysfs_emit_at(buf, n, "partial_reception = %d\t", p->partial_reception);
+ n += sysfs_emit_at(buf, n, "num_of_layers = %d\n", p->num_of_layers);
+ n += sysfs_emit_at(buf, n, "segment_number = %d\t", p->segment_number);
+ n += sysfs_emit_at(buf, n, "tune_bw = %d\n", p->tune_bw);
for (i = 0; i < 3; i++) {
if (p->layer_info[i].number_of_segments < 1 ||
p->layer_info[i].number_of_segments > 13)
continue;
- n += scnprintf(&buf[n], PAGE_SIZE - n, "\nLayer %d\n", i);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "\tcode_rate = %d\t",
- p->layer_info[i].code_rate);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "constellation = %d\n",
- p->layer_info[i].constellation);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "\tber = %-5d\t",
- p->layer_info[i].ber);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "\tber_error_count = %-5d\t",
- p->layer_info[i].ber_error_count);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "ber_bit_count = %-5d\n",
- p->layer_info[i].ber_bit_count);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "\tpre_ber = %-5d\t",
- p->layer_info[i].pre_ber);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "\tts_per = %-5d\n",
- p->layer_info[i].ts_per);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "\terror_ts_packets = %-5d\t",
- p->layer_info[i].error_ts_packets);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "total_ts_packets = %-5d\t",
- p->layer_info[i].total_ts_packets);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "ti_ldepth_i = %d\n",
- p->layer_info[i].ti_ldepth_i);
- n += scnprintf(&buf[n], PAGE_SIZE - n,
- "\tnumber_of_segments = %d\t",
- p->layer_info[i].number_of_segments);
- n += scnprintf(&buf[n], PAGE_SIZE - n, "tmcc_errors = %d\n",
- p->layer_info[i].tmcc_errors);
+ n += sysfs_emit_at(buf, n, "\nLayer %d\n", i);
+ n += sysfs_emit_at(buf, n, "\tcode_rate = %d\t", p->layer_info[i].code_rate);
+ n += sysfs_emit_at(buf, n, "constellation = %d\n", p->layer_info[i].constellation);
+ n += sysfs_emit_at(buf, n, "\tber = %-5d\t", p->layer_info[i].ber);
+ n += sysfs_emit_at(buf, n, "\tber_error_count = %-5d\t",
+ p->layer_info[i].ber_error_count);
+ n += sysfs_emit_at(buf, n, "ber_bit_count = %-5d\n",
+ p->layer_info[i].ber_bit_count);
+ n += sysfs_emit_at(buf, n, "\tpre_ber = %-5d\t", p->layer_info[i].pre_ber);
+ n += sysfs_emit_at(buf, n, "\tts_per = %-5d\n", p->layer_info[i].ts_per);
+ n += sysfs_emit_at(buf, n, "\terror_ts_packets = %-5d\t",
+ p->layer_info[i].error_ts_packets);
+ n += sysfs_emit_at(buf, n, "total_ts_packets = %-5d\t",
+ p->layer_info[i].total_ts_packets);
+ n += sysfs_emit_at(buf, n, "ti_ldepth_i = %d\n", p->layer_info[i].ti_ldepth_i);
+ n += sysfs_emit_at(buf, n, "\tnumber_of_segments = %d\t",
+ p->layer_info[i].number_of_segments);
+ n += sysfs_emit_at(buf, n, "tmcc_errors = %d\n", p->layer_info[i].tmcc_errors);
}
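The siano debugfs rewrite above swaps the scnprintf(&buf[n], PAGE_SIZE - n, ...) bookkeeping for sysfs_emit_at(buf, n, ...), which appends at the given offset and bounds the output to one page on its own; it expects a page-sized, page-aligned buffer. A minimal sketch of the pattern, with made-up field values and an illustrative function name:

#include <linux/sysfs.h>

static int example_fill(char *page_buf)
{
	int n = 0;

	/* page_buf is assumed to be a page-aligned, PAGE_SIZE buffer. */
	n += sysfs_emit_at(page_buf, n, "SNR = %d\n", 42);
	n += sysfs_emit_at(page_buf, n, "RSSI = %d\n", -70);

	return n;	/* bytes written so far, never more than PAGE_SIZE */
}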
diff --git a/drivers/media/common/siano/smsendian.c b/drivers/media/common/siano/smsendian.c
index 8cb8853a1edb..a3573814919b 100644
--- a/drivers/media/common/siano/smsendian.c
+++ b/drivers/media/common/siano/smsendian.c
@@ -17,7 +17,7 @@
void smsendian_handle_tx_message(void *buffer)
{
#ifdef __BIG_ENDIAN
- struct sms_msg_data *msg = (struct sms_msg_data *)buffer;
+ struct sms_msg_data *msg = buffer;
int i;
int msg_words;
diff --git a/drivers/media/dvb-frontends/cx24120.c b/drivers/media/dvb-frontends/cx24120.c
index d8acd582c711..0f778660c72b 100644
--- a/drivers/media/dvb-frontends/cx24120.c
+++ b/drivers/media/dvb-frontends/cx24120.c
@@ -973,7 +973,9 @@ static void cx24120_set_clock_ratios(struct dvb_frontend *fe)
cmd.arg[8] = (clock_ratios_table[idx].rate >> 8) & 0xff;
cmd.arg[9] = (clock_ratios_table[idx].rate >> 0) & 0xff;
- cx24120_message_send(state, &cmd);
+ ret = cx24120_message_send(state, &cmd);
+ if (ret != 0)
+ return;
/* Calculate ber window rates for stat work */
cx24120_calculate_ber_window(state, clock_ratios_table[idx].rate);
diff --git a/drivers/media/dvb-frontends/dib7000p.c b/drivers/media/dvb-frontends/dib7000p.c
index b791e687d2e2..9273758bf140 100644
--- a/drivers/media/dvb-frontends/dib7000p.c
+++ b/drivers/media/dvb-frontends/dib7000p.c
@@ -497,7 +497,7 @@ static int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth
prediv = reg_1856 & 0x3f;
loopdiv = (reg_1856 >> 6) & 0x3f;
- if ((bw != NULL) && (bw->pll_prediv != prediv || bw->pll_ratio != loopdiv)) {
+ if (loopdiv && bw && (bw->pll_prediv != prediv || bw->pll_ratio != loopdiv)) {
dprintk("Updating pll (prediv: old = %d new = %d ; loopdiv : old = %d new = %d)\n", prediv, bw->pll_prediv, loopdiv, bw->pll_ratio);
reg_1856 &= 0xf000;
reg_1857 = dib7000p_read_word(state, 1857);
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 6ad4f202f1bf..2770baebbbbc 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -229,13 +229,8 @@ static int i2c_write(struct drxk_state *state, u8 adr, u8 *data, int len)
struct i2c_msg msg = {
.addr = adr, .flags = 0, .buf = data, .len = len };
- dprintk(3, ":");
- if (debug > 2) {
- int i;
- for (i = 0; i < len; i++)
- pr_cont(" %02x", data[i]);
- pr_cont("\n");
- }
+ dprintk(3, ": %*ph\n", len, data);
+
status = drxk_i2c_transfer(state, &msg, 1);
if (status >= 0 && status != 1)
status = -EIO;
@@ -267,16 +262,7 @@ static int i2c_read(struct drxk_state *state,
pr_err("i2c read error at addr 0x%02x\n", adr);
return status;
}
- if (debug > 2) {
- int i;
- dprintk(2, ": read from");
- for (i = 0; i < len; i++)
- pr_cont(" %02x", msg[i]);
- pr_cont(", value = ");
- for (i = 0; i < alen; i++)
- pr_cont(" %02x", answ[i]);
- pr_cont("\n");
- }
+ dprintk(3, ": read from %*ph, value = %*ph\n", len, msg, alen, answ);
return 0;
}
@@ -441,13 +427,8 @@ static int write_block(struct drxk_state *state, u32 address,
}
memcpy(&state->chunk[adr_length], p_block, chunk);
dprintk(2, "(0x%08x, 0x%02x)\n", address, flags);
- if (debug > 1) {
- int i;
- if (p_block)
- for (i = 0; i < chunk; i++)
- pr_cont(" %02x", p_block[i]);
- pr_cont("\n");
- }
+ if (p_block)
+ dprintk(2, "%*ph\n", chunk, p_block);
status = i2c_write(state, state->demod_address,
&state->chunk[0], chunk + adr_length);
if (status < 0) {
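The drxk hunks above rely on the %*ph printk extension, which prints a small buffer (up to 64 bytes) as space-separated hex and replaces the open-coded pr_cont() loops. A minimal usage sketch; the buffer contents are made up:

#include <linux/printk.h>
#include <linux/types.h>

static void example_hexdump(void)
{
	u8 buf[4] = { 0xde, 0xad, 0xbe, 0xef };

	/* The field width carries the byte count; prints "payload = de ad be ef". */
	pr_info("payload = %*ph\n", (int)sizeof(buf), buf);
}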
diff --git a/drivers/media/dvb-frontends/mb86a16.c b/drivers/media/dvb-frontends/mb86a16.c
index d3e29937cf4c..3ec2cb4fa504 100644
--- a/drivers/media/dvb-frontends/mb86a16.c
+++ b/drivers/media/dvb-frontends/mb86a16.c
@@ -1487,10 +1487,12 @@ static int mb86a16_set_fe(struct mb86a16_state *state)
}
}
- mb86a16_read(state, 0x15, &agcval);
- mb86a16_read(state, 0x26, &cnmval);
- dprintk(verbose, MB86A16_INFO, 1, "AGC = %02x CNM = %02x", agcval, cnmval);
-
+ if (mb86a16_read(state, 0x15, &agcval) != 2 || mb86a16_read(state, 0x26, &cnmval) != 2) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ ret = -EREMOTEIO;
+ } else {
+ dprintk(verbose, MB86A16_INFO, 1, "AGC = %02x CNM = %02x", agcval, cnmval);
+ }
return ret;
}
diff --git a/drivers/media/dvb-frontends/mn88443x.c b/drivers/media/dvb-frontends/mn88443x.c
index db2921c736af..7a58f53ab999 100644
--- a/drivers/media/dvb-frontends/mn88443x.c
+++ b/drivers/media/dvb-frontends/mn88443x.c
@@ -8,7 +8,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/int_log.h>
diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
index 71991f8638e6..a36c28412170 100644
--- a/drivers/media/firewire/firedtv-avc.c
+++ b/drivers/media/firewire/firedtv-avc.c
@@ -597,7 +597,8 @@ int avc_tuner_dsd(struct firedtv *fdtv,
case FIREDTV_DVB_C: pos = avc_tuner_dsd_dvb_c(fdtv, p); break;
case FIREDTV_DVB_T: pos = avc_tuner_dsd_dvb_t(fdtv, p); break;
default:
- BUG();
+ ret = -EIO;
+ goto unlock;
}
pad_operands(c, pos);
@@ -612,6 +613,7 @@ int avc_tuner_dsd(struct firedtv *fdtv,
if (status)
*status = r->operand[2];
#endif
+unlock:
mutex_unlock(&fdtv->avc_mutex);
if (ret == 0)
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 226454b6a90d..74ff833ff48c 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -25,8 +25,15 @@ config VIDEO_IR_I2C
# V4L2 I2C drivers that are related with Camera support
#
-menu "Camera sensor devices"
- visible if MEDIA_CAMERA_SUPPORT
+menuconfig VIDEO_CAMERA_SENSOR
+ bool "Camera sensor devices"
+ depends on MEDIA_CAMERA_SUPPORT && I2C
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ default y
+
+if VIDEO_CAMERA_SENSOR
config VIDEO_APTINA_PLL
tristate
@@ -36,10 +43,6 @@ config VIDEO_CCS_PLL
config VIDEO_AR0521
tristate "ON Semiconductor AR0521 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the ON Semiconductor
AR0521 camera.
@@ -49,10 +52,6 @@ config VIDEO_AR0521
config VIDEO_HI556
tristate "Hynix Hi-556 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Hynix
Hi-556 camera.
@@ -62,10 +61,6 @@ config VIDEO_HI556
config VIDEO_HI846
tristate "Hynix Hi-846 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Hynix
Hi-846 camera.
@@ -75,10 +70,6 @@ config VIDEO_HI846
config VIDEO_HI847
tristate "Hynix Hi-847 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Hynix
Hi-847 camera.
@@ -88,10 +79,6 @@ config VIDEO_HI847
config VIDEO_IMX208
tristate "Sony IMX208 sensor support"
- depends on I2C && VIDEO_DEV
- depends on MEDIA_CAMERA_SUPPORT
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
help
This is a Video4Linux2 sensor driver for the Sony
IMX208 camera.
@@ -101,10 +88,7 @@ config VIDEO_IMX208
config VIDEO_IMX214
tristate "Sony IMX214 sensor support"
- depends on GPIOLIB && I2C && VIDEO_DEV
- select V4L2_FWNODE
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
+ depends on GPIOLIB
select REGMAP_I2C
help
This is a Video4Linux2 sensor driver for the Sony
@@ -115,10 +99,6 @@ config VIDEO_IMX214
config VIDEO_IMX219
tristate "Sony IMX219 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Sony
IMX219 camera.
@@ -128,9 +108,6 @@ config VIDEO_IMX219
config VIDEO_IMX258
tristate "Sony IMX258 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
help
This is a Video4Linux2 sensor driver for the Sony
IMX258 camera.
@@ -140,9 +117,6 @@ config VIDEO_IMX258
config VIDEO_IMX274
tristate "Sony IMX274 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
select REGMAP_I2C
help
This is a V4L2 sensor driver for the Sony IMX274
@@ -150,11 +124,8 @@ config VIDEO_IMX274
config VIDEO_IMX290
tristate "Sony IMX290 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
select REGMAP_I2C
- select V4L2_FWNODE
+ select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the Sony
IMX290 camera sensor.
@@ -164,10 +135,6 @@ config VIDEO_IMX290
config VIDEO_IMX296
tristate "Sony IMX296 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select V4L2_FWNODE
- select VIDEO_V4L2_SUBDEV_API
help
This is a Video4Linux2 sensor driver for the Sony
IMX296 camera.
@@ -177,9 +144,6 @@ config VIDEO_IMX296
config VIDEO_IMX319
tristate "Sony IMX319 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
help
This is a Video4Linux2 sensor driver for the Sony
IMX319 camera.
@@ -190,10 +154,6 @@ config VIDEO_IMX319
config VIDEO_IMX334
tristate "Sony IMX334 sensor support"
depends on OF_GPIO
- depends on I2C && VIDEO_DEV
- select VIDEO_V4L2_SUBDEV_API
- select MEDIA_CONTROLLER
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Sony
IMX334 camera.
@@ -204,10 +164,6 @@ config VIDEO_IMX334
config VIDEO_IMX335
tristate "Sony IMX335 sensor support"
depends on OF_GPIO
- depends on I2C && VIDEO_DEV
- select VIDEO_V4L2_SUBDEV_API
- select MEDIA_CONTROLLER
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Sony
IMX335 camera.
@@ -217,9 +173,6 @@ config VIDEO_IMX335
config VIDEO_IMX355
tristate "Sony IMX355 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
help
This is a Video4Linux2 sensor driver for the Sony
IMX355 camera.
@@ -230,10 +183,6 @@ config VIDEO_IMX355
config VIDEO_IMX412
tristate "Sony IMX412 sensor support"
depends on OF_GPIO
- depends on I2C && VIDEO_DEV
- select VIDEO_V4L2_SUBDEV_API
- select MEDIA_CONTROLLER
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Sony
IMX412 camera.
@@ -244,10 +193,6 @@ config VIDEO_IMX412
config VIDEO_IMX415
tristate "Sony IMX415 sensor support"
depends on OF_GPIO
- depends on I2C && VIDEO_DEV
- select VIDEO_V4L2_SUBDEV_API
- select MEDIA_CONTROLLER
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Sony
IMX415 camera.
@@ -260,35 +205,25 @@ config VIDEO_MAX9271_LIB
config VIDEO_MT9M001
tristate "mt9m001 support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
help
This driver supports MT9M001 cameras from Micron, monochrome
and colour models.
config VIDEO_MT9M111
tristate "mt9m111, mt9m112 and mt9m131 support"
- depends on I2C && VIDEO_DEV
- select V4L2_FWNODE
help
This driver supports MT9M111, MT9M112 and MT9M131 cameras from
Micron/Aptina
config VIDEO_MT9P031
tristate "Aptina MT9P031 support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
select VIDEO_APTINA_PLL
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Aptina
(Micron) mt9p031 5 Mpixel camera.
config VIDEO_MT9T112
tristate "Aptina MT9T111/MT9T112 support"
- depends on I2C && VIDEO_DEV
help
This is a Video4Linux2 sensor driver for the Aptina
(Micron) MT9T111 and MT9T112 3 Mpixel camera.
@@ -298,7 +233,6 @@ config VIDEO_MT9T112
config VIDEO_MT9V011
tristate "Micron mt9v011 sensor support"
- depends on I2C && VIDEO_DEV
help
This is a Video4Linux2 sensor driver for the Micron
mt9v011 1.3 Mpixel camera. It currently only works with the
@@ -306,18 +240,13 @@ config VIDEO_MT9V011
config VIDEO_MT9V032
tristate "Micron MT9V032 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
select REGMAP_I2C
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Micron
MT9V032 752x480 CMOS sensor.
config VIDEO_MT9V111
tristate "Aptina MT9V111 sensor support"
- depends on I2C && VIDEO_DEV
help
This is a Video4Linux2 sensor driver for the Aptina/Micron
MT9V111 sensor.
@@ -327,10 +256,6 @@ config VIDEO_MT9V111
config VIDEO_OG01A1B
tristate "OmniVision OG01A1B sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OG01A1B camera.
@@ -340,10 +265,6 @@ config VIDEO_OG01A1B
config VIDEO_OV01A10
tristate "OmniVision OV01A10 sensor support"
- depends on VIDEO_DEV && I2C
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV01A10 camera.
@@ -353,10 +274,6 @@ config VIDEO_OV01A10
config VIDEO_OV02A10
tristate "OmniVision OV02A10 sensor support"
- depends on VIDEO_DEV && I2C
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV02A10 camera.
@@ -366,10 +283,6 @@ config VIDEO_OV02A10
config VIDEO_OV08D10
tristate "OmniVision OV08D10 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV08D10 camera sensor.
@@ -379,10 +292,6 @@ config VIDEO_OV08D10
config VIDEO_OV08X40
tristate "OmniVision OV08X40 sensor support"
- depends on VIDEO_DEV && I2C
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV08X40 camera.
@@ -392,28 +301,18 @@ config VIDEO_OV08X40
config VIDEO_OV13858
tristate "OmniVision OV13858 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV13858 camera.
config VIDEO_OV13B10
tristate "OmniVision OV13B10 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV13B10 camera.
config VIDEO_OV2640
tristate "OmniVision OV2640 sensor support"
- depends on VIDEO_DEV && I2C
- select V4L2_ASYNC
help
This is a Video4Linux2 sensor driver for the OmniVision
OV2640 camera.
@@ -423,8 +322,7 @@ config VIDEO_OV2640
config VIDEO_OV2659
tristate "OmniVision OV2659 sensor support"
- depends on VIDEO_DEV && I2C && GPIOLIB
- select V4L2_FWNODE
+ depends on GPIOLIB
help
This is a Video4Linux2 sensor driver for the OmniVision
OV2659 camera.
@@ -434,9 +332,7 @@ config VIDEO_OV2659
config VIDEO_OV2680
tristate "OmniVision OV2680 sensor support"
- depends on VIDEO_DEV && I2C
- select MEDIA_CONTROLLER
- select V4L2_FWNODE
+ select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the OmniVision
OV2680 camera.
@@ -446,10 +342,6 @@ config VIDEO_OV2680
config VIDEO_OV2685
tristate "OmniVision OV2685 sensor support"
- depends on VIDEO_DEV && I2C
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV2685 camera.
@@ -459,11 +351,7 @@ config VIDEO_OV2685
config VIDEO_OV2740
tristate "OmniVision OV2740 sensor support"
- depends on VIDEO_DEV && I2C
depends on ACPI || COMPILE_TEST
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
select REGMAP_I2C
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -474,10 +362,7 @@ config VIDEO_OV2740
config VIDEO_OV4689
tristate "OmniVision OV4689 sensor support"
- depends on GPIOLIB && VIDEO_DEV && I2C
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
+ depends on GPIOLIB
help
This is a Video4Linux2 sensor-level driver for the OmniVision
OV4689 camera.
@@ -488,10 +373,7 @@ config VIDEO_OV4689
config VIDEO_OV5640
tristate "OmniVision OV5640 sensor support"
depends on OF
- depends on GPIOLIB && VIDEO_DEV && I2C
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
+ depends on GPIOLIB
help
This is a Video4Linux2 sensor driver for the Omnivision
OV5640 camera sensor with a MIPI CSI-2 interface.
@@ -499,10 +381,6 @@ config VIDEO_OV5640
config VIDEO_OV5645
tristate "OmniVision OV5645 sensor support"
depends on OF
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV5645 camera.
@@ -512,10 +390,6 @@ config VIDEO_OV5645
config VIDEO_OV5647
tristate "OmniVision OV5647 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV5647 camera.
@@ -525,10 +399,7 @@ config VIDEO_OV5647
config VIDEO_OV5648
tristate "OmniVision OV5648 sensor support"
- depends on I2C && PM && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
+ depends on PM
help
This is a Video4Linux2 sensor driver for the OmniVision
OV5648 camera.
@@ -538,10 +409,6 @@ config VIDEO_OV5648
config VIDEO_OV5670
tristate "OmniVision OV5670 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV5670 camera.
@@ -551,10 +418,6 @@ config VIDEO_OV5670
config VIDEO_OV5675
tristate "OmniVision OV5675 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV5675 camera.
@@ -564,8 +427,7 @@ config VIDEO_OV5675
config VIDEO_OV5693
tristate "OmniVision OV5693 sensor support"
- depends on I2C && VIDEO_DEV
- select V4L2_FWNODE
+ select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the OmniVision
OV5693 camera.
@@ -575,8 +437,6 @@ config VIDEO_OV5693
config VIDEO_OV5695
tristate "OmniVision OV5695 sensor support"
- depends on I2C && VIDEO_DEV
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV5695 camera.
@@ -586,7 +446,6 @@ config VIDEO_OV5695
config VIDEO_OV6650
tristate "OmniVision OV6650 sensor support"
- depends on I2C && VIDEO_DEV
help
This is a Video4Linux2 sensor driver for the OmniVision
OV6650 camera.
@@ -596,10 +455,6 @@ config VIDEO_OV6650
config VIDEO_OV7251
tristate "OmniVision OV7251 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV7251 camera.
@@ -609,7 +464,6 @@ config VIDEO_OV7251
config VIDEO_OV7640
tristate "OmniVision OV7640 sensor support"
- depends on I2C && VIDEO_DEV
help
This is a Video4Linux2 sensor driver for the OmniVision
OV7640 camera.
@@ -619,8 +473,6 @@ config VIDEO_OV7640
config VIDEO_OV7670
tristate "OmniVision OV7670 sensor support"
- depends on I2C && VIDEO_DEV
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV7670 VGA camera. It currently only works with the M88ALP01
@@ -628,9 +480,7 @@ config VIDEO_OV7670
config VIDEO_OV772X
tristate "OmniVision OV772x sensor support"
- depends on I2C && VIDEO_DEV
select REGMAP_SCCB
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV772x camera.
@@ -640,7 +490,6 @@ config VIDEO_OV772X
config VIDEO_OV7740
tristate "OmniVision OV7740 sensor support"
- depends on I2C && VIDEO_DEV
select REGMAP_SCCB
help
This is a Video4Linux2 sensor driver for the OmniVision
@@ -648,10 +497,6 @@ config VIDEO_OV7740
config VIDEO_OV8856
tristate "OmniVision OV8856 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV8856 camera sensor.
@@ -661,10 +506,7 @@ config VIDEO_OV8856
config VIDEO_OV8858
tristate "OmniVision OV8858 sensor support"
- depends on I2C && PM && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
+ depends on PM
help
This is a Video4Linux2 sensor driver for OmniVision
OV8858 camera sensor.
@@ -674,10 +516,7 @@ config VIDEO_OV8858
config VIDEO_OV8865
tristate "OmniVision OV8865 sensor support"
- depends on I2C && PM && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
+ depends on PM
help
This is a Video4Linux2 sensor driver for OmniVision
OV8865 camera sensor.
@@ -688,10 +527,6 @@ config VIDEO_OV8865
config VIDEO_OV9282
tristate "OmniVision OV9282 sensor support"
depends on OF_GPIO
- depends on I2C && VIDEO_DEV
- select VIDEO_V4L2_SUBDEV_API
- select MEDIA_CONTROLLER
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV9282 camera sensor.
@@ -701,16 +536,12 @@ config VIDEO_OV9282
config VIDEO_OV9640
tristate "OmniVision OV9640 sensor support"
- depends on I2C && VIDEO_DEV
help
This is a Video4Linux2 sensor driver for the OmniVision
OV9640 camera sensor.
config VIDEO_OV9650
tristate "OmniVision OV9650/OV9652 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
select REGMAP_SCCB
help
This is a V4L2 sensor driver for the Omnivision
@@ -718,11 +549,7 @@ config VIDEO_OV9650
config VIDEO_OV9734
tristate "OmniVision OV9734 sensor support"
- depends on VIDEO_DEV && I2C
depends on ACPI || COMPILE_TEST
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the OmniVision
OV9734 camera.
@@ -732,10 +559,6 @@ config VIDEO_OV9734
config VIDEO_RDACM20
tristate "IMI RDACM20 camera support"
- depends on I2C
- select V4L2_FWNODE
- select VIDEO_V4L2_SUBDEV_API
- select MEDIA_CONTROLLER
select VIDEO_MAX9271_LIB
help
This driver supports the IMI RDACM20 GMSL camera, used in
@@ -746,10 +569,6 @@ config VIDEO_RDACM20
config VIDEO_RDACM21
tristate "IMI RDACM21 camera support"
- depends on I2C
- select V4L2_FWNODE
- select VIDEO_V4L2_SUBDEV_API
- select MEDIA_CONTROLLER
select VIDEO_MAX9271_LIB
help
This driver supports the IMI RDACM21 GMSL camera, used in
@@ -760,7 +579,6 @@ config VIDEO_RDACM21
config VIDEO_RJ54N1
tristate "Sharp RJ54N1CB0C sensor support"
- depends on I2C && VIDEO_DEV
help
This is a V4L2 sensor driver for Sharp RJ54N1CB0C CMOS image
sensor.
@@ -770,39 +588,26 @@ config VIDEO_RJ54N1
config VIDEO_S5C73M3
tristate "Samsung S5C73M3 sensor support"
- depends on I2C && SPI && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
+ depends on SPI
help
This is a V4L2 sensor driver for Samsung S5C73M3
8 Mpixel camera.
config VIDEO_S5K5BAF
tristate "Samsung S5K5BAF sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a V4L2 sensor driver for Samsung S5K5BAF 2M
camera sensor with an embedded SoC image signal processor.
config VIDEO_S5K6A3
tristate "Samsung S5K6A3 sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
help
This is a V4L2 sensor driver for Samsung S5K6A3 raw
camera sensor.
config VIDEO_ST_VGXY61
tristate "ST VGXY61 sensor support"
- depends on OF && GPIOLIB && VIDEO_DEV && I2C
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
+ depends on OF && GPIOLIB
help
This is a Video4Linux2 sensor driver for the ST VGXY61
camera sensor.
@@ -810,7 +615,7 @@ config VIDEO_ST_VGXY61
source "drivers/media/i2c/ccs/Kconfig"
source "drivers/media/i2c/et8ek8/Kconfig"
-endmenu
+endif
menu "Lens drivers"
visible if MEDIA_CAMERA_SUPPORT
@@ -848,6 +653,18 @@ config VIDEO_DW9714
capability. This is designed for linear control of
voice coil motors, controlled via I2C serial interface.
+config VIDEO_DW9719
+ tristate "DW9719 lens voice coil support"
+ depends on I2C && VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_ASYNC
+ select V4L2_CCI_I2C
+ help
+ This is a driver for the DW9719 camera lens voice coil.
+ This is designed for linear control of voice coil motors,
+ controlled via I2C serial interface.
+
config VIDEO_DW9768
tristate "DW9768 lens voice coil support"
depends on I2C && VIDEO_DEV
@@ -1625,4 +1442,51 @@ config VIDEO_THS7303
endmenu
+#
+# Video serializers and deserializers (e.g. FPD-Link)
+#
+
+menu "Video serializers and deserializers"
+
+config VIDEO_DS90UB913
+ tristate "TI DS90UB913 FPD-Link III Serializer"
+ depends on OF && I2C && VIDEO_DEV && COMMON_CLK
+ select I2C_ATR
+ select MEDIA_CONTROLLER
+ select GPIOLIB
+ select REGMAP_I2C
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Device driver for the Texas Instruments DS90UB913
+ FPD-Link III Serializer.
+
+config VIDEO_DS90UB953
+ tristate "TI FPD-Link III/IV CSI-2 Serializers"
+ depends on OF && I2C && VIDEO_DEV && COMMON_CLK
+ select I2C_ATR
+ select MEDIA_CONTROLLER
+ select GPIOLIB
+ select REGMAP_I2C
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Device driver for the Texas Instruments DS90UB953
+ FPD-Link III Serializer and DS90UB971 FPD-Link IV Serializer.
+
+config VIDEO_DS90UB960
+ tristate "TI FPD-Link III/IV Deserializers"
+ depends on OF && I2C && VIDEO_DEV && COMMON_CLK
+ select I2C_ATR
+ select MEDIA_CONTROLLER
+ select GPIOLIB
+ select REGMAP_I2C
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Device driver for the Texas Instruments DS90UB960
+ FPD-Link III Deserializer and DS90UB9702 FPD-Link IV Deserializer.
+
+endmenu
+
endif # VIDEO_DEV
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index c743aeb5d1ad..80b00d39b48f 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -28,7 +28,11 @@ obj-$(CONFIG_VIDEO_CS3308) += cs3308.o
obj-$(CONFIG_VIDEO_CS5345) += cs5345.o
obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o
obj-$(CONFIG_VIDEO_CX25840) += cx25840/
+obj-$(CONFIG_VIDEO_DS90UB913) += ds90ub913.o
+obj-$(CONFIG_VIDEO_DS90UB953) += ds90ub953.o
+obj-$(CONFIG_VIDEO_DS90UB960) += ds90ub960.o
obj-$(CONFIG_VIDEO_DW9714) += dw9714.o
+obj-$(CONFIG_VIDEO_DW9719) += dw9719.o
obj-$(CONFIG_VIDEO_DW9768) += dw9768.o
obj-$(CONFIG_VIDEO_DW9807_VCM) += dw9807-vcm.o
obj-$(CONFIG_VIDEO_ET8EK8) += et8ek8/
diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c
index 5f605b9be3b1..1543d24f522c 100644
--- a/drivers/media/i2c/ad5820.c
+++ b/drivers/media/i2c/ad5820.c
@@ -349,7 +349,6 @@ static void ad5820_remove(struct i2c_client *client)
static const struct i2c_device_id ad5820_id_table[] = {
{ "ad5820", 0 },
{ "ad5821", 0 },
- { "ad5823", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ad5820_id_table);
@@ -357,7 +356,6 @@ MODULE_DEVICE_TABLE(i2c, ad5820_id_table);
static const struct of_device_id ad5820_of_table[] = {
{ .compatible = "adi,ad5820" },
{ .compatible = "adi,ad5821" },
- { .compatible = "adi,ad5823" },
{ }
};
MODULE_DEVICE_TABLE(of, ad5820_of_table);
diff --git a/drivers/media/i2c/adv748x/adv748x-csi2.c b/drivers/media/i2c/adv748x/adv748x-csi2.c
index bd4f3fe0e309..a5a7cb228896 100644
--- a/drivers/media/i2c/adv748x/adv748x-csi2.c
+++ b/drivers/media/i2c/adv748x/adv748x-csi2.c
@@ -300,9 +300,6 @@ int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx)
MEDIA_ENT_F_VID_IF_BRIDGE,
is_txa(tx) ? "txa" : "txb");
- /* Ensure that matching is based upon the endpoint fwnodes */
- tx->sd.fwnode = of_fwnode_handle(state->endpoints[tx->port]);
-
/* Register internal ops for incremental subdev registration */
tx->sd.internal_ops = &adv748x_csi2_internal_ops;
@@ -314,10 +311,15 @@ int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx)
if (ret)
return ret;
- ret = adv748x_csi2_init_controls(tx);
+ ret = v4l2_async_subdev_endpoint_add(&tx->sd,
+ of_fwnode_handle(state->endpoints[tx->port]));
if (ret)
goto err_free_media;
+ ret = adv748x_csi2_init_controls(tx);
+ if (ret)
+ goto err_cleanup_subdev;
+
ret = v4l2_async_register_subdev(&tx->sd);
if (ret)
goto err_free_ctrl;
@@ -326,6 +328,8 @@ int adv748x_csi2_init(struct adv748x_state *state, struct adv748x_csi2 *tx)
err_free_ctrl:
v4l2_ctrl_handler_free(&tx->ctrl_hdl);
+err_cleanup_subdev:
+ v4l2_subdev_cleanup(&tx->sd);
err_free_media:
media_entity_cleanup(&tx->sd.entity);
@@ -340,4 +344,5 @@ void adv748x_csi2_cleanup(struct adv748x_csi2 *tx)
v4l2_async_unregister_subdev(&tx->sd);
media_entity_cleanup(&tx->sd.entity);
v4l2_ctrl_handler_free(&tx->ctrl_hdl);
+ v4l2_subdev_cleanup(&tx->sd);
}
diff --git a/drivers/media/i2c/ccs-pll.c b/drivers/media/i2c/ccs-pll.c
index fcc39360cc50..cf8858cb13d4 100644
--- a/drivers/media/i2c/ccs-pll.c
+++ b/drivers/media/i2c/ccs-pll.c
@@ -296,7 +296,7 @@ __ccs_pll_calculate_vt_tree(struct device *dev,
struct ccs_pll_branch_fr *pll_fr = &pll->vt_fr;
struct ccs_pll_branch_bk *pll_bk = &pll->vt_bk;
u32 more_mul;
- u16 best_pix_div = SHRT_MAX >> 1, best_div;
+ u16 best_pix_div = SHRT_MAX >> 1, best_div = lim_bk->max_sys_clk_div;
u16 vt_div, min_sys_div, max_sys_div, sys_div;
pll_fr->pll_ip_clk_freq_hz =
diff --git a/drivers/media/i2c/ccs/Kconfig b/drivers/media/i2c/ccs/Kconfig
index 71671db3d993..b55c93a2e204 100644
--- a/drivers/media/i2c/ccs/Kconfig
+++ b/drivers/media/i2c/ccs/Kconfig
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_CCS
tristate "MIPI CCS/SMIA++/SMIA sensor support"
- depends on I2C && VIDEO_DEV && HAVE_CLK
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
+ depends on HAVE_CLK
select VIDEO_CCS_PLL
- select V4L2_FWNODE
help
This is a generic driver for MIPI CCS, SMIA++ and SMIA compliant
camera sensors.
diff --git a/drivers/media/i2c/ccs/ccs-data.c b/drivers/media/i2c/ccs/ccs-data.c
index 45f2b2f55ec5..08400edf77ce 100644
--- a/drivers/media/i2c/ccs/ccs-data.c
+++ b/drivers/media/i2c/ccs/ccs-data.c
@@ -464,8 +464,7 @@ static int ccs_data_parse_rules(struct bin_container *bin,
rule_payload = __rule_type + 1;
rule_plen2 = rule_plen - sizeof(*__rule_type);
- switch (*__rule_type) {
- case CCS_DATA_BLOCK_RULE_ID_IF: {
+ if (*__rule_type == CCS_DATA_BLOCK_RULE_ID_IF) {
const struct __ccs_data_block_rule_if *__if_rules =
rule_payload;
const size_t __num_if_rules =
@@ -514,49 +513,61 @@ static int ccs_data_parse_rules(struct bin_container *bin,
rules->if_rules = if_rule;
rules->num_if_rules = __num_if_rules;
}
- break;
- }
- case CCS_DATA_BLOCK_RULE_ID_READ_ONLY_REGS:
- rval = ccs_data_parse_reg_rules(bin, &rules->read_only_regs,
- &rules->num_read_only_regs,
- rule_payload,
- rule_payload + rule_plen2,
- dev);
- if (rval)
- return rval;
- break;
- case CCS_DATA_BLOCK_RULE_ID_FFD:
- rval = ccs_data_parse_ffd(bin, &rules->frame_format,
- rule_payload,
- rule_payload + rule_plen2,
- dev);
- if (rval)
- return rval;
- break;
- case CCS_DATA_BLOCK_RULE_ID_MSR:
- rval = ccs_data_parse_reg_rules(bin,
- &rules->manufacturer_regs,
- &rules->num_manufacturer_regs,
- rule_payload,
- rule_payload + rule_plen2,
- dev);
- if (rval)
- return rval;
- break;
- case CCS_DATA_BLOCK_RULE_ID_PDAF_READOUT:
- rval = ccs_data_parse_pdaf_readout(bin,
- &rules->pdaf_readout,
- rule_payload,
- rule_payload + rule_plen2,
- dev);
- if (rval)
- return rval;
- break;
- default:
- dev_dbg(dev,
- "Don't know how to handle rule type %u!\n",
- *__rule_type);
- return -EINVAL;
+ } else {
+ /* Check there was an if rule before any other rules */
+ if (bin->base && !rules)
+ return -EINVAL;
+
+ switch (*__rule_type) {
+ case CCS_DATA_BLOCK_RULE_ID_READ_ONLY_REGS:
+ rval = ccs_data_parse_reg_rules(bin,
+ rules ?
+ &rules->read_only_regs : NULL,
+ rules ?
+ &rules->num_read_only_regs : NULL,
+ rule_payload,
+ rule_payload + rule_plen2,
+ dev);
+ if (rval)
+ return rval;
+ break;
+ case CCS_DATA_BLOCK_RULE_ID_FFD:
+ rval = ccs_data_parse_ffd(bin, rules ?
+ &rules->frame_format : NULL,
+ rule_payload,
+ rule_payload + rule_plen2,
+ dev);
+ if (rval)
+ return rval;
+ break;
+ case CCS_DATA_BLOCK_RULE_ID_MSR:
+ rval = ccs_data_parse_reg_rules(bin,
+ rules ?
+ &rules->manufacturer_regs : NULL,
+ rules ?
+ &rules->num_manufacturer_regs : NULL,
+ rule_payload,
+ rule_payload + rule_plen2,
+ dev);
+ if (rval)
+ return rval;
+ break;
+ case CCS_DATA_BLOCK_RULE_ID_PDAF_READOUT:
+ rval = ccs_data_parse_pdaf_readout(bin,
+ rules ?
+ &rules->pdaf_readout : NULL,
+ rule_payload,
+ rule_payload + rule_plen2,
+ dev);
+ if (rval)
+ return rval;
+ break;
+ default:
+ dev_dbg(dev,
+ "Don't know how to handle rule type %u!\n",
+ *__rule_type);
+ return -EINVAL;
+ }
}
__next_rule = __next_rule + rule_hlen + rule_plen;
}
diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
new file mode 100644
index 000000000000..4bfa3b3cf619
--- /dev/null
+++ b/drivers/media/i2c/ds90ub913.c
@@ -0,0 +1,903 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Texas Instruments DS90UB913 video serializer
+ *
+ * Based on a driver from Luca Ceresoli <luca@lucaceresoli.net>
+ *
+ * Copyright (c) 2019 Luca Ceresoli <luca@lucaceresoli.net>
+ * Copyright (c) 2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/fwnode.h>
+#include <linux/gpio/driver.h>
+#include <linux/i2c-atr.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#include <media/i2c/ds90ub9xx.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+
+#define UB913_PAD_SINK 0
+#define UB913_PAD_SOURCE 1
+
+/*
+ * UB913 has 4 gpios, but gpios 3 and 4 are reserved for external oscillator
+ * mode. Thus we only support 2 gpios for now.
+ */
+#define UB913_NUM_GPIOS 2
+
+#define UB913_REG_RESET_CTL 0x01
+#define UB913_REG_RESET_CTL_DIGITAL_RESET_1 BIT(1)
+#define UB913_REG_RESET_CTL_DIGITAL_RESET_0 BIT(0)
+
+#define UB913_REG_GENERAL_CFG 0x03
+#define UB913_REG_GENERAL_CFG_CRC_ERR_RESET BIT(5)
+#define UB913_REG_GENERAL_CFG_PCLK_RISING BIT(0)
+
+#define UB913_REG_MODE_SEL 0x05
+#define UB913_REG_MODE_SEL_MODE_OVERRIDE BIT(5)
+#define UB913_REG_MODE_SEL_MODE_UP_TO_DATE BIT(4)
+#define UB913_REG_MODE_SEL_MODE_MASK GENMASK(3, 0)
+
+#define UB913_REG_CRC_ERRORS_LSB 0x0a
+#define UB913_REG_CRC_ERRORS_MSB 0x0b
+
+#define UB913_REG_GENERAL_STATUS 0x0c
+
+#define UB913_REG_GPIO_CFG(n) (0x0d + (n))
+#define UB913_REG_GPIO_CFG_ENABLE(n) BIT(0 + (n) * 4)
+#define UB913_REG_GPIO_CFG_DIR_INPUT(n) BIT(1 + (n) * 4)
+#define UB913_REG_GPIO_CFG_REMOTE_EN(n) BIT(2 + (n) * 4)
+#define UB913_REG_GPIO_CFG_OUT_VAL(n) BIT(3 + (n) * 4)
+#define UB913_REG_GPIO_CFG_MASK(n) (0xf << ((n) * 4))
+
+#define UB913_REG_SCL_HIGH_TIME 0x11
+#define UB913_REG_SCL_LOW_TIME 0x12
+
+#define UB913_REG_PLL_OVR 0x35
+
+struct ub913_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct clk *clkin;
+
+ struct gpio_chip gpio_chip;
+
+ struct v4l2_subdev sd;
+ struct media_pad pads[2];
+
+ struct v4l2_async_notifier notifier;
+
+ struct v4l2_subdev *source_sd;
+ u16 source_sd_pad;
+
+ u64 enabled_source_streams;
+
+ struct clk_hw *clkout_clk_hw;
+
+ struct ds90ub9xx_platform_data *plat_data;
+
+ bool pclk_polarity_rising;
+};
+
+static inline struct ub913_data *sd_to_ub913(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ub913_data, sd);
+}
+
+struct ub913_format_info {
+ u32 incode;
+ u32 outcode;
+};
+
+static const struct ub913_format_info ub913_formats[] = {
+	/* Only 8-bit YUV 4:2:2 formats are supported at the moment */
+ { .incode = MEDIA_BUS_FMT_YUYV8_2X8, .outcode = MEDIA_BUS_FMT_YUYV8_1X16 },
+ { .incode = MEDIA_BUS_FMT_UYVY8_2X8, .outcode = MEDIA_BUS_FMT_UYVY8_1X16 },
+ { .incode = MEDIA_BUS_FMT_VYUY8_2X8, .outcode = MEDIA_BUS_FMT_VYUY8_1X16 },
+ { .incode = MEDIA_BUS_FMT_YVYU8_2X8, .outcode = MEDIA_BUS_FMT_YVYU8_1X16 },
+};
+
+static const struct ub913_format_info *ub913_find_format(u32 incode)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ub913_formats); i++) {
+ if (ub913_formats[i].incode == incode)
+ return &ub913_formats[i];
+ }
+
+ return NULL;
+}
+
+static int ub913_read(const struct ub913_data *priv, u8 reg, u8 *val)
+{
+ unsigned int v;
+ int ret;
+
+ ret = regmap_read(priv->regmap, reg, &v);
+ if (ret < 0) {
+ dev_err(&priv->client->dev,
+ "Cannot read register 0x%02x: %d!\n", reg, ret);
+ return ret;
+ }
+
+ *val = v;
+ return 0;
+}
+
+static int ub913_write(const struct ub913_data *priv, u8 reg, u8 val)
+{
+ int ret;
+
+ ret = regmap_write(priv->regmap, reg, val);
+ if (ret < 0)
+ dev_err(&priv->client->dev,
+ "Cannot write register 0x%02x: %d!\n", reg, ret);
+
+ return ret;
+}
+
+/*
+ * GPIO chip
+ */
+static int ub913_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int ub913_gpio_direction_out(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct ub913_data *priv = gpiochip_get_data(gc);
+ unsigned int reg_idx = offset / 2;
+ unsigned int field_idx = offset % 2;
+
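+	/* Each GPIO_CFG register holds the configuration for two GPIOs, 4 bits per GPIO */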
+ return regmap_update_bits(priv->regmap, UB913_REG_GPIO_CFG(reg_idx),
+ UB913_REG_GPIO_CFG_MASK(field_idx),
+ UB913_REG_GPIO_CFG_ENABLE(field_idx) |
+ (value ? UB913_REG_GPIO_CFG_OUT_VAL(field_idx) :
+ 0));
+}
+
+static void ub913_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ ub913_gpio_direction_out(gc, offset, value);
+}
+
+static int ub913_gpio_of_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
+{
+ if (flags)
+ *flags = gpiospec->args[1];
+
+ return gpiospec->args[0];
+}
+
+static int ub913_gpiochip_probe(struct ub913_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ struct gpio_chip *gc = &priv->gpio_chip;
+ int ret;
+
+ /* Initialize GPIOs 0 and 1 to local control, tri-state */
+ ub913_write(priv, UB913_REG_GPIO_CFG(0), 0);
+
+ gc->label = dev_name(dev);
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->base = -1;
+ gc->can_sleep = true;
+ gc->ngpio = UB913_NUM_GPIOS;
+ gc->get_direction = ub913_gpio_get_direction;
+ gc->direction_output = ub913_gpio_direction_out;
+ gc->set = ub913_gpio_set;
+ gc->of_xlate = ub913_gpio_of_xlate;
+ gc->of_gpio_n_cells = 2;
+
+ ret = gpiochip_add_data(gc, priv);
+ if (ret) {
+ dev_err(dev, "Failed to add GPIOs: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ub913_gpiochip_remove(struct ub913_data *priv)
+{
+ gpiochip_remove(&priv->gpio_chip);
+}
+
+static const struct regmap_config ub913_regmap_config = {
+ .name = "ds90ub913",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_format_endian = REGMAP_ENDIAN_DEFAULT,
+ .val_format_endian = REGMAP_ENDIAN_DEFAULT,
+};
+
+/*
+ * V4L2
+ */
+
+static int ub913_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct ub913_data *priv = sd_to_ub913(sd);
+ u64 sink_streams;
+ int ret;
+
+ sink_streams = v4l2_subdev_state_xlate_streams(state, UB913_PAD_SOURCE,
+ UB913_PAD_SINK,
+ &streams_mask);
+
+ ret = v4l2_subdev_enable_streams(priv->source_sd, priv->source_sd_pad,
+ sink_streams);
+ if (ret)
+ return ret;
+
+ priv->enabled_source_streams |= streams_mask;
+
+ return 0;
+}
+
+static int ub913_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct ub913_data *priv = sd_to_ub913(sd);
+ u64 sink_streams;
+ int ret;
+
+ sink_streams = v4l2_subdev_state_xlate_streams(state, UB913_PAD_SOURCE,
+ UB913_PAD_SINK,
+ &streams_mask);
+
+ ret = v4l2_subdev_disable_streams(priv->source_sd, priv->source_sd_pad,
+ sink_streams);
+ if (ret)
+ return ret;
+
+ priv->enabled_source_streams &= ~streams_mask;
+
+ return 0;
+}
+
+static int _ub913_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_krouting *routing)
+{
+ static const struct v4l2_mbus_framefmt in_format = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_YCBCR_ENC_601,
+ .quantization = V4L2_QUANTIZATION_LIM_RANGE,
+ .xfer_func = V4L2_XFER_FUNC_SRGB,
+ };
+ static const struct v4l2_mbus_framefmt out_format = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_YCBCR_ENC_601,
+ .quantization = V4L2_QUANTIZATION_LIM_RANGE,
+ .xfer_func = V4L2_XFER_FUNC_SRGB,
+ };
+ struct v4l2_subdev_stream_configs *stream_configs;
+ unsigned int i;
+ int ret;
+
+ /*
+ * Note: we can only support up to V4L2_FRAME_DESC_ENTRY_MAX, until
+ * frame desc is made dynamically allocated.
+ */
+
+ if (routing->num_routes > V4L2_FRAME_DESC_ENTRY_MAX)
+ return -EINVAL;
+
+ ret = v4l2_subdev_routing_validate(sd, routing,
+ V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_set_routing(sd, state, routing);
+ if (ret)
+ return ret;
+
+ stream_configs = &state->stream_configs;
+
+ for (i = 0; i < stream_configs->num_configs; i++) {
+ if (stream_configs->configs[i].pad == UB913_PAD_SINK)
+ stream_configs->configs[i].fmt = in_format;
+ else
+ stream_configs->configs[i].fmt = out_format;
+ }
+
+ return 0;
+}
+
+static int ub913_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *routing)
+{
+ struct ub913_data *priv = sd_to_ub913(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->enabled_source_streams)
+ return -EBUSY;
+
+ return _ub913_set_routing(sd, state, routing);
+}
+
+static int ub913_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct ub913_data *priv = sd_to_ub913(sd);
+ const struct v4l2_subdev_krouting *routing;
+ struct v4l2_mbus_frame_desc source_fd;
+ struct v4l2_subdev_route *route;
+ struct v4l2_subdev_state *state;
+ int ret;
+
+ if (pad != UB913_PAD_SOURCE)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call(priv->source_sd, pad, get_frame_desc,
+ priv->source_sd_pad, &source_fd);
+ if (ret)
+ return ret;
+
+ memset(fd, 0, sizeof(*fd));
+
+ fd->type = V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ routing = &state->routing;
+
+ for_each_active_route(routing, route) {
+ unsigned int i;
+
+ if (route->source_pad != pad)
+ continue;
+
+ for (i = 0; i < source_fd.num_entries; i++) {
+ if (source_fd.entry[i].stream == route->sink_stream)
+ break;
+ }
+
+ if (i == source_fd.num_entries) {
+ dev_err(&priv->client->dev,
+ "Failed to find stream from source frame desc\n");
+ ret = -EPIPE;
+ goto out_unlock;
+ }
+
+ fd->entry[fd->num_entries].stream = route->source_stream;
+ fd->entry[fd->num_entries].flags = source_fd.entry[i].flags;
+ fd->entry[fd->num_entries].length = source_fd.entry[i].length;
+ fd->entry[fd->num_entries].pixelcode =
+ source_fd.entry[i].pixelcode;
+
+ fd->num_entries++;
+ }
+
+out_unlock:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int ub913_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct ub913_data *priv = sd_to_ub913(sd);
+ struct v4l2_mbus_framefmt *fmt;
+ const struct ub913_format_info *finfo;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE &&
+ priv->enabled_source_streams)
+ return -EBUSY;
+
+ /* Source format is fully defined by the sink format, so not settable */
+ if (format->pad == UB913_PAD_SOURCE)
+ return v4l2_subdev_get_fmt(sd, state, format);
+
+ finfo = ub913_find_format(format->format.code);
+ if (!finfo) {
+ finfo = &ub913_formats[0];
+ format->format.code = finfo->incode;
+ }
+
+ /* Set sink format */
+ fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
+ format->stream);
+ if (!fmt)
+ return -EINVAL;
+
+ *fmt = format->format;
+
+ /* Propagate to source format, and adjust the mbus code */
+ fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad,
+ format->stream);
+ if (!fmt)
+ return -EINVAL;
+
+ format->format.code = finfo->outcode;
+
+ *fmt = format->format;
+
+ return 0;
+}
+
+static int ub913_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_route routes[] = {
+ {
+ .sink_pad = UB913_PAD_SINK,
+ .sink_stream = 0,
+ .source_pad = UB913_PAD_SOURCE,
+ .source_stream = 0,
+ .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
+ },
+ };
+
+ struct v4l2_subdev_krouting routing = {
+ .num_routes = ARRAY_SIZE(routes),
+ .routes = routes,
+ };
+
+ return _ub913_set_routing(sd, state, &routing);
+}
+
+static int ub913_log_status(struct v4l2_subdev *sd)
+{
+ struct ub913_data *priv = sd_to_ub913(sd);
+ struct device *dev = &priv->client->dev;
+ u8 v = 0, v1 = 0, v2 = 0;
+
+ ub913_read(priv, UB913_REG_MODE_SEL, &v);
+ dev_info(dev, "MODE_SEL %#02x\n", v);
+
+ ub913_read(priv, UB913_REG_CRC_ERRORS_LSB, &v1);
+ ub913_read(priv, UB913_REG_CRC_ERRORS_MSB, &v2);
+ dev_info(dev, "CRC errors %u\n", v1 | (v2 << 8));
+
+ /* clear CRC errors */
+ ub913_read(priv, UB913_REG_GENERAL_CFG, &v);
+ ub913_write(priv, UB913_REG_GENERAL_CFG,
+ v | UB913_REG_GENERAL_CFG_CRC_ERR_RESET);
+ ub913_write(priv, UB913_REG_GENERAL_CFG, v);
+
+ ub913_read(priv, UB913_REG_GENERAL_STATUS, &v);
+ dev_info(dev, "GENERAL_STATUS %#02x\n", v);
+
+ ub913_read(priv, UB913_REG_PLL_OVR, &v);
+ dev_info(dev, "PLL_OVR %#02x\n", v);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops ub913_subdev_core_ops = {
+ .log_status = ub913_log_status,
+};
+
+static const struct v4l2_subdev_pad_ops ub913_pad_ops = {
+ .enable_streams = ub913_enable_streams,
+ .disable_streams = ub913_disable_streams,
+ .set_routing = ub913_set_routing,
+ .get_frame_desc = ub913_get_frame_desc,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = ub913_set_fmt,
+ .init_cfg = ub913_init_cfg,
+};
+
+static const struct v4l2_subdev_ops ub913_subdev_ops = {
+ .core = &ub913_subdev_core_ops,
+ .pad = &ub913_pad_ops,
+};
+
+static const struct media_entity_operations ub913_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int ub913_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *source_subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct ub913_data *priv = sd_to_ub913(notifier->sd);
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ ret = media_entity_get_fwnode_pad(&source_subdev->entity,
+ source_subdev->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (ret < 0) {
+ dev_err(dev, "Failed to find pad for %s\n",
+ source_subdev->name);
+ return ret;
+ }
+
+ priv->source_sd = source_subdev;
+ priv->source_sd_pad = ret;
+
+ ret = media_create_pad_link(&source_subdev->entity, priv->source_sd_pad,
+ &priv->sd.entity, UB913_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_err(dev, "Unable to link %s:%u -> %s:0\n",
+ source_subdev->name, priv->source_sd_pad,
+ priv->sd.name);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations ub913_notify_ops = {
+ .bound = ub913_notify_bound,
+};
+
+static int ub913_v4l2_notifier_register(struct ub913_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ struct v4l2_async_connection *asd;
+ struct fwnode_handle *ep_fwnode;
+ int ret;
+
+ ep_fwnode = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev),
+ UB913_PAD_SINK, 0, 0);
+ if (!ep_fwnode) {
+ dev_err(dev, "No graph endpoint\n");
+ return -ENODEV;
+ }
+
+ v4l2_async_subdev_nf_init(&priv->notifier, &priv->sd);
+
+ asd = v4l2_async_nf_add_fwnode_remote(&priv->notifier, ep_fwnode,
+ struct v4l2_async_connection);
+
+ fwnode_handle_put(ep_fwnode);
+
+ if (IS_ERR(asd)) {
+ dev_err(dev, "Failed to add subdev: %ld", PTR_ERR(asd));
+ v4l2_async_nf_cleanup(&priv->notifier);
+ return PTR_ERR(asd);
+ }
+
+ priv->notifier.ops = &ub913_notify_ops;
+
+ ret = v4l2_async_nf_register(&priv->notifier);
+ if (ret) {
+ dev_err(dev, "Failed to register subdev_notifier");
+ v4l2_async_nf_cleanup(&priv->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ub913_v4l2_nf_unregister(struct ub913_data *priv)
+{
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
+}
+
+static int ub913_register_clkout(struct ub913_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ const char *name;
+ int ret;
+
+ name = kasprintf(GFP_KERNEL, "ds90ub913.%s.clk_out", dev_name(dev));
+ if (!name)
+ return -ENOMEM;
+
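+	/* Model CLK_OUT as a fixed-factor clock running at half the CLKIN rate */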
+ priv->clkout_clk_hw = devm_clk_hw_register_fixed_factor(dev, name,
+ __clk_get_name(priv->clkin), 0, 1, 2);
+
+ kfree(name);
+
+ if (IS_ERR(priv->clkout_clk_hw))
+ return dev_err_probe(dev, PTR_ERR(priv->clkout_clk_hw),
+ "Cannot register clkout hw\n");
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ priv->clkout_clk_hw);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Cannot add OF clock provider\n");
+
+ return 0;
+}
+
+static int ub913_i2c_master_init(struct ub913_data *priv)
+{
+ /* i2c fast mode */
+ u32 scl_high = 600 + 300; /* high period + rise time, ns */
+ u32 scl_low = 1300 + 300; /* low period + fall time, ns */
+ unsigned long ref;
+ int ret;
+
+ ref = clk_get_rate(priv->clkin) / 2;
+
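+	/* Convert the SCL high/low times from nanoseconds to cycles of the reference clock (CLKIN / 2) */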
+ scl_high = div64_u64((u64)scl_high * ref, 1000000000);
+ scl_low = div64_u64((u64)scl_low * ref, 1000000000);
+
+ ret = ub913_write(priv, UB913_REG_SCL_HIGH_TIME, scl_high);
+ if (ret)
+ return ret;
+
+ ret = ub913_write(priv, UB913_REG_SCL_LOW_TIME, scl_low);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ub913_add_i2c_adapter(struct ub913_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ struct fwnode_handle *i2c_handle;
+ int ret;
+
+ i2c_handle = device_get_named_child_node(dev, "i2c");
+ if (!i2c_handle)
+ return 0;
+
+ ret = i2c_atr_add_adapter(priv->plat_data->atr, priv->plat_data->port,
+ dev, i2c_handle);
+
+ fwnode_handle_put(i2c_handle);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ub913_parse_dt(struct ub913_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_PARALLEL,
+ };
+ struct fwnode_handle *ep_fwnode;
+ int ret;
+
+ ep_fwnode = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev),
+ UB913_PAD_SINK, 0, 0);
+ if (!ep_fwnode)
+ return dev_err_probe(dev, -ENOENT, "No sink endpoint\n");
+
+ ret = v4l2_fwnode_endpoint_parse(ep_fwnode, &vep);
+
+ fwnode_handle_put(ep_fwnode);
+
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to parse sink endpoint data\n");
+
+ if (vep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ priv->pclk_polarity_rising = true;
+ else if (vep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
+ priv->pclk_polarity_rising = false;
+ else
+ return dev_err_probe(dev, -EINVAL,
+ "bad value for 'pclk-sample'\n");
+
+ return 0;
+}
+
+static int ub913_hw_init(struct ub913_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ bool mode_override;
+ u8 mode;
+ int ret;
+ u8 v;
+
+ ret = ub913_read(priv, UB913_REG_MODE_SEL, &v);
+ if (ret)
+ return ret;
+
+ if (!(v & UB913_REG_MODE_SEL_MODE_UP_TO_DATE))
+ return dev_err_probe(dev, -ENODEV,
+ "Mode value not stabilized\n");
+
+ mode_override = v & UB913_REG_MODE_SEL_MODE_OVERRIDE;
+ mode = v & UB913_REG_MODE_SEL_MODE_MASK;
+
+ dev_dbg(dev, "mode from %s: %#x\n",
+ mode_override ? "reg" : "deserializer", mode);
+
+ ret = ub913_i2c_master_init(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "i2c master init failed\n");
+
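+	/* Program the pixel clock sampling edge according to the polarity parsed from the sink endpoint */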
+ ub913_read(priv, UB913_REG_GENERAL_CFG, &v);
+ v &= ~UB913_REG_GENERAL_CFG_PCLK_RISING;
+ v |= priv->pclk_polarity_rising ? UB913_REG_GENERAL_CFG_PCLK_RISING : 0;
+ ub913_write(priv, UB913_REG_GENERAL_CFG, v);
+
+ return 0;
+}
+
+static int ub913_subdev_init(struct ub913_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ v4l2_i2c_subdev_init(&priv->sd, priv->client, &ub913_subdev_ops);
+ priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_STREAMS;
+ priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ priv->sd.entity.ops = &ub913_entity_ops;
+
+ priv->pads[0].flags = MEDIA_PAD_FL_SINK;
+ priv->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&priv->sd.entity, 2, priv->pads);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init pads\n");
+
+ ret = v4l2_subdev_init_finalize(&priv->sd);
+ if (ret)
+ goto err_entity_cleanup;
+
+ ret = ub913_v4l2_notifier_register(priv);
+ if (ret) {
+ dev_err_probe(dev, ret,
+ "v4l2 subdev notifier register failed\n");
+ goto err_subdev_cleanup;
+ }
+
+ ret = v4l2_async_register_subdev(&priv->sd);
+ if (ret) {
+ dev_err_probe(dev, ret, "v4l2_async_register_subdev error\n");
+ goto err_unreg_notif;
+ }
+
+ return 0;
+
+err_unreg_notif:
+ ub913_v4l2_nf_unregister(priv);
+err_subdev_cleanup:
+ v4l2_subdev_cleanup(&priv->sd);
+err_entity_cleanup:
+ media_entity_cleanup(&priv->sd.entity);
+
+ return ret;
+}
+
+static void ub913_subdev_uninit(struct ub913_data *priv)
+{
+ v4l2_async_unregister_subdev(&priv->sd);
+ ub913_v4l2_nf_unregister(priv);
+ v4l2_subdev_cleanup(&priv->sd);
+ fwnode_handle_put(priv->sd.fwnode);
+ media_entity_cleanup(&priv->sd.entity);
+}
+
+static int ub913_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct ub913_data *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+
+ priv->plat_data = dev_get_platdata(&client->dev);
+ if (!priv->plat_data)
+ return dev_err_probe(dev, -ENODEV, "Platform data missing\n");
+
+ priv->regmap = devm_regmap_init_i2c(client, &ub913_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return dev_err_probe(dev, PTR_ERR(priv->regmap),
+ "Failed to init regmap\n");
+
+ /*
+ * ub913 can also work without ext clock, but that is not supported by
+ * the driver yet.
+ */
+ priv->clkin = devm_clk_get(dev, "clkin");
+ if (IS_ERR(priv->clkin))
+ return dev_err_probe(dev, PTR_ERR(priv->clkin),
+ "Cannot get CLKIN\n");
+
+ ret = ub913_parse_dt(priv);
+ if (ret)
+ return ret;
+
+ ret = ub913_hw_init(priv);
+ if (ret)
+ return ret;
+
+ ret = ub913_gpiochip_probe(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init gpiochip\n");
+
+ ret = ub913_register_clkout(priv);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to register clkout\n");
+ goto err_gpiochip_remove;
+ }
+
+ ret = ub913_subdev_init(priv);
+ if (ret)
+ goto err_gpiochip_remove;
+
+ ret = ub913_add_i2c_adapter(priv);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to add remote i2c adapter\n");
+ goto err_subdev_uninit;
+ }
+
+ return 0;
+
+err_subdev_uninit:
+ ub913_subdev_uninit(priv);
+err_gpiochip_remove:
+ ub913_gpiochip_remove(priv);
+
+ return ret;
+}
+
+static void ub913_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ub913_data *priv = sd_to_ub913(sd);
+
+ i2c_atr_del_adapter(priv->plat_data->atr, priv->plat_data->port);
+
+ ub913_subdev_uninit(priv);
+
+ ub913_gpiochip_remove(priv);
+}
+
+static const struct i2c_device_id ub913_id[] = { { "ds90ub913a-q1", 0 }, {} };
+MODULE_DEVICE_TABLE(i2c, ub913_id);
+
+static const struct of_device_id ub913_dt_ids[] = {
+ { .compatible = "ti,ds90ub913a-q1" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ub913_dt_ids);
+
+static struct i2c_driver ds90ub913_driver = {
+ .probe = ub913_probe,
+ .remove = ub913_remove,
+ .id_table = ub913_id,
+ .driver = {
+ .name = "ds90ub913a",
+ .of_match_table = ub913_dt_ids,
+ },
+};
+module_i2c_driver(ds90ub913_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Texas Instruments DS90UB913 FPD-Link III Serializer Driver");
+MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>");
+MODULE_IMPORT_NS(I2C_ATR);
diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
new file mode 100644
index 000000000000..dc394e22a42c
--- /dev/null
+++ b/drivers/media/i2c/ds90ub953.c
@@ -0,0 +1,1430 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Texas Instruments DS90UB953 video serializer
+ *
+ * Based on a driver from Luca Ceresoli <luca@lucaceresoli.net>
+ *
+ * Copyright (c) 2019 Luca Ceresoli <luca@lucaceresoli.net>
+ * Copyright (c) 2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/fwnode.h>
+#include <linux/gpio/driver.h>
+#include <linux/i2c-atr.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/rational.h>
+#include <linux/regmap.h>
+
+#include <media/i2c/ds90ub9xx.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+
+#define UB953_PAD_SINK 0
+#define UB953_PAD_SOURCE 1
+
+#define UB953_NUM_GPIOS 4
+
+#define UB953_DEFAULT_CLKOUT_RATE 25000000UL
+
+#define UB953_REG_RESET_CTL 0x01
+#define UB953_REG_RESET_CTL_DIGITAL_RESET_1 BIT(1)
+#define UB953_REG_RESET_CTL_DIGITAL_RESET_0 BIT(0)
+
+#define UB953_REG_GENERAL_CFG 0x02
+#define UB953_REG_GENERAL_CFG_CONT_CLK BIT(6)
+#define UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT 4
+#define UB953_REG_GENERAL_CFG_CSI_LANE_SEL_MASK GENMASK(5, 4)
+#define UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE BIT(1)
+#define UB953_REG_GENERAL_CFG_I2C_STRAP_MODE BIT(0)
+
+#define UB953_REG_MODE_SEL 0x03
+#define UB953_REG_MODE_SEL_MODE_DONE BIT(3)
+#define UB953_REG_MODE_SEL_MODE_OVERRIDE BIT(4)
+#define UB953_REG_MODE_SEL_MODE_MASK GENMASK(2, 0)
+
+#define UB953_REG_CLKOUT_CTRL0 0x06
+#define UB953_REG_CLKOUT_CTRL1 0x07
+
+#define UB953_REG_SCL_HIGH_TIME 0x0b
+#define UB953_REG_SCL_LOW_TIME 0x0c
+
+#define UB953_REG_LOCAL_GPIO_DATA 0x0d
+#define UB953_REG_LOCAL_GPIO_DATA_GPIO_RMTEN(n) BIT(4 + (n))
+#define UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(n) BIT(0 + (n))
+
+#define UB953_REG_GPIO_INPUT_CTRL 0x0e
+#define UB953_REG_GPIO_INPUT_CTRL_OUT_EN(n) BIT(4 + (n))
+#define UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(n) BIT(0 + (n))
+
+#define UB953_REG_REV_MASK_ID 0x50
+#define UB953_REG_GENERAL_STATUS 0x52
+
+#define UB953_REG_GPIO_PIN_STS 0x53
+#define UB953_REG_GPIO_PIN_STS_GPIO_STS(n) BIT(0 + (n))
+
+#define UB953_REG_BIST_ERR_CNT 0x54
+#define UB953_REG_CRC_ERR_CNT1 0x55
+#define UB953_REG_CRC_ERR_CNT2 0x56
+
+#define UB953_REG_CSI_ERR_CNT 0x5c
+#define UB953_REG_CSI_ERR_STATUS 0x5d
+#define UB953_REG_CSI_ERR_DLANE01 0x5e
+#define UB953_REG_CSI_ERR_DLANE23 0x5f
+#define UB953_REG_CSI_ERR_CLK_LANE 0x60
+#define UB953_REG_CSI_PKT_HDR_VC_ID 0x61
+#define UB953_REG_PKT_HDR_WC_LSB 0x62
+#define UB953_REG_PKT_HDR_WC_MSB 0x63
+#define UB953_REG_CSI_ECC 0x64
+
+#define UB953_REG_IND_ACC_CTL 0xb0
+#define UB953_REG_IND_ACC_ADDR 0xb1
+#define UB953_REG_IND_ACC_DATA 0xb2
+
+#define UB953_REG_FPD3_RX_ID(n) (0xf0 + (n))
+#define UB953_REG_FPD3_RX_ID_LEN 6
+
+/* Indirect register blocks */
+#define UB953_IND_TARGET_PAT_GEN 0x00
+#define UB953_IND_TARGET_FPD3_TX 0x01
+#define UB953_IND_TARGET_DIE_ID 0x02
+
+#define UB953_IND_PGEN_CTL 0x01
+#define UB953_IND_PGEN_CTL_PGEN_ENABLE BIT(0)
+#define UB953_IND_PGEN_CFG 0x02
+#define UB953_IND_PGEN_CSI_DI 0x03
+#define UB953_IND_PGEN_LINE_SIZE1 0x04
+#define UB953_IND_PGEN_LINE_SIZE0 0x05
+#define UB953_IND_PGEN_BAR_SIZE1 0x06
+#define UB953_IND_PGEN_BAR_SIZE0 0x07
+#define UB953_IND_PGEN_ACT_LPF1 0x08
+#define UB953_IND_PGEN_ACT_LPF0 0x09
+#define UB953_IND_PGEN_TOT_LPF1 0x0a
+#define UB953_IND_PGEN_TOT_LPF0 0x0b
+#define UB953_IND_PGEN_LINE_PD1 0x0c
+#define UB953_IND_PGEN_LINE_PD0 0x0d
+#define UB953_IND_PGEN_VBP 0x0e
+#define UB953_IND_PGEN_VFP 0x0f
+#define UB953_IND_PGEN_COLOR(n) (0x10 + (n)) /* n <= 15 */
+
+/* Note: Only sync mode supported for now */
+enum ub953_mode {
+ /* FPD-Link III CSI-2 synchronous mode */
+ UB953_MODE_SYNC,
+ /* FPD-Link III CSI-2 non-synchronous mode, external ref clock */
+ UB953_MODE_NONSYNC_EXT,
+ /* FPD-Link III CSI-2 non-synchronous mode, internal ref clock */
+ UB953_MODE_NONSYNC_INT,
+ /* FPD-Link III DVP mode */
+ UB953_MODE_DVP,
+};
+
+struct ub953_hw_data {
+ const char *model;
+ bool is_ub971;
+};
+
+struct ub953_clkout_data {
+ u32 hs_div;
+ u32 m;
+ u32 n;
+ unsigned long rate;
+};
+
+struct ub953_data {
+ const struct ub953_hw_data *hw_data;
+
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct clk *clkin;
+
+ u32 num_data_lanes;
+ bool non_continous_clk;
+
+ struct gpio_chip gpio_chip;
+
+ struct v4l2_subdev sd;
+ struct media_pad pads[2];
+
+ struct v4l2_async_notifier notifier;
+
+ struct v4l2_subdev *source_sd;
+ u16 source_sd_pad;
+
+ u64 enabled_source_streams;
+
+ /* lock for register access */
+ struct mutex reg_lock;
+
+ u8 current_indirect_target;
+
+ struct clk_hw clkout_clk_hw;
+
+ enum ub953_mode mode;
+
+ const struct ds90ub9xx_platform_data *plat_data;
+};
+
+static inline struct ub953_data *sd_to_ub953(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ub953_data, sd);
+}
+
+/*
+ * HW Access
+ */
+
+static int ub953_read(struct ub953_data *priv, u8 reg, u8 *val)
+{
+ unsigned int v;
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = regmap_read(priv->regmap, reg, &v);
+ if (ret) {
+ dev_err(&priv->client->dev, "Cannot read register 0x%02x: %d\n",
+ reg, ret);
+ goto out_unlock;
+ }
+
+ *val = v;
+
+out_unlock:
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+static int ub953_write(struct ub953_data *priv, u8 reg, u8 val)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = regmap_write(priv->regmap, reg, val);
+ if (ret)
+ dev_err(&priv->client->dev,
+ "Cannot write register 0x%02x: %d\n", reg, ret);
+
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+static int ub953_select_ind_reg_block(struct ub953_data *priv, u8 block)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ if (priv->current_indirect_target == block)
+ return 0;
+
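+	/* Select the block that subsequent IND_ACC_ADDR/IND_ACC_DATA accesses will target */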
+ ret = regmap_write(priv->regmap, UB953_REG_IND_ACC_CTL, block << 2);
+ if (ret) {
+ dev_err(dev, "%s: cannot select indirect target %u (%d)\n",
+ __func__, block, ret);
+ return ret;
+ }
+
+ priv->current_indirect_target = block;
+
+ return 0;
+}
+
+__maybe_unused
+static int ub953_read_ind(struct ub953_data *priv, u8 block, u8 reg, u8 *val)
+{
+ unsigned int v;
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = ub953_select_ind_reg_block(priv, block);
+ if (ret)
+ goto out_unlock;
+
+ ret = regmap_write(priv->regmap, UB953_REG_IND_ACC_ADDR, reg);
+ if (ret) {
+ dev_err(&priv->client->dev,
+ "Write to IND_ACC_ADDR failed when reading %u:%x02x: %d\n",
+ block, reg, ret);
+ goto out_unlock;
+ }
+
+ ret = regmap_read(priv->regmap, UB953_REG_IND_ACC_DATA, &v);
+ if (ret) {
+ dev_err(&priv->client->dev,
+ "Write to IND_ACC_DATA failed when reading %u:%x02x: %d\n",
+ block, reg, ret);
+ goto out_unlock;
+ }
+
+ *val = v;
+
+out_unlock:
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+__maybe_unused
+static int ub953_write_ind(struct ub953_data *priv, u8 block, u8 reg, u8 val)
+{
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = ub953_select_ind_reg_block(priv, block);
+ if (ret)
+ goto out_unlock;
+
+ ret = regmap_write(priv->regmap, UB953_REG_IND_ACC_ADDR, reg);
+ if (ret) {
+ dev_err(&priv->client->dev,
+ "Write to IND_ACC_ADDR failed when writing %u:%x02x: %d\n",
+ block, reg, ret);
+ goto out_unlock;
+ }
+
+ ret = regmap_write(priv->regmap, UB953_REG_IND_ACC_DATA, val);
+ if (ret) {
+ dev_err(&priv->client->dev,
+ "Write to IND_ACC_DATA failed when writing %u:%x02x\n: %d\n",
+ block, reg, ret);
+ }
+
+out_unlock:
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+/*
+ * GPIO chip
+ */
+static int ub953_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct ub953_data *priv = gpiochip_get_data(gc);
+ int ret;
+ u8 v;
+
+ ret = ub953_read(priv, UB953_REG_GPIO_INPUT_CTRL, &v);
+ if (ret)
+ return ret;
+
+ if (v & UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(offset))
+ return GPIO_LINE_DIRECTION_IN;
+ else
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int ub953_gpio_direction_in(struct gpio_chip *gc, unsigned int offset)
+{
+ struct ub953_data *priv = gpiochip_get_data(gc);
+
+ return regmap_update_bits(priv->regmap, UB953_REG_GPIO_INPUT_CTRL,
+ UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(offset) |
+ UB953_REG_GPIO_INPUT_CTRL_OUT_EN(offset),
+ UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(offset));
+}
+
+static int ub953_gpio_direction_out(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct ub953_data *priv = gpiochip_get_data(gc);
+ int ret;
+
+ ret = regmap_update_bits(priv->regmap, UB953_REG_LOCAL_GPIO_DATA,
+ UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(offset),
+ value ? UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(offset) :
+ 0);
+
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(priv->regmap, UB953_REG_GPIO_INPUT_CTRL,
+ UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(offset) |
+ UB953_REG_GPIO_INPUT_CTRL_OUT_EN(offset),
+ UB953_REG_GPIO_INPUT_CTRL_OUT_EN(offset));
+}
+
+static int ub953_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct ub953_data *priv = gpiochip_get_data(gc);
+ int ret;
+ u8 v;
+
+ ret = ub953_read(priv, UB953_REG_GPIO_PIN_STS, &v);
+ if (ret)
+ return ret;
+
+ return !!(v & UB953_REG_GPIO_PIN_STS_GPIO_STS(offset));
+}
+
+static void ub953_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct ub953_data *priv = gpiochip_get_data(gc);
+
+ regmap_update_bits(priv->regmap, UB953_REG_LOCAL_GPIO_DATA,
+ UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(offset),
+ value ? UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(offset) :
+ 0);
+}
+
+static int ub953_gpio_of_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
+{
+ if (flags)
+ *flags = gpiospec->args[1];
+
+ return gpiospec->args[0];
+}
+
+static int ub953_gpiochip_probe(struct ub953_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ struct gpio_chip *gc = &priv->gpio_chip;
+ int ret;
+
+ /* Set all GPIOs to local input mode */
+ ub953_write(priv, UB953_REG_LOCAL_GPIO_DATA, 0);
+ ub953_write(priv, UB953_REG_GPIO_INPUT_CTRL, 0xf);
+
+ gc->label = dev_name(dev);
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->base = -1;
+ gc->can_sleep = true;
+ gc->ngpio = UB953_NUM_GPIOS;
+ gc->get_direction = ub953_gpio_get_direction;
+ gc->direction_input = ub953_gpio_direction_in;
+ gc->direction_output = ub953_gpio_direction_out;
+ gc->get = ub953_gpio_get;
+ gc->set = ub953_gpio_set;
+ gc->of_xlate = ub953_gpio_of_xlate;
+ gc->of_gpio_n_cells = 2;
+
+ ret = gpiochip_add_data(gc, priv);
+ if (ret) {
+ dev_err(dev, "Failed to add GPIOs: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ub953_gpiochip_remove(struct ub953_data *priv)
+{
+ gpiochip_remove(&priv->gpio_chip);
+}
+
+/*
+ * V4L2
+ */
+
+static int _ub953_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_krouting *routing)
+{
+ static const struct v4l2_mbus_framefmt format = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_YCBCR_ENC_601,
+ .quantization = V4L2_QUANTIZATION_LIM_RANGE,
+ .xfer_func = V4L2_XFER_FUNC_SRGB,
+ };
+ int ret;
+
+ /*
+ * Note: we can only support up to V4L2_FRAME_DESC_ENTRY_MAX routes until
+ * the frame descriptor is made dynamically allocated.
+ */
+
+ if (routing->num_routes > V4L2_FRAME_DESC_ENTRY_MAX)
+ return -EINVAL;
+
+ ret = v4l2_subdev_routing_validate(sd, routing,
+ V4L2_SUBDEV_ROUTING_ONLY_1_TO_1);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ub953_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *routing)
+{
+ struct ub953_data *priv = sd_to_ub953(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->enabled_source_streams)
+ return -EBUSY;
+
+ return _ub953_set_routing(sd, state, routing);
+}
+
+static int ub953_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct ub953_data *priv = sd_to_ub953(sd);
+ struct v4l2_mbus_frame_desc source_fd;
+ struct v4l2_subdev_route *route;
+ struct v4l2_subdev_state *state;
+ int ret;
+
+ if (pad != UB953_PAD_SOURCE)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call(priv->source_sd, pad, get_frame_desc,
+ priv->source_sd_pad, &source_fd);
+ if (ret)
+ return ret;
+
+ memset(fd, 0, sizeof(*fd));
+
+ fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ for_each_active_route(&state->routing, route) {
+ struct v4l2_mbus_frame_desc_entry *source_entry = NULL;
+ unsigned int i;
+
+ if (route->source_pad != pad)
+ continue;
+
+ for (i = 0; i < source_fd.num_entries; i++) {
+ if (source_fd.entry[i].stream == route->sink_stream) {
+ source_entry = &source_fd.entry[i];
+ break;
+ }
+ }
+
+ if (!source_entry) {
+ dev_err(&priv->client->dev,
+ "Failed to find stream from source frame desc\n");
+ ret = -EPIPE;
+ goto out_unlock;
+ }
+
+ fd->entry[fd->num_entries].stream = route->source_stream;
+ fd->entry[fd->num_entries].flags = source_entry->flags;
+ fd->entry[fd->num_entries].length = source_entry->length;
+ fd->entry[fd->num_entries].pixelcode = source_entry->pixelcode;
+ fd->entry[fd->num_entries].bus.csi2.vc =
+ source_entry->bus.csi2.vc;
+ fd->entry[fd->num_entries].bus.csi2.dt =
+ source_entry->bus.csi2.dt;
+
+ fd->num_entries++;
+ }
+
+out_unlock:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int ub953_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct ub953_data *priv = sd_to_ub953(sd);
+ struct v4l2_mbus_framefmt *fmt;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE &&
+ priv->enabled_source_streams)
+ return -EBUSY;
+
+ /* No transcoding, source and sink formats must match. */
+ if (format->pad == UB953_PAD_SOURCE)
+ return v4l2_subdev_get_fmt(sd, state, format);
+
+ /* Set sink format */
+ fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
+ format->stream);
+ if (!fmt)
+ return -EINVAL;
+
+ *fmt = format->format;
+
+ /* Propagate to source format */
+ fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad,
+ format->stream);
+ if (!fmt)
+ return -EINVAL;
+
+ *fmt = format->format;
+
+ return 0;
+}
+
+static int ub953_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_route routes[] = {
+ {
+ .sink_pad = UB953_PAD_SINK,
+ .sink_stream = 0,
+ .source_pad = UB953_PAD_SOURCE,
+ .source_stream = 0,
+ .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
+ },
+ };
+
+ struct v4l2_subdev_krouting routing = {
+ .num_routes = ARRAY_SIZE(routes),
+ .routes = routes,
+ };
+
+ return _ub953_set_routing(sd, state, &routing);
+}
+
+static int ub953_log_status(struct v4l2_subdev *sd)
+{
+ struct ub953_data *priv = sd_to_ub953(sd);
+ struct device *dev = &priv->client->dev;
+ u8 v = 0, v1 = 0, v2 = 0;
+ unsigned int i;
+ char id[UB953_REG_FPD3_RX_ID_LEN];
+ u8 gpio_local_data = 0;
+ u8 gpio_input_ctrl = 0;
+ u8 gpio_pin_sts = 0;
+
+ for (i = 0; i < sizeof(id); i++)
+ ub953_read(priv, UB953_REG_FPD3_RX_ID(i), &id[i]);
+
+ dev_info(dev, "ID '%.*s'\n", (int)sizeof(id), id);
+
+ ub953_read(priv, UB953_REG_GENERAL_STATUS, &v);
+ dev_info(dev, "GENERAL_STATUS %#02x\n", v);
+
+ ub953_read(priv, UB953_REG_CRC_ERR_CNT1, &v1);
+ ub953_read(priv, UB953_REG_CRC_ERR_CNT2, &v2);
+ dev_info(dev, "CRC error count %u\n", v1 | (v2 << 8));
+
+ ub953_read(priv, UB953_REG_CSI_ERR_CNT, &v);
+ dev_info(dev, "CSI error count %u\n", v);
+
+ ub953_read(priv, UB953_REG_CSI_ERR_STATUS, &v);
+ dev_info(dev, "CSI_ERR_STATUS %#02x\n", v);
+
+ ub953_read(priv, UB953_REG_CSI_ERR_DLANE01, &v);
+ dev_info(dev, "CSI_ERR_DLANE01 %#02x\n", v);
+
+ ub953_read(priv, UB953_REG_CSI_ERR_DLANE23, &v);
+ dev_info(dev, "CSI_ERR_DLANE23 %#02x\n", v);
+
+ ub953_read(priv, UB953_REG_CSI_ERR_CLK_LANE, &v);
+ dev_info(dev, "CSI_ERR_CLK_LANE %#02x\n", v);
+
+ ub953_read(priv, UB953_REG_CSI_PKT_HDR_VC_ID, &v);
+ dev_info(dev, "CSI packet header VC %u ID %u\n", v >> 6, v & 0x3f);
+
+ ub953_read(priv, UB953_REG_PKT_HDR_WC_LSB, &v1);
+ ub953_read(priv, UB953_REG_PKT_HDR_WC_MSB, &v2);
+ dev_info(dev, "CSI packet header WC %u\n", (v2 << 8) | v1);
+
+ ub953_read(priv, UB953_REG_CSI_ECC, &v);
+ dev_info(dev, "CSI ECC %#02x\n", v);
+
+ ub953_read(priv, UB953_REG_LOCAL_GPIO_DATA, &gpio_local_data);
+ ub953_read(priv, UB953_REG_GPIO_INPUT_CTRL, &gpio_input_ctrl);
+ ub953_read(priv, UB953_REG_GPIO_PIN_STS, &gpio_pin_sts);
+
+ for (i = 0; i < UB953_NUM_GPIOS; i++) {
+ dev_info(dev,
+ "GPIO%u: remote: %u is_input: %u is_output: %u val: %u sts: %u\n",
+ i,
+ !!(gpio_local_data & UB953_REG_LOCAL_GPIO_DATA_GPIO_RMTEN(i)),
+ !!(gpio_input_ctrl & UB953_REG_GPIO_INPUT_CTRL_INPUT_EN(i)),
+ !!(gpio_input_ctrl & UB953_REG_GPIO_INPUT_CTRL_OUT_EN(i)),
+ !!(gpio_local_data & UB953_REG_LOCAL_GPIO_DATA_GPIO_OUT_SRC(i)),
+ !!(gpio_pin_sts & UB953_REG_GPIO_PIN_STS_GPIO_STS(i)));
+ }
+
+ return 0;
+}
+
+static int ub953_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct ub953_data *priv = sd_to_ub953(sd);
+ u64 sink_streams;
+ int ret;
+
+ sink_streams = v4l2_subdev_state_xlate_streams(state, UB953_PAD_SOURCE,
+ UB953_PAD_SINK,
+ &streams_mask);
+
+ ret = v4l2_subdev_enable_streams(priv->source_sd, priv->source_sd_pad,
+ sink_streams);
+ if (ret)
+ return ret;
+
+ priv->enabled_source_streams |= streams_mask;
+
+ return 0;
+}
+
+static int ub953_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct ub953_data *priv = sd_to_ub953(sd);
+ u64 sink_streams;
+ int ret;
+
+ sink_streams = v4l2_subdev_state_xlate_streams(state, UB953_PAD_SOURCE,
+ UB953_PAD_SINK,
+ &streams_mask);
+
+ ret = v4l2_subdev_disable_streams(priv->source_sd, priv->source_sd_pad,
+ sink_streams);
+ if (ret)
+ return ret;
+
+ priv->enabled_source_streams &= ~streams_mask;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops ub953_pad_ops = {
+ .enable_streams = ub953_enable_streams,
+ .disable_streams = ub953_disable_streams,
+ .set_routing = ub953_set_routing,
+ .get_frame_desc = ub953_get_frame_desc,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = ub953_set_fmt,
+ .init_cfg = ub953_init_cfg,
+};
+
+static const struct v4l2_subdev_core_ops ub953_subdev_core_ops = {
+ .log_status = ub953_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops ub953_subdev_ops = {
+ .core = &ub953_subdev_core_ops,
+ .pad = &ub953_pad_ops,
+};
+
+static const struct media_entity_operations ub953_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int ub953_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *source_subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct ub953_data *priv = sd_to_ub953(notifier->sd);
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ ret = media_entity_get_fwnode_pad(&source_subdev->entity,
+ source_subdev->fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (ret < 0) {
+ dev_err(dev, "Failed to find pad for %s\n",
+ source_subdev->name);
+ return ret;
+ }
+
+ priv->source_sd = source_subdev;
+ priv->source_sd_pad = ret;
+
+ ret = media_create_pad_link(&source_subdev->entity, priv->source_sd_pad,
+ &priv->sd.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_err(dev, "Unable to link %s:%u -> %s:0\n",
+ source_subdev->name, priv->source_sd_pad,
+ priv->sd.name);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations ub953_notify_ops = {
+ .bound = ub953_notify_bound,
+};
+
+static int ub953_v4l2_notifier_register(struct ub953_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ struct v4l2_async_connection *asd;
+ struct fwnode_handle *ep_fwnode;
+ int ret;
+
+ ep_fwnode = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev),
+ UB953_PAD_SINK, 0, 0);
+ if (!ep_fwnode) {
+ dev_err(dev, "No graph endpoint\n");
+ return -ENODEV;
+ }
+
+ v4l2_async_subdev_nf_init(&priv->notifier, &priv->sd);
+
+ asd = v4l2_async_nf_add_fwnode_remote(&priv->notifier, ep_fwnode,
+ struct v4l2_async_connection);
+
+ fwnode_handle_put(ep_fwnode);
+
+ if (IS_ERR(asd)) {
+ dev_err(dev, "Failed to add subdev: %ld", PTR_ERR(asd));
+ v4l2_async_nf_cleanup(&priv->notifier);
+ return PTR_ERR(asd);
+ }
+
+ priv->notifier.ops = &ub953_notify_ops;
+
+ ret = v4l2_async_nf_register(&priv->notifier);
+ if (ret) {
+ dev_err(dev, "Failed to register subdev_notifier");
+ v4l2_async_nf_cleanup(&priv->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ub953_v4l2_notifier_unregister(struct ub953_data *priv)
+{
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
+}
+
+/*
+ * Probing
+ */
+
+static int ub953_i2c_master_init(struct ub953_data *priv)
+{
+ /* i2c fast mode */
+ u32 ref = 26250000;
+ u32 scl_high = 915; /* ns */
+ u32 scl_low = 1641; /* ns */
+ int ret;
+
+ scl_high = div64_u64((u64)scl_high * ref, 1000000000) - 5;
+ scl_low = div64_u64((u64)scl_low * ref, 1000000000) - 5;
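+ /*
+ * Worked example of the conversion above (plain arithmetic, not a value
+ * from the datasheet): with ref = 26.25 MHz, 915 ns maps to
+ * 915 * 26.25 / 1000 = 24, and 24 - 5 = 19 is written to SCL_HIGH_TIME;
+ * 1641 ns maps to 43, and 43 - 5 = 38 is written to SCL_LOW_TIME.
+ */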
+
+ ret = ub953_write(priv, UB953_REG_SCL_HIGH_TIME, scl_high);
+ if (ret)
+ return ret;
+
+ ret = ub953_write(priv, UB953_REG_SCL_LOW_TIME, scl_low);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u64 ub953_get_fc_rate(struct ub953_data *priv)
+{
+ switch (priv->mode) {
+ case UB953_MODE_SYNC:
+ if (priv->hw_data->is_ub971)
+ return priv->plat_data->bc_rate * 160ull;
+ else
+ return priv->plat_data->bc_rate / 2 * 160ull;
+
+ case UB953_MODE_NONSYNC_EXT:
+ /* CLKIN_DIV = 1 always */
+ return clk_get_rate(priv->clkin) * 80ull;
+
+ default:
+ /* Not supported */
+ return 0;
+ }
+}
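+
+/*
+ * Example of the above with an assumed back-channel rate (the real value
+ * comes from the deserializer via plat_data->bc_rate): a UB953 in sync mode
+ * with a 50 MHz back channel gives fc = 50 MHz / 2 * 160 = 4 GHz, i.e. a
+ * 4 Gbps forward channel.
+ */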
+
+static unsigned long ub953_calc_clkout_ub953(struct ub953_data *priv,
+ unsigned long target, u64 fc,
+ u8 *hs_div, u8 *m, u8 *n)
+{
+ /*
+ * We always use 4 as a pre-divider (HS_CLK_DIV = 2).
+ *
+ * According to the datasheet:
+ * - "HS_CLK_DIV typically should be set to either 16, 8, or 4 (default)."
+ * - "if it is not possible to have an integer ratio of N/M, it is best to
+ * select a smaller value for HS_CLK_DIV.
+ *
+ * For above reasons the default HS_CLK_DIV seems the best in the average
+ * case. Use always that value to keep the code simple.
+ */
+ static const unsigned long hs_clk_div = 4;
+
+ u64 fc_divided;
+ unsigned long mul, div;
+ unsigned long res;
+
+ /* clkout = fc / hs_clk_div * m / n */
+
+ fc_divided = div_u64(fc, hs_clk_div);
+
+ rational_best_approximation(target, fc_divided, (1 << 5) - 1,
+ (1 << 8) - 1, &mul, &div);
+
+ res = div_u64(fc_divided * mul, div);
+
+ *hs_div = hs_clk_div;
+ *m = mul;
+ *n = div;
+
+ return res;
+}
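+
+/*
+ * Worked example for the calculation above, with assumed numbers: for a
+ * 25 MHz CLKOUT target and a 4 GHz FC rate, fc_divided = 1 GHz and
+ * rational_best_approximation() reduces 25 MHz / 1 GHz to m/n = 1/40
+ * (well within the 5-bit m and 8-bit n limits), so the resulting CLKOUT
+ * is exactly 1 GHz * 1 / 40 = 25 MHz.
+ */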
+
+static unsigned long ub953_calc_clkout_ub971(struct ub953_data *priv,
+ unsigned long target, u64 fc,
+ u8 *m, u8 *n)
+{
+ u64 fc_divided;
+ unsigned long mul, div;
+ unsigned long res;
+
+ /* clkout = fc * m / (8 * n) */
+
+ fc_divided = div_u64(fc, 8);
+
+ rational_best_approximation(target, fc_divided, (1 << 5) - 1,
+ (1 << 8) - 1, &mul, &div);
+
+ res = div_u64(fc_divided * mul, div);
+
+ *m = mul;
+ *n = div;
+
+ return res;
+}
+
+static void ub953_calc_clkout_params(struct ub953_data *priv,
+ unsigned long target_rate,
+ struct ub953_clkout_data *clkout_data)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned long clkout_rate;
+ u64 fc_rate;
+
+ fc_rate = ub953_get_fc_rate(priv);
+
+ if (priv->hw_data->is_ub971) {
+ u8 m, n;
+
+ clkout_rate = ub953_calc_clkout_ub971(priv, target_rate,
+ fc_rate, &m, &n);
+
+ clkout_data->m = m;
+ clkout_data->n = n;
+
+ dev_dbg(dev, "%s %llu * %u / (8 * %u) = %lu (requested %lu)",
+ __func__, fc_rate, m, n, clkout_rate, target_rate);
+ } else {
+ u8 hs_div, m, n;
+
+ clkout_rate = ub953_calc_clkout_ub953(priv, target_rate,
+ fc_rate, &hs_div, &m, &n);
+
+ clkout_data->hs_div = hs_div;
+ clkout_data->m = m;
+ clkout_data->n = n;
+
+ dev_dbg(dev, "%s %llu / %u * %u / %u = %lu (requested %lu)",
+ __func__, fc_rate, hs_div, m, n, clkout_rate,
+ target_rate);
+ }
+
+ clkout_data->rate = clkout_rate;
+}
+
+static void ub953_write_clkout_regs(struct ub953_data *priv,
+ const struct ub953_clkout_data *clkout_data)
+{
+ u8 clkout_ctrl0, clkout_ctrl1;
+
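+ /*
+ * Register layout as implied by this function and
+ * ub953_clkout_recalc_rate(): on the UB953, CLKOUT_CTRL0 bits [7:5] hold
+ * log2(HS_CLK_DIV) and bits [4:0] hold M, while on the UB971 only M is
+ * programmed. CLKOUT_CTRL1 holds N on both.
+ */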
+ if (priv->hw_data->is_ub971)
+ clkout_ctrl0 = clkout_data->m;
+ else
+ clkout_ctrl0 = (__ffs(clkout_data->hs_div) << 5) |
+ clkout_data->m;
+
+ clkout_ctrl1 = clkout_data->n;
+
+ ub953_write(priv, UB953_REG_CLKOUT_CTRL0, clkout_ctrl0);
+ ub953_write(priv, UB953_REG_CLKOUT_CTRL1, clkout_ctrl1);
+}
+
+static unsigned long ub953_clkout_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct ub953_data *priv = container_of(hw, struct ub953_data, clkout_clk_hw);
+ struct device *dev = &priv->client->dev;
+ u8 ctrl0, ctrl1;
+ u32 mul, div;
+ u64 fc_rate;
+ u32 hs_clk_div;
+ u64 rate;
+ int ret;
+
+ ret = ub953_read(priv, UB953_REG_CLKOUT_CTRL0, &ctrl0);
+ if (ret) {
+ dev_err(dev, "Failed to read CLKOUT_CTRL0: %d\n", ret);
+ return 0;
+ }
+
+ ret = ub953_read(priv, UB953_REG_CLKOUT_CTRL1, &ctrl1);
+ if (ret) {
+ dev_err(dev, "Failed to read CLKOUT_CTRL1: %d\n", ret);
+ return 0;
+ }
+
+ fc_rate = ub953_get_fc_rate(priv);
+
+ if (priv->hw_data->is_ub971) {
+ mul = ctrl0 & 0x1f;
+ div = ctrl1;
+
+ if (div == 0)
+ return 0;
+
+ rate = div_u64(fc_rate * mul, 8 * div);
+
+ dev_dbg(dev, "clkout: fc rate %llu, mul %u, div %u = %llu\n",
+ fc_rate, mul, div, rate);
+ } else {
+ mul = ctrl0 & 0x1f;
+ hs_clk_div = 1 << (ctrl0 >> 5);
+ div = ctrl1;
+
+ if (div == 0)
+ return 0;
+
+ rate = div_u64(div_u64(fc_rate, hs_clk_div) * mul, div);
+
+ dev_dbg(dev,
+ "clkout: fc rate %llu, hs_clk_div %u, mul %u, div %u = %llu\n",
+ fc_rate, hs_clk_div, mul, div, rate);
+ }
+
+ return rate;
+}
+
+static long ub953_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct ub953_data *priv = container_of(hw, struct ub953_data, clkout_clk_hw);
+ struct ub953_clkout_data clkout_data;
+
+ ub953_calc_clkout_params(priv, rate, &clkout_data);
+
+ return clkout_data.rate;
+}
+
+static int ub953_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ub953_data *priv = container_of(hw, struct ub953_data, clkout_clk_hw);
+ struct ub953_clkout_data clkout_data;
+
+ ub953_calc_clkout_params(priv, rate, &clkout_data);
+
+ dev_dbg(&priv->client->dev, "%s %lu (requested %lu)\n", __func__,
+ clkout_data.rate, rate);
+
+ ub953_write_clkout_regs(priv, &clkout_data);
+
+ return 0;
+}
+
+static const struct clk_ops ub953_clkout_ops = {
+ .recalc_rate = ub953_clkout_recalc_rate,
+ .round_rate = ub953_clkout_round_rate,
+ .set_rate = ub953_clkout_set_rate,
+};
+
+static int ub953_register_clkout(struct ub953_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ const struct clk_init_data init = {
+ .name = kasprintf(GFP_KERNEL, "ds90%s.%s.clk_out",
+ priv->hw_data->model, dev_name(dev)),
+ .ops = &ub953_clkout_ops,
+ };
+ struct ub953_clkout_data clkout_data;
+ int ret;
+
+ if (!init.name)
+ return -ENOMEM;
+
+ /* Initialize clkout to 25 MHz by default */
+ ub953_calc_clkout_params(priv, UB953_DEFAULT_CLKOUT_RATE, &clkout_data);
+ ub953_write_clkout_regs(priv, &clkout_data);
+
+ priv->clkout_clk_hw.init = &init;
+
+ ret = devm_clk_hw_register(dev, &priv->clkout_clk_hw);
+ kfree(init.name);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot register clock HW\n");
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ &priv->clkout_clk_hw);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Cannot add OF clock provider\n");
+
+ return 0;
+}
+
+static int ub953_add_i2c_adapter(struct ub953_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ struct fwnode_handle *i2c_handle;
+ int ret;
+
+ i2c_handle = device_get_named_child_node(dev, "i2c");
+ if (!i2c_handle)
+ return 0;
+
+ ret = i2c_atr_add_adapter(priv->plat_data->atr, priv->plat_data->port,
+ dev, i2c_handle);
+
+ fwnode_handle_put(i2c_handle);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct regmap_config ub953_regmap_config = {
+ .name = "ds90ub953",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_format_endian = REGMAP_ENDIAN_DEFAULT,
+ .val_format_endian = REGMAP_ENDIAN_DEFAULT,
+};
+
+static int ub953_parse_dt(struct ub953_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct fwnode_handle *ep_fwnode;
+ unsigned char nlanes;
+ int ret;
+
+ ep_fwnode = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev),
+ UB953_PAD_SINK, 0, 0);
+ if (!ep_fwnode)
+ return dev_err_probe(dev, -ENOENT, "no endpoint found\n");
+
+ ret = v4l2_fwnode_endpoint_parse(ep_fwnode, &vep);
+
+ fwnode_handle_put(ep_fwnode);
+
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to parse sink endpoint data\n");
+
+ nlanes = vep.bus.mipi_csi2.num_data_lanes;
+ if (nlanes != 1 && nlanes != 2 && nlanes != 4)
+ return dev_err_probe(dev, -EINVAL,
+ "bad number of data-lanes: %u\n", nlanes);
+
+ priv->num_data_lanes = nlanes;
+
+ priv->non_continous_clk = vep.bus.mipi_csi2.flags &
+ V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
+
+ return 0;
+}
+
+static int ub953_hw_init(struct ub953_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ bool mode_override;
+ int ret;
+ u8 v;
+
+ ret = ub953_read(priv, UB953_REG_MODE_SEL, &v);
+ if (ret)
+ return ret;
+
+ if (!(v & UB953_REG_MODE_SEL_MODE_DONE))
+ return dev_err_probe(dev, -EIO, "Mode value not stabilized\n");
+
+ mode_override = v & UB953_REG_MODE_SEL_MODE_OVERRIDE;
+
+ switch (v & UB953_REG_MODE_SEL_MODE_MASK) {
+ case 0:
+ priv->mode = UB953_MODE_SYNC;
+ break;
+ case 2:
+ priv->mode = UB953_MODE_NONSYNC_EXT;
+ break;
+ case 3:
+ priv->mode = UB953_MODE_NONSYNC_INT;
+ break;
+ case 5:
+ priv->mode = UB953_MODE_DVP;
+ break;
+ default:
+ return dev_err_probe(dev, -EIO,
+ "Invalid mode in mode register\n");
+ }
+
+ dev_dbg(dev, "mode from %s: %#x\n", mode_override ? "reg" : "strap",
+ priv->mode);
+
+ if (priv->mode != UB953_MODE_SYNC &&
+ priv->mode != UB953_MODE_NONSYNC_EXT)
+ return dev_err_probe(dev, -ENODEV,
+ "Unsupported mode selected: %u\n",
+ priv->mode);
+
+ if (priv->mode == UB953_MODE_NONSYNC_EXT && !priv->clkin)
+ return dev_err_probe(dev, -EINVAL,
+ "clkin required for non-sync ext mode\n");
+
+ ret = ub953_read(priv, UB953_REG_REV_MASK_ID, &v);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read revision");
+
+ dev_info(dev, "Found %s rev/mask %#04x\n", priv->hw_data->model, v);
+
+ ret = ub953_read(priv, UB953_REG_GENERAL_CFG, &v);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "i2c strap setting %s V\n",
+ (v & UB953_REG_GENERAL_CFG_I2C_STRAP_MODE) ? "1.8" : "3.3");
+
+ ret = ub953_i2c_master_init(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "i2c init failed\n");
+
+ ub953_write(priv, UB953_REG_GENERAL_CFG,
+ (priv->non_continous_clk ? 0 : UB953_REG_GENERAL_CFG_CONT_CLK) |
+ ((priv->num_data_lanes - 1) << UB953_REG_GENERAL_CFG_CSI_LANE_SEL_SHIFT) |
+ UB953_REG_GENERAL_CFG_CRC_TX_GEN_ENABLE);
+
+ return 0;
+}
+
+static int ub953_subdev_init(struct ub953_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ v4l2_i2c_subdev_init(&priv->sd, priv->client, &ub953_subdev_ops);
+
+ priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_STREAMS;
+ priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ priv->sd.entity.ops = &ub953_entity_ops;
+
+ priv->pads[0].flags = MEDIA_PAD_FL_SINK;
+ priv->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&priv->sd.entity, 2, priv->pads);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init pads\n");
+
+ ret = v4l2_subdev_init_finalize(&priv->sd);
+ if (ret)
+ goto err_entity_cleanup;
+
+ ret = ub953_v4l2_notifier_register(priv);
+ if (ret) {
+ dev_err_probe(dev, ret,
+ "v4l2 subdev notifier register failed\n");
+ goto err_free_state;
+ }
+
+ ret = v4l2_async_register_subdev(&priv->sd);
+ if (ret) {
+ dev_err_probe(dev, ret, "v4l2_async_register_subdev error\n");
+ goto err_unreg_notif;
+ }
+
+ return 0;
+
+err_unreg_notif:
+ ub953_v4l2_notifier_unregister(priv);
+err_free_state:
+ v4l2_subdev_cleanup(&priv->sd);
+err_entity_cleanup:
+ media_entity_cleanup(&priv->sd.entity);
+
+ return ret;
+}
+
+static void ub953_subdev_uninit(struct ub953_data *priv)
+{
+ v4l2_async_unregister_subdev(&priv->sd);
+ ub953_v4l2_notifier_unregister(priv);
+ v4l2_subdev_cleanup(&priv->sd);
+ fwnode_handle_put(priv->sd.fwnode);
+ media_entity_cleanup(&priv->sd.entity);
+}
+
+static int ub953_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct ub953_data *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+
+ priv->hw_data = device_get_match_data(dev);
+
+ priv->plat_data = dev_get_platdata(&client->dev);
+ if (!priv->plat_data)
+ return dev_err_probe(dev, -ENODEV, "Platform data missing\n");
+
+ mutex_init(&priv->reg_lock);
+
+ /*
+ * Initialize to an invalid value so that the first indirect register
+ * access selects the target block.
+ */
+ priv->current_indirect_target = 0xff;
+
+ priv->regmap = devm_regmap_init_i2c(client, &ub953_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err_probe(dev, ret, "Failed to init regmap\n");
+ goto err_mutex_destroy;
+ }
+
+ priv->clkin = devm_clk_get_optional(dev, "clkin");
+ if (IS_ERR(priv->clkin)) {
+ ret = PTR_ERR(priv->clkin);
+ dev_err_probe(dev, ret, "failed to parse 'clkin'\n");
+ goto err_mutex_destroy;
+ }
+
+ ret = ub953_parse_dt(priv);
+ if (ret)
+ goto err_mutex_destroy;
+
+ ret = ub953_hw_init(priv);
+ if (ret)
+ goto err_mutex_destroy;
+
+ ret = ub953_gpiochip_probe(priv);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to init gpiochip\n");
+ goto err_mutex_destroy;
+ }
+
+ ret = ub953_register_clkout(priv);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to register clkout\n");
+ goto err_gpiochip_remove;
+ }
+
+ ret = ub953_subdev_init(priv);
+ if (ret)
+ goto err_gpiochip_remove;
+
+ ret = ub953_add_i2c_adapter(priv);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to add remote i2c adapter\n");
+ goto err_subdev_uninit;
+ }
+
+ return 0;
+
+err_subdev_uninit:
+ ub953_subdev_uninit(priv);
+err_gpiochip_remove:
+ ub953_gpiochip_remove(priv);
+err_mutex_destroy:
+ mutex_destroy(&priv->reg_lock);
+
+ return ret;
+}
+
+static void ub953_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ub953_data *priv = sd_to_ub953(sd);
+
+ i2c_atr_del_adapter(priv->plat_data->atr, priv->plat_data->port);
+
+ ub953_subdev_uninit(priv);
+
+ ub953_gpiochip_remove(priv);
+ mutex_destroy(&priv->reg_lock);
+}
+
+static const struct ub953_hw_data ds90ub953_hw = {
+ .model = "ub953",
+};
+
+static const struct ub953_hw_data ds90ub971_hw = {
+ .model = "ub971",
+ .is_ub971 = true,
+};
+
+static const struct i2c_device_id ub953_id[] = {
+ { "ds90ub953-q1", (kernel_ulong_t)&ds90ub953_hw },
+ { "ds90ub971-q1", (kernel_ulong_t)&ds90ub971_hw },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ub953_id);
+
+static const struct of_device_id ub953_dt_ids[] = {
+ { .compatible = "ti,ds90ub953-q1", .data = &ds90ub953_hw },
+ { .compatible = "ti,ds90ub971-q1", .data = &ds90ub971_hw },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ub953_dt_ids);
+
+static struct i2c_driver ds90ub953_driver = {
+ .probe = ub953_probe,
+ .remove = ub953_remove,
+ .id_table = ub953_id,
+ .driver = {
+ .name = "ds90ub953",
+ .of_match_table = ub953_dt_ids,
+ },
+};
+module_i2c_driver(ds90ub953_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Texas Instruments FPD-Link III/IV CSI-2 Serializers Driver");
+MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>");
+MODULE_IMPORT_NS(I2C_ATR);
diff --git a/drivers/media/i2c/ds90ub960.c b/drivers/media/i2c/ds90ub960.c
new file mode 100644
index 000000000000..8ba5750f5a23
--- /dev/null
+++ b/drivers/media/i2c/ds90ub960.c
@@ -0,0 +1,4059 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Texas Instruments DS90UB960-Q1 video deserializer
+ *
+ * Copyright (c) 2019 Luca Ceresoli <luca@lucaceresoli.net>
+ * Copyright (c) 2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ */
+
+/*
+ * (Possible) TODOs:
+ *
+ * - PM for serializer and remote peripherals. We need to manage:
+ * - VPOC
+ * - Power domain? Regulator? Somehow any remote device should be able to
+ * cause the VPOC to be turned on.
+ * - Link between the deserializer and the serializer
+ * - Related to VPOC management. We probably always want to turn on the VPOC
+ * and then enable the link.
+ * - Serializer's services: i2c, gpios, power
+ * - The serializer needs to resume before the remote peripherals can
+ * e.g. use the i2c.
+ * - How to handle gpios? Reserving a gpio essentially keeps the provider
+ * (serializer) always powered on.
+ * - Do we need a new bus for the FPD-Link? At the moment the serializers
+ * are children of the same i2c-adapter where the deserializer resides.
+ * - i2c-atr could be made embeddable instead of allocatable.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/fwnode.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c-atr.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <media/i2c/ds90ub9xx.h>
+#include <media/mipi-csi2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#define MHZ(v) ((u32)((v) * 1000000U))
+
+#define UB960_POLL_TIME_MS 500
+
+#define UB960_MAX_RX_NPORTS 4
+#define UB960_MAX_TX_NPORTS 2
+#define UB960_MAX_NPORTS (UB960_MAX_RX_NPORTS + UB960_MAX_TX_NPORTS)
+
+#define UB960_MAX_PORT_ALIASES 8
+
+#define UB960_NUM_BC_GPIOS 4
+
+/*
+ * Register map
+ *
+ * 0x00-0x32 Shared (UB960_SR)
+ * 0x33-0x3a CSI-2 TX (per-port paged on DS90UB960, shared on 954) (UB960_TR)
+ * 0x4c Shared (UB960_SR)
+ * 0x4d-0x7f FPD-Link RX, per-port paged (UB960_RR)
+ * 0xb0-0xbf Shared (UB960_SR)
+ * 0xd0-0xdf FPD-Link RX, per-port paged (UB960_RR)
+ * 0xf0-0xf5 Shared (UB960_SR)
+ * 0xf8-0xfb Shared (UB960_SR)
+ * All others Reserved
+ *
+ * Register prefixes:
+ * UB960_SR_* = Shared register
+ * UB960_RR_* = FPD-Link RX, per-port paged register
+ * UB960_TR_* = CSI-2 TX, per-port paged register
+ * UB960_XR_* = Reserved register
+ * UB960_IR_* = Indirect register
+ */
+
+#define UB960_SR_I2C_DEV_ID 0x00
+#define UB960_SR_RESET 0x01
+#define UB960_SR_RESET_DIGITAL_RESET1 BIT(1)
+#define UB960_SR_RESET_DIGITAL_RESET0 BIT(0)
+#define UB960_SR_RESET_GPIO_LOCK_RELEASE BIT(5)
+
+#define UB960_SR_GEN_CONFIG 0x02
+#define UB960_SR_REV_MASK 0x03
+#define UB960_SR_DEVICE_STS 0x04
+#define UB960_SR_PAR_ERR_THOLD_HI 0x05
+#define UB960_SR_PAR_ERR_THOLD_LO 0x06
+#define UB960_SR_BCC_WDOG_CTL 0x07
+#define UB960_SR_I2C_CTL1 0x08
+#define UB960_SR_I2C_CTL2 0x09
+#define UB960_SR_SCL_HIGH_TIME 0x0a
+#define UB960_SR_SCL_LOW_TIME 0x0b
+#define UB960_SR_RX_PORT_CTL 0x0c
+#define UB960_SR_IO_CTL 0x0d
+#define UB960_SR_GPIO_PIN_STS 0x0e
+#define UB960_SR_GPIO_INPUT_CTL 0x0f
+#define UB960_SR_GPIO_PIN_CTL(n) (0x10 + (n)) /* n < UB960_NUM_GPIOS */
+#define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_SEL 5
+#define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_SRC_SHIFT 2
+#define UB960_SR_GPIO_PIN_CTL_GPIO_OUT_EN BIT(0)
+
+#define UB960_SR_FS_CTL 0x18
+#define UB960_SR_FS_HIGH_TIME_1 0x19
+#define UB960_SR_FS_HIGH_TIME_0 0x1a
+#define UB960_SR_FS_LOW_TIME_1 0x1b
+#define UB960_SR_FS_LOW_TIME_0 0x1c
+#define UB960_SR_MAX_FRM_HI 0x1d
+#define UB960_SR_MAX_FRM_LO 0x1e
+#define UB960_SR_CSI_PLL_CTL 0x1f
+
+#define UB960_SR_FWD_CTL1 0x20
+#define UB960_SR_FWD_CTL1_PORT_DIS(n) BIT((n) + 4)
+
+#define UB960_SR_FWD_CTL2 0x21
+#define UB960_SR_FWD_STS 0x22
+
+#define UB960_SR_INTERRUPT_CTL 0x23
+#define UB960_SR_INTERRUPT_CTL_INT_EN BIT(7)
+#define UB960_SR_INTERRUPT_CTL_IE_CSI_TX0 BIT(4)
+#define UB960_SR_INTERRUPT_CTL_IE_RX(n) BIT((n)) /* rxport[n] IRQ */
+
+#define UB960_SR_INTERRUPT_STS 0x24
+#define UB960_SR_INTERRUPT_STS_INT BIT(7)
+#define UB960_SR_INTERRUPT_STS_IS_CSI_TX(n) BIT(4 + (n)) /* txport[n] IRQ */
+#define UB960_SR_INTERRUPT_STS_IS_RX(n) BIT((n)) /* rxport[n] IRQ */
+
+#define UB960_SR_TS_CONFIG 0x25
+#define UB960_SR_TS_CONTROL 0x26
+#define UB960_SR_TS_LINE_HI 0x27
+#define UB960_SR_TS_LINE_LO 0x28
+#define UB960_SR_TS_STATUS 0x29
+#define UB960_SR_TIMESTAMP_P0_HI 0x2a
+#define UB960_SR_TIMESTAMP_P0_LO 0x2b
+#define UB960_SR_TIMESTAMP_P1_HI 0x2c
+#define UB960_SR_TIMESTAMP_P1_LO 0x2d
+
+#define UB960_SR_CSI_PORT_SEL 0x32
+
+#define UB960_TR_CSI_CTL 0x33
+#define UB960_TR_CSI_CTL_CSI_CAL_EN BIT(6)
+#define UB960_TR_CSI_CTL_CSI_CONTS_CLOCK BIT(1)
+#define UB960_TR_CSI_CTL_CSI_ENABLE BIT(0)
+
+#define UB960_TR_CSI_CTL2 0x34
+#define UB960_TR_CSI_STS 0x35
+#define UB960_TR_CSI_TX_ICR 0x36
+
+#define UB960_TR_CSI_TX_ISR 0x37
+#define UB960_TR_CSI_TX_ISR_IS_CSI_SYNC_ERROR BIT(3)
+#define UB960_TR_CSI_TX_ISR_IS_CSI_PASS_ERROR BIT(1)
+
+#define UB960_TR_CSI_TEST_CTL 0x38
+#define UB960_TR_CSI_TEST_PATT_HI 0x39
+#define UB960_TR_CSI_TEST_PATT_LO 0x3a
+
+#define UB960_XR_SFILTER_CFG 0x41
+#define UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT 4
+#define UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT 0
+
+#define UB960_XR_AEQ_CTL1 0x42
+#define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_FPD_CLK BIT(6)
+#define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_ENCODING BIT(5)
+#define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_PARITY BIT(4)
+#define UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_MASK \
+ (UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_FPD_CLK | \
+ UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_ENCODING | \
+ UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_PARITY)
+#define UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN BIT(0)
+
+#define UB960_XR_AEQ_ERR_THOLD 0x43
+
+#define UB960_RR_BCC_ERR_CTL 0x46
+#define UB960_RR_BCC_STATUS 0x47
+#define UB960_RR_BCC_STATUS_SEQ_ERROR BIT(5)
+#define UB960_RR_BCC_STATUS_MASTER_ERR BIT(4)
+#define UB960_RR_BCC_STATUS_MASTER_TO BIT(3)
+#define UB960_RR_BCC_STATUS_SLAVE_ERR BIT(2)
+#define UB960_RR_BCC_STATUS_SLAVE_TO BIT(1)
+#define UB960_RR_BCC_STATUS_RESP_ERR BIT(0)
+#define UB960_RR_BCC_STATUS_ERROR_MASK \
+ (UB960_RR_BCC_STATUS_SEQ_ERROR | UB960_RR_BCC_STATUS_MASTER_ERR | \
+ UB960_RR_BCC_STATUS_MASTER_TO | UB960_RR_BCC_STATUS_SLAVE_ERR | \
+ UB960_RR_BCC_STATUS_SLAVE_TO | UB960_RR_BCC_STATUS_RESP_ERR)
+
+#define UB960_RR_FPD3_CAP 0x4a
+#define UB960_RR_RAW_EMBED_DTYPE 0x4b
+#define UB960_RR_RAW_EMBED_DTYPE_LINES_SHIFT 6
+
+#define UB960_SR_FPD3_PORT_SEL 0x4c
+
+#define UB960_RR_RX_PORT_STS1 0x4d
+#define UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR BIT(5)
+#define UB960_RR_RX_PORT_STS1_LOCK_STS_CHG BIT(4)
+#define UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR BIT(3)
+#define UB960_RR_RX_PORT_STS1_PARITY_ERROR BIT(2)
+#define UB960_RR_RX_PORT_STS1_PORT_PASS BIT(1)
+#define UB960_RR_RX_PORT_STS1_LOCK_STS BIT(0)
+#define UB960_RR_RX_PORT_STS1_ERROR_MASK \
+ (UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR | \
+ UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR | \
+ UB960_RR_RX_PORT_STS1_PARITY_ERROR)
+
+#define UB960_RR_RX_PORT_STS2 0x4e
+#define UB960_RR_RX_PORT_STS2_LINE_LEN_UNSTABLE BIT(7)
+#define UB960_RR_RX_PORT_STS2_LINE_LEN_CHG BIT(6)
+#define UB960_RR_RX_PORT_STS2_FPD3_ENCODE_ERROR BIT(5)
+#define UB960_RR_RX_PORT_STS2_BUFFER_ERROR BIT(4)
+#define UB960_RR_RX_PORT_STS2_CSI_ERROR BIT(3)
+#define UB960_RR_RX_PORT_STS2_FREQ_STABLE BIT(2)
+#define UB960_RR_RX_PORT_STS2_CABLE_FAULT BIT(1)
+#define UB960_RR_RX_PORT_STS2_LINE_CNT_CHG BIT(0)
+#define UB960_RR_RX_PORT_STS2_ERROR_MASK \
+ UB960_RR_RX_PORT_STS2_BUFFER_ERROR
+
+#define UB960_RR_RX_FREQ_HIGH 0x4f
+#define UB960_RR_RX_FREQ_LOW 0x50
+#define UB960_RR_SENSOR_STS_0 0x51
+#define UB960_RR_SENSOR_STS_1 0x52
+#define UB960_RR_SENSOR_STS_2 0x53
+#define UB960_RR_SENSOR_STS_3 0x54
+#define UB960_RR_RX_PAR_ERR_HI 0x55
+#define UB960_RR_RX_PAR_ERR_LO 0x56
+#define UB960_RR_BIST_ERR_COUNT 0x57
+
+#define UB960_RR_BCC_CONFIG 0x58
+#define UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH BIT(6)
+#define UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK GENMASK(2, 0)
+
+#define UB960_RR_DATAPATH_CTL1 0x59
+#define UB960_RR_DATAPATH_CTL2 0x5a
+#define UB960_RR_SER_ID 0x5b
+#define UB960_RR_SER_ALIAS_ID 0x5c
+
+/* For these two register sets: n < UB960_MAX_PORT_ALIASES */
+#define UB960_RR_SLAVE_ID(n) (0x5d + (n))
+#define UB960_RR_SLAVE_ALIAS(n) (0x65 + (n))
+
+#define UB960_RR_PORT_CONFIG 0x6d
+#define UB960_RR_PORT_CONFIG_FPD3_MODE_MASK GENMASK(1, 0)
+
+#define UB960_RR_BC_GPIO_CTL(n) (0x6e + (n)) /* n < 2 */
+#define UB960_RR_RAW10_ID 0x70
+#define UB960_RR_RAW10_ID_VC_SHIFT 6
+#define UB960_RR_RAW10_ID_DT_SHIFT 0
+
+#define UB960_RR_RAW12_ID 0x71
+#define UB960_RR_CSI_VC_MAP 0x72
+#define UB960_RR_CSI_VC_MAP_SHIFT(x) ((x) * 2)
+
+#define UB960_RR_LINE_COUNT_HI 0x73
+#define UB960_RR_LINE_COUNT_LO 0x74
+#define UB960_RR_LINE_LEN_1 0x75
+#define UB960_RR_LINE_LEN_0 0x76
+#define UB960_RR_FREQ_DET_CTL 0x77
+#define UB960_RR_MAILBOX_1 0x78
+#define UB960_RR_MAILBOX_2 0x79
+
+#define UB960_RR_CSI_RX_STS 0x7a
+#define UB960_RR_CSI_RX_STS_LENGTH_ERR BIT(3)
+#define UB960_RR_CSI_RX_STS_CKSUM_ERR BIT(2)
+#define UB960_RR_CSI_RX_STS_ECC2_ERR BIT(1)
+#define UB960_RR_CSI_RX_STS_ECC1_ERR BIT(0)
+#define UB960_RR_CSI_RX_STS_ERROR_MASK \
+ (UB960_RR_CSI_RX_STS_LENGTH_ERR | UB960_RR_CSI_RX_STS_CKSUM_ERR | \
+ UB960_RR_CSI_RX_STS_ECC2_ERR | UB960_RR_CSI_RX_STS_ECC1_ERR)
+
+#define UB960_RR_CSI_ERR_COUNTER 0x7b
+#define UB960_RR_PORT_CONFIG2 0x7c
+#define UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_MASK GENMASK(7, 6)
+#define UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_SHIFT 6
+
+#define UB960_RR_PORT_CONFIG2_LV_POL_LOW BIT(1)
+#define UB960_RR_PORT_CONFIG2_FV_POL_LOW BIT(0)
+
+#define UB960_RR_PORT_PASS_CTL 0x7d
+#define UB960_RR_SEN_INT_RISE_CTL 0x7e
+#define UB960_RR_SEN_INT_FALL_CTL 0x7f
+
+#define UB960_SR_CSI_FRAME_COUNT_HI(n) (0x90 + 8 * (n))
+#define UB960_SR_CSI_FRAME_COUNT_LO(n) (0x91 + 8 * (n))
+#define UB960_SR_CSI_FRAME_ERR_COUNT_HI(n) (0x92 + 8 * (n))
+#define UB960_SR_CSI_FRAME_ERR_COUNT_LO(n) (0x93 + 8 * (n))
+#define UB960_SR_CSI_LINE_COUNT_HI(n) (0x94 + 8 * (n))
+#define UB960_SR_CSI_LINE_COUNT_LO(n) (0x95 + 8 * (n))
+#define UB960_SR_CSI_LINE_ERR_COUNT_HI(n) (0x96 + 8 * (n))
+#define UB960_SR_CSI_LINE_ERR_COUNT_LO(n) (0x97 + 8 * (n))
+
+#define UB960_XR_REFCLK_FREQ 0xa5 /* UB960 */
+
+#define UB960_RR_VC_ID_MAP(x) (0xa0 + (x)) /* UB9702 */
+
+#define UB960_SR_IND_ACC_CTL 0xb0
+#define UB960_SR_IND_ACC_CTL_IA_AUTO_INC BIT(1)
+
+#define UB960_SR_IND_ACC_ADDR 0xb1
+#define UB960_SR_IND_ACC_DATA 0xb2
+#define UB960_SR_BIST_CONTROL 0xb3
+#define UB960_SR_MODE_IDX_STS 0xb8
+#define UB960_SR_LINK_ERROR_COUNT 0xb9
+#define UB960_SR_FPD3_ENC_CTL 0xba
+#define UB960_SR_FV_MIN_TIME 0xbc
+#define UB960_SR_GPIO_PD_CTL 0xbe
+
+#define UB960_SR_FPD_RATE_CFG 0xc2 /* UB9702 */
+#define UB960_SR_CSI_PLL_DIV 0xc9 /* UB9702 */
+
+#define UB960_RR_PORT_DEBUG 0xd0
+#define UB960_RR_AEQ_CTL2 0xd2
+#define UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR BIT(2)
+
+#define UB960_RR_AEQ_STATUS 0xd3
+#define UB960_RR_AEQ_STATUS_STATUS_2 GENMASK(5, 3)
+#define UB960_RR_AEQ_STATUS_STATUS_1 GENMASK(2, 0)
+
+#define UB960_RR_AEQ_BYPASS 0xd4
+#define UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_SHIFT 5
+#define UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_MASK GENMASK(7, 5)
+#define UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_SHIFT 1
+#define UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_MASK GENMASK(3, 1)
+#define UB960_RR_AEQ_BYPASS_ENABLE BIT(0)
+
+#define UB960_RR_AEQ_MIN_MAX 0xd5
+#define UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT 4
+#define UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT 0
+
+#define UB960_RR_SFILTER_STS_0 0xd6
+#define UB960_RR_SFILTER_STS_1 0xd7
+#define UB960_RR_PORT_ICR_HI 0xd8
+#define UB960_RR_PORT_ICR_LO 0xd9
+#define UB960_RR_PORT_ISR_HI 0xda
+#define UB960_RR_PORT_ISR_LO 0xdb
+#define UB960_RR_FC_GPIO_STS 0xdc
+#define UB960_RR_FC_GPIO_ICR 0xdd
+#define UB960_RR_SEN_INT_RISE_STS 0xde
+#define UB960_RR_SEN_INT_FALL_STS 0xdf
+
+#define UB960_RR_CHANNEL_MODE 0xe4 /* UB9702 */
+
+#define UB960_SR_FPD3_RX_ID(n) (0xf0 + (n))
+#define UB960_SR_FPD3_RX_ID_LEN 6
+
+#define UB960_SR_I2C_RX_ID(n) (0xf8 + (n)) /* < UB960_FPD_RX_NPORTS */
+
+/* Indirect register blocks */
+#define UB960_IND_TARGET_PAT_GEN 0x00
+#define UB960_IND_TARGET_RX_ANA(n) (0x01 + (n))
+#define UB960_IND_TARGET_CSI_CSIPLL_REG_1 0x92 /* UB9702 */
+#define UB960_IND_TARGET_CSI_ANA 0x07
+
+/* UB960_IR_PGEN_*: Indirect Registers for Test Pattern Generator */
+
+#define UB960_IR_PGEN_CTL 0x01
+#define UB960_IR_PGEN_CTL_PGEN_ENABLE BIT(0)
+
+#define UB960_IR_PGEN_CFG 0x02
+#define UB960_IR_PGEN_CSI_DI 0x03
+#define UB960_IR_PGEN_LINE_SIZE1 0x04
+#define UB960_IR_PGEN_LINE_SIZE0 0x05
+#define UB960_IR_PGEN_BAR_SIZE1 0x06
+#define UB960_IR_PGEN_BAR_SIZE0 0x07
+#define UB960_IR_PGEN_ACT_LPF1 0x08
+#define UB960_IR_PGEN_ACT_LPF0 0x09
+#define UB960_IR_PGEN_TOT_LPF1 0x0a
+#define UB960_IR_PGEN_TOT_LPF0 0x0b
+#define UB960_IR_PGEN_LINE_PD1 0x0c
+#define UB960_IR_PGEN_LINE_PD0 0x0d
+#define UB960_IR_PGEN_VBP 0x0e
+#define UB960_IR_PGEN_VFP 0x0f
+#define UB960_IR_PGEN_COLOR(n) (0x10 + (n)) /* n < 15 */
+
+#define UB960_IR_RX_ANA_STROBE_SET_CLK 0x08
+#define UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY BIT(3)
+#define UB960_IR_RX_ANA_STROBE_SET_CLK_DELAY_MASK GENMASK(2, 0)
+
+#define UB960_IR_RX_ANA_STROBE_SET_DATA 0x09
+#define UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY BIT(3)
+#define UB960_IR_RX_ANA_STROBE_SET_DATA_DELAY_MASK GENMASK(2, 0)
+
+/* EQ related */
+
+#define UB960_MIN_AEQ_STROBE_POS -7
+#define UB960_MAX_AEQ_STROBE_POS 7
+
+#define UB960_MANUAL_STROBE_EXTRA_DELAY 6
+
+#define UB960_MIN_MANUAL_STROBE_POS -(7 + UB960_MANUAL_STROBE_EXTRA_DELAY)
+#define UB960_MAX_MANUAL_STROBE_POS (7 + UB960_MANUAL_STROBE_EXTRA_DELAY)
+#define UB960_NUM_MANUAL_STROBE_POS (UB960_MAX_MANUAL_STROBE_POS - UB960_MIN_MANUAL_STROBE_POS + 1)
+
+#define UB960_MIN_EQ_LEVEL 0
+#define UB960_MAX_EQ_LEVEL 14
+#define UB960_NUM_EQ_LEVELS (UB960_MAX_EQ_LEVEL - UB960_MIN_EQ_LEVEL + 1)
+
+struct ub960_hw_data {
+ const char *model;
+ u8 num_rxports;
+ u8 num_txports;
+ bool is_ub9702;
+ bool is_fpdlink4;
+};
+
+enum ub960_rxport_mode {
+ RXPORT_MODE_RAW10 = 0,
+ RXPORT_MODE_RAW12_HF = 1,
+ RXPORT_MODE_RAW12_LF = 2,
+ RXPORT_MODE_CSI2_SYNC = 3,
+ RXPORT_MODE_CSI2_NONSYNC = 4,
+ RXPORT_MODE_LAST = RXPORT_MODE_CSI2_NONSYNC,
+};
+
+enum ub960_rxport_cdr {
+ RXPORT_CDR_FPD3 = 0,
+ RXPORT_CDR_FPD4 = 1,
+ RXPORT_CDR_LAST = RXPORT_CDR_FPD4,
+};
+
+struct ub960_rxport {
+ struct ub960_data *priv;
+ u8 nport; /* RX port number, and index in priv->rxports[] */
+
+ struct {
+ struct v4l2_subdev *sd;
+ u16 pad;
+ struct fwnode_handle *ep_fwnode;
+ } source;
+
+ /* Serializer */
+ struct {
+ struct fwnode_handle *fwnode;
+ struct i2c_client *client;
+ unsigned short alias; /* I2C alias (lower 7 bits) */
+ struct ds90ub9xx_platform_data pdata;
+ } ser;
+
+ enum ub960_rxport_mode rx_mode;
+ enum ub960_rxport_cdr cdr_mode;
+
+ u8 lv_fv_pol; /* LV and FV polarities */
+
+ struct regulator *vpoc;
+
+ /* EQ settings */
+ struct {
+ bool manual_eq;
+
+ s8 strobe_pos;
+
+ union {
+ struct {
+ u8 eq_level_min;
+ u8 eq_level_max;
+ } aeq;
+
+ struct {
+ u8 eq_level;
+ } manual;
+ };
+ } eq;
+
+ const struct i2c_client *aliased_clients[UB960_MAX_PORT_ALIASES];
+};
+
+struct ub960_asd {
+ struct v4l2_async_connection base;
+ struct ub960_rxport *rxport;
+};
+
+static inline struct ub960_asd *to_ub960_asd(struct v4l2_async_connection *asd)
+{
+ return container_of(asd, struct ub960_asd, base);
+}
+
+struct ub960_txport {
+ struct ub960_data *priv;
+ u8 nport; /* TX port number, and index in priv->txports[] */
+
+ u32 num_data_lanes;
+ bool non_continous_clk;
+};
+
+struct ub960_data {
+ const struct ub960_hw_data *hw_data;
+ struct i2c_client *client; /* for shared local registers */
+ struct regmap *regmap;
+
+ /* lock for register access */
+ struct mutex reg_lock;
+
+ struct clk *refclk;
+
+ struct regulator *vddio;
+
+ struct gpio_desc *pd_gpio;
+ struct delayed_work poll_work;
+ struct ub960_rxport *rxports[UB960_MAX_RX_NPORTS];
+ struct ub960_txport *txports[UB960_MAX_TX_NPORTS];
+
+ struct v4l2_subdev sd;
+ struct media_pad pads[UB960_MAX_NPORTS];
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_async_notifier notifier;
+
+ u32 tx_data_rate; /* Nominal data rate (bps) */
+ s64 tx_link_freq[1];
+
+ struct i2c_atr *atr;
+
+ struct {
+ u8 rxport;
+ u8 txport;
+ u8 indirect_target;
+ } reg_current;
+
+ bool streaming;
+
+ u8 stored_fwd_ctl;
+
+ u64 stream_enable_mask[UB960_MAX_NPORTS];
+
+ /* These are common to all ports */
+ struct {
+ bool manual;
+
+ s8 min;
+ s8 max;
+ } strobe;
+};
+
+static inline struct ub960_data *sd_to_ub960(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct ub960_data, sd);
+}
+
+static inline bool ub960_pad_is_sink(struct ub960_data *priv, u32 pad)
+{
+ return pad < priv->hw_data->num_rxports;
+}
+
+static inline bool ub960_pad_is_source(struct ub960_data *priv, u32 pad)
+{
+ return pad >= priv->hw_data->num_rxports;
+}
+
+static inline unsigned int ub960_pad_to_port(struct ub960_data *priv, u32 pad)
+{
+ if (ub960_pad_is_sink(priv, pad))
+ return pad;
+ else
+ return pad - priv->hw_data->num_rxports;
+}
+
+struct ub960_format_info {
+ u32 code;
+ u32 bpp;
+ u8 datatype;
+ bool meta;
+};
+
+static const struct ub960_format_info ub960_formats[] = {
+ { .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
+ { .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
+ { .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
+ { .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, .datatype = MIPI_CSI2_DT_YUV422_8B, },
+
+ { .code = MEDIA_BUS_FMT_SBGGR12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
+ { .code = MEDIA_BUS_FMT_SGBRG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
+ { .code = MEDIA_BUS_FMT_SGRBG12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
+ { .code = MEDIA_BUS_FMT_SRGGB12_1X12, .bpp = 12, .datatype = MIPI_CSI2_DT_RAW12, },
+};
+
+static const struct ub960_format_info *ub960_find_format(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ub960_formats); i++) {
+ if (ub960_formats[i].code == code)
+ return &ub960_formats[i];
+ }
+
+ return NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * Basic device access
+ */
+
+static int ub960_read(struct ub960_data *priv, u8 reg, u8 *val)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int v;
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = regmap_read(priv->regmap, reg, &v);
+ if (ret) {
+ dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+ goto out_unlock;
+ }
+
+ *val = v;
+
+out_unlock:
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+static int ub960_write(struct ub960_data *priv, u8 reg, u8 val)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = regmap_write(priv->regmap, reg, val);
+ if (ret)
+ dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+static int ub960_update_bits(struct ub960_data *priv, u8 reg, u8 mask, u8 val)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = regmap_update_bits(priv->regmap, reg, mask, val);
+ if (ret)
+ dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+static int ub960_read16(struct ub960_data *priv, u8 reg, u16 *val)
+{
+ struct device *dev = &priv->client->dev;
+ __be16 __v;
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = regmap_bulk_read(priv->regmap, reg, &__v, sizeof(__v));
+ if (ret) {
+ dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+ goto out_unlock;
+ }
+
+ *val = be16_to_cpu(__v);
+
+out_unlock:
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+static int ub960_rxport_select(struct ub960_data *priv, u8 nport)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ lockdep_assert_held(&priv->reg_lock);
+
+ if (priv->reg_current.rxport == nport)
+ return 0;
+
+ ret = regmap_write(priv->regmap, UB960_SR_FPD3_PORT_SEL,
+ (nport << 4) | BIT(nport));
+ if (ret) {
+ dev_err(dev, "%s: cannot select rxport %d (%d)!\n", __func__,
+ nport, ret);
+ return ret;
+ }
+
+ priv->reg_current.rxport = nport;
+
+ return 0;
+}
+
+static int ub960_rxport_read(struct ub960_data *priv, u8 nport, u8 reg, u8 *val)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int v;
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = ub960_rxport_select(priv, nport);
+ if (ret)
+ goto out_unlock;
+
+ ret = regmap_read(priv->regmap, reg, &v);
+ if (ret) {
+ dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+ goto out_unlock;
+ }
+
+ *val = v;
+
+out_unlock:
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+static int ub960_rxport_write(struct ub960_data *priv, u8 nport, u8 reg, u8 val)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = ub960_rxport_select(priv, nport);
+ if (ret)
+ goto out_unlock;
+
+ ret = regmap_write(priv->regmap, reg, val);
+ if (ret)
+ dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+
+out_unlock:
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+static int ub960_rxport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
+ u8 mask, u8 val)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = ub960_rxport_select(priv, nport);
+ if (ret)
+ goto out_unlock;
+
+ ret = regmap_update_bits(priv->regmap, reg, mask, val);
+ if (ret)
+ dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+
+out_unlock:
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+static int ub960_rxport_read16(struct ub960_data *priv, u8 nport, u8 reg,
+ u16 *val)
+{
+ struct device *dev = &priv->client->dev;
+ __be16 __v;
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = ub960_rxport_select(priv, nport);
+ if (ret)
+ goto out_unlock;
+
+ ret = regmap_bulk_read(priv->regmap, reg, &__v, sizeof(__v));
+ if (ret) {
+ dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+ goto out_unlock;
+ }
+
+ *val = be16_to_cpu(__v);
+
+out_unlock:
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+static int ub960_txport_select(struct ub960_data *priv, u8 nport)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ lockdep_assert_held(&priv->reg_lock);
+
+ if (priv->reg_current.txport == nport)
+ return 0;
+
+ ret = regmap_write(priv->regmap, UB960_SR_CSI_PORT_SEL,
+ (nport << 4) | BIT(nport));
+ if (ret) {
+ dev_err(dev, "%s: cannot select tx port %d (%d)!\n", __func__,
+ nport, ret);
+ return ret;
+ }
+
+ priv->reg_current.txport = nport;
+
+ return 0;
+}
+
+static int ub960_txport_read(struct ub960_data *priv, u8 nport, u8 reg, u8 *val)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int v;
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = ub960_txport_select(priv, nport);
+ if (ret)
+ goto out_unlock;
+
+ ret = regmap_read(priv->regmap, reg, &v);
+ if (ret) {
+ dev_err(dev, "%s: cannot read register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+ goto out_unlock;
+ }
+
+ *val = v;
+
+out_unlock:
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+static int ub960_txport_write(struct ub960_data *priv, u8 nport, u8 reg, u8 val)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = ub960_txport_select(priv, nport);
+ if (ret)
+ goto out_unlock;
+
+ ret = regmap_write(priv->regmap, reg, val);
+ if (ret)
+ dev_err(dev, "%s: cannot write register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+
+out_unlock:
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+static int ub960_txport_update_bits(struct ub960_data *priv, u8 nport, u8 reg,
+ u8 mask, u8 val)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = ub960_txport_select(priv, nport);
+ if (ret)
+ goto out_unlock;
+
+ ret = regmap_update_bits(priv->regmap, reg, mask, val);
+ if (ret)
+ dev_err(dev, "%s: cannot update register 0x%02x (%d)!\n",
+ __func__, reg, ret);
+
+out_unlock:
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+static int ub960_select_ind_reg_block(struct ub960_data *priv, u8 block)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ lockdep_assert_held(&priv->reg_lock);
+
+ if (priv->reg_current.indirect_target == block)
+ return 0;
+
+ ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_CTL, block << 2);
+ if (ret) {
+ dev_err(dev, "%s: cannot select indirect target %u (%d)!\n",
+ __func__, block, ret);
+ return ret;
+ }
+
+ priv->reg_current.indirect_target = block;
+
+ return 0;
+}
+
+static int ub960_read_ind(struct ub960_data *priv, u8 block, u8 reg, u8 *val)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int v;
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = ub960_select_ind_reg_block(priv, block);
+ if (ret)
+ goto out_unlock;
+
+ ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg);
+ if (ret) {
+ dev_err(dev,
+ "Write to IND_ACC_ADDR failed when reading %u:%x02x: %d\n",
+ block, reg, ret);
+ goto out_unlock;
+ }
+
+ ret = regmap_read(priv->regmap, UB960_SR_IND_ACC_DATA, &v);
+ if (ret) {
+ dev_err(dev,
+ "Write to IND_ACC_DATA failed when reading %u:%x02x: %d\n",
+ block, reg, ret);
+ goto out_unlock;
+ }
+
+ *val = v;
+
+out_unlock:
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+static int ub960_write_ind(struct ub960_data *priv, u8 block, u8 reg, u8 val)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = ub960_select_ind_reg_block(priv, block);
+ if (ret)
+ goto out_unlock;
+
+ ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg);
+ if (ret) {
+ dev_err(dev,
+ "Write to IND_ACC_ADDR failed when writing %u:%x02x: %d\n",
+ block, reg, ret);
+ goto out_unlock;
+ }
+
+ ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_DATA, val);
+ if (ret) {
+ dev_err(dev,
+ "Write to IND_ACC_DATA failed when writing %u:%x02x: %d\n",
+ block, reg, ret);
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+static int ub960_ind_update_bits(struct ub960_data *priv, u8 block, u8 reg,
+ u8 mask, u8 val)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ mutex_lock(&priv->reg_lock);
+
+ ret = ub960_select_ind_reg_block(priv, block);
+ if (ret)
+ goto out_unlock;
+
+ ret = regmap_write(priv->regmap, UB960_SR_IND_ACC_ADDR, reg);
+ if (ret) {
+ dev_err(dev,
+ "Write to IND_ACC_ADDR failed when updating %u:%x02x: %d\n",
+ block, reg, ret);
+ goto out_unlock;
+ }
+
+ ret = regmap_update_bits(priv->regmap, UB960_SR_IND_ACC_DATA, mask,
+ val);
+ if (ret) {
+ dev_err(dev,
+ "Write to IND_ACC_DATA failed when updating %u:%x02x: %d\n",
+ block, reg, ret);
+ goto out_unlock;
+ }
+
+out_unlock:
+ mutex_unlock(&priv->reg_lock);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * I2C-ATR (address translator)
+ */
+
+static int ub960_atr_attach_client(struct i2c_atr *atr, u32 chan_id,
+ const struct i2c_client *client, u16 alias)
+{
+ struct ub960_data *priv = i2c_atr_get_driver_data(atr);
+ struct ub960_rxport *rxport = priv->rxports[chan_id];
+ struct device *dev = &priv->client->dev;
+ unsigned int reg_idx;
+
+ for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_clients); reg_idx++) {
+ if (!rxport->aliased_clients[reg_idx])
+ break;
+ }
+
+ if (reg_idx == ARRAY_SIZE(rxport->aliased_clients)) {
+ dev_err(dev, "rx%u: alias pool exhausted\n", rxport->nport);
+ return -EADDRNOTAVAIL;
+ }
+
+ rxport->aliased_clients[reg_idx] = client;
+
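+ /*
+  * The SLAVE_ID and SLAVE_ALIAS registers take the 7-bit I2C address in
+  * bits [7:1], hence the shift by one below.
+  */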
+ ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ID(reg_idx),
+ client->addr << 1);
+ ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx),
+ alias << 1);
+
+ dev_dbg(dev, "rx%u: client 0x%02x assigned alias 0x%02x at slot %u\n",
+ rxport->nport, client->addr, alias, reg_idx);
+
+ return 0;
+}
+
+static void ub960_atr_detach_client(struct i2c_atr *atr, u32 chan_id,
+ const struct i2c_client *client)
+{
+ struct ub960_data *priv = i2c_atr_get_driver_data(atr);
+ struct ub960_rxport *rxport = priv->rxports[chan_id];
+ struct device *dev = &priv->client->dev;
+ unsigned int reg_idx;
+
+ for (reg_idx = 0; reg_idx < ARRAY_SIZE(rxport->aliased_clients); reg_idx++) {
+ if (rxport->aliased_clients[reg_idx] == client)
+ break;
+ }
+
+ if (reg_idx == ARRAY_SIZE(rxport->aliased_clients)) {
+ dev_err(dev, "rx%u: client 0x%02x is not mapped!\n",
+ rxport->nport, client->addr);
+ return;
+ }
+
+ rxport->aliased_clients[reg_idx] = NULL;
+
+ ub960_rxport_write(priv, chan_id, UB960_RR_SLAVE_ALIAS(reg_idx), 0);
+
+ dev_dbg(dev, "rx%u: client 0x%02x released at slot %u\n", rxport->nport,
+ client->addr, reg_idx);
+}
+
+static const struct i2c_atr_ops ub960_atr_ops = {
+ .attach_client = ub960_atr_attach_client,
+ .detach_client = ub960_atr_detach_client,
+};
+
+static int ub960_init_atr(struct ub960_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ struct i2c_adapter *parent_adap = priv->client->adapter;
+
+ priv->atr = i2c_atr_new(parent_adap, dev, &ub960_atr_ops,
+ priv->hw_data->num_rxports);
+ if (IS_ERR(priv->atr))
+ return PTR_ERR(priv->atr);
+
+ i2c_atr_set_driver_data(priv->atr, priv);
+
+ return 0;
+}
+
+static void ub960_uninit_atr(struct ub960_data *priv)
+{
+ i2c_atr_delete(priv->atr);
+ priv->atr = NULL;
+}
+
+/* -----------------------------------------------------------------------------
+ * TX ports
+ */
+
+static int ub960_parse_dt_txport(struct ub960_data *priv,
+ struct fwnode_handle *ep_fwnode,
+ u8 nport)
+{
+ struct device *dev = &priv->client->dev;
+ struct v4l2_fwnode_endpoint vep = {};
+ struct ub960_txport *txport;
+ int ret;
+
+ txport = kzalloc(sizeof(*txport), GFP_KERNEL);
+ if (!txport)
+ return -ENOMEM;
+
+ txport->priv = priv;
+ txport->nport = nport;
+
+ vep.bus_type = V4L2_MBUS_CSI2_DPHY;
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep_fwnode, &vep);
+ if (ret) {
+ dev_err(dev, "tx%u: failed to parse endpoint data\n", nport);
+ goto err_free_txport;
+ }
+
+ txport->non_continous_clk = vep.bus.mipi_csi2.flags &
+ V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK;
+
+ txport->num_data_lanes = vep.bus.mipi_csi2.num_data_lanes;
+
+ if (vep.nr_of_link_frequencies != 1) {
+ ret = -EINVAL;
+ goto err_free_vep;
+ }
+
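+ /*
+  * The CSI-2 D-PHY data rate is twice the link frequency, as bits are
+  * transferred on both edges of the clock.
+  */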
+ priv->tx_link_freq[0] = vep.link_frequencies[0];
+ priv->tx_data_rate = priv->tx_link_freq[0] * 2;
+
+ if (priv->tx_data_rate != MHZ(1600) &&
+ priv->tx_data_rate != MHZ(1200) &&
+ priv->tx_data_rate != MHZ(800) &&
+ priv->tx_data_rate != MHZ(400)) {
+ dev_err(dev, "tx%u: invalid 'link-frequencies' value\n", nport);
+ ret = -EINVAL;
+ goto err_free_vep;
+ }
+
+ v4l2_fwnode_endpoint_free(&vep);
+
+ priv->txports[nport] = txport;
+
+ return 0;
+
+err_free_vep:
+ v4l2_fwnode_endpoint_free(&vep);
+err_free_txport:
+ kfree(txport);
+
+ return ret;
+}
+
+static void ub960_csi_handle_events(struct ub960_data *priv, u8 nport)
+{
+ struct device *dev = &priv->client->dev;
+ u8 csi_tx_isr;
+ int ret;
+
+ ret = ub960_txport_read(priv, nport, UB960_TR_CSI_TX_ISR, &csi_tx_isr);
+ if (ret)
+ return;
+
+ if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_SYNC_ERROR)
+ dev_warn(dev, "TX%u: CSI_SYNC_ERROR\n", nport);
+
+ if (csi_tx_isr & UB960_TR_CSI_TX_ISR_IS_CSI_PASS_ERROR)
+ dev_warn(dev, "TX%u: CSI_PASS_ERROR\n", nport);
+}
+
+/* -----------------------------------------------------------------------------
+ * RX ports
+ */
+
+static int ub960_rxport_enable_vpocs(struct ub960_data *priv)
+{
+ unsigned int nport;
+ int ret;
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+
+ if (!rxport || !rxport->vpoc)
+ continue;
+
+ ret = regulator_enable(rxport->vpoc);
+ if (ret)
+ goto err_disable_vpocs;
+ }
+
+ return 0;
+
+err_disable_vpocs:
+ while (nport--) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+
+ if (!rxport || !rxport->vpoc)
+ continue;
+
+ regulator_disable(rxport->vpoc);
+ }
+
+ return ret;
+}
+
+static void ub960_rxport_disable_vpocs(struct ub960_data *priv)
+{
+ unsigned int nport;
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+
+ if (!rxport || !rxport->vpoc)
+ continue;
+
+ regulator_disable(rxport->vpoc);
+ }
+}
+
+static void ub960_rxport_clear_errors(struct ub960_data *priv,
+ unsigned int nport)
+{
+ u8 v;
+
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v);
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v);
+ ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &v);
+ ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &v);
+
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v);
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PAR_ERR_LO, &v);
+
+ ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v);
+}
+
+static void ub960_clear_rx_errors(struct ub960_data *priv)
+{
+ unsigned int nport;
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; nport++)
+ ub960_rxport_clear_errors(priv, nport);
+}
+
+static int ub960_rxport_get_strobe_pos(struct ub960_data *priv,
+ unsigned int nport, s8 *strobe_pos)
+{
+ u8 v;
+ u8 clk_delay, data_delay;
+ int ret;
+
+ ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB960_IR_RX_ANA_STROBE_SET_CLK, &v);
+
+ clk_delay = (v & UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY) ?
+ 0 : UB960_MANUAL_STROBE_EXTRA_DELAY;
+
+ ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB960_IR_RX_ANA_STROBE_SET_DATA, &v);
+
+ data_delay = (v & UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY) ?
+ 0 : UB960_MANUAL_STROBE_EXTRA_DELAY;
+
+ ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_0, &v);
+ if (ret)
+ return ret;
+
+ clk_delay += v & UB960_IR_RX_ANA_STROBE_SET_CLK_DELAY_MASK;
+
+ ret = ub960_rxport_read(priv, nport, UB960_RR_SFILTER_STS_1, &v);
+ if (ret)
+ return ret;
+
+ data_delay += v & UB960_IR_RX_ANA_STROBE_SET_DATA_DELAY_MASK;
+
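+ /* The strobe position is the data delay relative to the clock delay. */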
+ *strobe_pos = data_delay - clk_delay;
+
+ return 0;
+}
+
+static void ub960_rxport_set_strobe_pos(struct ub960_data *priv,
+ unsigned int nport, s8 strobe_pos)
+{
+ u8 clk_delay, data_delay;
+
+ clk_delay = UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY;
+ data_delay = UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY;
+
+ if (strobe_pos < UB960_MIN_AEQ_STROBE_POS)
+ clk_delay = abs(strobe_pos) - UB960_MANUAL_STROBE_EXTRA_DELAY;
+ else if (strobe_pos > UB960_MAX_AEQ_STROBE_POS)
+ data_delay = strobe_pos - UB960_MANUAL_STROBE_EXTRA_DELAY;
+ else if (strobe_pos < 0)
+ clk_delay = abs(strobe_pos) | UB960_IR_RX_ANA_STROBE_SET_CLK_NO_EXTRA_DELAY;
+ else if (strobe_pos > 0)
+ data_delay = strobe_pos | UB960_IR_RX_ANA_STROBE_SET_DATA_NO_EXTRA_DELAY;
+
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB960_IR_RX_ANA_STROBE_SET_CLK, clk_delay);
+
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport),
+ UB960_IR_RX_ANA_STROBE_SET_DATA, data_delay);
+}
+
+static void ub960_rxport_set_strobe_range(struct ub960_data *priv,
+ s8 strobe_min, s8 strobe_max)
+{
+ /* Convert the signed strobe pos to positive zero based value */
+ strobe_min -= UB960_MIN_AEQ_STROBE_POS;
+ strobe_max -= UB960_MIN_AEQ_STROBE_POS;
+
+ ub960_write(priv, UB960_XR_SFILTER_CFG,
+ ((u8)strobe_min << UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) |
+ ((u8)strobe_max << UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT));
+}
+
+static int ub960_rxport_get_eq_level(struct ub960_data *priv,
+ unsigned int nport, u8 *eq_level)
+{
+ int ret;
+ u8 v;
+
+ ret = ub960_rxport_read(priv, nport, UB960_RR_AEQ_STATUS, &v);
+ if (ret)
+ return ret;
+
+ *eq_level = (v & UB960_RR_AEQ_STATUS_STATUS_1) +
+ (v & UB960_RR_AEQ_STATUS_STATUS_2);
+
+ return 0;
+}
+
+static void ub960_rxport_set_eq_level(struct ub960_data *priv,
+ unsigned int nport, u8 eq_level)
+{
+ u8 eq_stage_1_select_value, eq_stage_2_select_value;
+ const unsigned int eq_stage_max = 7;
+ u8 v;
+
+ if (eq_level <= eq_stage_max) {
+ eq_stage_1_select_value = eq_level;
+ eq_stage_2_select_value = 0;
+ } else {
+ eq_stage_1_select_value = eq_stage_max;
+ eq_stage_2_select_value = eq_level - eq_stage_max;
+ }
+
+ ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);
+
+ v &= ~(UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_MASK |
+ UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_MASK);
+ v |= eq_stage_1_select_value << UB960_RR_AEQ_BYPASS_EQ_STAGE1_VALUE_SHIFT;
+ v |= eq_stage_2_select_value << UB960_RR_AEQ_BYPASS_EQ_STAGE2_VALUE_SHIFT;
+ v |= UB960_RR_AEQ_BYPASS_ENABLE;
+
+ ub960_rxport_write(priv, nport, UB960_RR_AEQ_BYPASS, v);
+}
+
+static void ub960_rxport_set_eq_range(struct ub960_data *priv,
+ unsigned int nport, u8 eq_min, u8 eq_max)
+{
+ ub960_rxport_write(priv, nport, UB960_RR_AEQ_MIN_MAX,
+ (eq_min << UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) |
+ (eq_max << UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT));
+
+ /* Enable AEQ min setting */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_CTL2,
+ UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR,
+ UB960_RR_AEQ_CTL2_SET_AEQ_FLOOR);
+}
+
+static void ub960_rxport_config_eq(struct ub960_data *priv, unsigned int nport)
+{
+ struct ub960_rxport *rxport = priv->rxports[nport];
+
+ /* We also set common settings here. Should be moved elsewhere. */
+
+ if (priv->strobe.manual) {
+ /* Disable AEQ_SFILTER_EN */
+ ub960_update_bits(priv, UB960_XR_AEQ_CTL1,
+ UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN, 0);
+ } else {
+ /* Enable SFILTER and error control */
+ ub960_write(priv, UB960_XR_AEQ_CTL1,
+ UB960_XR_AEQ_CTL1_AEQ_ERR_CTL_MASK |
+ UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN);
+
+ /* Set AEQ strobe range */
+ ub960_rxport_set_strobe_range(priv, priv->strobe.min,
+ priv->strobe.max);
+ }
+
+ /* The rest are port specific */
+
+ if (priv->strobe.manual)
+ ub960_rxport_set_strobe_pos(priv, nport, rxport->eq.strobe_pos);
+ else
+ ub960_rxport_set_strobe_pos(priv, nport, 0);
+
+ if (rxport->eq.manual_eq) {
+ ub960_rxport_set_eq_level(priv, nport,
+ rxport->eq.manual.eq_level);
+
+ /* Enable AEQ Bypass */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
+ UB960_RR_AEQ_BYPASS_ENABLE,
+ UB960_RR_AEQ_BYPASS_ENABLE);
+ } else {
+ ub960_rxport_set_eq_range(priv, nport,
+ rxport->eq.aeq.eq_level_min,
+ rxport->eq.aeq.eq_level_max);
+
+ /* Disable AEQ Bypass */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_AEQ_BYPASS,
+ UB960_RR_AEQ_BYPASS_ENABLE, 0);
+ }
+}
+
+static int ub960_rxport_link_ok(struct ub960_data *priv, unsigned int nport,
+ bool *ok)
+{
+ u8 rx_port_sts1, rx_port_sts2;
+ u16 parity_errors;
+ u8 csi_rx_sts;
+ u8 csi_err_cnt;
+ u8 bcc_sts;
+ int ret;
+ bool errors;
+
+ ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1,
+ &rx_port_sts1);
+ if (ret)
+ return ret;
+
+ if (!(rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS)) {
+ *ok = false;
+ return 0;
+ }
+
+ ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2,
+ &rx_port_sts2);
+ if (ret)
+ return ret;
+
+ ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS, &csi_rx_sts);
+ if (ret)
+ return ret;
+
+ ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER,
+ &csi_err_cnt);
+ if (ret)
+ return ret;
+
+ ret = ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS, &bcc_sts);
+ if (ret)
+ return ret;
+
+ ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
+ &parity_errors);
+ if (ret)
+ return ret;
+
+ errors = (rx_port_sts1 & UB960_RR_RX_PORT_STS1_ERROR_MASK) ||
+ (rx_port_sts2 & UB960_RR_RX_PORT_STS2_ERROR_MASK) ||
+ (bcc_sts & UB960_RR_BCC_STATUS_ERROR_MASK) ||
+ (csi_rx_sts & UB960_RR_CSI_RX_STS_ERROR_MASK) || csi_err_cnt ||
+ parity_errors;
+
+ *ok = !errors;
+
+ return 0;
+}
+
+/*
+ * Wait for the RX ports to lock, have no errors and have stable strobe position
+ * and EQ level.
+ */
+static int ub960_rxport_wait_locks(struct ub960_data *priv,
+ unsigned long port_mask,
+ unsigned int *lock_mask)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned long timeout;
+ unsigned int link_ok_mask;
+ unsigned int missing;
+ unsigned int loops;
+ u8 nport;
+ int ret;
+
+ if (port_mask == 0) {
+ if (lock_mask)
+ *lock_mask = 0;
+ return 0;
+ }
+
+ if (port_mask >= BIT(priv->hw_data->num_rxports))
+ return -EINVAL;
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+ loops = 0;
+ link_ok_mask = 0;
+
+ while (time_before(jiffies, timeout)) {
+ missing = 0;
+
+ for_each_set_bit(nport, &port_mask,
+ priv->hw_data->num_rxports) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+ bool ok;
+
+ if (!rxport)
+ continue;
+
+ ret = ub960_rxport_link_ok(priv, nport, &ok);
+ if (ret)
+ return ret;
+
+ /*
+ * We want the link to be ok for two consecutive loops,
+ * as a link could get established just before our test
+ * and drop soon after.
+ */
+ if (!ok || !(link_ok_mask & BIT(nport)))
+ missing++;
+
+ if (ok)
+ link_ok_mask |= BIT(nport);
+ else
+ link_ok_mask &= ~BIT(nport);
+ }
+
+ loops++;
+
+ if (missing == 0)
+ break;
+
+ msleep(50);
+ }
+
+ if (lock_mask)
+ *lock_mask = link_ok_mask;
+
+ dev_dbg(dev, "Wait locks done in %u loops\n", loops);
+ for_each_set_bit(nport, &port_mask, priv->hw_data->num_rxports) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+ s8 strobe_pos;
+ u8 eq_level;
+ u16 v;
+
+ if (!rxport)
+ continue;
+
+ if (!(link_ok_mask & BIT(nport))) {
+ dev_dbg(dev, "\trx%u: not locked\n", nport);
+ continue;
+ }
+
+ ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH, &v);
+
+ ret = ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
+ if (ret)
+ return ret;
+
+ ret = ub960_rxport_get_eq_level(priv, nport, &eq_level);
+ if (ret)
+ return ret;
+
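+ /*
+  * RX_FREQ_HIGH/LOW hold the measured link frequency as an 8.8 fixed
+  * point value in MHz, hence the conversion below.
+  */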
+ dev_dbg(dev, "\trx%u: locked, SP: %d, EQ: %u, freq %llu Hz\n",
+ nport, strobe_pos, eq_level, (v * 1000000ULL) >> 8);
+ }
+
+ return 0;
+}
+
+static unsigned long ub960_calc_bc_clk_rate_ub960(struct ub960_data *priv,
+ struct ub960_rxport *rxport)
+{
+ unsigned int mult;
+ unsigned int div;
+
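+ /*
+  * The back channel rate is derived from REFCLK: REFCLK / 10 for the RAW
+  * modes (e.g. 2.5 MHz with a 25 MHz REFCLK), 2 * REFCLK for CSI-2
+  * synchronous mode (50 MHz) and 2/5 * REFCLK for non-synchronous mode
+  * (10 MHz).
+  */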
+ switch (rxport->rx_mode) {
+ case RXPORT_MODE_RAW10:
+ case RXPORT_MODE_RAW12_HF:
+ case RXPORT_MODE_RAW12_LF:
+ mult = 1;
+ div = 10;
+ break;
+
+ case RXPORT_MODE_CSI2_SYNC:
+ mult = 2;
+ div = 1;
+ break;
+
+ case RXPORT_MODE_CSI2_NONSYNC:
+ mult = 2;
+ div = 5;
+ break;
+
+ default:
+ return 0;
+ }
+
+ return clk_get_rate(priv->refclk) * mult / div;
+}
+
+static unsigned long ub960_calc_bc_clk_rate_ub9702(struct ub960_data *priv,
+ struct ub960_rxport *rxport)
+{
+ switch (rxport->rx_mode) {
+ case RXPORT_MODE_RAW10:
+ case RXPORT_MODE_RAW12_HF:
+ case RXPORT_MODE_RAW12_LF:
+ return 2359400;
+
+ case RXPORT_MODE_CSI2_SYNC:
+ return 47187500;
+
+ case RXPORT_MODE_CSI2_NONSYNC:
+ return 9437500;
+
+ default:
+ return 0;
+ }
+}
+
+static int ub960_rxport_add_serializer(struct ub960_data *priv, u8 nport)
+{
+ struct ub960_rxport *rxport = priv->rxports[nport];
+ struct device *dev = &priv->client->dev;
+ struct ds90ub9xx_platform_data *ser_pdata = &rxport->ser.pdata;
+ struct i2c_board_info ser_info = {
+ .of_node = to_of_node(rxport->ser.fwnode),
+ .fwnode = rxport->ser.fwnode,
+ .platform_data = ser_pdata,
+ };
+
+ ser_pdata->port = nport;
+ ser_pdata->atr = priv->atr;
+ if (priv->hw_data->is_ub9702)
+ ser_pdata->bc_rate = ub960_calc_bc_clk_rate_ub9702(priv, rxport);
+ else
+ ser_pdata->bc_rate = ub960_calc_bc_clk_rate_ub960(priv, rxport);
+
+ /*
+ * The serializer is added under the same i2c adapter as the
+ * deserializer. This is not quite right, as the serializer is behind
+ * the FPD-Link.
+ */
+ ser_info.addr = rxport->ser.alias;
+ rxport->ser.client =
+ i2c_new_client_device(priv->client->adapter, &ser_info);
+ if (IS_ERR(rxport->ser.client)) {
+ dev_err(dev, "rx%u: cannot add %s i2c device", nport,
+ ser_info.type);
+ return PTR_ERR(rxport->ser.client);
+ }
+
+ dev_dbg(dev, "rx%u: remote serializer at alias 0x%02x (%u-%04x)\n",
+ nport, rxport->ser.client->addr,
+ rxport->ser.client->adapter->nr, rxport->ser.client->addr);
+
+ return 0;
+}
+
+static void ub960_rxport_remove_serializer(struct ub960_data *priv, u8 nport)
+{
+ struct ub960_rxport *rxport = priv->rxports[nport];
+
+ i2c_unregister_device(rxport->ser.client);
+ rxport->ser.client = NULL;
+}
+
+/* Add serializer i2c devices for all initialized ports */
+static int ub960_rxport_add_serializers(struct ub960_data *priv)
+{
+ unsigned int nport;
+ int ret;
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+
+ if (!rxport)
+ continue;
+
+ ret = ub960_rxport_add_serializer(priv, nport);
+ if (ret)
+ goto err_remove_sers;
+ }
+
+ return 0;
+
+err_remove_sers:
+ while (nport--) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+
+ if (!rxport)
+ continue;
+
+ ub960_rxport_remove_serializer(priv, nport);
+ }
+
+ return ret;
+}
+
+static void ub960_rxport_remove_serializers(struct ub960_data *priv)
+{
+ unsigned int nport;
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+
+ if (!rxport)
+ continue;
+
+ ub960_rxport_remove_serializer(priv, nport);
+ }
+}
+
+static void ub960_init_tx_port(struct ub960_data *priv,
+ struct ub960_txport *txport)
+{
+ unsigned int nport = txport->nport;
+ u8 csi_ctl = 0;
+
+ /*
+ * From the datasheet: "initial CSI Skew-Calibration
+ * sequence [...] should be set when operating at 1.6 Gbps"
+ */
+ if (priv->tx_data_rate == MHZ(1600))
+ csi_ctl |= UB960_TR_CSI_CTL_CSI_CAL_EN;
+
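+ /* The lane count field encodes the number of unused data lanes (4 - N). */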
+ csi_ctl |= (4 - txport->num_data_lanes) << 4;
+
+ if (!txport->non_continous_clk)
+ csi_ctl |= UB960_TR_CSI_CTL_CSI_CONTS_CLOCK;
+
+ ub960_txport_write(priv, nport, UB960_TR_CSI_CTL, csi_ctl);
+}
+
+static int ub960_init_tx_ports(struct ub960_data *priv)
+{
+ unsigned int nport;
+ u8 speed_select;
+ u8 pll_div;
+
+ /* TX ports */
+
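+ /*
+  * CSI_PLL_CTL speed select: 0 = 1.6 Gbps, 1 = 1.2 Gbps, 2 = 800 Mbps,
+  * 3 = 400 Mbps.
+  */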
+ switch (priv->tx_data_rate) {
+ case MHZ(1600):
+ default:
+ speed_select = 0;
+ pll_div = 0x10;
+ break;
+ case MHZ(1200):
+ speed_select = 1;
+ pll_div = 0x18;
+ break;
+ case MHZ(800):
+ speed_select = 2;
+ pll_div = 0x10;
+ break;
+ case MHZ(400):
+ speed_select = 3;
+ pll_div = 0x10;
+ break;
+ }
+
+ ub960_write(priv, UB960_SR_CSI_PLL_CTL, speed_select);
+
+ if (priv->hw_data->is_ub9702) {
+ ub960_write(priv, UB960_SR_CSI_PLL_DIV, pll_div);
+
+ switch (priv->tx_data_rate) {
+ case MHZ(1600):
+ default:
+ ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x92, 0x80);
+ ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x4b, 0x2a);
+ break;
+ case MHZ(800):
+ ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x92, 0x90);
+ ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x4f, 0x2a);
+ ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x4b, 0x2a);
+ break;
+ case MHZ(400):
+ ub960_write_ind(priv, UB960_IND_TARGET_CSI_ANA, 0x92, 0xa0);
+ break;
+ }
+ }
+
+ for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
+ struct ub960_txport *txport = priv->txports[nport];
+
+ if (!txport)
+ continue;
+
+ ub960_init_tx_port(priv, txport);
+ }
+
+ return 0;
+}
+
+static void ub960_init_rx_port_ub960(struct ub960_data *priv,
+ struct ub960_rxport *rxport)
+{
+ unsigned int nport = rxport->nport;
+ u32 bc_freq_val;
+
+ /*
+ * Back channel frequency select.
+ * Override FREQ_SELECT from the strap.
+ * 0 - 2.5 Mbps (DS90UB913A-Q1 / DS90UB933-Q1)
+ * 2 - 10 Mbps
+ * 6 - 50 Mbps (DS90UB953-Q1)
+ *
+ * Note that changing this setting will result in some errors on the back
+ * channel for a short period of time.
+ */
+
+ switch (rxport->rx_mode) {
+ case RXPORT_MODE_RAW10:
+ case RXPORT_MODE_RAW12_HF:
+ case RXPORT_MODE_RAW12_LF:
+ bc_freq_val = 0;
+ break;
+
+ case RXPORT_MODE_CSI2_NONSYNC:
+ bc_freq_val = 2;
+ break;
+
+ case RXPORT_MODE_CSI2_SYNC:
+ bc_freq_val = 6;
+ break;
+
+ default:
+ return;
+ }
+
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_BC_FREQ_SEL_MASK,
+ bc_freq_val);
+
+ switch (rxport->rx_mode) {
+ case RXPORT_MODE_RAW10:
+ /* FPD3_MODE = RAW10 Mode (DS90UB913A-Q1 / DS90UB933-Q1 compatible) */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG,
+ UB960_RR_PORT_CONFIG_FPD3_MODE_MASK,
+ 0x3);
+
+ /*
+ * RAW10_8BIT_CTL = 0b10 : 8-bit processing using upper 8 bits
+ */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2,
+ UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_MASK,
+ 0x2 << UB960_RR_PORT_CONFIG2_RAW10_8BIT_CTL_SHIFT);
+
+ break;
+
+ case RXPORT_MODE_RAW12_HF:
+ case RXPORT_MODE_RAW12_LF:
+ /* Not implemented */
+ return;
+
+ case RXPORT_MODE_CSI2_SYNC:
+ case RXPORT_MODE_CSI2_NONSYNC:
+ /* CSI-2 Mode (DS90UB953-Q1 compatible) */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG, 0x3,
+ 0x0);
+
+ break;
+ }
+
+ /* LV_POLARITY & FV_POLARITY */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3,
+ rxport->lv_fv_pol);
+
+ /* Enable all interrupt sources from this port */
+ ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_HI, 0x07);
+ ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_LO, 0x7f);
+
+ /* Enable I2C_PASS_THROUGH */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH);
+
+ /* Enable I2C communication to the serializer via the alias addr */
+ ub960_rxport_write(priv, nport, UB960_RR_SER_ALIAS_ID,
+ rxport->ser.alias << 1);
+
+ /* Configure EQ related settings */
+ ub960_rxport_config_eq(priv, nport);
+
+ /* Enable RX port */
+ ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport));
+}
+
+static void ub960_init_rx_port_ub9702_fpd3(struct ub960_data *priv,
+ struct ub960_rxport *rxport)
+{
+ unsigned int nport = rxport->nport;
+ u8 bc_freq_val;
+ u8 fpd_func_mode;
+
+ switch (rxport->rx_mode) {
+ case RXPORT_MODE_RAW10:
+ bc_freq_val = 0;
+ fpd_func_mode = 5;
+ break;
+
+ case RXPORT_MODE_RAW12_HF:
+ bc_freq_val = 0;
+ fpd_func_mode = 4;
+ break;
+
+ case RXPORT_MODE_RAW12_LF:
+ bc_freq_val = 0;
+ fpd_func_mode = 6;
+ break;
+
+ case RXPORT_MODE_CSI2_SYNC:
+ bc_freq_val = 6;
+ fpd_func_mode = 2;
+ break;
+
+ case RXPORT_MODE_CSI2_NONSYNC:
+ bc_freq_val = 2;
+ fpd_func_mode = 2;
+ break;
+
+ default:
+ return;
+ }
+
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, 0x7,
+ bc_freq_val);
+ ub960_rxport_write(priv, nport, UB960_RR_CHANNEL_MODE, fpd_func_mode);
+
+ /* set serdes_eq_mode = 1 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xa8, 0x80);
+
+ /* enable serdes driver */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x0d, 0x7f);
+
+ /* set serdes_eq_offset=4 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2b, 0x04);
+
+ /* init default serdes_eq_max in 0xa9 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xa9, 0x23);
+
+ /* init serdes_eq_min in 0xaa */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xaa, 0);
+
+ /* serdes_driver_ctl2 control: DS90UB953-Q1/DS90UB933-Q1/DS90UB913A-Q1 */
+ ub960_ind_update_bits(priv, UB960_IND_TARGET_RX_ANA(nport), 0x1b,
+ BIT(3), BIT(3));
+
+ /* RX port to half-rate */
+ ub960_update_bits(priv, UB960_SR_FPD_RATE_CFG, 0x3 << (nport * 2),
+ BIT(nport * 2));
+}
+
+static void ub960_init_rx_port_ub9702_fpd4_aeq(struct ub960_data *priv,
+ struct ub960_rxport *rxport)
+{
+ unsigned int nport = rxport->nport;
+ bool first_time_power_up = true;
+
+ if (first_time_power_up) {
+ u8 v;
+
+ /* AEQ init */
+ ub960_read_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2c, &v);
+
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x27, v);
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x28, v + 1);
+
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2b, 0x00);
+ }
+
+ /* enable serdes_eq_ctl2 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x9e, 0x00);
+
+ /* enable serdes_eq_ctl1 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x90, 0x40);
+
+ /* enable serdes_eq_en */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2e, 0x40);
+
+ /* disable serdes_eq_override */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0xf0, 0x00);
+
+ /* disable serdes_gain_override */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x71, 0x00);
+}
+
+static void ub960_init_rx_port_ub9702_fpd4(struct ub960_data *priv,
+ struct ub960_rxport *rxport)
+{
+ unsigned int nport = rxport->nport;
+ u8 bc_freq_val;
+
+ switch (rxport->rx_mode) {
+ case RXPORT_MODE_RAW10:
+ bc_freq_val = 0;
+ break;
+
+ case RXPORT_MODE_RAW12_HF:
+ bc_freq_val = 0;
+ break;
+
+ case RXPORT_MODE_RAW12_LF:
+ bc_freq_val = 0;
+ break;
+
+ case RXPORT_MODE_CSI2_SYNC:
+ bc_freq_val = 6;
+ break;
+
+ case RXPORT_MODE_CSI2_NONSYNC:
+ bc_freq_val = 2;
+ break;
+
+ default:
+ return;
+ }
+
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG, 0x7,
+ bc_freq_val);
+
+ /* FPD4 Sync Mode */
+ ub960_rxport_write(priv, nport, UB960_RR_CHANNEL_MODE, 0);
+
+ /* add serdes_eq_offset of 4 */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x2b, 0x04);
+
+ /* FPD4 serdes_start_eq in 0x27: assign default */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x27, 0x0);
+ /* FPD4 serdes_end_eq in 0x28: assign default */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x28, 0x23);
+
+ /* set serdes_driver_mode into FPD IV mode */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x04, 0x00);
+ /* set FPD PBC drv into FPD IV mode */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x1b, 0x00);
+
+ /* set serdes_system_init to 0x2f */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x21, 0x2f);
+ /* set serdes_system_rst in reset mode */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x25, 0xc1);
+
+ /* RX port to 7.55G mode */
+ ub960_update_bits(priv, UB960_SR_FPD_RATE_CFG, 0x3 << (nport * 2),
+ 0 << (nport * 2));
+
+ ub960_init_rx_port_ub9702_fpd4_aeq(priv, rxport);
+}
+
+static void ub960_init_rx_port_ub9702(struct ub960_data *priv,
+ struct ub960_rxport *rxport)
+{
+ unsigned int nport = rxport->nport;
+
+ if (rxport->cdr_mode == RXPORT_CDR_FPD3)
+ ub960_init_rx_port_ub9702_fpd3(priv, rxport);
+ else /* RXPORT_CDR_FPD4 */
+ ub960_init_rx_port_ub9702_fpd4(priv, rxport);
+
+ switch (rxport->rx_mode) {
+ case RXPORT_MODE_RAW10:
+ /*
+ * RAW10_8BIT_CTL = 0b11 : 8-bit processing using lower 8 bits
+ * 0b10 : 8-bit processing using upper 8 bits
+ */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2,
+ 0x3 << 6, 0x2 << 6);
+
+ break;
+
+ case RXPORT_MODE_RAW12_HF:
+ case RXPORT_MODE_RAW12_LF:
+ /* Not implemented */
+ return;
+
+ case RXPORT_MODE_CSI2_SYNC:
+ case RXPORT_MODE_CSI2_NONSYNC:
+
+ break;
+ }
+
+ /* LV_POLARITY & FV_POLARITY */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_PORT_CONFIG2, 0x3,
+ rxport->lv_fv_pol);
+
+ /* Enable all interrupt sources from this port */
+ ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_HI, 0x07);
+ ub960_rxport_write(priv, nport, UB960_RR_PORT_ICR_LO, 0x7f);
+
+ /* Enable I2C_PASS_THROUGH */
+ ub960_rxport_update_bits(priv, nport, UB960_RR_BCC_CONFIG,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH,
+ UB960_RR_BCC_CONFIG_I2C_PASS_THROUGH);
+
+ /* Enable I2C communication to the serializer via the alias addr */
+ ub960_rxport_write(priv, nport, UB960_RR_SER_ALIAS_ID,
+ rxport->ser.alias << 1);
+
+ /* Enable RX port */
+ ub960_update_bits(priv, UB960_SR_RX_PORT_CTL, BIT(nport), BIT(nport));
+
+ if (rxport->cdr_mode == RXPORT_CDR_FPD4) {
+ /* unreset 960 AEQ */
+ ub960_write_ind(priv, UB960_IND_TARGET_RX_ANA(nport), 0x25, 0x41);
+ }
+}
+
+static int ub960_init_rx_ports(struct ub960_data *priv)
+{
+ unsigned int nport;
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+
+ if (!rxport)
+ continue;
+
+ if (priv->hw_data->is_ub9702)
+ ub960_init_rx_port_ub9702(priv, rxport);
+ else
+ ub960_init_rx_port_ub960(priv, rxport);
+ }
+
+ return 0;
+}
+
+static void ub960_rxport_handle_events(struct ub960_data *priv, u8 nport)
+{
+ struct device *dev = &priv->client->dev;
+ u8 rx_port_sts1;
+ u8 rx_port_sts2;
+ u8 csi_rx_sts;
+ u8 bcc_sts;
+ int ret = 0;
+
+ /* Read interrupts (also clears most of them) */
+ if (!ret)
+ ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1,
+ &rx_port_sts1);
+ if (!ret)
+ ret = ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2,
+ &rx_port_sts2);
+ if (!ret)
+ ret = ub960_rxport_read(priv, nport, UB960_RR_CSI_RX_STS,
+ &csi_rx_sts);
+ if (!ret)
+ ret = ub960_rxport_read(priv, nport, UB960_RR_BCC_STATUS,
+ &bcc_sts);
+
+ if (ret)
+ return;
+
+ if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_PARITY_ERROR) {
+ u16 v;
+
+ ret = ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI,
+ &v);
+ if (!ret)
+ dev_err(dev, "rx%u parity errors: %u\n", nport, v);
+ }
+
+ if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_BCC_CRC_ERROR)
+ dev_err(dev, "rx%u BCC CRC error\n", nport);
+
+ if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_BCC_SEQ_ERROR)
+ dev_err(dev, "rx%u BCC SEQ error\n", nport);
+
+ if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_LEN_UNSTABLE)
+ dev_err(dev, "rx%u line length unstable\n", nport);
+
+ if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_FPD3_ENCODE_ERROR)
+ dev_err(dev, "rx%u FPD3 encode error\n", nport);
+
+ if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_BUFFER_ERROR)
+ dev_err(dev, "rx%u buffer error\n", nport);
+
+ if (csi_rx_sts)
+ dev_err(dev, "rx%u CSI error: %#02x\n", nport, csi_rx_sts);
+
+ if (csi_rx_sts & UB960_RR_CSI_RX_STS_ECC1_ERR)
+ dev_err(dev, "rx%u CSI ECC1 error\n", nport);
+
+ if (csi_rx_sts & UB960_RR_CSI_RX_STS_ECC2_ERR)
+ dev_err(dev, "rx%u CSI ECC2 error\n", nport);
+
+ if (csi_rx_sts & UB960_RR_CSI_RX_STS_CKSUM_ERR)
+ dev_err(dev, "rx%u CSI checksum error\n", nport);
+
+ if (csi_rx_sts & UB960_RR_CSI_RX_STS_LENGTH_ERR)
+ dev_err(dev, "rx%u CSI length error\n", nport);
+
+ if (bcc_sts)
+ dev_err(dev, "rx%u BCC error: %#02x\n", nport, bcc_sts);
+
+ if (bcc_sts & UB960_RR_BCC_STATUS_RESP_ERR)
+ dev_err(dev, "rx%u BCC response error", nport);
+
+ if (bcc_sts & UB960_RR_BCC_STATUS_SLAVE_TO)
+ dev_err(dev, "rx%u BCC slave timeout", nport);
+
+ if (bcc_sts & UB960_RR_BCC_STATUS_SLAVE_ERR)
+ dev_err(dev, "rx%u BCC slave error", nport);
+
+ if (bcc_sts & UB960_RR_BCC_STATUS_MASTER_TO)
+ dev_err(dev, "rx%u BCC master timeout", nport);
+
+ if (bcc_sts & UB960_RR_BCC_STATUS_MASTER_ERR)
+ dev_err(dev, "rx%u BCC master error", nport);
+
+ if (bcc_sts & UB960_RR_BCC_STATUS_SEQ_ERROR)
+ dev_err(dev, "rx%u BCC sequence error", nport);
+
+ if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_LEN_CHG) {
+ u16 v;
+
+ ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1, &v);
+ if (!ret)
+ dev_dbg(dev, "rx%u line len changed: %u\n", nport, v);
+ }
+
+ if (rx_port_sts2 & UB960_RR_RX_PORT_STS2_LINE_CNT_CHG) {
+ u16 v;
+
+ ret = ub960_rxport_read16(priv, nport, UB960_RR_LINE_COUNT_HI,
+ &v);
+ if (!ret)
+ dev_dbg(dev, "rx%u line count changed: %u\n", nport, v);
+ }
+
+ if (rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS_CHG) {
+ dev_dbg(dev, "rx%u: %s, %s, %s, %s\n", nport,
+ (rx_port_sts1 & UB960_RR_RX_PORT_STS1_LOCK_STS) ?
+ "locked" :
+ "unlocked",
+ (rx_port_sts1 & UB960_RR_RX_PORT_STS1_PORT_PASS) ?
+ "passed" :
+ "not passed",
+ (rx_port_sts2 & UB960_RR_RX_PORT_STS2_CABLE_FAULT) ?
+ "no clock" :
+ "clock ok",
+ (rx_port_sts2 & UB960_RR_RX_PORT_STS2_FREQ_STABLE) ?
+ "stable freq" :
+ "unstable freq");
+ }
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2
+ */
+
+/*
+ * The current implementation only supports a simple VC mapping, where all VCs
+ * from one RX port will be mapped to the same VC. Also, the hardware
+ * dictates that all streams from an RX port must go to a single TX port.
+ *
+ * This function decides the target VC numbers for each RX port with a simple
+ * algorithm, so that for each TX port, we get VC numbers starting from 0,
+ * and counting up.
+ *
+ * E.g. if all four RX ports are in use, of which the first two go to the
+ * first TX port and the second two go to the second TX port, we would get
+ * the following VCs for the four RX ports: 0, 1, 0, 1.
+ *
+ * TODO: implement a more sophisticated VC mapping. As the driver cannot know
+ * what VCs the sinks expect (say, an FPGA with hardcoded VC routing), this
+ * probably needs to be somehow configurable. Device tree?
+ */
+static void ub960_get_vc_maps(struct ub960_data *priv,
+ struct v4l2_subdev_state *state, u8 *vc)
+{
+ u8 cur_vc[UB960_MAX_TX_NPORTS] = {};
+ struct v4l2_subdev_route *route;
+ u8 handled_mask = 0;
+
+ for_each_active_route(&state->routing, route) {
+ unsigned int rx, tx;
+
+ rx = ub960_pad_to_port(priv, route->sink_pad);
+ if (BIT(rx) & handled_mask)
+ continue;
+
+ tx = ub960_pad_to_port(priv, route->source_pad);
+
+ vc[rx] = cur_vc[tx]++;
+ handled_mask |= BIT(rx);
+ }
+}
+
+static int ub960_enable_tx_port(struct ub960_data *priv, unsigned int nport)
+{
+ struct device *dev = &priv->client->dev;
+
+ dev_dbg(dev, "enable TX port %u\n", nport);
+
+ return ub960_txport_update_bits(priv, nport, UB960_TR_CSI_CTL,
+ UB960_TR_CSI_CTL_CSI_ENABLE,
+ UB960_TR_CSI_CTL_CSI_ENABLE);
+}
+
+static void ub960_disable_tx_port(struct ub960_data *priv, unsigned int nport)
+{
+ struct device *dev = &priv->client->dev;
+
+ dev_dbg(dev, "disable TX port %u\n", nport);
+
+ ub960_txport_update_bits(priv, nport, UB960_TR_CSI_CTL,
+ UB960_TR_CSI_CTL_CSI_ENABLE, 0);
+}
+
+static int ub960_enable_rx_port(struct ub960_data *priv, unsigned int nport)
+{
+ struct device *dev = &priv->client->dev;
+
+ dev_dbg(dev, "enable RX port %u\n", nport);
+
+ /* Enable forwarding */
+ return ub960_update_bits(priv, UB960_SR_FWD_CTL1,
+ UB960_SR_FWD_CTL1_PORT_DIS(nport), 0);
+}
+
+static void ub960_disable_rx_port(struct ub960_data *priv, unsigned int nport)
+{
+ struct device *dev = &priv->client->dev;
+
+ dev_dbg(dev, "disable RX port %u\n", nport);
+
+ /* Disable forwarding */
+ ub960_update_bits(priv, UB960_SR_FWD_CTL1,
+ UB960_SR_FWD_CTL1_PORT_DIS(nport),
+ UB960_SR_FWD_CTL1_PORT_DIS(nport));
+}
+
+/*
+ * The driver only supports using a single VC for each source. This function
+ * checks that each source only provides streams using a single VC.
+ */
+static int ub960_validate_stream_vcs(struct ub960_data *priv)
+{
+ unsigned int nport;
+ unsigned int i;
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+ struct v4l2_mbus_frame_desc desc;
+ int ret;
+ u8 vc;
+
+ if (!rxport)
+ continue;
+
+ ret = v4l2_subdev_call(rxport->source.sd, pad, get_frame_desc,
+ rxport->source.pad, &desc);
+ if (ret)
+ return ret;
+
+ if (desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
+ continue;
+
+ if (desc.num_entries == 0)
+ continue;
+
+ vc = desc.entry[0].bus.csi2.vc;
+
+ for (i = 1; i < desc.num_entries; i++) {
+ if (vc == desc.entry[i].bus.csi2.vc)
+ continue;
+
+ dev_err(&priv->client->dev,
+ "rx%u: source with multiple virtual-channels is not supported\n",
+ nport);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+static int ub960_configure_ports_for_streaming(struct ub960_data *priv,
+ struct v4l2_subdev_state *state)
+{
+ u8 fwd_ctl;
+ struct {
+ u32 num_streams;
+ u8 pixel_dt;
+ u8 meta_dt;
+ u32 meta_lines;
+ u32 tx_port;
+ } rx_data[UB960_MAX_RX_NPORTS] = {};
+ u8 vc_map[UB960_MAX_RX_NPORTS] = {};
+ struct v4l2_subdev_route *route;
+ unsigned int nport;
+ int ret;
+
+ ret = ub960_validate_stream_vcs(priv);
+ if (ret)
+ return ret;
+
+ ub960_get_vc_maps(priv, state, vc_map);
+
+ for_each_active_route(&state->routing, route) {
+ struct ub960_rxport *rxport;
+ struct ub960_txport *txport;
+ struct v4l2_mbus_framefmt *fmt;
+ const struct ub960_format_info *ub960_fmt;
+ unsigned int nport;
+
+ nport = ub960_pad_to_port(priv, route->sink_pad);
+
+ rxport = priv->rxports[nport];
+ if (!rxport)
+ return -EINVAL;
+
+ txport = priv->txports[ub960_pad_to_port(priv, route->source_pad)];
+ if (!txport)
+ return -EINVAL;
+
+ rx_data[nport].tx_port = ub960_pad_to_port(priv, route->source_pad);
+
+ rx_data[nport].num_streams++;
+
+ /* For the rest, we are only interested in parallel buses */
+ if (rxport->rx_mode == RXPORT_MODE_CSI2_SYNC ||
+ rxport->rx_mode == RXPORT_MODE_CSI2_NONSYNC)
+ continue;
+
+ if (rx_data[nport].num_streams > 2)
+ return -EPIPE;
+
+ fmt = v4l2_subdev_state_get_stream_format(state,
+ route->sink_pad,
+ route->sink_stream);
+ if (!fmt)
+ return -EPIPE;
+
+ ub960_fmt = ub960_find_format(fmt->code);
+ if (!ub960_fmt)
+ return -EPIPE;
+
+ if (ub960_fmt->meta) {
+ if (fmt->height > 3) {
+ dev_err(&priv->client->dev,
+ "rx%u: unsupported metadata height %u\n",
+ nport, fmt->height);
+ return -EPIPE;
+ }
+
+ rx_data[nport].meta_dt = ub960_fmt->datatype;
+ rx_data[nport].meta_lines = fmt->height;
+ } else {
+ rx_data[nport].pixel_dt = ub960_fmt->datatype;
+ }
+ }
+
+ /* Configure RX ports */
+
+ /*
+ * Keep all port forwardings disabled by default. Forwarding will be
+ * enabled in ub960_enable_rx_port.
+ */
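+ /*
+  * In FWD_CTL1 the upper nibble holds the per-port forward disable bits
+  * and the lower nibble selects the destination TX port for each RX port
+  * (bit set = TX1, cleared = TX0).
+  */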
+ fwd_ctl = GENMASK(7, 4);
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+ u8 vc = vc_map[nport];
+
+ if (rx_data[nport].num_streams == 0)
+ continue;
+
+ switch (rxport->rx_mode) {
+ case RXPORT_MODE_RAW10:
+ ub960_rxport_write(priv, nport, UB960_RR_RAW10_ID,
+ rx_data[nport].pixel_dt | (vc << UB960_RR_RAW10_ID_VC_SHIFT));
+
+ ub960_rxport_write(priv, rxport->nport,
+ UB960_RR_RAW_EMBED_DTYPE,
+ (rx_data[nport].meta_lines << UB960_RR_RAW_EMBED_DTYPE_LINES_SHIFT) |
+ rx_data[nport].meta_dt);
+
+ break;
+
+ case RXPORT_MODE_RAW12_HF:
+ case RXPORT_MODE_RAW12_LF:
+ /* Not implemented */
+ break;
+
+ case RXPORT_MODE_CSI2_SYNC:
+ case RXPORT_MODE_CSI2_NONSYNC:
+ if (!priv->hw_data->is_ub9702) {
+ /* Map all VCs from this port to the same VC */
+ ub960_rxport_write(priv, nport, UB960_RR_CSI_VC_MAP,
+ (vc << UB960_RR_CSI_VC_MAP_SHIFT(3)) |
+ (vc << UB960_RR_CSI_VC_MAP_SHIFT(2)) |
+ (vc << UB960_RR_CSI_VC_MAP_SHIFT(1)) |
+ (vc << UB960_RR_CSI_VC_MAP_SHIFT(0)));
+ } else {
+ unsigned int i;
+
+ /* Map all VCs from this port to VC(nport) */
+ for (i = 0; i < 8; i++)
+ ub960_rxport_write(priv, nport,
+ UB960_RR_VC_ID_MAP(i),
+ nport);
+ }
+
+ break;
+ }
+
+ if (rx_data[nport].tx_port == 1)
+ fwd_ctl |= BIT(nport); /* forward to TX1 */
+ else
+ fwd_ctl &= ~BIT(nport); /* forward to TX0 */
+ }
+
+ ub960_write(priv, UB960_SR_FWD_CTL1, fwd_ctl);
+
+ return 0;
+}
+
+static void ub960_update_streaming_status(struct ub960_data *priv)
+{
+ unsigned int i;
+
+ for (i = 0; i < UB960_MAX_NPORTS; i++) {
+ if (priv->stream_enable_mask[i])
+ break;
+ }
+
+ priv->streaming = i < UB960_MAX_NPORTS;
+}
+
+static int ub960_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 source_pad,
+ u64 source_streams_mask)
+{
+ struct ub960_data *priv = sd_to_ub960(sd);
+ struct device *dev = &priv->client->dev;
+ u64 sink_streams[UB960_MAX_RX_NPORTS] = {};
+ struct v4l2_subdev_route *route;
+ unsigned int failed_port;
+ unsigned int nport;
+ int ret;
+
+ if (!priv->streaming) {
+ dev_dbg(dev, "Prepare for streaming\n");
+ ret = ub960_configure_ports_for_streaming(priv, state);
+ if (ret)
+ return ret;
+ }
+
+ /* Enable TX port if not yet enabled */
+ if (!priv->stream_enable_mask[source_pad]) {
+ ret = ub960_enable_tx_port(priv,
+ ub960_pad_to_port(priv, source_pad));
+ if (ret)
+ return ret;
+ }
+
+ priv->stream_enable_mask[source_pad] |= source_streams_mask;
+
+ /* Collect sink streams per pad which we need to enable */
+ for_each_active_route(&state->routing, route) {
+ if (route->source_pad != source_pad)
+ continue;
+
+ if (!(source_streams_mask & BIT_ULL(route->source_stream)))
+ continue;
+
+ nport = ub960_pad_to_port(priv, route->sink_pad);
+
+ sink_streams[nport] |= BIT_ULL(route->sink_stream);
+ }
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ if (!sink_streams[nport])
+ continue;
+
+ /* Enable the RX port if not yet enabled */
+ if (!priv->stream_enable_mask[nport]) {
+ ret = ub960_enable_rx_port(priv, nport);
+ if (ret) {
+ failed_port = nport;
+ goto err;
+ }
+ }
+
+ priv->stream_enable_mask[nport] |= sink_streams[nport];
+
+ dev_dbg(dev, "enable RX port %u streams %#llx\n", nport,
+ sink_streams[nport]);
+
+ ret = v4l2_subdev_enable_streams(
+ priv->rxports[nport]->source.sd,
+ priv->rxports[nport]->source.pad,
+ sink_streams[nport]);
+ if (ret) {
+ priv->stream_enable_mask[nport] &= ~sink_streams[nport];
+
+ if (!priv->stream_enable_mask[nport])
+ ub960_disable_rx_port(priv, nport);
+
+ failed_port = nport;
+ goto err;
+ }
+ }
+
+ priv->streaming = true;
+
+ return 0;
+
+err:
+ for (nport = 0; nport < failed_port; nport++) {
+ if (!sink_streams[nport])
+ continue;
+
+ dev_dbg(dev, "disable RX port %u streams %#llx\n", nport,
+ sink_streams[nport]);
+
+ ret = v4l2_subdev_disable_streams(
+ priv->rxports[nport]->source.sd,
+ priv->rxports[nport]->source.pad,
+ sink_streams[nport]);
+ if (ret)
+ dev_err(dev, "Failed to disable streams: %d\n", ret);
+
+ priv->stream_enable_mask[nport] &= ~sink_streams[nport];
+
+ /* Disable RX port if no active streams */
+ if (!priv->stream_enable_mask[nport])
+ ub960_disable_rx_port(priv, nport);
+ }
+
+ priv->stream_enable_mask[source_pad] &= ~source_streams_mask;
+
+ if (!priv->stream_enable_mask[source_pad])
+ ub960_disable_tx_port(priv,
+ ub960_pad_to_port(priv, source_pad));
+
+ ub960_update_streaming_status(priv);
+
+ return ret;
+}
+
+static int ub960_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ u32 source_pad, u64 source_streams_mask)
+{
+ struct ub960_data *priv = sd_to_ub960(sd);
+ struct device *dev = &priv->client->dev;
+ u64 sink_streams[UB960_MAX_RX_NPORTS] = {};
+ struct v4l2_subdev_route *route;
+ unsigned int nport;
+ int ret;
+
+ /* Collect sink streams per pad which we need to disable */
+ for_each_active_route(&state->routing, route) {
+ if (route->source_pad != source_pad)
+ continue;
+
+ if (!(source_streams_mask & BIT_ULL(route->source_stream)))
+ continue;
+
+ nport = ub960_pad_to_port(priv, route->sink_pad);
+
+ sink_streams[nport] |= BIT_ULL(route->sink_stream);
+ }
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ if (!sink_streams[nport])
+ continue;
+
+ dev_dbg(dev, "disable RX port %u streams %#llx\n", nport,
+ sink_streams[nport]);
+
+ ret = v4l2_subdev_disable_streams(
+ priv->rxports[nport]->source.sd,
+ priv->rxports[nport]->source.pad,
+ sink_streams[nport]);
+ if (ret)
+ dev_err(dev, "Failed to disable streams: %d\n", ret);
+
+ priv->stream_enable_mask[nport] &= ~sink_streams[nport];
+
+ /* Disable RX port if no active streams */
+ if (!priv->stream_enable_mask[nport])
+ ub960_disable_rx_port(priv, nport);
+ }
+
+ /* Disable TX port if no active streams */
+
+ priv->stream_enable_mask[source_pad] &= ~source_streams_mask;
+
+ if (!priv->stream_enable_mask[source_pad])
+ ub960_disable_tx_port(priv,
+ ub960_pad_to_port(priv, source_pad));
+
+ ub960_update_streaming_status(priv);
+
+ return 0;
+}
+
+static int _ub960_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_krouting *routing)
+{
+ static const struct v4l2_mbus_framefmt format = {
+ .width = 640,
+ .height = 480,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
+ .field = V4L2_FIELD_NONE,
+ .colorspace = V4L2_COLORSPACE_SRGB,
+ .ycbcr_enc = V4L2_YCBCR_ENC_601,
+ .quantization = V4L2_QUANTIZATION_LIM_RANGE,
+ .xfer_func = V4L2_XFER_FUNC_SRGB,
+ };
+ int ret;
+
+ /*
+ * Note: we can only support up to V4L2_FRAME_DESC_ENTRY_MAX routes until
+ * the frame descriptor is made dynamically allocated.
+ */
+
+ if (routing->num_routes > V4L2_FRAME_DESC_ENTRY_MAX)
+ return -E2BIG;
+
+ ret = v4l2_subdev_routing_validate(sd, routing,
+ V4L2_SUBDEV_ROUTING_ONLY_1_TO_1 |
+ V4L2_SUBDEV_ROUTING_NO_SINK_STREAM_MIX);
+ if (ret)
+ return ret;
+
+ ret = v4l2_subdev_set_routing_with_fmt(sd, state, routing, &format);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ub960_set_routing(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ enum v4l2_subdev_format_whence which,
+ struct v4l2_subdev_krouting *routing)
+{
+ struct ub960_data *priv = sd_to_ub960(sd);
+
+ if (which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->streaming)
+ return -EBUSY;
+
+ return _ub960_set_routing(sd, state, routing);
+}
+
+static int ub960_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct ub960_data *priv = sd_to_ub960(sd);
+ struct v4l2_subdev_route *route;
+ struct v4l2_subdev_state *state;
+ int ret = 0;
+ struct device *dev = &priv->client->dev;
+ u8 vc_map[UB960_MAX_RX_NPORTS] = {};
+
+ if (!ub960_pad_is_source(priv, pad))
+ return -EINVAL;
+
+ memset(fd, 0, sizeof(*fd));
+
+ fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
+
+ state = v4l2_subdev_lock_and_get_active_state(&priv->sd);
+
+ ub960_get_vc_maps(priv, state, vc_map);
+
+ for_each_active_route(&state->routing, route) {
+ struct v4l2_mbus_frame_desc_entry *source_entry = NULL;
+ struct v4l2_mbus_frame_desc source_fd;
+ unsigned int nport;
+ unsigned int i;
+
+ if (route->source_pad != pad)
+ continue;
+
+ nport = ub960_pad_to_port(priv, route->sink_pad);
+
+ ret = v4l2_subdev_call(priv->rxports[nport]->source.sd, pad,
+ get_frame_desc,
+ priv->rxports[nport]->source.pad,
+ &source_fd);
+ if (ret) {
+ dev_err(dev,
+ "Failed to get source frame desc for pad %u\n",
+ route->sink_pad);
+ goto out_unlock;
+ }
+
+ for (i = 0; i < source_fd.num_entries; i++) {
+ if (source_fd.entry[i].stream == route->sink_stream) {
+ source_entry = &source_fd.entry[i];
+ break;
+ }
+ }
+
+ if (!source_entry) {
+ dev_err(dev,
+ "Failed to find stream from source frame desc\n");
+ ret = -EPIPE;
+ goto out_unlock;
+ }
+
+ fd->entry[fd->num_entries].stream = route->source_stream;
+ fd->entry[fd->num_entries].flags = source_entry->flags;
+ fd->entry[fd->num_entries].length = source_entry->length;
+ fd->entry[fd->num_entries].pixelcode = source_entry->pixelcode;
+
+ fd->entry[fd->num_entries].bus.csi2.vc = vc_map[nport];
+
+ if (source_fd.type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
+ fd->entry[fd->num_entries].bus.csi2.dt =
+ source_entry->bus.csi2.dt;
+ } else {
+ const struct ub960_format_info *ub960_fmt;
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = v4l2_subdev_state_get_stream_format(state, pad,
+ route->source_stream);
+
+ if (!fmt) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ ub960_fmt = ub960_find_format(fmt->code);
+ if (!ub960_fmt) {
+ dev_err(dev, "Unable to find format\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ fd->entry[fd->num_entries].bus.csi2.dt =
+ ub960_fmt->datatype;
+ }
+
+ fd->num_entries++;
+ }
+
+out_unlock:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
+}
+
+static int ub960_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct ub960_data *priv = sd_to_ub960(sd);
+ struct v4l2_mbus_framefmt *fmt;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE && priv->streaming)
+ return -EBUSY;
+
+ /* No transcoding, source and sink formats must match. */
+ if (ub960_pad_is_source(priv, format->pad))
+ return v4l2_subdev_get_fmt(sd, state, format);
+
+ /*
+ * Default to the first format if the requested media bus code isn't
+ * supported.
+ */
+ if (!ub960_find_format(format->format.code))
+ format->format.code = ub960_formats[0].code;
+
+ fmt = v4l2_subdev_state_get_stream_format(state, format->pad,
+ format->stream);
+ if (!fmt)
+ return -EINVAL;
+
+ *fmt = format->format;
+
+ fmt = v4l2_subdev_state_get_opposite_stream_format(state, format->pad,
+ format->stream);
+ if (!fmt)
+ return -EINVAL;
+
+ *fmt = format->format;
+
+ return 0;
+}
+
+static int ub960_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct ub960_data *priv = sd_to_ub960(sd);
+
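+ /*
+  * Start with a single route from the first RX pad (sink pad 0) to the
+  * first TX pad; the source pads are numbered after the RX pads.
+  */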
+ struct v4l2_subdev_route routes[] = {
+ {
+ .sink_pad = 0,
+ .sink_stream = 0,
+ .source_pad = priv->hw_data->num_rxports,
+ .source_stream = 0,
+ .flags = V4L2_SUBDEV_ROUTE_FL_ACTIVE,
+ },
+ };
+
+ struct v4l2_subdev_krouting routing = {
+ .num_routes = ARRAY_SIZE(routes),
+ .routes = routes,
+ };
+
+ return _ub960_set_routing(sd, state, &routing);
+}
+
+static const struct v4l2_subdev_pad_ops ub960_pad_ops = {
+ .enable_streams = ub960_enable_streams,
+ .disable_streams = ub960_disable_streams,
+
+ .set_routing = ub960_set_routing,
+ .get_frame_desc = ub960_get_frame_desc,
+
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = ub960_set_fmt,
+
+ .init_cfg = ub960_init_cfg,
+};
+
+static int ub960_log_status(struct v4l2_subdev *sd)
+{
+ struct ub960_data *priv = sd_to_ub960(sd);
+ struct device *dev = &priv->client->dev;
+ struct v4l2_subdev_state *state;
+ unsigned int nport;
+ unsigned int i;
+ u16 v16 = 0;
+ u8 v = 0;
+ u8 id[UB960_SR_FPD3_RX_ID_LEN];
+
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ for (i = 0; i < sizeof(id); i++)
+ ub960_read(priv, UB960_SR_FPD3_RX_ID(i), &id[i]);
+
+ dev_info(dev, "ID '%.*s'\n", (int)sizeof(id), id);
+
+ for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
+ struct ub960_txport *txport = priv->txports[nport];
+
+ dev_info(dev, "TX %u\n", nport);
+
+ if (!txport) {
+ dev_info(dev, "\tNot initialized\n");
+ continue;
+ }
+
+ ub960_txport_read(priv, nport, UB960_TR_CSI_STS, &v);
+ dev_info(dev, "\tsync %u, pass %u\n", v & (u8)BIT(1),
+ v & (u8)BIT(0));
+
+ ub960_read16(priv, UB960_SR_CSI_FRAME_COUNT_HI(nport), &v16);
+ dev_info(dev, "\tframe counter %u\n", v16);
+
+ ub960_read16(priv, UB960_SR_CSI_FRAME_ERR_COUNT_HI(nport), &v16);
+ dev_info(dev, "\tframe error counter %u\n", v16);
+
+ ub960_read16(priv, UB960_SR_CSI_LINE_COUNT_HI(nport), &v16);
+ dev_info(dev, "\tline counter %u\n", v16);
+
+ ub960_read16(priv, UB960_SR_CSI_LINE_ERR_COUNT_HI(nport), &v16);
+ dev_info(dev, "\tline error counter %u\n", v16);
+ }
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+ u8 eq_level;
+ s8 strobe_pos;
+ unsigned int i;
+
+ dev_info(dev, "RX %u\n", nport);
+
+ if (!rxport) {
+ dev_info(dev, "\tNot initialized\n");
+ continue;
+ }
+
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS1, &v);
+
+ if (v & UB960_RR_RX_PORT_STS1_LOCK_STS)
+ dev_info(dev, "\tLocked\n");
+ else
+ dev_info(dev, "\tNot locked\n");
+
+ dev_info(dev, "\trx_port_sts1 %#02x\n", v);
+ ub960_rxport_read(priv, nport, UB960_RR_RX_PORT_STS2, &v);
+ dev_info(dev, "\trx_port_sts2 %#02x\n", v);
+
+ ub960_rxport_read16(priv, nport, UB960_RR_RX_FREQ_HIGH, &v16);
+ dev_info(dev, "\tlink freq %llu Hz\n", (v16 * 1000000ULL) >> 8);
+
+ ub960_rxport_read16(priv, nport, UB960_RR_RX_PAR_ERR_HI, &v16);
+ dev_info(dev, "\tparity errors %u\n", v16);
+
+ ub960_rxport_read16(priv, nport, UB960_RR_LINE_COUNT_HI, &v16);
+ dev_info(dev, "\tlines per frame %u\n", v16);
+
+ ub960_rxport_read16(priv, nport, UB960_RR_LINE_LEN_1, &v16);
+ dev_info(dev, "\tbytes per line %u\n", v16);
+
+ ub960_rxport_read(priv, nport, UB960_RR_CSI_ERR_COUNTER, &v);
+ dev_info(dev, "\tcsi_err_counter %u\n", v);
+
+ /* Strobe */
+
+ ub960_read(priv, UB960_XR_AEQ_CTL1, &v);
+
+ dev_info(dev, "\t%s strobe\n",
+ (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) ? "Adaptive" :
+ "Manual");
+
+ if (v & UB960_XR_AEQ_CTL1_AEQ_SFILTER_EN) {
+ ub960_read(priv, UB960_XR_SFILTER_CFG, &v);
+
+ dev_info(dev, "\tStrobe range [%d, %d]\n",
+ ((v >> UB960_XR_SFILTER_CFG_SFILTER_MIN_SHIFT) & 0xf) - 7,
+ ((v >> UB960_XR_SFILTER_CFG_SFILTER_MAX_SHIFT) & 0xf) - 7);
+ }
+
+ ub960_rxport_get_strobe_pos(priv, nport, &strobe_pos);
+
+ dev_info(dev, "\tStrobe pos %d\n", strobe_pos);
+
+ /* EQ */
+
+ ub960_rxport_read(priv, nport, UB960_RR_AEQ_BYPASS, &v);
+
+ dev_info(dev, "\t%s EQ\n",
+ (v & UB960_RR_AEQ_BYPASS_ENABLE) ? "Manual" :
+ "Adaptive");
+
+ if (!(v & UB960_RR_AEQ_BYPASS_ENABLE)) {
+ ub960_rxport_read(priv, nport, UB960_RR_AEQ_MIN_MAX, &v);
+
+ dev_info(dev, "\tEQ range [%u, %u]\n",
+ (v >> UB960_RR_AEQ_MIN_MAX_AEQ_FLOOR_SHIFT) & 0xf,
+ (v >> UB960_RR_AEQ_MIN_MAX_AEQ_MAX_SHIFT) & 0xf);
+ }
+
+ if (ub960_rxport_get_eq_level(priv, nport, &eq_level) == 0)
+ dev_info(dev, "\tEQ level %u\n", eq_level);
+
+ /* GPIOs */
+ for (i = 0; i < UB960_NUM_BC_GPIOS; i++) {
+ u8 ctl_reg;
+ u8 ctl_shift;
+
+ ctl_reg = UB960_RR_BC_GPIO_CTL(i / 2);
+ ctl_shift = (i % 2) * 4;
+
+ ub960_rxport_read(priv, nport, ctl_reg, &v);
+
+ dev_info(dev, "\tGPIO%u: mode %u\n", i,
+ (v >> ctl_shift) & 0xf);
+ }
+ }
+
+ v4l2_subdev_unlock_state(state);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops ub960_subdev_core_ops = {
+ .log_status = ub960_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops ub960_subdev_ops = {
+ .core = &ub960_subdev_core_ops,
+ .pad = &ub960_pad_ops,
+};
+
+static const struct media_entity_operations ub960_entity_ops = {
+ .get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
+ .link_validate = v4l2_subdev_link_validate,
+ .has_pad_interdep = v4l2_subdev_has_pad_interdep,
+};
+
+/* -----------------------------------------------------------------------------
+ * Core
+ */
+
+static irqreturn_t ub960_handle_events(int irq, void *arg)
+{
+ struct ub960_data *priv = arg;
+ unsigned int i;
+ u8 int_sts;
+ u8 fwd_sts;
+ int ret;
+
+ ret = ub960_read(priv, UB960_SR_INTERRUPT_STS, &int_sts);
+ if (ret || !int_sts)
+ return IRQ_NONE;
+
+ dev_dbg(&priv->client->dev, "INTERRUPT_STS %x\n", int_sts);
+
+ ret = ub960_read(priv, UB960_SR_FWD_STS, &fwd_sts);
+ if (ret)
+ return IRQ_NONE;
+
+ dev_dbg(&priv->client->dev, "FWD_STS %#02x\n", fwd_sts);
+
+ for (i = 0; i < priv->hw_data->num_txports; i++) {
+ if (int_sts & UB960_SR_INTERRUPT_STS_IS_CSI_TX(i))
+ ub960_csi_handle_events(priv, i);
+ }
+
+ for (i = 0; i < priv->hw_data->num_rxports; i++) {
+ if (!priv->rxports[i])
+ continue;
+
+ if (int_sts & UB960_SR_INTERRUPT_STS_IS_RX(i))
+ ub960_rxport_handle_events(priv, i);
+ }
+
+ return IRQ_HANDLED;
+}
+
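+ /*
+  * Polling fallback for the event handler, re-scheduled every
+  * UB960_POLL_TIME_MS (presumably for setups without a wired interrupt
+  * line).
+  */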
+static void ub960_handler_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct ub960_data *priv =
+ container_of(dwork, struct ub960_data, poll_work);
+
+ ub960_handle_events(0, priv);
+
+ schedule_delayed_work(&priv->poll_work,
+ msecs_to_jiffies(UB960_POLL_TIME_MS));
+}
+
+static void ub960_txport_free_ports(struct ub960_data *priv)
+{
+ unsigned int nport;
+
+ for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
+ struct ub960_txport *txport = priv->txports[nport];
+
+ if (!txport)
+ continue;
+
+ kfree(txport);
+ priv->txports[nport] = NULL;
+ }
+}
+
+static void ub960_rxport_free_ports(struct ub960_data *priv)
+{
+ unsigned int nport;
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+
+ if (!rxport)
+ continue;
+
+ fwnode_handle_put(rxport->source.ep_fwnode);
+ fwnode_handle_put(rxport->ser.fwnode);
+
+ kfree(rxport);
+ priv->rxports[nport] = NULL;
+ }
+}
+
+static int
+ub960_parse_dt_rxport_link_properties(struct ub960_data *priv,
+ struct fwnode_handle *link_fwnode,
+ struct ub960_rxport *rxport)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int nport = rxport->nport;
+ u32 rx_mode;
+ u32 cdr_mode;
+ s32 strobe_pos;
+ u32 eq_level;
+ u32 ser_i2c_alias;
+ int ret;
+
+ cdr_mode = RXPORT_CDR_FPD3;
+
+ ret = fwnode_property_read_u32(link_fwnode, "ti,cdr-mode", &cdr_mode);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
+ "ti,cdr-mode", ret);
+ return ret;
+ }
+
+ if (cdr_mode > RXPORT_CDR_LAST) {
+ dev_err(dev, "rx%u: bad 'ti,cdr-mode' %u\n", nport, cdr_mode);
+ return -EINVAL;
+ }
+
+ if (!priv->hw_data->is_fpdlink4 && cdr_mode == RXPORT_CDR_FPD4) {
+ dev_err(dev, "rx%u: FPD-Link 4 CDR not supported\n", nport);
+ return -EINVAL;
+ }
+
+ rxport->cdr_mode = cdr_mode;
+
+ ret = fwnode_property_read_u32(link_fwnode, "ti,rx-mode", &rx_mode);
+ if (ret < 0) {
+ dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
+ "ti,rx-mode", ret);
+ return ret;
+ }
+
+ if (rx_mode > RXPORT_MODE_LAST) {
+ dev_err(dev, "rx%u: bad 'ti,rx-mode' %u\n", nport, rx_mode);
+ return -EINVAL;
+ }
+
+ switch (rx_mode) {
+ case RXPORT_MODE_RAW12_HF:
+ case RXPORT_MODE_RAW12_LF:
+ dev_err(dev, "rx%u: unsupported 'ti,rx-mode' %u\n", nport,
+ rx_mode);
+ return -EINVAL;
+ default:
+ break;
+ }
+
+ rxport->rx_mode = rx_mode;
+
+ /* EQ & Strobe related */
+
+ /* Defaults */
+ rxport->eq.manual_eq = false;
+ rxport->eq.aeq.eq_level_min = UB960_MIN_EQ_LEVEL;
+ rxport->eq.aeq.eq_level_max = UB960_MAX_EQ_LEVEL;
+
+ ret = fwnode_property_read_u32(link_fwnode, "ti,strobe-pos",
+ &strobe_pos);
+ if (ret) {
+ if (ret != -EINVAL) {
+ dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
+ "ti,strobe-pos", ret);
+ return ret;
+ }
+ } else {
+ if (strobe_pos < UB960_MIN_MANUAL_STROBE_POS ||
+ strobe_pos > UB960_MAX_MANUAL_STROBE_POS) {
+ dev_err(dev, "rx%u: illegal 'strobe-pos' value: %d\n",
+ nport, strobe_pos);
+ return -EINVAL;
+ }
+
+ /* NOTE: ignored unless 'ti,manual-strobe' is also set globally */
+ rxport->eq.strobe_pos = strobe_pos;
+ if (!priv->strobe.manual)
+ dev_warn(dev,
+ "rx%u: 'ti,strobe-pos' ignored as 'ti,manual-strobe' not set\n",
+ nport);
+ }
+
+ ret = fwnode_property_read_u32(link_fwnode, "ti,eq-level", &eq_level);
+ if (ret) {
+ if (ret != -EINVAL) {
+ dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
+ "ti,eq-level", ret);
+ return ret;
+ }
+ } else {
+ if (eq_level > UB960_MAX_EQ_LEVEL) {
+ dev_err(dev, "rx%u: illegal 'ti,eq-level' value: %d\n",
+ nport, eq_level);
+ return -EINVAL;
+ }
+
+ rxport->eq.manual_eq = true;
+ rxport->eq.manual.eq_level = eq_level;
+ }
+
+ ret = fwnode_property_read_u32(link_fwnode, "i2c-alias",
+ &ser_i2c_alias);
+ if (ret) {
+ dev_err(dev, "rx%u: failed to read '%s': %d\n", nport,
+ "i2c-alias", ret);
+ return ret;
+ }
+ rxport->ser.alias = ser_i2c_alias;
+
+ rxport->ser.fwnode = fwnode_get_named_child_node(link_fwnode, "serializer");
+ if (!rxport->ser.fwnode) {
+ dev_err(dev, "rx%u: missing 'serializer' node\n", nport);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ub960_parse_dt_rxport_ep_properties(struct ub960_data *priv,
+ struct fwnode_handle *ep_fwnode,
+ struct ub960_rxport *rxport)
+{
+ struct device *dev = &priv->client->dev;
+ struct v4l2_fwnode_endpoint vep = {};
+ unsigned int nport = rxport->nport;
+ bool hsync_hi;
+ bool vsync_hi;
+ int ret;
+
+ rxport->source.ep_fwnode = fwnode_graph_get_remote_endpoint(ep_fwnode);
+ if (!rxport->source.ep_fwnode) {
+ dev_err(dev, "rx%u: no remote endpoint\n", nport);
+ return -ENODEV;
+ }
+
+ /* We currently have properties only for RAW modes */
+
+ switch (rxport->rx_mode) {
+ case RXPORT_MODE_RAW10:
+ case RXPORT_MODE_RAW12_HF:
+ case RXPORT_MODE_RAW12_LF:
+ break;
+ default:
+ return 0;
+ }
+
+ vep.bus_type = V4L2_MBUS_PARALLEL;
+ ret = v4l2_fwnode_endpoint_parse(ep_fwnode, &vep);
+ if (ret) {
+ dev_err(dev, "rx%u: failed to parse endpoint data\n", nport);
+ goto err_put_source_ep_fwnode;
+ }
+
+ hsync_hi = !!(vep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH);
+ vsync_hi = !!(vep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH);
+
+ /* LineValid and FrameValid are inverse to the h/vsync active */
+ rxport->lv_fv_pol = (hsync_hi ? UB960_RR_PORT_CONFIG2_LV_POL_LOW : 0) |
+ (vsync_hi ? UB960_RR_PORT_CONFIG2_FV_POL_LOW : 0);
+
+ return 0;
+
+err_put_source_ep_fwnode:
+ fwnode_handle_put(rxport->source.ep_fwnode);
+ return ret;
+}
+
+static int ub960_parse_dt_rxport(struct ub960_data *priv, unsigned int nport,
+ struct fwnode_handle *link_fwnode,
+ struct fwnode_handle *ep_fwnode)
+{
+ static const char *vpoc_names[UB960_MAX_RX_NPORTS] = {
+ "vpoc0", "vpoc1", "vpoc2", "vpoc3"
+ };
+ struct device *dev = &priv->client->dev;
+ struct ub960_rxport *rxport;
+ int ret;
+
+ rxport = kzalloc(sizeof(*rxport), GFP_KERNEL);
+ if (!rxport)
+ return -ENOMEM;
+
+ priv->rxports[nport] = rxport;
+
+ rxport->nport = nport;
+ rxport->priv = priv;
+
+ ret = ub960_parse_dt_rxport_link_properties(priv, link_fwnode, rxport);
+ if (ret)
+ goto err_free_rxport;
+
+ rxport->vpoc = devm_regulator_get_optional(dev, vpoc_names[nport]);
+ if (IS_ERR(rxport->vpoc)) {
+ ret = PTR_ERR(rxport->vpoc);
+ if (ret == -ENODEV) {
+ rxport->vpoc = NULL;
+ } else {
+ dev_err(dev, "rx%u: failed to get VPOC supply: %d\n",
+ nport, ret);
+ goto err_put_remote_fwnode;
+ }
+ }
+
+ ret = ub960_parse_dt_rxport_ep_properties(priv, ep_fwnode, rxport);
+ if (ret)
+ goto err_put_remote_fwnode;
+
+ return 0;
+
+err_put_remote_fwnode:
+ fwnode_handle_put(rxport->ser.fwnode);
+err_free_rxport:
+ priv->rxports[nport] = NULL;
+ kfree(rxport);
+ return ret;
+}
+
+static struct fwnode_handle *
+ub960_fwnode_get_link_by_regs(struct fwnode_handle *links_fwnode,
+ unsigned int nport)
+{
+ struct fwnode_handle *link_fwnode;
+ int ret;
+
+ fwnode_for_each_child_node(links_fwnode, link_fwnode) {
+ u32 link_num;
+
+ if (!str_has_prefix(fwnode_get_name(link_fwnode), "link@"))
+ continue;
+
+ ret = fwnode_property_read_u32(link_fwnode, "reg", &link_num);
+ if (ret) {
+ fwnode_handle_put(link_fwnode);
+ return NULL;
+ }
+
+ if (nport == link_num)
+ return link_fwnode;
+ }
+
+ return NULL;
+}
+
+static int ub960_parse_dt_rxports(struct ub960_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ struct fwnode_handle *links_fwnode;
+ unsigned int nport;
+ int ret;
+
+ links_fwnode = fwnode_get_named_child_node(dev_fwnode(dev), "links");
+ if (!links_fwnode) {
+ dev_err(dev, "'links' node missing\n");
+ return -ENODEV;
+ }
+
+ /* Defaults, recommended by TI */
+ priv->strobe.min = 2;
+ priv->strobe.max = 3;
+
+ priv->strobe.manual = fwnode_property_read_bool(links_fwnode, "ti,manual-strobe");
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ struct fwnode_handle *link_fwnode;
+ struct fwnode_handle *ep_fwnode;
+
+ link_fwnode = ub960_fwnode_get_link_by_regs(links_fwnode, nport);
+ if (!link_fwnode)
+ continue;
+
+ ep_fwnode = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev),
+ nport, 0, 0);
+ if (!ep_fwnode) {
+ fwnode_handle_put(link_fwnode);
+ continue;
+ }
+
+ ret = ub960_parse_dt_rxport(priv, nport, link_fwnode,
+ ep_fwnode);
+
+ fwnode_handle_put(link_fwnode);
+ fwnode_handle_put(ep_fwnode);
+
+ if (ret) {
+ dev_err(dev, "rx%u: failed to parse RX port\n", nport);
+ goto err_put_links;
+ }
+ }
+
+ fwnode_handle_put(links_fwnode);
+
+ return 0;
+
+err_put_links:
+ fwnode_handle_put(links_fwnode);
+
+ return ret;
+}
+
+static int ub960_parse_dt_txports(struct ub960_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ u32 nport;
+ int ret;
+
+ for (nport = 0; nport < priv->hw_data->num_txports; nport++) {
+ unsigned int port = nport + priv->hw_data->num_rxports;
+ struct fwnode_handle *ep_fwnode;
+
+ ep_fwnode = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev),
+ port, 0, 0);
+ if (!ep_fwnode)
+ continue;
+
+ ret = ub960_parse_dt_txport(priv, ep_fwnode, nport);
+
+ fwnode_handle_put(ep_fwnode);
+
+ if (ret)
+ break;
+ }
+
+ return 0;
+}
+
+static int ub960_parse_dt(struct ub960_data *priv)
+{
+ int ret;
+
+ ret = ub960_parse_dt_rxports(priv);
+ if (ret)
+ return ret;
+
+ ret = ub960_parse_dt_txports(priv);
+ if (ret)
+ goto err_free_rxports;
+
+ return 0;
+
+err_free_rxports:
+ ub960_rxport_free_ports(priv);
+
+ return ret;
+}
+
+static int ub960_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct ub960_data *priv = sd_to_ub960(notifier->sd);
+ struct ub960_rxport *rxport = to_ub960_asd(asd)->rxport;
+ struct device *dev = &priv->client->dev;
+ u8 nport = rxport->nport;
+ unsigned int i;
+ int ret;
+
+ ret = media_entity_get_fwnode_pad(&subdev->entity,
+ rxport->source.ep_fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (ret < 0) {
+ dev_err(dev, "Failed to find pad for %s\n", subdev->name);
+ return ret;
+ }
+
+ rxport->source.sd = subdev;
+ rxport->source.pad = ret;
+
+ ret = media_create_pad_link(&rxport->source.sd->entity,
+ rxport->source.pad, &priv->sd.entity, nport,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_err(dev, "Unable to link %s:%u -> %s:%u\n",
+ rxport->source.sd->name, rxport->source.pad,
+ priv->sd.name, nport);
+ return ret;
+ }
+
+ for (i = 0; i < priv->hw_data->num_rxports; i++) {
+ if (priv->rxports[i] && !priv->rxports[i]->source.sd) {
+ dev_dbg(dev, "Waiting for more subdevs to be bound\n");
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+static void ub960_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct ub960_rxport *rxport = to_ub960_asd(asd)->rxport;
+
+ rxport->source.sd = NULL;
+}
+
+static const struct v4l2_async_notifier_operations ub960_notify_ops = {
+ .bound = ub960_notify_bound,
+ .unbind = ub960_notify_unbind,
+};
+
+static int ub960_v4l2_notifier_register(struct ub960_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int i;
+ int ret;
+
+ v4l2_async_subdev_nf_init(&priv->notifier, &priv->sd);
+
+ for (i = 0; i < priv->hw_data->num_rxports; i++) {
+ struct ub960_rxport *rxport = priv->rxports[i];
+ struct ub960_asd *asd;
+
+ if (!rxport)
+ continue;
+
+ asd = v4l2_async_nf_add_fwnode(&priv->notifier,
+ rxport->source.ep_fwnode,
+ struct ub960_asd);
+ if (IS_ERR(asd)) {
+ dev_err(dev, "Failed to add subdev for source %u: %pe\n",
+ i, asd);
+ v4l2_async_nf_cleanup(&priv->notifier);
+ return PTR_ERR(asd);
+ }
+
+ asd->rxport = rxport;
+ }
+
+ priv->notifier.ops = &ub960_notify_ops;
+
+ ret = v4l2_async_nf_register(&priv->notifier);
+ if (ret) {
+ dev_err(dev, "Failed to register subdev_notifier\n");
+ v4l2_async_nf_cleanup(&priv->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ub960_v4l2_notifier_unregister(struct ub960_data *priv)
+{
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
+}
+
+static int ub960_create_subdev(struct ub960_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int i;
+ int ret;
+
+ v4l2_i2c_subdev_init(&priv->sd, priv->client, &ub960_subdev_ops);
+
+ v4l2_ctrl_handler_init(&priv->ctrl_handler, 1);
+ priv->sd.ctrl_handler = &priv->ctrl_handler;
+
+ v4l2_ctrl_new_int_menu(&priv->ctrl_handler, NULL, V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(priv->tx_link_freq) - 1, 0,
+ priv->tx_link_freq);
+
+ if (priv->ctrl_handler.error) {
+ ret = priv->ctrl_handler.error;
+ goto err_free_ctrl;
+ }
+
+ priv->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_STREAMS;
+ priv->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ priv->sd.entity.ops = &ub960_entity_ops;
+
+ for (i = 0; i < priv->hw_data->num_rxports + priv->hw_data->num_txports; i++) {
+ priv->pads[i].flags = ub960_pad_is_sink(priv, i) ?
+ MEDIA_PAD_FL_SINK :
+ MEDIA_PAD_FL_SOURCE;
+ }
+
+ ret = media_entity_pads_init(&priv->sd.entity,
+ priv->hw_data->num_rxports +
+ priv->hw_data->num_txports,
+ priv->pads);
+ if (ret)
+ goto err_free_ctrl;
+
+ priv->sd.state_lock = priv->sd.ctrl_handler->lock;
+
+ ret = v4l2_subdev_init_finalize(&priv->sd);
+ if (ret)
+ goto err_entity_cleanup;
+
+ ret = ub960_v4l2_notifier_register(priv);
+ if (ret) {
+ dev_err(dev, "v4l2 subdev notifier register failed: %d\n", ret);
+ goto err_subdev_cleanup;
+ }
+
+ ret = v4l2_async_register_subdev(&priv->sd);
+ if (ret) {
+ dev_err(dev, "v4l2_async_register_subdev error: %d\n", ret);
+ goto err_unreg_notif;
+ }
+
+ return 0;
+
+err_unreg_notif:
+ ub960_v4l2_notifier_unregister(priv);
+err_subdev_cleanup:
+ v4l2_subdev_cleanup(&priv->sd);
+err_entity_cleanup:
+ media_entity_cleanup(&priv->sd.entity);
+err_free_ctrl:
+ v4l2_ctrl_handler_free(&priv->ctrl_handler);
+
+ return ret;
+}
+
+static void ub960_destroy_subdev(struct ub960_data *priv)
+{
+ ub960_v4l2_notifier_unregister(priv);
+ v4l2_async_unregister_subdev(&priv->sd);
+
+ v4l2_subdev_cleanup(&priv->sd);
+
+ media_entity_cleanup(&priv->sd.entity);
+ v4l2_ctrl_handler_free(&priv->ctrl_handler);
+}
+
+static const struct regmap_config ub960_regmap_config = {
+ .name = "ds90ub960",
+
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+
+ /*
+ * We do locking in the driver to cover the TX/RX port selection and the
+ * indirect register access.
+ */
+ .disable_locking = true,
+};
+
+static void ub960_reset(struct ub960_data *priv, bool reset_regs)
+{
+ struct device *dev = &priv->client->dev;
+ unsigned int v;
+ int ret;
+ u8 bit;
+
+ bit = reset_regs ? UB960_SR_RESET_DIGITAL_RESET1 :
+ UB960_SR_RESET_DIGITAL_RESET0;
+
+ ub960_write(priv, UB960_SR_RESET, bit);
+
+ mutex_lock(&priv->reg_lock);
+
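+ /* Wait for the self-clearing reset bit to indicate that the reset is done */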
+ ret = regmap_read_poll_timeout(priv->regmap, UB960_SR_RESET, v,
+ (v & bit) == 0, 2000, 100000);
+
+ mutex_unlock(&priv->reg_lock);
+
+ if (ret)
+ dev_err(dev, "reset failed: %d\n", ret);
+}
+
+static int ub960_get_hw_resources(struct ub960_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+
+ priv->regmap = devm_regmap_init_i2c(priv->client, &ub960_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->vddio = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(priv->vddio))
+ return dev_err_probe(dev, PTR_ERR(priv->vddio),
+ "cannot get VDDIO regulator\n");
+
+ /* get power-down pin from DT */
+ priv->pd_gpio =
+ devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->pd_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->pd_gpio),
+ "Cannot get powerdown GPIO\n");
+
+ priv->refclk = devm_clk_get(dev, "refclk");
+ if (IS_ERR(priv->refclk))
+ return dev_err_probe(dev, PTR_ERR(priv->refclk),
+ "Cannot get REFCLK\n");
+
+ return 0;
+}
+
+static int ub960_enable_core_hw(struct ub960_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ u8 rev_mask;
+ int ret;
+ u8 dev_sts;
+ u8 refclk_freq;
+
+ ret = regulator_enable(priv->vddio);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to enable VDDIO regulator\n");
+
+ ret = clk_prepare_enable(priv->refclk);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to enable refclk\n");
+ goto err_disable_vddio;
+ }
+
+ if (priv->pd_gpio) {
+ gpiod_set_value_cansleep(priv->pd_gpio, 1);
+ /* wait min 2 ms for reset to complete */
+ fsleep(2000);
+ gpiod_set_value_cansleep(priv->pd_gpio, 0);
+ /* wait min 2 ms for power up to finish */
+ fsleep(2000);
+ }
+
+ ub960_reset(priv, true);
+
+ /* Verify that the registers are accessible */
+ ret = ub960_read(priv, UB960_SR_REV_MASK, &rev_mask);
+ if (ret) {
+ dev_err_probe(dev, ret, "Cannot read first register, abort\n");
+ goto err_pd_gpio;
+ }
+
+ dev_dbg(dev, "Found %s (rev/mask %#04x)\n", priv->hw_data->model,
+ rev_mask);
+
+ ret = ub960_read(priv, UB960_SR_DEVICE_STS, &dev_sts);
+ if (ret)
+ goto err_pd_gpio;
+
+ ret = ub960_read(priv, UB960_XR_REFCLK_FREQ, &refclk_freq);
+ if (ret)
+ goto err_pd_gpio;
+
+ dev_dbg(dev, "refclk valid %u freq %u MHz (clk fw freq %lu MHz)\n",
+ !!(dev_sts & BIT(4)), refclk_freq,
+ clk_get_rate(priv->refclk) / 1000000);
+
+ /* Disable all RX ports by default */
+ ret = ub960_write(priv, UB960_SR_RX_PORT_CTL, 0);
+ if (ret)
+ goto err_pd_gpio;
+
+ /* release GPIO lock */
+ if (priv->hw_data->is_ub9702) {
+ ret = ub960_update_bits(priv, UB960_SR_RESET,
+ UB960_SR_RESET_GPIO_LOCK_RELEASE,
+ UB960_SR_RESET_GPIO_LOCK_RELEASE);
+ if (ret)
+ goto err_pd_gpio;
+ }
+
+ return 0;
+
+err_pd_gpio:
+ gpiod_set_value_cansleep(priv->pd_gpio, 1);
+ clk_disable_unprepare(priv->refclk);
+err_disable_vddio:
+ regulator_disable(priv->vddio);
+
+ return ret;
+}
+
+static void ub960_disable_core_hw(struct ub960_data *priv)
+{
+ gpiod_set_value_cansleep(priv->pd_gpio, 1);
+ clk_disable_unprepare(priv->refclk);
+ regulator_disable(priv->vddio);
+}
+
+static int ub960_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct ub960_data *priv;
+ unsigned int port_lock_mask;
+ unsigned int port_mask;
+ unsigned int nport;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+
+ priv->hw_data = device_get_match_data(dev);
+
+ mutex_init(&priv->reg_lock);
+
+ INIT_DELAYED_WORK(&priv->poll_work, ub960_handler_work);
+
+ /*
+ * Initialize these to invalid values so that the first reg writes will
+ * configure the target.
+ */
+ priv->reg_current.indirect_target = 0xff;
+ priv->reg_current.rxport = 0xff;
+ priv->reg_current.txport = 0xff;
+
+ ret = ub960_get_hw_resources(priv);
+ if (ret)
+ goto err_mutex_destroy;
+
+ ret = ub960_enable_core_hw(priv);
+ if (ret)
+ goto err_mutex_destroy;
+
+ ret = ub960_parse_dt(priv);
+ if (ret)
+ goto err_disable_core_hw;
+
+ ret = ub960_init_tx_ports(priv);
+ if (ret)
+ goto err_free_ports;
+
+ ret = ub960_rxport_enable_vpocs(priv);
+ if (ret)
+ goto err_free_ports;
+
+ ret = ub960_init_rx_ports(priv);
+ if (ret)
+ goto err_disable_vpocs;
+
+ ub960_reset(priv, false);
+
+ port_mask = 0;
+
+ for (nport = 0; nport < priv->hw_data->num_rxports; nport++) {
+ struct ub960_rxport *rxport = priv->rxports[nport];
+
+ if (!rxport)
+ continue;
+
+ port_mask |= BIT(nport);
+ }
+
+ ret = ub960_rxport_wait_locks(priv, port_mask, &port_lock_mask);
+ if (ret)
+ goto err_disable_vpocs;
+
+ if (port_mask != port_lock_mask) {
+ ret = -EIO;
+ dev_err_probe(dev, ret, "Failed to lock all RX ports\n");
+ goto err_disable_vpocs;
+ }
+
+ /*
+ * Clear any errors caused by switching the RX port settings while
+ * probing.
+ */
+ ub960_clear_rx_errors(priv);
+
+ ret = ub960_init_atr(priv);
+ if (ret)
+ goto err_disable_vpocs;
+
+ ret = ub960_rxport_add_serializers(priv);
+ if (ret)
+ goto err_uninit_atr;
+
+ ret = ub960_create_subdev(priv);
+ if (ret)
+ goto err_free_sers;
+
+ if (client->irq)
+ dev_warn(dev, "irq support not implemented, using polling\n");
+
+ schedule_delayed_work(&priv->poll_work,
+ msecs_to_jiffies(UB960_POLL_TIME_MS));
+
+ return 0;
+
+err_free_sers:
+ ub960_rxport_remove_serializers(priv);
+err_uninit_atr:
+ ub960_uninit_atr(priv);
+err_disable_vpocs:
+ ub960_rxport_disable_vpocs(priv);
+err_free_ports:
+ ub960_rxport_free_ports(priv);
+ ub960_txport_free_ports(priv);
+err_disable_core_hw:
+ ub960_disable_core_hw(priv);
+err_mutex_destroy:
+ mutex_destroy(&priv->reg_lock);
+ return ret;
+}
+
+static void ub960_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ub960_data *priv = sd_to_ub960(sd);
+
+ cancel_delayed_work_sync(&priv->poll_work);
+
+ ub960_destroy_subdev(priv);
+ ub960_rxport_remove_serializers(priv);
+ ub960_uninit_atr(priv);
+ ub960_rxport_disable_vpocs(priv);
+ ub960_rxport_free_ports(priv);
+ ub960_txport_free_ports(priv);
+ ub960_disable_core_hw(priv);
+ mutex_destroy(&priv->reg_lock);
+}
+
+static const struct ub960_hw_data ds90ub960_hw = {
+ .model = "ub960",
+ .num_rxports = 4,
+ .num_txports = 2,
+};
+
+static const struct ub960_hw_data ds90ub9702_hw = {
+ .model = "ub9702",
+ .num_rxports = 4,
+ .num_txports = 2,
+ .is_ub9702 = true,
+ .is_fpdlink4 = true,
+};
+
+static const struct i2c_device_id ub960_id[] = {
+ { "ds90ub960-q1", (kernel_ulong_t)&ds90ub960_hw },
+ { "ds90ub9702-q1", (kernel_ulong_t)&ds90ub9702_hw },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ub960_id);
+
+static const struct of_device_id ub960_dt_ids[] = {
+ { .compatible = "ti,ds90ub960-q1", .data = &ds90ub960_hw },
+ { .compatible = "ti,ds90ub9702-q1", .data = &ds90ub9702_hw },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ub960_dt_ids);
+
+static struct i2c_driver ds90ub960_driver = {
+ .probe = ub960_probe,
+ .remove = ub960_remove,
+ .id_table = ub960_id,
+ .driver = {
+ .name = "ds90ub960",
+ .of_match_table = ub960_dt_ids,
+ },
+};
+module_i2c_driver(ds90ub960_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Texas Instruments FPD-Link III/IV Deserializers Driver");
+MODULE_AUTHOR("Luca Ceresoli <luca@lucaceresoli.net>");
+MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>");
+MODULE_IMPORT_NS(I2C_ATR);
diff --git a/drivers/media/i2c/dw9719.c b/drivers/media/i2c/dw9719.c
new file mode 100644
index 000000000000..c626ed845928
--- /dev/null
+++ b/drivers/media/i2c/dw9719.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2012 Intel Corporation
+
+/*
+ * Based on linux/modules/camera/drivers/media/i2c/imx/dw9719.c in this repo:
+ * https://github.com/ZenfoneArea/android_kernel_asus_zenfone5
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+#include <media/v4l2-cci.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-subdev.h>
+
+#define DW9719_MAX_FOCUS_POS 1023
+#define DW9719_CTRL_STEPS 16
+#define DW9719_CTRL_DELAY_US 1000
+
+#define DW9719_INFO CCI_REG8(0)
+#define DW9719_ID 0xF1
+
+#define DW9719_CONTROL CCI_REG8(2)
+#define DW9719_ENABLE_RINGING 0x02
+
+#define DW9719_VCM_CURRENT CCI_REG16(3)
+
+#define DW9719_MODE CCI_REG8(6)
+#define DW9719_MODE_SAC_SHIFT 4
+#define DW9719_MODE_SAC3 4
+
+#define DW9719_VCM_FREQ CCI_REG8(7)
+#define DW9719_DEFAULT_VCM_FREQ 0x60
+
+#define to_dw9719_device(x) container_of(x, struct dw9719_device, sd)
+
+struct dw9719_device {
+ struct v4l2_subdev sd;
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator *regulator;
+ u32 sac_mode;
+ u32 vcm_freq;
+
+ struct dw9719_v4l2_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *focus;
+ } ctrls;
+};
+
+static int dw9719_detect(struct dw9719_device *dw9719)
+{
+ int ret;
+ u64 val;
+
+ ret = cci_read(dw9719->regmap, DW9719_INFO, &val, NULL);
+ if (ret < 0)
+ return ret;
+
+ if (val != DW9719_ID) {
+ dev_err(dw9719->dev, "Failed to detect correct id\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int dw9719_power_down(struct dw9719_device *dw9719)
+{
+ return regulator_disable(dw9719->regulator);
+}
+
+static int dw9719_power_up(struct dw9719_device *dw9719)
+{
+ int ret;
+
+ ret = regulator_enable(dw9719->regulator);
+ if (ret)
+ return ret;
+
+ /* Jiggle SCL pin to wake up device */
+ cci_write(dw9719->regmap, DW9719_CONTROL, 1, &ret);
+
+ /* Need 100us to transition from SHUTDOWN to STANDBY */
+ fsleep(100);
+
+ cci_write(dw9719->regmap, DW9719_CONTROL, DW9719_ENABLE_RINGING, &ret);
+ cci_write(dw9719->regmap, DW9719_MODE,
+ dw9719->sac_mode << DW9719_MODE_SAC_SHIFT, &ret);
+ cci_write(dw9719->regmap, DW9719_VCM_FREQ, dw9719->vcm_freq, &ret);
+
+ if (ret)
+ dw9719_power_down(dw9719);
+
+ return ret;
+}
+
+static int dw9719_t_focus_abs(struct dw9719_device *dw9719, s32 value)
+{
+ return cci_write(dw9719->regmap, DW9719_VCM_CURRENT, value, NULL);
+}
+
+static int dw9719_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct dw9719_device *dw9719 = container_of(ctrl->handler,
+ struct dw9719_device,
+ ctrls.handler);
+ int ret;
+
+ /* Only apply changes to the controls if the device is powered up */
+ if (!pm_runtime_get_if_in_use(dw9719->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_FOCUS_ABSOLUTE:
+ ret = dw9719_t_focus_abs(dw9719, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ pm_runtime_put(dw9719->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops dw9719_ctrl_ops = {
+ .s_ctrl = dw9719_set_ctrl,
+};
+
+static int dw9719_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct dw9719_device *dw9719 = to_dw9719_device(sd);
+ int ret;
+ int val;
+
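+ /* Move the lens towards its resting position in small steps */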
+ for (val = dw9719->ctrls.focus->val; val >= 0;
+ val -= DW9719_CTRL_STEPS) {
+ ret = dw9719_t_focus_abs(dw9719, val);
+ if (ret)
+ return ret;
+
+ usleep_range(DW9719_CTRL_DELAY_US, DW9719_CTRL_DELAY_US + 10);
+ }
+
+ return dw9719_power_down(dw9719);
+}
+
+static int dw9719_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct dw9719_device *dw9719 = to_dw9719_device(sd);
+ int current_focus = dw9719->ctrls.focus->val;
+ int ret;
+ int val;
+
+ ret = dw9719_power_up(dw9719);
+ if (ret)
+ return ret;
+
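+ /* Restore the previous focus position gradually, one step at a time */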
+ for (val = current_focus % DW9719_CTRL_STEPS; val < current_focus;
+ val += DW9719_CTRL_STEPS) {
+ ret = dw9719_t_focus_abs(dw9719, val);
+ if (ret)
+ goto err_power_down;
+
+ usleep_range(DW9719_CTRL_DELAY_US, DW9719_CTRL_DELAY_US + 10);
+ }
+
+ return 0;
+
+err_power_down:
+ dw9719_power_down(dw9719);
+ return ret;
+}
+
+static int dw9719_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return pm_runtime_resume_and_get(sd->dev);
+}
+
+static int dw9719_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ pm_runtime_put(sd->dev);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops dw9719_internal_ops = {
+ .open = dw9719_open,
+ .close = dw9719_close,
+};
+
+static int dw9719_init_controls(struct dw9719_device *dw9719)
+{
+ const struct v4l2_ctrl_ops *ops = &dw9719_ctrl_ops;
+ int ret;
+
+ v4l2_ctrl_handler_init(&dw9719->ctrls.handler, 1);
+
+ dw9719->ctrls.focus = v4l2_ctrl_new_std(&dw9719->ctrls.handler, ops,
+ V4L2_CID_FOCUS_ABSOLUTE, 0,
+ DW9719_MAX_FOCUS_POS, 1, 0);
+
+ if (dw9719->ctrls.handler.error) {
+ dev_err(dw9719->dev, "Error initialising v4l2 ctrls\n");
+ ret = dw9719->ctrls.handler.error;
+ goto err_free_handler;
+ }
+
+ dw9719->sd.ctrl_handler = &dw9719->ctrls.handler;
+ return 0;
+
+err_free_handler:
+ v4l2_ctrl_handler_free(&dw9719->ctrls.handler);
+ return ret;
+}
+
+static const struct v4l2_subdev_ops dw9719_ops = { };
+
+static int dw9719_probe(struct i2c_client *client)
+{
+ struct dw9719_device *dw9719;
+ int ret;
+
+ dw9719 = devm_kzalloc(&client->dev, sizeof(*dw9719), GFP_KERNEL);
+ if (!dw9719)
+ return -ENOMEM;
+
+ dw9719->regmap = devm_cci_regmap_init_i2c(client, 8);
+ if (IS_ERR(dw9719->regmap))
+ return PTR_ERR(dw9719->regmap);
+
+ dw9719->dev = &client->dev;
+ dw9719->sac_mode = DW9719_MODE_SAC3;
+ dw9719->vcm_freq = DW9719_DEFAULT_VCM_FREQ;
+
+ /* Optional indication of SAC mode select */
+ device_property_read_u32(&client->dev, "dongwoon,sac-mode",
+ &dw9719->sac_mode);
+
+ /* Optional indication of VCM frequency */
+ device_property_read_u32(&client->dev, "dongwoon,vcm-freq",
+ &dw9719->vcm_freq);
+
+ dw9719->regulator = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(dw9719->regulator))
+ return dev_err_probe(&client->dev, PTR_ERR(dw9719->regulator),
+ "getting regulator\n");
+
+ v4l2_i2c_subdev_init(&dw9719->sd, client, &dw9719_ops);
+ dw9719->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dw9719->sd.internal_ops = &dw9719_internal_ops;
+
+ ret = dw9719_init_controls(dw9719);
+ if (ret)
+ return ret;
+
+ ret = media_entity_pads_init(&dw9719->sd.entity, 0, NULL);
+ if (ret < 0)
+ goto err_free_ctrl_handler;
+
+ dw9719->sd.entity.function = MEDIA_ENT_F_LENS;
+
+ /*
+ * The driver must keep working even when runtime PM is disabled in the
+ * kernel, so power up and verify the chip now. If runtime PM is
+ * disabled, this leaves the chip powered so that the lens keeps
+ * working.
+ */
+
+ ret = dw9719_power_up(dw9719);
+ if (ret)
+ goto err_cleanup_media;
+
+ ret = dw9719_detect(dw9719);
+ if (ret)
+ goto err_powerdown;
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_enable(&client->dev);
+
+ ret = v4l2_async_register_subdev(&dw9719->sd);
+ if (ret < 0)
+ goto err_pm_runtime;
+
+ pm_runtime_set_autosuspend_delay(&client->dev, 1000);
+ pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
+
+ return ret;
+
+err_pm_runtime:
+ pm_runtime_disable(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+err_powerdown:
+ dw9719_power_down(dw9719);
+err_cleanup_media:
+ media_entity_cleanup(&dw9719->sd.entity);
+err_free_ctrl_handler:
+ v4l2_ctrl_handler_free(&dw9719->ctrls.handler);
+
+ return ret;
+}
+
+static void dw9719_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct dw9719_device *dw9719 =
+ container_of(sd, struct dw9719_device, sd);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_ctrl_handler_free(&dw9719->ctrls.handler);
+ media_entity_cleanup(&dw9719->sd.entity);
+
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ dw9719_power_down(dw9719);
+ pm_runtime_set_suspended(&client->dev);
+}
+
+static const struct i2c_device_id dw9719_id_table[] = {
+ { "dw9719" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, dw9719_id_table);
+
+static DEFINE_RUNTIME_DEV_PM_OPS(dw9719_pm_ops, dw9719_suspend, dw9719_resume,
+ NULL);
+
+static struct i2c_driver dw9719_i2c_driver = {
+ .driver = {
+ .name = "dw9719",
+ .pm = pm_sleep_ptr(&dw9719_pm_ops),
+ },
+ .probe = dw9719_probe,
+ .remove = dw9719_remove,
+ .id_table = dw9719_id_table,
+};
+module_i2c_driver(dw9719_i2c_driver);
+
+MODULE_AUTHOR("Daniel Scally <djrscally@gmail.com>");
+MODULE_DESCRIPTION("DW9719 VCM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/et8ek8/Kconfig b/drivers/media/i2c/et8ek8/Kconfig
index 398dd4d21df1..987fc62d5e6b 100644
--- a/drivers/media/i2c/et8ek8/Kconfig
+++ b/drivers/media/i2c/et8ek8/Kconfig
@@ -1,10 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_ET8EK8
tristate "ET8EK8 camera sensor support"
- depends on I2C && VIDEO_DEV
- select MEDIA_CONTROLLER
- select VIDEO_V4L2_SUBDEV_API
- select V4L2_FWNODE
help
This is a driver for the Toshiba ET8EK8 5 MP camera sensor.
It is used for example in Nokia N900 (RX-51).
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index 50e78f5b058c..fd56ba138739 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -1357,6 +1357,6 @@ static struct i2c_driver hi556_i2c_driver = {
module_i2c_driver(hi556_i2c_driver);
-MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_AUTHOR("Shawn Tu");
MODULE_DESCRIPTION("Hynix HI556 sensor driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/hi847.c b/drivers/media/i2c/hi847.c
index 7cdce392e137..32547d7a2659 100644
--- a/drivers/media/i2c/hi847.c
+++ b/drivers/media/i2c/hi847.c
@@ -3005,6 +3005,6 @@ static struct i2c_driver hi847_i2c_driver = {
module_i2c_driver(hi847_i2c_driver);
-MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_AUTHOR("Shawn Tu");
MODULE_DESCRIPTION("Hynix HI847 sensor driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx208.c b/drivers/media/i2c/imx208.c
index 3e870fa9ff79..ee5a28675388 100644
--- a/drivers/media/i2c/imx208.c
+++ b/drivers/media/i2c/imx208.c
@@ -1109,6 +1109,6 @@ module_i2c_driver(imx208_i2c_driver);
MODULE_AUTHOR("Yeh, Andy <andy.yeh@intel.com>");
MODULE_AUTHOR("Chen, Ping-chung <ping-chung.chen@intel.com>");
-MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_AUTHOR("Shawn Tu");
MODULE_DESCRIPTION("Sony IMX208 sensor driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index d737d5e9a4a6..a1136fdfbed2 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -345,7 +345,7 @@ static const char * const imx219_supply_name[] = {
* - v flip
* - h&v flips
*/
-static const u32 codes[] = {
+static const u32 imx219_mbus_formats[] = {
MEDIA_BUS_FMT_SRGGB10_1X10,
MEDIA_BUS_FMT_SGRBG10_1X10,
MEDIA_BUS_FMT_SGBRG10_1X10,
@@ -460,8 +460,6 @@ struct imx219 {
struct v4l2_subdev sd;
struct media_pad pad;
- struct v4l2_mbus_framefmt fmt;
-
struct clk *xclk; /* system clock to IMX219 */
u32 xclk_freq;
@@ -481,12 +479,6 @@ struct imx219 {
/* Current mode */
const struct imx219_mode *mode;
- /*
- * Mutex for serialized access:
- * Protect sensor module set pad format and start/stop streaming safely.
- */
- struct mutex mutex;
-
/* Streaming on/off */
bool streaming;
@@ -576,64 +568,17 @@ static u32 imx219_get_format_code(struct imx219 *imx219, u32 code)
{
unsigned int i;
- lockdep_assert_held(&imx219->mutex);
-
- for (i = 0; i < ARRAY_SIZE(codes); i++)
- if (codes[i] == code)
+ for (i = 0; i < ARRAY_SIZE(imx219_mbus_formats); i++)
+ if (imx219_mbus_formats[i] == code)
break;
- if (i >= ARRAY_SIZE(codes))
+ if (i >= ARRAY_SIZE(imx219_mbus_formats))
i = 0;
i = (i & ~3) | (imx219->vflip->val ? 2 : 0) |
(imx219->hflip->val ? 1 : 0);
- return codes[i];
-}
-
-static void imx219_set_default_format(struct imx219 *imx219)
-{
- struct v4l2_mbus_framefmt *fmt;
-
- fmt = &imx219->fmt;
- fmt->code = MEDIA_BUS_FMT_SRGGB10_1X10;
- fmt->colorspace = V4L2_COLORSPACE_SRGB;
- fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
- fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
- fmt->colorspace,
- fmt->ycbcr_enc);
- fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
- fmt->width = supported_modes[0].width;
- fmt->height = supported_modes[0].height;
- fmt->field = V4L2_FIELD_NONE;
-}
-
-static int imx219_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
-{
- struct imx219 *imx219 = to_imx219(sd);
- struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(sd, fh->state, 0);
- struct v4l2_rect *try_crop;
-
- mutex_lock(&imx219->mutex);
-
- /* Initialize try_fmt */
- try_fmt->width = supported_modes[0].width;
- try_fmt->height = supported_modes[0].height;
- try_fmt->code = imx219_get_format_code(imx219,
- MEDIA_BUS_FMT_SRGGB10_1X10);
- try_fmt->field = V4L2_FIELD_NONE;
-
- /* Initialize try_crop rectangle. */
- try_crop = v4l2_subdev_get_try_crop(sd, fh->state, 0);
- try_crop->top = IMX219_PIXEL_ARRAY_TOP;
- try_crop->left = IMX219_PIXEL_ARRAY_LEFT;
- try_crop->width = IMX219_PIXEL_ARRAY_WIDTH;
- try_crop->height = IMX219_PIXEL_ARRAY_HEIGHT;
-
- mutex_unlock(&imx219->mutex);
-
- return 0;
+ return imx219_mbus_formats[i];
}
static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
@@ -725,18 +670,52 @@ static const struct v4l2_ctrl_ops imx219_ctrl_ops = {
.s_ctrl = imx219_set_ctrl,
};
+static void imx219_update_pad_format(struct imx219 *imx219,
+ const struct imx219_mode *mode,
+ struct v4l2_mbus_framefmt *fmt, u32 code)
+{
+ /* Bayer order varies with flips */
+ fmt->code = imx219_get_format_code(imx219, code);
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+}
+
+static int imx219_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct imx219 *imx219 = to_imx219(sd);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+
+ /* Initialize try_fmt */
+ format = v4l2_subdev_get_pad_format(sd, state, 0);
+ imx219_update_pad_format(imx219, &supported_modes[0], format,
+ MEDIA_BUS_FMT_SRGGB10_1X10);
+
+ /* Initialize crop rectangle. */
+ crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+ crop->top = IMX219_PIXEL_ARRAY_TOP;
+ crop->left = IMX219_PIXEL_ARRAY_LEFT;
+ crop->width = IMX219_PIXEL_ARRAY_WIDTH;
+ crop->height = IMX219_PIXEL_ARRAY_HEIGHT;
+
+ return 0;
+}
+
static int imx219_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct imx219 *imx219 = to_imx219(sd);
- if (code->index >= (ARRAY_SIZE(codes) / 4))
+ if (code->index >= (ARRAY_SIZE(imx219_mbus_formats) / 4))
return -EINVAL;
- mutex_lock(&imx219->mutex);
- code->code = imx219_get_format_code(imx219, codes[code->index * 4]);
- mutex_unlock(&imx219->mutex);
+ code->code = imx219_get_format_code(imx219, imx219_mbus_formats[code->index * 4]);
return 0;
}
@@ -751,9 +730,7 @@ static int imx219_enum_frame_size(struct v4l2_subdev *sd,
if (fse->index >= ARRAY_SIZE(supported_modes))
return -EINVAL;
- mutex_lock(&imx219->mutex);
code = imx219_get_format_code(imx219, fse->code);
- mutex_unlock(&imx219->mutex);
if (fse->code != code)
return -EINVAL;
@@ -765,92 +742,27 @@ static int imx219_enum_frame_size(struct v4l2_subdev *sd,
return 0;
}
-static void imx219_reset_colorspace(struct v4l2_mbus_framefmt *fmt)
-{
- fmt->colorspace = V4L2_COLORSPACE_SRGB;
- fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
- fmt->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
- fmt->colorspace,
- fmt->ycbcr_enc);
- fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(fmt->colorspace);
-}
-
-static void imx219_update_pad_format(struct imx219 *imx219,
- const struct imx219_mode *mode,
- struct v4l2_subdev_format *fmt)
-{
- fmt->format.width = mode->width;
- fmt->format.height = mode->height;
- fmt->format.field = V4L2_FIELD_NONE;
- imx219_reset_colorspace(&fmt->format);
-}
-
-static int __imx219_get_pad_format(struct imx219 *imx219,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
-{
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_get_try_format(&imx219->sd, sd_state,
- fmt->pad);
- /* update the code which could change due to vflip or hflip: */
- try_fmt->code = imx219_get_format_code(imx219, try_fmt->code);
- fmt->format = *try_fmt;
- } else {
- imx219_update_pad_format(imx219, imx219->mode, fmt);
- fmt->format.code = imx219_get_format_code(imx219,
- imx219->fmt.code);
- }
-
- return 0;
-}
-
-static int imx219_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
-{
- struct imx219 *imx219 = to_imx219(sd);
- int ret;
-
- mutex_lock(&imx219->mutex);
- ret = __imx219_get_pad_format(imx219, sd_state, fmt);
- mutex_unlock(&imx219->mutex);
-
- return ret;
-}
-
static int imx219_set_pad_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct imx219 *imx219 = to_imx219(sd);
const struct imx219_mode *mode;
- struct v4l2_mbus_framefmt *framefmt;
int exposure_max, exposure_def, hblank;
- unsigned int i;
-
- mutex_lock(&imx219->mutex);
-
- for (i = 0; i < ARRAY_SIZE(codes); i++)
- if (codes[i] == fmt->format.code)
- break;
- if (i >= ARRAY_SIZE(codes))
- i = 0;
-
- /* Bayer order varies with flips */
- fmt->format.code = imx219_get_format_code(imx219, codes[i]);
+ struct v4l2_mbus_framefmt *format;
mode = v4l2_find_nearest_size(supported_modes,
ARRAY_SIZE(supported_modes),
width, height,
fmt->format.width, fmt->format.height);
- imx219_update_pad_format(imx219, mode, fmt);
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
- *framefmt = fmt->format;
- } else if (imx219->mode != mode ||
- imx219->fmt.code != fmt->format.code) {
- imx219->fmt = fmt->format;
+
+ imx219_update_pad_format(imx219, mode, &fmt->format, fmt->format.code);
+ format = v4l2_subdev_get_pad_format(sd, sd_state, 0);
+
+ if (imx219->mode == mode && format->code == fmt->format.code)
+ return 0;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
imx219->mode = mode;
/* Update limits and set FPS to default */
__v4l2_ctrl_modify_range(imx219->vblank, IMX219_VBLANK_MIN,
@@ -876,14 +788,15 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
hblank);
}
- mutex_unlock(&imx219->mutex);
+ *format = fmt->format;
return 0;
}
-static int imx219_set_framefmt(struct imx219 *imx219)
+static int imx219_set_framefmt(struct imx219 *imx219,
+ const struct v4l2_mbus_framefmt *format)
{
- switch (imx219->fmt.code) {
+ switch (format->code) {
case MEDIA_BUS_FMT_SRGGB8_1X8:
case MEDIA_BUS_FMT_SGRBG8_1X8:
case MEDIA_BUS_FMT_SGBRG8_1X8:
@@ -902,7 +815,8 @@ static int imx219_set_framefmt(struct imx219 *imx219)
return -EINVAL;
}
-static int imx219_set_binning(struct imx219 *imx219)
+static int imx219_set_binning(struct imx219 *imx219,
+ const struct v4l2_mbus_framefmt *format)
{
if (!imx219->mode->binning) {
return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
@@ -910,7 +824,7 @@ static int imx219_set_binning(struct imx219 *imx219)
IMX219_BINNING_NONE);
}
- switch (imx219->fmt.code) {
+ switch (format->code) {
case MEDIA_BUS_FMT_SRGGB8_1X8:
case MEDIA_BUS_FMT_SGRBG8_1X8:
case MEDIA_BUS_FMT_SGBRG8_1X8:
@@ -931,34 +845,13 @@ static int imx219_set_binning(struct imx219 *imx219)
return -EINVAL;
}
-static const struct v4l2_rect *
-__imx219_get_pad_crop(struct imx219 *imx219,
- struct v4l2_subdev_state *sd_state,
- unsigned int pad, enum v4l2_subdev_format_whence which)
-{
- switch (which) {
- case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_crop(&imx219->sd, sd_state, pad);
- case V4L2_SUBDEV_FORMAT_ACTIVE:
- return &imx219->mode->crop;
- }
-
- return NULL;
-}
-
static int imx219_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
switch (sel->target) {
case V4L2_SEL_TGT_CROP: {
- struct imx219 *imx219 = to_imx219(sd);
-
- mutex_lock(&imx219->mutex);
- sel->r = *__imx219_get_pad_crop(imx219, sd_state, sel->pad,
- sel->which);
- mutex_unlock(&imx219->mutex);
-
+ sel->r = *v4l2_subdev_get_pad_crop(sd, sd_state, 0);
return 0;
}
@@ -990,9 +883,11 @@ static int imx219_configure_lanes(struct imx219 *imx219)
IMX219_CSI_2_LANE_MODE : IMX219_CSI_4_LANE_MODE);
};
-static int imx219_start_streaming(struct imx219 *imx219)
+static int imx219_start_streaming(struct imx219 *imx219,
+ struct v4l2_subdev_state *state)
{
struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+ const struct v4l2_mbus_framefmt *format;
const struct imx219_reg_list *reg_list;
int ret;
@@ -1022,14 +917,15 @@ static int imx219_start_streaming(struct imx219 *imx219)
goto err_rpm_put;
}
- ret = imx219_set_framefmt(imx219);
+ format = v4l2_subdev_get_pad_format(&imx219->sd, state, 0);
+ ret = imx219_set_framefmt(imx219, format);
if (ret) {
dev_err(&client->dev, "%s failed to set frame format: %d\n",
__func__, ret);
goto err_rpm_put;
}
- ret = imx219_set_binning(imx219);
+ ret = imx219_set_binning(imx219, format);
if (ret) {
dev_err(&client->dev, "%s failed to set binning: %d\n",
__func__, ret);
@@ -1078,35 +974,30 @@ static void imx219_stop_streaming(struct imx219 *imx219)
static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
{
struct imx219 *imx219 = to_imx219(sd);
+ struct v4l2_subdev_state *state;
int ret = 0;
- mutex_lock(&imx219->mutex);
- if (imx219->streaming == enable) {
- mutex_unlock(&imx219->mutex);
- return 0;
- }
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+
+ if (imx219->streaming == enable)
+ goto unlock;
if (enable) {
/*
* Apply default & customized values
* and then start streaming.
*/
- ret = imx219_start_streaming(imx219);
+ ret = imx219_start_streaming(imx219, state);
if (ret)
- goto err_unlock;
+ goto unlock;
} else {
imx219_stop_streaming(imx219);
}
imx219->streaming = enable;
- mutex_unlock(&imx219->mutex);
-
- return ret;
-
-err_unlock:
- mutex_unlock(&imx219->mutex);
-
+unlock:
+ v4l2_subdev_unlock_state(state);
return ret;
}
@@ -1171,10 +1062,13 @@ static int __maybe_unused imx219_resume(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct imx219 *imx219 = to_imx219(sd);
+ struct v4l2_subdev_state *state;
int ret;
if (imx219->streaming) {
- ret = imx219_start_streaming(imx219);
+ state = v4l2_subdev_lock_and_get_active_state(sd);
+ ret = imx219_start_streaming(imx219, state);
+ v4l2_subdev_unlock_state(state);
if (ret)
goto error;
}
@@ -1235,8 +1129,9 @@ static const struct v4l2_subdev_video_ops imx219_video_ops = {
};
static const struct v4l2_subdev_pad_ops imx219_pad_ops = {
+ .init_cfg = imx219_init_cfg,
.enum_mbus_code = imx219_enum_mbus_code,
- .get_fmt = imx219_get_pad_format,
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = imx219_set_pad_format,
.get_selection = imx219_get_selection,
.enum_frame_size = imx219_enum_frame_size,
@@ -1248,9 +1143,6 @@ static const struct v4l2_subdev_ops imx219_subdev_ops = {
.pad = &imx219_pad_ops,
};
-static const struct v4l2_subdev_internal_ops imx219_internal_ops = {
- .open = imx219_open,
-};
static unsigned long imx219_get_pixel_rate(struct imx219 *imx219)
{
@@ -1272,9 +1164,6 @@ static int imx219_init_controls(struct imx219 *imx219)
if (ret)
return ret;
- mutex_init(&imx219->mutex);
- ctrl_hdlr->lock = &imx219->mutex;
-
/* By default, PIXEL_RATE is read only */
imx219->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
V4L2_CID_PIXEL_RATE,
@@ -1371,7 +1260,6 @@ static int imx219_init_controls(struct imx219 *imx219)
error:
v4l2_ctrl_handler_free(ctrl_hdlr);
- mutex_destroy(&imx219->mutex);
return ret;
}
@@ -1379,7 +1267,6 @@ error:
static void imx219_free_controls(struct imx219 *imx219)
{
v4l2_ctrl_handler_free(imx219->sd.ctrl_handler);
- mutex_destroy(&imx219->mutex);
}
static int imx219_check_hwcfg(struct device *dev, struct imx219 *imx219)
@@ -1509,7 +1396,6 @@ static int imx219_probe(struct i2c_client *client)
goto error_power_off;
/* Initialize subdev */
- imx219->sd.internal_ops = &imx219_internal_ops;
imx219->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS;
imx219->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
@@ -1517,19 +1403,23 @@ static int imx219_probe(struct i2c_client *client)
/* Initialize source pad */
imx219->pad.flags = MEDIA_PAD_FL_SOURCE;
- /* Initialize default format */
- imx219_set_default_format(imx219);
-
ret = media_entity_pads_init(&imx219->sd.entity, 1, &imx219->pad);
if (ret) {
dev_err(dev, "failed to init entity pads: %d\n", ret);
goto error_handler_free;
}
+ imx219->sd.state_lock = imx219->ctrl_handler.lock;
+ ret = v4l2_subdev_init_finalize(&imx219->sd);
+ if (ret < 0) {
+ dev_err(dev, "subdev init error: %d\n", ret);
+ goto error_media_entity;
+ }
+
ret = v4l2_async_register_subdev_sensor(&imx219->sd);
if (ret < 0) {
dev_err(dev, "failed to register sensor sub-device: %d\n", ret);
- goto error_media_entity;
+ goto error_subdev_cleanup;
}
/* Enable runtime PM and turn off the device */
@@ -1539,6 +1429,9 @@ static int imx219_probe(struct i2c_client *client)
return 0;
+error_subdev_cleanup:
+ v4l2_subdev_cleanup(&imx219->sd);
+
error_media_entity:
media_entity_cleanup(&imx219->sd.entity);
@@ -1557,6 +1450,7 @@ static void imx219_remove(struct i2c_client *client)
struct imx219 *imx219 = to_imx219(sd);
v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
media_entity_cleanup(&sd->entity);
imx219_free_controls(imx219);
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index b3f832e9d7e1..29098612813c 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -13,7 +13,7 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -21,91 +21,86 @@
#include <asm/unaligned.h>
#include <media/media-entity.h>
+#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-#define IMX290_REG_SIZE_SHIFT 16
-#define IMX290_REG_ADDR_MASK 0xffff
-#define IMX290_REG_8BIT(n) ((1U << IMX290_REG_SIZE_SHIFT) | (n))
-#define IMX290_REG_16BIT(n) ((2U << IMX290_REG_SIZE_SHIFT) | (n))
-#define IMX290_REG_24BIT(n) ((3U << IMX290_REG_SIZE_SHIFT) | (n))
-
-#define IMX290_STANDBY IMX290_REG_8BIT(0x3000)
-#define IMX290_REGHOLD IMX290_REG_8BIT(0x3001)
-#define IMX290_XMSTA IMX290_REG_8BIT(0x3002)
-#define IMX290_ADBIT IMX290_REG_8BIT(0x3005)
+#define IMX290_STANDBY CCI_REG8(0x3000)
+#define IMX290_REGHOLD CCI_REG8(0x3001)
+#define IMX290_XMSTA CCI_REG8(0x3002)
+#define IMX290_ADBIT CCI_REG8(0x3005)
#define IMX290_ADBIT_10BIT (0 << 0)
#define IMX290_ADBIT_12BIT (1 << 0)
-#define IMX290_CTRL_07 IMX290_REG_8BIT(0x3007)
+#define IMX290_CTRL_07 CCI_REG8(0x3007)
#define IMX290_VREVERSE BIT(0)
#define IMX290_HREVERSE BIT(1)
#define IMX290_WINMODE_1080P (0 << 4)
#define IMX290_WINMODE_720P (1 << 4)
#define IMX290_WINMODE_CROP (4 << 4)
-#define IMX290_FR_FDG_SEL IMX290_REG_8BIT(0x3009)
-#define IMX290_BLKLEVEL IMX290_REG_16BIT(0x300a)
-#define IMX290_GAIN IMX290_REG_8BIT(0x3014)
-#define IMX290_VMAX IMX290_REG_24BIT(0x3018)
+#define IMX290_FR_FDG_SEL CCI_REG8(0x3009)
+#define IMX290_BLKLEVEL CCI_REG16(0x300a)
+#define IMX290_GAIN CCI_REG8(0x3014)
+#define IMX290_VMAX CCI_REG24(0x3018)
#define IMX290_VMAX_MAX 0x3ffff
-#define IMX290_HMAX IMX290_REG_16BIT(0x301c)
+#define IMX290_HMAX CCI_REG16(0x301c)
#define IMX290_HMAX_MAX 0xffff
-#define IMX290_SHS1 IMX290_REG_24BIT(0x3020)
-#define IMX290_WINWV_OB IMX290_REG_8BIT(0x303a)
-#define IMX290_WINPV IMX290_REG_16BIT(0x303c)
-#define IMX290_WINWV IMX290_REG_16BIT(0x303e)
-#define IMX290_WINPH IMX290_REG_16BIT(0x3040)
-#define IMX290_WINWH IMX290_REG_16BIT(0x3042)
-#define IMX290_OUT_CTRL IMX290_REG_8BIT(0x3046)
+#define IMX290_SHS1 CCI_REG24(0x3020)
+#define IMX290_WINWV_OB CCI_REG8(0x303a)
+#define IMX290_WINPV CCI_REG16(0x303c)
+#define IMX290_WINWV CCI_REG16(0x303e)
+#define IMX290_WINPH CCI_REG16(0x3040)
+#define IMX290_WINWH CCI_REG16(0x3042)
+#define IMX290_OUT_CTRL CCI_REG8(0x3046)
#define IMX290_ODBIT_10BIT (0 << 0)
#define IMX290_ODBIT_12BIT (1 << 0)
#define IMX290_OPORTSEL_PARALLEL (0x0 << 4)
#define IMX290_OPORTSEL_LVDS_2CH (0xd << 4)
#define IMX290_OPORTSEL_LVDS_4CH (0xe << 4)
#define IMX290_OPORTSEL_LVDS_8CH (0xf << 4)
-#define IMX290_XSOUTSEL IMX290_REG_8BIT(0x304b)
+#define IMX290_XSOUTSEL CCI_REG8(0x304b)
#define IMX290_XSOUTSEL_XVSOUTSEL_HIGH (0 << 0)
#define IMX290_XSOUTSEL_XVSOUTSEL_VSYNC (2 << 0)
#define IMX290_XSOUTSEL_XHSOUTSEL_HIGH (0 << 2)
#define IMX290_XSOUTSEL_XHSOUTSEL_HSYNC (2 << 2)
-#define IMX290_INCKSEL1 IMX290_REG_8BIT(0x305c)
-#define IMX290_INCKSEL2 IMX290_REG_8BIT(0x305d)
-#define IMX290_INCKSEL3 IMX290_REG_8BIT(0x305e)
-#define IMX290_INCKSEL4 IMX290_REG_8BIT(0x305f)
-#define IMX290_PGCTRL IMX290_REG_8BIT(0x308c)
-#define IMX290_ADBIT1 IMX290_REG_8BIT(0x3129)
+#define IMX290_INCKSEL1 CCI_REG8(0x305c)
+#define IMX290_INCKSEL2 CCI_REG8(0x305d)
+#define IMX290_INCKSEL3 CCI_REG8(0x305e)
+#define IMX290_INCKSEL4 CCI_REG8(0x305f)
+#define IMX290_PGCTRL CCI_REG8(0x308c)
+#define IMX290_ADBIT1 CCI_REG8(0x3129)
#define IMX290_ADBIT1_10BIT 0x1d
#define IMX290_ADBIT1_12BIT 0x00
-#define IMX290_INCKSEL5 IMX290_REG_8BIT(0x315e)
-#define IMX290_INCKSEL6 IMX290_REG_8BIT(0x3164)
-#define IMX290_ADBIT2 IMX290_REG_8BIT(0x317c)
+#define IMX290_INCKSEL5 CCI_REG8(0x315e)
+#define IMX290_INCKSEL6 CCI_REG8(0x3164)
+#define IMX290_ADBIT2 CCI_REG8(0x317c)
#define IMX290_ADBIT2_10BIT 0x12
#define IMX290_ADBIT2_12BIT 0x00
-#define IMX290_CHIP_ID IMX290_REG_16BIT(0x319a)
-#define IMX290_ADBIT3 IMX290_REG_8BIT(0x31ec)
+#define IMX290_CHIP_ID CCI_REG16(0x319a)
+#define IMX290_ADBIT3 CCI_REG8(0x31ec)
#define IMX290_ADBIT3_10BIT 0x37
#define IMX290_ADBIT3_12BIT 0x0e
-#define IMX290_REPETITION IMX290_REG_8BIT(0x3405)
-#define IMX290_PHY_LANE_NUM IMX290_REG_8BIT(0x3407)
-#define IMX290_OPB_SIZE_V IMX290_REG_8BIT(0x3414)
-#define IMX290_Y_OUT_SIZE IMX290_REG_16BIT(0x3418)
-#define IMX290_CSI_DT_FMT IMX290_REG_16BIT(0x3441)
+#define IMX290_REPETITION CCI_REG8(0x3405)
+#define IMX290_PHY_LANE_NUM CCI_REG8(0x3407)
+#define IMX290_OPB_SIZE_V CCI_REG8(0x3414)
+#define IMX290_Y_OUT_SIZE CCI_REG16(0x3418)
+#define IMX290_CSI_DT_FMT CCI_REG16(0x3441)
#define IMX290_CSI_DT_FMT_RAW10 0x0a0a
#define IMX290_CSI_DT_FMT_RAW12 0x0c0c
-#define IMX290_CSI_LANE_MODE IMX290_REG_8BIT(0x3443)
-#define IMX290_EXTCK_FREQ IMX290_REG_16BIT(0x3444)
-#define IMX290_TCLKPOST IMX290_REG_16BIT(0x3446)
-#define IMX290_THSZERO IMX290_REG_16BIT(0x3448)
-#define IMX290_THSPREPARE IMX290_REG_16BIT(0x344a)
-#define IMX290_TCLKTRAIL IMX290_REG_16BIT(0x344c)
-#define IMX290_THSTRAIL IMX290_REG_16BIT(0x344e)
-#define IMX290_TCLKZERO IMX290_REG_16BIT(0x3450)
-#define IMX290_TCLKPREPARE IMX290_REG_16BIT(0x3452)
-#define IMX290_TLPX IMX290_REG_16BIT(0x3454)
-#define IMX290_X_OUT_SIZE IMX290_REG_16BIT(0x3472)
-#define IMX290_INCKSEL7 IMX290_REG_8BIT(0x3480)
+#define IMX290_CSI_LANE_MODE CCI_REG8(0x3443)
+#define IMX290_EXTCK_FREQ CCI_REG16(0x3444)
+#define IMX290_TCLKPOST CCI_REG16(0x3446)
+#define IMX290_THSZERO CCI_REG16(0x3448)
+#define IMX290_THSPREPARE CCI_REG16(0x344a)
+#define IMX290_TCLKTRAIL CCI_REG16(0x344c)
+#define IMX290_THSTRAIL CCI_REG16(0x344e)
+#define IMX290_TCLKZERO CCI_REG16(0x3450)
+#define IMX290_TCLKPREPARE CCI_REG16(0x3452)
+#define IMX290_TLPX CCI_REG16(0x3454)
+#define IMX290_X_OUT_SIZE CCI_REG16(0x3472)
+#define IMX290_INCKSEL7 CCI_REG8(0x3480)
#define IMX290_PGCTRL_REGEN BIT(0)
#define IMX290_PGCTRL_THRU BIT(1)
@@ -181,7 +176,7 @@ enum imx290_model {
struct imx290_model_info {
enum imx290_colour_variant colour_variant;
- const struct imx290_regval *init_regs;
+ const struct cci_reg_sequence *init_regs;
size_t init_regs_num;
const char *name;
};
@@ -192,11 +187,6 @@ enum imx290_clk_freq {
IMX290_NUM_CLK
};
-struct imx290_regval {
- u32 reg;
- u32 val;
-};
-
/*
* Clock configuration for registers INCKSEL1 to INCKSEL6.
*/
@@ -217,7 +207,7 @@ struct imx290_mode {
u8 link_freq_index;
u8 ctrl_07;
- const struct imx290_regval *data;
+ const struct cci_reg_sequence *data;
u32 data_size;
const struct imx290_clk_cfg *clk_cfg;
@@ -271,7 +261,7 @@ static inline struct imx290 *to_imx290(struct v4l2_subdev *_sd)
* Modes and formats
*/
-static const struct imx290_regval imx290_global_init_settings[] = {
+static const struct cci_reg_sequence imx290_global_init_settings[] = {
{ IMX290_WINWV_OB, 12 },
{ IMX290_WINPH, 0 },
{ IMX290_WINPV, 0 },
@@ -279,56 +269,56 @@ static const struct imx290_regval imx290_global_init_settings[] = {
{ IMX290_WINWV, 1097 },
{ IMX290_XSOUTSEL, IMX290_XSOUTSEL_XVSOUTSEL_VSYNC |
IMX290_XSOUTSEL_XHSOUTSEL_HSYNC },
- { IMX290_REG_8BIT(0x3011), 0x02 },
- { IMX290_REG_8BIT(0x3012), 0x64 },
- { IMX290_REG_8BIT(0x3013), 0x00 },
+ { CCI_REG8(0x3011), 0x02 },
+ { CCI_REG8(0x3012), 0x64 },
+ { CCI_REG8(0x3013), 0x00 },
};
-static const struct imx290_regval imx290_global_init_settings_290[] = {
- { IMX290_REG_8BIT(0x300f), 0x00 },
- { IMX290_REG_8BIT(0x3010), 0x21 },
- { IMX290_REG_8BIT(0x3016), 0x09 },
- { IMX290_REG_8BIT(0x3070), 0x02 },
- { IMX290_REG_8BIT(0x3071), 0x11 },
- { IMX290_REG_8BIT(0x309b), 0x10 },
- { IMX290_REG_8BIT(0x309c), 0x22 },
- { IMX290_REG_8BIT(0x30a2), 0x02 },
- { IMX290_REG_8BIT(0x30a6), 0x20 },
- { IMX290_REG_8BIT(0x30a8), 0x20 },
- { IMX290_REG_8BIT(0x30aa), 0x20 },
- { IMX290_REG_8BIT(0x30ac), 0x20 },
- { IMX290_REG_8BIT(0x30b0), 0x43 },
- { IMX290_REG_8BIT(0x3119), 0x9e },
- { IMX290_REG_8BIT(0x311c), 0x1e },
- { IMX290_REG_8BIT(0x311e), 0x08 },
- { IMX290_REG_8BIT(0x3128), 0x05 },
- { IMX290_REG_8BIT(0x313d), 0x83 },
- { IMX290_REG_8BIT(0x3150), 0x03 },
- { IMX290_REG_8BIT(0x317e), 0x00 },
- { IMX290_REG_8BIT(0x32b8), 0x50 },
- { IMX290_REG_8BIT(0x32b9), 0x10 },
- { IMX290_REG_8BIT(0x32ba), 0x00 },
- { IMX290_REG_8BIT(0x32bb), 0x04 },
- { IMX290_REG_8BIT(0x32c8), 0x50 },
- { IMX290_REG_8BIT(0x32c9), 0x10 },
- { IMX290_REG_8BIT(0x32ca), 0x00 },
- { IMX290_REG_8BIT(0x32cb), 0x04 },
- { IMX290_REG_8BIT(0x332c), 0xd3 },
- { IMX290_REG_8BIT(0x332d), 0x10 },
- { IMX290_REG_8BIT(0x332e), 0x0d },
- { IMX290_REG_8BIT(0x3358), 0x06 },
- { IMX290_REG_8BIT(0x3359), 0xe1 },
- { IMX290_REG_8BIT(0x335a), 0x11 },
- { IMX290_REG_8BIT(0x3360), 0x1e },
- { IMX290_REG_8BIT(0x3361), 0x61 },
- { IMX290_REG_8BIT(0x3362), 0x10 },
- { IMX290_REG_8BIT(0x33b0), 0x50 },
- { IMX290_REG_8BIT(0x33b2), 0x1a },
- { IMX290_REG_8BIT(0x33b3), 0x04 },
+static const struct cci_reg_sequence imx290_global_init_settings_290[] = {
+ { CCI_REG8(0x300f), 0x00 },
+ { CCI_REG8(0x3010), 0x21 },
+ { CCI_REG8(0x3016), 0x09 },
+ { CCI_REG8(0x3070), 0x02 },
+ { CCI_REG8(0x3071), 0x11 },
+ { CCI_REG8(0x309b), 0x10 },
+ { CCI_REG8(0x309c), 0x22 },
+ { CCI_REG8(0x30a2), 0x02 },
+ { CCI_REG8(0x30a6), 0x20 },
+ { CCI_REG8(0x30a8), 0x20 },
+ { CCI_REG8(0x30aa), 0x20 },
+ { CCI_REG8(0x30ac), 0x20 },
+ { CCI_REG8(0x30b0), 0x43 },
+ { CCI_REG8(0x3119), 0x9e },
+ { CCI_REG8(0x311c), 0x1e },
+ { CCI_REG8(0x311e), 0x08 },
+ { CCI_REG8(0x3128), 0x05 },
+ { CCI_REG8(0x313d), 0x83 },
+ { CCI_REG8(0x3150), 0x03 },
+ { CCI_REG8(0x317e), 0x00 },
+ { CCI_REG8(0x32b8), 0x50 },
+ { CCI_REG8(0x32b9), 0x10 },
+ { CCI_REG8(0x32ba), 0x00 },
+ { CCI_REG8(0x32bb), 0x04 },
+ { CCI_REG8(0x32c8), 0x50 },
+ { CCI_REG8(0x32c9), 0x10 },
+ { CCI_REG8(0x32ca), 0x00 },
+ { CCI_REG8(0x32cb), 0x04 },
+ { CCI_REG8(0x332c), 0xd3 },
+ { CCI_REG8(0x332d), 0x10 },
+ { CCI_REG8(0x332e), 0x0d },
+ { CCI_REG8(0x3358), 0x06 },
+ { CCI_REG8(0x3359), 0xe1 },
+ { CCI_REG8(0x335a), 0x11 },
+ { CCI_REG8(0x3360), 0x1e },
+ { CCI_REG8(0x3361), 0x61 },
+ { CCI_REG8(0x3362), 0x10 },
+ { CCI_REG8(0x33b0), 0x50 },
+ { CCI_REG8(0x33b2), 0x1a },
+ { CCI_REG8(0x33b3), 0x04 },
};
#define IMX290_NUM_CLK_REGS 2
-static const struct imx290_regval xclk_regs[][IMX290_NUM_CLK_REGS] = {
+static const struct cci_reg_sequence xclk_regs[][IMX290_NUM_CLK_REGS] = {
[IMX290_CLK_37_125] = {
{ IMX290_EXTCK_FREQ, (37125 * 256) / 1000 },
{ IMX290_INCKSEL7, 0x49 },
@@ -339,13 +329,13 @@ static const struct imx290_regval xclk_regs[][IMX290_NUM_CLK_REGS] = {
},
};
-static const struct imx290_regval imx290_global_init_settings_327[] = {
- { IMX290_REG_8BIT(0x309e), 0x4A },
- { IMX290_REG_8BIT(0x309f), 0x4A },
- { IMX290_REG_8BIT(0x313b), 0x61 },
+static const struct cci_reg_sequence imx290_global_init_settings_327[] = {
+ { CCI_REG8(0x309e), 0x4A },
+ { CCI_REG8(0x309f), 0x4A },
+ { CCI_REG8(0x313b), 0x61 },
};
-static const struct imx290_regval imx290_1080p_settings[] = {
+static const struct cci_reg_sequence imx290_1080p_settings[] = {
/* mode settings */
{ IMX290_WINWV_OB, 12 },
{ IMX290_OPB_SIZE_V, 10 },
@@ -353,7 +343,7 @@ static const struct imx290_regval imx290_1080p_settings[] = {
{ IMX290_Y_OUT_SIZE, 1080 },
};
-static const struct imx290_regval imx290_720p_settings[] = {
+static const struct cci_reg_sequence imx290_720p_settings[] = {
/* mode settings */
{ IMX290_WINWV_OB, 6 },
{ IMX290_OPB_SIZE_V, 4 },
@@ -361,7 +351,7 @@ static const struct imx290_regval imx290_720p_settings[] = {
{ IMX290_Y_OUT_SIZE, 720 },
};
-static const struct imx290_regval imx290_10bit_settings[] = {
+static const struct cci_reg_sequence imx290_10bit_settings[] = {
{ IMX290_ADBIT, IMX290_ADBIT_10BIT },
{ IMX290_OUT_CTRL, IMX290_ODBIT_10BIT },
{ IMX290_ADBIT1, IMX290_ADBIT1_10BIT },
@@ -370,7 +360,7 @@ static const struct imx290_regval imx290_10bit_settings[] = {
{ IMX290_CSI_DT_FMT, IMX290_CSI_DT_FMT_RAW10 },
};
-static const struct imx290_regval imx290_12bit_settings[] = {
+static const struct cci_reg_sequence imx290_12bit_settings[] = {
{ IMX290_ADBIT, IMX290_ADBIT_12BIT },
{ IMX290_OUT_CTRL, IMX290_ODBIT_12BIT },
{ IMX290_ADBIT1, IMX290_ADBIT1_12BIT },
@@ -576,7 +566,7 @@ static inline int imx290_modes_num(const struct imx290 *imx290)
struct imx290_format_info {
u32 code[IMX290_VARIANT_MAX];
u8 bpp;
- const struct imx290_regval *regs;
+ const struct cci_reg_sequence *regs;
unsigned int num_regs;
};
@@ -615,63 +605,15 @@ imx290_format_info(const struct imx290 *imx290, u32 code)
return NULL;
}
-/* -----------------------------------------------------------------------------
- * Register access
- */
-
-static int __always_unused imx290_read(struct imx290 *imx290, u32 addr, u32 *value)
-{
- u8 data[3] = { 0, 0, 0 };
- int ret;
-
- ret = regmap_raw_read(imx290->regmap, addr & IMX290_REG_ADDR_MASK,
- data, (addr >> IMX290_REG_SIZE_SHIFT) & 3);
- if (ret < 0) {
- dev_err(imx290->dev, "%u-bit read from 0x%04x failed: %d\n",
- ((addr >> IMX290_REG_SIZE_SHIFT) & 3) * 8,
- addr & IMX290_REG_ADDR_MASK, ret);
- return ret;
- }
-
- *value = get_unaligned_le24(data);
- return 0;
-}
-
-static int imx290_write(struct imx290 *imx290, u32 addr, u32 value, int *err)
-{
- u8 data[3];
- int ret;
-
- if (err && *err)
- return *err;
-
- put_unaligned_le24(value, data);
-
- ret = regmap_raw_write(imx290->regmap, addr & IMX290_REG_ADDR_MASK,
- data, (addr >> IMX290_REG_SIZE_SHIFT) & 3);
- if (ret < 0) {
- dev_err(imx290->dev, "%u-bit write to 0x%04x failed: %d\n",
- ((addr >> IMX290_REG_SIZE_SHIFT) & 3) * 8,
- addr & IMX290_REG_ADDR_MASK, ret);
- if (err)
- *err = ret;
- }
-
- return ret;
-}
-
static int imx290_set_register_array(struct imx290 *imx290,
- const struct imx290_regval *settings,
+ const struct cci_reg_sequence *settings,
unsigned int num_settings)
{
- unsigned int i;
int ret;
- for (i = 0; i < num_settings; ++i, ++settings) {
- ret = imx290_write(imx290, settings->reg, settings->val, NULL);
- if (ret < 0)
- return ret;
- }
+ ret = cci_multi_reg_write(imx290->regmap, settings, num_settings, NULL);
+ if (ret < 0)
+ return ret;
/* Provide 10ms settle time */
usleep_range(10000, 11000);
@@ -689,12 +631,12 @@ static int imx290_set_clock(struct imx290 *imx290)
ret = imx290_set_register_array(imx290, xclk_regs[clk_idx],
IMX290_NUM_CLK_REGS);
- imx290_write(imx290, IMX290_INCKSEL1, clk_cfg->incksel1, &ret);
- imx290_write(imx290, IMX290_INCKSEL2, clk_cfg->incksel2, &ret);
- imx290_write(imx290, IMX290_INCKSEL3, clk_cfg->incksel3, &ret);
- imx290_write(imx290, IMX290_INCKSEL4, clk_cfg->incksel4, &ret);
- imx290_write(imx290, IMX290_INCKSEL5, clk_cfg->incksel5, &ret);
- imx290_write(imx290, IMX290_INCKSEL6, clk_cfg->incksel6, &ret);
+ cci_write(imx290->regmap, IMX290_INCKSEL1, clk_cfg->incksel1, &ret);
+ cci_write(imx290->regmap, IMX290_INCKSEL2, clk_cfg->incksel2, &ret);
+ cci_write(imx290->regmap, IMX290_INCKSEL3, clk_cfg->incksel3, &ret);
+ cci_write(imx290->regmap, IMX290_INCKSEL4, clk_cfg->incksel4, &ret);
+ cci_write(imx290->regmap, IMX290_INCKSEL5, clk_cfg->incksel5, &ret);
+ cci_write(imx290->regmap, IMX290_INCKSEL6, clk_cfg->incksel6, &ret);
return ret;
}
@@ -703,9 +645,11 @@ static int imx290_set_data_lanes(struct imx290 *imx290)
{
int ret = 0;
- imx290_write(imx290, IMX290_PHY_LANE_NUM, imx290->nlanes - 1, &ret);
- imx290_write(imx290, IMX290_CSI_LANE_MODE, imx290->nlanes - 1, &ret);
- imx290_write(imx290, IMX290_FR_FDG_SEL, 0x01, &ret);
+ cci_write(imx290->regmap, IMX290_PHY_LANE_NUM, imx290->nlanes - 1,
+ &ret);
+ cci_write(imx290->regmap, IMX290_CSI_LANE_MODE, imx290->nlanes - 1,
+ &ret);
+ cci_write(imx290->regmap, IMX290_FR_FDG_SEL, 0x01, &ret);
return ret;
}
@@ -716,8 +660,8 @@ static int imx290_set_black_level(struct imx290 *imx290,
{
unsigned int bpp = imx290_format_info(imx290, format->code)->bpp;
- return imx290_write(imx290, IMX290_BLKLEVEL,
- black_level >> (16 - bpp), err);
+ return cci_write(imx290->regmap, IMX290_BLKLEVEL,
+ black_level >> (16 - bpp), err);
}
static int imx290_set_csi_config(struct imx290 *imx290)
@@ -743,15 +687,16 @@ static int imx290_set_csi_config(struct imx290 *imx290)
return -EINVAL;
}
- imx290_write(imx290, IMX290_REPETITION, csi_cfg->repetition, &ret);
- imx290_write(imx290, IMX290_TCLKPOST, csi_cfg->tclkpost, &ret);
- imx290_write(imx290, IMX290_THSZERO, csi_cfg->thszero, &ret);
- imx290_write(imx290, IMX290_THSPREPARE, csi_cfg->thsprepare, &ret);
- imx290_write(imx290, IMX290_TCLKTRAIL, csi_cfg->tclktrail, &ret);
- imx290_write(imx290, IMX290_THSTRAIL, csi_cfg->thstrail, &ret);
- imx290_write(imx290, IMX290_TCLKZERO, csi_cfg->tclkzero, &ret);
- imx290_write(imx290, IMX290_TCLKPREPARE, csi_cfg->tclkprepare, &ret);
- imx290_write(imx290, IMX290_TLPX, csi_cfg->tlpx, &ret);
+ cci_write(imx290->regmap, IMX290_REPETITION, csi_cfg->repetition, &ret);
+ cci_write(imx290->regmap, IMX290_TCLKPOST, csi_cfg->tclkpost, &ret);
+ cci_write(imx290->regmap, IMX290_THSZERO, csi_cfg->thszero, &ret);
+ cci_write(imx290->regmap, IMX290_THSPREPARE, csi_cfg->thsprepare, &ret);
+ cci_write(imx290->regmap, IMX290_TCLKTRAIL, csi_cfg->tclktrail, &ret);
+ cci_write(imx290->regmap, IMX290_THSTRAIL, csi_cfg->thstrail, &ret);
+ cci_write(imx290->regmap, IMX290_TCLKZERO, csi_cfg->tclkzero, &ret);
+ cci_write(imx290->regmap, IMX290_TCLKPREPARE, csi_cfg->tclkprepare,
+ &ret);
+ cci_write(imx290->regmap, IMX290_TLPX, csi_cfg->tlpx, &ret);
return ret;
}
@@ -817,13 +762,12 @@ static int imx290_set_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_ANALOGUE_GAIN:
- ret = imx290_write(imx290, IMX290_GAIN, ctrl->val, NULL);
+ ret = cci_write(imx290->regmap, IMX290_GAIN, ctrl->val, NULL);
break;
case V4L2_CID_VBLANK:
- ret = imx290_write(imx290, IMX290_VMAX,
- ctrl->val + imx290->current_mode->height,
- NULL);
+ ret = cci_write(imx290->regmap, IMX290_VMAX,
+ ctrl->val + imx290->current_mode->height, NULL);
/*
* Due to the way that exposure is programmed in this sensor in
* relation to VMAX, we have to reprogramme it whenever VMAX is
@@ -835,20 +779,20 @@ static int imx290_set_ctrl(struct v4l2_ctrl *ctrl)
fallthrough;
case V4L2_CID_EXPOSURE:
vmax = imx290->vblank->val + imx290->current_mode->height;
- ret = imx290_write(imx290, IMX290_SHS1,
- vmax - ctrl->val - 1, NULL);
+ ret = cci_write(imx290->regmap, IMX290_SHS1,
+ vmax - ctrl->val - 1, NULL);
break;
case V4L2_CID_TEST_PATTERN:
if (ctrl->val) {
imx290_set_black_level(imx290, format, 0, &ret);
usleep_range(10000, 11000);
- imx290_write(imx290, IMX290_PGCTRL,
- (u8)(IMX290_PGCTRL_REGEN |
- IMX290_PGCTRL_THRU |
- IMX290_PGCTRL_MODE(ctrl->val)), &ret);
+ cci_write(imx290->regmap, IMX290_PGCTRL,
+ (u8)(IMX290_PGCTRL_REGEN |
+ IMX290_PGCTRL_THRU |
+ IMX290_PGCTRL_MODE(ctrl->val)), &ret);
} else {
- imx290_write(imx290, IMX290_PGCTRL, 0x00, &ret);
+ cci_write(imx290->regmap, IMX290_PGCTRL, 0x00, &ret);
usleep_range(10000, 11000);
imx290_set_black_level(imx290, format,
IMX290_BLACK_LEVEL_DEFAULT, &ret);
@@ -856,9 +800,8 @@ static int imx290_set_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_HBLANK:
- ret = imx290_write(imx290, IMX290_HMAX,
- ctrl->val + imx290->current_mode->width,
- NULL);
+ ret = cci_write(imx290->regmap, IMX290_HMAX,
+ ctrl->val + imx290->current_mode->width, NULL);
break;
case V4L2_CID_HFLIP:
@@ -871,7 +814,7 @@ static int imx290_set_ctrl(struct v4l2_ctrl *ctrl)
reg |= IMX290_HREVERSE;
if (imx290->vflip->val)
reg |= IMX290_VREVERSE;
- ret = imx290_write(imx290, IMX290_CTRL_07, reg, NULL);
+ ret = cci_write(imx290->regmap, IMX290_CTRL_07, reg, NULL);
break;
}
@@ -902,7 +845,6 @@ static const char * const imx290_test_pattern_menu[] = {
};
static void imx290_ctrl_update(struct imx290 *imx290,
- const struct v4l2_mbus_framefmt *format,
const struct imx290_mode *mode)
{
unsigned int hblank_min = mode->hmax_min - mode->width;
@@ -1074,12 +1016,12 @@ static int imx290_start_streaming(struct imx290 *imx290,
return ret;
}
- imx290_write(imx290, IMX290_STANDBY, 0x00, &ret);
+ cci_write(imx290->regmap, IMX290_STANDBY, 0x00, &ret);
msleep(30);
/* Start streaming */
- return imx290_write(imx290, IMX290_XMSTA, 0x00, &ret);
+ return cci_write(imx290->regmap, IMX290_XMSTA, 0x00, &ret);
}
/* Stop streaming */
@@ -1087,11 +1029,11 @@ static int imx290_stop_streaming(struct imx290 *imx290)
{
int ret = 0;
- imx290_write(imx290, IMX290_STANDBY, 0x01, &ret);
+ cci_write(imx290->regmap, IMX290_STANDBY, 0x01, &ret);
msleep(30);
- return imx290_write(imx290, IMX290_XMSTA, 0x01, &ret);
+ return cci_write(imx290->regmap, IMX290_XMSTA, 0x01, &ret);
}
static int imx290_set_stream(struct v4l2_subdev *sd, int enable)
@@ -1195,7 +1137,7 @@ static int imx290_set_fmt(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
imx290->current_mode = mode;
- imx290_ctrl_update(imx290, &fmt->format, mode);
+ imx290_ctrl_update(imx290, mode);
imx290_exposure_update(imx290, mode);
}
@@ -1300,7 +1242,6 @@ static const struct media_entity_operations imx290_subdev_entity_ops = {
static int imx290_subdev_init(struct imx290 *imx290)
{
struct i2c_client *client = to_i2c_client(imx290->dev);
- const struct v4l2_mbus_framefmt *format;
struct v4l2_subdev_state *state;
int ret;
@@ -1335,8 +1276,7 @@ static int imx290_subdev_init(struct imx290 *imx290)
}
state = v4l2_subdev_lock_and_get_active_state(&imx290->sd);
- format = v4l2_subdev_get_pad_format(&imx290->sd, state, 0);
- imx290_ctrl_update(imx290, format, imx290->current_mode);
+ imx290_ctrl_update(imx290, imx290->current_mode);
v4l2_subdev_unlock_state(state);
return 0;
@@ -1417,11 +1357,6 @@ static const struct dev_pm_ops imx290_pm_ops = {
* Probe & remove
*/
-static const struct regmap_config imx290_regmap_config = {
- .reg_bits = 16,
- .val_bits = 8,
-};
-
static const char * const imx290_supply_name[IMX290_NUM_SUPPLIES] = {
"vdda",
"vddd",
@@ -1588,7 +1523,7 @@ static int imx290_probe(struct i2c_client *client)
return -ENOMEM;
imx290->dev = dev;
- imx290->regmap = devm_regmap_init_i2c(client, &imx290_regmap_config);
+ imx290->regmap = devm_cci_regmap_init_i2c(client, 16);
if (IS_ERR(imx290->regmap)) {
dev_err(dev, "Unable to initialize I2C\n");
return -ENODEV;
diff --git a/drivers/media/i2c/imx296.c b/drivers/media/i2c/imx296.c
index c0b9a5349668..3b4539b622b4 100644
--- a/drivers/media/i2c/imx296.c
+++ b/drivers/media/i2c/imx296.c
@@ -9,7 +9,7 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
index a2140848d0d6..52ebb096e107 100644
--- a/drivers/media/i2c/imx319.c
+++ b/drivers/media/i2c/imx319.c
@@ -2565,7 +2565,7 @@ static struct i2c_driver imx319_i2c_driver = {
module_i2c_driver(imx319_i2c_driver);
MODULE_AUTHOR("Qiu, Tianshu <tian.shu.qiu@intel.com>");
-MODULE_AUTHOR("Rapolu, Chiranjeevi <chiranjeevi.rapolu@intel.com>");
+MODULE_AUTHOR("Rapolu, Chiranjeevi");
MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
MODULE_AUTHOR("Yang, Hyungwoo");
MODULE_DESCRIPTION("Sony imx319 sensor driver");
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
index 6571a98b1e9e..9c79ae8dc842 100644
--- a/drivers/media/i2c/imx355.c
+++ b/drivers/media/i2c/imx355.c
@@ -1851,7 +1851,7 @@ static struct i2c_driver imx355_i2c_driver = {
module_i2c_driver(imx355_i2c_driver);
MODULE_AUTHOR("Qiu, Tianshu <tian.shu.qiu@intel.com>");
-MODULE_AUTHOR("Rapolu, Chiranjeevi <chiranjeevi.rapolu@intel.com>");
+MODULE_AUTHOR("Rapolu, Chiranjeevi");
MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
MODULE_AUTHOR("Yang, Hyungwoo");
MODULE_DESCRIPTION("Sony imx355 sensor driver");
diff --git a/drivers/media/i2c/imx415.c b/drivers/media/i2c/imx415.c
index 4b5d1ee9cc6b..3f00172df3cc 100644
--- a/drivers/media/i2c/imx415.c
+++ b/drivers/media/i2c/imx415.c
@@ -9,7 +9,7 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/media/i2c/isl7998x.c b/drivers/media/i2c/isl7998x.c
index 92e49d95363d..73460688c356 100644
--- a/drivers/media/i2c/isl7998x.c
+++ b/drivers/media/i2c/isl7998x.c
@@ -1611,7 +1611,7 @@ static const struct dev_pm_ops isl7998x_pm_ops = {
static struct i2c_driver isl7998x_i2c_driver = {
.driver = {
.name = "isl7998x",
- .of_match_table = of_match_ptr(isl7998x_of_match),
+ .of_match_table = isl7998x_of_match,
.pm = &isl7998x_pm_ops,
},
.probe = isl7998x_probe,
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index 88c58e0c49aa..20e7c7cf5eeb 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -161,11 +161,12 @@ struct max9286_source {
};
struct max9286_asd {
- struct v4l2_async_subdev base;
+ struct v4l2_async_connection base;
struct max9286_source *source;
};
-static inline struct max9286_asd *to_max9286_asd(struct v4l2_async_subdev *asd)
+static inline struct max9286_asd *
+to_max9286_asd(struct v4l2_async_connection *asd)
{
return container_of(asd, struct max9286_asd, base);
}
@@ -659,7 +660,7 @@ static int max9286_set_pixelrate(struct max9286_priv *priv)
static int max9286_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct max9286_priv *priv = sd_to_max9286(notifier->sd);
struct max9286_source *source = to_max9286_asd(asd)->source;
@@ -721,7 +722,7 @@ static int max9286_notify_bound(struct v4l2_async_notifier *notifier,
static void max9286_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct max9286_priv *priv = sd_to_max9286(notifier->sd);
struct max9286_source *source = to_max9286_asd(asd)->source;
@@ -745,7 +746,7 @@ static int max9286_v4l2_notifier_register(struct max9286_priv *priv)
if (!priv->nsources)
return 0;
- v4l2_async_nf_init(&priv->notifier);
+ v4l2_async_subdev_nf_init(&priv->notifier, &priv->sd);
for_each_source(priv, source) {
unsigned int i = to_index(priv, source);
@@ -765,7 +766,7 @@ static int max9286_v4l2_notifier_register(struct max9286_priv *priv)
priv->notifier.ops = &max9286_notify_ops;
- ret = v4l2_async_subdev_nf_register(&priv->sd, &priv->notifier);
+ ret = v4l2_async_nf_register(&priv->notifier);
if (ret) {
dev_err(dev, "Failed to register subdev_notifier");
v4l2_async_nf_cleanup(&priv->notifier);
@@ -1051,7 +1052,6 @@ static const struct v4l2_ctrl_ops max9286_ctrl_ops = {
static int max9286_v4l2_register(struct max9286_priv *priv)
{
struct device *dev = &priv->client->dev;
- struct fwnode_handle *ep;
int ret;
int i;
@@ -1093,25 +1093,14 @@ static int max9286_v4l2_register(struct max9286_priv *priv)
if (ret)
goto err_async;
- ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), MAX9286_SRC_PAD,
- 0, 0);
- if (!ep) {
- dev_err(dev, "Unable to retrieve endpoint on \"port@4\"\n");
- ret = -ENOENT;
- goto err_async;
- }
- priv->sd.fwnode = ep;
-
ret = v4l2_async_register_subdev(&priv->sd);
if (ret < 0) {
dev_err(dev, "Unable to register subdevice\n");
- goto err_put_node;
+ goto err_async;
}
return 0;
-err_put_node:
- fwnode_handle_put(ep);
err_async:
v4l2_ctrl_handler_free(&priv->ctrls);
max9286_v4l2_notifier_unregister(priv);
@@ -1714,7 +1703,7 @@ MODULE_DEVICE_TABLE(of, max9286_dt_ids);
static struct i2c_driver max9286_i2c_driver = {
.driver = {
.name = "max9286",
- .of_match_table = of_match_ptr(max9286_dt_ids),
+ .of_match_table = max9286_dt_ids,
},
.probe = max9286_probe,
.remove = max9286_remove,
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 2878d328fc01..df8d9c9e6a96 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -1382,7 +1382,7 @@ MODULE_DEVICE_TABLE(i2c, mt9m111_id);
static struct i2c_driver mt9m111_i2c_driver = {
.driver = {
.name = "mt9m111",
- .of_match_table = of_match_ptr(mt9m111_of_match),
+ .of_match_table = mt9m111_of_match,
},
.probe = mt9m111_probe,
.remove = mt9m111_remove,
diff --git a/drivers/media/i2c/og01a1b.c b/drivers/media/i2c/og01a1b.c
index b5948759342e..365ce5684583 100644
--- a/drivers/media/i2c/og01a1b.c
+++ b/drivers/media/i2c/og01a1b.c
@@ -1121,6 +1121,6 @@ static struct i2c_driver og01a1b_i2c_driver = {
module_i2c_driver(og01a1b_i2c_driver);
-MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_AUTHOR("Shawn Tu");
MODULE_DESCRIPTION("OmniVision OG01A1B sensor driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov01a10.c b/drivers/media/i2c/ov01a10.c
index de5bc19e715b..2b9e1b3a3bf4 100644
--- a/drivers/media/i2c/ov01a10.c
+++ b/drivers/media/i2c/ov01a10.c
@@ -992,7 +992,7 @@ static struct i2c_driver ov01a10_i2c_driver = {
.pm = &ov01a10_pm_ops,
.acpi_match_table = ACPI_PTR(ov01a10_acpi_ids),
},
- .probe_new = ov01a10_probe,
+ .probe = ov01a10_probe,
.remove = ov01a10_remove,
};
diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c
index 77bcdcd0824c..637da4df6901 100644
--- a/drivers/media/i2c/ov08x40.c
+++ b/drivers/media/i2c/ov08x40.c
@@ -110,8 +110,6 @@ struct ov08x40_reg_list {
/* Link frequency config */
struct ov08x40_link_freq_config {
- u32 pixels_per_line;
-
/* registers for this link frequency */
struct ov08x40_reg_list reg_list;
};
@@ -128,6 +126,9 @@ struct ov08x40_mode {
u32 vts_def;
u32 vts_min;
+ /* HTS */
+ u32 hts;
+
/* Index of Link frequency config to be used */
u32 link_freq_index;
/* Default register values */
@@ -2391,6 +2392,7 @@ static const struct ov08x40_mode supported_modes[] = {
.height = 2416,
.vts_def = OV08X40_VTS_30FPS,
.vts_min = OV08X40_VTS_30FPS,
+ .hts = 640,
.lanes = 4,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_3856x2416_regs),
@@ -2403,6 +2405,7 @@ static const struct ov08x40_mode supported_modes[] = {
.height = 1208,
.vts_def = OV08X40_VTS_BIN_30FPS,
.vts_min = OV08X40_VTS_BIN_30FPS,
+ .hts = 720,
.lanes = 4,
.reg_list = {
.num_of_regs = ARRAY_SIZE(mode_1928x1208_regs),
@@ -2846,9 +2849,7 @@ ov08x40_set_pad_format(struct v4l2_subdev *sd,
1,
vblank_def);
__v4l2_ctrl_s_ctrl(ov08x->vblank, vblank_def);
- h_blank =
- link_freq_configs[mode->link_freq_index].pixels_per_line
- - ov08x->cur_mode->width;
+ h_blank = ov08x->cur_mode->hts;
__v4l2_ctrl_modify_range(ov08x->hblank, h_blank,
h_blank, 1, h_blank);
}
@@ -3074,8 +3075,7 @@ static int ov08x40_init_controls(struct ov08x40 *ov08x)
OV08X40_VTS_MAX - mode->height, 1,
vblank_def);
- hblank = link_freq_configs[mode->link_freq_index].pixels_per_line -
- mode->width;
+ hblank = ov08x->cur_mode->hts;
ov08x->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov08x40_ctrl_ops,
V4L2_CID_HBLANK,
hblank, hblank, 1, hblank);
@@ -3320,6 +3320,6 @@ static struct i2c_driver ov08x40_i2c_driver = {
module_i2c_driver(ov08x40_i2c_driver);
MODULE_AUTHOR("Jason Chen <jason.z.chen@intel.com>");
-MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_AUTHOR("Shawn Tu");
MODULE_DESCRIPTION("OmniVision OV08X40 sensor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index 3db3e64fa3ff..35652b362347 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -1814,7 +1814,7 @@ static struct i2c_driver ov13858_i2c_driver = {
module_i2c_driver(ov13858_i2c_driver);
MODULE_AUTHOR("Kan, Chris <chris.kan@intel.com>");
-MODULE_AUTHOR("Rapolu, Chiranjeevi <chiranjeevi.rapolu@intel.com>");
+MODULE_AUTHOR("Rapolu, Chiranjeevi");
MODULE_AUTHOR("Yang, Hyungwoo");
MODULE_DESCRIPTION("Omnivision ov13858 sensor driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
index 6110fb1e6bc6..dbc642c5995b 100644
--- a/drivers/media/i2c/ov13b10.c
+++ b/drivers/media/i2c/ov13b10.c
@@ -2,6 +2,9 @@
// Copyright (c) 2021 Intel Corporation.
#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -573,6 +576,11 @@ struct ov13b10 {
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
+
+ struct clk *img_clk;
+ struct regulator *avdd;
+ struct gpio_desc *reset;
+
/* V4L2 Controls */
struct v4l2_ctrl *link_freq;
struct v4l2_ctrl *pixel_rate;
@@ -1051,6 +1059,49 @@ static int ov13b10_identify_module(struct ov13b10 *ov13b)
return 0;
}
+static int ov13b10_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov13b10 *ov13b10 = to_ov13b10(sd);
+
+ gpiod_set_value_cansleep(ov13b10->reset, 1);
+
+ if (ov13b10->avdd)
+ regulator_disable(ov13b10->avdd);
+
+ clk_disable_unprepare(ov13b10->img_clk);
+
+ return 0;
+}
+
+static int ov13b10_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov13b10 *ov13b10 = to_ov13b10(sd);
+ int ret;
+
+ ret = clk_prepare_enable(ov13b10->img_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable imaging clock: %d", ret);
+ return ret;
+ }
+
+ if (ov13b10->avdd) {
+ ret = regulator_enable(ov13b10->avdd);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable avdd: %d", ret);
+ clk_disable_unprepare(ov13b10->img_clk);
+ return ret;
+ }
+ }
+
+ gpiod_set_value_cansleep(ov13b10->reset, 0);
+ /* Wait 5 ms for the sensor to be ready after XSHUTDN assert */
+ usleep_range(5000, 5500);
+
+ return 0;
+}
+
static int ov13b10_start_streaming(struct ov13b10 *ov13b)
{
struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
@@ -1145,7 +1196,7 @@ err_unlock:
return ret;
}
-static int __maybe_unused ov13b10_suspend(struct device *dev)
+static int ov13b10_suspend(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct ov13b10 *ov13b = to_ov13b10(sd);
@@ -1153,26 +1204,35 @@ static int __maybe_unused ov13b10_suspend(struct device *dev)
if (ov13b->streaming)
ov13b10_stop_streaming(ov13b);
+ ov13b10_power_off(dev);
+
return 0;
}
-static int __maybe_unused ov13b10_resume(struct device *dev)
+static int ov13b10_resume(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct ov13b10 *ov13b = to_ov13b10(sd);
int ret;
+ ret = ov13b10_power_on(dev);
+ if (ret)
+ goto pm_fail;
+
if (ov13b->streaming) {
ret = ov13b10_start_streaming(ov13b);
if (ret)
- goto error;
+ goto stop_streaming;
}
return 0;
-error:
+stop_streaming:
ov13b10_stop_streaming(ov13b);
+ ov13b10_power_off(dev);
+pm_fail:
ov13b->streaming = false;
+
return ret;
}
@@ -1317,6 +1377,34 @@ static void ov13b10_free_controls(struct ov13b10 *ov13b)
mutex_destroy(&ov13b->mutex);
}
+static int ov13b10_get_pm_resources(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ int ret;
+
+ ov13b->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ov13b->reset))
+ return dev_err_probe(dev, PTR_ERR(ov13b->reset),
+ "failed to get reset gpio\n");
+
+ ov13b->img_clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(ov13b->img_clk))
+ return dev_err_probe(dev, PTR_ERR(ov13b->img_clk),
+ "failed to get imaging clock\n");
+
+ ov13b->avdd = devm_regulator_get_optional(dev, "avdd");
+ if (IS_ERR(ov13b->avdd)) {
+ ret = PTR_ERR(ov13b->avdd);
+ ov13b->avdd = NULL;
+ if (ret != -ENODEV)
+ return dev_err_probe(dev, ret,
+ "failed to get avdd regulator\n");
+ }
+
+ return 0;
+}
+
static int ov13b10_check_hwcfg(struct device *dev)
{
struct v4l2_fwnode_endpoint bus_cfg = {
@@ -1331,6 +1419,10 @@ static int ov13b10_check_hwcfg(struct device *dev)
if (!fwnode)
return -ENXIO;
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -EPROBE_DEFER;
+
ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
&ext_clk);
if (ret) {
@@ -1344,10 +1436,6 @@ static int ov13b10_check_hwcfg(struct device *dev)
return -EINVAL;
}
- ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
- if (!ep)
- return -ENXIO;
-
ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
fwnode_handle_put(ep);
if (ret)
@@ -1407,13 +1495,23 @@ static int ov13b10_probe(struct i2c_client *client)
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov13b->sd, client, &ov13b10_subdev_ops);
+ ret = ov13b10_get_pm_resources(&client->dev);
+ if (ret)
+ return ret;
+
full_power = acpi_dev_state_d0(&client->dev);
if (full_power) {
+ ret = ov13b10_power_on(&client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to power on\n");
+ return ret;
+ }
+
/* Check module identity */
ret = ov13b10_identify_module(ov13b);
if (ret) {
dev_err(&client->dev, "failed to find sensor: %d\n", ret);
- return ret;
+ goto error_power_off;
}
}
@@ -1422,7 +1520,7 @@ static int ov13b10_probe(struct i2c_client *client)
ret = ov13b10_init_controls(ov13b);
if (ret)
- return ret;
+ goto error_power_off;
/* Initialize subdev */
ov13b->sd.internal_ops = &ov13b10_internal_ops;
@@ -1462,6 +1560,9 @@ error_handler_free:
ov13b10_free_controls(ov13b);
dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+error_power_off:
+ ov13b10_power_off(&client->dev);
+
return ret;
}
@@ -1477,13 +1578,13 @@ static void ov13b10_remove(struct i2c_client *client)
pm_runtime_disable(&client->dev);
}
-static const struct dev_pm_ops ov13b10_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ov13b10_suspend, ov13b10_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(ov13b10_pm_ops, ov13b10_suspend,
+ ov13b10_resume, NULL);
#ifdef CONFIG_ACPI
static const struct acpi_device_id ov13b10_acpi_ids[] = {
{"OVTIDB10"},
+ {"OVTI13B1"},
{ /* sentinel */ }
};
@@ -1493,7 +1594,7 @@ MODULE_DEVICE_TABLE(acpi, ov13b10_acpi_ids);
static struct i2c_driver ov13b10_i2c_driver = {
.driver = {
.name = "ov13b10",
- .pm = &ov13b10_pm_ops,
+ .pm = pm_ptr(&ov13b10_pm_ops),
.acpi_match_table = ACPI_PTR(ov13b10_acpi_ids),
},
.probe = ov13b10_probe,
diff --git a/drivers/media/i2c/ov2640.c b/drivers/media/i2c/ov2640.c
index ec801a81c2d0..bb6c9863a546 100644
--- a/drivers/media/i2c/ov2640.c
+++ b/drivers/media/i2c/ov2640.c
@@ -1296,7 +1296,7 @@ MODULE_DEVICE_TABLE(of, ov2640_of_match);
static struct i2c_driver ov2640_i2c_driver = {
.driver = {
.name = "ov2640",
- .of_match_table = of_match_ptr(ov2640_of_match),
+ .of_match_table = ov2640_of_match,
},
.probe = ov2640_probe,
.remove = ov2640_remove,
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
index d06e9fc37f77..72bab0ff8a36 100644
--- a/drivers/media/i2c/ov2680.c
+++ b/drivers/media/i2c/ov2680.c
@@ -10,61 +10,93 @@
*
*/
-#include <asm/unaligned.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/gpio/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <media/v4l2-cci.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-#define OV2680_XVCLK_VALUE 24000000
+#define OV2680_CHIP_ID 0x2680
-#define OV2680_CHIP_ID 0x2680
+#define OV2680_REG_STREAM_CTRL CCI_REG8(0x0100)
+#define OV2680_REG_SOFT_RESET CCI_REG8(0x0103)
-#define OV2680_REG_STREAM_CTRL 0x0100
-#define OV2680_REG_SOFT_RESET 0x0103
+#define OV2680_REG_CHIP_ID CCI_REG16(0x300a)
+#define OV2680_REG_SC_CMMN_SUB_ID CCI_REG8(0x302a)
+#define OV2680_REG_PLL_MULTIPLIER CCI_REG16(0x3081)
-#define OV2680_REG_CHIP_ID_HIGH 0x300a
-#define OV2680_REG_CHIP_ID_LOW 0x300b
+#define OV2680_REG_EXPOSURE_PK CCI_REG24(0x3500)
+#define OV2680_REG_R_MANUAL CCI_REG8(0x3503)
+#define OV2680_REG_GAIN_PK CCI_REG16(0x350a)
-#define OV2680_REG_R_MANUAL 0x3503
-#define OV2680_REG_GAIN_PK 0x350a
-#define OV2680_REG_EXPOSURE_PK_HIGH 0x3500
-#define OV2680_REG_TIMING_HTS 0x380c
-#define OV2680_REG_TIMING_VTS 0x380e
-#define OV2680_REG_FORMAT1 0x3820
-#define OV2680_REG_FORMAT2 0x3821
+#define OV2680_REG_SENSOR_CTRL_0A CCI_REG8(0x370a)
-#define OV2680_REG_ISP_CTRL00 0x5080
+#define OV2680_REG_HORIZONTAL_START CCI_REG16(0x3800)
+#define OV2680_REG_VERTICAL_START CCI_REG16(0x3802)
+#define OV2680_REG_HORIZONTAL_END CCI_REG16(0x3804)
+#define OV2680_REG_VERTICAL_END CCI_REG16(0x3806)
+#define OV2680_REG_HORIZONTAL_OUTPUT_SIZE CCI_REG16(0x3808)
+#define OV2680_REG_VERTICAL_OUTPUT_SIZE CCI_REG16(0x380a)
+#define OV2680_REG_TIMING_HTS CCI_REG16(0x380c)
+#define OV2680_REG_TIMING_VTS CCI_REG16(0x380e)
+#define OV2680_REG_ISP_X_WIN CCI_REG16(0x3810)
+#define OV2680_REG_ISP_Y_WIN CCI_REG16(0x3812)
+#define OV2680_REG_X_INC CCI_REG8(0x3814)
+#define OV2680_REG_Y_INC CCI_REG8(0x3815)
+#define OV2680_REG_FORMAT1 CCI_REG8(0x3820)
+#define OV2680_REG_FORMAT2 CCI_REG8(0x3821)
-#define OV2680_FRAME_RATE 30
+#define OV2680_REG_ISP_CTRL00 CCI_REG8(0x5080)
-#define OV2680_REG_VALUE_8BIT 1
-#define OV2680_REG_VALUE_16BIT 2
-#define OV2680_REG_VALUE_24BIT 3
+#define OV2680_REG_X_WIN CCI_REG16(0x5704)
+#define OV2680_REG_Y_WIN CCI_REG16(0x5706)
-#define OV2680_WIDTH_MAX 1600
-#define OV2680_HEIGHT_MAX 1200
+#define OV2680_FRAME_RATE 30
-enum ov2680_mode_id {
- OV2680_MODE_QUXGA_800_600,
- OV2680_MODE_720P_1280_720,
- OV2680_MODE_UXGA_1600_1200,
- OV2680_MODE_MAX,
-};
+#define OV2680_NATIVE_WIDTH 1616
+#define OV2680_NATIVE_HEIGHT 1216
+#define OV2680_NATIVE_START_LEFT 0
+#define OV2680_NATIVE_START_TOP 0
+#define OV2680_ACTIVE_WIDTH 1600
+#define OV2680_ACTIVE_HEIGHT 1200
+#define OV2680_ACTIVE_START_LEFT 8
+#define OV2680_ACTIVE_START_TOP 8
+#define OV2680_MIN_CROP_WIDTH 2
+#define OV2680_MIN_CROP_HEIGHT 2
-struct reg_value {
- u16 reg_addr;
- u8 val;
-};
+/* Fixed pre-div of 1/2 */
+#define OV2680_PLL_PREDIV0 2
+
+/* Pre-div configurable through reg 0x3080, left at its default of 0x02 : 1/2 */
+#define OV2680_PLL_PREDIV 2
+
+/* 66 MHz pixel clock: 66 MHz / (1704 * 1294) = ~30 fps */
+#define OV2680_PIXELS_PER_LINE 1704
+#define OV2680_LINES_PER_FRAME 1294
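+/*
+ * Worked out from the figures above (for reference): 1704 * 1294 = 2204976
+ * pixel-clock cycles per frame, and 66000000 / 2204976 = ~29.93, i.e. ~30 fps.
+ */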
+
+/* If possible send 16 extra rows / lines to the ISP as padding */
+#define OV2680_END_MARGIN 16
+
+/* Max exposure time is VTS - 8 */
+#define OV2680_INTEGRATION_TIME_MARGIN 8
+
+#define OV2680_DEFAULT_WIDTH 800
+#define OV2680_DEFAULT_HEIGHT 600
+
+/* For enum_frame_size() full-size + binned-/quarter-size */
+#define OV2680_FRAME_SIZES 2
static const char * const ov2680_supply_name[] = {
"DOVDD",
@@ -74,52 +106,74 @@ static const char * const ov2680_supply_name[] = {
#define OV2680_NUM_SUPPLIES ARRAY_SIZE(ov2680_supply_name)
-struct ov2680_mode_info {
- const char *name;
- enum ov2680_mode_id id;
- u32 width;
- u32 height;
- const struct reg_value *reg_data;
- u32 reg_data_size;
+enum {
+ OV2680_19_2_MHZ,
+ OV2680_24_MHZ,
+};
+
+static const unsigned long ov2680_xvclk_freqs[] = {
+ [OV2680_19_2_MHZ] = 19200000,
+ [OV2680_24_MHZ] = 24000000,
+};
+
+static const u8 ov2680_pll_multipliers[] = {
+ [OV2680_19_2_MHZ] = 69,
+ [OV2680_24_MHZ] = 55,
};
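+
+/*
+ * Rough sanity check (assuming the PLL chain combines as
+ * xvclk / PREDIV0 / PREDIV * multiplier): 19.2 MHz / 4 * 69 = 331.2 MHz and
+ * 24 MHz / 4 * 55 = 330 MHz, so both supported clocks land at roughly the
+ * same link frequency.
+ */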
struct ov2680_ctrls {
struct v4l2_ctrl_handler handler;
- struct {
- struct v4l2_ctrl *auto_exp;
- struct v4l2_ctrl *exposure;
- };
- struct {
- struct v4l2_ctrl *auto_gain;
- struct v4l2_ctrl *gain;
- };
-
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *gain;
struct v4l2_ctrl *hflip;
struct v4l2_ctrl *vflip;
struct v4l2_ctrl *test_pattern;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+};
+
+struct ov2680_mode {
+ struct v4l2_rect crop;
+ struct v4l2_mbus_framefmt fmt;
+ struct v4l2_fract frame_interval;
+ bool binning;
+ u16 h_start;
+ u16 v_start;
+ u16 h_end;
+ u16 v_end;
+ u16 h_output_size;
+ u16 v_output_size;
+ u16 hts;
+ u16 vts;
};
struct ov2680_dev {
- struct i2c_client *i2c_client;
+ struct device *dev;
+ struct regmap *regmap;
struct v4l2_subdev sd;
struct media_pad pad;
struct clk *xvclk;
u32 xvclk_freq;
+ u8 pll_mult;
+ s64 link_freq[1];
+ u64 pixel_rate;
struct regulator_bulk_data supplies[OV2680_NUM_SUPPLIES];
- struct gpio_desc *reset_gpio;
+ struct gpio_desc *pwdn_gpio;
struct mutex lock; /* protect members */
- bool mode_pending_changes;
- bool is_enabled;
bool is_streaming;
struct ov2680_ctrls ctrls;
- struct v4l2_mbus_framefmt fmt;
- struct v4l2_fract frame_interval;
+ struct ov2680_mode mode;
+};
- const struct ov2680_mode_info *current_mode;
+static const struct v4l2_rect ov2680_default_crop = {
+ .left = OV2680_ACTIVE_START_LEFT,
+ .top = OV2680_ACTIVE_START_TOP,
+ .width = OV2680_ACTIVE_WIDTH,
+ .height = OV2680_ACTIVE_HEIGHT,
};
static const char * const test_pattern_menu[] = {
@@ -137,426 +191,349 @@ static const int ov2680_hv_flip_bayer_order[] = {
MEDIA_BUS_FMT_SRGGB10_1X10,
};
-static const struct reg_value ov2680_setting_30fps_QUXGA_800_600[] = {
- {0x3086, 0x01}, {0x370a, 0x23}, {0x3808, 0x03}, {0x3809, 0x20},
- {0x380a, 0x02}, {0x380b, 0x58}, {0x380c, 0x06}, {0x380d, 0xac},
- {0x380e, 0x02}, {0x380f, 0x84}, {0x3811, 0x04}, {0x3813, 0x04},
- {0x3814, 0x31}, {0x3815, 0x31}, {0x3820, 0xc0}, {0x4008, 0x00},
- {0x4009, 0x03}, {0x4837, 0x1e}, {0x3501, 0x4e}, {0x3502, 0xe0},
-};
+static const struct reg_sequence ov2680_global_setting[] = {
+ /* MIPI PHY, 0x10 -> 0x1c enable bp_c_hs_en_lat and bp_d_hs_en_lat */
+ {0x3016, 0x1c},
-static const struct reg_value ov2680_setting_30fps_720P_1280_720[] = {
- {0x3086, 0x00}, {0x3808, 0x05}, {0x3809, 0x00}, {0x380a, 0x02},
- {0x380b, 0xd0}, {0x380c, 0x06}, {0x380d, 0xa8}, {0x380e, 0x05},
- {0x380f, 0x0e}, {0x3811, 0x08}, {0x3813, 0x06}, {0x3814, 0x11},
- {0x3815, 0x11}, {0x3820, 0xc0}, {0x4008, 0x00},
-};
+ /* R MANUAL set exposure and gain to manual (hw does not do auto) */
+ {0x3503, 0x03},
-static const struct reg_value ov2680_setting_30fps_UXGA_1600_1200[] = {
- {0x3086, 0x00}, {0x3501, 0x4e}, {0x3502, 0xe0}, {0x3808, 0x06},
- {0x3809, 0x40}, {0x380a, 0x04}, {0x380b, 0xb0}, {0x380c, 0x06},
- {0x380d, 0xa8}, {0x380e, 0x05}, {0x380f, 0x0e}, {0x3811, 0x00},
- {0x3813, 0x00}, {0x3814, 0x11}, {0x3815, 0x11}, {0x3820, 0xc0},
- {0x4008, 0x00}, {0x4837, 0x18}
-};
+ /* Analog control register tweaks */
+ {0x3603, 0x39}, /* Reset value 0x99 */
+ {0x3604, 0x24}, /* Reset value 0x74 */
+ {0x3621, 0x37}, /* Reset value 0x44 */
-static const struct ov2680_mode_info ov2680_mode_init_data = {
- "mode_quxga_800_600", OV2680_MODE_QUXGA_800_600, 800, 600,
- ov2680_setting_30fps_QUXGA_800_600,
- ARRAY_SIZE(ov2680_setting_30fps_QUXGA_800_600),
-};
+ /* Sensor control register tweaks */
+ {0x3701, 0x64}, /* Reset value 0x61 */
+ {0x3705, 0x3c}, /* Reset value 0x21 */
+ {0x370c, 0x50}, /* Reset value 0x10 */
+ {0x370d, 0xc0}, /* Reset value 0x00 */
+ {0x3718, 0x88}, /* Reset value 0x80 */
-static const struct ov2680_mode_info ov2680_mode_data[OV2680_MODE_MAX] = {
- {"mode_quxga_800_600", OV2680_MODE_QUXGA_800_600,
- 800, 600, ov2680_setting_30fps_QUXGA_800_600,
- ARRAY_SIZE(ov2680_setting_30fps_QUXGA_800_600)},
- {"mode_720p_1280_720", OV2680_MODE_720P_1280_720,
- 1280, 720, ov2680_setting_30fps_720P_1280_720,
- ARRAY_SIZE(ov2680_setting_30fps_720P_1280_720)},
- {"mode_uxga_1600_1200", OV2680_MODE_UXGA_1600_1200,
- 1600, 1200, ov2680_setting_30fps_UXGA_1600_1200,
- ARRAY_SIZE(ov2680_setting_30fps_UXGA_1600_1200)},
-};
+ /* PSRAM tweaks */
+ {0x3781, 0x80}, /* Reset value 0x00 */
+ {0x3784, 0x0c}, /* Reset value 0x00, based on OV2680_R1A_AM10.ovt */
+ {0x3789, 0x60}, /* Reset value 0x50 */
-static struct ov2680_dev *to_ov2680_dev(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct ov2680_dev, sd);
-}
+ /* BLC CTRL00 0x01 -> 0x81 set avg_weight to 8 */
+ {0x4000, 0x81},
-static struct device *ov2680_to_dev(struct ov2680_dev *sensor)
-{
- return &sensor->i2c_client->dev;
-}
+ /* Set black level compensation range to 0 - 3 (default 0 - 11) */
+ {0x4008, 0x00},
+ {0x4009, 0x03},
-static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
-{
- return &container_of(ctrl->handler, struct ov2680_dev,
- ctrls.handler)->sd;
-}
+ /* VFIFO R2 0x00 -> 0x02 set Frame reset enable */
+ {0x4602, 0x02},
-static int __ov2680_write_reg(struct ov2680_dev *sensor, u16 reg,
- unsigned int len, u32 val)
-{
- struct i2c_client *client = sensor->i2c_client;
- u8 buf[6];
- int ret;
+ /* MIPI ctrl CLK PREPARE MIN change from 0x26 (38) -> 0x36 (54) */
+ {0x481f, 0x36},
- if (len > 4)
- return -EINVAL;
+ /* MIPI ctrl CLK LPX P MIN change from 0x32 (50) -> 0x36 (54) */
+ {0x4825, 0x36},
- put_unaligned_be16(reg, buf);
- put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
- ret = i2c_master_send(client, buf, len + 2);
- if (ret != len + 2) {
- dev_err(&client->dev, "write error: reg=0x%4x: %d\n", reg, ret);
- return -EIO;
- }
+ /* R ISP CTRL2 0x20 -> 0x30, set sof_sel bit */
+ {0x5002, 0x30},
- return 0;
-}
-
-#define ov2680_write_reg(s, r, v) \
- __ov2680_write_reg(s, r, OV2680_REG_VALUE_8BIT, v)
-
-#define ov2680_write_reg16(s, r, v) \
- __ov2680_write_reg(s, r, OV2680_REG_VALUE_16BIT, v)
-
-#define ov2680_write_reg24(s, r, v) \
- __ov2680_write_reg(s, r, OV2680_REG_VALUE_24BIT, v)
-
-static int __ov2680_read_reg(struct ov2680_dev *sensor, u16 reg,
- unsigned int len, u32 *val)
-{
- struct i2c_client *client = sensor->i2c_client;
- struct i2c_msg msgs[2];
- u8 addr_buf[2] = { reg >> 8, reg & 0xff };
- u8 data_buf[4] = { 0, };
- int ret;
-
- if (len > 4)
- return -EINVAL;
-
- msgs[0].addr = client->addr;
- msgs[0].flags = 0;
- msgs[0].len = ARRAY_SIZE(addr_buf);
- msgs[0].buf = addr_buf;
+ /*
+ * Window CONTROL 0x00 -> 0x01, enable manual window control,
+ * this is necessary for full size flip and mirror support.
+ */
+ {0x5708, 0x01},
- msgs[1].addr = client->addr;
- msgs[1].flags = I2C_M_RD;
- msgs[1].len = len;
- msgs[1].buf = &data_buf[4 - len];
+ /*
+ * DPC CTRL0 0x14 -> 0x3e, set enable_tail, enable_3x3_cluster
+ * and enable_general_tail bits based on OV2680_R1A_AM10.ovt.
+ */
+ {0x5780, 0x3e},
- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret != ARRAY_SIZE(msgs)) {
- dev_err(&client->dev, "read error: reg=0x%4x: %d\n", reg, ret);
- return -EIO;
- }
-
- *val = get_unaligned_be32(data_buf);
-
- return 0;
-}
+ /* DPC MORE CONNECTION CASE THRE 0x0c (12) -> 0x02 (2) */
+ {0x5788, 0x02},
-#define ov2680_read_reg(s, r, v) \
- __ov2680_read_reg(s, r, OV2680_REG_VALUE_8BIT, v)
+ /* DPC GAIN LIST1 0x0f (15) -> 0x08 (8) */
+ {0x578e, 0x08},
-#define ov2680_read_reg16(s, r, v) \
- __ov2680_read_reg(s, r, OV2680_REG_VALUE_16BIT, v)
+ /* DPC GAIN LIST2 0x3f (63) -> 0x0c (12) */
+ {0x578f, 0x0c},
-#define ov2680_read_reg24(s, r, v) \
- __ov2680_read_reg(s, r, OV2680_REG_VALUE_24BIT, v)
+ /* DPC THRE RATIO 0x04 (4) -> 0x00 (0) */
+ {0x5792, 0x00},
+};
-static int ov2680_mod_reg(struct ov2680_dev *sensor, u16 reg, u8 mask, u8 val)
+static struct ov2680_dev *to_ov2680_dev(struct v4l2_subdev *sd)
{
- u32 readval;
- int ret;
-
- ret = ov2680_read_reg(sensor, reg, &readval);
- if (ret < 0)
- return ret;
-
- readval &= ~mask;
- val &= mask;
- val |= readval;
-
- return ov2680_write_reg(sensor, reg, val);
+ return container_of(sd, struct ov2680_dev, sd);
}
-static int ov2680_load_regs(struct ov2680_dev *sensor,
- const struct ov2680_mode_info *mode)
+static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
{
- const struct reg_value *regs = mode->reg_data;
- unsigned int i;
- int ret = 0;
- u16 reg_addr;
- u8 val;
-
- for (i = 0; i < mode->reg_data_size; ++i, ++regs) {
- reg_addr = regs->reg_addr;
- val = regs->val;
-
- ret = ov2680_write_reg(sensor, reg_addr, val);
- if (ret)
- break;
- }
-
- return ret;
+ return &container_of(ctrl->handler, struct ov2680_dev,
+ ctrls.handler)->sd;
}
static void ov2680_power_up(struct ov2680_dev *sensor)
{
- if (!sensor->reset_gpio)
+ if (!sensor->pwdn_gpio)
return;
- gpiod_set_value(sensor->reset_gpio, 0);
+ gpiod_set_value(sensor->pwdn_gpio, 0);
usleep_range(5000, 10000);
}
static void ov2680_power_down(struct ov2680_dev *sensor)
{
- if (!sensor->reset_gpio)
+ if (!sensor->pwdn_gpio)
return;
- gpiod_set_value(sensor->reset_gpio, 1);
+ gpiod_set_value(sensor->pwdn_gpio, 1);
usleep_range(5000, 10000);
}
-static int ov2680_bayer_order(struct ov2680_dev *sensor)
+static void ov2680_set_bayer_order(struct ov2680_dev *sensor,
+ struct v4l2_mbus_framefmt *fmt)
{
- u32 format1;
- u32 format2;
- u32 hv_flip;
- int ret;
-
- ret = ov2680_read_reg(sensor, OV2680_REG_FORMAT1, &format1);
- if (ret < 0)
- return ret;
+ int hv_flip = 0;
- ret = ov2680_read_reg(sensor, OV2680_REG_FORMAT2, &format2);
- if (ret < 0)
- return ret;
+ if (sensor->ctrls.vflip && sensor->ctrls.vflip->val)
+ hv_flip += 1;
- hv_flip = (format2 & BIT(2) << 1) | (format1 & BIT(2));
+ if (sensor->ctrls.hflip && sensor->ctrls.hflip->val)
+ hv_flip += 2;
- sensor->fmt.code = ov2680_hv_flip_bayer_order[hv_flip];
-
- return 0;
+ fmt->code = ov2680_hv_flip_bayer_order[hv_flip];
}
-static int ov2680_vflip_enable(struct ov2680_dev *sensor)
+static struct v4l2_mbus_framefmt *
+__ov2680_get_pad_format(struct ov2680_dev *sensor,
+ struct v4l2_subdev_state *state,
+ unsigned int pad,
+ enum v4l2_subdev_format_whence which)
{
- int ret;
-
- ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1, BIT(2), BIT(2));
- if (ret < 0)
- return ret;
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&sensor->sd, state, pad);
- return ov2680_bayer_order(sensor);
+ return &sensor->mode.fmt;
}
-static int ov2680_vflip_disable(struct ov2680_dev *sensor)
+static struct v4l2_rect *
+__ov2680_get_pad_crop(struct ov2680_dev *sensor,
+ struct v4l2_subdev_state *state,
+ unsigned int pad,
+ enum v4l2_subdev_format_whence which)
{
- int ret;
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(&sensor->sd, state, pad);
- ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT1, BIT(2), BIT(0));
- if (ret < 0)
- return ret;
-
- return ov2680_bayer_order(sensor);
+ return &sensor->mode.crop;
}
-static int ov2680_hflip_enable(struct ov2680_dev *sensor)
+static void ov2680_fill_format(struct ov2680_dev *sensor,
+ struct v4l2_mbus_framefmt *fmt,
+ unsigned int width, unsigned int height)
{
- int ret;
-
- ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2, BIT(2), BIT(2));
- if (ret < 0)
- return ret;
-
- return ov2680_bayer_order(sensor);
+ memset(fmt, 0, sizeof(*fmt));
+ fmt->width = width;
+ fmt->height = height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ ov2680_set_bayer_order(sensor, fmt);
}
-static int ov2680_hflip_disable(struct ov2680_dev *sensor)
+static void ov2680_calc_mode(struct ov2680_dev *sensor)
{
- int ret;
-
- ret = ov2680_mod_reg(sensor, OV2680_REG_FORMAT2, BIT(2), BIT(0));
- if (ret < 0)
- return ret;
+ int width = sensor->mode.fmt.width;
+ int height = sensor->mode.fmt.height;
+ int orig_width = width;
+ int orig_height = height;
+
+ if (width <= (sensor->mode.crop.width / 2) &&
+ height <= (sensor->mode.crop.height / 2)) {
+ sensor->mode.binning = true;
+ width *= 2;
+ height *= 2;
+ } else {
+ sensor->mode.binning = false;
+ }
- return ov2680_bayer_order(sensor);
+ sensor->mode.h_start = (sensor->mode.crop.left +
+ (sensor->mode.crop.width - width) / 2) & ~1;
+ sensor->mode.v_start = (sensor->mode.crop.top +
+ (sensor->mode.crop.height - height) / 2) & ~1;
+ sensor->mode.h_end =
+ min(sensor->mode.h_start + width + OV2680_END_MARGIN - 1,
+ OV2680_NATIVE_WIDTH - 1);
+ sensor->mode.v_end =
+ min(sensor->mode.v_start + height + OV2680_END_MARGIN - 1,
+ OV2680_NATIVE_HEIGHT - 1);
+ sensor->mode.h_output_size = orig_width;
+ sensor->mode.v_output_size = orig_height;
+ sensor->mode.hts = OV2680_PIXELS_PER_LINE;
+ sensor->mode.vts = OV2680_LINES_PER_FRAME;
}
-static int ov2680_test_pattern_set(struct ov2680_dev *sensor, int value)
+static int ov2680_set_mode(struct ov2680_dev *sensor)
{
- int ret;
-
- if (!value)
- return ov2680_mod_reg(sensor, OV2680_REG_ISP_CTRL00, BIT(7), 0);
+ u8 sensor_ctrl_0a, inc, fmt1, fmt2;
+ int ret = 0;
- ret = ov2680_mod_reg(sensor, OV2680_REG_ISP_CTRL00, 0x03, value - 1);
- if (ret < 0)
- return ret;
+ if (sensor->mode.binning) {
+ sensor_ctrl_0a = 0x23;
+ inc = 0x31;
+ fmt1 = 0xc2;
+ fmt2 = 0x01;
+ } else {
+ sensor_ctrl_0a = 0x21;
+ inc = 0x11;
+ fmt1 = 0xc0;
+ fmt2 = 0x00;
+ }
- ret = ov2680_mod_reg(sensor, OV2680_REG_ISP_CTRL00, BIT(7), BIT(7));
- if (ret < 0)
- return ret;
+ cci_write(sensor->regmap, OV2680_REG_SENSOR_CTRL_0A,
+ sensor_ctrl_0a, &ret);
+ cci_write(sensor->regmap, OV2680_REG_HORIZONTAL_START,
+ sensor->mode.h_start, &ret);
+ cci_write(sensor->regmap, OV2680_REG_VERTICAL_START,
+ sensor->mode.v_start, &ret);
+ cci_write(sensor->regmap, OV2680_REG_HORIZONTAL_END,
+ sensor->mode.h_end, &ret);
+ cci_write(sensor->regmap, OV2680_REG_VERTICAL_END,
+ sensor->mode.v_end, &ret);
+ cci_write(sensor->regmap, OV2680_REG_HORIZONTAL_OUTPUT_SIZE,
+ sensor->mode.h_output_size, &ret);
+ cci_write(sensor->regmap, OV2680_REG_VERTICAL_OUTPUT_SIZE,
+ sensor->mode.v_output_size, &ret);
+ cci_write(sensor->regmap, OV2680_REG_TIMING_HTS,
+ sensor->mode.hts, &ret);
+ cci_write(sensor->regmap, OV2680_REG_TIMING_VTS,
+ sensor->mode.vts, &ret);
+ cci_write(sensor->regmap, OV2680_REG_ISP_X_WIN, 0, &ret);
+ cci_write(sensor->regmap, OV2680_REG_ISP_Y_WIN, 0, &ret);
+ cci_write(sensor->regmap, OV2680_REG_X_INC, inc, &ret);
+ cci_write(sensor->regmap, OV2680_REG_Y_INC, inc, &ret);
+ cci_write(sensor->regmap, OV2680_REG_X_WIN,
+ sensor->mode.h_output_size, &ret);
+ cci_write(sensor->regmap, OV2680_REG_Y_WIN,
+ sensor->mode.v_output_size, &ret);
+ cci_write(sensor->regmap, OV2680_REG_FORMAT1, fmt1, &ret);
+ cci_write(sensor->regmap, OV2680_REG_FORMAT2, fmt2, &ret);
- return 0;
+ return ret;
}
-static int ov2680_gain_set(struct ov2680_dev *sensor, bool auto_gain)
+static int ov2680_set_vflip(struct ov2680_dev *sensor, s32 val)
{
- struct ov2680_ctrls *ctrls = &sensor->ctrls;
- u32 gain;
int ret;
- ret = ov2680_mod_reg(sensor, OV2680_REG_R_MANUAL, BIT(1),
- auto_gain ? 0 : BIT(1));
+ if (sensor->is_streaming)
+ return -EBUSY;
+
+ ret = cci_update_bits(sensor->regmap, OV2680_REG_FORMAT1,
+ BIT(2), val ? BIT(2) : 0, NULL);
if (ret < 0)
return ret;
- if (auto_gain || !ctrls->gain->is_new)
- return 0;
-
- gain = ctrls->gain->val;
-
- ret = ov2680_write_reg16(sensor, OV2680_REG_GAIN_PK, gain);
-
+ ov2680_set_bayer_order(sensor, &sensor->mode.fmt);
return 0;
}
-static int ov2680_gain_get(struct ov2680_dev *sensor)
+static int ov2680_set_hflip(struct ov2680_dev *sensor, s32 val)
{
- u32 gain;
int ret;
- ret = ov2680_read_reg16(sensor, OV2680_REG_GAIN_PK, &gain);
- if (ret)
- return ret;
-
- return gain;
-}
-
-static int ov2680_exposure_set(struct ov2680_dev *sensor, bool auto_exp)
-{
- struct ov2680_ctrls *ctrls = &sensor->ctrls;
- u32 exp;
- int ret;
+ if (sensor->is_streaming)
+ return -EBUSY;
- ret = ov2680_mod_reg(sensor, OV2680_REG_R_MANUAL, BIT(0),
- auto_exp ? 0 : BIT(0));
+ ret = cci_update_bits(sensor->regmap, OV2680_REG_FORMAT2,
+ BIT(2), val ? BIT(2) : 0, NULL);
if (ret < 0)
return ret;
- if (auto_exp || !ctrls->exposure->is_new)
- return 0;
-
- exp = (u32)ctrls->exposure->val;
- exp <<= 4;
-
- return ov2680_write_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH, exp);
+ ov2680_set_bayer_order(sensor, &sensor->mode.fmt);
+ return 0;
}
-static int ov2680_exposure_get(struct ov2680_dev *sensor)
+static int ov2680_test_pattern_set(struct ov2680_dev *sensor, int value)
{
- int ret;
- u32 exp;
+ int ret = 0;
- ret = ov2680_read_reg24(sensor, OV2680_REG_EXPOSURE_PK_HIGH, &exp);
- if (ret)
- return ret;
+ if (!value)
+ return cci_update_bits(sensor->regmap, OV2680_REG_ISP_CTRL00,
+ BIT(7), 0, NULL);
- return exp >> 4;
+ cci_update_bits(sensor->regmap, OV2680_REG_ISP_CTRL00,
+ 0x03, value - 1, &ret);
+ cci_update_bits(sensor->regmap, OV2680_REG_ISP_CTRL00,
+ BIT(7), BIT(7), &ret);
+
+ return ret;
}
-static int ov2680_stream_enable(struct ov2680_dev *sensor)
+static int ov2680_gain_set(struct ov2680_dev *sensor, u32 gain)
{
- return ov2680_write_reg(sensor, OV2680_REG_STREAM_CTRL, 1);
+ return cci_write(sensor->regmap, OV2680_REG_GAIN_PK, gain, NULL);
}
-static int ov2680_stream_disable(struct ov2680_dev *sensor)
+static int ov2680_exposure_set(struct ov2680_dev *sensor, u32 exp)
{
- return ov2680_write_reg(sensor, OV2680_REG_STREAM_CTRL, 0);
+ return cci_write(sensor->regmap, OV2680_REG_EXPOSURE_PK, exp << 4,
+ NULL);
}
-static int ov2680_mode_set(struct ov2680_dev *sensor)
+static int ov2680_stream_enable(struct ov2680_dev *sensor)
{
- struct ov2680_ctrls *ctrls = &sensor->ctrls;
int ret;
- ret = ov2680_gain_set(sensor, false);
+ ret = cci_write(sensor->regmap, OV2680_REG_PLL_MULTIPLIER,
+ sensor->pll_mult, NULL);
if (ret < 0)
return ret;
- ret = ov2680_exposure_set(sensor, false);
+ ret = regmap_multi_reg_write(sensor->regmap,
+ ov2680_global_setting,
+ ARRAY_SIZE(ov2680_global_setting));
if (ret < 0)
return ret;
- ret = ov2680_load_regs(sensor, sensor->current_mode);
+ ret = ov2680_set_mode(sensor);
if (ret < 0)
return ret;
- if (ctrls->auto_gain->val) {
- ret = ov2680_gain_set(sensor, true);
- if (ret < 0)
- return ret;
- }
-
- if (ctrls->auto_exp->val == V4L2_EXPOSURE_AUTO) {
- ret = ov2680_exposure_set(sensor, true);
- if (ret < 0)
- return ret;
- }
-
- sensor->mode_pending_changes = false;
+ /* Restore value of all ctrls */
+ ret = __v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
+ if (ret < 0)
+ return ret;
- return 0;
+ return cci_write(sensor->regmap, OV2680_REG_STREAM_CTRL, 1, NULL);
}
-static int ov2680_mode_restore(struct ov2680_dev *sensor)
+static int ov2680_stream_disable(struct ov2680_dev *sensor)
{
- int ret;
-
- ret = ov2680_load_regs(sensor, &ov2680_mode_init_data);
- if (ret < 0)
- return ret;
-
- return ov2680_mode_set(sensor);
+ return cci_write(sensor->regmap, OV2680_REG_STREAM_CTRL, 0, NULL);
}
static int ov2680_power_off(struct ov2680_dev *sensor)
{
- if (!sensor->is_enabled)
- return 0;
-
clk_disable_unprepare(sensor->xvclk);
ov2680_power_down(sensor);
regulator_bulk_disable(OV2680_NUM_SUPPLIES, sensor->supplies);
- sensor->is_enabled = false;
-
return 0;
}
static int ov2680_power_on(struct ov2680_dev *sensor)
{
- struct device *dev = ov2680_to_dev(sensor);
int ret;
- if (sensor->is_enabled)
- return 0;
-
ret = regulator_bulk_enable(OV2680_NUM_SUPPLIES, sensor->supplies);
if (ret < 0) {
- dev_err(dev, "failed to enable regulators: %d\n", ret);
+ dev_err(sensor->dev, "failed to enable regulators: %d\n", ret);
return ret;
}
- if (!sensor->reset_gpio) {
- ret = ov2680_write_reg(sensor, OV2680_REG_SOFT_RESET, 0x01);
+ if (!sensor->pwdn_gpio) {
+ ret = cci_write(sensor->regmap, OV2680_REG_SOFT_RESET, 0x01,
+ NULL);
if (ret != 0) {
- dev_err(dev, "sensor soft reset failed\n");
- return ret;
+ dev_err(sensor->dev, "sensor soft reset failed\n");
+ goto err_disable_regulators;
}
usleep_range(1000, 2000);
} else {
@@ -566,40 +543,12 @@ static int ov2680_power_on(struct ov2680_dev *sensor)
ret = clk_prepare_enable(sensor->xvclk);
if (ret < 0)
- return ret;
-
- sensor->is_enabled = true;
-
- /* Set clock lane into LP-11 state */
- ov2680_stream_enable(sensor);
- usleep_range(1000, 2000);
- ov2680_stream_disable(sensor);
+ goto err_disable_regulators;
return 0;
-}
-
-static int ov2680_s_power(struct v4l2_subdev *sd, int on)
-{
- struct ov2680_dev *sensor = to_ov2680_dev(sd);
- int ret = 0;
-
- mutex_lock(&sensor->lock);
-
- if (on)
- ret = ov2680_power_on(sensor);
- else
- ret = ov2680_power_off(sensor);
-
- mutex_unlock(&sensor->lock);
-
- if (on && ret == 0) {
- ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
- if (ret < 0)
- return ret;
-
- ret = ov2680_mode_restore(sensor);
- }
+err_disable_regulators:
+ regulator_bulk_disable(OV2680_NUM_SUPPLIES, sensor->supplies);
return ret;
}
@@ -609,7 +558,7 @@ static int ov2680_s_g_frame_interval(struct v4l2_subdev *sd,
struct ov2680_dev *sensor = to_ov2680_dev(sd);
mutex_lock(&sensor->lock);
- fi->interval = sensor->frame_interval;
+ fi->interval = sensor->mode.frame_interval;
mutex_unlock(&sensor->lock);
return 0;
@@ -625,16 +574,20 @@ static int ov2680_s_stream(struct v4l2_subdev *sd, int enable)
if (sensor->is_streaming == !!enable)
goto unlock;
- if (enable && sensor->mode_pending_changes) {
- ret = ov2680_mode_set(sensor);
+ if (enable) {
+ ret = pm_runtime_resume_and_get(sensor->sd.dev);
if (ret < 0)
goto unlock;
- }
- if (enable)
ret = ov2680_stream_enable(sensor);
- else
+ if (ret < 0) {
+ pm_runtime_put(sensor->sd.dev);
+ goto unlock;
+ }
+ } else {
ret = ov2680_stream_disable(sensor);
+ pm_runtime_put(sensor->sd.dev);
+ }
sensor->is_streaming = !!enable;
@@ -650,10 +603,10 @@ static int ov2680_enum_mbus_code(struct v4l2_subdev *sd,
{
struct ov2680_dev *sensor = to_ov2680_dev(sd);
- if (code->pad != 0 || code->index != 0)
+ if (code->index != 0)
return -EINVAL;
- code->code = sensor->fmt.code;
+ code->code = sensor->mode.fmt.code;
return 0;
}
@@ -663,31 +616,16 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *format)
{
struct ov2680_dev *sensor = to_ov2680_dev(sd);
- struct v4l2_mbus_framefmt *fmt = NULL;
- int ret = 0;
+ struct v4l2_mbus_framefmt *fmt;
- if (format->pad != 0)
- return -EINVAL;
+ fmt = __ov2680_get_pad_format(sensor, sd_state, format->pad,
+ format->which);
mutex_lock(&sensor->lock);
-
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- fmt = v4l2_subdev_get_try_format(&sensor->sd, sd_state,
- format->pad);
-#else
- ret = -EINVAL;
-#endif
- } else {
- fmt = &sensor->fmt;
- }
-
- if (fmt)
- format->format = *fmt;
-
+ format->format = *fmt;
mutex_unlock(&sensor->lock);
- return ret;
+ return 0;
}
static int ov2680_set_fmt(struct v4l2_subdev *sd,
@@ -695,15 +633,27 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_format *format)
{
struct ov2680_dev *sensor = to_ov2680_dev(sd);
- struct v4l2_mbus_framefmt *fmt = &format->format;
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
struct v4l2_mbus_framefmt *try_fmt;
-#endif
- const struct ov2680_mode_info *mode;
+ const struct v4l2_rect *crop;
+ unsigned int width, height;
int ret = 0;
- if (format->pad != 0)
- return -EINVAL;
+ crop = __ov2680_get_pad_crop(sensor, sd_state, format->pad,
+ format->which);
+
+ /* Limit set_fmt max size to crop width / height */
+ width = clamp_val(ALIGN(format->format.width, 2),
+ OV2680_MIN_CROP_WIDTH, crop->width);
+ height = clamp_val(ALIGN(format->format.height, 2),
+ OV2680_MIN_CROP_HEIGHT, crop->height);
+
+ ov2680_fill_format(sensor, &format->format, width, height);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ try_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ *try_fmt = format->format;
+ return 0;
+ }
mutex_lock(&sensor->lock);
@@ -712,112 +662,168 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd,
goto unlock;
}
- mode = v4l2_find_nearest_size(ov2680_mode_data,
- ARRAY_SIZE(ov2680_mode_data), width,
- height, fmt->width, fmt->height);
- if (!mode) {
- ret = -EINVAL;
- goto unlock;
- }
+ sensor->mode.fmt = format->format;
+ ov2680_calc_mode(sensor);
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
- try_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
- format->format = *try_fmt;
-#endif
- goto unlock;
+unlock:
+ mutex_unlock(&sensor->lock);
+
+ return ret;
+}
+
+static int ov2680_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ mutex_lock(&sensor->lock);
+ sel->r = *__ov2680_get_pad_crop(sensor, state, sel->pad,
+ sel->which);
+ mutex_unlock(&sensor->lock);
+ break;
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = OV2680_NATIVE_WIDTH;
+ sel->r.height = OV2680_NATIVE_HEIGHT;
+ break;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ sel->r = ov2680_default_crop;
+ break;
+ default:
+ return -EINVAL;
}
- fmt->width = mode->width;
- fmt->height = mode->height;
- fmt->code = sensor->fmt.code;
- fmt->colorspace = sensor->fmt.colorspace;
+ return 0;
+}
- sensor->current_mode = mode;
- sensor->fmt = format->format;
- sensor->mode_pending_changes = true;
+static int ov2680_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ struct v4l2_mbus_framefmt *format;
+ struct v4l2_rect *crop;
+ struct v4l2_rect rect;
-unlock:
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ /*
+ * Clamp the boundaries of the crop rectangle to the size of the sensor
+ * pixel array. Align to multiples of 2 to ensure the Bayer pattern isn't
+ * disrupted.
+ */
+ rect.left = clamp_val(ALIGN(sel->r.left, 2),
+ OV2680_NATIVE_START_LEFT, OV2680_NATIVE_WIDTH);
+ rect.top = clamp_val(ALIGN(sel->r.top, 2),
+ OV2680_NATIVE_START_TOP, OV2680_NATIVE_HEIGHT);
+ rect.width = clamp_val(ALIGN(sel->r.width, 2),
+ OV2680_MIN_CROP_WIDTH, OV2680_NATIVE_WIDTH);
+ rect.height = clamp_val(ALIGN(sel->r.height, 2),
+ OV2680_MIN_CROP_HEIGHT, OV2680_NATIVE_HEIGHT);
+
+ /* Make sure the crop rectangle isn't outside the bounds of the array */
+ rect.width = min_t(unsigned int, rect.width,
+ OV2680_NATIVE_WIDTH - rect.left);
+ rect.height = min_t(unsigned int, rect.height,
+ OV2680_NATIVE_HEIGHT - rect.top);
+
+ crop = __ov2680_get_pad_crop(sensor, state, sel->pad, sel->which);
+
+ mutex_lock(&sensor->lock);
+ if (rect.width != crop->width || rect.height != crop->height) {
+ /*
+ * Reset the output image size if the crop rectangle size has
+ * been modified.
+ */
+ format = __ov2680_get_pad_format(sensor, state, sel->pad,
+ sel->which);
+ format->width = rect.width;
+ format->height = rect.height;
+ }
+
+ *crop = rect;
mutex_unlock(&sensor->lock);
- return ret;
+ sel->r = rect;
+
+ return 0;
}
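
The set_selection handler above rounds the requested crop rectangle to even coordinates and clamps it inside the pixel array before applying it, using the kernel's ALIGN(), clamp_val() and min_t() helpers. A minimal user-space sketch of that arithmetic, with local stand-ins for the helpers and illustrative bounds rather than the real OV2680 limits:

	#include <stdio.h>

	/* Local stand-ins for the kernel's ALIGN(x, 2) and clamp_val() helpers. */
	#define ALIGN2(x)		(((x) + 1U) & ~1U)
	#define CLAMP(x, lo, hi)	((x) < (lo) ? (lo) : ((x) > (hi) ? (hi) : (x)))

	int main(void)
	{
		/* Illustrative bounds, not the real OV2680 limits. */
		const unsigned int native_w = 1616, native_h = 1216;
		const unsigned int min_w = 96, min_h = 96;
		unsigned int left = 101, top = 51, width = 1700, height = 900;

		left   = CLAMP(ALIGN2(left), 0U, native_w);
		top    = CLAMP(ALIGN2(top), 0U, native_h);
		width  = CLAMP(ALIGN2(width), min_w, native_w);
		height = CLAMP(ALIGN2(height), min_h, native_h);

		/* Keep the rectangle inside the pixel array. */
		if (width > native_w - left)
			width = native_w - left;
		if (height > native_h - top)
			height = native_h - top;

		printf("crop: (%u,%u) %ux%u\n", left, top, width, height);
		return 0;
	}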
static int ov2680_init_cfg(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state)
{
- struct v4l2_subdev_format fmt = {
- .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
- : V4L2_SUBDEV_FORMAT_ACTIVE,
- .format = {
- .width = 800,
- .height = 600,
- }
- };
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
- return ov2680_set_fmt(sd, sd_state, &fmt);
+ sd_state->pads[0].try_crop = ov2680_default_crop;
+
+ ov2680_fill_format(sensor, &sd_state->pads[0].try_fmt,
+ OV2680_DEFAULT_WIDTH, OV2680_DEFAULT_HEIGHT);
+ return 0;
}
static int ov2680_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
- int index = fse->index;
+ struct ov2680_dev *sensor = to_ov2680_dev(sd);
+ struct v4l2_rect *crop;
+
+ if (fse->index >= OV2680_FRAME_SIZES)
+ return -EINVAL;
- if (index >= OV2680_MODE_MAX || index < 0)
+ crop = __ov2680_get_pad_crop(sensor, sd_state, fse->pad, fse->which);
+ if (!crop)
return -EINVAL;
- fse->min_width = ov2680_mode_data[index].width;
- fse->min_height = ov2680_mode_data[index].height;
- fse->max_width = ov2680_mode_data[index].width;
- fse->max_height = ov2680_mode_data[index].height;
+ fse->min_width = crop->width / (fse->index + 1);
+ fse->min_height = crop->height / (fse->index + 1);
+ fse->max_width = fse->min_width;
+ fse->max_height = fse->min_height;
return 0;
}
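
Frame sizes are now derived from the active crop rectangle instead of a fixed mode table: entry N reports the crop size divided by N + 1. Assuming OV2680_FRAME_SIZES is 2 and a 1600x1200 crop, the enumeration would return:

	index 0: 1600 x 1200	/* crop / 1 */
	index 1:  800 x  600	/* crop / 2, i.e. the binned/skipped size */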
-static int ov2680_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_interval_enum *fie)
+static bool ov2680_valid_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval_enum *fie)
{
- struct v4l2_fract tpf;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .pad = fie->pad,
+ .which = fie->which,
+ };
+ int i;
- if (fie->index >= OV2680_MODE_MAX || fie->width > OV2680_WIDTH_MAX ||
- fie->height > OV2680_HEIGHT_MAX ||
- fie->which > V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
+ for (i = 0; i < OV2680_FRAME_SIZES; i++) {
+ fse.index = i;
- tpf.denominator = OV2680_FRAME_RATE;
- tpf.numerator = 1;
+ if (ov2680_enum_frame_size(sd, sd_state, &fse))
+ return false;
- fie->interval = tpf;
+ if (fie->width == fse.min_width &&
+ fie->height == fse.min_height)
+ return true;
+ }
- return 0;
+ return false;
}
-static int ov2680_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+static int ov2680_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_interval_enum *fie)
{
- struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
struct ov2680_dev *sensor = to_ov2680_dev(sd);
- struct ov2680_ctrls *ctrls = &sensor->ctrls;
- int val;
- if (!sensor->is_enabled)
- return 0;
+ /* Only 1 framerate */
+ if (fie->index || !ov2680_valid_frame_size(sd, sd_state, fie))
+ return -EINVAL;
- switch (ctrl->id) {
- case V4L2_CID_GAIN:
- val = ov2680_gain_get(sensor);
- if (val < 0)
- return val;
- ctrls->gain->val = val;
- break;
- case V4L2_CID_EXPOSURE:
- val = ov2680_exposure_get(sensor);
- if (val < 0)
- return val;
- ctrls->exposure->val = val;
- break;
- }
+ fie->interval = sensor->mode.frame_interval;
return 0;
}
@@ -826,52 +832,43 @@ static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
struct ov2680_dev *sensor = to_ov2680_dev(sd);
- struct ov2680_ctrls *ctrls = &sensor->ctrls;
+ int ret;
- if (!sensor->is_enabled)
+ /* Only apply changes to the controls if the device is powered up */
+ if (!pm_runtime_get_if_in_use(sensor->sd.dev)) {
+ ov2680_set_bayer_order(sensor, &sensor->mode.fmt);
return 0;
+ }
switch (ctrl->id) {
- case V4L2_CID_AUTOGAIN:
- return ov2680_gain_set(sensor, !!ctrl->val);
- case V4L2_CID_GAIN:
- return ov2680_gain_set(sensor, !!ctrls->auto_gain->val);
- case V4L2_CID_EXPOSURE_AUTO:
- return ov2680_exposure_set(sensor, !!ctrl->val);
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov2680_gain_set(sensor, ctrl->val);
+ break;
case V4L2_CID_EXPOSURE:
- return ov2680_exposure_set(sensor, !!ctrls->auto_exp->val);
+ ret = ov2680_exposure_set(sensor, ctrl->val);
+ break;
case V4L2_CID_VFLIP:
- if (sensor->is_streaming)
- return -EBUSY;
- if (ctrl->val)
- return ov2680_vflip_enable(sensor);
- else
- return ov2680_vflip_disable(sensor);
+ ret = ov2680_set_vflip(sensor, ctrl->val);
+ break;
case V4L2_CID_HFLIP:
- if (sensor->is_streaming)
- return -EBUSY;
- if (ctrl->val)
- return ov2680_hflip_enable(sensor);
- else
- return ov2680_hflip_disable(sensor);
+ ret = ov2680_set_hflip(sensor, ctrl->val);
+ break;
case V4L2_CID_TEST_PATTERN:
- return ov2680_test_pattern_set(sensor, ctrl->val);
+ ret = ov2680_test_pattern_set(sensor, ctrl->val);
+ break;
default:
+ ret = -EINVAL;
break;
}
- return -EINVAL;
+ pm_runtime_put(sensor->sd.dev);
+ return ret;
}
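
The s_ctrl handler above only touches the hardware when the sensor is already powered up; otherwise the new value is merely cached and programmed later by the resume / stream-on path. A minimal sketch of that pattern for a generic sensor driver, with a hypothetical my_sensor state structure standing in for the real driver data:

	#include <linux/container_of.h>
	#include <linux/pm_runtime.h>
	#include <media/v4l2-ctrls.h>

	struct my_sensor {				/* hypothetical driver state */
		struct device *dev;
		struct v4l2_ctrl_handler handler;
	};

	static int my_sensor_s_ctrl(struct v4l2_ctrl *ctrl)
	{
		struct my_sensor *sensor = container_of(ctrl->handler,
							struct my_sensor, handler);
		int ret;

		/* Powered down: only cache the value, resume will program it. */
		if (!pm_runtime_get_if_in_use(sensor->dev))
			return 0;

		switch (ctrl->id) {
		case V4L2_CID_EXPOSURE:
			ret = 0;		/* program the exposure registers here */
			break;
		default:
			ret = -EINVAL;
			break;
		}

		pm_runtime_put(sensor->dev);
		return ret;
	}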
static const struct v4l2_ctrl_ops ov2680_ctrl_ops = {
- .g_volatile_ctrl = ov2680_g_volatile_ctrl,
.s_ctrl = ov2680_s_ctrl,
};
-static const struct v4l2_subdev_core_ops ov2680_core_ops = {
- .s_power = ov2680_s_power,
-};
-
static const struct v4l2_subdev_video_ops ov2680_video_ops = {
.g_frame_interval = ov2680_s_g_frame_interval,
.s_frame_interval = ov2680_s_g_frame_interval,
@@ -881,54 +878,45 @@ static const struct v4l2_subdev_video_ops ov2680_video_ops = {
static const struct v4l2_subdev_pad_ops ov2680_pad_ops = {
.init_cfg = ov2680_init_cfg,
.enum_mbus_code = ov2680_enum_mbus_code,
- .get_fmt = ov2680_get_fmt,
- .set_fmt = ov2680_set_fmt,
.enum_frame_size = ov2680_enum_frame_size,
.enum_frame_interval = ov2680_enum_frame_interval,
+ .get_fmt = ov2680_get_fmt,
+ .set_fmt = ov2680_set_fmt,
+ .get_selection = ov2680_get_selection,
+ .set_selection = ov2680_set_selection,
};
static const struct v4l2_subdev_ops ov2680_subdev_ops = {
- .core = &ov2680_core_ops,
.video = &ov2680_video_ops,
.pad = &ov2680_pad_ops,
};
static int ov2680_mode_init(struct ov2680_dev *sensor)
{
- const struct ov2680_mode_info *init_mode;
-
/* set initial mode */
- sensor->fmt.code = MEDIA_BUS_FMT_SBGGR10_1X10;
- sensor->fmt.width = 800;
- sensor->fmt.height = 600;
- sensor->fmt.field = V4L2_FIELD_NONE;
- sensor->fmt.colorspace = V4L2_COLORSPACE_SRGB;
-
- sensor->frame_interval.denominator = OV2680_FRAME_RATE;
- sensor->frame_interval.numerator = 1;
+ sensor->mode.crop = ov2680_default_crop;
+ ov2680_fill_format(sensor, &sensor->mode.fmt,
+ OV2680_DEFAULT_WIDTH, OV2680_DEFAULT_HEIGHT);
+ ov2680_calc_mode(sensor);
- init_mode = &ov2680_mode_init_data;
-
- sensor->current_mode = init_mode;
-
- sensor->mode_pending_changes = true;
+ sensor->mode.frame_interval.denominator = OV2680_FRAME_RATE;
+ sensor->mode.frame_interval.numerator = 1;
return 0;
}
static int ov2680_v4l2_register(struct ov2680_dev *sensor)
{
+ struct i2c_client *client = to_i2c_client(sensor->dev);
const struct v4l2_ctrl_ops *ops = &ov2680_ctrl_ops;
struct ov2680_ctrls *ctrls = &sensor->ctrls;
struct v4l2_ctrl_handler *hdl = &ctrls->handler;
+ int exp_max = OV2680_LINES_PER_FRAME - OV2680_INTEGRATION_TIME_MARGIN;
int ret = 0;
- v4l2_i2c_subdev_init(&sensor->sd, sensor->i2c_client,
- &ov2680_subdev_ops);
+ v4l2_i2c_subdev_init(&sensor->sd, client, &ov2680_subdev_ops);
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
sensor->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
-#endif
sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
@@ -936,7 +924,7 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
if (ret < 0)
return ret;
- v4l2_ctrl_handler_init(hdl, 7);
+ v4l2_ctrl_handler_init(hdl, 5);
hdl->lock = &sensor->lock;
@@ -948,30 +936,26 @@ static int ov2680_v4l2_register(struct ov2680_dev *sensor)
ARRAY_SIZE(test_pattern_menu) - 1,
0, 0, test_pattern_menu);
- ctrls->auto_exp = v4l2_ctrl_new_std_menu(hdl, ops,
- V4L2_CID_EXPOSURE_AUTO,
- V4L2_EXPOSURE_MANUAL, 0,
- V4L2_EXPOSURE_AUTO);
-
ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
- 0, 32767, 1, 0);
+ 0, exp_max, 1, exp_max);
- ctrls->auto_gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTOGAIN,
- 0, 1, 1, 1);
- ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, 0, 2047, 1, 0);
+ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN,
+ 0, 1023, 1, 250);
+
+ ctrls->link_freq = v4l2_ctrl_new_int_menu(hdl, NULL, V4L2_CID_LINK_FREQ,
+ 0, 0, sensor->link_freq);
+ ctrls->pixel_rate = v4l2_ctrl_new_std(hdl, NULL, V4L2_CID_PIXEL_RATE,
+ 0, sensor->pixel_rate,
+ 1, sensor->pixel_rate);
if (hdl->error) {
ret = hdl->error;
goto cleanup_entity;
}
- ctrls->gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
- ctrls->exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
ctrls->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
ctrls->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
-
- v4l2_ctrl_auto_cluster(2, &ctrls->auto_gain, 0, true);
- v4l2_ctrl_auto_cluster(2, &ctrls->auto_exp, 1, true);
+ ctrls->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
sensor->sd.ctrl_handler = hdl;
@@ -995,61 +979,153 @@ static int ov2680_get_regulators(struct ov2680_dev *sensor)
for (i = 0; i < OV2680_NUM_SUPPLIES; i++)
sensor->supplies[i].supply = ov2680_supply_name[i];
- return devm_regulator_bulk_get(&sensor->i2c_client->dev,
- OV2680_NUM_SUPPLIES,
- sensor->supplies);
+ return devm_regulator_bulk_get(sensor->dev,
+ OV2680_NUM_SUPPLIES, sensor->supplies);
}
static int ov2680_check_id(struct ov2680_dev *sensor)
{
- struct device *dev = ov2680_to_dev(sensor);
- u32 chip_id;
- int ret;
-
- ov2680_power_on(sensor);
+ u64 chip_id, rev;
+ int ret = 0;
- ret = ov2680_read_reg16(sensor, OV2680_REG_CHIP_ID_HIGH, &chip_id);
+ cci_read(sensor->regmap, OV2680_REG_CHIP_ID, &chip_id, &ret);
+ cci_read(sensor->regmap, OV2680_REG_SC_CMMN_SUB_ID, &rev, &ret);
if (ret < 0) {
- dev_err(dev, "failed to read chip id high\n");
- return -ENODEV;
+ dev_err(sensor->dev, "failed to read chip id\n");
+ return ret;
}
if (chip_id != OV2680_CHIP_ID) {
- dev_err(dev, "chip id: 0x%04x does not match expected 0x%04x\n",
+ dev_err(sensor->dev, "chip id: 0x%04llx does not match expected 0x%04x\n",
chip_id, OV2680_CHIP_ID);
return -ENODEV;
}
+ dev_info(sensor->dev, "sensor_revision id = 0x%llx, rev= %lld\n",
+ chip_id, rev & 0x0f);
+
return 0;
}
static int ov2680_parse_dt(struct ov2680_dev *sensor)
{
- struct device *dev = ov2680_to_dev(sensor);
- int ret;
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct device *dev = sensor->dev;
+ struct fwnode_handle *ep_fwnode;
+ struct gpio_desc *gpio;
+ unsigned int rate = 0;
+ int i, ret;
+
+ /*
+ * Sometimes the fwnode graph is initialized by the bridge driver.
+	 * Bridge drivers doing this may also add GPIO mappings; wait for this.
+ */
+ ep_fwnode = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
+ if (!ep_fwnode)
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "waiting for fwnode graph endpoint\n");
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep_fwnode, &bus_cfg);
+ fwnode_handle_put(ep_fwnode);
+ if (ret)
+ return ret;
- sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
- GPIOD_OUT_HIGH);
- ret = PTR_ERR_OR_ZERO(sensor->reset_gpio);
+ /*
+ * The pin we want is named XSHUTDN in the datasheet. Linux sensor
+ * drivers have standardized on using "powerdown" as con-id name
+ * for powerdown or shutdown pins. Older DTB files use "reset",
+ * so fallback to that if there is no "powerdown" pin.
+	 * so fall back to that if there is no "powerdown" pin.
+ gpio = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH);
+ if (!gpio)
+ gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+
+ ret = PTR_ERR_OR_ZERO(gpio);
if (ret < 0) {
dev_dbg(dev, "error while getting reset gpio: %d\n", ret);
- return ret;
+ goto out_free_bus_cfg;
}
- sensor->xvclk = devm_clk_get(dev, "xvclk");
+ sensor->pwdn_gpio = gpio;
+
+ sensor->xvclk = devm_clk_get_optional(dev, "xvclk");
if (IS_ERR(sensor->xvclk)) {
- dev_err(dev, "xvclk clock missing or invalid\n");
- return PTR_ERR(sensor->xvclk);
+ ret = dev_err_probe(dev, PTR_ERR(sensor->xvclk),
+ "xvclk clock missing or invalid\n");
+ goto out_free_bus_cfg;
}
- sensor->xvclk_freq = clk_get_rate(sensor->xvclk);
- if (sensor->xvclk_freq != OV2680_XVCLK_VALUE) {
- dev_err(dev, "wrong xvclk frequency %d HZ, expected: %d Hz\n",
- sensor->xvclk_freq, OV2680_XVCLK_VALUE);
- return -EINVAL;
+ /*
+ * We could have either a 24MHz or 19.2MHz clock rate from either DT or
+ * ACPI... but we also need to support the weird IPU3 case which will
+ * have an external clock AND a clock-frequency property. Check for the
+ * clock-frequency property and if found, set that rate if we managed
+ * to acquire a clock. This should cover the ACPI case. If the system
+ * uses devicetree then the configured rate should already be set, so
+ * we can just read it.
+ */
+ ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
+ &rate);
+ if (ret && !sensor->xvclk) {
+ dev_err_probe(dev, ret, "invalid clock config\n");
+ goto out_free_bus_cfg;
}
- return 0;
+ if (!ret && sensor->xvclk) {
+ ret = clk_set_rate(sensor->xvclk, rate);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to set clock rate\n");
+ goto out_free_bus_cfg;
+ }
+ }
+
+ sensor->xvclk_freq = rate ?: clk_get_rate(sensor->xvclk);
+
+ for (i = 0; i < ARRAY_SIZE(ov2680_xvclk_freqs); i++) {
+ if (sensor->xvclk_freq == ov2680_xvclk_freqs[i])
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ov2680_xvclk_freqs)) {
+ ret = dev_err_probe(dev, -EINVAL,
+ "unsupported xvclk frequency %d Hz\n",
+ sensor->xvclk_freq);
+ goto out_free_bus_cfg;
+ }
+
+ sensor->pll_mult = ov2680_pll_multipliers[i];
+
+ sensor->link_freq[0] = sensor->xvclk_freq / OV2680_PLL_PREDIV0 /
+ OV2680_PLL_PREDIV * sensor->pll_mult;
+
+ /* CSI-2 is double data rate, bus-format is 10 bpp */
+ sensor->pixel_rate = sensor->link_freq[0] * 2;
+ do_div(sensor->pixel_rate, 10);
+
+ /* Verify bus cfg */
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 1) {
+ ret = dev_err_probe(dev, -EINVAL,
+ "only a 1-lane CSI2 config is supported");
+ goto out_free_bus_cfg;
+ }
+
+ for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
+ if (bus_cfg.link_frequencies[i] == sensor->link_freq[0])
+ break;
+
+ if (bus_cfg.nr_of_link_frequencies == 0 ||
+ bus_cfg.nr_of_link_frequencies == i) {
+ ret = dev_err_probe(dev, -EINVAL,
+ "supported link freq %lld not found\n",
+ sensor->link_freq[0]);
+ goto out_free_bus_cfg;
+ }
+
+out_free_bus_cfg:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+ return ret;
}
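
The probe-time computation above follows the usual CSI-2 D-PHY relationship: the bus is double data rate, so the pixel rate is link_freq * 2 * lanes / bits-per-sample, which for the OV2680's single lane and 10-bit Bayer format reduces to link_freq * 2 / 10. A small standalone sketch with illustrative numbers, not the sensor's actual link frequency:

	#include <stdio.h>

	/* pixel_rate = link_freq * 2 (DDR) * lanes / bits_per_sample */
	static unsigned long long csi2_pixel_rate(unsigned long long link_freq_hz,
						  unsigned int lanes,
						  unsigned int bits_per_sample)
	{
		return link_freq_hz * 2 * lanes / bits_per_sample;
	}

	int main(void)
	{
		/* Illustrative: a 330 MHz link, 1 lane, 10-bit Bayer. */
		printf("%llu pixels/s\n", csi2_pixel_rate(330000000ULL, 1, 10));
		return 0;
	}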
static int ov2680_probe(struct i2c_client *client)
@@ -1062,11 +1138,15 @@ static int ov2680_probe(struct i2c_client *client)
if (!sensor)
return -ENOMEM;
- sensor->i2c_client = client;
+ sensor->dev = &client->dev;
+
+ sensor->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(sensor->regmap))
+ return PTR_ERR(sensor->regmap);
ret = ov2680_parse_dt(sensor);
if (ret < 0)
- return -EINVAL;
+ return ret;
ret = ov2680_mode_init(sensor);
if (ret < 0)
@@ -1080,18 +1160,37 @@ static int ov2680_probe(struct i2c_client *client)
mutex_init(&sensor->lock);
- ret = ov2680_check_id(sensor);
+ /*
+ * Power up and verify the chip now, so that if runtime pm is
+ * disabled the chip is left on and streaming will work.
+ */
+ ret = ov2680_power_on(sensor);
if (ret < 0)
goto lock_destroy;
+ ret = ov2680_check_id(sensor);
+ if (ret < 0)
+ goto err_powerdown;
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_enable(&client->dev);
+
ret = ov2680_v4l2_register(sensor);
if (ret < 0)
- goto lock_destroy;
+ goto err_pm_runtime;
- dev_info(dev, "ov2680 init correctly\n");
+ pm_runtime_set_autosuspend_delay(&client->dev, 1000);
+ pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_put_autosuspend(&client->dev);
return 0;
+err_pm_runtime:
+ pm_runtime_disable(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+err_powerdown:
+ ov2680_power_off(sensor);
lock_destroy:
dev_err(dev, "ov2680 init fail: %d\n", ret);
mutex_destroy(&sensor->lock);
@@ -1108,9 +1207,18 @@ static void ov2680_remove(struct i2c_client *client)
mutex_destroy(&sensor->lock);
media_entity_cleanup(&sensor->sd.entity);
v4l2_ctrl_handler_free(&sensor->ctrls.handler);
+
+ /*
+ * Disable runtime PM. In case runtime PM is disabled in the kernel,
+ * make sure to turn power off manually.
+ */
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ ov2680_power_off(sensor);
+ pm_runtime_set_suspended(&client->dev);
}
-static int __maybe_unused ov2680_suspend(struct device *dev)
+static int ov2680_suspend(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct ov2680_dev *sensor = to_ov2680_dev(sd);
@@ -1118,15 +1226,19 @@ static int __maybe_unused ov2680_suspend(struct device *dev)
if (sensor->is_streaming)
ov2680_stream_disable(sensor);
- return 0;
+ return ov2680_power_off(sensor);
}
-static int __maybe_unused ov2680_resume(struct device *dev)
+static int ov2680_resume(struct device *dev)
{
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct ov2680_dev *sensor = to_ov2680_dev(sd);
int ret;
+ ret = ov2680_power_on(sensor);
+ if (ret < 0)
+ goto stream_disable;
+
if (sensor->is_streaming) {
ret = ov2680_stream_enable(sensor);
if (ret < 0)
@@ -1142,9 +1254,8 @@ stream_disable:
return ret;
}
-static const struct dev_pm_ops ov2680_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ov2680_suspend, ov2680_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(ov2680_pm_ops, ov2680_suspend, ov2680_resume,
+ NULL);
static const struct of_device_id ov2680_dt_ids[] = {
{ .compatible = "ovti,ov2680" },
@@ -1152,11 +1263,18 @@ static const struct of_device_id ov2680_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, ov2680_dt_ids);
+static const struct acpi_device_id ov2680_acpi_ids[] = {
+ { "OVTI2680" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, ov2680_acpi_ids);
+
static struct i2c_driver ov2680_i2c_driver = {
.driver = {
.name = "ov2680",
- .pm = &ov2680_pm_ops,
- .of_match_table = of_match_ptr(ov2680_dt_ids),
+ .pm = pm_sleep_ptr(&ov2680_pm_ops),
+ .of_match_table = ov2680_dt_ids,
+ .acpi_match_table = ov2680_acpi_ids,
},
.probe = ov2680_probe,
.remove = ov2680_remove,
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 158d934733c3..41d4f85470fd 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -1223,7 +1223,7 @@ static struct i2c_driver ov2740_i2c_driver = {
module_i2c_driver(ov2740_i2c_driver);
MODULE_AUTHOR("Qiu, Tianshu <tian.shu.qiu@intel.com>");
-MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_AUTHOR("Shawn Tu");
MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
MODULE_DESCRIPTION("OmniVision OV2740 sensor driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 36b509714c8c..5fe85aa2d2ec 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -13,8 +13,8 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -568,9 +568,7 @@ static const struct reg_value ov5640_init_setting[] = {
{0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0}, {0x3000, 0x00, 0, 0},
{0x3002, 0x1c, 0, 0}, {0x3004, 0xff, 0, 0}, {0x3006, 0xc3, 0, 0},
{0x302e, 0x08, 0, 0}, {0x4300, 0x3f, 0, 0},
- {0x501f, 0x00, 0, 0}, {0x4407, 0x04, 0, 0},
- {0x440e, 0x00, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
- {0x4837, 0x0a, 0, 0}, {0x3824, 0x02, 0, 0},
+ {0x501f, 0x00, 0, 0}, {0x440e, 0x00, 0, 0}, {0x4837, 0x0a, 0, 0},
{0x5000, 0xa7, 0, 0}, {0x5001, 0xa3, 0, 0}, {0x5180, 0xff, 0, 0},
{0x5181, 0xf2, 0, 0}, {0x5182, 0x00, 0, 0}, {0x5183, 0x14, 0, 0},
{0x5184, 0x25, 0, 0}, {0x5185, 0x24, 0, 0}, {0x5186, 0x09, 0, 0},
@@ -634,7 +632,8 @@ static const struct reg_value ov5640_setting_low_res[] = {
{0x3a0a, 0x00, 0, 0}, {0x3a0b, 0xf6, 0, 0}, {0x3a0e, 0x03, 0, 0},
{0x3a0d, 0x04, 0, 0}, {0x3a14, 0x03, 0, 0}, {0x3a15, 0xd8, 0, 0},
{0x4001, 0x02, 0, 0}, {0x4004, 0x02, 0, 0},
- {0x4407, 0x04, 0, 0}, {0x5001, 0xa3, 0, 0},
+ {0x4407, 0x04, 0, 0}, {0x460b, 0x35, 0, 0}, {0x460c, 0x22, 0, 0},
+ {0x3824, 0x02, 0, 0}, {0x5001, 0xa3, 0, 0},
};
static const struct reg_value ov5640_setting_720P_1280_720[] = {
@@ -2453,16 +2452,13 @@ static void ov5640_power(struct ov5640_dev *sensor, bool enable)
static void ov5640_powerup_sequence(struct ov5640_dev *sensor)
{
if (sensor->pwdn_gpio) {
- gpiod_set_value_cansleep(sensor->reset_gpio, 0);
+ gpiod_set_value_cansleep(sensor->reset_gpio, 1);
/* camera power cycle */
ov5640_power(sensor, false);
- usleep_range(5000, 10000);
+ usleep_range(5000, 10000); /* t2 */
ov5640_power(sensor, true);
- usleep_range(5000, 10000);
-
- gpiod_set_value_cansleep(sensor->reset_gpio, 1);
- usleep_range(1000, 2000);
+ usleep_range(1000, 2000); /* t3 */
gpiod_set_value_cansleep(sensor->reset_gpio, 0);
} else {
@@ -2470,7 +2466,7 @@ static void ov5640_powerup_sequence(struct ov5640_dev *sensor)
ov5640_write_reg(sensor, OV5640_REG_SYS_CTRL0,
OV5640_REG_SYS_CTRL0_SW_RST);
}
- usleep_range(20000, 25000);
+ usleep_range(20000, 25000); /* t4 */
/*
* software standby: allows registers programming;
@@ -2543,9 +2539,9 @@ static int ov5640_set_power_mipi(struct ov5640_dev *sensor, bool on)
* "ov5640_set_stream_mipi()")
* [4] = 0 : Power up MIPI HS Tx
* [3] = 0 : Power up MIPI LS Rx
- * [2] = 0 : MIPI interface disabled
+ * [2] = 1 : MIPI interface enabled
*/
- ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x40);
+ ret = ov5640_write_reg(sensor, OV5640_REG_IO_MIPI_CTRL00, 0x44);
if (ret)
return ret;
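
For reference, the old value only had bit 6 set; the new one additionally sets the interface-enable bit described in the comment above:

	0x44 = 0x40 | 0x04 = BIT(6) | BIT(2)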
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index d722348b938b..29e773a997dd 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -2860,7 +2860,7 @@ static struct i2c_driver ov5670_i2c_driver = {
module_i2c_driver(ov5670_i2c_driver);
-MODULE_AUTHOR("Rapolu, Chiranjeevi <chiranjeevi.rapolu@intel.com>");
+MODULE_AUTHOR("Rapolu, Chiranjeevi");
MODULE_AUTHOR("Yang, Hyungwoo");
MODULE_DESCRIPTION("Omnivision ov5670 sensor driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index 700c4b69846f..d5a2a5f82312 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -1442,6 +1442,6 @@ static struct i2c_driver ov5675_i2c_driver = {
module_i2c_driver(ov5675_i2c_driver);
-MODULE_AUTHOR("Shawn Tu <shawnx.tu@intel.com>");
+MODULE_AUTHOR("Shawn Tu");
MODULE_DESCRIPTION("OmniVision OV5675 sensor driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
index 7f9212cce239..488ee6d9d301 100644
--- a/drivers/media/i2c/ov5693.c
+++ b/drivers/media/i2c/ov5693.c
@@ -12,7 +12,6 @@
* Jake Day
*/
-#include <asm/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -23,36 +22,32 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/types.h>
+
+#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
-#define OV5693_REG_8BIT(n) ((1 << 16) | (n))
-#define OV5693_REG_16BIT(n) ((2 << 16) | (n))
-#define OV5693_REG_24BIT(n) ((3 << 16) | (n))
-#define OV5693_REG_SIZE_SHIFT 16
-#define OV5693_REG_ADDR_MASK 0xffff
-
/* System Control */
-#define OV5693_SW_RESET_REG OV5693_REG_8BIT(0x0103)
-#define OV5693_SW_STREAM_REG OV5693_REG_8BIT(0x0100)
+#define OV5693_SW_RESET_REG CCI_REG8(0x0103)
+#define OV5693_SW_STREAM_REG CCI_REG8(0x0100)
#define OV5693_START_STREAMING 0x01
#define OV5693_STOP_STREAMING 0x00
#define OV5693_SW_RESET 0x01
-#define OV5693_REG_CHIP_ID OV5693_REG_16BIT(0x300a)
+#define OV5693_REG_CHIP_ID CCI_REG16(0x300a)
/* Yes, this is right. The datasheet for the OV5693 gives its ID as 0x5690 */
#define OV5693_CHIP_ID 0x5690
/* Exposure */
-#define OV5693_EXPOSURE_CTRL_REG OV5693_REG_24BIT(0x3500)
+#define OV5693_EXPOSURE_CTRL_REG CCI_REG24(0x3500)
#define OV5693_EXPOSURE_CTRL_MASK GENMASK(19, 4)
#define OV5693_INTEGRATION_TIME_MARGIN 8
#define OV5693_EXPOSURE_MIN 1
#define OV5693_EXPOSURE_STEP 1
/* Analogue Gain */
-#define OV5693_GAIN_CTRL_REG OV5693_REG_16BIT(0x350a)
+#define OV5693_GAIN_CTRL_REG CCI_REG16(0x350a)
#define OV5693_GAIN_CTRL_MASK GENMASK(10, 4)
#define OV5693_GAIN_MIN 1
#define OV5693_GAIN_MAX 127
@@ -60,9 +55,9 @@
#define OV5693_GAIN_STEP 1
/* Digital Gain */
-#define OV5693_MWB_RED_GAIN_REG OV5693_REG_16BIT(0x3400)
-#define OV5693_MWB_GREEN_GAIN_REG OV5693_REG_16BIT(0x3402)
-#define OV5693_MWB_BLUE_GAIN_REG OV5693_REG_16BIT(0x3404)
+#define OV5693_MWB_RED_GAIN_REG CCI_REG16(0x3400)
+#define OV5693_MWB_GREEN_GAIN_REG CCI_REG16(0x3402)
+#define OV5693_MWB_BLUE_GAIN_REG CCI_REG16(0x3404)
#define OV5693_MWB_GAIN_MASK GENMASK(11, 0)
#define OV5693_MWB_GAIN_MAX 0x0fff
#define OV5693_DIGITAL_GAIN_MIN 1
@@ -71,36 +66,36 @@
#define OV5693_DIGITAL_GAIN_STEP 1
/* Timing and Format */
-#define OV5693_CROP_START_X_REG OV5693_REG_16BIT(0x3800)
-#define OV5693_CROP_START_Y_REG OV5693_REG_16BIT(0x3802)
-#define OV5693_CROP_END_X_REG OV5693_REG_16BIT(0x3804)
-#define OV5693_CROP_END_Y_REG OV5693_REG_16BIT(0x3806)
-#define OV5693_OUTPUT_SIZE_X_REG OV5693_REG_16BIT(0x3808)
-#define OV5693_OUTPUT_SIZE_Y_REG OV5693_REG_16BIT(0x380a)
-
-#define OV5693_TIMING_HTS_REG OV5693_REG_16BIT(0x380c)
+#define OV5693_CROP_START_X_REG CCI_REG16(0x3800)
+#define OV5693_CROP_START_Y_REG CCI_REG16(0x3802)
+#define OV5693_CROP_END_X_REG CCI_REG16(0x3804)
+#define OV5693_CROP_END_Y_REG CCI_REG16(0x3806)
+#define OV5693_OUTPUT_SIZE_X_REG CCI_REG16(0x3808)
+#define OV5693_OUTPUT_SIZE_Y_REG CCI_REG16(0x380a)
+
+#define OV5693_TIMING_HTS_REG CCI_REG16(0x380c)
#define OV5693_FIXED_PPL 2688U
-#define OV5693_TIMING_VTS_REG OV5693_REG_16BIT(0x380e)
+#define OV5693_TIMING_VTS_REG CCI_REG16(0x380e)
#define OV5693_TIMING_MAX_VTS 0xffff
#define OV5693_TIMING_MIN_VTS 0x04
-#define OV5693_OFFSET_START_X_REG OV5693_REG_16BIT(0x3810)
-#define OV5693_OFFSET_START_Y_REG OV5693_REG_16BIT(0x3812)
+#define OV5693_OFFSET_START_X_REG CCI_REG16(0x3810)
+#define OV5693_OFFSET_START_Y_REG CCI_REG16(0x3812)
-#define OV5693_SUB_INC_X_REG OV5693_REG_8BIT(0x3814)
-#define OV5693_SUB_INC_Y_REG OV5693_REG_8BIT(0x3815)
+#define OV5693_SUB_INC_X_REG CCI_REG8(0x3814)
+#define OV5693_SUB_INC_Y_REG CCI_REG8(0x3815)
-#define OV5693_FORMAT1_REG OV5693_REG_8BIT(0x3820)
+#define OV5693_FORMAT1_REG CCI_REG8(0x3820)
#define OV5693_FORMAT1_FLIP_VERT_ISP_EN BIT(6)
#define OV5693_FORMAT1_FLIP_VERT_SENSOR_EN BIT(1)
#define OV5693_FORMAT1_VBIN_EN BIT(0)
-#define OV5693_FORMAT2_REG OV5693_REG_8BIT(0x3821)
+#define OV5693_FORMAT2_REG CCI_REG8(0x3821)
#define OV5693_FORMAT2_HDR_EN BIT(7)
#define OV5693_FORMAT2_FLIP_HORZ_ISP_EN BIT(2)
#define OV5693_FORMAT2_FLIP_HORZ_SENSOR_EN BIT(1)
#define OV5693_FORMAT2_HBIN_EN BIT(0)
-#define OV5693_ISP_CTRL2_REG OV5693_REG_8BIT(0x5002)
+#define OV5693_ISP_CTRL2_REG CCI_REG8(0x5002)
#define OV5693_ISP_SCALE_ENABLE BIT(7)
/* Pixel Array */
@@ -116,7 +111,7 @@
#define OV5693_MIN_CROP_HEIGHT 2
/* Test Pattern */
-#define OV5693_TEST_PATTERN_REG OV5693_REG_8BIT(0x5e00)
+#define OV5693_TEST_PATTERN_REG CCI_REG8(0x5e00)
#define OV5693_TEST_PATTERN_ENABLE BIT(7)
#define OV5693_TEST_PATTERN_ROLLING BIT(6)
#define OV5693_TEST_PATTERN_RANDOM 0x01
@@ -137,19 +132,9 @@ static const char * const ov5693_supply_names[] = {
#define OV5693_NUM_SUPPLIES ARRAY_SIZE(ov5693_supply_names)
-struct ov5693_reg {
- u32 reg;
- u8 val;
-};
-
-struct ov5693_reg_list {
- u32 num_regs;
- const struct ov5693_reg *regs;
-};
-
struct ov5693_device {
- struct i2c_client *client;
struct device *dev;
+ struct regmap *regmap;
/* Protect against concurrent changes to controls */
struct mutex lock;
@@ -189,156 +174,151 @@ struct ov5693_device {
} ctrls;
};
-static const struct ov5693_reg ov5693_global_regs[] = {
- {OV5693_REG_8BIT(0x3016), 0xf0},
- {OV5693_REG_8BIT(0x3017), 0xf0},
- {OV5693_REG_8BIT(0x3018), 0xf0},
- {OV5693_REG_8BIT(0x3022), 0x01},
- {OV5693_REG_8BIT(0x3028), 0x44},
- {OV5693_REG_8BIT(0x3098), 0x02},
- {OV5693_REG_8BIT(0x3099), 0x19},
- {OV5693_REG_8BIT(0x309a), 0x02},
- {OV5693_REG_8BIT(0x309b), 0x01},
- {OV5693_REG_8BIT(0x309c), 0x00},
- {OV5693_REG_8BIT(0x30a0), 0xd2},
- {OV5693_REG_8BIT(0x30a2), 0x01},
- {OV5693_REG_8BIT(0x30b2), 0x00},
- {OV5693_REG_8BIT(0x30b3), 0x83},
- {OV5693_REG_8BIT(0x30b4), 0x03},
- {OV5693_REG_8BIT(0x30b5), 0x04},
- {OV5693_REG_8BIT(0x30b6), 0x01},
- {OV5693_REG_8BIT(0x3080), 0x01},
- {OV5693_REG_8BIT(0x3104), 0x21},
- {OV5693_REG_8BIT(0x3106), 0x00},
- {OV5693_REG_8BIT(0x3406), 0x01},
- {OV5693_REG_8BIT(0x3503), 0x07},
- {OV5693_REG_8BIT(0x350b), 0x40},
- {OV5693_REG_8BIT(0x3601), 0x0a},
- {OV5693_REG_8BIT(0x3602), 0x38},
- {OV5693_REG_8BIT(0x3612), 0x80},
- {OV5693_REG_8BIT(0x3620), 0x54},
- {OV5693_REG_8BIT(0x3621), 0xc7},
- {OV5693_REG_8BIT(0x3622), 0x0f},
- {OV5693_REG_8BIT(0x3625), 0x10},
- {OV5693_REG_8BIT(0x3630), 0x55},
- {OV5693_REG_8BIT(0x3631), 0xf4},
- {OV5693_REG_8BIT(0x3632), 0x00},
- {OV5693_REG_8BIT(0x3633), 0x34},
- {OV5693_REG_8BIT(0x3634), 0x02},
- {OV5693_REG_8BIT(0x364d), 0x0d},
- {OV5693_REG_8BIT(0x364f), 0xdd},
- {OV5693_REG_8BIT(0x3660), 0x04},
- {OV5693_REG_8BIT(0x3662), 0x10},
- {OV5693_REG_8BIT(0x3663), 0xf1},
- {OV5693_REG_8BIT(0x3665), 0x00},
- {OV5693_REG_8BIT(0x3666), 0x20},
- {OV5693_REG_8BIT(0x3667), 0x00},
- {OV5693_REG_8BIT(0x366a), 0x80},
- {OV5693_REG_8BIT(0x3680), 0xe0},
- {OV5693_REG_8BIT(0x3681), 0x00},
- {OV5693_REG_8BIT(0x3700), 0x42},
- {OV5693_REG_8BIT(0x3701), 0x14},
- {OV5693_REG_8BIT(0x3702), 0xa0},
- {OV5693_REG_8BIT(0x3703), 0xd8},
- {OV5693_REG_8BIT(0x3704), 0x78},
- {OV5693_REG_8BIT(0x3705), 0x02},
- {OV5693_REG_8BIT(0x370a), 0x00},
- {OV5693_REG_8BIT(0x370b), 0x20},
- {OV5693_REG_8BIT(0x370c), 0x0c},
- {OV5693_REG_8BIT(0x370d), 0x11},
- {OV5693_REG_8BIT(0x370e), 0x00},
- {OV5693_REG_8BIT(0x370f), 0x40},
- {OV5693_REG_8BIT(0x3710), 0x00},
- {OV5693_REG_8BIT(0x371a), 0x1c},
- {OV5693_REG_8BIT(0x371b), 0x05},
- {OV5693_REG_8BIT(0x371c), 0x01},
- {OV5693_REG_8BIT(0x371e), 0xa1},
- {OV5693_REG_8BIT(0x371f), 0x0c},
- {OV5693_REG_8BIT(0x3721), 0x00},
- {OV5693_REG_8BIT(0x3724), 0x10},
- {OV5693_REG_8BIT(0x3726), 0x00},
- {OV5693_REG_8BIT(0x372a), 0x01},
- {OV5693_REG_8BIT(0x3730), 0x10},
- {OV5693_REG_8BIT(0x3738), 0x22},
- {OV5693_REG_8BIT(0x3739), 0xe5},
- {OV5693_REG_8BIT(0x373a), 0x50},
- {OV5693_REG_8BIT(0x373b), 0x02},
- {OV5693_REG_8BIT(0x373c), 0x41},
- {OV5693_REG_8BIT(0x373f), 0x02},
- {OV5693_REG_8BIT(0x3740), 0x42},
- {OV5693_REG_8BIT(0x3741), 0x02},
- {OV5693_REG_8BIT(0x3742), 0x18},
- {OV5693_REG_8BIT(0x3743), 0x01},
- {OV5693_REG_8BIT(0x3744), 0x02},
- {OV5693_REG_8BIT(0x3747), 0x10},
- {OV5693_REG_8BIT(0x374c), 0x04},
- {OV5693_REG_8BIT(0x3751), 0xf0},
- {OV5693_REG_8BIT(0x3752), 0x00},
- {OV5693_REG_8BIT(0x3753), 0x00},
- {OV5693_REG_8BIT(0x3754), 0xc0},
- {OV5693_REG_8BIT(0x3755), 0x00},
- {OV5693_REG_8BIT(0x3756), 0x1a},
- {OV5693_REG_8BIT(0x3758), 0x00},
- {OV5693_REG_8BIT(0x3759), 0x0f},
- {OV5693_REG_8BIT(0x376b), 0x44},
- {OV5693_REG_8BIT(0x375c), 0x04},
- {OV5693_REG_8BIT(0x3774), 0x10},
- {OV5693_REG_8BIT(0x3776), 0x00},
- {OV5693_REG_8BIT(0x377f), 0x08},
- {OV5693_REG_8BIT(0x3780), 0x22},
- {OV5693_REG_8BIT(0x3781), 0x0c},
- {OV5693_REG_8BIT(0x3784), 0x2c},
- {OV5693_REG_8BIT(0x3785), 0x1e},
- {OV5693_REG_8BIT(0x378f), 0xf5},
- {OV5693_REG_8BIT(0x3791), 0xb0},
- {OV5693_REG_8BIT(0x3795), 0x00},
- {OV5693_REG_8BIT(0x3796), 0x64},
- {OV5693_REG_8BIT(0x3797), 0x11},
- {OV5693_REG_8BIT(0x3798), 0x30},
- {OV5693_REG_8BIT(0x3799), 0x41},
- {OV5693_REG_8BIT(0x379a), 0x07},
- {OV5693_REG_8BIT(0x379b), 0xb0},
- {OV5693_REG_8BIT(0x379c), 0x0c},
- {OV5693_REG_8BIT(0x3a04), 0x06},
- {OV5693_REG_8BIT(0x3a05), 0x14},
- {OV5693_REG_8BIT(0x3e07), 0x20},
- {OV5693_REG_8BIT(0x4000), 0x08},
- {OV5693_REG_8BIT(0x4001), 0x04},
- {OV5693_REG_8BIT(0x4004), 0x08},
- {OV5693_REG_8BIT(0x4006), 0x20},
- {OV5693_REG_8BIT(0x4008), 0x24},
- {OV5693_REG_8BIT(0x4009), 0x10},
- {OV5693_REG_8BIT(0x4058), 0x00},
- {OV5693_REG_8BIT(0x4101), 0xb2},
- {OV5693_REG_8BIT(0x4307), 0x31},
- {OV5693_REG_8BIT(0x4511), 0x05},
- {OV5693_REG_8BIT(0x4512), 0x01},
- {OV5693_REG_8BIT(0x481f), 0x30},
- {OV5693_REG_8BIT(0x4826), 0x2c},
- {OV5693_REG_8BIT(0x4d02), 0xfd},
- {OV5693_REG_8BIT(0x4d03), 0xf5},
- {OV5693_REG_8BIT(0x4d04), 0x0c},
- {OV5693_REG_8BIT(0x4d05), 0xcc},
- {OV5693_REG_8BIT(0x4837), 0x0a},
- {OV5693_REG_8BIT(0x5003), 0x20},
- {OV5693_REG_8BIT(0x5013), 0x00},
- {OV5693_REG_8BIT(0x5842), 0x01},
- {OV5693_REG_8BIT(0x5843), 0x2b},
- {OV5693_REG_8BIT(0x5844), 0x01},
- {OV5693_REG_8BIT(0x5845), 0x92},
- {OV5693_REG_8BIT(0x5846), 0x01},
- {OV5693_REG_8BIT(0x5847), 0x8f},
- {OV5693_REG_8BIT(0x5848), 0x01},
- {OV5693_REG_8BIT(0x5849), 0x0c},
- {OV5693_REG_8BIT(0x5e10), 0x0c},
- {OV5693_REG_8BIT(0x3820), 0x00},
- {OV5693_REG_8BIT(0x3821), 0x1e},
- {OV5693_REG_8BIT(0x5041), 0x14}
-};
-
-static const struct ov5693_reg_list ov5693_global_setting = {
- .num_regs = ARRAY_SIZE(ov5693_global_regs),
- .regs = ov5693_global_regs,
+static const struct cci_reg_sequence ov5693_global_regs[] = {
+ {CCI_REG8(0x3016), 0xf0},
+ {CCI_REG8(0x3017), 0xf0},
+ {CCI_REG8(0x3018), 0xf0},
+ {CCI_REG8(0x3022), 0x01},
+ {CCI_REG8(0x3028), 0x44},
+ {CCI_REG8(0x3098), 0x02},
+ {CCI_REG8(0x3099), 0x19},
+ {CCI_REG8(0x309a), 0x02},
+ {CCI_REG8(0x309b), 0x01},
+ {CCI_REG8(0x309c), 0x00},
+ {CCI_REG8(0x30a0), 0xd2},
+ {CCI_REG8(0x30a2), 0x01},
+ {CCI_REG8(0x30b2), 0x00},
+ {CCI_REG8(0x30b3), 0x83},
+ {CCI_REG8(0x30b4), 0x03},
+ {CCI_REG8(0x30b5), 0x04},
+ {CCI_REG8(0x30b6), 0x01},
+ {CCI_REG8(0x3080), 0x01},
+ {CCI_REG8(0x3104), 0x21},
+ {CCI_REG8(0x3106), 0x00},
+ {CCI_REG8(0x3406), 0x01},
+ {CCI_REG8(0x3503), 0x07},
+ {CCI_REG8(0x350b), 0x40},
+ {CCI_REG8(0x3601), 0x0a},
+ {CCI_REG8(0x3602), 0x38},
+ {CCI_REG8(0x3612), 0x80},
+ {CCI_REG8(0x3620), 0x54},
+ {CCI_REG8(0x3621), 0xc7},
+ {CCI_REG8(0x3622), 0x0f},
+ {CCI_REG8(0x3625), 0x10},
+ {CCI_REG8(0x3630), 0x55},
+ {CCI_REG8(0x3631), 0xf4},
+ {CCI_REG8(0x3632), 0x00},
+ {CCI_REG8(0x3633), 0x34},
+ {CCI_REG8(0x3634), 0x02},
+ {CCI_REG8(0x364d), 0x0d},
+ {CCI_REG8(0x364f), 0xdd},
+ {CCI_REG8(0x3660), 0x04},
+ {CCI_REG8(0x3662), 0x10},
+ {CCI_REG8(0x3663), 0xf1},
+ {CCI_REG8(0x3665), 0x00},
+ {CCI_REG8(0x3666), 0x20},
+ {CCI_REG8(0x3667), 0x00},
+ {CCI_REG8(0x366a), 0x80},
+ {CCI_REG8(0x3680), 0xe0},
+ {CCI_REG8(0x3681), 0x00},
+ {CCI_REG8(0x3700), 0x42},
+ {CCI_REG8(0x3701), 0x14},
+ {CCI_REG8(0x3702), 0xa0},
+ {CCI_REG8(0x3703), 0xd8},
+ {CCI_REG8(0x3704), 0x78},
+ {CCI_REG8(0x3705), 0x02},
+ {CCI_REG8(0x370a), 0x00},
+ {CCI_REG8(0x370b), 0x20},
+ {CCI_REG8(0x370c), 0x0c},
+ {CCI_REG8(0x370d), 0x11},
+ {CCI_REG8(0x370e), 0x00},
+ {CCI_REG8(0x370f), 0x40},
+ {CCI_REG8(0x3710), 0x00},
+ {CCI_REG8(0x371a), 0x1c},
+ {CCI_REG8(0x371b), 0x05},
+ {CCI_REG8(0x371c), 0x01},
+ {CCI_REG8(0x371e), 0xa1},
+ {CCI_REG8(0x371f), 0x0c},
+ {CCI_REG8(0x3721), 0x00},
+ {CCI_REG8(0x3724), 0x10},
+ {CCI_REG8(0x3726), 0x00},
+ {CCI_REG8(0x372a), 0x01},
+ {CCI_REG8(0x3730), 0x10},
+ {CCI_REG8(0x3738), 0x22},
+ {CCI_REG8(0x3739), 0xe5},
+ {CCI_REG8(0x373a), 0x50},
+ {CCI_REG8(0x373b), 0x02},
+ {CCI_REG8(0x373c), 0x41},
+ {CCI_REG8(0x373f), 0x02},
+ {CCI_REG8(0x3740), 0x42},
+ {CCI_REG8(0x3741), 0x02},
+ {CCI_REG8(0x3742), 0x18},
+ {CCI_REG8(0x3743), 0x01},
+ {CCI_REG8(0x3744), 0x02},
+ {CCI_REG8(0x3747), 0x10},
+ {CCI_REG8(0x374c), 0x04},
+ {CCI_REG8(0x3751), 0xf0},
+ {CCI_REG8(0x3752), 0x00},
+ {CCI_REG8(0x3753), 0x00},
+ {CCI_REG8(0x3754), 0xc0},
+ {CCI_REG8(0x3755), 0x00},
+ {CCI_REG8(0x3756), 0x1a},
+ {CCI_REG8(0x3758), 0x00},
+ {CCI_REG8(0x3759), 0x0f},
+ {CCI_REG8(0x376b), 0x44},
+ {CCI_REG8(0x375c), 0x04},
+ {CCI_REG8(0x3774), 0x10},
+ {CCI_REG8(0x3776), 0x00},
+ {CCI_REG8(0x377f), 0x08},
+ {CCI_REG8(0x3780), 0x22},
+ {CCI_REG8(0x3781), 0x0c},
+ {CCI_REG8(0x3784), 0x2c},
+ {CCI_REG8(0x3785), 0x1e},
+ {CCI_REG8(0x378f), 0xf5},
+ {CCI_REG8(0x3791), 0xb0},
+ {CCI_REG8(0x3795), 0x00},
+ {CCI_REG8(0x3796), 0x64},
+ {CCI_REG8(0x3797), 0x11},
+ {CCI_REG8(0x3798), 0x30},
+ {CCI_REG8(0x3799), 0x41},
+ {CCI_REG8(0x379a), 0x07},
+ {CCI_REG8(0x379b), 0xb0},
+ {CCI_REG8(0x379c), 0x0c},
+ {CCI_REG8(0x3a04), 0x06},
+ {CCI_REG8(0x3a05), 0x14},
+ {CCI_REG8(0x3e07), 0x20},
+ {CCI_REG8(0x4000), 0x08},
+ {CCI_REG8(0x4001), 0x04},
+ {CCI_REG8(0x4004), 0x08},
+ {CCI_REG8(0x4006), 0x20},
+ {CCI_REG8(0x4008), 0x24},
+ {CCI_REG8(0x4009), 0x10},
+ {CCI_REG8(0x4058), 0x00},
+ {CCI_REG8(0x4101), 0xb2},
+ {CCI_REG8(0x4307), 0x31},
+ {CCI_REG8(0x4511), 0x05},
+ {CCI_REG8(0x4512), 0x01},
+ {CCI_REG8(0x481f), 0x30},
+ {CCI_REG8(0x4826), 0x2c},
+ {CCI_REG8(0x4d02), 0xfd},
+ {CCI_REG8(0x4d03), 0xf5},
+ {CCI_REG8(0x4d04), 0x0c},
+ {CCI_REG8(0x4d05), 0xcc},
+ {CCI_REG8(0x4837), 0x0a},
+ {CCI_REG8(0x5003), 0x20},
+ {CCI_REG8(0x5013), 0x00},
+ {CCI_REG8(0x5842), 0x01},
+ {CCI_REG8(0x5843), 0x2b},
+ {CCI_REG8(0x5844), 0x01},
+ {CCI_REG8(0x5845), 0x92},
+ {CCI_REG8(0x5846), 0x01},
+ {CCI_REG8(0x5847), 0x8f},
+ {CCI_REG8(0x5848), 0x01},
+ {CCI_REG8(0x5849), 0x0c},
+ {CCI_REG8(0x5e10), 0x0c},
+ {CCI_REG8(0x3820), 0x00},
+ {CCI_REG8(0x3821), 0x1e},
+ {CCI_REG8(0x5041), 0x14}
};
static const struct v4l2_rect ov5693_default_crop = {
@@ -373,115 +353,6 @@ static const u8 ov5693_test_pattern_bits[] = {
OV5693_TEST_PATTERN_ROLLING,
};
-/* I2C I/O Operations */
-
-static int ov5693_read_reg(struct ov5693_device *ov5693, u32 addr, u32 *value)
-{
- struct i2c_client *client = ov5693->client;
- __be16 reg;
- u8 val[4];
- struct i2c_msg msg[] = {
- {
- .addr = client->addr,
- .flags = 0,
- .len = 2,
- .buf = (u8 *)&reg,
- },
- {
- .addr = client->addr,
- .flags = I2C_M_RD,
- .buf = (u8 *)&val,
- },
- };
- unsigned int len = ((addr >> OV5693_REG_SIZE_SHIFT) & 3);
- unsigned int i;
- int ret;
-
- reg = cpu_to_be16(addr & OV5693_REG_ADDR_MASK);
-
- msg[1].len = len;
-
- ret = i2c_transfer(client->adapter, msg, 2);
- if (ret < 0)
- return dev_err_probe(&client->dev, ret,
- "Failed to read register 0x%04x\n",
- addr & OV5693_REG_ADDR_MASK);
-
- *value = 0;
- for (i = 0; i < len; ++i) {
- *value <<= 8;
- *value |= val[i];
- }
-
- return 0;
-}
-
-static void ov5693_write_reg(struct ov5693_device *ov5693, u32 addr, u32 value,
- int *error)
-{
- struct i2c_client *client = ov5693->client;
- struct {
- __be16 reg;
- u8 val[4];
- } __packed buf;
- struct i2c_msg msg = {
- .addr = client->addr,
- .buf = (u8 *)&buf,
- };
- unsigned int len = ((addr >> OV5693_REG_SIZE_SHIFT) & 3);
- unsigned int i;
- int ret;
-
- if (*error < 0)
- return;
-
- buf.reg = cpu_to_be16(addr & OV5693_REG_ADDR_MASK);
- for (i = 0; i < len; ++i) {
- buf.val[len - i - 1] = value & 0xff;
- value >>= 8;
- }
-
- msg.len = len + 2;
-
- ret = i2c_transfer(client->adapter, &msg, 1);
- if (ret < 0) {
- dev_err(&client->dev, "Failed to write register 0x%04x: %d\n",
- addr & OV5693_REG_ADDR_MASK, ret);
- *error = ret;
- }
-}
-
-static int ov5693_write_reg_array(struct ov5693_device *ov5693,
- const struct ov5693_reg_list *reglist)
-{
- unsigned int i;
- int ret = 0;
-
- for (i = 0; i < reglist->num_regs; i++)
- ov5693_write_reg(ov5693, reglist->regs[i].reg,
- reglist->regs[i].val, &ret);
-
- return ret;
-}
-
-static int ov5693_update_bits(struct ov5693_device *ov5693, u32 address,
- u32 mask, u32 bits)
-{
- u32 value = 0;
- int ret;
-
- ret = ov5693_read_reg(ov5693, address, &value);
- if (ret)
- return ret;
-
- value &= ~mask;
- value |= bits;
-
- ov5693_write_reg(ov5693, address, value, &ret);
-
- return ret;
-}
-
/* V4L2 Controls Functions */
static int ov5693_flip_vert_configure(struct ov5693_device *ov5693,
@@ -491,8 +362,8 @@ static int ov5693_flip_vert_configure(struct ov5693_device *ov5693,
OV5693_FORMAT1_FLIP_VERT_SENSOR_EN;
int ret;
- ret = ov5693_update_bits(ov5693, OV5693_FORMAT1_REG, bits,
- enable ? bits : 0);
+ ret = cci_update_bits(ov5693->regmap, OV5693_FORMAT1_REG, bits,
+ enable ? bits : 0, NULL);
if (ret)
return ret;
@@ -506,8 +377,8 @@ static int ov5693_flip_horz_configure(struct ov5693_device *ov5693,
OV5693_FORMAT2_FLIP_HORZ_SENSOR_EN;
int ret;
- ret = ov5693_update_bits(ov5693, OV5693_FORMAT2_REG, bits,
- enable ? bits : 0);
+ ret = cci_update_bits(ov5693->regmap, OV5693_FORMAT2_REG, bits,
+ enable ? bits : 0, NULL);
if (ret)
return ret;
@@ -516,10 +387,11 @@ static int ov5693_flip_horz_configure(struct ov5693_device *ov5693,
static int ov5693_get_exposure(struct ov5693_device *ov5693, s32 *value)
{
- u32 exposure;
+ u64 exposure;
int ret;
- ret = ov5693_read_reg(ov5693, OV5693_EXPOSURE_CTRL_REG, &exposure);
+ ret = cci_read(ov5693->regmap, OV5693_EXPOSURE_CTRL_REG, &exposure,
+ NULL);
if (ret)
return ret;
@@ -536,17 +408,17 @@ static int ov5693_exposure_configure(struct ov5693_device *ov5693,
exposure = (exposure << 4) & OV5693_EXPOSURE_CTRL_MASK;
- ov5693_write_reg(ov5693, OV5693_EXPOSURE_CTRL_REG, exposure, &ret);
+ cci_write(ov5693->regmap, OV5693_EXPOSURE_CTRL_REG, exposure, &ret);
return ret;
}
static int ov5693_get_gain(struct ov5693_device *ov5693, u32 *gain)
{
- u32 value;
+ u64 value;
int ret;
- ret = ov5693_read_reg(ov5693, OV5693_GAIN_CTRL_REG, &value);
+ ret = cci_read(ov5693->regmap, OV5693_GAIN_CTRL_REG, &value, NULL);
if (ret)
return ret;
@@ -563,9 +435,9 @@ static int ov5693_digital_gain_configure(struct ov5693_device *ov5693,
gain &= OV5693_MWB_GAIN_MASK;
- ov5693_write_reg(ov5693, OV5693_MWB_RED_GAIN_REG, gain, &ret);
- ov5693_write_reg(ov5693, OV5693_MWB_GREEN_GAIN_REG, gain, &ret);
- ov5693_write_reg(ov5693, OV5693_MWB_BLUE_GAIN_REG, gain, &ret);
+ cci_write(ov5693->regmap, OV5693_MWB_RED_GAIN_REG, gain, &ret);
+ cci_write(ov5693->regmap, OV5693_MWB_GREEN_GAIN_REG, gain, &ret);
+ cci_write(ov5693->regmap, OV5693_MWB_BLUE_GAIN_REG, gain, &ret);
return ret;
}
@@ -576,7 +448,7 @@ static int ov5693_analog_gain_configure(struct ov5693_device *ov5693, u32 gain)
gain = (gain << 4) & OV5693_GAIN_CTRL_MASK;
- ov5693_write_reg(ov5693, OV5693_GAIN_CTRL_REG, gain, &ret);
+ cci_write(ov5693->regmap, OV5693_GAIN_CTRL_REG, gain, &ret);
return ret;
}
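
The cci_* helpers used throughout this conversion take an optional error pointer: a call becomes a no-op when *err is already set, so a sequence of register accesses can share one int and check it once at the end, with the first failure's code propagated. A minimal sketch of the pattern, with placeholder register addresses rather than any specific sensor's map:

	#include <linux/bits.h>
	#include <media/v4l2-cci.h>

	static int sensor_write_window(struct regmap *map)
	{
		int ret = 0;

		/* Each call skips the bus access if ret is already non-zero. */
		cci_write(map, CCI_REG16(0x3800), 0, &ret);	/* placeholder regs */
		cci_write(map, CCI_REG16(0x3802), 0, &ret);
		cci_update_bits(map, CCI_REG8(0x3820), BIT(0), BIT(0), &ret);

		return ret;	/* first error, or 0 */
	}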
@@ -586,7 +458,7 @@ static int ov5693_vts_configure(struct ov5693_device *ov5693, u32 vblank)
u16 vts = ov5693->mode.format.height + vblank;
int ret = 0;
- ov5693_write_reg(ov5693, OV5693_TIMING_VTS_REG, vts, &ret);
+ cci_write(ov5693->regmap, OV5693_TIMING_VTS_REG, vts, &ret);
return ret;
}
@@ -595,8 +467,8 @@ static int ov5693_test_pattern_configure(struct ov5693_device *ov5693, u32 idx)
{
int ret = 0;
- ov5693_write_reg(ov5693, OV5693_TEST_PATTERN_REG,
- ov5693_test_pattern_bits[idx], &ret);
+ cci_write(ov5693->regmap, OV5693_TEST_PATTERN_REG,
+ ov5693_test_pattern_bits[idx], &ret);
return ret;
}
@@ -685,59 +557,54 @@ static int ov5693_mode_configure(struct ov5693_device *ov5693)
int ret = 0;
/* Crop Start X */
- ov5693_write_reg(ov5693, OV5693_CROP_START_X_REG, mode->crop.left,
- &ret);
+ cci_write(ov5693->regmap, OV5693_CROP_START_X_REG, mode->crop.left,
+ &ret);
/* Offset X */
- ov5693_write_reg(ov5693, OV5693_OFFSET_START_X_REG, 0, &ret);
+ cci_write(ov5693->regmap, OV5693_OFFSET_START_X_REG, 0, &ret);
/* Output Size X */
- ov5693_write_reg(ov5693, OV5693_OUTPUT_SIZE_X_REG, mode->format.width,
- &ret);
+ cci_write(ov5693->regmap, OV5693_OUTPUT_SIZE_X_REG, mode->format.width,
+ &ret);
/* Crop End X */
- ov5693_write_reg(ov5693, OV5693_CROP_END_X_REG,
- mode->crop.left + mode->crop.width, &ret);
+ cci_write(ov5693->regmap, OV5693_CROP_END_X_REG,
+ mode->crop.left + mode->crop.width, &ret);
/* Horizontal Total Size */
- ov5693_write_reg(ov5693, OV5693_TIMING_HTS_REG, OV5693_FIXED_PPL,
- &ret);
+ cci_write(ov5693->regmap, OV5693_TIMING_HTS_REG, OV5693_FIXED_PPL,
+ &ret);
/* Crop Start Y */
- ov5693_write_reg(ov5693, OV5693_CROP_START_Y_REG, mode->crop.top,
- &ret);
+ cci_write(ov5693->regmap, OV5693_CROP_START_Y_REG, mode->crop.top,
+ &ret);
/* Offset Y */
- ov5693_write_reg(ov5693, OV5693_OFFSET_START_Y_REG, 0, &ret);
+ cci_write(ov5693->regmap, OV5693_OFFSET_START_Y_REG, 0, &ret);
/* Output Size Y */
- ov5693_write_reg(ov5693, OV5693_OUTPUT_SIZE_Y_REG, mode->format.height,
- &ret);
+ cci_write(ov5693->regmap, OV5693_OUTPUT_SIZE_Y_REG, mode->format.height,
+ &ret);
/* Crop End Y */
- ov5693_write_reg(ov5693, OV5693_CROP_END_Y_REG,
- mode->crop.top + mode->crop.height, &ret);
+ cci_write(ov5693->regmap, OV5693_CROP_END_Y_REG,
+ mode->crop.top + mode->crop.height, &ret);
/* Subsample X increase */
- ov5693_write_reg(ov5693, OV5693_SUB_INC_X_REG,
- ((mode->inc_x_odd << 4) & 0xf0) | 0x01, &ret);
+ cci_write(ov5693->regmap, OV5693_SUB_INC_X_REG,
+ ((mode->inc_x_odd << 4) & 0xf0) | 0x01, &ret);
/* Subsample Y increase */
- ov5693_write_reg(ov5693, OV5693_SUB_INC_Y_REG,
- ((mode->inc_y_odd << 4) & 0xf0) | 0x01, &ret);
-
- if (ret)
- return ret;
+ cci_write(ov5693->regmap, OV5693_SUB_INC_Y_REG,
+ ((mode->inc_y_odd << 4) & 0xf0) | 0x01, &ret);
/* Binning */
- ret = ov5693_update_bits(ov5693, OV5693_FORMAT1_REG,
- OV5693_FORMAT1_VBIN_EN,
- mode->binning_y ? OV5693_FORMAT1_VBIN_EN : 0);
- if (ret)
- return ret;
+ cci_update_bits(ov5693->regmap, OV5693_FORMAT1_REG,
+ OV5693_FORMAT1_VBIN_EN,
+ mode->binning_y ? OV5693_FORMAT1_VBIN_EN : 0, &ret);
- ret = ov5693_update_bits(ov5693, OV5693_FORMAT2_REG,
- OV5693_FORMAT2_HBIN_EN,
- mode->binning_x ? OV5693_FORMAT2_HBIN_EN : 0);
+ cci_update_bits(ov5693->regmap, OV5693_FORMAT2_REG,
+ OV5693_FORMAT2_HBIN_EN,
+ mode->binning_x ? OV5693_FORMAT2_HBIN_EN : 0, &ret);
return ret;
}
@@ -746,9 +613,9 @@ static int ov5693_enable_streaming(struct ov5693_device *ov5693, bool enable)
{
int ret = 0;
- ov5693_write_reg(ov5693, OV5693_SW_STREAM_REG,
- enable ? OV5693_START_STREAMING :
- OV5693_STOP_STREAMING, &ret);
+ cci_write(ov5693->regmap, OV5693_SW_STREAM_REG,
+ enable ? OV5693_START_STREAMING : OV5693_STOP_STREAMING,
+ &ret);
return ret;
}
@@ -757,7 +624,7 @@ static int ov5693_sw_reset(struct ov5693_device *ov5693)
{
int ret = 0;
- ov5693_write_reg(ov5693, OV5693_SW_RESET_REG, OV5693_SW_RESET, &ret);
+ cci_write(ov5693->regmap, OV5693_SW_RESET_REG, OV5693_SW_RESET, &ret);
return ret;
}
@@ -771,7 +638,8 @@ static int ov5693_sensor_init(struct ov5693_device *ov5693)
return dev_err_probe(ov5693->dev, ret,
"software reset error\n");
- ret = ov5693_write_reg_array(ov5693, &ov5693_global_setting);
+ ret = cci_multi_reg_write(ov5693->regmap, ov5693_global_regs,
+ ARRAY_SIZE(ov5693_global_regs), NULL);
if (ret)
return dev_err_probe(ov5693->dev, ret,
"global settings error\n");
@@ -871,15 +739,15 @@ out_unlock:
static int ov5693_detect(struct ov5693_device *ov5693)
{
int ret;
- u32 id;
+ u64 id;
- ret = ov5693_read_reg(ov5693, OV5693_REG_CHIP_ID, &id);
+ ret = cci_read(ov5693->regmap, OV5693_REG_CHIP_ID, &id, NULL);
if (ret)
return ret;
if (id != OV5693_CHIP_ID)
return dev_err_probe(ov5693->dev, -ENODEV,
- "sensor ID mismatch. Found 0x%04x\n", id);
+ "sensor ID mismatch. Got 0x%04llx\n", id);
return 0;
}
@@ -1407,9 +1275,12 @@ static int ov5693_probe(struct i2c_client *client)
if (!ov5693)
return -ENOMEM;
- ov5693->client = client;
ov5693->dev = &client->dev;
+ ov5693->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(ov5693->regmap))
+ return PTR_ERR(ov5693->regmap);
+
ret = ov5693_check_hwcfg(ov5693);
if (ret)
return ret;
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index 10e47c7d4e0c..dffdb475e433 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -1210,7 +1210,7 @@ static struct i2c_driver ov7740_i2c_driver = {
.driver = {
.name = "ov7740",
.pm = &ov7740_pm_ops,
- .of_match_table = of_match_ptr(ov7740_of_match),
+ .of_match_table = ov7740_of_match,
},
.probe = ov7740_probe,
.remove = ov7740_remove,
diff --git a/drivers/media/i2c/rdacm20.c b/drivers/media/i2c/rdacm20.c
index 01a2596282f0..f4e2e2f3972a 100644
--- a/drivers/media/i2c/rdacm20.c
+++ b/drivers/media/i2c/rdacm20.c
@@ -567,7 +567,6 @@ again:
static int rdacm20_probe(struct i2c_client *client)
{
struct rdacm20_device *dev;
- struct fwnode_handle *ep;
int ret;
dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
@@ -616,24 +615,12 @@ static int rdacm20_probe(struct i2c_client *client)
if (ret < 0)
goto error_free_ctrls;
- ep = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL);
- if (!ep) {
- dev_err(&client->dev,
- "Unable to get endpoint in node %pOF\n",
- client->dev.of_node);
- ret = -ENOENT;
- goto error_free_ctrls;
- }
- dev->sd.fwnode = ep;
-
ret = v4l2_async_register_subdev(&dev->sd);
if (ret)
- goto error_put_node;
+ goto error_free_ctrls;
return 0;
-error_put_node:
- fwnode_handle_put(ep);
error_free_ctrls:
v4l2_ctrl_handler_free(&dev->ctrls);
error:
@@ -650,7 +637,6 @@ static void rdacm20_remove(struct i2c_client *client)
{
struct rdacm20_device *dev = i2c_to_rdacm20(client);
- fwnode_handle_put(dev->sd.fwnode);
v4l2_async_unregister_subdev(&dev->sd);
v4l2_ctrl_handler_free(&dev->ctrls);
media_entity_cleanup(&dev->sd.entity);
diff --git a/drivers/media/i2c/rdacm21.c b/drivers/media/i2c/rdacm21.c
index 043fec778a5e..a36a709243fd 100644
--- a/drivers/media/i2c/rdacm21.c
+++ b/drivers/media/i2c/rdacm21.c
@@ -351,7 +351,7 @@ static void ov10640_power_up(struct rdacm21_device *dev)
static int ov10640_check_id(struct rdacm21_device *dev)
{
unsigned int i;
- u8 val;
+ u8 val = 0;
/* Read OV10640 ID to test communications. */
for (i = 0; i < OV10640_PID_TIMEOUT; ++i) {
@@ -543,7 +543,6 @@ static int rdacm21_initialize(struct rdacm21_device *dev)
static int rdacm21_probe(struct i2c_client *client)
{
struct rdacm21_device *dev;
- struct fwnode_handle *ep;
int ret;
dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
@@ -588,24 +587,12 @@ static int rdacm21_probe(struct i2c_client *client)
if (ret < 0)
goto error_free_ctrls;
- ep = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL);
- if (!ep) {
- dev_err(&client->dev,
- "Unable to get endpoint in node %pOF\n",
- client->dev.of_node);
- ret = -ENOENT;
- goto error_free_ctrls;
- }
- dev->sd.fwnode = ep;
-
ret = v4l2_async_register_subdev(&dev->sd);
if (ret)
- goto error_put_node;
+ goto error_free_ctrls;
return 0;
-error_put_node:
- fwnode_handle_put(dev->sd.fwnode);
error_free_ctrls:
v4l2_ctrl_handler_free(&dev->ctrls);
error:
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index 906553a28676..fa27638edc07 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -545,7 +545,14 @@ static int mipid02_configure_from_code(struct mipid02_dev *bridge)
static int mipid02_stream_disable(struct mipid02_dev *bridge)
{
struct i2c_client *client = bridge->i2c_client;
- int ret;
+ int ret = -EINVAL;
+
+ if (!bridge->s_subdev)
+ goto error;
+
+ ret = v4l2_subdev_call(bridge->s_subdev, video, s_stream, 0);
+ if (ret)
+ goto error;
/* Disable all lanes */
ret = mipid02_write_reg(bridge, MIPID02_CLK_LANE_REG1, 0);
@@ -633,6 +640,10 @@ static int mipid02_stream_enable(struct mipid02_dev *bridge)
if (ret)
goto error;
+ ret = v4l2_subdev_call(bridge->s_subdev, video, s_stream, 1);
+ if (ret)
+ goto error;
+
return 0;
error:
@@ -829,7 +840,7 @@ static const struct media_entity_operations mipid02_subdev_entity_ops = {
static int mipid02_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *s_subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct mipid02_dev *bridge = to_mipid02_dev(notifier->sd);
struct i2c_client *client = bridge->i2c_client;
@@ -863,7 +874,7 @@ static int mipid02_async_bound(struct v4l2_async_notifier *notifier,
static void mipid02_async_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *s_subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct mipid02_dev *bridge = to_mipid02_dev(notifier->sd);
@@ -879,7 +890,7 @@ static int mipid02_parse_rx_ep(struct mipid02_dev *bridge)
{
struct v4l2_fwnode_endpoint ep = { .bus_type = V4L2_MBUS_CSI2_DPHY };
struct i2c_client *client = bridge->i2c_client;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct device_node *ep_node;
int ret;
@@ -911,10 +922,10 @@ static int mipid02_parse_rx_ep(struct mipid02_dev *bridge)
bridge->rx = ep;
/* register async notifier so we get noticed when sensor is connected */
- v4l2_async_nf_init(&bridge->notifier);
+ v4l2_async_subdev_nf_init(&bridge->notifier, &bridge->sd);
asd = v4l2_async_nf_add_fwnode_remote(&bridge->notifier,
of_fwnode_handle(ep_node),
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
of_node_put(ep_node);
if (IS_ERR(asd)) {
@@ -924,7 +935,7 @@ static int mipid02_parse_rx_ep(struct mipid02_dev *bridge)
}
bridge->notifier.ops = &mipid02_notifier_ops;
- ret = v4l2_async_subdev_nf_register(&bridge->sd, &bridge->notifier);
+ ret = v4l2_async_nf_register(&bridge->notifier);
if (ret)
v4l2_async_nf_cleanup(&bridge->notifier);
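
With the v4l2_async_connection API the notifier is tied to the bridge subdev at init time and registered with the plain v4l2_async_nf_register(). A condensed sketch of the sequence used in mipid02_parse_rx_ep() above; the wrapper function name is hypothetical and full error handling is trimmed:

	static int bridge_register_notifier(struct mipid02_dev *bridge,
					    struct device_node *ep_node)
	{
		struct v4l2_async_connection *asd;
		int ret;

		v4l2_async_subdev_nf_init(&bridge->notifier, &bridge->sd);

		asd = v4l2_async_nf_add_fwnode_remote(&bridge->notifier,
						      of_fwnode_handle(ep_node),
						      struct v4l2_async_connection);
		if (IS_ERR(asd))
			return PTR_ERR(asd);

		bridge->notifier.ops = &mipid02_notifier_ops;

		ret = v4l2_async_nf_register(&bridge->notifier);
		if (ret)
			v4l2_async_nf_cleanup(&bridge->notifier);

		return ret;
	}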
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 15f8163be9bf..2785935da497 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -133,8 +133,8 @@ static void i2c_rd(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
err = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
if (err != ARRAY_SIZE(msgs)) {
- v4l2_err(sd, "%s: reading register 0x%x from 0x%x failed\n",
- __func__, reg, client->addr);
+ v4l2_err(sd, "%s: reading register 0x%x from 0x%x failed: %d\n",
+ __func__, reg, client->addr, err);
}
}
@@ -165,8 +165,8 @@ static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
err = i2c_transfer(client->adapter, &msg, 1);
if (err != 1) {
- v4l2_err(sd, "%s: writing register 0x%x from 0x%x failed\n",
- __func__, reg, client->addr);
+ v4l2_err(sd, "%s: writing register 0x%x from 0x%x failed: %d\n",
+ __func__, reg, client->addr, err);
return;
}
diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
index 3f7e147ef594..566f5eaddd57 100644
--- a/drivers/media/i2c/tc358746.c
+++ b/drivers/media/i2c/tc358746.c
@@ -1426,7 +1426,7 @@ static int tc358746_init_controls(struct tc358746 *tc358746)
static int tc358746_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct tc358746 *tc358746 =
container_of(notifier, struct tc358746, notifier);
@@ -1445,7 +1445,7 @@ static int tc358746_async_register(struct tc358746 *tc358746)
struct v4l2_fwnode_endpoint vep = {
.bus_type = V4L2_MBUS_PARALLEL,
};
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct fwnode_handle *ep;
int err;
@@ -1460,9 +1460,9 @@ static int tc358746_async_register(struct tc358746 *tc358746)
return err;
}
- v4l2_async_nf_init(&tc358746->notifier);
+ v4l2_async_subdev_nf_init(&tc358746->notifier, &tc358746->sd);
asd = v4l2_async_nf_add_fwnode_remote(&tc358746->notifier, ep,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
fwnode_handle_put(ep);
if (IS_ERR(asd)) {
@@ -1472,13 +1472,10 @@ static int tc358746_async_register(struct tc358746 *tc358746)
tc358746->notifier.ops = &tc358746_notify_ops;
- err = v4l2_async_subdev_nf_register(&tc358746->sd, &tc358746->notifier);
+ err = v4l2_async_nf_register(&tc358746->notifier);
if (err)
goto err_cleanup;
- tc358746->sd.fwnode = fwnode_graph_get_endpoint_by_id(
- dev_fwnode(tc358746->sd.dev), TC358746_SOURCE, 0, 0);
-
err = v4l2_async_register_subdev(&tc358746->sd);
if (err)
goto err_unregister;
@@ -1486,7 +1483,6 @@ static int tc358746_async_register(struct tc358746 *tc358746)
return 0;
err_unregister:
- fwnode_handle_put(tc358746->sd.fwnode);
v4l2_async_nf_unregister(&tc358746->notifier);
err_cleanup:
v4l2_async_nf_cleanup(&tc358746->notifier);
@@ -1605,7 +1601,6 @@ static void tc358746_remove(struct i2c_client *client)
v4l2_fwnode_endpoint_free(&tc358746->csi_vep);
v4l2_async_nf_unregister(&tc358746->notifier);
v4l2_async_nf_cleanup(&tc358746->notifier);
- fwnode_handle_put(sd->fwnode);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index c7fb35ee3f9d..e543b3f7a4d8 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -2068,6 +2068,10 @@ static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
tvpc->ent.name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
v4l2c->name, v4l2c->label ?
v4l2c->label : "");
+ if (!tvpc->ent.name) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
}
ep_np = of_graph_get_endpoint_by_regs(np, TVP5150_PAD_VID_OUT, 0);
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index 6f98abc7ccc1..537ebd9fa8d7 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -16,9 +16,9 @@
#include <linux/kthread.h>
#include <linux/i2c.h>
#include <linux/list.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/nvmem-provider.h>
#include <linux/regmap.h>
diff --git a/drivers/media/pci/Kconfig b/drivers/media/pci/Kconfig
index 480194543d05..ee095bde0b68 100644
--- a/drivers/media/pci/Kconfig
+++ b/drivers/media/pci/Kconfig
@@ -73,7 +73,7 @@ config VIDEO_PCI_SKELETON
Enable build of the skeleton PCI driver, used as a reference
when developing new drivers.
-source "drivers/media/pci/intel/ipu3/Kconfig"
+source "drivers/media/pci/intel/Kconfig"
endif #MEDIA_PCI_SUPPORT
endif #PCI
diff --git a/drivers/media/pci/bt8xx/Kconfig b/drivers/media/pci/bt8xx/Kconfig
index 2d674dc28cec..2f77628246e9 100644
--- a/drivers/media/pci/bt8xx/Kconfig
+++ b/drivers/media/pci/bt8xx/Kconfig
@@ -3,7 +3,7 @@ config VIDEO_BT848
tristate "BT848 Video For Linux"
depends on PCI && I2C && VIDEO_DEV
select I2C_ALGOBIT
- select VIDEOBUF_DMA_SG
+ select VIDEOBUF2_DMA_SG
depends on RC_CORE
depends on MEDIA_RADIO_SUPPORT
select VIDEO_TUNER
diff --git a/drivers/media/pci/bt8xx/bt848.h b/drivers/media/pci/bt8xx/bt848.h
index 16999e717d18..c8a0e1ab001f 100644
--- a/drivers/media/pci/bt8xx/bt848.h
+++ b/drivers/media/pci/bt8xx/bt848.h
@@ -231,7 +231,15 @@
#define BT848_INT_ETBF (1<<23)
+#define BT848_RISC_VIDEO 1
+#define BT848_RISC_TOP 2
+#define BT848_RISC_VBI 4
+
#define BT848_INT_RISCS (0xf<<28)
+#define BT848_INT_RISCS_VIDEO (BT848_RISC_VIDEO << 28)
+#define BT848_INT_RISCS_TOP (BT848_RISC_TOP << 28)
+#define BT848_INT_RISCS_VBI (BT848_RISC_VBI << 28)
+
#define BT848_INT_RISC_EN (1<<27)
#define BT848_INT_RACK (1<<25)
#define BT848_INT_FIELD (1<<24)
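
The new BT848_RISC_* values name the 4-bit RISC status nibble: bttv_set_risc_status() further down encodes loop_irq into the RISC_JUMP status set/clear fields (<<16 / <<20), and the hardware reports the resulting status back in bits 31:28 of BT848_INT_STAT, which the BT848_INT_RISCS_* masks test in the interrupt handler instead of the old (1<<28)/(2<<28)/(4<<28) literals. A rough, purely illustrative sketch of that round-trip, with the values copied from the hunk above:

#define BT848_RISC_VIDEO	1
#define BT848_RISC_TOP		2
#define BT848_RISC_VBI		4
#define BT848_INT_RISCS		(0xf<<28)

/* Status nibble as programmed into the RISC_JUMP instruction. */
static inline unsigned int risc_jump_status(unsigned int loop_irq)
{
	return ((loop_irq & 0x0f) << 16) |	/* bits to set */
	       ((~loop_irq & 0x0f) << 20);	/* bits to clear */
}

/* Status nibble as it comes back in BT848_INT_STAT. */
static inline unsigned int risc_irq_status(unsigned int stat)
{
	return (stat & BT848_INT_RISCS) >> 28;	/* BT848_RISC_* mask */
}
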
diff --git a/drivers/media/pci/bt8xx/bttv-audio-hook.c b/drivers/media/pci/bt8xx/bttv-audio-hook.c
index da1914a20b81..b5d071835354 100644
--- a/drivers/media/pci/bt8xx/bttv-audio-hook.c
+++ b/drivers/media/pci/bt8xx/bttv-audio-hook.c
@@ -293,16 +293,8 @@ void winfast2000_audio(struct bttv *btv, struct v4l2_tuner *t, int set)
{
unsigned long val;
- if (!set) {
- /* Not much to do here */
- t->audmode = V4L2_TUNER_MODE_LANG1;
- t->rxsubchans = V4L2_TUNER_SUB_MONO |
- V4L2_TUNER_SUB_STEREO |
- V4L2_TUNER_SUB_LANG1 |
- V4L2_TUNER_SUB_LANG2;
-
+ if (!set)
return;
- }
/*btor (0xc32000, BT848_GPIO_OUT_EN);*/
switch (t->audmode) {
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 734f02b91aa3..aa708a0e5eac 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -641,15 +641,10 @@ static const unsigned int FORMATS = ARRAY_SIZE(formats);
#define VIDEO_RESOURCES (RESOURCE_VIDEO_READ | \
RESOURCE_VIDEO_STREAM)
-static
-int check_alloc_btres_lock(struct bttv *btv, struct bttv_fh *fh, int bit)
+int check_alloc_btres_lock(struct bttv *btv, int bit)
{
int xbits; /* mutual exclusive resources */
- if (fh->resources & bit)
- /* have it already allocated */
- return 1;
-
xbits = bit;
if (bit & (RESOURCE_VIDEO_READ | RESOURCE_VIDEO_STREAM))
xbits |= RESOURCE_VIDEO_READ | RESOURCE_VIDEO_STREAM;
@@ -663,7 +658,7 @@ int check_alloc_btres_lock(struct bttv *btv, struct bttv_fh *fh, int bit)
if ((bit & VIDEO_RESOURCES)
&& 0 == (btv->resources & VIDEO_RESOURCES)) {
/* Do crop - use current, don't - use default parameters. */
- __s32 top = btv->crop[!!fh->do_crop].rect.top;
+ __s32 top = btv->crop[!!btv->do_crop].rect.top;
if (btv->vbi_end > top)
goto fail;
@@ -672,17 +667,16 @@ int check_alloc_btres_lock(struct bttv *btv, struct bttv_fh *fh, int bit)
Claim scan lines crop[].rect.top to bottom. */
btv->crop_start = top;
} else if (bit & VBI_RESOURCES) {
- __s32 end = fh->vbi_fmt.end;
+ __s32 end = btv->vbi_fmt.end;
if (end > btv->crop_start)
goto fail;
- /* Claim scan lines above fh->vbi_fmt.end. */
+ /* Claim scan lines above btv->vbi_fmt.end. */
btv->vbi_end = end;
}
/* it's free, grab it */
- fh->resources |= bit;
btv->resources |= bit;
return 1;
@@ -691,9 +685,9 @@ int check_alloc_btres_lock(struct bttv *btv, struct bttv_fh *fh, int bit)
}
static
-int check_btres(struct bttv_fh *fh, int bit)
+int check_btres(struct bttv *btv, int bit)
{
- return (fh->resources & bit);
+ return (btv->resources & bit);
}
static
@@ -731,14 +725,12 @@ disclaim_video_lines(struct bttv *btv)
btwrite(0xfe, BT848_O_VDELAY_LO);
}
-static
-void free_btres_lock(struct bttv *btv, struct bttv_fh *fh, int bits)
+void free_btres_lock(struct bttv *btv, int bits)
{
- if ((fh->resources & bits) != bits) {
+ if ((btv->resources & bits) != bits) {
/* trying to free resources not allocated by us ... */
pr_err("BUG! (btres)\n");
}
- fh->resources &= ~bits;
btv->resources &= ~bits;
bits = btv->resources;
@@ -1111,8 +1103,8 @@ set_tvnorm(struct bttv *btv, unsigned int norm)
const struct bttv_tvnorm *tvnorm;
v4l2_std_id id;
- BUG_ON(norm >= BTTV_TVNORMS);
- BUG_ON(btv->tvnorm >= BTTV_TVNORMS);
+ WARN_ON(norm >= BTTV_TVNORMS);
+ WARN_ON(btv->tvnorm >= BTTV_TVNORMS);
tvnorm = &bttv_tvnorms[norm];
@@ -1174,7 +1166,7 @@ set_input(struct bttv *btv, unsigned int input, unsigned int norm)
set_tvnorm(btv, norm);
}
-static void init_irqreg(struct bttv *btv)
+void init_irqreg(struct bttv *btv)
{
/* clear status */
btwrite(0xfffffUL, BT848_INT_STAT);
@@ -1453,23 +1445,6 @@ void bttv_gpio_tracking(struct bttv *btv, char *comment)
btv->c.nr, outbits, data & outbits, data & ~outbits, comment);
}
-static void bttv_field_count(struct bttv *btv)
-{
- int need_count = 0;
-
- if (btv->users)
- need_count++;
-
- if (need_count) {
- /* start field counter */
- btor(BT848_INT_VSYNC,BT848_INT_MASK);
- } else {
- /* stop field counter */
- btand(~BT848_INT_VSYNC,BT848_INT_MASK);
- btv->field_count = 0;
- }
-}
-
static const struct bttv_format*
format_by_fourcc(int fourcc)
{
@@ -1487,158 +1462,132 @@ format_by_fourcc(int fourcc)
/* ----------------------------------------------------------------------- */
/* video4linux (1) interface */
-static int bttv_prepare_buffer(struct videobuf_queue *q,struct bttv *btv,
- struct bttv_buffer *buf,
- const struct bttv_format *fmt,
- unsigned int width, unsigned int height,
- enum v4l2_field field)
+static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
{
- struct bttv_fh *fh = q->priv_data;
- int redo_dma_risc = 0;
- struct bttv_crop c;
- int norm;
- int rc;
+ struct bttv *btv = vb2_get_drv_priv(q);
+ unsigned int size = btv->fmt->depth * btv->width * btv->height >> 3;
- /* check settings */
- if (NULL == fmt)
- return -EINVAL;
- if (fmt->btformat == BT848_COLOR_FMT_RAW) {
- width = RAW_BPL;
- height = RAW_LINES*2;
- if (width*height > buf->vb.bsize)
- return -EINVAL;
- buf->vb.size = buf->vb.bsize;
-
- /* Make sure tvnorm and vbi_end remain consistent
- until we're done. */
-
- norm = btv->tvnorm;
-
- /* In this mode capturing always starts at defrect.top
- (default VDELAY), ignoring cropping parameters. */
- if (btv->vbi_end > bttv_tvnorms[norm].cropcap.defrect.top) {
- return -EINVAL;
- }
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
+ *num_planes = 1;
+ sizes[0] = size;
- c.rect = bttv_tvnorms[norm].cropcap.defrect;
- } else {
- norm = btv->tvnorm;
- c = btv->crop[!!fh->do_crop];
-
- if (width < c.min_scaled_width ||
- width > c.max_scaled_width ||
- height < c.min_scaled_height)
- return -EINVAL;
-
- switch (field) {
- case V4L2_FIELD_TOP:
- case V4L2_FIELD_BOTTOM:
- case V4L2_FIELD_ALTERNATE:
- /* btv->crop counts frame lines. Max. scale
- factor is 16:1 for frames, 8:1 for fields. */
- if (height * 2 > c.max_scaled_height)
- return -EINVAL;
- break;
+ return 0;
+}
- default:
- if (height > c.max_scaled_height)
- return -EINVAL;
- break;
- }
+static void buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct bttv *btv = vb2_get_drv_priv(vq);
+ struct bttv_buffer *buf = container_of(vbuf, struct bttv_buffer, vbuf);
+ unsigned long flags;
- buf->vb.size = (width * height * fmt->depth) >> 3;
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
- return -EINVAL;
- }
-
- /* alloc + fill struct bttv_buffer (if changed) */
- if (buf->vb.width != width || buf->vb.height != height ||
- buf->vb.field != field ||
- buf->tvnorm != norm || buf->fmt != fmt ||
- buf->crop.top != c.rect.top ||
- buf->crop.left != c.rect.left ||
- buf->crop.width != c.rect.width ||
- buf->crop.height != c.rect.height) {
- buf->vb.width = width;
- buf->vb.height = height;
- buf->vb.field = field;
- buf->tvnorm = norm;
- buf->fmt = fmt;
- buf->crop = c.rect;
- redo_dma_risc = 1;
- }
-
- /* alloc risc memory */
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- redo_dma_risc = 1;
- if (0 != (rc = videobuf_iolock(q,&buf->vb,&btv->fbuf)))
- goto fail;
+ spin_lock_irqsave(&btv->s_lock, flags);
+ if (list_empty(&btv->capture)) {
+ btv->loop_irq = BT848_RISC_VIDEO;
+ if (vb2_is_streaming(&btv->vbiq))
+ btv->loop_irq |= BT848_RISC_VBI;
+ bttv_set_dma(btv, BT848_CAP_CTL_CAPTURE_ODD |
+ BT848_CAP_CTL_CAPTURE_EVEN);
}
-
- if (redo_dma_risc)
- if (0 != (rc = bttv_buffer_risc(btv,buf)))
- goto fail;
-
- buf->vb.state = VIDEOBUF_PREPARED;
- return 0;
-
- fail:
- bttv_dma_free(q,btv,buf);
- return rc;
+ list_add_tail(&buf->list, &btv->capture);
+ spin_unlock_irqrestore(&btv->s_lock, flags);
}
-static int
-buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
+static int buf_prepare(struct vb2_buffer *vb)
{
- struct bttv_fh *fh = q->priv_data;
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct bttv *btv = vb2_get_drv_priv(vq);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct bttv_buffer *buf = container_of(vbuf, struct bttv_buffer, vbuf);
+ unsigned int size = (btv->fmt->depth * btv->width * btv->height) >> 3;
- *size = fh->fmt->depth*fh->width*fh->height >> 3;
- if (0 == *count)
- *count = gbuffers;
- if (*size * *count > gbuffers * gbufsize)
- *count = (gbuffers * gbufsize) / *size;
- return 0;
+ if (vb2_plane_size(vb, 0) < size)
+ return -EINVAL;
+ vb2_set_plane_payload(vb, 0, size);
+
+ if (btv->field != V4L2_FIELD_ALTERNATE) {
+ buf->vbuf.field = btv->field;
+ } else if (btv->field_last == V4L2_FIELD_TOP) {
+ buf->vbuf.field = V4L2_FIELD_BOTTOM;
+ btv->field_last = V4L2_FIELD_BOTTOM;
+ } else {
+ buf->vbuf.field = V4L2_FIELD_TOP;
+ btv->field_last = V4L2_FIELD_TOP;
+ }
+
+ /* Allocate memory for risc struct and create the risc program. */
+ return bttv_buffer_risc(btv, buf);
}
-static int
-buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
- enum v4l2_field field)
+static void buf_cleanup(struct vb2_buffer *vb)
{
- struct bttv_buffer *buf = container_of(vb,struct bttv_buffer,vb);
- struct bttv_fh *fh = q->priv_data;
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct bttv *btv = vb2_get_drv_priv(vq);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct bttv_buffer *buf = container_of(vbuf, struct bttv_buffer, vbuf);
- return bttv_prepare_buffer(q,fh->btv, buf, fh->fmt,
- fh->width, fh->height, field);
+ btcx_riscmem_free(btv->c.pci, &buf->top);
+ btcx_riscmem_free(btv->c.pci, &buf->bottom);
}
-static void
-buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
+static int start_streaming(struct vb2_queue *q, unsigned int count)
{
- struct bttv_buffer *buf = container_of(vb,struct bttv_buffer,vb);
- struct bttv_fh *fh = q->priv_data;
- struct bttv *btv = fh->btv;
-
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue,&btv->capture);
- if (!btv->curr.frame_irq) {
- btv->loop_irq |= 1;
- bttv_set_dma(btv, 0x03);
+ int ret = 1;
+ int seqnr = 0;
+ struct bttv_buffer *buf;
+ struct bttv *btv = vb2_get_drv_priv(q);
+
+ ret = check_alloc_btres_lock(btv, RESOURCE_VIDEO_STREAM);
+ if (ret == 0) {
+ if (btv->field_count)
+ seqnr++;
+ while (!list_empty(&btv->capture)) {
+ buf = list_entry(btv->capture.next,
+ struct bttv_buffer, list);
+ list_del(&buf->list);
+ buf->vbuf.sequence = (btv->field_count >> 1) + seqnr++;
+ vb2_buffer_done(&buf->vbuf.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+ return !ret;
+ }
+ if (!vb2_is_streaming(&btv->vbiq)) {
+ init_irqreg(btv);
+ btv->field_count = 0;
}
+ btv->framedrop = 0;
+
+ return 0;
}
-static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
+static void stop_streaming(struct vb2_queue *q)
{
- struct bttv_buffer *buf = container_of(vb,struct bttv_buffer,vb);
- struct bttv_fh *fh = q->priv_data;
+ unsigned long flags;
+ struct bttv *btv = vb2_get_drv_priv(q);
- bttv_dma_free(q,fh->btv,buf);
+ vb2_wait_for_all_buffers(q);
+ spin_lock_irqsave(&btv->s_lock, flags);
+ free_btres_lock(btv, RESOURCE_VIDEO_STREAM);
+ if (!vb2_is_streaming(&btv->vbiq)) {
+ /* stop field counter */
+ btand(~BT848_INT_VSYNC, BT848_INT_MASK);
+ }
+ spin_unlock_irqrestore(&btv->s_lock, flags);
}
-static const struct videobuf_queue_ops bttv_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
- .buf_queue = buffer_queue,
- .buf_release = buffer_release,
+static const struct vb2_ops bttv_video_qops = {
+ .queue_setup = queue_setup,
+ .buf_queue = buf_queue,
+ .buf_prepare = buf_prepare,
+ .buf_cleanup = buf_cleanup,
+ .start_streaming = start_streaming,
+ .stop_streaming = stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
static void radio_enable(struct bttv *btv)
@@ -1654,8 +1603,7 @@ static void radio_enable(struct bttv *btv)
static int bttv_s_std(struct file *file, void *priv, v4l2_std_id id)
{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
unsigned int i;
for (i = 0; i < BTTV_TVNORMS; i++)
@@ -1670,8 +1618,7 @@ static int bttv_s_std(struct file *file, void *priv, v4l2_std_id id)
static int bttv_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
*id = btv->std;
return 0;
@@ -1679,8 +1626,7 @@ static int bttv_g_std(struct file *file, void *priv, v4l2_std_id *id)
static int bttv_querystd(struct file *file, void *f, v4l2_std_id *id)
{
- struct bttv_fh *fh = f;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
if (btread(BT848_DSTATUS) & BT848_DSTATUS_NUML)
*id &= V4L2_STD_625_50;
@@ -1692,8 +1638,7 @@ static int bttv_querystd(struct file *file, void *f, v4l2_std_id *id)
static int bttv_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
if (i->index >= bttv_tvcards[btv->c.type].video_inputs)
return -EINVAL;
@@ -1725,8 +1670,7 @@ static int bttv_enum_input(struct file *file, void *priv,
static int bttv_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
*i = btv->input;
@@ -1735,8 +1679,7 @@ static int bttv_g_input(struct file *file, void *priv, unsigned int *i)
static int bttv_s_input(struct file *file, void *priv, unsigned int i)
{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
if (i >= bttv_tvcards[btv->c.type].video_inputs)
return -EINVAL;
@@ -1748,8 +1691,7 @@ static int bttv_s_input(struct file *file, void *priv, unsigned int i)
static int bttv_s_tuner(struct file *file, void *priv,
const struct v4l2_tuner *t)
{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
if (t->index)
return -EINVAL;
@@ -1767,8 +1709,7 @@ static int bttv_s_tuner(struct file *file, void *priv,
static int bttv_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
if (f->tuner)
return -EINVAL;
@@ -1804,8 +1745,7 @@ static void bttv_set_frequency(struct bttv *btv, const struct v4l2_frequency *f)
static int bttv_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
if (f->tuner)
return -EINVAL;
@@ -1817,8 +1757,7 @@ static int bttv_s_frequency(struct file *file, void *priv,
static int bttv_log_status(struct file *file, void *f)
{
struct video_device *vdev = video_devdata(file);
- struct bttv_fh *fh = f;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
v4l2_ctrl_handler_log_status(vdev->ctrl_handler, btv->c.v4l2_dev.name);
bttv_call_all(btv, core, log_status);
@@ -1829,8 +1768,7 @@ static int bttv_log_status(struct file *file, void *f)
static int bttv_g_register(struct file *file, void *f,
struct v4l2_dbg_register *reg)
{
- struct bttv_fh *fh = f;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
/* bt848 has a 12-bit register space */
reg->reg &= 0xfff;
@@ -1843,8 +1781,7 @@ static int bttv_g_register(struct file *file, void *f,
static int bttv_s_register(struct file *file, void *f,
const struct v4l2_dbg_register *reg)
{
- struct bttv_fh *fh = f;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
/* bt848 has a 12-bit register space */
btwrite(reg->val, reg->reg & 0xfff);
@@ -1904,16 +1841,11 @@ bttv_crop_adjust (struct bttv_crop * c,
also adjust the current cropping parameters to get closer to the
desired image size. */
static int
-limit_scaled_size_lock (struct bttv_fh * fh,
- __s32 * width,
- __s32 * height,
- enum v4l2_field field,
- unsigned int width_mask,
- unsigned int width_bias,
- int adjust_size,
- int adjust_crop)
-{
- struct bttv *btv = fh->btv;
+limit_scaled_size_lock(struct bttv *btv, __s32 *width, __s32 *height,
+ enum v4l2_field field, unsigned int width_mask,
+ unsigned int width_bias, int adjust_size,
+ int adjust_crop)
+{
const struct v4l2_rect *b;
struct bttv_crop *c;
__s32 min_width;
@@ -1922,8 +1854,8 @@ limit_scaled_size_lock (struct bttv_fh * fh,
__s32 max_height;
int rc;
- BUG_ON((int) width_mask >= 0 ||
- width_bias >= (unsigned int) -width_mask);
+ WARN_ON((int)width_mask >= 0 ||
+ width_bias >= (unsigned int)(-width_mask));
/* Make sure tvnorm, vbi_end and the current cropping parameters
remain consistent until we're done. */
@@ -1931,9 +1863,9 @@ limit_scaled_size_lock (struct bttv_fh * fh,
b = &bttv_tvnorms[btv->tvnorm].cropcap.bounds;
/* Do crop - use current, don't - use default parameters. */
- c = &btv->crop[!!fh->do_crop];
+ c = &btv->crop[!!btv->do_crop];
- if (fh->do_crop
+ if (btv->do_crop
&& adjust_size
&& adjust_crop
&& !locked_btres(btv, VIDEO_RESOURCES)) {
@@ -2007,52 +1939,31 @@ limit_scaled_size_lock (struct bttv_fh * fh,
return rc;
}
-/* ----------------------------------------------------------------------- */
-
-static struct videobuf_queue* bttv_queue(struct bttv_fh *fh)
-{
- struct videobuf_queue* q = NULL;
-
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- q = &fh->cap;
- break;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- q = &fh->vbi;
- break;
- default:
- BUG();
- }
- return q;
-}
-
-static int bttv_resource(struct bttv_fh *fh)
+static int bttv_switch_type(struct bttv *btv, enum v4l2_buf_type type)
{
- int res = 0;
+ int res;
+ struct vb2_queue *q;
- switch (fh->type) {
+ switch (type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ q = &btv->capq;
res = RESOURCE_VIDEO_STREAM;
break;
case V4L2_BUF_TYPE_VBI_CAPTURE:
+ q = &btv->vbiq;
res = RESOURCE_VBI;
break;
default:
- BUG();
+ WARN_ON(1);
+ return -EINVAL;
}
- return res;
-}
-
-static int bttv_switch_type(struct bttv_fh *fh, enum v4l2_buf_type type)
-{
- struct videobuf_queue *q = bttv_queue(fh);
- int res = bttv_resource(fh);
- if (check_btres(fh,res))
+ if (check_btres(btv, res))
return -EBUSY;
- if (videobuf_queue_is_busy(q))
+ if (vb2_is_busy(q))
return -EBUSY;
- fh->type = type;
+ btv->type = type;
+
return 0;
}
@@ -2077,12 +1988,11 @@ pix_format_set_size (struct v4l2_pix_format * f,
static int bttv_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct bttv_fh *fh = priv;
+ struct bttv *btv = video_drvdata(file);
- pix_format_set_size(&f->fmt.pix, fh->fmt,
- fh->width, fh->height);
- f->fmt.pix.field = fh->cap.field;
- f->fmt.pix.pixelformat = fh->fmt->fourcc;
+ pix_format_set_size(&f->fmt.pix, btv->fmt, btv->width, btv->height);
+ f->fmt.pix.field = btv->field;
+ f->fmt.pix.pixelformat = btv->fmt->fourcc;
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
return 0;
@@ -2105,8 +2015,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct bttv_format *fmt;
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
enum v4l2_field field;
__s32 width, height;
__s32 height2;
@@ -2133,7 +2042,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
}
fallthrough;
default: /* FIELD_ANY case */
- height2 = btv->crop[!!fh->do_crop].rect.height >> 1;
+ height2 = btv->crop[!!btv->do_crop].rect.height >> 1;
field = (f->fmt.pix.height > height2)
? V4L2_FIELD_INTERLACED
: V4L2_FIELD_BOTTOM;
@@ -2144,10 +2053,8 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
height = f->fmt.pix.height;
bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
- rc = limit_scaled_size_lock(fh, &width, &height, field,
- width_mask, width_bias,
- /* adjust_size */ 1,
- /* adjust_crop */ 0);
+ rc = limit_scaled_size_lock(btv, &width, &height, field, width_mask,
+ width_bias, 1, 0);
if (0 != rc)
return rc;
@@ -2160,17 +2067,16 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
}
static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
+ struct v4l2_format *f)
{
int retval;
const struct bttv_format *fmt;
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
__s32 width, height;
unsigned int width_mask, width_bias;
enum v4l2_field field;
- retval = bttv_switch_type(fh, f->type);
+ retval = bttv_switch_type(btv, f->type);
if (0 != retval)
return retval;
@@ -2184,24 +2090,25 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
- retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
- width_mask, width_bias,
- /* adjust_size */ 1,
- /* adjust_crop */ 1);
+ retval = limit_scaled_size_lock(btv, &width, &height, f->fmt.pix.field,
+ width_mask, width_bias, 1, 1);
if (0 != retval)
return retval;
f->fmt.pix.field = field;
/* update our state information */
- fh->fmt = fmt;
- fh->cap.field = f->fmt.pix.field;
- fh->cap.last = V4L2_FIELD_NONE;
- fh->width = f->fmt.pix.width;
- fh->height = f->fmt.pix.height;
- btv->init.fmt = fmt;
- btv->init.width = f->fmt.pix.width;
- btv->init.height = f->fmt.pix.height;
+ btv->fmt = fmt;
+ btv->width = f->fmt.pix.width;
+ btv->height = f->fmt.pix.height;
+ btv->field = f->fmt.pix.field;
+ /*
+ * When field is V4L2_FIELD_ALTERNATE, buffers will be either
+ * V4L2_FIELD_TOP or V4L2_FIELD_BOTTOM depending on the value of
+ * field_last. Initialize field_last to V4L2_FIELD_BOTTOM so that
+ * streaming starts with a V4L2_FIELD_TOP buffer.
+ */
+ btv->field_last = V4L2_FIELD_BOTTOM;
return 0;
}
@@ -2209,8 +2116,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
static int bttv_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
if (0 == v4l2)
return -EINVAL;
@@ -2257,73 +2163,10 @@ static int bttv_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int bttv_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct bttv_fh *fh = priv;
- return videobuf_reqbufs(bttv_queue(fh), p);
-}
-
-static int bttv_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *b)
-{
- struct bttv_fh *fh = priv;
- return videobuf_querybuf(bttv_queue(fh), b);
-}
-
-static int bttv_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
- int res = bttv_resource(fh);
-
- if (!check_alloc_btres_lock(btv, fh, res))
- return -EBUSY;
-
- return videobuf_qbuf(bttv_queue(fh), b);
-}
-
-static int bttv_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
-{
- struct bttv_fh *fh = priv;
- return videobuf_dqbuf(bttv_queue(fh), b,
- file->f_flags & O_NONBLOCK);
-}
-
-static int bttv_streamon(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
- int res = bttv_resource(fh);
-
- if (!check_alloc_btres_lock(btv, fh, res))
- return -EBUSY;
- return videobuf_streamon(bttv_queue(fh));
-}
-
-
-static int bttv_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type type)
-{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
- int retval;
- int res = bttv_resource(fh);
-
-
- retval = videobuf_streamoff(bttv_queue(fh));
- if (retval < 0)
- return retval;
- free_btres_lock(btv, fh, res);
- return 0;
-}
-
static int bttv_g_parm(struct file *file, void *f,
struct v4l2_streamparm *parm)
{
- struct bttv_fh *fh = f;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -2337,8 +2180,7 @@ static int bttv_g_parm(struct file *file, void *f,
static int bttv_g_tuner(struct file *file, void *priv,
struct v4l2_tuner *t)
{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
if (0 != t->index)
return -EINVAL;
@@ -2360,8 +2202,7 @@ static int bttv_g_tuner(struct file *file, void *priv,
static int bttv_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -2373,20 +2214,14 @@ static int bttv_g_pixelaspect(struct file *file, void *priv,
static int bttv_g_selection(struct file *file, void *f, struct v4l2_selection *sel)
{
- struct bttv_fh *fh = f;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- /*
- * No fh->do_crop = 1; because btv->crop[1] may be
- * inconsistent with fh->width or fh->height and apps
- * do not expect a change here.
- */
- sel->r = btv->crop[!!fh->do_crop].rect;
+ sel->r = btv->crop[!!btv->do_crop].rect;
break;
case V4L2_SEL_TGT_CROP_DEFAULT:
sel->r = bttv_tvnorms[btv->tvnorm].cropcap.defrect;
@@ -2403,8 +2238,7 @@ static int bttv_g_selection(struct file *file, void *f, struct v4l2_selection *s
static int bttv_s_selection(struct file *file, void *f, struct v4l2_selection *sel)
{
- struct bttv_fh *fh = f;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
const struct v4l2_rect *b;
int retval;
struct bttv_crop c;
@@ -2424,9 +2258,8 @@ static int bttv_s_selection(struct file *file, void *f, struct v4l2_selection *s
read() may change vbi_end in check_alloc_btres_lock(). */
retval = -EBUSY;
- if (locked_btres(fh->btv, VIDEO_RESOURCES)) {
+ if (locked_btres(btv, VIDEO_RESOURCES))
return retval;
- }
b = &bttv_tvnorms[btv->tvnorm].cropcap.bounds;
@@ -2460,249 +2293,30 @@ static int bttv_s_selection(struct file *file, void *f, struct v4l2_selection *s
btv->crop[1] = c;
- fh->do_crop = 1;
-
- if (fh->width < c.min_scaled_width) {
- fh->width = c.min_scaled_width;
- btv->init.width = c.min_scaled_width;
- } else if (fh->width > c.max_scaled_width) {
- fh->width = c.max_scaled_width;
- btv->init.width = c.max_scaled_width;
- }
-
- if (fh->height < c.min_scaled_height) {
- fh->height = c.min_scaled_height;
- btv->init.height = c.min_scaled_height;
- } else if (fh->height > c.max_scaled_height) {
- fh->height = c.max_scaled_height;
- btv->init.height = c.max_scaled_height;
- }
-
- return 0;
-}
-
-static ssize_t bttv_read(struct file *file, char __user *data,
- size_t count, loff_t *ppos)
-{
- struct bttv_fh *fh = file->private_data;
- int retval = 0;
-
- if (fh->btv->errors)
- bttv_reinit_bt848(fh->btv);
- dprintk("%d: read count=%d type=%s\n",
- fh->btv->c.nr, (int)count, v4l2_type_names[fh->type]);
-
- switch (fh->type) {
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- if (!check_alloc_btres_lock(fh->btv, fh, RESOURCE_VIDEO_READ)) {
- /* VIDEO_READ in use by another fh,
- or VIDEO_STREAM by any fh. */
- return -EBUSY;
- }
- retval = videobuf_read_one(&fh->cap, data, count, ppos,
- file->f_flags & O_NONBLOCK);
- free_btres_lock(fh->btv, fh, RESOURCE_VIDEO_READ);
- break;
- case V4L2_BUF_TYPE_VBI_CAPTURE:
- if (!check_alloc_btres_lock(fh->btv,fh,RESOURCE_VBI))
- return -EBUSY;
- retval = videobuf_read_stream(&fh->vbi, data, count, ppos, 1,
- file->f_flags & O_NONBLOCK);
- break;
- default:
- BUG();
- }
- return retval;
-}
-
-static __poll_t bttv_poll(struct file *file, poll_table *wait)
-{
- struct bttv_fh *fh = file->private_data;
- struct bttv_buffer *buf;
- enum v4l2_field field;
- __poll_t rc = 0;
- __poll_t req_events = poll_requested_events(wait);
-
- if (v4l2_event_pending(&fh->fh))
- rc = EPOLLPRI;
- else if (req_events & EPOLLPRI)
- poll_wait(file, &fh->fh.wait, wait);
-
- if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
- return rc;
-
- if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
- if (!check_alloc_btres_lock(fh->btv,fh,RESOURCE_VBI))
- return rc | EPOLLERR;
- return rc | videobuf_poll_stream(file, &fh->vbi, wait);
- }
-
- if (check_btres(fh,RESOURCE_VIDEO_STREAM)) {
- /* streaming capture */
- if (list_empty(&fh->cap.stream))
- return rc | EPOLLERR;
- buf = list_entry(fh->cap.stream.next,struct bttv_buffer,vb.stream);
- } else {
- /* read() capture */
- if (NULL == fh->cap.read_buf) {
- /* need to capture a new frame */
- if (locked_btres(fh->btv,RESOURCE_VIDEO_STREAM))
- return rc | EPOLLERR;
- fh->cap.read_buf = videobuf_sg_alloc(fh->cap.msize);
- if (NULL == fh->cap.read_buf)
- return rc | EPOLLERR;
- fh->cap.read_buf->memory = V4L2_MEMORY_USERPTR;
- field = videobuf_next_field(&fh->cap);
- if (0 != fh->cap.ops->buf_prepare(&fh->cap,fh->cap.read_buf,field)) {
- kfree (fh->cap.read_buf);
- fh->cap.read_buf = NULL;
- return rc | EPOLLERR;
- }
- fh->cap.ops->buf_queue(&fh->cap,fh->cap.read_buf);
- fh->cap.read_off = 0;
- }
- buf = (struct bttv_buffer*)fh->cap.read_buf;
- }
-
- poll_wait(file, &buf->vb.done, wait);
- if (buf->vb.state == VIDEOBUF_DONE ||
- buf->vb.state == VIDEOBUF_ERROR)
- rc = rc | EPOLLIN|EPOLLRDNORM;
- return rc;
-}
-
-static int bttv_open(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
- struct bttv *btv = video_drvdata(file);
- struct bttv_fh *fh;
- enum v4l2_buf_type type = 0;
-
- dprintk("open dev=%s\n", video_device_node_name(vdev));
-
- if (vdev->vfl_type == VFL_TYPE_VIDEO) {
- type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- } else if (vdev->vfl_type == VFL_TYPE_VBI) {
- type = V4L2_BUF_TYPE_VBI_CAPTURE;
- } else {
- WARN_ON(1);
- return -ENODEV;
- }
-
- dprintk("%d: open called (type=%s)\n",
- btv->c.nr, v4l2_type_names[type]);
-
- /* allocate per filehandle data */
- fh = kmalloc(sizeof(*fh), GFP_KERNEL);
- if (unlikely(!fh))
- return -ENOMEM;
- btv->users++;
- file->private_data = fh;
-
- *fh = btv->init;
- v4l2_fh_init(&fh->fh, vdev);
-
- fh->type = type;
-
- videobuf_queue_sg_init(&fh->cap, &bttv_video_qops,
- &btv->c.pci->dev, &btv->s_lock,
- V4L2_BUF_TYPE_VIDEO_CAPTURE,
- V4L2_FIELD_INTERLACED,
- sizeof(struct bttv_buffer),
- fh, &btv->lock);
- videobuf_queue_sg_init(&fh->vbi, &bttv_vbi_qops,
- &btv->c.pci->dev, &btv->s_lock,
- V4L2_BUF_TYPE_VBI_CAPTURE,
- V4L2_FIELD_SEQ_TB,
- sizeof(struct bttv_buffer),
- fh, &btv->lock);
- set_tvnorm(btv,btv->tvnorm);
- set_input(btv, btv->input, btv->tvnorm);
- audio_mute(btv, btv->mute);
-
- /* The V4L2 spec requires one global set of cropping parameters
- which only change on request. These are stored in btv->crop[1].
- However for compatibility with V4L apps and cropping unaware
- V4L2 apps we now reset the cropping parameters as seen through
- this fh, which is to say VIDIOC_G_SELECTION and scaling limit checks
- will use btv->crop[0], the default cropping parameters for the
- current video standard, and VIDIOC_S_FMT will not implicitly
- change the cropping parameters until VIDIOC_S_SELECTION has been
- called. */
- fh->do_crop = !reset_crop; /* module parameter */
-
- /* Likewise there should be one global set of VBI capture
- parameters, but for compatibility with V4L apps and earlier
- driver versions each fh has its own parameters. */
- bttv_vbi_fmt_reset(&fh->vbi_fmt, btv->tvnorm);
-
- bttv_field_count(btv);
- v4l2_fh_add(&fh->fh);
- return 0;
-}
-
-static int bttv_release(struct file *file)
-{
- struct bttv_fh *fh = file->private_data;
- struct bttv *btv = fh->btv;
-
- /* stop video capture */
- if (check_btres(fh, RESOURCE_VIDEO_STREAM)) {
- videobuf_streamoff(&fh->cap);
- free_btres_lock(btv,fh,RESOURCE_VIDEO_STREAM);
- }
- if (fh->cap.read_buf) {
- buffer_release(&fh->cap,fh->cap.read_buf);
- kfree(fh->cap.read_buf);
- }
- if (check_btres(fh, RESOURCE_VIDEO_READ)) {
- free_btres_lock(btv, fh, RESOURCE_VIDEO_READ);
- }
-
- /* stop vbi capture */
- if (check_btres(fh, RESOURCE_VBI)) {
- videobuf_stop(&fh->vbi);
- free_btres_lock(btv,fh,RESOURCE_VBI);
- }
-
- /* free stuff */
+ btv->do_crop = 1;
- videobuf_mmap_free(&fh->cap);
- videobuf_mmap_free(&fh->vbi);
- file->private_data = NULL;
+ if (btv->width < c.min_scaled_width)
+ btv->width = c.min_scaled_width;
+ else if (btv->width > c.max_scaled_width)
+ btv->width = c.max_scaled_width;
- btv->users--;
- bttv_field_count(btv);
+ if (btv->height < c.min_scaled_height)
+ btv->height = c.min_scaled_height;
+ else if (btv->height > c.max_scaled_height)
+ btv->height = c.max_scaled_height;
- if (!btv->users)
- audio_mute(btv, btv->mute);
-
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
return 0;
}
-static int
-bttv_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct bttv_fh *fh = file->private_data;
-
- dprintk("%d: mmap type=%s 0x%lx+%ld\n",
- fh->btv->c.nr, v4l2_type_names[fh->type],
- vma->vm_start, vma->vm_end - vma->vm_start);
- return videobuf_mmap_mapper(bttv_queue(fh),vma);
-}
-
static const struct v4l2_file_operations bttv_fops =
{
.owner = THIS_MODULE,
- .open = bttv_open,
- .release = bttv_release,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
.unlocked_ioctl = video_ioctl2,
- .read = bttv_read,
- .mmap = bttv_mmap,
- .poll = bttv_poll,
+ .read = vb2_fop_read,
+ .mmap = vb2_fop_mmap,
+ .poll = vb2_fop_poll,
};
static const struct v4l2_ioctl_ops bttv_ioctl_ops = {
@@ -2715,17 +2329,18 @@ static const struct v4l2_ioctl_ops bttv_ioctl_ops = {
.vidioc_try_fmt_vbi_cap = bttv_try_fmt_vbi_cap,
.vidioc_s_fmt_vbi_cap = bttv_s_fmt_vbi_cap,
.vidioc_g_pixelaspect = bttv_g_pixelaspect,
- .vidioc_reqbufs = bttv_reqbufs,
- .vidioc_querybuf = bttv_querybuf,
- .vidioc_qbuf = bttv_qbuf,
- .vidioc_dqbuf = bttv_dqbuf,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_s_std = bttv_s_std,
.vidioc_g_std = bttv_g_std,
.vidioc_enum_input = bttv_enum_input,
.vidioc_g_input = bttv_g_input,
.vidioc_s_input = bttv_s_input,
- .vidioc_streamon = bttv_streamon,
- .vidioc_streamoff = bttv_streamoff,
.vidioc_g_tuner = bttv_g_tuner,
.vidioc_s_tuner = bttv_s_tuner,
.vidioc_g_selection = bttv_g_selection,
@@ -2756,52 +2371,40 @@ static int radio_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct bttv *btv = video_drvdata(file);
- struct bttv_fh *fh;
+ int ret = v4l2_fh_open(file);
- dprintk("open dev=%s\n", video_device_node_name(vdev));
+ if (ret)
+ return ret;
+ dprintk("open dev=%s\n", video_device_node_name(vdev));
dprintk("%d: open called (radio)\n", btv->c.nr);
- /* allocate per filehandle data */
- fh = kmalloc(sizeof(*fh), GFP_KERNEL);
- if (unlikely(!fh))
- return -ENOMEM;
- file->private_data = fh;
- *fh = btv->init;
- v4l2_fh_init(&fh->fh, vdev);
-
btv->radio_user++;
audio_mute(btv, btv->mute);
- v4l2_fh_add(&fh->fh);
-
return 0;
}
static int radio_release(struct file *file)
{
- struct bttv_fh *fh = file->private_data;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
struct saa6588_command cmd;
- file->private_data = NULL;
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
-
btv->radio_user--;
bttv_call_all(btv, core, command, SAA6588_CMD_CLOSE, &cmd);
if (btv->radio_user == 0)
btv->has_radio_tuner = 0;
+
+ v4l2_fh_release(file);
+
return 0;
}
static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
if (0 != t->index)
return -EINVAL;
@@ -2823,8 +2426,7 @@ static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
static int radio_s_tuner(struct file *file, void *priv,
const struct v4l2_tuner *t)
{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
if (0 != t->index)
return -EINVAL;
@@ -2837,8 +2439,7 @@ static int radio_s_tuner(struct file *file, void *priv,
static int radio_s_hw_freq_seek(struct file *file, void *priv,
const struct v4l2_hw_freq_seek *a)
{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
if (btv->has_tea575x)
return snd_tea575x_s_hw_freq_seek(file, &btv->tea, a);
@@ -2849,8 +2450,7 @@ static int radio_s_hw_freq_seek(struct file *file, void *priv,
static int radio_enum_freq_bands(struct file *file, void *priv,
struct v4l2_frequency_band *band)
{
- struct bttv_fh *fh = priv;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
if (btv->has_tea575x)
return snd_tea575x_enum_freq_bands(&btv->tea, band);
@@ -2861,8 +2461,7 @@ static int radio_enum_freq_bands(struct file *file, void *priv,
static ssize_t radio_read(struct file *file, char __user *data,
size_t count, loff_t *ppos)
{
- struct bttv_fh *fh = file->private_data;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
struct saa6588_command cmd;
cmd.block_count = count / 3;
@@ -2879,23 +2478,17 @@ static ssize_t radio_read(struct file *file, char __user *data,
static __poll_t radio_poll(struct file *file, poll_table *wait)
{
- struct bttv_fh *fh = file->private_data;
- struct bttv *btv = fh->btv;
- __poll_t req_events = poll_requested_events(wait);
+ struct bttv *btv = video_drvdata(file);
struct saa6588_command cmd;
- __poll_t res = 0;
+ __poll_t rc = v4l2_ctrl_poll(file, wait);
- if (v4l2_event_pending(&fh->fh))
- res = EPOLLPRI;
- else if (req_events & EPOLLPRI)
- poll_wait(file, &fh->fh.wait, wait);
radio_enable(btv);
cmd.instance = file;
cmd.event_list = wait;
- cmd.poll_mask = res;
+ cmd.poll_mask = 0;
bttv_call_all(btv, core, command, SAA6588_CMD_POLL, &cmd);
- return cmd.poll_mask;
+ return rc | cmd.poll_mask;
}
static const struct v4l2_file_operations radio_fops =
@@ -3070,17 +2663,19 @@ bttv_irq_next_video(struct bttv *btv, struct bttv_buffer_set *set)
/* capture request ? */
if (!list_empty(&btv->capture)) {
- set->frame_irq = 1;
- item = list_entry(btv->capture.next, struct bttv_buffer, vb.queue);
- if (V4L2_FIELD_HAS_TOP(item->vb.field))
+ set->frame_irq = BT848_RISC_VIDEO;
+ item = list_entry(btv->capture.next, struct bttv_buffer, list);
+
+ if (V4L2_FIELD_HAS_TOP(item->vbuf.field))
set->top = item;
- if (V4L2_FIELD_HAS_BOTTOM(item->vb.field))
+ if (V4L2_FIELD_HAS_BOTTOM(item->vbuf.field))
set->bottom = item;
/* capture request for other field ? */
- if (!V4L2_FIELD_HAS_BOTH(item->vb.field) &&
- (item->vb.queue.next != &btv->capture)) {
- item = list_entry(item->vb.queue.next, struct bttv_buffer, vb.queue);
+ if (!V4L2_FIELD_HAS_BOTH(item->vbuf.field) &&
+ item->list.next != &btv->capture) {
+ item = list_entry(item->list.next,
+ struct bttv_buffer, list);
/* Mike Isely <isely@pobox.com> - Only check
* and set up the bottom field in the logic
* below. Don't ever do the top field. This
@@ -3108,13 +2703,18 @@ bttv_irq_next_video(struct bttv *btv, struct bttv_buffer_set *set)
* sync within a single frame time. (Out of
* order fields can screw up deinterlacing
* algorithms.) */
- if (!V4L2_FIELD_HAS_BOTH(item->vb.field)) {
- if (NULL == set->bottom &&
- V4L2_FIELD_BOTTOM == item->vb.field) {
+ if (!V4L2_FIELD_HAS_BOTH(item->vbuf.field)) {
+ if (!set->bottom &&
+ item->vbuf.field == V4L2_FIELD_BOTTOM)
set->bottom = item;
+ if (set->top && set->bottom) {
+ /*
+ * The buffer set has a top buffer and
+ * a bottom buffer and they are not
+ * copies of each other.
+ */
+ set->top_irq = BT848_RISC_TOP;
}
- if (NULL != set->top && NULL != set->bottom)
- set->top_irq = 2;
}
}
}
@@ -3136,44 +2736,47 @@ bttv_irq_wakeup_video(struct bttv *btv, struct bttv_buffer_set *wakeup,
if (irq_debug > 1)
pr_debug("%d: wakeup: both=%p\n",
btv->c.nr, wakeup->top);
- wakeup->top->vb.ts = ts;
- wakeup->top->vb.field_count = btv->field_count;
- wakeup->top->vb.state = state;
- wake_up(&wakeup->top->vb.done);
+ wakeup->top->vbuf.vb2_buf.timestamp = ts;
+ wakeup->top->vbuf.sequence = btv->field_count >> 1;
+ vb2_buffer_done(&wakeup->top->vbuf.vb2_buf, state);
+ if (btv->field_count == 0)
+ btor(BT848_INT_VSYNC, BT848_INT_MASK);
}
} else {
if (NULL != wakeup->top && curr->top != wakeup->top) {
if (irq_debug > 1)
pr_debug("%d: wakeup: top=%p\n",
btv->c.nr, wakeup->top);
- wakeup->top->vb.ts = ts;
- wakeup->top->vb.field_count = btv->field_count;
- wakeup->top->vb.state = state;
- wake_up(&wakeup->top->vb.done);
+ wakeup->top->vbuf.vb2_buf.timestamp = ts;
+ wakeup->top->vbuf.sequence = btv->field_count >> 1;
+ vb2_buffer_done(&wakeup->top->vbuf.vb2_buf, state);
+ if (btv->field_count == 0)
+ btor(BT848_INT_VSYNC, BT848_INT_MASK);
}
if (NULL != wakeup->bottom && curr->bottom != wakeup->bottom) {
if (irq_debug > 1)
pr_debug("%d: wakeup: bottom=%p\n",
btv->c.nr, wakeup->bottom);
- wakeup->bottom->vb.ts = ts;
- wakeup->bottom->vb.field_count = btv->field_count;
- wakeup->bottom->vb.state = state;
- wake_up(&wakeup->bottom->vb.done);
+ wakeup->bottom->vbuf.vb2_buf.timestamp = ts;
+ wakeup->bottom->vbuf.sequence = btv->field_count >> 1;
+ vb2_buffer_done(&wakeup->bottom->vbuf.vb2_buf, state);
+ if (btv->field_count == 0)
+ btor(BT848_INT_VSYNC, BT848_INT_MASK);
}
}
}
static void
bttv_irq_wakeup_vbi(struct bttv *btv, struct bttv_buffer *wakeup,
- unsigned int state)
+ unsigned int state)
{
if (NULL == wakeup)
return;
-
- wakeup->vb.ts = ktime_get_ns();
- wakeup->vb.field_count = btv->field_count;
- wakeup->vb.state = state;
- wake_up(&wakeup->vb.done);
+ wakeup->vbuf.vb2_buf.timestamp = ktime_get_ns();
+ wakeup->vbuf.sequence = btv->field_count >> 1;
+ vb2_buffer_done(&wakeup->vbuf.vb2_buf, state);
+ if (btv->field_count == 0)
+ btor(BT848_INT_VSYNC, BT848_INT_MASK);
}
static void bttv_irq_timeout(struct timer_list *t)
@@ -3183,6 +2786,7 @@ static void bttv_irq_timeout(struct timer_list *t)
struct bttv_buffer *ovbi;
struct bttv_buffer *item;
unsigned long flags;
+ int seqnr = 0;
if (bttv_verbose) {
pr_info("%d: timeout: drop=%d irq=%d/%d, risc=%08x, ",
@@ -3206,21 +2810,25 @@ static void bttv_irq_timeout(struct timer_list *t)
bttv_set_dma(btv, 0);
/* wake up */
- bttv_irq_wakeup_video(btv, &old, &new, VIDEOBUF_ERROR);
- bttv_irq_wakeup_vbi(btv, ovbi, VIDEOBUF_ERROR);
+ bttv_irq_wakeup_video(btv, &old, &new, VB2_BUF_STATE_DONE);
+ bttv_irq_wakeup_vbi(btv, ovbi, VB2_BUF_STATE_DONE);
/* cancel all outstanding capture / vbi requests */
+ if (btv->field_count)
+ seqnr++;
while (!list_empty(&btv->capture)) {
- item = list_entry(btv->capture.next, struct bttv_buffer, vb.queue);
- list_del(&item->vb.queue);
- item->vb.state = VIDEOBUF_ERROR;
- wake_up(&item->vb.done);
+ item = list_entry(btv->capture.next, struct bttv_buffer, list);
+ list_del(&item->list);
+ item->vbuf.vb2_buf.timestamp = ktime_get_ns();
+ item->vbuf.sequence = (btv->field_count >> 1) + seqnr++;
+ vb2_buffer_done(&item->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
}
while (!list_empty(&btv->vcapture)) {
- item = list_entry(btv->vcapture.next, struct bttv_buffer, vb.queue);
- list_del(&item->vb.queue);
- item->vb.state = VIDEOBUF_ERROR;
- wake_up(&item->vb.done);
+ item = list_entry(btv->vcapture.next, struct bttv_buffer, list);
+ list_del(&item->list);
+ item->vbuf.vb2_buf.timestamp = ktime_get_ns();
+ item->vbuf.sequence = (btv->field_count >> 1) + seqnr++;
+ vb2_buffer_done(&item->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
}
btv->errors++;
@@ -3239,11 +2847,11 @@ bttv_irq_wakeup_top(struct bttv *btv)
btv->curr.top_irq = 0;
btv->curr.top = NULL;
bttv_risc_hook(btv, RISC_SLOT_O_FIELD, NULL, 0);
-
- wakeup->vb.ts = ktime_get_ns();
- wakeup->vb.field_count = btv->field_count;
- wakeup->vb.state = VIDEOBUF_DONE;
- wake_up(&wakeup->vb.done);
+ wakeup->vbuf.vb2_buf.timestamp = ktime_get_ns();
+ wakeup->vbuf.sequence = btv->field_count >> 1;
+ vb2_buffer_done(&wakeup->vbuf.vb2_buf, VB2_BUF_STATE_DONE);
+ if (btv->field_count == 0)
+ btor(BT848_INT_VSYNC, BT848_INT_MASK);
spin_unlock(&btv->s_lock);
}
@@ -3280,7 +2888,7 @@ bttv_irq_switch_video(struct bttv *btv)
/* switch over */
old = btv->curr;
btv->curr = new;
- btv->loop_irq &= ~1;
+ btv->loop_irq &= ~BT848_RISC_VIDEO;
bttv_buffer_activate_video(btv, &new);
bttv_set_dma(btv, 0);
@@ -3291,7 +2899,7 @@ bttv_irq_switch_video(struct bttv *btv)
}
/* wake up finished buffers */
- bttv_irq_wakeup_video(btv, &old, &new, VIDEOBUF_DONE);
+ bttv_irq_wakeup_video(btv, &old, &new, VB2_BUF_STATE_DONE);
spin_unlock(&btv->s_lock);
}
@@ -3305,7 +2913,7 @@ bttv_irq_switch_vbi(struct bttv *btv)
spin_lock(&btv->s_lock);
if (!list_empty(&btv->vcapture))
- new = list_entry(btv->vcapture.next, struct bttv_buffer, vb.queue);
+ new = list_entry(btv->vcapture.next, struct bttv_buffer, list);
old = btv->cvbi;
rc = btread(BT848_RISC_COUNT);
@@ -3320,11 +2928,11 @@ bttv_irq_switch_vbi(struct bttv *btv)
/* switch */
btv->cvbi = new;
- btv->loop_irq &= ~4;
+ btv->loop_irq &= ~BT848_RISC_VBI;
bttv_buffer_activate_vbi(btv, new);
bttv_set_dma(btv, 0);
- bttv_irq_wakeup_vbi(btv, old, VIDEOBUF_DONE);
+ bttv_irq_wakeup_vbi(btv, old, VB2_BUF_STATE_DONE);
spin_unlock(&btv->s_lock);
}
@@ -3383,13 +2991,13 @@ static irqreturn_t bttv_irq(int irq, void *dev_id)
wake_up(&btv->i2c_queue);
}
- if ((astat & BT848_INT_RISCI) && (stat & (4<<28)))
+ if ((astat & BT848_INT_RISCI) && (stat & BT848_INT_RISCS_VBI))
bttv_irq_switch_vbi(btv);
- if ((astat & BT848_INT_RISCI) && (stat & (2<<28)))
+ if ((astat & BT848_INT_RISCI) && (stat & BT848_INT_RISCS_TOP))
bttv_irq_wakeup_top(btv);
- if ((astat & BT848_INT_RISCI) && (stat & (1<<28)))
+ if ((astat & BT848_INT_RISCI) && (stat & BT848_INT_RISCS_VIDEO))
bttv_irq_switch_video(btv);
if ((astat & BT848_INT_HLOCK) && btv->opt_automute)
@@ -3445,11 +3053,12 @@ static irqreturn_t bttv_irq(int irq, void *dev_id)
/* ----------------------------------------------------------------------- */
/* initialization */
-static void vdev_init(struct bttv *btv,
- struct video_device *vfd,
- const struct video_device *template,
- const char *type_name)
+static int vdev_init(struct bttv *btv, struct video_device *vfd,
+ const struct video_device *template,
+ const char *type_name)
{
+ int err;
+ struct vb2_queue *q;
*vfd = *template;
vfd->v4l2_dev = &btv->c.v4l2_dev;
vfd->release = video_device_release_empty;
@@ -3463,6 +3072,36 @@ static void vdev_init(struct bttv *btv,
v4l2_disable_ioctl(vfd, VIDIOC_G_TUNER);
v4l2_disable_ioctl(vfd, VIDIOC_S_TUNER);
}
+
+ if (strcmp(type_name, "radio") == 0)
+ return 0;
+
+ if (strcmp(type_name, "video") == 0) {
+ q = &btv->capq;
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ q->ops = &bttv_video_qops;
+ } else if (strcmp(type_name, "vbi") == 0) {
+ q = &btv->vbiq;
+ q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
+ q->ops = &bttv_vbi_qops;
+ } else {
+ return -EINVAL;
+ }
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ | VB2_DMABUF;
+ q->mem_ops = &vb2_dma_sg_memops;
+ q->drv_priv = btv;
+ q->gfp_flags = __GFP_DMA32;
+ q->buf_struct_size = sizeof(struct bttv_buffer);
+ q->lock = &btv->lock;
+ q->min_buffers_needed = 2;
+ q->dev = &btv->c.pci->dev;
+ err = vb2_queue_init(q);
+ if (err)
+ return err;
+ vfd->queue = q;
+
+ return 0;
}
static void bttv_unregister_video(struct bttv *btv)
@@ -3670,11 +3309,16 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
bttv_ctrl_coring.def = coring;
/* fill struct bttv with some useful defaults */
- btv->init.btv = btv;
- btv->init.fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
- btv->init.width = 320;
- btv->init.height = 240;
+ btv->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24);
+ btv->width = 320;
+ btv->height = 240;
+ btv->field = V4L2_FIELD_INTERLACED;
btv->input = 0;
+ btv->tvnorm = 0; /* Index into bttv_tvnorms[] i.e. PAL. */
+ bttv_vbi_fmt_reset(&btv->vbi_fmt, btv->tvnorm);
+ btv->vbi_count[0] = VBI_DEFLINES;
+ btv->vbi_count[1] = VBI_DEFLINES;
+ btv->do_crop = 0;
v4l2_ctrl_new_std(hdl, &bttv_ctrl_ops,
V4L2_CID_BRIGHTNESS, 0, 0xff00, 0x100, 32768);
@@ -3749,7 +3393,7 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
result = btv->radio_ctrl_handler.error;
goto fail2;
}
- set_input(btv, 0, btv->tvnorm);
+ set_input(btv, btv->input, btv->tvnorm);
bttv_crop_reset(&btv->crop[0], btv->tvnorm);
btv->crop[1] = btv->crop[0]; /* current = default */
disclaim_vbi_lines(btv);
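
vdev_init() above now sets up one vb2 queue per video/VBI node instead of the old per-filehandle videobuf queues. A rough sketch of the same queue wiring for a hypothetical "foo" PCI capture driver (foo_* names are placeholders; the field values mirror what this conversion uses: scatter-gather DMA, 32-bit-only addressing, and at least two queued buffers):

#include <linux/mutex.h>
#include <linux/pci.h>
#include <media/videobuf2-dma-sg.h>
#include <media/videobuf2-v4l2.h>

struct foo_buffer {
	struct vb2_v4l2_buffer vbuf;
	struct list_head list;
};

struct foo_dev {
	struct pci_dev *pci;
	struct mutex lock;
	struct vb2_queue capq;
};

static int foo_init_queue(struct foo_dev *foo, struct vb2_queue *q,
			  enum v4l2_buf_type type, const struct vb2_ops *ops)
{
	q->type = type;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ | VB2_DMABUF;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->ops = ops;				/* queue_setup/buf_queue/... */
	q->mem_ops = &vb2_dma_sg_memops;	/* bt848 DMA is scatter-gather */
	q->drv_priv = foo;
	q->buf_struct_size = sizeof(struct foo_buffer);
	q->gfp_flags = __GFP_DMA32;		/* chip only addresses 32 bits */
	q->lock = &foo->lock;
	q->min_buffers_needed = 2;
	q->dev = &foo->pci->dev;

	return vb2_queue_init(q);
}

The initialized queue is then assigned to vfd->queue, which is what lets the vb2_ioctl_* and vb2_fop_* helpers referenced in bttv_ioctl_ops and bttv_fops locate it.
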
diff --git a/drivers/media/pci/bt8xx/bttv-risc.c b/drivers/media/pci/bt8xx/bttv-risc.c
index 4fa4b9da9634..436baf6c8b08 100644
--- a/drivers/media/pci/bt8xx/bttv-risc.c
+++ b/drivers/media/pci/bt8xx/bttv-risc.c
@@ -67,8 +67,10 @@ bttv_risc_packed(struct bttv *btv, struct btcx_riscmem *risc,
/* scan lines */
sg = sglist;
for (line = 0; line < store_lines; line++) {
- if ((btv->opt_vcr_hack) &&
- (line >= (store_lines - VCR_HACK_LINES)))
+ if ((line >= (store_lines - VCR_HACK_LINES)) &&
+ (btv->opt_vcr_hack ||
+ (V4L2_FIELD_HAS_BOTH(btv->field) ||
+ btv->field == V4L2_FIELD_ALTERNATE)))
continue;
while (offset && offset >= sg_dma_len(sg)) {
offset -= sg_dma_len(sg);
@@ -106,7 +108,7 @@ bttv_risc_packed(struct bttv *btv, struct btcx_riscmem *risc,
/* save pointer to jmp instruction address */
risc->jmp = rp;
- BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
+ WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
return 0;
}
@@ -227,7 +229,7 @@ bttv_risc_planar(struct bttv *btv, struct btcx_riscmem *risc,
/* save pointer to jmp instruction address */
risc->jmp = rp;
- BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
+ WARN_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
return 0;
}
@@ -360,21 +362,75 @@ bttv_apply_geo(struct bttv *btv, struct bttv_geometry *geo, int odd)
/* ---------------------------------------------------------- */
/* risc group / risc main loop / dma management */
-void
-bttv_set_dma(struct bttv *btv, int override)
+static void bttv_set_risc_status(struct bttv *btv)
{
- unsigned long cmd;
- int capctl;
+ unsigned long cmd = BT848_RISC_JUMP;
+ if (btv->loop_irq) {
+ cmd |= BT848_RISC_IRQ;
+ cmd |= (btv->loop_irq & 0x0f) << 16;
+ cmd |= (~btv->loop_irq & 0x0f) << 20;
+ }
+ btv->main.cpu[RISC_SLOT_LOOP] = cpu_to_le32(cmd);
+}
+
+static void bttv_set_irq_timer(struct bttv *btv)
+{
+ if (btv->curr.frame_irq || btv->loop_irq || btv->cvbi)
+ mod_timer(&btv->timeout, jiffies + BTTV_TIMEOUT);
+ else
+ del_timer(&btv->timeout);
+}
+
+static int bttv_set_capture_control(struct bttv *btv, int start_capture)
+{
+ int capctl = 0;
+
+ if (btv->curr.top || btv->curr.bottom)
+ capctl = BT848_CAP_CTL_CAPTURE_ODD |
+ BT848_CAP_CTL_CAPTURE_EVEN;
+
+ if (btv->cvbi)
+ capctl |= BT848_CAP_CTL_CAPTURE_VBI_ODD |
+ BT848_CAP_CTL_CAPTURE_VBI_EVEN;
+
+ capctl |= start_capture;
+
+ btaor(capctl, ~0x0f, BT848_CAP_CTL);
+
+ return capctl;
+}
+
+static void bttv_start_dma(struct bttv *btv)
+{
+ if (btv->dma_on)
+ return;
+ btwrite(btv->main.dma, BT848_RISC_STRT_ADD);
+ btor(BT848_GPIO_DMA_CTL_RISC_ENABLE | BT848_GPIO_DMA_CTL_FIFO_ENABLE,
+ BT848_GPIO_DMA_CTL);
+ btv->dma_on = 1;
+}
+
+static void bttv_stop_dma(struct bttv *btv)
+{
+ if (!btv->dma_on)
+ return;
+ btand(~(BT848_GPIO_DMA_CTL_RISC_ENABLE |
+ BT848_GPIO_DMA_CTL_FIFO_ENABLE), BT848_GPIO_DMA_CTL);
+ btv->dma_on = 0;
+}
- btv->cap_ctl = 0;
- if (NULL != btv->curr.top) btv->cap_ctl |= 0x02;
- if (NULL != btv->curr.bottom) btv->cap_ctl |= 0x01;
- if (NULL != btv->cvbi) btv->cap_ctl |= 0x0c;
+void bttv_set_dma(struct bttv *btv, int start_capture)
+{
+ int capctl = 0;
+
+ bttv_set_risc_status(btv);
+ bttv_set_irq_timer(btv);
+ capctl = bttv_set_capture_control(btv, start_capture);
- capctl = 0;
- capctl |= (btv->cap_ctl & 0x03) ? 0x03 : 0x00; /* capture */
- capctl |= (btv->cap_ctl & 0x0c) ? 0x0c : 0x00; /* vbi data */
- capctl |= override;
+ if (capctl)
+ bttv_start_dma(btv);
+ else
+ bttv_stop_dma(btv);
d2printk("%d: capctl=%x lirq=%d top=%08llx/%08llx even=%08llx/%08llx\n",
btv->c.nr,capctl,btv->loop_irq,
@@ -382,34 +438,6 @@ bttv_set_dma(struct bttv *btv, int override)
btv->curr.top ? (unsigned long long)btv->curr.top->top.dma : 0,
btv->cvbi ? (unsigned long long)btv->cvbi->bottom.dma : 0,
btv->curr.bottom ? (unsigned long long)btv->curr.bottom->bottom.dma : 0);
-
- cmd = BT848_RISC_JUMP;
- if (btv->loop_irq) {
- cmd |= BT848_RISC_IRQ;
- cmd |= (btv->loop_irq & 0x0f) << 16;
- cmd |= (~btv->loop_irq & 0x0f) << 20;
- }
- if (btv->curr.frame_irq || btv->loop_irq || btv->cvbi) {
- mod_timer(&btv->timeout, jiffies+BTTV_TIMEOUT);
- } else {
- del_timer(&btv->timeout);
- }
- btv->main.cpu[RISC_SLOT_LOOP] = cpu_to_le32(cmd);
-
- btaor(capctl, ~0x0f, BT848_CAP_CTL);
- if (capctl) {
- if (btv->dma_on)
- return;
- btwrite(btv->main.dma, BT848_RISC_STRT_ADD);
- btor(3, BT848_GPIO_DMA_CTL);
- btv->dma_on = 1;
- } else {
- if (!btv->dma_on)
- return;
- btand(~3, BT848_GPIO_DMA_CTL);
- btv->dma_on = 0;
- }
- return;
}
int
@@ -478,17 +506,50 @@ bttv_risc_hook(struct bttv *btv, int slot, struct btcx_riscmem *risc,
return 0;
}
-void
-bttv_dma_free(struct videobuf_queue *q,struct bttv *btv, struct bttv_buffer *buf)
+int bttv_buffer_risc_vbi(struct bttv *btv, struct bttv_buffer *buf)
{
- struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
-
- videobuf_waiton(q, &buf->vb, 0, 0);
- videobuf_dma_unmap(q->dev, dma);
- videobuf_dma_free(dma);
- btcx_riscmem_free(btv->c.pci,&buf->bottom);
- btcx_riscmem_free(btv->c.pci,&buf->top);
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
+ int r = 0;
+ unsigned int offset;
+ unsigned int bpl = 2044; /* max. vbipack */
+ unsigned int padding = VBI_BPL - bpl;
+ unsigned int skip_lines0 = 0;
+ unsigned int skip_lines1 = 0;
+ unsigned int min_vdelay = MIN_VDELAY;
+
+ const struct bttv_tvnorm *tvnorm = btv->vbi_fmt.tvnorm;
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vbuf.vb2_buf, 0);
+ struct scatterlist *list = sgt->sgl;
+
+ if (btv->vbi_fmt.fmt.count[0] > 0)
+ skip_lines0 = max(0, (btv->vbi_fmt.fmt.start[0] -
+ tvnorm->vbistart[0]));
+ if (btv->vbi_fmt.fmt.count[1] > 0)
+ skip_lines1 = max(0, (btv->vbi_fmt.fmt.start[1] -
+ tvnorm->vbistart[1]));
+
+ if (btv->vbi_fmt.fmt.count[0] > 0) {
+ r = bttv_risc_packed(btv, &buf->top, list, 0, bpl, padding,
+ skip_lines0, btv->vbi_fmt.fmt.count[0]);
+ if (r)
+ return r;
+ }
+
+ if (btv->vbi_fmt.fmt.count[1] > 0) {
+ offset = btv->vbi_fmt.fmt.count[0] * VBI_BPL;
+ r = bttv_risc_packed(btv, &buf->bottom, list, offset, bpl,
+ padding, skip_lines1,
+ btv->vbi_fmt.fmt.count[1]);
+ if (r)
+ return r;
+ }
+
+ if (btv->vbi_fmt.end >= tvnorm->cropcap.bounds.top)
+ min_vdelay += btv->vbi_fmt.end - tvnorm->cropcap.bounds.top;
+
+ /* For bttv_buffer_activate_vbi(). */
+ buf->geo.vdelay = min_vdelay;
+
+ return r;
}
int
@@ -508,8 +569,7 @@ bttv_buffer_activate_vbi(struct bttv *btv,
if (vbi) {
unsigned int crop, vdelay;
- vbi->vb.state = VIDEOBUF_ACTIVE;
- list_del(&vbi->vb.queue);
+ list_del(&vbi->list);
/* VDELAY is start of video, end of VBI capturing. */
crop = btread(BT848_E_CROP);
@@ -525,12 +585,12 @@ bttv_buffer_activate_vbi(struct bttv *btv,
btwrite(crop, BT848_O_CROP);
}
- if (vbi->vbi_count[0] > 0) {
+ if (btv->vbi_count[0] > 0) {
top = &vbi->top;
top_irq_flags = 4;
}
- if (vbi->vbi_count[1] > 0) {
+ if (btv->vbi_count[1] > 0) {
top_irq_flags = 0;
bottom = &vbi->bottom;
bottom_irq_flags = 4;
@@ -550,16 +610,13 @@ bttv_buffer_activate_video(struct bttv *btv,
/* video capture */
if (NULL != set->top && NULL != set->bottom) {
if (set->top == set->bottom) {
- set->top->vb.state = VIDEOBUF_ACTIVE;
- if (set->top->vb.queue.next)
- list_del(&set->top->vb.queue);
+ if (set->top->list.next)
+ list_del(&set->top->list);
} else {
- set->top->vb.state = VIDEOBUF_ACTIVE;
- set->bottom->vb.state = VIDEOBUF_ACTIVE;
- if (set->top->vb.queue.next)
- list_del(&set->top->vb.queue);
- if (set->bottom->vb.queue.next)
- list_del(&set->bottom->vb.queue);
+ if (set->top->list.next)
+ list_del(&set->top->list);
+ if (set->bottom->list.next)
+ list_del(&set->bottom->list);
}
bttv_apply_geo(btv, &set->top->geo, 1);
bttv_apply_geo(btv, &set->bottom->geo,0);
@@ -572,9 +629,8 @@ bttv_buffer_activate_video(struct bttv *btv,
btaor((set->top->btswap & 0x0a) | (set->bottom->btswap & 0x05),
~0x0f, BT848_COLOR_CTL);
} else if (NULL != set->top) {
- set->top->vb.state = VIDEOBUF_ACTIVE;
- if (set->top->vb.queue.next)
- list_del(&set->top->vb.queue);
+ if (set->top->list.next)
+ list_del(&set->top->list);
bttv_apply_geo(btv, &set->top->geo,1);
bttv_apply_geo(btv, &set->top->geo,0);
bttv_risc_hook(btv, RISC_SLOT_O_FIELD, &set->top->top,
@@ -583,9 +639,8 @@ bttv_buffer_activate_video(struct bttv *btv,
btaor(set->top->btformat & 0xff, ~0xff, BT848_COLOR_FMT);
btaor(set->top->btswap & 0x0f, ~0x0f, BT848_COLOR_CTL);
} else if (NULL != set->bottom) {
- set->bottom->vb.state = VIDEOBUF_ACTIVE;
- if (set->bottom->vb.queue.next)
- list_del(&set->bottom->vb.queue);
+ if (set->bottom->list.next)
+ list_del(&set->bottom->list);
bttv_apply_geo(btv, &set->bottom->geo,1);
bttv_apply_geo(btv, &set->bottom->geo,0);
bttv_risc_hook(btv, RISC_SLOT_O_FIELD, NULL, 0);
@@ -606,156 +661,146 @@ bttv_buffer_activate_video(struct bttv *btv,
int
bttv_buffer_risc(struct bttv *btv, struct bttv_buffer *buf)
{
- const struct bttv_tvnorm *tvnorm = bttv_tvnorms + buf->tvnorm;
- struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
-
- dprintk("%d: buffer field: %s format: 0x%08x size: %dx%d\n",
- btv->c.nr, v4l2_field_names[buf->vb.field],
- buf->fmt->fourcc, buf->vb.width, buf->vb.height);
+ int r = 0;
+ const struct bttv_tvnorm *tvnorm = bttv_tvnorms + btv->tvnorm;
+ struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vbuf.vb2_buf, 0);
+ struct scatterlist *list = sgt->sgl;
+ unsigned long size = (btv->fmt->depth * btv->width * btv->height) >> 3;
/* packed pixel modes */
- if (buf->fmt->flags & FORMAT_FLAGS_PACKED) {
- int bpl = (buf->fmt->depth >> 3) * buf->vb.width;
- int bpf = bpl * (buf->vb.height >> 1);
-
- bttv_calc_geo(btv,&buf->geo,buf->vb.width,buf->vb.height,
- V4L2_FIELD_HAS_BOTH(buf->vb.field),
- tvnorm,&buf->crop);
-
- switch (buf->vb.field) {
+ if (btv->fmt->flags & FORMAT_FLAGS_PACKED) {
+ int bpl = (btv->fmt->depth >> 3) * btv->width;
+ int bpf = bpl * (btv->height >> 1);
+
+ bttv_calc_geo(btv, &buf->geo, btv->width, btv->height,
+ V4L2_FIELD_HAS_BOTH(buf->vbuf.field), tvnorm,
+ &btv->crop[!!btv->do_crop].rect);
+ switch (buf->vbuf.field) {
case V4L2_FIELD_TOP:
- bttv_risc_packed(btv,&buf->top,dma->sglist,
- /* offset */ 0,bpl,
- /* padding */ 0,/* skip_lines */ 0,
- buf->vb.height);
+ r = bttv_risc_packed(btv, &buf->top, list, 0, bpl, 0,
+ 0, btv->height);
break;
case V4L2_FIELD_BOTTOM:
- bttv_risc_packed(btv,&buf->bottom,dma->sglist,
- 0,bpl,0,0,buf->vb.height);
+ r = bttv_risc_packed(btv, &buf->bottom, list, 0, bpl,
+ 0, 0, btv->height);
break;
case V4L2_FIELD_INTERLACED:
- bttv_risc_packed(btv,&buf->top,dma->sglist,
- 0,bpl,bpl,0,buf->vb.height >> 1);
- bttv_risc_packed(btv,&buf->bottom,dma->sglist,
- bpl,bpl,bpl,0,buf->vb.height >> 1);
+ r = bttv_risc_packed(btv, &buf->top, list, 0, bpl,
+ bpl, 0, btv->height >> 1);
+ r = bttv_risc_packed(btv, &buf->bottom, list, bpl,
+ bpl, bpl, 0, btv->height >> 1);
break;
case V4L2_FIELD_SEQ_TB:
- bttv_risc_packed(btv,&buf->top,dma->sglist,
- 0,bpl,0,0,buf->vb.height >> 1);
- bttv_risc_packed(btv,&buf->bottom,dma->sglist,
- bpf,bpl,0,0,buf->vb.height >> 1);
+ r = bttv_risc_packed(btv, &buf->top, list, 0, bpl, 0,
+ 0, btv->height >> 1);
+ r = bttv_risc_packed(btv, &buf->bottom, list, bpf,
+ bpl, 0, 0, btv->height >> 1);
break;
default:
- BUG();
+ WARN_ON(1);
+ return -EINVAL;
}
}
-
/* planar modes */
- if (buf->fmt->flags & FORMAT_FLAGS_PLANAR) {
+ if (btv->fmt->flags & FORMAT_FLAGS_PLANAR) {
int uoffset, voffset;
int ypadding, cpadding, lines;
/* calculate chroma offsets */
- uoffset = buf->vb.width * buf->vb.height;
- voffset = buf->vb.width * buf->vb.height;
- if (buf->fmt->flags & FORMAT_FLAGS_CrCb) {
+ uoffset = btv->width * btv->height;
+ voffset = btv->width * btv->height;
+ if (btv->fmt->flags & FORMAT_FLAGS_CrCb) {
/* Y-Cr-Cb plane order */
- uoffset >>= buf->fmt->hshift;
- uoffset >>= buf->fmt->vshift;
+ uoffset >>= btv->fmt->hshift;
+ uoffset >>= btv->fmt->vshift;
uoffset += voffset;
} else {
/* Y-Cb-Cr plane order */
- voffset >>= buf->fmt->hshift;
- voffset >>= buf->fmt->vshift;
+ voffset >>= btv->fmt->hshift;
+ voffset >>= btv->fmt->vshift;
voffset += uoffset;
}
-
- switch (buf->vb.field) {
+ switch (buf->vbuf.field) {
case V4L2_FIELD_TOP:
- bttv_calc_geo(btv,&buf->geo,buf->vb.width,
- buf->vb.height,/* both_fields */ 0,
- tvnorm,&buf->crop);
- bttv_risc_planar(btv, &buf->top, dma->sglist,
- 0,buf->vb.width,0,buf->vb.height,
- uoffset,voffset,buf->fmt->hshift,
- buf->fmt->vshift,0);
+ bttv_calc_geo(btv, &buf->geo, btv->width, btv->height,
+ 0, tvnorm,
+ &btv->crop[!!btv->do_crop].rect);
+ r = bttv_risc_planar(btv, &buf->top, list, 0,
+ btv->width, 0, btv->height,
+ uoffset, voffset,
+ btv->fmt->hshift,
+ btv->fmt->vshift, 0);
break;
case V4L2_FIELD_BOTTOM:
- bttv_calc_geo(btv,&buf->geo,buf->vb.width,
- buf->vb.height,0,
- tvnorm,&buf->crop);
- bttv_risc_planar(btv, &buf->bottom, dma->sglist,
- 0,buf->vb.width,0,buf->vb.height,
- uoffset,voffset,buf->fmt->hshift,
- buf->fmt->vshift,0);
+ bttv_calc_geo(btv, &buf->geo, btv->width, btv->height,
+ 0, tvnorm,
+ &btv->crop[!!btv->do_crop].rect);
+ r = bttv_risc_planar(btv, &buf->bottom, list, 0,
+ btv->width, 0, btv->height,
+ uoffset, voffset,
+ btv->fmt->hshift,
+ btv->fmt->vshift, 0);
break;
case V4L2_FIELD_INTERLACED:
- bttv_calc_geo(btv,&buf->geo,buf->vb.width,
- buf->vb.height,1,
- tvnorm,&buf->crop);
- lines = buf->vb.height >> 1;
- ypadding = buf->vb.width;
- cpadding = buf->vb.width >> buf->fmt->hshift;
- bttv_risc_planar(btv,&buf->top,
- dma->sglist,
- 0,buf->vb.width,ypadding,lines,
- uoffset,voffset,
- buf->fmt->hshift,
- buf->fmt->vshift,
- cpadding);
- bttv_risc_planar(btv,&buf->bottom,
- dma->sglist,
- ypadding,buf->vb.width,ypadding,lines,
- uoffset+cpadding,
- voffset+cpadding,
- buf->fmt->hshift,
- buf->fmt->vshift,
- cpadding);
+ bttv_calc_geo(btv, &buf->geo, btv->width, btv->height,
+ 1, tvnorm,
+ &btv->crop[!!btv->do_crop].rect);
+ lines = btv->height >> 1;
+ ypadding = btv->width;
+ cpadding = btv->width >> btv->fmt->hshift;
+ r = bttv_risc_planar(btv, &buf->top, list, 0,
+ btv->width, ypadding, lines,
+ uoffset, voffset,
+ btv->fmt->hshift,
+ btv->fmt->vshift, cpadding);
+
+ r = bttv_risc_planar(btv, &buf->bottom, list,
+ ypadding, btv->width, ypadding,
+ lines, uoffset + cpadding,
+ voffset + cpadding,
+ btv->fmt->hshift,
+ btv->fmt->vshift, cpadding);
break;
case V4L2_FIELD_SEQ_TB:
- bttv_calc_geo(btv,&buf->geo,buf->vb.width,
- buf->vb.height,1,
- tvnorm,&buf->crop);
- lines = buf->vb.height >> 1;
- ypadding = buf->vb.width;
- cpadding = buf->vb.width >> buf->fmt->hshift;
- bttv_risc_planar(btv,&buf->top,
- dma->sglist,
- 0,buf->vb.width,0,lines,
- uoffset >> 1,
- voffset >> 1,
- buf->fmt->hshift,
- buf->fmt->vshift,
- 0);
- bttv_risc_planar(btv,&buf->bottom,
- dma->sglist,
- lines * ypadding,buf->vb.width,0,lines,
- lines * ypadding + (uoffset >> 1),
- lines * ypadding + (voffset >> 1),
- buf->fmt->hshift,
- buf->fmt->vshift,
- 0);
+ bttv_calc_geo(btv, &buf->geo, btv->width, btv->height,
+ 1, tvnorm,
+ &btv->crop[!!btv->do_crop].rect);
+ lines = btv->height >> 1;
+ ypadding = btv->width;
+ cpadding = btv->width >> btv->fmt->hshift;
+ r = bttv_risc_planar(btv, &buf->top, list, 0,
+ btv->width, 0, lines,
+ uoffset >> 1, voffset >> 1,
+ btv->fmt->hshift,
+ btv->fmt->vshift, 0);
+ r = bttv_risc_planar(btv, &buf->bottom, list,
+ lines * ypadding,
+ btv->width, 0, lines,
+ lines * ypadding + (uoffset >> 1),
+ lines * ypadding + (voffset >> 1),
+ btv->fmt->hshift,
+ btv->fmt->vshift, 0);
break;
default:
- BUG();
+ WARN_ON(1);
+ return -EINVAL;
}
}
-
/* raw data */
- if (buf->fmt->flags & FORMAT_FLAGS_RAW) {
+ if (btv->fmt->flags & FORMAT_FLAGS_RAW) {
/* build risc code */
- buf->vb.field = V4L2_FIELD_SEQ_TB;
- bttv_calc_geo(btv,&buf->geo,tvnorm->swidth,tvnorm->sheight,
- 1,tvnorm,&buf->crop);
- bttv_risc_packed(btv, &buf->top, dma->sglist,
- /* offset */ 0, RAW_BPL, /* padding */ 0,
- /* skip_lines */ 0, RAW_LINES);
- bttv_risc_packed(btv, &buf->bottom, dma->sglist,
- buf->vb.size/2 , RAW_BPL, 0, 0, RAW_LINES);
+ buf->vbuf.field = V4L2_FIELD_SEQ_TB;
+ bttv_calc_geo(btv, &buf->geo, tvnorm->swidth, tvnorm->sheight,
+ 1, tvnorm, &btv->crop[!!btv->do_crop].rect);
+ r = bttv_risc_packed(btv, &buf->top, list, 0, RAW_BPL, 0, 0,
+ RAW_LINES);
+ r = bttv_risc_packed(btv, &buf->bottom, list, size / 2,
+ RAW_BPL, 0, 0, RAW_LINES);
}
/* copy format info */
- buf->btformat = buf->fmt->btformat;
- buf->btswap = buf->fmt->btswap;
- return 0;
+ buf->btformat = btv->fmt->btformat;
+ buf->btswap = btv->fmt->btswap;
+
+ return r;
}
diff --git a/drivers/media/pci/bt8xx/bttv-vbi.c b/drivers/media/pci/bt8xx/bttv-vbi.c
index ce36a2c0f60b..ab213e51ec95 100644
--- a/drivers/media/pci/bt8xx/bttv-vbi.c
+++ b/drivers/media/pci/bt8xx/bttv-vbi.c
@@ -34,16 +34,6 @@
to be about 244. */
#define VBI_OFFSET 244
-/* 2048 for compatibility with earlier driver versions. The driver
- really stores 1024 + tvnorm->vbipack * 4 samples per line in the
- buffer. Note tvnorm->vbipack is <= 0xFF (limit of VBIPACK_LO + HI
- is 0x1FF DWORDs) and VBI read()s store a frame counter in the last
- four bytes of the VBI image. */
-#define VBI_BPL 2048
-
-/* Compatibility. */
-#define VBI_DEFLINES 16
-
static unsigned int vbibufs = 4;
static unsigned int vbi_debug;
@@ -67,165 +57,123 @@ do { \
/* ----------------------------------------------------------------------- */
/* vbi risc code + mm */
-static int vbi_buffer_setup(struct videobuf_queue *q,
- unsigned int *count, unsigned int *size)
+static int queue_setup_vbi(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
{
- struct bttv_fh *fh = q->priv_data;
- struct bttv *btv = fh->btv;
-
- if (0 == *count)
- *count = vbibufs;
-
- *size = IMAGE_SIZE(&fh->vbi_fmt.fmt);
+ struct bttv *btv = vb2_get_drv_priv(q);
+ unsigned int size = IMAGE_SIZE(&btv->vbi_fmt.fmt);
- dprintk("setup: samples=%u start=%d,%d count=%u,%u\n",
- fh->vbi_fmt.fmt.samples_per_line,
- fh->vbi_fmt.fmt.start[0],
- fh->vbi_fmt.fmt.start[1],
- fh->vbi_fmt.fmt.count[0],
- fh->vbi_fmt.fmt.count[1]);
+ if (*num_planes)
+ return sizes[0] < size ? -EINVAL : 0;
+ *num_planes = 1;
+ sizes[0] = size;
return 0;
}
-static int vbi_buffer_prepare(struct videobuf_queue *q,
- struct videobuf_buffer *vb,
- enum v4l2_field field)
+static void buf_queue_vbi(struct vb2_buffer *vb)
{
- struct bttv_fh *fh = q->priv_data;
- struct bttv *btv = fh->btv;
- struct bttv_buffer *buf = container_of(vb,struct bttv_buffer,vb);
- const struct bttv_tvnorm *tvnorm;
- unsigned int skip_lines0, skip_lines1, min_vdelay;
- int redo_dma_risc;
- int rc;
-
- buf->vb.size = IMAGE_SIZE(&fh->vbi_fmt.fmt);
- if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
- return -EINVAL;
-
- tvnorm = fh->vbi_fmt.tvnorm;
-
- /* There's no VBI_VDELAY register, RISC must skip the lines
- we don't want. With default parameters we skip zero lines
- as earlier driver versions did. The driver permits video
- standard changes while capturing, so we use vbi_fmt.tvnorm
- instead of btv->tvnorm to skip zero lines after video
- standard changes as well. */
-
- skip_lines0 = 0;
- skip_lines1 = 0;
-
- if (fh->vbi_fmt.fmt.count[0] > 0)
- skip_lines0 = max(0, (fh->vbi_fmt.fmt.start[0]
- - tvnorm->vbistart[0]));
- if (fh->vbi_fmt.fmt.count[1] > 0)
- skip_lines1 = max(0, (fh->vbi_fmt.fmt.start[1]
- - tvnorm->vbistart[1]));
-
- redo_dma_risc = 0;
-
- if (buf->vbi_skip[0] != skip_lines0 ||
- buf->vbi_skip[1] != skip_lines1 ||
- buf->vbi_count[0] != fh->vbi_fmt.fmt.count[0] ||
- buf->vbi_count[1] != fh->vbi_fmt.fmt.count[1]) {
- buf->vbi_skip[0] = skip_lines0;
- buf->vbi_skip[1] = skip_lines1;
- buf->vbi_count[0] = fh->vbi_fmt.fmt.count[0];
- buf->vbi_count[1] = fh->vbi_fmt.fmt.count[1];
- redo_dma_risc = 1;
- }
-
- if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
- redo_dma_risc = 1;
- if (0 != (rc = videobuf_iolock(q, &buf->vb, NULL)))
- goto fail;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct bttv *btv = vb2_get_drv_priv(vq);
+ struct bttv_buffer *buf = container_of(vbuf, struct bttv_buffer, vbuf);
+ unsigned long flags;
+
+ spin_lock_irqsave(&btv->s_lock, flags);
+ if (list_empty(&btv->vcapture)) {
+ btv->loop_irq = BT848_RISC_VBI;
+ if (vb2_is_streaming(&btv->capq))
+ btv->loop_irq |= BT848_RISC_VIDEO;
+ bttv_set_dma(btv, BT848_CAP_CTL_CAPTURE_VBI_ODD |
+ BT848_CAP_CTL_CAPTURE_VBI_EVEN);
}
+ list_add_tail(&buf->list, &btv->vcapture);
+ spin_unlock_irqrestore(&btv->s_lock, flags);
+}
- if (redo_dma_risc) {
- unsigned int bpl, padding, offset;
- struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
-
- bpl = 2044; /* max. vbipack */
- padding = VBI_BPL - bpl;
-
- if (fh->vbi_fmt.fmt.count[0] > 0) {
- rc = bttv_risc_packed(btv, &buf->top,
- dma->sglist,
- /* offset */ 0, bpl,
- padding, skip_lines0,
- fh->vbi_fmt.fmt.count[0]);
- if (0 != rc)
- goto fail;
- }
-
- if (fh->vbi_fmt.fmt.count[1] > 0) {
- offset = fh->vbi_fmt.fmt.count[0] * VBI_BPL;
+static int buf_prepare_vbi(struct vb2_buffer *vb)
+{
+ int ret = 0;
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct bttv *btv = vb2_get_drv_priv(vq);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct bttv_buffer *buf = container_of(vbuf, struct bttv_buffer, vbuf);
+ unsigned int size = IMAGE_SIZE(&btv->vbi_fmt.fmt);
+
+ if (vb2_plane_size(vb, 0) < size)
+ return -EINVAL;
+ vb2_set_plane_payload(vb, 0, size);
+ buf->vbuf.field = V4L2_FIELD_NONE;
+ ret = bttv_buffer_risc_vbi(btv, buf);
- rc = bttv_risc_packed(btv, &buf->bottom,
- dma->sglist,
- offset, bpl,
- padding, skip_lines1,
- fh->vbi_fmt.fmt.count[1]);
- if (0 != rc)
- goto fail;
- }
- }
+ return ret;
+}
- /* VBI capturing ends at VDELAY, start of video capturing,
- no matter where the RISC program ends. VDELAY minimum is 2,
- bounds.top is the corresponding first field line number
- times two. VDELAY counts half field lines. */
- min_vdelay = MIN_VDELAY;
- if (fh->vbi_fmt.end >= tvnorm->cropcap.bounds.top)
- min_vdelay += fh->vbi_fmt.end - tvnorm->cropcap.bounds.top;
-
- /* For bttv_buffer_activate_vbi(). */
- buf->geo.vdelay = min_vdelay;
-
- buf->vb.state = VIDEOBUF_PREPARED;
- buf->vb.field = field;
- dprintk("buf prepare %p: top=%p bottom=%p field=%s\n",
- vb, &buf->top, &buf->bottom,
- v4l2_field_names[buf->vb.field]);
- return 0;
+static void buf_cleanup_vbi(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct bttv_buffer *buf = container_of(vbuf, struct bttv_buffer, vbuf);
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct bttv *btv = vb2_get_drv_priv(vq);
- fail:
- bttv_dma_free(q,btv,buf);
- return rc;
+ btcx_riscmem_free(btv->c.pci, &buf->top);
+ btcx_riscmem_free(btv->c.pci, &buf->bottom);
}
-static void
-vbi_buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
+static int start_streaming_vbi(struct vb2_queue *q, unsigned int count)
{
- struct bttv_fh *fh = q->priv_data;
- struct bttv *btv = fh->btv;
- struct bttv_buffer *buf = container_of(vb,struct bttv_buffer,vb);
-
- dprintk("queue %p\n",vb);
- buf->vb.state = VIDEOBUF_QUEUED;
- list_add_tail(&buf->vb.queue,&btv->vcapture);
- if (NULL == btv->cvbi) {
- fh->btv->loop_irq |= 4;
- bttv_set_dma(btv,0x0c);
+ int ret;
+ int seqnr = 0;
+ struct bttv_buffer *buf;
+ struct bttv *btv = vb2_get_drv_priv(q);
+
+ btv->framedrop = 0;
+ ret = check_alloc_btres_lock(btv, RESOURCE_VBI);
+ if (ret == 0) {
+ if (btv->field_count)
+ seqnr++;
+ while (!list_empty(&btv->vcapture)) {
+ buf = list_entry(btv->vcapture.next,
+ struct bttv_buffer, list);
+ list_del(&buf->list);
+ buf->vbuf.sequence = (btv->field_count >> 1) + seqnr++;
+ vb2_buffer_done(&buf->vbuf.vb2_buf,
+ VB2_BUF_STATE_QUEUED);
+ }
+ return !ret;
+ }
+ if (!vb2_is_streaming(&btv->capq)) {
+ init_irqreg(btv);
+ btv->field_count = 0;
}
+ return !ret;
}
-static void vbi_buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
+static void stop_streaming_vbi(struct vb2_queue *q)
{
- struct bttv_fh *fh = q->priv_data;
- struct bttv *btv = fh->btv;
- struct bttv_buffer *buf = container_of(vb,struct bttv_buffer,vb);
-
- dprintk("free %p\n",vb);
- bttv_dma_free(q,fh->btv,buf);
+ struct bttv *btv = vb2_get_drv_priv(q);
+ unsigned long flags;
+
+ vb2_wait_for_all_buffers(q);
+ spin_lock_irqsave(&btv->s_lock, flags);
+ free_btres_lock(btv, RESOURCE_VBI);
+ if (!vb2_is_streaming(&btv->capq)) {
+ /* stop field counter */
+ btand(~BT848_INT_VSYNC, BT848_INT_MASK);
+ }
+ spin_unlock_irqrestore(&btv->s_lock, flags);
}
-const struct videobuf_queue_ops bttv_vbi_qops = {
- .buf_setup = vbi_buffer_setup,
- .buf_prepare = vbi_buffer_prepare,
- .buf_queue = vbi_buffer_queue,
- .buf_release = vbi_buffer_release,
+const struct vb2_ops bttv_vbi_qops = {
+ .queue_setup = queue_setup_vbi,
+ .buf_queue = buf_queue_vbi,
+ .buf_prepare = buf_prepare_vbi,
+ .buf_cleanup = buf_cleanup_vbi,
+ .start_streaming = start_streaming_vbi,
+ .stop_streaming = stop_streaming_vbi,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
};
/* ----------------------------------------------------------------------- */
@@ -250,7 +198,7 @@ static int try_fmt(struct v4l2_vbi_format *f, const struct bttv_tvnorm *tvnorm,
if (min_start > max_start)
return -EBUSY;
- BUG_ON(max_start >= max_end);
+ WARN_ON(max_start >= max_end);
f->sampling_rate = tvnorm->Fsc;
f->samples_per_line = VBI_BPL;
@@ -299,8 +247,7 @@ static int try_fmt(struct v4l2_vbi_format *f, const struct bttv_tvnorm *tvnorm,
int bttv_try_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
{
- struct bttv_fh *fh = f;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
const struct bttv_tvnorm *tvnorm;
__s32 crop_start;
@@ -317,8 +264,7 @@ int bttv_try_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
int bttv_s_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
{
- struct bttv_fh *fh = f;
- struct bttv *btv = fh->btv;
+ struct bttv *btv = video_drvdata(file);
const struct bttv_tvnorm *tvnorm;
__s32 start1, end;
int rc;
@@ -326,7 +272,7 @@ int bttv_s_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
mutex_lock(&btv->lock);
rc = -EBUSY;
- if (fh->resources & RESOURCE_VBI)
+ if (btv->resources & RESOURCE_VBI)
goto fail;
tvnorm = &bttv_tvnorms[btv->tvnorm];
@@ -346,13 +292,9 @@ int bttv_s_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
because vbi_fmt.end counts field lines times two. */
end = max(frt->fmt.vbi.start[0], start1) * 2 + 2;
- mutex_lock(&fh->vbi.vb_lock);
-
- fh->vbi_fmt.fmt = frt->fmt.vbi;
- fh->vbi_fmt.tvnorm = tvnorm;
- fh->vbi_fmt.end = end;
-
- mutex_unlock(&fh->vbi.vb_lock);
+ btv->vbi_fmt.fmt = frt->fmt.vbi;
+ btv->vbi_fmt.tvnorm = tvnorm;
+ btv->vbi_fmt.end = end;
rc = 0;
@@ -365,14 +307,14 @@ int bttv_s_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
int bttv_g_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
{
- struct bttv_fh *fh = f;
const struct bttv_tvnorm *tvnorm;
+ struct bttv *btv = video_drvdata(file);
- frt->fmt.vbi = fh->vbi_fmt.fmt;
+ frt->fmt.vbi = btv->vbi_fmt.fmt;
- tvnorm = &bttv_tvnorms[fh->btv->tvnorm];
+ tvnorm = &bttv_tvnorms[btv->tvnorm];
- if (tvnorm != fh->vbi_fmt.tvnorm) {
+ if (tvnorm != btv->vbi_fmt.tvnorm) {
__s32 max_end;
unsigned int i;
@@ -388,9 +330,8 @@ int bttv_g_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
for (i = 0; i < 2; ++i) {
__s32 new_start;
- new_start = frt->fmt.vbi.start[i]
- + tvnorm->vbistart[i]
- - fh->vbi_fmt.tvnorm->vbistart[i];
+ new_start = frt->fmt.vbi.start[i] + tvnorm->vbistart[i]
+ - btv->vbi_fmt.tvnorm->vbistart[i];
frt->fmt.vbi.start[i] = min(new_start, max_end - 1);
frt->fmt.vbi.count[i] =
@@ -430,8 +371,8 @@ void bttv_vbi_fmt_reset(struct bttv_vbi_fmt *f, unsigned int norm)
real_count = ((tvnorm->cropcap.defrect.top >> 1)
- tvnorm->vbistart[0]);
- BUG_ON(real_samples_per_line > VBI_BPL);
- BUG_ON(real_count > VBI_DEFLINES);
+ WARN_ON(real_samples_per_line > VBI_BPL);
+ WARN_ON(real_count > VBI_DEFLINES);
f->tvnorm = tvnorm;
diff --git a/drivers/media/pci/bt8xx/bttvp.h b/drivers/media/pci/bt8xx/bttvp.h
index 717f002a41df..0368a583cf07 100644
--- a/drivers/media/pci/bt8xx/bttvp.h
+++ b/drivers/media/pci/bt8xx/bttvp.h
@@ -26,7 +26,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
-#include <media/videobuf-dma-sg.h>
+#include <media/videobuf2-dma-sg.h>
#include <media/tveeprom.h>
#include <media/rc-core.h>
#include <media/i2c/ir-kbd-i2c.h>
@@ -142,19 +142,15 @@ struct bttv_geometry {
struct bttv_buffer {
/* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
+ struct vb2_v4l2_buffer vbuf;
+ struct list_head list;
/* bttv specific */
- const struct bttv_format *fmt;
- unsigned int tvnorm;
int btformat;
int btswap;
struct bttv_geometry geo;
struct btcx_riscmem top;
struct btcx_riscmem bottom;
- struct v4l2_rect crop;
- unsigned int vbi_skip[2];
- unsigned int vbi_count[2];
};
struct bttv_buffer_set {
@@ -176,6 +172,8 @@ struct bttv_vbi_fmt {
};
/* bttv-vbi.c */
+extern const struct vb2_ops bttv_vbi_qops;
+
void bttv_vbi_fmt_reset(struct bttv_vbi_fmt *f, unsigned int norm);
struct bttv_crop {
@@ -192,31 +190,6 @@ struct bttv_crop {
__s32 max_scaled_height;
};
-struct bttv_fh {
- /* This must be the first field in this struct */
- struct v4l2_fh fh;
-
- struct bttv *btv;
- int resources;
- enum v4l2_buf_type type;
-
- /* video capture */
- struct videobuf_queue cap;
- const struct bttv_format *fmt;
- int width;
- int height;
-
- /* Application called VIDIOC_S_SELECTION. */
- int do_crop;
-
- /* vbi capture */
- struct videobuf_queue vbi;
- /* Current VBI capture window as seen through this fh (cannot
- be global for compatibility with earlier drivers). Protected
- by struct bttv.lock and struct bttv_fh.vbi.lock. */
- struct bttv_vbi_fmt vbi_fmt;
-};
-
/* ---------------------------------------------------------- */
/* bttv-risc.c */
@@ -237,20 +210,27 @@ int bttv_risc_hook(struct bttv *btv, int slot, struct btcx_riscmem *risc,
int bttv_buffer_risc(struct bttv *btv, struct bttv_buffer *buf);
int bttv_buffer_activate_video(struct bttv *btv,
struct bttv_buffer_set *set);
+int bttv_buffer_risc_vbi(struct bttv *btv, struct bttv_buffer *buf);
int bttv_buffer_activate_vbi(struct bttv *btv,
struct bttv_buffer *vbi);
-void bttv_dma_free(struct videobuf_queue *q, struct bttv *btv,
- struct bttv_buffer *buf);
/* ---------------------------------------------------------- */
/* bttv-vbi.c */
+/*
+ * 2048 for compatibility with earlier driver versions. The driver really
+ * stores 1024 + tvnorm->vbipack * 4 samples per line in the buffer. Note
+ * tvnorm->vbipack is <= 0xFF (limit of VBIPACK_LO + HI is 0x1FF DWORDs) and
+ * VBI read()s store a frame counter in the last four bytes of the VBI image.
+ */
+#define VBI_BPL 2048
+
+#define VBI_DEFLINES 16
+
int bttv_try_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f);
int bttv_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f);
int bttv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f);
-extern const struct videobuf_queue_ops bttv_vbi_qops;
-
/* ---------------------------------------------------------- */
/* bttv-gpio.c */
@@ -275,6 +255,8 @@ extern int fini_bttv_i2c(struct bttv *btv);
extern unsigned int bttv_verbose;
extern unsigned int bttv_debug;
extern unsigned int bttv_gpio;
+int check_alloc_btres_lock(struct bttv *btv, int bit);
+void free_btres_lock(struct bttv *btv, int bits);
extern void bttv_gpio_tracking(struct bttv *btv, char *comment);
#define dprintk(fmt, ...) \
@@ -396,7 +378,7 @@ struct bttv {
v4l2_std_id std;
int hue, contrast, bright, saturation;
struct v4l2_framebuffer fbuf;
- unsigned int field_count;
+ __u32 field_count;
/* various options */
int opt_combfilter;
@@ -436,7 +418,6 @@ struct bttv {
int loop_irq;
int new_input;
- unsigned long cap_ctl;
unsigned long dma_on;
struct timer_list timeout;
struct bttv_suspend_state state;
@@ -448,7 +429,25 @@ struct bttv {
unsigned int irq_me;
unsigned int users;
- struct bttv_fh init;
+ struct v4l2_fh fh;
+ enum v4l2_buf_type type;
+
+ enum v4l2_field field;
+ int field_last;
+
+ /* video capture */
+ struct vb2_queue capq;
+ const struct bttv_format *fmt;
+ int width;
+ int height;
+
+ /* vbi capture */
+ struct vb2_queue vbiq;
+ struct bttv_vbi_fmt vbi_fmt;
+ unsigned int vbi_count[2];
+
+ /* Application called VIDIOC_S_SELECTION. */
+ int do_crop;
/* used to make dvb-bt8xx autoloadable */
struct work_struct request_module_wk;
@@ -487,6 +486,8 @@ static inline unsigned int bttv_muxsel(const struct bttv *btv,
#endif
+void init_irqreg(struct bttv *btv);
+
#define btwrite(dat,adr) writel((dat), btv->bt848_mmio+(adr))
#define btread(adr) readl(btv->bt848_mmio+(adr))
diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
index 160c8377e352..c85eb8d25837 100644
--- a/drivers/media/pci/cx18/cx18-gpio.c
+++ b/drivers/media/pci/cx18/cx18-gpio.c
@@ -307,7 +307,7 @@ int cx18_gpio_register(struct cx18 *cx, u32 hw)
void cx18_reset_ir_gpio(void *data)
{
- struct cx18 *cx = to_cx18((struct v4l2_device *)data);
+ struct cx18 *cx = to_cx18(data);
if (cx->card->gpio_i2c_slave_reset.ir_reset_mask == 0)
return;
diff --git a/drivers/media/pci/cx18/cx18-irq.c b/drivers/media/pci/cx18/cx18-irq.c
index fb10e9c2c5b8..db63077821b1 100644
--- a/drivers/media/pci/cx18/cx18-irq.c
+++ b/drivers/media/pci/cx18/cx18-irq.c
@@ -30,7 +30,7 @@ static void epu_cmd(struct cx18 *cx, u32 sw1)
irqreturn_t cx18_irq_handler(int irq, void *dev_id)
{
- struct cx18 *cx = (struct cx18 *)dev_id;
+ struct cx18 *cx = dev_id;
u32 sw1, sw2, hw2;
sw1 = cx18_read_reg(cx, SW1_INT_STATUS) & cx->sw1_irq_mask;
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 2ce2914576cf..c8705d786cdd 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -554,14 +554,14 @@ void cx23885_sram_channel_dump(struct cx23885_dev *dev,
for (i = 0; i < 4; i++) {
risc = cx_read(ch->cmds_start + 4 * (i + 14));
- pr_warn("%s: risc%d: ", dev->name, i);
+ pr_warn("%s: risc%d:", dev->name, i);
cx23885_risc_decode(risc);
}
for (i = 0; i < (64 >> 2); i += n) {
risc = cx_read(ch->ctrl_start + 4 * i);
/* No consideration for bits 63-32 */
- pr_warn("%s: (0x%08x) iq %x: ", dev->name,
+ pr_warn("%s: (0x%08x) iq %x:", dev->name,
ch->ctrl_start + 4 * i, i);
n = cx23885_risc_decode(risc);
for (j = 1; j < n; j++) {
@@ -594,7 +594,7 @@ static void cx23885_risc_disasm(struct cx23885_tsport *port,
pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
dev->name, risc->cpu, (unsigned long)risc->dma);
for (i = 0; i < (risc->size >> 2); i += n) {
- pr_info("%s: %04d: ", dev->name, i);
+ pr_info("%s: %04d:", dev->name, i);
n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
for (j = 1; j < n; j++)
pr_info("%s: %04d: 0x%08x [ arg #%d ]\n",
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index 671fc0588e43..9af2c5596121 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -413,7 +413,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
dev->height >> 1);
break;
default:
- BUG();
+ return -EINVAL; /* should not happen */
}
dprintk(2, "[%p/%d] buffer_init - %dx%d %dbpp 0x%08x - dma=0x%08lx\n",
buf, buf->vb.vb2_buf.index,
diff --git a/drivers/media/pci/intel/Kconfig b/drivers/media/pci/intel/Kconfig
new file mode 100644
index 000000000000..e113902fa806
--- /dev/null
+++ b/drivers/media/pci/intel/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config IPU_BRIDGE
+ tristate
+ depends on I2C && ACPI
+ help
+ This is a helper module for the IPU bridge, which can be
+ used by ipu3 and other drivers. In order to handle module
+ dependencies, this is selected by each driver that needs it.
+
+source "drivers/media/pci/intel/ipu3/Kconfig"
+source "drivers/media/pci/intel/ivsc/Kconfig"
diff --git a/drivers/media/pci/intel/Makefile b/drivers/media/pci/intel/Makefile
index 0b4236c4db49..f199a97e1d78 100644
--- a/drivers/media/pci/intel/Makefile
+++ b/drivers/media/pci/intel/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# Makefile for the IPU3 cio2 and ImGU drivers
+# Makefile for the IPU drivers
#
-
+obj-$(CONFIG_IPU_BRIDGE) += ipu-bridge.o
obj-y += ipu3/
+obj-y += ivsc/
diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
new file mode 100644
index 000000000000..1bde8b6e0b11
--- /dev/null
+++ b/drivers/media/pci/intel/ipu-bridge.c
@@ -0,0 +1,814 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Dan Scally <djrscally@gmail.com> */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include <media/ipu-bridge.h>
+#include <media/v4l2-fwnode.h>
+
+/*
+ * 92335fcf-3203-4472-af93-7b4453ac29da
+ *
+ * Used to build the MEI CSI device name to look up the MEI CSI device by
+ * device_find_child_by_name().
+ */
+#define MEI_CSI_UUID \
+ UUID_LE(0x92335FCF, 0x3203, 0x4472, \
+ 0xAF, 0x93, 0x7B, 0x44, 0x53, 0xAC, 0x29, 0xDA)
+
+/*
+ * IVSC device name
+ *
+ * Used to match IVSC device by ipu_bridge_match_ivsc_dev()
+ */
+#define IVSC_DEV_NAME "intel_vsc"
+
+/*
+ * Extend this array with ACPI Hardware IDs of devices known to be working
+ * plus the number of link-frequencies expected by their drivers, along with
+ * the frequency values in hertz. This is a somewhat opportunistic way of adding
+ * support for now, in the hope that a better source for the information
+ * (possibly some encoded value in the SSDB buffer that we're unaware of)
+ * becomes apparent in the future.
+ *
+ * Do not add an entry for a sensor that is not actually supported.
+ */
+static const struct ipu_sensor_config ipu_supported_sensors[] = {
+ /* Omnivision OV5693 */
+ IPU_SENSOR_CONFIG("INT33BE", 1, 419200000),
+ /* Omnivision OV8865 */
+ IPU_SENSOR_CONFIG("INT347A", 1, 360000000),
+ /* Omnivision OV7251 */
+ IPU_SENSOR_CONFIG("INT347E", 1, 319200000),
+ /* Omnivision OV2680 */
+ IPU_SENSOR_CONFIG("OVTI2680", 1, 331200000),
+ /* Omnivision ov8856 */
+ IPU_SENSOR_CONFIG("OVTI8856", 3, 180000000, 360000000, 720000000),
+ /* Omnivision ov2740 */
+ IPU_SENSOR_CONFIG("INT3474", 1, 360000000),
+ /* Hynix hi556 */
+ IPU_SENSOR_CONFIG("INT3537", 1, 437000000),
+ /* Omnivision ov13b10 */
+ IPU_SENSOR_CONFIG("OVTIDB10", 1, 560000000),
+ /* GalaxyCore GC0310 */
+ IPU_SENSOR_CONFIG("INT0310", 0),
+};
+
+static const struct ipu_property_names prop_names = {
+ .clock_frequency = "clock-frequency",
+ .rotation = "rotation",
+ .orientation = "orientation",
+ .bus_type = "bus-type",
+ .data_lanes = "data-lanes",
+ .remote_endpoint = "remote-endpoint",
+ .link_frequencies = "link-frequencies",
+};
+
+static const char * const ipu_vcm_types[] = {
+ "ad5823",
+ "dw9714",
+ "ad5816",
+ "dw9719",
+ "dw9718",
+ "dw9806b",
+ "wv517s",
+ "lc898122xa",
+ "lc898212axb",
+};
+
+/*
+ * ACPI IDs used by ipu_bridge_get_ivsc_acpi_dev() to find the IVSC ACPI
+ * device, instead of matching device and driver to probe the IVSC device.
+ */
+static const struct acpi_device_id ivsc_acpi_ids[] = {
+ { "INTC1059" },
+ { "INTC1095" },
+ { "INTC100A" },
+ { "INTC10CF" },
+};
+
+static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev)
+{
+ acpi_handle handle = acpi_device_handle(adev);
+ struct acpi_device *consumer, *ivsc_adev;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ivsc_acpi_ids); i++) {
+ const struct acpi_device_id *acpi_id = &ivsc_acpi_ids[i];
+
+ for_each_acpi_dev_match(ivsc_adev, acpi_id->id, NULL, -1)
+ /* camera sensor depends on IVSC in DSDT if it exists */
+ for_each_acpi_consumer_dev(ivsc_adev, consumer)
+ if (consumer->handle == handle)
+ return ivsc_adev;
+ }
+
+ return NULL;
+}
+
+static int ipu_bridge_match_ivsc_dev(struct device *dev, const void *adev)
+{
+ if (ACPI_COMPANION(dev) != adev)
+ return 0;
+
+ if (!sysfs_streq(dev_name(dev), IVSC_DEV_NAME))
+ return 0;
+
+ return 1;
+}
+
+static struct device *ipu_bridge_get_ivsc_csi_dev(struct acpi_device *adev)
+{
+ struct device *dev, *csi_dev;
+ uuid_le uuid = MEI_CSI_UUID;
+ char name[64];
+
+ /* IVSC device on platform bus */
+ dev = bus_find_device(&platform_bus_type, NULL, adev,
+ ipu_bridge_match_ivsc_dev);
+ if (dev) {
+ snprintf(name, sizeof(name), "%s-%pUl", dev_name(dev), &uuid);
+
+ csi_dev = device_find_child_by_name(dev, name);
+
+ put_device(dev);
+
+ return csi_dev;
+ }
+
+ return NULL;
+}
+
+static int ipu_bridge_check_ivsc_dev(struct ipu_sensor *sensor,
+ struct acpi_device *sensor_adev)
+{
+ struct acpi_device *adev;
+ struct device *csi_dev;
+
+ adev = ipu_bridge_get_ivsc_acpi_dev(sensor_adev);
+ if (adev) {
+ csi_dev = ipu_bridge_get_ivsc_csi_dev(adev);
+ if (!csi_dev) {
+ acpi_dev_put(adev);
+ dev_err(&adev->dev, "Failed to find MEI CSI dev\n");
+ return -ENODEV;
+ }
+
+ sensor->csi_dev = csi_dev;
+ sensor->ivsc_adev = adev;
+ }
+
+ return 0;
+}
+
+static int ipu_bridge_read_acpi_buffer(struct acpi_device *adev, char *id,
+ void *data, u32 size)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ int ret = 0;
+
+ status = acpi_evaluate_object(adev->handle, id, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ obj = buffer.pointer;
+ if (!obj) {
+ dev_err(&adev->dev, "Couldn't locate ACPI buffer\n");
+ return -ENODEV;
+ }
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ dev_err(&adev->dev, "Not an ACPI buffer\n");
+ ret = -ENODEV;
+ goto out_free_buff;
+ }
+
+ if (obj->buffer.length > size) {
+ dev_err(&adev->dev, "Given buffer is too small\n");
+ ret = -EINVAL;
+ goto out_free_buff;
+ }
+
+ memcpy(data, obj->buffer.pointer, obj->buffer.length);
+
+out_free_buff:
+ kfree(buffer.pointer);
+ return ret;
+}
+
+static u32 ipu_bridge_parse_rotation(struct acpi_device *adev,
+ struct ipu_sensor_ssdb *ssdb)
+{
+ switch (ssdb->degree) {
+ case IPU_SENSOR_ROTATION_NORMAL:
+ return 0;
+ case IPU_SENSOR_ROTATION_INVERTED:
+ return 180;
+ default:
+ dev_warn(&adev->dev,
+ "Unknown rotation %d. Assume 0 degree rotation\n",
+ ssdb->degree);
+ return 0;
+ }
+}
+
+static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct acpi_device *adev)
+{
+ enum v4l2_fwnode_orientation orientation;
+ struct acpi_pld_info *pld;
+ acpi_status status;
+
+ status = acpi_get_physical_device_location(adev->handle, &pld);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(&adev->dev, "_PLD call failed, using default orientation\n");
+ return V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ }
+
+ switch (pld->panel) {
+ case ACPI_PLD_PANEL_FRONT:
+ orientation = V4L2_FWNODE_ORIENTATION_FRONT;
+ break;
+ case ACPI_PLD_PANEL_BACK:
+ orientation = V4L2_FWNODE_ORIENTATION_BACK;
+ break;
+ case ACPI_PLD_PANEL_TOP:
+ case ACPI_PLD_PANEL_LEFT:
+ case ACPI_PLD_PANEL_RIGHT:
+ case ACPI_PLD_PANEL_UNKNOWN:
+ orientation = V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ break;
+ default:
+ dev_warn(&adev->dev, "Unknown _PLD panel val %d\n", pld->panel);
+ orientation = V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ break;
+ }
+
+ ACPI_FREE(pld);
+ return orientation;
+}
+
+int ipu_bridge_parse_ssdb(struct acpi_device *adev, struct ipu_sensor *sensor)
+{
+ struct ipu_sensor_ssdb ssdb = {};
+ int ret;
+
+ ret = ipu_bridge_read_acpi_buffer(adev, "SSDB", &ssdb, sizeof(ssdb));
+ if (ret)
+ return ret;
+
+ if (ssdb.vcmtype > ARRAY_SIZE(ipu_vcm_types)) {
+ dev_warn(&adev->dev, "Unknown VCM type %d\n", ssdb.vcmtype);
+ ssdb.vcmtype = 0;
+ }
+
+ if (ssdb.lanes > IPU_MAX_LANES) {
+ dev_err(&adev->dev, "Number of lanes in SSDB is invalid\n");
+ return -EINVAL;
+ }
+
+ sensor->link = ssdb.link;
+ sensor->lanes = ssdb.lanes;
+ sensor->mclkspeed = ssdb.mclkspeed;
+ sensor->rotation = ipu_bridge_parse_rotation(adev, &ssdb);
+ sensor->orientation = ipu_bridge_parse_orientation(adev);
+
+ if (ssdb.vcmtype)
+ sensor->vcm_type = ipu_vcm_types[ssdb.vcmtype - 1];
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu_bridge_parse_ssdb, INTEL_IPU_BRIDGE);
+
+static void ipu_bridge_create_fwnode_properties(
+ struct ipu_sensor *sensor,
+ struct ipu_bridge *bridge,
+ const struct ipu_sensor_config *cfg)
+{
+ struct ipu_property_names *names = &sensor->prop_names;
+ struct software_node *nodes = sensor->swnodes;
+
+ sensor->prop_names = prop_names;
+
+ if (sensor->csi_dev) {
+ sensor->local_ref[0] =
+ SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_IVSC_SENSOR_ENDPOINT]);
+ sensor->remote_ref[0] =
+ SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_IVSC_IPU_ENDPOINT]);
+ sensor->ivsc_sensor_ref[0] =
+ SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_SENSOR_ENDPOINT]);
+ sensor->ivsc_ipu_ref[0] =
+ SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_IPU_ENDPOINT]);
+
+ sensor->ivsc_sensor_ep_properties[0] =
+ PROPERTY_ENTRY_U32(names->bus_type,
+ V4L2_FWNODE_BUS_TYPE_CSI2_DPHY);
+ sensor->ivsc_sensor_ep_properties[1] =
+ PROPERTY_ENTRY_U32_ARRAY_LEN(names->data_lanes,
+ bridge->data_lanes,
+ sensor->lanes);
+ sensor->ivsc_sensor_ep_properties[2] =
+ PROPERTY_ENTRY_REF_ARRAY(names->remote_endpoint,
+ sensor->ivsc_sensor_ref);
+
+ sensor->ivsc_ipu_ep_properties[0] =
+ PROPERTY_ENTRY_U32(names->bus_type,
+ V4L2_FWNODE_BUS_TYPE_CSI2_DPHY);
+ sensor->ivsc_ipu_ep_properties[1] =
+ PROPERTY_ENTRY_U32_ARRAY_LEN(names->data_lanes,
+ bridge->data_lanes,
+ sensor->lanes);
+ sensor->ivsc_ipu_ep_properties[2] =
+ PROPERTY_ENTRY_REF_ARRAY(names->remote_endpoint,
+ sensor->ivsc_ipu_ref);
+ } else {
+ sensor->local_ref[0] =
+ SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_IPU_ENDPOINT]);
+ sensor->remote_ref[0] =
+ SOFTWARE_NODE_REFERENCE(&nodes[SWNODE_SENSOR_ENDPOINT]);
+ }
+
+ sensor->dev_properties[0] = PROPERTY_ENTRY_U32(
+ sensor->prop_names.clock_frequency,
+ sensor->mclkspeed);
+ sensor->dev_properties[1] = PROPERTY_ENTRY_U32(
+ sensor->prop_names.rotation,
+ sensor->rotation);
+ sensor->dev_properties[2] = PROPERTY_ENTRY_U32(
+ sensor->prop_names.orientation,
+ sensor->orientation);
+ if (sensor->vcm_type) {
+ sensor->vcm_ref[0] =
+ SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_VCM]);
+ sensor->dev_properties[3] =
+ PROPERTY_ENTRY_REF_ARRAY("lens-focus", sensor->vcm_ref);
+ }
+
+ sensor->ep_properties[0] = PROPERTY_ENTRY_U32(
+ sensor->prop_names.bus_type,
+ V4L2_FWNODE_BUS_TYPE_CSI2_DPHY);
+ sensor->ep_properties[1] = PROPERTY_ENTRY_U32_ARRAY_LEN(
+ sensor->prop_names.data_lanes,
+ bridge->data_lanes, sensor->lanes);
+ sensor->ep_properties[2] = PROPERTY_ENTRY_REF_ARRAY(
+ sensor->prop_names.remote_endpoint,
+ sensor->local_ref);
+
+ if (cfg->nr_link_freqs > 0)
+ sensor->ep_properties[3] = PROPERTY_ENTRY_U64_ARRAY_LEN(
+ sensor->prop_names.link_frequencies,
+ cfg->link_freqs,
+ cfg->nr_link_freqs);
+
+ sensor->ipu_properties[0] = PROPERTY_ENTRY_U32_ARRAY_LEN(
+ sensor->prop_names.data_lanes,
+ bridge->data_lanes, sensor->lanes);
+ sensor->ipu_properties[1] = PROPERTY_ENTRY_REF_ARRAY(
+ sensor->prop_names.remote_endpoint,
+ sensor->remote_ref);
+}
+
+static void ipu_bridge_init_swnode_names(struct ipu_sensor *sensor)
+{
+ snprintf(sensor->node_names.remote_port,
+ sizeof(sensor->node_names.remote_port),
+ SWNODE_GRAPH_PORT_NAME_FMT, sensor->link);
+ snprintf(sensor->node_names.port,
+ sizeof(sensor->node_names.port),
+ SWNODE_GRAPH_PORT_NAME_FMT, 0); /* Always port 0 */
+ snprintf(sensor->node_names.endpoint,
+ sizeof(sensor->node_names.endpoint),
+ SWNODE_GRAPH_ENDPOINT_NAME_FMT, 0); /* And endpoint 0 */
+ if (sensor->vcm_type) {
+ /* append link to distinguish nodes with same model VCM */
+ snprintf(sensor->node_names.vcm, sizeof(sensor->node_names.vcm),
+ "%s-%u", sensor->vcm_type, sensor->link);
+ }
+
+ if (sensor->csi_dev) {
+ snprintf(sensor->node_names.ivsc_sensor_port,
+ sizeof(sensor->node_names.ivsc_sensor_port),
+ SWNODE_GRAPH_PORT_NAME_FMT, 0);
+ snprintf(sensor->node_names.ivsc_ipu_port,
+ sizeof(sensor->node_names.ivsc_ipu_port),
+ SWNODE_GRAPH_PORT_NAME_FMT, 1);
+ }
+}
+
+static void ipu_bridge_init_swnode_group(struct ipu_sensor *sensor)
+{
+ struct software_node *nodes = sensor->swnodes;
+
+ sensor->group[SWNODE_SENSOR_HID] = &nodes[SWNODE_SENSOR_HID];
+ sensor->group[SWNODE_SENSOR_PORT] = &nodes[SWNODE_SENSOR_PORT];
+ sensor->group[SWNODE_SENSOR_ENDPOINT] = &nodes[SWNODE_SENSOR_ENDPOINT];
+ sensor->group[SWNODE_IPU_PORT] = &nodes[SWNODE_IPU_PORT];
+ sensor->group[SWNODE_IPU_ENDPOINT] = &nodes[SWNODE_IPU_ENDPOINT];
+ if (sensor->vcm_type)
+ sensor->group[SWNODE_VCM] = &nodes[SWNODE_VCM];
+
+ if (sensor->csi_dev) {
+ sensor->group[SWNODE_IVSC_HID] =
+ &nodes[SWNODE_IVSC_HID];
+ sensor->group[SWNODE_IVSC_SENSOR_PORT] =
+ &nodes[SWNODE_IVSC_SENSOR_PORT];
+ sensor->group[SWNODE_IVSC_SENSOR_ENDPOINT] =
+ &nodes[SWNODE_IVSC_SENSOR_ENDPOINT];
+ sensor->group[SWNODE_IVSC_IPU_PORT] =
+ &nodes[SWNODE_IVSC_IPU_PORT];
+ sensor->group[SWNODE_IVSC_IPU_ENDPOINT] =
+ &nodes[SWNODE_IVSC_IPU_ENDPOINT];
+
+ if (sensor->vcm_type)
+ sensor->group[SWNODE_VCM] = &nodes[SWNODE_VCM];
+ } else {
+ if (sensor->vcm_type)
+ sensor->group[SWNODE_IVSC_HID] = &nodes[SWNODE_VCM];
+ }
+}
+
+static void ipu_bridge_create_connection_swnodes(struct ipu_bridge *bridge,
+ struct ipu_sensor *sensor)
+{
+ struct ipu_node_names *names = &sensor->node_names;
+ struct software_node *nodes = sensor->swnodes;
+
+ ipu_bridge_init_swnode_names(sensor);
+
+ nodes[SWNODE_SENSOR_HID] = NODE_SENSOR(sensor->name,
+ sensor->dev_properties);
+ nodes[SWNODE_SENSOR_PORT] = NODE_PORT(sensor->node_names.port,
+ &nodes[SWNODE_SENSOR_HID]);
+ nodes[SWNODE_SENSOR_ENDPOINT] = NODE_ENDPOINT(
+ sensor->node_names.endpoint,
+ &nodes[SWNODE_SENSOR_PORT],
+ sensor->ep_properties);
+ nodes[SWNODE_IPU_PORT] = NODE_PORT(sensor->node_names.remote_port,
+ &bridge->ipu_hid_node);
+ nodes[SWNODE_IPU_ENDPOINT] = NODE_ENDPOINT(
+ sensor->node_names.endpoint,
+ &nodes[SWNODE_IPU_PORT],
+ sensor->ipu_properties);
+
+ if (sensor->csi_dev) {
+ snprintf(sensor->ivsc_name, sizeof(sensor->ivsc_name), "%s-%u",
+ acpi_device_hid(sensor->ivsc_adev), sensor->link);
+
+ nodes[SWNODE_IVSC_HID] = NODE_SENSOR(sensor->ivsc_name,
+ sensor->ivsc_properties);
+ nodes[SWNODE_IVSC_SENSOR_PORT] =
+ NODE_PORT(names->ivsc_sensor_port,
+ &nodes[SWNODE_IVSC_HID]);
+ nodes[SWNODE_IVSC_SENSOR_ENDPOINT] =
+ NODE_ENDPOINT(names->endpoint,
+ &nodes[SWNODE_IVSC_SENSOR_PORT],
+ sensor->ivsc_sensor_ep_properties);
+ nodes[SWNODE_IVSC_IPU_PORT] =
+ NODE_PORT(names->ivsc_ipu_port,
+ &nodes[SWNODE_IVSC_HID]);
+ nodes[SWNODE_IVSC_IPU_ENDPOINT] =
+ NODE_ENDPOINT(names->endpoint,
+ &nodes[SWNODE_IVSC_IPU_PORT],
+ sensor->ivsc_ipu_ep_properties);
+ }
+
+ nodes[SWNODE_VCM] = NODE_VCM(sensor->node_names.vcm);
+
+ ipu_bridge_init_swnode_group(sensor);
+}
+
+/*
+ * The actual instantiation must be done from a workqueue to avoid
+ * a deadlock on taking list_lock from v4l2-async twice.
+ */
+struct ipu_bridge_instantiate_vcm_work_data {
+ struct work_struct work;
+ struct device *sensor;
+ char name[16];
+ struct i2c_board_info board_info;
+};
+
+static void ipu_bridge_instantiate_vcm_work(struct work_struct *work)
+{
+ struct ipu_bridge_instantiate_vcm_work_data *data =
+ container_of(work, struct ipu_bridge_instantiate_vcm_work_data,
+ work);
+ struct acpi_device *adev = ACPI_COMPANION(data->sensor);
+ struct i2c_client *vcm_client;
+ bool put_fwnode = true;
+ int ret;
+
+ /*
+ * The client may get probed before the device_link gets added below;
+ * make sure the sensor is powered up during probe.
+ */
+ ret = pm_runtime_get_sync(data->sensor);
+ if (ret < 0) {
+ dev_err(data->sensor, "Error %d runtime-resuming sensor, cannot instantiate VCM\n",
+ ret);
+ goto out_pm_put;
+ }
+
+ /*
+ * Note the client is created only once and then kept around
+ * even after a rmmod, just like the software-nodes.
+ */
+ vcm_client = i2c_acpi_new_device_by_fwnode(acpi_fwnode_handle(adev),
+ 1, &data->board_info);
+ if (IS_ERR(vcm_client)) {
+ dev_err(data->sensor, "Error instantiating VCM client: %ld\n",
+ PTR_ERR(vcm_client));
+ goto out_pm_put;
+ }
+
+ device_link_add(&vcm_client->dev, data->sensor, DL_FLAG_PM_RUNTIME);
+
+ dev_info(data->sensor, "Instantiated %s VCM\n", data->board_info.type);
+ put_fwnode = false; /* Ownership has passed to the i2c-client */
+
+out_pm_put:
+ pm_runtime_put(data->sensor);
+ put_device(data->sensor);
+ if (put_fwnode)
+ fwnode_handle_put(data->board_info.fwnode);
+ kfree(data);
+}
+
+int ipu_bridge_instantiate_vcm(struct device *sensor)
+{
+ struct ipu_bridge_instantiate_vcm_work_data *data;
+ struct fwnode_handle *vcm_fwnode;
+ struct i2c_client *vcm_client;
+ struct acpi_device *adev;
+ char *sep;
+
+ adev = ACPI_COMPANION(sensor);
+ if (!adev)
+ return 0;
+
+ vcm_fwnode = fwnode_find_reference(dev_fwnode(sensor), "lens-focus", 0);
+ if (IS_ERR(vcm_fwnode))
+ return 0;
+
+ /* When reloading modules the client will already exist */
+ vcm_client = i2c_find_device_by_fwnode(vcm_fwnode);
+ if (vcm_client) {
+ fwnode_handle_put(vcm_fwnode);
+ put_device(&vcm_client->dev);
+ return 0;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ fwnode_handle_put(vcm_fwnode);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&data->work, ipu_bridge_instantiate_vcm_work);
+ data->sensor = get_device(sensor);
+ snprintf(data->name, sizeof(data->name), "%s-VCM",
+ acpi_dev_name(adev));
+ data->board_info.dev_name = data->name;
+ data->board_info.fwnode = vcm_fwnode;
+ snprintf(data->board_info.type, sizeof(data->board_info.type),
+ "%pfwP", vcm_fwnode);
+ /* Strip "-<link>" postfix */
+ sep = strchrnul(data->board_info.type, '-');
+ *sep = 0;
+
+ queue_work(system_long_wq, &data->work);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(ipu_bridge_instantiate_vcm, INTEL_IPU_BRIDGE);
+
+static int ipu_bridge_instantiate_ivsc(struct ipu_sensor *sensor)
+{
+ struct fwnode_handle *fwnode;
+
+ if (!sensor->csi_dev)
+ return 0;
+
+ fwnode = software_node_fwnode(&sensor->swnodes[SWNODE_IVSC_HID]);
+ if (!fwnode)
+ return -ENODEV;
+
+ set_secondary_fwnode(sensor->csi_dev, fwnode);
+
+ return 0;
+}
+
+static void ipu_bridge_unregister_sensors(struct ipu_bridge *bridge)
+{
+ struct ipu_sensor *sensor;
+ unsigned int i;
+
+ for (i = 0; i < bridge->n_sensors; i++) {
+ sensor = &bridge->sensors[i];
+ software_node_unregister_node_group(sensor->group);
+ acpi_dev_put(sensor->adev);
+ put_device(sensor->csi_dev);
+ acpi_dev_put(sensor->ivsc_adev);
+ }
+}
+
+static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg,
+ struct ipu_bridge *bridge)
+{
+ struct fwnode_handle *fwnode, *primary;
+ struct ipu_sensor *sensor;
+ struct acpi_device *adev;
+ int ret;
+
+ for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
+ if (!adev->status.enabled)
+ continue;
+
+ if (bridge->n_sensors >= IPU_MAX_PORTS) {
+ acpi_dev_put(adev);
+ dev_err(bridge->dev, "Exceeded available IPU ports\n");
+ return -EINVAL;
+ }
+
+ sensor = &bridge->sensors[bridge->n_sensors];
+
+ ret = bridge->parse_sensor_fwnode(adev, sensor);
+ if (ret)
+ goto err_put_adev;
+
+ snprintf(sensor->name, sizeof(sensor->name), "%s-%u",
+ cfg->hid, sensor->link);
+
+ ret = ipu_bridge_check_ivsc_dev(sensor, adev);
+ if (ret)
+ goto err_put_adev;
+
+ ipu_bridge_create_fwnode_properties(sensor, bridge, cfg);
+ ipu_bridge_create_connection_swnodes(bridge, sensor);
+
+ ret = software_node_register_node_group(sensor->group);
+ if (ret)
+ goto err_put_ivsc;
+
+ fwnode = software_node_fwnode(&sensor->swnodes[
+ SWNODE_SENSOR_HID]);
+ if (!fwnode) {
+ ret = -ENODEV;
+ goto err_free_swnodes;
+ }
+
+ sensor->adev = acpi_dev_get(adev);
+
+ primary = acpi_fwnode_handle(adev);
+ primary->secondary = fwnode;
+
+ ret = ipu_bridge_instantiate_ivsc(sensor);
+ if (ret)
+ goto err_free_swnodes;
+
+ dev_info(bridge->dev, "Found supported sensor %s\n",
+ acpi_dev_name(adev));
+
+ bridge->n_sensors++;
+ }
+
+ return 0;
+
+err_free_swnodes:
+ software_node_unregister_node_group(sensor->group);
+err_put_ivsc:
+ put_device(sensor->csi_dev);
+ acpi_dev_put(sensor->ivsc_adev);
+err_put_adev:
+ acpi_dev_put(adev);
+ return ret;
+}
+
+static int ipu_bridge_connect_sensors(struct ipu_bridge *bridge)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(ipu_supported_sensors); i++) {
+ const struct ipu_sensor_config *cfg =
+ &ipu_supported_sensors[i];
+
+ ret = ipu_bridge_connect_sensor(cfg, bridge);
+ if (ret)
+ goto err_unregister_sensors;
+ }
+
+ return 0;
+
+err_unregister_sensors:
+ ipu_bridge_unregister_sensors(bridge);
+ return ret;
+}
+
+static int ipu_bridge_ivsc_is_ready(void)
+{
+ struct acpi_device *sensor_adev, *adev;
+ struct device *csi_dev;
+ bool ready = true;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ipu_supported_sensors); i++) {
+ const struct ipu_sensor_config *cfg =
+ &ipu_supported_sensors[i];
+
+ for_each_acpi_dev_match(sensor_adev, cfg->hid, NULL, -1) {
+ if (!sensor_adev->status.enabled)
+ continue;
+
+ adev = ipu_bridge_get_ivsc_acpi_dev(sensor_adev);
+ if (!adev)
+ continue;
+
+ csi_dev = ipu_bridge_get_ivsc_csi_dev(adev);
+ if (!csi_dev)
+ ready = false;
+
+ put_device(csi_dev);
+ acpi_dev_put(adev);
+ }
+ }
+
+ return ready;
+}
+
+int ipu_bridge_init(struct device *dev,
+ ipu_parse_sensor_fwnode_t parse_sensor_fwnode)
+{
+ struct fwnode_handle *fwnode;
+ struct ipu_bridge *bridge;
+ unsigned int i;
+ int ret;
+
+ if (!ipu_bridge_ivsc_is_ready())
+ return -EPROBE_DEFER;
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ strscpy(bridge->ipu_node_name, IPU_HID,
+ sizeof(bridge->ipu_node_name));
+ bridge->ipu_hid_node.name = bridge->ipu_node_name;
+ bridge->dev = dev;
+ bridge->parse_sensor_fwnode = parse_sensor_fwnode;
+
+ ret = software_node_register(&bridge->ipu_hid_node);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register the IPU HID node\n");
+ goto err_free_bridge;
+ }
+
+ /*
+ * Map the lane arrangement, which is fixed for the IPU3 (meaning we
+ * only need one, rather than one per sensor). We include it as a
+ * member of the struct ipu_bridge rather than a global variable so
+ * that it survives if the module is unloaded along with the rest of
+ * the struct.
+ */
+ for (i = 0; i < IPU_MAX_LANES; i++)
+ bridge->data_lanes[i] = i + 1;
+
+ ret = ipu_bridge_connect_sensors(bridge);
+ if (ret || bridge->n_sensors == 0)
+ goto err_unregister_ipu;
+
+ dev_info(dev, "Connected %d cameras\n", bridge->n_sensors);
+
+ fwnode = software_node_fwnode(&bridge->ipu_hid_node);
+ if (!fwnode) {
+ dev_err(dev, "Error getting fwnode from ipu software_node\n");
+ ret = -ENODEV;
+ goto err_unregister_sensors;
+ }
+
+ set_secondary_fwnode(dev, fwnode);
+
+ return 0;
+
+err_unregister_sensors:
+ ipu_bridge_unregister_sensors(bridge);
+err_unregister_ipu:
+ software_node_unregister(&bridge->ipu_hid_node);
+err_free_bridge:
+ kfree(bridge);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(ipu_bridge_init, INTEL_IPU_BRIDGE);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel IPU Sensors Bridge driver");
diff --git a/drivers/media/pci/intel/ipu3/Kconfig b/drivers/media/pci/intel/ipu3/Kconfig
index 65b0c1598fbf..0951545eab21 100644
--- a/drivers/media/pci/intel/ipu3/Kconfig
+++ b/drivers/media/pci/intel/ipu3/Kconfig
@@ -8,6 +8,7 @@ config VIDEO_IPU3_CIO2
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
select VIDEOBUF2_DMA_SG
+ select IPU_BRIDGE if CIO2_BRIDGE
help
This is the Intel IPU3 CIO2 CSI-2 receiver unit, found in Intel
diff --git a/drivers/media/pci/intel/ipu3/Makefile b/drivers/media/pci/intel/ipu3/Makefile
index 933777e6ea8a..98ddd5beafe0 100644
--- a/drivers/media/pci/intel/ipu3/Makefile
+++ b/drivers/media/pci/intel/ipu3/Makefile
@@ -1,5 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_VIDEO_IPU3_CIO2) += ipu3-cio2.o
-
-ipu3-cio2-y += ipu3-cio2-main.o
-ipu3-cio2-$(CONFIG_CIO2_BRIDGE) += cio2-bridge.o
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c
deleted file mode 100644
index 3c2accfe5455..000000000000
--- a/drivers/media/pci/intel/ipu3/cio2-bridge.c
+++ /dev/null
@@ -1,494 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Author: Dan Scally <djrscally@gmail.com> */
-
-#include <linux/acpi.h>
-#include <linux/device.h>
-#include <linux/i2c.h>
-#include <linux/pci.h>
-#include <linux/property.h>
-#include <media/v4l2-fwnode.h>
-
-#include "cio2-bridge.h"
-
-/*
- * Extend this array with ACPI Hardware IDs of devices known to be working
- * plus the number of link-frequencies expected by their drivers, along with
- * the frequency values in hertz. This is somewhat opportunistic way of adding
- * support for this for now in the hopes of a better source for the information
- * (possibly some encoded value in the SSDB buffer that we're unaware of)
- * becoming apparent in the future.
- *
- * Do not add an entry for a sensor that is not actually supported.
- */
-static const struct cio2_sensor_config cio2_supported_sensors[] = {
- /* Omnivision OV5693 */
- CIO2_SENSOR_CONFIG("INT33BE", 1, 419200000),
- /* Omnivision OV8865 */
- CIO2_SENSOR_CONFIG("INT347A", 1, 360000000),
- /* Omnivision OV7251 */
- CIO2_SENSOR_CONFIG("INT347E", 1, 319200000),
- /* Omnivision OV2680 */
- CIO2_SENSOR_CONFIG("OVTI2680", 0),
- /* Omnivision ov8856 */
- CIO2_SENSOR_CONFIG("OVTI8856", 3, 180000000, 360000000, 720000000),
- /* Omnivision ov2740 */
- CIO2_SENSOR_CONFIG("INT3474", 1, 360000000),
- /* Hynix hi556 */
- CIO2_SENSOR_CONFIG("INT3537", 1, 437000000),
- /* Omnivision ov13b10 */
- CIO2_SENSOR_CONFIG("OVTIDB10", 1, 560000000),
-};
-
-static const struct cio2_property_names prop_names = {
- .clock_frequency = "clock-frequency",
- .rotation = "rotation",
- .orientation = "orientation",
- .bus_type = "bus-type",
- .data_lanes = "data-lanes",
- .remote_endpoint = "remote-endpoint",
- .link_frequencies = "link-frequencies",
-};
-
-static const char * const cio2_vcm_types[] = {
- "ad5823",
- "dw9714",
- "ad5816",
- "dw9719",
- "dw9718",
- "dw9806b",
- "wv517s",
- "lc898122xa",
- "lc898212axb",
-};
-
-static int cio2_bridge_read_acpi_buffer(struct acpi_device *adev, char *id,
- void *data, u32 size)
-{
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *obj;
- acpi_status status;
- int ret = 0;
-
- status = acpi_evaluate_object(adev->handle, id, NULL, &buffer);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- obj = buffer.pointer;
- if (!obj) {
- dev_err(&adev->dev, "Couldn't locate ACPI buffer\n");
- return -ENODEV;
- }
-
- if (obj->type != ACPI_TYPE_BUFFER) {
- dev_err(&adev->dev, "Not an ACPI buffer\n");
- ret = -ENODEV;
- goto out_free_buff;
- }
-
- if (obj->buffer.length > size) {
- dev_err(&adev->dev, "Given buffer is too small\n");
- ret = -EINVAL;
- goto out_free_buff;
- }
-
- memcpy(data, obj->buffer.pointer, obj->buffer.length);
-
-out_free_buff:
- kfree(buffer.pointer);
- return ret;
-}
-
-static u32 cio2_bridge_parse_rotation(struct cio2_sensor *sensor)
-{
- switch (sensor->ssdb.degree) {
- case CIO2_SENSOR_ROTATION_NORMAL:
- return 0;
- case CIO2_SENSOR_ROTATION_INVERTED:
- return 180;
- default:
- dev_warn(&sensor->adev->dev,
- "Unknown rotation %d. Assume 0 degree rotation\n",
- sensor->ssdb.degree);
- return 0;
- }
-}
-
-static enum v4l2_fwnode_orientation cio2_bridge_parse_orientation(struct cio2_sensor *sensor)
-{
- switch (sensor->pld->panel) {
- case ACPI_PLD_PANEL_FRONT:
- return V4L2_FWNODE_ORIENTATION_FRONT;
- case ACPI_PLD_PANEL_BACK:
- return V4L2_FWNODE_ORIENTATION_BACK;
- case ACPI_PLD_PANEL_TOP:
- case ACPI_PLD_PANEL_LEFT:
- case ACPI_PLD_PANEL_RIGHT:
- case ACPI_PLD_PANEL_UNKNOWN:
- return V4L2_FWNODE_ORIENTATION_EXTERNAL;
- default:
- dev_warn(&sensor->adev->dev, "Unknown _PLD panel value %d\n",
- sensor->pld->panel);
- return V4L2_FWNODE_ORIENTATION_EXTERNAL;
- }
-}
-
-static void cio2_bridge_create_fwnode_properties(
- struct cio2_sensor *sensor,
- struct cio2_bridge *bridge,
- const struct cio2_sensor_config *cfg)
-{
- u32 rotation;
- enum v4l2_fwnode_orientation orientation;
-
- rotation = cio2_bridge_parse_rotation(sensor);
- orientation = cio2_bridge_parse_orientation(sensor);
-
- sensor->prop_names = prop_names;
-
- sensor->local_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_CIO2_ENDPOINT]);
- sensor->remote_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_SENSOR_ENDPOINT]);
-
- sensor->dev_properties[0] = PROPERTY_ENTRY_U32(
- sensor->prop_names.clock_frequency,
- sensor->ssdb.mclkspeed);
- sensor->dev_properties[1] = PROPERTY_ENTRY_U32(
- sensor->prop_names.rotation,
- rotation);
- sensor->dev_properties[2] = PROPERTY_ENTRY_U32(
- sensor->prop_names.orientation,
- orientation);
- if (sensor->ssdb.vcmtype) {
- sensor->vcm_ref[0] =
- SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_VCM]);
- sensor->dev_properties[3] =
- PROPERTY_ENTRY_REF_ARRAY("lens-focus", sensor->vcm_ref);
- }
-
- sensor->ep_properties[0] = PROPERTY_ENTRY_U32(
- sensor->prop_names.bus_type,
- V4L2_FWNODE_BUS_TYPE_CSI2_DPHY);
- sensor->ep_properties[1] = PROPERTY_ENTRY_U32_ARRAY_LEN(
- sensor->prop_names.data_lanes,
- bridge->data_lanes,
- sensor->ssdb.lanes);
- sensor->ep_properties[2] = PROPERTY_ENTRY_REF_ARRAY(
- sensor->prop_names.remote_endpoint,
- sensor->local_ref);
-
- if (cfg->nr_link_freqs > 0)
- sensor->ep_properties[3] = PROPERTY_ENTRY_U64_ARRAY_LEN(
- sensor->prop_names.link_frequencies,
- cfg->link_freqs,
- cfg->nr_link_freqs);
-
- sensor->cio2_properties[0] = PROPERTY_ENTRY_U32_ARRAY_LEN(
- sensor->prop_names.data_lanes,
- bridge->data_lanes,
- sensor->ssdb.lanes);
- sensor->cio2_properties[1] = PROPERTY_ENTRY_REF_ARRAY(
- sensor->prop_names.remote_endpoint,
- sensor->remote_ref);
-}
-
-static void cio2_bridge_init_swnode_names(struct cio2_sensor *sensor)
-{
- snprintf(sensor->node_names.remote_port,
- sizeof(sensor->node_names.remote_port),
- SWNODE_GRAPH_PORT_NAME_FMT, sensor->ssdb.link);
- snprintf(sensor->node_names.port,
- sizeof(sensor->node_names.port),
- SWNODE_GRAPH_PORT_NAME_FMT, 0); /* Always port 0 */
- snprintf(sensor->node_names.endpoint,
- sizeof(sensor->node_names.endpoint),
- SWNODE_GRAPH_ENDPOINT_NAME_FMT, 0); /* And endpoint 0 */
-}
-
-static void cio2_bridge_init_swnode_group(struct cio2_sensor *sensor)
-{
- struct software_node *nodes = sensor->swnodes;
-
- sensor->group[SWNODE_SENSOR_HID] = &nodes[SWNODE_SENSOR_HID];
- sensor->group[SWNODE_SENSOR_PORT] = &nodes[SWNODE_SENSOR_PORT];
- sensor->group[SWNODE_SENSOR_ENDPOINT] = &nodes[SWNODE_SENSOR_ENDPOINT];
- sensor->group[SWNODE_CIO2_PORT] = &nodes[SWNODE_CIO2_PORT];
- sensor->group[SWNODE_CIO2_ENDPOINT] = &nodes[SWNODE_CIO2_ENDPOINT];
- if (sensor->ssdb.vcmtype)
- sensor->group[SWNODE_VCM] = &nodes[SWNODE_VCM];
-}
-
-static void cio2_bridge_create_connection_swnodes(struct cio2_bridge *bridge,
- struct cio2_sensor *sensor)
-{
- struct software_node *nodes = sensor->swnodes;
- char vcm_name[ACPI_ID_LEN + 4];
-
- cio2_bridge_init_swnode_names(sensor);
-
- nodes[SWNODE_SENSOR_HID] = NODE_SENSOR(sensor->name,
- sensor->dev_properties);
- nodes[SWNODE_SENSOR_PORT] = NODE_PORT(sensor->node_names.port,
- &nodes[SWNODE_SENSOR_HID]);
- nodes[SWNODE_SENSOR_ENDPOINT] = NODE_ENDPOINT(
- sensor->node_names.endpoint,
- &nodes[SWNODE_SENSOR_PORT],
- sensor->ep_properties);
- nodes[SWNODE_CIO2_PORT] = NODE_PORT(sensor->node_names.remote_port,
- &bridge->cio2_hid_node);
- nodes[SWNODE_CIO2_ENDPOINT] = NODE_ENDPOINT(
- sensor->node_names.endpoint,
- &nodes[SWNODE_CIO2_PORT],
- sensor->cio2_properties);
- if (sensor->ssdb.vcmtype) {
- /* append ssdb.link to distinguish VCM nodes with same HID */
- snprintf(vcm_name, sizeof(vcm_name), "%s-%u",
- cio2_vcm_types[sensor->ssdb.vcmtype - 1],
- sensor->ssdb.link);
- nodes[SWNODE_VCM] = NODE_VCM(vcm_name);
- }
-
- cio2_bridge_init_swnode_group(sensor);
-}
-
-static void cio2_bridge_instantiate_vcm_i2c_client(struct cio2_sensor *sensor)
-{
- struct i2c_board_info board_info = { };
- char name[16];
-
- if (!sensor->ssdb.vcmtype)
- return;
-
- snprintf(name, sizeof(name), "%s-VCM", acpi_dev_name(sensor->adev));
- board_info.dev_name = name;
- strscpy(board_info.type, cio2_vcm_types[sensor->ssdb.vcmtype - 1],
- ARRAY_SIZE(board_info.type));
- board_info.swnode = &sensor->swnodes[SWNODE_VCM];
-
- sensor->vcm_i2c_client =
- i2c_acpi_new_device_by_fwnode(acpi_fwnode_handle(sensor->adev),
- 1, &board_info);
- if (IS_ERR(sensor->vcm_i2c_client)) {
- dev_warn(&sensor->adev->dev, "Error instantiation VCM i2c-client: %ld\n",
- PTR_ERR(sensor->vcm_i2c_client));
- sensor->vcm_i2c_client = NULL;
- }
-}
-
-static void cio2_bridge_unregister_sensors(struct cio2_bridge *bridge)
-{
- struct cio2_sensor *sensor;
- unsigned int i;
-
- for (i = 0; i < bridge->n_sensors; i++) {
- sensor = &bridge->sensors[i];
- software_node_unregister_node_group(sensor->group);
- ACPI_FREE(sensor->pld);
- acpi_dev_put(sensor->adev);
- i2c_unregister_device(sensor->vcm_i2c_client);
- }
-}
-
-static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
- struct cio2_bridge *bridge,
- struct pci_dev *cio2)
-{
- struct fwnode_handle *fwnode, *primary;
- struct cio2_sensor *sensor;
- struct acpi_device *adev;
- acpi_status status;
- int ret;
-
- for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
- if (!adev->status.enabled)
- continue;
-
- if (bridge->n_sensors >= CIO2_NUM_PORTS) {
- acpi_dev_put(adev);
- dev_err(&cio2->dev, "Exceeded available CIO2 ports\n");
- return -EINVAL;
- }
-
- sensor = &bridge->sensors[bridge->n_sensors];
-
- ret = cio2_bridge_read_acpi_buffer(adev, "SSDB",
- &sensor->ssdb,
- sizeof(sensor->ssdb));
- if (ret)
- goto err_put_adev;
-
- snprintf(sensor->name, sizeof(sensor->name), "%s-%u",
- cfg->hid, sensor->ssdb.link);
-
- if (sensor->ssdb.vcmtype > ARRAY_SIZE(cio2_vcm_types)) {
- dev_warn(&adev->dev, "Unknown VCM type %d\n",
- sensor->ssdb.vcmtype);
- sensor->ssdb.vcmtype = 0;
- }
-
- status = acpi_get_physical_device_location(adev->handle, &sensor->pld);
- if (ACPI_FAILURE(status)) {
- ret = -ENODEV;
- goto err_put_adev;
- }
-
- if (sensor->ssdb.lanes > CIO2_MAX_LANES) {
- dev_err(&adev->dev,
- "Number of lanes in SSDB is invalid\n");
- ret = -EINVAL;
- goto err_free_pld;
- }
-
- cio2_bridge_create_fwnode_properties(sensor, bridge, cfg);
- cio2_bridge_create_connection_swnodes(bridge, sensor);
-
- ret = software_node_register_node_group(sensor->group);
- if (ret)
- goto err_free_pld;
-
- fwnode = software_node_fwnode(&sensor->swnodes[
- SWNODE_SENSOR_HID]);
- if (!fwnode) {
- ret = -ENODEV;
- goto err_free_swnodes;
- }
-
- sensor->adev = acpi_dev_get(adev);
-
- primary = acpi_fwnode_handle(adev);
- primary->secondary = fwnode;
-
- cio2_bridge_instantiate_vcm_i2c_client(sensor);
-
- dev_info(&cio2->dev, "Found supported sensor %s\n",
- acpi_dev_name(adev));
-
- bridge->n_sensors++;
- }
-
- return 0;
-
-err_free_swnodes:
- software_node_unregister_node_group(sensor->group);
-err_free_pld:
- ACPI_FREE(sensor->pld);
-err_put_adev:
- acpi_dev_put(adev);
- return ret;
-}
-
-static int cio2_bridge_connect_sensors(struct cio2_bridge *bridge,
- struct pci_dev *cio2)
-{
- unsigned int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(cio2_supported_sensors); i++) {
- const struct cio2_sensor_config *cfg =
- &cio2_supported_sensors[i];
-
- ret = cio2_bridge_connect_sensor(cfg, bridge, cio2);
- if (ret)
- goto err_unregister_sensors;
- }
-
- return 0;
-
-err_unregister_sensors:
- cio2_bridge_unregister_sensors(bridge);
- return ret;
-}
-
-/*
- * The VCM cannot be probed until the PMIC is completely setup. We cannot rely
- * on -EPROBE_DEFER for this, since the consumer<->supplier relations between
- * the VCM and regulators/clks are not described in ACPI, instead they are
- * passed as board-data to the PMIC drivers. Since -PROBE_DEFER does not work
- * for the clks/regulators the VCM i2c-clients must not be instantiated until
- * the PMIC is fully setup.
- *
- * The sensor/VCM ACPI device has an ACPI _DEP on the PMIC, check this using the
- * acpi_dev_ready_for_enumeration() helper, like the i2c-core-acpi code does
- * for the sensors.
- */
-static int cio2_bridge_sensors_are_ready(void)
-{
- struct acpi_device *adev;
- bool ready = true;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(cio2_supported_sensors); i++) {
- const struct cio2_sensor_config *cfg =
- &cio2_supported_sensors[i];
-
- for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
- if (!adev->status.enabled)
- continue;
-
- if (!acpi_dev_ready_for_enumeration(adev))
- ready = false;
- }
- }
-
- return ready;
-}
-
-int cio2_bridge_init(struct pci_dev *cio2)
-{
- struct device *dev = &cio2->dev;
- struct fwnode_handle *fwnode;
- struct cio2_bridge *bridge;
- unsigned int i;
- int ret;
-
- if (!cio2_bridge_sensors_are_ready())
- return -EPROBE_DEFER;
-
- bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
- if (!bridge)
- return -ENOMEM;
-
- strscpy(bridge->cio2_node_name, CIO2_HID,
- sizeof(bridge->cio2_node_name));
- bridge->cio2_hid_node.name = bridge->cio2_node_name;
-
- ret = software_node_register(&bridge->cio2_hid_node);
- if (ret < 0) {
- dev_err(dev, "Failed to register the CIO2 HID node\n");
- goto err_free_bridge;
- }
-
- /*
- * Map the lane arrangement, which is fixed for the IPU3 (meaning we
- * only need one, rather than one per sensor). We include it as a
- * member of the struct cio2_bridge rather than a global variable so
- * that it survives if the module is unloaded along with the rest of
- * the struct.
- */
- for (i = 0; i < CIO2_MAX_LANES; i++)
- bridge->data_lanes[i] = i + 1;
-
- ret = cio2_bridge_connect_sensors(bridge, cio2);
- if (ret || bridge->n_sensors == 0)
- goto err_unregister_cio2;
-
- dev_info(dev, "Connected %d cameras\n", bridge->n_sensors);
-
- fwnode = software_node_fwnode(&bridge->cio2_hid_node);
- if (!fwnode) {
- dev_err(dev, "Error getting fwnode from cio2 software_node\n");
- ret = -ENODEV;
- goto err_unregister_sensors;
- }
-
- set_secondary_fwnode(dev, fwnode);
-
- return 0;
-
-err_unregister_sensors:
- cio2_bridge_unregister_sensors(bridge);
-err_unregister_cio2:
- software_node_unregister(&bridge->cio2_hid_node);
-err_free_bridge:
- kfree(bridge);
-
- return ret;
-}
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 34984a7474ed..5dd69a251b6a 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -22,6 +22,8 @@
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/vmalloc.h>
+
+#include <media/ipu-bridge.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -354,7 +356,7 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
void __iomem *const base = cio2->base;
u8 lanes, csi2bus = q->csi2.port;
u8 sensor_vc = SENSOR_VIR_CH_DFLT;
- struct cio2_csi2_timing timing;
+ struct cio2_csi2_timing timing = { 0 };
int i, r;
fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
@@ -1371,7 +1373,7 @@ static const struct v4l2_subdev_ops cio2_subdev_ops = {
/******* V4L2 sub-device asynchronous registration callbacks***********/
struct sensor_async_subdev {
- struct v4l2_async_subdev asd;
+ struct v4l2_async_connection asd;
struct csi2_bus_info csi2;
};
@@ -1381,15 +1383,20 @@ struct sensor_async_subdev {
/* The .bound() notifier callback when a match is found */
static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct cio2_device *cio2 = to_cio2_device(notifier);
struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
struct cio2_queue *q;
+ int ret;
if (cio2->queue[s_asd->csi2.port].sensor)
return -EBUSY;
+ ret = ipu_bridge_instantiate_vcm(sd->dev);
+ if (ret)
+ return ret;
+
q = &cio2->queue[s_asd->csi2.port];
q->csi2 = s_asd->csi2;
@@ -1402,7 +1409,7 @@ static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
/* The .unbind callback */
static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct cio2_device *cio2 = to_cio2_device(notifier);
struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
@@ -1416,11 +1423,11 @@ static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
struct cio2_device *cio2 = to_cio2_device(notifier);
struct device *dev = &cio2->pci_dev->dev;
struct sensor_async_subdev *s_asd;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct cio2_queue *q;
int ret;
- list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
+ list_for_each_entry(asd, &cio2->notifier.done_list, asc_entry) {
s_asd = to_sensor_asd(asd);
q = &cio2->queue[s_asd->csi2.port];
@@ -1499,7 +1506,7 @@ err_parse:
* suspend.
*/
cio2->notifier.ops = &cio2_async_ops;
- ret = v4l2_async_nf_register(&cio2->v4l2_dev, &cio2->notifier);
+ ret = v4l2_async_nf_register(&cio2->notifier);
if (ret)
dev_err(dev, "failed to register async notifier : %d\n", ret);
@@ -1724,7 +1731,7 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
return -EINVAL;
}
- r = cio2_bridge_init(pci_dev);
+ r = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
if (r)
return r;
}
@@ -1794,7 +1801,7 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
if (r)
goto fail_v4l2_device_unregister;
- v4l2_async_nf_init(&cio2->notifier);
+ v4l2_async_nf_init(&cio2->notifier, &cio2->v4l2_dev);
/* Register notifier for subdevices we care */
r = cio2_parse_firmware(cio2);
@@ -2057,3 +2064,4 @@ MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPU3 CIO2 driver");
+MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
index 3a1f394e05aa..d731ce8adbe3 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
@@ -459,10 +459,4 @@ static inline struct cio2_queue *vb2q_to_cio2_queue(struct vb2_queue *vq)
return container_of(vq, struct cio2_queue, vbq);
}
-#if IS_ENABLED(CONFIG_CIO2_BRIDGE)
-int cio2_bridge_init(struct pci_dev *cio2);
-#else
-static inline int cio2_bridge_init(struct pci_dev *cio2) { return 0; }
-#endif
-
#endif
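With the bridge code moved into the shared ipu-bridge module (note the new
#include <media/ipu-bridge.h> and MODULE_IMPORT_NS(INTEL_IPU_BRIDGE) above),
the CIO2 driver no longer provides cio2_bridge_init(); its probe path now
calls ipu_bridge_init() with a parse callback. A minimal sketch of that call
pattern, for illustration only; the function name and the surrounding error
handling are assumptions, not part of this patch:

	/* illustrative sketch, not part of the patch */
	static int example_cio2_probe(struct pci_dev *pci_dev)
	{
		int ret;

		/* build software nodes for the ACPI-enumerated sensors */
		ret = ipu_bridge_init(&pci_dev->dev, ipu_bridge_parse_ssdb);
		if (ret)
			return ret;

		/* continue with the normal CIO2 device setup */
		return 0;
	}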
diff --git a/drivers/media/pci/intel/ivsc/Kconfig b/drivers/media/pci/intel/ivsc/Kconfig
new file mode 100644
index 000000000000..1ef1c4e3750d
--- /dev/null
+++ b/drivers/media/pci/intel/ivsc/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2023, Intel Corporation. All rights reserved.
+
+config INTEL_VSC
+ tristate "Intel Visual Sensing Controller"
+ depends on INTEL_MEI && ACPI
+ help
+ This adds support for the Intel Visual Sensing Controller (IVSC).
+
+ Enables the IVSC firmware services required for controlling
+ camera sensor ownership and the CSI-2 link through Intel's
+ Image Processing Unit (IPU) driver.
diff --git a/drivers/media/pci/intel/ivsc/Makefile b/drivers/media/pci/intel/ivsc/Makefile
new file mode 100644
index 000000000000..00fad29a6e6e
--- /dev/null
+++ b/drivers/media/pci/intel/ivsc/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2023, Intel Corporation. All rights reserved.
+
+obj-$(CONFIG_INTEL_VSC) += ivsc-csi.o
+ivsc-csi-y += mei_csi.o
+
+obj-$(CONFIG_INTEL_VSC) += ivsc-ace.o
+ivsc-ace-y += mei_ace.o
diff --git a/drivers/media/pci/intel/ivsc/mei_ace.c b/drivers/media/pci/intel/ivsc/mei_ace.c
new file mode 100644
index 000000000000..a0491f307831
--- /dev/null
+++ b/drivers/media/pci/intel/ivsc/mei_ace.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Intel Corporation. All rights reserved.
+ * Intel Visual Sensing Controller ACE Linux driver
+ */
+
+/*
+ * To set the ownership of the camera sensor, there is a specific command,
+ * which is sent via the MEI protocol. It is a two-step scheme where the
+ * firmware first acks receipt of the command and later responds that the
+ * command was executed. The command sending function uses a "completion"
+ * as the synchronization mechanism. The notification for a command is
+ * received via a MEI callback which wakes up the caller. There can be
+ * only one outstanding command at a time.
+ *
+ * The power line of the camera sensor is connected directly to the IVSC
+ * instead of to the host, so when camera sensor ownership is switched to
+ * the host the sensor has already been powered up by the firmware.
+ */
+
+#include <linux/acpi.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/workqueue.h>
+
+#define MEI_ACE_DRIVER_NAME "ivsc_ace"
+
+/* indicating driver message */
+#define ACE_DRV_MSG 1
+/* indicating set command */
+#define ACE_CMD_SET 4
+/* command timeout determined experimentally */
+#define ACE_CMD_TIMEOUT (5 * HZ)
+/* indicating the first command block */
+#define ACE_CMD_INIT_BLOCK 1
+/* indicating the last command block */
+#define ACE_CMD_FINAL_BLOCK 1
+/* size of camera status notification content */
+#define ACE_CAMERA_STATUS_SIZE 5
+
+/* UUID used to get firmware id */
+#define ACE_GET_FW_ID_UUID UUID_LE(0x6167DCFB, 0x72F1, 0x4584, 0xBF, \
+ 0xE3, 0x84, 0x17, 0x71, 0xAA, 0x79, 0x0B)
+
+/* UUID used to get csi device */
+#define MEI_CSI_UUID UUID_LE(0x92335FCF, 0x3203, 0x4472, \
+ 0xAF, 0x93, 0x7b, 0x44, 0x53, 0xAC, 0x29, 0xDA)
+
+/* identify firmware event type */
+enum ace_event_type {
+ /* firmware ready */
+ ACE_FW_READY = 0x8,
+
+ /* command response */
+ ACE_CMD_RESPONSE = 0x10,
+};
+
+/* identify camera sensor ownership */
+enum ace_camera_owner {
+ ACE_CAMERA_IVSC,
+ ACE_CAMERA_HOST,
+};
+
+/* identify the command ids supported by the firmware IPC */
+enum ace_cmd_id {
+ /* used to switch camera sensor to host */
+ ACE_SWITCH_CAMERA_TO_HOST = 0x13,
+
+ /* used to switch camera sensor to IVSC */
+ ACE_SWITCH_CAMERA_TO_IVSC = 0x14,
+
+ /* used to get firmware id */
+ ACE_GET_FW_ID = 0x1A,
+};
+
+/* ACE command header structure */
+struct ace_cmd_hdr {
+ u32 firmware_id : 16;
+ u32 instance_id : 8;
+ u32 type : 5;
+ u32 rsp : 1;
+ u32 msg_tgt : 1;
+ u32 _hw_rsvd_1 : 1;
+ u32 param_size : 20;
+ u32 cmd_id : 8;
+ u32 final_block : 1;
+ u32 init_block : 1;
+ u32 _hw_rsvd_2 : 2;
+} __packed;
+
+/* ACE command parameter structure */
+union ace_cmd_param {
+ uuid_le uuid;
+ u32 param;
+};
+
+/* ACE command structure */
+struct ace_cmd {
+ struct ace_cmd_hdr hdr;
+ union ace_cmd_param param;
+} __packed;
+
+/* ACE notification header */
+union ace_notif_hdr {
+ struct _confirm {
+ u32 status : 24;
+ u32 type : 5;
+ u32 rsp : 1;
+ u32 msg_tgt : 1;
+ u32 _hw_rsvd_1 : 1;
+ u32 param_size : 20;
+ u32 cmd_id : 8;
+ u32 final_block : 1;
+ u32 init_block : 1;
+ u32 _hw_rsvd_2 : 2;
+ } __packed ack;
+
+ struct _event {
+ u32 rsvd1 : 16;
+ u32 event_type : 8;
+ u32 type : 5;
+ u32 ack : 1;
+ u32 msg_tgt : 1;
+ u32 _hw_rsvd_1 : 1;
+ u32 rsvd2 : 30;
+ u32 _hw_rsvd_2 : 2;
+ } __packed event;
+
+ struct _response {
+ u32 event_id : 16;
+ u32 notif_type : 8;
+ u32 type : 5;
+ u32 rsp : 1;
+ u32 msg_tgt : 1;
+ u32 _hw_rsvd_1 : 1;
+ u32 event_data_size : 16;
+ u32 request_target : 1;
+ u32 request_type : 5;
+ u32 cmd_id : 8;
+ u32 _hw_rsvd_2 : 2;
+ } __packed response;
+};
+
+/* ACE notification content */
+union ace_notif_cont {
+ u16 firmware_id;
+ u8 state_notif;
+ u8 camera_status[ACE_CAMERA_STATUS_SIZE];
+};
+
+/* ACE notification structure */
+struct ace_notif {
+ union ace_notif_hdr hdr;
+ union ace_notif_cont cont;
+} __packed;
+
+struct mei_ace {
+ struct mei_cl_device *cldev;
+
+ /* command ack */
+ struct ace_notif cmd_ack;
+ /* command response */
+ struct ace_notif cmd_response;
+ /* used to wait for command ack and response */
+ struct completion cmd_completion;
+ /* lock used to prevent multiple simultaneous calls to send a command */
+ struct mutex lock;
+
+ /* used to construct command */
+ u16 firmware_id;
+
+ struct device *csi_dev;
+
+ /* runtime PM link from ace to csi */
+ struct device_link *csi_link;
+
+ struct work_struct work;
+};
+
+static inline void init_cmd_hdr(struct ace_cmd_hdr *hdr)
+{
+ memset(hdr, 0, sizeof(struct ace_cmd_hdr));
+
+ hdr->type = ACE_CMD_SET;
+ hdr->msg_tgt = ACE_DRV_MSG;
+ hdr->init_block = ACE_CMD_INIT_BLOCK;
+ hdr->final_block = ACE_CMD_FINAL_BLOCK;
+}
+
+static int construct_command(struct mei_ace *ace, struct ace_cmd *cmd,
+ enum ace_cmd_id cmd_id)
+{
+ union ace_cmd_param *param = &cmd->param;
+ struct ace_cmd_hdr *hdr = &cmd->hdr;
+
+ init_cmd_hdr(hdr);
+
+ hdr->cmd_id = cmd_id;
+ switch (cmd_id) {
+ case ACE_GET_FW_ID:
+ param->uuid = ACE_GET_FW_ID_UUID;
+ hdr->param_size = sizeof(param->uuid);
+ break;
+ case ACE_SWITCH_CAMERA_TO_IVSC:
+ param->param = 0;
+ hdr->firmware_id = ace->firmware_id;
+ hdr->param_size = sizeof(param->param);
+ break;
+ case ACE_SWITCH_CAMERA_TO_HOST:
+ hdr->firmware_id = ace->firmware_id;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return hdr->param_size + sizeof(cmd->hdr);
+}
+
+/* send command to firmware */
+static int mei_ace_send(struct mei_ace *ace, struct ace_cmd *cmd,
+ size_t len, bool only_ack)
+{
+ union ace_notif_hdr *resp_hdr = &ace->cmd_response.hdr;
+ union ace_notif_hdr *ack_hdr = &ace->cmd_ack.hdr;
+ struct ace_cmd_hdr *cmd_hdr = &cmd->hdr;
+ int ret;
+
+ mutex_lock(&ace->lock);
+
+ reinit_completion(&ace->cmd_completion);
+
+ ret = mei_cldev_send(ace->cldev, (u8 *)cmd, len);
+ if (ret < 0)
+ goto out;
+
+ ret = wait_for_completion_killable_timeout(&ace->cmd_completion,
+ ACE_CMD_TIMEOUT);
+ if (ret < 0) {
+ goto out;
+ } else if (!ret) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (ack_hdr->ack.cmd_id != cmd_hdr->cmd_id) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* command ack status */
+ ret = ack_hdr->ack.status;
+ if (ret) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (only_ack)
+ goto out;
+
+ ret = wait_for_completion_killable_timeout(&ace->cmd_completion,
+ ACE_CMD_TIMEOUT);
+ if (ret < 0) {
+ goto out;
+ } else if (!ret) {
+ ret = -ETIMEDOUT;
+ goto out;
+ } else {
+ ret = 0;
+ }
+
+ if (resp_hdr->response.cmd_id != cmd_hdr->cmd_id)
+ ret = -EINVAL;
+
+out:
+ mutex_unlock(&ace->lock);
+
+ return ret;
+}
+
+static int ace_set_camera_owner(struct mei_ace *ace,
+ enum ace_camera_owner owner)
+{
+ enum ace_cmd_id cmd_id;
+ struct ace_cmd cmd;
+ int cmd_size;
+ int ret;
+
+ if (owner == ACE_CAMERA_IVSC)
+ cmd_id = ACE_SWITCH_CAMERA_TO_IVSC;
+ else
+ cmd_id = ACE_SWITCH_CAMERA_TO_HOST;
+
+ cmd_size = construct_command(ace, &cmd, cmd_id);
+ if (cmd_size >= 0)
+ ret = mei_ace_send(ace, &cmd, cmd_size, false);
+ else
+ ret = cmd_size;
+
+ return ret;
+}
+
+/* the first command downloaded to firmware */
+static inline int ace_get_firmware_id(struct mei_ace *ace)
+{
+ struct ace_cmd cmd;
+ int cmd_size;
+ int ret;
+
+ cmd_size = construct_command(ace, &cmd, ACE_GET_FW_ID);
+ if (cmd_size >= 0)
+ ret = mei_ace_send(ace, &cmd, cmd_size, true);
+ else
+ ret = cmd_size;
+
+ return ret;
+}
+
+static void handle_command_response(struct mei_ace *ace,
+ struct ace_notif *resp, int len)
+{
+ union ace_notif_hdr *hdr = &resp->hdr;
+
+ switch (hdr->response.cmd_id) {
+ case ACE_SWITCH_CAMERA_TO_IVSC:
+ case ACE_SWITCH_CAMERA_TO_HOST:
+ memcpy(&ace->cmd_response, resp, len);
+ complete(&ace->cmd_completion);
+ break;
+ case ACE_GET_FW_ID:
+ break;
+ default:
+ break;
+ }
+}
+
+static void handle_command_ack(struct mei_ace *ace,
+ struct ace_notif *ack, int len)
+{
+ union ace_notif_hdr *hdr = &ack->hdr;
+
+ switch (hdr->ack.cmd_id) {
+ case ACE_GET_FW_ID:
+ ace->firmware_id = ack->cont.firmware_id;
+ fallthrough;
+ case ACE_SWITCH_CAMERA_TO_IVSC:
+ case ACE_SWITCH_CAMERA_TO_HOST:
+ memcpy(&ace->cmd_ack, ack, len);
+ complete(&ace->cmd_completion);
+ break;
+ default:
+ break;
+ }
+}
+
+/* callback for receive */
+static void mei_ace_rx(struct mei_cl_device *cldev)
+{
+ struct mei_ace *ace = mei_cldev_get_drvdata(cldev);
+ struct ace_notif event;
+ union ace_notif_hdr *hdr = &event.hdr;
+ int ret;
+
+ ret = mei_cldev_recv(cldev, (u8 *)&event, sizeof(event));
+ if (ret < 0) {
+ dev_err(&cldev->dev, "recv error: %d\n", ret);
+ return;
+ }
+
+ if (hdr->event.ack) {
+ handle_command_ack(ace, &event, ret);
+ return;
+ }
+
+ switch (hdr->event.event_type) {
+ case ACE_CMD_RESPONSE:
+ handle_command_response(ace, &event, ret);
+ break;
+ case ACE_FW_READY:
+ /*
+ * the firmware ready notification is sent to the
+ * driver after the HECI client has connected to
+ * the firmware.
+ */
+ dev_dbg(&cldev->dev, "firmware ready\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static int mei_ace_setup_dev_link(struct mei_ace *ace)
+{
+ struct device *dev = &ace->cldev->dev;
+ uuid_le uuid = MEI_CSI_UUID;
+ struct device *csi_dev;
+ char name[64];
+ int ret;
+
+ snprintf(name, sizeof(name), "%s-%pUl", dev_name(dev->parent), &uuid);
+
+ csi_dev = device_find_child_by_name(dev->parent, name);
+ if (!csi_dev) {
+ ret = -EPROBE_DEFER;
+ goto err;
+ }
+
+ /* setup link between mei_ace and mei_csi */
+ ace->csi_link = device_link_add(csi_dev, dev, DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE | DL_FLAG_STATELESS);
+ if (!ace->csi_link) {
+ ret = -EINVAL;
+ dev_err(dev, "failed to link to %s\n", dev_name(csi_dev));
+ goto err_put;
+ }
+
+ ace->csi_dev = csi_dev;
+
+ return 0;
+
+err_put:
+ put_device(csi_dev);
+
+err:
+ return ret;
+}
+
+/* switch the camera to the host before probing the sensor device */
+static void mei_ace_post_probe_work(struct work_struct *work)
+{
+ struct acpi_device *adev;
+ struct mei_ace *ace;
+ struct device *dev;
+ int ret;
+
+ ace = container_of(work, struct mei_ace, work);
+ dev = &ace->cldev->dev;
+
+ ret = ace_set_camera_owner(ace, ACE_CAMERA_HOST);
+ if (ret) {
+ dev_err(dev, "switch camera to host failed: %d\n", ret);
+ return;
+ }
+
+ adev = ACPI_COMPANION(dev->parent);
+ if (!adev)
+ return;
+
+ acpi_dev_clear_dependencies(adev);
+}
+
+static int mei_ace_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct device *dev = &cldev->dev;
+ struct mei_ace *ace;
+ int ret;
+
+ ace = devm_kzalloc(dev, sizeof(struct mei_ace), GFP_KERNEL);
+ if (!ace)
+ return -ENOMEM;
+
+ ace->cldev = cldev;
+ mutex_init(&ace->lock);
+ init_completion(&ace->cmd_completion);
+ INIT_WORK(&ace->work, mei_ace_post_probe_work);
+
+ mei_cldev_set_drvdata(cldev, ace);
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(dev, "mei_cldev_enable failed: %d\n", ret);
+ goto destroy_mutex;
+ }
+
+ ret = mei_cldev_register_rx_cb(cldev, mei_ace_rx);
+ if (ret) {
+ dev_err(dev, "event cb registration failed: %d\n", ret);
+ goto err_disable;
+ }
+
+ ret = ace_get_firmware_id(ace);
+ if (ret) {
+ dev_err(dev, "get firmware id failed: %d\n", ret);
+ goto err_disable;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = mei_ace_setup_dev_link(ace);
+ if (ret)
+ goto disable_pm;
+
+ schedule_work(&ace->work);
+
+ return 0;
+
+disable_pm:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+
+err_disable:
+ mei_cldev_disable(cldev);
+
+destroy_mutex:
+ mutex_destroy(&ace->lock);
+
+ return ret;
+}
+
+static void mei_ace_remove(struct mei_cl_device *cldev)
+{
+ struct mei_ace *ace = mei_cldev_get_drvdata(cldev);
+
+ cancel_work_sync(&ace->work);
+
+ device_link_del(ace->csi_link);
+ put_device(ace->csi_dev);
+
+ pm_runtime_disable(&cldev->dev);
+ pm_runtime_set_suspended(&cldev->dev);
+
+ ace_set_camera_owner(ace, ACE_CAMERA_IVSC);
+
+ mutex_destroy(&ace->lock);
+}
+
+static int __maybe_unused mei_ace_runtime_suspend(struct device *dev)
+{
+ struct mei_ace *ace = dev_get_drvdata(dev);
+
+ return ace_set_camera_owner(ace, ACE_CAMERA_IVSC);
+}
+
+static int __maybe_unused mei_ace_runtime_resume(struct device *dev)
+{
+ struct mei_ace *ace = dev_get_drvdata(dev);
+
+ return ace_set_camera_owner(ace, ACE_CAMERA_HOST);
+}
+
+static const struct dev_pm_ops mei_ace_pm_ops = {
+ SET_RUNTIME_PM_OPS(mei_ace_runtime_suspend,
+ mei_ace_runtime_resume, NULL)
+};
+
+#define MEI_ACE_UUID UUID_LE(0x5DB76CF6, 0x0A68, 0x4ED6, \
+ 0x9B, 0x78, 0x03, 0x61, 0x63, 0x5E, 0x24, 0x47)
+
+static const struct mei_cl_device_id mei_ace_tbl[] = {
+ { MEI_ACE_DRIVER_NAME, MEI_ACE_UUID, MEI_CL_VERSION_ANY },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(mei, mei_ace_tbl);
+
+static struct mei_cl_driver mei_ace_driver = {
+ .id_table = mei_ace_tbl,
+ .name = MEI_ACE_DRIVER_NAME,
+
+ .probe = mei_ace_probe,
+ .remove = mei_ace_remove,
+
+ .driver = {
+ .pm = &mei_ace_pm_ops,
+ },
+};
+
+module_mei_cl_driver(mei_ace_driver);
+
+MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
+MODULE_DESCRIPTION("Device driver for IVSC ACE");
+MODULE_LICENSE("GPL");
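For reference, construct_command() above returns the number of bytes handed
to mei_cldev_send(): the packed ace_cmd_hdr occupies two 32-bit words
(8 bytes), and the ACE_GET_FW_ID parameter is a 16-byte UUID, so the
firmware-id request is 24 bytes on the wire. A standalone sketch of that
arithmetic, illustrative only and not part of the patch:

	#include <stdio.h>

	int main(void)
	{
		unsigned int hdr_size = 8;	/* two packed 32-bit words in ace_cmd_hdr */
		unsigned int uuid_size = 16;	/* param_size for ACE_GET_FW_ID */

		/* matches what construct_command() returns for ACE_GET_FW_ID */
		printf("ACE_GET_FW_ID command length: %u bytes\n",
		       hdr_size + uuid_size);
		return 0;
	}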
diff --git a/drivers/media/pci/intel/ivsc/mei_csi.c b/drivers/media/pci/intel/ivsc/mei_csi.c
new file mode 100644
index 000000000000..00ba611e0f68
--- /dev/null
+++ b/drivers/media/pci/intel/ivsc/mei_csi.c
@@ -0,0 +1,825 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Intel Corporation. All rights reserved.
+ * Intel Visual Sensing Controller CSI Linux driver
+ */
+
+/*
+ * To set the ownership of the CSI-2 link and to configure it, there
+ * are specific commands, which are sent via the MEI protocol. The
+ * command sending function uses a "completion" as the synchronization
+ * mechanism. The response to a command is received via a MEI callback
+ * which wakes up the caller. There can be only one outstanding command
+ * at a time.
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/units.h>
+#include <linux/uuid.h>
+#include <linux/workqueue.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+
+#define MEI_CSI_DRIVER_NAME "ivsc_csi"
+#define MEI_CSI_ENTITY_NAME "Intel IVSC CSI"
+
+#define MEI_CSI_LINK_FREQ_400MHZ 400000000ULL
+
+/* the 5s timeout used here was determined experimentally */
+#define CSI_CMD_TIMEOUT (5 * HZ)
+/* an extra delay, determined experimentally, is needed to set up the CSI-2 link */
+#define CSI_FW_READY_DELAY_MS 100
+/* link frequency unit is 100kHz */
+#define CSI_LINK_FREQ(x) ((u32)(div_u64(x, 100 * HZ_PER_KHZ)))
+
+/*
+ * identify the command ids supported by the firmware
+ * IPC, as well as the privacy notification id used
+ * when processing privacy events.
+ */
+enum csi_cmd_id {
+ /* used to set csi ownership */
+ CSI_SET_OWNER = 0,
+
+ /* used to configure CSI-2 link */
+ CSI_SET_CONF = 2,
+
+ /* privacy notification id used when privacy state changes */
+ CSI_PRIVACY_NOTIF = 6,
+};
+
+/* CSI-2 link ownership definition */
+enum csi_link_owner {
+ CSI_LINK_IVSC,
+ CSI_LINK_HOST,
+};
+
+/* privacy status definition */
+enum ivsc_privacy_status {
+ CSI_PRIVACY_OFF,
+ CSI_PRIVACY_ON,
+ CSI_PRIVACY_MAX,
+};
+
+enum csi_pads {
+ CSI_PAD_SOURCE,
+ CSI_PAD_SINK,
+ CSI_NUM_PADS
+};
+
+/* configuration of the CSI-2 link between host and IVSC */
+struct csi_link_cfg {
+ /* number of data lanes used on the CSI-2 link */
+ u32 nr_of_lanes;
+
+ /* frequency of the CSI-2 link */
+ u32 link_freq;
+
+ /* for future use */
+ u32 rsvd[2];
+} __packed;
+
+/* CSI command structure */
+struct csi_cmd {
+ u32 cmd_id;
+ union _cmd_param {
+ u32 param;
+ struct csi_link_cfg conf;
+ } param;
+} __packed;
+
+/* CSI notification structure */
+struct csi_notif {
+ u32 cmd_id;
+ int status;
+ union _resp_cont {
+ u32 cont;
+ struct csi_link_cfg conf;
+ } cont;
+} __packed;
+
+struct mei_csi {
+ struct mei_cl_device *cldev;
+
+ /* command response */
+ struct csi_notif cmd_response;
+ /* used to wait for command response from firmware */
+ struct completion cmd_completion;
+ /* protect command download */
+ struct mutex lock;
+
+ struct v4l2_subdev subdev;
+ struct v4l2_subdev *remote;
+ struct v4l2_async_notifier notifier;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *freq_ctrl;
+ struct v4l2_ctrl *privacy_ctrl;
+ unsigned int remote_pad;
+ /* start streaming or not */
+ int streaming;
+
+ struct media_pad pads[CSI_NUM_PADS];
+ struct v4l2_mbus_framefmt format_mbus[CSI_NUM_PADS];
+
+ /* number of data lanes used on the CSI-2 link */
+ u32 nr_of_lanes;
+ /* frequency of the CSI-2 link */
+ u64 link_freq;
+
+ /* privacy status */
+ enum ivsc_privacy_status status;
+};
+
+static const struct v4l2_mbus_framefmt mei_csi_format_mbus_default = {
+ .width = 1,
+ .height = 1,
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .field = V4L2_FIELD_NONE,
+};
+
+static s64 link_freq_menu_items[] = {
+ MEI_CSI_LINK_FREQ_400MHZ
+};
+
+static inline struct mei_csi *notifier_to_csi(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct mei_csi, notifier);
+}
+
+static inline struct mei_csi *sd_to_csi(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct mei_csi, subdev);
+}
+
+static inline struct mei_csi *ctrl_to_csi(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct mei_csi, ctrl_handler);
+}
+
+/* send a command to the firmware; the caller must hold the mutex */
+static int mei_csi_send(struct mei_csi *csi, u8 *buf, size_t len)
+{
+ struct csi_cmd *cmd = (struct csi_cmd *)buf;
+ int ret;
+
+ reinit_completion(&csi->cmd_completion);
+
+ ret = mei_cldev_send(csi->cldev, buf, len);
+ if (ret < 0)
+ goto out;
+
+ ret = wait_for_completion_killable_timeout(&csi->cmd_completion,
+ CSI_CMD_TIMEOUT);
+ if (ret < 0) {
+ goto out;
+ } else if (!ret) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* command response status */
+ ret = csi->cmd_response.status;
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (csi->cmd_response.cmd_id != cmd->cmd_id)
+ ret = -EINVAL;
+
+out:
+ return ret;
+}
+
+/* set CSI-2 link ownership */
+static int csi_set_link_owner(struct mei_csi *csi, enum csi_link_owner owner)
+{
+ struct csi_cmd cmd = { 0 };
+ size_t cmd_size;
+ int ret;
+
+ cmd.cmd_id = CSI_SET_OWNER;
+ cmd.param.param = owner;
+ cmd_size = sizeof(cmd.cmd_id) + sizeof(cmd.param.param);
+
+ mutex_lock(&csi->lock);
+
+ ret = mei_csi_send(csi, (u8 *)&cmd, cmd_size);
+
+ mutex_unlock(&csi->lock);
+
+ return ret;
+}
+
+/* configure CSI-2 link between host and IVSC */
+static int csi_set_link_cfg(struct mei_csi *csi)
+{
+ struct csi_cmd cmd = { 0 };
+ size_t cmd_size;
+ int ret;
+
+ cmd.cmd_id = CSI_SET_CONF;
+ cmd.param.conf.nr_of_lanes = csi->nr_of_lanes;
+ cmd.param.conf.link_freq = CSI_LINK_FREQ(csi->link_freq);
+ cmd_size = sizeof(cmd.cmd_id) + sizeof(cmd.param.conf);
+
+ mutex_lock(&csi->lock);
+
+ ret = mei_csi_send(csi, (u8 *)&cmd, cmd_size);
+ /*
+ * wait for the configuration to be ready if the command was sent
+ * successfully. the delay is placed under the mutex to make sure
+ * the current command flow has completed before a possible new
+ * one is started.
+ */
+ if (!ret)
+ msleep(CSI_FW_READY_DELAY_MS);
+
+ mutex_unlock(&csi->lock);
+
+ return ret;
+}
+
+/* callback for receive */
+static void mei_csi_rx(struct mei_cl_device *cldev)
+{
+ struct mei_csi *csi = mei_cldev_get_drvdata(cldev);
+ struct csi_notif notif = { 0 };
+ int ret;
+
+ ret = mei_cldev_recv(cldev, (u8 *)&notif, sizeof(notif));
+ if (ret < 0) {
+ dev_err(&cldev->dev, "recv error: %d\n", ret);
+ return;
+ }
+
+ switch (notif.cmd_id) {
+ case CSI_PRIVACY_NOTIF:
+ if (notif.cont.cont < CSI_PRIVACY_MAX) {
+ csi->status = notif.cont.cont;
+ v4l2_ctrl_s_ctrl(csi->privacy_ctrl, csi->status);
+ }
+ break;
+ case CSI_SET_OWNER:
+ case CSI_SET_CONF:
+ memcpy(&csi->cmd_response, &notif, ret);
+
+ complete(&csi->cmd_completion);
+ break;
+ default:
+ break;
+ }
+}
+
+static int mei_csi_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct mei_csi *csi = sd_to_csi(sd);
+ s64 freq;
+ int ret;
+
+ if (enable && csi->streaming == 0) {
+ freq = v4l2_get_link_freq(csi->remote->ctrl_handler, 0, 0);
+ if (freq < 0) {
+ dev_err(&csi->cldev->dev,
+ "error %lld, invalid link_freq\n", freq);
+ ret = freq;
+ goto err;
+ }
+ csi->link_freq = freq;
+
+ /* switch CSI-2 link to host */
+ ret = csi_set_link_owner(csi, CSI_LINK_HOST);
+ if (ret < 0)
+ goto err;
+
+ /* configure CSI-2 link */
+ ret = csi_set_link_cfg(csi);
+ if (ret < 0)
+ goto err_switch;
+
+ ret = v4l2_subdev_call(csi->remote, video, s_stream, 1);
+ if (ret)
+ goto err_switch;
+ } else if (!enable && csi->streaming == 1) {
+ v4l2_subdev_call(csi->remote, video, s_stream, 0);
+
+ /* switch CSI-2 link to IVSC */
+ ret = csi_set_link_owner(csi, CSI_LINK_IVSC);
+ if (ret < 0)
+ dev_warn(&csi->cldev->dev,
+ "failed to switch CSI2 link: %d\n", ret);
+ }
+
+ csi->streaming = enable;
+
+ return 0;
+
+err_switch:
+ csi_set_link_owner(csi, CSI_LINK_IVSC);
+
+err:
+ return ret;
+}
+
+static struct v4l2_mbus_framefmt *
+mei_csi_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ unsigned int pad, u32 which)
+{
+ struct mei_csi *csi = sd_to_csi(sd);
+
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(sd, sd_state, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &csi->format_mbus[pad];
+ default:
+ return NULL;
+ }
+}
+
+static int mei_csi_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct v4l2_mbus_framefmt *mbusformat;
+ struct mei_csi *csi = sd_to_csi(sd);
+ unsigned int i;
+
+ mutex_lock(&csi->lock);
+
+ for (i = 0; i < sd->entity.num_pads; i++) {
+ mbusformat = v4l2_subdev_get_try_format(sd, sd_state, i);
+ *mbusformat = mei_csi_format_mbus_default;
+ }
+
+ mutex_unlock(&csi->lock);
+
+ return 0;
+}
+
+static int mei_csi_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *mbusformat;
+ struct mei_csi *csi = sd_to_csi(sd);
+
+ mutex_lock(&csi->lock);
+
+ mbusformat = mei_csi_get_pad_format(sd, sd_state, format->pad,
+ format->which);
+ if (mbusformat)
+ format->format = *mbusformat;
+
+ mutex_unlock(&csi->lock);
+
+ return 0;
+}
+
+static int mei_csi_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *source_mbusformat;
+ struct v4l2_mbus_framefmt *mbusformat;
+ struct mei_csi *csi = sd_to_csi(sd);
+ struct media_pad *pad;
+
+ mbusformat = mei_csi_get_pad_format(sd, sd_state, format->pad,
+ format->which);
+ if (!mbusformat)
+ return -EINVAL;
+
+ source_mbusformat = mei_csi_get_pad_format(sd, sd_state, CSI_PAD_SOURCE,
+ format->which);
+ if (!source_mbusformat)
+ return -EINVAL;
+
+ v4l_bound_align_image(&format->format.width, 1, 65536, 0,
+ &format->format.height, 1, 65536, 0, 0);
+
+ switch (format->format.code) {
+ case MEDIA_BUS_FMT_RGB444_1X12:
+ case MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE:
+ case MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE:
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ case MEDIA_BUS_FMT_BGR565_2X8_BE:
+ case MEDIA_BUS_FMT_BGR565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB565_2X8_BE:
+ case MEDIA_BUS_FMT_RGB565_2X8_LE:
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ case MEDIA_BUS_FMT_RBG888_1X24:
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+ case MEDIA_BUS_FMT_BGR888_1X24:
+ case MEDIA_BUS_FMT_GBR888_1X24:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB888_2X12_BE:
+ case MEDIA_BUS_FMT_RGB888_2X12_LE:
+ case MEDIA_BUS_FMT_ARGB8888_1X32:
+ case MEDIA_BUS_FMT_RGB888_1X32_PADHI:
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_RGB121212_1X36:
+ case MEDIA_BUS_FMT_RGB161616_1X48:
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_UV8_1X8:
+ case MEDIA_BUS_FMT_UYVY8_1_5X8:
+ case MEDIA_BUS_FMT_VYUY8_1_5X8:
+ case MEDIA_BUS_FMT_YUYV8_1_5X8:
+ case MEDIA_BUS_FMT_YVYU8_1_5X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_UYVY10_2X10:
+ case MEDIA_BUS_FMT_VYUY10_2X10:
+ case MEDIA_BUS_FMT_YUYV10_2X10:
+ case MEDIA_BUS_FMT_YVYU10_2X10:
+ case MEDIA_BUS_FMT_Y12_1X12:
+ case MEDIA_BUS_FMT_UYVY12_2X12:
+ case MEDIA_BUS_FMT_VYUY12_2X12:
+ case MEDIA_BUS_FMT_YUYV12_2X12:
+ case MEDIA_BUS_FMT_YVYU12_2X12:
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ case MEDIA_BUS_FMT_VYUY8_1X16:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_YVYU8_1X16:
+ case MEDIA_BUS_FMT_YDYUYDYV8_1X16:
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ case MEDIA_BUS_FMT_VYUY10_1X20:
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YVYU10_1X20:
+ case MEDIA_BUS_FMT_VUY8_1X24:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
+ case MEDIA_BUS_FMT_UYVY12_1X24:
+ case MEDIA_BUS_FMT_VYUY12_1X24:
+ case MEDIA_BUS_FMT_YUYV12_1X24:
+ case MEDIA_BUS_FMT_YVYU12_1X24:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
+ case MEDIA_BUS_FMT_AYUV8_1X32:
+ case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
+ case MEDIA_BUS_FMT_YUV12_1X36:
+ case MEDIA_BUS_FMT_YUV16_1X48:
+ case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
+ case MEDIA_BUS_FMT_JPEG_1X8:
+ case MEDIA_BUS_FMT_AHSV8888_1X32:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ case MEDIA_BUS_FMT_SBGGR14_1X14:
+ case MEDIA_BUS_FMT_SGBRG14_1X14:
+ case MEDIA_BUS_FMT_SGRBG14_1X14:
+ case MEDIA_BUS_FMT_SRGGB14_1X14:
+ case MEDIA_BUS_FMT_SBGGR16_1X16:
+ case MEDIA_BUS_FMT_SGBRG16_1X16:
+ case MEDIA_BUS_FMT_SGRBG16_1X16:
+ case MEDIA_BUS_FMT_SRGGB16_1X16:
+ break;
+ default:
+ format->format.code = MEDIA_BUS_FMT_Y8_1X8;
+ break;
+ }
+
+ if (format->format.field == V4L2_FIELD_ANY)
+ format->format.field = V4L2_FIELD_NONE;
+
+ mutex_lock(&csi->lock);
+
+ pad = &csi->pads[format->pad];
+ if (pad->flags & MEDIA_PAD_FL_SOURCE)
+ format->format = csi->format_mbus[CSI_PAD_SINK];
+
+ *mbusformat = format->format;
+
+ if (pad->flags & MEDIA_PAD_FL_SINK)
+ *source_mbusformat = format->format;
+
+ mutex_unlock(&csi->lock);
+
+ return 0;
+}
+
+static int mei_csi_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mei_csi *csi = ctrl_to_csi(ctrl);
+ s64 freq;
+
+ if (ctrl->id == V4L2_CID_LINK_FREQ) {
+ if (!csi->remote)
+ return -EINVAL;
+
+ freq = v4l2_get_link_freq(csi->remote->ctrl_handler, 0, 0);
+ if (freq < 0) {
+ dev_err(&csi->cldev->dev,
+ "error %lld, invalid link_freq\n", freq);
+ return -EINVAL;
+ }
+
+ link_freq_menu_items[0] = freq;
+ ctrl->val = 0;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops mei_csi_ctrl_ops = {
+ .g_volatile_ctrl = mei_csi_g_volatile_ctrl,
+};
+
+static const struct v4l2_subdev_video_ops mei_csi_video_ops = {
+ .s_stream = mei_csi_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops mei_csi_pad_ops = {
+ .init_cfg = mei_csi_init_cfg,
+ .get_fmt = mei_csi_get_fmt,
+ .set_fmt = mei_csi_set_fmt,
+};
+
+static const struct v4l2_subdev_ops mei_csi_subdev_ops = {
+ .video = &mei_csi_video_ops,
+ .pad = &mei_csi_pad_ops,
+};
+
+static const struct media_entity_operations mei_csi_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int mei_csi_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct mei_csi *csi = notifier_to_csi(notifier);
+ int pad;
+
+ pad = media_entity_get_fwnode_pad(&subdev->entity, asd->match.fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (pad < 0)
+ return pad;
+
+ csi->remote = subdev;
+ csi->remote_pad = pad;
+
+ return media_create_pad_link(&subdev->entity, pad,
+ &csi->subdev.entity, 1,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static void mei_csi_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asd)
+{
+ struct mei_csi *csi = notifier_to_csi(notifier);
+
+ csi->remote = NULL;
+}
+
+static const struct v4l2_async_notifier_operations mei_csi_notify_ops = {
+ .bound = mei_csi_notify_bound,
+ .unbind = mei_csi_notify_unbind,
+};
+
+static int mei_csi_init_controls(struct mei_csi *csi)
+{
+ u32 max;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 2);
+ if (ret)
+ return ret;
+
+ csi->ctrl_handler.lock = &csi->lock;
+
+ max = ARRAY_SIZE(link_freq_menu_items) - 1;
+ csi->freq_ctrl = v4l2_ctrl_new_int_menu(&csi->ctrl_handler,
+ &mei_csi_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ max,
+ 0,
+ link_freq_menu_items);
+ if (csi->freq_ctrl)
+ csi->freq_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY |
+ V4L2_CTRL_FLAG_VOLATILE;
+
+ csi->privacy_ctrl = v4l2_ctrl_new_std(&csi->ctrl_handler, NULL,
+ V4L2_CID_PRIVACY, 0, 1, 1, 0);
+ if (csi->privacy_ctrl)
+ csi->privacy_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ if (csi->ctrl_handler.error)
+ return csi->ctrl_handler.error;
+
+ csi->subdev.ctrl_handler = &csi->ctrl_handler;
+
+ return 0;
+}
+
+static int mei_csi_parse_firmware(struct mei_csi *csi)
+{
+ struct v4l2_fwnode_endpoint v4l2_ep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct device *dev = &csi->cldev->dev;
+ struct v4l2_async_connection *asd;
+ struct fwnode_handle *fwnode;
+ struct fwnode_handle *ep;
+ int ret;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0, 0);
+ if (!ep) {
+ dev_err(dev, "not connected to subdevice\n");
+ return -EINVAL;
+ }
+
+ ret = v4l2_fwnode_endpoint_parse(ep, &v4l2_ep);
+ if (ret) {
+ dev_err(dev, "could not parse v4l2 endpoint\n");
+ fwnode_handle_put(ep);
+ return -EINVAL;
+ }
+
+ fwnode = fwnode_graph_get_remote_endpoint(ep);
+ fwnode_handle_put(ep);
+
+ v4l2_async_subdev_nf_init(&csi->notifier, &csi->subdev);
+ csi->notifier.ops = &mei_csi_notify_ops;
+
+ asd = v4l2_async_nf_add_fwnode(&csi->notifier, fwnode,
+ struct v4l2_async_connection);
+ if (IS_ERR(asd)) {
+ fwnode_handle_put(fwnode);
+ return PTR_ERR(asd);
+ }
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(fwnode, &v4l2_ep);
+ fwnode_handle_put(fwnode);
+ if (ret)
+ return ret;
+ csi->nr_of_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
+
+ ret = v4l2_async_nf_register(&csi->notifier);
+ if (ret)
+ v4l2_async_nf_cleanup(&csi->notifier);
+
+ v4l2_fwnode_endpoint_free(&v4l2_ep);
+
+ return ret;
+}
+
+static int mei_csi_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct device *dev = &cldev->dev;
+ struct mei_csi *csi;
+ int ret;
+
+ if (!dev_fwnode(dev))
+ return -EPROBE_DEFER;
+
+ csi = devm_kzalloc(dev, sizeof(struct mei_csi), GFP_KERNEL);
+ if (!csi)
+ return -ENOMEM;
+
+ csi->cldev = cldev;
+ mutex_init(&csi->lock);
+ init_completion(&csi->cmd_completion);
+
+ mei_cldev_set_drvdata(cldev, csi);
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(dev, "mei_cldev_enable failed: %d\n", ret);
+ goto destroy_mutex;
+ }
+
+ ret = mei_cldev_register_rx_cb(cldev, mei_csi_rx);
+ if (ret) {
+ dev_err(dev, "event cb registration failed: %d\n", ret);
+ goto err_disable;
+ }
+
+ ret = mei_csi_parse_firmware(csi);
+ if (ret)
+ goto err_disable;
+
+ csi->subdev.dev = &cldev->dev;
+ v4l2_subdev_init(&csi->subdev, &mei_csi_subdev_ops);
+ v4l2_set_subdevdata(&csi->subdev, csi);
+ csi->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
+ csi->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
+ csi->subdev.entity.ops = &mei_csi_entity_ops;
+
+ snprintf(csi->subdev.name, sizeof(csi->subdev.name),
+ MEI_CSI_ENTITY_NAME);
+
+ ret = mei_csi_init_controls(csi);
+ if (ret)
+ goto err_ctrl_handler;
+
+ csi->format_mbus[CSI_PAD_SOURCE] = mei_csi_format_mbus_default;
+ csi->format_mbus[CSI_PAD_SINK] = mei_csi_format_mbus_default;
+
+ csi->pads[CSI_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ csi->pads[CSI_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&csi->subdev.entity, CSI_NUM_PADS,
+ csi->pads);
+ if (ret)
+ goto err_ctrl_handler;
+
+ ret = v4l2_subdev_init_finalize(&csi->subdev);
+ if (ret < 0)
+ goto err_entity;
+
+ ret = v4l2_async_register_subdev(&csi->subdev);
+ if (ret < 0)
+ goto err_subdev;
+
+ pm_runtime_enable(&cldev->dev);
+
+ return 0;
+
+err_subdev:
+ v4l2_subdev_cleanup(&csi->subdev);
+
+err_entity:
+ media_entity_cleanup(&csi->subdev.entity);
+
+err_ctrl_handler:
+ v4l2_ctrl_handler_free(&csi->ctrl_handler);
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
+
+err_disable:
+ mei_cldev_disable(cldev);
+
+destroy_mutex:
+ mutex_destroy(&csi->lock);
+
+ return ret;
+}
+
+static void mei_csi_remove(struct mei_cl_device *cldev)
+{
+ struct mei_csi *csi = mei_cldev_get_drvdata(cldev);
+
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
+ v4l2_ctrl_handler_free(&csi->ctrl_handler);
+ v4l2_async_unregister_subdev(&csi->subdev);
+ v4l2_subdev_cleanup(&csi->subdev);
+ media_entity_cleanup(&csi->subdev.entity);
+
+ pm_runtime_disable(&cldev->dev);
+
+ mutex_destroy(&csi->lock);
+}
+
+#define MEI_CSI_UUID UUID_LE(0x92335FCF, 0x3203, 0x4472, \
+ 0xAF, 0x93, 0x7b, 0x44, 0x53, 0xAC, 0x29, 0xDA)
+
+static const struct mei_cl_device_id mei_csi_tbl[] = {
+ { MEI_CSI_DRIVER_NAME, MEI_CSI_UUID, MEI_CL_VERSION_ANY },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(mei, mei_csi_tbl);
+
+static struct mei_cl_driver mei_csi_driver = {
+ .id_table = mei_csi_tbl,
+ .name = MEI_CSI_DRIVER_NAME,
+
+ .probe = mei_csi_probe,
+ .remove = mei_csi_remove,
+};
+
+module_mei_cl_driver(mei_csi_driver);
+
+MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>");
+MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>");
+MODULE_DESCRIPTION("Device driver for IVSC CSI");
+MODULE_LICENSE("GPL");
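As a worked example of the CSI_SET_CONF payload: CSI_LINK_FREQ() reports the
link frequency in 100 kHz units, so the 400 MHz default
(MEI_CSI_LINK_FREQ_400MHZ) is encoded as 4000. A standalone sketch of the
same conversion, illustrative only and not part of the patch:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t link_freq = 400000000ULL;	/* MEI_CSI_LINK_FREQ_400MHZ */
		/* same scaling as CSI_LINK_FREQ(): Hz divided by 100 kHz */
		uint32_t wire = (uint32_t)(link_freq / (100 * 1000));

		printf("%llu Hz -> %u (100 kHz units)\n",
		       (unsigned long long)link_freq, wire);
		return 0;
	}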
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index c1b6a0596801..bf73e9e83f52 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -383,7 +383,7 @@ int saa7164_s_frequency(struct saa7164_port *port,
else if (port->nr == SAA7164_PORT_ENC2)
tsport = &dev->ports[SAA7164_PORT_TS2];
else
- BUG();
+ return -EINVAL; /* should not happen */
fe = tsport->dvb.frontend;
diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c
index 363689484c54..cc9f384f7f1e 100644
--- a/drivers/media/pci/saa7164/saa7164-fw.c
+++ b/drivers/media/pci/saa7164/saa7164-fw.c
@@ -271,7 +271,6 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev)
dprintk(DBGLVL_FW, "%s() Loader 1 has loaded.\n",
__func__);
first_timeout = SAA_DEVICE_TIMEOUT;
- second_timeout = 60 * SAA_DEVICE_TIMEOUT;
second_timeout = 100;
err_flags = saa7164_readl(SAA_SECONDSTAGEERROR_FLAGS);
diff --git a/drivers/media/pci/ttpci/budget-av.c b/drivers/media/pci/ttpci/budget-av.c
index 824529f3c74b..230b104a7cdf 100644
--- a/drivers/media/pci/ttpci/budget-av.c
+++ b/drivers/media/pci/ttpci/budget-av.c
@@ -123,7 +123,7 @@ static int i2c_writereg(struct i2c_adapter *i2c, u8 id, u8 reg, u8 val)
static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address)
{
- struct budget_av *budget_av = (struct budget_av *) ca->data;
+ struct budget_av *budget_av = ca->data;
int result;
if (slot != 0)
@@ -142,7 +142,7 @@ static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int ad
static int ciintf_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value)
{
- struct budget_av *budget_av = (struct budget_av *) ca->data;
+ struct budget_av *budget_av = ca->data;
int result;
if (slot != 0)
@@ -161,7 +161,7 @@ static int ciintf_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int a
static int ciintf_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address)
{
- struct budget_av *budget_av = (struct budget_av *) ca->data;
+ struct budget_av *budget_av = ca->data;
int result;
if (slot != 0)
@@ -181,7 +181,7 @@ static int ciintf_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 addre
static int ciintf_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value)
{
- struct budget_av *budget_av = (struct budget_av *) ca->data;
+ struct budget_av *budget_av = ca->data;
int result;
if (slot != 0)
@@ -200,7 +200,7 @@ static int ciintf_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 addr
static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot)
{
- struct budget_av *budget_av = (struct budget_av *) ca->data;
+ struct budget_av *budget_av = ca->data;
struct saa7146_dev *saa = budget_av->budget.dev;
if (slot != 0)
@@ -229,7 +229,7 @@ static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot)
static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
{
- struct budget_av *budget_av = (struct budget_av *) ca->data;
+ struct budget_av *budget_av = ca->data;
struct saa7146_dev *saa = budget_av->budget.dev;
if (slot != 0)
@@ -245,7 +245,7 @@ static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
static int ciintf_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
{
- struct budget_av *budget_av = (struct budget_av *) ca->data;
+ struct budget_av *budget_av = ca->data;
struct saa7146_dev *saa = budget_av->budget.dev;
if (slot != 0)
@@ -260,7 +260,7 @@ static int ciintf_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
{
- struct budget_av *budget_av = (struct budget_av *) ca->data;
+ struct budget_av *budget_av = ca->data;
struct saa7146_dev *saa = budget_av->budget.dev;
int result;
@@ -491,7 +491,7 @@ static int philips_su1278_ty_ci_tuner_set_params(struct dvb_frontend *fe)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
u32 div;
u8 buf[4];
- struct budget *budget = (struct budget *) fe->dvb->priv;
+ struct budget *budget = fe->dvb->priv;
struct i2c_msg msg = {.addr = 0x61,.flags = 0,.buf = buf,.len = sizeof(buf) };
if ((c->frequency < 950000) || (c->frequency > 2150000))
@@ -604,7 +604,7 @@ static const struct stv0299_config cinergy_1200s_1894_0010_config = {
static int philips_cu1216_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- struct budget *budget = (struct budget *) fe->dvb->priv;
+ struct budget *budget = fe->dvb->priv;
u8 buf[6];
struct i2c_msg msg = {.addr = 0x60,.flags = 0,.buf = buf,.len = sizeof(buf) };
int i;
@@ -668,7 +668,7 @@ static struct tda10023_config philips_cu1216_tda10023_config = {
static int philips_tu1216_tuner_init(struct dvb_frontend *fe)
{
- struct budget *budget = (struct budget *) fe->dvb->priv;
+ struct budget *budget = fe->dvb->priv;
static u8 tu1216_init[] = { 0x0b, 0xf5, 0x85, 0xab };
struct i2c_msg tuner_msg = {.addr = 0x60,.flags = 0,.buf = tu1216_init,.len = sizeof(tu1216_init) };
@@ -685,7 +685,7 @@ static int philips_tu1216_tuner_init(struct dvb_frontend *fe)
static int philips_tu1216_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- struct budget *budget = (struct budget *) fe->dvb->priv;
+ struct budget *budget = fe->dvb->priv;
u8 tuner_buf[4];
struct i2c_msg tuner_msg = {.addr = 0x60,.flags = 0,.buf = tuner_buf,.len =
sizeof(tuner_buf) };
@@ -769,7 +769,7 @@ static int philips_tu1216_tuner_set_params(struct dvb_frontend *fe)
static int philips_tu1216_request_firmware(struct dvb_frontend *fe,
const struct firmware **fw, char *name)
{
- struct budget *budget = (struct budget *) fe->dvb->priv;
+ struct budget *budget = fe->dvb->priv;
return request_firmware(fw, name, &budget->dev->pci->dev);
}
@@ -1353,7 +1353,7 @@ static void frontend_init(struct budget_av *budget_av)
static void budget_av_irq(struct saa7146_dev *dev, u32 * isr)
{
- struct budget_av *budget_av = (struct budget_av *) dev->ext_priv;
+ struct budget_av *budget_av = dev->ext_priv;
dprintk(8, "dev: %p, budget_av: %p\n", dev, budget_av);
@@ -1363,7 +1363,7 @@ static void budget_av_irq(struct saa7146_dev *dev, u32 * isr)
static int budget_av_detach(struct saa7146_dev *dev)
{
- struct budget_av *budget_av = (struct budget_av *) dev->ext_priv;
+ struct budget_av *budget_av = dev->ext_priv;
int err;
dprintk(2, "dev: %p\n", dev);
@@ -1412,7 +1412,7 @@ static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
{
struct saa7146_dev *dev = video_drvdata(file);
- struct budget_av *budget_av = (struct budget_av *)dev->ext_priv;
+ struct budget_av *budget_av = dev->ext_priv;
*i = budget_av->cur_input;
@@ -1423,7 +1423,7 @@ static int vidioc_g_input(struct file *file, void *fh, unsigned int *i)
static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
{
struct saa7146_dev *dev = video_drvdata(file);
- struct budget_av *budget_av = (struct budget_av *)dev->ext_priv;
+ struct budget_av *budget_av = dev->ext_priv;
dprintk(1, "VIDIOC_S_INPUT %d\n", input);
return saa7113_setinput(budget_av, input);
diff --git a/drivers/media/pci/ttpci/budget-ci.c b/drivers/media/pci/ttpci/budget-ci.c
index d59d18647371..66e1a004ee43 100644
--- a/drivers/media/pci/ttpci/budget-ci.c
+++ b/drivers/media/pci/ttpci/budget-ci.c
@@ -251,7 +251,7 @@ static void msp430_ir_deinit(struct budget_ci *budget_ci)
static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address)
{
- struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
+ struct budget_ci *budget_ci = ca->data;
if (slot != 0)
return -EINVAL;
@@ -262,7 +262,7 @@ static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int ad
static int ciintf_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value)
{
- struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
+ struct budget_ci *budget_ci = ca->data;
if (slot != 0)
return -EINVAL;
@@ -273,7 +273,7 @@ static int ciintf_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int a
static int ciintf_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address)
{
- struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
+ struct budget_ci *budget_ci = ca->data;
if (slot != 0)
return -EINVAL;
@@ -284,7 +284,7 @@ static int ciintf_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 addre
static int ciintf_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value)
{
- struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
+ struct budget_ci *budget_ci = ca->data;
if (slot != 0)
return -EINVAL;
@@ -295,7 +295,7 @@ static int ciintf_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 addr
static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot)
{
- struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
+ struct budget_ci *budget_ci = ca->data;
struct saa7146_dev *saa = budget_ci->budget.dev;
if (slot != 0)
@@ -318,7 +318,7 @@ static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot)
static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
{
- struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
+ struct budget_ci *budget_ci = ca->data;
struct saa7146_dev *saa = budget_ci->budget.dev;
if (slot != 0)
@@ -331,7 +331,7 @@ static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
static int ciintf_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
{
- struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
+ struct budget_ci *budget_ci = ca->data;
struct saa7146_dev *saa = budget_ci->budget.dev;
int tmp;
@@ -400,7 +400,7 @@ static void ciintf_interrupt(struct tasklet_struct *t)
static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
{
- struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
+ struct budget_ci *budget_ci = ca->data;
unsigned int flags;
// ensure we don't get spurious IRQs during initialisation
@@ -553,7 +553,7 @@ static void ciintf_deinit(struct budget_ci *budget_ci)
static void budget_ci_irq(struct saa7146_dev *dev, u32 * isr)
{
- struct budget_ci *budget_ci = (struct budget_ci *) dev->ext_priv;
+ struct budget_ci *budget_ci = dev->ext_priv;
dprintk(8, "dev: %p, budget_ci: %p\n", dev, budget_ci);
@@ -648,7 +648,7 @@ static int philips_su1278_tt_set_symbol_rate(struct dvb_frontend *fe, u32 srate,
static int philips_su1278_tt_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- struct budget_ci *budget_ci = (struct budget_ci *) fe->dvb->priv;
+ struct budget_ci *budget_ci = fe->dvb->priv;
u32 div;
u8 buf[4];
struct i2c_msg msg = {.addr = 0x60,.flags = 0,.buf = buf,.len = sizeof(buf) };
@@ -698,7 +698,7 @@ static const struct stv0299_config philips_su1278_tt_config = {
static int philips_tdm1316l_tuner_init(struct dvb_frontend *fe)
{
- struct budget_ci *budget_ci = (struct budget_ci *) fe->dvb->priv;
+ struct budget_ci *budget_ci = fe->dvb->priv;
static u8 td1316_init[] = { 0x0b, 0xf5, 0x85, 0xab };
static u8 disable_mc44BC374c[] = { 0x1d, 0x74, 0xa0, 0x68 };
struct i2c_msg tuner_msg = {.addr = budget_ci->tuner_pll_address,.flags = 0,.buf = td1316_init,.len =
@@ -729,7 +729,7 @@ static int philips_tdm1316l_tuner_init(struct dvb_frontend *fe)
static int philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- struct budget_ci *budget_ci = (struct budget_ci *) fe->dvb->priv;
+ struct budget_ci *budget_ci = fe->dvb->priv;
u8 tuner_buf[4];
struct i2c_msg tuner_msg = {.addr = budget_ci->tuner_pll_address,.flags = 0,.buf = tuner_buf,.len = sizeof(tuner_buf) };
int tuner_frequency = 0;
@@ -815,7 +815,7 @@ static int philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe)
static int philips_tdm1316l_request_firmware(struct dvb_frontend *fe,
const struct firmware **fw, char *name)
{
- struct budget_ci *budget_ci = (struct budget_ci *) fe->dvb->priv;
+ struct budget_ci *budget_ci = fe->dvb->priv;
return request_firmware(fw, name, &budget_ci->budget.dev->pci->dev);
}
@@ -845,7 +845,7 @@ static struct tda1004x_config philips_tdm1316l_config_invert = {
static int dvbc_philips_tdm1316l_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- struct budget_ci *budget_ci = (struct budget_ci *) fe->dvb->priv;
+ struct budget_ci *budget_ci = fe->dvb->priv;
u8 tuner_buf[5];
struct i2c_msg tuner_msg = {.addr = budget_ci->tuner_pll_address,
.flags = 0,
@@ -1494,7 +1494,7 @@ out1:
static int budget_ci_detach(struct saa7146_dev *dev)
{
- struct budget_ci *budget_ci = (struct budget_ci *) dev->ext_priv;
+ struct budget_ci *budget_ci = dev->ext_priv;
struct saa7146_dev *saa = budget_ci->budget.dev;
int err;
diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
index 710595987522..25f44c3eebf3 100644
--- a/drivers/media/pci/ttpci/budget-core.c
+++ b/drivers/media/pci/ttpci/budget-core.c
@@ -147,7 +147,7 @@ static int start_ts_capture(struct budget *budget)
static int budget_read_fe_status(struct dvb_frontend *fe,
enum fe_status *status)
{
- struct budget *budget = (struct budget *) fe->dvb->priv;
+ struct budget *budget = fe->dvb->priv;
int synced;
int ret;
@@ -570,7 +570,7 @@ int ttpci_budget_deinit(struct budget *budget)
void ttpci_budget_irq10_handler(struct saa7146_dev *dev, u32 * isr)
{
- struct budget *budget = (struct budget *) dev->ext_priv;
+ struct budget *budget = dev->ext_priv;
dprintk(8, "dev: %p, budget: %p\n", dev, budget);
@@ -580,7 +580,7 @@ void ttpci_budget_irq10_handler(struct saa7146_dev *dev, u32 * isr)
void ttpci_budget_set_video_port(struct saa7146_dev *dev, int video_port)
{
- struct budget *budget = (struct budget *) dev->ext_priv;
+ struct budget *budget = dev->ext_priv;
spin_lock(&budget->feedlock);
budget->video_port = video_port;
diff --git a/drivers/media/pci/ttpci/budget.c b/drivers/media/pci/ttpci/budget.c
index a88711a3ac7f..b76a1b330b50 100644
--- a/drivers/media/pci/ttpci/budget.c
+++ b/drivers/media/pci/ttpci/budget.c
@@ -144,7 +144,7 @@ static int SetVoltage_Activy(struct budget *budget,
static int siemens_budget_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
- struct budget* budget = (struct budget*) fe->dvb->priv;
+ struct budget *budget = fe->dvb->priv;
return SetVoltage_Activy (budget, voltage);
}
@@ -152,7 +152,7 @@ static int siemens_budget_set_voltage(struct dvb_frontend *fe,
static int budget_set_tone(struct dvb_frontend *fe,
enum fe_sec_tone_mode tone)
{
- struct budget* budget = (struct budget*) fe->dvb->priv;
+ struct budget *budget = fe->dvb->priv;
switch (tone) {
case SEC_TONE_ON:
@@ -172,7 +172,7 @@ static int budget_set_tone(struct dvb_frontend *fe,
static int budget_diseqc_send_master_cmd(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd)
{
- struct budget* budget = (struct budget*) fe->dvb->priv;
+ struct budget *budget = fe->dvb->priv;
SendDiSEqCMsg (budget, cmd->msg_len, cmd->msg, 0);
@@ -182,7 +182,7 @@ static int budget_diseqc_send_master_cmd(struct dvb_frontend* fe, struct dvb_dis
static int budget_diseqc_send_burst(struct dvb_frontend *fe,
enum fe_sec_mini_cmd minicmd)
{
- struct budget* budget = (struct budget*) fe->dvb->priv;
+ struct budget *budget = fe->dvb->priv;
SendDiSEqCMsg (budget, 0, NULL, minicmd);
@@ -192,7 +192,7 @@ static int budget_diseqc_send_burst(struct dvb_frontend *fe,
static int alps_bsrv2_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- struct budget* budget = (struct budget*) fe->dvb->priv;
+ struct budget *budget = fe->dvb->priv;
u8 pwr = 0;
u8 buf[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) };
@@ -234,7 +234,7 @@ static struct ves1x93_config alps_bsrv2_config =
static int alps_tdbe2_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- struct budget* budget = (struct budget*) fe->dvb->priv;
+ struct budget *budget = fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x62, .flags = 0, .buf = data, .len = sizeof(data) };
@@ -320,7 +320,7 @@ static u8 tuner_address_grundig_29504_401_activy = 0x60;
static int grundig_29504_451_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- struct budget* budget = (struct budget*) fe->dvb->priv;
+ struct budget *budget = fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
@@ -344,7 +344,7 @@ static struct tda8083_config grundig_29504_451_config = {
static int s5h1420_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
- struct budget* budget = (struct budget*) fe->dvb->priv;
+ struct budget *budget = fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };
@@ -405,7 +405,7 @@ static const struct stv0299_config alps_bsbe1_config_activy = {
static int alps_tdhd1_204_request_firmware(struct dvb_frontend *fe, const struct firmware **fw, char *name)
{
- struct budget *budget = (struct budget *)fe->dvb->priv;
+ struct budget *budget = fe->dvb->priv;
return request_firmware(fw, name, &budget->dev->pci->dev);
}
@@ -800,7 +800,7 @@ static int budget_attach (struct saa7146_dev* dev, struct saa7146_pci_extension_
static int budget_detach (struct saa7146_dev* dev)
{
- struct budget *budget = (struct budget*) dev->ext_priv;
+ struct budget *budget = dev->ext_priv;
int err;
if (budget->dvb_frontend) {
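
Aside (not part of the patch): the casts dropped throughout the ttpci budget drivers above are redundant because C converts void * to any object pointer type implicitly. A minimal standalone sketch, with made-up names:

struct budget_like { int users; };

static struct budget_like *get_priv(void *priv)
{
	struct budget_like *budget = priv;	/* no cast needed from void * */

	return budget;
}
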
diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c
index ec03e17727d7..da61f9beb6b4 100644
--- a/drivers/media/platform/allegro-dvt/allegro-core.c
+++ b/drivers/media/platform/allegro-dvt/allegro-core.c
@@ -17,7 +17,6 @@
#include <linux/mfd/syscon/xlnx-vcu.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -4007,7 +4006,7 @@ static struct platform_driver allegro_driver = {
.remove_new = allegro_remove,
.driver = {
.name = "allegro",
- .of_match_table = of_match_ptr(allegro_dt_ids),
+ .of_match_table = allegro_dt_ids,
.pm = &allegro_pm_ops,
},
};
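
Aside on the of_match_ptr() removal above (illustrative, not from the patch): of_match_ptr(tbl) expands to NULL when CONFIG_OF is disabled, which leaves the match table defined but unreferenced and can trigger unused-variable warnings; referencing the table directly keeps it used in every configuration. The names below are invented for illustration:

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "vendor,example" },	/* hypothetical compatible */
	{ /* sentinel */ }
};

static struct platform_driver example_driver = {
	.driver = {
		.name		= "example",
		.of_match_table	= example_dt_ids,	/* no of_match_ptr() wrapper */
	},
};
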
diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
index 6515f3cdb7a7..133d77d1ea0c 100644
--- a/drivers/media/platform/amphion/vdec.c
+++ b/drivers/media/platform/amphion/vdec.c
@@ -299,7 +299,8 @@ static int vdec_update_state(struct vpu_inst *inst, enum vpu_codec_state state,
vdec->state = VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE;
if (inst->state != pre_state)
- vpu_trace(inst->dev, "[%d] %d -> %d\n", inst->id, pre_state, inst->state);
+ vpu_trace(inst->dev, "[%d] %s -> %s\n", inst->id,
+ vpu_codec_state_name(pre_state), vpu_codec_state_name(inst->state));
if (inst->state == VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE)
vdec_handle_resolution_change(inst);
@@ -741,6 +742,21 @@ static int vdec_frame_decoded(struct vpu_inst *inst, void *arg)
dev_info(inst->dev, "[%d] buf[%d] has been decoded\n", inst->id, info->id);
vpu_set_buffer_state(vbuf, VPU_BUF_STATE_DECODED);
vdec->decoded_frame_count++;
+ if (vdec->params.display_delay_enable) {
+ struct vpu_format *cur_fmt;
+
+ cur_fmt = vpu_get_format(inst, inst->cap_format.type);
+ vpu_set_buffer_state(vbuf, VPU_BUF_STATE_READY);
+ for (int i = 0; i < vbuf->vb2_buf.num_planes; i++)
+ vb2_set_plane_payload(&vbuf->vb2_buf,
+ i, vpu_get_fmt_plane_size(cur_fmt, i));
+ vbuf->field = cur_fmt->field;
+ vbuf->sequence = vdec->sequence++;
+ dev_dbg(inst->dev, "[%d][OUTPUT TS]%32lld\n", inst->id, vbuf->vb2_buf.timestamp);
+
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
+ vdec->display_frame_count++;
+ }
exit:
vpu_inst_unlock(inst);
@@ -768,14 +784,14 @@ static void vdec_buf_done(struct vpu_inst *inst, struct vpu_frame_info *frame)
struct vpu_format *cur_fmt;
struct vpu_vb2_buffer *vpu_buf;
struct vb2_v4l2_buffer *vbuf;
- u32 sequence;
int i;
if (!frame)
return;
vpu_inst_lock(inst);
- sequence = vdec->sequence++;
+ if (!vdec->params.display_delay_enable)
+ vdec->sequence++;
vpu_buf = vdec_find_buffer(inst, frame->luma);
vpu_inst_unlock(inst);
if (!vpu_buf) {
@@ -794,13 +810,17 @@ static void vdec_buf_done(struct vpu_inst *inst, struct vpu_frame_info *frame)
dev_err(inst->dev, "[%d] buffer id(%d, %d) dismatch\n",
inst->id, vbuf->vb2_buf.index, frame->id);
+ if (vpu_get_buffer_state(vbuf) == VPU_BUF_STATE_READY && vdec->params.display_delay_enable)
+ return;
+
if (vpu_get_buffer_state(vbuf) != VPU_BUF_STATE_DECODED)
dev_err(inst->dev, "[%d] buffer(%d) ready without decoded\n", inst->id, frame->id);
+
vpu_set_buffer_state(vbuf, VPU_BUF_STATE_READY);
for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
vb2_set_plane_payload(&vbuf->vb2_buf, i, vpu_get_fmt_plane_size(cur_fmt, i));
vbuf->field = cur_fmt->field;
- vbuf->sequence = sequence;
+ vbuf->sequence = vdec->sequence;
dev_dbg(inst->dev, "[%d][OUTPUT TS]%32lld\n", inst->id, vbuf->vb2_buf.timestamp);
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
@@ -999,6 +1019,7 @@ static int vdec_response_frame_abnormal(struct vpu_inst *inst)
{
struct vdec_t *vdec = inst->priv;
struct vpu_fs_info info;
+ int ret;
if (!vdec->req_frame_count)
return 0;
@@ -1006,7 +1027,9 @@ static int vdec_response_frame_abnormal(struct vpu_inst *inst)
memset(&info, 0, sizeof(info));
info.type = MEM_RES_FRAME;
info.tag = vdec->seq_tag + 0xf0;
- vpu_session_alloc_fs(inst, &info);
+ ret = vpu_session_alloc_fs(inst, &info);
+ if (ret)
+ return ret;
vdec->req_frame_count--;
return 0;
@@ -1037,8 +1060,8 @@ static int vdec_response_frame(struct vpu_inst *inst, struct vb2_v4l2_buffer *vb
return -EINVAL;
}
- dev_dbg(inst->dev, "[%d] state = %d, alloc fs %d, tag = 0x%x\n",
- inst->id, inst->state, vbuf->vb2_buf.index, vdec->seq_tag);
+ dev_dbg(inst->dev, "[%d] state = %s, alloc fs %d, tag = 0x%x\n",
+ inst->id, vpu_codec_state_name(inst->state), vbuf->vb2_buf.index, vdec->seq_tag);
vpu_buf = to_vpu_vb2_buffer(vbuf);
memset(&info, 0, sizeof(info));
@@ -1400,7 +1423,7 @@ static void vdec_abort(struct vpu_inst *inst)
struct vpu_rpc_buffer_desc desc;
int ret;
- vpu_trace(inst->dev, "[%d] state = %d\n", inst->id, inst->state);
+ vpu_trace(inst->dev, "[%d] state = %s\n", inst->id, vpu_codec_state_name(inst->state));
vdec->aborting = true;
vpu_iface_add_scode(inst, SCODE_PADDING_ABORT);
@@ -1453,9 +1476,7 @@ static void vdec_release(struct vpu_inst *inst)
{
if (inst->id != VPU_INST_NULL_ID)
vpu_trace(inst->dev, "[%d]\n", inst->id);
- vpu_inst_lock(inst);
vdec_stop(inst, true);
- vpu_inst_unlock(inst);
}
static void vdec_cleanup(struct vpu_inst *inst)
diff --git a/drivers/media/platform/amphion/venc.c b/drivers/media/platform/amphion/venc.c
index 58480e2755ec..4eb57d793a9c 100644
--- a/drivers/media/platform/amphion/venc.c
+++ b/drivers/media/platform/amphion/venc.c
@@ -268,7 +268,7 @@ static int venc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *parm
{
struct vpu_inst *inst = to_inst(file);
struct venc_t *venc = inst->priv;
- struct v4l2_fract *timeperframe = &parm->parm.capture.timeperframe;
+ struct v4l2_fract *timeperframe;
if (!parm)
return -EINVAL;
@@ -279,6 +279,7 @@ static int venc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *parm
if (!vpu_helper_check_type(inst, parm->type))
return -EINVAL;
+ timeperframe = &parm->parm.capture.timeperframe;
parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
parm->parm.capture.readbuffers = 0;
timeperframe->numerator = venc->params.frame_rate.numerator;
@@ -291,7 +292,7 @@ static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *parm
{
struct vpu_inst *inst = to_inst(file);
struct venc_t *venc = inst->priv;
- struct v4l2_fract *timeperframe = &parm->parm.capture.timeperframe;
+ struct v4l2_fract *timeperframe;
unsigned long n, d;
if (!parm)
@@ -303,6 +304,7 @@ static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *parm
if (!vpu_helper_check_type(inst, parm->type))
return -EINVAL;
+ timeperframe = &parm->parm.capture.timeperframe;
if (!timeperframe->numerator)
timeperframe->numerator = venc->params.frame_rate.numerator;
if (!timeperframe->denominator)
diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
index 3bfe193722af..5a701f64289e 100644
--- a/drivers/media/platform/amphion/vpu.h
+++ b/drivers/media/platform/amphion/vpu.h
@@ -355,6 +355,9 @@ void vpu_inst_record_flow(struct vpu_inst *inst, u32 flow);
int vpu_core_driver_init(void);
void vpu_core_driver_exit(void);
+const char *vpu_id_name(u32 id);
+const char *vpu_codec_state_name(enum vpu_codec_state state);
+
extern bool debug;
#define vpu_trace(dev, fmt, arg...) \
do { \
diff --git a/drivers/media/platform/amphion/vpu_cmds.c b/drivers/media/platform/amphion/vpu_cmds.c
index fa581ba6bab2..c2337812573e 100644
--- a/drivers/media/platform/amphion/vpu_cmds.c
+++ b/drivers/media/platform/amphion/vpu_cmds.c
@@ -9,8 +9,6 @@
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -98,7 +96,7 @@ static struct vpu_cmd_t *vpu_alloc_cmd(struct vpu_inst *inst, u32 id, void *data
cmd->id = id;
ret = vpu_iface_pack_cmd(inst->core, cmd->pkt, inst->id, id, data);
if (ret) {
- dev_err(inst->dev, "iface pack cmd(%d) fail\n", id);
+ dev_err(inst->dev, "iface pack cmd %s fail\n", vpu_id_name(id));
vfree(cmd->pkt);
vfree(cmd);
return NULL;
@@ -125,14 +123,14 @@ static int vpu_session_process_cmd(struct vpu_inst *inst, struct vpu_cmd_t *cmd)
{
int ret;
- dev_dbg(inst->dev, "[%d]send cmd(0x%x)\n", inst->id, cmd->id);
+ dev_dbg(inst->dev, "[%d]send cmd %s\n", inst->id, vpu_id_name(cmd->id));
vpu_iface_pre_send_cmd(inst);
ret = vpu_cmd_send(inst->core, cmd->pkt);
if (!ret) {
vpu_iface_post_send_cmd(inst);
vpu_inst_record_flow(inst, cmd->id);
} else {
- dev_err(inst->dev, "[%d] iface send cmd(0x%x) fail\n", inst->id, cmd->id);
+ dev_err(inst->dev, "[%d] iface send cmd %s fail\n", inst->id, vpu_id_name(cmd->id));
}
return ret;
@@ -149,7 +147,8 @@ static void vpu_process_cmd_request(struct vpu_inst *inst)
list_for_each_entry_safe(cmd, tmp, &inst->cmd_q, list) {
list_del_init(&cmd->list);
if (vpu_session_process_cmd(inst, cmd))
- dev_err(inst->dev, "[%d] process cmd(%d) fail\n", inst->id, cmd->id);
+ dev_err(inst->dev, "[%d] process cmd %s fail\n",
+ inst->id, vpu_id_name(cmd->id));
if (cmd->request) {
inst->pending = (void *)cmd;
break;
@@ -305,7 +304,8 @@ static void vpu_core_keep_active(struct vpu_core *core)
dev_dbg(core->dev, "try to wake up\n");
mutex_lock(&core->cmd_lock);
- vpu_cmd_send(core, &pkt);
+ if (vpu_cmd_send(core, &pkt))
+ dev_err(core->dev, "fail to keep active\n");
mutex_unlock(&core->cmd_lock);
}
@@ -313,7 +313,7 @@ static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
{
unsigned long key;
int sync = false;
- int ret = -EINVAL;
+ int ret;
if (inst->id < 0)
return -EINVAL;
@@ -339,7 +339,7 @@ static int vpu_session_send_cmd(struct vpu_inst *inst, u32 id, void *data)
exit:
if (ret)
- dev_err(inst->dev, "[%d] send cmd(0x%x) fail\n", inst->id, id);
+ dev_err(inst->dev, "[%d] send cmd %s fail\n", inst->id, vpu_id_name(id));
return ret;
}
diff --git a/drivers/media/platform/amphion/vpu_core.c b/drivers/media/platform/amphion/vpu_core.c
index 7863b7b53494..1af6fc9460d4 100644
--- a/drivers/media/platform/amphion/vpu_core.c
+++ b/drivers/media/platform/amphion/vpu_core.c
@@ -9,7 +9,7 @@
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -88,6 +88,8 @@ static int vpu_core_boot_done(struct vpu_core *core)
core->supported_instance_count = min(core->supported_instance_count, count);
}
+ if (core->supported_instance_count >= BITS_PER_TYPE(core->instance_mask))
+ core->supported_instance_count = BITS_PER_TYPE(core->instance_mask);
core->fw_version = fw_version;
vpu_core_set_state(core, VPU_CORE_ACTIVE);
diff --git a/drivers/media/platform/amphion/vpu_dbg.c b/drivers/media/platform/amphion/vpu_dbg.c
index 44b830ae01d8..982c2c777484 100644
--- a/drivers/media/platform/amphion/vpu_dbg.c
+++ b/drivers/media/platform/amphion/vpu_dbg.c
@@ -50,6 +50,13 @@ static char *vpu_stat_name[] = {
[VPU_BUF_STATE_ERROR] = "error",
};
+static inline const char *to_vpu_stat_name(int state)
+{
+ if (state <= VPU_BUF_STATE_ERROR)
+ return vpu_stat_name[state];
+ return "unknown";
+}
+
static int vpu_dbg_instance(struct seq_file *s, void *data)
{
struct vpu_inst *inst = s->private;
@@ -67,7 +74,7 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
num = scnprintf(str, sizeof(str), "tgig = %d,pid = %d\n", inst->tgid, inst->pid);
if (seq_write(s, str, num))
return 0;
- num = scnprintf(str, sizeof(str), "state = %d\n", inst->state);
+ num = scnprintf(str, sizeof(str), "state = %s\n", vpu_codec_state_name(inst->state));
if (seq_write(s, str, num))
return 0;
num = scnprintf(str, sizeof(str),
@@ -141,7 +148,7 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
num = scnprintf(str, sizeof(str),
"output [%2d] state = %10s, %8s\n",
i, vb2_stat_name[vb->state],
- vpu_stat_name[vpu_get_buffer_state(vbuf)]);
+ to_vpu_stat_name(vpu_get_buffer_state(vbuf)));
if (seq_write(s, str, num))
return 0;
}
@@ -156,7 +163,7 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
num = scnprintf(str, sizeof(str),
"capture[%2d] state = %10s, %8s\n",
i, vb2_stat_name[vb->state],
- vpu_stat_name[vpu_get_buffer_state(vbuf)]);
+ to_vpu_stat_name(vpu_get_buffer_state(vbuf)));
if (seq_write(s, str, num))
return 0;
}
@@ -188,9 +195,9 @@ static int vpu_dbg_instance(struct seq_file *s, void *data)
if (!inst->flows[idx])
continue;
- num = scnprintf(str, sizeof(str), "\t[%s]0x%x\n",
+ num = scnprintf(str, sizeof(str), "\t[%s] %s\n",
inst->flows[idx] >= VPU_MSG_ID_NOOP ? "M" : "C",
- inst->flows[idx]);
+ vpu_id_name(inst->flows[idx]));
if (seq_write(s, str, num)) {
mutex_unlock(&inst->core->cmd_lock);
return 0;
diff --git a/drivers/media/platform/amphion/vpu_drv.c b/drivers/media/platform/amphion/vpu_drv.c
index 4187b2b5562f..2bf70aafd2ba 100644
--- a/drivers/media/platform/amphion/vpu_drv.c
+++ b/drivers/media/platform/amphion/vpu_drv.c
@@ -10,8 +10,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-map-ops.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/drivers/media/platform/amphion/vpu_helpers.c b/drivers/media/platform/amphion/vpu_helpers.c
index 019c77e84514..af3b336e5dc3 100644
--- a/drivers/media/platform/amphion/vpu_helpers.c
+++ b/drivers/media/platform/amphion/vpu_helpers.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include "vpu.h"
+#include "vpu_defs.h"
#include "vpu_core.h"
#include "vpu_rpc.h"
#include "vpu_helpers.h"
@@ -447,3 +448,63 @@ int vpu_find_src_by_dst(struct vpu_pair *pairs, u32 cnt, u32 dst)
return -EINVAL;
}
+
+const char *vpu_id_name(u32 id)
+{
+ switch (id) {
+ case VPU_CMD_ID_NOOP: return "noop";
+ case VPU_CMD_ID_CONFIGURE_CODEC: return "configure codec";
+ case VPU_CMD_ID_START: return "start";
+ case VPU_CMD_ID_STOP: return "stop";
+ case VPU_CMD_ID_ABORT: return "abort";
+ case VPU_CMD_ID_RST_BUF: return "reset buf";
+ case VPU_CMD_ID_SNAPSHOT: return "snapshot";
+ case VPU_CMD_ID_FIRM_RESET: return "reset firmware";
+ case VPU_CMD_ID_UPDATE_PARAMETER: return "update parameter";
+ case VPU_CMD_ID_FRAME_ENCODE: return "encode frame";
+ case VPU_CMD_ID_SKIP: return "skip";
+ case VPU_CMD_ID_FS_ALLOC: return "alloc fb";
+ case VPU_CMD_ID_FS_RELEASE: return "release fb";
+ case VPU_CMD_ID_TIMESTAMP: return "timestamp";
+ case VPU_CMD_ID_DEBUG: return "debug";
+ case VPU_MSG_ID_RESET_DONE: return "reset done";
+ case VPU_MSG_ID_START_DONE: return "start done";
+ case VPU_MSG_ID_STOP_DONE: return "stop done";
+ case VPU_MSG_ID_ABORT_DONE: return "abort done";
+ case VPU_MSG_ID_BUF_RST: return "buf reset done";
+ case VPU_MSG_ID_MEM_REQUEST: return "mem request";
+ case VPU_MSG_ID_PARAM_UPD_DONE: return "param upd done";
+ case VPU_MSG_ID_FRAME_INPUT_DONE: return "frame input done";
+ case VPU_MSG_ID_ENC_DONE: return "encode done";
+ case VPU_MSG_ID_DEC_DONE: return "frame display";
+ case VPU_MSG_ID_FRAME_REQ: return "fb request";
+ case VPU_MSG_ID_FRAME_RELEASE: return "fb release";
+ case VPU_MSG_ID_SEQ_HDR_FOUND: return "seq hdr found";
+ case VPU_MSG_ID_RES_CHANGE: return "resolution change";
+ case VPU_MSG_ID_PIC_HDR_FOUND: return "pic hdr found";
+ case VPU_MSG_ID_PIC_DECODED: return "picture decoded";
+ case VPU_MSG_ID_PIC_EOS: return "eos";
+ case VPU_MSG_ID_FIFO_LOW: return "fifo low";
+ case VPU_MSG_ID_BS_ERROR: return "bs error";
+ case VPU_MSG_ID_UNSUPPORTED: return "unsupported";
+ case VPU_MSG_ID_FIRMWARE_XCPT: return "exception";
+ case VPU_MSG_ID_PIC_SKIPPED: return "skipped";
+ }
+ return "<unknown>";
+}
+
+const char *vpu_codec_state_name(enum vpu_codec_state state)
+{
+ switch (state) {
+ case VPU_CODEC_STATE_DEINIT: return "initialization";
+ case VPU_CODEC_STATE_CONFIGURED: return "configured";
+ case VPU_CODEC_STATE_START: return "start";
+ case VPU_CODEC_STATE_STARTED: return "started";
+ case VPU_CODEC_STATE_ACTIVE: return "active";
+ case VPU_CODEC_STATE_SEEK: return "seek";
+ case VPU_CODEC_STATE_STOP: return "stop";
+ case VPU_CODEC_STATE_DRAIN: return "drain";
+ case VPU_CODEC_STATE_DYAMIC_RESOLUTION_CHANGE: return "resolution change";
+ }
+ return "<unknown>";
+}
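
Aside (not from the patch): the new helpers use a switch rather than a name array indexed by the id, so sparse or unexpected values fall through to a printable default instead of reading out of bounds; the to_vpu_stat_name() wrapper added to vpu_dbg.c above guards the existing array for the same reason. A minimal sketch with invented names:

enum example_state { EXAMPLE_IDLE, EXAMPLE_RUNNING };

static const char *example_state_name(enum example_state state)
{
	switch (state) {
	case EXAMPLE_IDLE:    return "idle";
	case EXAMPLE_RUNNING: return "running";
	}
	return "<unknown>";	/* out-of-range values stay printable */
}
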
diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
index c1d6606ad7e5..f771661980c0 100644
--- a/drivers/media/platform/amphion/vpu_malone.c
+++ b/drivers/media/platform/amphion/vpu_malone.c
@@ -9,8 +9,6 @@
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/rational.h>
diff --git a/drivers/media/platform/amphion/vpu_mbox.c b/drivers/media/platform/amphion/vpu_mbox.c
index b6d5b4844f67..c2963b8deb48 100644
--- a/drivers/media/platform/amphion/vpu_mbox.c
+++ b/drivers/media/platform/amphion/vpu_mbox.c
@@ -9,8 +9,6 @@
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
#include <linux/platform_device.h>
#include "vpu.h"
#include "vpu_mbox.h"
diff --git a/drivers/media/platform/amphion/vpu_msgs.c b/drivers/media/platform/amphion/vpu_msgs.c
index 92672a802b49..d0ead051f7d1 100644
--- a/drivers/media/platform/amphion/vpu_msgs.c
+++ b/drivers/media/platform/amphion/vpu_msgs.c
@@ -32,7 +32,7 @@ static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_
static void vpu_session_handle_mem_request(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
- struct vpu_pkt_mem_req_data req_data;
+ struct vpu_pkt_mem_req_data req_data = { 0 };
vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&req_data);
vpu_trace(inst->dev, "[%d] %d:%d %d:%d %d:%d\n",
@@ -80,7 +80,7 @@ static void vpu_session_handle_resolution_change(struct vpu_inst *inst, struct v
static void vpu_session_handle_enc_frame_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
- struct vpu_enc_pic_info info;
+ struct vpu_enc_pic_info info = { 0 };
vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
dev_dbg(inst->dev, "[%d] frame id = %d, wptr = 0x%x, size = %d\n",
@@ -90,7 +90,7 @@ static void vpu_session_handle_enc_frame_done(struct vpu_inst *inst, struct vpu_
static void vpu_session_handle_frame_request(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
- struct vpu_fs_info fs;
+ struct vpu_fs_info fs = { 0 };
vpu_iface_unpack_msg_data(inst->core, pkt, &fs);
call_void_vop(inst, event_notify, VPU_MSG_ID_FRAME_REQ, &fs);
@@ -107,7 +107,7 @@ static void vpu_session_handle_frame_release(struct vpu_inst *inst, struct vpu_r
info.type = inst->out_format.type;
call_void_vop(inst, buf_done, &info);
} else if (inst->core->type == VPU_CORE_TYPE_DEC) {
- struct vpu_fs_info fs;
+ struct vpu_fs_info fs = { 0 };
vpu_iface_unpack_msg_data(inst->core, pkt, &fs);
call_void_vop(inst, event_notify, VPU_MSG_ID_FRAME_RELEASE, &fs);
@@ -122,7 +122,7 @@ static void vpu_session_handle_input_done(struct vpu_inst *inst, struct vpu_rpc_
static void vpu_session_handle_pic_decoded(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
- struct vpu_dec_pic_info info;
+ struct vpu_dec_pic_info info = { 0 };
vpu_iface_unpack_msg_data(inst->core, pkt, (void *)&info);
call_void_vop(inst, get_one_frame, &info);
@@ -130,7 +130,7 @@ static void vpu_session_handle_pic_decoded(struct vpu_inst *inst, struct vpu_rpc
static void vpu_session_handle_pic_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
{
- struct vpu_dec_pic_info info;
+ struct vpu_dec_pic_info info = { 0 };
struct vpu_frame_info frame;
memset(&frame, 0, sizeof(frame));
@@ -210,7 +210,7 @@ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *m
return -EINVAL;
msg_id = ret;
- dev_dbg(inst->dev, "[%d] receive event(0x%x)\n", inst->id, msg_id);
+ dev_dbg(inst->dev, "[%d] receive event(%s)\n", inst->id, vpu_id_name(msg_id));
for (i = 0; i < ARRAY_SIZE(handlers); i++) {
if (handlers[i].id == msg_id) {
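
Aside (illustrative, not from the patch): initialising the on-stack structs with { 0 } above zeroes every member, so any field the unpack helper leaves untouched reads as zero rather than stack garbage. Sketch with made-up types:

struct example_info {
	int id;
	int size;
};

static void example_handle(void)
{
	struct example_info info = { 0 };	/* all members start at zero */

	/* partial fill is now safe: info.size stays 0 unless set here */
	info.id = 1;
}
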
diff --git a/drivers/media/platform/amphion/vpu_rpc.c b/drivers/media/platform/amphion/vpu_rpc.c
index 676f7da041bd..f626a9f835e0 100644
--- a/drivers/media/platform/amphion/vpu_rpc.c
+++ b/drivers/media/platform/amphion/vpu_rpc.c
@@ -9,8 +9,6 @@
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/firmware/imx/ipc.h>
#include <linux/firmware/imx/svc/misc.h>
diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
index 021235e1c144..0f6e4c666440 100644
--- a/drivers/media/platform/amphion/vpu_v4l2.c
+++ b/drivers/media/platform/amphion/vpu_v4l2.c
@@ -489,6 +489,11 @@ static int vpu_vb2_queue_setup(struct vb2_queue *vq,
for (i = 0; i < cur_fmt->mem_planes; i++)
psize[i] = vpu_get_fmt_plane_size(cur_fmt, i);
+ if (V4L2_TYPE_IS_OUTPUT(vq->type) && inst->state == VPU_CODEC_STATE_SEEK) {
+ vpu_trace(inst->dev, "reinit when VIDIOC_REQBUFS(OUTPUT, 0)\n");
+ call_void_vop(inst, release);
+ }
+
return 0;
}
@@ -773,9 +778,9 @@ int vpu_v4l2_close(struct file *file)
v4l2_m2m_ctx_release(inst->fh.m2m_ctx);
inst->fh.m2m_ctx = NULL;
}
+ call_void_vop(inst, release);
vpu_inst_unlock(inst);
- call_void_vop(inst, release);
vpu_inst_unregister(inst);
vpu_inst_put(inst);
diff --git a/drivers/media/platform/amphion/vpu_windsor.c b/drivers/media/platform/amphion/vpu_windsor.c
index b245ff6a1102..5f1101d7cf9e 100644
--- a/drivers/media/platform/amphion/vpu_windsor.c
+++ b/drivers/media/platform/amphion/vpu_windsor.c
@@ -9,8 +9,6 @@
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/time64.h>
#include <media/videobuf2-v4l2.h>
diff --git a/drivers/media/platform/aspeed/aspeed-video.c b/drivers/media/platform/aspeed/aspeed-video.c
index 374eb7781936..a9c2c69b2ed9 100644
--- a/drivers/media/platform/aspeed/aspeed-video.c
+++ b/drivers/media/platform/aspeed/aspeed-video.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
@@ -1130,7 +1129,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
static void aspeed_video_set_resolution(struct aspeed_video *video)
{
struct v4l2_bt_timings *act = &video->active_timings;
- unsigned int size = act->width * act->height;
+ unsigned int size = act->width * ALIGN(act->height, 8);
/* Set capture/compression frame sizes */
aspeed_video_calc_compressed_size(video, size);
@@ -1147,7 +1146,7 @@ static void aspeed_video_set_resolution(struct aspeed_video *video)
u32 width = ALIGN(act->width, 64);
aspeed_video_write(video, VE_CAP_WINDOW, width << 16 | act->height);
- size = width * act->height;
+ size = width * ALIGN(act->height, 8);
} else {
aspeed_video_write(video, VE_CAP_WINDOW,
act->width << 16 | act->height);
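
Aside on the aspeed-video sizing change above (not part of the patch): ALIGN(h, 8) rounds h up to the next multiple of 8 for power-of-two alignments, so the computed capture buffer covers whole 8-line blocks rather than stopping at the exact height. For reference, with an illustrative macro name:

/* kernel ALIGN() semantics for a power-of-two alignment */
#define EXAMPLE_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* EXAMPLE_ALIGN(1080, 8) == 1080, EXAMPLE_ALIGN(1085, 8) == 1088 */
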
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index c29e04864445..4046212d48b4 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -1120,7 +1120,7 @@ static int isi_graph_notify_complete(struct v4l2_async_notifier *notifier)
static void isi_graph_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct atmel_isi *isi = notifier_to_isi(notifier);
@@ -1132,7 +1132,7 @@ static void isi_graph_notify_unbind(struct v4l2_async_notifier *notifier,
static int isi_graph_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct atmel_isi *isi = notifier_to_isi(notifier);
@@ -1151,7 +1151,7 @@ static const struct v4l2_async_notifier_operations isi_graph_notify_ops = {
static int isi_graph_init(struct atmel_isi *isi)
{
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct device_node *ep;
int ret;
@@ -1159,11 +1159,11 @@ static int isi_graph_init(struct atmel_isi *isi)
if (!ep)
return -EINVAL;
- v4l2_async_nf_init(&isi->notifier);
+ v4l2_async_nf_init(&isi->notifier, &isi->v4l2_dev);
asd = v4l2_async_nf_add_fwnode_remote(&isi->notifier,
of_fwnode_handle(ep),
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
of_node_put(ep);
if (IS_ERR(asd))
@@ -1171,7 +1171,7 @@ static int isi_graph_init(struct atmel_isi *isi)
isi->notifier.ops = &isi_graph_notify_ops;
- ret = v4l2_async_nf_register(&isi->v4l2_dev, &isi->notifier);
+ ret = v4l2_async_nf_register(&isi->notifier);
if (ret < 0) {
dev_err(isi->dev, "Notifier registration failed\n");
v4l2_async_nf_cleanup(&isi->notifier);
@@ -1187,7 +1187,6 @@ static int atmel_isi_probe(struct platform_device *pdev)
int irq;
struct atmel_isi *isi;
struct vb2_queue *q;
- struct resource *regs;
int ret, i;
isi = devm_kzalloc(&pdev->dev, sizeof(struct atmel_isi), GFP_KERNEL);
@@ -1268,8 +1267,7 @@ static int atmel_isi_probe(struct platform_device *pdev)
list_add(&isi->dma_desc[i].list, &isi->dma_desc_head);
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- isi->regs = devm_ioremap_resource(&pdev->dev, regs);
+ isi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(isi->regs)) {
ret = PTR_ERR(isi->regs);
goto err_ioremap;
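
Aside (not part of the patch; variable names are invented): the atmel-isi hunks above follow the reworked v4l2-async API, where the notifier is bound to a struct v4l2_device at init time and registration no longer takes the device:

	v4l2_async_nf_init(&priv->notifier, &priv->v4l2_dev);

	asd = v4l2_async_nf_add_fwnode_remote(&priv->notifier,
					      of_fwnode_handle(ep),
					      struct v4l2_async_connection);
	if (IS_ERR(asd))
		return PTR_ERR(asd);

	priv->notifier.ops = &example_notify_ops;
	ret = v4l2_async_nf_register(&priv->notifier);
	if (ret)
		v4l2_async_nf_cleanup(&priv->notifier);
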
diff --git a/drivers/media/platform/atmel/atmel-isi.h b/drivers/media/platform/atmel/atmel-isi.h
index 7ad3895a2c87..ef38eddef5fc 100644
--- a/drivers/media/platform/atmel/atmel-isi.h
+++ b/drivers/media/platform/atmel/atmel-isi.h
@@ -121,8 +121,6 @@
#define ISI_DATAWIDTH_8 0x01
#define ISI_DATAWIDTH_10 0x02
-struct v4l2_async_subdev;
-
struct isi_platform_data {
u8 has_emb_sync;
u8 hsync_act_low;
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index 9755d1c8ceb9..0d879d71d818 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -13,6 +13,7 @@
#include <linux/of_graph.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <media/v4l2-ctrls.h>
@@ -30,6 +31,12 @@
#define CSI2RX_STATIC_CFG_DLANE_MAP(llane, plane) ((plane) << (16 + (llane) * 4))
#define CSI2RX_STATIC_CFG_LANES_MASK GENMASK(11, 8)
+#define CSI2RX_DPHY_LANE_CTRL_REG 0x40
+#define CSI2RX_DPHY_CL_RST BIT(16)
+#define CSI2RX_DPHY_DL_RST(i) BIT((i) + 12)
+#define CSI2RX_DPHY_CL_EN BIT(4)
+#define CSI2RX_DPHY_DL_EN(i) BIT(i)
+
#define CSI2RX_STREAM_BASE(n) (((n) + 1) * 0x100)
#define CSI2RX_STREAM_CTRL_REG(n) (CSI2RX_STREAM_BASE(n) + 0x000)
@@ -68,6 +75,9 @@ struct csi2rx_priv {
struct clk *sys_clk;
struct clk *p_clk;
struct clk *pixel_clk[CSI2RX_STREAMS_MAX];
+ struct reset_control *sys_rst;
+ struct reset_control *p_rst;
+ struct reset_control *pixel_rst[CSI2RX_STREAMS_MAX];
struct phy *dphy;
u8 lanes[CSI2RX_LANES_MAX];
@@ -101,6 +111,24 @@ static void csi2rx_reset(struct csi2rx_priv *csi2rx)
writel(0, csi2rx->base + CSI2RX_SOFT_RESET_REG);
}
+static int csi2rx_configure_ext_dphy(struct csi2rx_priv *csi2rx)
+{
+ union phy_configure_opts opts = { };
+ int ret;
+
+ ret = phy_power_on(csi2rx->dphy);
+ if (ret)
+ return ret;
+
+ ret = phy_configure(csi2rx->dphy, &opts);
+ if (ret) {
+ phy_power_off(csi2rx->dphy);
+ return ret;
+ }
+
+ return 0;
+}
+
static int csi2rx_start(struct csi2rx_priv *csi2rx)
{
unsigned int i;
@@ -112,6 +140,7 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
if (ret)
return ret;
+ reset_control_deassert(csi2rx->p_rst);
csi2rx_reset(csi2rx);
reg = csi2rx->num_lanes << 8;
@@ -139,6 +168,17 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
if (ret)
goto err_disable_pclk;
+ /* Enable DPHY clk and data lanes. */
+ if (csi2rx->dphy) {
+ reg = CSI2RX_DPHY_CL_EN | CSI2RX_DPHY_CL_RST;
+ for (i = 0; i < csi2rx->num_lanes; i++) {
+ reg |= CSI2RX_DPHY_DL_EN(csi2rx->lanes[i] - 1);
+ reg |= CSI2RX_DPHY_DL_RST(csi2rx->lanes[i] - 1);
+ }
+
+ writel(reg, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
+ }
+
/*
* Create a static mapping between the CSI virtual channels
* and the output stream.
@@ -154,6 +194,8 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
if (ret)
goto err_disable_pixclk;
+ reset_control_deassert(csi2rx->pixel_rst[i]);
+
writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF,
csi2rx->base + CSI2RX_STREAM_CFG_REG(i));
@@ -169,13 +211,28 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
if (ret)
goto err_disable_pixclk;
+ reset_control_deassert(csi2rx->sys_rst);
+
+ if (csi2rx->dphy) {
+ ret = csi2rx_configure_ext_dphy(csi2rx);
+ if (ret) {
+ dev_err(csi2rx->dev,
+ "Failed to configure external DPHY: %d\n", ret);
+ goto err_disable_sysclk;
+ }
+ }
+
clk_disable_unprepare(csi2rx->p_clk);
return 0;
+err_disable_sysclk:
+ clk_disable_unprepare(csi2rx->sys_clk);
err_disable_pixclk:
- for (; i > 0; i--)
+ for (; i > 0; i--) {
+ reset_control_assert(csi2rx->pixel_rst[i - 1]);
clk_disable_unprepare(csi2rx->pixel_clk[i - 1]);
+ }
err_disable_pclk:
clk_disable_unprepare(csi2rx->p_clk);
@@ -188,18 +245,28 @@ static void csi2rx_stop(struct csi2rx_priv *csi2rx)
unsigned int i;
clk_prepare_enable(csi2rx->p_clk);
+ reset_control_assert(csi2rx->sys_rst);
clk_disable_unprepare(csi2rx->sys_clk);
for (i = 0; i < csi2rx->max_streams; i++) {
writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
+ reset_control_assert(csi2rx->pixel_rst[i]);
clk_disable_unprepare(csi2rx->pixel_clk[i]);
}
+ reset_control_assert(csi2rx->p_rst);
clk_disable_unprepare(csi2rx->p_clk);
if (v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, false))
dev_warn(csi2rx->dev, "Couldn't disable our subdev\n");
+
+ if (csi2rx->dphy) {
+ writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG);
+
+ if (phy_power_off(csi2rx->dphy))
+ dev_warn(csi2rx->dev, "Couldn't power off DPHY\n");
+ }
}
static int csi2rx_s_stream(struct v4l2_subdev *subdev, int enable)
@@ -246,7 +313,7 @@ static const struct v4l2_subdev_ops csi2rx_subdev_ops = {
static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *s_subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct v4l2_subdev *subdev = notifier->sd;
struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev);
@@ -299,21 +366,22 @@ static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
return PTR_ERR(csi2rx->p_clk);
}
+ csi2rx->sys_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "sys");
+ if (IS_ERR(csi2rx->sys_rst))
+ return PTR_ERR(csi2rx->sys_rst);
+
+ csi2rx->p_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "reg_bank");
+ if (IS_ERR(csi2rx->p_rst))
+ return PTR_ERR(csi2rx->p_rst);
+
csi2rx->dphy = devm_phy_optional_get(&pdev->dev, "dphy");
if (IS_ERR(csi2rx->dphy)) {
dev_err(&pdev->dev, "Couldn't get external D-PHY\n");
return PTR_ERR(csi2rx->dphy);
}
- /*
- * FIXME: Once we'll have external D-PHY support, the check
- * will need to be removed.
- */
- if (csi2rx->dphy) {
- dev_err(&pdev->dev, "External D-PHY not supported yet\n");
- return -EINVAL;
- }
-
ret = clk_prepare_enable(csi2rx->p_clk);
if (ret) {
dev_err(&pdev->dev, "Couldn't prepare and enable P clock\n");
@@ -343,20 +411,27 @@ static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
* FIXME: Once we'll have internal D-PHY support, the check
* will need to be removed.
*/
- if (csi2rx->has_internal_dphy) {
+ if (!csi2rx->dphy && csi2rx->has_internal_dphy) {
dev_err(&pdev->dev, "Internal D-PHY not supported yet\n");
return -EINVAL;
}
for (i = 0; i < csi2rx->max_streams; i++) {
- char clk_name[16];
+ char name[16];
- snprintf(clk_name, sizeof(clk_name), "pixel_if%u_clk", i);
- csi2rx->pixel_clk[i] = devm_clk_get(&pdev->dev, clk_name);
+ snprintf(name, sizeof(name), "pixel_if%u_clk", i);
+ csi2rx->pixel_clk[i] = devm_clk_get(&pdev->dev, name);
if (IS_ERR(csi2rx->pixel_clk[i])) {
- dev_err(&pdev->dev, "Couldn't get clock %s\n", clk_name);
+ dev_err(&pdev->dev, "Couldn't get clock %s\n", name);
return PTR_ERR(csi2rx->pixel_clk[i]);
}
+
+ snprintf(name, sizeof(name), "pixel_if%u", i);
+ csi2rx->pixel_rst[i] =
+ devm_reset_control_get_optional_exclusive(&pdev->dev,
+ name);
+ if (IS_ERR(csi2rx->pixel_rst[i]))
+ return PTR_ERR(csi2rx->pixel_rst[i]);
}
return 0;
@@ -365,7 +440,7 @@ static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
{
struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 };
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct fwnode_handle *fwh;
struct device_node *ep;
int ret;
@@ -399,17 +474,17 @@ static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
return -EINVAL;
}
- v4l2_async_nf_init(&csi2rx->notifier);
+ v4l2_async_subdev_nf_init(&csi2rx->notifier, &csi2rx->subdev);
asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
of_node_put(ep);
if (IS_ERR(asd))
return PTR_ERR(asd);
csi2rx->notifier.ops = &csi2rx_notifier_ops;
- ret = v4l2_async_subdev_nf_register(&csi2rx->subdev, &csi2rx->notifier);
+ ret = v4l2_async_nf_register(&csi2rx->notifier);
if (ret)
v4l2_async_nf_cleanup(&csi2rx->notifier);
@@ -462,6 +537,7 @@ static int csi2rx_probe(struct platform_device *pdev)
dev_info(&pdev->dev,
"Probed CSI2RX with %u/%u lanes, %u streams, %s D-PHY\n",
csi2rx->num_lanes, csi2rx->max_lanes, csi2rx->max_streams,
+ csi2rx->dphy ? "external" :
csi2rx->has_internal_dphy ? "internal" : "no");
return 0;
@@ -482,6 +558,7 @@ static void csi2rx_remove(struct platform_device *pdev)
}
static const struct of_device_id csi2rx_of_table[] = {
+ { .compatible = "starfive,jh7110-csi2rx" },
{ .compatible = "cdns,csi2rx" },
{ },
};
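
Aside on the cdns-csi2rx reset hunks above (sketch only; names are illustrative): devm_reset_control_get_optional_exclusive() returns NULL when the device tree omits the named reset, and reset_control_assert()/reset_control_deassert() treat NULL as a no-op, so boards without the resets need no special casing:

	rst = devm_reset_control_get_optional_exclusive(dev, "sys");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_deassert(rst);	/* no-op when the reset is absent (rst == NULL) */
	/* ... streaming ... */
	reset_control_assert(rst);
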
diff --git a/drivers/media/platform/chips-media/coda-common.c b/drivers/media/platform/chips-media/coda-common.c
index ac9a642ae76f..cc4892129aaf 100644
--- a/drivers/media/platform/chips-media/coda-common.c
+++ b/drivers/media/platform/chips-media/coda-common.c
@@ -19,12 +19,12 @@
#include <linux/irq.h>
#include <linux/kfifo.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
-#include <linux/of.h>
#include <linux/ratelimit.h>
#include <linux/reset.h>
diff --git a/drivers/media/platform/intel/pxa_camera.c b/drivers/media/platform/intel/pxa_camera.c
index 9ed3c2e063de..6e6caf50e11e 100644
--- a/drivers/media/platform/intel/pxa_camera.c
+++ b/drivers/media/platform/intel/pxa_camera.c
@@ -2044,7 +2044,7 @@ static const struct video_device pxa_camera_videodev_template = {
static int pxa_camera_sensor_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
int err;
struct v4l2_device *v4l2_dev = notifier->v4l2_dev;
@@ -2123,7 +2123,7 @@ out:
static void pxa_camera_sensor_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct pxa_camera_dev *pcdev = v4l2_dev_to_pcdev(notifier->v4l2_dev);
@@ -2197,7 +2197,7 @@ static int pxa_camera_pdata_from_dt(struct device *dev,
struct pxa_camera_dev *pcdev)
{
u32 mclk_rate;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct device_node *np = dev->of_node;
struct v4l2_fwnode_endpoint ep = { .bus_type = 0 };
int err = of_property_read_u32(np, "clock-frequency",
@@ -2252,7 +2252,7 @@ static int pxa_camera_pdata_from_dt(struct device *dev,
asd = v4l2_async_nf_add_fwnode_remote(&pcdev->notifier,
of_fwnode_handle(np),
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
if (IS_ERR(asd))
err = PTR_ERR(asd);
out:
@@ -2274,9 +2274,8 @@ static int pxa_camera_probe(struct platform_device *pdev)
int irq;
int err = 0, i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!res || irq < 0)
+ if (irq < 0)
return -ENODEV;
pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
@@ -2289,27 +2288,41 @@ static int pxa_camera_probe(struct platform_device *pdev)
if (IS_ERR(pcdev->clk))
return PTR_ERR(pcdev->clk);
- v4l2_async_nf_init(&pcdev->notifier);
+ /*
+ * Request the regions.
+ */
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ pcdev->irq = irq;
+ pcdev->base = base;
+
+ err = v4l2_device_register(&pdev->dev, &pcdev->v4l2_dev);
+ if (err)
+ return err;
+
+ v4l2_async_nf_init(&pcdev->notifier, &pcdev->v4l2_dev);
pcdev->res = res;
pcdev->pdata = pdev->dev.platform_data;
if (pcdev->pdata) {
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
pcdev->platform_flags = pcdev->pdata->flags;
pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
asd = v4l2_async_nf_add_i2c(&pcdev->notifier,
pcdev->pdata->sensor_i2c_adapter_id,
pcdev->pdata->sensor_i2c_address,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
if (IS_ERR(asd))
err = PTR_ERR(asd);
} else if (pdev->dev.of_node) {
err = pxa_camera_pdata_from_dt(&pdev->dev, pcdev);
} else {
- return -ENODEV;
+ err = -ENODEV;
}
if (err < 0)
- return err;
+ goto exit_v4l2_device_unregister;
if (!(pcdev->platform_flags & (PXA_CAMERA_DATAWIDTH_8 |
PXA_CAMERA_DATAWIDTH_9 | PXA_CAMERA_DATAWIDTH_10))) {
@@ -2338,21 +2351,12 @@ static int pxa_camera_probe(struct platform_device *pdev)
spin_lock_init(&pcdev->lock);
mutex_init(&pcdev->mlock);
- /*
- * Request the regions.
- */
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- pcdev->irq = irq;
- pcdev->base = base;
-
/* request dma */
pcdev->dma_chans[0] = dma_request_chan(&pdev->dev, "CI_Y");
if (IS_ERR(pcdev->dma_chans[0])) {
dev_err(&pdev->dev, "Can't request DMA for Y\n");
- return PTR_ERR(pcdev->dma_chans[0]);
+ err = PTR_ERR(pcdev->dma_chans[0]);
+ goto exit_notifier_cleanup;
}
pcdev->dma_chans[1] = dma_request_chan(&pdev->dev, "CI_U");
@@ -2379,36 +2383,30 @@ static int pxa_camera_probe(struct platform_device *pdev)
}
}
- /* request irq */
- err = devm_request_irq(&pdev->dev, pcdev->irq, pxa_camera_irq, 0,
- PXA_CAM_DRV_NAME, pcdev);
- if (err) {
- dev_err(&pdev->dev, "Camera interrupt register failed\n");
- goto exit_free_dma;
- }
-
tasklet_setup(&pcdev->task_eof, pxa_camera_eof);
pxa_camera_activate(pcdev);
platform_set_drvdata(pdev, pcdev);
- err = v4l2_device_register(&pdev->dev, &pcdev->v4l2_dev);
- if (err)
- goto exit_deactivate;
err = pxa_camera_init_videobuf2(pcdev);
if (err)
- goto exit_notifier_cleanup;
+ goto exit_deactivate;
+
+ /* request irq */
+ err = devm_request_irq(&pdev->dev, pcdev->irq, pxa_camera_irq, 0,
+ PXA_CAM_DRV_NAME, pcdev);
+ if (err) {
+ dev_err(&pdev->dev, "Camera interrupt register failed\n");
+ goto exit_v4l2_device_unregister;
+ }
pcdev->notifier.ops = &pxa_camera_sensor_ops;
- err = v4l2_async_nf_register(&pcdev->v4l2_dev, &pcdev->notifier);
+ err = v4l2_async_nf_register(&pcdev->notifier);
if (err)
- goto exit_notifier_cleanup;
+ goto exit_deactivate;
return 0;
-exit_notifier_cleanup:
- v4l2_async_nf_cleanup(&pcdev->notifier);
- v4l2_device_unregister(&pcdev->v4l2_dev);
exit_deactivate:
pxa_camera_deactivate(pcdev);
tasklet_kill(&pcdev->task_eof);
@@ -2418,6 +2416,10 @@ exit_free_dma_u:
dma_release_channel(pcdev->dma_chans[1]);
exit_free_dma_y:
dma_release_channel(pcdev->dma_chans[0]);
+exit_notifier_cleanup:
+ v4l2_async_nf_cleanup(&pcdev->notifier);
+exit_v4l2_device_unregister:
+ v4l2_device_unregister(&pcdev->v4l2_dev);
return err;
}
@@ -2454,7 +2456,7 @@ static struct platform_driver pxa_camera_driver = {
.driver = {
.name = PXA_CAM_DRV_NAME,
.pm = &pxa_camera_pm,
- .of_match_table = of_match_ptr(pxa_camera_of_match),
+ .of_match_table = pxa_camera_of_match,
},
.probe = pxa_camera_probe,
.remove_new = pxa_camera_remove,
diff --git a/drivers/media/platform/marvell/cafe-driver.c b/drivers/media/platform/marvell/cafe-driver.c
index ae97ce4ead98..ef810249def6 100644
--- a/drivers/media/platform/marvell/cafe-driver.c
+++ b/drivers/media/platform/marvell/cafe-driver.c
@@ -478,7 +478,7 @@ static int cafe_pci_probe(struct pci_dev *pdev,
int ret;
struct cafe_camera *cam;
struct mcam_camera *mcam;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct i2c_client *i2c_dev;
/*
@@ -536,19 +536,24 @@ static int cafe_pci_probe(struct pci_dev *pdev,
if (ret)
goto out_pdown;
- v4l2_async_nf_init(&mcam->notifier);
+ ret = v4l2_device_register(mcam->dev, &mcam->v4l2_dev);
+ if (ret)
+ goto out_smbus_shutdown;
+
+ v4l2_async_nf_init(&mcam->notifier, &mcam->v4l2_dev);
asd = v4l2_async_nf_add_i2c(&mcam->notifier,
i2c_adapter_id(cam->i2c_adapter),
- ov7670_info.addr, struct v4l2_async_subdev);
+ ov7670_info.addr,
+ struct v4l2_async_connection);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
- goto out_smbus_shutdown;
+ goto out_v4l2_device_unregister;
}
ret = mccic_register(mcam);
if (ret)
- goto out_smbus_shutdown;
+ goto out_v4l2_device_unregister;
clkdev_create(mcam->mclk, "xclk", "%d-%04x",
i2c_adapter_id(cam->i2c_adapter), ov7670_info.addr);
@@ -564,6 +569,8 @@ static int cafe_pci_probe(struct pci_dev *pdev,
out_mccic_shutdown:
mccic_shutdown(mcam);
+out_v4l2_device_unregister:
+ v4l2_device_unregister(&mcam->v4l2_dev);
out_smbus_shutdown:
cafe_smbus_shutdown(cam);
out_pdown:
@@ -586,6 +593,7 @@ out:
static void cafe_shutdown(struct cafe_camera *cam)
{
mccic_shutdown(&cam->mcam);
+ v4l2_device_unregister(&cam->mcam.v4l2_dev);
cafe_smbus_shutdown(cam);
free_irq(cam->pdev->irq, cam);
pci_iounmap(cam->pdev, cam->mcam.regs);
diff --git a/drivers/media/platform/marvell/mcam-core.c b/drivers/media/platform/marvell/mcam-core.c
index 154bdcb3f2cc..66688b4aece5 100644
--- a/drivers/media/platform/marvell/mcam-core.c
+++ b/drivers/media/platform/marvell/mcam-core.c
@@ -1756,7 +1756,7 @@ EXPORT_SYMBOL_GPL(mccic_irq);
*/
static int mccic_notify_bound(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev, struct v4l2_async_subdev *asd)
+ struct v4l2_subdev *subdev, struct v4l2_async_connection *asd)
{
struct mcam_camera *cam = notifier_to_mcam(notifier);
int ret;
@@ -1801,7 +1801,7 @@ out:
}
static void mccic_notify_unbind(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev, struct v4l2_async_subdev *asd)
+ struct v4l2_subdev *subdev, struct v4l2_async_connection *asd)
{
struct mcam_camera *cam = notifier_to_mcam(notifier);
@@ -1863,13 +1863,6 @@ int mccic_register(struct mcam_camera *cam)
goto out;
}
- /*
- * Register with V4L
- */
- ret = v4l2_device_register(cam->dev, &cam->v4l2_dev);
- if (ret)
- goto out;
-
mutex_init(&cam->s_mutex);
cam->state = S_NOTREADY;
mcam_set_config_needed(cam, 1);
@@ -1877,7 +1870,7 @@ int mccic_register(struct mcam_camera *cam)
cam->mbus_code = mcam_def_mbus_code;
cam->notifier.ops = &mccic_notify_ops;
- ret = v4l2_async_nf_register(&cam->v4l2_dev, &cam->notifier);
+ ret = v4l2_async_nf_register(&cam->notifier);
if (ret < 0) {
cam_warn(cam, "failed to register a sensor notifier");
goto out;
@@ -1915,7 +1908,6 @@ int mccic_register(struct mcam_camera *cam)
out:
v4l2_async_nf_unregister(&cam->notifier);
- v4l2_device_unregister(&cam->v4l2_dev);
v4l2_async_nf_cleanup(&cam->notifier);
return ret;
}
@@ -1937,7 +1929,6 @@ void mccic_shutdown(struct mcam_camera *cam)
mcam_free_dma_bufs(cam);
v4l2_ctrl_handler_free(&cam->ctrl_handler);
v4l2_async_nf_unregister(&cam->notifier);
- v4l2_device_unregister(&cam->v4l2_dev);
v4l2_async_nf_cleanup(&cam->notifier);
}
EXPORT_SYMBOL_GPL(mccic_shutdown);
diff --git a/drivers/media/platform/marvell/mmp-driver.c b/drivers/media/platform/marvell/mmp-driver.c
index e93feefb447b..170907cc1885 100644
--- a/drivers/media/platform/marvell/mmp-driver.c
+++ b/drivers/media/platform/marvell/mmp-driver.c
@@ -180,7 +180,7 @@ static int mmpcam_probe(struct platform_device *pdev)
struct resource *res;
struct fwnode_handle *ep;
struct mmp_camera_platform_data *pdata;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
int ret;
cam = devm_kzalloc(&pdev->dev, sizeof(*cam), GFP_KERNEL);
@@ -223,8 +223,7 @@ static int mmpcam_probe(struct platform_device *pdev)
/*
* Get our I/O memory.
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mcam->regs = devm_ioremap_resource(&pdev->dev, res);
+ mcam->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(mcam->regs))
return PTR_ERR(mcam->regs);
mcam->regs_size = resource_size(res);
@@ -239,10 +238,10 @@ static int mmpcam_probe(struct platform_device *pdev)
if (!ep)
return -ENODEV;
- v4l2_async_nf_init(&mcam->notifier);
+ v4l2_async_nf_init(&mcam->notifier, &mcam->v4l2_dev);
asd = v4l2_async_nf_add_fwnode_remote(&mcam->notifier, ep,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
fwnode_handle_put(ep);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
@@ -362,7 +361,7 @@ static struct platform_driver mmpcam_driver = {
.remove_new = mmpcam_remove,
.driver = {
.name = "mmp-camera",
- .of_match_table = of_match_ptr(mmpcam_of_match),
+ .of_match_table = mmpcam_of_match,
.pm = &mmpcam_pm_ops,
}
};
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
index 60425c99a2b8..7194f88edc0f 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
@@ -1403,6 +1403,7 @@ static void mtk_jpeg_remove(struct platform_device *pdev)
{
struct mtk_jpeg_dev *jpeg = platform_get_drvdata(pdev);
+ cancel_delayed_work_sync(&jpeg->job_timeout_work);
pm_runtime_disable(&pdev->dev);
video_unregister_device(jpeg->vdev);
v4l2_m2m_release(jpeg->m2m_dev);
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
index baa7be58ce69..4a6ee211e18f 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <media/media-device.h>
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
index 244018365b6f..2bbc48c7402c 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
@@ -10,9 +10,9 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <media/media-device.h>
diff --git a/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c
index ad5fab2d8bfa..3501ac411242 100644
--- a/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c
@@ -7,8 +7,6 @@
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include "mtk_mdp_comp.h"
diff --git a/drivers/media/platform/mediatek/mdp/mtk_mdp_core.c b/drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
index 77e310e588e5..917cdf38f230 100644
--- a/drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
+++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_core.c
@@ -28,7 +28,7 @@ EXPORT_SYMBOL(mtk_mdp_dbg_level);
module_param(mtk_mdp_dbg_level, int, 0644);
-static const struct of_device_id mtk_mdp_comp_dt_ids[] = {
+static const struct of_device_id mtk_mdp_comp_dt_ids[] __maybe_unused = {
{
.compatible = "mediatek,mt8173-mdp-rdma",
.data = (void *)MTK_MDP_RDMA
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
index a605e80c7dc3..667933ea15f4 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
@@ -746,7 +746,7 @@ static const struct mdp_comp_ops *mdp_comp_ops[MDP_COMP_TYPE_COUNT] = {
[MDP_COMP_TYPE_CCORR] = &ccorr_ops,
};
-static const struct of_device_id mdp_comp_dt_ids[] = {
+static const struct of_device_id mdp_comp_dt_ids[] __maybe_unused = {
{
.compatible = "mediatek,mt8183-mdp3-rdma",
.data = (void *)MDP_COMP_TYPE_RDMA,
@@ -892,11 +892,13 @@ static int mdp_get_subsys_id(struct mdp_dev *mdp, struct device *dev,
ret = cmdq_dev_get_client_reg(&comp_pdev->dev, &cmdq_reg, index);
if (ret != 0) {
dev_err(&comp_pdev->dev, "cmdq_dev_get_subsys fail!\n");
+ put_device(&comp_pdev->dev);
return -EINVAL;
}
comp->subsys_id = cmdq_reg.subsys;
dev_dbg(&comp_pdev->dev, "subsys id=%d\n", cmdq_reg.subsys);
+ put_device(&comp_pdev->dev);
return 0;
}
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
index aa6c225302f0..cc44be10fdb7 100644
--- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
@@ -322,7 +322,7 @@ static struct platform_driver mdp_driver = {
.driver = {
.name = MDP_MODULE_NAME,
.pm = &mdp_pm_ops,
- .of_match_table = of_match_ptr(mdp_of_ids),
+ .of_match_table = mdp_of_ids,
},
};
diff --git a/drivers/media/platform/mediatek/vcodec/Makefile b/drivers/media/platform/mediatek/vcodec/Makefile
index 5f4c30fec85a..014abbfbd993 100644
--- a/drivers/media/platform/mediatek/vcodec/Makefile
+++ b/drivers/media/platform/mediatek/vcodec/Makefile
@@ -1,54 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-dec.o \
- mtk-vcodec-enc.o \
- mtk-vcodec-common.o \
- mtk-vcodec-dec-hw.o
-
-mtk-vcodec-dec-y := vdec/vdec_h264_if.o \
- vdec/vdec_vp8_if.o \
- vdec/vdec_vp8_req_if.o \
- vdec/vdec_vp9_if.o \
- vdec/vdec_vp9_req_lat_if.o \
- vdec/vdec_av1_req_lat_if.o \
- vdec/vdec_h264_req_if.o \
- vdec/vdec_h264_req_common.o \
- vdec/vdec_h264_req_multi_if.o \
- vdec/vdec_hevc_req_multi_if.o \
- mtk_vcodec_dec_drv.o \
- vdec_drv_if.o \
- vdec_vpu_if.o \
- vdec_msg_queue.o \
- mtk_vcodec_dec.o \
- mtk_vcodec_dec_stateful.o \
- mtk_vcodec_dec_stateless.o \
- mtk_vcodec_dec_pm.o \
-
-mtk-vcodec-dec-hw-y := mtk_vcodec_dec_hw.o
-
-mtk-vcodec-enc-y := venc/venc_vp8_if.o \
- venc/venc_h264_if.o \
- mtk_vcodec_enc.o \
- mtk_vcodec_enc_drv.o \
- mtk_vcodec_enc_pm.o \
- venc_drv_if.o \
- venc_vpu_if.o \
-
-
-mtk-vcodec-common-y := mtk_vcodec_intr.o \
- mtk_vcodec_util.o \
- mtk_vcodec_fw.o \
-
-ifneq ($(CONFIG_VIDEO_MEDIATEK_VCODEC_VPU),)
-mtk-vcodec-common-y += mtk_vcodec_fw_vpu.o
-endif
-
-ifneq ($(CONFIG_VIDEO_MEDIATEK_VCODEC_SCP),)
-mtk-vcodec-common-y += mtk_vcodec_fw_scp.o
-endif
-
-ifneq ($(CONFIG_DEBUG_FS),)
-obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-dbgfs.o
-
-mtk-vcodec-dbgfs-y := mtk_vcodec_dbgfs.o
-endif
\ No newline at end of file
+obj-y += common/
+obj-y += encoder/
+obj-y += decoder/
diff --git a/drivers/media/platform/mediatek/vcodec/common/Makefile b/drivers/media/platform/mediatek/vcodec/common/Makefile
new file mode 100644
index 000000000000..d0479914dfb3
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/common/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-common.o
+
+mtk-vcodec-common-y := mtk_vcodec_intr.o \
+ mtk_vcodec_util.o \
+ mtk_vcodec_fw.o \
+
+ifneq ($(CONFIG_VIDEO_MEDIATEK_VCODEC_VPU),)
+mtk-vcodec-common-y += mtk_vcodec_fw_vpu.o
+endif
+
+ifneq ($(CONFIG_VIDEO_MEDIATEK_VCODEC_SCP),)
+mtk-vcodec-common-y += mtk_vcodec_fw_scp.o
+endif
+
+ifneq ($(CONFIG_DEBUG_FS),)
+obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-dbgfs.o
+
+mtk-vcodec-dbgfs-y := mtk_vcodec_dbgfs.o
+endif
\ No newline at end of file
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_cmn_drv.h b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_cmn_drv.h
new file mode 100644
index 000000000000..6087e27bd604
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_cmn_drv.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Yunfei Dong <yunfei.dong@mediatek.com>
+ */
+
+#ifndef _MTK_VCODEC_COM_DRV_H_
+#define _MTK_VCODEC_COM_DRV_H_
+
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+
+#define MTK_VCODEC_MAX_PLANES 3
+
+#define WAIT_INTR_TIMEOUT_MS 1000
+
+/*
+ * enum mtk_q_type - Type of queue
+ */
+enum mtk_q_type {
+ MTK_Q_DATA_SRC = 0,
+ MTK_Q_DATA_DST = 1,
+};
+
+/*
+ * enum mtk_hw_reg_idx - MTK hw register base index
+ */
+enum mtk_hw_reg_idx {
+ VDEC_SYS,
+ VDEC_MISC,
+ VDEC_LD,
+ VDEC_TOP,
+ VDEC_CM,
+ VDEC_AD,
+ VDEC_AV,
+ VDEC_PP,
+ VDEC_HWD,
+ VDEC_HWQ,
+ VDEC_HWB,
+ VDEC_HWG,
+ NUM_MAX_VDEC_REG_BASE,
+ /* h264 encoder */
+ VENC_SYS = NUM_MAX_VDEC_REG_BASE,
+ /* vp8 encoder */
+ VENC_LT_SYS,
+ NUM_MAX_VCODEC_REG_BASE
+};
+
+/*
+ * struct mtk_vcodec_clk_info - Structure used to store clock name
+ */
+struct mtk_vcodec_clk_info {
+ const char *clk_name;
+ struct clk *vcodec_clk;
+};
+
+/*
+ * struct mtk_vcodec_clk - Structure used to store vcodec clock information
+ */
+struct mtk_vcodec_clk {
+ struct mtk_vcodec_clk_info *clk_info;
+ int clk_num;
+};
+
+/*
+ * struct mtk_vcodec_pm - Power management data structure
+ */
+struct mtk_vcodec_pm {
+ struct mtk_vcodec_clk vdec_clk;
+ struct mtk_vcodec_clk venc_clk;
+ struct device *dev;
+};
+
+/*
+ * enum mtk_vdec_hw_id - Hardware index used to separate
+ * different hardware
+ */
+enum mtk_vdec_hw_id {
+ MTK_VDEC_CORE,
+ MTK_VDEC_LAT0,
+ MTK_VDEC_LAT1,
+ MTK_VDEC_LAT_SOC,
+ MTK_VDEC_HW_MAX,
+};
+
+/**
+ * enum mtk_instance_state - The state of an MTK Vcodec instance.
+ * @MTK_STATE_FREE: default state when instance is created
+ * @MTK_STATE_INIT: vcodec instance is initialized
+ * @MTK_STATE_HEADER: vdec had sps/pps header parsed or venc
+ * had sps/pps header encoded
+ * @MTK_STATE_FLUSH: vdec is flushing. Only used by decoder
+ * @MTK_STATE_ABORT: vcodec should be aborted
+ */
+enum mtk_instance_state {
+ MTK_STATE_FREE = 0,
+ MTK_STATE_INIT = 1,
+ MTK_STATE_HEADER = 2,
+ MTK_STATE_FLUSH = 3,
+ MTK_STATE_ABORT = 4,
+};
+
+enum mtk_fmt_type {
+ MTK_FMT_DEC = 0,
+ MTK_FMT_ENC = 1,
+ MTK_FMT_FRAME = 2,
+};
+
+/*
+ * struct mtk_video_fmt - Structure used to store information about pixelformats
+ */
+struct mtk_video_fmt {
+ u32 fourcc;
+ enum mtk_fmt_type type;
+ u32 num_planes;
+ u32 flags;
+ struct v4l2_frmsize_stepwise frmsize;
+};
+
+/*
+ * struct mtk_q_data - Structure used to store information about queue
+ */
+struct mtk_q_data {
+ unsigned int visible_width;
+ unsigned int visible_height;
+ unsigned int coded_width;
+ unsigned int coded_height;
+ enum v4l2_field field;
+ unsigned int bytesperline[MTK_VCODEC_MAX_PLANES];
+ unsigned int sizeimage[MTK_VCODEC_MAX_PLANES];
+ const struct mtk_video_fmt *fmt;
+};
+
+/*
+ * enum mtk_instance_type - The type of an MTK Vcodec instance.
+ */
+enum mtk_instance_type {
+ MTK_INST_DECODER = 0,
+ MTK_INST_ENCODER = 1,
+};
+
+#endif /* _MTK_VCODEC_COM_DRV_H_ */
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dbgfs.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_dbgfs.c
index b5cdbbfcc388..5ad3797836db 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dbgfs.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_dbgfs.c
@@ -7,10 +7,11 @@
#include <linux/debugfs.h>
#include "mtk_vcodec_dbgfs.h"
-#include "mtk_vcodec_drv.h"
+#include "../decoder/mtk_vcodec_dec_drv.h"
+#include "../encoder/mtk_vcodec_enc_drv.h"
#include "mtk_vcodec_util.h"
-static void mtk_vdec_dbgfs_get_format_type(struct mtk_vcodec_ctx *ctx, char *buf,
+static void mtk_vdec_dbgfs_get_format_type(struct mtk_vcodec_dec_ctx *ctx, char *buf,
int *used, int total)
{
int curr_len;
@@ -72,7 +73,7 @@ static void mtk_vdec_dbgfs_get_help(char *buf, int *used, int total)
static ssize_t mtk_vdec_dbgfs_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
{
- struct mtk_vcodec_dev *vcodec_dev = filp->private_data;
+ struct mtk_vcodec_dec_dev *vcodec_dev = filp->private_data;
struct mtk_vcodec_dbgfs *dbgfs = &vcodec_dev->dbgfs;
mutex_lock(&dbgfs->dbgfs_lock);
@@ -88,10 +89,10 @@ static ssize_t mtk_vdec_dbgfs_write(struct file *filp, const char __user *ubuf,
static ssize_t mtk_vdec_dbgfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
- struct mtk_vcodec_dev *vcodec_dev = filp->private_data;
+ struct mtk_vcodec_dec_dev *vcodec_dev = filp->private_data;
struct mtk_vcodec_dbgfs *dbgfs = &vcodec_dev->dbgfs;
struct mtk_vcodec_dbgfs_inst *dbgfs_inst;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
int total_len = 200 * (dbgfs->inst_count == 0 ? 1 : dbgfs->inst_count);
int used_len = 0, curr_len, ret;
bool dbgfs_index[MTK_VDEC_DBGFS_MAX] = {0};
@@ -143,10 +144,10 @@ static const struct file_operations vdec_fops = {
.read = mtk_vdec_dbgfs_read,
};
-void mtk_vcodec_dbgfs_create(struct mtk_vcodec_ctx *ctx)
+void mtk_vcodec_dbgfs_create(struct mtk_vcodec_dec_ctx *ctx)
{
struct mtk_vcodec_dbgfs_inst *dbgfs_inst;
- struct mtk_vcodec_dev *vcodec_dev = ctx->dev;
+ struct mtk_vcodec_dec_dev *vcodec_dev = ctx->dev;
dbgfs_inst = kzalloc(sizeof(*dbgfs_inst), GFP_KERNEL);
if (!dbgfs_inst)
@@ -161,53 +162,68 @@ void mtk_vcodec_dbgfs_create(struct mtk_vcodec_ctx *ctx)
}
EXPORT_SYMBOL_GPL(mtk_vcodec_dbgfs_create);
-void mtk_vcodec_dbgfs_remove(struct mtk_vcodec_dev *vcodec_dev, int ctx_id)
+void mtk_vcodec_dbgfs_remove(struct mtk_vcodec_dec_dev *vcodec_dev, int ctx_id)
{
struct mtk_vcodec_dbgfs_inst *dbgfs_inst;
list_for_each_entry(dbgfs_inst, &vcodec_dev->dbgfs.dbgfs_head, node) {
if (dbgfs_inst->inst_id == ctx_id) {
vcodec_dev->dbgfs.inst_count--;
- break;
+ list_del(&dbgfs_inst->node);
+ kfree(dbgfs_inst);
+ return;
}
}
-
- if (dbgfs_inst) {
- list_del(&dbgfs_inst->node);
- kfree(dbgfs_inst);
- }
}
EXPORT_SYMBOL_GPL(mtk_vcodec_dbgfs_remove);
-void mtk_vcodec_dbgfs_init(struct mtk_vcodec_dev *vcodec_dev, bool is_encode)
+static void mtk_vcodec_dbgfs_vdec_init(struct mtk_vcodec_dec_dev *vcodec_dev)
{
struct dentry *vcodec_root;
- if (is_encode)
- vcodec_dev->dbgfs.vcodec_root = debugfs_create_dir("vcodec-enc", NULL);
- else
- vcodec_dev->dbgfs.vcodec_root = debugfs_create_dir("vcodec-dec", NULL);
+ vcodec_dev->dbgfs.vcodec_root = debugfs_create_dir("vcodec-dec", NULL);
if (IS_ERR(vcodec_dev->dbgfs.vcodec_root))
- dev_err(&vcodec_dev->plat_dev->dev, "create vcodec dir err:%d\n",
- IS_ERR(vcodec_dev->dbgfs.vcodec_root));
+ dev_err(&vcodec_dev->plat_dev->dev, "create vcodec dir err:%ld\n",
+ PTR_ERR(vcodec_dev->dbgfs.vcodec_root));
vcodec_root = vcodec_dev->dbgfs.vcodec_root;
debugfs_create_x32("mtk_v4l2_dbg_level", 0644, vcodec_root, &mtk_v4l2_dbg_level);
debugfs_create_x32("mtk_vcodec_dbg", 0644, vcodec_root, &mtk_vcodec_dbg);
vcodec_dev->dbgfs.inst_count = 0;
- if (is_encode)
- return;
-
INIT_LIST_HEAD(&vcodec_dev->dbgfs.dbgfs_head);
debugfs_create_file("vdec", 0200, vcodec_root, vcodec_dev, &vdec_fops);
mutex_init(&vcodec_dev->dbgfs.dbgfs_lock);
}
+
+static void mtk_vcodec_dbgfs_venc_init(struct mtk_vcodec_enc_dev *vcodec_dev)
+{
+ struct dentry *vcodec_root;
+
+ vcodec_dev->dbgfs.vcodec_root = debugfs_create_dir("vcodec-enc", NULL);
+ if (IS_ERR(vcodec_dev->dbgfs.vcodec_root))
+ dev_err(&vcodec_dev->plat_dev->dev, "create venc dir err:%d\n",
+ IS_ERR(vcodec_dev->dbgfs.vcodec_root));
+
+ vcodec_root = vcodec_dev->dbgfs.vcodec_root;
+ debugfs_create_x32("mtk_v4l2_dbg_level", 0644, vcodec_root, &mtk_v4l2_dbg_level);
+ debugfs_create_x32("mtk_vcodec_dbg", 0644, vcodec_root, &mtk_vcodec_dbg);
+
+ vcodec_dev->dbgfs.inst_count = 0;
+}
+
+void mtk_vcodec_dbgfs_init(void *vcodec_dev, bool is_encode)
+{
+ if (is_encode)
+ mtk_vcodec_dbgfs_venc_init(vcodec_dev);
+ else
+ mtk_vcodec_dbgfs_vdec_init(vcodec_dev);
+}
EXPORT_SYMBOL_GPL(mtk_vcodec_dbgfs_init);
-void mtk_vcodec_dbgfs_deinit(struct mtk_vcodec_dev *vcodec_dev)
+void mtk_vcodec_dbgfs_deinit(struct mtk_vcodec_dbgfs *dbgfs)
{
- debugfs_remove_recursive(vcodec_dev->dbgfs.vcodec_root);
+ debugfs_remove_recursive(dbgfs->vcodec_root);
}
EXPORT_SYMBOL_GPL(mtk_vcodec_dbgfs_deinit);
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dbgfs.h b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_dbgfs.h
index 241ff8197e73..073d2fedb54a 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dbgfs.h
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_dbgfs.h
@@ -7,8 +7,8 @@
#ifndef __MTK_VCODEC_DBGFS_H__
#define __MTK_VCODEC_DBGFS_H__
-struct mtk_vcodec_dev;
-struct mtk_vcodec_ctx;
+struct mtk_vcodec_dec_dev;
+struct mtk_vcodec_dec_ctx;
/*
* enum mtk_vdec_dbgfs_log_index - used to get different debug information
@@ -22,12 +22,12 @@ enum mtk_vdec_dbgfs_log_index {
/**
* struct mtk_vcodec_dbgfs_inst - debugfs information for each inst
* @node: list node for each inst
- * @vcodec_ctx: struct mtk_vcodec_ctx
+ * @vcodec_ctx: struct mtk_vcodec_dec_ctx
* @inst_id: index of the context that the same with ctx->id
*/
struct mtk_vcodec_dbgfs_inst {
struct list_head node;
- struct mtk_vcodec_ctx *vcodec_ctx;
+ struct mtk_vcodec_dec_ctx *vcodec_ctx;
int inst_id;
};
@@ -50,24 +50,24 @@ struct mtk_vcodec_dbgfs {
};
#if defined(CONFIG_DEBUG_FS)
-void mtk_vcodec_dbgfs_create(struct mtk_vcodec_ctx *ctx);
-void mtk_vcodec_dbgfs_remove(struct mtk_vcodec_dev *vcodec_dev, int ctx_id);
-void mtk_vcodec_dbgfs_init(struct mtk_vcodec_dev *vcodec_dev, bool is_encode);
-void mtk_vcodec_dbgfs_deinit(struct mtk_vcodec_dev *vcodec_dev);
+void mtk_vcodec_dbgfs_create(struct mtk_vcodec_dec_ctx *ctx);
+void mtk_vcodec_dbgfs_remove(struct mtk_vcodec_dec_dev *vcodec_dev, int ctx_id);
+void mtk_vcodec_dbgfs_init(void *vcodec_dev, bool is_encode);
+void mtk_vcodec_dbgfs_deinit(struct mtk_vcodec_dbgfs *dbgfs);
#else
-static inline void mtk_vcodec_dbgfs_create(struct mtk_vcodec_ctx *ctx)
+static inline void mtk_vcodec_dbgfs_create(struct mtk_vcodec_dec_ctx *ctx)
{
}
-static inline void mtk_vcodec_dbgfs_remove(struct mtk_vcodec_dev *vcodec_dev, int ctx_id)
+static inline void mtk_vcodec_dbgfs_remove(struct mtk_vcodec_dec_dev *vcodec_dev, int ctx_id)
{
}
-static inline void mtk_vcodec_dbgfs_init(struct mtk_vcodec_dev *vcodec_dev, bool is_encode)
+static inline void mtk_vcodec_dbgfs_init(void *vcodec_dev, bool is_encode)
{
}
-static inline void mtk_vcodec_dbgfs_deinit(struct mtk_vcodec_dev *vcodec_dev)
+static inline void mtk_vcodec_dbgfs_deinit(struct mtk_vcodec_dbgfs *dbgfs)
{
}
#endif
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw.c
index 556e54aadac9..08949b08fbc6 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw.c
@@ -1,21 +1,26 @@
// SPDX-License-Identifier: GPL-2.0
-#include "mtk_vcodec_fw.h"
+#include "../decoder/mtk_vcodec_dec_drv.h"
+#include "../encoder/mtk_vcodec_enc_drv.h"
#include "mtk_vcodec_fw_priv.h"
-#include "mtk_vcodec_util.h"
-#include "mtk_vcodec_drv.h"
-struct mtk_vcodec_fw *mtk_vcodec_fw_select(struct mtk_vcodec_dev *dev,
- enum mtk_vcodec_fw_type type,
+struct mtk_vcodec_fw *mtk_vcodec_fw_select(void *priv, enum mtk_vcodec_fw_type type,
enum mtk_vcodec_fw_use fw_use)
{
+ struct platform_device *plat_dev;
+
+ if (fw_use == ENCODER)
+ plat_dev = ((struct mtk_vcodec_enc_dev *)priv)->plat_dev;
+ else
+ plat_dev = ((struct mtk_vcodec_dec_dev *)priv)->plat_dev;
+
switch (type) {
case VPU:
- return mtk_vcodec_fw_vpu_init(dev, fw_use);
+ return mtk_vcodec_fw_vpu_init(priv, fw_use);
case SCP:
- return mtk_vcodec_fw_scp_init(dev);
+ return mtk_vcodec_fw_scp_init(priv, fw_use);
default:
- mtk_v4l2_err("invalid vcodec fw type");
+ dev_err(&plat_dev->dev, "Invalid vcodec fw type");
return ERR_PTR(-EINVAL);
}
}
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw.h b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw.h
index 16824114657f..300363a40158 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw.h
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw.h
@@ -6,9 +6,10 @@
#include <linux/remoteproc.h>
#include <linux/remoteproc/mtk_scp.h>
-#include "../vpu/mtk_vpu.h"
+#include "../../vpu/mtk_vpu.h"
-struct mtk_vcodec_dev;
+struct mtk_vcodec_dec_dev;
+struct mtk_vcodec_enc_dev;
enum mtk_vcodec_fw_type {
VPU,
@@ -25,8 +26,7 @@ struct mtk_vcodec_fw;
typedef void (*mtk_vcodec_ipi_handler) (void *data,
unsigned int len, void *priv);
-struct mtk_vcodec_fw *mtk_vcodec_fw_select(struct mtk_vcodec_dev *dev,
- enum mtk_vcodec_fw_type type,
+struct mtk_vcodec_fw *mtk_vcodec_fw_select(void *priv, enum mtk_vcodec_fw_type type,
enum mtk_vcodec_fw_use fw_use);
void mtk_vcodec_fw_release(struct mtk_vcodec_fw *fw);
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_priv.h b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_priv.h
index b41e66185cec..99603accd82e 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_priv.h
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_priv.h
@@ -5,13 +5,15 @@
#include "mtk_vcodec_fw.h"
-struct mtk_vcodec_dev;
+struct mtk_vcodec_dec_dev;
+struct mtk_vcodec_enc_dev;
struct mtk_vcodec_fw {
enum mtk_vcodec_fw_type type;
const struct mtk_vcodec_fw_ops *ops;
struct platform_device *pdev;
struct mtk_scp *scp;
+ enum mtk_vcodec_fw_use fw_use;
};
struct mtk_vcodec_fw_ops {
@@ -28,22 +30,20 @@ struct mtk_vcodec_fw_ops {
};
#if IS_ENABLED(CONFIG_VIDEO_MEDIATEK_VCODEC_VPU)
-struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(struct mtk_vcodec_dev *dev,
- enum mtk_vcodec_fw_use fw_use);
+struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(void *priv, enum mtk_vcodec_fw_use fw_use);
#else
static inline struct mtk_vcodec_fw *
-mtk_vcodec_fw_vpu_init(struct mtk_vcodec_dev *dev,
- enum mtk_vcodec_fw_use fw_use)
+mtk_vcodec_fw_vpu_init(void *priv, enum mtk_vcodec_fw_use fw_use)
{
return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_VIDEO_MEDIATEK_VCODEC_VPU */
#if IS_ENABLED(CONFIG_VIDEO_MEDIATEK_VCODEC_SCP)
-struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(struct mtk_vcodec_dev *dev);
+struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(void *priv, enum mtk_vcodec_fw_use fw_use);
#else
static inline struct mtk_vcodec_fw *
-mtk_vcodec_fw_scp_init(struct mtk_vcodec_dev *dev)
+mtk_vcodec_fw_scp_init(void *priv, enum mtk_vcodec_fw_use fw_use)
{
return ERR_PTR(-ENODEV);
}
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
index d8e66b645bd8..9e744d07a1e8 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
+#include "../decoder/mtk_vcodec_dec_drv.h"
+#include "../encoder/mtk_vcodec_enc_drv.h"
#include "mtk_vcodec_fw_priv.h"
-#include "mtk_vcodec_util.h"
-#include "mtk_vcodec_drv.h"
static int mtk_vcodec_scp_load_firmware(struct mtk_vcodec_fw *fw)
{
@@ -53,18 +53,32 @@ static const struct mtk_vcodec_fw_ops mtk_vcodec_rproc_msg = {
.release = mtk_vcodec_scp_release,
};
-struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(struct mtk_vcodec_dev *dev)
+struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(void *priv, enum mtk_vcodec_fw_use fw_use)
{
struct mtk_vcodec_fw *fw;
+ struct platform_device *plat_dev;
struct mtk_scp *scp;
- scp = scp_get(dev->plat_dev);
+ if (fw_use == ENCODER) {
+ struct mtk_vcodec_enc_dev *enc_dev = priv;
+
+ plat_dev = enc_dev->plat_dev;
+ } else if (fw_use == DECODER) {
+ struct mtk_vcodec_dec_dev *dec_dev = priv;
+
+ plat_dev = dec_dev->plat_dev;
+ } else {
+ pr_err("Invalid fw_use %d (use a resonable fw id here)\n", fw_use);
+ return ERR_PTR(-EINVAL);
+ }
+
+ scp = scp_get(plat_dev);
if (!scp) {
- mtk_v4l2_err("could not get vdec scp handle");
+ dev_err(&plat_dev->dev, "could not get vdec scp handle");
return ERR_PTR(-EPROBE_DEFER);
}
- fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL);
+ fw = devm_kzalloc(&plat_dev->dev, sizeof(*fw), GFP_KERNEL);
fw->type = SCP;
fw->ops = &mtk_vcodec_rproc_msg;
fw->scp = scp;
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
index cfc7ebed8fb7..5e03b0886559 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_vpu.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
+#include "../decoder/mtk_vcodec_dec_drv.h"
+#include "../encoder/mtk_vcodec_enc_drv.h"
#include "mtk_vcodec_fw_priv.h"
-#include "mtk_vcodec_util.h"
-#include "mtk_vcodec_drv.h"
static int mtk_vcodec_vpu_load_firmware(struct mtk_vcodec_fw *fw)
{
@@ -51,18 +51,32 @@ static void mtk_vcodec_vpu_release(struct mtk_vcodec_fw *fw)
put_device(&fw->pdev->dev);
}
-static void mtk_vcodec_vpu_reset_handler(void *priv)
+static void mtk_vcodec_vpu_reset_dec_handler(void *priv)
{
- struct mtk_vcodec_dev *dev = priv;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_dev *dev = priv;
+ struct mtk_vcodec_dec_ctx *ctx;
- mtk_v4l2_err("Watchdog timeout!!");
+ dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
mutex_lock(&dev->dev_mutex);
list_for_each_entry(ctx, &dev->ctx_list, list) {
ctx->state = MTK_STATE_ABORT;
- mtk_v4l2_debug(0, "[%d] Change to state MTK_STATE_ABORT",
- ctx->id);
+ mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id);
+ }
+ mutex_unlock(&dev->dev_mutex);
+}
+
+static void mtk_vcodec_vpu_reset_enc_handler(void *priv)
+{
+ struct mtk_vcodec_enc_dev *dev = priv;
+ struct mtk_vcodec_enc_ctx *ctx;
+
+ dev_err(&dev->plat_dev->dev, "Watchdog timeout!!");
+
+ mutex_lock(&dev->dev_mutex);
+ list_for_each_entry(ctx, &dev->ctx_list, list) {
+ ctx->state = MTK_STATE_ABORT;
+ mtk_v4l2_vdec_dbg(0, ctx, "[%d] Change to state MTK_STATE_ABORT", ctx->id);
}
mutex_unlock(&dev->dev_mutex);
}
@@ -77,36 +91,46 @@ static const struct mtk_vcodec_fw_ops mtk_vcodec_vpu_msg = {
.release = mtk_vcodec_vpu_release,
};
-struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(struct mtk_vcodec_dev *dev,
- enum mtk_vcodec_fw_use fw_use)
+struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(void *priv, enum mtk_vcodec_fw_use fw_use)
{
struct platform_device *fw_pdev;
+ struct platform_device *plat_dev;
struct mtk_vcodec_fw *fw;
enum rst_id rst_id;
- switch (fw_use) {
- case ENCODER:
+ if (fw_use == ENCODER) {
+ struct mtk_vcodec_enc_dev *enc_dev = priv;
+
+ plat_dev = enc_dev->plat_dev;
rst_id = VPU_RST_ENC;
- break;
- case DECODER:
- default:
+ } else if (fw_use == DECODER) {
+ struct mtk_vcodec_dec_dev *dec_dev = priv;
+
+ plat_dev = dec_dev->plat_dev;
rst_id = VPU_RST_DEC;
- break;
+ } else {
+ pr_err("Invalid fw_use %d (use a resonable fw id here)\n", fw_use);
+ return ERR_PTR(-EINVAL);
}
- fw_pdev = vpu_get_plat_device(dev->plat_dev);
+ fw_pdev = vpu_get_plat_device(plat_dev);
if (!fw_pdev) {
- mtk_v4l2_err("firmware device is not ready");
+ dev_err(&plat_dev->dev, "firmware device is not ready");
return ERR_PTR(-EINVAL);
}
- vpu_wdt_reg_handler(fw_pdev, mtk_vcodec_vpu_reset_handler, dev, rst_id);
- fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL);
+ if (fw_use == DECODER)
+ vpu_wdt_reg_handler(fw_pdev, mtk_vcodec_vpu_reset_dec_handler, priv, rst_id);
+ else
+ vpu_wdt_reg_handler(fw_pdev, mtk_vcodec_vpu_reset_enc_handler, priv, rst_id);
+
+ fw = devm_kzalloc(&plat_dev->dev, sizeof(*fw), GFP_KERNEL);
if (!fw)
return ERR_PTR(-ENOMEM);
fw->type = VPU;
fw->ops = &mtk_vcodec_vpu_msg;
fw->pdev = fw_pdev;
+ fw->fw_use = fw_use;
return fw;
}
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_intr.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_intr.c
new file mode 100644
index 000000000000..f203fc25636b
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_intr.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: Tiffany Lin <tiffany.lin@mediatek.com>
+*/
+
+#include <linux/errno.h>
+#include <linux/wait.h>
+
+#include "../decoder/mtk_vcodec_dec_drv.h"
+#include "../encoder/mtk_vcodec_enc_drv.h"
+#include "mtk_vcodec_intr.h"
+
+int mtk_vcodec_wait_for_done_ctx(void *priv, int command, unsigned int timeout_ms,
+ unsigned int hw_id)
+{
+ int instance_type = *((int *)priv);
+ long timeout_jiff, ret;
+ int ctx_id, ctx_type, status = 0;
+ int *ctx_int_cond, *ctx_int_type;
+ wait_queue_head_t *ctx_queue;
+ struct platform_device *pdev;
+
+ if (instance_type == DECODER) {
+ struct mtk_vcodec_dec_ctx *ctx;
+
+ ctx = priv;
+ ctx_id = ctx->id;
+ ctx_type = ctx->type;
+ ctx_int_cond = ctx->int_cond;
+ ctx_int_type = ctx->int_type;
+ ctx_queue = ctx->queue;
+ pdev = ctx->dev->plat_dev;
+ } else {
+ struct mtk_vcodec_enc_ctx *ctx;
+
+ ctx = priv;
+ ctx_id = ctx->id;
+ ctx_type = ctx->type;
+ ctx_int_cond = ctx->int_cond;
+ ctx_int_type = ctx->int_type;
+ ctx_queue = ctx->queue;
+ pdev = ctx->dev->plat_dev;
+ }
+
+ timeout_jiff = msecs_to_jiffies(timeout_ms);
+ ret = wait_event_interruptible_timeout(ctx_queue[hw_id],
+ ctx_int_cond[hw_id],
+ timeout_jiff);
+
+ if (!ret) {
+ status = -1; /* timeout */
+ dev_err(&pdev->dev, "[%d] cmd=%d, type=%d, dec timeout=%ums (%d %d)",
+ ctx_id, command, ctx_type, timeout_ms,
+ ctx_int_cond[hw_id], ctx_int_type[hw_id]);
+ } else if (-ERESTARTSYS == ret) {
+ status = -1;
+ dev_err(&pdev->dev, "[%d] cmd=%d, type=%d, dec inter fail (%d %d)",
+ ctx_id, command, ctx_type,
+ ctx_int_cond[hw_id], ctx_int_type[hw_id]);
+ }
+
+ ctx_int_cond[hw_id] = 0;
+ ctx_int_type[hw_id] = 0;
+
+ return status;
+}
+EXPORT_SYMBOL(mtk_vcodec_wait_for_done_ctx);
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_intr.h b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_intr.h
index 9681f492813b..3e3cc71ee572 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_intr.h
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_intr.h
@@ -9,11 +9,11 @@
#define MTK_INST_IRQ_RECEIVED 0x1
-struct mtk_vcodec_ctx;
+struct mtk_vcodec_dec_ctx;
+struct mtk_vcodec_enc_ctx;
/* timeout is ms */
-int mtk_vcodec_wait_for_done_ctx(struct mtk_vcodec_ctx *ctx,
- int command, unsigned int timeout_ms,
+int mtk_vcodec_wait_for_done_ctx(void *priv, int command, unsigned int timeout_ms,
unsigned int hw_id);
#endif /* _MTK_VCODEC_INTR_H_ */
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.c b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
index f214e6f67005..908602031fd0 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.c
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
@@ -7,11 +7,11 @@
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/regmap.h>
-#include "mtk_vcodec_dec_hw.h"
-#include "mtk_vcodec_drv.h"
-#include "mtk_vcodec_util.h"
+#include "../decoder/mtk_vcodec_dec_drv.h"
+#include "../encoder/mtk_vcodec_enc_drv.h"
+#include "../decoder/mtk_vcodec_dec_hw.h"
#if defined(CONFIG_DEBUG_FS)
int mtk_vcodec_dbg;
@@ -21,59 +21,66 @@ int mtk_v4l2_dbg_level;
EXPORT_SYMBOL(mtk_v4l2_dbg_level);
#endif
-void __iomem *mtk_vcodec_get_reg_addr(struct mtk_vcodec_ctx *data,
- unsigned int reg_idx)
+void __iomem *mtk_vcodec_get_reg_addr(void __iomem **reg_base, unsigned int reg_idx)
{
- struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
-
- if (!data || reg_idx >= NUM_MAX_VCODEC_REG_BASE) {
- mtk_v4l2_err("Invalid arguments, reg_idx=%d", reg_idx);
+ if (reg_idx >= NUM_MAX_VCODEC_REG_BASE) {
+ pr_err(MTK_DBG_V4L2_STR "Invalid arguments, reg_idx=%d", reg_idx);
return NULL;
}
- return ctx->dev->reg_base[reg_idx];
+ return reg_base[reg_idx];
}
EXPORT_SYMBOL(mtk_vcodec_get_reg_addr);
-int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
- struct mtk_vcodec_mem *mem)
+int mtk_vcodec_write_vdecsys(struct mtk_vcodec_dec_ctx *ctx, unsigned int reg,
+ unsigned int val)
+{
+ struct mtk_vcodec_dec_dev *dev = ctx->dev;
+
+ if (dev->vdecsys_regmap)
+ return regmap_write(dev->vdecsys_regmap, reg, val);
+
+ writel(val, dev->reg_base[VDEC_SYS] + reg);
+
+ return 0;
+}
+EXPORT_SYMBOL(mtk_vcodec_write_vdecsys);
+
+int mtk_vcodec_mem_alloc(void *priv, struct mtk_vcodec_mem *mem)
{
unsigned long size = mem->size;
- struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
+ struct mtk_vcodec_dec_ctx *ctx = priv;
struct device *dev = &ctx->dev->plat_dev->dev;
mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
if (!mem->va) {
- mtk_v4l2_err("%s dma_alloc size=%ld failed!", dev_name(dev),
- size);
+ mtk_v4l2_vdec_err(ctx, "%s dma_alloc size=%ld failed!", dev_name(dev), size);
return -ENOMEM;
}
- mtk_v4l2_debug(3, "[%d] - va = %p", ctx->id, mem->va);
- mtk_v4l2_debug(3, "[%d] - dma = 0x%lx", ctx->id,
- (unsigned long)mem->dma_addr);
- mtk_v4l2_debug(3, "[%d] size = 0x%lx", ctx->id, size);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d] - va = %p", ctx->id, mem->va);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d] - dma = 0x%lx", ctx->id,
+ (unsigned long)mem->dma_addr);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d] size = 0x%lx", ctx->id, size);
return 0;
}
EXPORT_SYMBOL(mtk_vcodec_mem_alloc);
-void mtk_vcodec_mem_free(struct mtk_vcodec_ctx *data,
- struct mtk_vcodec_mem *mem)
+void mtk_vcodec_mem_free(void *priv, struct mtk_vcodec_mem *mem)
{
unsigned long size = mem->size;
- struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)data;
+ struct mtk_vcodec_dec_ctx *ctx = priv;
struct device *dev = &ctx->dev->plat_dev->dev;
if (!mem->va) {
- mtk_v4l2_err("%s dma_free size=%ld failed!", dev_name(dev),
- size);
+ mtk_v4l2_vdec_err(ctx, "%s dma_free size=%ld failed!", dev_name(dev), size);
return;
}
- mtk_v4l2_debug(3, "[%d] - va = %p", ctx->id, mem->va);
- mtk_v4l2_debug(3, "[%d] - dma = 0x%lx", ctx->id,
- (unsigned long)mem->dma_addr);
- mtk_v4l2_debug(3, "[%d] size = 0x%lx", ctx->id, size);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d] - va = %p", ctx->id, mem->va);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d] - dma = 0x%lx", ctx->id,
+ (unsigned long)mem->dma_addr);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d] size = 0x%lx", ctx->id, size);
dma_free_coherent(dev, size, mem->va, mem->dma_addr);
mem->va = NULL;
@@ -82,10 +89,10 @@ void mtk_vcodec_mem_free(struct mtk_vcodec_ctx *data,
}
EXPORT_SYMBOL(mtk_vcodec_mem_free);
-void *mtk_vcodec_get_hw_dev(struct mtk_vcodec_dev *dev, int hw_idx)
+void *mtk_vcodec_get_hw_dev(struct mtk_vcodec_dec_dev *dev, int hw_idx)
{
if (hw_idx >= MTK_VDEC_HW_MAX || hw_idx < 0 || !dev->subdev_dev[hw_idx]) {
- mtk_v4l2_err("hw idx is out of range:%d", hw_idx);
+ dev_err(&dev->plat_dev->dev, "hw idx is out of range:%d", hw_idx);
return NULL;
}
@@ -93,8 +100,8 @@ void *mtk_vcodec_get_hw_dev(struct mtk_vcodec_dev *dev, int hw_idx)
}
EXPORT_SYMBOL(mtk_vcodec_get_hw_dev);
-void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dev *vdec_dev,
- struct mtk_vcodec_ctx *ctx, int hw_idx)
+void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dec_dev *vdec_dev,
+ struct mtk_vcodec_dec_ctx *ctx, int hw_idx)
{
unsigned long flags;
struct mtk_vdec_hw_dev *subdev_dev;
@@ -103,7 +110,7 @@ void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dev *vdec_dev,
if (vdec_dev->vdec_pdata->is_subdev_supported) {
subdev_dev = mtk_vcodec_get_hw_dev(vdec_dev, hw_idx);
if (!subdev_dev) {
- mtk_v4l2_err("Failed to get hw dev");
+ dev_err(&vdec_dev->plat_dev->dev, "Failed to get hw dev");
spin_unlock_irqrestore(&vdec_dev->irqlock, flags);
return;
}
@@ -115,18 +122,18 @@ void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dev *vdec_dev,
}
EXPORT_SYMBOL(mtk_vcodec_set_curr_ctx);
-struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *vdec_dev,
- unsigned int hw_idx)
+struct mtk_vcodec_dec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dec_dev *vdec_dev,
+ unsigned int hw_idx)
{
unsigned long flags;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
struct mtk_vdec_hw_dev *subdev_dev;
spin_lock_irqsave(&vdec_dev->irqlock, flags);
if (vdec_dev->vdec_pdata->is_subdev_supported) {
subdev_dev = mtk_vcodec_get_hw_dev(vdec_dev, hw_idx);
if (!subdev_dev) {
- mtk_v4l2_err("Failed to get hw dev");
+ dev_err(&vdec_dev->plat_dev->dev, "Failed to get hw dev");
spin_unlock_irqrestore(&vdec_dev->irqlock, flags);
return NULL;
}
diff --git a/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.h b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.h
new file mode 100644
index 000000000000..85f615cdd4d3
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+* Copyright (c) 2016 MediaTek Inc.
+* Author: PC Chen <pc.chen@mediatek.com>
+* Tiffany Lin <tiffany.lin@mediatek.com>
+*/
+
+#ifndef _MTK_VCODEC_UTIL_H_
+#define _MTK_VCODEC_UTIL_H_
+
+#include <linux/types.h>
+#include <linux/dma-direction.h>
+
+#define MTK_DBG_VCODEC_STR "[MTK_VCODEC]"
+#define MTK_DBG_V4L2_STR "[MTK_V4L2]"
+
+struct mtk_vcodec_mem {
+ size_t size;
+ void *va;
+ dma_addr_t dma_addr;
+};
+
+struct mtk_vcodec_fb {
+ size_t size;
+ dma_addr_t dma_addr;
+};
+
+struct mtk_vcodec_dec_ctx;
+struct mtk_vcodec_dec_dev;
+
+#undef pr_fmt
+#define pr_fmt(fmt) "%s(),%d: " fmt, __func__, __LINE__
+
+#define mtk_v4l2_err(plat_dev, fmt, args...) \
+ dev_err(&(plat_dev)->dev, "[MTK_V4L2][ERROR] " fmt "\n", ##args)
+
+#define mtk_vcodec_err(inst_id, plat_dev, fmt, args...) \
+ dev_err(&(plat_dev)->dev, "[MTK_VCODEC][ERROR][%d]: " fmt "\n", inst_id, ##args)
+
+#if defined(CONFIG_DEBUG_FS)
+extern int mtk_v4l2_dbg_level;
+extern int mtk_vcodec_dbg;
+
+#define mtk_v4l2_debug(plat_dev, level, fmt, args...) \
+ do { \
+ if (mtk_v4l2_dbg_level >= (level)) \
+ dev_dbg(&(plat_dev)->dev, "[MTK_V4L2] %s, %d: " fmt "\n", \
+ __func__, __LINE__, ##args); \
+ } while (0)
+
+#define mtk_vcodec_debug(inst_id, plat_dev, fmt, args...) \
+ do { \
+ if (mtk_vcodec_dbg) \
+ dev_dbg(&(plat_dev)->dev, "[MTK_VCODEC][%d]: %s, %d " fmt "\n", \
+ inst_id, __func__, __LINE__, ##args); \
+ } while (0)
+#else
+#define mtk_v4l2_debug(plat_dev, level, fmt, args...) \
+ dev_dbg(&(plat_dev)->dev, "[MTK_V4L2]: " fmt "\n", ##args)
+
+#define mtk_vcodec_debug(inst_id, plat_dev, fmt, args...) \
+ dev_dbg(&(plat_dev)->dev, "[MTK_VCODEC][%d]: " fmt "\n", inst_id, ##args)
+#endif
+
+void __iomem *mtk_vcodec_get_reg_addr(void __iomem **reg_base, unsigned int reg_idx);
+int mtk_vcodec_write_vdecsys(struct mtk_vcodec_dec_ctx *ctx, unsigned int reg, unsigned int val);
+int mtk_vcodec_mem_alloc(void *priv, struct mtk_vcodec_mem *mem);
+void mtk_vcodec_mem_free(void *priv, struct mtk_vcodec_mem *mem);
+void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dec_dev *vdec_dev,
+ struct mtk_vcodec_dec_ctx *ctx, int hw_idx);
+struct mtk_vcodec_dec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dec_dev *vdec_dev,
+ unsigned int hw_idx);
+void *mtk_vcodec_get_hw_dev(struct mtk_vcodec_dec_dev *dev, int hw_idx);
+
+#endif /* _MTK_VCODEC_UTIL_H_ */
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/Makefile b/drivers/media/platform/mediatek/vcodec/decoder/Makefile
new file mode 100644
index 000000000000..904cd22def84
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/decoder/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-dec.o \
+ mtk-vcodec-dec-hw.o
+
+mtk-vcodec-dec-y := vdec/vdec_h264_if.o \
+ vdec/vdec_vp8_if.o \
+ vdec/vdec_vp8_req_if.o \
+ vdec/vdec_vp9_if.o \
+ vdec/vdec_vp9_req_lat_if.o \
+ vdec/vdec_av1_req_lat_if.o \
+ vdec/vdec_h264_req_if.o \
+ vdec/vdec_h264_req_common.o \
+ vdec/vdec_h264_req_multi_if.o \
+ vdec/vdec_hevc_req_multi_if.o \
+ mtk_vcodec_dec_drv.o \
+ vdec_drv_if.o \
+ vdec_vpu_if.o \
+ vdec_msg_queue.o \
+ mtk_vcodec_dec.o \
+ mtk_vcodec_dec_stateful.o \
+ mtk_vcodec_dec_stateless.o \
+ mtk_vcodec_dec_pm.o \
+
+mtk-vcodec-dec-hw-y := mtk_vcodec_dec_hw.o
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
index 93fcea821001..91ed576d6821 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c
@@ -9,10 +9,8 @@
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
-#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec_drv.h"
#include "mtk_vcodec_dec.h"
-#include "mtk_vcodec_intr.h"
-#include "mtk_vcodec_util.h"
#include "vdec_drv_if.h"
#include "mtk_vcodec_dec_pm.h"
@@ -35,11 +33,13 @@ mtk_vdec_find_format(struct v4l2_format *f,
return NULL;
}
-static bool mtk_vdec_get_cap_fmt(struct mtk_vcodec_ctx *ctx, int format_index)
+static bool mtk_vdec_get_cap_fmt(struct mtk_vcodec_dec_ctx *ctx, int format_index)
{
const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
const struct mtk_video_fmt *fmt;
+ struct mtk_q_data *q_data;
int num_frame_count = 0, i;
+ bool ret = false;
fmt = &dec_pdata->vdec_formats[format_index];
for (i = 0; i < *dec_pdata->num_formats; i++) {
@@ -49,13 +49,29 @@ static bool mtk_vdec_get_cap_fmt(struct mtk_vcodec_ctx *ctx, int format_index)
num_frame_count++;
}
- if (num_frame_count == 1 || fmt->fourcc == V4L2_PIX_FMT_MM21)
+ if (num_frame_count == 1 || (!ctx->is_10bit_bitstream && fmt->fourcc == V4L2_PIX_FMT_MM21))
return true;
- return false;
+ q_data = &ctx->q_data[MTK_Q_DATA_SRC];
+ switch (q_data->fmt->fourcc) {
+ case V4L2_PIX_FMT_H264_SLICE:
+ if (ctx->is_10bit_bitstream && fmt->fourcc == V4L2_PIX_FMT_MT2110R)
+ ret = true;
+ break;
+ case V4L2_PIX_FMT_VP9_FRAME:
+ case V4L2_PIX_FMT_AV1_FRAME:
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ if (ctx->is_10bit_bitstream && fmt->fourcc == V4L2_PIX_FMT_MT2110T)
+ ret = true;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
}
-static struct mtk_q_data *mtk_vdec_get_q_data(struct mtk_vcodec_ctx *ctx,
+static struct mtk_q_data *mtk_vdec_get_q_data(struct mtk_vcodec_dec_ctx *ctx,
enum v4l2_buf_type type)
{
if (V4L2_TYPE_IS_OUTPUT(type))
@@ -74,7 +90,7 @@ static int vidioc_try_decoder_cmd(struct file *file, void *priv,
static int vidioc_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *cmd)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
struct vb2_queue *src_vq, *dst_vq;
int ret;
@@ -82,7 +98,7 @@ static int vidioc_decoder_cmd(struct file *file, void *priv,
if (ret)
return ret;
- mtk_v4l2_debug(1, "decoder cmd=%u", cmd->cmd);
+ mtk_v4l2_vdec_dbg(1, ctx, "decoder cmd=%u", cmd->cmd);
dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
switch (cmd->cmd) {
@@ -90,11 +106,11 @@ static int vidioc_decoder_cmd(struct file *file, void *priv,
src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
if (!vb2_is_streaming(src_vq)) {
- mtk_v4l2_debug(1, "Output stream is off. No need to flush.");
+ mtk_v4l2_vdec_dbg(1, ctx, "Output stream is off. No need to flush.");
return 0;
}
if (!vb2_is_streaming(dst_vq)) {
- mtk_v4l2_debug(1, "Capture stream is off. No need to flush.");
+ mtk_v4l2_vdec_dbg(1, ctx, "Capture stream is off. No need to flush.");
return 0;
}
v4l2_m2m_buf_queue(ctx->m2m_ctx, &ctx->empty_flush_buf.vb);
@@ -112,23 +128,23 @@ static int vidioc_decoder_cmd(struct file *file, void *priv,
return 0;
}
-void mtk_vdec_unlock(struct mtk_vcodec_ctx *ctx)
+void mtk_vdec_unlock(struct mtk_vcodec_dec_ctx *ctx)
{
mutex_unlock(&ctx->dev->dec_mutex[ctx->hw_id]);
}
-void mtk_vdec_lock(struct mtk_vcodec_ctx *ctx)
+void mtk_vdec_lock(struct mtk_vcodec_dec_ctx *ctx)
{
mutex_lock(&ctx->dev->dec_mutex[ctx->hw_id]);
}
-void mtk_vcodec_dec_release(struct mtk_vcodec_ctx *ctx)
+void mtk_vcodec_dec_release(struct mtk_vcodec_dec_ctx *ctx)
{
vdec_if_deinit(ctx);
ctx->state = MTK_STATE_FREE;
}
-void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
+void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_dec_ctx *ctx)
{
struct mtk_q_data *q_data;
@@ -169,11 +185,10 @@ void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
static int vidioc_vdec_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
if (ctx->state == MTK_STATE_ABORT) {
- mtk_v4l2_err("[%d] Call on QBUF after unrecoverable error",
- ctx->id);
+ mtk_v4l2_vdec_err(ctx, "[%d] Call on QBUF after unrecoverable error", ctx->id);
return -EIO;
}
@@ -183,11 +198,10 @@ static int vidioc_vdec_qbuf(struct file *file, void *priv,
static int vidioc_vdec_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
if (ctx->state == MTK_STATE_ABORT) {
- mtk_v4l2_err("[%d] Call on DQBUF after unrecoverable error",
- ctx->id);
+ mtk_v4l2_vdec_err(ctx, "[%d] Call on DQBUF after unrecoverable error", ctx->id);
return -EIO;
}
@@ -196,7 +210,7 @@ static int vidioc_vdec_dqbuf(struct file *file, void *priv,
static int mtk_vcodec_dec_get_chip_name(void *priv)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
struct device *dev = &ctx->dev->plat_dev->dev;
if (of_device_is_compatible(dev->of_node, "mediatek,mt8173-vcodec-dec"))
@@ -218,7 +232,7 @@ static int mtk_vcodec_dec_get_chip_name(void *priv)
static int vidioc_vdec_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
struct device *dev = &ctx->dev->plat_dev->dev;
int platform_name = mtk_vcodec_dec_get_chip_name(priv);
@@ -231,7 +245,7 @@ static int vidioc_vdec_querycap(struct file *file, void *priv,
static int vidioc_vdec_subscribe_evt(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(fh);
if (ctx->dev->vdec_pdata->uses_stateless_api)
return v4l2_ctrl_subscribe_event(fh, sub);
@@ -246,7 +260,7 @@ static int vidioc_vdec_subscribe_evt(struct v4l2_fh *fh,
}
}
-static int vidioc_try_fmt(struct mtk_vcodec_ctx *ctx, struct v4l2_format *f,
+static int vidioc_try_fmt(struct mtk_vcodec_dec_ctx *ctx, struct v4l2_format *f,
const struct mtk_video_fmt *fmt)
{
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
@@ -288,11 +302,10 @@ static int vidioc_try_fmt(struct mtk_vcodec_ctx *ctx, struct v4l2_format *f,
(pix_fmt_mp->height + 64) <= frmsize->max_height)
pix_fmt_mp->height += 64;
- mtk_v4l2_debug(0,
- "before resize width=%d, height=%d, after resize width=%d, height=%d, sizeimage=%d",
- tmp_w, tmp_h, pix_fmt_mp->width,
- pix_fmt_mp->height,
- pix_fmt_mp->width * pix_fmt_mp->height);
+ mtk_v4l2_vdec_dbg(0, ctx,
+ "before resize wxh=%dx%d, after resize wxh=%dx%d, sizeimage=%d",
+ tmp_w, tmp_h, pix_fmt_mp->width, pix_fmt_mp->height,
+ pix_fmt_mp->width * pix_fmt_mp->height);
pix_fmt_mp->num_planes = fmt->num_planes;
pix_fmt_mp->plane_fmt[0].sizeimage =
@@ -315,7 +328,7 @@ static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct mtk_video_fmt *fmt;
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
fmt = mtk_vdec_find_format(f, dec_pdata);
@@ -333,7 +346,7 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
{
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
const struct mtk_video_fmt *fmt;
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
fmt = mtk_vdec_find_format(f, dec_pdata);
@@ -344,7 +357,7 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
}
if (pix_fmt_mp->plane_fmt[0].sizeimage == 0) {
- mtk_v4l2_err("sizeimage of output format must be given");
+ mtk_v4l2_vdec_err(ctx, "sizeimage of output format must be given");
return -EINVAL;
}
@@ -354,7 +367,7 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
static int vidioc_vdec_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
struct mtk_q_data *q_data;
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -403,7 +416,7 @@ static int vidioc_vdec_g_selection(struct file *file, void *priv,
static int vidioc_vdec_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -425,14 +438,14 @@ static int vidioc_vdec_s_selection(struct file *file, void *priv,
static int vidioc_vdec_s_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
struct v4l2_pix_format_mplane *pix_mp;
struct mtk_q_data *q_data;
int ret = 0;
const struct mtk_video_fmt *fmt;
const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
- mtk_v4l2_debug(3, "[%d]", ctx->id);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d]", ctx->id);
q_data = mtk_vdec_get_q_data(ctx, f->type);
if (!q_data)
@@ -446,7 +459,7 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
if (!dec_pdata->uses_stateless_api &&
f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
vb2_is_busy(&ctx->m2m_ctx->out_q_ctx.q)) {
- mtk_v4l2_err("out_q_ctx buffers already requested");
+ mtk_v4l2_vdec_err(ctx, "out_q_ctx buffers already requested");
ret = -EBUSY;
}
@@ -456,7 +469,7 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
*/
if ((f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
vb2_is_busy(&ctx->m2m_ctx->cap_q_ctx.q)) {
- mtk_v4l2_err("cap_q_ctx buffers already requested");
+ mtk_v4l2_vdec_err(ctx, "cap_q_ctx buffers already requested");
ret = -EBUSY;
}
@@ -491,8 +504,8 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
if (ctx->state == MTK_STATE_FREE) {
ret = vdec_if_init(ctx, q_data->fmt->fourcc);
if (ret) {
- mtk_v4l2_err("[%d]: vdec_if_init() fail ret=%d",
- ctx->id, ret);
+ mtk_v4l2_vdec_err(ctx, "[%d]: vdec_if_init() fail ret=%d",
+ ctx->id, ret);
return -EINVAL;
}
ctx->state = MTK_STATE_INIT;
@@ -515,8 +528,8 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
*/
ret = vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo);
if (ret) {
- mtk_v4l2_err("[%d]Error!! Get GET_PARAM_PICTURE_INFO Fail",
- ctx->id);
+ mtk_v4l2_vdec_err(ctx, "[%d]Error!! Get GET_PARAM_PICTURE_INFO Fail",
+ ctx->id);
}
ctx->last_decoded_picinfo = ctx->picinfo;
@@ -540,11 +553,13 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
ctx->q_data[MTK_Q_DATA_DST].coded_width = ctx->picinfo.buf_w;
ctx->q_data[MTK_Q_DATA_DST].coded_height = ctx->picinfo.buf_h;
- mtk_v4l2_debug(2, "[%d] vdec_if_init() num_plane = %d wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
- ctx->id, pix_mp->num_planes, ctx->picinfo.buf_w, ctx->picinfo.buf_h,
- ctx->picinfo.pic_w, ctx->picinfo.pic_h,
- ctx->q_data[MTK_Q_DATA_DST].sizeimage[0],
- ctx->q_data[MTK_Q_DATA_DST].sizeimage[1]);
+ mtk_v4l2_vdec_dbg(2, ctx,
+ "[%d] init() plane:%d wxh=%dx%d pic wxh=%dx%d sz=0x%x_0x%x",
+ ctx->id, pix_mp->num_planes,
+ ctx->picinfo.buf_w, ctx->picinfo.buf_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0],
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[1]);
}
return 0;
}
@@ -553,7 +568,7 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
int i = 0;
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
if (fsize->index != 0)
@@ -570,14 +585,11 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
fsize->stepwise = dec_pdata->vdec_formats[i].frmsize;
- mtk_v4l2_debug(1, "%x, %d %d %d %d %d %d",
- ctx->dev->dec_capability,
- fsize->stepwise.min_width,
- fsize->stepwise.max_width,
- fsize->stepwise.step_width,
- fsize->stepwise.min_height,
- fsize->stepwise.max_height,
- fsize->stepwise.step_height);
+ mtk_v4l2_vdec_dbg(1, ctx, "%x, %d %d %d %d %d %d",
+ ctx->dev->dec_capability, fsize->stepwise.min_width,
+ fsize->stepwise.max_width, fsize->stepwise.step_width,
+ fsize->stepwise.min_height, fsize->stepwise.max_height,
+ fsize->stepwise.step_height);
return 0;
}
@@ -588,7 +600,7 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, void *priv,
bool output_queue)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
const struct mtk_video_fmt *fmt;
int i, j = 0;
@@ -634,14 +646,14 @@ static int vidioc_vdec_enum_fmt_vid_out(struct file *file, void *priv,
static int vidioc_vdec_g_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(priv);
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
struct vb2_queue *vq;
struct mtk_q_data *q_data;
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
if (!vq) {
- mtk_v4l2_err("no vb2 queue for type=%d", f->type);
+ mtk_v4l2_vdec_err(ctx, "no vb2 queue for type=%d", f->type);
return -EINVAL;
}
@@ -712,8 +724,8 @@ static int vidioc_vdec_g_fmt(struct file *file, void *priv,
pix_mp->plane_fmt[1].bytesperline = q_data->bytesperline[1];
pix_mp->plane_fmt[1].sizeimage = q_data->sizeimage[1];
- mtk_v4l2_debug(1, "[%d] type=%d state=%d Format information could not be read, not ready yet!",
- ctx->id, f->type, ctx->state);
+ mtk_v4l2_vdec_dbg(1, ctx, "[%d] type=%d state=%d Format information not ready!",
+ ctx->id, f->type, ctx->state);
}
return 0;
@@ -723,14 +735,14 @@ int vb2ops_vdec_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
unsigned int *nplanes, unsigned int sizes[],
struct device *alloc_devs[])
{
- struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vq);
+ struct mtk_vcodec_dec_ctx *ctx = vb2_get_drv_priv(vq);
struct mtk_q_data *q_data;
unsigned int i;
q_data = mtk_vdec_get_q_data(ctx, vq->type);
if (q_data == NULL) {
- mtk_v4l2_err("vq->type=%d err\n", vq->type);
+ mtk_v4l2_vdec_err(ctx, "vq->type=%d err\n", vq->type);
return -EINVAL;
}
@@ -756,30 +768,28 @@ int vb2ops_vdec_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
sizes[i] = q_data->sizeimage[i];
}
- mtk_v4l2_debug(1,
- "[%d]\t type = %d, get %d plane(s), %d buffer(s) of size 0x%x 0x%x ",
- ctx->id, vq->type, *nplanes, *nbuffers,
- sizes[0], sizes[1]);
+ mtk_v4l2_vdec_dbg(1, ctx,
+ "[%d]\t type = %d, get %d plane(s), %d buffer(s) of size 0x%x 0x%x ",
+ ctx->id, vq->type, *nplanes, *nbuffers, sizes[0], sizes[1]);
return 0;
}
int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
{
- struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_vcodec_dec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct mtk_q_data *q_data;
int i;
- mtk_v4l2_debug(3, "[%d] (%d) id=%d",
- ctx->id, vb->vb2_queue->type, vb->index);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d] (%d) id=%d",
+ ctx->id, vb->vb2_queue->type, vb->index);
q_data = mtk_vdec_get_q_data(ctx, vb->vb2_queue->type);
for (i = 0; i < q_data->fmt->num_planes; i++) {
if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
- mtk_v4l2_err("data will not fit into plane %d (%lu < %d)",
- i, vb2_plane_size(vb, i),
- q_data->sizeimage[i]);
+ mtk_v4l2_vdec_err(ctx, "data will not fit into plane %d (%lu < %d)",
+ i, vb2_plane_size(vb, i), q_data->sizeimage[i]);
return -EINVAL;
}
if (!V4L2_TYPE_IS_OUTPUT(vb->type))
@@ -791,7 +801,7 @@ int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
{
- struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_vcodec_dec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vb2_v4l2;
struct mtk_video_dec_buf *buf;
bool buf_error;
@@ -807,7 +817,7 @@ void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
mutex_unlock(&ctx->lock);
if (buf_error) {
- mtk_v4l2_err("Unrecoverable error on buffer.");
+ mtk_v4l2_vdec_err(ctx, "Unrecoverable error on buffer.");
ctx->state = MTK_STATE_ABORT;
}
}
@@ -829,7 +839,7 @@ int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
{
- struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+ struct mtk_vcodec_dec_ctx *ctx = vb2_get_drv_priv(q);
if (ctx->state == MTK_STATE_FLUSH)
ctx->state = MTK_STATE_HEADER;
@@ -840,11 +850,11 @@ int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
{
struct vb2_v4l2_buffer *src_buf = NULL, *dst_buf = NULL;
- struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+ struct mtk_vcodec_dec_ctx *ctx = vb2_get_drv_priv(q);
int ret;
- mtk_v4l2_debug(3, "[%d] (%d) state=(%x) ctx->decoded_frame_cnt=%d",
- ctx->id, q->type, ctx->state, ctx->decoded_frame_cnt);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d] (%d) state=(%x) ctx->decoded_frame_cnt=%d",
+ ctx->id, q->type, ctx->state, ctx->decoded_frame_cnt);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) {
@@ -870,17 +880,17 @@ void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
*/
ctx->picinfo = ctx->last_decoded_picinfo;
- mtk_v4l2_debug(2,
- "[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)",
- ctx->id, ctx->last_decoded_picinfo.pic_w,
- ctx->last_decoded_picinfo.pic_h,
- ctx->picinfo.pic_w, ctx->picinfo.pic_h,
- ctx->last_decoded_picinfo.buf_w,
- ctx->last_decoded_picinfo.buf_h);
+ mtk_v4l2_vdec_dbg(2, ctx,
+ "[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)",
+ ctx->id, ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->last_decoded_picinfo.buf_w,
+ ctx->last_decoded_picinfo.buf_h);
ret = ctx->dev->vdec_pdata->flush_decoder(ctx);
if (ret)
- mtk_v4l2_err("DecodeFinal failed, ret=%d", ret);
+ mtk_v4l2_vdec_err(ctx, "DecodeFinal failed, ret=%d", ret);
}
ctx->state = MTK_STATE_FLUSH;
@@ -895,17 +905,17 @@ void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
static void m2mops_vdec_device_run(void *priv)
{
- struct mtk_vcodec_ctx *ctx = priv;
- struct mtk_vcodec_dev *dev = ctx->dev;
+ struct mtk_vcodec_dec_ctx *ctx = priv;
+ struct mtk_vcodec_dec_dev *dev = ctx->dev;
queue_work(dev->decode_workqueue, &ctx->decode_work);
}
static int m2mops_vdec_job_ready(void *m2m_priv)
{
- struct mtk_vcodec_ctx *ctx = m2m_priv;
+ struct mtk_vcodec_dec_ctx *ctx = m2m_priv;
- mtk_v4l2_debug(3, "[%d]", ctx->id);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d]", ctx->id);
if (ctx->state == MTK_STATE_ABORT)
return 0;
@@ -922,7 +932,7 @@ static int m2mops_vdec_job_ready(void *m2m_priv)
static void m2mops_vdec_job_abort(void *priv)
{
- struct mtk_vcodec_ctx *ctx = priv;
+ struct mtk_vcodec_dec_ctx *ctx = priv;
ctx->state = MTK_STATE_ABORT;
}
@@ -970,10 +980,10 @@ const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops = {
int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
- struct mtk_vcodec_ctx *ctx = priv;
+ struct mtk_vcodec_dec_ctx *ctx = priv;
int ret = 0;
- mtk_v4l2_debug(3, "[%d]", ctx->id);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d]", ctx->id);
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
@@ -988,7 +998,7 @@ int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
ret = vb2_queue_init(src_vq);
if (ret) {
- mtk_v4l2_err("Failed to initialize videobuf2 queue(output)");
+ mtk_v4l2_vdec_err(ctx, "Failed to initialize videobuf2 queue(output)");
return ret;
}
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
@@ -1004,7 +1014,7 @@ int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
ret = vb2_queue_init(dst_vq);
if (ret)
- mtk_v4l2_err("Failed to initialize videobuf2 queue(capture)");
+ mtk_v4l2_vdec_err(ctx, "Failed to initialize videobuf2 queue(capture)");
return ret;
}
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.h b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.h
index 4572f92826f2..ece27c880e50 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.h
@@ -11,6 +11,8 @@
#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
+#include "mtk_vcodec_dec_drv.h"
+
#define VCODEC_DEC_ALIGNED_64 64
#define VCODEC_CAPABILITY_4K_DISABLED 0x10
#define VCODEC_DEC_4K_CODED_WIDTH 4096U
@@ -78,12 +80,12 @@ extern const struct mtk_vcodec_dec_pdata mtk_vdec_single_core_pdata;
* mtk_vdec_lock get decoder hw lock and set curr_ctx
* to ctx instance that get lock
*/
-void mtk_vdec_unlock(struct mtk_vcodec_ctx *ctx);
-void mtk_vdec_lock(struct mtk_vcodec_ctx *ctx);
+void mtk_vdec_unlock(struct mtk_vcodec_dec_ctx *ctx);
+void mtk_vdec_lock(struct mtk_vcodec_dec_ctx *ctx);
int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq);
-void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx);
-void mtk_vcodec_dec_release(struct mtk_vcodec_ctx *ctx);
+void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_dec_ctx *ctx);
+void mtk_vcodec_dec_release(struct mtk_vcodec_dec_ctx *ctx);
/*
* VB2 ops
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
index d41f2121b94f..0a89ce452ac3 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c
@@ -5,27 +5,28 @@
* Tiffany Lin <tiffany.lin@mediatek.com>
*/
+#include <linux/bitfield.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-device.h>
-#include "mtk_vcodec_drv.h"
#include "mtk_vcodec_dec.h"
#include "mtk_vcodec_dec_hw.h"
#include "mtk_vcodec_dec_pm.h"
-#include "mtk_vcodec_intr.h"
-#include "mtk_vcodec_util.h"
-#include "mtk_vcodec_fw.h"
+#include "../common/mtk_vcodec_intr.h"
-static int mtk_vcodec_get_hw_count(struct mtk_vcodec_dev *dev)
+static int mtk_vcodec_get_hw_count(struct mtk_vcodec_dec_ctx *ctx, struct mtk_vcodec_dec_dev *dev)
{
switch (dev->vdec_pdata->hw_arch) {
case MTK_VDEC_PURE_SINGLE_CORE:
@@ -33,27 +34,35 @@ static int mtk_vcodec_get_hw_count(struct mtk_vcodec_dev *dev)
case MTK_VDEC_LAT_SINGLE_CORE:
return MTK_VDEC_ONE_LAT_ONE_CORE;
default:
- mtk_v4l2_err("hw arch %d not supported", dev->vdec_pdata->hw_arch);
+ mtk_v4l2_vdec_err(ctx, "hw arch %d not supported", dev->vdec_pdata->hw_arch);
return MTK_VDEC_NO_HW;
}
}
+static bool mtk_vcodec_is_hw_active(struct mtk_vcodec_dec_dev *dev)
+{
+ u32 cg_status;
+
+ if (dev->vdecsys_regmap)
+ return !regmap_test_bits(dev->vdecsys_regmap, VDEC_HW_ACTIVE_ADDR,
+ VDEC_HW_ACTIVE_MASK);
+
+ cg_status = readl(dev->reg_base[VDEC_SYS] + VDEC_HW_ACTIVE_ADDR);
+ return !FIELD_GET(VDEC_HW_ACTIVE_MASK, cg_status);
+}
+
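For orientation only, a minimal sketch (not part of the patch) of the check both branches of mtk_vcodec_is_hw_active() implement: the clock-gating status bit reads back as set while the decoder is gated, so "hardware active" means the bit is clear. The address and mask are the VDEC_HW_ACTIVE_ADDR/VDEC_HW_ACTIVE_MASK values introduced later in this series:

    /* Hypothetical equivalence sketch, MMIO path only; assumes the new defines. */
    u32 status = readl(dev->reg_base[VDEC_SYS] + VDEC_HW_ACTIVE_ADDR);
    bool active = !(status & VDEC_HW_ACTIVE_MASK); /* bit set => clock gated, not active */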
static irqreturn_t mtk_vcodec_dec_irq_handler(int irq, void *priv)
{
- struct mtk_vcodec_dev *dev = priv;
- struct mtk_vcodec_ctx *ctx;
- u32 cg_status = 0;
+ struct mtk_vcodec_dec_dev *dev = priv;
+ struct mtk_vcodec_dec_ctx *ctx;
unsigned int dec_done_status = 0;
void __iomem *vdec_misc_addr = dev->reg_base[VDEC_MISC] +
VDEC_IRQ_CFG_REG;
ctx = mtk_vcodec_get_curr_ctx(dev, MTK_VDEC_CORE);
- /* check if HW active or not */
- cg_status = readl(dev->reg_base[0]);
- if ((cg_status & VDEC_HW_ACTIVE) != 0) {
- mtk_v4l2_err("DEC ISR, VDEC active is not 0x0 (0x%08x)",
- cg_status);
+ if (!mtk_vcodec_is_hw_active(dev)) {
+ mtk_v4l2_vdec_err(ctx, "DEC ISR, VDEC active is not 0x0");
return IRQ_HANDLED;
}
@@ -69,40 +78,86 @@ static irqreturn_t mtk_vcodec_dec_irq_handler(int irq, void *priv)
writel((readl(vdec_misc_addr) & ~VDEC_IRQ_CLR),
dev->reg_base[VDEC_MISC] + VDEC_IRQ_CFG_REG);
- wake_up_ctx(ctx, MTK_INST_IRQ_RECEIVED, 0);
+ wake_up_dec_ctx(ctx, MTK_INST_IRQ_RECEIVED, 0);
- mtk_v4l2_debug(3,
- "mtk_vcodec_dec_irq_handler :wake up ctx %d, dec_done_status=%x",
- ctx->id, dec_done_status);
+ mtk_v4l2_vdec_dbg(3, ctx, "wake up ctx %d, dec_done_status=%x", ctx->id, dec_done_status);
return IRQ_HANDLED;
}
-static int mtk_vcodec_get_reg_bases(struct mtk_vcodec_dev *dev)
+static int mtk_vcodec_get_reg_bases(struct mtk_vcodec_dec_dev *dev)
{
struct platform_device *pdev = dev->plat_dev;
int reg_num, i;
+ struct resource *res;
+ bool has_vdecsys_reg;
+ int num_max_vdec_regs;
+ static const char * const mtk_dec_reg_names[] = {
+ "misc",
+ "ld",
+ "top",
+ "cm",
+ "ad",
+ "av",
+ "pp",
+ "hwd",
+ "hwq",
+ "hwb",
+ "hwg"
+ };
+
+ /*
+ * If reg-names are present in the devicetree, we are on the new
+ * register organization, which implies that the VDEC_SYS iospace is
+ * read/written through a syscon (regmap).
+ * Only look up the "misc" resource here, to check whether reg-names exist.
+ */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "misc");
+ if (res)
+ has_vdecsys_reg = false;
+ else
+ has_vdecsys_reg = true;
+
+ num_max_vdec_regs = has_vdecsys_reg ? NUM_MAX_VDEC_REG_BASE :
+ ARRAY_SIZE(mtk_dec_reg_names);
/* Sizeof(u32) * 4 bytes for each register base. */
reg_num = of_property_count_elems_of_size(pdev->dev.of_node, "reg",
sizeof(u32) * 4);
- if (reg_num <= 0 || reg_num > NUM_MAX_VDEC_REG_BASE) {
+ if (reg_num <= 0 || reg_num > num_max_vdec_regs) {
dev_err(&pdev->dev, "Invalid register property size: %d\n", reg_num);
return -EINVAL;
}
- for (i = 0; i < reg_num; i++) {
- dev->reg_base[i] = devm_platform_ioremap_resource(pdev, i);
- if (IS_ERR(dev->reg_base[i]))
- return PTR_ERR(dev->reg_base[i]);
+ if (has_vdecsys_reg) {
+ for (i = 0; i < reg_num; i++) {
+ dev->reg_base[i] = devm_platform_ioremap_resource(pdev, i);
+ if (IS_ERR(dev->reg_base[i]))
+ return PTR_ERR(dev->reg_base[i]);
+
+ dev_dbg(&pdev->dev, "reg[%d] base=%p", i, dev->reg_base[i]);
+ }
+ } else {
+ for (i = 0; i < reg_num; i++) {
+ dev->reg_base[i+1] = devm_platform_ioremap_resource_byname(pdev, mtk_dec_reg_names[i]);
+ if (IS_ERR(dev->reg_base[i+1]))
+ return PTR_ERR(dev->reg_base[i+1]);
+
+ dev_dbg(&pdev->dev, "reg[%d] base=%p", i + 1, dev->reg_base[i + 1]);
+ }
- mtk_v4l2_debug(2, "reg[%d] base=%p", i, dev->reg_base[i]);
+ dev->vdecsys_regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "mediatek,vdecsys");
+ if (IS_ERR(dev->vdecsys_regmap)) {
+ dev_err(&pdev->dev, "Missing mediatek,vdecsys property");
+ return PTR_ERR(dev->vdecsys_regmap);
+ }
}
return 0;
}
-static int mtk_vcodec_init_dec_resources(struct mtk_vcodec_dev *dev)
+static int mtk_vcodec_init_dec_resources(struct mtk_vcodec_dec_dev *dev)
{
struct platform_device *pdev = dev->plat_dev;
int ret;
@@ -139,8 +194,8 @@ static int mtk_vcodec_init_dec_resources(struct mtk_vcodec_dev *dev)
static int fops_vcodec_open(struct file *file)
{
- struct mtk_vcodec_dev *dev = video_drvdata(file);
- struct mtk_vcodec_ctx *ctx = NULL;
+ struct mtk_vcodec_dec_dev *dev = video_drvdata(file);
+ struct mtk_vcodec_dec_ctx *ctx = NULL;
int ret = 0, i, hw_count;
struct vb2_queue *src_vq;
@@ -156,7 +211,7 @@ static int fops_vcodec_open(struct file *file)
INIT_LIST_HEAD(&ctx->list);
ctx->dev = dev;
if (ctx->dev->vdec_pdata->is_subdev_supported) {
- hw_count = mtk_vcodec_get_hw_count(dev);
+ hw_count = mtk_vcodec_get_hw_count(ctx, dev);
if (!hw_count || !dev->subdev_prob_done) {
ret = -EINVAL;
goto err_ctrls_setup;
@@ -176,15 +231,14 @@ static int fops_vcodec_open(struct file *file)
ctx->type = MTK_INST_DECODER;
ret = dev->vdec_pdata->ctrls_setup(ctx);
if (ret) {
- mtk_v4l2_err("Failed to setup mt vcodec controls");
+ mtk_v4l2_vdec_err(ctx, "Failed to setup mt vcodec controls");
goto err_ctrls_setup;
}
ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev_dec, ctx,
&mtk_vcodec_dec_queue_init);
if (IS_ERR((__force void *)ctx->m2m_ctx)) {
ret = PTR_ERR((__force void *)ctx->m2m_ctx);
- mtk_v4l2_err("Failed to v4l2_m2m_ctx_init() (%d)",
- ret);
+ mtk_v4l2_vdec_err(ctx, "Failed to v4l2_m2m_ctx_init() (%d)", ret);
goto err_m2m_ctx_init;
}
src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
@@ -202,14 +256,14 @@ static int fops_vcodec_open(struct file *file)
* Return 0 if downloading firmware successfully,
* otherwise it is failed
*/
- mtk_v4l2_err("failed to load firmware!");
+ mtk_v4l2_vdec_err(ctx, "failed to load firmware!");
goto err_load_fw;
}
dev->dec_capability =
mtk_vcodec_fw_get_vdec_capa(dev->fw_handler);
- mtk_v4l2_debug(0, "decoder capability %x", dev->dec_capability);
+ mtk_v4l2_vdec_dbg(0, ctx, "decoder capability %x", dev->dec_capability);
}
ctx->dev->vdec_pdata->init_vdec_params(ctx);
@@ -218,8 +272,7 @@ static int fops_vcodec_open(struct file *file)
mtk_vcodec_dbgfs_create(ctx);
mutex_unlock(&dev->dev_mutex);
- mtk_v4l2_debug(0, "%s decoder [%d]", dev_name(&dev->plat_dev->dev),
- ctx->id);
+ mtk_v4l2_vdec_dbg(0, ctx, "%s decoder [%d]", dev_name(&dev->plat_dev->dev), ctx->id);
return ret;
/* Deinit when failure occurred */
@@ -238,10 +291,10 @@ err_ctrls_setup:
static int fops_vcodec_release(struct file *file)
{
- struct mtk_vcodec_dev *dev = video_drvdata(file);
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(file->private_data);
+ struct mtk_vcodec_dec_dev *dev = video_drvdata(file);
+ struct mtk_vcodec_dec_ctx *ctx = fh_to_dec_ctx(file->private_data);
- mtk_v4l2_debug(0, "[%d] decoder", ctx->id);
+ mtk_v4l2_vdec_dbg(0, ctx, "[%d] decoder", ctx->id);
mutex_lock(&dev->dev_mutex);
/*
@@ -275,7 +328,7 @@ static const struct v4l2_file_operations mtk_vcodec_fops = {
static int mtk_vcodec_probe(struct platform_device *pdev)
{
- struct mtk_vcodec_dev *dev;
+ struct mtk_vcodec_dec_dev *dev;
struct video_device *vfd_dec;
phandle rproc_phandle;
enum mtk_vcodec_fw_type fw_type;
@@ -296,7 +349,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
&rproc_phandle)) {
fw_type = SCP;
} else {
- mtk_v4l2_err("Could not get vdec IPI device");
+ dev_dbg(&pdev->dev, "Could not get vdec IPI device");
return -ENODEV;
}
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
@@ -316,7 +369,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
alloc_ordered_workqueue("core-decoder",
WQ_MEM_RECLAIM | WQ_FREEZABLE);
if (!dev->core_workqueue) {
- mtk_v4l2_err("Failed to create core workqueue");
+ dev_dbg(&pdev->dev, "Failed to create core workqueue");
ret = -EINVAL;
goto err_res;
}
@@ -332,15 +385,13 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret) {
- mtk_v4l2_err("v4l2_device_register err=%d", ret);
+ dev_err(&pdev->dev, "v4l2_device_register err=%d", ret);
goto err_core_workq;
}
- init_waitqueue_head(&dev->queue);
-
vfd_dec = video_device_alloc();
if (!vfd_dec) {
- mtk_v4l2_err("Failed to allocate video device");
+ dev_err(&pdev->dev, "Failed to allocate video device");
ret = -ENOMEM;
goto err_dec_alloc;
}
@@ -361,7 +412,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
dev->m2m_dev_dec = v4l2_m2m_init(&mtk_vdec_m2m_ops);
if (IS_ERR((__force void *)dev->m2m_dev_dec)) {
- mtk_v4l2_err("Failed to init mem2mem dec device");
+ dev_err(&pdev->dev, "Failed to init mem2mem dec device");
ret = PTR_ERR((__force void *)dev->m2m_dev_dec);
goto err_dec_alloc;
}
@@ -370,7 +421,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
alloc_ordered_workqueue(MTK_VCODEC_DEC_NAME,
WQ_MEM_RECLAIM | WQ_FREEZABLE);
if (!dev->decode_workqueue) {
- mtk_v4l2_err("Failed to create decode workqueue");
+ dev_err(&pdev->dev, "Failed to create decode workqueue");
ret = -EINVAL;
goto err_event_workq;
}
@@ -379,7 +430,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
&pdev->dev);
if (ret) {
- mtk_v4l2_err("Main device of_platform_populate failed.");
+ dev_err(&pdev->dev, "Main device of_platform_populate failed.");
goto err_reg_cont;
}
} else {
@@ -392,7 +443,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
ret = video_register_device(vfd_dec, VFL_TYPE_VIDEO, -1);
if (ret) {
- mtk_v4l2_err("Failed to register video device");
+ dev_err(&pdev->dev, "Failed to register video device");
goto err_reg_cont;
}
@@ -411,21 +462,21 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
ret = v4l2_m2m_register_media_controller(dev->m2m_dev_dec, dev->vfd_dec,
MEDIA_ENT_F_PROC_VIDEO_DECODER);
if (ret) {
- mtk_v4l2_err("Failed to register media controller");
+ dev_err(&pdev->dev, "Failed to register media controller");
goto err_dec_mem_init;
}
ret = media_device_register(&dev->mdev_dec);
if (ret) {
- mtk_v4l2_err("Failed to register media device");
+ dev_err(&pdev->dev, "Failed to register media device");
goto err_media_reg;
}
- mtk_v4l2_debug(0, "media registered as /dev/media%d", vfd_dec->minor);
+ dev_dbg(&pdev->dev, "media registered as /dev/media%d", vfd_dec->minor);
}
mtk_vcodec_dbgfs_init(dev, false);
- mtk_v4l2_debug(0, "decoder registered as /dev/video%d", vfd_dec->minor);
+ dev_dbg(&pdev->dev, "decoder registered as /dev/video%d", vfd_dec->minor);
return 0;
@@ -484,7 +535,7 @@ MODULE_DEVICE_TABLE(of, mtk_vcodec_match);
static void mtk_vcodec_dec_remove(struct platform_device *pdev)
{
- struct mtk_vcodec_dev *dev = platform_get_drvdata(pdev);
+ struct mtk_vcodec_dec_dev *dev = platform_get_drvdata(pdev);
destroy_workqueue(dev->decode_workqueue);
@@ -500,7 +551,7 @@ static void mtk_vcodec_dec_remove(struct platform_device *pdev)
if (dev->vfd_dec)
video_unregister_device(dev->vfd_dec);
- mtk_vcodec_dbgfs_deinit(dev);
+ mtk_vcodec_dbgfs_deinit(&dev->dbgfs);
v4l2_device_unregister(&dev->v4l2_dev);
if (!dev->vdec_pdata->is_subdev_supported)
pm_runtime_disable(dev->pm.dev);
diff --git a/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
new file mode 100644
index 000000000000..7e36b2c69b7d
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Yunfei Dong <yunfei.dong@mediatek.com>
+ */
+
+#ifndef _MTK_VCODEC_DEC_DRV_H_
+#define _MTK_VCODEC_DEC_DRV_H_
+
+#include "../common/mtk_vcodec_cmn_drv.h"
+#include "../common/mtk_vcodec_dbgfs.h"
+#include "../common/mtk_vcodec_fw_priv.h"
+#include "../common/mtk_vcodec_util.h"
+#include "vdec_msg_queue.h"
+
+#define MTK_VCODEC_DEC_NAME "mtk-vcodec-dec"
+
+#define IS_VDEC_LAT_ARCH(hw_arch) ((hw_arch) >= MTK_VDEC_LAT_SINGLE_CORE)
+#define IS_VDEC_INNER_RACING(capability) ((capability) & MTK_VCODEC_INNER_RACING)
+
+/*
+ * enum mtk_vdec_format_types - Flag bits used to report the supported
+ * format types according to the decoder capability
+ */
+enum mtk_vdec_format_types {
+ MTK_VDEC_FORMAT_MM21 = 0x20,
+ MTK_VDEC_FORMAT_MT21C = 0x40,
+ MTK_VDEC_FORMAT_H264_SLICE = 0x100,
+ MTK_VDEC_FORMAT_VP8_FRAME = 0x200,
+ MTK_VDEC_FORMAT_VP9_FRAME = 0x400,
+ MTK_VDEC_FORMAT_AV1_FRAME = 0x800,
+ MTK_VDEC_FORMAT_HEVC_FRAME = 0x1000,
+ MTK_VCODEC_INNER_RACING = 0x20000,
+ MTK_VDEC_IS_SUPPORT_10BIT = 0x40000,
+};
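These flag bits are meant to be tested against the capability word reported by the decoder firmware. A hypothetical sketch (the dev pointer and the -EOPNOTSUPP policy are illustrative, not taken from this patch):

    /* Hypothetical sketch: refuse a codec the firmware does not report. */
    if (!(dev->dec_capability & MTK_VDEC_FORMAT_H264_SLICE))
        return -EOPNOTSUPP;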
+
+/*
+ * enum mtk_vdec_hw_count - Supported hardware count
+ */
+enum mtk_vdec_hw_count {
+ MTK_VDEC_NO_HW = 0,
+ MTK_VDEC_ONE_CORE,
+ MTK_VDEC_ONE_LAT_ONE_CORE,
+ MTK_VDEC_MAX_HW_COUNT,
+};
+
+/*
+ * enum mtk_vdec_hw_arch - Used to separate different hardware architecture
+ */
+enum mtk_vdec_hw_arch {
+ MTK_VDEC_PURE_SINGLE_CORE,
+ MTK_VDEC_LAT_SINGLE_CORE,
+};
+
+/**
+ * struct vdec_pic_info - picture size information
+ * @pic_w: picture width
+ * @pic_h: picture height
+ * @buf_w: picture buffer width (64 aligned up from pic_w)
+ * @buf_h: picture buffer height (64 aligned up from pic_h)
+ * @fb_sz: bitstream size of each plane
+ * E.g. suppose picture size is 176x144,
+ * buffer size will be aligned to 176x160.
+ * @cap_fourcc: fourcc number (may change when the resolution changes)
+ * @reserved: align the struct to 64 bits so the layout matches between 32-bit and 64-bit OSes.
+ */
+struct vdec_pic_info {
+ unsigned int pic_w;
+ unsigned int pic_h;
+ unsigned int buf_w;
+ unsigned int buf_h;
+ unsigned int fb_sz[VIDEO_MAX_PLANES];
+ unsigned int cap_fourcc;
+ unsigned int reserved;
+};
+
+/**
+ * struct mtk_vcodec_dec_pdata - compatible data for each IC
+ * @init_vdec_params: init vdec params
+ * @ctrls_setup: init vcodec dec ctrls
+ * @worker: worker to start a decode job
+ * @flush_decoder: function that flushes the decoder
+ * @get_cap_buffer: get capture buffer from capture queue
+ * @cap_to_disp: put capture buffer to disp list for lat and core arch
+ * @vdec_vb2_ops: struct vb2_ops
+ *
+ * @vdec_formats: supported video decoder formats
+ * @num_formats: count of video decoder formats
+ * @default_out_fmt: default output buffer format
+ * @default_cap_fmt: default capture buffer format
+ *
+ * @hw_arch: hardware architecture, used to distinguish pure single-core from LAT single-core
+ *
+ * @is_subdev_supported: whether the parent-node (subdev) architecture is supported
+ * @uses_stateless_api: whether the decoder uses the stateless API with requests
+ */
+struct mtk_vcodec_dec_pdata {
+ void (*init_vdec_params)(struct mtk_vcodec_dec_ctx *ctx);
+ int (*ctrls_setup)(struct mtk_vcodec_dec_ctx *ctx);
+ void (*worker)(struct work_struct *work);
+ int (*flush_decoder)(struct mtk_vcodec_dec_ctx *ctx);
+ struct vdec_fb *(*get_cap_buffer)(struct mtk_vcodec_dec_ctx *ctx);
+ void (*cap_to_disp)(struct mtk_vcodec_dec_ctx *ctx, int error,
+ struct media_request *src_buf_req);
+
+ const struct vb2_ops *vdec_vb2_ops;
+
+ const struct mtk_video_fmt *vdec_formats;
+ const int *num_formats;
+ const struct mtk_video_fmt *default_out_fmt;
+ const struct mtk_video_fmt *default_cap_fmt;
+
+ enum mtk_vdec_hw_arch hw_arch;
+
+ bool is_subdev_supported;
+ bool uses_stateless_api;
+};
+
+/**
+ * struct mtk_vcodec_dec_ctx - Context (instance) private data.
+ *
+ * @type: type of decoder instance
+ * @dev: pointer to the mtk_vcodec_dec_dev of the device
+ * @list: link to ctx_list of mtk_vcodec_dec_dev
+ *
+ * @fh: struct v4l2_fh
+ * @m2m_ctx: pointer to the v4l2_m2m_ctx of the context
+ * @q_data: store information of input and output queue of the context
+ * @id: index of the context that this structure describes
+ * @state: state of the context
+ *
+ * @dec_if: hooked decoder driver interface
+ * @drv_handle: driver handle for specific decode/encode instance
+ *
+ * @picinfo: store picture info after header parsing
+ * @dpb_size: store dpb count after header parsing
+ *
+ * @int_cond: variable used by the waitqueue
+ * @int_type: type of the last interrupt
+ * @queue: waitqueue that can be used to wait for this context to finish
+ * @irq_status: irq status
+ *
+ * @ctrl_hdl: handler for v4l2 framework
+ * @decode_work: worker for the decoding
+ * @last_decoded_picinfo: picture information from the latest decode
+ * @empty_flush_buf: a fake size-0 capture buffer that indicates flush. Used
+ * for stateful decoder.
+ * @is_flushing: set to true if flushing is in progress.
+ *
+ * @current_codec: currently set input codec, in V4L2 pixel format
+ * @capture_fourcc: capture queue type in V4L2 pixel format
+ *
+ * @colorspace: enum v4l2_colorspace; supplemental to pixelformat
+ * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
+ * @quantization: enum v4l2_quantization, colorspace quantization
+ * @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ *
+ * @decoded_frame_cnt: number of decoded frames
+ * @lock: protect variables accessed by V4L2 threads and worker thread such as
+ * mtk_video_dec_buf.
+ * @hw_id: hardware index used to identify different hardware.
+ *
+ * @msg_queue: msg queue used to store lat buffer information.
+ * @vpu_inst: vpu instance pointer.
+ *
+ * @is_10bit_bitstream: set to true for a 10-bit bitstream
+ */
+struct mtk_vcodec_dec_ctx {
+ enum mtk_instance_type type;
+ struct mtk_vcodec_dec_dev *dev;
+ struct list_head list;
+
+ struct v4l2_fh fh;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct mtk_q_data q_data[2];
+ int id;
+ enum mtk_instance_state state;
+
+ const struct vdec_common_if *dec_if;
+ void *drv_handle;
+
+ struct vdec_pic_info picinfo;
+ int dpb_size;
+
+ int int_cond[MTK_VDEC_HW_MAX];
+ int int_type[MTK_VDEC_HW_MAX];
+ wait_queue_head_t queue[MTK_VDEC_HW_MAX];
+ unsigned int irq_status;
+
+ struct v4l2_ctrl_handler ctrl_hdl;
+ struct work_struct decode_work;
+ struct vdec_pic_info last_decoded_picinfo;
+ struct v4l2_m2m_buffer empty_flush_buf;
+ bool is_flushing;
+
+ u32 current_codec;
+ u32 capture_fourcc;
+
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ enum v4l2_xfer_func xfer_func;
+
+ int decoded_frame_cnt;
+ struct mutex lock;
+ int hw_id;
+
+ struct vdec_msg_queue msg_queue;
+ void *vpu_inst;
+
+ bool is_10bit_bitstream;
+};
+
+/**
+ * struct mtk_vcodec_dec_dev - driver data
+ * @v4l2_dev: V4L2 device to register video devices for.
+ * @vfd_dec: Video device for decoder
+ * @mdev_dec: Media device for decoder
+ *
+ * @m2m_dev_dec: m2m device for decoder
+ * @plat_dev: platform device
+ * @ctx_list: list of struct mtk_vcodec_dec_ctx
+ * @curr_ctx: The context that is waiting for codec hardware
+ *
+ * @reg_base: Mapped address of MTK Vcodec registers.
+ * @vdec_pdata: decoder IC-specific data
+ * @vdecsys_regmap: VDEC_SYS register space passed through syscon
+ *
+ * @fw_handler: used to communicate with the firmware.
+ * @id_counter: used to identify the currently opened instance
+ *
+ * @dec_mutex: decoder hardware lock
+ * @dev_mutex: video_device lock
+ * @decode_workqueue: decode work queue
+ *
+ * @irqlock: protect data access by irq handler and work thread
+ * @dec_irq: decoder irq resource
+ *
+ * @pm: power management control
+ * @dec_capability: used to identify decode capability, e.g. 4K
+ *
+ * @core_workqueue: queue used for core hardware decode
+ *
+ * @subdev_dev: subdev hardware device
+ * @subdev_prob_done: check whether all used hw devices have finished probing
+ * @subdev_bitmap: used to record whether each hardware is ready
+ *
+ * @dec_active_cnt: used to mark whether the register values need to be recorded
+ * @vdec_racing_info: saved register values for inner racing mode
+ * @dec_racing_info_mutex: mutex lock used for inner racing mode
+ * @dbgfs: debug log related information
+ */
+struct mtk_vcodec_dec_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd_dec;
+ struct media_device mdev_dec;
+
+ struct v4l2_m2m_dev *m2m_dev_dec;
+ struct platform_device *plat_dev;
+ struct list_head ctx_list;
+ struct mtk_vcodec_dec_ctx *curr_ctx;
+
+ void __iomem *reg_base[NUM_MAX_VCODEC_REG_BASE];
+ const struct mtk_vcodec_dec_pdata *vdec_pdata;
+ struct regmap *vdecsys_regmap;
+
+ struct mtk_vcodec_fw *fw_handler;
+ u64 id_counter;
+
+ /* decoder hardware mutex lock */
+ struct mutex dec_mutex[MTK_VDEC_HW_MAX];
+ struct mutex dev_mutex;
+ struct workqueue_struct *decode_workqueue;
+
+ spinlock_t irqlock;
+ int dec_irq;
+
+ struct mtk_vcodec_pm pm;
+ unsigned int dec_capability;
+
+ struct workqueue_struct *core_workqueue;
+
+ void *subdev_dev[MTK_VDEC_HW_MAX];
+ int (*subdev_prob_done)(struct mtk_vcodec_dec_dev *vdec_dev);
+ DECLARE_BITMAP(subdev_bitmap, MTK_VDEC_HW_MAX);
+
+ atomic_t dec_active_cnt;
+ u32 vdec_racing_info[132];
+ /* Protects access to vdec_racing_info data */
+ struct mutex dec_racing_info_mutex;
+ struct mtk_vcodec_dbgfs dbgfs;
+};
+
+static inline struct mtk_vcodec_dec_ctx *fh_to_dec_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct mtk_vcodec_dec_ctx, fh);
+}
+
+static inline struct mtk_vcodec_dec_ctx *ctrl_to_dec_ctx(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct mtk_vcodec_dec_ctx, ctrl_hdl);
+}
+
+/* Wake up context wait_queue */
+static inline void
+wake_up_dec_ctx(struct mtk_vcodec_dec_ctx *ctx, unsigned int reason, unsigned int hw_id)
+{
+ ctx->int_cond[hw_id] = 1;
+ ctx->int_type[hw_id] = reason;
+ wake_up_interruptible(&ctx->queue[hw_id]);
+}
+
+#define mtk_vdec_err(ctx, fmt, args...) \
+ mtk_vcodec_err((ctx)->id, (ctx)->dev->plat_dev, fmt, ##args)
+
+#define mtk_vdec_debug(ctx, fmt, args...) \
+ mtk_vcodec_debug((ctx)->id, (ctx)->dev->plat_dev, fmt, ##args)
+
+#define mtk_v4l2_vdec_err(ctx, fmt, args...) mtk_v4l2_err((ctx)->dev->plat_dev, fmt, ##args)
+
+#define mtk_v4l2_vdec_dbg(level, ctx, fmt, args...) \
+ mtk_v4l2_debug((ctx)->dev->plat_dev, level, fmt, ##args)
+
+#endif /* _MTK_VCODEC_DEC_DRV_H_ */
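As a usage sketch of the new context structure (hypothetical: the vdec_wait_irq() helper and the 1000 ms timeout below are illustrative and not part of this patch), the per-hardware wait queue pairs with wake_up_dec_ctx() from the interrupt handlers, and the ctx-aware log macros replace the old global ones:

    /* Hypothetical caller-side sketch, assuming this header is included. */
    static int vdec_wait_irq(struct mtk_vcodec_dec_ctx *ctx, int hw_id)
    {
        long ret;

        ret = wait_event_interruptible_timeout(ctx->queue[hw_id],
                                               ctx->int_cond[hw_id],
                                               msecs_to_jiffies(1000));
        ctx->int_cond[hw_id] = 0;
        if (ret == 0) {
            mtk_v4l2_vdec_err(ctx, "[%d] hw %d decode timeout", ctx->id, hw_id);
            return -ETIMEDOUT;
        }
        if (ret < 0)
            return ret; /* interrupted */

        mtk_v4l2_vdec_dbg(3, ctx, "[%d] irq type %d", ctx->id, ctx->int_type[hw_id]);
        return 0;
    }

The IRQ handlers in this patch call wake_up_dec_ctx(ctx, MTK_INST_IRQ_RECEIVED, hw_id), which sets int_cond[hw_id] and wakes such a waiter.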
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_hw.c
index e1cb2f8dca33..881d5de41e05 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_hw.c
@@ -12,12 +12,10 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#include "mtk_vcodec_drv.h"
#include "mtk_vcodec_dec.h"
#include "mtk_vcodec_dec_hw.h"
#include "mtk_vcodec_dec_pm.h"
-#include "mtk_vcodec_intr.h"
-#include "mtk_vcodec_util.h"
+#include "../common/mtk_vcodec_intr.h"
static const struct of_device_id mtk_vdec_hw_match[] = {
{
@@ -36,7 +34,7 @@ static const struct of_device_id mtk_vdec_hw_match[] = {
};
MODULE_DEVICE_TABLE(of, mtk_vdec_hw_match);
-static int mtk_vdec_hw_prob_done(struct mtk_vcodec_dev *vdec_dev)
+static int mtk_vdec_hw_prob_done(struct mtk_vcodec_dec_dev *vdec_dev)
{
struct platform_device *pdev = vdec_dev->plat_dev;
struct device_node *subdev_node;
@@ -66,7 +64,7 @@ static int mtk_vdec_hw_prob_done(struct mtk_vcodec_dev *vdec_dev)
static irqreturn_t mtk_vdec_hw_irq_handler(int irq, void *priv)
{
struct mtk_vdec_hw_dev *dev = priv;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
u32 cg_status;
unsigned int dec_done_status;
void __iomem *vdec_misc_addr = dev->reg_base[VDEC_HW_MISC] +
@@ -75,10 +73,9 @@ static irqreturn_t mtk_vdec_hw_irq_handler(int irq, void *priv)
ctx = mtk_vcodec_get_curr_ctx(dev->main_dev, dev->hw_idx);
/* check if HW active or not */
- cg_status = readl(dev->reg_base[VDEC_HW_SYS]);
- if (cg_status & VDEC_HW_ACTIVE) {
- mtk_v4l2_err("vdec active is not 0x0 (0x%08x)",
- cg_status);
+ cg_status = readl(dev->reg_base[VDEC_HW_SYS] + VDEC_HW_ACTIVE_ADDR);
+ if (cg_status & VDEC_HW_ACTIVE_MASK) {
+ mtk_v4l2_vdec_err(ctx, "vdec active is not 0x0 (0x%08x)", cg_status);
return IRQ_HANDLED;
}
@@ -91,10 +88,10 @@ static irqreturn_t mtk_vdec_hw_irq_handler(int irq, void *priv)
writel(dec_done_status | VDEC_IRQ_CFG, vdec_misc_addr);
writel(dec_done_status & ~VDEC_IRQ_CLR, vdec_misc_addr);
- wake_up_ctx(ctx, MTK_INST_IRQ_RECEIVED, dev->hw_idx);
+ wake_up_dec_ctx(ctx, MTK_INST_IRQ_RECEIVED, dev->hw_idx);
- mtk_v4l2_debug(3, "wake up ctx %d, dec_done_status=%x",
- ctx->id, dec_done_status);
+ mtk_v4l2_vdec_dbg(3, ctx, "wake up ctx %d, dec_done_status=%x",
+ ctx->id, dec_done_status);
return IRQ_HANDLED;
}
@@ -124,7 +121,7 @@ static int mtk_vdec_hw_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_vdec_hw_dev *subdev_dev;
- struct mtk_vcodec_dev *main_dev;
+ struct mtk_vcodec_dec_dev *main_dev;
const struct of_device_id *of_id;
int hw_idx;
int ret;
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.h b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_hw.h
index 36faa8d9d681..83fe8b9428e6 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_hw.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_hw.h
@@ -10,9 +10,10 @@
#include <linux/io.h>
#include <linux/platform_device.h>
-#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec_drv.h"
-#define VDEC_HW_ACTIVE 0x10
+#define VDEC_HW_ACTIVE_ADDR 0x0
+#define VDEC_HW_ACTIVE_MASK BIT(4)
#define VDEC_IRQ_CFG 0x11
#define VDEC_IRQ_CLR 0x10
#define VDEC_IRQ_CFG_REG 0xa4
@@ -45,10 +46,10 @@ enum mtk_vdec_hw_reg_idx {
*/
struct mtk_vdec_hw_dev {
struct platform_device *plat_dev;
- struct mtk_vcodec_dev *main_dev;
+ struct mtk_vcodec_dec_dev *main_dev;
void __iomem *reg_base[VDEC_HW_MAX];
- struct mtk_vcodec_ctx *curr_ctx;
+ struct mtk_vcodec_dec_ctx *curr_ctx;
int dec_irq;
struct mtk_vcodec_pm pm;
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_pm.c
index 777d445999e9..aefd3e9e3061 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_pm.c
@@ -6,13 +6,11 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include "mtk_vcodec_dec_hw.h"
#include "mtk_vcodec_dec_pm.h"
-#include "mtk_vcodec_util.h"
int mtk_vcodec_init_dec_clk(struct platform_device *pdev, struct mtk_vcodec_pm *pm)
{
@@ -32,7 +30,7 @@ int mtk_vcodec_init_dec_clk(struct platform_device *pdev, struct mtk_vcodec_pm *
if (!dec_clk->clk_info)
return -ENOMEM;
} else {
- mtk_v4l2_err("Failed to get vdec clock count");
+ dev_err(&pdev->dev, "Failed to get vdec clock count");
return -EINVAL;
}
@@ -41,14 +39,13 @@ int mtk_vcodec_init_dec_clk(struct platform_device *pdev, struct mtk_vcodec_pm *
ret = of_property_read_string_index(pdev->dev.of_node,
"clock-names", i, &clk_info->clk_name);
if (ret) {
- mtk_v4l2_err("Failed to get clock name id = %d", i);
+ dev_err(&pdev->dev, "Failed to get clock name id = %d", i);
return ret;
}
clk_info->vcodec_clk = devm_clk_get(&pdev->dev,
clk_info->clk_name);
if (IS_ERR(clk_info->vcodec_clk)) {
- mtk_v4l2_err("devm_clk_get (%d)%s fail", i,
- clk_info->clk_name);
+ dev_err(&pdev->dev, "devm_clk_get (%d)%s fail", i, clk_info->clk_name);
return PTR_ERR(clk_info->vcodec_clk);
}
}
@@ -63,7 +60,7 @@ static int mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
ret = pm_runtime_resume_and_get(pm->dev);
if (ret)
- mtk_v4l2_err("pm_runtime_resume_and_get fail %d", ret);
+ dev_err(pm->dev, "pm_runtime_resume_and_get fail %d", ret);
return ret;
}
@@ -74,7 +71,7 @@ static void mtk_vcodec_dec_pw_off(struct mtk_vcodec_pm *pm)
ret = pm_runtime_put(pm->dev);
if (ret && ret != -EAGAIN)
- mtk_v4l2_err("pm_runtime_put fail %d", ret);
+ dev_err(pm->dev, "pm_runtime_put fail %d", ret);
}
static void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm)
@@ -86,7 +83,7 @@ static void mtk_vcodec_dec_clock_on(struct mtk_vcodec_pm *pm)
for (i = 0; i < dec_clk->clk_num; i++) {
ret = clk_prepare_enable(dec_clk->clk_info[i].vcodec_clk);
if (ret) {
- mtk_v4l2_err("clk_prepare_enable %d %s fail %d", i,
+ dev_err(pm->dev, "clk_prepare_enable %d %s fail %d", i,
dec_clk->clk_info[i].clk_name, ret);
goto error;
}
@@ -108,7 +105,7 @@ static void mtk_vcodec_dec_clock_off(struct mtk_vcodec_pm *pm)
clk_disable_unprepare(dec_clk->clk_info[i].vcodec_clk);
}
-static void mtk_vcodec_dec_enable_irq(struct mtk_vcodec_dev *vdec_dev, int hw_idx)
+static void mtk_vcodec_dec_enable_irq(struct mtk_vcodec_dec_dev *vdec_dev, int hw_idx)
{
struct mtk_vdec_hw_dev *subdev_dev;
@@ -120,13 +117,13 @@ static void mtk_vcodec_dec_enable_irq(struct mtk_vcodec_dev *vdec_dev, int hw_id
if (subdev_dev)
enable_irq(subdev_dev->dec_irq);
else
- mtk_v4l2_err("Failed to get hw dev\n");
+ dev_err(&vdec_dev->plat_dev->dev, "Failed to get hw dev\n");
} else {
enable_irq(vdec_dev->dec_irq);
}
}
-static void mtk_vcodec_dec_disable_irq(struct mtk_vcodec_dev *vdec_dev, int hw_idx)
+static void mtk_vcodec_dec_disable_irq(struct mtk_vcodec_dec_dev *vdec_dev, int hw_idx)
{
struct mtk_vdec_hw_dev *subdev_dev;
@@ -138,13 +135,13 @@ static void mtk_vcodec_dec_disable_irq(struct mtk_vcodec_dev *vdec_dev, int hw_i
if (subdev_dev)
disable_irq(subdev_dev->dec_irq);
else
- mtk_v4l2_err("Failed to get hw dev\n");
+ dev_err(&vdec_dev->plat_dev->dev, "Failed to get hw dev\n");
} else {
disable_irq(vdec_dev->dec_irq);
}
}
-static void mtk_vcodec_load_racing_info(struct mtk_vcodec_ctx *ctx)
+static void mtk_vcodec_load_racing_info(struct mtk_vcodec_dec_ctx *ctx)
{
void __iomem *vdec_racing_addr;
int j;
@@ -158,7 +155,7 @@ static void mtk_vcodec_load_racing_info(struct mtk_vcodec_ctx *ctx)
mutex_unlock(&ctx->dev->dec_racing_info_mutex);
}
-static void mtk_vcodec_record_racing_info(struct mtk_vcodec_ctx *ctx)
+static void mtk_vcodec_record_racing_info(struct mtk_vcodec_dec_ctx *ctx)
{
void __iomem *vdec_racing_addr;
int j;
@@ -172,7 +169,7 @@ static void mtk_vcodec_record_racing_info(struct mtk_vcodec_ctx *ctx)
mutex_unlock(&ctx->dev->dec_racing_info_mutex);
}
-static struct mtk_vcodec_pm *mtk_vcodec_dec_get_pm(struct mtk_vcodec_dev *vdec_dev,
+static struct mtk_vcodec_pm *mtk_vcodec_dec_get_pm(struct mtk_vcodec_dec_dev *vdec_dev,
int hw_idx)
{
struct mtk_vdec_hw_dev *subdev_dev;
@@ -185,14 +182,14 @@ static struct mtk_vcodec_pm *mtk_vcodec_dec_get_pm(struct mtk_vcodec_dev *vdec_d
if (subdev_dev)
return &subdev_dev->pm;
- mtk_v4l2_err("Failed to get hw dev\n");
+ dev_err(&vdec_dev->plat_dev->dev, "Failed to get hw dev\n");
return NULL;
}
return &vdec_dev->pm;
}
-static void mtk_vcodec_dec_child_dev_on(struct mtk_vcodec_dev *vdec_dev,
+static void mtk_vcodec_dec_child_dev_on(struct mtk_vcodec_dec_dev *vdec_dev,
int hw_idx)
{
struct mtk_vcodec_pm *pm;
@@ -212,7 +209,7 @@ static void mtk_vcodec_dec_child_dev_on(struct mtk_vcodec_dev *vdec_dev,
}
}
-static void mtk_vcodec_dec_child_dev_off(struct mtk_vcodec_dev *vdec_dev,
+static void mtk_vcodec_dec_child_dev_off(struct mtk_vcodec_dec_dev *vdec_dev,
int hw_idx)
{
struct mtk_vcodec_pm *pm;
@@ -232,7 +229,7 @@ static void mtk_vcodec_dec_child_dev_off(struct mtk_vcodec_dev *vdec_dev,
}
}
-void mtk_vcodec_dec_enable_hardware(struct mtk_vcodec_ctx *ctx, int hw_idx)
+void mtk_vcodec_dec_enable_hardware(struct mtk_vcodec_dec_ctx *ctx, int hw_idx)
{
mutex_lock(&ctx->dev->dec_mutex[hw_idx]);
@@ -248,7 +245,7 @@ void mtk_vcodec_dec_enable_hardware(struct mtk_vcodec_ctx *ctx, int hw_idx)
}
EXPORT_SYMBOL_GPL(mtk_vcodec_dec_enable_hardware);
-void mtk_vcodec_dec_disable_hardware(struct mtk_vcodec_ctx *ctx, int hw_idx)
+void mtk_vcodec_dec_disable_hardware(struct mtk_vcodec_dec_ctx *ctx, int hw_idx)
{
if (IS_VDEC_INNER_RACING(ctx->dev->dec_capability))
mtk_vcodec_record_racing_info(ctx);
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.h b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_pm.h
index dbcf3cabe6f3..87a50d589d42 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_pm.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_pm.h
@@ -7,11 +7,11 @@
#ifndef _MTK_VCODEC_DEC_PM_H_
#define _MTK_VCODEC_DEC_PM_H_
-#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec_drv.h"
int mtk_vcodec_init_dec_clk(struct platform_device *pdev, struct mtk_vcodec_pm *pm);
-void mtk_vcodec_dec_enable_hardware(struct mtk_vcodec_ctx *ctx, int hw_idx);
-void mtk_vcodec_dec_disable_hardware(struct mtk_vcodec_ctx *ctx, int hw_idx);
+void mtk_vcodec_dec_enable_hardware(struct mtk_vcodec_dec_ctx *ctx, int hw_idx);
+void mtk_vcodec_dec_disable_hardware(struct mtk_vcodec_dec_ctx *ctx, int hw_idx);
#endif /* _MTK_VCODEC_DEC_PM_H_ */
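A minimal usage sketch (assuming hw_idx selects the main or a subdev decoder; the register programming in the middle is elided, and this is not a call sequence copied from the patch): decode paths are expected to bracket hardware access with these helpers so power, clocks, IRQs and the racing-info save/restore are handled in one place:

    /* Hypothetical sketch: bracket hardware register access with the PM helpers. */
    mtk_vcodec_dec_enable_hardware(ctx, hw_idx);
    /* ... program decode registers, kick the job, wait for the decode IRQ ... */
    mtk_vcodec_dec_disable_hardware(ctx, hw_idx);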
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c
index 0fbd030026c7..11ca2c2fbaad 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateful.c
@@ -4,10 +4,7 @@
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
-#include "mtk_vcodec_drv.h"
#include "mtk_vcodec_dec.h"
-#include "mtk_vcodec_intr.h"
-#include "mtk_vcodec_util.h"
#include "mtk_vcodec_dec_pm.h"
#include "vdec_drv_if.h"
@@ -55,21 +52,22 @@ static const unsigned int num_supported_formats =
* Note the buffers returned from codec driver may still be in driver's
* reference list.
*/
-static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
+static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_dec_ctx *ctx)
{
struct vdec_fb *disp_frame_buffer = NULL;
struct mtk_video_dec_buf *dstbuf;
struct vb2_v4l2_buffer *vb;
- mtk_v4l2_debug(3, "[%d]", ctx->id);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d]", ctx->id);
if (vdec_if_get_param(ctx, GET_PARAM_DISP_FRAME_BUFFER,
&disp_frame_buffer)) {
- mtk_v4l2_err("[%d]Cannot get param : GET_PARAM_DISP_FRAME_BUFFER", ctx->id);
+ mtk_v4l2_vdec_err(ctx, "[%d]Cannot get param : GET_PARAM_DISP_FRAME_BUFFER",
+ ctx->id);
return NULL;
}
if (!disp_frame_buffer) {
- mtk_v4l2_debug(3, "No display frame buffer");
+ mtk_v4l2_vdec_dbg(3, ctx, "No display frame buffer");
return NULL;
}
@@ -78,9 +76,9 @@ static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
vb = &dstbuf->m2m_buf.vb;
mutex_lock(&ctx->lock);
if (dstbuf->used) {
- mtk_v4l2_debug(2, "[%d]status=%x queue id=%d to done_list %d",
- ctx->id, disp_frame_buffer->status,
- vb->vb2_buf.index, dstbuf->queued_in_vb2);
+ mtk_v4l2_vdec_dbg(2, ctx, "[%d]status=%x queue id=%d to done_list %d",
+ ctx->id, disp_frame_buffer->status,
+ vb->vb2_buf.index, dstbuf->queued_in_vb2);
v4l2_m2m_buf_done(vb, VB2_BUF_STATE_DONE);
ctx->decoded_frame_cnt++;
@@ -97,7 +95,7 @@ static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
* previous sps/pps/resolution change decode, or do nothing if user
* space still owns this buffer
*/
-static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
+static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_dec_ctx *ctx)
{
struct mtk_video_dec_buf *dstbuf;
struct vdec_fb *free_frame_buffer = NULL;
@@ -105,16 +103,16 @@ static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
if (vdec_if_get_param(ctx, GET_PARAM_FREE_FRAME_BUFFER,
&free_frame_buffer)) {
- mtk_v4l2_err("[%d] Error!! Cannot get param", ctx->id);
+ mtk_v4l2_vdec_err(ctx, "[%d] Error!! Cannot get param", ctx->id);
return NULL;
}
if (!free_frame_buffer) {
- mtk_v4l2_debug(3, " No free frame buffer");
+ mtk_v4l2_vdec_dbg(3, ctx, " No free frame buffer");
return NULL;
}
- mtk_v4l2_debug(3, "[%d] tmp_frame_addr = 0x%p", ctx->id,
- free_frame_buffer);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d] tmp_frame_addr = 0x%p", ctx->id,
+ free_frame_buffer);
dstbuf = container_of(free_frame_buffer, struct mtk_video_dec_buf,
frame_buffer);
@@ -131,9 +129,9 @@ static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
* This reduce overheads that dq/q unused capture
* buffer. In this case, queued_in_vb2 = true.
*/
- mtk_v4l2_debug(2, "[%d]status=%x queue id=%d to rdy_queue %d",
- ctx->id, free_frame_buffer->status,
- vb->vb2_buf.index, dstbuf->queued_in_vb2);
+ mtk_v4l2_vdec_dbg(2, ctx, "[%d]status=%x queue id=%d to rdy_queue %d",
+ ctx->id, free_frame_buffer->status,
+ vb->vb2_buf.index, dstbuf->queued_in_vb2);
v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
} else if (!dstbuf->queued_in_vb2 && dstbuf->queued_in_v4l2) {
/*
@@ -146,10 +144,10 @@ static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
* means this buffer is not from previous decode
* output.
*/
- mtk_v4l2_debug(2,
- "[%d]status=%x queue id=%d to rdy_queue",
- ctx->id, free_frame_buffer->status,
- vb->vb2_buf.index);
+ mtk_v4l2_vdec_dbg(2, ctx,
+ "[%d]status=%x queue id=%d to rdy_queue",
+ ctx->id, free_frame_buffer->status,
+ vb->vb2_buf.index);
v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
dstbuf->queued_in_vb2 = true;
} else {
@@ -161,10 +159,10 @@ static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
* When this buffer q from user space, it could
* directly q to vb2 buffer
*/
- mtk_v4l2_debug(3, "[%d]status=%x err queue id=%d %d %d",
- ctx->id, free_frame_buffer->status,
- vb->vb2_buf.index, dstbuf->queued_in_vb2,
- dstbuf->queued_in_v4l2);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d]status=%x err queue id=%d %d %d",
+ ctx->id, free_frame_buffer->status,
+ vb->vb2_buf.index, dstbuf->queued_in_vb2,
+ dstbuf->queued_in_v4l2);
}
dstbuf->used = false;
}
@@ -172,37 +170,37 @@ static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
return &vb->vb2_buf;
}
-static void clean_display_buffer(struct mtk_vcodec_ctx *ctx)
+static void clean_display_buffer(struct mtk_vcodec_dec_ctx *ctx)
{
while (get_display_buffer(ctx))
;
}
-static void clean_free_buffer(struct mtk_vcodec_ctx *ctx)
+static void clean_free_buffer(struct mtk_vcodec_dec_ctx *ctx)
{
while (get_free_buffer(ctx))
;
}
-static void mtk_vdec_queue_res_chg_event(struct mtk_vcodec_ctx *ctx)
+static void mtk_vdec_queue_res_chg_event(struct mtk_vcodec_dec_ctx *ctx)
{
static const struct v4l2_event ev_src_ch = {
.type = V4L2_EVENT_SOURCE_CHANGE,
.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
};
- mtk_v4l2_debug(1, "[%d]", ctx->id);
+ mtk_v4l2_vdec_dbg(1, ctx, "[%d]", ctx->id);
v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
}
-static int mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
+static int mtk_vdec_flush_decoder(struct mtk_vcodec_dec_ctx *ctx)
{
bool res_chg;
int ret;
ret = vdec_if_decode(ctx, NULL, NULL, &res_chg);
if (ret)
- mtk_v4l2_err("DecodeFinal failed, ret=%d", ret);
+ mtk_v4l2_vdec_err(ctx, "DecodeFinal failed, ret=%d", ret);
clean_display_buffer(ctx);
clean_free_buffer(ctx);
@@ -210,7 +208,7 @@ static int mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
return 0;
}
-static void mtk_vdec_update_fmt(struct mtk_vcodec_ctx *ctx,
+static void mtk_vdec_update_fmt(struct mtk_vcodec_dec_ctx *ctx,
unsigned int pixelformat)
{
const struct mtk_video_fmt *fmt;
@@ -221,24 +219,25 @@ static void mtk_vdec_update_fmt(struct mtk_vcodec_ctx *ctx,
for (k = 0; k < num_supported_formats; k++) {
fmt = &mtk_video_formats[k];
if (fmt->fourcc == pixelformat) {
- mtk_v4l2_debug(1, "Update cap fourcc(%d -> %d)",
- dst_q_data->fmt->fourcc, pixelformat);
+ mtk_v4l2_vdec_dbg(1, ctx, "Update cap fourcc(%d -> %d)",
+ dst_q_data->fmt->fourcc, pixelformat);
dst_q_data->fmt = fmt;
return;
}
}
- mtk_v4l2_err("Cannot get fourcc(%d), using init value", pixelformat);
+ mtk_v4l2_vdec_err(ctx, "Cannot get fourcc(%d), using init value", pixelformat);
}
-static int mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
+static int mtk_vdec_pic_info_update(struct mtk_vcodec_dec_ctx *ctx)
{
unsigned int dpbsize = 0;
int ret;
if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO,
&ctx->last_decoded_picinfo)) {
- mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR", ctx->id);
+ mtk_v4l2_vdec_err(ctx, "[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
+ ctx->id);
return -EINVAL;
}
@@ -246,7 +245,7 @@ static int mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
ctx->last_decoded_picinfo.pic_h == 0 ||
ctx->last_decoded_picinfo.buf_w == 0 ||
ctx->last_decoded_picinfo.buf_h == 0) {
- mtk_v4l2_err("Cannot get correct pic info");
+ mtk_v4l2_vdec_err(ctx, "Cannot get correct pic info");
return -EINVAL;
}
@@ -258,15 +257,15 @@ static int mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
ctx->last_decoded_picinfo.pic_h == ctx->picinfo.pic_h)
return 0;
- mtk_v4l2_debug(1, "[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)", ctx->id,
- ctx->last_decoded_picinfo.pic_w,
- ctx->last_decoded_picinfo.pic_h, ctx->picinfo.pic_w,
- ctx->picinfo.pic_h, ctx->last_decoded_picinfo.buf_w,
- ctx->last_decoded_picinfo.buf_h);
+ mtk_v4l2_vdec_dbg(1, ctx, "[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)", ctx->id,
+ ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h, ctx->picinfo.pic_w,
+ ctx->picinfo.pic_h, ctx->last_decoded_picinfo.buf_w,
+ ctx->last_decoded_picinfo.buf_h);
ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
if (dpbsize == 0)
- mtk_v4l2_err("Incorrect dpb size, ret=%d", ret);
+ mtk_v4l2_vdec_err(ctx, "Incorrect dpb size, ret=%d", ret);
ctx->dpb_size = dpbsize;
@@ -275,9 +274,9 @@ static int mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
static void mtk_vdec_worker(struct work_struct *work)
{
- struct mtk_vcodec_ctx *ctx =
- container_of(work, struct mtk_vcodec_ctx, decode_work);
- struct mtk_vcodec_dev *dev = ctx->dev;
+ struct mtk_vcodec_dec_ctx *ctx =
+ container_of(work, struct mtk_vcodec_dec_ctx, decode_work);
+ struct mtk_vcodec_dec_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct mtk_vcodec_mem buf;
struct vdec_fb *pfb;
@@ -288,14 +287,14 @@ static void mtk_vdec_worker(struct work_struct *work)
src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
if (!src_buf) {
v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- mtk_v4l2_debug(1, "[%d] src_buf empty!!", ctx->id);
+ mtk_v4l2_vdec_dbg(1, ctx, "[%d] src_buf empty!!", ctx->id);
return;
}
dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
if (!dst_buf) {
v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- mtk_v4l2_debug(1, "[%d] dst_buf empty!!", ctx->id);
+ mtk_v4l2_vdec_dbg(1, ctx, "[%d] dst_buf empty!!", ctx->id);
return;
}
@@ -313,15 +312,15 @@ static void mtk_vdec_worker(struct work_struct *work)
vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 1);
pfb->base_c.size = ctx->picinfo.fb_sz[1];
pfb->status = 0;
- mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);
+ mtk_v4l2_vdec_dbg(3, ctx, "===>[%d] vdec_if_decode() ===>", ctx->id);
- mtk_v4l2_debug(3,
- "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
- dst_buf->vb2_buf.index, pfb, pfb->base_y.va,
- &pfb->base_y.dma_addr, &pfb->base_c.dma_addr, pfb->base_y.size);
+ mtk_v4l2_vdec_dbg(3, ctx,
+ "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
+ dst_buf->vb2_buf.index, pfb, pfb->base_y.va,
+ &pfb->base_y.dma_addr, &pfb->base_c.dma_addr, pfb->base_y.size);
if (src_buf == &ctx->empty_flush_buf.vb) {
- mtk_v4l2_debug(1, "Got empty flush input buffer.");
+ mtk_v4l2_vdec_dbg(1, ctx, "Got empty flush input buffer.");
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
/* update dst buf status */
@@ -350,12 +349,12 @@ static void mtk_vdec_worker(struct work_struct *work)
buf.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
if (!buf.va) {
v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- mtk_v4l2_err("[%d] id=%d src_addr is NULL!!", ctx->id,
- src_buf->vb2_buf.index);
+ mtk_v4l2_vdec_err(ctx, "[%d] id=%d src_addr is NULL!!", ctx->id,
+ src_buf->vb2_buf.index);
return;
}
- mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
- ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
+ ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
dst_buf->timecode = src_buf->timecode;
mutex_lock(&ctx->lock);
@@ -366,9 +365,10 @@ static void mtk_vdec_worker(struct work_struct *work)
ret = vdec_if_decode(ctx, &buf, pfb, &res_chg);
if (ret) {
- mtk_v4l2_err(" <===[%d], src_buf[%d] sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
- ctx->id, src_buf->vb2_buf.index, buf.size,
- src_buf->vb2_buf.timestamp, dst_buf->vb2_buf.index, ret, res_chg);
+ mtk_v4l2_vdec_err(ctx,
+ "[%d] decode src[%d] sz=0x%zx pts=%llu dst[%d] ret=%d res_chg=%d",
+ ctx->id, src_buf->vb2_buf.index, buf.size,
+ src_buf->vb2_buf.timestamp, dst_buf->vb2_buf.index, ret, res_chg);
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
if (ret == -EIO) {
mutex_lock(&ctx->lock);
@@ -417,12 +417,12 @@ static void vb2ops_vdec_stateful_buf_queue(struct vb2_buffer *vb)
bool res_chg = false;
int ret;
unsigned int dpbsize = 1, i;
- struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_vcodec_dec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vb2_v4l2;
struct mtk_q_data *dst_q_data;
- mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p", ctx->id,
- vb->vb2_queue->type, vb->index, vb);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d] (%d) id=%d, vb=%p", ctx->id,
+ vb->vb2_queue->type, vb->index, vb);
/*
* check if this buffer is ready to be used after decode
*/
@@ -448,20 +448,19 @@ static void vb2ops_vdec_stateful_buf_queue(struct vb2_buffer *vb)
v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
if (ctx->state != MTK_STATE_INIT) {
- mtk_v4l2_debug(3, "[%d] already init driver %d", ctx->id,
- ctx->state);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d] already init driver %d", ctx->id, ctx->state);
return;
}
src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
if (!src_buf) {
- mtk_v4l2_err("No src buffer");
+ mtk_v4l2_vdec_err(ctx, "No src buffer");
return;
}
if (src_buf == &ctx->empty_flush_buf.vb) {
/* This shouldn't happen. Just in case. */
- mtk_v4l2_err("Invalid flush buffer.");
+ mtk_v4l2_vdec_err(ctx, "Invalid flush buffer.");
v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
return;
}
@@ -469,9 +468,8 @@ static void vb2ops_vdec_stateful_buf_queue(struct vb2_buffer *vb)
src_mem.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
src_mem.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
- mtk_v4l2_debug(2, "[%d] buf id=%d va=%p dma=%pad size=%zx", ctx->id,
- src_buf->vb2_buf.index, src_mem.va, &src_mem.dma_addr,
- src_mem.size);
+ mtk_v4l2_vdec_dbg(2, ctx, "[%d] buf id=%d va=%p dma=%pad size=%zx", ctx->id,
+ src_buf->vb2_buf.index, src_mem.va, &src_mem.dma_addr, src_mem.size);
ret = vdec_if_decode(ctx, &src_mem, NULL, &res_chg);
if (ret || !res_chg) {
@@ -484,20 +482,22 @@ static void vb2ops_vdec_stateful_buf_queue(struct vb2_buffer *vb)
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
if (ret == -EIO) {
- mtk_v4l2_err("[%d] Unrecoverable error in vdec_if_decode.", ctx->id);
+ mtk_v4l2_vdec_err(ctx, "[%d] Unrecoverable error in vdec_if_decode.",
+ ctx->id);
ctx->state = MTK_STATE_ABORT;
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
} else {
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
}
- mtk_v4l2_debug(ret ? 0 : 1,
- "[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
- ctx->id, src_buf->vb2_buf.index, src_mem.size, ret, res_chg);
+ mtk_v4l2_vdec_dbg(ret ? 0 : 1, ctx,
+ "[%d] decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
+ ctx->id, src_buf->vb2_buf.index, src_mem.size, ret, res_chg);
return;
}
if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo)) {
- mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR", ctx->id);
+ mtk_v4l2_vdec_err(ctx, "[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
+ ctx->id);
return;
}
@@ -508,24 +508,24 @@ static void vb2ops_vdec_stateful_buf_queue(struct vb2_buffer *vb)
dst_q_data->bytesperline[i] = ctx->picinfo.buf_w;
}
- mtk_v4l2_debug(2, "[%d] vdec_if_init() OK wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
- ctx->id, ctx->picinfo.buf_w, ctx->picinfo.buf_h, ctx->picinfo.pic_w,
- ctx->picinfo.pic_h, dst_q_data->sizeimage[0], dst_q_data->sizeimage[1]);
+ mtk_v4l2_vdec_dbg(2, ctx, "[%d] init OK wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
+ ctx->id, ctx->picinfo.buf_w, ctx->picinfo.buf_h, ctx->picinfo.pic_w,
+ ctx->picinfo.pic_h, dst_q_data->sizeimage[0], dst_q_data->sizeimage[1]);
ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
if (dpbsize == 0)
- mtk_v4l2_err("[%d] GET_PARAM_DPB_SIZE fail=%d", ctx->id, ret);
+ mtk_v4l2_vdec_err(ctx, "[%d] GET_PARAM_DPB_SIZE fail=%d", ctx->id, ret);
ctx->dpb_size = dpbsize;
ctx->state = MTK_STATE_HEADER;
- mtk_v4l2_debug(1, "[%d] dpbsize=%d", ctx->id, ctx->dpb_size);
+ mtk_v4l2_vdec_dbg(1, ctx, "[%d] dpbsize=%d", ctx->id, ctx->dpb_size);
mtk_vdec_queue_res_chg_event(ctx);
}
static int mtk_vdec_g_v_ctrl(struct v4l2_ctrl *ctrl)
{
- struct mtk_vcodec_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct mtk_vcodec_dec_ctx *ctx = ctrl_to_dec_ctx(ctrl);
int ret = 0;
switch (ctrl->id) {
@@ -533,7 +533,7 @@ static int mtk_vdec_g_v_ctrl(struct v4l2_ctrl *ctrl)
if (ctx->state >= MTK_STATE_HEADER) {
ctrl->val = ctx->dpb_size;
} else {
- mtk_v4l2_debug(0, "Seqinfo not ready");
+ mtk_v4l2_vdec_dbg(0, ctx, "Seqinfo not ready");
ctrl->val = 0;
}
break;
@@ -547,7 +547,7 @@ static const struct v4l2_ctrl_ops mtk_vcodec_dec_ctrl_ops = {
.g_volatile_ctrl = mtk_vdec_g_v_ctrl,
};
-static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_dec_ctx *ctx)
{
struct v4l2_ctrl *ctrl;
@@ -570,7 +570,7 @@ static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
V4L2_MPEG_VIDEO_H264_PROFILE_MAIN);
if (ctx->ctrl_hdl.error) {
- mtk_v4l2_err("Adding control failed %d", ctx->ctrl_hdl.error);
+ mtk_v4l2_vdec_err(ctx, "Adding control failed %d", ctx->ctrl_hdl.error);
return ctx->ctrl_hdl.error;
}
@@ -578,7 +578,7 @@ static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
return 0;
}
-static void mtk_init_vdec_params(struct mtk_vcodec_ctx *ctx)
+static void mtk_init_vdec_params(struct mtk_vcodec_dec_ctx *ctx)
{
unsigned int i;
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
index db1e14a1bd6c..e29c9c58f3da 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateless.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_stateless.c
@@ -6,10 +6,7 @@
#include <media/v4l2-mem2mem.h>
#include <linux/module.h>
-#include "mtk_vcodec_drv.h"
#include "mtk_vcodec_dec.h"
-#include "mtk_vcodec_intr.h"
-#include "mtk_vcodec_util.h"
#include "mtk_vcodec_dec_pm.h"
#include "vdec_drv_if.h"
@@ -203,7 +200,7 @@ static const struct mtk_stateless_control mtk_stateless_controls[] = {
#define NUM_CTRLS ARRAY_SIZE(mtk_stateless_controls)
-static struct mtk_video_fmt mtk_video_formats[7];
+static struct mtk_video_fmt mtk_video_formats[9];
static struct mtk_video_fmt default_out_format;
static struct mtk_video_fmt default_cap_format;
@@ -218,7 +215,7 @@ static const struct v4l2_frmsize_stepwise stepwise_fhd = {
.step_height = 16
};
-static void mtk_vdec_stateless_cap_to_disp(struct mtk_vcodec_ctx *ctx, int error,
+static void mtk_vdec_stateless_cap_to_disp(struct mtk_vcodec_dec_ctx *ctx, int error,
struct media_request *src_buf_req)
{
struct vb2_v4l2_buffer *vb2_dst;
@@ -232,17 +229,17 @@ static void mtk_vdec_stateless_cap_to_disp(struct mtk_vcodec_ctx *ctx, int error
vb2_dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
if (vb2_dst) {
v4l2_m2m_buf_done(vb2_dst, state);
- mtk_v4l2_debug(2, "free frame buffer id:%d to done list",
- vb2_dst->vb2_buf.index);
+ mtk_v4l2_vdec_dbg(2, ctx, "free frame buffer id:%d to done list",
+ vb2_dst->vb2_buf.index);
} else {
- mtk_v4l2_err("dst buffer is NULL");
+ mtk_v4l2_vdec_err(ctx, "dst buffer is NULL");
}
if (src_buf_req)
v4l2_ctrl_request_complete(src_buf_req, &ctx->ctrl_hdl);
}
-static struct vdec_fb *vdec_get_cap_buffer(struct mtk_vcodec_ctx *ctx)
+static struct vdec_fb *vdec_get_cap_buffer(struct mtk_vcodec_dec_ctx *ctx)
{
struct mtk_video_dec_buf *framebuf;
struct vb2_v4l2_buffer *vb2_v4l2;
@@ -251,7 +248,7 @@ static struct vdec_fb *vdec_get_cap_buffer(struct mtk_vcodec_ctx *ctx)
vb2_v4l2 = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
if (!vb2_v4l2) {
- mtk_v4l2_debug(1, "[%d] dst_buf empty!!", ctx->id);
+ mtk_v4l2_vdec_dbg(1, ctx, "[%d] dst_buf empty!!", ctx->id);
return NULL;
}
@@ -269,25 +266,26 @@ static struct vdec_fb *vdec_get_cap_buffer(struct mtk_vcodec_ctx *ctx)
vb2_dma_contig_plane_dma_addr(dst_buf, 1);
pfb->base_c.size = ctx->q_data[MTK_Q_DATA_DST].sizeimage[1];
}
- mtk_v4l2_debug(1, "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx frame_count = %d",
- dst_buf->index, pfb, pfb->base_y.va, &pfb->base_y.dma_addr,
- &pfb->base_c.dma_addr, pfb->base_y.size, ctx->decoded_frame_cnt);
+ mtk_v4l2_vdec_dbg(1, ctx,
+ "id=%d Framebuf pfb=%p VA=%p Y/C_DMA=%pad_%pad Sz=%zx frame_count = %d",
+ dst_buf->index, pfb, pfb->base_y.va, &pfb->base_y.dma_addr,
+ &pfb->base_c.dma_addr, pfb->base_y.size, ctx->decoded_frame_cnt);
return pfb;
}
static void vb2ops_vdec_buf_request_complete(struct vb2_buffer *vb)
{
- struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_vcodec_dec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->ctrl_hdl);
}
static void mtk_vdec_worker(struct work_struct *work)
{
- struct mtk_vcodec_ctx *ctx =
- container_of(work, struct mtk_vcodec_ctx, decode_work);
- struct mtk_vcodec_dev *dev = ctx->dev;
+ struct mtk_vcodec_dec_ctx *ctx =
+ container_of(work, struct mtk_vcodec_dec_ctx, decode_work);
+ struct mtk_vcodec_dec_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *vb2_v4l2_src;
struct vb2_buffer *vb2_src;
struct mtk_vcodec_mem *bs_src;
@@ -300,7 +298,7 @@ static void mtk_vdec_worker(struct work_struct *work)
vb2_v4l2_src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
if (!vb2_v4l2_src) {
v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- mtk_v4l2_debug(1, "[%d] no available source buffer", ctx->id);
+ mtk_v4l2_vdec_dbg(1, ctx, "[%d] no available source buffer", ctx->id);
return;
}
@@ -309,33 +307,34 @@ static void mtk_vdec_worker(struct work_struct *work)
m2m_buf.vb);
bs_src = &dec_buf_src->bs_buffer;
- mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p", ctx->id,
- vb2_src->vb2_queue->type, vb2_src->index, vb2_src);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d] (%d) id=%d, vb=%p", ctx->id,
+ vb2_src->vb2_queue->type, vb2_src->index, vb2_src);
bs_src->va = vb2_plane_vaddr(vb2_src, 0);
bs_src->dma_addr = vb2_dma_contig_plane_dma_addr(vb2_src, 0);
bs_src->size = (size_t)vb2_src->planes[0].bytesused;
if (!bs_src->va) {
v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- mtk_v4l2_err("[%d] id=%d source buffer is NULL", ctx->id,
- vb2_src->index);
+ mtk_v4l2_vdec_err(ctx, "[%d] id=%d source buffer is NULL", ctx->id,
+ vb2_src->index);
return;
}
- mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
- ctx->id, bs_src->va, &bs_src->dma_addr, bs_src->size, vb2_src);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
+ ctx->id, bs_src->va, &bs_src->dma_addr, bs_src->size, vb2_src);
/* Apply request controls. */
src_buf_req = vb2_src->req_obj.req;
if (src_buf_req)
v4l2_ctrl_request_setup(src_buf_req, &ctx->ctrl_hdl);
else
- mtk_v4l2_err("vb2 buffer media request is NULL");
+ mtk_v4l2_vdec_err(ctx, "vb2 buffer media request is NULL");
ret = vdec_if_decode(ctx, bs_src, NULL, &res_chg);
if (ret && ret != -EAGAIN) {
- mtk_v4l2_err(" <===[%d], src_buf[%d] sz=0x%zx pts=%llu vdec_if_decode() ret=%d res_chg=%d===>",
- ctx->id, vb2_src->index, bs_src->size,
- vb2_src->timestamp, ret, res_chg);
+ mtk_v4l2_vdec_err(ctx,
+ "[%d] decode src_buf[%d] sz=0x%zx pts=%llu ret=%d res_chg=%d",
+ ctx->id, vb2_src->index, bs_src->size,
+ vb2_src->timestamp, ret, res_chg);
if (ret == -EIO) {
mutex_lock(&ctx->lock);
dec_buf_src->error = true;
@@ -360,10 +359,11 @@ static void mtk_vdec_worker(struct work_struct *work)
static void vb2ops_vdec_stateless_buf_queue(struct vb2_buffer *vb)
{
- struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_vcodec_dec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vb2_v4l2 = to_vb2_v4l2_buffer(vb);
- mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p", ctx->id, vb->vb2_queue->type, vb->index, vb);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d] (%d) id=%d, vb=%p", ctx->id, vb->vb2_queue->type,
+ vb->index, vb);
mutex_lock(&ctx->lock);
v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
@@ -374,35 +374,168 @@ static void vb2ops_vdec_stateless_buf_queue(struct vb2_buffer *vb)
/* If an OUTPUT buffer, we may need to update the state */
if (ctx->state == MTK_STATE_INIT) {
ctx->state = MTK_STATE_HEADER;
- mtk_v4l2_debug(1, "Init driver from init to header.");
+ mtk_v4l2_vdec_dbg(1, ctx, "Init driver from init to header.");
} else {
- mtk_v4l2_debug(3, "[%d] already init driver %d", ctx->id, ctx->state);
+ mtk_v4l2_vdec_dbg(3, ctx, "[%d] already init driver %d", ctx->id, ctx->state);
}
}
-static int mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
+static int mtk_vdec_flush_decoder(struct mtk_vcodec_dec_ctx *ctx)
{
bool res_chg;
return vdec_if_decode(ctx, NULL, NULL, &res_chg);
}
-static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+static int mtk_vcodec_get_pic_info(struct mtk_vcodec_dec_ctx *ctx)
+{
+ struct mtk_q_data *q_data;
+ int ret = 0;
+
+ q_data = &ctx->q_data[MTK_Q_DATA_DST];
+ if (q_data->fmt->num_planes == 1) {
+ mtk_v4l2_vdec_err(ctx, "[%d]Error!! 10bit mode not support one plane", ctx->id);
+ return -EINVAL;
+ }
+
+ ctx->capture_fourcc = q_data->fmt->fourcc;
+ ret = vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo);
+ if (ret) {
+ mtk_v4l2_vdec_err(ctx, "[%d]Error!! Get GET_PARAM_PICTURE_INFO Fail", ctx->id);
+ return ret;
+ }
+
+ ctx->last_decoded_picinfo = ctx->picinfo;
+
+ q_data->sizeimage[0] = ctx->picinfo.fb_sz[0];
+ q_data->bytesperline[0] = ctx->picinfo.buf_w * 5 / 4;
+
+ q_data->sizeimage[1] = ctx->picinfo.fb_sz[1];
+ q_data->bytesperline[1] = ctx->picinfo.buf_w * 5 / 4;
+
+ q_data->coded_width = ctx->picinfo.buf_w;
+ q_data->coded_height = ctx->picinfo.buf_h;
+ mtk_v4l2_vdec_dbg(1, ctx, "[%d] wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
+ ctx->id, ctx->picinfo.buf_w, ctx->picinfo.buf_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ q_data->sizeimage[0], q_data->sizeimage[1]);
+
+ return ret;
+}
+
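+
A brief editorial note on the 5/4 factor used for bytesperline above: the computation implies a packed 10-bit layout in which four samples occupy five bytes per line. The stand-alone sketch below is an illustration only, not part of the patch; packed_10bit_stride is a hypothetical name and the packing interpretation is inferred from the arithmetic, not stated by the patch.

#include <stdio.h>

/* Stride of one line of a packed 10-bit plane: 10 bits per sample,
 * i.e. 5 bytes for every 4 samples. Assumes buf_w is a multiple of 4,
 * which the 64-pixel buffer-width alignment used elsewhere in this
 * driver would guarantee.
 */
static unsigned int packed_10bit_stride(unsigned int buf_w)
{
	return buf_w * 5 / 4;
}

int main(void)
{
	/* A 1920-pixel-wide buffer needs 2400 bytes per line. */
	printf("%u\n", packed_10bit_stride(1920));
	return 0;
}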
+static int mtk_vdec_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mtk_vcodec_dec_ctx *ctx = ctrl_to_dec_ctx(ctrl);
+ struct v4l2_ctrl_h264_sps *h264;
+ struct v4l2_ctrl_hevc_sps *h265;
+ struct v4l2_ctrl_vp9_frame *frame;
+ struct v4l2_ctrl_av1_sequence *seq;
+ struct v4l2_ctrl *hdr_ctrl;
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
+ const struct mtk_video_fmt *fmt;
+ int i = 0, ret = 0;
+
+ hdr_ctrl = ctrl;
+ if (!hdr_ctrl || !hdr_ctrl->p_new.p)
+ return -EINVAL;
+
+ switch (hdr_ctrl->id) {
+ case V4L2_CID_STATELESS_H264_SPS:
+ h264 = (struct v4l2_ctrl_h264_sps *)hdr_ctrl->p_new.p;
+
+ if (h264->bit_depth_chroma_minus8 == 2 && h264->bit_depth_luma_minus8 == 2) {
+ ctx->is_10bit_bitstream = true;
+ } else if (h264->bit_depth_chroma_minus8 != 0 &&
+ h264->bit_depth_luma_minus8 != 0) {
+ mtk_v4l2_vdec_err(ctx, "H264: chroma_minus8:%d, luma_minus8:%d",
+ h264->bit_depth_chroma_minus8,
+ h264->bit_depth_luma_minus8);
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_STATELESS_HEVC_SPS:
+ h265 = (struct v4l2_ctrl_hevc_sps *)hdr_ctrl->p_new.p;
+
+ if (h265->bit_depth_chroma_minus8 == 2 && h265->bit_depth_luma_minus8 == 2) {
+ ctx->is_10bit_bitstream = true;
+ } else if (h265->bit_depth_chroma_minus8 != 0 &&
+ h265->bit_depth_luma_minus8 != 0) {
+ mtk_v4l2_vdec_err(ctx, "HEVC: chroma_minus8:%d, luma_minus8:%d",
+ h265->bit_depth_chroma_minus8,
+ h265->bit_depth_luma_minus8);
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_STATELESS_VP9_FRAME:
+ frame = (struct v4l2_ctrl_vp9_frame *)hdr_ctrl->p_new.p;
+
+ if (frame->bit_depth == 10) {
+ ctx->is_10bit_bitstream = true;
+ } else if (frame->bit_depth != 8) {
+ mtk_v4l2_vdec_err(ctx, "VP9: bit_depth:%d", frame->bit_depth);
+ return -EINVAL;
+ }
+ break;
+ case V4L2_CID_STATELESS_AV1_SEQUENCE:
+ seq = (struct v4l2_ctrl_av1_sequence *)hdr_ctrl->p_new.p;
+
+ if (seq->bit_depth == 10) {
+ ctx->is_10bit_bitstream = true;
+ } else if (seq->bit_depth != 8) {
+ mtk_v4l2_vdec_err(ctx, "AV1: bit_depth:%d", seq->bit_depth);
+ return -EINVAL;
+ }
+ break;
+ default:
+ mtk_v4l2_vdec_dbg(3, ctx, "Not supported to set ctrl id: 0x%x\n", hdr_ctrl->id);
+ return ret;
+ }
+
+ if (!ctx->is_10bit_bitstream)
+ return ret;
+
+ for (i = 0; i < *dec_pdata->num_formats; i++) {
+ fmt = &dec_pdata->vdec_formats[i];
+ if (fmt->fourcc == V4L2_PIX_FMT_MT2110R &&
+ hdr_ctrl->id == V4L2_CID_STATELESS_H264_SPS) {
+ ctx->q_data[MTK_Q_DATA_DST].fmt = fmt;
+ break;
+ }
+
+ if (fmt->fourcc == V4L2_PIX_FMT_MT2110T &&
+ (hdr_ctrl->id == V4L2_CID_STATELESS_HEVC_SPS ||
+ hdr_ctrl->id == V4L2_CID_STATELESS_VP9_FRAME ||
+ hdr_ctrl->id == V4L2_CID_STATELESS_AV1_SEQUENCE)) {
+ ctx->q_data[MTK_Q_DATA_DST].fmt = fmt;
+ break;
+ }
+ }
+ ret = mtk_vcodec_get_pic_info(ctx);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops mtk_vcodec_dec_ctrl_ops = {
+ .s_ctrl = mtk_vdec_s_ctrl,
+};
+
+static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_dec_ctx *ctx)
{
unsigned int i;
v4l2_ctrl_handler_init(&ctx->ctrl_hdl, NUM_CTRLS);
if (ctx->ctrl_hdl.error) {
- mtk_v4l2_err("v4l2_ctrl_handler_init failed\n");
+ mtk_v4l2_vdec_err(ctx, "v4l2_ctrl_handler_init failed\n");
return ctx->ctrl_hdl.error;
}
for (i = 0; i < NUM_CTRLS; i++) {
struct v4l2_ctrl_config cfg = mtk_stateless_controls[i].cfg;
-
+ cfg.ops = &mtk_vcodec_dec_ctrl_ops;
v4l2_ctrl_new_custom(&ctx->ctrl_hdl, &cfg, NULL);
if (ctx->ctrl_hdl.error) {
- mtk_v4l2_err("Adding control %d failed %d", i, ctx->ctrl_hdl.error);
+ mtk_v4l2_vdec_err(ctx, "Adding control %d failed %d", i,
+ ctx->ctrl_hdl.error);
return ctx->ctrl_hdl.error;
}
}
@@ -421,11 +554,11 @@ static int fops_media_request_validate(struct media_request *mreq)
/* We expect exactly one buffer with the request */
break;
case 0:
- mtk_v4l2_debug(1, "No buffer provided with the request");
+ pr_debug(MTK_DBG_VCODEC_STR "No buffer provided with the request.");
return -ENOENT;
default:
- mtk_v4l2_debug(1, "Too many buffers (%d) provided with the request",
- buffer_cnt);
+ pr_debug(MTK_DBG_VCODEC_STR "Too many buffers (%d) provided with the request.",
+ buffer_cnt);
return -EINVAL;
}
@@ -438,9 +571,9 @@ const struct media_device_ops mtk_vcodec_media_ops = {
};
static void mtk_vcodec_add_formats(unsigned int fourcc,
- struct mtk_vcodec_ctx *ctx)
+ struct mtk_vcodec_dec_ctx *ctx)
{
- struct mtk_vcodec_dev *dev = ctx->dev;
+ struct mtk_vcodec_dec_dev *dev = ctx->dev;
const struct mtk_vcodec_dec_pdata *pdata = dev->vdec_pdata;
int count_formats = *pdata->num_formats;
@@ -465,21 +598,23 @@ static void mtk_vcodec_add_formats(unsigned int fourcc,
break;
case V4L2_PIX_FMT_MM21:
case V4L2_PIX_FMT_MT21C:
+ case V4L2_PIX_FMT_MT2110T:
+ case V4L2_PIX_FMT_MT2110R:
mtk_video_formats[count_formats].fourcc = fourcc;
mtk_video_formats[count_formats].type = MTK_FMT_FRAME;
mtk_video_formats[count_formats].num_planes = 2;
break;
default:
- mtk_v4l2_err("Can not add unsupported format type");
+ mtk_v4l2_vdec_err(ctx, "Can not add unsupported format type");
return;
}
num_formats++;
- mtk_v4l2_debug(3, "num_formats: %d dec_capability: 0x%x",
- count_formats, ctx->dev->dec_capability);
+ mtk_v4l2_vdec_dbg(3, ctx, "num_formats: %d dec_capability: 0x%x",
+ count_formats, ctx->dev->dec_capability);
}
-static void mtk_vcodec_get_supported_formats(struct mtk_vcodec_ctx *ctx)
+static void mtk_vcodec_get_supported_formats(struct mtk_vcodec_dec_ctx *ctx)
{
int cap_format_count = 0, out_format_count = 0;
@@ -490,6 +625,12 @@ static void mtk_vcodec_get_supported_formats(struct mtk_vcodec_ctx *ctx)
mtk_vcodec_add_formats(V4L2_PIX_FMT_MT21C, ctx);
cap_format_count++;
}
+ if (ctx->dev->dec_capability & MTK_VDEC_IS_SUPPORT_10BIT) {
+ mtk_vcodec_add_formats(V4L2_PIX_FMT_MT2110T, ctx);
+ cap_format_count++;
+ mtk_vcodec_add_formats(V4L2_PIX_FMT_MT2110R, ctx);
+ cap_format_count++;
+ }
if (ctx->dev->dec_capability & MTK_VDEC_FORMAT_MM21) {
mtk_vcodec_add_formats(V4L2_PIX_FMT_MM21, ctx);
cap_format_count++;
@@ -522,7 +663,7 @@ static void mtk_vcodec_get_supported_formats(struct mtk_vcodec_ctx *ctx)
mtk_video_formats[cap_format_count + out_format_count - 1];
}
-static void mtk_init_vdec_params(struct mtk_vcodec_ctx *ctx)
+static void mtk_init_vdec_params(struct mtk_vcodec_dec_ctx *ctx)
{
struct vb2_queue *src_vq;
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_av1_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c
index 404a1a23fd40..2b6a5adbc419 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_av1_req_lat_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c
@@ -8,9 +8,8 @@
#include <linux/slab.h>
#include <media/videobuf2-dma-contig.h>
-#include "../mtk_vcodec_util.h"
#include "../mtk_vcodec_dec.h"
-#include "../mtk_vcodec_intr.h"
+#include "../../common/mtk_vcodec_intr.h"
#include "../vdec_drv_base.h"
#include "../vdec_drv_if.h"
#include "../vdec_vpu_if.h"
@@ -706,7 +705,7 @@ struct vdec_av1_slice_pfc {
* @seq: global picture sequence
*/
struct vdec_av1_slice_instance {
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
struct vdec_vpu_inst vpu;
struct mtk_vcodec_mem iq_table;
@@ -756,7 +755,7 @@ static inline bool vdec_av1_slice_need_scale(u32 ref_width, u32 ref_height,
(this_height <= (ref_height << 4));
}
-static void *vdec_av1_get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id)
+static void *vdec_av1_get_ctrl_ptr(struct mtk_vcodec_dec_ctx *ctx, int id)
{
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl, id);
@@ -769,7 +768,7 @@ static void *vdec_av1_get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id)
static int vdec_av1_slice_init_cdf_table(struct vdec_av1_slice_instance *instance)
{
u8 *remote_cdf_table;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
struct vdec_av1_slice_init_vsi *vsi;
int ret;
@@ -778,12 +777,11 @@ static int vdec_av1_slice_init_cdf_table(struct vdec_av1_slice_instance *instanc
remote_cdf_table = mtk_vcodec_fw_map_dm_addr(ctx->dev->fw_handler,
(u32)vsi->cdf_table_addr);
if (IS_ERR(remote_cdf_table)) {
- mtk_vcodec_err(instance, "failed to map cdf table\n");
+ mtk_vdec_err(ctx, "failed to map cdf table\n");
return PTR_ERR(remote_cdf_table);
}
- mtk_vcodec_debug(instance, "map cdf table to 0x%p\n",
- remote_cdf_table);
+ mtk_vdec_debug(ctx, "map cdf table to 0x%p\n", remote_cdf_table);
if (instance->cdf_table.va)
mtk_vcodec_mem_free(ctx, &instance->cdf_table);
@@ -801,7 +799,7 @@ static int vdec_av1_slice_init_cdf_table(struct vdec_av1_slice_instance *instanc
static int vdec_av1_slice_init_iq_table(struct vdec_av1_slice_instance *instance)
{
u8 *remote_iq_table;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
struct vdec_av1_slice_init_vsi *vsi;
int ret;
@@ -810,11 +808,11 @@ static int vdec_av1_slice_init_iq_table(struct vdec_av1_slice_instance *instance
remote_iq_table = mtk_vcodec_fw_map_dm_addr(ctx->dev->fw_handler,
(u32)vsi->iq_table_addr);
if (IS_ERR(remote_iq_table)) {
- mtk_vcodec_err(instance, "failed to map iq table\n");
+ mtk_vdec_err(ctx, "failed to map iq table\n");
return PTR_ERR(remote_iq_table);
}
- mtk_vcodec_debug(instance, "map iq table to 0x%p\n", remote_iq_table);
+ mtk_vdec_debug(ctx, "map iq table to 0x%p\n", remote_iq_table);
if (instance->iq_table.va)
mtk_vcodec_mem_free(ctx, &instance->iq_table);
@@ -862,8 +860,8 @@ static void vdec_av1_slice_decrease_ref_count(struct vdec_av1_slice_slot *slots,
frame_info[fb_idx].ref_count--;
if (frame_info[fb_idx].ref_count < 0) {
frame_info[fb_idx].ref_count = 0;
- mtk_v4l2_err("av1_error: %s() fb_idx %d decrease ref_count error\n",
- __func__, fb_idx);
+ pr_err(MTK_DBG_V4L2_STR "av1_error: %s() fb_idx %d decrease ref_count error\n",
+ __func__, fb_idx);
}
vdec_av1_slice_clear_fb(&frame_info[fb_idx]);
@@ -911,7 +909,7 @@ static void vdec_av1_slice_setup_slot(struct vdec_av1_slice_instance *instance,
vsi->slot_id = vdec_av1_slice_get_new_slot(vsi);
if (vsi->slot_id == AV1_INVALID_IDX) {
- mtk_v4l2_err("warning:av1 get invalid index slot\n");
+ mtk_v4l2_vdec_err(instance->ctx, "warning:av1 get invalid index slot\n");
vsi->slot_id = 0;
}
cur_frame_info = &vsi->slots.frame_info[vsi->slot_id];
@@ -938,7 +936,7 @@ static void vdec_av1_slice_setup_slot(struct vdec_av1_slice_instance *instance,
static int vdec_av1_slice_alloc_working_buffer(struct vdec_av1_slice_instance *instance,
struct vdec_av1_slice_vsi *vsi)
{
- struct mtk_vcodec_ctx *ctx = instance->ctx;
+ struct mtk_vcodec_dec_ctx *ctx = instance->ctx;
enum vdec_av1_slice_resolution_level level;
u32 max_sb_w, max_sb_h, max_w, max_h, w, h;
int i, ret;
@@ -965,8 +963,8 @@ static int vdec_av1_slice_alloc_working_buffer(struct vdec_av1_slice_instance *i
if (level == instance->level)
return 0;
- mtk_vcodec_debug(instance, "resolution level changed from %u to %u, %ux%u",
- instance->level, level, w, h);
+ mtk_vdec_debug(ctx, "resolution level changed from %u to %u, %ux%u",
+ instance->level, level, w, h);
max_sb_w = DIV_ROUND_UP(max_w, 128);
max_sb_h = DIV_ROUND_UP(max_h, 128);
@@ -1021,7 +1019,7 @@ err:
static void vdec_av1_slice_free_working_buffer(struct vdec_av1_slice_instance *instance)
{
- struct mtk_vcodec_ctx *ctx = instance->ctx;
+ struct mtk_vcodec_dec_ctx *ctx = instance->ctx;
int i;
for (i = 0; i < ARRAY_SIZE(instance->mv); i++)
@@ -1400,17 +1398,17 @@ static int vdec_av1_slice_setup_tile_group(struct vdec_av1_slice_instance *insta
if (tile_group->num_tiles != tge_size ||
tile_group->num_tiles > V4L2_AV1_MAX_TILE_COUNT) {
- mtk_vcodec_err(instance, "invalid tge_size %d, tile_num:%d\n",
- tge_size, tile_group->num_tiles);
+ mtk_vdec_err(instance->ctx, "invalid tge_size %d, tile_num:%d\n",
+ tge_size, tile_group->num_tiles);
return -EINVAL;
}
for (i = 0; i < tge_size; i++) {
if (i != ctrl_tge[i].tile_row * vsi->frame.uh.tile.tile_cols +
ctrl_tge[i].tile_col) {
- mtk_vcodec_err(instance, "invalid tge info %d, %d %d %d\n",
- i, ctrl_tge[i].tile_row, ctrl_tge[i].tile_col,
- vsi->frame.uh.tile.tile_rows);
+ mtk_vdec_err(instance->ctx, "invalid tge info %d, %d %d %d\n",
+ i, ctrl_tge[i].tile_row, ctrl_tge[i].tile_col,
+ vsi->frame.uh.tile.tile_rows);
return -EINVAL;
}
tile_group->tile_size[i] = ctrl_tge[i].tile_size;
@@ -1505,8 +1503,8 @@ static void vdec_av1_slice_setup_ref(struct vdec_av1_slice_pfc *pfc,
slot_id = frame->ref_frame_map[ref_idx];
frame_info = &slots->frame_info[slot_id];
if (slot_id == AV1_INVALID_IDX) {
- mtk_v4l2_err("cannot match reference[%d] 0x%llx\n", i,
- ctrl_fh->reference_frame_ts[ref_idx]);
+ pr_err(MTK_DBG_V4L2_STR "cannot match reference[%d] 0x%llx\n", i,
+ ctrl_fh->reference_frame_ts[ref_idx]);
frame->order_hints[i] = 0;
frame->ref_frame_valid[i] = 0;
continue;
@@ -1639,7 +1637,7 @@ static void vdec_av1_slice_setup_seg_buffer(struct vdec_av1_slice_instance *inst
/* reset segment buffer */
if (uh->primary_ref_frame == AV1_PRIMARY_REF_NONE || !uh->seg.segmentation_enabled) {
- mtk_vcodec_debug(instance, "reset seg %d\n", vsi->slot_id);
+ mtk_vdec_debug(instance->ctx, "reset seg %d\n", vsi->slot_id);
if (vsi->slot_id != AV1_INVALID_IDX) {
buf = &instance->seg[vsi->slot_id];
memset(buf->va, 0, buf->size);
@@ -1658,9 +1656,9 @@ static void vdec_av1_slice_setup_tile_buffer(struct vdec_av1_slice_instance *ins
u32 allow_update_cdf = 0;
u32 sb_boundary_x_m1 = 0, sb_boundary_y_m1 = 0;
int tile_info_base;
- u32 tile_buf_pa;
+ u64 tile_buf_pa;
u32 *tile_info_buf = instance->tile.va;
- u32 pa = (u32)bs->dma_addr;
+ u64 pa = (u64)bs->dma_addr;
if (uh->disable_cdf_update == 0)
allow_update_cdf = 1;
@@ -1673,8 +1671,12 @@ static void vdec_av1_slice_setup_tile_buffer(struct vdec_av1_slice_instance *ins
tile_info_buf[tile_info_base + 0] = (tile_group->tile_size[tile_num] << 3);
tile_buf_pa = pa + tile_group->tile_start_offset[tile_num];
- tile_info_buf[tile_info_base + 1] = (tile_buf_pa >> 4) << 4;
- tile_info_buf[tile_info_base + 2] = (tile_buf_pa % 16) << 3;
+ /*
+ * Store the high 4 bits (bits 32-35) of the AV1 tile address in the
+ * lower 4 bits of the word and clear them from their original
+ * position, as the hardware requires.
+ */
+ tile_info_buf[tile_info_base + 1] = (tile_buf_pa & 0xFFFFFFF0ull) |
+ ((tile_buf_pa & 0xF00000000ull) >> 32);
+ tile_info_buf[tile_info_base + 2] = (tile_buf_pa & 0xFull) << 3;
sb_boundary_x_m1 =
(tile->mi_col_starts[tile_col + 1] - tile->mi_col_starts[tile_col] - 1) &
@@ -1690,18 +1692,18 @@ static void vdec_av1_slice_setup_tile_buffer(struct vdec_av1_slice_instance *ins
uh->disable_frame_end_update_cdf == 0)
tile_info_buf[tile_info_base + 4] |= (1 << 17);
- mtk_vcodec_debug(instance, "// tile buf %d pos(%dx%d) offset 0x%x\n",
- tile_num, tile_row, tile_col, tile_info_base);
- mtk_vcodec_debug(instance, "// %08x %08x %08x %08x\n",
- tile_info_buf[tile_info_base + 0],
- tile_info_buf[tile_info_base + 1],
- tile_info_buf[tile_info_base + 2],
- tile_info_buf[tile_info_base + 3]);
- mtk_vcodec_debug(instance, "// %08x %08x %08x %08x\n",
- tile_info_buf[tile_info_base + 4],
- tile_info_buf[tile_info_base + 5],
- tile_info_buf[tile_info_base + 6],
- tile_info_buf[tile_info_base + 7]);
+ mtk_vdec_debug(instance->ctx, "// tile buf %d pos(%dx%d) offset 0x%x\n",
+ tile_num, tile_row, tile_col, tile_info_base);
+ mtk_vdec_debug(instance->ctx, "// %08x %08x %08x %08x\n",
+ tile_info_buf[tile_info_base + 0],
+ tile_info_buf[tile_info_base + 1],
+ tile_info_buf[tile_info_base + 2],
+ tile_info_buf[tile_info_base + 3]);
+ mtk_vdec_debug(instance->ctx, "// %08x %08x %08x %08x\n",
+ tile_info_buf[tile_info_base + 4],
+ tile_info_buf[tile_info_base + 5],
+ tile_info_buf[tile_info_base + 6],
+ tile_info_buf[tile_info_base + 7]);
}
}
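An aside on the tile-address packing in the hunk above: the tile descriptor carries two 32-bit words, so a DMA address wider than 32 bits has to be folded into them. The following stand-alone sketch is an editorial illustration only, not part of the patch; the helper names are hypothetical, and the reconstruction step assumes the hardware reads the fields exactly as the comment describes.

#include <assert.h>
#include <stdint.h>

/* Pack a 36-bit tile DMA address into two 32-bit descriptor words,
 * mirroring the hunk above: word1 keeps bits 4-31 in place and stores
 * bits 32-35 in its low nibble; word2 carries the low nibble of the
 * address shifted left by 3.
 */
static void pack_tile_addr(uint64_t tile_buf_pa, uint32_t *word1, uint32_t *word2)
{
	*word1 = (uint32_t)((tile_buf_pa & 0xFFFFFFF0ull) |
			    ((tile_buf_pa & 0xF00000000ull) >> 32));
	*word2 = (uint32_t)((tile_buf_pa & 0xFull) << 3);
}

/* Reconstruct the original address from the two words (an assumed
 * inverse, shown for illustration only).
 */
static uint64_t unpack_tile_addr(uint32_t word1, uint32_t word2)
{
	return ((uint64_t)(word1 & 0xF) << 32) |
	       (uint64_t)(word1 & 0xFFFFFFF0u) |
	       (uint64_t)((word2 >> 3) & 0xF);
}

int main(void)
{
	uint64_t pa = 0x8ABCDEF12ull; /* example 36-bit DMA address */
	uint32_t w1, w2;

	pack_tile_addr(pa, &w1, &w2);
	assert(unpack_tile_addr(w1, w2) == pa);
	return 0;
}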
@@ -1743,8 +1745,8 @@ static int vdec_av1_slice_update_lat(struct vdec_av1_slice_instance *instance,
struct vdec_av1_slice_vsi *vsi;
vsi = &pfc->vsi;
- mtk_vcodec_debug(instance, "frame %u LAT CRC 0x%08x, output size is %d\n",
- pfc->seq, vsi->state.crc[0], vsi->state.out_size);
+ mtk_vdec_debug(instance->ctx, "frame %u LAT CRC 0x%08x, output size is %d\n",
+ pfc->seq, vsi->state.crc[0], vsi->state.out_size);
/* buffer full, need to re-decode */
if (vsi->state.full) {
@@ -1855,17 +1857,17 @@ static int vdec_av1_slice_update_core(struct vdec_av1_slice_instance *instance,
{
struct vdec_av1_slice_vsi *vsi = instance->core_vsi;
- mtk_vcodec_debug(instance, "frame %u Y_CRC %08x %08x %08x %08x\n",
- pfc->seq, vsi->state.crc[0], vsi->state.crc[1],
- vsi->state.crc[2], vsi->state.crc[3]);
- mtk_vcodec_debug(instance, "frame %u C_CRC %08x %08x %08x %08x\n",
- pfc->seq, vsi->state.crc[8], vsi->state.crc[9],
- vsi->state.crc[10], vsi->state.crc[11]);
+ mtk_vdec_debug(instance->ctx, "frame %u Y_CRC %08x %08x %08x %08x\n",
+ pfc->seq, vsi->state.crc[0], vsi->state.crc[1],
+ vsi->state.crc[2], vsi->state.crc[3]);
+ mtk_vdec_debug(instance->ctx, "frame %u C_CRC %08x %08x %08x %08x\n",
+ pfc->seq, vsi->state.crc[8], vsi->state.crc[9],
+ vsi->state.crc[10], vsi->state.crc[11]);
return 0;
}
-static int vdec_av1_slice_init(struct mtk_vcodec_ctx *ctx)
+static int vdec_av1_slice_init(struct mtk_vcodec_dec_ctx *ctx)
{
struct vdec_av1_slice_instance *instance;
struct vdec_av1_slice_init_vsi *vsi;
@@ -1883,14 +1885,14 @@ static int vdec_av1_slice_init(struct mtk_vcodec_ctx *ctx)
ret = vpu_dec_init(&instance->vpu);
if (ret) {
- mtk_vcodec_err(instance, "failed to init vpu dec, ret %d\n", ret);
+ mtk_vdec_err(ctx, "failed to init vpu dec, ret %d\n", ret);
goto error_vpu_init;
}
/* init vsi and global flags */
vsi = instance->vpu.vsi;
if (!vsi) {
- mtk_vcodec_err(instance, "failed to get AV1 vsi\n");
+ mtk_vdec_err(ctx, "failed to get AV1 vsi\n");
ret = -EINVAL;
goto error_vsi;
}
@@ -1898,20 +1900,20 @@ static int vdec_av1_slice_init(struct mtk_vcodec_ctx *ctx)
instance->core_vsi = mtk_vcodec_fw_map_dm_addr(ctx->dev->fw_handler, (u32)vsi->core_vsi);
if (!instance->core_vsi) {
- mtk_vcodec_err(instance, "failed to get AV1 core vsi\n");
+ mtk_vdec_err(ctx, "failed to get AV1 core vsi\n");
ret = -EINVAL;
goto error_vsi;
}
if (vsi->vsi_size != sizeof(struct vdec_av1_slice_vsi))
- mtk_vcodec_err(instance, "remote vsi size 0x%x mismatch! expected: 0x%zx\n",
- vsi->vsi_size, sizeof(struct vdec_av1_slice_vsi));
+ mtk_vdec_err(ctx, "remote vsi size 0x%x mismatch! expected: 0x%zx\n",
+ vsi->vsi_size, sizeof(struct vdec_av1_slice_vsi));
instance->irq_enabled = 1;
instance->inneracing_mode = IS_VDEC_INNER_RACING(instance->ctx->dev->dec_capability);
- mtk_vcodec_debug(instance, "vsi 0x%p core_vsi 0x%llx 0x%p, inneracing_mode %d\n",
- vsi, vsi->core_vsi, instance->core_vsi, instance->inneracing_mode);
+ mtk_vdec_debug(ctx, "vsi 0x%p core_vsi 0x%llx 0x%p, inneracing_mode %d\n",
+ vsi, vsi->core_vsi, instance->core_vsi, instance->inneracing_mode);
ret = vdec_av1_slice_init_cdf_table(instance);
if (ret)
@@ -1938,7 +1940,7 @@ static void vdec_av1_slice_deinit(void *h_vdec)
if (!instance)
return;
- mtk_vcodec_debug(instance, "h_vdec 0x%p\n", h_vdec);
+ mtk_vdec_debug(instance->ctx, "h_vdec 0x%p\n", h_vdec);
vpu_dec_deinit(&instance->vpu);
vdec_av1_slice_free_working_buffer(instance);
vdec_msg_queue_deinit(&instance->ctx->msg_queue, instance->ctx);
@@ -1951,7 +1953,7 @@ static int vdec_av1_slice_flush(void *h_vdec, struct mtk_vcodec_mem *bs,
struct vdec_av1_slice_instance *instance = h_vdec;
int i;
- mtk_vcodec_debug(instance, "flush ...\n");
+ mtk_vdec_debug(instance->ctx, "flush ...\n");
vdec_msg_queue_wait_lat_buf_full(&instance->ctx->msg_queue);
@@ -1963,10 +1965,10 @@ static int vdec_av1_slice_flush(void *h_vdec, struct mtk_vcodec_mem *bs,
static void vdec_av1_slice_get_pic_info(struct vdec_av1_slice_instance *instance)
{
- struct mtk_vcodec_ctx *ctx = instance->ctx;
+ struct mtk_vcodec_dec_ctx *ctx = instance->ctx;
u32 data[3];
- mtk_vcodec_debug(instance, "w %u h %u\n", ctx->picinfo.pic_w, ctx->picinfo.pic_h);
+ mtk_vdec_debug(ctx, "w %u h %u\n", ctx->picinfo.pic_w, ctx->picinfo.pic_h);
data[0] = ctx->picinfo.pic_w;
data[1] = ctx->picinfo.pic_h;
@@ -1989,15 +1991,15 @@ static inline void vdec_av1_slice_get_dpb_size(struct vdec_av1_slice_instance *i
static void vdec_av1_slice_get_crop_info(struct vdec_av1_slice_instance *instance,
struct v4l2_rect *cr)
{
- struct mtk_vcodec_ctx *ctx = instance->ctx;
+ struct mtk_vcodec_dec_ctx *ctx = instance->ctx;
cr->left = 0;
cr->top = 0;
cr->width = ctx->picinfo.pic_w;
cr->height = ctx->picinfo.pic_h;
- mtk_vcodec_debug(instance, "l=%d, t=%d, w=%d, h=%d\n",
- cr->left, cr->top, cr->width, cr->height);
+ mtk_vdec_debug(ctx, "l=%d, t=%d, w=%d, h=%d\n",
+ cr->left, cr->top, cr->width, cr->height);
}
static int vdec_av1_slice_get_param(void *h_vdec, enum vdec_get_param_type type, void *out)
@@ -2015,7 +2017,7 @@ static int vdec_av1_slice_get_param(void *h_vdec, enum vdec_get_param_type type,
vdec_av1_slice_get_crop_info(instance, out);
break;
default:
- mtk_vcodec_err(instance, "invalid get parameter type=%d\n", type);
+ mtk_vdec_err(instance->ctx, "invalid get parameter type=%d\n", type);
return -EINVAL;
}
@@ -2029,7 +2031,7 @@ static int vdec_av1_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
struct vdec_lat_buf *lat_buf;
struct vdec_av1_slice_pfc *pfc;
struct vdec_av1_slice_vsi *vsi;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
int ret;
if (!instance || !instance->ctx)
@@ -2039,7 +2041,7 @@ static int vdec_av1_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
/* init msgQ for the first time */
if (vdec_msg_queue_init(&ctx->msg_queue, ctx,
vdec_av1_slice_core_decode, sizeof(*pfc))) {
- mtk_vcodec_err(instance, "failed to init AV1 msg queue\n");
+ mtk_vdec_err(ctx, "failed to init AV1 msg queue\n");
return -ENOMEM;
}
@@ -2049,7 +2051,7 @@ static int vdec_av1_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
lat_buf = vdec_msg_queue_dqbuf(&ctx->msg_queue.lat_ctx);
if (!lat_buf) {
- mtk_vcodec_err(instance, "failed to get AV1 lat buf\n");
+ mtk_vdec_err(ctx, "failed to get AV1 lat buf\n");
return -EAGAIN;
}
pfc = (struct vdec_av1_slice_pfc *)lat_buf->private_data;
@@ -2061,14 +2063,14 @@ static int vdec_av1_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
ret = vdec_av1_slice_setup_lat(instance, bs, lat_buf, pfc);
if (ret) {
- mtk_vcodec_err(instance, "failed to setup AV1 lat ret %d\n", ret);
+ mtk_vdec_err(ctx, "failed to setup AV1 lat ret %d\n", ret);
goto err_free_fb_out;
}
vdec_av1_slice_vsi_to_remote(vsi, instance->vsi);
ret = vpu_dec_start(&instance->vpu, NULL, 0);
if (ret) {
- mtk_vcodec_err(instance, "failed to dec AV1 ret %d\n", ret);
+ mtk_vdec_err(ctx, "failed to dec AV1 ret %d\n", ret);
goto err_free_fb_out;
}
if (instance->inneracing_mode)
@@ -2080,7 +2082,7 @@ static int vdec_av1_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
MTK_VDEC_LAT0);
/* update remote vsi if decode timeout */
if (ret) {
- mtk_vcodec_err(instance, "AV1 Frame %d decode timeout %d\n", pfc->seq, ret);
+ mtk_vdec_err(ctx, "AV1 Frame %d decode timeout %d\n", pfc->seq, ret);
WRITE_ONCE(instance->vsi->state.timeout, 1);
}
vpu_dec_end(&instance->vpu);
@@ -2091,7 +2093,7 @@ static int vdec_av1_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
/* LAT trans full, re-decode */
if (ret == -EAGAIN) {
- mtk_vcodec_err(instance, "AV1 Frame %d trans full\n", pfc->seq);
+ mtk_vdec_err(ctx, "AV1 Frame %d trans full\n", pfc->seq);
if (!instance->inneracing_mode)
vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
return 0;
@@ -2099,14 +2101,14 @@ static int vdec_av1_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
/* LAT trans full, no more UBE or decode timeout */
if (ret == -ENOMEM || vsi->state.timeout) {
- mtk_vcodec_err(instance, "AV1 Frame %d insufficient buffer or timeout\n", pfc->seq);
+ mtk_vdec_err(ctx, "AV1 Frame %d insufficient buffer or timeout\n", pfc->seq);
if (!instance->inneracing_mode)
vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
return -EBUSY;
}
vsi->trans.dma_addr_end += ctx->msg_queue.wdma_addr.dma_addr;
- mtk_vcodec_debug(instance, "lat dma 1 0x%pad 0x%pad\n",
- &pfc->vsi.trans.dma_addr, &pfc->vsi.trans.dma_addr_end);
+ mtk_vdec_debug(ctx, "lat dma 1 0x%pad 0x%pad\n",
+ &pfc->vsi.trans.dma_addr, &pfc->vsi.trans.dma_addr_end);
vdec_msg_queue_update_ube_wptr(&ctx->msg_queue, vsi->trans.dma_addr_end);
@@ -2120,7 +2122,7 @@ err_free_fb_out:
vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);
if (pfc)
- mtk_vcodec_err(instance, "slice dec number: %d err: %d", pfc->seq, ret);
+ mtk_vdec_err(ctx, "slice dec number: %d err: %d", pfc->seq, ret);
return ret;
}
@@ -2129,7 +2131,7 @@ static int vdec_av1_slice_core_decode(struct vdec_lat_buf *lat_buf)
{
struct vdec_av1_slice_instance *instance;
struct vdec_av1_slice_pfc *pfc;
- struct mtk_vcodec_ctx *ctx = NULL;
+ struct mtk_vcodec_dec_ctx *ctx = NULL;
struct vdec_fb *fb = NULL;
int ret = -EINVAL;
@@ -2153,13 +2155,13 @@ static int vdec_av1_slice_core_decode(struct vdec_lat_buf *lat_buf)
ret = vdec_av1_slice_setup_core(instance, fb, lat_buf, pfc);
if (ret) {
- mtk_vcodec_err(instance, "vdec_av1_slice_setup_core\n");
+ mtk_vdec_err(ctx, "vdec_av1_slice_setup_core\n");
goto err;
}
vdec_av1_slice_vsi_to_remote(&pfc->vsi, instance->core_vsi);
ret = vpu_dec_core(&instance->vpu);
if (ret) {
- mtk_vcodec_err(instance, "vpu_dec_core\n");
+ mtk_vdec_err(ctx, "vpu_dec_core\n");
goto err;
}
@@ -2169,7 +2171,7 @@ static int vdec_av1_slice_core_decode(struct vdec_lat_buf *lat_buf)
MTK_VDEC_CORE);
/* update remote vsi if decode timeout */
if (ret) {
- mtk_vcodec_err(instance, "AV1 frame %d core timeout\n", pfc->seq);
+ mtk_vdec_err(ctx, "AV1 frame %d core timeout\n", pfc->seq);
WRITE_ONCE(instance->vsi->state.timeout, 1);
}
vpu_dec_core_end(&instance->vpu);
@@ -2177,12 +2179,12 @@ static int vdec_av1_slice_core_decode(struct vdec_lat_buf *lat_buf)
ret = vdec_av1_slice_update_core(instance, lat_buf, pfc);
if (ret) {
- mtk_vcodec_err(instance, "vdec_av1_slice_update_core\n");
+ mtk_vdec_err(ctx, "vdec_av1_slice_update_core\n");
goto err;
}
- mtk_vcodec_debug(instance, "core dma_addr_end 0x%pad\n",
- &instance->core_vsi->trans.dma_addr_end);
+ mtk_vdec_debug(ctx, "core dma_addr_end 0x%pad\n",
+ &instance->core_vsi->trans.dma_addr_end);
vdec_msg_queue_update_ube_rptr(&ctx->msg_queue, instance->core_vsi->trans.dma_addr_end);
ctx->dev->vdec_pdata->cap_to_disp(ctx, 0, lat_buf->src_buf_req);
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_if.c
index 481655bb6016..bf7dffe60d07 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_if.c
@@ -8,9 +8,8 @@
#include <linux/slab.h>
#include "../vdec_drv_if.h"
-#include "../mtk_vcodec_util.h"
#include "../mtk_vcodec_dec.h"
-#include "../mtk_vcodec_intr.h"
+#include "../../common/mtk_vcodec_intr.h"
#include "../vdec_vpu_if.h"
#include "../vdec_drv_base.h"
@@ -117,7 +116,7 @@ struct vdec_h264_vsi {
/**
* struct vdec_h264_inst - h264 decoder instance
* @num_nalu : how many nalus be decoded
- * @ctx : point to mtk_vcodec_ctx
+ * @ctx : pointer to mtk_vcodec_dec_ctx
* @pred_buf : HW working predication buffer
* @mv_buf : HW working motion vector buffer
* @vpu : VPU instance
@@ -125,7 +124,7 @@ struct vdec_h264_vsi {
*/
struct vdec_h264_inst {
unsigned int num_nalu;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
struct mtk_vcodec_mem pred_buf;
struct mtk_vcodec_mem mv_buf[H264_MAX_FB_NUM];
struct vdec_vpu_inst vpu;
@@ -144,7 +143,7 @@ static int allocate_predication_buf(struct vdec_h264_inst *inst)
inst->pred_buf.size = BUF_PREDICTION_SZ;
err = mtk_vcodec_mem_alloc(inst->ctx, &inst->pred_buf);
if (err) {
- mtk_vcodec_err(inst, "failed to allocate ppl buf");
+ mtk_vdec_err(inst->ctx, "failed to allocate ppl buf");
return err;
}
@@ -156,8 +155,6 @@ static void free_predication_buf(struct vdec_h264_inst *inst)
{
struct mtk_vcodec_mem *mem = NULL;
- mtk_vcodec_debug_enter(inst);
-
inst->vsi->pred_buf_dma = 0;
mem = &inst->pred_buf;
if (mem->va)
@@ -178,7 +175,7 @@ static int alloc_mv_buf(struct vdec_h264_inst *inst, struct vdec_pic_info *pic)
mem->size = buf_sz;
err = mtk_vcodec_mem_alloc(inst->ctx, mem);
if (err) {
- mtk_vcodec_err(inst, "failed to allocate mv buf");
+ mtk_vdec_err(inst->ctx, "failed to allocate mv buf");
return err;
}
inst->vsi->mv_buf_dma[i] = mem->dma_addr;
@@ -209,9 +206,9 @@ static int check_list_validity(struct vdec_h264_inst *inst, bool disp_list)
if (list->count > H264_MAX_FB_NUM ||
list->read_idx >= H264_MAX_FB_NUM ||
list->write_idx >= H264_MAX_FB_NUM) {
- mtk_vcodec_err(inst, "%s list err: cnt=%d r_idx=%d w_idx=%d",
- disp_list ? "disp" : "free", list->count,
- list->read_idx, list->write_idx);
+ mtk_vdec_err(inst->ctx, "%s list err: cnt=%d r_idx=%d w_idx=%d",
+ disp_list ? "disp" : "free", list->count,
+ list->read_idx, list->write_idx);
return -EINVAL;
}
@@ -228,12 +225,12 @@ static void put_fb_to_free(struct vdec_h264_inst *inst, struct vdec_fb *fb)
list = &inst->vsi->list_free;
if (list->count == H264_MAX_FB_NUM) {
- mtk_vcodec_err(inst, "[FB] put fb free_list full");
+ mtk_vdec_err(inst->ctx, "[FB] put fb free_list full");
return;
}
- mtk_vcodec_debug(inst, "[FB] put fb into free_list @(%p, %llx)",
- fb->base_y.va, (u64)fb->base_y.dma_addr);
+ mtk_vdec_debug(inst->ctx, "[FB] put fb into free_list @(%p, %llx)",
+ fb->base_y.va, (u64)fb->base_y.dma_addr);
list->fb_list[list->write_idx].vdec_fb_va = (u64)(uintptr_t)fb;
list->write_idx = (list->write_idx == H264_MAX_FB_NUM - 1) ?
@@ -246,10 +243,9 @@ static void get_pic_info(struct vdec_h264_inst *inst,
struct vdec_pic_info *pic)
{
*pic = inst->vsi->pic;
- mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
- pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
- mtk_vcodec_debug(inst, "fb size: Y(%d), C(%d)",
- pic->fb_sz[0], pic->fb_sz[1]);
+ mtk_vdec_debug(inst->ctx, "pic(%d, %d), buf(%d, %d)",
+ pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
+ mtk_vdec_debug(inst->ctx, "fb size: Y(%d), C(%d)", pic->fb_sz[0], pic->fb_sz[1]);
}
static void get_crop_info(struct vdec_h264_inst *inst, struct v4l2_rect *cr)
@@ -259,17 +255,17 @@ static void get_crop_info(struct vdec_h264_inst *inst, struct v4l2_rect *cr)
cr->width = inst->vsi->crop.width;
cr->height = inst->vsi->crop.height;
- mtk_vcodec_debug(inst, "l=%d, t=%d, w=%d, h=%d",
- cr->left, cr->top, cr->width, cr->height);
+ mtk_vdec_debug(inst->ctx, "l=%d, t=%d, w=%d, h=%d", cr->left, cr->top,
+ cr->width, cr->height);
}
static void get_dpb_size(struct vdec_h264_inst *inst, unsigned int *dpb_sz)
{
*dpb_sz = inst->vsi->dec.dpb_sz;
- mtk_vcodec_debug(inst, "sz=%d", *dpb_sz);
+ mtk_vdec_debug(inst->ctx, "sz=%d", *dpb_sz);
}
-static int vdec_h264_init(struct mtk_vcodec_ctx *ctx)
+static int vdec_h264_init(struct mtk_vcodec_dec_ctx *ctx)
{
struct vdec_h264_inst *inst = NULL;
int err;
@@ -285,7 +281,7 @@ static int vdec_h264_init(struct mtk_vcodec_ctx *ctx)
err = vpu_dec_init(&inst->vpu);
if (err) {
- mtk_vcodec_err(inst, "vdec_h264 init err=%d", err);
+ mtk_vdec_err(ctx, "vdec_h264 init err=%d", err);
goto error_free_inst;
}
@@ -294,7 +290,7 @@ static int vdec_h264_init(struct mtk_vcodec_ctx *ctx)
if (err)
goto error_deinit;
- mtk_vcodec_debug(inst, "H264 Instance >> %p", inst);
+ mtk_vdec_debug(ctx, "H264 Instance >> %p", inst);
ctx->drv_handle = inst;
return 0;
@@ -311,8 +307,6 @@ static void vdec_h264_deinit(void *h_vdec)
{
struct vdec_h264_inst *inst = (struct vdec_h264_inst *)h_vdec;
- mtk_vcodec_debug_enter(inst);
-
vpu_dec_deinit(&inst->vpu);
free_predication_buf(inst);
free_mv_buf(inst);
@@ -348,8 +342,8 @@ static int vdec_h264_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
uint64_t y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
uint64_t c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
- mtk_vcodec_debug(inst, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p",
- ++inst->num_nalu, y_fb_dma, c_fb_dma, fb);
+ mtk_vdec_debug(inst->ctx, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p",
+ ++inst->num_nalu, y_fb_dma, c_fb_dma, fb);
/* bs NULL means flush decoder */
if (bs == NULL)
@@ -359,15 +353,15 @@ static int vdec_h264_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
buf_sz = bs->size;
nal_start_idx = find_start_code(buf, buf_sz);
if (nal_start_idx < 0) {
- mtk_vcodec_err(inst, "invalid nal start code");
+ mtk_vdec_err(inst->ctx, "invalid nal start code");
err = -EIO;
goto err_free_fb_out;
}
nal_start = buf[nal_start_idx];
nal_type = NAL_TYPE(buf[nal_start_idx]);
- mtk_vcodec_debug(inst, "\n + NALU[%d] type %d +\n", inst->num_nalu,
- nal_type);
+ mtk_vdec_debug(inst->ctx, "\n + NALU[%d] type %d +\n", inst->num_nalu,
+ nal_type);
if (nal_type == NAL_H264_PPS) {
buf_sz -= nal_start_idx;
@@ -388,8 +382,7 @@ static int vdec_h264_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
err = vpu_dec_start(vpu, data, 2);
if (err) {
if (err > 0 && (DEC_ERR_RET(err) == H264_ERR_NOT_VALID)) {
- mtk_vcodec_err(inst, "- error bitstream - err = %d -",
- err);
+ mtk_vdec_err(inst->ctx, "- error bitstream - err = %d -", err);
err = -EIO;
}
goto err_free_fb_out;
@@ -399,7 +392,7 @@ static int vdec_h264_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
if (*res_chg) {
struct vdec_pic_info pic;
- mtk_vcodec_debug(inst, "- resolution changed -");
+ mtk_vdec_debug(inst->ctx, "- resolution changed -");
get_pic_info(inst, &pic);
if (inst->vsi->dec.realloc_mv_buf) {
@@ -420,13 +413,12 @@ static int vdec_h264_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
vpu_dec_end(vpu);
}
- mtk_vcodec_debug(inst, "\n - NALU[%d] type=%d -\n", inst->num_nalu,
- nal_type);
+ mtk_vdec_debug(inst->ctx, "\n - NALU[%d] type=%d -\n", inst->num_nalu, nal_type);
return 0;
err_free_fb_out:
put_fb_to_free(inst, fb);
- mtk_vcodec_err(inst, "\n - NALU[%d] err=%d -\n", inst->num_nalu, err);
+ mtk_vdec_err(inst->ctx, "\n - NALU[%d] err=%d -\n", inst->num_nalu, err);
return err;
}
@@ -440,8 +432,7 @@ static void vdec_h264_get_fb(struct vdec_h264_inst *inst,
return;
if (list->count == 0) {
- mtk_vcodec_debug(inst, "[FB] there is no %s fb",
- disp_list ? "disp" : "free");
+ mtk_vdec_debug(inst->ctx, "[FB] there is no %s fb", disp_list ? "disp" : "free");
*out_fb = NULL;
return;
}
@@ -451,10 +442,10 @@ static void vdec_h264_get_fb(struct vdec_h264_inst *inst,
fb->status |= (disp_list ? FB_ST_DISPLAY : FB_ST_FREE);
*out_fb = fb;
- mtk_vcodec_debug(inst, "[FB] get %s fb st=%d poc=%d %llx",
- disp_list ? "disp" : "free",
- fb->status, list->fb_list[list->read_idx].poc,
- list->fb_list[list->read_idx].vdec_fb_va);
+ mtk_vdec_debug(inst->ctx, "[FB] get %s fb st=%d poc=%d %llx",
+ disp_list ? "disp" : "free",
+ fb->status, list->fb_list[list->read_idx].poc,
+ list->fb_list[list->read_idx].vdec_fb_va);
list->read_idx = (list->read_idx == H264_MAX_FB_NUM - 1) ?
0 : list->read_idx + 1;
@@ -488,7 +479,7 @@ static int vdec_h264_get_param(void *h_vdec, enum vdec_get_param_type type,
break;
default:
- mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+ mtk_vdec_err(inst->ctx, "invalid get parameter type=%d", type);
return -EINVAL;
}
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_common.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_common.c
index 580ce979e2a3..5ca20d75dc8e 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_common.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_common.c
@@ -33,7 +33,7 @@ void mtk_vdec_h264_get_ref_list(u8 *ref_list,
memset(&ref_list[num_valid], 0x20, 32 - num_valid);
}
-void *mtk_vdec_h264_get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id)
+void *mtk_vdec_h264_get_ctrl_ptr(struct mtk_vcodec_dec_ctx *ctx, int id)
{
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl, id);
@@ -43,7 +43,7 @@ void *mtk_vdec_h264_get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id)
return ctrl->p_cur.p;
}
-void mtk_vdec_h264_fill_dpb_info(struct mtk_vcodec_ctx *ctx,
+void mtk_vdec_h264_fill_dpb_info(struct mtk_vcodec_dec_ctx *ctx,
struct slice_api_h264_decode_param *decode_params,
struct mtk_h264_dpb_info *h264_dpb_info)
{
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_common.h b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_common.h
index 53d0a7c962a9..ac82be336055 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_common.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_common.h
@@ -13,7 +13,7 @@
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
-#include "../mtk_vcodec_drv.h"
+#include "../mtk_vcodec_dec_drv.h"
#define NAL_NON_IDR_SLICE 0x01
#define NAL_IDR_SLICE 0x05
@@ -182,7 +182,7 @@ void mtk_vdec_h264_get_ref_list(u8 *ref_list,
*
* Return: returns CID ctrl address.
*/
-void *mtk_vdec_h264_get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id);
+void *mtk_vdec_h264_get_ctrl_ptr(struct mtk_vcodec_dec_ctx *ctx, int id);
/**
* mtk_vdec_h264_fill_dpb_info - get each CID contrl address.
@@ -191,7 +191,7 @@ void *mtk_vdec_h264_get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id);
* @decode_params: slice decode params
* @h264_dpb_info: dpb buffer information
*/
-void mtk_vdec_h264_fill_dpb_info(struct mtk_vcodec_ctx *ctx,
+void mtk_vdec_h264_fill_dpb_info(struct mtk_vcodec_dec_ctx *ctx,
struct slice_api_h264_decode_param *decode_params,
struct mtk_h264_dpb_info *h264_dpb_info);
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
index 4bc05ab5afea..5600f1df653d 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c
@@ -6,9 +6,8 @@
#include <media/v4l2-h264.h>
#include <media/videobuf2-dma-contig.h>
-#include "../mtk_vcodec_util.h"
#include "../mtk_vcodec_dec.h"
-#include "../mtk_vcodec_intr.h"
+#include "../../common/mtk_vcodec_intr.h"
#include "../vdec_drv_base.h"
#include "../vdec_drv_if.h"
#include "../vdec_vpu_if.h"
@@ -74,7 +73,7 @@ struct vdec_h264_vsi {
/**
* struct vdec_h264_slice_inst - h264 decoder instance
* @num_nalu : how many nalus be decoded
- * @ctx : point to mtk_vcodec_ctx
+ * @ctx : pointer to mtk_vcodec_dec_ctx
* @pred_buf : HW working predication buffer
* @mv_buf : HW working motion vector buffer
* @vpu : VPU instance
@@ -84,7 +83,7 @@ struct vdec_h264_vsi {
*/
struct vdec_h264_slice_inst {
unsigned int num_nalu;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
struct mtk_vcodec_mem pred_buf;
struct mtk_vcodec_mem mv_buf[H264_MAX_MV_NUM];
struct vdec_vpu_inst vpu;
@@ -162,7 +161,7 @@ static int allocate_predication_buf(struct vdec_h264_slice_inst *inst)
inst->pred_buf.size = BUF_PREDICTION_SZ;
err = mtk_vcodec_mem_alloc(inst->ctx, &inst->pred_buf);
if (err) {
- mtk_vcodec_err(inst, "failed to allocate ppl buf");
+ mtk_vdec_err(inst->ctx, "failed to allocate ppl buf");
return err;
}
@@ -174,8 +173,6 @@ static void free_predication_buf(struct vdec_h264_slice_inst *inst)
{
struct mtk_vcodec_mem *mem = &inst->pred_buf;
- mtk_vcodec_debug_enter(inst);
-
inst->vsi_ctx.pred_buf_dma = 0;
if (mem->va)
mtk_vcodec_mem_free(inst->ctx, mem);
@@ -189,7 +186,7 @@ static int alloc_mv_buf(struct vdec_h264_slice_inst *inst,
struct mtk_vcodec_mem *mem = NULL;
unsigned int buf_sz = mtk_vdec_h264_get_mv_buf_size(pic->buf_w, pic->buf_h);
- mtk_v4l2_debug(3, "size = 0x%x", buf_sz);
+ mtk_v4l2_vdec_dbg(3, inst->ctx, "size = 0x%x", buf_sz);
for (i = 0; i < H264_MAX_MV_NUM; i++) {
mem = &inst->mv_buf[i];
if (mem->va)
@@ -197,7 +194,7 @@ static int alloc_mv_buf(struct vdec_h264_slice_inst *inst,
mem->size = buf_sz;
err = mtk_vcodec_mem_alloc(inst->ctx, mem);
if (err) {
- mtk_vcodec_err(inst, "failed to allocate mv buf");
+ mtk_vdec_err(inst->ctx, "failed to allocate mv buf");
return err;
}
inst->vsi_ctx.mv_buf_dma[i] = mem->dma_addr;
@@ -222,7 +219,7 @@ static void free_mv_buf(struct vdec_h264_slice_inst *inst)
static void get_pic_info(struct vdec_h264_slice_inst *inst,
struct vdec_pic_info *pic)
{
- struct mtk_vcodec_ctx *ctx = inst->ctx;
+ struct mtk_vcodec_dec_ctx *ctx = inst->ctx;
ctx->picinfo.buf_w = ALIGN(ctx->picinfo.pic_w, VCODEC_DEC_ALIGNED_64);
ctx->picinfo.buf_h = ALIGN(ctx->picinfo.pic_h, VCODEC_DEC_ALIGNED_64);
@@ -232,11 +229,11 @@ static void get_pic_info(struct vdec_h264_slice_inst *inst,
ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes;
*pic = ctx->picinfo;
- mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
- ctx->picinfo.pic_w, ctx->picinfo.pic_h,
- ctx->picinfo.buf_w, ctx->picinfo.buf_h);
- mtk_vcodec_debug(inst, "Y/C(%d, %d)", ctx->picinfo.fb_sz[0],
- ctx->picinfo.fb_sz[1]);
+ mtk_vdec_debug(inst->ctx, "pic(%d, %d), buf(%d, %d)",
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->picinfo.buf_w, ctx->picinfo.buf_h);
+ mtk_vdec_debug(inst->ctx, "Y/C(%d, %d)", ctx->picinfo.fb_sz[0],
+ ctx->picinfo.fb_sz[1]);
if (ctx->last_decoded_picinfo.pic_w != ctx->picinfo.pic_w ||
ctx->last_decoded_picinfo.pic_h != ctx->picinfo.pic_h) {
@@ -245,12 +242,12 @@ static void get_pic_info(struct vdec_h264_slice_inst *inst,
ctx->last_decoded_picinfo.buf_h != ctx->picinfo.buf_h)
inst->vsi_ctx.dec.realloc_mv_buf = true;
- mtk_v4l2_debug(1, "ResChg: (%d %d) : old(%d, %d) -> new(%d, %d)",
- inst->vsi_ctx.dec.resolution_changed,
- inst->vsi_ctx.dec.realloc_mv_buf,
- ctx->last_decoded_picinfo.pic_w,
- ctx->last_decoded_picinfo.pic_h,
- ctx->picinfo.pic_w, ctx->picinfo.pic_h);
+ mtk_v4l2_vdec_dbg(1, inst->ctx, "ResChg: (%d %d) : old(%d, %d) -> new(%d, %d)",
+ inst->vsi_ctx.dec.resolution_changed,
+ inst->vsi_ctx.dec.realloc_mv_buf,
+ ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h);
}
}
@@ -261,17 +258,17 @@ static void get_crop_info(struct vdec_h264_slice_inst *inst, struct v4l2_rect *c
cr->width = inst->vsi_ctx.crop.width;
cr->height = inst->vsi_ctx.crop.height;
- mtk_vcodec_debug(inst, "l=%d, t=%d, w=%d, h=%d",
- cr->left, cr->top, cr->width, cr->height);
+ mtk_vdec_debug(inst->ctx, "l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
}
static void get_dpb_size(struct vdec_h264_slice_inst *inst, unsigned int *dpb_sz)
{
*dpb_sz = inst->vsi_ctx.dec.dpb_sz;
- mtk_vcodec_debug(inst, "sz=%d", *dpb_sz);
+ mtk_vdec_debug(inst->ctx, "sz=%d", *dpb_sz);
}
-static int vdec_h264_slice_init(struct mtk_vcodec_ctx *ctx)
+static int vdec_h264_slice_init(struct mtk_vcodec_dec_ctx *ctx)
{
struct vdec_h264_slice_inst *inst;
int err;
@@ -287,7 +284,7 @@ static int vdec_h264_slice_init(struct mtk_vcodec_ctx *ctx)
err = vpu_dec_init(&inst->vpu);
if (err) {
- mtk_vcodec_err(inst, "vdec_h264 init err=%d", err);
+ mtk_vdec_err(ctx, "vdec_h264 init err=%d", err);
goto error_free_inst;
}
@@ -299,13 +296,13 @@ static int vdec_h264_slice_init(struct mtk_vcodec_ctx *ctx)
if (err)
goto error_deinit;
- mtk_vcodec_debug(inst, "struct size = %zu,%zu,%zu,%zu\n",
- sizeof(struct mtk_h264_sps_param),
- sizeof(struct mtk_h264_pps_param),
- sizeof(struct mtk_h264_dec_slice_param),
- sizeof(struct mtk_h264_dpb_info));
+ mtk_vdec_debug(ctx, "struct size = %zu,%zu,%zu,%zu\n",
+ sizeof(struct mtk_h264_sps_param),
+ sizeof(struct mtk_h264_pps_param),
+ sizeof(struct mtk_h264_dec_slice_param),
+ sizeof(struct mtk_h264_dpb_info));
- mtk_vcodec_debug(inst, "H264 Instance >> %p", inst);
+ mtk_vdec_debug(ctx, "H264 Instance >> %p", inst);
ctx->drv_handle = inst;
return 0;
@@ -322,8 +319,6 @@ static void vdec_h264_slice_deinit(void *h_vdec)
{
struct vdec_h264_slice_inst *inst = h_vdec;
- mtk_vcodec_debug_enter(inst);
-
vpu_dec_deinit(&inst->vpu);
free_predication_buf(inst);
free_mv_buf(inst);
@@ -358,8 +353,8 @@ static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
- mtk_vcodec_debug(inst, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p",
- inst->num_nalu, y_fb_dma, c_fb_dma, fb);
+ mtk_vdec_debug(inst->ctx, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p",
+ inst->num_nalu, y_fb_dma, c_fb_dma, fb);
inst->vsi_ctx.dec.bs_dma = (uint64_t)bs->dma_addr;
inst->vsi_ctx.dec.y_fb_dma = y_fb_dma;
@@ -384,7 +379,7 @@ static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
*res_chg = inst->vsi_ctx.dec.resolution_changed;
if (*res_chg) {
- mtk_vcodec_debug(inst, "- resolution changed -");
+ mtk_vdec_debug(inst->ctx, "- resolution changed -");
if (inst->vsi_ctx.dec.realloc_mv_buf) {
err = alloc_mv_buf(inst, &inst->ctx->picinfo);
inst->vsi_ctx.dec.realloc_mv_buf = false;
@@ -408,11 +403,11 @@ static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
vpu_dec_end(vpu);
memcpy(&inst->vsi_ctx, inst->vpu.vsi, sizeof(inst->vsi_ctx));
- mtk_vcodec_debug(inst, "\n - NALU[%d]", inst->num_nalu);
+ mtk_vdec_debug(inst->ctx, "\n - NALU[%d]", inst->num_nalu);
return 0;
err_free_fb_out:
- mtk_vcodec_err(inst, "\n - NALU[%d] err=%d -\n", inst->num_nalu, err);
+ mtk_vdec_err(inst->ctx, "\n - NALU[%d] err=%d -\n", inst->num_nalu, err);
return err;
}
@@ -434,7 +429,7 @@ static int vdec_h264_slice_get_param(void *h_vdec, enum vdec_get_param_type type
break;
default:
- mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+ mtk_vdec_err(inst->ctx, "invalid get parameter type=%d", type);
return -EINVAL;
}
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
index a7e8e3257b7f..0e741e0dc8ba 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c
@@ -10,9 +10,8 @@
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
-#include "../mtk_vcodec_util.h"
#include "../mtk_vcodec_dec.h"
-#include "../mtk_vcodec_intr.h"
+#include "../../common/mtk_vcodec_intr.h"
#include "../vdec_drv_base.h"
#include "../vdec_drv_if.h"
#include "../vdec_vpu_if.h"
@@ -133,7 +132,7 @@ struct vdec_h264_slice_share_info {
* struct vdec_h264_slice_inst - h264 decoder instance
*
* @slice_dec_num: how many picture be decoded
- * @ctx: point to mtk_vcodec_ctx
+ * @ctx: pointer to mtk_vcodec_dec_ctx
* @pred_buf: HW working predication buffer
* @mv_buf: HW working motion vector buffer
* @vpu: VPU instance
@@ -153,7 +152,7 @@ struct vdec_h264_slice_share_info {
*/
struct vdec_h264_slice_inst {
unsigned int slice_dec_num;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
struct mtk_vcodec_mem pred_buf;
struct mtk_vcodec_mem mv_buf[H264_MAX_MV_NUM];
struct vdec_vpu_inst vpu;
@@ -199,7 +198,7 @@ static int vdec_h264_slice_fill_decode_parameters(struct vdec_h264_slice_inst *i
return PTR_ERR(pps);
if (dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC) {
- mtk_vcodec_err(inst, "No support for H.264 field decoding.");
+ mtk_vdec_err(inst->ctx, "No support for H.264 field decoding.");
inst->is_field_bitstream = true;
return -EINVAL;
}
@@ -294,7 +293,7 @@ static void vdec_h264_slice_fill_decode_reflist(struct vdec_h264_slice_inst *ins
mtk_vdec_h264_fill_dpb_info(inst->ctx, &slice_param->decode_params,
slice_param->h264_dpb_info);
- mtk_v4l2_debug(3, "cur poc = %d\n", dec_params->bottom_field_order_cnt);
+ mtk_v4l2_vdec_dbg(3, inst->ctx, "cur poc = %d\n", dec_params->bottom_field_order_cnt);
/* Build the reference lists */
v4l2_h264_init_reflist_builder(&reflist_builder, dec_params, sps,
inst->dpb);
@@ -314,7 +313,7 @@ static int vdec_h264_slice_alloc_mv_buf(struct vdec_h264_slice_inst *inst,
struct mtk_vcodec_mem *mem;
int i, err;
- mtk_v4l2_debug(3, "size = 0x%x", buf_sz);
+ mtk_v4l2_vdec_dbg(3, inst->ctx, "size = 0x%x", buf_sz);
for (i = 0; i < H264_MAX_MV_NUM; i++) {
mem = &inst->mv_buf[i];
if (mem->va)
@@ -322,7 +321,7 @@ static int vdec_h264_slice_alloc_mv_buf(struct vdec_h264_slice_inst *inst,
mem->size = buf_sz;
err = mtk_vcodec_mem_alloc(inst->ctx, mem);
if (err) {
- mtk_vcodec_err(inst, "failed to allocate mv buf");
+ mtk_vdec_err(inst->ctx, "failed to allocate mv buf");
return err;
}
}
@@ -344,7 +343,7 @@ static void vdec_h264_slice_free_mv_buf(struct vdec_h264_slice_inst *inst)
static void vdec_h264_slice_get_pic_info(struct vdec_h264_slice_inst *inst)
{
- struct mtk_vcodec_ctx *ctx = inst->ctx;
+ struct mtk_vcodec_dec_ctx *ctx = inst->ctx;
u32 data[3];
data[0] = ctx->picinfo.pic_w;
@@ -359,11 +358,11 @@ static void vdec_h264_slice_get_pic_info(struct vdec_h264_slice_inst *inst)
inst->cap_num_planes =
ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes;
- mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
- ctx->picinfo.pic_w, ctx->picinfo.pic_h,
- ctx->picinfo.buf_w, ctx->picinfo.buf_h);
- mtk_vcodec_debug(inst, "Y/C(%d, %d)", ctx->picinfo.fb_sz[0],
- ctx->picinfo.fb_sz[1]);
+ mtk_vdec_debug(ctx, "pic(%d, %d), buf(%d, %d)",
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->picinfo.buf_w, ctx->picinfo.buf_h);
+ mtk_vdec_debug(ctx, "Y/C(%d, %d)", ctx->picinfo.fb_sz[0],
+ ctx->picinfo.fb_sz[1]);
if (ctx->last_decoded_picinfo.pic_w != ctx->picinfo.pic_w ||
ctx->last_decoded_picinfo.pic_h != ctx->picinfo.pic_h) {
@@ -372,12 +371,12 @@ static void vdec_h264_slice_get_pic_info(struct vdec_h264_slice_inst *inst)
ctx->last_decoded_picinfo.buf_h != ctx->picinfo.buf_h)
inst->realloc_mv_buf = true;
- mtk_v4l2_debug(1, "resChg: (%d %d) : old(%d, %d) -> new(%d, %d)",
- inst->resolution_changed,
- inst->realloc_mv_buf,
- ctx->last_decoded_picinfo.pic_w,
- ctx->last_decoded_picinfo.pic_h,
- ctx->picinfo.pic_w, ctx->picinfo.pic_h);
+ mtk_v4l2_vdec_dbg(1, inst->ctx, "resChg: (%d %d) : old(%d, %d) -> new(%d, %d)",
+ inst->resolution_changed,
+ inst->realloc_mv_buf,
+ ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h);
}
}
@@ -389,11 +388,11 @@ static void vdec_h264_slice_get_crop_info(struct vdec_h264_slice_inst *inst,
cr->width = inst->ctx->picinfo.pic_w;
cr->height = inst->ctx->picinfo.pic_h;
- mtk_vcodec_debug(inst, "l=%d, t=%d, w=%d, h=%d",
- cr->left, cr->top, cr->width, cr->height);
+ mtk_vdec_debug(inst->ctx, "l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
}
-static int vdec_h264_slice_init(struct mtk_vcodec_ctx *ctx)
+static int vdec_h264_slice_init(struct mtk_vcodec_dec_ctx *ctx)
{
struct vdec_h264_slice_inst *inst;
int err, vsi_size;
@@ -412,7 +411,7 @@ static int vdec_h264_slice_init(struct mtk_vcodec_ctx *ctx)
err = vpu_dec_init(&inst->vpu);
if (err) {
- mtk_vcodec_err(inst, "vdec_h264 init err=%d", err);
+ mtk_vdec_err(ctx, "vdec_h264 init err=%d", err);
goto error_free_inst;
}
@@ -423,14 +422,14 @@ static int vdec_h264_slice_init(struct mtk_vcodec_ctx *ctx)
inst->resolution_changed = true;
inst->realloc_mv_buf = true;
- mtk_vcodec_debug(inst, "lat struct size = %d,%d,%d,%d vsi: %d\n",
- (int)sizeof(struct mtk_h264_sps_param),
- (int)sizeof(struct mtk_h264_pps_param),
- (int)sizeof(struct vdec_h264_slice_lat_dec_param),
- (int)sizeof(struct mtk_h264_dpb_info),
- vsi_size);
- mtk_vcodec_debug(inst, "lat H264 instance >> %p, codec_type = 0x%x",
- inst, inst->vpu.codec_type);
+ mtk_vdec_debug(ctx, "lat struct size = %d,%d,%d,%d vsi: %d\n",
+ (int)sizeof(struct mtk_h264_sps_param),
+ (int)sizeof(struct mtk_h264_pps_param),
+ (int)sizeof(struct vdec_h264_slice_lat_dec_param),
+ (int)sizeof(struct mtk_h264_dpb_info),
+ vsi_size);
+ mtk_vdec_debug(ctx, "lat H264 instance >> %p, codec_type = 0x%x",
+ inst, inst->vpu.codec_type);
ctx->drv_handle = inst;
return 0;
@@ -444,8 +443,6 @@ static void vdec_h264_slice_deinit(void *h_vdec)
{
struct vdec_h264_slice_inst *inst = h_vdec;
- mtk_vcodec_debug_enter(inst);
-
vpu_dec_deinit(&inst->vpu);
vdec_h264_slice_free_mv_buf(inst);
vdec_msg_queue_deinit(&inst->ctx->msg_queue, inst->ctx);
@@ -459,21 +456,21 @@ static int vdec_h264_slice_core_decode(struct vdec_lat_buf *lat_buf)
u64 vdec_fb_va;
u64 y_fb_dma, c_fb_dma;
int err, timeout, i;
- struct mtk_vcodec_ctx *ctx = lat_buf->ctx;
+ struct mtk_vcodec_dec_ctx *ctx = lat_buf->ctx;
struct vdec_h264_slice_inst *inst = ctx->drv_handle;
struct vb2_v4l2_buffer *vb2_v4l2;
struct vdec_h264_slice_share_info *share_info = lat_buf->private_data;
struct mtk_vcodec_mem *mem;
struct vdec_vpu_inst *vpu = &inst->vpu;
- mtk_vcodec_debug(inst, "[h264-core] vdec_h264 core decode");
+ mtk_vdec_debug(ctx, "[h264-core] vdec_h264 core decode");
memcpy(&inst->vsi_core->h264_slice_params, &share_info->h264_slice_params,
sizeof(share_info->h264_slice_params));
fb = ctx->dev->vdec_pdata->get_cap_buffer(ctx);
if (!fb) {
err = -EBUSY;
- mtk_vcodec_err(inst, "fb buffer is NULL");
+ mtk_vdec_err(ctx, "fb buffer is NULL");
goto vdec_dec_end;
}
@@ -485,8 +482,7 @@ static int vdec_h264_slice_core_decode(struct vdec_lat_buf *lat_buf)
else
c_fb_dma = (u64)fb->base_c.dma_addr;
- mtk_vcodec_debug(inst, "[h264-core] y/c addr = 0x%llx 0x%llx", y_fb_dma,
- c_fb_dma);
+ mtk_vdec_debug(ctx, "[h264-core] y/c addr = 0x%llx 0x%llx", y_fb_dma, c_fb_dma);
inst->vsi_core->dec.y_fb_dma = y_fb_dma;
inst->vsi_core->dec.c_fb_dma = c_fb_dma;
@@ -516,7 +512,7 @@ static int vdec_h264_slice_core_decode(struct vdec_lat_buf *lat_buf)
err = vpu_dec_core(vpu);
if (err) {
- mtk_vcodec_err(inst, "core decode err=%d", err);
+ mtk_vdec_err(ctx, "core decode err=%d", err);
goto vdec_dec_end;
}
@@ -524,27 +520,26 @@ static int vdec_h264_slice_core_decode(struct vdec_lat_buf *lat_buf)
timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
WAIT_INTR_TIMEOUT_MS, MTK_VDEC_CORE);
if (timeout)
- mtk_vcodec_err(inst, "core decode timeout: pic_%d",
- ctx->decoded_frame_cnt);
+ mtk_vdec_err(ctx, "core decode timeout: pic_%d", ctx->decoded_frame_cnt);
inst->vsi_core->dec.timeout = !!timeout;
vpu_dec_core_end(vpu);
- mtk_vcodec_debug(inst, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
- ctx->decoded_frame_cnt,
- inst->vsi_core->dec.crc[0], inst->vsi_core->dec.crc[1],
- inst->vsi_core->dec.crc[2], inst->vsi_core->dec.crc[3],
- inst->vsi_core->dec.crc[4], inst->vsi_core->dec.crc[5],
- inst->vsi_core->dec.crc[6], inst->vsi_core->dec.crc[7]);
+ mtk_vdec_debug(ctx, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
+ ctx->decoded_frame_cnt,
+ inst->vsi_core->dec.crc[0], inst->vsi_core->dec.crc[1],
+ inst->vsi_core->dec.crc[2], inst->vsi_core->dec.crc[3],
+ inst->vsi_core->dec.crc[4], inst->vsi_core->dec.crc[5],
+ inst->vsi_core->dec.crc[6], inst->vsi_core->dec.crc[7]);
vdec_dec_end:
vdec_msg_queue_update_ube_rptr(&lat_buf->ctx->msg_queue, share_info->trans_end);
ctx->dev->vdec_pdata->cap_to_disp(ctx, !!err, lat_buf->src_buf_req);
- mtk_vcodec_debug(inst, "core decode done err=%d", err);
+ mtk_vdec_debug(ctx, "core decode done err=%d", err);
ctx->decoded_frame_cnt++;
return 0;
}
-static void vdec_h264_insert_startcode(struct mtk_vcodec_dev *vcodec_dev, unsigned char *buf,
+static void vdec_h264_insert_startcode(struct mtk_vcodec_dec_dev *vcodec_dev, unsigned char *buf,
size_t *bs_size, struct mtk_h264_pps_param *pps)
{
struct device *dev = &vcodec_dev->plat_dev->dev;
@@ -596,7 +591,7 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
lat_buf = vdec_msg_queue_dqbuf(&inst->ctx->msg_queue.lat_ctx);
if (!lat_buf) {
- mtk_vcodec_debug(inst, "failed to get lat buffer");
+ mtk_vdec_debug(inst->ctx, "failed to get lat buffer");
return -EAGAIN;
}
share_info = lat_buf->private_data;
@@ -625,7 +620,7 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
*res_chg = inst->resolution_changed;
if (inst->resolution_changed) {
- mtk_vcodec_debug(inst, "- resolution changed -");
+ mtk_vdec_debug(inst->ctx, "- resolution changed -");
if (inst->realloc_mv_buf) {
err = vdec_h264_slice_alloc_mv_buf(inst, &inst->ctx->picinfo);
inst->realloc_mv_buf = false;
@@ -648,19 +643,19 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
inst->vsi->trans_end = inst->ctx->msg_queue.wdma_rptr_addr;
inst->vsi->trans_start = inst->ctx->msg_queue.wdma_wptr_addr;
- mtk_vcodec_debug(inst, "lat:trans(0x%llx 0x%llx) err:0x%llx",
- inst->vsi->wdma_start_addr,
- inst->vsi->wdma_end_addr,
- inst->vsi->wdma_err_addr);
-
- mtk_vcodec_debug(inst, "slice(0x%llx 0x%llx) rprt((0x%llx 0x%llx))",
- inst->vsi->slice_bc_start_addr,
- inst->vsi->slice_bc_end_addr,
- inst->vsi->trans_start,
- inst->vsi->trans_end);
+ mtk_vdec_debug(inst->ctx, "lat:trans(0x%llx 0x%llx) err:0x%llx",
+ inst->vsi->wdma_start_addr,
+ inst->vsi->wdma_end_addr,
+ inst->vsi->wdma_err_addr);
+
+ mtk_vdec_debug(inst->ctx, "slice(0x%llx 0x%llx) rprt((0x%llx 0x%llx))",
+ inst->vsi->slice_bc_start_addr,
+ inst->vsi->slice_bc_end_addr,
+ inst->vsi->trans_start,
+ inst->vsi->trans_end);
err = vpu_dec_start(vpu, data, 2);
if (err) {
- mtk_vcodec_debug(inst, "lat decode err: %d", err);
+ mtk_vdec_debug(inst->ctx, "lat decode err: %d", err);
goto err_free_fb_out;
}
@@ -679,7 +674,7 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
WAIT_INTR_TIMEOUT_MS, MTK_VDEC_LAT0);
if (timeout)
- mtk_vcodec_err(inst, "lat decode timeout: pic_%d", inst->slice_dec_num);
+ mtk_vdec_err(inst->ctx, "lat decode timeout: pic_%d", inst->slice_dec_num);
inst->vsi->dec.timeout = !!timeout;
err = vpu_dec_end(vpu);
@@ -687,7 +682,7 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability))
vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
inst->slice_dec_num++;
- mtk_vcodec_err(inst, "lat dec fail: pic_%d err:%d", inst->slice_dec_num, err);
+ mtk_vdec_err(inst->ctx, "lat dec fail: pic_%d err:%d", inst->slice_dec_num, err);
return -EINVAL;
}
@@ -700,14 +695,14 @@ static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
sizeof(share_info->h264_slice_params));
vdec_msg_queue_qbuf(&inst->ctx->msg_queue.core_ctx, lat_buf);
}
- mtk_vcodec_debug(inst, "dec num: %d lat crc: 0x%x 0x%x 0x%x", inst->slice_dec_num,
- inst->vsi->dec.crc[0], inst->vsi->dec.crc[1], inst->vsi->dec.crc[2]);
+ mtk_vdec_debug(inst->ctx, "dec num: %d lat crc: 0x%x 0x%x 0x%x", inst->slice_dec_num,
+ inst->vsi->dec.crc[0], inst->vsi->dec.crc[1], inst->vsi->dec.crc[2]);
inst->slice_dec_num++;
return 0;
err_free_fb_out:
vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
- mtk_vcodec_err(inst, "slice dec number: %d err: %d", inst->slice_dec_num, err);
+ mtk_vdec_err(inst->ctx, "slice dec number: %d err: %d", inst->slice_dec_num, err);
return err;
}
@@ -734,8 +729,8 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs
y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
- mtk_vcodec_debug(inst, "[h264-dec] [%d] y_dma=%llx c_dma=%llx",
- inst->ctx->decoded_frame_cnt, y_fb_dma, c_fb_dma);
+ mtk_vdec_debug(inst->ctx, "[h264-dec] [%d] y_dma=%llx c_dma=%llx",
+ inst->ctx->decoded_frame_cnt, y_fb_dma, c_fb_dma);
inst->vsi_ctx.dec.bs_buf_addr = (u64)bs->dma_addr;
inst->vsi_ctx.dec.bs_buf_size = bs->size;
@@ -759,7 +754,7 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs
*res_chg = inst->resolution_changed;
if (inst->resolution_changed) {
- mtk_vcodec_debug(inst, "- resolution changed -");
+ mtk_vdec_debug(inst->ctx, "- resolution changed -");
if (inst->realloc_mv_buf) {
err = vdec_h264_slice_alloc_mv_buf(inst, &inst->ctx->picinfo);
inst->realloc_mv_buf = false;
@@ -783,8 +778,7 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs
err = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
WAIT_INTR_TIMEOUT_MS, MTK_VDEC_CORE);
if (err)
- mtk_vcodec_err(inst, "decode timeout: pic_%d",
- inst->ctx->decoded_frame_cnt);
+ mtk_vdec_err(inst->ctx, "decode timeout: pic_%d", inst->ctx->decoded_frame_cnt);
inst->vsi->dec.timeout = !!err;
err = vpu_dec_end(vpu);
@@ -792,19 +786,18 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs
goto err_free_fb_out;
memcpy(&inst->vsi_ctx, inst->vpu.vsi, sizeof(inst->vsi_ctx));
- mtk_vcodec_debug(inst, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
- inst->ctx->decoded_frame_cnt,
- inst->vsi_ctx.dec.crc[0], inst->vsi_ctx.dec.crc[1],
- inst->vsi_ctx.dec.crc[2], inst->vsi_ctx.dec.crc[3],
- inst->vsi_ctx.dec.crc[4], inst->vsi_ctx.dec.crc[5],
- inst->vsi_ctx.dec.crc[6], inst->vsi_ctx.dec.crc[7]);
+ mtk_vdec_debug(inst->ctx, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
+ inst->ctx->decoded_frame_cnt,
+ inst->vsi_ctx.dec.crc[0], inst->vsi_ctx.dec.crc[1],
+ inst->vsi_ctx.dec.crc[2], inst->vsi_ctx.dec.crc[3],
+ inst->vsi_ctx.dec.crc[4], inst->vsi_ctx.dec.crc[5],
+ inst->vsi_ctx.dec.crc[6], inst->vsi_ctx.dec.crc[7]);
inst->ctx->decoded_frame_cnt++;
return 0;
err_free_fb_out:
- mtk_vcodec_err(inst, "dec frame number: %d err: %d",
- inst->ctx->decoded_frame_cnt, err);
+ mtk_vdec_err(inst->ctx, "dec frame number: %d err: %d", inst->ctx->decoded_frame_cnt, err);
return err;
}
@@ -841,7 +834,7 @@ static int vdec_h264_slice_get_param(void *h_vdec, enum vdec_get_param_type type
vdec_h264_slice_get_crop_info(inst, out);
break;
default:
- mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+ mtk_vdec_err(inst->ctx, "invalid get parameter type=%d", type);
return -EINVAL;
}
return 0;
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_hevc_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
index 1e6ab138b0bb..06ed47df693b 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_hevc_req_multi_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c
@@ -8,9 +8,8 @@
#include <linux/slab.h>
#include <media/videobuf2-dma-contig.h>
-#include "../mtk_vcodec_util.h"
#include "../mtk_vcodec_dec.h"
-#include "../mtk_vcodec_intr.h"
+#include "../../common/mtk_vcodec_intr.h"
#include "../vdec_drv_base.h"
#include "../vdec_drv_if.h"
#include "../vdec_vpu_if.h"
@@ -344,7 +343,7 @@ struct vdec_hevc_slice_share_info {
* struct vdec_hevc_slice_inst - hevc decoder instance
*
* @slice_dec_num: how many picture be decoded
- * @ctx: point to mtk_vcodec_ctx
+ * @ctx: point to mtk_vcodec_dec_ctx
* @mv_buf: HW working motion vector buffer
* @vpu: VPU instance
* @vsi: vsi used for lat
@@ -359,7 +358,7 @@ struct vdec_hevc_slice_share_info {
*/
struct vdec_hevc_slice_inst {
unsigned int slice_dec_num;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
struct mtk_vcodec_mem mv_buf[HEVC_MAX_MV_NUM];
struct vdec_vpu_inst vpu;
struct vdec_hevc_slice_vsi *vsi;
@@ -380,7 +379,7 @@ static unsigned int vdec_hevc_get_mv_buf_size(unsigned int width, unsigned int h
return 64 * unit_size;
}
-static void *vdec_hevc_get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id)
+static void *vdec_hevc_get_ctrl_ptr(struct mtk_vcodec_dec_ctx *ctx, int id)
{
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl, id);
@@ -390,7 +389,7 @@ static void *vdec_hevc_get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id)
return ctrl->p_cur.p;
}
-static void vdec_hevc_fill_dpb_info(struct mtk_vcodec_ctx *ctx,
+static void vdec_hevc_fill_dpb_info(struct mtk_vcodec_dec_ctx *ctx,
struct slice_api_hevc_decode_param *decode_params,
struct mtk_hevc_dpb_info *hevc_dpb_info)
{
@@ -649,7 +648,7 @@ static int vdec_hevc_slice_alloc_mv_buf(struct vdec_hevc_slice_inst *inst,
struct mtk_vcodec_mem *mem;
int i, err;
- mtk_v4l2_debug(3, "allocate mv buffer size = 0x%x", buf_sz);
+ mtk_v4l2_vdec_dbg(3, inst->ctx, "allocate mv buffer size = 0x%x", buf_sz);
for (i = 0; i < HEVC_MAX_MV_NUM; i++) {
mem = &inst->mv_buf[i];
if (mem->va)
@@ -657,7 +656,7 @@ static int vdec_hevc_slice_alloc_mv_buf(struct vdec_hevc_slice_inst *inst,
mem->size = buf_sz;
err = mtk_vcodec_mem_alloc(inst->ctx, mem);
if (err) {
- mtk_vcodec_err(inst, "failed to allocate mv buf");
+ mtk_vdec_err(inst->ctx, "failed to allocate mv buf");
return err;
}
}
@@ -679,7 +678,7 @@ static void vdec_hevc_slice_free_mv_buf(struct vdec_hevc_slice_inst *inst)
static void vdec_hevc_slice_get_pic_info(struct vdec_hevc_slice_inst *inst)
{
- struct mtk_vcodec_ctx *ctx = inst->ctx;
+ struct mtk_vcodec_dec_ctx *ctx = inst->ctx;
u32 data[3];
data[0] = ctx->picinfo.pic_w;
@@ -694,11 +693,11 @@ static void vdec_hevc_slice_get_pic_info(struct vdec_hevc_slice_inst *inst)
inst->cap_num_planes =
ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes;
- mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
- ctx->picinfo.pic_w, ctx->picinfo.pic_h,
- ctx->picinfo.buf_w, ctx->picinfo.buf_h);
- mtk_vcodec_debug(inst, "Y/C(%d, %d)", ctx->picinfo.fb_sz[0],
- ctx->picinfo.fb_sz[1]);
+ mtk_vdec_debug(ctx, "pic(%d, %d), buf(%d, %d)",
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->picinfo.buf_w, ctx->picinfo.buf_h);
+ mtk_vdec_debug(ctx, "Y/C(%d, %d)", ctx->picinfo.fb_sz[0],
+ ctx->picinfo.fb_sz[1]);
if (ctx->last_decoded_picinfo.pic_w != ctx->picinfo.pic_w ||
ctx->last_decoded_picinfo.pic_h != ctx->picinfo.pic_h) {
@@ -707,12 +706,12 @@ static void vdec_hevc_slice_get_pic_info(struct vdec_hevc_slice_inst *inst)
ctx->last_decoded_picinfo.buf_h != ctx->picinfo.buf_h)
inst->realloc_mv_buf = true;
- mtk_v4l2_debug(1, "resChg: (%d %d) : old(%d, %d) -> new(%d, %d)",
- inst->resolution_changed,
- inst->realloc_mv_buf,
- ctx->last_decoded_picinfo.pic_w,
- ctx->last_decoded_picinfo.pic_h,
- ctx->picinfo.pic_w, ctx->picinfo.pic_h);
+ mtk_v4l2_vdec_dbg(1, inst->ctx, "resChg: (%d %d) : old(%d, %d) -> new(%d, %d)",
+ inst->resolution_changed,
+ inst->realloc_mv_buf,
+ ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h);
}
}
@@ -724,8 +723,8 @@ static void vdec_hevc_slice_get_crop_info(struct vdec_hevc_slice_inst *inst,
cr->width = inst->ctx->picinfo.pic_w;
cr->height = inst->ctx->picinfo.pic_h;
- mtk_vcodec_debug(inst, "l=%d, t=%d, w=%d, h=%d",
- cr->left, cr->top, cr->width, cr->height);
+ mtk_vdec_debug(inst->ctx, "l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
}
static int vdec_hevc_slice_setup_lat_buffer(struct vdec_hevc_slice_inst *inst,
@@ -747,7 +746,7 @@ static int vdec_hevc_slice_setup_lat_buffer(struct vdec_hevc_slice_inst *inst,
*res_chg = inst->resolution_changed;
if (inst->resolution_changed) {
- mtk_vcodec_debug(inst, "- resolution changed -");
+ mtk_vdec_debug(inst->ctx, "- resolution changed -");
if (inst->realloc_mv_buf) {
err = vdec_hevc_slice_alloc_mv_buf(inst, &inst->ctx->picinfo);
inst->realloc_mv_buf = false;
@@ -779,16 +778,16 @@ static int vdec_hevc_slice_setup_lat_buffer(struct vdec_hevc_slice_inst *inst,
share_info->trans.dma_addr = inst->vsi->trans.dma_addr;
share_info->trans.dma_addr_end = inst->vsi->trans.dma_addr_end;
- mtk_vcodec_debug(inst, "lat: ube addr/size(0x%llx 0x%llx) err:0x%llx",
- inst->vsi->ube.buf,
- inst->vsi->ube.padding,
- inst->vsi->err_map.buf);
+ mtk_vdec_debug(inst->ctx, "lat: ube addr/size(0x%llx 0x%llx) err:0x%llx",
+ inst->vsi->ube.buf,
+ inst->vsi->ube.padding,
+ inst->vsi->err_map.buf);
- mtk_vcodec_debug(inst, "slice addr/size(0x%llx 0x%llx) trans start/end((0x%llx 0x%llx))",
- inst->vsi->slice_bc.buf,
- inst->vsi->slice_bc.padding,
- inst->vsi->trans.buf,
- inst->vsi->trans.padding);
+ mtk_vdec_debug(inst->ctx, "slice addr/size(0x%llx 0x%llx) trans start/end((0x%llx 0x%llx))",
+ inst->vsi->slice_bc.buf,
+ inst->vsi->slice_bc.padding,
+ inst->vsi->trans.buf,
+ inst->vsi->trans.padding);
return 0;
}
@@ -798,7 +797,7 @@ static int vdec_hevc_slice_setup_core_buffer(struct vdec_hevc_slice_inst *inst,
struct vdec_lat_buf *lat_buf)
{
struct mtk_vcodec_mem *mem;
- struct mtk_vcodec_ctx *ctx = inst->ctx;
+ struct mtk_vcodec_dec_ctx *ctx = inst->ctx;
struct vb2_v4l2_buffer *vb2_v4l2;
struct vdec_fb *fb;
u64 y_fb_dma, c_fb_dma;
@@ -806,7 +805,7 @@ static int vdec_hevc_slice_setup_core_buffer(struct vdec_hevc_slice_inst *inst,
fb = ctx->dev->vdec_pdata->get_cap_buffer(ctx);
if (!fb) {
- mtk_vcodec_err(inst, "fb buffer is NULL");
+ mtk_vdec_err(inst->ctx, "fb buffer is NULL");
return -EBUSY;
}
@@ -817,8 +816,7 @@ static int vdec_hevc_slice_setup_core_buffer(struct vdec_hevc_slice_inst *inst,
else
c_fb_dma = (u64)fb->base_c.dma_addr;
- mtk_vcodec_debug(inst, "[hevc-core] y/c addr = 0x%llx 0x%llx", y_fb_dma,
- c_fb_dma);
+ mtk_vdec_debug(inst->ctx, "[hevc-core] y/c addr = 0x%llx 0x%llx", y_fb_dma, c_fb_dma);
inst->vsi_core->fb.y.dma_addr = y_fb_dma;
inst->vsi_core->fb.y.size = ctx->picinfo.fb_sz[0];
@@ -854,7 +852,7 @@ static int vdec_hevc_slice_setup_core_buffer(struct vdec_hevc_slice_inst *inst,
return 0;
}
-static int vdec_hevc_slice_init(struct mtk_vcodec_ctx *ctx)
+static int vdec_hevc_slice_init(struct mtk_vcodec_dec_ctx *ctx)
{
struct vdec_hevc_slice_inst *inst;
int err, vsi_size;
@@ -874,7 +872,7 @@ static int vdec_hevc_slice_init(struct mtk_vcodec_ctx *ctx)
ctx->drv_handle = inst;
err = vpu_dec_init(&inst->vpu);
if (err) {
- mtk_vcodec_err(inst, "vdec_hevc init err=%d", err);
+ mtk_vdec_err(ctx, "vdec_hevc init err=%d", err);
goto error_free_inst;
}
@@ -891,14 +889,14 @@ static int vdec_hevc_slice_init(struct mtk_vcodec_ctx *ctx)
if (err)
goto error_free_inst;
- mtk_vcodec_debug(inst, "lat struct size = %d,%d,%d,%d vsi: %d\n",
- (int)sizeof(struct mtk_hevc_sps_param),
- (int)sizeof(struct mtk_hevc_pps_param),
- (int)sizeof(struct vdec_hevc_slice_lat_dec_param),
- (int)sizeof(struct mtk_hevc_dpb_info),
+ mtk_vdec_debug(ctx, "lat struct size = %d,%d,%d,%d vsi: %d\n",
+ (int)sizeof(struct mtk_hevc_sps_param),
+ (int)sizeof(struct mtk_hevc_pps_param),
+ (int)sizeof(struct vdec_hevc_slice_lat_dec_param),
+ (int)sizeof(struct mtk_hevc_dpb_info),
vsi_size);
- mtk_vcodec_debug(inst, "lat hevc instance >> %p, codec_type = 0x%x",
- inst, inst->vpu.codec_type);
+ mtk_vdec_debug(ctx, "lat hevc instance >> %p, codec_type = 0x%x",
+ inst, inst->vpu.codec_type);
return 0;
error_free_inst:
@@ -911,8 +909,6 @@ static void vdec_hevc_slice_deinit(void *h_vdec)
struct vdec_hevc_slice_inst *inst = h_vdec;
struct mtk_vcodec_mem *mem;
- mtk_vcodec_debug_enter(inst);
-
vpu_dec_deinit(&inst->vpu);
vdec_hevc_slice_free_mv_buf(inst);
@@ -927,12 +923,12 @@ static void vdec_hevc_slice_deinit(void *h_vdec)
static int vdec_hevc_slice_core_decode(struct vdec_lat_buf *lat_buf)
{
int err, timeout;
- struct mtk_vcodec_ctx *ctx = lat_buf->ctx;
+ struct mtk_vcodec_dec_ctx *ctx = lat_buf->ctx;
struct vdec_hevc_slice_inst *inst = ctx->drv_handle;
struct vdec_hevc_slice_share_info *share_info = lat_buf->private_data;
struct vdec_vpu_inst *vpu = &inst->vpu;
- mtk_vcodec_debug(inst, "[hevc-core] vdec_hevc core decode");
+ mtk_vdec_debug(ctx, "[hevc-core] vdec_hevc core decode");
memcpy(&inst->vsi_core->hevc_slice_params, &share_info->hevc_slice_params,
sizeof(share_info->hevc_slice_params));
@@ -944,7 +940,7 @@ static int vdec_hevc_slice_core_decode(struct vdec_lat_buf *lat_buf)
share_info);
err = vpu_dec_core(vpu);
if (err) {
- mtk_vcodec_err(inst, "core decode err=%d", err);
+ mtk_vdec_err(ctx, "core decode err=%d", err);
goto vdec_dec_end;
}
@@ -952,22 +948,21 @@ static int vdec_hevc_slice_core_decode(struct vdec_lat_buf *lat_buf)
timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
WAIT_INTR_TIMEOUT_MS, MTK_VDEC_CORE);
if (timeout)
- mtk_vcodec_err(inst, "core decode timeout: pic_%d",
- ctx->decoded_frame_cnt);
+ mtk_vdec_err(ctx, "core decode timeout: pic_%d", ctx->decoded_frame_cnt);
inst->vsi_core->dec.timeout = !!timeout;
vpu_dec_core_end(vpu);
- mtk_vcodec_debug(inst, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
- ctx->decoded_frame_cnt,
- inst->vsi_core->dec.crc[0], inst->vsi_core->dec.crc[1],
- inst->vsi_core->dec.crc[2], inst->vsi_core->dec.crc[3],
- inst->vsi_core->dec.crc[4], inst->vsi_core->dec.crc[5],
- inst->vsi_core->dec.crc[6], inst->vsi_core->dec.crc[7]);
+ mtk_vdec_debug(ctx, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
+ ctx->decoded_frame_cnt,
+ inst->vsi_core->dec.crc[0], inst->vsi_core->dec.crc[1],
+ inst->vsi_core->dec.crc[2], inst->vsi_core->dec.crc[3],
+ inst->vsi_core->dec.crc[4], inst->vsi_core->dec.crc[5],
+ inst->vsi_core->dec.crc[6], inst->vsi_core->dec.crc[7]);
vdec_dec_end:
vdec_msg_queue_update_ube_rptr(&lat_buf->ctx->msg_queue, share_info->trans.dma_addr_end);
ctx->dev->vdec_pdata->cap_to_disp(ctx, !!err, lat_buf->src_buf_req);
- mtk_vcodec_debug(inst, "core decode done err=%d", err);
+ mtk_vdec_debug(ctx, "core decode done err=%d", err);
ctx->decoded_frame_cnt++;
return 0;
}
@@ -995,7 +990,7 @@ static int vdec_hevc_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
lat_buf = vdec_msg_queue_dqbuf(&inst->ctx->msg_queue.lat_ctx);
if (!lat_buf) {
- mtk_vcodec_debug(inst, "failed to get lat buffer");
+ mtk_vdec_debug(inst->ctx, "failed to get lat buffer");
return -EAGAIN;
}
@@ -1010,7 +1005,7 @@ static int vdec_hevc_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
err = vpu_dec_start(vpu, data, 2);
if (err) {
- mtk_vcodec_debug(inst, "lat decode err: %d", err);
+ mtk_vdec_debug(inst->ctx, "lat decode err: %d", err);
goto err_free_fb_out;
}
@@ -1024,7 +1019,7 @@ static int vdec_hevc_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
WAIT_INTR_TIMEOUT_MS, MTK_VDEC_LAT0);
if (timeout)
- mtk_vcodec_err(inst, "lat decode timeout: pic_%d", inst->slice_dec_num);
+ mtk_vdec_err(inst->ctx, "lat decode timeout: pic_%d", inst->slice_dec_num);
inst->vsi->dec.timeout = !!timeout;
err = vpu_dec_end(vpu);
@@ -1032,7 +1027,7 @@ static int vdec_hevc_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability))
vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
inst->slice_dec_num++;
- mtk_vcodec_err(inst, "lat dec fail: pic_%d err:%d", inst->slice_dec_num, err);
+ mtk_vdec_err(inst->ctx, "lat dec fail: pic_%d err:%d", inst->slice_dec_num, err);
return -EINVAL;
}
@@ -1045,14 +1040,14 @@ static int vdec_hevc_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
sizeof(share_info->hevc_slice_params));
vdec_msg_queue_qbuf(&inst->ctx->msg_queue.core_ctx, lat_buf);
}
- mtk_vcodec_debug(inst, "dec num: %d lat crc: 0x%x 0x%x 0x%x", inst->slice_dec_num,
- inst->vsi->dec.crc[0], inst->vsi->dec.crc[1], inst->vsi->dec.crc[2]);
+ mtk_vdec_debug(inst->ctx, "dec num: %d lat crc: 0x%x 0x%x 0x%x", inst->slice_dec_num,
+ inst->vsi->dec.crc[0], inst->vsi->dec.crc[1], inst->vsi->dec.crc[2]);
inst->slice_dec_num++;
return 0;
err_free_fb_out:
vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
- mtk_vcodec_err(inst, "slice dec number: %d err: %d", inst->slice_dec_num, err);
+ mtk_vdec_err(inst->ctx, "slice dec number: %d err: %d", inst->slice_dec_num, err);
return err;
}
@@ -1083,7 +1078,7 @@ static int vdec_hevc_slice_get_param(void *h_vdec, enum vdec_get_param_type type
vdec_hevc_slice_get_crop_info(inst, out);
break;
default:
- mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+ mtk_vdec_err(inst->ctx, "invalid get parameter type=%d", type);
return -EINVAL;
}
return 0;
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
index 88c046731754..19407f9bc773 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_if.c
@@ -7,9 +7,8 @@
#include <linux/slab.h>
#include "../vdec_drv_if.h"
-#include "../mtk_vcodec_util.h"
#include "../mtk_vcodec_dec.h"
-#include "../mtk_vcodec_intr.h"
+#include "../../common/mtk_vcodec_intr.h"
#include "../vdec_vpu_if.h"
#include "../vdec_drv_base.h"
@@ -91,7 +90,6 @@ struct vdec_vp8_vsi {
/**
* struct vdec_vp8_hw_reg_base - HW register base
- * @sys : base address for sys
* @misc : base address for misc
* @ld : base address for ld
* @top : base address for top
@@ -100,7 +98,6 @@ struct vdec_vp8_vsi {
* @hwb : base address for hwb
*/
struct vdec_vp8_hw_reg_base {
- void __iomem *sys;
void __iomem *misc;
void __iomem *ld;
void __iomem *top;
@@ -160,20 +157,21 @@ struct vdec_vp8_inst {
struct mtk_vcodec_mem working_buf;
struct vdec_vp8_hw_reg_base reg_base;
unsigned int frm_cnt;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
struct vdec_vpu_inst vpu;
struct vdec_vp8_vsi *vsi;
};
static void get_hw_reg_base(struct vdec_vp8_inst *inst)
{
- inst->reg_base.top = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_TOP);
- inst->reg_base.cm = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_CM);
- inst->reg_base.hwd = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_HWD);
- inst->reg_base.sys = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_SYS);
- inst->reg_base.misc = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_MISC);
- inst->reg_base.ld = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_LD);
- inst->reg_base.hwb = mtk_vcodec_get_reg_addr(inst->ctx, VDEC_HWB);
+ void __iomem **reg_base = inst->ctx->dev->reg_base;
+
+ inst->reg_base.top = mtk_vcodec_get_reg_addr(reg_base, VDEC_TOP);
+ inst->reg_base.cm = mtk_vcodec_get_reg_addr(reg_base, VDEC_CM);
+ inst->reg_base.hwd = mtk_vcodec_get_reg_addr(reg_base, VDEC_HWD);
+ inst->reg_base.misc = mtk_vcodec_get_reg_addr(reg_base, VDEC_MISC);
+ inst->reg_base.ld = mtk_vcodec_get_reg_addr(reg_base, VDEC_LD);
+ inst->reg_base.hwb = mtk_vcodec_get_reg_addr(reg_base, VDEC_HWB);
}
static void write_hw_segmentation_data(struct vdec_vp8_inst *inst)
@@ -222,17 +220,16 @@ static void read_hw_segmentation_data(struct vdec_vp8_inst *inst)
static void enable_hw_rw_function(struct vdec_vp8_inst *inst)
{
u32 val = 0;
- void __iomem *sys = inst->reg_base.sys;
void __iomem *misc = inst->reg_base.misc;
void __iomem *ld = inst->reg_base.ld;
void __iomem *hwb = inst->reg_base.hwb;
void __iomem *hwd = inst->reg_base.hwd;
- writel(0x1, sys + VP8_RW_CKEN_SET);
+ mtk_vcodec_write_vdecsys(inst->ctx, VP8_RW_CKEN_SET, 0x1);
writel(0x101, ld + VP8_WO_VLD_SRST);
writel(0x101, hwb + VP8_WO_VLD_SRST);
- writel(1, sys);
+ mtk_vcodec_write_vdecsys(inst->ctx, 0, 0x1);
val = readl(misc + VP8_RW_MISC_SRST);
writel((val & 0xFFFFFFFE), misc + VP8_RW_MISC_SRST);
@@ -241,7 +238,7 @@ static void enable_hw_rw_function(struct vdec_vp8_inst *inst)
writel(0x71201100, misc + VP8_RW_MISC_FUNC_CON);
writel(0x0, ld + VP8_WO_VLD_SRST);
writel(0x0, hwb + VP8_WO_VLD_SRST);
- writel(0x1, sys + VP8_RW_DCM_CON);
+ mtk_vcodec_write_vdecsys(inst->ctx, VP8_RW_DCM_CON, 0x1);
writel(0x1, misc + VP8_RW_MISC_DCM_CON);
writel(0x1, hwd + VP8_RW_VP8_CTRL);
}
@@ -284,10 +281,10 @@ static void get_pic_info(struct vdec_vp8_inst *inst, struct vdec_pic_info *pic)
{
*pic = inst->vsi->pic;
- mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
- pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
- mtk_vcodec_debug(inst, "fb size: Y(%d), C(%d)",
- pic->fb_sz[0], pic->fb_sz[1]);
+ mtk_vdec_debug(inst->ctx, "pic(%d, %d), buf(%d, %d)",
+ pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
+ mtk_vdec_debug(inst->ctx, "fb size: Y(%d), C(%d)",
+ pic->fb_sz[0], pic->fb_sz[1]);
}
static void vp8_dec_finish(struct vdec_vp8_inst *inst)
@@ -295,7 +292,7 @@ static void vp8_dec_finish(struct vdec_vp8_inst *inst)
struct vdec_fb_node *node;
uint64_t prev_y_dma = inst->vsi->dec.prev_y_dma;
- mtk_vcodec_debug(inst, "prev fb base dma=%llx", prev_y_dma);
+ mtk_vdec_debug(inst->ctx, "prev fb base dma=%llx", prev_y_dma);
/* put last decode ok frame to fb_free_list */
if (prev_y_dma != 0) {
@@ -370,7 +367,7 @@ static int alloc_working_buf(struct vdec_vp8_inst *inst)
mem->size = VP8_WORKING_BUF_SZ;
err = mtk_vcodec_mem_alloc(inst->ctx, mem);
if (err) {
- mtk_vcodec_err(inst, "Cannot allocate working buffer");
+ mtk_vdec_err(inst->ctx, "Cannot allocate working buffer");
return err;
}
@@ -388,7 +385,7 @@ static void free_working_buf(struct vdec_vp8_inst *inst)
inst->vsi->dec.working_buf_dma = 0;
}
-static int vdec_vp8_init(struct mtk_vcodec_ctx *ctx)
+static int vdec_vp8_init(struct mtk_vcodec_dec_ctx *ctx)
{
struct vdec_vp8_inst *inst;
int err;
@@ -404,7 +401,7 @@ static int vdec_vp8_init(struct mtk_vcodec_ctx *ctx)
err = vpu_dec_init(&inst->vpu);
if (err) {
- mtk_vcodec_err(inst, "vdec_vp8 init err=%d", err);
+ mtk_vdec_err(ctx, "vdec_vp8 init err=%d", err);
goto error_free_inst;
}
@@ -415,7 +412,7 @@ static int vdec_vp8_init(struct mtk_vcodec_ctx *ctx)
goto error_deinit;
get_hw_reg_base(inst);
- mtk_vcodec_debug(inst, "VP8 Instance >> %p", inst);
+ mtk_vdec_debug(ctx, "VP8 Instance >> %p", inst);
ctx->drv_handle = inst;
return 0;
@@ -448,8 +445,8 @@ static int vdec_vp8_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
- mtk_vcodec_debug(inst, "+ [%d] FB y_dma=%llx c_dma=%llx fb=%p",
- inst->frm_cnt, y_fb_dma, c_fb_dma, fb);
+ mtk_vdec_debug(inst->ctx, "+ [%d] FB y_dma=%llx c_dma=%llx fb=%p",
+ inst->frm_cnt, y_fb_dma, c_fb_dma, fb);
inst->cur_fb = fb;
dec->bs_dma = (unsigned long)bs->dma_addr;
@@ -457,7 +454,7 @@ static int vdec_vp8_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
dec->cur_y_fb_dma = y_fb_dma;
dec->cur_c_fb_dma = c_fb_dma;
- mtk_vcodec_debug(inst, "\n + FRAME[%d] +\n", inst->frm_cnt);
+ mtk_vdec_debug(inst->ctx, "\n + FRAME[%d] +\n", inst->frm_cnt);
write_hw_segmentation_data(inst);
enable_hw_rw_function(inst);
@@ -472,7 +469,7 @@ static int vdec_vp8_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
if (err) {
add_fb_to_free_list(inst, fb);
if (dec->wait_key_frame) {
- mtk_vcodec_debug(inst, "wait key frame !");
+ mtk_vdec_debug(inst->ctx, "wait key frame !");
return 0;
}
@@ -480,7 +477,7 @@ static int vdec_vp8_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
}
if (dec->resolution_changed) {
- mtk_vcodec_debug(inst, "- resolution_changed -");
+ mtk_vdec_debug(inst->ctx, "- resolution_changed -");
*res_chg = true;
add_fb_to_free_list(inst, fb);
return 0;
@@ -500,14 +497,13 @@ static int vdec_vp8_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
if (err)
goto error;
- mtk_vcodec_debug(inst, "\n - FRAME[%d] - show=%d\n", inst->frm_cnt,
- dec->show_frame);
+ mtk_vdec_debug(inst->ctx, "\n - FRAME[%d] - show=%d\n", inst->frm_cnt, dec->show_frame);
inst->frm_cnt++;
*res_chg = false;
return 0;
error:
- mtk_vcodec_err(inst, "\n - FRAME[%d] - err=%d\n", inst->frm_cnt, err);
+ mtk_vdec_err(inst->ctx, "\n - FRAME[%d] - err=%d\n", inst->frm_cnt, err);
return err;
}
@@ -522,11 +518,10 @@ static void get_disp_fb(struct vdec_vp8_inst *inst, struct vdec_fb **out_fb)
list_move_tail(&node->list, &inst->available_fb_node_list);
fb = (struct vdec_fb *)node->fb;
fb->status |= FB_ST_DISPLAY;
- mtk_vcodec_debug(inst, "[FB] get disp fb %p st=%d",
- node->fb, fb->status);
+ mtk_vdec_debug(inst->ctx, "[FB] get disp fb %p st=%d", node->fb, fb->status);
} else {
fb = NULL;
- mtk_vcodec_debug(inst, "[FB] there is no disp fb");
+ mtk_vdec_debug(inst->ctx, "[FB] there is no disp fb");
}
*out_fb = fb;
@@ -543,11 +538,10 @@ static void get_free_fb(struct vdec_vp8_inst *inst, struct vdec_fb **out_fb)
list_move_tail(&node->list, &inst->available_fb_node_list);
fb = (struct vdec_fb *)node->fb;
fb->status |= FB_ST_FREE;
- mtk_vcodec_debug(inst, "[FB] get free fb %p st=%d",
- node->fb, fb->status);
+ mtk_vdec_debug(inst->ctx, "[FB] get free fb %p st=%d", node->fb, fb->status);
} else {
fb = NULL;
- mtk_vcodec_debug(inst, "[FB] there is no free fb");
+ mtk_vdec_debug(inst->ctx, "[FB] there is no free fb");
}
*out_fb = fb;
@@ -559,8 +553,8 @@ static void get_crop_info(struct vdec_vp8_inst *inst, struct v4l2_rect *cr)
cr->top = 0;
cr->width = inst->vsi->pic.pic_w;
cr->height = inst->vsi->pic.pic_h;
- mtk_vcodec_debug(inst, "get crop info l=%d, t=%d, w=%d, h=%d",
- cr->left, cr->top, cr->width, cr->height);
+ mtk_vdec_debug(inst->ctx, "get crop info l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
}
static int vdec_vp8_get_param(void *h_vdec, enum vdec_get_param_type type,
@@ -590,7 +584,7 @@ static int vdec_vp8_get_param(void *h_vdec, enum vdec_get_param_type type,
break;
default:
- mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+ mtk_vdec_err(inst->ctx, "invalid get parameter type=%d", type);
return -EINVAL;
}
@@ -601,8 +595,6 @@ static void vdec_vp8_deinit(void *h_vdec)
{
struct vdec_vp8_inst *inst = (struct vdec_vp8_inst *)h_vdec;
- mtk_vcodec_debug_enter(inst);
-
vpu_dec_deinit(&inst->vpu);
free_working_buf(inst);
kfree(inst);
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_req_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
index e1fe2603e92e..f64b21c07169 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_req_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c
@@ -9,9 +9,8 @@
#include <media/videobuf2-dma-contig.h>
#include <uapi/linux/v4l2-controls.h>
-#include "../mtk_vcodec_util.h"
#include "../mtk_vcodec_dec.h"
-#include "../mtk_vcodec_intr.h"
+#include "../../common/mtk_vcodec_intr.h"
#include "../vdec_drv_base.h"
#include "../vdec_drv_if.h"
#include "../vdec_vpu_if.h"
@@ -101,12 +100,12 @@ struct vdec_vp8_slice_inst {
struct mtk_vcodec_mem wrap_y_buf;
struct mtk_vcodec_mem wrap_c_buf;
struct mtk_vcodec_mem vld_wrapper_buf;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
struct vdec_vpu_inst vpu;
struct vdec_vp8_slice_vsi *vsi;
};
-static void *vdec_vp8_slice_get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id)
+static void *vdec_vp8_slice_get_ctrl_ptr(struct mtk_vcodec_dec_ctx *ctx, int id)
{
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl, id);
@@ -118,7 +117,7 @@ static void *vdec_vp8_slice_get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id)
static void vdec_vp8_slice_get_pic_info(struct vdec_vp8_slice_inst *inst)
{
- struct mtk_vcodec_ctx *ctx = inst->ctx;
+ struct mtk_vcodec_dec_ctx *ctx = inst->ctx;
unsigned int data[3];
data[0] = ctx->picinfo.pic_w;
@@ -137,11 +136,11 @@ static void vdec_vp8_slice_get_pic_info(struct vdec_vp8_slice_inst *inst)
inst->vsi->pic.buf_h = ctx->picinfo.buf_h;
inst->vsi->pic.fb_sz[0] = ctx->picinfo.fb_sz[0];
inst->vsi->pic.fb_sz[1] = ctx->picinfo.fb_sz[1];
- mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
- ctx->picinfo.pic_w, ctx->picinfo.pic_h,
- ctx->picinfo.buf_w, ctx->picinfo.buf_h);
- mtk_vcodec_debug(inst, "fb size: Y(%d), C(%d)",
- ctx->picinfo.fb_sz[0], ctx->picinfo.fb_sz[1]);
+ mtk_vdec_debug(inst->ctx, "pic(%d, %d), buf(%d, %d)",
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->picinfo.buf_w, ctx->picinfo.buf_h);
+ mtk_vdec_debug(inst->ctx, "fb size: Y(%d), C(%d)",
+ ctx->picinfo.fb_sz[0], ctx->picinfo.fb_sz[1]);
}
static int vdec_vp8_slice_alloc_working_buf(struct vdec_vp8_slice_inst *inst)
@@ -153,7 +152,7 @@ static int vdec_vp8_slice_alloc_working_buf(struct vdec_vp8_slice_inst *inst)
mem->size = VP8_SEG_ID_SZ;
err = mtk_vcodec_mem_alloc(inst->ctx, mem);
if (err) {
- mtk_vcodec_err(inst, "Cannot allocate working buffer");
+ mtk_vdec_err(inst->ctx, "Cannot allocate working buffer");
return err;
}
inst->vsi->dec.seg_id_buf_dma = (u64)mem->dma_addr;
@@ -162,7 +161,7 @@ static int vdec_vp8_slice_alloc_working_buf(struct vdec_vp8_slice_inst *inst)
mem->size = VP8_PP_WRAPY_SZ;
err = mtk_vcodec_mem_alloc(inst->ctx, mem);
if (err) {
- mtk_vcodec_err(inst, "cannot allocate WRAP Y buffer");
+ mtk_vdec_err(inst->ctx, "cannot allocate WRAP Y buffer");
return err;
}
inst->vsi->dec.wrap_y_dma = (u64)mem->dma_addr;
@@ -171,7 +170,7 @@ static int vdec_vp8_slice_alloc_working_buf(struct vdec_vp8_slice_inst *inst)
mem->size = VP8_PP_WRAPC_SZ;
err = mtk_vcodec_mem_alloc(inst->ctx, mem);
if (err) {
- mtk_vcodec_err(inst, "cannot allocate WRAP C buffer");
+ mtk_vdec_err(inst->ctx, "cannot allocate WRAP C buffer");
return err;
}
inst->vsi->dec.wrap_c_dma = (u64)mem->dma_addr;
@@ -180,7 +179,7 @@ static int vdec_vp8_slice_alloc_working_buf(struct vdec_vp8_slice_inst *inst)
mem->size = VP8_VLD_PRED_SZ;
err = mtk_vcodec_mem_alloc(inst->ctx, mem);
if (err) {
- mtk_vcodec_err(inst, "cannot allocate vld wrapper buffer");
+ mtk_vdec_err(inst->ctx, "cannot allocate vld wrapper buffer");
return err;
}
inst->vsi->dec.vld_wrapper_dma = (u64)mem->dma_addr;
@@ -233,7 +232,7 @@ static u64 vdec_vp8_slice_get_ref_by_ts(const struct v4l2_ctrl_vp8_frame *frame_
static int vdec_vp8_slice_get_decode_parameters(struct vdec_vp8_slice_inst *inst)
{
const struct v4l2_ctrl_vp8_frame *frame_header;
- struct mtk_vcodec_ctx *ctx = inst->ctx;
+ struct mtk_vcodec_dec_ctx *ctx = inst->ctx;
struct vb2_queue *vq;
struct vb2_buffer *vb;
u64 referenct_ts;
@@ -249,8 +248,8 @@ static int vdec_vp8_slice_get_decode_parameters(struct vdec_vp8_slice_inst *inst
vb = vb2_find_buffer(vq, referenct_ts);
if (!vb) {
if (!V4L2_VP8_FRAME_IS_KEY_FRAME(frame_header))
- mtk_vcodec_err(inst, "reference invalid: index(%d) ts(%lld)",
- index, referenct_ts);
+ mtk_vdec_err(inst->ctx, "reference invalid: index(%d) ts(%lld)",
+ index, referenct_ts);
inst->vsi->vp8_dpb_info[index].reference_flag = 0;
continue;
}
@@ -272,7 +271,7 @@ static int vdec_vp8_slice_get_decode_parameters(struct vdec_vp8_slice_inst *inst
return 0;
}
-static int vdec_vp8_slice_init(struct mtk_vcodec_ctx *ctx)
+static int vdec_vp8_slice_init(struct mtk_vcodec_dec_ctx *ctx)
{
struct vdec_vp8_slice_inst *inst;
int err;
@@ -291,7 +290,7 @@ static int vdec_vp8_slice_init(struct mtk_vcodec_ctx *ctx)
err = vpu_dec_init(&inst->vpu);
if (err) {
- mtk_vcodec_err(inst, "vdec_vp8 init err=%d", err);
+ mtk_vdec_err(ctx, "vdec_vp8 init err=%d", err);
goto error_free_inst;
}
@@ -300,11 +299,11 @@ static int vdec_vp8_slice_init(struct mtk_vcodec_ctx *ctx)
if (err)
goto error_deinit;
- mtk_vcodec_debug(inst, "vp8 struct size = %d vsi: %d\n",
- (int)sizeof(struct v4l2_ctrl_vp8_frame),
- (int)sizeof(struct vdec_vp8_slice_vsi));
- mtk_vcodec_debug(inst, "vp8:%p, codec_type = 0x%x vsi: 0x%p",
- inst, inst->vpu.codec_type, inst->vpu.vsi);
+ mtk_vdec_debug(ctx, "vp8 struct size = %d vsi: %d\n",
+ (int)sizeof(struct v4l2_ctrl_vp8_frame),
+ (int)sizeof(struct vdec_vp8_slice_vsi));
+ mtk_vdec_debug(ctx, "vp8:%p, codec_type = 0x%x vsi: 0x%p",
+ inst, inst->vpu.codec_type, inst->vpu.vsi);
ctx->drv_handle = inst;
return 0;
@@ -350,10 +349,10 @@ static int vdec_vp8_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
inst->vsi->dec.cur_y_fb_dma = y_fb_dma;
inst->vsi->dec.cur_c_fb_dma = c_fb_dma;
- mtk_vcodec_debug(inst, "frame[%d] bs(%zu 0x%llx) y/c(0x%llx 0x%llx)",
- inst->ctx->decoded_frame_cnt,
- bs->size, (u64)bs->dma_addr,
- y_fb_dma, c_fb_dma);
+ mtk_vdec_debug(inst->ctx, "frame[%d] bs(%zu 0x%llx) y/c(0x%llx 0x%llx)",
+ inst->ctx->decoded_frame_cnt,
+ bs->size, (u64)bs->dma_addr,
+ y_fb_dma, c_fb_dma);
v4l2_m2m_buf_copy_metadata(&src_buf_info->m2m_buf.vb,
&dst_buf_info->m2m_buf.vb, true);
@@ -364,12 +363,12 @@ static int vdec_vp8_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
err = vpu_dec_start(vpu, &data, 1);
if (err) {
- mtk_vcodec_debug(inst, "vp8 dec start err!");
+ mtk_vdec_debug(inst->ctx, "vp8 dec start err!");
goto error;
}
if (inst->vsi->dec.resolution_changed) {
- mtk_vcodec_debug(inst, "- resolution_changed -");
+ mtk_vdec_debug(inst->ctx, "- resolution_changed -");
*res_chg = true;
return 0;
}
@@ -380,15 +379,15 @@ static int vdec_vp8_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
err = vpu_dec_end(vpu);
if (err || timeout)
- mtk_vcodec_debug(inst, "vp8 dec error timeout:%d err: %d pic_%d",
- timeout, err, inst->ctx->decoded_frame_cnt);
+ mtk_vdec_debug(inst->ctx, "vp8 dec error timeout:%d err: %d pic_%d",
+ timeout, err, inst->ctx->decoded_frame_cnt);
- mtk_vcodec_debug(inst, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
- inst->ctx->decoded_frame_cnt,
- inst->vsi->dec.crc[0], inst->vsi->dec.crc[1],
- inst->vsi->dec.crc[2], inst->vsi->dec.crc[3],
- inst->vsi->dec.crc[4], inst->vsi->dec.crc[5],
- inst->vsi->dec.crc[6], inst->vsi->dec.crc[7]);
+ mtk_vdec_debug(inst->ctx, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
+ inst->ctx->decoded_frame_cnt,
+ inst->vsi->dec.crc[0], inst->vsi->dec.crc[1],
+ inst->vsi->dec.crc[2], inst->vsi->dec.crc[3],
+ inst->vsi->dec.crc[4], inst->vsi->dec.crc[5],
+ inst->vsi->dec.crc[6], inst->vsi->dec.crc[7]);
inst->ctx->decoded_frame_cnt++;
error:
@@ -404,13 +403,13 @@ static int vdec_vp8_slice_get_param(void *h_vdec, enum vdec_get_param_type type,
vdec_vp8_slice_get_pic_info(inst);
break;
case GET_PARAM_CROP_INFO:
- mtk_vcodec_debug(inst, "No need to get vp8 crop information.");
+ mtk_vdec_debug(inst->ctx, "No need to get vp8 crop information.");
break;
case GET_PARAM_DPB_SIZE:
*((unsigned int *)out) = VP8_DPB_SIZE;
break;
default:
- mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+ mtk_vdec_err(inst->ctx, "invalid get parameter type=%d", type);
return -EINVAL;
}
@@ -421,8 +420,6 @@ static void vdec_vp8_slice_deinit(void *h_vdec)
{
struct vdec_vp8_slice_inst *inst = h_vdec;
- mtk_vcodec_debug_enter(inst);
-
vpu_dec_deinit(&inst->vpu);
vdec_vp8_slice_free_working_buf(inst);
kfree(inst);
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
index 70b8383f7c8e..55355fa70090 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_if.c
@@ -12,7 +12,7 @@
#include <linux/delay.h>
#include <linux/time.h>
-#include "../mtk_vcodec_intr.h"
+#include "../../common/mtk_vcodec_intr.h"
#include "../vdec_drv_base.h"
#include "../vdec_vpu_if.h"
@@ -196,7 +196,7 @@ struct vdec_vp9_inst {
struct list_head fb_free_list;
struct list_head fb_disp_list;
struct vdec_fb *cur_fb;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
struct vdec_vpu_inst vpu;
struct vdec_vp9_vsi *vsi;
unsigned int total_frm_cnt;
@@ -226,10 +226,11 @@ static struct vdec_fb *vp9_rm_from_fb_use_list(struct vdec_vp9_inst
if (fb->base_y.va == addr) {
list_move_tail(&node->list,
&inst->available_fb_node_list);
- break;
+ return fb;
}
}
- return fb;
+
+ return NULL;
}
static void vp9_add_to_fb_free_list(struct vdec_vp9_inst *inst,
@@ -246,7 +247,7 @@ static void vp9_add_to_fb_free_list(struct vdec_vp9_inst *inst,
list_move_tail(&node->list, &inst->fb_free_list);
}
} else {
- mtk_vcodec_debug(inst, "No free fb node");
+ mtk_vdec_debug(inst->ctx, "No free fb node");
}
}
@@ -330,7 +331,7 @@ static int vp9_get_sf_ref_fb(struct vdec_vp9_inst *inst)
}
if (idx == ARRAY_SIZE(vsi->sf_ref_fb)) {
- mtk_vcodec_err(inst, "List Full");
+ mtk_vdec_err(inst->ctx, "List Full");
return -1;
}
@@ -339,7 +340,7 @@ static int vp9_get_sf_ref_fb(struct vdec_vp9_inst *inst)
vsi->buf_len_sz_y;
if (mtk_vcodec_mem_alloc(inst->ctx, mem_basy_y)) {
- mtk_vcodec_err(inst, "Cannot allocate sf_ref_buf y_buf");
+ mtk_vdec_err(inst->ctx, "Cannot allocate sf_ref_buf y_buf");
return -1;
}
@@ -348,7 +349,7 @@ static int vp9_get_sf_ref_fb(struct vdec_vp9_inst *inst)
vsi->buf_len_sz_c;
if (mtk_vcodec_mem_alloc(inst->ctx, mem_basy_c)) {
- mtk_vcodec_err(inst, "Cannot allocate sf_ref_fb c_buf");
+ mtk_vdec_err(inst->ctx, "Cannot allocate sf_ref_fb c_buf");
return -1;
}
vsi->sf_ref_fb[idx].used = 0;
@@ -377,17 +378,13 @@ static bool vp9_alloc_work_buf(struct vdec_vp9_inst *inst)
if ((vsi->pic_w > max_pic_w) ||
(vsi->pic_h > max_pic_h)) {
- mtk_vcodec_err(inst, "Invalid w/h %d/%d",
- vsi->pic_w, vsi->pic_h);
+ mtk_vdec_err(inst->ctx, "Invalid w/h %d/%d", vsi->pic_w, vsi->pic_h);
return false;
}
- mtk_vcodec_debug(inst, "BUF CHG(%d): w/h/sb_w/sb_h=%d/%d/%d/%d",
- vsi->resolution_changed,
- vsi->pic_w,
- vsi->pic_h,
- vsi->buf_w,
- vsi->buf_h);
+ mtk_vdec_debug(inst->ctx, "BUF CHG(%d): w/h/sb_w/sb_h=%d/%d/%d/%d",
+ vsi->resolution_changed, vsi->pic_w,
+ vsi->pic_h, vsi->buf_w, vsi->buf_h);
mem = &inst->mv_buf;
if (mem->va)
@@ -398,7 +395,7 @@ static bool vp9_alloc_work_buf(struct vdec_vp9_inst *inst)
result = mtk_vcodec_mem_alloc(inst->ctx, mem);
if (result) {
mem->size = 0;
- mtk_vcodec_err(inst, "Cannot allocate mv_buf");
+ mtk_vdec_err(inst->ctx, "Cannot allocate mv_buf");
return false;
}
/* Set the va again */
@@ -415,7 +412,7 @@ static bool vp9_alloc_work_buf(struct vdec_vp9_inst *inst)
result = mtk_vcodec_mem_alloc(inst->ctx, mem);
if (result) {
mem->size = 0;
- mtk_vcodec_err(inst, "Cannot allocate seg_id_buf");
+ mtk_vdec_err(inst->ctx, "Cannot allocate seg_id_buf");
return false;
}
/* Set the va again */
@@ -436,7 +433,7 @@ static bool vp9_add_to_fb_disp_list(struct vdec_vp9_inst *inst,
struct vdec_fb_node *node;
if (!fb) {
- mtk_vcodec_err(inst, "fb == NULL");
+ mtk_vdec_err(inst->ctx, "fb == NULL");
return false;
}
@@ -446,7 +443,7 @@ static bool vp9_add_to_fb_disp_list(struct vdec_vp9_inst *inst,
node->fb = fb;
list_move_tail(&node->list, &inst->fb_disp_list);
} else {
- mtk_vcodec_err(inst, "No available fb node");
+ mtk_vdec_err(inst->ctx, "No available fb node");
return false;
}
@@ -492,10 +489,10 @@ static void vp9_swap_frm_bufs(struct vdec_vp9_inst *inst)
* size
*/
if (frm_to_show->fb != NULL)
- mtk_vcodec_err(inst,
- "inst->cur_fb->base_y.size=%zu, frm_to_show->fb.base_y.size=%zu",
- inst->cur_fb->base_y.size,
- frm_to_show->fb->base_y.size);
+ mtk_vdec_err(inst->ctx,
+ "base_y.size=%zu, frm_to_show: base_y.size=%zu",
+ inst->cur_fb->base_y.size,
+ frm_to_show->fb->base_y.size);
}
if (!vp9_is_sf_ref_fb(inst, inst->cur_fb)) {
if (vsi->show_frame & BIT(0))
@@ -535,7 +532,7 @@ static void vp9_swap_frm_bufs(struct vdec_vp9_inst *inst)
static bool vp9_wait_dec_end(struct vdec_vp9_inst *inst)
{
- struct mtk_vcodec_ctx *ctx = inst->ctx;
+ struct mtk_vcodec_dec_ctx *ctx = inst->ctx;
mtk_vcodec_wait_for_done_ctx(inst->ctx,
MTK_INST_IRQ_RECEIVED,
@@ -547,7 +544,7 @@ static bool vp9_wait_dec_end(struct vdec_vp9_inst *inst)
return false;
}
-static struct vdec_vp9_inst *vp9_alloc_inst(struct mtk_vcodec_ctx *ctx)
+static struct vdec_vp9_inst *vp9_alloc_inst(struct mtk_vcodec_dec_ctx *ctx)
{
int result;
struct mtk_vcodec_mem mem;
@@ -582,20 +579,19 @@ static bool vp9_decode_end_proc(struct vdec_vp9_inst *inst)
if (!vsi->show_existing_frame) {
ret = vp9_wait_dec_end(inst);
if (!ret) {
- mtk_vcodec_err(inst, "Decode failed, Decode Timeout @[%d]",
- vsi->frm_num);
+ mtk_vdec_err(inst->ctx, "Decode failed, Decode Timeout @[%d]",
+ vsi->frm_num);
return false;
}
if (vpu_dec_end(&inst->vpu)) {
- mtk_vcodec_err(inst, "vp9_dec_vpu_end failed");
+ mtk_vdec_err(inst->ctx, "vp9_dec_vpu_end failed");
return false;
}
- mtk_vcodec_debug(inst, "Decode Ok @%d (%d/%d)", vsi->frm_num,
- vsi->pic_w, vsi->pic_h);
+ mtk_vdec_debug(inst->ctx, "Decode Ok @%d (%d/%d)", vsi->frm_num,
+ vsi->pic_w, vsi->pic_h);
} else {
- mtk_vcodec_debug(inst, "Decode Ok @%d (show_existing_frame)",
- vsi->frm_num);
+ mtk_vdec_debug(inst->ctx, "Decode Ok @%d (show_existing_frame)", vsi->frm_num);
}
vp9_swap_frm_bufs(inst);
@@ -624,10 +620,9 @@ static struct vdec_fb *vp9_rm_from_fb_disp_list(struct vdec_vp9_inst *inst)
fb = (struct vdec_fb *)node->fb;
fb->status |= FB_ST_DISPLAY;
list_move_tail(&node->list, &inst->available_fb_node_list);
- mtk_vcodec_debug(inst, "[FB] get disp fb %p st=%d",
- node->fb, fb->status);
+ mtk_vdec_debug(inst->ctx, "[FB] get disp fb %p st=%d", node->fb, fb->status);
} else
- mtk_vcodec_debug(inst, "[FB] there is no disp fb");
+ mtk_vdec_debug(inst->ctx, "[FB] there is no disp fb");
return fb;
}
@@ -638,7 +633,7 @@ static bool vp9_add_to_fb_use_list(struct vdec_vp9_inst *inst,
struct vdec_fb_node *node;
if (!fb) {
- mtk_vcodec_debug(inst, "fb == NULL");
+ mtk_vdec_debug(inst->ctx, "fb == NULL");
return false;
}
@@ -648,7 +643,7 @@ static bool vp9_add_to_fb_use_list(struct vdec_vp9_inst *inst,
node->fb = fb;
list_move_tail(&node->list, &inst->fb_use_list);
} else {
- mtk_vcodec_err(inst, "No free fb node");
+ mtk_vdec_err(inst->ctx, "No free fb node");
return false;
}
return true;
@@ -665,7 +660,7 @@ static void vp9_reset(struct vdec_vp9_inst *inst)
inst->vsi->sf_next_ref_fb_idx = vp9_get_sf_ref_fb(inst);
if (vpu_dec_reset(&inst->vpu))
- mtk_vcodec_err(inst, "vp9_dec_vpu_reset failed");
+ mtk_vdec_err(inst->ctx, "vp9_dec_vpu_reset failed");
/* Set the va again, since vpu_dec_reset will clear mv_buf in vpu */
inst->vsi->mv_buf.va = (unsigned long)inst->mv_buf.va;
@@ -706,11 +701,9 @@ static void get_pic_info(struct vdec_vp9_inst *inst, struct vdec_pic_info *pic)
pic->buf_w = inst->vsi->buf_w;
pic->buf_h = inst->vsi->buf_h;
- mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
- pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
- mtk_vcodec_debug(inst, "fb size: Y(%d), C(%d)",
- pic->fb_sz[0],
- pic->fb_sz[1]);
+ mtk_vdec_debug(inst->ctx, "pic(%d, %d), buf(%d, %d)",
+ pic->pic_w, pic->pic_h, pic->buf_w, pic->buf_h);
+ mtk_vdec_debug(inst->ctx, "fb size: Y(%d), C(%d)", pic->fb_sz[0], pic->fb_sz[1]);
}
static void get_disp_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
@@ -732,10 +725,9 @@ static void get_free_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
list_move_tail(&node->list, &inst->available_fb_node_list);
fb = (struct vdec_fb *)node->fb;
fb->status |= FB_ST_FREE;
- mtk_vcodec_debug(inst, "[FB] get free fb %p st=%d",
- node->fb, fb->status);
+ mtk_vdec_debug(inst->ctx, "[FB] get free fb %p st=%d", node->fb, fb->status);
} else {
- mtk_vcodec_debug(inst, "[FB] there is no free fb");
+ mtk_vdec_debug(inst->ctx, "[FB] there is no free fb");
}
*out_fb = fb;
@@ -744,18 +736,15 @@ static void get_free_fb(struct vdec_vp9_inst *inst, struct vdec_fb **out_fb)
static int validate_vsi_array_indexes(struct vdec_vp9_inst *inst,
struct vdec_vp9_vsi *vsi) {
if (vsi->sf_frm_idx >= VP9_MAX_FRM_BUF_NUM - 1) {
- mtk_vcodec_err(inst, "Invalid vsi->sf_frm_idx=%u.",
- vsi->sf_frm_idx);
+ mtk_vdec_err(inst->ctx, "Invalid vsi->sf_frm_idx=%u.", vsi->sf_frm_idx);
return -EIO;
}
if (vsi->frm_to_show_idx >= VP9_MAX_FRM_BUF_NUM) {
- mtk_vcodec_err(inst, "Invalid vsi->frm_to_show_idx=%u.",
- vsi->frm_to_show_idx);
+ mtk_vdec_err(inst->ctx, "Invalid vsi->frm_to_show_idx=%u.", vsi->frm_to_show_idx);
return -EIO;
}
if (vsi->new_fb_idx >= VP9_MAX_FRM_BUF_NUM) {
- mtk_vcodec_err(inst, "Invalid vsi->new_fb_idx=%u.",
- vsi->new_fb_idx);
+ mtk_vdec_err(inst->ctx, "Invalid vsi->new_fb_idx=%u.", vsi->new_fb_idx);
return -EIO;
}
return 0;
@@ -769,7 +758,7 @@ static void vdec_vp9_deinit(void *h_vdec)
ret = vpu_dec_deinit(&inst->vpu);
if (ret)
- mtk_vcodec_err(inst, "vpu_dec_deinit failed");
+ mtk_vdec_err(inst->ctx, "vpu_dec_deinit failed");
mem = &inst->mv_buf;
if (mem->va)
@@ -783,7 +772,7 @@ static void vdec_vp9_deinit(void *h_vdec)
vp9_free_inst(inst);
}
-static int vdec_vp9_init(struct mtk_vcodec_ctx *ctx)
+static int vdec_vp9_init(struct mtk_vcodec_dec_ctx *ctx)
{
struct vdec_vp9_inst *inst;
@@ -798,7 +787,7 @@ static int vdec_vp9_init(struct mtk_vcodec_ctx *ctx)
inst->vpu.ctx = ctx;
if (vpu_dec_init(&inst->vpu)) {
- mtk_vcodec_err(inst, "vp9_dec_vpu_init failed");
+ mtk_vdec_err(inst->ctx, "vp9_dec_vpu_init failed");
goto err_deinit_inst;
}
@@ -829,17 +818,17 @@ static int vdec_vp9_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
*res_chg = false;
if ((bs == NULL) && (fb == NULL)) {
- mtk_vcodec_debug(inst, "[EOS]");
+ mtk_vdec_debug(inst->ctx, "[EOS]");
vp9_reset(inst);
return ret;
}
if (bs == NULL) {
- mtk_vcodec_err(inst, "bs == NULL");
+ mtk_vdec_err(inst->ctx, "bs == NULL");
return -EINVAL;
}
- mtk_vcodec_debug(inst, "Input BS Size = %zu", bs->size);
+ mtk_vdec_debug(inst->ctx, "Input BS Size = %zu", bs->size);
while (1) {
struct vdec_fb *cur_fb = NULL;
@@ -882,7 +871,7 @@ static int vdec_vp9_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
ret = vpu_dec_start(&inst->vpu, data, 3);
if (ret) {
- mtk_vcodec_err(inst, "vpu_dec_start failed");
+ mtk_vdec_err(inst->ctx, "vpu_dec_start failed");
goto DECODE_ERROR;
}
@@ -892,7 +881,7 @@ static int vdec_vp9_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
if (vsi->show_frame & BIT(2)) {
ret = vpu_dec_start(&inst->vpu, NULL, 0);
if (ret) {
- mtk_vcodec_err(inst, "vpu trig decoder failed");
+ mtk_vdec_err(inst->ctx, "vpu trig decoder failed");
goto DECODE_ERROR;
}
}
@@ -900,7 +889,7 @@ static int vdec_vp9_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
ret = validate_vsi_array_indexes(inst, vsi);
if (ret) {
- mtk_vcodec_err(inst, "Invalid values from VPU.");
+ mtk_vdec_err(inst->ctx, "Invalid values from VPU.");
goto DECODE_ERROR;
}
@@ -926,18 +915,18 @@ static int vdec_vp9_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
if (!vp9_is_sf_ref_fb(inst, inst->cur_fb))
vp9_add_to_fb_use_list(inst, inst->cur_fb);
- mtk_vcodec_debug(inst, "[#pic %d]", vsi->frm_num);
+ mtk_vdec_debug(inst->ctx, "[#pic %d]", vsi->frm_num);
if (vsi->show_existing_frame)
- mtk_vcodec_debug(inst,
- "drv->new_fb_idx=%d, drv->frm_to_show_idx=%d",
- vsi->new_fb_idx, vsi->frm_to_show_idx);
+ mtk_vdec_debug(inst->ctx,
+ "drv->new_fb_idx=%d, drv->frm_to_show_idx=%d",
+ vsi->new_fb_idx, vsi->frm_to_show_idx);
if (vsi->show_existing_frame && (vsi->frm_to_show_idx <
VP9_MAX_FRM_BUF_NUM)) {
- mtk_vcodec_debug(inst,
- "Skip Decode drv->new_fb_idx=%d, drv->frm_to_show_idx=%d",
- vsi->new_fb_idx, vsi->frm_to_show_idx);
+ mtk_vdec_debug(inst->ctx,
+ "Skip Decode drv->new_fb_idx=%d, drv->frm_to_show_idx=%d",
+ vsi->new_fb_idx, vsi->frm_to_show_idx);
vp9_ref_cnt_fb(inst, &vsi->new_fb_idx,
vsi->frm_to_show_idx);
@@ -954,14 +943,14 @@ static int vdec_vp9_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
if (vsi->resolution_changed) {
*res_chg = true;
- mtk_vcodec_debug(inst, "VDEC_ST_RESOLUTION_CHANGED");
+ mtk_vdec_debug(inst->ctx, "VDEC_ST_RESOLUTION_CHANGED");
ret = 0;
goto DECODE_ERROR;
}
if (!vp9_decode_end_proc(inst)) {
- mtk_vcodec_err(inst, "vp9_decode_end_proc");
+ mtk_vdec_err(inst->ctx, "vp9_decode_end_proc");
ret = -EINVAL;
goto DECODE_ERROR;
}
@@ -985,8 +974,8 @@ static void get_crop_info(struct vdec_vp9_inst *inst, struct v4l2_rect *cr)
cr->top = 0;
cr->width = inst->vsi->pic_w;
cr->height = inst->vsi->pic_h;
- mtk_vcodec_debug(inst, "get crop info l=%d, t=%d, w=%d, h=%d\n",
- cr->left, cr->top, cr->width, cr->height);
+ mtk_vdec_debug(inst->ctx, "get crop info l=%d, t=%d, w=%d, h=%d\n",
+ cr->left, cr->top, cr->width, cr->height);
}
static int vdec_vp9_get_param(void *h_vdec, enum vdec_get_param_type type,
@@ -1012,7 +1001,7 @@ static int vdec_vp9_get_param(void *h_vdec, enum vdec_get_param_type type,
get_crop_info(inst, out);
break;
default:
- mtk_vcodec_err(inst, "not supported param type %d", type);
+ mtk_vdec_err(inst->ctx, "not supported param type %d", type);
ret = -EINVAL;
break;
}
diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
index c2f90848f498..e393e3e668f8 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c
@@ -9,9 +9,8 @@
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-vp9.h>
-#include "../mtk_vcodec_util.h"
#include "../mtk_vcodec_dec.h"
-#include "../mtk_vcodec_intr.h"
+#include "../../common/mtk_vcodec_intr.h"
#include "../vdec_drv_base.h"
#include "../vdec_drv_if.h"
#include "../vdec_vpu_if.h"
@@ -445,7 +444,7 @@ struct vdec_vp9_slice_ref {
* @counts_helper: counts table according to newest kernel spec
*/
struct vdec_vp9_slice_instance {
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
struct vdec_vpu_inst vpu;
int seq;
@@ -506,7 +505,7 @@ static int vdec_vp9_slice_init_default_frame_ctx(struct vdec_vp9_slice_instance
{
struct vdec_vp9_slice_frame_ctx *remote_frame_ctx;
struct vdec_vp9_slice_frame_ctx *frame_ctx;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
struct vdec_vp9_slice_init_vsi *vsi;
int ret = 0;
@@ -518,7 +517,7 @@ static int vdec_vp9_slice_init_default_frame_ctx(struct vdec_vp9_slice_instance
remote_frame_ctx = mtk_vcodec_fw_map_dm_addr(ctx->dev->fw_handler,
(u32)vsi->default_frame_ctx);
if (!remote_frame_ctx) {
- mtk_vcodec_err(instance, "failed to map default frame ctx\n");
+ mtk_vdec_err(ctx, "failed to map default frame ctx\n");
return -EINVAL;
}
@@ -543,7 +542,7 @@ out:
static int vdec_vp9_slice_alloc_working_buffer(struct vdec_vp9_slice_instance *instance,
struct vdec_vp9_slice_vsi *vsi)
{
- struct mtk_vcodec_ctx *ctx = instance->ctx;
+ struct mtk_vcodec_dec_ctx *ctx = instance->ctx;
enum vdec_vp9_slice_resolution_level level;
/* super blocks */
unsigned int max_sb_w;
@@ -577,8 +576,8 @@ static int vdec_vp9_slice_alloc_working_buffer(struct vdec_vp9_slice_instance *i
if (level == instance->level)
return 0;
- mtk_vcodec_debug(instance, "resolution level changed, from %u to %u, %ux%u",
- instance->level, level, w, h);
+ mtk_vdec_debug(ctx, "resolution level changed, from %u to %u, %ux%u",
+ instance->level, level, w, h);
max_sb_w = DIV_ROUND_UP(max_w, 64);
max_sb_h = DIV_ROUND_UP(max_h, 64);
@@ -635,7 +634,7 @@ err:
static void vdec_vp9_slice_free_working_buffer(struct vdec_vp9_slice_instance *instance)
{
- struct mtk_vcodec_ctx *ctx = instance->ctx;
+ struct mtk_vcodec_dec_ctx *ctx = instance->ctx;
int i;
for (i = 0; i < ARRAY_SIZE(instance->mv); i++) {
@@ -1025,9 +1024,9 @@ static int vdec_vp9_slice_setup_prob_buffer(struct vdec_vp9_slice_instance *inst
uh = &vsi->frame.uh;
- mtk_vcodec_debug(instance, "ctx dirty %u idx %d\n",
- instance->dirty[uh->frame_context_idx],
- uh->frame_context_idx);
+ mtk_vdec_debug(instance->ctx, "ctx dirty %u idx %d\n",
+ instance->dirty[uh->frame_context_idx],
+ uh->frame_context_idx);
if (instance->dirty[uh->frame_context_idx])
frame_ctx = &instance->frame_ctx[uh->frame_context_idx];
@@ -1051,7 +1050,7 @@ static void vdec_vp9_slice_setup_seg_buffer(struct vdec_vp9_slice_instance *inst
uh->error_resilient_mode ||
uh->frame_width != instance->width ||
uh->frame_height != instance->height) {
- mtk_vcodec_debug(instance, "reset seg\n");
+ mtk_vdec_debug(instance->ctx, "reset seg\n");
memset(buf->va, 0, buf->size);
}
}
@@ -1093,16 +1092,14 @@ static int vdec_vp9_slice_setup_tile_buffer(struct vdec_vp9_slice_instance *inst
cols = 1 << cols_log2;
if (rows > 4 || cols > 64) {
- mtk_vcodec_err(instance, "tile_rows %u tile_cols %u\n",
- rows, cols);
+ mtk_vdec_err(instance->ctx, "tile_rows %u tile_cols %u\n", rows, cols);
return -EINVAL;
}
offset = uh->uncompressed_header_size +
uh->header_size_in_bytes;
if (bs->size <= offset) {
- mtk_vcodec_err(instance, "bs size %zu tile offset %u\n",
- bs->size, offset);
+ mtk_vdec_err(instance->ctx, "bs size %zu tile offset %u\n", bs->size, offset);
return -EINVAL;
}
@@ -1596,14 +1593,12 @@ static int vdec_vp9_slice_update_single(struct vdec_vp9_slice_instance *instance
vsi = &pfc->vsi;
memcpy(&pfc->state[0], &vsi->state, sizeof(vsi->state));
- mtk_vcodec_debug(instance, "Frame %u Y_CRC %08x %08x %08x %08x\n",
- pfc->seq,
- vsi->state.crc[0], vsi->state.crc[1],
- vsi->state.crc[2], vsi->state.crc[3]);
- mtk_vcodec_debug(instance, "Frame %u C_CRC %08x %08x %08x %08x\n",
- pfc->seq,
- vsi->state.crc[4], vsi->state.crc[5],
- vsi->state.crc[6], vsi->state.crc[7]);
+ mtk_vdec_debug(instance->ctx, "Frame %u Y_CRC %08x %08x %08x %08x\n",
+ pfc->seq, vsi->state.crc[0], vsi->state.crc[1],
+ vsi->state.crc[2], vsi->state.crc[3]);
+ mtk_vdec_debug(instance->ctx, "Frame %u C_CRC %08x %08x %08x %08x\n",
+ pfc->seq, vsi->state.crc[4], vsi->state.crc[5],
+ vsi->state.crc[6], vsi->state.crc[7]);
vdec_vp9_slice_update_prob(instance, vsi);
@@ -1624,10 +1619,10 @@ static int vdec_vp9_slice_update_lat(struct vdec_vp9_slice_instance *instance,
vsi = &pfc->vsi;
memcpy(&pfc->state[0], &vsi->state, sizeof(vsi->state));
- mtk_vcodec_debug(instance, "Frame %u LAT CRC 0x%08x %lx %lx\n",
- pfc->seq, vsi->state.crc[0],
- (unsigned long)vsi->trans.dma_addr,
- (unsigned long)vsi->trans.dma_addr_end);
+ mtk_vdec_debug(instance->ctx, "Frame %u LAT CRC 0x%08x %lx %lx\n",
+ pfc->seq, vsi->state.crc[0],
+ (unsigned long)vsi->trans.dma_addr,
+ (unsigned long)vsi->trans.dma_addr_end);
/* buffer full, need to re-decode */
if (vsi->state.full) {
@@ -1844,19 +1839,17 @@ static int vdec_vp9_slice_update_core(struct vdec_vp9_slice_instance *instance,
vsi = &pfc->vsi;
memcpy(&pfc->state[1], &vsi->state, sizeof(vsi->state));
- mtk_vcodec_debug(instance, "Frame %u Y_CRC %08x %08x %08x %08x\n",
- pfc->seq,
- vsi->state.crc[0], vsi->state.crc[1],
- vsi->state.crc[2], vsi->state.crc[3]);
- mtk_vcodec_debug(instance, "Frame %u C_CRC %08x %08x %08x %08x\n",
- pfc->seq,
- vsi->state.crc[4], vsi->state.crc[5],
- vsi->state.crc[6], vsi->state.crc[7]);
+ mtk_vdec_debug(instance->ctx, "Frame %u Y_CRC %08x %08x %08x %08x\n",
+ pfc->seq, vsi->state.crc[0], vsi->state.crc[1],
+ vsi->state.crc[2], vsi->state.crc[3]);
+ mtk_vdec_debug(instance->ctx, "Frame %u C_CRC %08x %08x %08x %08x\n",
+ pfc->seq, vsi->state.crc[4], vsi->state.crc[5],
+ vsi->state.crc[6], vsi->state.crc[7]);
return 0;
}
-static int vdec_vp9_slice_init(struct mtk_vcodec_ctx *ctx)
+static int vdec_vp9_slice_init(struct mtk_vcodec_dec_ctx *ctx)
{
struct vdec_vp9_slice_instance *instance;
struct vdec_vp9_slice_init_vsi *vsi;
@@ -1874,7 +1867,7 @@ static int vdec_vp9_slice_init(struct mtk_vcodec_ctx *ctx)
ret = vpu_dec_init(&instance->vpu);
if (ret) {
- mtk_vcodec_err(instance, "failed to init vpu dec, ret %d\n", ret);
+ mtk_vdec_err(ctx, "failed to init vpu dec, ret %d\n", ret);
goto error_vpu_init;
}
@@ -1882,7 +1875,7 @@ static int vdec_vp9_slice_init(struct mtk_vcodec_ctx *ctx)
vsi = instance->vpu.vsi;
if (!vsi) {
- mtk_vcodec_err(instance, "failed to get VP9 vsi\n");
+ mtk_vdec_err(ctx, "failed to get VP9 vsi\n");
ret = -EINVAL;
goto error_vsi;
}
@@ -1890,7 +1883,7 @@ static int vdec_vp9_slice_init(struct mtk_vcodec_ctx *ctx)
instance->core_vsi = mtk_vcodec_fw_map_dm_addr(ctx->dev->fw_handler,
(u32)vsi->core_vsi);
if (!instance->core_vsi) {
- mtk_vcodec_err(instance, "failed to get VP9 core vsi\n");
+ mtk_vdec_err(ctx, "failed to get VP9 core vsi\n");
ret = -EINVAL;
goto error_vsi;
}
@@ -1931,7 +1924,7 @@ static int vdec_vp9_slice_flush(void *h_vdec, struct mtk_vcodec_mem *bs,
{
struct vdec_vp9_slice_instance *instance = h_vdec;
- mtk_vcodec_debug(instance, "flush ...\n");
+ mtk_vdec_debug(instance->ctx, "flush ...\n");
if (instance->ctx->dev->vdec_pdata->hw_arch != MTK_VDEC_PURE_SINGLE_CORE)
vdec_msg_queue_wait_lat_buf_full(&instance->ctx->msg_queue);
return vpu_dec_reset(&instance->vpu);
@@ -1939,11 +1932,10 @@ static int vdec_vp9_slice_flush(void *h_vdec, struct mtk_vcodec_mem *bs,
static void vdec_vp9_slice_get_pic_info(struct vdec_vp9_slice_instance *instance)
{
- struct mtk_vcodec_ctx *ctx = instance->ctx;
+ struct mtk_vcodec_dec_ctx *ctx = instance->ctx;
unsigned int data[3];
- mtk_vcodec_debug(instance, "w %u h %u\n",
- ctx->picinfo.pic_w, ctx->picinfo.pic_h);
+ mtk_vdec_debug(instance->ctx, "w %u h %u\n", ctx->picinfo.pic_w, ctx->picinfo.pic_h);
data[0] = ctx->picinfo.pic_w;
data[1] = ctx->picinfo.pic_h;
@@ -1975,11 +1967,10 @@ static int vdec_vp9_slice_get_param(void *h_vdec, enum vdec_get_param_type type,
vdec_vp9_slice_get_dpb_size(instance, out);
break;
case GET_PARAM_CROP_INFO:
- mtk_vcodec_debug(instance, "No need to get vp9 crop information.");
+ mtk_vdec_debug(instance->ctx, "No need to get vp9 crop information.");
break;
default:
- mtk_vcodec_err(instance, "invalid get parameter type=%d\n",
- type);
+ mtk_vdec_err(instance->ctx, "invalid get parameter type=%d\n", type);
return -EINVAL;
}
@@ -1992,7 +1983,7 @@ static int vdec_vp9_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
struct vdec_vp9_slice_instance *instance = h_vdec;
struct vdec_vp9_slice_pfc *pfc = &instance->sc_pfc;
struct vdec_vp9_slice_vsi *vsi;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
int ret;
if (!instance || !instance->ctx)
@@ -2011,14 +2002,14 @@ static int vdec_vp9_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
ret = vdec_vp9_slice_setup_single(instance, bs, fb, pfc);
if (ret) {
- mtk_vcodec_err(instance, "Failed to setup VP9 single ret %d\n", ret);
+ mtk_vdec_err(ctx, "Failed to setup VP9 single ret %d\n", ret);
return ret;
}
vdec_vp9_slice_vsi_to_remote(vsi, instance->vsi);
ret = vpu_dec_start(&instance->vpu, NULL, 0);
if (ret) {
- mtk_vcodec_err(instance, "Failed to dec VP9 ret %d\n", ret);
+ mtk_vdec_err(ctx, "Failed to dec VP9 ret %d\n", ret);
return ret;
}
@@ -2026,7 +2017,7 @@ static int vdec_vp9_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
WAIT_INTR_TIMEOUT_MS, MTK_VDEC_CORE);
/* update remote vsi if decode timeout */
if (ret) {
- mtk_vcodec_err(instance, "VP9 decode timeout %d\n", ret);
+ mtk_vdec_err(ctx, "VP9 decode timeout %d\n", ret);
WRITE_ONCE(instance->vsi->state.timeout, 1);
}
@@ -2035,7 +2026,7 @@ static int vdec_vp9_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
vdec_vp9_slice_vsi_from_remote(vsi, instance->vsi, 0);
ret = vdec_vp9_slice_update_single(instance, pfc);
if (ret) {
- mtk_vcodec_err(instance, "VP9 decode error: %d\n", ret);
+ mtk_vdec_err(ctx, "VP9 decode error: %d\n", ret);
return ret;
}
@@ -2050,7 +2041,7 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
struct vdec_lat_buf *lat_buf;
struct vdec_vp9_slice_pfc *pfc;
struct vdec_vp9_slice_vsi *vsi;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
int ret;
if (!instance || !instance->ctx)
@@ -2069,7 +2060,7 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
lat_buf = vdec_msg_queue_dqbuf(&instance->ctx->msg_queue.lat_ctx);
if (!lat_buf) {
- mtk_vcodec_debug(instance, "Failed to get VP9 lat buf\n");
+ mtk_vdec_debug(ctx, "Failed to get VP9 lat buf\n");
return -EAGAIN;
}
pfc = (struct vdec_vp9_slice_pfc *)lat_buf->private_data;
@@ -2081,14 +2072,14 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
ret = vdec_vp9_slice_setup_lat(instance, bs, lat_buf, pfc);
if (ret) {
- mtk_vcodec_err(instance, "Failed to setup VP9 lat ret %d\n", ret);
+ mtk_vdec_err(ctx, "Failed to setup VP9 lat ret %d\n", ret);
goto err_free_fb_out;
}
vdec_vp9_slice_vsi_to_remote(vsi, instance->vsi);
ret = vpu_dec_start(&instance->vpu, NULL, 0);
if (ret) {
- mtk_vcodec_err(instance, "Failed to dec VP9 ret %d\n", ret);
+ mtk_vdec_err(ctx, "Failed to dec VP9 ret %d\n", ret);
goto err_free_fb_out;
}
@@ -2097,7 +2088,7 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
WAIT_INTR_TIMEOUT_MS, MTK_VDEC_LAT0);
/* update remote vsi if decode timeout */
if (ret) {
- mtk_vcodec_err(instance, "VP9 decode timeout %d pic %d\n", ret, pfc->seq);
+ mtk_vdec_err(ctx, "VP9 decode timeout %d pic %d\n", ret, pfc->seq);
WRITE_ONCE(instance->vsi->state.timeout, 1);
}
vpu_dec_end(&instance->vpu);
@@ -2108,13 +2099,13 @@ static int vdec_vp9_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
/* LAT trans full, no more UBE or decode timeout */
if (ret) {
- mtk_vcodec_err(instance, "VP9 decode error: %d\n", ret);
+ mtk_vdec_err(ctx, "VP9 decode error: %d\n", ret);
goto err_free_fb_out;
}
- mtk_vcodec_debug(instance, "lat dma addr: 0x%lx 0x%lx\n",
- (unsigned long)pfc->vsi.trans.dma_addr,
- (unsigned long)pfc->vsi.trans.dma_addr_end);
+ mtk_vdec_debug(ctx, "lat dma addr: 0x%lx 0x%lx\n",
+ (unsigned long)pfc->vsi.trans.dma_addr,
+ (unsigned long)pfc->vsi.trans.dma_addr_end);
vdec_msg_queue_update_ube_wptr(&ctx->msg_queue,
vsi->trans.dma_addr_end +
@@ -2145,7 +2136,7 @@ static int vdec_vp9_slice_core_decode(struct vdec_lat_buf *lat_buf)
{
struct vdec_vp9_slice_instance *instance;
struct vdec_vp9_slice_pfc *pfc;
- struct mtk_vcodec_ctx *ctx = NULL;
+ struct mtk_vcodec_dec_ctx *ctx = NULL;
struct vdec_fb *fb = NULL;
int ret = -EINVAL;
@@ -2169,14 +2160,14 @@ static int vdec_vp9_slice_core_decode(struct vdec_lat_buf *lat_buf)
ret = vdec_vp9_slice_setup_core(instance, fb, lat_buf, pfc);
if (ret) {
- mtk_vcodec_err(instance, "vdec_vp9_slice_setup_core\n");
+ mtk_vdec_err(ctx, "vdec_vp9_slice_setup_core\n");
goto err;
}
vdec_vp9_slice_vsi_to_remote(&pfc->vsi, instance->core_vsi);
ret = vpu_dec_core(&instance->vpu);
if (ret) {
- mtk_vcodec_err(instance, "vpu_dec_core\n");
+ mtk_vdec_err(ctx, "vpu_dec_core\n");
goto err;
}
@@ -2185,7 +2176,7 @@ static int vdec_vp9_slice_core_decode(struct vdec_lat_buf *lat_buf)
WAIT_INTR_TIMEOUT_MS, MTK_VDEC_CORE);
/* update remote vsi if decode timeout */
if (ret) {
- mtk_vcodec_err(instance, "VP9 core timeout pic %d\n", pfc->seq);
+ mtk_vdec_err(ctx, "VP9 core timeout pic %d\n", pfc->seq);
WRITE_ONCE(instance->core_vsi->state.timeout, 1);
}
vpu_dec_core_end(&instance->vpu);
@@ -2194,13 +2185,13 @@ static int vdec_vp9_slice_core_decode(struct vdec_lat_buf *lat_buf)
vdec_vp9_slice_vsi_from_remote(&pfc->vsi, instance->core_vsi, 1);
ret = vdec_vp9_slice_update_core(instance, lat_buf, pfc);
if (ret) {
- mtk_vcodec_err(instance, "vdec_vp9_slice_update_core\n");
+ mtk_vdec_err(ctx, "vdec_vp9_slice_update_core\n");
goto err;
}
pfc->vsi.trans.dma_addr_end += ctx->msg_queue.wdma_addr.dma_addr;
- mtk_vcodec_debug(instance, "core dma_addr_end 0x%lx\n",
- (unsigned long)pfc->vsi.trans.dma_addr_end);
+ mtk_vdec_debug(ctx, "core dma_addr_end 0x%lx\n",
+ (unsigned long)pfc->vsi.trans.dma_addr_end);
vdec_msg_queue_update_ube_rptr(&ctx->msg_queue, pfc->vsi.trans.dma_addr_end);
ctx->dev->vdec_pdata->cap_to_disp(ctx, 0, lat_buf->src_buf_req);
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_drv_base.h b/drivers/media/platform/mediatek/vcodec/decoder/vdec_drv_base.h
index e913f963b7db..f6abb9365234 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_drv_base.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_drv_base.h
@@ -15,7 +15,7 @@ struct vdec_common_if {
* @ctx : [in] mtk v4l2 context
* @h_vdec : [out] driver handle
*/
- int (*init)(struct mtk_vcodec_ctx *ctx);
+ int (*init)(struct mtk_vcodec_dec_ctx *ctx);
/**
* (*decode)() - trigger decode
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_drv_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec_drv_if.c
index 06d393174cc2..d0b459b1603f 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_drv_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_drv_if.c
@@ -14,7 +14,7 @@
#include "vdec_drv_base.h"
#include "mtk_vcodec_dec_pm.h"
-int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
+int vdec_if_init(struct mtk_vcodec_dec_ctx *ctx, unsigned int fourcc)
{
enum mtk_vdec_hw_arch hw_arch = ctx->dev->vdec_pdata->hw_arch;
int ret = 0;
@@ -68,14 +68,14 @@ int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
return ret;
}
-int vdec_if_decode(struct mtk_vcodec_ctx *ctx, struct mtk_vcodec_mem *bs,
+int vdec_if_decode(struct mtk_vcodec_dec_ctx *ctx, struct mtk_vcodec_mem *bs,
struct vdec_fb *fb, bool *res_chg)
{
int ret = 0;
if (bs) {
if ((bs->dma_addr & 63) != 0) {
- mtk_v4l2_err("bs dma_addr should 64 byte align");
+ mtk_v4l2_vdec_err(ctx, "bs dma_addr should 64 byte align");
return -EINVAL;
}
}
@@ -83,7 +83,7 @@ int vdec_if_decode(struct mtk_vcodec_ctx *ctx, struct mtk_vcodec_mem *bs,
if (fb) {
if (((fb->base_y.dma_addr & 511) != 0) ||
((fb->base_c.dma_addr & 511) != 0)) {
- mtk_v4l2_err("frame buffer dma_addr should 512 byte align");
+ mtk_v4l2_vdec_err(ctx, "frame buffer dma_addr should 512 byte align");
return -EINVAL;
}
}
@@ -100,7 +100,7 @@ int vdec_if_decode(struct mtk_vcodec_ctx *ctx, struct mtk_vcodec_mem *bs,
return ret;
}
-int vdec_if_get_param(struct mtk_vcodec_ctx *ctx, enum vdec_get_param_type type,
+int vdec_if_get_param(struct mtk_vcodec_dec_ctx *ctx, enum vdec_get_param_type type,
void *out)
{
int ret = 0;
@@ -115,7 +115,7 @@ int vdec_if_get_param(struct mtk_vcodec_ctx *ctx, enum vdec_get_param_type type,
return ret;
}
-void vdec_if_deinit(struct mtk_vcodec_ctx *ctx)
+void vdec_if_deinit(struct mtk_vcodec_dec_ctx *ctx)
{
if (!ctx->drv_handle)
return;
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_drv_if.h b/drivers/media/platform/mediatek/vcodec/decoder/vdec_drv_if.h
index a8da6a59a6a5..bfd297c96850 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_drv_if.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_drv_if.h
@@ -8,9 +8,7 @@
#ifndef _VDEC_DRV_IF_H_
#define _VDEC_DRV_IF_H_
-#include "mtk_vcodec_drv.h"
#include "mtk_vcodec_dec.h"
-#include "mtk_vcodec_util.h"
/**
@@ -69,14 +67,14 @@ extern const struct vdec_common_if vdec_av1_slice_lat_if;
* @ctx : [in] v4l2 context
* @fourcc : [in] video format fourcc, V4L2_PIX_FMT_H264/VP8/VP9..
*/
-int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc);
+int vdec_if_init(struct mtk_vcodec_dec_ctx *ctx, unsigned int fourcc);
/**
* vdec_if_deinit() - deinitialize decode driver
* @ctx : [in] v4l2 context
*
*/
-void vdec_if_deinit(struct mtk_vcodec_ctx *ctx);
+void vdec_if_deinit(struct mtk_vcodec_dec_ctx *ctx);
/**
* vdec_if_decode() - trigger decode
@@ -90,7 +88,7 @@ void vdec_if_deinit(struct mtk_vcodec_ctx *ctx);
*
* Return: 0 on success. -EIO on unrecoverable error.
*/
-int vdec_if_decode(struct mtk_vcodec_ctx *ctx, struct mtk_vcodec_mem *bs,
+int vdec_if_decode(struct mtk_vcodec_dec_ctx *ctx, struct mtk_vcodec_mem *bs,
struct vdec_fb *fb, bool *res_chg);
/**
@@ -99,7 +97,7 @@ int vdec_if_decode(struct mtk_vcodec_ctx *ctx, struct mtk_vcodec_mem *bs,
* @type : [in] input parameter type
* @out : [out] buffer to store query result
*/
-int vdec_if_get_param(struct mtk_vcodec_ctx *ctx, enum vdec_get_param_type type,
+int vdec_if_get_param(struct mtk_vcodec_dec_ctx *ctx, enum vdec_get_param_type type,
void *out);
#endif
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_ipi_msg.h b/drivers/media/platform/mediatek/vcodec/decoder/vdec_ipi_msg.h
index 47070be2a991..47070be2a991 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_ipi_msg.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_ipi_msg.h
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec_msg_queue.c
index 04e6dc6cfa1d..f283c4703dc6 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_msg_queue.c
@@ -8,8 +8,8 @@
#include <linux/interrupt.h>
#include <linux/kthread.h>
+#include "mtk_vcodec_dec_drv.h"
#include "mtk_vcodec_dec_pm.h"
-#include "mtk_vcodec_drv.h"
#include "vdec_msg_queue.h"
#define VDEC_MSG_QUEUE_TIMEOUT_MS 1500
@@ -77,7 +77,7 @@ int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *msg_ctx, struct vdec_lat_buf
head = vdec_get_buf_list(msg_ctx->hardware_index, buf);
if (!head) {
- mtk_v4l2_err("fail to qbuf: %d", msg_ctx->hardware_index);
+ mtk_v4l2_vdec_err(buf->ctx, "fail to qbuf: %d", msg_ctx->hardware_index);
return -EINVAL;
}
@@ -95,8 +95,8 @@ int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *msg_ctx, struct vdec_lat_buf
}
}
- mtk_v4l2_debug(3, "enqueue buf type: %d addr: 0x%p num: %d",
- msg_ctx->hardware_index, buf, msg_ctx->ready_num);
+ mtk_v4l2_vdec_dbg(3, buf->ctx, "enqueue buf type: %d addr: 0x%p num: %d",
+ msg_ctx->hardware_index, buf, msg_ctx->ready_num);
spin_unlock(&msg_ctx->ready_lock);
return 0;
@@ -123,8 +123,6 @@ struct vdec_lat_buf *vdec_msg_queue_dqbuf(struct vdec_msg_queue_ctx *msg_ctx)
spin_lock(&msg_ctx->ready_lock);
if (list_empty(&msg_ctx->ready_queue)) {
- mtk_v4l2_debug(3, "queue is NULL, type:%d num: %d",
- msg_ctx->hardware_index, msg_ctx->ready_num);
spin_unlock(&msg_ctx->ready_lock);
if (msg_ctx->hardware_index == MTK_VDEC_CORE)
@@ -146,15 +144,15 @@ struct vdec_lat_buf *vdec_msg_queue_dqbuf(struct vdec_msg_queue_ctx *msg_ctx)
head = vdec_get_buf_list(msg_ctx->hardware_index, buf);
if (!head) {
spin_unlock(&msg_ctx->ready_lock);
- mtk_v4l2_err("fail to dqbuf: %d", msg_ctx->hardware_index);
+ mtk_v4l2_vdec_err(buf->ctx, "fail to dqbuf: %d", msg_ctx->hardware_index);
return NULL;
}
list_del(head);
vdec_msg_queue_dec(&buf->ctx->msg_queue, msg_ctx->hardware_index);
msg_ctx->ready_num--;
- mtk_v4l2_debug(3, "dqueue buf type:%d addr: 0x%p num: %d",
- msg_ctx->hardware_index, buf, msg_ctx->ready_num);
+ mtk_v4l2_vdec_dbg(3, buf->ctx, "dqueue buf type:%d addr: 0x%p num: %d",
+ msg_ctx->hardware_index, buf, msg_ctx->ready_num);
spin_unlock(&msg_ctx->ready_lock);
return buf;
@@ -164,7 +162,7 @@ void vdec_msg_queue_update_ube_rptr(struct vdec_msg_queue *msg_queue, uint64_t u
{
spin_lock(&msg_queue->lat_ctx.ready_lock);
msg_queue->wdma_rptr_addr = ube_rptr;
- mtk_v4l2_debug(3, "update ube rprt (0x%llx)", ube_rptr);
+ mtk_v4l2_vdec_dbg(3, msg_queue->ctx, "update ube rprt (0x%llx)", ube_rptr);
spin_unlock(&msg_queue->lat_ctx.ready_lock);
}
@@ -172,20 +170,19 @@ void vdec_msg_queue_update_ube_wptr(struct vdec_msg_queue *msg_queue, uint64_t u
{
spin_lock(&msg_queue->lat_ctx.ready_lock);
msg_queue->wdma_wptr_addr = ube_wptr;
- mtk_v4l2_debug(3, "update ube wprt: (0x%llx 0x%llx) offset: 0x%llx",
- msg_queue->wdma_rptr_addr, msg_queue->wdma_wptr_addr,
- ube_wptr);
+ mtk_v4l2_vdec_dbg(3, msg_queue->ctx, "update ube wprt: (0x%llx 0x%llx) offset: 0x%llx",
+ msg_queue->wdma_rptr_addr, msg_queue->wdma_wptr_addr,
+ ube_wptr);
spin_unlock(&msg_queue->lat_ctx.ready_lock);
}
bool vdec_msg_queue_wait_lat_buf_full(struct vdec_msg_queue *msg_queue)
{
if (atomic_read(&msg_queue->lat_list_cnt) == NUM_BUFFER_COUNT) {
- mtk_v4l2_debug(3, "wait buf full: list(%d %d) ready_num:%d status:%d",
- atomic_read(&msg_queue->lat_list_cnt),
- atomic_read(&msg_queue->core_list_cnt),
- msg_queue->lat_ctx.ready_num,
- msg_queue->status);
+ mtk_v4l2_vdec_dbg(3, msg_queue->ctx, "wait buf full: (%d %d) ready:%d status:%d",
+ atomic_read(&msg_queue->lat_list_cnt),
+ atomic_read(&msg_queue->core_list_cnt),
+ msg_queue->lat_ctx.ready_num, msg_queue->status);
return true;
}
@@ -193,16 +190,16 @@ bool vdec_msg_queue_wait_lat_buf_full(struct vdec_msg_queue *msg_queue)
vdec_msg_queue_qbuf(&msg_queue->core_ctx, &msg_queue->empty_lat_buf);
wait_event(msg_queue->core_dec_done, msg_queue->flush_done);
- mtk_v4l2_debug(3, "flush done => ready_num:%d status:%d list(%d %d)",
- msg_queue->lat_ctx.ready_num, msg_queue->status,
- atomic_read(&msg_queue->lat_list_cnt),
- atomic_read(&msg_queue->core_list_cnt));
+ mtk_v4l2_vdec_dbg(3, msg_queue->ctx, "flush done => ready_num:%d status:%d list(%d %d)",
+ msg_queue->lat_ctx.ready_num, msg_queue->status,
+ atomic_read(&msg_queue->lat_list_cnt),
+ atomic_read(&msg_queue->core_list_cnt));
return false;
}
void vdec_msg_queue_deinit(struct vdec_msg_queue *msg_queue,
- struct mtk_vcodec_ctx *ctx)
+ struct mtk_vcodec_dec_ctx *ctx)
{
struct vdec_lat_buf *lat_buf;
struct mtk_vcodec_mem *mem;
@@ -231,6 +228,7 @@ void vdec_msg_queue_deinit(struct vdec_msg_queue *msg_queue,
mtk_vcodec_mem_free(ctx, mem);
kfree(lat_buf->private_data);
+ lat_buf->private_data = NULL;
}
if (msg_queue->wdma_addr.size)
@@ -241,9 +239,9 @@ static void vdec_msg_queue_core_work(struct work_struct *work)
{
struct vdec_msg_queue *msg_queue =
container_of(work, struct vdec_msg_queue, core_work);
- struct mtk_vcodec_ctx *ctx =
- container_of(msg_queue, struct mtk_vcodec_ctx, msg_queue);
- struct mtk_vcodec_dev *dev = ctx->dev;
+ struct mtk_vcodec_dec_ctx *ctx =
+ container_of(msg_queue, struct mtk_vcodec_dec_ctx, msg_queue);
+ struct mtk_vcodec_dec_dev *dev = ctx->dev;
struct vdec_lat_buf *lat_buf;
spin_lock(&msg_queue->core_ctx.ready_lock);
@@ -282,7 +280,7 @@ static void vdec_msg_queue_core_work(struct work_struct *work)
}
int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
- struct mtk_vcodec_ctx *ctx, core_decode_cb_t core_decode,
+ struct mtk_vcodec_dec_ctx *ctx, core_decode_cb_t core_decode,
int private_size)
{
struct vdec_lat_buf *lat_buf;
@@ -306,7 +304,8 @@ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
ctx->picinfo.buf_h);
err = mtk_vcodec_mem_alloc(ctx, &msg_queue->wdma_addr);
if (err) {
- mtk_v4l2_err("failed to allocate wdma_addr buf");
+ mtk_v4l2_vdec_err(ctx, "failed to allocate wdma_addr buf");
+ msg_queue->wdma_addr.size = 0;
return -ENOMEM;
}
msg_queue->wdma_rptr_addr = msg_queue->wdma_addr.dma_addr;
@@ -316,20 +315,21 @@ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
msg_queue->empty_lat_buf.core_decode = NULL;
msg_queue->empty_lat_buf.is_last_frame = true;
+ msg_queue->ctx = ctx;
for (i = 0; i < NUM_BUFFER_COUNT; i++) {
lat_buf = &msg_queue->lat_buf[i];
lat_buf->wdma_err_addr.size = VDEC_ERR_MAP_SZ_AVC;
err = mtk_vcodec_mem_alloc(ctx, &lat_buf->wdma_err_addr);
if (err) {
- mtk_v4l2_err("failed to allocate wdma_err_addr buf[%d]", i);
+ mtk_v4l2_vdec_err(ctx, "failed to allocate wdma_err_addr buf[%d]", i);
goto mem_alloc_err;
}
lat_buf->slice_bc_addr.size = VDEC_LAT_SLICE_HEADER_SZ;
err = mtk_vcodec_mem_alloc(ctx, &lat_buf->slice_bc_addr);
if (err) {
- mtk_v4l2_err("failed to allocate wdma_addr buf[%d]", i);
+ mtk_v4l2_vdec_err(ctx, "failed to allocate wdma_addr buf[%d]", i);
goto mem_alloc_err;
}
@@ -337,15 +337,15 @@ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
lat_buf->rd_mv_addr.size = VDEC_RD_MV_BUFFER_SZ;
err = mtk_vcodec_mem_alloc(ctx, &lat_buf->rd_mv_addr);
if (err) {
- mtk_v4l2_err("failed to allocate rd_mv_addr buf[%d]", i);
- return -ENOMEM;
+ mtk_v4l2_vdec_err(ctx, "failed to allocate rd_mv_addr buf[%d]", i);
+ goto mem_alloc_err;
}
lat_buf->tile_addr.size = VDEC_LAT_TILE_SZ;
err = mtk_vcodec_mem_alloc(ctx, &lat_buf->tile_addr);
if (err) {
- mtk_v4l2_err("failed to allocate tile_addr buf[%d]", i);
- return -ENOMEM;
+ mtk_v4l2_vdec_err(ctx, "failed to allocate tile_addr buf[%d]", i);
+ goto mem_alloc_err;
}
}
@@ -360,7 +360,7 @@ int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
lat_buf->is_last_frame = false;
err = vdec_msg_queue_qbuf(&msg_queue->lat_ctx, lat_buf);
if (err) {
- mtk_v4l2_err("failed to qbuf buf[%d]", i);
+ mtk_v4l2_vdec_err(ctx, "failed to qbuf buf[%d]", i);
goto mem_alloc_err;
}
}
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h b/drivers/media/platform/mediatek/vcodec/decoder/vdec_msg_queue.h
index 2a745e902ad1..1d9beb9e4a14 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_msg_queue.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_msg_queue.h
@@ -12,13 +12,11 @@
#include <linux/slab.h>
#include <media/videobuf2-v4l2.h>
-#include "mtk_vcodec_util.h"
-
#define NUM_BUFFER_COUNT 3
struct vdec_lat_buf;
-struct mtk_vcodec_ctx;
-struct mtk_vcodec_dev;
+struct mtk_vcodec_dec_ctx;
+struct mtk_vcodec_dec_dev;
typedef int (*core_decode_cb_t)(struct vdec_lat_buf *lat_buf);
/**
@@ -76,7 +74,7 @@ struct vdec_lat_buf {
struct media_request *src_buf_req;
void *private_data;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
core_decode_cb_t core_decode;
struct list_head lat_list;
struct list_head core_list;
@@ -100,6 +98,7 @@ struct vdec_lat_buf {
* @empty_lat_buf: the last lat buf used to flush decode
* @core_dec_done: core work queue decode done event
* @status: current context decode status for core hardware
+ * @ctx: mtk vcodec context information
*/
struct vdec_msg_queue {
struct vdec_lat_buf lat_buf[NUM_BUFFER_COUNT];
@@ -118,6 +117,7 @@ struct vdec_msg_queue {
struct vdec_lat_buf empty_lat_buf;
wait_queue_head_t core_dec_done;
int status;
+ struct mtk_vcodec_dec_ctx *ctx;
};
/**
@@ -130,7 +130,7 @@ struct vdec_msg_queue {
* Return: returns 0 if init successfully, or fail.
*/
int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
- struct mtk_vcodec_ctx *ctx, core_decode_cb_t core_decode,
+ struct mtk_vcodec_dec_ctx *ctx, core_decode_cb_t core_decode,
int private_size);
/**
@@ -186,6 +186,6 @@ bool vdec_msg_queue_wait_lat_buf_full(struct vdec_msg_queue *msg_queue);
* @ctx: v4l2 ctx
*/
void vdec_msg_queue_deinit(struct vdec_msg_queue *msg_queue,
- struct mtk_vcodec_ctx *ctx);
+ struct mtk_vcodec_dec_ctx *ctx);
#endif
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_vpu_if.c b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
index df309e8e9379..82e57ae983d5 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c
@@ -4,19 +4,17 @@
* Author: PC Chen <pc.chen@mediatek.com>
*/
-#include "mtk_vcodec_drv.h"
-#include "mtk_vcodec_util.h"
+#include "mtk_vcodec_dec_drv.h"
#include "vdec_drv_if.h"
#include "vdec_ipi_msg.h"
#include "vdec_vpu_if.h"
-#include "mtk_vcodec_fw.h"
static void handle_init_ack_msg(const struct vdec_vpu_ipi_init_ack *msg)
{
struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
(unsigned long)msg->ap_inst_addr;
- mtk_vcodec_debug(vpu, "+ ap_inst_addr = 0x%llx", msg->ap_inst_addr);
+ mtk_vdec_debug(vpu->ctx, "+ ap_inst_addr = 0x%llx", msg->ap_inst_addr);
/* mapping VPU address to kernel virtual address */
/* the content in vsi is initialized to 0 in VPU */
@@ -24,7 +22,7 @@ static void handle_init_ack_msg(const struct vdec_vpu_ipi_init_ack *msg)
msg->vpu_inst_addr);
vpu->inst_addr = msg->vpu_inst_addr;
- mtk_vcodec_debug(vpu, "- vpu_inst_addr = 0x%x", vpu->inst_addr);
+ mtk_vdec_debug(vpu->ctx, "- vpu_inst_addr = 0x%x", vpu->inst_addr);
/* Set default ABI version if dealing with unversioned firmware. */
vpu->fw_abi_version = 0;
@@ -40,7 +38,7 @@ static void handle_init_ack_msg(const struct vdec_vpu_ipi_init_ack *msg)
/* Check firmware version. */
vpu->fw_abi_version = msg->vdec_abi_version;
- mtk_vcodec_debug(vpu, "firmware version 0x%x\n", vpu->fw_abi_version);
+ mtk_vdec_debug(vpu->ctx, "firmware version 0x%x\n", vpu->fw_abi_version);
switch (vpu->fw_abi_version) {
case 1:
break;
@@ -48,8 +46,7 @@ static void handle_init_ack_msg(const struct vdec_vpu_ipi_init_ack *msg)
vpu->inst_id = msg->inst_id;
break;
default:
- mtk_vcodec_err(vpu, "unhandled firmware version 0x%x\n",
- vpu->fw_abi_version);
+ mtk_vdec_err(vpu->ctx, "unhandled firmware version 0x%x\n", vpu->fw_abi_version);
vpu->failure = 1;
break;
}
@@ -60,7 +57,7 @@ static void handle_get_param_msg_ack(const struct vdec_vpu_ipi_get_param_ack *ms
struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
(unsigned long)msg->ap_inst_addr;
- mtk_vcodec_debug(vpu, "+ ap_inst_addr = 0x%llx", msg->ap_inst_addr);
+ mtk_vdec_debug(vpu->ctx, "+ ap_inst_addr = 0x%llx", msg->ap_inst_addr);
/* param_type is enum vdec_get_param_type */
switch (msg->param_type) {
@@ -69,12 +66,27 @@ static void handle_get_param_msg_ack(const struct vdec_vpu_ipi_get_param_ack *ms
vpu->fb_sz[1] = msg->data[1];
break;
default:
- mtk_vcodec_err(vpu, "invalid get param type=%d", msg->param_type);
+ mtk_vdec_err(vpu->ctx, "invalid get param type=%d", msg->param_type);
vpu->failure = 1;
break;
}
}
+static bool vpu_dec_check_ap_inst(struct mtk_vcodec_dec_dev *dec_dev, struct vdec_vpu_inst *vpu)
+{
+ struct mtk_vcodec_dec_ctx *ctx;
+ int ret = false;
+
+ list_for_each_entry(ctx, &dec_dev->ctx_list, list) {
+ if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) {
+ ret = true;
+ break;
+ }
+ }
+
+ return ret;
+}
+
/*
* vpu_dec_ipi_handler - Handler for VPU ipi message.
*
@@ -87,44 +99,51 @@ static void handle_get_param_msg_ack(const struct vdec_vpu_ipi_get_param_ack *ms
*/
static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
{
+ struct mtk_vcodec_dec_dev *dec_dev;
const struct vdec_vpu_ipi_ack *msg = data;
- struct vdec_vpu_inst *vpu = (struct vdec_vpu_inst *)
- (unsigned long)msg->ap_inst_addr;
+ struct vdec_vpu_inst *vpu;
- if (!vpu) {
- mtk_v4l2_err("ap_inst_addr is NULL, did the SCP hang or crash?");
+ dec_dev = (struct mtk_vcodec_dec_dev *)priv;
+ vpu = (struct vdec_vpu_inst *)(unsigned long)msg->ap_inst_addr;
+ if (!priv || !vpu) {
+ pr_err(MTK_DBG_V4L2_STR "ap_inst_addr is NULL, did the SCP hang or crash?");
return;
}
- mtk_vcodec_debug(vpu, "+ id=%X", msg->msg_id);
+ if (!vpu_dec_check_ap_inst(dec_dev, vpu) || msg->msg_id < VPU_IPIMSG_DEC_INIT_ACK ||
+ msg->msg_id > VPU_IPIMSG_DEC_GET_PARAM_ACK) {
+ mtk_v4l2_vdec_err(vpu->ctx, "vdec msg id not correctly => 0x%x", msg->msg_id);
+ vpu->failure = -EINVAL;
+ goto error;
+ }
vpu->failure = msg->status;
- vpu->signaled = 1;
+ if (msg->status != 0)
+ goto error;
- if (msg->status == 0) {
- switch (msg->msg_id) {
- case VPU_IPIMSG_DEC_INIT_ACK:
- handle_init_ack_msg(data);
- break;
+ switch (msg->msg_id) {
+ case VPU_IPIMSG_DEC_INIT_ACK:
+ handle_init_ack_msg(data);
+ break;
- case VPU_IPIMSG_DEC_START_ACK:
- case VPU_IPIMSG_DEC_END_ACK:
- case VPU_IPIMSG_DEC_DEINIT_ACK:
- case VPU_IPIMSG_DEC_RESET_ACK:
- case VPU_IPIMSG_DEC_CORE_ACK:
- case VPU_IPIMSG_DEC_CORE_END_ACK:
- break;
+ case VPU_IPIMSG_DEC_START_ACK:
+ case VPU_IPIMSG_DEC_END_ACK:
+ case VPU_IPIMSG_DEC_DEINIT_ACK:
+ case VPU_IPIMSG_DEC_RESET_ACK:
+ case VPU_IPIMSG_DEC_CORE_ACK:
+ case VPU_IPIMSG_DEC_CORE_END_ACK:
+ break;
- case VPU_IPIMSG_DEC_GET_PARAM_ACK:
- handle_get_param_msg_ack(data);
- break;
- default:
- mtk_vcodec_err(vpu, "invalid msg=%X", msg->msg_id);
- break;
- }
+ case VPU_IPIMSG_DEC_GET_PARAM_ACK:
+ handle_get_param_msg_ack(data);
+ break;
+ default:
+ mtk_vdec_err(vpu->ctx, "invalid msg=%X", msg->msg_id);
+ break;
}
- mtk_vcodec_debug(vpu, "- id=%X", msg->msg_id);
+error:
+ vpu->signaled = 1;
}
static int vcodec_vpu_send_msg(struct vdec_vpu_inst *vpu, void *msg, int len)
@@ -132,7 +151,7 @@ static int vcodec_vpu_send_msg(struct vdec_vpu_inst *vpu, void *msg, int len)
int err, id, msgid;
msgid = *(uint32_t *)msg;
- mtk_vcodec_debug(vpu, "id=%X", msgid);
+ mtk_vdec_debug(vpu->ctx, "id=%X", msgid);
vpu->failure = 0;
vpu->signaled = 0;
@@ -150,8 +169,8 @@ static int vcodec_vpu_send_msg(struct vdec_vpu_inst *vpu, void *msg, int len)
err = mtk_vcodec_fw_ipi_send(vpu->ctx->dev->fw_handler, id, msg,
len, 2000);
if (err) {
- mtk_vcodec_err(vpu, "send fail vpu_id=%d msg_id=%X status=%d",
- id, msgid, err);
+ mtk_vdec_err(vpu->ctx, "send fail vpu_id=%d msg_id=%X status=%d",
+ id, msgid, err);
return err;
}
@@ -163,7 +182,7 @@ static int vcodec_send_ap_ipi(struct vdec_vpu_inst *vpu, unsigned int msg_id)
struct vdec_ap_ipi_cmd msg;
int err = 0;
- mtk_vcodec_debug(vpu, "+ id=%X", msg_id);
+ mtk_vdec_debug(vpu->ctx, "+ id=%X", msg_id);
memset(&msg, 0, sizeof(msg));
msg.msg_id = msg_id;
@@ -174,7 +193,7 @@ static int vcodec_send_ap_ipi(struct vdec_vpu_inst *vpu, unsigned int msg_id)
msg.codec_type = vpu->codec_type;
err = vcodec_vpu_send_msg(vpu, &msg, sizeof(msg));
- mtk_vcodec_debug(vpu, "- id=%X ret=%d", msg_id, err);
+ mtk_vdec_debug(vpu->ctx, "- id=%X ret=%d", msg_id, err);
return err;
}
@@ -183,24 +202,23 @@ int vpu_dec_init(struct vdec_vpu_inst *vpu)
struct vdec_ap_ipi_init msg;
int err;
- mtk_vcodec_debug_enter(vpu);
-
init_waitqueue_head(&vpu->wq);
vpu->handler = vpu_dec_ipi_handler;
+ vpu->ctx->vpu_inst = vpu;
err = mtk_vcodec_fw_ipi_register(vpu->ctx->dev->fw_handler, vpu->id,
- vpu->handler, "vdec", NULL);
+ vpu->handler, "vdec", vpu->ctx->dev);
if (err) {
- mtk_vcodec_err(vpu, "vpu_ipi_register fail status=%d", err);
+ mtk_vdec_err(vpu->ctx, "vpu_ipi_register fail status=%d", err);
return err;
}
if (vpu->ctx->dev->vdec_pdata->hw_arch == MTK_VDEC_LAT_SINGLE_CORE) {
err = mtk_vcodec_fw_ipi_register(vpu->ctx->dev->fw_handler,
vpu->core_id, vpu->handler,
- "vdec", NULL);
+ "vdec", vpu->ctx->dev);
if (err) {
- mtk_vcodec_err(vpu, "vpu_ipi_register core fail status=%d", err);
+ mtk_vdec_err(vpu->ctx, "vpu_ipi_register core fail status=%d", err);
return err;
}
}
@@ -210,10 +228,10 @@ int vpu_dec_init(struct vdec_vpu_inst *vpu)
msg.ap_inst_addr = (unsigned long)vpu;
msg.codec_type = vpu->codec_type;
- mtk_vcodec_debug(vpu, "vdec_inst=%p", vpu);
+ mtk_vdec_debug(vpu->ctx, "vdec_inst=%p", vpu);
err = vcodec_vpu_send_msg(vpu, (void *)&msg, sizeof(msg));
- mtk_vcodec_debug(vpu, "- ret=%d", err);
+ mtk_vdec_debug(vpu->ctx, "- ret=%d", err);
return err;
}
@@ -223,10 +241,8 @@ int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len)
int i;
int err = 0;
- mtk_vcodec_debug_enter(vpu);
-
if (len > ARRAY_SIZE(msg.data)) {
- mtk_vcodec_err(vpu, "invalid len = %d\n", len);
+ mtk_vdec_err(vpu->ctx, "invalid len = %d\n", len);
return -EINVAL;
}
@@ -242,7 +258,7 @@ int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len)
msg.codec_type = vpu->codec_type;
err = vcodec_vpu_send_msg(vpu, (void *)&msg, sizeof(msg));
- mtk_vcodec_debug(vpu, "- ret=%d", err);
+ mtk_vdec_debug(vpu->ctx, "- ret=%d", err);
return err;
}
@@ -252,10 +268,8 @@ int vpu_dec_get_param(struct vdec_vpu_inst *vpu, uint32_t *data,
struct vdec_ap_ipi_get_param msg;
int err;
- mtk_vcodec_debug_enter(vpu);
-
if (len > ARRAY_SIZE(msg.data)) {
- mtk_vcodec_err(vpu, "invalid len = %d\n", len);
+ mtk_vdec_err(vpu->ctx, "invalid len = %d\n", len);
return -EINVAL;
}
@@ -267,7 +281,7 @@ int vpu_dec_get_param(struct vdec_vpu_inst *vpu, uint32_t *data,
msg.codec_type = vpu->codec_type;
err = vcodec_vpu_send_msg(vpu, (void *)&msg, sizeof(msg));
- mtk_vcodec_debug(vpu, "- ret=%d", err);
+ mtk_vdec_debug(vpu->ctx, "- ret=%d", err);
return err;
}
diff --git a/drivers/media/platform/mediatek/vcodec/vdec_vpu_if.h b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.h
index 0436bba91457..fbb3f34a73f0 100644
--- a/drivers/media/platform/mediatek/vcodec/vdec_vpu_if.h
+++ b/drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.h
@@ -7,9 +7,7 @@
#ifndef _VDEC_VPU_IF_H_
#define _VDEC_VPU_IF_H_
-#include "mtk_vcodec_fw.h"
-
-struct mtk_vcodec_ctx;
+struct mtk_vcodec_dec_ctx;
/**
* struct vdec_vpu_inst - VPU instance for video codec
@@ -40,7 +38,7 @@ struct vdec_vpu_inst {
uint32_t fw_abi_version;
uint32_t inst_id;
unsigned int signaled;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_dec_ctx *ctx;
wait_queue_head_t wq;
mtk_vcodec_ipi_handler handler;
unsigned int codec_type;
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/Makefile b/drivers/media/platform/mediatek/vcodec/encoder/Makefile
new file mode 100644
index 000000000000..e621b5b7e5e6
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/encoder/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-enc.o
+
+mtk-vcodec-enc-y := venc/venc_vp8_if.o \
+ venc/venc_h264_if.o \
+ mtk_vcodec_enc.o \
+ mtk_vcodec_enc_drv.o \
+ mtk_vcodec_enc_pm.o \
+ venc_drv_if.o \
+ venc_vpu_if.o \
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
index 315e97a2450e..04948d3eb011 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
@@ -10,10 +10,7 @@
#include <media/videobuf2-dma-contig.h>
#include <linux/pm_runtime.h>
-#include "mtk_vcodec_drv.h"
#include "mtk_vcodec_enc.h"
-#include "mtk_vcodec_intr.h"
-#include "mtk_vcodec_util.h"
#include "venc_drv_if.h"
#define MTK_VENC_MIN_W 160U
@@ -45,69 +42,59 @@ static const struct v4l2_frmsize_stepwise mtk_venc_4k_framesizes = {
static int vidioc_venc_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct mtk_vcodec_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct mtk_vcodec_enc_ctx *ctx = ctrl_to_enc_ctx(ctrl);
struct mtk_enc_params *p = &ctx->enc_params;
int ret = 0;
switch (ctrl->id) {
case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
- mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_BITRATE_MODE val= %d",
- ctrl->val);
+ mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_BITRATE_MODE val= %d", ctrl->val);
if (ctrl->val != V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) {
- mtk_v4l2_err("Unsupported bitrate mode =%d", ctrl->val);
+ mtk_v4l2_venc_err(ctx, "Unsupported bitrate mode =%d", ctrl->val);
ret = -EINVAL;
}
break;
case V4L2_CID_MPEG_VIDEO_BITRATE:
- mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_BITRATE val = %d",
- ctrl->val);
+ mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_BITRATE val = %d", ctrl->val);
p->bitrate = ctrl->val;
ctx->param_change |= MTK_ENCODE_PARAM_BITRATE;
break;
case V4L2_CID_MPEG_VIDEO_B_FRAMES:
- mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_B_FRAMES val = %d",
- ctrl->val);
+ mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_B_FRAMES val = %d", ctrl->val);
p->num_b_frame = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
- mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE val = %d",
- ctrl->val);
+ mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE val = %d",
+ ctrl->val);
p->rc_frame = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
- mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_H264_MAX_QP val = %d",
- ctrl->val);
+ mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_H264_MAX_QP val = %d", ctrl->val);
p->h264_max_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
- mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_HEADER_MODE val = %d",
- ctrl->val);
+ mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_HEADER_MODE val = %d", ctrl->val);
p->seq_hdr_mode = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
- mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE val = %d",
- ctrl->val);
+ mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE val = %d", ctrl->val);
p->rc_mb = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
- mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_H264_PROFILE val = %d",
- ctrl->val);
+ mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_H264_PROFILE val = %d", ctrl->val);
p->h264_profile = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
- mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_H264_LEVEL val = %d",
- ctrl->val);
+ mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_H264_LEVEL val = %d", ctrl->val);
p->h264_level = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:
- mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_H264_I_PERIOD val = %d",
- ctrl->val);
+ mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_H264_I_PERIOD val = %d", ctrl->val);
p->intra_period = ctrl->val;
ctx->param_change |= MTK_ENCODE_PARAM_INTRA_PERIOD;
break;
case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
- mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_GOP_SIZE val = %d",
- ctrl->val);
+ mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_GOP_SIZE val = %d", ctrl->val);
p->gop_size = ctrl->val;
ctx->param_change |= MTK_ENCODE_PARAM_GOP_SIZE;
break;
@@ -116,10 +103,10 @@ static int vidioc_venc_s_ctrl(struct v4l2_ctrl *ctrl)
* FIXME - what vp8 profiles are actually supported?
* The ctrl is added (with only profile 0 supported) for now.
*/
- mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_VP8_PROFILE val = %d", ctrl->val);
+ mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_VP8_PROFILE val = %d", ctrl->val);
break;
case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
- mtk_v4l2_debug(2, "V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME");
+ mtk_v4l2_venc_dbg(2, ctx, "V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME");
p->force_intra = 1;
ctx->param_change |= MTK_ENCODE_PARAM_FORCE_INTRA;
break;
@@ -172,7 +159,7 @@ static int vidioc_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
const struct mtk_video_fmt *fmt;
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(fh);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(fh);
if (fsize->index != 0)
return -EINVAL;
@@ -196,7 +183,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
const struct mtk_vcodec_enc_pdata *pdata =
- fh_to_ctx(priv)->dev->venc_pdata;
+ fh_to_enc_ctx(priv)->dev->venc_pdata;
return vidioc_enum_fmt(f, pdata->capture_formats,
pdata->num_capture_formats);
@@ -206,7 +193,7 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
const struct mtk_vcodec_enc_pdata *pdata =
- fh_to_ctx(priv)->dev->venc_pdata;
+ fh_to_enc_ctx(priv)->dev->venc_pdata;
return vidioc_enum_fmt(f, pdata->output_formats,
pdata->num_output_formats);
@@ -214,7 +201,7 @@ static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
static int mtk_vcodec_enc_get_chip_name(void *priv)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
struct device *dev = &ctx->dev->plat_dev->dev;
if (of_device_is_compatible(dev->of_node, "mediatek,mt8173-vcodec-enc"))
@@ -234,7 +221,7 @@ static int mtk_vcodec_enc_get_chip_name(void *priv)
static int vidioc_venc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
struct device *dev = &ctx->dev->plat_dev->dev;
int platform_name = mtk_vcodec_enc_get_chip_name(priv);
@@ -247,7 +234,7 @@ static int vidioc_venc_querycap(struct file *file, void *priv,
static int vidioc_venc_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *a)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
struct v4l2_fract *timeperframe = &a->parm.output.timeperframe;
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
@@ -270,7 +257,7 @@ static int vidioc_venc_s_parm(struct file *file, void *priv,
static int vidioc_venc_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *a)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return -EINVAL;
@@ -284,7 +271,7 @@ static int vidioc_venc_g_parm(struct file *file, void *priv,
return 0;
}
-static struct mtk_q_data *mtk_venc_get_q_data(struct mtk_vcodec_ctx *ctx,
+static struct mtk_q_data *mtk_venc_get_q_data(struct mtk_vcodec_enc_ctx *ctx,
enum v4l2_buf_type type)
{
if (V4L2_TYPE_IS_OUTPUT(type))
@@ -304,7 +291,7 @@ static void vidioc_try_fmt_cap(struct v4l2_format *f)
/* V4L2 specification suggests the driver corrects the format struct if any of
* the dimensions is unsupported
*/
-static int vidioc_try_fmt_out(struct mtk_vcodec_ctx *ctx, struct v4l2_format *f,
+static int vidioc_try_fmt_out(struct mtk_vcodec_enc_ctx *ctx, struct v4l2_format *f,
const struct mtk_video_fmt *fmt)
{
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
@@ -341,11 +328,12 @@ static int vidioc_try_fmt_out(struct mtk_vcodec_ctx *ctx, struct v4l2_format *f,
if (pix_fmt_mp->height < tmp_h && (pix_fmt_mp->height + 32) <= max_height)
pix_fmt_mp->height += 32;
- mtk_v4l2_debug(0, "before resize w=%d, h=%d, after resize w=%d, h=%d, sizeimage=%d %d",
- tmp_w, tmp_h, pix_fmt_mp->width,
- pix_fmt_mp->height,
- pix_fmt_mp->plane_fmt[0].sizeimage,
- pix_fmt_mp->plane_fmt[1].sizeimage);
+ mtk_v4l2_venc_dbg(0, ctx,
+ "before resize wxh=%dx%d, after resize wxh=%dx%d, sizeimage=%d %d",
+ tmp_w, tmp_h, pix_fmt_mp->width,
+ pix_fmt_mp->height,
+ pix_fmt_mp->plane_fmt[0].sizeimage,
+ pix_fmt_mp->plane_fmt[1].sizeimage);
pix_fmt_mp->num_planes = fmt->num_planes;
pix_fmt_mp->plane_fmt[0].sizeimage =
@@ -376,8 +364,8 @@ static int vidioc_try_fmt_out(struct mtk_vcodec_ctx *ctx, struct v4l2_format *f,
return 0;
}
-static void mtk_venc_set_param(struct mtk_vcodec_ctx *ctx,
- struct venc_enc_param *param)
+static void mtk_venc_set_param(struct mtk_vcodec_enc_ctx *ctx,
+ struct venc_enc_param *param)
{
struct mtk_q_data *q_data_src = &ctx->q_data[MTK_Q_DATA_SRC];
struct mtk_enc_params *enc_params = &ctx->enc_params;
@@ -396,7 +384,7 @@ static void mtk_venc_set_param(struct mtk_vcodec_ctx *ctx,
param->input_yuv_fmt = VENC_YUV_FORMAT_NV21;
break;
default:
- mtk_v4l2_err("Unsupported fourcc =%d", q_data_src->fmt->fourcc);
+ mtk_v4l2_venc_err(ctx, "Unsupported fourcc =%d", q_data_src->fmt->fourcc);
break;
}
param->h264_profile = enc_params->h264_profile;
@@ -414,19 +402,19 @@ static void mtk_venc_set_param(struct mtk_vcodec_ctx *ctx,
param->gop_size = enc_params->gop_size;
param->bitrate = enc_params->bitrate;
- mtk_v4l2_debug(0,
- "fmt 0x%x, P/L %d/%d, w/h %d/%d, buf %d/%d, fps/bps %d/%d, gop %d, i_period %d",
- param->input_yuv_fmt, param->h264_profile,
- param->h264_level, param->width, param->height,
- param->buf_width, param->buf_height,
- param->frm_rate, param->bitrate,
- param->gop_size, param->intra_period);
+ mtk_v4l2_venc_dbg(0, ctx,
+ "fmt 0x%x, P/L %d/%d w/h %d/%d buf %d/%d fps/bps %d/%d gop %d i_per %d",
+ param->input_yuv_fmt, param->h264_profile,
+ param->h264_level, param->width, param->height,
+ param->buf_width, param->buf_height,
+ param->frm_rate, param->bitrate,
+ param->gop_size, param->intra_period);
}
static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
struct vb2_queue *vq;
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, f->type);
@@ -435,12 +423,12 @@ static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
if (!vq) {
- mtk_v4l2_err("fail to get vq");
+ mtk_v4l2_venc_err(ctx, "fail to get vq");
return -EINVAL;
}
if (vb2_is_busy(vq)) {
- mtk_v4l2_err("queue busy");
+ mtk_v4l2_venc_err(ctx, "queue busy");
return -EBUSY;
}
@@ -468,8 +456,8 @@ static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
if (ctx->state == MTK_STATE_FREE) {
ret = venc_if_init(ctx, q_data->fmt->fourcc);
if (ret) {
- mtk_v4l2_err("venc_if_init failed=%d, codec type=%x",
- ret, q_data->fmt->fourcc);
+ mtk_v4l2_venc_err(ctx, "venc_if_init failed=%d, codec type=%x",
+ ret, q_data->fmt->fourcc);
return -EBUSY;
}
ctx->state = MTK_STATE_INIT;
@@ -481,7 +469,7 @@ static int vidioc_venc_s_fmt_cap(struct file *file, void *priv,
static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
struct vb2_queue *vq;
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, f->type);
@@ -490,12 +478,12 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
if (!vq) {
- mtk_v4l2_err("fail to get vq");
+ mtk_v4l2_venc_err(ctx, "fail to get vq");
return -EINVAL;
}
if (vb2_is_busy(vq)) {
- mtk_v4l2_err("queue busy");
+ mtk_v4l2_venc_err(ctx, "queue busy");
return -EBUSY;
}
@@ -536,7 +524,7 @@ static int vidioc_venc_g_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
struct vb2_queue *vq;
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, f->type);
int i;
@@ -569,7 +557,7 @@ static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct mtk_video_fmt *fmt;
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
fmt = mtk_venc_find_format(f->fmt.pix.pixelformat, pdata);
@@ -591,7 +579,7 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct mtk_video_fmt *fmt;
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
const struct mtk_vcodec_enc_pdata *pdata = ctx->dev->venc_pdata;
fmt = mtk_venc_find_format(f->fmt.pix.pixelformat, pdata);
@@ -612,7 +600,7 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
static int vidioc_venc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, s->type);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -642,7 +630,7 @@ static int vidioc_venc_g_selection(struct file *file, void *priv,
static int vidioc_venc_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, s->type);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -667,11 +655,11 @@ static int vidioc_venc_s_selection(struct file *file, void *priv,
static int vidioc_venc_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
if (ctx->state == MTK_STATE_ABORT) {
- mtk_v4l2_err("[%d] Call on QBUF after unrecoverable error",
- ctx->id);
+ mtk_v4l2_venc_err(ctx, "[%d] Call on QBUF after unrecoverable error",
+ ctx->id);
return -EIO;
}
@@ -681,12 +669,12 @@ static int vidioc_venc_qbuf(struct file *file, void *priv,
static int vidioc_venc_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
int ret;
if (ctx->state == MTK_STATE_ABORT) {
- mtk_v4l2_err("[%d] Call on QBUF after unrecoverable error",
- ctx->id);
+ mtk_v4l2_venc_err(ctx, "[%d] Call on QBUF after unrecoverable error",
+ ctx->id);
return -EIO;
}
@@ -719,13 +707,13 @@ static int vidioc_venc_dqbuf(struct file *file, void *priv,
static int vidioc_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *cmd)
{
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(priv);
struct vb2_queue *src_vq, *dst_vq;
int ret;
if (ctx->state == MTK_STATE_ABORT) {
- mtk_v4l2_err("[%d] Call to CMD after unrecoverable error",
- ctx->id);
+ mtk_v4l2_venc_err(ctx, "[%d] Call to CMD after unrecoverable error",
+ ctx->id);
return -EIO;
}
@@ -737,7 +725,7 @@ static int vidioc_encoder_cmd(struct file *file, void *priv,
if (ctx->is_flushing)
return -EBUSY;
- mtk_v4l2_debug(1, "encoder cmd=%u", cmd->cmd);
+ mtk_v4l2_venc_dbg(1, ctx, "encoder cmd=%u", cmd->cmd);
dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
@@ -746,11 +734,11 @@ static int vidioc_encoder_cmd(struct file *file, void *priv,
src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
if (!vb2_is_streaming(src_vq)) {
- mtk_v4l2_debug(1, "Output stream is off. No need to flush.");
+ mtk_v4l2_venc_dbg(1, ctx, "Output stream is off. No need to flush.");
return 0;
}
if (!vb2_is_streaming(dst_vq)) {
- mtk_v4l2_debug(1, "Capture stream is off. No need to flush.");
+ mtk_v4l2_venc_dbg(1, ctx, "Capture stream is off. No need to flush.");
return 0;
}
ctx->is_flushing = true;
@@ -813,7 +801,7 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
unsigned int sizes[],
struct device *alloc_devs[])
{
- struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vq);
+ struct mtk_vcodec_enc_ctx *ctx = vb2_get_drv_priv(vq);
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, vq->type);
unsigned int i;
@@ -837,15 +825,14 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
static int vb2ops_venc_buf_prepare(struct vb2_buffer *vb)
{
- struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_vcodec_enc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct mtk_q_data *q_data = mtk_venc_get_q_data(ctx, vb->vb2_queue->type);
int i;
for (i = 0; i < q_data->fmt->num_planes; i++) {
if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
- mtk_v4l2_err("data will not fit into plane %d (%lu < %d)",
- i, vb2_plane_size(vb, i),
- q_data->sizeimage[i]);
+ mtk_v4l2_venc_err(ctx, "data will not fit into plane %d (%lu < %d)",
+ i, vb2_plane_size(vb, i), q_data->sizeimage[i]);
return -EINVAL;
}
}
@@ -855,7 +842,7 @@ static int vb2ops_venc_buf_prepare(struct vb2_buffer *vb)
static void vb2ops_venc_buf_queue(struct vb2_buffer *vb)
{
- struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct mtk_vcodec_enc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vb2_v4l2 =
container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
@@ -865,10 +852,8 @@ static void vb2ops_venc_buf_queue(struct vb2_buffer *vb)
if ((vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
(ctx->param_change != MTK_ENCODE_PARAM_NONE)) {
- mtk_v4l2_debug(1, "[%d] Before id=%d encode parameter change %x",
- ctx->id,
- vb2_v4l2->vb2_buf.index,
- ctx->param_change);
+ mtk_v4l2_venc_dbg(1, ctx, "[%d] Before id=%d encode parameter change %x",
+ ctx->id, vb2_v4l2->vb2_buf.index, ctx->param_change);
mtk_buf->param_change = ctx->param_change;
mtk_buf->enc_params = ctx->enc_params;
ctx->param_change = MTK_ENCODE_PARAM_NONE;
@@ -879,7 +864,7 @@ static void vb2ops_venc_buf_queue(struct vb2_buffer *vb)
static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
{
- struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+ struct mtk_vcodec_enc_ctx *ctx = vb2_get_drv_priv(q);
struct venc_enc_param param;
int ret, pm_ret;
int i;
@@ -903,14 +888,14 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
ret = pm_runtime_resume_and_get(&ctx->dev->plat_dev->dev);
if (ret < 0) {
- mtk_v4l2_err("pm_runtime_resume_and_get fail %d", ret);
+ mtk_v4l2_venc_err(ctx, "pm_runtime_resume_and_get fail %d", ret);
goto err_start_stream;
}
mtk_venc_set_param(ctx, &param);
ret = venc_if_set_param(ctx, VENC_SET_PARAM_ENC, &param);
if (ret) {
- mtk_v4l2_err("venc_if_set_param failed=%d", ret);
+ mtk_v4l2_venc_err(ctx, "venc_if_set_param failed=%d", ret);
ctx->state = MTK_STATE_ABORT;
goto err_set_param;
}
@@ -923,7 +908,7 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
VENC_SET_PARAM_PREPEND_HEADER,
NULL);
if (ret) {
- mtk_v4l2_err("venc_if_set_param failed=%d", ret);
+ mtk_v4l2_venc_err(ctx, "venc_if_set_param failed=%d", ret);
ctx->state = MTK_STATE_ABORT;
goto err_set_param;
}
@@ -935,7 +920,7 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
err_set_param:
pm_ret = pm_runtime_put(&ctx->dev->plat_dev->dev);
if (pm_ret < 0)
- mtk_v4l2_err("pm_runtime_put fail %d", pm_ret);
+ mtk_v4l2_venc_err(ctx, "pm_runtime_put fail %d", pm_ret);
err_start_stream:
for (i = 0; i < q->num_buffers; ++i) {
@@ -946,9 +931,8 @@ err_start_stream:
* can be marked as done.
*/
if (buf && buf->state == VB2_BUF_STATE_ACTIVE) {
- mtk_v4l2_debug(0, "[%d] id=%d, type=%d, %d -> VB2_BUF_STATE_QUEUED",
- ctx->id, i, q->type,
- (int)buf->state);
+ mtk_v4l2_venc_dbg(0, ctx, "[%d] id=%d, type=%d, %d->VB2_BUF_STATE_QUEUED",
+ ctx->id, i, q->type, (int)buf->state);
v4l2_m2m_buf_done(to_vb2_v4l2_buffer(buf),
VB2_BUF_STATE_QUEUED);
}
@@ -959,11 +943,11 @@ err_start_stream:
static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
{
- struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+ struct mtk_vcodec_enc_ctx *ctx = vb2_get_drv_priv(q);
struct vb2_v4l2_buffer *src_buf, *dst_buf;
int ret;
- mtk_v4l2_debug(2, "[%d]-> type=%d", ctx->id, q->type);
+ mtk_v4l2_venc_dbg(2, ctx, "[%d]-> type=%d", ctx->id, q->type);
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
while ((dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx))) {
@@ -974,7 +958,7 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
if (ctx->is_flushing) {
struct v4l2_m2m_buffer *b, *n;
- mtk_v4l2_debug(1, "STREAMOFF called while flushing");
+ mtk_v4l2_venc_dbg(1, ctx, "STREAMOFF called while flushing");
/*
* STREAMOFF could be called before the flush buffer is
* dequeued. Check whether empty flush buf is still in
@@ -1008,21 +992,21 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q)) ||
(q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q))) {
- mtk_v4l2_debug(1, "[%d]-> q type %d out=%d cap=%d",
- ctx->id, q->type,
- vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q),
- vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q));
+ mtk_v4l2_venc_dbg(1, ctx, "[%d]-> q type %d out=%d cap=%d",
+ ctx->id, q->type,
+ vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q),
+ vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q));
return;
}
/* Release the encoder if both streams are stopped. */
ret = venc_if_deinit(ctx);
if (ret)
- mtk_v4l2_err("venc_if_deinit failed=%d", ret);
+ mtk_v4l2_venc_err(ctx, "venc_if_deinit failed=%d", ret);
ret = pm_runtime_put(&ctx->dev->plat_dev->dev);
if (ret < 0)
- mtk_v4l2_err("pm_runtime_put fail %d", ret);
+ mtk_v4l2_venc_err(ctx, "pm_runtime_put fail %d", ret);
ctx->state = MTK_STATE_FREE;
}
@@ -1048,7 +1032,7 @@ static const struct vb2_ops mtk_venc_vb2_ops = {
static int mtk_venc_encode_header(void *priv)
{
- struct mtk_vcodec_ctx *ctx = priv;
+ struct mtk_vcodec_enc_ctx *ctx = priv;
int ret;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct mtk_vcodec_mem bs_buf;
@@ -1056,7 +1040,7 @@ static int mtk_venc_encode_header(void *priv)
dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
if (!dst_buf) {
- mtk_v4l2_debug(1, "No dst buffer");
+ mtk_v4l2_venc_dbg(1, ctx, "No dst buffer");
return -EINVAL;
}
@@ -1064,12 +1048,10 @@ static int mtk_venc_encode_header(void *priv)
bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
bs_buf.size = (size_t)dst_buf->vb2_buf.planes[0].length;
- mtk_v4l2_debug(1,
- "[%d] buf id=%d va=0x%p dma_addr=0x%llx size=%zu",
- ctx->id,
- dst_buf->vb2_buf.index, bs_buf.va,
- (u64)bs_buf.dma_addr,
- bs_buf.size);
+ mtk_v4l2_venc_dbg(1, ctx,
+ "[%d] buf id=%d va=0x%p dma_addr=0x%llx size=%zu",
+ ctx->id, dst_buf->vb2_buf.index, bs_buf.va,
+ (u64)bs_buf.dma_addr, bs_buf.size);
ret = venc_if_encode(ctx,
VENC_START_OPT_ENCODE_SEQUENCE_HEADER,
@@ -1079,7 +1061,7 @@ static int mtk_venc_encode_header(void *priv)
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
ctx->state = MTK_STATE_ABORT;
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
- mtk_v4l2_err("venc_if_encode failed=%d", ret);
+ mtk_v4l2_venc_err(ctx, "venc_if_encode failed=%d", ret);
return -EINVAL;
}
src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
@@ -1087,7 +1069,7 @@ static int mtk_venc_encode_header(void *priv)
dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
dst_buf->timecode = src_buf->timecode;
} else {
- mtk_v4l2_err("No timestamp for the header buffer.");
+ mtk_v4l2_venc_err(ctx, "No timestamp for the header buffer.");
}
ctx->state = MTK_STATE_HEADER;
@@ -1097,7 +1079,7 @@ static int mtk_venc_encode_header(void *priv)
return 0;
}
-static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
+static int mtk_venc_param_change(struct mtk_vcodec_enc_ctx *ctx)
{
struct venc_enc_param enc_prm;
struct vb2_v4l2_buffer *vb2_v4l2 = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
@@ -1116,10 +1098,8 @@ static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
if (mtk_buf->param_change & MTK_ENCODE_PARAM_BITRATE) {
enc_prm.bitrate = mtk_buf->enc_params.bitrate;
- mtk_v4l2_debug(1, "[%d] id=%d, change param br=%d",
- ctx->id,
- vb2_v4l2->vb2_buf.index,
- enc_prm.bitrate);
+ mtk_v4l2_venc_dbg(1, ctx, "[%d] id=%d, change param br=%d",
+ ctx->id, vb2_v4l2->vb2_buf.index, enc_prm.bitrate);
ret |= venc_if_set_param(ctx,
VENC_SET_PARAM_ADJUST_BITRATE,
&enc_prm);
@@ -1127,27 +1107,23 @@ static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
if (!ret && mtk_buf->param_change & MTK_ENCODE_PARAM_FRAMERATE) {
enc_prm.frm_rate = mtk_buf->enc_params.framerate_num /
mtk_buf->enc_params.framerate_denom;
- mtk_v4l2_debug(1, "[%d] id=%d, change param fr=%d",
- ctx->id,
- vb2_v4l2->vb2_buf.index,
- enc_prm.frm_rate);
+ mtk_v4l2_venc_dbg(1, ctx, "[%d] id=%d, change param fr=%d",
+ ctx->id, vb2_v4l2->vb2_buf.index, enc_prm.frm_rate);
ret |= venc_if_set_param(ctx,
VENC_SET_PARAM_ADJUST_FRAMERATE,
&enc_prm);
}
if (!ret && mtk_buf->param_change & MTK_ENCODE_PARAM_GOP_SIZE) {
enc_prm.gop_size = mtk_buf->enc_params.gop_size;
- mtk_v4l2_debug(1, "change param intra period=%d",
- enc_prm.gop_size);
+ mtk_v4l2_venc_dbg(1, ctx, "change param intra period=%d", enc_prm.gop_size);
ret |= venc_if_set_param(ctx,
VENC_SET_PARAM_GOP_SIZE,
&enc_prm);
}
if (!ret && mtk_buf->param_change & MTK_ENCODE_PARAM_FORCE_INTRA) {
- mtk_v4l2_debug(1, "[%d] id=%d, change param force I=%d",
- ctx->id,
- vb2_v4l2->vb2_buf.index,
- mtk_buf->enc_params.force_intra);
+ mtk_v4l2_venc_dbg(1, ctx, "[%d] id=%d, change param force I=%d",
+ ctx->id, vb2_v4l2->vb2_buf.index,
+ mtk_buf->enc_params.force_intra);
if (mtk_buf->enc_params.force_intra)
ret |= venc_if_set_param(ctx,
VENC_SET_PARAM_FORCE_INTRA,
@@ -1158,8 +1134,8 @@ static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
if (ret) {
ctx->state = MTK_STATE_ABORT;
- mtk_v4l2_err("venc_if_set_param %d failed=%d",
- mtk_buf->param_change, ret);
+ mtk_v4l2_venc_err(ctx, "venc_if_set_param %d failed=%d",
+ mtk_buf->param_change, ret);
return -1;
}
@@ -1176,7 +1152,7 @@ static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
*/
static void mtk_venc_worker(struct work_struct *work)
{
- struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
+ struct mtk_vcodec_enc_ctx *ctx = container_of(work, struct mtk_vcodec_enc_ctx,
encode_work);
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct venc_frm_buf frm_buf;
@@ -1220,14 +1196,11 @@ static void mtk_venc_worker(struct work_struct *work)
bs_buf.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
bs_buf.size = (size_t)dst_buf->vb2_buf.planes[0].length;
- mtk_v4l2_debug(2,
- "Framebuf PA=%llx Size=0x%zx;PA=0x%llx Size=0x%zx;PA=0x%llx Size=%zu",
- (u64)frm_buf.fb_addr[0].dma_addr,
- frm_buf.fb_addr[0].size,
- (u64)frm_buf.fb_addr[1].dma_addr,
- frm_buf.fb_addr[1].size,
- (u64)frm_buf.fb_addr[2].dma_addr,
- frm_buf.fb_addr[2].size);
+ mtk_v4l2_venc_dbg(2, ctx,
+ "Framebuf PA=%llx Size=0x%zx;PA=0x%llx Size=0x%zx;PA=0x%llx Size=%zu",
+ (u64)frm_buf.fb_addr[0].dma_addr, frm_buf.fb_addr[0].size,
+ (u64)frm_buf.fb_addr[1].dma_addr, frm_buf.fb_addr[1].size,
+ (u64)frm_buf.fb_addr[2].dma_addr, frm_buf.fb_addr[2].size);
ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME,
&frm_buf, &bs_buf, &enc_result);
@@ -1242,25 +1215,24 @@ static void mtk_venc_worker(struct work_struct *work)
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
- mtk_v4l2_err("venc_if_encode failed=%d", ret);
+ mtk_v4l2_venc_err(ctx, "venc_if_encode failed=%d", ret);
} else {
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, enc_result.bs_size);
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
- mtk_v4l2_debug(2, "venc_if_encode bs size=%d",
- enc_result.bs_size);
+ mtk_v4l2_venc_dbg(2, ctx, "venc_if_encode bs size=%d",
+ enc_result.bs_size);
}
v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx);
- mtk_v4l2_debug(1, "<=== src_buf[%d] dst_buf[%d] venc_if_encode ret=%d Size=%u===>",
- src_buf->vb2_buf.index, dst_buf->vb2_buf.index, ret,
- enc_result.bs_size);
+ mtk_v4l2_venc_dbg(1, ctx, "<=== src_buf[%d] dst_buf[%d] venc_if_encode ret=%d Size=%u===>",
+ src_buf->vb2_buf.index, dst_buf->vb2_buf.index, ret, enc_result.bs_size);
}
static void m2mops_venc_device_run(void *priv)
{
- struct mtk_vcodec_ctx *ctx = priv;
+ struct mtk_vcodec_enc_ctx *ctx = priv;
if ((ctx->q_data[MTK_Q_DATA_DST].fmt->fourcc == V4L2_PIX_FMT_H264) &&
(ctx->state != MTK_STATE_HEADER)) {
@@ -1276,11 +1248,10 @@ static void m2mops_venc_device_run(void *priv)
static int m2mops_venc_job_ready(void *m2m_priv)
{
- struct mtk_vcodec_ctx *ctx = m2m_priv;
+ struct mtk_vcodec_enc_ctx *ctx = m2m_priv;
if (ctx->state == MTK_STATE_ABORT || ctx->state == MTK_STATE_FREE) {
- mtk_v4l2_debug(3, "[%d]Not ready: state=0x%x.",
- ctx->id, ctx->state);
+ mtk_v4l2_venc_dbg(3, ctx, "[%d]Not ready: state=0x%x.", ctx->id, ctx->state);
return 0;
}
@@ -1289,7 +1260,7 @@ static int m2mops_venc_job_ready(void *m2m_priv)
static void m2mops_venc_job_abort(void *priv)
{
- struct mtk_vcodec_ctx *ctx = priv;
+ struct mtk_vcodec_enc_ctx *ctx = priv;
ctx->state = MTK_STATE_ABORT;
}
@@ -1300,7 +1271,7 @@ const struct v4l2_m2m_ops mtk_venc_m2m_ops = {
.job_abort = m2mops_venc_job_abort,
};
-void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_ctx *ctx)
+void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_enc_ctx *ctx)
{
struct mtk_q_data *q_data;
@@ -1361,7 +1332,7 @@ void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_ctx *ctx)
ctx->enc_params.framerate_denom = MTK_DEFAULT_FRAMERATE_DENOM;
}
-int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_enc_ctx *ctx)
{
const struct v4l2_ctrl_ops *ops = &mtk_vcodec_enc_ctrl_ops;
struct v4l2_ctrl_handler *handler = &ctx->ctrl_hdl;
@@ -1415,8 +1386,7 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
if (handler->error) {
- mtk_v4l2_err("Init control handler fail %d",
- handler->error);
+ mtk_v4l2_venc_err(ctx, "Init control handler fail %d", handler->error);
return handler->error;
}
@@ -1428,7 +1398,7 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
int mtk_vcodec_enc_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
- struct mtk_vcodec_ctx *ctx = priv;
+ struct mtk_vcodec_enc_ctx *ctx = priv;
int ret;
/* Note: VB2_USERPTR works with dma-contig because mt8173
@@ -1463,28 +1433,28 @@ int mtk_vcodec_enc_queue_init(void *priv, struct vb2_queue *src_vq,
return vb2_queue_init(dst_vq);
}
-int mtk_venc_unlock(struct mtk_vcodec_ctx *ctx)
+int mtk_venc_unlock(struct mtk_vcodec_enc_ctx *ctx)
{
- struct mtk_vcodec_dev *dev = ctx->dev;
+ struct mtk_vcodec_enc_dev *dev = ctx->dev;
mutex_unlock(&dev->enc_mutex);
return 0;
}
-int mtk_venc_lock(struct mtk_vcodec_ctx *ctx)
+int mtk_venc_lock(struct mtk_vcodec_enc_ctx *ctx)
{
- struct mtk_vcodec_dev *dev = ctx->dev;
+ struct mtk_vcodec_enc_dev *dev = ctx->dev;
mutex_lock(&dev->enc_mutex);
return 0;
}
-void mtk_vcodec_enc_release(struct mtk_vcodec_ctx *ctx)
+void mtk_vcodec_enc_release(struct mtk_vcodec_enc_ctx *ctx)
{
int ret = venc_if_deinit(ctx);
if (ret)
- mtk_v4l2_err("venc_if_deinit failed=%d", ret);
+ mtk_v4l2_venc_err(ctx, "venc_if_deinit failed=%d", ret);
ctx->state = MTK_STATE_FREE;
}
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.h
index 513ee7993e34..82246401ed4a 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.h
@@ -11,6 +11,8 @@
#include <media/videobuf2-core.h>
#include <media/v4l2-mem2mem.h>
+#include "mtk_vcodec_enc_drv.h"
+
#define MTK_VENC_IRQ_STATUS_SPS 0x1
#define MTK_VENC_IRQ_STATUS_PPS 0x2
#define MTK_VENC_IRQ_STATUS_FRM 0x4
@@ -39,12 +41,12 @@ struct mtk_video_enc_buf {
extern const struct v4l2_ioctl_ops mtk_venc_ioctl_ops;
extern const struct v4l2_m2m_ops mtk_venc_m2m_ops;
-int mtk_venc_unlock(struct mtk_vcodec_ctx *ctx);
-int mtk_venc_lock(struct mtk_vcodec_ctx *ctx);
+int mtk_venc_unlock(struct mtk_vcodec_enc_ctx *ctx);
+int mtk_venc_lock(struct mtk_vcodec_enc_ctx *ctx);
int mtk_vcodec_enc_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq);
-void mtk_vcodec_enc_release(struct mtk_vcodec_ctx *ctx);
-int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx);
-void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_ctx *ctx);
+void mtk_vcodec_enc_release(struct mtk_vcodec_enc_ctx *ctx);
+int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_enc_ctx *ctx);
+void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_enc_ctx *ctx);
#endif /* _MTK_VCODEC_ENC_H_ */
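The prototypes above now take the dedicated encoder context, and the matching implementations earlier in this diff show that mtk_venc_lock()/mtk_venc_unlock() are thin wrappers around the per-device enc_mutex. A minimal sketch of the intended call pattern (the caller function here is hypothetical; only the two helpers come from this series):

    /* Hypothetical caller: serialize access to the shared encoder hardware. */
    static int example_touch_encoder_hw(struct mtk_vcodec_enc_ctx *ctx)
    {
            mtk_venc_lock(ctx);     /* takes ctx->dev->enc_mutex */

            /* ... program and run the encoder hardware for this context ... */

            mtk_venc_unlock(ctx);   /* releases ctx->dev->enc_mutex */
            return 0;
    }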
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
index 5df0a22ff3b5..6319f24bc714 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c
@@ -9,19 +9,16 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
-#include "mtk_vcodec_drv.h"
#include "mtk_vcodec_enc.h"
#include "mtk_vcodec_enc_pm.h"
-#include "mtk_vcodec_intr.h"
-#include "mtk_vcodec_util.h"
-#include "mtk_vcodec_fw.h"
+#include "../common/mtk_vcodec_intr.h"
static const struct mtk_video_fmt mtk_video_formats_output[] = {
{
@@ -85,8 +82,8 @@ static void clean_irq_status(unsigned int irq_status, void __iomem *addr)
}
static irqreturn_t mtk_vcodec_enc_irq_handler(int irq, void *priv)
{
- struct mtk_vcodec_dev *dev = priv;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_enc_dev *dev = priv;
+ struct mtk_vcodec_enc_ctx *ctx;
unsigned long flags;
void __iomem *addr;
int core_id;
@@ -97,12 +94,11 @@ static irqreturn_t mtk_vcodec_enc_irq_handler(int irq, void *priv)
core_id = dev->venc_pdata->core_id;
if (core_id < 0 || core_id >= NUM_MAX_VCODEC_REG_BASE) {
- mtk_v4l2_err("Invalid core id: %d, ctx id: %d",
- core_id, ctx->id);
+ mtk_v4l2_venc_err(ctx, "Invalid core id: %d, ctx id: %d", core_id, ctx->id);
return IRQ_HANDLED;
}
- mtk_v4l2_debug(1, "id: %d, core id: %d", ctx->id, core_id);
+ mtk_v4l2_venc_dbg(1, ctx, "id: %d, core id: %d", ctx->id, core_id);
addr = dev->reg_base[core_id] + MTK_VENC_IRQ_ACK_OFFSET;
@@ -111,14 +107,14 @@ static irqreturn_t mtk_vcodec_enc_irq_handler(int irq, void *priv)
clean_irq_status(ctx->irq_status, addr);
- wake_up_ctx(ctx, MTK_INST_IRQ_RECEIVED, 0);
+ wake_up_enc_ctx(ctx, MTK_INST_IRQ_RECEIVED, 0);
return IRQ_HANDLED;
}
static int fops_vcodec_open(struct file *file)
{
- struct mtk_vcodec_dev *dev = video_drvdata(file);
- struct mtk_vcodec_ctx *ctx = NULL;
+ struct mtk_vcodec_enc_dev *dev = video_drvdata(file);
+ struct mtk_vcodec_enc_ctx *ctx = NULL;
int ret = 0;
struct vb2_queue *src_vq;
@@ -143,16 +139,14 @@ static int fops_vcodec_open(struct file *file)
ctx->type = MTK_INST_ENCODER;
ret = mtk_vcodec_enc_ctrls_setup(ctx);
if (ret) {
- mtk_v4l2_err("Failed to setup controls() (%d)",
- ret);
+ mtk_v4l2_venc_err(ctx, "Failed to setup controls() (%d)", ret);
goto err_ctrls_setup;
}
ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev_enc, ctx,
&mtk_vcodec_enc_queue_init);
if (IS_ERR((__force void *)ctx->m2m_ctx)) {
ret = PTR_ERR((__force void *)ctx->m2m_ctx);
- mtk_v4l2_err("Failed to v4l2_m2m_ctx_init() (%d)",
- ret);
+ mtk_v4l2_venc_err(ctx, "Failed to v4l2_m2m_ctx_init() (%d)", ret);
goto err_m2m_ctx_init;
}
src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
@@ -171,23 +165,23 @@ static int fops_vcodec_open(struct file *file)
* Return 0 if downloading firmware successfully,
* otherwise it is failed
*/
- mtk_v4l2_err("vpu_load_firmware failed!");
+ mtk_v4l2_venc_err(ctx, "vpu_load_firmware failed!");
goto err_load_fw;
}
dev->enc_capability =
mtk_vcodec_fw_get_venc_capa(dev->fw_handler);
- mtk_v4l2_debug(0, "encoder capability %x", dev->enc_capability);
+ mtk_v4l2_venc_dbg(0, ctx, "encoder capability %x", dev->enc_capability);
}
- mtk_v4l2_debug(2, "Create instance [%d]@%p m2m_ctx=%p ",
- ctx->id, ctx, ctx->m2m_ctx);
+ mtk_v4l2_venc_dbg(2, ctx, "Create instance [%d]@%p m2m_ctx=%p ",
+ ctx->id, ctx, ctx->m2m_ctx);
list_add(&ctx->list, &dev->ctx_list);
mutex_unlock(&dev->dev_mutex);
- mtk_v4l2_debug(0, "%s encoder [%d]", dev_name(&dev->plat_dev->dev),
- ctx->id);
+ mtk_v4l2_venc_dbg(0, ctx, "%s encoder [%d]", dev_name(&dev->plat_dev->dev),
+ ctx->id);
return ret;
/* Deinit when failure occurred */
@@ -206,10 +200,10 @@ err_ctrls_setup:
static int fops_vcodec_release(struct file *file)
{
- struct mtk_vcodec_dev *dev = video_drvdata(file);
- struct mtk_vcodec_ctx *ctx = fh_to_ctx(file->private_data);
+ struct mtk_vcodec_enc_dev *dev = video_drvdata(file);
+ struct mtk_vcodec_enc_ctx *ctx = fh_to_enc_ctx(file->private_data);
- mtk_v4l2_debug(1, "[%d] encoder", ctx->id);
+ mtk_v4l2_venc_dbg(1, ctx, "[%d] encoder", ctx->id);
mutex_lock(&dev->dev_mutex);
v4l2_m2m_ctx_release(ctx->m2m_ctx);
@@ -235,7 +229,7 @@ static const struct v4l2_file_operations mtk_vcodec_fops = {
static int mtk_vcodec_probe(struct platform_device *pdev)
{
- struct mtk_vcodec_dev *dev;
+ struct mtk_vcodec_enc_dev *dev;
struct video_device *vfd_enc;
phandle rproc_phandle;
enum mtk_vcodec_fw_type fw_type;
@@ -255,7 +249,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
&rproc_phandle)) {
fw_type = SCP;
} else {
- mtk_v4l2_err("Could not get venc IPI device");
+ dev_err(&pdev->dev, "[MTK VCODEC] Could not get venc IPI device");
return -ENODEV;
}
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
@@ -267,7 +261,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
dev->venc_pdata = of_device_get_match_data(&pdev->dev);
ret = mtk_vcodec_init_enc_clk(dev);
if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get mtk vcodec clock source!");
+ dev_err(&pdev->dev, "[MTK VCODEC] Failed to get mtk vcodec clock source!");
goto err_enc_pm;
}
@@ -292,7 +286,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
0, pdev->name, dev);
if (ret) {
dev_err(&pdev->dev,
- "Failed to install dev->enc_irq %d (%d) core_id (%d)",
+ "[MTK VCODEC] Failed to install dev->enc_irq %d (%d) core_id (%d)",
dev->enc_irq, ret, dev->venc_pdata->core_id);
ret = -EINVAL;
goto err_res;
@@ -307,16 +301,14 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret) {
- mtk_v4l2_err("v4l2_device_register err=%d", ret);
+ dev_err(&pdev->dev, "[MTK VCODEC] v4l2_device_register err=%d", ret);
goto err_res;
}
- init_waitqueue_head(&dev->queue);
-
/* allocate video device for encoder and register it */
vfd_enc = video_device_alloc();
if (!vfd_enc) {
- mtk_v4l2_err("Failed to allocate video device");
+ dev_err(&pdev->dev, "[MTK VCODEC] Failed to allocate video device");
ret = -ENOMEM;
goto err_enc_alloc;
}
@@ -337,7 +329,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
dev->m2m_dev_enc = v4l2_m2m_init(&mtk_venc_m2m_ops);
if (IS_ERR((__force void *)dev->m2m_dev_enc)) {
- mtk_v4l2_err("Failed to init mem2mem enc device");
+ dev_err(&pdev->dev, "[MTK VCODEC] Failed to init mem2mem enc device");
ret = PTR_ERR((__force void *)dev->m2m_dev_enc);
goto err_enc_mem_init;
}
@@ -347,20 +339,20 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
WQ_MEM_RECLAIM |
WQ_FREEZABLE);
if (!dev->encode_workqueue) {
- mtk_v4l2_err("Failed to create encode workqueue");
+ dev_err(&pdev->dev, "[MTK VCODEC] Failed to create encode workqueue");
ret = -EINVAL;
goto err_event_workq;
}
ret = video_register_device(vfd_enc, VFL_TYPE_VIDEO, -1);
if (ret) {
- mtk_v4l2_err("Failed to register video device");
+ dev_err(&pdev->dev, "[MTK VCODEC] Failed to register video device");
goto err_enc_reg;
}
mtk_vcodec_dbgfs_init(dev, true);
- mtk_v4l2_debug(0, "encoder %d registered as /dev/video%d",
- dev->venc_pdata->core_id, vfd_enc->num);
+ dev_dbg(&pdev->dev, "[MTK VCODEC] encoder %d registered as /dev/video%d",
+ dev->venc_pdata->core_id, vfd_enc->num);
return 0;
@@ -459,9 +451,8 @@ MODULE_DEVICE_TABLE(of, mtk_vcodec_enc_match);
static void mtk_vcodec_enc_remove(struct platform_device *pdev)
{
- struct mtk_vcodec_dev *dev = platform_get_drvdata(pdev);
+ struct mtk_vcodec_enc_dev *dev = platform_get_drvdata(pdev);
- mtk_v4l2_debug_enter();
destroy_workqueue(dev->encode_workqueue);
if (dev->m2m_dev_enc)
v4l2_m2m_release(dev->m2m_dev_enc);
@@ -469,7 +460,7 @@ static void mtk_vcodec_enc_remove(struct platform_device *pdev)
if (dev->vfd_enc)
video_unregister_device(dev->vfd_enc);
- mtk_vcodec_dbgfs_deinit(dev);
+ mtk_vcodec_dbgfs_deinit(&dev->dbgfs);
v4l2_device_unregister(&dev->v4l2_dev);
pm_runtime_disable(dev->pm.dev);
mtk_vcodec_fw_release(dev->fw_handler);
diff --git a/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
new file mode 100644
index 000000000000..a042f607ed8d
--- /dev/null
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Yunfei Dong <yunfei.dong@mediatek.com>
+ */
+
+#ifndef _MTK_VCODEC_ENC_DRV_H_
+#define _MTK_VCODEC_ENC_DRV_H_
+
+#include "../common/mtk_vcodec_cmn_drv.h"
+#include "../common/mtk_vcodec_dbgfs.h"
+#include "../common/mtk_vcodec_fw_priv.h"
+#include "../common/mtk_vcodec_util.h"
+
+#define MTK_VCODEC_ENC_NAME "mtk-vcodec-enc"
+
+#define MTK_ENC_CTX_IS_EXT(ctx) ((ctx)->dev->venc_pdata->uses_ext)
+#define MTK_ENC_IOVA_IS_34BIT(ctx) ((ctx)->dev->venc_pdata->uses_34bit)
+
+/**
+ * struct mtk_vcodec_enc_pdata - compatible data for each IC
+ *
+ * @uses_ext: whether the encoder uses the extended firmware messaging format
+ * @min_bitrate: minimum supported encoding bitrate
+ * @max_bitrate: maximum supported encoding bitrate
+ * @capture_formats: array of supported capture formats
+ * @num_capture_formats: number of entries in capture_formats
+ * @output_formats: array of supported output formats
+ * @num_output_formats: number of entries in output_formats
+ * @core_id: stands for the h264 or vp8 encoder core index
+ * @uses_34bit: whether the encoder uses 34-bit iova
+ */
+struct mtk_vcodec_enc_pdata {
+ bool uses_ext;
+ u64 min_bitrate;
+ u64 max_bitrate;
+ const struct mtk_video_fmt *capture_formats;
+ size_t num_capture_formats;
+ const struct mtk_video_fmt *output_formats;
+ size_t num_output_formats;
+ u8 core_id;
+ bool uses_34bit;
+};
+
+/*
+ * enum mtk_encode_param - General encoding parameters type
+ */
+enum mtk_encode_param {
+ MTK_ENCODE_PARAM_NONE = 0,
+ MTK_ENCODE_PARAM_BITRATE = (1 << 0),
+ MTK_ENCODE_PARAM_FRAMERATE = (1 << 1),
+ MTK_ENCODE_PARAM_INTRA_PERIOD = (1 << 2),
+ MTK_ENCODE_PARAM_FORCE_INTRA = (1 << 3),
+ MTK_ENCODE_PARAM_GOP_SIZE = (1 << 4),
+};
+
+/**
+ * struct mtk_enc_params - General encoding parameters
+ * @bitrate: target bitrate in bits per second
+ * @num_b_frame: number of B frames between P frames
+ * @rc_frame: frame based rate control
+ * @rc_mb: macroblock based rate control
+ * @seq_hdr_mode: H.264 sequence header is encoded separately or joined
+ * with the first frame
+ * @intra_period: I frame period
+ * @gop_size: group of pictures size; used as the intra frame period
+ * @framerate_num: frame rate numerator. ex: framerate_num=30 and
+ * framerate_denom=1 means FPS is 30
+ * @framerate_denom: frame rate denominator. ex: framerate_num=30 and
+ * framerate_denom=1 means FPS is 30
+ * @h264_max_qp: Max value for H.264 quantization parameter
+ * @h264_profile: V4L2 defined H.264 profile
+ * @h264_level: V4L2 defined H.264 level
+ * @force_intra: force/insert intra frame
+ */
+struct mtk_enc_params {
+ unsigned int bitrate;
+ unsigned int num_b_frame;
+ unsigned int rc_frame;
+ unsigned int rc_mb;
+ unsigned int seq_hdr_mode;
+ unsigned int intra_period;
+ unsigned int gop_size;
+ unsigned int framerate_num;
+ unsigned int framerate_denom;
+ unsigned int h264_max_qp;
+ unsigned int h264_profile;
+ unsigned int h264_level;
+ unsigned int force_intra;
+};
+
+/**
+ * struct mtk_vcodec_enc_ctx - Context (instance) private data.
+ *
+ * @type: type of encoder instance
+ * @dev: pointer to the mtk_vcodec_enc_dev of the device
+ * @list: link to ctx_list of mtk_vcodec_enc_dev
+ *
+ * @fh: struct v4l2_fh
+ * @m2m_ctx: pointer to the v4l2_m2m_ctx of the context
+ * @q_data: store information of input and output queue of the context
+ * @id: index of the context that this structure describes
+ * @state: state of the context
+ * @param_change: indicate encode parameter type
+ * @enc_params: encoding parameters
+ *
+ * @enc_if: hooked encoder driver interface
+ * @drv_handle: driver handle for the specific encode instance
+ *
+ * @int_cond: variable used by the waitqueue
+ * @int_type: type of the last interrupt
+ * @queue: waitqueue that can be used to wait for this context to finish
+ * @irq_status: irq status
+ *
+ * @ctrl_hdl: handler for v4l2 framework
+ * @encode_work: worker for the encoding
+ * @empty_flush_buf: a fake size-0 capture buffer that indicates flush. Used for encoder.
+ * @is_flushing: set to true if flushing is in progress.
+ *
+ * @colorspace: enum v4l2_colorspace; supplemental to pixelformat
+ * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
+ * @quantization: enum v4l2_quantization, colorspace quantization
+ * @xfer_func: enum v4l2_xfer_func, colorspace transfer function
+ *
+ * @q_mutex: vb2_queue mutex.
+ * @vpu_inst: vpu instance pointer.
+ */
+struct mtk_vcodec_enc_ctx {
+ enum mtk_instance_type type;
+ struct mtk_vcodec_enc_dev *dev;
+ struct list_head list;
+
+ struct v4l2_fh fh;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct mtk_q_data q_data[2];
+ int id;
+ enum mtk_instance_state state;
+ enum mtk_encode_param param_change;
+ struct mtk_enc_params enc_params;
+
+ const struct venc_common_if *enc_if;
+ void *drv_handle;
+
+ int int_cond[MTK_VDEC_HW_MAX];
+ int int_type[MTK_VDEC_HW_MAX];
+ wait_queue_head_t queue[MTK_VDEC_HW_MAX];
+ unsigned int irq_status;
+
+ struct v4l2_ctrl_handler ctrl_hdl;
+ struct work_struct encode_work;
+ struct v4l2_m2m_buffer empty_flush_buf;
+ bool is_flushing;
+
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_quantization quantization;
+ enum v4l2_xfer_func xfer_func;
+
+ struct mutex q_mutex;
+ void *vpu_inst;
+};
+
+/**
+ * struct mtk_vcodec_enc_dev - driver data
+ * @v4l2_dev: V4L2 device to register video devices for.
+ * @vfd_enc: Video device for encoder.
+ *
+ * @m2m_dev_enc: m2m device for encoder.
+ * @plat_dev: platform device
+ * @ctx_list: list of struct mtk_vcodec_enc_ctx
+ * @curr_ctx: The context that is waiting for codec hardware
+ *
+ * @reg_base: Mapped address of MTK Vcodec registers.
+ * @venc_pdata: encoder IC-specific data
+ *
+ * @fw_handler: used to communicate with the firmware.
+ * @id_counter: used to identify the currently opened instance
+ *
+ * @enc_mutex: encoder hardware lock.
+ * @dev_mutex: video_device lock
+ * @encode_workqueue: encode work queue
+ *
+ * @enc_irq: h264 encoder irq resource
+ * @irqlock: protect data access by irq handler and work thread
+ *
+ * @pm: power management control
+ * @enc_capability: used to identify encode capability
+ * @dbgfs: debug log related information
+ */
+struct mtk_vcodec_enc_dev {
+ struct v4l2_device v4l2_dev;
+ struct video_device *vfd_enc;
+
+ struct v4l2_m2m_dev *m2m_dev_enc;
+ struct platform_device *plat_dev;
+ struct list_head ctx_list;
+ struct mtk_vcodec_enc_ctx *curr_ctx;
+
+ void __iomem *reg_base[NUM_MAX_VCODEC_REG_BASE];
+ const struct mtk_vcodec_enc_pdata *venc_pdata;
+
+ struct mtk_vcodec_fw *fw_handler;
+ u64 id_counter;
+
+ /* encoder hardware mutex lock */
+ struct mutex enc_mutex;
+ struct mutex dev_mutex;
+ struct workqueue_struct *encode_workqueue;
+
+ int enc_irq;
+ spinlock_t irqlock;
+
+ struct mtk_vcodec_pm pm;
+ unsigned int enc_capability;
+ struct mtk_vcodec_dbgfs dbgfs;
+};
+
+static inline struct mtk_vcodec_enc_ctx *fh_to_enc_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct mtk_vcodec_enc_ctx, fh);
+}
+
+static inline struct mtk_vcodec_enc_ctx *ctrl_to_enc_ctx(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct mtk_vcodec_enc_ctx, ctrl_hdl);
+}
+
+/* Wake up context wait_queue */
+static inline void
+wake_up_enc_ctx(struct mtk_vcodec_enc_ctx *ctx, unsigned int reason, unsigned int hw_id)
+{
+ ctx->int_cond[hw_id] = 1;
+ ctx->int_type[hw_id] = reason;
+ wake_up_interruptible(&ctx->queue[hw_id]);
+}
+
+#define mtk_venc_err(ctx, fmt, args...) \
+ mtk_vcodec_err((ctx)->id, (ctx)->dev->plat_dev, fmt, ##args)
+
+#define mtk_venc_debug(ctx, fmt, args...) \
+ mtk_vcodec_debug((ctx)->id, (ctx)->dev->plat_dev, fmt, ##args)
+
+#define mtk_v4l2_venc_err(ctx, fmt, args...) mtk_v4l2_err((ctx)->dev->plat_dev, fmt, ##args)
+
+#define mtk_v4l2_venc_dbg(level, ctx, fmt, args...) \
+ mtk_v4l2_debug((ctx)->dev->plat_dev, level, fmt, ##args)
+
+#endif /* _MTK_VCODEC_ENC_DRV_H_ */
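The context documented above keeps one int_cond/int_type/queue triplet per hardware instance, and the IRQ handler converted earlier in this diff completes it through wake_up_enc_ctx(). The waiting side lives in the shared mtk_vcodec_wait_for_done_ctx() helper, whose body is not part of this diff; the sketch below only illustrates, under that assumption, what a wait built on these fields looks like:

    /* Illustrative only -- not the actual common helper. */
    static int example_wait_for_enc_irq(struct mtk_vcodec_enc_ctx *ctx,
                                        unsigned int reason, unsigned int hw_id,
                                        unsigned int timeout_ms)
    {
            long ret;

            ret = wait_event_interruptible_timeout(ctx->queue[hw_id],
                                                    ctx->int_cond[hw_id] &&
                                                    ctx->int_type[hw_id] == reason,
                                                    msecs_to_jiffies(timeout_ms));
            /* Re-arm for the next encode job, mirroring wake_up_enc_ctx(). */
            ctx->int_cond[hw_id] = 0;
            ctx->int_type[hw_id] = 0;

            if (ret < 0)
                    return ret;             /* interrupted */
            return ret ? 0 : -ETIMEDOUT;    /* 0 jiffies left => timeout */
    }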
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
index 7055954eb2af..3fce936e61b9 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_pm.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
@@ -5,14 +5,13 @@
*/
#include <linux/clk.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
+#include "mtk_vcodec_enc_drv.h"
#include "mtk_vcodec_enc_pm.h"
-#include "mtk_vcodec_util.h"
-int mtk_vcodec_init_enc_clk(struct mtk_vcodec_dev *mtkdev)
+int mtk_vcodec_init_enc_clk(struct mtk_vcodec_enc_dev *mtkdev)
{
struct platform_device *pdev;
struct mtk_vcodec_pm *pm;
@@ -35,7 +34,7 @@ int mtk_vcodec_init_enc_clk(struct mtk_vcodec_dev *mtkdev)
if (!enc_clk->clk_info)
return -ENOMEM;
} else {
- mtk_v4l2_err("Failed to get venc clock count");
+ dev_err(pm->dev, "[MTK VCODEC] Failed to get venc clock count");
return -EINVAL;
}
@@ -44,13 +43,13 @@ int mtk_vcodec_init_enc_clk(struct mtk_vcodec_dev *mtkdev)
ret = of_property_read_string_index(pdev->dev.of_node,
"clock-names", i, &clk_info->clk_name);
if (ret) {
- mtk_v4l2_err("venc failed to get clk name %d", i);
+ dev_err(pm->dev, "[MTK VCODEC] venc failed to get clk name %d", i);
return ret;
}
clk_info->vcodec_clk = devm_clk_get(&pdev->dev,
clk_info->clk_name);
if (IS_ERR(clk_info->vcodec_clk)) {
- mtk_v4l2_err("venc devm_clk_get (%d)%s fail", i,
+ dev_err(pm->dev, "[MTK VCODEC] venc devm_clk_get (%d)%s fail", i,
clk_info->clk_name);
return PTR_ERR(clk_info->vcodec_clk);
}
@@ -67,7 +66,7 @@ void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm)
for (i = 0; i < enc_clk->clk_num; i++) {
ret = clk_prepare_enable(enc_clk->clk_info[i].vcodec_clk);
if (ret) {
- mtk_v4l2_err("venc clk_prepare_enable %d %s fail %d", i,
+ dev_err(pm->dev, "[MTK VCODEC] venc clk_prepare_enable %d %s fail %d", i,
enc_clk->clk_info[i].clk_name, ret);
goto clkerr;
}
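The hunk above stops inside the clk_prepare_enable() loop; the clkerr: unwind it jumps to is outside the shown context. For orientation, the standard kernel pattern for that unwind looks like the sketch below (the function name and the struct mtk_vcodec_clk type are assumptions here, not taken from this hunk):

    /* Assumed shape of the error unwind targeted by "goto clkerr" above. */
    static void example_unwind_enabled_clks(struct mtk_vcodec_clk *enc_clk,
                                            int nr_enabled)
    {
            int i;

            for (i = nr_enabled - 1; i >= 0; i--)
                    clk_disable_unprepare(enc_clk->clk_info[i].vcodec_clk);
    }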
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_pm.h b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
index bc455cefc0cd..e50be0575190 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_pm.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
@@ -7,9 +7,9 @@
#ifndef _MTK_VCODEC_ENC_PM_H_
#define _MTK_VCODEC_ENC_PM_H_
-#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_enc_drv.h"
-int mtk_vcodec_init_enc_clk(struct mtk_vcodec_dev *dev);
+int mtk_vcodec_init_enc_clk(struct mtk_vcodec_enc_dev *dev);
void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm);
void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm);
diff --git a/drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
index 60fd165c0d94..a68dac72c4e4 100644
--- a/drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_h264_if.c
@@ -10,9 +10,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
-#include "../mtk_vcodec_drv.h"
-#include "../mtk_vcodec_util.h"
-#include "../mtk_vcodec_intr.h"
+#include "../mtk_vcodec_enc_drv.h"
+#include "../../common/mtk_vcodec_intr.h"
#include "../mtk_vcodec_enc.h"
#include "../mtk_vcodec_enc_pm.h"
#include "../venc_drv_base.h"
@@ -221,7 +220,7 @@ struct venc_h264_inst {
struct venc_vpu_inst vpu_inst;
struct venc_h264_vsi *vsi;
struct venc_h264_vsi_34 *vsi_34;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_enc_ctx *ctx;
};
static inline u32 h264_read_reg(struct venc_h264_inst *inst, u32 addr)
@@ -240,13 +239,13 @@ static unsigned int h264_get_profile(struct venc_h264_inst *inst,
case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
return 100;
case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
- mtk_vcodec_err(inst, "unsupported CONSTRAINED_BASELINE");
+ mtk_venc_err(inst->ctx, "unsupported CONSTRAINED_BASELINE");
return 0;
case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
- mtk_vcodec_err(inst, "unsupported EXTENDED");
+ mtk_venc_err(inst->ctx, "unsupported EXTENDED");
return 0;
default:
- mtk_vcodec_debug(inst, "unsupported profile %d", profile);
+ mtk_venc_debug(inst->ctx, "unsupported profile %d", profile);
return 100;
}
}
@@ -256,7 +255,7 @@ static unsigned int h264_get_level(struct venc_h264_inst *inst,
{
switch (level) {
case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
- mtk_vcodec_err(inst, "unsupported 1B");
+ mtk_venc_err(inst->ctx, "unsupported 1B");
return 0;
case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
return 10;
@@ -289,7 +288,7 @@ static unsigned int h264_get_level(struct venc_h264_inst *inst,
case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
return 51;
default:
- mtk_vcodec_debug(inst, "unsupported level %d", level);
+ mtk_venc_debug(inst->ctx, "unsupported level %d", level);
return 31;
}
}
@@ -298,8 +297,6 @@ static void h264_enc_free_work_buf(struct venc_h264_inst *inst)
{
int i;
- mtk_vcodec_debug_enter(inst);
-
/* Except the SKIP_FRAME buffers,
* other buffers need to be freed by AP.
*/
@@ -309,8 +306,6 @@ static void h264_enc_free_work_buf(struct venc_h264_inst *inst)
}
mtk_vcodec_mem_free(inst->ctx, &inst->pps_buf);
-
- mtk_vcodec_debug_leave(inst);
}
static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst, bool is_34bit)
@@ -321,8 +316,6 @@ static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst, bool is_34bit)
u32 vpua, wb_size;
int ret = 0;
- mtk_vcodec_debug_enter(inst);
-
if (is_34bit)
wb_34 = inst->vsi_34->work_bufs;
else
@@ -366,8 +359,7 @@ static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst, bool is_34bit)
ret = mtk_vcodec_mem_alloc(inst->ctx,
&inst->work_bufs[i]);
if (ret) {
- mtk_vcodec_err(inst,
- "cannot allocate buf %d", i);
+ mtk_venc_err(inst->ctx, "cannot allocate buf %d", i);
goto err_alloc;
}
/*
@@ -391,23 +383,20 @@ static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst, bool is_34bit)
else
wb[i].iova = inst->work_bufs[i].dma_addr;
- mtk_vcodec_debug(inst,
- "work_buf[%d] va=0x%p iova=%pad size=%zu",
- i, inst->work_bufs[i].va,
- &inst->work_bufs[i].dma_addr,
- inst->work_bufs[i].size);
+ mtk_venc_debug(inst->ctx, "work_buf[%d] va=0x%p iova=%pad size=%zu",
+ i, inst->work_bufs[i].va,
+ &inst->work_bufs[i].dma_addr,
+ inst->work_bufs[i].size);
}
/* the pps_buf is used by AP side only */
inst->pps_buf.size = 128;
ret = mtk_vcodec_mem_alloc(inst->ctx, &inst->pps_buf);
if (ret) {
- mtk_vcodec_err(inst, "cannot allocate pps_buf");
+ mtk_venc_err(inst->ctx, "cannot allocate pps_buf");
goto err_alloc;
}
- mtk_vcodec_debug_leave(inst);
-
return ret;
err_alloc:
@@ -419,12 +408,12 @@ err_alloc:
static unsigned int h264_enc_wait_venc_done(struct venc_h264_inst *inst)
{
unsigned int irq_status = 0;
- struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)inst->ctx;
+ struct mtk_vcodec_enc_ctx *ctx = (struct mtk_vcodec_enc_ctx *)inst->ctx;
if (!mtk_vcodec_wait_for_done_ctx(ctx, MTK_INST_IRQ_RECEIVED,
WAIT_INTR_TIMEOUT_MS, 0)) {
irq_status = ctx->irq_status;
- mtk_vcodec_debug(inst, "irq_status %x <-", irq_status);
+ mtk_venc_debug(ctx, "irq_status %x <-", irq_status);
}
return irq_status;
}
@@ -452,21 +441,18 @@ static int h264_encode_sps(struct venc_h264_inst *inst,
int ret = 0;
unsigned int irq_status;
- mtk_vcodec_debug_enter(inst);
-
ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_SPS, NULL, bs_buf, NULL);
if (ret)
return ret;
irq_status = h264_enc_wait_venc_done(inst);
if (irq_status != MTK_VENC_IRQ_STATUS_SPS) {
- mtk_vcodec_err(inst, "expect irq status %d",
- MTK_VENC_IRQ_STATUS_SPS);
+ mtk_venc_err(inst->ctx, "expect irq status %d", MTK_VENC_IRQ_STATUS_SPS);
return -EINVAL;
}
*bs_size = h264_read_reg(inst, VENC_PIC_BITSTREAM_BYTE_CNT);
- mtk_vcodec_debug(inst, "bs size %d <-", *bs_size);
+ mtk_venc_debug(inst->ctx, "bs size %d <-", *bs_size);
return ret;
}
@@ -478,21 +464,18 @@ static int h264_encode_pps(struct venc_h264_inst *inst,
int ret = 0;
unsigned int irq_status;
- mtk_vcodec_debug_enter(inst);
-
ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_PPS, NULL, bs_buf, NULL);
if (ret)
return ret;
irq_status = h264_enc_wait_venc_done(inst);
if (irq_status != MTK_VENC_IRQ_STATUS_PPS) {
- mtk_vcodec_err(inst, "expect irq status %d",
- MTK_VENC_IRQ_STATUS_PPS);
+ mtk_venc_err(inst->ctx, "expect irq status %d", MTK_VENC_IRQ_STATUS_PPS);
return -EINVAL;
}
*bs_size = h264_read_reg(inst, VENC_PIC_BITSTREAM_BYTE_CNT);
- mtk_vcodec_debug(inst, "bs size %d <-", *bs_size);
+ mtk_venc_debug(inst->ctx, "bs size %d <-", *bs_size);
return ret;
}
@@ -529,10 +512,9 @@ static int h264_encode_frame(struct venc_h264_inst *inst,
unsigned int intra_period;
unsigned int irq_status;
struct venc_frame_info frame_info;
- struct mtk_vcodec_ctx *ctx = inst->ctx;
+ struct mtk_vcodec_enc_ctx *ctx = inst->ctx;
- mtk_vcodec_debug_enter(inst);
- mtk_vcodec_debug(inst, "frm_cnt = %d\n ", inst->frm_cnt);
+ mtk_venc_debug(ctx, "frm_cnt = %d\n ", inst->frm_cnt);
if (MTK_ENC_IOVA_IS_34BIT(ctx)) {
gop_size = inst->vsi_34->config.gop_size;
@@ -545,9 +527,9 @@ static int h264_encode_frame(struct venc_h264_inst *inst,
frame_info.skip_frm_count = inst->skip_frm_cnt;
frame_info.frm_type = h264_frame_type(inst->frm_cnt, gop_size,
intra_period);
- mtk_vcodec_debug(inst, "frm_count = %d,skip_frm_count =%d,frm_type=%d.\n",
- frame_info.frm_count, frame_info.skip_frm_count,
- frame_info.frm_type);
+ mtk_venc_debug(ctx, "frm_count = %d,skip_frm_count =%d,frm_type=%d.\n",
+ frame_info.frm_count, frame_info.skip_frm_count,
+ frame_info.frm_type);
ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_FRAME,
frm_buf, bs_buf, &frame_info);
@@ -570,15 +552,15 @@ static int h264_encode_frame(struct venc_h264_inst *inst,
irq_status = h264_enc_wait_venc_done(inst);
if (irq_status != MTK_VENC_IRQ_STATUS_FRM) {
- mtk_vcodec_err(inst, "irq_status=%d failed", irq_status);
+ mtk_venc_err(ctx, "irq_status=%d failed", irq_status);
return -EIO;
}
*bs_size = h264_read_reg(inst, VENC_PIC_BITSTREAM_BYTE_CNT);
++inst->frm_cnt;
- mtk_vcodec_debug(inst, "frm %d bs_size %d key_frm %d <-",
- inst->frm_cnt, *bs_size, inst->vpu_inst.is_key_frm);
+ mtk_venc_debug(ctx, "frm %d bs_size %d key_frm %d <-",
+ inst->frm_cnt, *bs_size, inst->vpu_inst.is_key_frm);
return 0;
}
@@ -589,7 +571,7 @@ static void h264_encode_filler(struct venc_h264_inst *inst, void *buf,
unsigned char *p = buf;
if (size < H264_FILLER_MARKER_SIZE) {
- mtk_vcodec_err(inst, "filler size too small %d", size);
+ mtk_venc_err(inst->ctx, "filler size too small %d", size);
return;
}
@@ -599,7 +581,7 @@ static void h264_encode_filler(struct venc_h264_inst *inst, void *buf,
memset(p, 0xff, size);
}
-static int h264_enc_init(struct mtk_vcodec_ctx *ctx)
+static int h264_enc_init(struct mtk_vcodec_enc_ctx *ctx)
{
const bool is_ext = MTK_ENC_CTX_IS_EXT(ctx);
int ret = 0;
@@ -612,9 +594,7 @@ static int h264_enc_init(struct mtk_vcodec_ctx *ctx)
inst->ctx = ctx;
inst->vpu_inst.ctx = ctx;
inst->vpu_inst.id = is_ext ? SCP_IPI_VENC_H264 : IPI_VENC_H264;
- inst->hw_base = mtk_vcodec_get_reg_addr(inst->ctx, VENC_SYS);
-
- mtk_vcodec_debug_enter(inst);
+ inst->hw_base = mtk_vcodec_get_reg_addr(inst->ctx->dev->reg_base, VENC_SYS);
ret = vpu_enc_init(&inst->vpu_inst);
@@ -623,8 +603,6 @@ static int h264_enc_init(struct mtk_vcodec_ctx *ctx)
else
inst->vsi = (struct venc_h264_vsi *)inst->vpu_inst.vsi;
- mtk_vcodec_debug_leave(inst);
-
if (ret)
kfree(inst);
else
@@ -641,9 +619,9 @@ static int h264_enc_encode(void *handle,
{
int ret = 0;
struct venc_h264_inst *inst = (struct venc_h264_inst *)handle;
- struct mtk_vcodec_ctx *ctx = inst->ctx;
+ struct mtk_vcodec_enc_ctx *ctx = inst->ctx;
- mtk_vcodec_debug(inst, "opt %d ->", opt);
+ mtk_venc_debug(ctx, "opt %d ->", opt);
enable_irq(ctx->dev->enc_irq);
@@ -678,7 +656,7 @@ static int h264_enc_encode(void *handle,
break;
}
- mtk_vcodec_debug(inst, "h264_encode_frame prepend SPS/PPS");
+ mtk_venc_debug(ctx, "h264_encode_frame prepend SPS/PPS");
ret = h264_encode_header(inst, bs_buf, &bs_size_hdr);
if (ret)
@@ -705,9 +683,8 @@ static int h264_enc_encode(void *handle,
result->bs_size = hdr_sz + filler_sz + bs_size_frm;
- mtk_vcodec_debug(inst, "hdr %d filler %d frame %d bs %d",
- hdr_sz, filler_sz, bs_size_frm,
- result->bs_size);
+ mtk_venc_debug(ctx, "hdr %d filler %d frame %d bs %d",
+ hdr_sz, filler_sz, bs_size_frm, result->bs_size);
inst->prepend_hdr = 0;
result->is_key_frm = inst->vpu_inst.is_key_frm;
@@ -715,7 +692,7 @@ static int h264_enc_encode(void *handle,
}
default:
- mtk_vcodec_err(inst, "venc_start_opt %d not supported", opt);
+ mtk_venc_err(ctx, "venc_start_opt %d not supported", opt);
ret = -EINVAL;
break;
}
@@ -723,7 +700,7 @@ static int h264_enc_encode(void *handle,
encode_err:
disable_irq(ctx->dev->enc_irq);
- mtk_vcodec_debug(inst, "opt %d <-", opt);
+ mtk_venc_debug(ctx, "opt %d <-", opt);
return ret;
}
@@ -772,10 +749,10 @@ static int h264_enc_set_param(void *handle,
{
int ret = 0;
struct venc_h264_inst *inst = (struct venc_h264_inst *)handle;
- struct mtk_vcodec_ctx *ctx = inst->ctx;
+ struct mtk_vcodec_enc_ctx *ctx = inst->ctx;
const bool is_34bit = MTK_ENC_IOVA_IS_34BIT(ctx);
- mtk_vcodec_debug(inst, "->type=%d", type);
+ mtk_venc_debug(ctx, "->type=%d", type);
switch (type) {
case VENC_SET_PARAM_ENC:
@@ -798,7 +775,7 @@ static int h264_enc_set_param(void *handle,
case VENC_SET_PARAM_PREPEND_HEADER:
inst->prepend_hdr = 1;
- mtk_vcodec_debug(inst, "set prepend header mode");
+ mtk_venc_debug(ctx, "set prepend header mode");
break;
case VENC_SET_PARAM_FORCE_INTRA:
case VENC_SET_PARAM_GOP_SIZE:
@@ -811,8 +788,6 @@ static int h264_enc_set_param(void *handle,
break;
}
- mtk_vcodec_debug_leave(inst);
-
return ret;
}
@@ -821,14 +796,11 @@ static int h264_enc_deinit(void *handle)
int ret = 0;
struct venc_h264_inst *inst = (struct venc_h264_inst *)handle;
- mtk_vcodec_debug_enter(inst);
-
ret = vpu_enc_deinit(&inst->vpu_inst);
if (inst->work_buf_allocated)
h264_enc_free_work_buf(inst);
- mtk_vcodec_debug_leave(inst);
kfree(inst);
return ret;
diff --git a/drivers/media/platform/mediatek/vcodec/venc/venc_vp8_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_vp8_if.c
index 56ce58f761f1..05abca91e742 100644
--- a/drivers/media/platform/mediatek/vcodec/venc/venc_vp8_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc/venc_vp8_if.c
@@ -9,9 +9,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
-#include "../mtk_vcodec_drv.h"
-#include "../mtk_vcodec_util.h"
-#include "../mtk_vcodec_intr.h"
+#include "../mtk_vcodec_enc_drv.h"
+#include "../../common/mtk_vcodec_intr.h"
#include "../mtk_vcodec_enc.h"
#include "../mtk_vcodec_enc_pm.h"
#include "../venc_drv_base.h"
@@ -129,7 +128,7 @@ struct venc_vp8_inst {
unsigned int ts_mode;
struct venc_vpu_inst vpu_inst;
struct venc_vp8_vsi *vsi;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_enc_ctx *ctx;
};
static inline u32 vp8_enc_read_reg(struct venc_vp8_inst *inst, u32 addr)
@@ -141,16 +140,12 @@ static void vp8_enc_free_work_buf(struct venc_vp8_inst *inst)
{
int i;
- mtk_vcodec_debug_enter(inst);
-
/* Buffers need to be freed by AP. */
for (i = 0; i < VENC_VP8_VPU_WORK_BUF_MAX; i++) {
if (inst->work_bufs[i].size == 0)
continue;
mtk_vcodec_mem_free(inst->ctx, &inst->work_bufs[i]);
}
-
- mtk_vcodec_debug_leave(inst);
}
static int vp8_enc_alloc_work_buf(struct venc_vp8_inst *inst)
@@ -159,8 +154,6 @@ static int vp8_enc_alloc_work_buf(struct venc_vp8_inst *inst)
int ret = 0;
struct venc_vp8_vpu_buf *wb = inst->vsi->work_bufs;
- mtk_vcodec_debug_enter(inst);
-
for (i = 0; i < VENC_VP8_VPU_WORK_BUF_MAX; i++) {
if (wb[i].size == 0)
continue;
@@ -177,8 +170,7 @@ static int vp8_enc_alloc_work_buf(struct venc_vp8_inst *inst)
inst->work_bufs[i].size = wb[i].size;
ret = mtk_vcodec_mem_alloc(inst->ctx, &inst->work_bufs[i]);
if (ret) {
- mtk_vcodec_err(inst,
- "cannot alloc work_bufs[%d]", i);
+ mtk_venc_err(inst->ctx, "cannot alloc work_bufs[%d]", i);
goto err_alloc;
}
/*
@@ -199,15 +191,12 @@ static int vp8_enc_alloc_work_buf(struct venc_vp8_inst *inst)
}
wb[i].iova = inst->work_bufs[i].dma_addr;
- mtk_vcodec_debug(inst,
- "work_bufs[%d] va=0x%p,iova=%pad,size=%zu",
- i, inst->work_bufs[i].va,
- &inst->work_bufs[i].dma_addr,
- inst->work_bufs[i].size);
+ mtk_venc_debug(inst->ctx, "work_bufs[%d] va=0x%p,iova=%pad,size=%zu",
+ i, inst->work_bufs[i].va,
+ &inst->work_bufs[i].dma_addr,
+ inst->work_bufs[i].size);
}
- mtk_vcodec_debug_leave(inst);
-
return ret;
err_alloc:
@@ -219,12 +208,12 @@ err_alloc:
static unsigned int vp8_enc_wait_venc_done(struct venc_vp8_inst *inst)
{
unsigned int irq_status = 0;
- struct mtk_vcodec_ctx *ctx = (struct mtk_vcodec_ctx *)inst->ctx;
+ struct mtk_vcodec_enc_ctx *ctx = (struct mtk_vcodec_enc_ctx *)inst->ctx;
if (!mtk_vcodec_wait_for_done_ctx(ctx, MTK_INST_IRQ_RECEIVED,
WAIT_INTR_TIMEOUT_MS, 0)) {
irq_status = ctx->irq_status;
- mtk_vcodec_debug(inst, "isr return %x", irq_status);
+ mtk_venc_debug(ctx, "isr return %x", irq_status);
}
return irq_status;
}
@@ -269,8 +258,7 @@ static int vp8_enc_compose_one_frame(struct venc_vp8_inst *inst,
}
if (bs_buf->size < bs_hdr_len + bs_frm_size + ac_tag_size) {
- mtk_vcodec_err(inst, "bitstream buf size is too small(%zu)",
- bs_buf->size);
+ mtk_venc_err(inst->ctx, "bitstream buf size is too small(%zu)", bs_buf->size);
return -EINVAL;
}
@@ -300,7 +288,7 @@ static int vp8_enc_encode_frame(struct venc_vp8_inst *inst,
int ret = 0;
unsigned int irq_status;
- mtk_vcodec_debug(inst, "->frm_cnt=%d", inst->frm_cnt);
+ mtk_venc_debug(inst->ctx, "->frm_cnt=%d", inst->frm_cnt);
ret = vpu_enc_encode(&inst->vpu_inst, 0, frm_buf, bs_buf, NULL);
if (ret)
@@ -308,23 +296,22 @@ static int vp8_enc_encode_frame(struct venc_vp8_inst *inst,
irq_status = vp8_enc_wait_venc_done(inst);
if (irq_status != MTK_VENC_IRQ_STATUS_FRM) {
- mtk_vcodec_err(inst, "irq_status=%d failed", irq_status);
+ mtk_venc_err(inst->ctx, "irq_status=%d failed", irq_status);
return -EIO;
}
if (vp8_enc_compose_one_frame(inst, bs_buf, bs_size)) {
- mtk_vcodec_err(inst, "vp8_enc_compose_one_frame failed");
+ mtk_venc_err(inst->ctx, "vp8_enc_compose_one_frame failed");
return -EINVAL;
}
inst->frm_cnt++;
- mtk_vcodec_debug(inst, "<-size=%d key_frm=%d", *bs_size,
- inst->vpu_inst.is_key_frm);
+ mtk_venc_debug(inst->ctx, "<-size=%d key_frm=%d", *bs_size, inst->vpu_inst.is_key_frm);
return ret;
}
-static int vp8_enc_init(struct mtk_vcodec_ctx *ctx)
+static int vp8_enc_init(struct mtk_vcodec_enc_ctx *ctx)
{
int ret = 0;
struct venc_vp8_inst *inst;
@@ -336,16 +323,12 @@ static int vp8_enc_init(struct mtk_vcodec_ctx *ctx)
inst->ctx = ctx;
inst->vpu_inst.ctx = ctx;
inst->vpu_inst.id = IPI_VENC_VP8;
- inst->hw_base = mtk_vcodec_get_reg_addr(inst->ctx, VENC_LT_SYS);
-
- mtk_vcodec_debug_enter(inst);
+ inst->hw_base = mtk_vcodec_get_reg_addr(inst->ctx->dev->reg_base, VENC_LT_SYS);
ret = vpu_enc_init(&inst->vpu_inst);
inst->vsi = (struct venc_vp8_vsi *)inst->vpu_inst.vsi;
- mtk_vcodec_debug_leave(inst);
-
if (ret)
kfree(inst);
else
@@ -362,9 +345,7 @@ static int vp8_enc_encode(void *handle,
{
int ret = 0;
struct venc_vp8_inst *inst = (struct venc_vp8_inst *)handle;
- struct mtk_vcodec_ctx *ctx = inst->ctx;
-
- mtk_vcodec_debug_enter(inst);
+ struct mtk_vcodec_enc_ctx *ctx = inst->ctx;
enable_irq(ctx->dev->enc_irq);
@@ -378,7 +359,7 @@ static int vp8_enc_encode(void *handle,
break;
default:
- mtk_vcodec_err(inst, "opt not support:%d", opt);
+ mtk_venc_err(ctx, "opt not support:%d", opt);
ret = -EINVAL;
break;
}
@@ -386,8 +367,6 @@ static int vp8_enc_encode(void *handle,
encode_err:
disable_irq(ctx->dev->enc_irq);
- mtk_vcodec_debug_leave(inst);
-
return ret;
}
@@ -398,7 +377,7 @@ static int vp8_enc_set_param(void *handle,
int ret = 0;
struct venc_vp8_inst *inst = (struct venc_vp8_inst *)handle;
- mtk_vcodec_debug(inst, "->type=%d", type);
+ mtk_venc_debug(inst->ctx, "->type=%d", type);
switch (type) {
case VENC_SET_PARAM_ENC:
@@ -429,7 +408,7 @@ static int vp8_enc_set_param(void *handle,
*/
case VENC_SET_PARAM_TS_MODE:
inst->ts_mode = 1;
- mtk_vcodec_debug(inst, "set ts_mode");
+ mtk_venc_debug(inst->ctx, "set ts_mode");
break;
default:
@@ -437,8 +416,6 @@ static int vp8_enc_set_param(void *handle,
break;
}
- mtk_vcodec_debug_leave(inst);
-
return ret;
}
@@ -447,16 +424,12 @@ static int vp8_enc_deinit(void *handle)
int ret = 0;
struct venc_vp8_inst *inst = (struct venc_vp8_inst *)handle;
- mtk_vcodec_debug_enter(inst);
-
ret = vpu_enc_deinit(&inst->vpu_inst);
if (inst->work_buf_allocated)
vp8_enc_free_work_buf(inst);
- mtk_vcodec_debug_leave(inst);
kfree(inst);
-
return ret;
}
diff --git a/drivers/media/platform/mediatek/vcodec/venc_drv_base.h b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_base.h
index 3d718411dc73..856d50151bf6 100644
--- a/drivers/media/platform/mediatek/vcodec/venc_drv_base.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_base.h
@@ -9,7 +9,7 @@
#ifndef _VENC_DRV_BASE_
#define _VENC_DRV_BASE_
-#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_enc_drv.h"
#include "venc_drv_if.h"
@@ -19,7 +19,7 @@ struct venc_common_if {
* @ctx: [in] mtk v4l2 context
* @handle: [out] driver handle
*/
- int (*init)(struct mtk_vcodec_ctx *ctx);
+ int (*init)(struct mtk_vcodec_enc_ctx *ctx);
/**
* (*encode)() - trigger encode
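venc_drv_base.h above defines the ops table each codec backend implements; only the .init callback is visible in this hunk, and venc_drv_if.h later in this diff declares the venc_vp8_if instance. A hedged sketch of how such a table is expected to be filled, with member names other than .init inferred from the vp8_enc_* callbacks shown earlier:

    /* Member names other than .init are assumptions inferred from the
     * vp8_enc_* callbacks converted earlier in this diff.
     */
    const struct venc_common_if example_venc_vp8_if = {
            .init      = vp8_enc_init,
            .encode    = vp8_enc_encode,
            .set_param = vp8_enc_set_param,
            .deinit    = vp8_enc_deinit,
    };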
diff --git a/drivers/media/platform/mediatek/vcodec/venc_drv_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
index ce0bce811615..1bdaecdd64a7 100644
--- a/drivers/media/platform/mediatek/vcodec/venc_drv_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
@@ -16,7 +16,7 @@
#include "mtk_vcodec_enc.h"
#include "mtk_vcodec_enc_pm.h"
-int venc_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
+int venc_if_init(struct mtk_vcodec_enc_ctx *ctx, unsigned int fourcc)
{
int ret = 0;
@@ -40,8 +40,8 @@ int venc_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
return ret;
}
-int venc_if_set_param(struct mtk_vcodec_ctx *ctx,
- enum venc_set_param_type type, struct venc_enc_param *in)
+int venc_if_set_param(struct mtk_vcodec_enc_ctx *ctx,
+ enum venc_set_param_type type, struct venc_enc_param *in)
{
int ret = 0;
@@ -54,7 +54,7 @@ int venc_if_set_param(struct mtk_vcodec_ctx *ctx,
return ret;
}
-int venc_if_encode(struct mtk_vcodec_ctx *ctx,
+int venc_if_encode(struct mtk_vcodec_enc_ctx *ctx,
enum venc_start_opt opt, struct venc_frm_buf *frm_buf,
struct mtk_vcodec_mem *bs_buf,
struct venc_done_result *result)
@@ -81,7 +81,7 @@ int venc_if_encode(struct mtk_vcodec_ctx *ctx,
return ret;
}
-int venc_if_deinit(struct mtk_vcodec_ctx *ctx)
+int venc_if_deinit(struct mtk_vcodec_enc_ctx *ctx)
{
int ret = 0;
diff --git a/drivers/media/platform/mediatek/vcodec/venc_drv_if.h b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.h
index 0b04a1020873..d00fb68b8235 100644
--- a/drivers/media/platform/mediatek/vcodec/venc_drv_if.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.h
@@ -9,8 +9,7 @@
#ifndef _VENC_DRV_IF_H_
#define _VENC_DRV_IF_H_
-#include "mtk_vcodec_drv.h"
-#include "mtk_vcodec_util.h"
+#include "mtk_vcodec_enc_drv.h"
/*
* enum venc_yuv_fmt - The type of input yuv format
@@ -132,14 +131,14 @@ extern const struct venc_common_if venc_vp8_if;
* @fourcc: encoder input format
* Return: 0 if creating handle successfully, otherwise it is failed.
*/
-int venc_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc);
+int venc_if_init(struct mtk_vcodec_enc_ctx *ctx, unsigned int fourcc);
/*
* venc_if_deinit - Release the driver handle
* @ctx: device context
* Return: 0 if releasing handle successfully, otherwise it is failed.
*/
-int venc_if_deinit(struct mtk_vcodec_ctx *ctx);
+int venc_if_deinit(struct mtk_vcodec_enc_ctx *ctx);
/*
* venc_if_set_param - Set parameter to driver
@@ -148,7 +147,7 @@ int venc_if_deinit(struct mtk_vcodec_ctx *ctx);
* @in: input parameter
* Return: 0 if setting param successfully, otherwise it is failed.
*/
-int venc_if_set_param(struct mtk_vcodec_ctx *ctx,
+int venc_if_set_param(struct mtk_vcodec_enc_ctx *ctx,
enum venc_set_param_type type,
struct venc_enc_param *in);
@@ -161,7 +160,7 @@ int venc_if_set_param(struct mtk_vcodec_ctx *ctx,
* @result: encode result
* Return: 0 if encoding frame successfully, otherwise it is failed.
*/
-int venc_if_encode(struct mtk_vcodec_ctx *ctx,
+int venc_if_encode(struct mtk_vcodec_enc_ctx *ctx,
enum venc_start_opt opt,
struct venc_frm_buf *frm_buf,
struct mtk_vcodec_mem *bs_buf,
diff --git a/drivers/media/platform/mediatek/vcodec/venc_ipi_msg.h b/drivers/media/platform/mediatek/vcodec/encoder/venc_ipi_msg.h
index bb16d96a7f57..bb16d96a7f57 100644
--- a/drivers/media/platform/mediatek/vcodec/venc_ipi_msg.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_ipi_msg.h
diff --git a/drivers/media/platform/mediatek/vcodec/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
index 09e7eaa25aab..d299cc2962a5 100644
--- a/drivers/media/platform/mediatek/vcodec/venc_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
@@ -4,8 +4,7 @@
* Author: PoChun Lin <pochun.lin@mediatek.com>
*/
-#include "mtk_vcodec_drv.h"
-#include "mtk_vcodec_fw.h"
+#include "mtk_vcodec_enc_drv.h"
#include "venc_ipi_msg.h"
#include "venc_vpu_if.h"
@@ -22,14 +21,13 @@ static void handle_enc_init_msg(struct venc_vpu_inst *vpu, const void *data)
return;
/* Check firmware version. */
- mtk_vcodec_debug(vpu, "firmware version: 0x%x\n",
- msg->venc_abi_version);
+ mtk_venc_debug(vpu->ctx, "firmware version: 0x%x\n", msg->venc_abi_version);
switch (msg->venc_abi_version) {
case 1:
break;
default:
- mtk_vcodec_err(vpu, "unhandled firmware version 0x%x\n",
- msg->venc_abi_version);
+ mtk_venc_err(vpu->ctx, "unhandled firmware version 0x%x\n",
+ msg->venc_abi_version);
vpu->failure = 1;
break;
}
@@ -44,19 +42,47 @@ static void handle_enc_encode_msg(struct venc_vpu_inst *vpu, const void *data)
vpu->is_key_frm = msg->is_key_frm;
}
+static bool vpu_enc_check_ap_inst(struct mtk_vcodec_enc_dev *enc_dev, struct venc_vpu_inst *vpu)
+{
+ struct mtk_vcodec_enc_ctx *ctx;
+ int ret = false;
+
+ list_for_each_entry(ctx, &enc_dev->ctx_list, list) {
+ if (!IS_ERR_OR_NULL(ctx) && ctx->vpu_inst == vpu) {
+ ret = true;
+ break;
+ }
+ }
+
+ return ret;
+}
+
static void vpu_enc_ipi_handler(void *data, unsigned int len, void *priv)
{
+ struct mtk_vcodec_enc_dev *enc_dev;
const struct venc_vpu_ipi_msg_common *msg = data;
- struct venc_vpu_inst *vpu =
- (struct venc_vpu_inst *)(unsigned long)msg->venc_inst;
+ struct venc_vpu_inst *vpu;
- mtk_vcodec_debug(vpu, "msg_id %x inst %p status %d",
- msg->msg_id, vpu, msg->status);
+ enc_dev = (struct mtk_vcodec_enc_dev *)priv;
+ vpu = (struct venc_vpu_inst *)(unsigned long)msg->venc_inst;
+ if (!priv || !vpu) {
+ pr_err(MTK_DBG_V4L2_STR "venc_inst is NULL, did the SCP hang or crash?");
+ return;
+ }
+
+ mtk_venc_debug(vpu->ctx, "msg_id %x inst %p status %d", msg->msg_id, vpu, msg->status);
+ if (!vpu_enc_check_ap_inst(enc_dev, vpu) || msg->msg_id < VPU_IPIMSG_ENC_INIT_DONE ||
+ msg->msg_id > VPU_IPIMSG_ENC_DEINIT_DONE) {
+ mtk_v4l2_venc_err(vpu->ctx, "venc msg id not correct => 0x%x", msg->msg_id);
+ vpu->failure = -EINVAL;
+ goto error;
+ }
- vpu->signaled = 1;
vpu->failure = (msg->status != VENC_IPI_MSG_STATUS_OK);
- if (vpu->failure)
- goto failure;
+ if (vpu->failure) {
+ mtk_venc_err(vpu->ctx, "vpu enc status failure %d", vpu->failure);
+ goto error;
+ }
switch (msg->msg_id) {
case VPU_IPIMSG_ENC_INIT_DONE:
@@ -70,12 +96,12 @@ static void vpu_enc_ipi_handler(void *data, unsigned int len, void *priv)
case VPU_IPIMSG_ENC_DEINIT_DONE:
break;
default:
- mtk_vcodec_err(vpu, "unknown msg id %x", msg->msg_id);
+ mtk_venc_err(vpu->ctx, "unknown msg id %x", msg->msg_id);
break;
}
-failure:
- mtk_vcodec_debug_leave(vpu);
+error:
+ vpu->signaled = 1;
}
static int vpu_enc_send_msg(struct venc_vpu_inst *vpu, void *msg,
@@ -83,25 +109,21 @@ static int vpu_enc_send_msg(struct venc_vpu_inst *vpu, void *msg,
{
int status;
- mtk_vcodec_debug_enter(vpu);
-
if (!vpu->ctx->dev->fw_handler) {
- mtk_vcodec_err(vpu, "inst dev is NULL");
+ mtk_venc_err(vpu->ctx, "inst dev is NULL");
return -EINVAL;
}
status = mtk_vcodec_fw_ipi_send(vpu->ctx->dev->fw_handler, vpu->id, msg,
len, 2000);
if (status) {
- mtk_vcodec_err(vpu, "vpu_ipi_send msg_id %x len %d fail %d",
- *(uint32_t *)msg, len, status);
+ mtk_venc_err(vpu->ctx, "vpu_ipi_send msg_id %x len %d fail %d",
+ *(uint32_t *)msg, len, status);
return -EINVAL;
}
if (vpu->failure)
return -EINVAL;
- mtk_vcodec_debug_leave(vpu);
-
return 0;
}
@@ -110,17 +132,16 @@ int vpu_enc_init(struct venc_vpu_inst *vpu)
int status;
struct venc_ap_ipi_msg_init out;
- mtk_vcodec_debug_enter(vpu);
-
init_waitqueue_head(&vpu->wq_hd);
vpu->signaled = 0;
vpu->failure = 0;
+ vpu->ctx->vpu_inst = vpu;
status = mtk_vcodec_fw_ipi_register(vpu->ctx->dev->fw_handler, vpu->id,
vpu_enc_ipi_handler, "venc", NULL);
if (status) {
- mtk_vcodec_err(vpu, "vpu_ipi_register fail %d", status);
+ mtk_venc_err(vpu->ctx, "vpu_ipi_register fail %d", status);
return -EINVAL;
}
@@ -128,12 +149,10 @@ int vpu_enc_init(struct venc_vpu_inst *vpu)
out.msg_id = AP_IPIMSG_ENC_INIT;
out.venc_inst = (unsigned long)vpu;
if (vpu_enc_send_msg(vpu, &out, sizeof(out))) {
- mtk_vcodec_err(vpu, "AP_IPIMSG_ENC_INIT fail");
+ mtk_venc_err(vpu->ctx, "AP_IPIMSG_ENC_INIT fail");
return -EINVAL;
}
- mtk_vcodec_debug_leave(vpu);
-
return 0;
}
@@ -166,7 +185,7 @@ int vpu_enc_set_param(struct venc_vpu_inst *vpu,
sizeof(struct venc_ap_ipi_msg_set_param);
struct venc_ap_ipi_msg_set_param_ext out;
- mtk_vcodec_debug(vpu, "id %d ->", id);
+ mtk_venc_debug(vpu->ctx, "id %d ->", id);
memset(&out, 0, sizeof(out));
out.base.msg_id = AP_IPIMSG_ENC_SET_PARAM;
@@ -208,16 +227,15 @@ int vpu_enc_set_param(struct venc_vpu_inst *vpu,
out.base.data_item = 0;
break;
default:
- mtk_vcodec_err(vpu, "id %d not supported", id);
+ mtk_venc_err(vpu->ctx, "id %d not supported", id);
return -EINVAL;
}
if (vpu_enc_send_msg(vpu, &out, msg_size)) {
- mtk_vcodec_err(vpu,
- "AP_IPIMSG_ENC_SET_PARAM %d fail", id);
+ mtk_venc_err(vpu->ctx, "AP_IPIMSG_ENC_SET_PARAM %d fail", id);
return -EINVAL;
}
- mtk_vcodec_debug(vpu, "id %d <-", id);
+ mtk_venc_debug(vpu->ctx, "id %d <-", id);
return 0;
}
@@ -234,7 +252,7 @@ static int vpu_enc_encode_32bits(struct venc_vpu_inst *vpu,
sizeof(struct venc_ap_ipi_msg_enc);
struct venc_ap_ipi_msg_enc_ext out;
- mtk_vcodec_debug(vpu, "bs_mode %d ->", bs_mode);
+ mtk_venc_debug(vpu->ctx, "bs_mode %d ->", bs_mode);
memset(&out, 0, sizeof(out));
out.base.msg_id = AP_IPIMSG_ENC_ENCODE;
@@ -248,7 +266,7 @@ static int vpu_enc_encode_32bits(struct venc_vpu_inst *vpu,
out.base.input_addr[1] = frm_buf->fb_addr[1].dma_addr;
out.base.input_addr[2] = frm_buf->fb_addr[2].dma_addr;
} else {
- mtk_vcodec_err(vpu, "dma_addr not align to 16");
+ mtk_venc_err(vpu->ctx, "dma_addr not align to 16");
return -EINVAL;
}
}
@@ -263,8 +281,7 @@ static int vpu_enc_encode_32bits(struct venc_vpu_inst *vpu,
out.data[2] = frame_info->frm_type;
}
if (vpu_enc_send_msg(vpu, &out, msg_size)) {
- mtk_vcodec_err(vpu, "AP_IPIMSG_ENC_ENCODE %d fail",
- bs_mode);
+ mtk_venc_err(vpu->ctx, "AP_IPIMSG_ENC_ENCODE %d fail", bs_mode);
return -EINVAL;
}
@@ -280,7 +297,7 @@ static int vpu_enc_encode_34bits(struct venc_vpu_inst *vpu,
struct venc_ap_ipi_msg_enc_ext_34 out;
size_t msg_size = sizeof(struct venc_ap_ipi_msg_enc_ext_34);
- mtk_vcodec_debug(vpu, "bs_mode %d ->", bs_mode);
+ mtk_venc_debug(vpu->ctx, "bs_mode %d ->", bs_mode);
memset(&out, 0, sizeof(out));
out.msg_id = AP_IPIMSG_ENC_ENCODE;
@@ -295,7 +312,7 @@ static int vpu_enc_encode_34bits(struct venc_vpu_inst *vpu,
out.input_addr[1] = frm_buf->fb_addr[1].dma_addr;
out.input_addr[2] = frm_buf->fb_addr[2].dma_addr;
} else {
- mtk_vcodec_err(vpu, "dma_addr not align to 16");
+ mtk_venc_err(vpu->ctx, "dma_addr not align to 16");
return -EINVAL;
}
}
@@ -310,8 +327,7 @@ static int vpu_enc_encode_34bits(struct venc_vpu_inst *vpu,
out.data[2] = frame_info->frm_type;
}
if (vpu_enc_send_msg(vpu, &out, msg_size)) {
- mtk_vcodec_err(vpu, "AP_IPIMSG_ENC_ENCODE %d fail",
- bs_mode);
+ mtk_venc_err(vpu->ctx, "AP_IPIMSG_ENC_ENCODE %d fail", bs_mode);
return -EINVAL;
}
@@ -335,8 +351,8 @@ int vpu_enc_encode(struct venc_vpu_inst *vpu, unsigned int bs_mode,
if (ret)
return ret;
- mtk_vcodec_debug(vpu, "bs_mode %d state %d size %d key_frm %d <-",
- bs_mode, vpu->state, vpu->bs_size, vpu->is_key_frm);
+ mtk_venc_debug(vpu->ctx, "bs_mode %d state %d size %d key_frm %d <-",
+ bs_mode, vpu->state, vpu->bs_size, vpu->is_key_frm);
return 0;
}
@@ -345,17 +361,13 @@ int vpu_enc_deinit(struct venc_vpu_inst *vpu)
{
struct venc_ap_ipi_msg_deinit out;
- mtk_vcodec_debug_enter(vpu);
-
memset(&out, 0, sizeof(out));
out.msg_id = AP_IPIMSG_ENC_DEINIT;
out.vpu_inst_addr = vpu->inst_addr;
if (vpu_enc_send_msg(vpu, &out, sizeof(out))) {
- mtk_vcodec_err(vpu, "AP_IPIMSG_ENC_DEINIT fail");
+ mtk_venc_err(vpu->ctx, "AP_IPIMSG_ENC_DEINIT fail");
return -EINVAL;
}
- mtk_vcodec_debug_leave(vpu);
-
return 0;
}
diff --git a/drivers/media/platform/mediatek/vcodec/venc_vpu_if.h b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.h
index f83bc1b3f2bf..ede55fc3bd07 100644
--- a/drivers/media/platform/mediatek/vcodec/venc_vpu_if.h
+++ b/drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.h
@@ -7,7 +7,6 @@
#ifndef _VENC_VPU_IF_H_
#define _VENC_VPU_IF_H_
-#include "mtk_vcodec_fw.h"
#include "venc_drv_if.h"
/*
@@ -35,7 +34,7 @@ struct venc_vpu_inst {
unsigned int inst_addr;
void *vsi;
int id;
- struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_enc_ctx *ctx;
};
int vpu_enc_init(struct venc_vpu_inst *vpu);
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h
deleted file mode 100644
index f17d67e781c9..000000000000
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h
+++ /dev/null
@@ -1,548 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
-* Copyright (c) 2016 MediaTek Inc.
-* Author: PC Chen <pc.chen@mediatek.com>
-* Tiffany Lin <tiffany.lin@mediatek.com>
-*/
-
-#ifndef _MTK_VCODEC_DRV_H_
-#define _MTK_VCODEC_DRV_H_
-
-#include <linux/platform_device.h>
-#include <linux/videodev2.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-mem2mem.h>
-#include <media/videobuf2-core.h>
-
-#include "mtk_vcodec_dbgfs.h"
-#include "mtk_vcodec_util.h"
-#include "vdec_msg_queue.h"
-
-#define MTK_VCODEC_DEC_NAME "mtk-vcodec-dec"
-#define MTK_VCODEC_ENC_NAME "mtk-vcodec-enc"
-
-#define MTK_VCODEC_MAX_PLANES 3
-#define MTK_V4L2_BENCHMARK 0
-#define WAIT_INTR_TIMEOUT_MS 1000
-#define IS_VDEC_LAT_ARCH(hw_arch) ((hw_arch) >= MTK_VDEC_LAT_SINGLE_CORE)
-#define IS_VDEC_INNER_RACING(capability) ((capability) & MTK_VCODEC_INNER_RACING)
-
-/*
- * enum mtk_hw_reg_idx - MTK hw register base index
- */
-enum mtk_hw_reg_idx {
- VDEC_SYS,
- VDEC_MISC,
- VDEC_LD,
- VDEC_TOP,
- VDEC_CM,
- VDEC_AD,
- VDEC_AV,
- VDEC_PP,
- VDEC_HWD,
- VDEC_HWQ,
- VDEC_HWB,
- VDEC_HWG,
- NUM_MAX_VDEC_REG_BASE,
- /* h264 encoder */
- VENC_SYS = NUM_MAX_VDEC_REG_BASE,
- /* vp8 encoder */
- VENC_LT_SYS,
- NUM_MAX_VCODEC_REG_BASE
-};
-
-/*
- * enum mtk_instance_type - The type of an MTK Vcodec instance.
- */
-enum mtk_instance_type {
- MTK_INST_DECODER = 0,
- MTK_INST_ENCODER = 1,
-};
-
-/**
- * enum mtk_instance_state - The state of an MTK Vcodec instance.
- * @MTK_STATE_FREE: default state when instance is created
- * @MTK_STATE_INIT: vcodec instance is initialized
- * @MTK_STATE_HEADER: vdec had sps/pps header parsed or venc
- * had sps/pps header encoded
- * @MTK_STATE_FLUSH: vdec is flushing. Only used by decoder
- * @MTK_STATE_ABORT: vcodec should be aborted
- */
-enum mtk_instance_state {
- MTK_STATE_FREE = 0,
- MTK_STATE_INIT = 1,
- MTK_STATE_HEADER = 2,
- MTK_STATE_FLUSH = 3,
- MTK_STATE_ABORT = 4,
-};
-
-/*
- * enum mtk_encode_param - General encoding parameters type
- */
-enum mtk_encode_param {
- MTK_ENCODE_PARAM_NONE = 0,
- MTK_ENCODE_PARAM_BITRATE = (1 << 0),
- MTK_ENCODE_PARAM_FRAMERATE = (1 << 1),
- MTK_ENCODE_PARAM_INTRA_PERIOD = (1 << 2),
- MTK_ENCODE_PARAM_FORCE_INTRA = (1 << 3),
- MTK_ENCODE_PARAM_GOP_SIZE = (1 << 4),
-};
-
-enum mtk_fmt_type {
- MTK_FMT_DEC = 0,
- MTK_FMT_ENC = 1,
- MTK_FMT_FRAME = 2,
-};
-
-/*
- * enum mtk_vdec_hw_id - Hardware index used to separate
- * different hardware
- */
-enum mtk_vdec_hw_id {
- MTK_VDEC_CORE,
- MTK_VDEC_LAT0,
- MTK_VDEC_LAT1,
- MTK_VDEC_LAT_SOC,
- MTK_VDEC_HW_MAX,
-};
-
-/*
- * enum mtk_vdec_hw_count - Supported hardware count
- */
-enum mtk_vdec_hw_count {
- MTK_VDEC_NO_HW = 0,
- MTK_VDEC_ONE_CORE,
- MTK_VDEC_ONE_LAT_ONE_CORE,
- MTK_VDEC_MAX_HW_COUNT,
-};
-
-/*
- * struct mtk_video_fmt - Structure used to store information about pixelformats
- */
-struct mtk_video_fmt {
- u32 fourcc;
- enum mtk_fmt_type type;
- u32 num_planes;
- u32 flags;
- struct v4l2_frmsize_stepwise frmsize;
-};
-
-/*
- * enum mtk_q_type - Type of queue
- */
-enum mtk_q_type {
- MTK_Q_DATA_SRC = 0,
- MTK_Q_DATA_DST = 1,
-};
-
-/*
- * struct mtk_q_data - Structure used to store information about queue
- */
-struct mtk_q_data {
- unsigned int visible_width;
- unsigned int visible_height;
- unsigned int coded_width;
- unsigned int coded_height;
- enum v4l2_field field;
- unsigned int bytesperline[MTK_VCODEC_MAX_PLANES];
- unsigned int sizeimage[MTK_VCODEC_MAX_PLANES];
- const struct mtk_video_fmt *fmt;
-};
-
-/**
- * struct mtk_enc_params - General encoding parameters
- * @bitrate: target bitrate in bits per second
- * @num_b_frame: number of b frames between p-frame
- * @rc_frame: frame based rate control
- * @rc_mb: macroblock based rate control
- * @seq_hdr_mode: H.264 sequence header is encoded separately or joined
- * with the first frame
- * @intra_period: I frame period
- * @gop_size: group of picture size, it's used as the intra frame period
- * @framerate_num: frame rate numerator. ex: framerate_num=30 and
- * framerate_denom=1 means FPS is 30
- * @framerate_denom: frame rate denominator. ex: framerate_num=30 and
- * framerate_denom=1 means FPS is 30
- * @h264_max_qp: Max value for H.264 quantization parameter
- * @h264_profile: V4L2 defined H.264 profile
- * @h264_level: V4L2 defined H.264 level
- * @force_intra: force/insert intra frame
- */
-struct mtk_enc_params {
- unsigned int bitrate;
- unsigned int num_b_frame;
- unsigned int rc_frame;
- unsigned int rc_mb;
- unsigned int seq_hdr_mode;
- unsigned int intra_period;
- unsigned int gop_size;
- unsigned int framerate_num;
- unsigned int framerate_denom;
- unsigned int h264_max_qp;
- unsigned int h264_profile;
- unsigned int h264_level;
- unsigned int force_intra;
-};
-
-/*
- * struct mtk_vcodec_clk_info - Structure used to store clock name
- */
-struct mtk_vcodec_clk_info {
- const char *clk_name;
- struct clk *vcodec_clk;
-};
-
-/*
- * struct mtk_vcodec_clk - Structure used to store vcodec clock information
- */
-struct mtk_vcodec_clk {
- struct mtk_vcodec_clk_info *clk_info;
- int clk_num;
-};
-
-/*
- * struct mtk_vcodec_pm - Power management data structure
- */
-struct mtk_vcodec_pm {
- struct mtk_vcodec_clk vdec_clk;
- struct mtk_vcodec_clk venc_clk;
- struct device *dev;
-};
-
-/**
- * struct vdec_pic_info - picture size information
- * @pic_w: picture width
- * @pic_h: picture height
- * @buf_w: picture buffer width (64 aligned up from pic_w)
- * @buf_h: picture buffer height (64 aligned up from pic_h)
- * @fb_sz: bitstream size of each plane
- * E.g. suppose picture size is 176x144,
- * buffer size will be aligned to 176x160.
- * @cap_fourcc: fourcc number (may change when resolution changes)
- * @reserved: align struct to 64-bit in order to adjust 32-bit and 64-bit os.
- */
-struct vdec_pic_info {
- unsigned int pic_w;
- unsigned int pic_h;
- unsigned int buf_w;
- unsigned int buf_h;
- unsigned int fb_sz[VIDEO_MAX_PLANES];
- unsigned int cap_fourcc;
- unsigned int reserved;
-};
-
-/**
- * struct mtk_vcodec_ctx - Context (instance) private data.
- *
- * @type: type of the instance - decoder or encoder
- * @dev: pointer to the mtk_vcodec_dev of the device
- * @list: link to ctx_list of mtk_vcodec_dev
- * @fh: struct v4l2_fh
- * @m2m_ctx: pointer to the v4l2_m2m_ctx of the context
- * @q_data: store information of input and output queue
- * of the context
- * @id: index of the context that this structure describes
- * @state: state of the context
- * @param_change: indicate encode parameter type
- * @enc_params: encoding parameters
- * @dec_if: hooked decoder driver interface
- * @enc_if: hooked encoder driver interface
- * @drv_handle: driver handle for specific decode/encode instance
- *
- * @picinfo: store picture info after header parsing
- * @dpb_size: store dpb count after header parsing
- * @int_cond: variable used by the waitqueue
- * @int_type: type of the last interrupt
- * @queue: waitqueue that can be used to wait for this context to
- * finish
- * @irq_status: irq status
- *
- * @ctrl_hdl: handler for v4l2 framework
- * @decode_work: worker for the decoding
- * @encode_work: worker for the encoding
- * @last_decoded_picinfo: pic information get from latest decode
- * @empty_flush_buf: a fake size-0 capture buffer that indicates flush. Only
- * to be used with encoder and stateful decoder.
- * @is_flushing: set to true if flushing is in progress.
- * @current_codec: current set input codec, in V4L2 pixel format
- * @capture_fourcc: capture queue type in V4L2 pixel format
- *
- * @colorspace: enum v4l2_colorspace; supplemental to pixelformat
- * @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
- * @quantization: enum v4l2_quantization, colorspace quantization
- * @xfer_func: enum v4l2_xfer_func, colorspace transfer function
- * @decoded_frame_cnt: number of decoded frames
- * @lock: protect variables accessed by V4L2 threads and worker thread such as
- * mtk_video_dec_buf.
- * @hw_id: hardware index used to identify different hardware.
- *
- * @msg_queue: msg queue used to store lat buffer information.
- * @q_mutex: vb2_queue mutex.
- */
-struct mtk_vcodec_ctx {
- enum mtk_instance_type type;
- struct mtk_vcodec_dev *dev;
- struct list_head list;
-
- struct v4l2_fh fh;
- struct v4l2_m2m_ctx *m2m_ctx;
- struct mtk_q_data q_data[2];
- int id;
- enum mtk_instance_state state;
- enum mtk_encode_param param_change;
- struct mtk_enc_params enc_params;
-
- const struct vdec_common_if *dec_if;
- const struct venc_common_if *enc_if;
- void *drv_handle;
-
- struct vdec_pic_info picinfo;
- int dpb_size;
-
- int int_cond[MTK_VDEC_HW_MAX];
- int int_type[MTK_VDEC_HW_MAX];
- wait_queue_head_t queue[MTK_VDEC_HW_MAX];
- unsigned int irq_status;
-
- struct v4l2_ctrl_handler ctrl_hdl;
- struct work_struct decode_work;
- struct work_struct encode_work;
- struct vdec_pic_info last_decoded_picinfo;
- struct v4l2_m2m_buffer empty_flush_buf;
- bool is_flushing;
-
- u32 current_codec;
- u32 capture_fourcc;
-
- enum v4l2_colorspace colorspace;
- enum v4l2_ycbcr_encoding ycbcr_enc;
- enum v4l2_quantization quantization;
- enum v4l2_xfer_func xfer_func;
-
- int decoded_frame_cnt;
- struct mutex lock;
- int hw_id;
-
- struct vdec_msg_queue msg_queue;
-
- struct mutex q_mutex;
-};
-
-/*
- * enum mtk_vdec_hw_arch - Used to separate different hardware architecture
- */
-enum mtk_vdec_hw_arch {
- MTK_VDEC_PURE_SINGLE_CORE,
- MTK_VDEC_LAT_SINGLE_CORE,
-};
-
-/*
- * struct mtk_vdec_format_types - Structure used to get supported
- * format types according to decoder capability
- */
-enum mtk_vdec_format_types {
- MTK_VDEC_FORMAT_MM21 = 0x20,
- MTK_VDEC_FORMAT_MT21C = 0x40,
- MTK_VDEC_FORMAT_H264_SLICE = 0x100,
- MTK_VDEC_FORMAT_VP8_FRAME = 0x200,
- MTK_VDEC_FORMAT_VP9_FRAME = 0x400,
- MTK_VDEC_FORMAT_AV1_FRAME = 0x800,
- MTK_VDEC_FORMAT_HEVC_FRAME = 0x1000,
- MTK_VCODEC_INNER_RACING = 0x20000,
-};
-
-/**
- * struct mtk_vcodec_dec_pdata - compatible data for each IC
- * @init_vdec_params: init vdec params
- * @ctrls_setup: init vcodec dec ctrls
- * @worker: worker to start a decode job
- * @flush_decoder: function that flushes the decoder
- * @get_cap_buffer: get capture buffer from capture queue
- * @cap_to_disp: put capture buffer to disp list for lat and core arch
- * @vdec_vb2_ops: struct vb2_ops
- *
- * @vdec_formats: supported video decoder formats
- * @num_formats: count of video decoder formats
- * @default_out_fmt: default output buffer format
- * @default_cap_fmt: default capture buffer format
- *
- * @hw_arch: hardware arch is used to separate pure_sin_core and lat_sin_core
- *
- * @is_subdev_supported: whether the parent-node architecture (subdev) is supported
- * @uses_stateless_api: whether the decoder uses the stateless API with requests
- */
-
-struct mtk_vcodec_dec_pdata {
- void (*init_vdec_params)(struct mtk_vcodec_ctx *ctx);
- int (*ctrls_setup)(struct mtk_vcodec_ctx *ctx);
- void (*worker)(struct work_struct *work);
- int (*flush_decoder)(struct mtk_vcodec_ctx *ctx);
- struct vdec_fb *(*get_cap_buffer)(struct mtk_vcodec_ctx *ctx);
- void (*cap_to_disp)(struct mtk_vcodec_ctx *ctx, int error,
- struct media_request *src_buf_req);
-
- struct vb2_ops *vdec_vb2_ops;
-
- const struct mtk_video_fmt *vdec_formats;
- const int *num_formats;
- const struct mtk_video_fmt *default_out_fmt;
- const struct mtk_video_fmt *default_cap_fmt;
-
- enum mtk_vdec_hw_arch hw_arch;
-
- bool is_subdev_supported;
- bool uses_stateless_api;
-};
-
-/**
- * struct mtk_vcodec_enc_pdata - compatible data for each IC
- *
- * @uses_ext: whether the encoder uses the extended firmware messaging format
- * @min_bitrate: minimum supported encoding bitrate
- * @max_bitrate: maximum supported encoding bitrate
- * @capture_formats: array of supported capture formats
- * @num_capture_formats: number of entries in capture_formats
- * @output_formats: array of supported output formats
- * @num_output_formats: number of entries in output_formats
- * @core_id: stands for h264 or vp8 encode index
- * @uses_34bit: whether the encoder uses 34-bit iova
- */
-struct mtk_vcodec_enc_pdata {
- bool uses_ext;
- unsigned long min_bitrate;
- unsigned long max_bitrate;
- const struct mtk_video_fmt *capture_formats;
- size_t num_capture_formats;
- const struct mtk_video_fmt *output_formats;
- size_t num_output_formats;
- int core_id;
- bool uses_34bit;
-};
-
-#define MTK_ENC_CTX_IS_EXT(ctx) ((ctx)->dev->venc_pdata->uses_ext)
-#define MTK_ENC_IOVA_IS_34BIT(ctx) ((ctx)->dev->venc_pdata->uses_34bit)
-
-/**
- * struct mtk_vcodec_dev - driver data
- * @v4l2_dev: V4L2 device to register video devices for.
- * @vfd_dec: Video device for decoder
- * @mdev_dec: Media device for decoder
- * @vfd_enc: Video device for encoder.
- *
- * @m2m_dev_dec: m2m device for decoder
- * @m2m_dev_enc: m2m device for encoder.
- * @plat_dev: platform device
- * @ctx_list: list of struct mtk_vcodec_ctx
- * @irqlock: protect data access by irq handler and work thread
- * @curr_ctx: The context that is waiting for codec hardware
- *
- * @reg_base: Mapped address of MTK Vcodec registers.
- * @vdec_pdata: decoder IC-specific data
- * @venc_pdata: encoder IC-specific data
- *
- * @fw_handler: used to communicate with the firmware.
- * @id_counter: used to identify current opened instance
- *
- * @decode_workqueue: decode work queue
- * @encode_workqueue: encode work queue
- *
- * @int_cond: used to identify whether an interrupt condition happened
- * @int_type: used to identify what kind of interrupt condition happened
- * @dev_mutex: video_device lock
- * @queue: waitqueue for waiting for completion of device commands
- *
- * @dec_irq: decoder irq resource
- * @enc_irq: h264 encoder irq resource
- *
- * @dec_mutex: decoder hardware lock
- * @enc_mutex: encoder hardware lock.
- *
- * @pm: power management control
- * @dec_capability: used to identify decode capability, ex: 4k
- * @enc_capability: used to identify encode capability
- *
- * @core_workqueue: queue used for core hardware decode
- *
- * @subdev_dev: subdev hardware device
- * @subdev_prob_done: check whether all used hw devices are probe done
- * @subdev_bitmap: used to record hardware is ready or not
- *
- * @dec_active_cnt: used to mark whether register values need to be recorded
- * @vdec_racing_info: record register value
- * @dec_racing_info_mutex: mutex lock used for inner racing mode
- * @dbgfs: debug log related information
- */
-struct mtk_vcodec_dev {
- struct v4l2_device v4l2_dev;
- struct video_device *vfd_dec;
- struct media_device mdev_dec;
- struct video_device *vfd_enc;
-
- struct v4l2_m2m_dev *m2m_dev_dec;
- struct v4l2_m2m_dev *m2m_dev_enc;
- struct platform_device *plat_dev;
- struct list_head ctx_list;
- spinlock_t irqlock;
- struct mtk_vcodec_ctx *curr_ctx;
- void __iomem *reg_base[NUM_MAX_VCODEC_REG_BASE];
- const struct mtk_vcodec_dec_pdata *vdec_pdata;
- const struct mtk_vcodec_enc_pdata *venc_pdata;
-
- struct mtk_vcodec_fw *fw_handler;
-
- unsigned long id_counter;
-
- struct workqueue_struct *decode_workqueue;
- struct workqueue_struct *encode_workqueue;
- int int_cond;
- int int_type;
- struct mutex dev_mutex;
- wait_queue_head_t queue;
-
- int dec_irq;
- int enc_irq;
-
- /* decoder hardware mutex lock */
- struct mutex dec_mutex[MTK_VDEC_HW_MAX];
- struct mutex enc_mutex;
-
- struct mtk_vcodec_pm pm;
- unsigned int dec_capability;
- unsigned int enc_capability;
-
- struct workqueue_struct *core_workqueue;
-
- void *subdev_dev[MTK_VDEC_HW_MAX];
- int (*subdev_prob_done)(struct mtk_vcodec_dev *vdec_dev);
- DECLARE_BITMAP(subdev_bitmap, MTK_VDEC_HW_MAX);
-
- atomic_t dec_active_cnt;
- u32 vdec_racing_info[132];
- /* Protects access to vdec_racing_info data */
- struct mutex dec_racing_info_mutex;
-
- struct mtk_vcodec_dbgfs dbgfs;
-};
-
-static inline struct mtk_vcodec_ctx *fh_to_ctx(struct v4l2_fh *fh)
-{
- return container_of(fh, struct mtk_vcodec_ctx, fh);
-}
-
-static inline struct mtk_vcodec_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
-{
- return container_of(ctrl->handler, struct mtk_vcodec_ctx, ctrl_hdl);
-}
-
-/* Wake up context wait_queue */
-static inline void
-wake_up_ctx(struct mtk_vcodec_ctx *ctx, unsigned int reason, unsigned int hw_id)
-{
- ctx->int_cond[hw_id] = 1;
- ctx->int_type[hw_id] = reason;
- wake_up_interruptible(&ctx->queue[hw_id]);
-}
-
-#endif /* _MTK_VCODEC_DRV_H_ */
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_intr.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_intr.c
deleted file mode 100644
index 552b4c93d972..000000000000
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_intr.c
+++ /dev/null
@@ -1,43 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
-* Copyright (c) 2016 MediaTek Inc.
-* Author: Tiffany Lin <tiffany.lin@mediatek.com>
-*/
-
-#include <linux/errno.h>
-#include <linux/wait.h>
-
-#include "mtk_vcodec_drv.h"
-#include "mtk_vcodec_intr.h"
-#include "mtk_vcodec_util.h"
-
-int mtk_vcodec_wait_for_done_ctx(struct mtk_vcodec_ctx *ctx,
- int command, unsigned int timeout_ms,
- unsigned int hw_id)
-{
- long timeout_jiff, ret;
- int status = 0;
-
- timeout_jiff = msecs_to_jiffies(timeout_ms);
- ret = wait_event_interruptible_timeout(ctx->queue[hw_id],
- ctx->int_cond[hw_id],
- timeout_jiff);
-
- if (!ret) {
- status = -1; /* timeout */
- mtk_v4l2_err("[%d] cmd=%d, type=%d, dec timeout=%ums (%d %d)",
- ctx->id, command, ctx->type, timeout_ms,
- ctx->int_cond[hw_id], ctx->int_type[hw_id]);
- } else if (-ERESTARTSYS == ret) {
- status = -1;
- mtk_v4l2_err("[%d] cmd=%d, type=%d, dec inter fail (%d %d)",
- ctx->id, command, ctx->type,
- ctx->int_cond[hw_id], ctx->int_type[hw_id]);
- }
-
- ctx->int_cond[hw_id] = 0;
- ctx->int_type[hw_id] = 0;
-
- return status;
-}
-EXPORT_SYMBOL(mtk_vcodec_wait_for_done_ctx);
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.h
deleted file mode 100644
index 88d389b65f13..000000000000
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_util.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
-* Copyright (c) 2016 MediaTek Inc.
-* Author: PC Chen <pc.chen@mediatek.com>
-* Tiffany Lin <tiffany.lin@mediatek.com>
-*/
-
-#ifndef _MTK_VCODEC_UTIL_H_
-#define _MTK_VCODEC_UTIL_H_
-
-#include <linux/types.h>
-#include <linux/dma-direction.h>
-
-struct mtk_vcodec_mem {
- size_t size;
- void *va;
- dma_addr_t dma_addr;
-};
-
-struct mtk_vcodec_fb {
- size_t size;
- dma_addr_t dma_addr;
-};
-
-struct mtk_vcodec_ctx;
-struct mtk_vcodec_dev;
-
-#undef pr_fmt
-#define pr_fmt(fmt) "%s(),%d: " fmt, __func__, __LINE__
-
-#define mtk_v4l2_err(fmt, args...) \
- pr_err("[MTK_V4L2][ERROR] " fmt "\n", ##args)
-
-#define mtk_vcodec_err(h, fmt, args...) \
- pr_err("[MTK_VCODEC][ERROR][%d]: " fmt "\n", \
- ((struct mtk_vcodec_ctx *)(h)->ctx)->id, ##args)
-
-#if defined(CONFIG_DEBUG_FS)
-extern int mtk_v4l2_dbg_level;
-extern int mtk_vcodec_dbg;
-
-#define mtk_v4l2_debug(level, fmt, args...) \
- do { \
- if (mtk_v4l2_dbg_level >= (level)) \
- pr_debug("[MTK_V4L2] %s, %d: " fmt "\n", \
- __func__, __LINE__, ##args); \
- } while (0)
-
-#define mtk_vcodec_debug(h, fmt, args...) \
- do { \
- if (mtk_vcodec_dbg) \
- dev_dbg(&(((struct mtk_vcodec_ctx *)(h)->ctx)->dev->plat_dev->dev), \
- "[MTK_VCODEC][%d]: %s, %d " fmt "\n", \
- ((struct mtk_vcodec_ctx *)(h)->ctx)->id, \
- __func__, __LINE__, ##args); \
- } while (0)
-#else
-#define mtk_v4l2_debug(level, fmt, args...) pr_debug(fmt, ##args)
-
-#define mtk_vcodec_debug(h, fmt, args...) \
- pr_debug("[MTK_VCODEC][%d]: " fmt "\n", \
- ((struct mtk_vcodec_ctx *)(h)->ctx)->id, ##args)
-#endif
-
-#define mtk_v4l2_debug_enter() mtk_v4l2_debug(3, "+")
-#define mtk_v4l2_debug_leave() mtk_v4l2_debug(3, "-")
-
-#define mtk_vcodec_debug_enter(h) mtk_vcodec_debug(h, "+")
-#define mtk_vcodec_debug_leave(h) mtk_vcodec_debug(h, "-")
-
-void __iomem *mtk_vcodec_get_reg_addr(struct mtk_vcodec_ctx *data,
- unsigned int reg_idx);
-int mtk_vcodec_mem_alloc(struct mtk_vcodec_ctx *data,
- struct mtk_vcodec_mem *mem);
-void mtk_vcodec_mem_free(struct mtk_vcodec_ctx *data,
- struct mtk_vcodec_mem *mem);
-void mtk_vcodec_set_curr_ctx(struct mtk_vcodec_dev *vdec_dev,
- struct mtk_vcodec_ctx *ctx, int hw_idx);
-struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *vdec_dev,
- unsigned int hw_idx);
-void *mtk_vcodec_get_hw_dev(struct mtk_vcodec_dev *dev, int hw_idx);
-
-#endif /* _MTK_VCODEC_UTIL_H_ */
diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.c b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
index 4c8f5296d120..7243604a82a5 100644
--- a/drivers/media/platform/mediatek/vpu/mtk_vpu.c
+++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
@@ -9,10 +9,10 @@
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/media/platform/microchip/microchip-csi2dc.c b/drivers/media/platform/microchip/microchip-csi2dc.c
index bfb3edcf018a..988c1cc1d8b6 100644
--- a/drivers/media/platform/microchip/microchip-csi2dc.c
+++ b/drivers/media/platform/microchip/microchip-csi2dc.c
@@ -476,7 +476,7 @@ static const struct v4l2_subdev_ops csi2dc_subdev_ops = {
static int csi2dc_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct csi2dc_device *csi2dc = container_of(notifier,
struct csi2dc_device, notifier);
@@ -520,14 +520,14 @@ static const struct v4l2_async_notifier_operations csi2dc_async_ops = {
static int csi2dc_prepare_notifier(struct csi2dc_device *csi2dc,
struct fwnode_handle *input_fwnode)
{
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
int ret = 0;
- v4l2_async_nf_init(&csi2dc->notifier);
+ v4l2_async_subdev_nf_init(&csi2dc->notifier, &csi2dc->csi2dc_sd);
asd = v4l2_async_nf_add_fwnode_remote(&csi2dc->notifier,
input_fwnode,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
fwnode_handle_put(input_fwnode);
@@ -542,8 +542,7 @@ static int csi2dc_prepare_notifier(struct csi2dc_device *csi2dc,
csi2dc->notifier.ops = &csi2dc_async_ops;
- ret = v4l2_async_subdev_nf_register(&csi2dc->csi2dc_sd,
- &csi2dc->notifier);
+ ret = v4l2_async_nf_register(&csi2dc->notifier);
if (ret) {
dev_err(csi2dc->dev, "fail to register async notifier: %d\n",
ret);
diff --git a/drivers/media/platform/microchip/microchip-isc-base.c b/drivers/media/platform/microchip/microchip-isc-base.c
index 4e657fad33d0..8dbf7bc1e863 100644
--- a/drivers/media/platform/microchip/microchip-isc-base.c
+++ b/drivers/media/platform/microchip/microchip-isc-base.c
@@ -1712,7 +1712,7 @@ static int isc_ctrl_init(struct isc_device *isc)
static int isc_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct isc_device *isc = container_of(notifier->v4l2_dev,
struct isc_device, v4l2_dev);
@@ -1741,7 +1741,7 @@ static int isc_async_bound(struct v4l2_async_notifier *notifier,
static void isc_async_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct isc_device *isc = container_of(notifier->v4l2_dev,
struct isc_device, v4l2_dev);
diff --git a/drivers/media/platform/microchip/microchip-isc.h b/drivers/media/platform/microchip/microchip-isc.h
index e3a6c7367e70..ad4e98a1dd8f 100644
--- a/drivers/media/platform/microchip/microchip-isc.h
+++ b/drivers/media/platform/microchip/microchip-isc.h
@@ -44,7 +44,7 @@ struct isc_buffer {
struct isc_subdev_entity {
struct v4l2_subdev *sd;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct device_node *epn;
struct v4l2_async_notifier notifier;
diff --git a/drivers/media/platform/microchip/microchip-sama5d2-isc.c b/drivers/media/platform/microchip/microchip-sama5d2-isc.c
index 746f4a2fa9f6..5ac149cf3647 100644
--- a/drivers/media/platform/microchip/microchip-sama5d2-isc.c
+++ b/drivers/media/platform/microchip/microchip-sama5d2-isc.c
@@ -409,7 +409,6 @@ static int microchip_isc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct isc_device *isc;
- struct resource *res;
void __iomem *io_base;
struct isc_subdev_entity *subdev_entity;
int irq;
@@ -423,8 +422,7 @@ static int microchip_isc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, isc);
isc->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- io_base = devm_ioremap_resource(dev, res);
+ io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
@@ -525,15 +523,15 @@ static int microchip_isc_probe(struct platform_device *pdev)
}
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct fwnode_handle *fwnode =
of_fwnode_handle(subdev_entity->epn);
- v4l2_async_nf_init(&subdev_entity->notifier);
+ v4l2_async_nf_init(&subdev_entity->notifier, &isc->v4l2_dev);
asd = v4l2_async_nf_add_fwnode_remote(&subdev_entity->notifier,
fwnode,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
of_node_put(subdev_entity->epn);
subdev_entity->epn = NULL;
@@ -545,8 +543,7 @@ static int microchip_isc_probe(struct platform_device *pdev)
subdev_entity->notifier.ops = &microchip_isc_async_ops;
- ret = v4l2_async_nf_register(&isc->v4l2_dev,
- &subdev_entity->notifier);
+ ret = v4l2_async_nf_register(&subdev_entity->notifier);
if (ret) {
dev_err(dev, "fail to register async notifier\n");
goto cleanup_subdev;
diff --git a/drivers/media/platform/microchip/microchip-sama7g5-isc.c b/drivers/media/platform/microchip/microchip-sama7g5-isc.c
index 79ae696764d0..73445f33d26b 100644
--- a/drivers/media/platform/microchip/microchip-sama7g5-isc.c
+++ b/drivers/media/platform/microchip/microchip-sama7g5-isc.c
@@ -398,7 +398,6 @@ static int microchip_xisc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct isc_device *isc;
- struct resource *res;
void __iomem *io_base;
struct isc_subdev_entity *subdev_entity;
int irq;
@@ -412,8 +411,7 @@ static int microchip_xisc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, isc);
isc->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- io_base = devm_ioremap_resource(dev, res);
+ io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
@@ -515,15 +513,15 @@ static int microchip_xisc_probe(struct platform_device *pdev)
}
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct fwnode_handle *fwnode =
of_fwnode_handle(subdev_entity->epn);
- v4l2_async_nf_init(&subdev_entity->notifier);
+ v4l2_async_nf_init(&subdev_entity->notifier, &isc->v4l2_dev);
asd = v4l2_async_nf_add_fwnode_remote(&subdev_entity->notifier,
fwnode,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
of_node_put(subdev_entity->epn);
subdev_entity->epn = NULL;
@@ -535,8 +533,7 @@ static int microchip_xisc_probe(struct platform_device *pdev)
subdev_entity->notifier.ops = &microchip_isc_async_ops;
- ret = v4l2_async_nf_register(&isc->v4l2_dev,
- &subdev_entity->notifier);
+ ret = v4l2_async_nf_register(&subdev_entity->notifier);
if (ret) {
dev_err(dev, "fail to register async notifier\n");
goto cleanup_subdev;
diff --git a/drivers/media/platform/nvidia/tegra-vde/vde.c b/drivers/media/platform/nvidia/tegra-vde/vde.c
index 7157734a1550..81a0d3b76b88 100644
--- a/drivers/media/platform/nvidia/tegra-vde/vde.c
+++ b/drivers/media/platform/nvidia/tegra-vde/vde.c
@@ -12,7 +12,8 @@
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
diff --git a/drivers/media/platform/nxp/Kconfig b/drivers/media/platform/nxp/Kconfig
index a0ca6b297fb8..40e3436669e2 100644
--- a/drivers/media/platform/nxp/Kconfig
+++ b/drivers/media/platform/nxp/Kconfig
@@ -17,6 +17,17 @@ config VIDEO_IMX7_CSI
Driver for the NXP Camera Sensor Interface (CSI) Bridge. This device
is found in the i.MX6UL/L, i.MX7 and i.MX8M[MQ] SoCs.
+config VIDEO_IMX8MQ_MIPI_CSI2
+ tristate "NXP i.MX8MQ MIPI CSI-2 receiver"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Video4Linux2 driver for the MIPI CSI-2 receiver found on the i.MX8MQ
+ SoC.
+
config VIDEO_IMX_MIPI_CSIS
tristate "NXP MIPI CSI-2 CSIS receiver found on i.MX7 and i.MX8 models"
depends on ARCH_MXC || COMPILE_TEST
diff --git a/drivers/media/platform/nxp/Makefile b/drivers/media/platform/nxp/Makefile
index b8e672b75fed..4d90eb713652 100644
--- a/drivers/media/platform/nxp/Makefile
+++ b/drivers/media/platform/nxp/Makefile
@@ -5,6 +5,7 @@ obj-y += imx-jpeg/
obj-y += imx8-isi/
obj-$(CONFIG_VIDEO_IMX7_CSI) += imx7-media-csi.o
+obj-$(CONFIG_VIDEO_IMX8MQ_MIPI_CSI2) += imx8mq-mipi-csi2.o
obj-$(CONFIG_VIDEO_IMX_MIPI_CSIS) += imx-mipi-csis.o
obj-$(CONFIG_VIDEO_IMX_PXP) += imx-pxp.o
obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
index 9512c0a61966..b7a720198ce5 100644
--- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
+++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
@@ -2742,7 +2742,6 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "choose slot %d\n", jpeg->slot_data.slot);
dec_irq = platform_get_irq(pdev, 0);
if (dec_irq < 0) {
- dev_err(&pdev->dev, "Failed to get irq %d\n", dec_irq);
ret = dec_irq;
goto err_irq;
}
diff --git a/drivers/media/platform/nxp/imx-mipi-csis.c b/drivers/media/platform/nxp/imx-mipi-csis.c
index 05d52762e792..16f19a640130 100644
--- a/drivers/media/platform/nxp/imx-mipi-csis.c
+++ b/drivers/media/platform/nxp/imx-mipi-csis.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
@@ -1230,7 +1229,7 @@ mipi_notifier_to_csis_state(struct v4l2_async_notifier *n)
static int mipi_csis_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct mipi_csis_device *csis = mipi_notifier_to_csis_state(notifier);
struct media_pad *sink = &csis->sd.entity.pads[CSIS_PAD_SINK];
@@ -1247,12 +1246,12 @@ static int mipi_csis_async_register(struct mipi_csis_device *csis)
struct v4l2_fwnode_endpoint vep = {
.bus_type = V4L2_MBUS_CSI2_DPHY,
};
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct fwnode_handle *ep;
unsigned int i;
int ret;
- v4l2_async_nf_init(&csis->notifier);
+ v4l2_async_subdev_nf_init(&csis->notifier, &csis->sd);
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csis->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
@@ -1278,7 +1277,7 @@ static int mipi_csis_async_register(struct mipi_csis_device *csis)
dev_dbg(csis->dev, "flags: 0x%08x\n", csis->bus.flags);
asd = v4l2_async_nf_add_fwnode_remote(&csis->notifier, ep,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto err_parse;
@@ -1288,7 +1287,7 @@ static int mipi_csis_async_register(struct mipi_csis_device *csis)
csis->notifier.ops = &mipi_csis_notify_ops;
- ret = v4l2_async_subdev_nf_register(&csis->sd, &csis->notifier);
+ ret = v4l2_async_nf_register(&csis->notifier);
if (ret)
return ret;
@@ -1365,13 +1364,6 @@ static int mipi_csis_subdev_init(struct mipi_csis_device *csis)
sd->dev = csis->dev;
- sd->fwnode = fwnode_graph_get_endpoint_by_id(dev_fwnode(csis->dev),
- 1, 0, 0);
- if (!sd->fwnode) {
- dev_err(csis->dev, "Unable to retrieve endpoint for port@1\n");
- return -ENOENT;
- }
-
csis->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK
| MEDIA_PAD_FL_MUST_CONNECT;
csis->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE
diff --git a/drivers/media/platform/nxp/imx-pxp.c b/drivers/media/platform/nxp/imx-pxp.c
index 90f319857c23..e62dc5c1a4ae 100644
--- a/drivers/media/platform/nxp/imx-pxp.c
+++ b/drivers/media/platform/nxp/imx-pxp.c
@@ -19,7 +19,6 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sched.h>
diff --git a/drivers/media/platform/nxp/imx7-media-csi.c b/drivers/media/platform/nxp/imx7-media-csi.c
index 791bde67f439..15049c6aab37 100644
--- a/drivers/media/platform/nxp/imx7-media-csi.c
+++ b/drivers/media/platform/nxp/imx7-media-csi.c
@@ -13,7 +13,7 @@
#include <linux/mfd/syscon.h>
#include <linux/minmax.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -1076,6 +1076,7 @@ static int imx7_csi_video_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
const struct imx7_csi_pixfmt *cc;
+ u32 walign;
if (fsize->index > 0)
return -EINVAL;
@@ -1085,16 +1086,17 @@ static int imx7_csi_video_enum_framesizes(struct file *file, void *fh,
return -EINVAL;
/*
- * TODO: The constraints are hardware-specific and may depend on the
- * pixel format. This should come from the driver using
- * imx_media_capture.
+ * The width alignment is 8 bytes as indicated by the
+ * CSI_IMAG_PARA.IMAGE_WIDTH documentation. Convert it to pixels.
*/
+ walign = 8 * 8 / cc->bpp;
+
fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
- fsize->stepwise.min_width = 1;
- fsize->stepwise.max_width = 65535;
+ fsize->stepwise.min_width = walign;
+ fsize->stepwise.max_width = round_down(65535U, walign);
fsize->stepwise.min_height = 1;
fsize->stepwise.max_height = 65535;
- fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_width = walign;
fsize->stepwise.step_height = 1;
return 0;
@@ -2035,7 +2037,7 @@ static const struct media_entity_operations imx7_csi_entity_ops = {
static int imx7_csi_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct imx7_csi *csi = imx7_csi_notifier_to_dev(notifier);
struct media_pad *sink = &csi->sd.entity.pads[IMX7_CSI_PAD_SINK];
@@ -2060,11 +2062,11 @@ static const struct v4l2_async_notifier_operations imx7_csi_notify_ops = {
static int imx7_csi_async_register(struct imx7_csi *csi)
{
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct fwnode_handle *ep;
int ret;
- v4l2_async_nf_init(&csi->notifier);
+ v4l2_async_nf_init(&csi->notifier, &csi->v4l2_dev);
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
@@ -2075,7 +2077,7 @@ static int imx7_csi_async_register(struct imx7_csi *csi)
}
asd = v4l2_async_nf_add_fwnode_remote(&csi->notifier, ep,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
fwnode_handle_put(ep);
@@ -2087,7 +2089,7 @@ static int imx7_csi_async_register(struct imx7_csi *csi)
csi->notifier.ops = &imx7_csi_notify_ops;
- ret = v4l2_async_nf_register(&csi->v4l2_dev, &csi->notifier);
+ ret = v4l2_async_nf_register(&csi->notifier);
if (ret)
goto error;
diff --git a/drivers/media/platform/nxp/imx8-isi/Makefile b/drivers/media/platform/nxp/imx8-isi/Makefile
index 9bff9297686d..4713c4e8b64b 100644
--- a/drivers/media/platform/nxp/imx8-isi/Makefile
+++ b/drivers/media/platform/nxp/imx8-isi/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
-imx8-isi-y := imx8-isi-core.o imx8-isi-crossbar.o imx8-isi-hw.o \
- imx8-isi-pipe.o imx8-isi-video.o
+imx8-isi-y := imx8-isi-core.o imx8-isi-crossbar.o imx8-isi-gasket.o \
+ imx8-isi-hw.o imx8-isi-pipe.o imx8-isi-video.o
imx8-isi-$(CONFIG_DEBUG_FS) += imx8-isi-debug.o
imx8-isi-$(CONFIG_VIDEO_IMX8_ISI_M2M) += imx8-isi-m2m.o
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
index 253e77189b69..81be744e9f1b 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
@@ -9,7 +9,7 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -30,12 +30,12 @@
*/
struct mxc_isi_async_subdev {
- struct v4l2_async_subdev asd;
+ struct v4l2_async_connection asd;
unsigned int port;
};
static inline struct mxc_isi_async_subdev *
-asd_to_mxc_isi_async_subdev(struct v4l2_async_subdev *asd)
+asd_to_mxc_isi_async_subdev(struct v4l2_async_connection *asd)
{
return container_of(asd, struct mxc_isi_async_subdev, asd);
};
@@ -48,12 +48,12 @@ notifier_to_mxc_isi_dev(struct v4l2_async_notifier *n)
static int mxc_isi_async_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asc)
{
const unsigned int link_flags = MEDIA_LNK_FL_IMMUTABLE
| MEDIA_LNK_FL_ENABLED;
struct mxc_isi_dev *isi = notifier_to_mxc_isi_dev(notifier);
- struct mxc_isi_async_subdev *masd = asd_to_mxc_isi_async_subdev(asd);
+ struct mxc_isi_async_subdev *masd = asd_to_mxc_isi_async_subdev(asc);
struct media_pad *pad = &isi->crossbar.pads[masd->port];
struct device_link *link;
@@ -175,7 +175,7 @@ static int mxc_isi_v4l2_init(struct mxc_isi_dev *isi)
}
/* Initialize, fill and register the async notifier. */
- v4l2_async_nf_init(&isi->notifier);
+ v4l2_async_nf_init(&isi->notifier, v4l2_dev);
isi->notifier.ops = &mxc_isi_async_notifier_ops;
for (i = 0; i < isi->pdata->num_ports; ++i) {
@@ -200,7 +200,7 @@ static int mxc_isi_v4l2_init(struct mxc_isi_dev *isi)
masd->port = i;
}
- ret = v4l2_async_nf_register(v4l2_dev, &isi->notifier);
+ ret = v4l2_async_nf_register(&isi->notifier);
if (ret < 0) {
dev_err(isi->dev,
"Failed to register async notifier: %d\n", ret);
@@ -289,7 +289,7 @@ static const struct mxc_isi_plat_data mxc_imx8mn_data = {
.clks = mxc_imx8mn_clks,
.num_clks = ARRAY_SIZE(mxc_imx8mn_clks),
.buf_active_reverse = false,
- .has_gasket = true,
+ .gasket_ops = &mxc_imx8_gasket_ops,
.has_36bit_dma = false,
};
@@ -303,10 +303,24 @@ static const struct mxc_isi_plat_data mxc_imx8mp_data = {
.clks = mxc_imx8mn_clks,
.num_clks = ARRAY_SIZE(mxc_imx8mn_clks),
.buf_active_reverse = true,
- .has_gasket = true,
+ .gasket_ops = &mxc_imx8_gasket_ops,
.has_36bit_dma = true,
};
+static const struct mxc_isi_plat_data mxc_imx93_data = {
+ .model = MXC_ISI_IMX93,
+ .num_ports = 1,
+ .num_channels = 1,
+ .reg_offset = 0,
+ .ier_reg = &mxc_imx8_isi_ier_v2,
+ .set_thd = &mxc_imx8_isi_thd_v1,
+ .clks = mxc_imx8mn_clks,
+ .num_clks = ARRAY_SIZE(mxc_imx8mn_clks),
+ .buf_active_reverse = true,
+ .gasket_ops = &mxc_imx93_gasket_ops,
+ .has_36bit_dma = false,
+};
+
/* -----------------------------------------------------------------------------
* Power management
*/
@@ -443,7 +457,7 @@ static int mxc_isi_probe(struct platform_device *pdev)
return PTR_ERR(isi->regs);
}
- if (isi->pdata->has_gasket) {
+ if (isi->pdata->gasket_ops) {
isi->gasket = syscon_regmap_lookup_by_phandle(dev->of_node,
"fsl,blk-ctrl");
if (IS_ERR(isi->gasket)) {
@@ -518,6 +532,7 @@ static int mxc_isi_remove(struct platform_device *pdev)
static const struct of_device_id mxc_isi_of_match[] = {
{ .compatible = "fsl,imx8mn-isi", .data = &mxc_imx8mn_data },
{ .compatible = "fsl,imx8mp-isi", .data = &mxc_imx8mp_data },
+ { .compatible = "fsl,imx93-isi", .data = &mxc_imx93_data },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mxc_isi_of_match);
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
index e469788a9e6c..2810ebe9b5f7 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h
@@ -147,9 +147,18 @@ struct mxc_isi_set_thd {
struct mxc_isi_panic_thd panic_set_thd_v;
};
+struct mxc_gasket_ops {
+ void (*enable)(struct mxc_isi_dev *isi,
+ const struct v4l2_mbus_frame_desc *fd,
+ const struct v4l2_mbus_framefmt *fmt,
+ const unsigned int port);
+ void (*disable)(struct mxc_isi_dev *isi, const unsigned int port);
+};
+
enum model {
MXC_ISI_IMX8MN,
MXC_ISI_IMX8MP,
+ MXC_ISI_IMX93,
};
struct mxc_isi_plat_data {
@@ -159,10 +168,10 @@ struct mxc_isi_plat_data {
unsigned int reg_offset;
const struct mxc_isi_ier_reg *ier_reg;
const struct mxc_isi_set_thd *set_thd;
+ const struct mxc_gasket_ops *gasket_ops;
const struct clk_bulk_data *clks;
unsigned int num_clks;
bool buf_active_reverse;
- bool has_gasket;
bool has_36bit_dma;
};
@@ -286,6 +295,9 @@ struct mxc_isi_dev {
struct dentry *debugfs_root;
};
+extern const struct mxc_gasket_ops mxc_imx8_gasket_ops;
+extern const struct mxc_gasket_ops mxc_imx93_gasket_ops;
+
int mxc_isi_crossbar_init(struct mxc_isi_dev *isi);
void mxc_isi_crossbar_cleanup(struct mxc_isi_crossbar *xbar);
int mxc_isi_crossbar_register(struct mxc_isi_crossbar *xbar);
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
index f7447b2f4d77..792f031e032a 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-crossbar.c
@@ -15,7 +15,6 @@
#include <linux/types.h>
#include <media/media-entity.h>
-#include <media/mipi-csi2.h>
#include <media/v4l2-subdev.h>
#include "imx8-isi-core.h"
@@ -25,32 +24,18 @@ static inline struct mxc_isi_crossbar *to_isi_crossbar(struct v4l2_subdev *sd)
return container_of(sd, struct mxc_isi_crossbar, sd);
}
-/* -----------------------------------------------------------------------------
- * Media block control (i.MX8MN and i.MX8MP only)
- */
-#define GASKET_BASE(n) (0x0060 + (n) * 0x30)
-
-#define GASKET_CTRL 0x0000
-#define GASKET_CTRL_DATA_TYPE(dt) ((dt) << 8)
-#define GASKET_CTRL_DATA_TYPE_MASK (0x3f << 8)
-#define GASKET_CTRL_DUAL_COMP_ENABLE BIT(1)
-#define GASKET_CTRL_ENABLE BIT(0)
-
-#define GASKET_HSIZE 0x0004
-#define GASKET_VSIZE 0x0008
-
static int mxc_isi_crossbar_gasket_enable(struct mxc_isi_crossbar *xbar,
struct v4l2_subdev_state *state,
struct v4l2_subdev *remote_sd,
u32 remote_pad, unsigned int port)
{
struct mxc_isi_dev *isi = xbar->isi;
+ const struct mxc_gasket_ops *gasket_ops = isi->pdata->gasket_ops;
const struct v4l2_mbus_framefmt *fmt;
struct v4l2_mbus_frame_desc fd;
- u32 val;
int ret;
- if (!isi->pdata->has_gasket)
+ if (!gasket_ops)
return 0;
/*
@@ -77,17 +62,7 @@ static int mxc_isi_crossbar_gasket_enable(struct mxc_isi_crossbar *xbar,
if (!fmt)
return -EINVAL;
- regmap_write(isi->gasket, GASKET_BASE(port) + GASKET_HSIZE, fmt->width);
- regmap_write(isi->gasket, GASKET_BASE(port) + GASKET_VSIZE, fmt->height);
-
- val = GASKET_CTRL_DATA_TYPE(fd.entry[0].bus.csi2.dt)
- | GASKET_CTRL_ENABLE;
-
- if (fd.entry[0].bus.csi2.dt == MIPI_CSI2_DT_YUV422_8B)
- val |= GASKET_CTRL_DUAL_COMP_ENABLE;
-
- regmap_write(isi->gasket, GASKET_BASE(port) + GASKET_CTRL, val);
-
+ gasket_ops->enable(isi, &fd, fmt, port);
return 0;
}
@@ -95,11 +70,12 @@ static void mxc_isi_crossbar_gasket_disable(struct mxc_isi_crossbar *xbar,
unsigned int port)
{
struct mxc_isi_dev *isi = xbar->isi;
+ const struct mxc_gasket_ops *gasket_ops = isi->pdata->gasket_ops;
- if (!isi->pdata->has_gasket)
+ if (!gasket_ops)
return;
- regmap_write(isi->gasket, GASKET_BASE(port) + GASKET_CTRL, 0);
+ gasket_ops->disable(isi, port);
}
/* -----------------------------------------------------------------------------
@@ -483,7 +459,7 @@ int mxc_isi_crossbar_init(struct mxc_isi_dev *isi)
xbar->inputs = kcalloc(xbar->num_sinks, sizeof(*xbar->inputs),
GFP_KERNEL);
- if (!xbar->pads) {
+ if (!xbar->inputs) {
ret = -ENOMEM;
goto err_free;
}
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-gasket.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-gasket.c
new file mode 100644
index 000000000000..f69c3b5d4782
--- /dev/null
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-gasket.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019-2023 NXP
+ */
+
+#include <linux/regmap.h>
+
+#include <media/mipi-csi2.h>
+
+#include "imx8-isi-core.h"
+
+/* -----------------------------------------------------------------------------
+ * i.MX8MN and i.MX8MP gasket
+ */
+
+#define GASKET_BASE(n) (0x0060 + (n) * 0x30)
+
+#define GASKET_CTRL 0x0000
+#define GASKET_CTRL_DATA_TYPE(dt) ((dt) << 8)
+#define GASKET_CTRL_DATA_TYPE_MASK (0x3f << 8)
+#define GASKET_CTRL_DUAL_COMP_ENABLE BIT(1)
+#define GASKET_CTRL_ENABLE BIT(0)
+
+#define GASKET_HSIZE 0x0004
+#define GASKET_VSIZE 0x0008
+
+static void mxc_imx8_gasket_enable(struct mxc_isi_dev *isi,
+ const struct v4l2_mbus_frame_desc *fd,
+ const struct v4l2_mbus_framefmt *fmt,
+ const unsigned int port)
+{
+ u32 val;
+
+ regmap_write(isi->gasket, GASKET_BASE(port) + GASKET_HSIZE, fmt->width);
+ regmap_write(isi->gasket, GASKET_BASE(port) + GASKET_VSIZE, fmt->height);
+
+ val = GASKET_CTRL_DATA_TYPE(fd->entry[0].bus.csi2.dt);
+ if (fd->entry[0].bus.csi2.dt == MIPI_CSI2_DT_YUV422_8B)
+ val |= GASKET_CTRL_DUAL_COMP_ENABLE;
+
+ val |= GASKET_CTRL_ENABLE;
+ regmap_write(isi->gasket, GASKET_BASE(port) + GASKET_CTRL, val);
+}
+
+static void mxc_imx8_gasket_disable(struct mxc_isi_dev *isi,
+ const unsigned int port)
+{
+ regmap_write(isi->gasket, GASKET_BASE(port) + GASKET_CTRL, 0);
+}
+
+const struct mxc_gasket_ops mxc_imx8_gasket_ops = {
+ .enable = mxc_imx8_gasket_enable,
+ .disable = mxc_imx8_gasket_disable,
+};
+
+/* -----------------------------------------------------------------------------
+ * i.MX93 gasket
+ */
+
+#define DISP_MIX_CAMERA_MUX 0x30
+#define DISP_MIX_CAMERA_MUX_DATA_TYPE(x) (((x) & 0x3f) << 3)
+#define DISP_MIX_CAMERA_MUX_GASKET_ENABLE BIT(16)
+
+static void mxc_imx93_gasket_enable(struct mxc_isi_dev *isi,
+ const struct v4l2_mbus_frame_desc *fd,
+ const struct v4l2_mbus_framefmt *fmt,
+ const unsigned int port)
+{
+ u32 val;
+
+ val = DISP_MIX_CAMERA_MUX_DATA_TYPE(fd->entry[0].bus.csi2.dt);
+ val |= DISP_MIX_CAMERA_MUX_GASKET_ENABLE;
+ regmap_write(isi->gasket, DISP_MIX_CAMERA_MUX, val);
+}
+
+static void mxc_imx93_gasket_disable(struct mxc_isi_dev *isi,
+ unsigned int port)
+{
+ regmap_write(isi->gasket, DISP_MIX_CAMERA_MUX, 0);
+}
+
+const struct mxc_gasket_ops mxc_imx93_gasket_ops = {
+ .enable = mxc_imx93_gasket_enable,
+ .disable = mxc_imx93_gasket_disable,
+};
diff --git a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
index c4454aa1cb34..65d20e9bae69 100644
--- a/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
+++ b/drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c
@@ -791,7 +791,6 @@ int mxc_isi_pipe_init(struct mxc_isi_dev *isi, unsigned int id)
irq = platform_get_irq(to_platform_device(isi->dev), id);
if (irq < 0) {
- dev_err(pipe->isi->dev, "Failed to get IRQ (%d)\n", irq);
ret = irq;
goto error;
}
diff --git a/drivers/staging/media/imx/imx8mq-mipi-csi2.c b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
index ca2efcc21efe..ed048f73c982 100644
--- a/drivers/staging/media/imx/imx8mq-mipi-csi2.c
+++ b/drivers/media/platform/nxp/imx8mq-mipi-csi2.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -567,7 +566,7 @@ mipi_notifier_to_csi2_state(struct v4l2_async_notifier *n)
static int imx8mq_mipi_csi_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct csi_state *state = mipi_notifier_to_csi2_state(notifier);
struct media_pad *sink = &state->sd.entity.pads[MIPI_CSI2_PAD_SINK];
@@ -587,12 +586,12 @@ static int imx8mq_mipi_csi_async_register(struct csi_state *state)
struct v4l2_fwnode_endpoint vep = {
.bus_type = V4L2_MBUS_CSI2_DPHY,
};
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct fwnode_handle *ep;
unsigned int i;
int ret;
- v4l2_async_nf_init(&state->notifier);
+ v4l2_async_subdev_nf_init(&state->notifier, &state->sd);
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(state->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
@@ -619,7 +618,7 @@ static int imx8mq_mipi_csi_async_register(struct csi_state *state)
state->bus.flags);
asd = v4l2_async_nf_add_fwnode_remote(&state->notifier, ep,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto err_parse;
@@ -629,7 +628,7 @@ static int imx8mq_mipi_csi_async_register(struct csi_state *state)
state->notifier.ops = &imx8mq_mipi_csi_notify_ops;
- ret = v4l2_async_subdev_nf_register(&state->sd, &state->notifier);
+ ret = v4l2_async_nf_register(&state->notifier);
if (ret)
return ret;
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index 1ef26aea3eae..f11dc59135a5 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -1383,7 +1383,7 @@ static void camss_unregister_entities(struct camss *camss)
static int camss_subdev_notifier_bound(struct v4l2_async_notifier *async,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct camss *camss = container_of(async, struct camss, notifier);
struct camss_async_subdev *csd =
@@ -1615,14 +1615,6 @@ static int camss_probe(struct platform_device *pdev)
if (!camss->vfe)
return -ENOMEM;
- v4l2_async_nf_init(&camss->notifier);
-
- num_subdevs = camss_of_parse_ports(camss);
- if (num_subdevs < 0) {
- ret = num_subdevs;
- goto err_cleanup;
- }
-
ret = camss_icc_get(camss);
if (ret < 0)
goto err_cleanup;
@@ -1648,15 +1640,22 @@ static int camss_probe(struct platform_device *pdev)
goto err_cleanup;
}
+ v4l2_async_nf_init(&camss->notifier, &camss->v4l2_dev);
+
+ num_subdevs = camss_of_parse_ports(camss);
+ if (num_subdevs < 0) {
+ ret = num_subdevs;
+ goto err_cleanup;
+ }
+
ret = camss_register_entities(camss);
if (ret < 0)
- goto err_register_entities;
+ goto err_cleanup;
if (num_subdevs) {
camss->notifier.ops = &camss_subdev_notifier_ops;
- ret = v4l2_async_nf_register(&camss->v4l2_dev,
- &camss->notifier);
+ ret = v4l2_async_nf_register(&camss->notifier);
if (ret) {
dev_err(dev,
"Failed to register async subdev nodes: %d\n",
@@ -1691,9 +1690,8 @@ static int camss_probe(struct platform_device *pdev)
err_register_subdevs:
camss_unregister_entities(camss);
-err_register_entities:
- v4l2_device_unregister(&camss->v4l2_dev);
err_cleanup:
+ v4l2_device_unregister(&camss->v4l2_dev);
v4l2_async_nf_cleanup(&camss->notifier);
return ret;
diff --git a/drivers/media/platform/qcom/camss/camss.h b/drivers/media/platform/qcom/camss/camss.h
index 3acd2b3403e8..f6c326cb853b 100644
--- a/drivers/media/platform/qcom/camss/camss.h
+++ b/drivers/media/platform/qcom/camss/camss.h
@@ -113,7 +113,7 @@ struct camss_camera_interface {
};
struct camss_async_subdev {
- struct v4l2_async_subdev asd; /* must be first */
+ struct v4l2_async_connection asd; /* must be first */
struct camss_camera_interface interface;
};
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 2ae867cb4c48..054b8e74ba4f 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -11,7 +11,8 @@
#include <linux/devcoredump.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -246,7 +247,7 @@ err:
static void venus_assign_register_offsets(struct venus_core *core)
{
- if (IS_V6(core)) {
+ if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
core->vbif_base = core->base + VBIF_BASE;
core->cpu_base = core->base + CPU_BASE_V6;
core->cpu_cs_base = core->base + CPU_CS_BASE_V6;
@@ -684,6 +685,7 @@ static const struct venus_resources sdm845_res = {
.vcodec_clks_num = 2,
.max_load = 3110400, /* 4096x2160@90 */
.hfi_version = HFI_VERSION_4XX,
+ .vpu_version = VPU_VERSION_AR50,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
.vmem_addr = 0,
@@ -709,6 +711,7 @@ static const struct venus_resources sdm845_res_v2 = {
.vcodec_num = 2,
.max_load = 3110400, /* 4096x2160@90 */
.hfi_version = HFI_VERSION_4XX,
+ .vpu_version = VPU_VERSION_AR50,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
.vmem_addr = 0,
@@ -756,10 +759,15 @@ static const struct venus_resources sc7180_res = {
.opp_pmdomain = (const char *[]) { "cx", NULL },
.vcodec_num = 1,
.hfi_version = HFI_VERSION_4XX,
+ .vpu_version = VPU_VERSION_AR50,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
.vmem_addr = 0,
.dma_mask = 0xe0000000 - 1,
+ .cp_start = 0,
+ .cp_size = 0x70800000,
+ .cp_nonpixel_start = 0x1000000,
+ .cp_nonpixel_size = 0x24800000,
.fwname = "qcom/venus-5.4/venus.mdt",
};
@@ -809,12 +817,13 @@ static const struct venus_resources sm8250_res = {
.vcodec_num = 1,
.max_load = 7833600,
.hfi_version = HFI_VERSION_6XX,
+ .vpu_version = VPU_VERSION_IRIS2,
.num_vpp_pipes = 4,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
.vmem_addr = 0,
.dma_mask = 0xe0000000 - 1,
- .fwname = "qcom/vpu-1.0/venus.mdt",
+ .fwname = "qcom/vpu-1.0/venus.mbn",
};
static const struct freq_tbl sc7280_freq_table[] = {
@@ -866,6 +875,7 @@ static const struct venus_resources sc7280_res = {
.opp_pmdomain = (const char *[]) { "cx", NULL },
.vcodec_num = 1,
.hfi_version = HFI_VERSION_6XX,
+ .vpu_version = VPU_VERSION_IRIS2_1,
.num_vpp_pipes = 1,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 320bde0f83cb..4a633261ece4 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -48,6 +48,14 @@ struct bw_tbl {
u32 peak_10bit;
};
+enum vpu_version {
+ VPU_VERSION_AR50,
+ VPU_VERSION_AR50_LITE,
+ VPU_VERSION_IRIS1,
+ VPU_VERSION_IRIS2,
+ VPU_VERSION_IRIS2_1,
+};
+
struct venus_resources {
u64 dma_mask;
const struct freq_tbl *freq_tbl;
@@ -71,6 +79,7 @@ struct venus_resources {
const char * const resets[VIDC_RESETS_NUM_MAX];
unsigned int resets_num;
enum hfi_version hfi_version;
+ enum vpu_version vpu_version;
u8 num_vpp_pipes;
u32 max_load;
unsigned int vmem_id;
@@ -160,6 +169,7 @@ struct venus_format {
* @core0_usage_count: usage counter for core0
* @core1_usage_count: usage counter for core1
* @root: debugfs root directory
+ * @venus_ver: the venus firmware version
*/
struct venus_core {
void __iomem *base;
@@ -386,7 +396,8 @@ enum venus_inst_modes {
* @ycbcr_enc: current YCbCr encoding
* @quantization: current quantization
* @xfer_func: current xfer function
- * @codec_state: current codec API state (see DEC/ENC_STATE_)
+ * @codec_state: current decoder API state (see DEC_STATE_)
+ * @enc_state: current encoder API state (see ENC_STATE_)
* @reconf_wait: wait queue for resolution change event
* @subscriptions: used to hold current events subscriptions
* @buf_count: used to count number of buffers (reqbuf(0))
@@ -505,6 +516,12 @@ struct venus_inst {
#define IS_V4(core) ((core)->res->hfi_version == HFI_VERSION_4XX)
#define IS_V6(core) ((core)->res->hfi_version == HFI_VERSION_6XX)
+#define IS_AR50(core) ((core)->res->vpu_version == VPU_VERSION_AR50)
+#define IS_AR50_LITE(core) ((core)->res->vpu_version == VPU_VERSION_AR50_LITE)
+#define IS_IRIS1(core) ((core)->res->vpu_version == VPU_VERSION_IRIS1)
+#define IS_IRIS2(core) ((core)->res->vpu_version == VPU_VERSION_IRIS2)
+#define IS_IRIS2_1(core) ((core)->res->vpu_version == VPU_VERSION_IRIS2_1)
+
#define ctrl_to_inst(ctrl) \
container_of((ctrl)->handler, struct venus_inst, ctrl_handler)
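
Editor's note: the vpu_version field and the IS_AR50()/IS_IRIS2()-style macros added above let code branch on the video core generation instead of overloading the HFI version. A sketch of the intended use, with a hypothetical helper name; the real call sites converted later in this diff (firmware.c, hfi_venus.c, hfi_platform.c) follow the same shape.

    /* Sketch only: pick a firmware flavour from the VPU generation. */
    static const char *venus_vpu_flavour(struct venus_core *core)
    {
    	if (IS_IRIS2(core) || IS_IRIS2_1(core))
    		return "iris2";
    	if (IS_IRIS1(core))
    		return "iris1";
    	if (IS_AR50_LITE(core))
    		return "ar50-lite";
    	return "ar50";
    }
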
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index cfb11c551167..fe7da2b30482 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/firmware/qcom/qcom_scm.h>
@@ -29,7 +30,7 @@ static void venus_reset_cpu(struct venus_core *core)
u32 fw_size = core->fw.mapped_mem_size;
void __iomem *wrapper_base;
- if (IS_V6(core))
+ if (IS_IRIS2_1(core))
wrapper_base = core->wrapper_tz_base;
else
wrapper_base = core->wrapper_base;
@@ -41,7 +42,7 @@ static void venus_reset_cpu(struct venus_core *core)
writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR);
writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR);
- if (IS_V6(core)) {
+ if (IS_IRIS2_1(core)) {
/* Bring XTSS out of reset */
writel(0, wrapper_base + WRAPPER_TZ_XTSS_SW_RESET);
} else {
@@ -67,7 +68,7 @@ int venus_set_hw_state(struct venus_core *core, bool resume)
if (resume) {
venus_reset_cpu(core);
} else {
- if (IS_V6(core))
+ if (IS_IRIS2_1(core))
writel(WRAPPER_XTSS_SW_RESET_BIT,
core->wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
else
@@ -82,9 +83,9 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
phys_addr_t *mem_phys, size_t *mem_size)
{
const struct firmware *mdt;
+ struct reserved_mem *rmem;
struct device_node *node;
struct device *dev;
- struct resource r;
ssize_t fw_size;
void *mem_va;
int ret;
@@ -99,13 +100,16 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
return -EINVAL;
}
- ret = of_address_to_resource(node, 0, &r);
- if (ret)
- goto err_put_node;
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+ if (!rmem) {
+ dev_err(dev, "failed to lookup reserved memory-region\n");
+ return -EINVAL;
+ }
ret = request_firmware(&mdt, fwname, dev);
if (ret < 0)
- goto err_put_node;
+ return ret;
fw_size = qcom_mdt_get_size(mdt);
if (fw_size < 0) {
@@ -113,17 +117,17 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
goto err_release_fw;
}
- *mem_phys = r.start;
- *mem_size = resource_size(&r);
+ *mem_phys = rmem->base;
+ *mem_size = rmem->size;
if (*mem_size < fw_size || fw_size > VENUS_FW_MEM_SIZE) {
ret = -EINVAL;
goto err_release_fw;
}
- mem_va = memremap(r.start, *mem_size, MEMREMAP_WC);
+ mem_va = memremap(*mem_phys, *mem_size, MEMREMAP_WC);
if (!mem_va) {
- dev_err(dev, "unable to map memory region: %pR\n", &r);
+ dev_err(dev, "unable to map memory region %pa size %#zx\n", mem_phys, *mem_size);
ret = -ENOMEM;
goto err_release_fw;
}
@@ -138,8 +142,6 @@ static int venus_load_fw(struct venus_core *core, const char *fwname,
memunmap(mem_va);
err_release_fw:
release_firmware(mdt);
-err_put_node:
- of_node_put(node);
return ret;
}
@@ -179,7 +181,7 @@ static int venus_shutdown_no_tz(struct venus_core *core)
void __iomem *wrapper_base = core->wrapper_base;
void __iomem *wrapper_tz_base = core->wrapper_tz_base;
- if (IS_V6(core)) {
+ if (IS_IRIS2_1(core)) {
/* Assert the reset to XTSS */
reg = readl(wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
reg |= WRAPPER_XTSS_SW_RESET_BIT;
@@ -241,6 +243,16 @@ int venus_boot(struct venus_core *core)
return ret;
if (core->use_tz && res->cp_size) {
+ /*
+ * Clues for porting using downstream data:
+ * cp_start = 0
+ * cp_size = venus_ns/virtual-addr-pool[0] - yes, address and not size!
+ * This works, as the non-secure context bank is placed
+ * contiguously right after the Content Protection region.
+ *
+ * cp_nonpixel_start = venus_sec_non_pixel/virtual-addr-pool[0]
+ * cp_nonpixel_size = venus_sec_non_pixel/virtual-addr-pool[1]
+ */
ret = qcom_scm_mem_protect_video_var(res->cp_start,
res->cp_size,
res->cp_nonpixel_start,
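
Editor's note: the venus_load_fw() change above swaps of_address_to_resource() for of_reserved_mem_lookup(), which also removes the need to hold the node reference across the firmware load. Reduced to its core steps it looks like the sketch below (hypothetical helper name, error messages omitted).

    #include <linux/of.h>
    #include <linux/of_reserved_mem.h>

    /* Resolve the firmware carveout through the reserved-memory framework. */
    static int venus_fw_region(struct device *dev, phys_addr_t *base, size_t *size)
    {
    	struct device_node *node;
    	struct reserved_mem *rmem;

    	node = of_parse_phandle(dev->of_node, "memory-region", 0);
    	if (!node)
    		return -EINVAL;

    	rmem = of_reserved_mem_lookup(node);
    	of_node_put(node);
    	if (!rmem)
    		return -EINVAL;

    	*base = rmem->base;
    	*size = rmem->size;
    	return 0;
    }
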
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 1822e85ab6bf..8295542e1a7c 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -189,7 +189,7 @@ int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
if (ret)
return ret;
- count = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
+ count = hfi_bufreq_get_count_min(&bufreq, ver);
for (i = 0; i < count; i++) {
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
@@ -668,6 +668,7 @@ int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
struct hfi_buffer_requirements *req)
{
u32 ptype = HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS;
+ enum hfi_version ver = inst->core->res->hfi_version;
union hfi_get_property hprop;
unsigned int i;
int ret;
@@ -675,12 +676,12 @@ int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
memset(req, 0, sizeof(*req));
if (type == HFI_BUFFER_OUTPUT || type == HFI_BUFFER_OUTPUT2)
- req->count_min = inst->fw_min_cnt;
+ hfi_bufreq_set_count_min(req, ver, inst->fw_min_cnt);
ret = platform_get_bufreq(inst, type, req);
if (!ret) {
if (type == HFI_BUFFER_OUTPUT || type == HFI_BUFFER_OUTPUT2)
- inst->fw_min_cnt = req->count_min;
+ inst->fw_min_cnt = hfi_bufreq_get_count_min(req, ver);
return 0;
}
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index 0abbc50c5864..e4c05d62cfc7 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -1170,14 +1170,6 @@ struct hfi_buffer_display_hold_count_actual {
u32 hold_count;
};
-/* HFI 4XX reorder the fields, use these macros */
-#define HFI_BUFREQ_HOLD_COUNT(bufreq, ver) \
- ((ver) == HFI_VERSION_4XX ? 0 : (bufreq)->hold_count)
-#define HFI_BUFREQ_COUNT_MIN(bufreq, ver) \
- ((ver) == HFI_VERSION_4XX ? (bufreq)->hold_count : (bufreq)->count_min)
-#define HFI_BUFREQ_COUNT_MIN_HOST(bufreq, ver) \
- ((ver) == HFI_VERSION_4XX ? (bufreq)->count_min : 0)
-
struct hfi_buffer_requirements {
u32 type;
u32 size;
@@ -1189,6 +1181,59 @@ struct hfi_buffer_requirements {
u32 alignment;
};
+/* On HFI 4XX, some of the struct members have been swapped. */
+static inline u32 hfi_bufreq_get_hold_count(struct hfi_buffer_requirements *req,
+ u32 ver)
+{
+ if (ver == HFI_VERSION_4XX)
+ return 0;
+
+ return req->hold_count;
+};
+
+static inline u32 hfi_bufreq_get_count_min(struct hfi_buffer_requirements *req,
+ u32 ver)
+{
+ if (ver == HFI_VERSION_4XX)
+ return req->hold_count;
+
+ return req->count_min;
+};
+
+static inline u32 hfi_bufreq_get_count_min_host(struct hfi_buffer_requirements *req,
+ u32 ver)
+{
+ if (ver == HFI_VERSION_4XX)
+ return req->count_min;
+
+ return 0;
+};
+
+static inline void hfi_bufreq_set_hold_count(struct hfi_buffer_requirements *req,
+ u32 ver, u32 val)
+{
+ if (ver == HFI_VERSION_4XX)
+ return;
+
+ req->hold_count = val;
+};
+
+static inline void hfi_bufreq_set_count_min(struct hfi_buffer_requirements *req,
+ u32 ver, u32 val)
+{
+ if (ver == HFI_VERSION_4XX)
+ req->hold_count = val;
+
+ req->count_min = val;
+};
+
+static inline void hfi_bufreq_set_count_min_host(struct hfi_buffer_requirements *req,
+ u32 ver, u32 val)
+{
+ if (ver == HFI_VERSION_4XX)
+ req->count_min = val;
+};
+
struct hfi_data_payload {
u32 size;
u8 data[1];
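
Editor's note: with the macros replaced by the inline accessors above, callers pass the HFI version explicitly and never touch count_min/hold_count by hand. A sketch of a typical read-modify-write, mirroring the call sites converted in the rest of this patch (example_clamp_min_count and the floor of 4 are illustrative only).

    /* Clamp the firmware-reported minimum count, honouring the 4XX field swap. */
    static void example_clamp_min_count(struct venus_inst *inst,
    				    struct hfi_buffer_requirements *req)
    {
    	enum hfi_version ver = inst->core->res->hfi_version;
    	u32 min = hfi_bufreq_get_count_min(req, ver);

    	if (min < 4)	/* arbitrary example floor */
    		hfi_bufreq_set_count_min(req, ver, 4);
    }
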
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index 3d5dadfa1900..7cab685a2ec8 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -99,7 +99,7 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
data_ptr += sizeof(u32);
bufreq = (struct hfi_buffer_requirements *)data_ptr;
- event.buf_count = HFI_BUFREQ_COUNT_MIN(bufreq, ver);
+ event.buf_count = hfi_bufreq_get_count_min(bufreq, ver);
data_ptr += sizeof(*bufreq);
break;
case HFI_INDEX_EXTRADATA_INPUT_CROP:
diff --git a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
index e97ff8cf6d64..f5a655973c08 100644
--- a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
+++ b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
@@ -1215,24 +1215,24 @@ static int bufreq_dec(struct hfi_plat_buffers_params *params, u32 buftype,
out_min_count = output_buffer_count(VIDC_SESSION_TYPE_DEC, codec);
/* Max of driver and FW count */
- out_min_count = max(out_min_count, bufreq->count_min);
+ out_min_count = max(out_min_count, hfi_bufreq_get_count_min(bufreq, version));
bufreq->type = buftype;
bufreq->region_size = 0;
- bufreq->count_min = 1;
bufreq->count_actual = 1;
- bufreq->hold_count = 1;
+ hfi_bufreq_set_count_min(bufreq, version, 1);
+ hfi_bufreq_set_hold_count(bufreq, version, 1);
bufreq->contiguous = 1;
bufreq->alignment = 256;
if (buftype == HFI_BUFFER_INPUT) {
- bufreq->count_min = MIN_INPUT_BUFFERS;
+ hfi_bufreq_set_count_min(bufreq, version, MIN_INPUT_BUFFERS);
bufreq->size =
calculate_dec_input_frame_size(width, height, codec,
max_mbs_per_frame,
buffer_size_limit);
} else if (buftype == HFI_BUFFER_OUTPUT || buftype == HFI_BUFFER_OUTPUT2) {
- bufreq->count_min = out_min_count;
+ hfi_bufreq_set_count_min(bufreq, version, out_min_count);
bufreq->size =
venus_helper_get_framesz_raw(params->hfi_color_fmt,
out_width, out_height);
@@ -1269,7 +1269,7 @@ static int bufreq_enc(struct hfi_plat_buffers_params *params, u32 buftype,
u32 work_mode = params->enc.work_mode;
u32 rc_type = params->enc.rc_type;
u32 num_vpp_pipes = params->num_vpp_pipes;
- u32 num_ref;
+ u32 num_ref, count_min;
switch (codec) {
case V4L2_PIX_FMT_H264:
@@ -1289,21 +1289,21 @@ static int bufreq_enc(struct hfi_plat_buffers_params *params, u32 buftype,
bufreq->type = buftype;
bufreq->region_size = 0;
- bufreq->count_min = 1;
bufreq->count_actual = 1;
- bufreq->hold_count = 1;
+ hfi_bufreq_set_count_min(bufreq, version, 1);
+ hfi_bufreq_set_hold_count(bufreq, version, 1);
bufreq->contiguous = 1;
bufreq->alignment = 256;
if (buftype == HFI_BUFFER_INPUT) {
- bufreq->count_min = MIN_INPUT_BUFFERS;
+ hfi_bufreq_set_count_min(bufreq, version, MIN_INPUT_BUFFERS);
bufreq->size =
venus_helper_get_framesz_raw(params->hfi_color_fmt,
width, height);
} else if (buftype == HFI_BUFFER_OUTPUT ||
buftype == HFI_BUFFER_OUTPUT2) {
- bufreq->count_min =
- output_buffer_count(VIDC_SESSION_TYPE_ENC, codec);
+ count_min = output_buffer_count(VIDC_SESSION_TYPE_ENC, codec);
+ hfi_bufreq_set_count_min(bufreq, version, count_min);
bufreq->size = calculate_enc_output_frame_size(width, height,
rc_type);
} else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH(version)) {
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.c b/drivers/media/platform/qcom/venus/hfi_platform.c
index f07f554bc5fe..643e5aa138f5 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
-#include <linux/of_device.h>
+#include <linux/of.h>
#include "hfi_platform.h"
#include "core.h"
@@ -80,7 +80,7 @@ hfi_platform_get_codecs(struct venus_core *core, u32 *enc_codecs, u32 *dec_codec
if (plat->codecs)
plat->codecs(enc_codecs, dec_codecs, count);
- if (of_device_is_compatible(core->dev->of_node, "qcom,sc7280-venus")) {
+ if (IS_IRIS2_1(core)) {
*enc_codecs &= ~HFI_VIDEO_CODEC_VP8;
*dec_codecs &= ~HFI_VIDEO_CODEC_VP8;
}
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index f0b46389e8d5..19fc6575a489 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -131,7 +131,6 @@ struct venus_hfi_device {
static bool venus_pkt_debug;
int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
-static bool venus_sys_idle_indicator;
static bool venus_fw_low_power_mode = true;
static int venus_hw_rsp_timeout = 1000;
static bool venus_fw_coverage;
@@ -448,23 +447,25 @@ static int venus_boot_core(struct venus_hfi_device *hdev)
{
struct device *dev = hdev->core->dev;
static const unsigned int max_tries = 100;
- u32 ctrl_status = 0, mask_val;
+ u32 ctrl_status = 0, mask_val = 0;
unsigned int count = 0;
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
void __iomem *wrapper_base = hdev->core->wrapper_base;
int ret = 0;
- writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT);
- if (IS_V6(hdev->core)) {
+ if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
mask_val = readl(wrapper_base + WRAPPER_INTR_MASK);
mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BASK_V6 |
WRAPPER_INTR_MASK_A2HCPU_MASK);
} else {
mask_val = WRAPPER_INTR_MASK_A2HVCODEC_MASK;
}
+
writel(mask_val, wrapper_base + WRAPPER_INTR_MASK);
- writel(1, cpu_cs_base + CPU_CS_SCIACMDARG3);
+ if (IS_V1(hdev->core))
+ writel(1, cpu_cs_base + CPU_CS_SCIACMDARG3);
+ writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT);
while (!ctrl_status && count < max_tries) {
ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) {
@@ -480,7 +481,7 @@ static int venus_boot_core(struct venus_hfi_device *hdev)
if (count >= max_tries)
ret = -ETIMEDOUT;
- if (IS_V6(hdev->core)) {
+ if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
writel(0x1, cpu_cs_base + CPU_CS_H2XSOFTINTEN_V6);
writel(0x0, cpu_cs_base + CPU_CS_X2RPMH_V6);
}
@@ -548,10 +549,10 @@ static int venus_halt_axi(struct venus_hfi_device *hdev)
u32 mask_val;
int ret;
- if (IS_V6(hdev->core)) {
+ if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
writel(0x3, cpu_cs_base + CPU_CS_X2RPMH_V6);
- if (hdev->core->res->num_vpp_pipes == 1)
+ if (IS_IRIS2_1(hdev->core))
goto skip_aon_mvp_noc;
writel(0x1, aon_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
@@ -927,17 +928,12 @@ static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
if (ret)
dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);
- /*
- * Idle indicator is disabled by default on some 4xx firmware versions,
- * enable it explicitly in order to make suspend functional by checking
- * WFI (wait-for-interrupt) bit.
- */
- if (IS_V4(hdev->core) || IS_V6(hdev->core))
- venus_sys_idle_indicator = true;
-
- ret = venus_sys_set_idle_message(hdev, venus_sys_idle_indicator);
- if (ret)
- dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
+ /* HFI_PROPERTY_SYS_IDLE_INDICATOR is not supported beyond 8916 (HFI V1) */
+ if (IS_V1(hdev->core)) {
+ ret = venus_sys_set_idle_message(hdev, false);
+ if (ret)
+ dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
+ }
ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode);
if (ret)
@@ -1114,7 +1110,7 @@ static irqreturn_t venus_isr(struct venus_core *core)
wrapper_base = hdev->core->wrapper_base;
status = readl(wrapper_base + WRAPPER_INTR_STATUS);
- if (IS_V6(core)) {
+ if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
status & WRAPPER_INTR_STATUS_A2HWD_MASK_V6 ||
status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
@@ -1126,7 +1122,7 @@ static irqreturn_t venus_isr(struct venus_core *core)
hdev->irq_status = status;
}
writel(1, cpu_cs_base + CPU_CS_A2HSOFTINTCLR);
- if (!IS_V6(core))
+ if (!(IS_IRIS2(core) || IS_IRIS2_1(core)))
writel(status, wrapper_base + WRAPPER_INTR_CLEAR);
return IRQ_WAKE_THREAD;
@@ -1521,7 +1517,7 @@ static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
u32 ctrl_status, cpu_status;
- if (IS_V6(hdev->core))
+ if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core))
cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
else
cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
@@ -1541,7 +1537,7 @@ static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev)
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
u32 ctrl_status, cpu_status;
- if (IS_V6(hdev->core))
+ if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core))
cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
else
cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index f5676440dd36..dbf305cec120 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -727,7 +727,7 @@ static int vdec_set_work_route(struct venus_inst *inst)
u32 ptype = HFI_PROPERTY_PARAM_WORK_ROUTE;
struct hfi_video_work_route wr;
- if (!IS_V6(inst->core))
+ if (!(IS_IRIS2(inst->core) || IS_IRIS2_1(inst->core)))
return 0;
wr.video_work_route = inst->core->res->num_vpp_pipes;
@@ -899,13 +899,13 @@ static int vdec_num_buffers(struct venus_inst *inst, unsigned int *in_num,
if (ret)
return ret;
- *in_num = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
+ *in_num = hfi_bufreq_get_count_min(&bufreq, ver);
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
if (ret)
return ret;
- *out_num = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
+ *out_num = hfi_bufreq_get_count_min(&bufreq, ver);
return 0;
}
@@ -1019,14 +1019,14 @@ static int vdec_verify_conf(struct venus_inst *inst)
return ret;
if (inst->num_output_bufs < bufreq.count_actual ||
- inst->num_output_bufs < HFI_BUFREQ_COUNT_MIN(&bufreq, ver))
+ inst->num_output_bufs < hfi_bufreq_get_count_min(&bufreq, ver))
return -EINVAL;
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
if (ret)
return ret;
- if (inst->num_input_bufs < HFI_BUFREQ_COUNT_MIN(&bufreq, ver))
+ if (inst->num_input_bufs < hfi_bufreq_get_count_min(&bufreq, ver))
return -EINVAL;
return 0;
diff --git a/drivers/media/platform/qcom/venus/vdec_ctrls.c b/drivers/media/platform/qcom/venus/vdec_ctrls.c
index fbe12a608b21..7e0f29bf7fae 100644
--- a/drivers/media/platform/qcom/venus/vdec_ctrls.c
+++ b/drivers/media/platform/qcom/venus/vdec_ctrls.c
@@ -79,7 +79,7 @@ static int vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
if (!ret)
- ctrl->val = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
+ ctrl->val = hfi_bufreq_get_count_min(&bufreq, ver);
break;
default:
return -EINVAL;
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index 6d773b000e8a..44b13696cf82 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -1207,7 +1207,7 @@ static int venc_verify_conf(struct venus_inst *inst)
return ret;
if (inst->num_output_bufs < bufreq.count_actual ||
- inst->num_output_bufs < HFI_BUFREQ_COUNT_MIN(&bufreq, ver))
+ inst->num_output_bufs < hfi_bufreq_get_count_min(&bufreq, ver))
return -EINVAL;
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
@@ -1215,7 +1215,7 @@ static int venc_verify_conf(struct venus_inst *inst)
return ret;
if (inst->num_input_bufs < bufreq.count_actual ||
- inst->num_input_bufs < HFI_BUFREQ_COUNT_MIN(&bufreq, ver))
+ inst->num_input_bufs < hfi_bufreq_get_count_min(&bufreq, ver))
return -EINVAL;
return 0;
diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c
index 7468e43800a9..d9d2a293f3ef 100644
--- a/drivers/media/platform/qcom/venus/venc_ctrls.c
+++ b/drivers/media/platform/qcom/venus/venc_ctrls.c
@@ -358,7 +358,7 @@ static int venc_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
if (!ret)
- ctrl->val = HFI_BUFREQ_COUNT_MIN(&bufreq, ver);
+ ctrl->val = hfi_bufreq_get_count_min(&bufreq, ver);
break;
default:
return -EINVAL;
diff --git a/drivers/media/platform/renesas/rcar-isp.c b/drivers/media/platform/renesas/rcar-isp.c
index fee1a066f56b..7360cf3863f2 100644
--- a/drivers/media/platform/renesas/rcar-isp.c
+++ b/drivers/media/platform/renesas/rcar-isp.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -326,7 +326,7 @@ static const struct v4l2_subdev_ops rcar_isp_subdev_ops = {
static int risp_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct rcar_isp *isp = notifier_to_isp(notifier);
int pad;
@@ -350,7 +350,7 @@ static int risp_notify_bound(struct v4l2_async_notifier *notifier,
static void risp_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct rcar_isp *isp = notifier_to_isp(notifier);
@@ -366,7 +366,7 @@ static const struct v4l2_async_notifier_operations risp_notify_ops = {
static int risp_parse_dt(struct rcar_isp *isp)
{
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct fwnode_handle *fwnode;
struct fwnode_handle *ep;
unsigned int id;
@@ -392,16 +392,16 @@ static int risp_parse_dt(struct rcar_isp *isp)
dev_dbg(isp->dev, "Found '%pOF'\n", to_of_node(fwnode));
- v4l2_async_nf_init(&isp->notifier);
+ v4l2_async_subdev_nf_init(&isp->notifier, &isp->subdev);
isp->notifier.ops = &risp_notify_ops;
asd = v4l2_async_nf_add_fwnode(&isp->notifier, fwnode,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
fwnode_handle_put(fwnode);
if (IS_ERR(asd))
return PTR_ERR(asd);
- ret = v4l2_async_subdev_nf_register(&isp->subdev, &isp->notifier);
+ ret = v4l2_async_nf_register(&isp->notifier);
if (ret)
v4l2_async_nf_cleanup(&isp->notifier);
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-core.c b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
index 3c4f5eb93be1..809c3a38cc4a 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-core.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -252,7 +251,7 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asc)
{
struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
unsigned int i;
@@ -264,7 +263,7 @@ static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
mutex_lock(&vin->group->lock);
for (i = 0; i < RVIN_CSI_MAX; i++) {
- if (vin->group->remotes[i].asd != asd)
+ if (vin->group->remotes[i].asc != asc)
continue;
vin->group->remotes[i].subdev = NULL;
vin_dbg(vin, "Unbind %s from slot %u\n", subdev->name, i);
@@ -278,7 +277,7 @@ static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asc)
{
struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
unsigned int i;
@@ -286,7 +285,7 @@ static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
mutex_lock(&vin->group->lock);
for (i = 0; i < RVIN_CSI_MAX; i++) {
- if (vin->group->remotes[i].asd != asd)
+ if (vin->group->remotes[i].asc != asc)
continue;
vin->group->remotes[i].subdev = subdev;
vin_dbg(vin, "Bound %s to slot %u\n", subdev->name, i);
@@ -311,7 +310,7 @@ static int rvin_group_parse_of(struct rvin_dev *vin, unsigned int port,
struct v4l2_fwnode_endpoint vep = {
.bus_type = V4L2_MBUS_CSI2_DPHY,
};
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asc;
int ret;
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(vin->dev), port, id, 0);
@@ -327,14 +326,14 @@ static int rvin_group_parse_of(struct rvin_dev *vin, unsigned int port,
goto out;
}
- asd = v4l2_async_nf_add_fwnode(&vin->group->notifier, fwnode,
- struct v4l2_async_subdev);
- if (IS_ERR(asd)) {
- ret = PTR_ERR(asd);
+ asc = v4l2_async_nf_add_fwnode(&vin->group->notifier, fwnode,
+ struct v4l2_async_connection);
+ if (IS_ERR(asc)) {
+ ret = PTR_ERR(asc);
goto out;
}
- vin->group->remotes[vep.base.id].asd = asd;
+ vin->group->remotes[vep.base.id].asc = asc;
vin_dbg(vin, "Add group OF device %pOF to slot %u\n",
to_of_node(fwnode), vep.base.id);
@@ -376,7 +375,7 @@ static int rvin_group_notifier_init(struct rvin_dev *vin, unsigned int port,
mutex_unlock(&vin->group->lock);
- v4l2_async_nf_init(&vin->group->notifier);
+ v4l2_async_nf_init(&vin->group->notifier, &vin->v4l2_dev);
/*
* Some subdevices may overlap but the parser function can handle it and
@@ -387,7 +386,7 @@ static int rvin_group_notifier_init(struct rvin_dev *vin, unsigned int port,
continue;
for (id = 0; id < max_id; id++) {
- if (vin->group->remotes[id].asd)
+ if (vin->group->remotes[id].asc)
continue;
ret = rvin_group_parse_of(vin->group->vin[i], port, id);
@@ -396,11 +395,11 @@ static int rvin_group_notifier_init(struct rvin_dev *vin, unsigned int port,
}
}
- if (list_empty(&vin->group->notifier.asd_list))
+ if (list_empty(&vin->group->notifier.waiting_list))
return 0;
vin->group->notifier.ops = &rvin_group_notify_ops;
- ret = v4l2_async_nf_register(&vin->v4l2_dev, &vin->group->notifier);
+ ret = v4l2_async_nf_register(&vin->group->notifier);
if (ret < 0) {
vin_err(vin, "Notifier registration failed\n");
v4l2_async_nf_cleanup(&vin->group->notifier);
@@ -611,7 +610,7 @@ static int rvin_parallel_notify_complete(struct v4l2_async_notifier *notifier)
static void rvin_parallel_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asc)
{
struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
@@ -624,7 +623,7 @@ static void rvin_parallel_notify_unbind(struct v4l2_async_notifier *notifier,
static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asc)
{
struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
int ret;
@@ -656,7 +655,7 @@ static int rvin_parallel_parse_of(struct rvin_dev *vin)
struct v4l2_fwnode_endpoint vep = {
.bus_type = V4L2_MBUS_UNKNOWN,
};
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asc;
int ret;
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(vin->dev), 0, 0, 0);
@@ -687,14 +686,14 @@ static int rvin_parallel_parse_of(struct rvin_dev *vin)
goto out;
}
- asd = v4l2_async_nf_add_fwnode(&vin->notifier, fwnode,
- struct v4l2_async_subdev);
- if (IS_ERR(asd)) {
- ret = PTR_ERR(asd);
+ asc = v4l2_async_nf_add_fwnode(&vin->notifier, fwnode,
+ struct v4l2_async_connection);
+ if (IS_ERR(asc)) {
+ ret = PTR_ERR(asc);
goto out;
}
- vin->parallel.asd = asd;
+ vin->parallel.asc = asc;
vin_dbg(vin, "Add parallel OF device %pOF\n", to_of_node(fwnode));
out:
@@ -713,20 +712,20 @@ static int rvin_parallel_init(struct rvin_dev *vin)
{
int ret;
- v4l2_async_nf_init(&vin->notifier);
+ v4l2_async_nf_init(&vin->notifier, &vin->v4l2_dev);
ret = rvin_parallel_parse_of(vin);
if (ret)
return ret;
- if (!vin->parallel.asd)
+ if (!vin->parallel.asc)
return -ENODEV;
vin_dbg(vin, "Found parallel subdevice %pOF\n",
- to_of_node(vin->parallel.asd->match.fwnode));
+ to_of_node(vin->parallel.asc->match.fwnode));
vin->notifier.ops = &rvin_parallel_notify_ops;
- ret = v4l2_async_nf_register(&vin->v4l2_dev, &vin->notifier);
+ ret = v4l2_async_nf_register(&vin->notifier);
if (ret < 0) {
vin_err(vin, "Notifier registration failed\n");
v4l2_async_nf_cleanup(&vin->notifier);
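
Editor's note: all of the notifier conversions in this series reduce to the same two-step shape; the owning device is bound when the notifier is initialised, and registration then takes only the notifier. A condensed sketch, assuming a hypothetical struct my_priv with a v4l2_dev, a notifier and a my_notify_ops table.

    /* Post-conversion registration sequence for a bridge driver. */
    static int my_register_notifier(struct my_priv *priv)
    {
    	int ret;

    	/* The owning v4l2_device is now passed at init time... */
    	v4l2_async_nf_init(&priv->notifier, &priv->v4l2_dev);
    	priv->notifier.ops = &my_notify_ops;

    	/* ...so registration no longer takes it. */
    	ret = v4l2_async_nf_register(&priv->notifier);
    	if (ret)
    		v4l2_async_nf_cleanup(&priv->notifier);

    	return ret;
    }

Subdev-owned notifiers (rcar-isp, rcar-csi2 and rzg2l-csi2 above) use v4l2_async_subdev_nf_init(&priv->notifier, &priv->subdev) in the first step instead.
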
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
index 7a134c0eff57..f6326df0b09b 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
@@ -10,7 +10,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -133,6 +132,111 @@ struct rcar_csi2;
#define PHYFRX_FORCERX_MODE_1 BIT(1)
#define PHYFRX_FORCERX_MODE_0 BIT(0)
+/* V4H BASE registers */
+#define V4H_N_LANES_REG 0x0004
+#define V4H_CSI2_RESETN_REG 0x0008
+#define V4H_PHY_MODE_REG 0x001c
+#define V4H_PHY_SHUTDOWNZ_REG 0x0040
+#define V4H_DPHY_RSTZ_REG 0x0044
+#define V4H_FLDC_REG 0x0804
+#define V4H_FLDD_REG 0x0808
+#define V4H_IDIC_REG 0x0810
+#define V4H_PHY_EN_REG 0x2000
+
+#define V4H_ST_PHYST_REG 0x2814
+#define V4H_ST_PHYST_ST_PHY_READY BIT(31)
+#define V4H_ST_PHYST_ST_STOPSTATE_3 BIT(3)
+#define V4H_ST_PHYST_ST_STOPSTATE_2 BIT(2)
+#define V4H_ST_PHYST_ST_STOPSTATE_1 BIT(1)
+#define V4H_ST_PHYST_ST_STOPSTATE_0 BIT(0)
+
+/* V4H PPI registers */
+#define V4H_PPI_STARTUP_RW_COMMON_DPHY_REG(n) (0x21800 + ((n) * 2)) /* n = 0 - 9 */
+#define V4H_PPI_STARTUP_RW_COMMON_STARTUP_1_1_REG 0x21822
+#define V4H_PPI_CALIBCTRL_RW_COMMON_BG_0_REG 0x2184c
+#define V4H_PPI_RW_LPDCOCAL_TIMEBASE_REG 0x21c02
+#define V4H_PPI_RW_LPDCOCAL_NREF_REG 0x21c04
+#define V4H_PPI_RW_LPDCOCAL_NREF_RANGE_REG 0x21c06
+#define V4H_PPI_RW_LPDCOCAL_TWAIT_CONFIG_REG 0x21c0a
+#define V4H_PPI_RW_LPDCOCAL_VT_CONFIG_REG 0x21c0c
+#define V4H_PPI_RW_LPDCOCAL_COARSE_CFG_REG 0x21c10
+#define V4H_PPI_RW_COMMON_CFG_REG 0x21c6c
+#define V4H_PPI_RW_TERMCAL_CFG_0_REG 0x21c80
+#define V4H_PPI_RW_OFFSETCAL_CFG_0_REG 0x21ca0
+
+/* V4H CORE registers */
+#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(n) (0x22040 + ((n) * 2)) /* n = 0 - 15 */
+#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_REG(n) (0x22440 + ((n) * 2)) /* n = 0 - 15 */
+#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_REG(n) (0x22840 + ((n) * 2)) /* n = 0 - 15 */
+#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_REG(n) (0x22c40 + ((n) * 2)) /* n = 0 - 15 */
+#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_REG(n) (0x23040 + ((n) * 2)) /* n = 0 - 15 */
+#define V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(n) (0x23840 + ((n) * 2)) /* n = 0 - 11 */
+#define V4H_CORE_DIG_RW_COMMON_REG(n) (0x23880 + ((n) * 2)) /* n = 0 - 15 */
+#define V4H_CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_REG(n) (0x239e0 + ((n) * 2)) /* n = 0 - 3 */
+#define V4H_CORE_DIG_CLANE_1_RW_CFG_0_REG 0x2a400
+#define V4H_CORE_DIG_CLANE_1_RW_HS_TX_6_REG 0x2a60c
+
+/* V4H C-PHY */
+#define V4H_CORE_DIG_RW_TRIO0_REG(n) (0x22100 + ((n) * 2)) /* n = 0 - 3 */
+#define V4H_CORE_DIG_RW_TRIO1_REG(n) (0x22500 + ((n) * 2)) /* n = 0 - 3 */
+#define V4H_CORE_DIG_RW_TRIO2_REG(n) (0x22900 + ((n) * 2)) /* n = 0 - 3 */
+#define V4H_CORE_DIG_CLANE_0_RW_LP_0_REG 0x2a080
+#define V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(n) (0x2a100 + ((n) * 2)) /* n = 0 - 6 */
+#define V4H_CORE_DIG_CLANE_1_RW_LP_0_REG 0x2a480
+#define V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(n) (0x2a500 + ((n) * 2)) /* n = 0 - 6 */
+#define V4H_CORE_DIG_CLANE_2_RW_LP_0_REG 0x2a880
+#define V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(n) (0x2a900 + ((n) * 2)) /* n = 0 - 6 */
+
+struct rcsi2_cphy_setting {
+ u16 msps;
+ u16 rx2;
+ u16 trio0;
+ u16 trio1;
+ u16 trio2;
+ u16 lane27;
+ u16 lane29;
+};
+
+static const struct rcsi2_cphy_setting cphy_setting_table_r8a779g0[] = {
+ { .msps = 80, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0134, .trio2 = 0x6a, .lane27 = 0x0000, .lane29 = 0x0a24 },
+ { .msps = 100, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x00f5, .trio2 = 0x55, .lane27 = 0x0000, .lane29 = 0x0a24 },
+ { .msps = 200, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0077, .trio2 = 0x2b, .lane27 = 0x0000, .lane29 = 0x0a44 },
+ { .msps = 300, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x004d, .trio2 = 0x1d, .lane27 = 0x0000, .lane29 = 0x0a44 },
+ { .msps = 400, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0038, .trio2 = 0x16, .lane27 = 0x0000, .lane29 = 0x0a64 },
+ { .msps = 500, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x002b, .trio2 = 0x12, .lane27 = 0x0000, .lane29 = 0x0a64 },
+ { .msps = 600, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0023, .trio2 = 0x0f, .lane27 = 0x0000, .lane29 = 0x0a64 },
+ { .msps = 700, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x001d, .trio2 = 0x0d, .lane27 = 0x0000, .lane29 = 0x0a84 },
+ { .msps = 800, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0018, .trio2 = 0x0c, .lane27 = 0x0000, .lane29 = 0x0a84 },
+ { .msps = 900, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0015, .trio2 = 0x0b, .lane27 = 0x0000, .lane29 = 0x0a84 },
+ { .msps = 1000, .rx2 = 0x3e, .trio0 = 0x024a, .trio1 = 0x0012, .trio2 = 0x0a, .lane27 = 0x0400, .lane29 = 0x0a84 },
+ { .msps = 1100, .rx2 = 0x44, .trio0 = 0x024a, .trio1 = 0x000f, .trio2 = 0x09, .lane27 = 0x0800, .lane29 = 0x0a84 },
+ { .msps = 1200, .rx2 = 0x4a, .trio0 = 0x024a, .trio1 = 0x000e, .trio2 = 0x08, .lane27 = 0x0c00, .lane29 = 0x0a84 },
+ { .msps = 1300, .rx2 = 0x51, .trio0 = 0x024a, .trio1 = 0x000c, .trio2 = 0x08, .lane27 = 0x0c00, .lane29 = 0x0aa4 },
+ { .msps = 1400, .rx2 = 0x57, .trio0 = 0x024a, .trio1 = 0x000b, .trio2 = 0x07, .lane27 = 0x1000, .lane29 = 0x0aa4 },
+ { .msps = 1500, .rx2 = 0x5d, .trio0 = 0x044a, .trio1 = 0x0009, .trio2 = 0x07, .lane27 = 0x1000, .lane29 = 0x0aa4 },
+ { .msps = 1600, .rx2 = 0x63, .trio0 = 0x044a, .trio1 = 0x0008, .trio2 = 0x07, .lane27 = 0x1400, .lane29 = 0x0aa4 },
+ { .msps = 1700, .rx2 = 0x6a, .trio0 = 0x044a, .trio1 = 0x0007, .trio2 = 0x06, .lane27 = 0x1400, .lane29 = 0x0aa4 },
+ { .msps = 1800, .rx2 = 0x70, .trio0 = 0x044a, .trio1 = 0x0007, .trio2 = 0x06, .lane27 = 0x1400, .lane29 = 0x0aa4 },
+ { .msps = 1900, .rx2 = 0x76, .trio0 = 0x044a, .trio1 = 0x0006, .trio2 = 0x06, .lane27 = 0x1400, .lane29 = 0x0aa4 },
+ { .msps = 2000, .rx2 = 0x7c, .trio0 = 0x044a, .trio1 = 0x0005, .trio2 = 0x06, .lane27 = 0x1800, .lane29 = 0x0aa4 },
+ { .msps = 2100, .rx2 = 0x83, .trio0 = 0x044a, .trio1 = 0x0005, .trio2 = 0x05, .lane27 = 0x1800, .lane29 = 0x0aa4 },
+ { .msps = 2200, .rx2 = 0x89, .trio0 = 0x064a, .trio1 = 0x0004, .trio2 = 0x05, .lane27 = 0x1800, .lane29 = 0x0aa4 },
+ { .msps = 2300, .rx2 = 0x8f, .trio0 = 0x064a, .trio1 = 0x0003, .trio2 = 0x05, .lane27 = 0x1800, .lane29 = 0x0aa4 },
+ { .msps = 2400, .rx2 = 0x95, .trio0 = 0x064a, .trio1 = 0x0003, .trio2 = 0x05, .lane27 = 0x1800, .lane29 = 0x0aa4 },
+ { .msps = 2500, .rx2 = 0x9c, .trio0 = 0x064a, .trio1 = 0x0003, .trio2 = 0x05, .lane27 = 0x1c00, .lane29 = 0x0aa4 },
+ { .msps = 2600, .rx2 = 0xa2, .trio0 = 0x064a, .trio1 = 0x0002, .trio2 = 0x05, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 2700, .rx2 = 0xa8, .trio0 = 0x064a, .trio1 = 0x0002, .trio2 = 0x05, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 2800, .rx2 = 0xae, .trio0 = 0x064a, .trio1 = 0x0002, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 2900, .rx2 = 0xb5, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 3000, .rx2 = 0xbb, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 3100, .rx2 = 0xc1, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 3200, .rx2 = 0xc7, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 3300, .rx2 = 0xce, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 3400, .rx2 = 0xd4, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { .msps = 3500, .rx2 = 0xda, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
+ { /* sentinel */ },
+};
+
struct phtw_value {
u16 data;
u16 code;
@@ -538,6 +642,11 @@ static void rcsi2_write(struct rcar_csi2 *priv, unsigned int reg, u32 data)
iowrite32(data, priv->base + reg);
}
+static void rcsi2_write16(struct rcar_csi2 *priv, unsigned int reg, u16 data)
+{
+ iowrite16(data, priv->base + reg);
+}
+
static void rcsi2_enter_standby_gen3(struct rcar_csi2 *priv)
{
rcsi2_write(priv, PHYCNT_REG, 0);
@@ -645,6 +754,10 @@ static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp,
mbps = v4l2_ctrl_g_ctrl_int64(ctrl) * bpp;
do_div(mbps, lanes * 1000000);
+ /* Adjust for C-PHY, divide by 2.8. */
+ if (priv->cphy)
+ mbps = div_u64(mbps * 5, 14);
+
return mbps;
}
@@ -834,6 +947,173 @@ static int rcsi2_start_receiver_gen3(struct rcar_csi2 *priv)
return 0;
}
+static int rcsi2_wait_phy_start_v4h(struct rcar_csi2 *priv, u32 match)
+{
+ unsigned int timeout;
+ u32 status;
+
+ for (timeout = 0; timeout <= 10; timeout++) {
+ status = rcsi2_read(priv, V4H_ST_PHYST_REG);
+ if ((status & match) == match)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int rcsi2_c_phy_setting_v4h(struct rcar_csi2 *priv, int msps)
+{
+ const struct rcsi2_cphy_setting *conf;
+
+ for (conf = cphy_setting_table_r8a779g0; conf->msps != 0; conf++) {
+ if (conf->msps > msps)
+ break;
+ }
+
+ if (!conf->msps) {
+ dev_err(priv->dev, "Unsupported PHY speed for msps setting (%u Msps)", msps);
+ return -ERANGE;
+ }
+
+ /* C-PHY specific */
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_COMMON_REG(7), 0x0155);
+ rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_DPHY_REG(7), 0x0068);
+ rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_DPHY_REG(8), 0x0010);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_LP_0_REG, 0x463c);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_LP_0_REG, 0x463c);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_LP_0_REG, 0x463c);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(0), 0x00d5);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(0), 0x00d5);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(0), 0x00d5);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(1), 0x0013);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(1), 0x0013);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(1), 0x0013);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(5), 0x0013);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(5), 0x0013);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(5), 0x0013);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(6), 0x000a);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(6), 0x000a);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(6), 0x000a);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(2), conf->rx2);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(2), conf->rx2);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(2), conf->rx2);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(2), 0x0001);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_REG(2), 0);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_REG(2), 0x0001);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_REG(2), 0x0001);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_REG(2), 0);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO0_REG(0), conf->trio0);
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO1_REG(0), conf->trio0);
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO2_REG(0), conf->trio0);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO0_REG(2), conf->trio2);
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO1_REG(2), conf->trio2);
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO2_REG(2), conf->trio2);
+
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO0_REG(1), conf->trio1);
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO1_REG(1), conf->trio1);
+ rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO2_REG(1), conf->trio1);
+
+ /*
+ * Configure pin-swap.
+	 * TODO: These registers are not documented yet; the values should depend
+ * on the 'clock-lanes' and 'data-lanes' devicetree properties.
+ */
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_CFG_0_REG, 0xf5);
+ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_TX_6_REG, 0x5000);
+
+ /* Leave Shutdown mode */
+ rcsi2_write(priv, V4H_DPHY_RSTZ_REG, BIT(0));
+ rcsi2_write(priv, V4H_PHY_SHUTDOWNZ_REG, BIT(0));
+
+ /* Wait for calibration */
+ if (rcsi2_wait_phy_start_v4h(priv, V4H_ST_PHYST_ST_PHY_READY)) {
+ dev_err(priv->dev, "PHY calibration failed\n");
+ return -ETIMEDOUT;
+ }
+
+	/* C-PHY setting - analog programming */
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(9), conf->lane29);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(7), conf->lane27);
+
+ return 0;
+}
+
+static int rcsi2_start_receiver_v4h(struct rcar_csi2 *priv)
+{
+ const struct rcar_csi2_format *format;
+ unsigned int lanes;
+ int msps;
+ int ret;
+
+ /* Calculate parameters */
+ format = rcsi2_code_to_fmt(priv->mf.code);
+ if (!format)
+ return -EINVAL;
+
+ ret = rcsi2_get_active_lanes(priv, &lanes);
+ if (ret)
+ return ret;
+
+ msps = rcsi2_calc_mbps(priv, format->bpp, lanes);
+ if (msps < 0)
+ return msps;
+
+	/* Reset LINK and PHY */
+ rcsi2_write(priv, V4H_CSI2_RESETN_REG, 0);
+ rcsi2_write(priv, V4H_DPHY_RSTZ_REG, 0);
+ rcsi2_write(priv, V4H_PHY_SHUTDOWNZ_REG, 0);
+
+ /* PHY static setting */
+ rcsi2_write(priv, V4H_PHY_EN_REG, BIT(0));
+ rcsi2_write(priv, V4H_FLDC_REG, 0);
+ rcsi2_write(priv, V4H_FLDD_REG, 0);
+ rcsi2_write(priv, V4H_IDIC_REG, 0);
+ rcsi2_write(priv, V4H_PHY_MODE_REG, BIT(0));
+ rcsi2_write(priv, V4H_N_LANES_REG, lanes - 1);
+
+ /* Reset CSI2 */
+ rcsi2_write(priv, V4H_CSI2_RESETN_REG, BIT(0));
+
+	/* Static register setup through APB */
+ /* Common setting */
+ rcsi2_write16(priv, V4H_CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_REG(0), 0x1bfd);
+ rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_STARTUP_1_1_REG, 0x0233);
+ rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_DPHY_REG(6), 0x0027);
+ rcsi2_write16(priv, V4H_PPI_CALIBCTRL_RW_COMMON_BG_0_REG, 0x01f4);
+ rcsi2_write16(priv, V4H_PPI_RW_TERMCAL_CFG_0_REG, 0x0013);
+ rcsi2_write16(priv, V4H_PPI_RW_OFFSETCAL_CFG_0_REG, 0x0003);
+ rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_TIMEBASE_REG, 0x004f);
+ rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_NREF_REG, 0x0320);
+ rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_NREF_RANGE_REG, 0x000f);
+ rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_TWAIT_CONFIG_REG, 0xfe18);
+ rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_VT_CONFIG_REG, 0x0c3c);
+ rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_COARSE_CFG_REG, 0x0105);
+ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(6), 0x1000);
+ rcsi2_write16(priv, V4H_PPI_RW_COMMON_CFG_REG, 0x0003);
+
+ /* C-PHY settings */
+ ret = rcsi2_c_phy_setting_v4h(priv, msps);
+ if (ret)
+ return ret;
+
+ rcsi2_wait_phy_start_v4h(priv, V4H_ST_PHYST_ST_STOPSTATE_0 |
+ V4H_ST_PHYST_ST_STOPSTATE_1 |
+ V4H_ST_PHYST_ST_STOPSTATE_2);
+
+ return 0;
+}
+
static int rcsi2_start(struct rcar_csi2 *priv)
{
int ret;
@@ -989,12 +1269,12 @@ static irqreturn_t rcsi2_irq_thread(int irq, void *data)
static int rcsi2_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asc)
{
struct rcar_csi2 *priv = notifier_to_csi2(notifier);
int pad;
- pad = media_entity_get_fwnode_pad(&subdev->entity, asd->match.fwnode,
+ pad = media_entity_get_fwnode_pad(&subdev->entity, asc->match.fwnode,
MEDIA_PAD_FL_SOURCE);
if (pad < 0) {
dev_err(priv->dev, "Failed to find pad for %s\n", subdev->name);
@@ -1014,7 +1294,7 @@ static int rcsi2_notify_bound(struct v4l2_async_notifier *notifier,
static void rcsi2_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asc)
{
struct rcar_csi2 *priv = notifier_to_csi2(notifier);
@@ -1091,7 +1371,7 @@ static int rcsi2_parse_v4l2(struct rcar_csi2 *priv,
static int rcsi2_parse_dt(struct rcar_csi2 *priv)
{
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asc;
struct fwnode_handle *fwnode;
struct fwnode_handle *ep;
struct v4l2_fwnode_endpoint v4l2_ep = {
@@ -1123,16 +1403,16 @@ static int rcsi2_parse_dt(struct rcar_csi2 *priv)
dev_dbg(priv->dev, "Found '%pOF'\n", to_of_node(fwnode));
- v4l2_async_nf_init(&priv->notifier);
+ v4l2_async_subdev_nf_init(&priv->notifier, &priv->subdev);
priv->notifier.ops = &rcar_csi2_notify_ops;
- asd = v4l2_async_nf_add_fwnode(&priv->notifier, fwnode,
- struct v4l2_async_subdev);
+ asc = v4l2_async_nf_add_fwnode(&priv->notifier, fwnode,
+ struct v4l2_async_connection);
fwnode_handle_put(fwnode);
- if (IS_ERR(asd))
- return PTR_ERR(asd);
+ if (IS_ERR(asc))
+ return PTR_ERR(asc);
- ret = v4l2_async_subdev_nf_register(&priv->subdev, &priv->notifier);
+ ret = v4l2_async_nf_register(&priv->notifier);
if (ret)
v4l2_async_nf_cleanup(&priv->notifier);
@@ -1496,6 +1776,12 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a779a0 = {
.support_dphy = true,
};
+static const struct rcar_csi2_info rcar_csi2_info_r8a779g0 = {
+ .start_receiver = rcsi2_start_receiver_v4h,
+ .use_isp = true,
+ .support_cphy = true,
+};
+
static const struct of_device_id rcar_csi2_of_table[] = {
{
.compatible = "renesas,r8a774a1-csi2",
@@ -1545,6 +1831,10 @@ static const struct of_device_id rcar_csi2_of_table[] = {
.compatible = "renesas,r8a779a0-csi2",
.data = &rcar_csi2_info_r8a779a0,
},
+ {
+ .compatible = "renesas,r8a779g0-csi2",
+ .data = &rcar_csi2_info_r8a779g0,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rcar_csi2_of_table);
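
Editor's note: for the new V4H (r8a779g0) path, rcsi2_calc_mbps() reuses the existing Mbps math and then scales by 5/14 (the "divide by 2.8" adjustment in the comment) to get a symbol rate, which rcsi2_c_phy_setting_v4h() uses to walk cphy_setting_table_r8a779g0 until it finds a row rated above it. The rate arithmetic in isolation, as a sketch with a hypothetical helper name:

    #include <linux/math64.h>

    /* Msps derivation used by the V4H start path (integer-only). */
    static u64 cphy_msps(u64 link_freq_hz, unsigned int bpp, unsigned int lanes)
    {
    	u64 rate = link_freq_hz * bpp;

    	do_div(rate, lanes * 1000000);	/* Mbps per lane */
    	return div_u64(rate * 5, 14);	/* divide by 2.8 -> Msps */
    }
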
diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
index cb206d3976dd..792336dada44 100644
--- a/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/renesas/rcar-vin/rcar-vin.h
@@ -106,7 +106,7 @@ struct rvin_video_format {
/**
* struct rvin_parallel_entity - Parallel video input endpoint descriptor
- * @asd: sub-device descriptor for async framework
+ * @asc: async connection descriptor for async framework
* @subdev: subdevice matched using async framework
* @mbus_type: media bus type
* @bus: media bus parallel configuration
@@ -115,7 +115,7 @@ struct rvin_video_format {
*
*/
struct rvin_parallel_entity {
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asc;
struct v4l2_subdev *subdev;
enum v4l2_mbus_type mbus_type;
@@ -272,10 +272,10 @@ struct rvin_dev {
*
* @lock: protects the count, notifier, vin and csi members
* @count: number of enabled VIN instances found in DT
- * @notifier: group notifier for CSI-2 async subdevices
+ * @notifier: group notifier for CSI-2 async connections
* @vin: VIN instances which are part of the group
* @link_setup: Callback to create all links for the media graph
- * @remotes: array of pairs of fwnode and subdev pointers
+ * @remotes: array of pairs of async connection and subdev pointers
* to all remote subdevices.
*/
struct rvin_group {
@@ -291,7 +291,7 @@ struct rvin_group {
int (*link_setup)(struct rvin_dev *vin);
struct {
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asc;
struct v4l2_subdev *subdev;
} remotes[RVIN_REMOTES_MAX];
};
diff --git a/drivers/media/platform/renesas/rcar_drif.c b/drivers/media/platform/renesas/rcar_drif.c
index 3a92f4535c18..163a4ba61c17 100644
--- a/drivers/media/platform/renesas/rcar_drif.c
+++ b/drivers/media/platform/renesas/rcar_drif.c
@@ -44,8 +44,9 @@
#include <linux/ioctl.h>
#include <linux/iopoll.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <media/v4l2-async.h>
@@ -1097,7 +1098,7 @@ static void rcar_drif_sdr_unregister(struct rcar_drif_sdr *sdr)
/* Sub-device bound callback */
static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct rcar_drif_sdr *sdr =
container_of(notifier, struct rcar_drif_sdr, notifier);
@@ -1112,7 +1113,7 @@ static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier,
/* Sub-device unbind callback */
static void rcar_drif_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct rcar_drif_sdr *sdr =
container_of(notifier, struct rcar_drif_sdr, notifier);
@@ -1205,9 +1206,9 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
{
struct v4l2_async_notifier *notifier = &sdr->notifier;
struct fwnode_handle *fwnode, *ep;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
- v4l2_async_nf_init(notifier);
+ v4l2_async_nf_init(&sdr->notifier, &sdr->v4l2_dev);
ep = fwnode_graph_get_next_endpoint(of_fwnode_handle(sdr->dev->of_node),
NULL);
@@ -1225,7 +1226,7 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
}
asd = v4l2_async_nf_add_fwnode(notifier, fwnode,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
fwnode_handle_put(fwnode);
if (IS_ERR(asd))
return PTR_ERR(asd);
@@ -1341,7 +1342,7 @@ static int rcar_drif_sdr_probe(struct rcar_drif_sdr *sdr)
sdr->notifier.ops = &rcar_drif_notify_ops;
/* Register notifier */
- ret = v4l2_async_nf_register(&sdr->v4l2_dev, &sdr->notifier);
+ ret = v4l2_async_nf_register(&sdr->notifier);
if (ret < 0) {
dev_err(sdr->dev, "failed: notifier register ret %d\n", ret);
goto cleanup;
diff --git a/drivers/media/platform/renesas/rcar_fdp1.c b/drivers/media/platform/renesas/rcar_fdp1.c
index ab39cd2201c8..a2565b269f3b 100644
--- a/drivers/media/platform/renesas/rcar_fdp1.c
+++ b/drivers/media/platform/renesas/rcar_fdp1.c
@@ -18,7 +18,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
diff --git a/drivers/media/platform/renesas/rcar_jpu.c b/drivers/media/platform/renesas/rcar_jpu.c
index 2b8cb50f54de..fff349e45067 100644
--- a/drivers/media/platform/renesas/rcar_jpu.c
+++ b/drivers/media/platform/renesas/rcar_jpu.c
@@ -22,7 +22,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -1601,10 +1600,8 @@ static int jpu_probe(struct platform_device *pdev)
/* interrupt service routine registration */
jpu->irq = ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "cannot find IRQ\n");
+ if (ret < 0)
return ret;
- }
ret = devm_request_irq(&pdev->dev, jpu->irq, jpu_irq_handler, 0,
dev_name(&pdev->dev), jpu);
diff --git a/drivers/media/platform/renesas/renesas-ceu.c b/drivers/media/platform/renesas/renesas-ceu.c
index 5c9e27f8c94b..ec631c6e2a57 100644
--- a/drivers/media/platform/renesas/renesas-ceu.c
+++ b/drivers/media/platform/renesas/renesas-ceu.c
@@ -22,7 +22,6 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -152,7 +151,7 @@ static inline struct ceu_buffer *vb2_to_ceu(struct vb2_v4l2_buffer *vbuf)
* ceu_subdev - Wraps v4l2 sub-device and provides async subdevice.
*/
struct ceu_subdev {
- struct v4l2_async_subdev asd;
+ struct v4l2_async_connection asd;
struct v4l2_subdev *v4l2_sd;
/* per-subdevice mbus configuration options */
@@ -160,7 +159,7 @@ struct ceu_subdev {
struct ceu_mbus_fmt mbus_fmt;
};
-static struct ceu_subdev *to_ceu_subdev(struct v4l2_async_subdev *asd)
+static struct ceu_subdev *to_ceu_subdev(struct v4l2_async_connection *asd)
{
return container_of(asd, struct ceu_subdev, asd);
}
@@ -1375,7 +1374,7 @@ static void ceu_vdev_release(struct video_device *vdev)
static int ceu_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *v4l2_sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct v4l2_device *v4l2_dev = notifier->v4l2_dev;
struct ceu_device *ceudev = v4l2_to_ceu(v4l2_dev);
@@ -1658,7 +1657,7 @@ static int ceu_probe(struct platform_device *pdev)
if (ret)
goto error_pm_disable;
- v4l2_async_nf_init(&ceudev->notifier);
+ v4l2_async_nf_init(&ceudev->notifier, &ceudev->v4l2_dev);
if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
ceu_data = of_device_get_match_data(dev);
@@ -1680,7 +1679,7 @@ static int ceu_probe(struct platform_device *pdev)
ceudev->notifier.v4l2_dev = &ceudev->v4l2_dev;
ceudev->notifier.ops = &ceu_notify_ops;
- ret = v4l2_async_nf_register(&ceudev->v4l2_dev, &ceudev->notifier);
+ ret = v4l2_async_nf_register(&ceudev->notifier);
if (ret)
goto error_cleanup;
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
index 7a71370fcc32..280efd2a8185 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -93,7 +92,7 @@ static int rzg2l_cru_group_notify_complete(struct v4l2_async_notifier *notifier)
static void rzg2l_cru_group_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct rzg2l_cru_dev *cru = notifier_to_cru(notifier);
@@ -111,7 +110,7 @@ static void rzg2l_cru_group_notify_unbind(struct v4l2_async_notifier *notifier,
static int rzg2l_cru_group_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct rzg2l_cru_dev *cru = notifier_to_cru(notifier);
@@ -139,7 +138,7 @@ static int rzg2l_cru_mc_parse_of(struct rzg2l_cru_dev *cru)
.bus_type = V4L2_MBUS_CSI2_DPHY,
};
struct fwnode_handle *ep, *fwnode;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
int ret;
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(cru->dev), 1, 0, 0);
@@ -163,7 +162,7 @@ static int rzg2l_cru_mc_parse_of(struct rzg2l_cru_dev *cru)
}
asd = v4l2_async_nf_add_fwnode(&cru->notifier, fwnode,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto out;
@@ -183,7 +182,7 @@ static int rzg2l_cru_mc_parse_of_graph(struct rzg2l_cru_dev *cru)
{
int ret;
- v4l2_async_nf_init(&cru->notifier);
+ v4l2_async_nf_init(&cru->notifier, &cru->v4l2_dev);
ret = rzg2l_cru_mc_parse_of(cru);
if (ret)
@@ -191,10 +190,10 @@ static int rzg2l_cru_mc_parse_of_graph(struct rzg2l_cru_dev *cru)
cru->notifier.ops = &rzg2l_cru_async_ops;
- if (list_empty(&cru->notifier.asd_list))
+ if (list_empty(&cru->notifier.waiting_list))
return 0;
- ret = v4l2_async_nf_register(&cru->v4l2_dev, &cru->notifier);
+ ret = v4l2_async_nf_register(&cru->notifier);
if (ret < 0) {
dev_err(cru->dev, "Notifier registration failed\n");
v4l2_async_nf_cleanup(&cru->notifier);
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
index 0b682cbae3eb..811603f18af0 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h
@@ -45,7 +45,7 @@ enum rzg2l_cru_dma_state {
};
struct rzg2l_cru_csi {
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct v4l2_subdev *subdev;
u32 channel;
};
diff --git a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
index d6489c62b081..ad2bd71037ab 100644
--- a/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
+++ b/drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c
@@ -11,7 +11,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -600,7 +599,7 @@ static const struct v4l2_subdev_ops rzg2l_csi2_subdev_ops = {
static int rzg2l_csi2_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct rzg2l_csi2 *csi2 = notifier_to_csi2(notifier);
@@ -616,7 +615,7 @@ static int rzg2l_csi2_notify_bound(struct v4l2_async_notifier *notifier,
static void rzg2l_csi2_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct rzg2l_csi2 *csi2 = notifier_to_csi2(notifier);
@@ -647,7 +646,7 @@ static int rzg2l_csi2_parse_dt(struct rzg2l_csi2 *csi2)
struct v4l2_fwnode_endpoint v4l2_ep = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct fwnode_handle *fwnode;
struct fwnode_handle *ep;
int ret;
@@ -674,16 +673,16 @@ static int rzg2l_csi2_parse_dt(struct rzg2l_csi2 *csi2)
fwnode = fwnode_graph_get_remote_endpoint(ep);
fwnode_handle_put(ep);
- v4l2_async_nf_init(&csi2->notifier);
+ v4l2_async_subdev_nf_init(&csi2->notifier, &csi2->subdev);
csi2->notifier.ops = &rzg2l_csi2_notify_ops;
asd = v4l2_async_nf_add_fwnode(&csi2->notifier, fwnode,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
fwnode_handle_put(fwnode);
if (IS_ERR(asd))
return PTR_ERR(asd);
- ret = v4l2_async_subdev_nf_register(&csi2->subdev, &csi2->notifier);
+ ret = v4l2_async_nf_register(&csi2->notifier);
if (ret)
v4l2_async_nf_cleanup(&csi2->notifier);
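
For notifiers owned by a sub-device rather than a struct v4l2_device (the CSI-2 receivers in this series), the init call becomes v4l2_async_subdev_nf_init(), after which the same v4l2_async_nf_register() is used. A short illustrative fragment, assuming a bridge with an embedded subdev and notifier:

#include <media/v4l2-async.h>
#include <media/v4l2-subdev.h>

/* Illustrative: bind the notifier to the bridge subdev, then register. */
static int foo_bridge_init_notifier(struct v4l2_async_notifier *notifier,
                                    struct v4l2_subdev *bridge_sd,
                                    struct fwnode_handle *ep)
{
        struct v4l2_async_connection *asc;

        v4l2_async_subdev_nf_init(notifier, bridge_sd);

        /* ep is the local endpoint; its remote is the awaited sensor. */
        asc = v4l2_async_nf_add_fwnode_remote(notifier, ep,
                                              struct v4l2_async_connection);
        if (IS_ERR(asc))
                return PTR_ERR(asc);

        return v4l2_async_nf_register(notifier);
}
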
diff --git a/drivers/media/platform/renesas/sh_vou.c b/drivers/media/platform/renesas/sh_vou.c
index 8fe3272a541f..f792aedc9d82 100644
--- a/drivers/media/platform/renesas/sh_vou.c
+++ b/drivers/media/platform/renesas/sh_vou.c
@@ -1223,19 +1223,19 @@ static int sh_vou_probe(struct platform_device *pdev)
struct i2c_adapter *i2c_adap;
struct video_device *vdev;
struct sh_vou_device *vou_dev;
- struct resource *reg_res;
struct v4l2_subdev *subdev;
struct vb2_queue *q;
int irq, ret;
- reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
-
- if (!vou_pdata || !reg_res || irq <= 0) {
+ if (!vou_pdata) {
dev_err(&pdev->dev, "Insufficient VOU platform information.\n");
return -ENODEV;
}
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
vou_dev = devm_kzalloc(&pdev->dev, sizeof(*vou_dev), GFP_KERNEL);
if (!vou_dev)
return -ENOMEM;
@@ -1264,7 +1264,7 @@ static int sh_vou_probe(struct platform_device *pdev)
pix->sizeimage = VOU_MAX_IMAGE_WIDTH * 2 * 480;
pix->colorspace = V4L2_COLORSPACE_SMPTE170M;
- vou_dev->base = devm_ioremap_resource(&pdev->dev, reg_res);
+ vou_dev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vou_dev->base))
return PTR_ERR(vou_dev->base);
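
devm_platform_ioremap_resource() folds the platform_get_resource() + devm_ioremap_resource() pair into one call; the _get_and_ioremap_ variant additionally hands back the struct resource for drivers that still need it (as the stm32-dcmi change further down does). Illustrative fragment:

#include <linux/io.h>
#include <linux/platform_device.h>

/* Illustrative: map MMIO region 0 and optionally keep its resource. */
static void __iomem *foo_map_regs(struct platform_device *pdev,
                                  struct resource **res)
{
        /* Replaces platform_get_resource() + devm_ioremap_resource(). */
        return devm_platform_get_and_ioremap_resource(pdev, 0, res);
}

Passing a NULL res pointer is allowed when the caller only needs the mapping.
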
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drv.c b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
index a9db84be4822..1aac44d68731 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
@@ -13,7 +13,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index a1293c45aae1..d30f0ecb1bfd 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -148,7 +148,7 @@ struct rkisp1_info {
* @port: port number (0: MIPI, 1: Parallel)
*/
struct rkisp1_sensor_async {
- struct v4l2_async_subdev asd;
+ struct v4l2_async_connection asd;
unsigned int index;
struct fwnode_handle *source_ep;
unsigned int lanes;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
index d7acc94e10f8..fdff3d0da4e5 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
@@ -381,6 +381,7 @@ static int rkisp1_csi_s_stream(struct v4l2_subdev *sd, int enable)
struct rkisp1_csi *csi = to_rkisp1_csi(sd);
struct rkisp1_device *rkisp1 = csi->rkisp1;
struct rkisp1_sensor_async *source_asd;
+ struct v4l2_async_connection *asc;
struct media_pad *source_pad;
struct v4l2_subdev *source;
int ret;
@@ -406,7 +407,11 @@ static int rkisp1_csi_s_stream(struct v4l2_subdev *sd, int enable)
return -EPIPE;
}
- source_asd = container_of(source->asd, struct rkisp1_sensor_async, asd);
+ asc = v4l2_async_connection_unique(source);
+ if (!asc)
+ return -EPIPE;
+
+ source_asd = container_of(asc, struct rkisp1_sensor_async, asd);
if (source_asd->mbus_type != V4L2_MBUS_CSI2_DPHY)
return -EINVAL;
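
Since the connection is no longer reachable through subdev->asd, drivers that wrap their async match entry in a larger structure now look it up with v4l2_async_connection_unique(), which returns the single connection bound to a sub-device or NULL. A sketch mirroring the rkisp1 usage, with hypothetical foo_* names:

#include <media/v4l2-async.h>
#include <media/v4l2-subdev.h>

/* Hypothetical per-sensor wrapper around the async connection. */
struct foo_sensor_async {
        struct v4l2_async_connection asd;
        unsigned int lanes;
};

static struct foo_sensor_async *foo_sensor_from_sd(struct v4l2_subdev *sd)
{
        struct v4l2_async_connection *asc;

        /* Single connection bound to this subdev, or NULL if none. */
        asc = v4l2_async_connection_unique(sd);
        if (!asc)
                return NULL;

        return container_of(asc, struct foo_sensor_async, asd);
}
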
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index 4762cb32353d..c41abd2833f1 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -13,7 +13,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-fwnode.h>
@@ -122,12 +122,12 @@ struct rkisp1_isr_data {
static int rkisp1_subdev_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asc)
{
struct rkisp1_device *rkisp1 =
container_of(notifier, struct rkisp1_device, notifier);
struct rkisp1_sensor_async *s_asd =
- container_of(asd, struct rkisp1_sensor_async, asd);
+ container_of(asc, struct rkisp1_sensor_async, asd);
int source_pad;
int ret;
@@ -165,10 +165,10 @@ static int rkisp1_subdev_notifier_complete(struct v4l2_async_notifier *notifier)
return v4l2_device_register_subdev_nodes(&rkisp1->v4l2_dev);
}
-static void rkisp1_subdev_notifier_destroy(struct v4l2_async_subdev *asd)
+static void rkisp1_subdev_notifier_destroy(struct v4l2_async_connection *asc)
{
struct rkisp1_sensor_async *rk_asd =
- container_of(asd, struct rkisp1_sensor_async, asd);
+ container_of(asc, struct rkisp1_sensor_async, asd);
fwnode_handle_put(rk_asd->source_ep);
}
@@ -187,7 +187,7 @@ static int rkisp1_subdev_notifier_register(struct rkisp1_device *rkisp1)
unsigned int index = 0;
int ret = 0;
- v4l2_async_nf_init(ntf);
+ v4l2_async_nf_init(ntf, &rkisp1->v4l2_dev);
ntf->ops = &rkisp1_subdev_notifier_ops;
@@ -287,7 +287,7 @@ static int rkisp1_subdev_notifier_register(struct rkisp1_device *rkisp1)
if (!index)
dev_dbg(rkisp1->dev, "no remote subdevice found\n");
- ret = v4l2_async_nf_register(&rkisp1->v4l2_dev, ntf);
+ ret = v4l2_async_nf_register(ntf);
if (ret) {
v4l2_async_nf_cleanup(ntf);
return ret;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index 585cf3f53469..07fbb77ce234 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -868,9 +868,13 @@ static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable)
mbus_flags = 0;
} else {
const struct rkisp1_sensor_async *asd;
+ struct v4l2_async_connection *asc;
- asd = container_of(rkisp1->source->asd,
- struct rkisp1_sensor_async, asd);
+ asc = v4l2_async_connection_unique(rkisp1->source);
+ if (!asc)
+ return -EPIPE;
+
+ asd = container_of(asc, struct rkisp1_sensor_async, asd);
mbus_type = asd->mbus_type;
mbus_flags = asd->mbus_flags;
diff --git a/drivers/media/platform/samsung/exynos-gsc/gsc-core.c b/drivers/media/platform/samsung/exynos-gsc/gsc-core.c
index 1fb34de70649..618ae55fe396 100644
--- a/drivers/media/platform/samsung/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/samsung/exynos-gsc/gsc-core.c
@@ -20,7 +20,6 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <media/v4l2-ioctl.h>
#include "gsc-core.h"
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-core.c b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
index 976b4f747ad4..97908778e1c8 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-core.c
@@ -19,7 +19,6 @@
#include <linux/mfd/syscon.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <media/v4l2-ioctl.h>
@@ -924,7 +923,6 @@ static int fimc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
u32 lclk_freq = 0;
struct fimc_dev *fimc;
- struct resource *res;
int ret = 0;
int irq;
@@ -961,8 +959,7 @@ static int fimc_probe(struct platform_device *pdev)
return PTR_ERR(fimc->sysreg);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fimc->regs = devm_ioremap_resource(dev, res);
+ fimc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fimc->regs))
return PTR_ERR(fimc->regs);
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
index c3146ae08447..9396b10b5b1c 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c
@@ -1450,7 +1450,6 @@ static int fimc_lite_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
struct fimc_lite *fimc;
- struct resource *res;
int ret;
int irq;
@@ -1479,8 +1478,7 @@ static int fimc_lite_probe(struct platform_device *pdev)
spin_lock_init(&fimc->slock);
mutex_init(&fimc->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fimc->regs = devm_ioremap_resource(dev, res);
+ fimc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fimc->regs))
return PTR_ERR(fimc->regs);
diff --git a/drivers/media/platform/samsung/exynos4-is/media-dev.c b/drivers/media/platform/samsung/exynos4-is/media-dev.c
index c9cb9a216fae..5f10bb4eb4f7 100644
--- a/drivers/media/platform/samsung/exynos4-is/media-dev.c
+++ b/drivers/media/platform/samsung/exynos4-is/media-dev.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -401,7 +400,7 @@ static int fimc_md_parse_one_endpoint(struct fimc_md *fmd,
int index = fmd->num_sensors;
struct fimc_source_info *pd = &fmd->sensor[index].pdata;
struct device_node *rem, *np;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct v4l2_fwnode_endpoint endpoint = { .bus_type = 0 };
int ret;
@@ -466,7 +465,7 @@ static int fimc_md_parse_one_endpoint(struct fimc_md *fmd,
asd = v4l2_async_nf_add_fwnode_remote(&fmd->subdev_notifier,
of_fwnode_handle(ep),
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
of_node_put(ep);
@@ -1372,7 +1371,7 @@ err:
static int subdev_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct fimc_md *fmd = notifier_to_fimc_md(notifier);
struct fimc_sensor_info *si = NULL;
@@ -1479,7 +1478,7 @@ static int fimc_md_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fmd);
- v4l2_async_nf_init(&fmd->subdev_notifier);
+ v4l2_async_nf_init(&fmd->subdev_notifier, &fmd->v4l2_dev);
ret = fimc_md_register_platform_entities(fmd, dev->of_node);
if (ret)
@@ -1507,8 +1506,7 @@ static int fimc_md_probe(struct platform_device *pdev)
fmd->subdev_notifier.ops = &subdev_notifier_ops;
fmd->num_sensors = 0;
- ret = v4l2_async_nf_register(&fmd->v4l2_dev,
- &fmd->subdev_notifier);
+ ret = v4l2_async_nf_register(&fmd->subdev_notifier);
if (ret)
goto err_clk_p;
}
diff --git a/drivers/media/platform/samsung/exynos4-is/media-dev.h b/drivers/media/platform/samsung/exynos4-is/media-dev.h
index 079105d88bab..786264cf79dc 100644
--- a/drivers/media/platform/samsung/exynos4-is/media-dev.h
+++ b/drivers/media/platform/samsung/exynos4-is/media-dev.h
@@ -82,7 +82,7 @@ struct fimc_camclk_info {
*/
struct fimc_sensor_info {
struct fimc_source_info pdata;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct v4l2_subdev *subdev;
struct fimc_dev *host;
};
diff --git a/drivers/media/platform/samsung/s3c-camif/camif-core.c b/drivers/media/platform/samsung/s3c-camif/camif-core.c
index afe1fcc37354..e4529f666e20 100644
--- a/drivers/media/platform/samsung/s3c-camif/camif-core.c
+++ b/drivers/media/platform/samsung/s3c-camif/camif-core.c
@@ -381,8 +381,8 @@ static int camif_request_irqs(struct platform_device *pdev,
init_waitqueue_head(&vp->irq_queue);
irq = platform_get_irq(pdev, i);
- if (irq <= 0)
- return -ENXIO;
+ if (irq < 0)
+ return irq;
ret = devm_request_irq(&pdev->dev, irq, s3c_camif_irq_handler,
0, dev_name(&pdev->dev), vp);
diff --git a/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
index c3c7e48f1b6e..d2c4a0178b3c 100644
--- a/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
@@ -2870,10 +2870,8 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
/* interrupt service routine registration */
jpeg->irq = ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(&pdev->dev, "cannot find IRQ\n");
+ if (ret < 0)
return ret;
- }
ret = devm_request_irq(&pdev->dev, jpeg->irq, jpeg->variant->jpeg_irq,
0, dev_name(&pdev->dev), jpeg);
@@ -3164,7 +3162,7 @@ static struct platform_driver s5p_jpeg_driver = {
.probe = s5p_jpeg_probe,
.remove_new = s5p_jpeg_remove,
.driver = {
- .of_match_table = of_match_ptr(samsung_jpeg_match),
+ .of_match_table = samsung_jpeg_match,
.name = S5P_JPEG_M2M_NAME,
.pm = &s5p_jpeg_pm_ops,
},
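
Dropping of_match_ptr() references the OF match table unconditionally; with of_match_ptr() the reference compiles to NULL on !CONFIG_OF builds, leaving the table unused (and liable to warn) unless it is guarded, and these drivers are devicetree-only anyway. Illustrative driver declaration, with a hypothetical compatible string:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        return 0;
}

static const struct of_device_id foo_of_match[] = {
        { .compatible = "vendor,foo" },      /* hypothetical compatible */
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct platform_driver foo_driver = {
        .probe  = foo_probe,
        .driver = {
                .name           = "foo",
                /* Reference the table directly; no of_match_ptr(). */
                .of_match_table = foo_of_match,
        },
};
module_platform_driver(foo_driver);
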
diff --git a/drivers/media/platform/st/stm32/stm32-dcmi.c b/drivers/media/platform/st/stm32/stm32-dcmi.c
index dad6e22e4ce4..8cb4fdcae137 100644
--- a/drivers/media/platform/st/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/st/stm32/stm32-dcmi.c
@@ -134,6 +134,7 @@ struct stm32_dcmi {
struct video_device *vdev;
struct v4l2_async_notifier notifier;
struct v4l2_subdev *source;
+ struct v4l2_subdev *s_subdev;
struct v4l2_format fmt;
struct v4l2_rect crop;
bool do_crop;
@@ -692,51 +693,6 @@ static int dcmi_pipeline_s_fmt(struct stm32_dcmi *dcmi,
return 0;
}
-static int dcmi_pipeline_s_stream(struct stm32_dcmi *dcmi, int state)
-{
- struct media_entity *entity = &dcmi->vdev->entity;
- struct v4l2_subdev *subdev;
- struct media_pad *pad;
- int ret;
-
- /* Start/stop all entities within pipeline */
- while (1) {
- pad = &entity->pads[0];
- if (!(pad->flags & MEDIA_PAD_FL_SINK))
- break;
-
- pad = media_pad_remote_pad_first(pad);
- if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
- break;
-
- entity = pad->entity;
- subdev = media_entity_to_v4l2_subdev(entity);
-
- ret = v4l2_subdev_call(subdev, video, s_stream, state);
- if (ret < 0 && ret != -ENOIOCTLCMD) {
- dev_err(dcmi->dev, "%s: \"%s\" failed to %s streaming (%d)\n",
- __func__, subdev->name,
- state ? "start" : "stop", ret);
- return ret;
- }
-
- dev_dbg(dcmi->dev, "\"%s\" is %s\n",
- subdev->name, state ? "started" : "stopped");
- }
-
- return 0;
-}
-
-static int dcmi_pipeline_start(struct stm32_dcmi *dcmi)
-{
- return dcmi_pipeline_s_stream(dcmi, 1);
-}
-
-static void dcmi_pipeline_stop(struct stm32_dcmi *dcmi)
-{
- dcmi_pipeline_s_stream(dcmi, 0);
-}
-
static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
@@ -758,9 +714,12 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
goto err_pm_put;
}
- ret = dcmi_pipeline_start(dcmi);
- if (ret)
+ ret = v4l2_subdev_call(dcmi->s_subdev, video, s_stream, 1);
+ if (ret < 0) {
+ dev_err(dcmi->dev, "%s: Failed to start source subdev, error (%d)\n",
+ __func__, ret);
goto err_media_pipeline_stop;
+ }
spin_lock_irq(&dcmi->irqlock);
@@ -862,7 +821,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count)
return 0;
err_pipeline_stop:
- dcmi_pipeline_stop(dcmi);
+ v4l2_subdev_call(dcmi->s_subdev, video, s_stream, 0);
err_media_pipeline_stop:
video_device_pipeline_stop(dcmi->vdev);
@@ -889,8 +848,12 @@ static void dcmi_stop_streaming(struct vb2_queue *vq)
{
struct stm32_dcmi *dcmi = vb2_get_drv_priv(vq);
struct dcmi_buf *buf, *node;
+ int ret;
- dcmi_pipeline_stop(dcmi);
+ ret = v4l2_subdev_call(dcmi->s_subdev, video, s_stream, 0);
+ if (ret < 0)
+ dev_err(dcmi->dev, "%s: Failed to stop source subdev, error (%d)\n",
+ __func__, ret);
video_device_pipeline_stop(dcmi->vdev);
@@ -1837,7 +1800,7 @@ static int dcmi_graph_notify_complete(struct v4l2_async_notifier *notifier)
static void dcmi_graph_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
@@ -1849,7 +1812,7 @@ static void dcmi_graph_notify_unbind(struct v4l2_async_notifier *notifier,
static int dcmi_graph_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct stm32_dcmi *dcmi = notifier_to_dcmi(notifier);
unsigned int ret;
@@ -1876,6 +1839,8 @@ static int dcmi_graph_notify_bound(struct v4l2_async_notifier *notifier,
dev_dbg(dcmi->dev, "DCMI is now linked to \"%s\"\n",
subdev->name);
+ dcmi->s_subdev = subdev;
+
return ret;
}
@@ -1887,7 +1852,7 @@ static const struct v4l2_async_notifier_operations dcmi_graph_notify_ops = {
static int dcmi_graph_init(struct stm32_dcmi *dcmi)
{
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct device_node *ep;
int ret;
@@ -1897,11 +1862,11 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
return -EINVAL;
}
- v4l2_async_nf_init(&dcmi->notifier);
+ v4l2_async_nf_init(&dcmi->notifier, &dcmi->v4l2_dev);
asd = v4l2_async_nf_add_fwnode_remote(&dcmi->notifier,
of_fwnode_handle(ep),
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
of_node_put(ep);
@@ -1912,7 +1877,7 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
dcmi->notifier.ops = &dcmi_graph_notify_ops;
- ret = v4l2_async_nf_register(&dcmi->v4l2_dev, &dcmi->notifier);
+ ret = v4l2_async_nf_register(&dcmi->notifier);
if (ret < 0) {
dev_err(dcmi->dev, "Failed to register notifier\n");
v4l2_async_nf_cleanup(&dcmi->notifier);
@@ -1932,7 +1897,6 @@ static int dcmi_probe(struct platform_device *pdev)
struct dma_chan *chan;
struct dma_slave_caps caps;
struct clk *mclk;
- int irq;
int ret = 0;
match = of_match_device(of_match_ptr(stm32_dcmi_of_match), &pdev->dev);
@@ -1981,19 +1945,11 @@ static int dcmi_probe(struct platform_device *pdev)
dcmi->bus.data_shift = ep.bus.parallel.data_shift;
dcmi->bus_type = ep.bus_type;
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return irq ? irq : -ENXIO;
-
- dcmi->irq = irq;
-
- dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!dcmi->res) {
- dev_err(&pdev->dev, "Could not get resource\n");
- return -ENODEV;
- }
+ dcmi->irq = platform_get_irq(pdev, 0);
+ if (dcmi->irq < 0)
+ return dcmi->irq;
- dcmi->regs = devm_ioremap_resource(&pdev->dev, dcmi->res);
+ dcmi->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &dcmi->res);
if (IS_ERR(dcmi->regs))
return PTR_ERR(dcmi->regs);
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
index d6e7d1b36083..ad13d447d483 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -42,7 +41,7 @@ static const struct media_entity_operations sun4i_csi_video_entity_ops = {
static int sun4i_csi_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct sun4i_csi *csi = container_of(notifier, struct sun4i_csi,
notifier);
@@ -118,11 +117,11 @@ static int sun4i_csi_notifier_init(struct sun4i_csi *csi)
struct v4l2_fwnode_endpoint vep = {
.bus_type = V4L2_MBUS_PARALLEL,
};
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct fwnode_handle *ep;
int ret;
- v4l2_async_nf_init(&csi->notifier);
+ v4l2_async_nf_init(&csi->notifier, &csi->v4l);
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
@@ -136,7 +135,7 @@ static int sun4i_csi_notifier_init(struct sun4i_csi *csi)
csi->bus = vep.bus.parallel;
asd = v4l2_async_nf_add_fwnode_remote(&csi->notifier, ep,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto out;
@@ -240,7 +239,7 @@ static int sun4i_csi_probe(struct platform_device *pdev)
if (ret)
goto err_unregister_media;
- ret = v4l2_async_nf_register(&csi->v4l, &csi->notifier);
+ ret = v4l2_async_nf_register(&csi->notifier);
if (ret) {
dev_err(csi->dev, "Couldn't register our notifier.\n");
goto err_unregister_media;
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
index e2723cfa4515..c6ba385c0c86 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
@@ -11,7 +11,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -427,7 +426,7 @@ static struct platform_driver sun6i_csi_platform_driver = {
.remove_new = sun6i_csi_remove,
.driver = {
.name = SUN6I_CSI_NAME,
- .of_match_table = of_match_ptr(sun6i_csi_of_match),
+ .of_match_table = sun6i_csi_of_match,
.pm = &sun6i_csi_pm_ops,
},
};
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_bridge.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_bridge.c
index 4db950973ce2..e573413123b9 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_bridge.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_bridge.c
@@ -642,7 +642,7 @@ static int sun6i_csi_bridge_link(struct sun6i_csi_device *csi_dev,
static int
sun6i_csi_bridge_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *remote_subdev,
- struct v4l2_async_subdev *async_subdev)
+ struct v4l2_async_connection *async_subdev)
{
struct sun6i_csi_device *csi_dev =
container_of(notifier, struct sun6i_csi_device,
@@ -819,7 +819,10 @@ int sun6i_csi_bridge_setup(struct sun6i_csi_device *csi_dev)
/* V4L2 Async */
- v4l2_async_nf_init(notifier);
+ if (csi_dev->isp_available)
+ v4l2_async_subdev_nf_init(notifier, subdev);
+ else
+ v4l2_async_nf_init(notifier, v4l2_dev);
notifier->ops = &sun6i_csi_bridge_notifier_ops;
sun6i_csi_bridge_source_setup(csi_dev, &bridge->source_parallel,
@@ -828,10 +831,7 @@ int sun6i_csi_bridge_setup(struct sun6i_csi_device *csi_dev)
sun6i_csi_bridge_source_setup(csi_dev, &bridge->source_mipi_csi2,
SUN6I_CSI_PORT_MIPI_CSI2, NULL);
- if (csi_dev->isp_available)
- ret = v4l2_async_subdev_nf_register(subdev, notifier);
- else
- ret = v4l2_async_nf_register(v4l2_dev, notifier);
+ ret = v4l2_async_nf_register(notifier);
if (ret) {
dev_err(dev, "failed to register v4l2 async notifier: %d\n",
ret);
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_bridge.h b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_bridge.h
index ee592a14b9c5..44653b38f722 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_bridge.h
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_bridge.h
@@ -34,7 +34,7 @@ struct sun6i_csi_bridge_source {
};
struct sun6i_csi_bridge_async_subdev {
- struct v4l2_async_subdev async_subdev;
+ struct v4l2_async_connection async_subdev;
struct sun6i_csi_bridge_source *source;
};
diff --git a/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c
index dce130b4c9f6..08d86c17b284 100644
--- a/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c
+++ b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c
@@ -7,7 +7,6 @@
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -408,7 +407,7 @@ static const struct media_entity_operations sun6i_mipi_csi2_entity_ops = {
static int
sun6i_mipi_csi2_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *remote_subdev,
- struct v4l2_async_subdev *async_subdev)
+ struct v4l2_async_connection *async_subdev)
{
struct v4l2_subdev *subdev = notifier->sd;
struct sun6i_mipi_csi2_device *csi2_dev =
@@ -462,7 +461,7 @@ sun6i_mipi_csi2_bridge_source_setup(struct sun6i_mipi_csi2_device *csi2_dev)
{
struct v4l2_async_notifier *notifier = &csi2_dev->bridge.notifier;
struct v4l2_fwnode_endpoint *endpoint = &csi2_dev->bridge.endpoint;
- struct v4l2_async_subdev *subdev_async;
+ struct v4l2_async_connection *subdev_async;
struct fwnode_handle *handle;
struct device *dev = csi2_dev->dev;
int ret;
@@ -480,7 +479,7 @@ sun6i_mipi_csi2_bridge_source_setup(struct sun6i_mipi_csi2_device *csi2_dev)
subdev_async =
v4l2_async_nf_add_fwnode_remote(notifier, handle,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
if (IS_ERR(subdev_async))
ret = PTR_ERR(subdev_async);
@@ -531,7 +530,7 @@ static int sun6i_mipi_csi2_bridge_setup(struct sun6i_mipi_csi2_device *csi2_dev)
/* V4L2 Async */
- v4l2_async_nf_init(notifier);
+ v4l2_async_subdev_nf_init(notifier, subdev);
notifier->ops = &sun6i_mipi_csi2_notifier_ops;
ret = sun6i_mipi_csi2_bridge_source_setup(csi2_dev);
@@ -540,7 +539,7 @@ static int sun6i_mipi_csi2_bridge_setup(struct sun6i_mipi_csi2_device *csi2_dev)
/* Only register the notifier when a sensor is connected. */
if (ret != -ENODEV) {
- ret = v4l2_async_subdev_nf_register(subdev, notifier);
+ ret = v4l2_async_nf_register(notifier);
if (ret < 0)
goto error_v4l2_notifier_cleanup;
@@ -757,7 +756,7 @@ static struct platform_driver sun6i_mipi_csi2_platform_driver = {
.remove_new = sun6i_mipi_csi2_remove,
.driver = {
.name = SUN6I_MIPI_CSI2_NAME,
- .of_match_table = of_match_ptr(sun6i_mipi_csi2_of_match),
+ .of_match_table = sun6i_mipi_csi2_of_match,
.pm = &sun6i_mipi_csi2_pm_ops,
},
};
diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c
index 23d32e198aaa..14a1844812c0 100644
--- a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c
+++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c
@@ -8,7 +8,6 @@
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -445,7 +444,7 @@ static const struct media_entity_operations sun8i_a83t_mipi_csi2_entity_ops = {
static int
sun8i_a83t_mipi_csi2_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *remote_subdev,
- struct v4l2_async_subdev *async_subdev)
+ struct v4l2_async_connection *async_subdev)
{
struct v4l2_subdev *subdev = notifier->sd;
struct sun8i_a83t_mipi_csi2_device *csi2_dev =
@@ -499,7 +498,7 @@ sun8i_a83t_mipi_csi2_bridge_source_setup(struct sun8i_a83t_mipi_csi2_device *csi
{
struct v4l2_async_notifier *notifier = &csi2_dev->bridge.notifier;
struct v4l2_fwnode_endpoint *endpoint = &csi2_dev->bridge.endpoint;
- struct v4l2_async_subdev *subdev_async;
+ struct v4l2_async_connection *subdev_async;
struct fwnode_handle *handle;
struct device *dev = csi2_dev->dev;
int ret;
@@ -517,7 +516,7 @@ sun8i_a83t_mipi_csi2_bridge_source_setup(struct sun8i_a83t_mipi_csi2_device *csi
subdev_async =
v4l2_async_nf_add_fwnode_remote(notifier, handle,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
if (IS_ERR(subdev_async))
ret = PTR_ERR(subdev_async);
@@ -569,7 +568,7 @@ sun8i_a83t_mipi_csi2_bridge_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
/* V4L2 Async */
- v4l2_async_nf_init(notifier);
+ v4l2_async_subdev_nf_init(notifier, subdev);
notifier->ops = &sun8i_a83t_mipi_csi2_notifier_ops;
ret = sun8i_a83t_mipi_csi2_bridge_source_setup(csi2_dev);
@@ -578,7 +577,7 @@ sun8i_a83t_mipi_csi2_bridge_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
/* Only register the notifier when a sensor is connected. */
if (ret != -ENODEV) {
- ret = v4l2_async_subdev_nf_register(subdev, notifier);
+ ret = v4l2_async_nf_register(notifier);
if (ret < 0)
goto error_v4l2_notifier_cleanup;
@@ -824,7 +823,7 @@ static struct platform_driver sun8i_a83t_mipi_csi2_platform_driver = {
.remove_new = sun8i_a83t_mipi_csi2_remove,
.driver = {
.name = SUN8I_A83T_MIPI_CSI2_NAME,
- .of_match_table = of_match_ptr(sun8i_a83t_mipi_csi2_of_match),
+ .of_match_table = sun8i_a83t_mipi_csi2_of_match,
.pm = &sun8i_a83t_mipi_csi2_pm_ops,
},
};
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
index e4b0fd793f55..90ab1d77b6a5 100644
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -11,9 +11,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
diff --git a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
index bd0c4257bbff..0b025ec91826 100644
--- a/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
+++ b/drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
@@ -9,9 +9,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
diff --git a/drivers/media/platform/ti/am437x/am437x-vpfe.c b/drivers/media/platform/ti/am437x/am437x-vpfe.c
index ffe1887cc429..63092013d476 100644
--- a/drivers/media/platform/ti/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/ti/am437x/am437x-vpfe.c
@@ -2144,7 +2144,7 @@ static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
static int
vpfe_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct vpfe_device *vpfe = container_of(notifier->v4l2_dev,
struct vpfe_device, v4l2_dev);
@@ -2300,7 +2300,7 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
dev_dbg(dev, "vpfe_get_pdata\n");
- v4l2_async_nf_init(&vpfe->notifier);
+ v4l2_async_nf_init(&vpfe->notifier, &vpfe->v4l2_dev);
if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
return dev->platform_data;
@@ -2370,8 +2370,7 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
pdata->asd[i] = v4l2_async_nf_add_fwnode(&vpfe->notifier,
of_fwnode_handle(rem),
- struct
- v4l2_async_subdev);
+ struct v4l2_async_connection);
of_node_put(rem);
if (IS_ERR(pdata->asd[i]))
goto cleanup;
@@ -2404,10 +2403,17 @@ static int vpfe_probe(struct platform_device *pdev)
vpfe->pdev = &pdev->dev;
+ ret = v4l2_device_register(&pdev->dev, &vpfe->v4l2_dev);
+ if (ret) {
+ vpfe_err(vpfe, "Unable to register v4l2 device.\n");
+ return ret;
+ }
+
vpfe_cfg = vpfe_get_pdata(vpfe);
if (!vpfe_cfg) {
dev_err(&pdev->dev, "No platform data\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto probe_out_cleanup;
}
vpfe->cfg = vpfe_cfg;
@@ -2420,10 +2426,8 @@ static int vpfe_probe(struct platform_device *pdev)
}
ret = platform_get_irq(pdev, 0);
- if (ret <= 0) {
- ret = -ENODEV;
+ if (ret < 0)
goto probe_out_cleanup;
- }
vpfe->irq = ret;
ret = devm_request_irq(vpfe->pdev, vpfe->irq, vpfe_isr, 0,
@@ -2434,13 +2438,6 @@ static int vpfe_probe(struct platform_device *pdev)
goto probe_out_cleanup;
}
- ret = v4l2_device_register(&pdev->dev, &vpfe->v4l2_dev);
- if (ret) {
- vpfe_err(vpfe,
- "Unable to register v4l2 device.\n");
- goto probe_out_cleanup;
- }
-
/* set the driver data in platform device */
platform_set_drvdata(pdev, vpfe);
/* Enabling module functional clock */
@@ -2450,7 +2447,7 @@ static int vpfe_probe(struct platform_device *pdev)
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
vpfe_err(vpfe, "Unable to resume device.\n");
- goto probe_out_v4l2_unregister;
+ goto probe_out_cleanup;
}
vpfe_ccdc_config_defaults(ccdc);
@@ -2463,23 +2460,22 @@ static int vpfe_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!vpfe->sd) {
ret = -ENOMEM;
- goto probe_out_v4l2_unregister;
+ goto probe_out_cleanup;
}
vpfe->notifier.ops = &vpfe_async_ops;
- ret = v4l2_async_nf_register(&vpfe->v4l2_dev, &vpfe->notifier);
+ ret = v4l2_async_nf_register(&vpfe->notifier);
if (ret) {
vpfe_err(vpfe, "Error registering async notifier\n");
ret = -EINVAL;
- goto probe_out_v4l2_unregister;
+ goto probe_out_cleanup;
}
return 0;
-probe_out_v4l2_unregister:
- v4l2_device_unregister(&vpfe->v4l2_dev);
probe_out_cleanup:
v4l2_async_nf_cleanup(&vpfe->notifier);
+ v4l2_device_unregister(&vpfe->v4l2_dev);
return ret;
}
@@ -2494,8 +2490,8 @@ static void vpfe_remove(struct platform_device *pdev)
v4l2_async_nf_unregister(&vpfe->notifier);
v4l2_async_nf_cleanup(&vpfe->notifier);
- v4l2_device_unregister(&vpfe->v4l2_dev);
video_unregister_device(&vpfe->video_dev);
+ v4l2_device_unregister(&vpfe->v4l2_dev);
}
#ifdef CONFIG_PM_SLEEP
@@ -2630,7 +2626,7 @@ static struct platform_driver vpfe_driver = {
.driver = {
.name = VPFE_MODULE_NAME,
.pm = &vpfe_pm_ops,
- .of_match_table = of_match_ptr(vpfe_of_match),
+ .of_match_table = vpfe_of_match,
},
};
diff --git a/drivers/media/platform/ti/am437x/am437x-vpfe.h b/drivers/media/platform/ti/am437x/am437x-vpfe.h
index f8b4e917b91a..50c3c793b370 100644
--- a/drivers/media/platform/ti/am437x/am437x-vpfe.h
+++ b/drivers/media/platform/ti/am437x/am437x-vpfe.h
@@ -84,7 +84,7 @@ struct vpfe_config {
/* information about each subdev */
struct vpfe_subdev_info sub_devs[VPFE_MAX_SUBDEV];
/* Flat array, arranged in groups */
- struct v4l2_async_subdev *asd[VPFE_MAX_SUBDEV];
+ struct v4l2_async_connection *asd[VPFE_MAX_SUBDEV];
};
struct vpfe_cap_buffer {
diff --git a/drivers/media/platform/ti/cal/cal-camerarx.c b/drivers/media/platform/ti/cal/cal-camerarx.c
index 16ae52879a79..1a4273bbe752 100644
--- a/drivers/media/platform/ti/cal/cal-camerarx.c
+++ b/drivers/media/platform/ti/cal/cal-camerarx.c
@@ -50,10 +50,16 @@ static s64 cal_camerarx_get_ext_link_freq(struct cal_camerarx *phy)
struct v4l2_mbus_config_mipi_csi2 *mipi_csi2 = &phy->endpoint.bus.mipi_csi2;
u32 num_lanes = mipi_csi2->num_data_lanes;
const struct cal_format_info *fmtinfo;
+ struct v4l2_subdev_state *state;
+ struct v4l2_mbus_framefmt *fmt;
u32 bpp;
s64 freq;
- fmtinfo = cal_format_by_code(phy->formats[CAL_CAMERARX_PAD_SINK].code);
+ state = v4l2_subdev_get_locked_active_state(&phy->subdev);
+
+ fmt = v4l2_subdev_get_pad_format(&phy->subdev, state, CAL_CAMERARX_PAD_SINK);
+
+ fmtinfo = cal_format_by_code(fmt->code);
if (!fmtinfo)
return -EINVAL;
@@ -583,33 +589,6 @@ done:
return ret;
}
-int cal_camerarx_get_remote_frame_desc(struct cal_camerarx *phy,
- struct v4l2_mbus_frame_desc *desc)
-{
- struct media_pad *pad;
- int ret;
-
- if (!phy->source)
- return -EPIPE;
-
- pad = media_pad_remote_pad_first(&phy->pads[CAL_CAMERARX_PAD_SINK]);
- if (!pad)
- return -EPIPE;
-
- ret = v4l2_subdev_call(phy->source, pad, get_frame_desc, pad->index,
- desc);
- if (ret)
- return ret;
-
- if (desc->type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
- dev_err(phy->cal->dev,
- "Frame descriptor does not describe CSI-2 link");
- return -EINVAL;
- }
-
- return 0;
-}
-
/* ------------------------------------------------------------------
* V4L2 Subdev Operations
* ------------------------------------------------------------------
@@ -620,34 +599,20 @@ static inline struct cal_camerarx *to_cal_camerarx(struct v4l2_subdev *sd)
return container_of(sd, struct cal_camerarx, subdev);
}
-static struct v4l2_mbus_framefmt *
-cal_camerarx_get_pad_format(struct cal_camerarx *phy,
- struct v4l2_subdev_state *state,
- unsigned int pad, u32 which)
-{
- switch (which) {
- case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&phy->subdev, state, pad);
- case V4L2_SUBDEV_FORMAT_ACTIVE:
- return &phy->formats[pad];
- default:
- return NULL;
- }
-}
-
static int cal_camerarx_sd_s_stream(struct v4l2_subdev *sd, int enable)
{
struct cal_camerarx *phy = to_cal_camerarx(sd);
+ struct v4l2_subdev_state *state;
int ret = 0;
- mutex_lock(&phy->mutex);
+ state = v4l2_subdev_lock_and_get_active_state(sd);
if (enable)
ret = cal_camerarx_start(phy);
else
cal_camerarx_stop(phy);
- mutex_unlock(&phy->mutex);
+ v4l2_subdev_unlock_state(state);
return ret;
}
@@ -657,62 +622,44 @@ static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_mbus_code_enum *code)
{
struct cal_camerarx *phy = to_cal_camerarx(sd);
- int ret = 0;
-
- mutex_lock(&phy->mutex);
/* No transcoding, source and sink codes must match. */
if (cal_rx_pad_is_source(code->pad)) {
struct v4l2_mbus_framefmt *fmt;
- if (code->index > 0) {
- ret = -EINVAL;
- goto out;
- }
+ if (code->index > 0)
+ return -EINVAL;
- fmt = cal_camerarx_get_pad_format(phy, state,
- CAL_CAMERARX_PAD_SINK,
- code->which);
+ fmt = v4l2_subdev_get_pad_format(&phy->subdev, state,
+ CAL_CAMERARX_PAD_SINK);
code->code = fmt->code;
} else {
- if (code->index >= cal_num_formats) {
- ret = -EINVAL;
- goto out;
- }
+ if (code->index >= cal_num_formats)
+ return -EINVAL;
code->code = cal_formats[code->index].code;
}
-out:
- mutex_unlock(&phy->mutex);
-
- return ret;
+ return 0;
}
static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_size_enum *fse)
{
- struct cal_camerarx *phy = to_cal_camerarx(sd);
const struct cal_format_info *fmtinfo;
- int ret = 0;
if (fse->index > 0)
return -EINVAL;
- mutex_lock(&phy->mutex);
-
/* No transcoding, source and sink formats must match. */
if (cal_rx_pad_is_source(fse->pad)) {
struct v4l2_mbus_framefmt *fmt;
- fmt = cal_camerarx_get_pad_format(phy, state,
- CAL_CAMERARX_PAD_SINK,
- fse->which);
- if (fse->code != fmt->code) {
- ret = -EINVAL;
- goto out;
- }
+ fmt = v4l2_subdev_get_pad_format(sd, state,
+ CAL_CAMERARX_PAD_SINK);
+ if (fse->code != fmt->code)
+ return -EINVAL;
fse->min_width = fmt->width;
fse->max_width = fmt->width;
@@ -720,10 +667,8 @@ static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd,
fse->max_height = fmt->height;
} else {
fmtinfo = cal_format_by_code(fse->code);
- if (!fmtinfo) {
- ret = -EINVAL;
- goto out;
- }
+ if (!fmtinfo)
+ return -EINVAL;
fse->min_width = CAL_MIN_WIDTH_BYTES * 8 / ALIGN(fmtinfo->bpp, 8);
fse->max_width = CAL_MAX_WIDTH_BYTES * 8 / ALIGN(fmtinfo->bpp, 8);
@@ -731,27 +676,6 @@ static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd,
fse->max_height = CAL_MAX_HEIGHT_LINES;
}
-out:
- mutex_unlock(&phy->mutex);
-
- return ret;
-}
-
-static int cal_camerarx_sd_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state,
- struct v4l2_subdev_format *format)
-{
- struct cal_camerarx *phy = to_cal_camerarx(sd);
- struct v4l2_mbus_framefmt *fmt;
-
- mutex_lock(&phy->mutex);
-
- fmt = cal_camerarx_get_pad_format(phy, state, format->pad,
- format->which);
- format->format = *fmt;
-
- mutex_unlock(&phy->mutex);
-
return 0;
}
@@ -759,14 +683,13 @@ static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
- struct cal_camerarx *phy = to_cal_camerarx(sd);
const struct cal_format_info *fmtinfo;
struct v4l2_mbus_framefmt *fmt;
unsigned int bpp;
/* No transcoding, source and sink formats must match. */
if (cal_rx_pad_is_source(format->pad))
- return cal_camerarx_sd_get_fmt(sd, state, format);
+ return v4l2_subdev_get_fmt(sd, state, format);
/*
* Default to the first format if the requested media bus code isn't
@@ -790,20 +713,13 @@ static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
/* Store the format and propagate it to the source pad. */
- mutex_lock(&phy->mutex);
-
- fmt = cal_camerarx_get_pad_format(phy, state,
- CAL_CAMERARX_PAD_SINK,
- format->which);
+ fmt = v4l2_subdev_get_pad_format(sd, state, CAL_CAMERARX_PAD_SINK);
*fmt = format->format;
- fmt = cal_camerarx_get_pad_format(phy, state,
- CAL_CAMERARX_PAD_FIRST_SOURCE,
- format->which);
+ fmt = v4l2_subdev_get_pad_format(sd, state,
+ CAL_CAMERARX_PAD_FIRST_SOURCE);
*fmt = format->format;
- mutex_unlock(&phy->mutex);
-
return 0;
}
@@ -817,7 +733,7 @@ static int cal_camerarx_sd_init_cfg(struct v4l2_subdev *sd,
.format = {
.width = 640,
.height = 480,
- .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
.field = V4L2_FIELD_NONE,
.colorspace = V4L2_COLORSPACE_SRGB,
.ycbcr_enc = V4L2_YCBCR_ENC_601,
@@ -829,6 +745,40 @@ static int cal_camerarx_sd_init_cfg(struct v4l2_subdev *sd,
return cal_camerarx_sd_set_fmt(sd, state, &format);
}
+static int cal_camerarx_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct cal_camerarx *phy = to_cal_camerarx(sd);
+ struct v4l2_mbus_frame_desc remote_desc;
+ const struct media_pad *remote_pad;
+ int ret;
+
+ remote_pad = media_pad_remote_pad_first(&phy->pads[CAL_CAMERARX_PAD_SINK]);
+ if (!remote_pad)
+ return -EPIPE;
+
+ ret = v4l2_subdev_call(phy->source, pad, get_frame_desc,
+ remote_pad->index, &remote_desc);
+ if (ret)
+ return ret;
+
+ if (remote_desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) {
+ cal_err(phy->cal,
+ "Frame descriptor does not describe CSI-2 link");
+ return -EINVAL;
+ }
+
+ if (remote_desc.num_entries > 1)
+ cal_err(phy->cal,
+ "Multiple streams not supported in remote frame descriptor, using the first one\n");
+
+ fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
+ fd->num_entries = 1;
+ fd->entry[0] = remote_desc.entry[0];
+
+ return 0;
+}
+
static const struct v4l2_subdev_video_ops cal_camerarx_video_ops = {
.s_stream = cal_camerarx_sd_s_stream,
};
@@ -837,8 +787,9 @@ static const struct v4l2_subdev_pad_ops cal_camerarx_pad_ops = {
.init_cfg = cal_camerarx_sd_init_cfg,
.enum_mbus_code = cal_camerarx_sd_enum_mbus_code,
.enum_frame_size = cal_camerarx_sd_enum_frame_size,
- .get_fmt = cal_camerarx_sd_get_fmt,
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = cal_camerarx_sd_set_fmt,
+ .get_frame_desc = cal_camerarx_get_frame_desc,
};
static const struct v4l2_subdev_ops cal_camerarx_subdev_ops = {
@@ -864,7 +815,7 @@ struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
unsigned int i;
int ret;
- phy = kzalloc(sizeof(*phy), GFP_KERNEL);
+ phy = devm_kzalloc(cal->dev, sizeof(*phy), GFP_KERNEL);
if (!phy)
return ERR_PTR(-ENOMEM);
@@ -872,7 +823,6 @@ struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
phy->instance = instance;
spin_lock_init(&phy->vc_lock);
- mutex_init(&phy->mutex);
phy->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
(instance == 0) ?
@@ -881,8 +831,7 @@ struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
phy->base = devm_ioremap_resource(cal->dev, phy->res);
if (IS_ERR(phy->base)) {
cal_err(cal, "failed to ioremap\n");
- ret = PTR_ERR(phy->base);
- goto error;
+ return ERR_CAST(phy->base);
}
cal_dbg(1, cal, "ioresource %s at %pa - %pa\n",
@@ -890,11 +839,11 @@ struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
ret = cal_camerarx_regmap_init(cal, phy);
if (ret)
- goto error;
+ return ERR_PTR(ret);
ret = cal_camerarx_parse_dt(phy);
if (ret)
- goto error;
+ return ERR_PTR(ret);
/* Initialize the V4L2 subdev and media entity. */
sd = &phy->subdev;
@@ -911,21 +860,25 @@ struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(phy->pads),
phy->pads);
if (ret)
- goto error;
+ goto err_node_put;
- ret = cal_camerarx_sd_init_cfg(sd, NULL);
+ ret = v4l2_subdev_init_finalize(sd);
if (ret)
- goto error;
+ goto err_entity_cleanup;
ret = v4l2_device_register_subdev(&cal->v4l2_dev, sd);
if (ret)
- goto error;
+ goto err_free_state;
return phy;
-error:
+err_free_state:
+ v4l2_subdev_cleanup(sd);
+err_entity_cleanup:
media_entity_cleanup(&phy->subdev.entity);
- kfree(phy);
+err_node_put:
+ of_node_put(phy->source_ep_node);
+ of_node_put(phy->source_node);
return ERR_PTR(ret);
}
@@ -935,9 +888,8 @@ void cal_camerarx_destroy(struct cal_camerarx *phy)
return;
v4l2_device_unregister_subdev(&phy->subdev);
+ v4l2_subdev_cleanup(&phy->subdev);
media_entity_cleanup(&phy->subdev.entity);
of_node_put(phy->source_ep_node);
of_node_put(phy->source_node);
- mutex_destroy(&phy->mutex);
- kfree(phy);
}
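
The CAL receiver conversion above replaces the driver's formats[] array and mutex with the sub-device active state: v4l2_subdev_init_finalize() allocates the state, v4l2_subdev_lock_and_get_active_state() locks it around operations outside the pad-op path, and v4l2_subdev_get_pad_format() reads the per-pad format from it (get_fmt can then simply point to v4l2_subdev_get_fmt). A hedged sketch of an s_stream handler in that style, with foo_start()/foo_stop() as stand-ins:

#include <media/v4l2-subdev.h>

static int foo_start(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *fmt);
static void foo_stop(struct v4l2_subdev *sd);

/* Illustrative s_stream using the active state instead of a driver mutex. */
static int foo_sd_s_stream(struct v4l2_subdev *sd, int enable)
{
        struct v4l2_subdev_state *state;
        struct v4l2_mbus_framefmt *fmt;
        int ret = 0;

        state = v4l2_subdev_lock_and_get_active_state(sd);

        /* Per-pad formats now live in the state, not in the driver. */
        fmt = v4l2_subdev_get_pad_format(sd, state, 0);

        if (enable)
                ret = foo_start(sd, fmt);
        else
                foo_stop(sd);

        v4l2_subdev_unlock_state(state);

        return ret;
}
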
diff --git a/drivers/media/platform/ti/cal/cal-video.c b/drivers/media/platform/ti/cal/cal-video.c
index ca906a9e4222..a8abcd0fee17 100644
--- a/drivers/media/platform/ti/cal/cal-video.c
+++ b/drivers/media/platform/ti/cal/cal-video.c
@@ -687,21 +687,34 @@ static void cal_release_buffers(struct cal_ctx *ctx,
static int cal_video_check_format(struct cal_ctx *ctx)
{
const struct v4l2_mbus_framefmt *format;
+ struct v4l2_subdev_state *state;
struct media_pad *remote_pad;
+ int ret = 0;
remote_pad = media_pad_remote_pad_first(&ctx->pad);
if (!remote_pad)
return -ENODEV;
- format = &ctx->phy->formats[remote_pad->index];
+ state = v4l2_subdev_lock_and_get_active_state(&ctx->phy->subdev);
+
+ format = v4l2_subdev_get_pad_format(&ctx->phy->subdev, state, remote_pad->index);
+ if (!format) {
+ ret = -EINVAL;
+ goto out;
+ }
if (ctx->fmtinfo->code != format->code ||
ctx->v_fmt.fmt.pix.height != format->height ||
ctx->v_fmt.fmt.pix.width != format->width ||
- ctx->v_fmt.fmt.pix.field != format->field)
- return -EPIPE;
+ ctx->v_fmt.fmt.pix.field != format->field) {
+ ret = -EPIPE;
+ goto out;
+ }
- return 0;
+out:
+ v4l2_subdev_unlock_state(state);
+
+ return ret;
}
static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
@@ -894,7 +907,7 @@ static int cal_ctx_v4l2_init_mc_format(struct cal_ctx *ctx)
const struct cal_format_info *fmtinfo;
struct v4l2_pix_format *pix_fmt = &ctx->v_fmt.fmt.pix;
- fmtinfo = cal_format_by_code(MEDIA_BUS_FMT_UYVY8_2X8);
+ fmtinfo = cal_format_by_code(MEDIA_BUS_FMT_UYVY8_1X16);
if (!fmtinfo)
return -EINVAL;
diff --git a/drivers/media/platform/ti/cal/cal.c b/drivers/media/platform/ti/cal/cal.c
index 9c5105223d6b..528909ae4bd6 100644
--- a/drivers/media/platform/ti/cal/cal.c
+++ b/drivers/media/platform/ti/cal/cal.c
@@ -13,7 +13,7 @@
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -61,49 +61,25 @@ MODULE_PARM_DESC(mc_api, "activates the MC API");
const struct cal_format_info cal_formats[] = {
{
.fourcc = V4L2_PIX_FMT_YUYV,
- .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .code = MEDIA_BUS_FMT_YUYV8_1X16,
.bpp = 16,
}, {
.fourcc = V4L2_PIX_FMT_UYVY,
- .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .code = MEDIA_BUS_FMT_UYVY8_1X16,
.bpp = 16,
}, {
.fourcc = V4L2_PIX_FMT_YVYU,
- .code = MEDIA_BUS_FMT_YVYU8_2X8,
+ .code = MEDIA_BUS_FMT_YVYU8_1X16,
.bpp = 16,
}, {
.fourcc = V4L2_PIX_FMT_VYUY,
- .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .code = MEDIA_BUS_FMT_VYUY8_1X16,
.bpp = 16,
}, {
- .fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
- .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .code = MEDIA_BUS_FMT_RGB565_1X16,
.bpp = 16,
}, {
- .fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
- .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
- .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
- .code = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
- .bpp = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB24, /* rgb */
- .code = MEDIA_BUS_FMT_RGB888_2X12_LE,
- .bpp = 24,
- }, {
- .fourcc = V4L2_PIX_FMT_BGR24, /* bgr */
- .code = MEDIA_BUS_FMT_RGB888_2X12_BE,
- .bpp = 24,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB32, /* argb */
- .code = MEDIA_BUS_FMT_ARGB8888_1X32,
- .bpp = 32,
- }, {
.fourcc = V4L2_PIX_FMT_SBGGR8,
.code = MEDIA_BUS_FMT_SBGGR8_1X8,
.bpp = 8,
@@ -470,30 +446,24 @@ static bool cal_ctx_wr_dma_stopped(struct cal_ctx *ctx)
}
static int
-cal_get_remote_frame_desc_entry(struct cal_camerarx *phy,
+cal_get_remote_frame_desc_entry(struct cal_ctx *ctx,
struct v4l2_mbus_frame_desc_entry *entry)
{
struct v4l2_mbus_frame_desc fd;
+ struct media_pad *phy_source_pad;
int ret;
- ret = cal_camerarx_get_remote_frame_desc(phy, &fd);
- if (ret) {
- if (ret != -ENOIOCTLCMD)
- dev_err(phy->cal->dev,
- "Failed to get remote frame desc: %d\n", ret);
- return ret;
- }
-
- if (fd.num_entries == 0) {
- dev_err(phy->cal->dev,
- "No streams found in the remote frame descriptor\n");
-
+ phy_source_pad = media_pad_remote_pad_first(&ctx->pad);
+ if (!phy_source_pad)
return -ENODEV;
- }
- if (fd.num_entries > 1)
- dev_dbg(phy->cal->dev,
- "Multiple streams not supported in remote frame descriptor, using the first one\n");
+ ret = v4l2_subdev_call(&ctx->phy->subdev, pad, get_frame_desc,
+ phy_source_pad->index, &fd);
+ if (ret)
+ return ret;
+
+ if (fd.num_entries != 1)
+ return -EINVAL;
*entry = fd.entry[0];
@@ -505,7 +475,7 @@ int cal_ctx_prepare(struct cal_ctx *ctx)
struct v4l2_mbus_frame_desc_entry entry;
int ret;
- ret = cal_get_remote_frame_desc_entry(ctx->phy, &entry);
+ ret = cal_get_remote_frame_desc_entry(ctx, &entry);
if (ret == -ENOIOCTLCMD) {
ctx->vc = 0;
@@ -804,19 +774,19 @@ static irqreturn_t cal_irq(int irq_cal, void *data)
*/
struct cal_v4l2_async_subdev {
- struct v4l2_async_subdev asd; /* Must be first */
+ struct v4l2_async_connection asd; /* Must be first */
struct cal_camerarx *phy;
};
static inline struct cal_v4l2_async_subdev *
-to_cal_asd(struct v4l2_async_subdev *asd)
+to_cal_asd(struct v4l2_async_connection *asd)
{
return container_of(asd, struct cal_v4l2_async_subdev, asd);
}
static int cal_async_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct cal_camerarx *phy = to_cal_asd(asd)->phy;
int pad;
@@ -895,7 +865,7 @@ static int cal_async_notifier_register(struct cal_dev *cal)
unsigned int i;
int ret;
- v4l2_async_nf_init(&cal->notifier);
+ v4l2_async_nf_init(&cal->notifier, &cal->v4l2_dev);
cal->notifier.ops = &cal_async_notifier_ops;
for (i = 0; i < cal->data->num_csi2_phy; ++i) {
@@ -919,7 +889,7 @@ static int cal_async_notifier_register(struct cal_dev *cal)
casd->phy = phy;
}
- ret = v4l2_async_nf_register(&cal->v4l2_dev, &cal->notifier);
+ ret = v4l2_async_nf_register(&cal->notifier);
if (ret) {
cal_err(cal, "Error registering async notifier\n");
goto error;
diff --git a/drivers/media/platform/ti/cal/cal.h b/drivers/media/platform/ti/cal/cal.h
index de73d6d21b6f..0856297adc0b 100644
--- a/drivers/media/platform/ti/cal/cal.h
+++ b/drivers/media/platform/ti/cal/cal.h
@@ -177,7 +177,6 @@ struct cal_camerarx {
struct v4l2_subdev subdev;
struct media_pad pads[CAL_CAMERARX_NUM_PADS];
- struct v4l2_mbus_framefmt formats[CAL_CAMERARX_NUM_PADS];
/* protects the vc_* fields below */
spinlock_t vc_lock;
@@ -185,13 +184,6 @@ struct cal_camerarx {
u16 vc_frame_number[4];
u32 vc_sequence[4];
- /*
- * Lock for camerarx ops. Protects:
- * - formats
- * - enable_count
- */
- struct mutex mutex;
-
unsigned int enable_count;
};
@@ -327,8 +319,6 @@ const struct cal_format_info *cal_format_by_code(u32 code);
void cal_quickdump_regs(struct cal_dev *cal);
-int cal_camerarx_get_remote_frame_desc(struct cal_camerarx *phy,
- struct v4l2_mbus_frame_desc *desc);
void cal_camerarx_disable(struct cal_camerarx *phy);
void cal_camerarx_i913_errata(struct cal_camerarx *phy);
struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
diff --git a/drivers/media/platform/ti/davinci/vpif_capture.c b/drivers/media/platform/ti/davinci/vpif_capture.c
index 44d269d6038c..99fae8830c41 100644
--- a/drivers/media/platform/ti/davinci/vpif_capture.c
+++ b/drivers/media/platform/ti/davinci/vpif_capture.c
@@ -1363,12 +1363,12 @@ static inline void free_vpif_objs(void)
static int vpif_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
int i;
for (i = 0; i < vpif_obj.config->asd_sizes[0]; i++) {
- struct v4l2_async_subdev *_asd = vpif_obj.config->asd[i];
+ struct v4l2_async_connection *_asd = vpif_obj.config->asd[i];
const struct fwnode_handle *fwnode = _asd->match.fwnode;
if (fwnode == subdev->fwnode) {
@@ -1483,7 +1483,8 @@ static const struct v4l2_async_notifier_operations vpif_async_ops = {
};
static struct vpif_capture_config *
-vpif_capture_get_pdata(struct platform_device *pdev)
+vpif_capture_get_pdata(struct platform_device *pdev,
+ struct v4l2_device *v4l2_dev)
{
struct device_node *endpoint = NULL;
struct device_node *rem = NULL;
@@ -1492,7 +1493,7 @@ vpif_capture_get_pdata(struct platform_device *pdev)
struct vpif_capture_chan_config *chan;
unsigned int i;
- v4l2_async_nf_init(&vpif_obj.notifier);
+ v4l2_async_nf_init(&vpif_obj.notifier, v4l2_dev);
/*
* DT boot: OF node from parent device contains
@@ -1570,8 +1571,7 @@ vpif_capture_get_pdata(struct platform_device *pdev)
pdata->asd[i] = v4l2_async_nf_add_fwnode(&vpif_obj.notifier,
of_fwnode_handle(rem),
- struct
- v4l2_async_subdev);
+ struct v4l2_async_connection);
if (IS_ERR(pdata->asd[i]))
goto err_cleanup;
@@ -1609,18 +1609,12 @@ static __init int vpif_probe(struct platform_device *pdev)
int res_idx = 0;
int i, err;
- pdev->dev.platform_data = vpif_capture_get_pdata(pdev);
- if (!pdev->dev.platform_data) {
- dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
- return -EINVAL;
- }
-
vpif_dev = &pdev->dev;
err = initialize_vpif();
if (err) {
v4l2_err(vpif_dev->driver, "Error initializing vpif\n");
- goto cleanup;
+ return err;
}
err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
@@ -1647,13 +1641,21 @@ static __init int vpif_probe(struct platform_device *pdev)
goto vpif_unregister;
} while (++res_idx);
+ pdev->dev.platform_data =
+ vpif_capture_get_pdata(pdev, &vpif_obj.v4l2_dev);
+ if (!pdev->dev.platform_data) {
+ err = -EINVAL;
+ dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
+ goto vpif_unregister;
+ }
+
vpif_obj.config = pdev->dev.platform_data;
subdev_count = vpif_obj.config->subdev_count;
vpif_obj.sd = kcalloc(subdev_count, sizeof(*vpif_obj.sd), GFP_KERNEL);
if (!vpif_obj.sd) {
err = -ENOMEM;
- goto vpif_unregister;
+ goto probe_subdev_out;
}
if (!vpif_obj.config->asd_sizes[0]) {
@@ -1684,8 +1686,7 @@ static __init int vpif_probe(struct platform_device *pdev)
goto probe_subdev_out;
} else {
vpif_obj.notifier.ops = &vpif_async_ops;
- err = v4l2_async_nf_register(&vpif_obj.v4l2_dev,
- &vpif_obj.notifier);
+ err = v4l2_async_nf_register(&vpif_obj.notifier);
if (err) {
vpif_err("Error registering async notifier\n");
err = -EINVAL;
@@ -1696,14 +1697,13 @@ static __init int vpif_probe(struct platform_device *pdev)
return 0;
probe_subdev_out:
+ v4l2_async_nf_cleanup(&vpif_obj.notifier);
/* free sub devices memory */
kfree(vpif_obj.sd);
vpif_unregister:
v4l2_device_unregister(&vpif_obj.v4l2_dev);
vpif_free:
free_vpif_objs();
-cleanup:
- v4l2_async_nf_cleanup(&vpif_obj.notifier);
return err;
}
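
The vpif_capture conversion above follows the reworked v4l2-async registration order: the notifier is initialized against the struct v4l2_device, connections are added as struct v4l2_async_connection, and registration takes only the notifier. A minimal probe-time sketch of that order follows; struct my_drv and my_notifier_ops are illustrative names, not part of the patch.

	#include <linux/err.h>
	#include <linux/fwnode.h>
	#include <media/v4l2-async.h>
	#include <media/v4l2-device.h>

	/* Hypothetical driver state, for illustration only. */
	struct my_drv {
		struct v4l2_device v4l2_dev;
		struct v4l2_async_notifier notifier;
	};

	extern const struct v4l2_async_notifier_operations my_notifier_ops;

	static int my_drv_register_notifier(struct my_drv *drv,
					    struct fwnode_handle *remote)
	{
		struct v4l2_async_connection *asc;
		int ret;

		/* The notifier is now bound to the v4l2_device at init time. */
		v4l2_async_nf_init(&drv->notifier, &drv->v4l2_dev);

		/* Connections use struct v4l2_async_connection, not _subdev. */
		asc = v4l2_async_nf_add_fwnode(&drv->notifier, remote,
					       struct v4l2_async_connection);
		if (IS_ERR(asc)) {
			ret = PTR_ERR(asc);
			goto err_cleanup;
		}

		drv->notifier.ops = &my_notifier_ops;

		/* Registration no longer takes the v4l2_device argument. */
		ret = v4l2_async_nf_register(&drv->notifier);
		if (ret)
			goto err_cleanup;

		return 0;

	err_cleanup:
		v4l2_async_nf_cleanup(&drv->notifier);
		return ret;
	}
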
diff --git a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c
index f3aaa9e76492..1cda23244c7b 100644
--- a/drivers/media/platform/ti/omap3isp/isp.c
+++ b/drivers/media/platform/ti/omap3isp/isp.c
@@ -2002,6 +2002,7 @@ static void isp_remove(struct platform_device *pdev)
struct isp_device *isp = platform_get_drvdata(pdev);
v4l2_async_nf_unregister(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
isp_unregister_entities(isp);
isp_cleanup_modules(isp);
isp_xclk_cleanup(isp);
@@ -2011,7 +2012,6 @@ static void isp_remove(struct platform_device *pdev)
__omap3isp_put(isp, false);
media_entity_enum_cleanup(&isp->crashed);
- v4l2_async_nf_cleanup(&isp->notifier);
kfree(isp);
}
@@ -2022,35 +2022,34 @@ enum isp_of_phy {
ISP_OF_PHY_CSIPHY2,
};
-static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
+static int isp_subdev_notifier_bound(struct v4l2_async_notifier *async,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_connection *asc)
{
struct isp_device *isp = container_of(async, struct isp_device,
notifier);
- struct v4l2_device *v4l2_dev = &isp->v4l2_dev;
- struct v4l2_subdev *sd;
+ struct isp_bus_cfg *bus_cfg =
+ &container_of(asc, struct isp_async_subdev, asd)->bus;
int ret;
mutex_lock(&isp->media_dev.graph_mutex);
+ ret = isp_link_entity(isp, &sd->entity, bus_cfg->interface);
+ mutex_unlock(&isp->media_dev.graph_mutex);
- ret = media_entity_enum_init(&isp->crashed, &isp->media_dev);
- if (ret) {
- mutex_unlock(&isp->media_dev.graph_mutex);
- return ret;
- }
-
- list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
- if (sd->notifier != &isp->notifier)
- continue;
+ return ret;
+}
- ret = isp_link_entity(isp, &sd->entity,
- v4l2_subdev_to_bus_cfg(sd)->interface);
- if (ret < 0) {
- mutex_unlock(&isp->media_dev.graph_mutex);
- return ret;
- }
- }
+static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
+{
+ struct isp_device *isp = container_of(async, struct isp_device,
+ notifier);
+ int ret;
+ mutex_lock(&isp->media_dev.graph_mutex);
+ ret = media_entity_enum_init(&isp->crashed, &isp->media_dev);
mutex_unlock(&isp->media_dev.graph_mutex);
+ if (ret)
+ return ret;
ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
if (ret < 0)
@@ -2240,6 +2239,7 @@ static int isp_parse_of_endpoints(struct isp_device *isp)
}
static const struct v4l2_async_notifier_operations isp_subdev_notifier_ops = {
+ .bound = isp_subdev_notifier_bound,
.complete = isp_subdev_notifier_complete,
};
@@ -2288,13 +2288,8 @@ static int isp_probe(struct platform_device *pdev)
mutex_init(&isp->isp_mutex);
spin_lock_init(&isp->stat_lock);
- v4l2_async_nf_init(&isp->notifier);
isp->dev = &pdev->dev;
- ret = isp_parse_of_endpoints(isp);
- if (ret < 0)
- goto error;
-
isp->ref_count = 0;
ret = dma_coerce_mask_and_coherent(isp->dev, DMA_BIT_MASK(32));
@@ -2329,9 +2324,8 @@ static int isp_probe(struct platform_device *pdev)
for (i = 0; i < 2; i++) {
unsigned int map_idx = i ? OMAP3_ISP_IOMEM_CSI2A_REGS1 : 0;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, i);
isp->mmio_base[map_idx] =
- devm_ioremap_resource(isp->dev, mem);
+ devm_platform_get_and_ioremap_resource(pdev, i, &mem);
if (IS_ERR(isp->mmio_base[map_idx])) {
ret = PTR_ERR(isp->mmio_base[map_idx]);
goto error;
@@ -2398,10 +2392,8 @@ static int isp_probe(struct platform_device *pdev)
/* Interrupt */
ret = platform_get_irq(pdev, 0);
- if (ret <= 0) {
- ret = -ENODEV;
+ if (ret < 0)
goto error_iommu;
- }
isp->irq_num = ret;
if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED,
@@ -2426,7 +2418,13 @@ static int isp_probe(struct platform_device *pdev)
isp->notifier.ops = &isp_subdev_notifier_ops;
- ret = v4l2_async_nf_register(&isp->v4l2_dev, &isp->notifier);
+ v4l2_async_nf_init(&isp->notifier, &isp->v4l2_dev);
+
+ ret = isp_parse_of_endpoints(isp);
+ if (ret < 0)
+ goto error_register_entities;
+
+ ret = v4l2_async_nf_register(&isp->notifier);
if (ret)
goto error_register_entities;
@@ -2436,6 +2434,7 @@ static int isp_probe(struct platform_device *pdev)
return 0;
error_register_entities:
+ v4l2_async_nf_cleanup(&isp->notifier);
isp_unregister_entities(isp);
error_modules:
isp_cleanup_modules(isp);
@@ -2445,7 +2444,6 @@ error_isp:
isp_xclk_cleanup(isp);
__omap3isp_put(isp, false);
error:
- v4l2_async_nf_cleanup(&isp->notifier);
mutex_destroy(&isp->isp_mutex);
error_release_isp:
kfree(isp);
diff --git a/drivers/media/platform/ti/omap3isp/isp.h b/drivers/media/platform/ti/omap3isp/isp.h
index a9d760fbf349..b4793631ad97 100644
--- a/drivers/media/platform/ti/omap3isp/isp.h
+++ b/drivers/media/platform/ti/omap3isp/isp.h
@@ -220,12 +220,21 @@ struct isp_device {
};
struct isp_async_subdev {
- struct v4l2_async_subdev asd;
+ struct v4l2_async_connection asd;
struct isp_bus_cfg bus;
};
-#define v4l2_subdev_to_bus_cfg(sd) \
- (&container_of((sd)->asd, struct isp_async_subdev, asd)->bus)
+static inline struct isp_bus_cfg *
+v4l2_subdev_to_bus_cfg(struct v4l2_subdev *sd)
+{
+ struct v4l2_async_connection *asc;
+
+ asc = v4l2_async_connection_unique(sd);
+ if (!asc)
+ return NULL;
+
+ return &container_of(asc, struct isp_async_subdev, asd)->bus;
+}
#define v4l2_dev_to_isp_device(dev) \
container_of(dev, struct isp_device, v4l2_dev)
diff --git a/drivers/media/platform/ti/omap3isp/ispccdc.c b/drivers/media/platform/ti/omap3isp/ispccdc.c
index fdcdffe5fecb..2fe42aa91800 100644
--- a/drivers/media/platform/ti/omap3isp/ispccdc.c
+++ b/drivers/media/platform/ti/omap3isp/ispccdc.c
@@ -1140,8 +1140,13 @@ static void ccdc_configure(struct isp_ccdc_device *ccdc)
if (ccdc->input == CCDC_INPUT_PARALLEL) {
struct v4l2_subdev *sd =
to_isp_pipeline(&ccdc->subdev.entity)->external;
+ struct isp_bus_cfg *bus_cfg;
- parcfg = &v4l2_subdev_to_bus_cfg(sd)->bus.parallel;
+ bus_cfg = v4l2_subdev_to_bus_cfg(sd);
+ if (WARN_ON(!bus_cfg))
+ return;
+
+ parcfg = &bus_cfg->bus.parallel;
ccdc->bt656 = parcfg->bt656;
}
@@ -2436,7 +2441,11 @@ static int ccdc_link_validate(struct v4l2_subdev *sd,
if (ccdc->input == CCDC_INPUT_PARALLEL) {
struct v4l2_subdev *sd =
media_entity_to_v4l2_subdev(link->source->entity);
- struct isp_bus_cfg *bus_cfg = v4l2_subdev_to_bus_cfg(sd);
+ struct isp_bus_cfg *bus_cfg;
+
+ bus_cfg = v4l2_subdev_to_bus_cfg(sd);
+ if (WARN_ON(!bus_cfg))
+ return -EPIPE;
parallel_shift = bus_cfg->bus.parallel.data_lane_shift;
} else {
diff --git a/drivers/media/platform/ti/omap3isp/ispccp2.c b/drivers/media/platform/ti/omap3isp/ispccp2.c
index fc90ff88464f..da5f0176ec78 100644
--- a/drivers/media/platform/ti/omap3isp/ispccp2.c
+++ b/drivers/media/platform/ti/omap3isp/ispccp2.c
@@ -360,6 +360,8 @@ static int ccp2_if_configure(struct isp_ccp2_device *ccp2)
pad = media_pad_remote_pad_first(&ccp2->pads[CCP2_PAD_SINK]);
sensor = media_entity_to_v4l2_subdev(pad->entity);
buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
+ if (WARN_ON(!buscfg))
+ return -EPIPE;
ret = ccp2_phyif_config(ccp2, &buscfg->bus.ccp2);
if (ret < 0)
diff --git a/drivers/media/platform/ti/omap3isp/ispcsi2.c b/drivers/media/platform/ti/omap3isp/ispcsi2.c
index 6870980a2fa9..0f9a54b11f98 100644
--- a/drivers/media/platform/ti/omap3isp/ispcsi2.c
+++ b/drivers/media/platform/ti/omap3isp/ispcsi2.c
@@ -564,6 +564,8 @@ static int csi2_configure(struct isp_csi2_device *csi2)
pad = media_pad_remote_pad_first(&csi2->pads[CSI2_PAD_SINK]);
sensor = media_entity_to_v4l2_subdev(pad->entity);
buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
+ if (WARN_ON(!buscfg))
+ return -EPIPE;
csi2->frame_skip = 0;
v4l2_subdev_call(sensor, sensor, g_skip_frames, &csi2->frame_skip);
diff --git a/drivers/media/platform/ti/omap3isp/ispcsiphy.c b/drivers/media/platform/ti/omap3isp/ispcsiphy.c
index 1bde76c0adbe..29a84d8ca0df 100644
--- a/drivers/media/platform/ti/omap3isp/ispcsiphy.c
+++ b/drivers/media/platform/ti/omap3isp/ispcsiphy.c
@@ -163,13 +163,17 @@ static int csiphy_set_power(struct isp_csiphy *phy, u32 power)
static int omap3isp_csiphy_config(struct isp_csiphy *phy)
{
struct isp_pipeline *pipe = to_isp_pipeline(phy->entity);
- struct isp_bus_cfg *buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
+ struct isp_bus_cfg *buscfg;
struct isp_csiphy_lanes_cfg *lanes;
int csi2_ddrclk_khz;
unsigned int num_data_lanes, used_lanes = 0;
unsigned int i;
u32 reg;
+ buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
+ if (WARN_ON(!buscfg))
+ return -EPIPE;
+
if (buscfg->interface == ISP_INTERFACE_CCP2B_PHY1
|| buscfg->interface == ISP_INTERFACE_CCP2B_PHY2) {
lanes = &buscfg->bus.ccp2.lanecfg;
@@ -306,8 +310,13 @@ void omap3isp_csiphy_release(struct isp_csiphy *phy)
mutex_lock(&phy->mutex);
if (phy->entity) {
struct isp_pipeline *pipe = to_isp_pipeline(phy->entity);
- struct isp_bus_cfg *buscfg =
- v4l2_subdev_to_bus_cfg(pipe->external);
+ struct isp_bus_cfg *buscfg;
+
+ buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
+ if (WARN_ON(!buscfg)) {
+ mutex_unlock(&phy->mutex);
+ return;
+ }
csiphy_routing_cfg(phy, buscfg->interface, false,
buscfg->bus.ccp2.phy_layer);
diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
index c0a368bacf88..423fc85d79ee 100644
--- a/drivers/media/platform/verisilicon/hantro_drv.c
+++ b/drivers/media/platform/verisilicon/hantro_drv.c
@@ -986,7 +986,6 @@ static int hantro_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct hantro_dev *vpu;
- struct resource *res;
int num_bases;
int i, ret;
@@ -1047,11 +1046,9 @@ static int hantro_probe(struct platform_device *pdev)
return -ENOMEM;
for (i = 0; i < num_bases; i++) {
- res = vpu->variant->reg_names ?
- platform_get_resource_byname(vpu->pdev, IORESOURCE_MEM,
- vpu->variant->reg_names[i]) :
- platform_get_resource(vpu->pdev, IORESOURCE_MEM, 0);
- vpu->reg_bases[i] = devm_ioremap_resource(vpu->dev, res);
+ vpu->reg_bases[i] = vpu->variant->reg_names ?
+ devm_platform_ioremap_resource_byname(pdev, vpu->variant->reg_names[i]) :
+ devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vpu->reg_bases[i]))
return PTR_ERR(vpu->reg_bases[i]);
}
@@ -1088,8 +1085,8 @@ static int hantro_probe(struct platform_device *pdev)
irq_name = "default";
irq = platform_get_irq(vpu->pdev, 0);
}
- if (irq <= 0)
- return -ENXIO;
+ if (irq < 0)
+ return irq;
ret = devm_request_irq(vpu->dev, irq,
vpu->variant->irqs[i].handler, 0,
@@ -1225,7 +1222,7 @@ static struct platform_driver hantro_driver = {
.remove_new = hantro_remove,
.driver = {
.name = DRIVER_NAME,
- .of_match_table = of_match_ptr(of_hantro_match),
+ .of_match_table = of_hantro_match,
.pm = &hantro_pm_ops,
},
};
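
The hantro_probe() changes above consolidate two common probe patterns: mapping MMIO regions through the devm_platform_ioremap_resource*() helpers instead of a separate platform_get_resource() + devm_ioremap_resource() pair, and propagating the error from platform_get_irq() rather than overriding it (the function never returns 0). A minimal sketch of both, with my_probe and my_isr as illustrative names:

	#include <linux/err.h>
	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static irqreturn_t my_isr(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int my_probe(struct platform_device *pdev)
	{
		void __iomem *base;
		int irq;

		/* Looks up MEM resource 0 and ioremaps it in one call. */
		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* platform_get_irq() never returns 0; pass its error through. */
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		return devm_request_irq(&pdev->dev, irq, my_isr, 0,
					dev_name(&pdev->dev), NULL);
	}
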
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
index e871c078dd59..b3ae037a50f6 100644
--- a/drivers/media/platform/verisilicon/hantro_v4l2.c
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
@@ -297,6 +297,7 @@ static int hantro_try_fmt(const struct hantro_ctx *ctx,
enum v4l2_buf_type type)
{
const struct hantro_fmt *fmt;
+ const struct hantro_fmt *vpu_fmt;
bool capture = V4L2_TYPE_IS_CAPTURE(type);
bool coded;
@@ -316,19 +317,23 @@ static int hantro_try_fmt(const struct hantro_ctx *ctx,
if (coded) {
pix_mp->num_planes = 1;
- } else if (!ctx->is_encoder) {
+ vpu_fmt = fmt;
+ } else if (ctx->is_encoder) {
+ vpu_fmt = hantro_find_format(ctx, ctx->dst_fmt.pixelformat);
+ } else {
/*
* Width/height on the CAPTURE end of a decoder are ignored and
* replaced by the OUTPUT ones.
*/
pix_mp->width = ctx->src_fmt.width;
pix_mp->height = ctx->src_fmt.height;
+ vpu_fmt = fmt;
}
pix_mp->field = V4L2_FIELD_NONE;
v4l2_apply_frmsize_constraints(&pix_mp->width, &pix_mp->height,
- &fmt->frmsize);
+ &vpu_fmt->frmsize);
if (!coded) {
/* Fill remaining fields */
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
index 6d273abfe16c..5de6b6694f53 100644
--- a/drivers/media/platform/video-mux.c
+++ b/drivers/media/platform/video-mux.c
@@ -314,7 +314,7 @@ static const struct v4l2_subdev_ops video_mux_subdev_ops = {
static int video_mux_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct video_mux *vmux = notifier_to_video_mux(notifier);
@@ -331,10 +331,10 @@ static int video_mux_async_register(struct video_mux *vmux,
unsigned int i;
int ret;
- v4l2_async_nf_init(&vmux->notifier);
+ v4l2_async_subdev_nf_init(&vmux->notifier, &vmux->subdev);
for (i = 0; i < num_input_pads; i++) {
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct fwnode_handle *ep, *remote_ep;
ep = fwnode_graph_get_endpoint_by_id(
@@ -352,7 +352,7 @@ static int video_mux_async_register(struct video_mux *vmux,
fwnode_handle_put(remote_ep);
asd = v4l2_async_nf_add_fwnode_remote(&vmux->notifier, ep,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
fwnode_handle_put(ep);
@@ -366,7 +366,7 @@ static int video_mux_async_register(struct video_mux *vmux,
vmux->notifier.ops = &video_mux_notify_ops;
- ret = v4l2_async_subdev_nf_register(&vmux->subdev, &vmux->notifier);
+ ret = v4l2_async_nf_register(&vmux->notifier);
if (ret)
goto err_nf_cleanup;
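
video-mux above uses the sub-device variant of the same rework: the notifier is initialized against the owning struct v4l2_subdev with v4l2_async_subdev_nf_init(), connections are added by following the remote end of a local endpoint, and registration again takes only the notifier. A short sketch with struct my_mux as an illustrative stand-in; unlike the vpif sketch earlier, the notifier here belongs to a subdev rather than a v4l2_device:

	#include <linux/err.h>
	#include <linux/fwnode.h>
	#include <media/v4l2-async.h>
	#include <media/v4l2-subdev.h>

	/* Hypothetical sub-device driver state, for illustration only. */
	struct my_mux {
		struct v4l2_subdev subdev;
		struct v4l2_async_notifier notifier;
	};

	static int my_mux_register_notifier(struct my_mux *mux,
					    struct fwnode_handle *ep)
	{
		struct v4l2_async_connection *asc;
		int ret;

		/* Sub-device notifiers are initialized against the subdev. */
		v4l2_async_subdev_nf_init(&mux->notifier, &mux->subdev);

		/* Match on the remote endpoint of the local endpoint 'ep'. */
		asc = v4l2_async_nf_add_fwnode_remote(&mux->notifier, ep,
						      struct v4l2_async_connection);
		if (IS_ERR(asc)) {
			ret = PTR_ERR(asc);
			goto err_cleanup;
		}

		ret = v4l2_async_nf_register(&mux->notifier);
		if (ret)
			goto err_cleanup;

		return 0;

	err_cleanup:
		v4l2_async_nf_cleanup(&mux->notifier);
		return ret;
	}
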
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index 3123216b3f70..4285770fde18 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -34,13 +34,13 @@
* @subdev: V4L2 subdev
*/
struct xvip_graph_entity {
- struct v4l2_async_subdev asd; /* must be first */
+ struct v4l2_async_connection asd; /* must be first */
struct media_entity *entity;
struct v4l2_subdev *subdev;
};
static inline struct xvip_graph_entity *
-to_xvip_entity(struct v4l2_async_subdev *asd)
+to_xvip_entity(struct v4l2_async_connection *asd)
{
return container_of(asd, struct xvip_graph_entity, asd);
}
@@ -54,9 +54,9 @@ xvip_graph_find_entity(struct xvip_composite_device *xdev,
const struct fwnode_handle *fwnode)
{
struct xvip_graph_entity *entity;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
- list_for_each_entry(asd, &xdev->notifier.asd_list, asd_list) {
+ list_for_each_entry(asd, &xdev->notifier.done_list, asc_entry) {
entity = to_xvip_entity(asd);
if (entity->asd.match.fwnode == fwnode)
return entity;
@@ -285,13 +285,13 @@ static int xvip_graph_notify_complete(struct v4l2_async_notifier *notifier)
struct xvip_composite_device *xdev =
container_of(notifier, struct xvip_composite_device, notifier);
struct xvip_graph_entity *entity;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
int ret;
dev_dbg(xdev->dev, "notify complete, all subdevs registered\n");
/* Create links for every entity. */
- list_for_each_entry(asd, &xdev->notifier.asd_list, asd_list) {
+ list_for_each_entry(asd, &xdev->notifier.done_list, asc_entry) {
entity = to_xvip_entity(asd);
ret = xvip_graph_build_one(xdev, entity);
if (ret < 0)
@@ -312,36 +312,14 @@ static int xvip_graph_notify_complete(struct v4l2_async_notifier *notifier)
static int xvip_graph_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *unused)
+ struct v4l2_async_connection *asc)
{
- struct xvip_composite_device *xdev =
- container_of(notifier, struct xvip_composite_device, notifier);
- struct xvip_graph_entity *entity;
- struct v4l2_async_subdev *asd;
-
- /* Locate the entity corresponding to the bound subdev and store the
- * subdev pointer.
- */
- list_for_each_entry(asd, &xdev->notifier.asd_list, asd_list) {
- entity = to_xvip_entity(asd);
-
- if (entity->asd.match.fwnode != subdev->fwnode)
- continue;
+ struct xvip_graph_entity *entity = to_xvip_entity(asc);
- if (entity->subdev) {
- dev_err(xdev->dev, "duplicate subdev for node %p\n",
- entity->asd.match.fwnode);
- return -EINVAL;
- }
+ entity->entity = &subdev->entity;
+ entity->subdev = subdev;
- dev_dbg(xdev->dev, "subdev %s bound\n", subdev->name);
- entity->entity = &subdev->entity;
- entity->subdev = subdev;
- return 0;
- }
-
- dev_err(xdev->dev, "no entity for subdev %s\n", subdev->name);
- return -EINVAL;
+ return 0;
}
static const struct v4l2_async_notifier_operations xvip_graph_notify_ops = {
@@ -402,7 +380,7 @@ err_notifier_cleanup:
static int xvip_graph_parse(struct xvip_composite_device *xdev)
{
struct xvip_graph_entity *entity;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
int ret;
/*
@@ -415,7 +393,7 @@ static int xvip_graph_parse(struct xvip_composite_device *xdev)
if (ret < 0)
return 0;
- list_for_each_entry(asd, &xdev->notifier.asd_list, asd_list) {
+ list_for_each_entry(asd, &xdev->notifier.waiting_list, asc_entry) {
entity = to_xvip_entity(asd);
ret = xvip_graph_parse_one(xdev, entity->asd.match.fwnode);
if (ret < 0) {
@@ -516,6 +494,8 @@ static int xvip_graph_init(struct xvip_composite_device *xdev)
goto done;
}
+ v4l2_async_nf_init(&xdev->notifier, &xdev->v4l2_dev);
+
/* Parse the graph to extract a list of subdevice DT nodes. */
ret = xvip_graph_parse(xdev);
if (ret < 0) {
@@ -523,7 +503,7 @@ static int xvip_graph_init(struct xvip_composite_device *xdev)
goto done;
}
- if (list_empty(&xdev->notifier.asd_list)) {
+ if (list_empty(&xdev->notifier.waiting_list)) {
dev_err(xdev->dev, "no subdev found in graph\n");
ret = -ENOENT;
goto done;
@@ -532,7 +512,7 @@ static int xvip_graph_init(struct xvip_composite_device *xdev)
/* Register the subdevices notifier. */
xdev->notifier.ops = &xvip_graph_notify_ops;
- ret = v4l2_async_nf_register(&xdev->v4l2_dev, &xdev->notifier);
+ ret = v4l2_async_nf_register(&xdev->notifier);
if (ret < 0) {
dev_err(xdev->dev, "notifier registration failed\n");
goto done;
@@ -596,7 +576,6 @@ static int xvip_composite_probe(struct platform_device *pdev)
xdev->dev = &pdev->dev;
INIT_LIST_HEAD(&xdev->dmas);
- v4l2_async_nf_init(&xdev->notifier);
ret = xvip_composite_v4l2_init(xdev);
if (ret < 0)
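
The xilinx-vipp changes above walk the notifier's own waiting_list and done_list (linked through asc_entry) instead of the removed asd_list, and the .bound() callback now recovers its entity directly from the struct v4l2_async_connection it is handed. A sketch of iterating the bound connections, assuming the xvip-style embedding where the connection is the first member of a wrapper struct; my_entity and my_walk_bound are illustrative names:

	#include <linux/container_of.h>
	#include <linux/list.h>
	#include <media/v4l2-async.h>
	#include <media/v4l2-subdev.h>

	/* Illustrative wrapper, mirroring the "must be first" embedding above. */
	struct my_entity {
		struct v4l2_async_connection asc;
		struct v4l2_subdev *subdev;
	};

	static void my_walk_bound(struct v4l2_async_notifier *notifier)
	{
		struct v4l2_async_connection *asc;

		/* done_list holds connections whose sub-devices have bound. */
		list_for_each_entry(asc, &notifier->done_list, asc_entry) {
			struct my_entity *entity =
				container_of(asc, struct my_entity, asc);

			/* entity->subdev was stored in the .bound() callback. */
			(void)entity->subdev;
		}
	}
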
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index b31b7ed60bbe..3da8e5102bec 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -1282,7 +1282,8 @@ static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
fw_data += (sizeof(struct bts_action) + (action->size));
fw_len -= (sizeof(struct bts_action) + (action->size));
}
- fmdbg("Transfered only %d of %d bytes of the firmware to chip\n", fw_entry->size - fw_len, fw_entry->size);
+ fmdbg("Transferred only %d of %d bytes of the firmware to chip\n",
+ fw_entry->size - fw_len, fw_entry->size);
rel_fw:
release_firmware(fw_entry);
clear_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 922c790b577e..07bdf649c60d 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -274,6 +274,7 @@ config IR_MCEUSB
config IR_MESON
tristate "Amlogic Meson IR remote receiver"
depends on ARCH_MESON || COMPILE_TEST
+ select REGMAP_MMIO
help
Say Y if you want to use the IR remote receiver available
on Amlogic Meson SoCs.
diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c
index 27f55ea966c6..41eeec648803 100644
--- a/drivers/media/rc/gpio-ir-recv.c
+++ b/drivers/media/rc/gpio-ir-recv.c
@@ -205,7 +205,7 @@ static struct platform_driver gpio_ir_recv_driver = {
.remove_new = gpio_ir_recv_remove,
.driver = {
.name = KBUILD_MODNAME,
- .of_match_table = of_match_ptr(gpio_ir_recv_of_match),
+ .of_match_table = gpio_ir_recv_of_match,
#ifdef CONFIG_PM
.pm = &gpio_ir_recv_pm_ops,
#endif
diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c
index 2b829c146db1..1a8fea357f14 100644
--- a/drivers/media/rc/gpio-ir-tx.c
+++ b/drivers/media/rc/gpio-ir-tx.c
@@ -199,7 +199,7 @@ static struct platform_driver gpio_ir_tx_driver = {
.probe = gpio_ir_tx_probe,
.driver = {
.name = DRIVER_NAME,
- .of_match_table = of_match_ptr(gpio_ir_tx_of_match),
+ .of_match_table = gpio_ir_tx_of_match,
},
};
module_platform_driver(gpio_ir_tx_driver);
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
index adbbe639a261..13e81bf8005d 100644
--- a/drivers/media/rc/ir-rx51.c
+++ b/drivers/media/rc/ir-rx51.c
@@ -275,7 +275,7 @@ static struct platform_driver ir_rx51_platform_driver = {
.resume = ir_rx51_resume,
.driver = {
.name = KBUILD_MODNAME,
- .of_match_table = of_match_ptr(ir_rx51_match),
+ .of_match_table = ir_rx51_match,
},
};
module_platform_driver(ir_rx51_platform_driver);
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index 49aa309d1a8c..70322fab34ac 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -10,71 +10,57 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/bitfield.h>
+#include <linux/regmap.h>
#include <media/rc-core.h>
#define DRIVER_NAME "meson-ir"
-/* valid on all Meson platforms */
#define IR_DEC_LDR_ACTIVE 0x00
#define IR_DEC_LDR_IDLE 0x04
#define IR_DEC_LDR_REPEAT 0x08
#define IR_DEC_BIT_0 0x0c
#define IR_DEC_REG0 0x10
+#define IR_DEC_REG0_BASE_TIME GENMASK(11, 0)
#define IR_DEC_FRAME 0x14
#define IR_DEC_STATUS 0x18
+#define IR_DEC_STATUS_PULSE BIT(8)
#define IR_DEC_REG1 0x1c
-/* only available on Meson 8b and newer */
+#define IR_DEC_REG1_TIME_IV GENMASK(28, 16)
+#define IR_DEC_REG1_ENABLE BIT(15)
+#define IR_DEC_REG1_MODE GENMASK(8, 7)
+#define IR_DEC_REG1_IRQSEL GENMASK(3, 2)
+#define IR_DEC_REG1_RESET BIT(0)
+/* The following regs are only available on Meson 8b and newer */
#define IR_DEC_REG2 0x20
+#define IR_DEC_REG2_MODE GENMASK(3, 0)
-#define REG0_RATE_MASK GENMASK(11, 0)
+#define DEC_MODE_NEC 0x0
+#define DEC_MODE_RAW 0x2
-#define DECODE_MODE_NEC 0x0
-#define DECODE_MODE_RAW 0x2
+#define IRQSEL_NEC_MODE 0
+#define IRQSEL_RISE_FALL 1
+#define IRQSEL_FALL 2
+#define IRQSEL_RISE 3
-/* Meson 6b uses REG1 to configure the mode */
-#define REG1_MODE_MASK GENMASK(8, 7)
-#define REG1_MODE_SHIFT 7
-
-/* Meson 8b / GXBB use REG2 to configure the mode */
-#define REG2_MODE_MASK GENMASK(3, 0)
-#define REG2_MODE_SHIFT 0
-
-#define REG1_TIME_IV_MASK GENMASK(28, 16)
-
-#define REG1_IRQSEL_MASK GENMASK(3, 2)
-#define REG1_IRQSEL_NEC_MODE 0
-#define REG1_IRQSEL_RISE_FALL 1
-#define REG1_IRQSEL_FALL 2
-#define REG1_IRQSEL_RISE 3
-
-#define REG1_RESET BIT(0)
-#define REG1_ENABLE BIT(15)
-
-#define STATUS_IR_DEC_IN BIT(8)
-
-#define MESON_TRATE 10 /* us */
+#define MESON_RAW_TRATE 10 /* us */
+#define MESON_HW_TRATE 20 /* us */
struct meson_ir {
- void __iomem *reg;
+ struct regmap *reg;
struct rc_dev *rc;
spinlock_t lock;
};
-static void meson_ir_set_mask(struct meson_ir *ir, unsigned int reg,
- u32 mask, u32 value)
-{
- u32 data;
-
- data = readl(ir->reg + reg);
- data &= ~mask;
- data |= (value & mask);
- writel(data, ir->reg + reg);
-}
+static const struct regmap_config meson_ir_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
static irqreturn_t meson_ir_irq(int irqno, void *dev_id)
{
@@ -84,12 +70,12 @@ static irqreturn_t meson_ir_irq(int irqno, void *dev_id)
spin_lock(&ir->lock);
- duration = readl_relaxed(ir->reg + IR_DEC_REG1);
- duration = FIELD_GET(REG1_TIME_IV_MASK, duration);
- rawir.duration = duration * MESON_TRATE;
+ regmap_read(ir->reg, IR_DEC_REG1, &duration);
+ duration = FIELD_GET(IR_DEC_REG1_TIME_IV, duration);
+ rawir.duration = duration * MESON_RAW_TRATE;
- status = readl_relaxed(ir->reg + IR_DEC_STATUS);
- rawir.pulse = !!(status & STATUS_IR_DEC_IN);
+ regmap_read(ir->reg, IR_DEC_STATUS, &status);
+ rawir.pulse = !!(status & IR_DEC_STATUS_PULSE);
ir_raw_event_store_with_timeout(ir->rc, &rawir);
@@ -102,6 +88,7 @@ static int meson_ir_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
+ void __iomem *res_start;
const char *map_name;
struct meson_ir *ir;
int irq, ret;
@@ -110,7 +97,12 @@ static int meson_ir_probe(struct platform_device *pdev)
if (!ir)
return -ENOMEM;
- ir->reg = devm_platform_ioremap_resource(pdev, 0);
+ res_start = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(res_start))
+ return PTR_ERR(res_start);
+
+ ir->reg = devm_regmap_init_mmio(&pdev->dev, res_start,
+ &meson_ir_regmap_config);
if (IS_ERR(ir->reg))
return PTR_ERR(ir->reg);
@@ -131,7 +123,7 @@ static int meson_ir_probe(struct platform_device *pdev)
map_name = of_get_property(node, "linux,rc-map-name", NULL);
ir->rc->map_name = map_name ? map_name : RC_MAP_EMPTY;
ir->rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
- ir->rc->rx_resolution = MESON_TRATE;
+ ir->rc->rx_resolution = MESON_RAW_TRATE;
ir->rc->min_timeout = 1;
ir->rc->timeout = IR_DEFAULT_TIMEOUT;
ir->rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
@@ -153,24 +145,28 @@ static int meson_ir_probe(struct platform_device *pdev)
}
/* Reset the decoder */
- meson_ir_set_mask(ir, IR_DEC_REG1, REG1_RESET, REG1_RESET);
- meson_ir_set_mask(ir, IR_DEC_REG1, REG1_RESET, 0);
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET,
+ IR_DEC_REG1_RESET);
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET, 0);
/* Set general operation mode (= raw/software decoding) */
if (of_device_is_compatible(node, "amlogic,meson6-ir"))
- meson_ir_set_mask(ir, IR_DEC_REG1, REG1_MODE_MASK,
- FIELD_PREP(REG1_MODE_MASK, DECODE_MODE_RAW));
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_MODE,
+ FIELD_PREP(IR_DEC_REG1_MODE, DEC_MODE_RAW));
else
- meson_ir_set_mask(ir, IR_DEC_REG2, REG2_MODE_MASK,
- FIELD_PREP(REG2_MODE_MASK, DECODE_MODE_RAW));
+ regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_MODE,
+ FIELD_PREP(IR_DEC_REG2_MODE, DEC_MODE_RAW));
/* Set rate */
- meson_ir_set_mask(ir, IR_DEC_REG0, REG0_RATE_MASK, MESON_TRATE - 1);
+ regmap_update_bits(ir->reg, IR_DEC_REG0, IR_DEC_REG0_BASE_TIME,
+ FIELD_PREP(IR_DEC_REG0_BASE_TIME,
+ MESON_RAW_TRATE - 1));
/* IRQ on rising and falling edges */
- meson_ir_set_mask(ir, IR_DEC_REG1, REG1_IRQSEL_MASK,
- FIELD_PREP(REG1_IRQSEL_MASK, REG1_IRQSEL_RISE_FALL));
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_IRQSEL,
+ FIELD_PREP(IR_DEC_REG1_IRQSEL, IRQSEL_RISE_FALL));
/* Enable the decoder */
- meson_ir_set_mask(ir, IR_DEC_REG1, REG1_ENABLE, REG1_ENABLE);
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_ENABLE,
+ IR_DEC_REG1_ENABLE);
dev_info(dev, "receiver initialized\n");
@@ -184,7 +180,7 @@ static void meson_ir_remove(struct platform_device *pdev)
/* Disable the decoder */
spin_lock_irqsave(&ir->lock, flags);
- meson_ir_set_mask(ir, IR_DEC_REG1, REG1_ENABLE, 0);
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_ENABLE, 0);
spin_unlock_irqrestore(&ir->lock, flags);
}
@@ -202,14 +198,16 @@ static void meson_ir_shutdown(struct platform_device *pdev)
* bootloader a chance to power the system back on
*/
if (of_device_is_compatible(node, "amlogic,meson6-ir"))
- meson_ir_set_mask(ir, IR_DEC_REG1, REG1_MODE_MASK,
- DECODE_MODE_NEC << REG1_MODE_SHIFT);
+ regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_MODE,
+ FIELD_PREP(IR_DEC_REG1_MODE, DEC_MODE_NEC));
else
- meson_ir_set_mask(ir, IR_DEC_REG2, REG2_MODE_MASK,
- DECODE_MODE_NEC << REG2_MODE_SHIFT);
+ regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_MODE,
+ FIELD_PREP(IR_DEC_REG2_MODE, DEC_MODE_NEC));
/* Set rate to default value */
- meson_ir_set_mask(ir, IR_DEC_REG0, REG0_RATE_MASK, 0x13);
+ regmap_update_bits(ir->reg, IR_DEC_REG0, IR_DEC_REG0_BASE_TIME,
+ FIELD_PREP(IR_DEC_REG0_BASE_TIME,
+ MESON_HW_TRATE - 1));
spin_unlock_irqrestore(&ir->lock, flags);
}
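
The meson-ir rework above replaces the open-coded read-modify-write helper with a 32-bit MMIO regmap, so field updates become regmap_update_bits() plus FIELD_PREP(). A sketch of that pattern; the register offset and field masks below are invented for illustration and are not the meson-ir registers:

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>
	#include <linux/regmap.h>

	#define MY_CTRL_REG	0x10
	#define MY_CTRL_MODE	GENMASK(3, 0)
	#define MY_CTRL_ENABLE	BIT(15)

	static const struct regmap_config my_regmap_config = {
		.reg_bits = 32,
		.val_bits = 32,
		.reg_stride = 4,
	};

	static int my_setup(struct platform_device *pdev, unsigned int mode)
	{
		void __iomem *base;
		struct regmap *map;

		base = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(base))
			return PTR_ERR(base);

		map = devm_regmap_init_mmio(&pdev->dev, base, &my_regmap_config);
		if (IS_ERR(map))
			return PTR_ERR(map);

		/* Read-modify-write of a bit field, replacing the old helper. */
		regmap_update_bits(map, MY_CTRL_REG, MY_CTRL_MODE,
				   FIELD_PREP(MY_CTRL_MODE, mode));

		/* Single-bit enable. */
		return regmap_update_bits(map, MY_CTRL_REG, MY_CTRL_ENABLE,
					  MY_CTRL_ENABLE);
	}
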
diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
index df9349330a93..4e294e59d3cb 100644
--- a/drivers/media/rc/mtk-cir.c
+++ b/drivers/media/rc/mtk-cir.c
@@ -8,7 +8,8 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <media/rc-core.h>
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index 75b7aed1579c..bf58c965ead8 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -13,7 +13,8 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <media/rc-core.h>
diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
index c2167ccfd222..e95bdccfc18e 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.c
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
@@ -628,7 +628,6 @@ static int vivid_fop_release(struct file *file)
v4l2_info(&dev->v4l2_dev, "reconnect\n");
vivid_reconnect(dev);
}
- mutex_unlock(&dev->mutex);
if (file->private_data == dev->radio_rx_rds_owner) {
dev->radio_rx_rds_last_block = 0;
dev->radio_rx_rds_owner = NULL;
@@ -637,6 +636,7 @@ static int vivid_fop_release(struct file *file)
dev->radio_tx_rds_last_block = 0;
dev->radio_tx_rds_owner = NULL;
}
+ mutex_unlock(&dev->mutex);
if (vdev->queue)
return vb2_fop_release(file);
return v4l2_fh_release(file);
diff --git a/drivers/media/tuners/qt1010.c b/drivers/media/tuners/qt1010.c
index 3853a3d43d4f..a7b19863f489 100644
--- a/drivers/media/tuners/qt1010.c
+++ b/drivers/media/tuners/qt1010.c
@@ -345,11 +345,12 @@ static int qt1010_init(struct dvb_frontend *fe)
else
valptr = &tmpval;
- BUG_ON(i >= ARRAY_SIZE(i2c_data) - 1);
-
- err = qt1010_init_meas1(priv, i2c_data[i+1].reg,
- i2c_data[i].reg,
- i2c_data[i].val, valptr);
+ if (i >= ARRAY_SIZE(i2c_data) - 1)
+ err = -EIO;
+ else
+ err = qt1010_init_meas1(priv, i2c_data[i + 1].reg,
+ i2c_data[i].reg,
+ i2c_data[i].val, valptr);
i++;
break;
}
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 1e9c8d01523b..33a2aa8907e6 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -322,6 +322,8 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EOPNOTSUPP;
} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
(msg[0].addr == state->af9033_i2c_addr[1])) {
+ if (msg[0].len < 3 || msg[1].len < 1)
+ return -EOPNOTSUPP;
/* demod access via firmware interface */
u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
msg[0].buf[2];
@@ -381,6 +383,8 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EOPNOTSUPP;
} else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
(msg[0].addr == state->af9033_i2c_addr[1])) {
+ if (msg[0].len < 3)
+ return -EOPNOTSUPP;
/* demod access via firmware interface */
u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
msg[0].buf[2];
@@ -388,10 +392,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
if (msg[0].addr == state->af9033_i2c_addr[1])
reg |= 0x100000;
- ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
- &msg[0].buf[3],
- msg[0].len - 3)
- : -EOPNOTSUPP;
+ ret = af9035_wr_regs(d, reg, &msg[0].buf[3], msg[0].len - 3);
} else {
/* I2C write */
u8 buf[MAX_XFER_SIZE];
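
The af9035 hunks above, like several of the USB DVB fixes that follow (az6007, af9005, dw2102), check the caller-supplied I2C message lengths before indexing msg[].buf, so a short message from the I2C core cannot trigger an out-of-bounds read. A condensed sketch of the check using the same three-byte register encoding as the driver; my_xfer_prepare is an illustrative helper, not a function from the patch:

	#include <linux/errno.h>
	#include <linux/i2c.h>

	/*
	 * Validate a write+read pair before decoding the 3-byte register
	 * address from the write message (illustrative helper).
	 */
	static int my_xfer_prepare(struct i2c_msg *msg, int num, u32 *reg)
	{
		if (num < 2 || msg[0].len < 3 || msg[1].len < 1)
			return -EOPNOTSUPP;

		*reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | msg[0].buf[2];

		return 0;
	}
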
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index aa45b5d263f6..a1235d0cce92 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -202,7 +202,7 @@ static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
while (i < num) {
if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
- if (msg[i].len > 2 || msg[i+1].len > 60) {
+ if (msg[i].len != 2 || msg[i + 1].len > 60) {
ret = -EOPNOTSUPP;
break;
}
diff --git a/drivers/media/usb/dvb-usb-v2/az6007.c b/drivers/media/usb/dvb-usb-v2/az6007.c
index 2dcbb49d66da..2410054ddb2c 100644
--- a/drivers/media/usb/dvb-usb-v2/az6007.c
+++ b/drivers/media/usb/dvb-usb-v2/az6007.c
@@ -788,6 +788,10 @@ static int az6007_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (az6007_xfer_debug)
printk(KERN_DEBUG "az6007: I2C W addr=0x%x len=%d\n",
addr, msgs[i].len);
+ if (msgs[i].len < 1) {
+ ret = -EIO;
+ goto err;
+ }
req = AZ6007_I2C_WR;
index = msgs[i].buf[0];
value = addr | (1 << 8);
@@ -802,6 +806,10 @@ static int az6007_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (az6007_xfer_debug)
printk(KERN_DEBUG "az6007: I2C R addr=0x%x len=%d\n",
addr, msgs[i].len);
+ if (msgs[i].len < 1) {
+ ret = -EIO;
+ goto err;
+ }
req = AZ6007_I2C_RD;
index = msgs[i].buf[0];
value = addr;
diff --git a/drivers/media/usb/dvb-usb-v2/gl861.c b/drivers/media/usb/dvb-usb-v2/gl861.c
index 0c434259c36f..c71e7b93476d 100644
--- a/drivers/media/usb/dvb-usb-v2/gl861.c
+++ b/drivers/media/usb/dvb-usb-v2/gl861.c
@@ -120,7 +120,7 @@ static int gl861_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
} else if (num == 2 && !(msg[0].flags & I2C_M_RD) &&
(msg[1].flags & I2C_M_RD)) {
/* I2C write + read */
- if (msg[0].len > 1 || msg[1].len > sizeof(ctx->buf)) {
+ if (msg[0].len != 1 || msg[1].len > sizeof(ctx->buf)) {
ret = -EOPNOTSUPP;
goto err;
}
diff --git a/drivers/media/usb/dvb-usb/af9005.c b/drivers/media/usb/dvb-usb/af9005.c
index 0827bf3d4e8c..13604e6acdb8 100644
--- a/drivers/media/usb/dvb-usb/af9005.c
+++ b/drivers/media/usb/dvb-usb/af9005.c
@@ -422,6 +422,10 @@ static int af9005_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
if (ret == 0)
ret = 2;
} else {
+ if (msg[0].len < 2) {
+ ret = -EOPNOTSUPP;
+ goto unlock;
+ }
/* write one or more registers */
reg = msg[0].buf[0];
addr = msg[0].addr;
@@ -431,6 +435,7 @@ static int af9005_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
ret = 1;
}
+unlock:
mutex_unlock(&d->i2c_mutex);
return ret;
}
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 970b84c3f0b5..b3bb1805829a 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -128,6 +128,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
switch (num) {
case 2:
+ if (msg[0].len < 1) {
+ num = -EOPNOTSUPP;
+ break;
+ }
/* read stv0299 register */
value = msg[0].buf[0];/* register */
for (i = 0; i < msg[1].len; i++) {
@@ -139,6 +143,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
case 1:
switch (msg[0].addr) {
case 0x68:
+ if (msg[0].len < 2) {
+ num = -EOPNOTSUPP;
+ break;
+ }
/* write to stv0299 register */
buf6[0] = 0x2a;
buf6[1] = msg[0].buf[0];
@@ -148,6 +156,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
break;
case 0x60:
if (msg[0].flags == 0) {
+ if (msg[0].len < 4) {
+ num = -EOPNOTSUPP;
+ break;
+ }
/* write to tuner pll */
buf6[0] = 0x2c;
buf6[1] = 5;
@@ -159,6 +171,10 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
dw210x_op_rw(d->udev, 0xb2, 0, 0,
buf6, 7, DW210X_WRITE_MSG);
} else {
+ if (msg[0].len < 1) {
+ num = -EOPNOTSUPP;
+ break;
+ }
/* read from tuner */
dw210x_op_rw(d->udev, 0xb5, 0, 0,
buf6, 1, DW210X_READ_MSG);
@@ -166,12 +182,20 @@ static int dw2102_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
}
break;
case (DW2102_RC_QUERY):
+ if (msg[0].len < 2) {
+ num = -EOPNOTSUPP;
+ break;
+ }
dw210x_op_rw(d->udev, 0xb8, 0, 0,
buf6, 2, DW210X_READ_MSG);
msg[0].buf[0] = buf6[0];
msg[0].buf[1] = buf6[1];
break;
case (DW2102_VOLTAGE_CTRL):
+ if (msg[0].len < 1) {
+ num = -EOPNOTSUPP;
+ break;
+ }
buf6[0] = 0x30;
buf6[1] = msg[0].buf[0];
dw210x_op_rw(d->udev, 0xb2, 0, 0,
diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
index fea5bcf72a31..c88a202daf5f 100644
--- a/drivers/media/usb/dvb-usb/m920x.c
+++ b/drivers/media/usb/dvb-usb/m920x.c
@@ -277,7 +277,6 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu
char *read = kmalloc(1, GFP_KERNEL);
if (!read) {
ret = -ENOMEM;
- kfree(read);
goto unlock;
}
@@ -288,8 +287,10 @@ static int m920x_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int nu
if ((ret = m920x_read(d->udev, M9206_I2C, 0x0,
0x20 | stop,
- read, 1)) != 0)
+ read, 1)) != 0) {
+ kfree(read);
goto unlock;
+ }
msg[i].buf[j] = read[0];
}
diff --git a/drivers/media/usb/dvb-usb/opera1.c b/drivers/media/usb/dvb-usb/opera1.c
index 98b2177667d2..d269f8bb2dee 100644
--- a/drivers/media/usb/dvb-usb/opera1.c
+++ b/drivers/media/usb/dvb-usb/opera1.c
@@ -439,9 +439,14 @@ MODULE_DEVICE_TABLE(usb, opera1_table);
static int opera1_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
{
+ int ret;
u8 command[] = { READ_MAC_ADDR };
- opera1_xilinx_rw(d->udev, 0xb1, 0xa0, command, 1, OPERA_WRITE_MSG);
- opera1_xilinx_rw(d->udev, 0xb1, 0xa1, mac, 6, OPERA_READ_MSG);
+ ret = opera1_xilinx_rw(d->udev, 0xb1, 0xa0, command, 1, OPERA_WRITE_MSG);
+ if (ret)
+ return ret;
+ ret = opera1_xilinx_rw(d->udev, 0xb1, 0xa1, mac, 6, OPERA_READ_MSG);
+ if (ret)
+ return ret;
return 0;
}
static int opera1_xilinx_load_firmware(struct usb_device *dev,
diff --git a/drivers/media/usb/go7007/go7007-i2c.c b/drivers/media/usb/go7007/go7007-i2c.c
index 38339dd2f83f..2880370e45c8 100644
--- a/drivers/media/usb/go7007/go7007-i2c.c
+++ b/drivers/media/usb/go7007/go7007-i2c.c
@@ -165,8 +165,6 @@ static int go7007_i2c_master_xfer(struct i2c_adapter *adapter,
} else if (msgs[i].len == 3) {
if (msgs[i].flags & I2C_M_RD)
return -EIO;
- if (msgs[i].len != 3)
- return -EIO;
if (go7007_i2c_xfer(go, msgs[i].addr, 0,
(msgs[i].buf[0] << 8) | msgs[i].buf[1],
0x01, &msgs[i].buf[2]) < 0)
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 75c89b07e86a..29cc207194b9 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -3285,12 +3285,14 @@ int pvr2_hdw_get_cropcap(struct pvr2_hdw *hdw, struct v4l2_cropcap *pp)
/* Return information about the tuner */
int pvr2_hdw_get_tuner_status(struct pvr2_hdw *hdw,struct v4l2_tuner *vtp)
{
- LOCK_TAKE(hdw->big_lock); do {
+ LOCK_TAKE(hdw->big_lock);
+ do {
if (hdw->tuner_signal_stale) {
pvr2_hdw_status_poll(hdw);
}
memcpy(vtp,&hdw->tuner_signal_info,sizeof(struct v4l2_tuner));
- } while (0); LOCK_GIVE(hdw->big_lock);
+ } while (0);
+ LOCK_GIVE(hdw->big_lock);
return 0;
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
index a8c0b513e58e..3077399901aa 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-sysfs.c
@@ -77,7 +77,7 @@ static ssize_t show_name(struct device *class_dev,
pvr2_sysfs_trace("pvr2_sysfs(%p) show_name(cid=%d) is %s",
cip->chptr, cip->ctl_id, name);
if (!name) return -EINVAL;
- return scnprintf(buf, PAGE_SIZE, "%s\n", name);
+ return sysfs_emit(buf, "%s\n", name);
}
static ssize_t show_type(struct device *class_dev,
@@ -98,7 +98,7 @@ static ssize_t show_type(struct device *class_dev,
}
pvr2_sysfs_trace("pvr2_sysfs(%p) show_type(cid=%d) is %s",
cip->chptr, cip->ctl_id, name);
- return scnprintf(buf, PAGE_SIZE, "%s\n", name);
+ return sysfs_emit(buf, "%s\n", name);
}
static ssize_t show_min(struct device *class_dev,
@@ -111,7 +111,7 @@ static ssize_t show_min(struct device *class_dev,
val = pvr2_ctrl_get_min(cip->cptr);
pvr2_sysfs_trace("pvr2_sysfs(%p) show_min(cid=%d) is %ld",
cip->chptr, cip->ctl_id, val);
- return scnprintf(buf, PAGE_SIZE, "%ld\n", val);
+ return sysfs_emit(buf, "%ld\n", val);
}
static ssize_t show_max(struct device *class_dev,
@@ -124,7 +124,7 @@ static ssize_t show_max(struct device *class_dev,
val = pvr2_ctrl_get_max(cip->cptr);
pvr2_sysfs_trace("pvr2_sysfs(%p) show_max(cid=%d) is %ld",
cip->chptr, cip->ctl_id, val);
- return scnprintf(buf, PAGE_SIZE, "%ld\n", val);
+ return sysfs_emit(buf, "%ld\n", val);
}
static ssize_t show_def(struct device *class_dev,
@@ -544,7 +544,7 @@ static ssize_t v4l_minor_number_show(struct device *class_dev,
struct pvr2_sysfs *sfp;
sfp = dev_get_drvdata(class_dev);
if (!sfp) return -EINVAL;
- return scnprintf(buf,PAGE_SIZE,"%d\n",
+ return sysfs_emit(buf, "%d\n",
pvr2_hdw_v4l_get_minor_number(sfp->channel.hdw,
pvr2_v4l_type_video));
}
@@ -556,7 +556,7 @@ static ssize_t bus_info_show(struct device *class_dev,
struct pvr2_sysfs *sfp;
sfp = dev_get_drvdata(class_dev);
if (!sfp) return -EINVAL;
- return scnprintf(buf,PAGE_SIZE,"%s\n",
+ return sysfs_emit(buf, "%s\n",
pvr2_hdw_get_bus_info(sfp->channel.hdw));
}
@@ -567,7 +567,7 @@ static ssize_t hdw_name_show(struct device *class_dev,
struct pvr2_sysfs *sfp;
sfp = dev_get_drvdata(class_dev);
if (!sfp) return -EINVAL;
- return scnprintf(buf,PAGE_SIZE,"%s\n",
+ return sysfs_emit(buf, "%s\n",
pvr2_hdw_get_type(sfp->channel.hdw));
}
@@ -578,7 +578,7 @@ static ssize_t hdw_desc_show(struct device *class_dev,
struct pvr2_sysfs *sfp;
sfp = dev_get_drvdata(class_dev);
if (!sfp) return -EINVAL;
- return scnprintf(buf,PAGE_SIZE,"%s\n",
+ return sysfs_emit(buf, "%s\n",
pvr2_hdw_get_desc(sfp->channel.hdw));
}
@@ -590,7 +590,7 @@ static ssize_t v4l_radio_minor_number_show(struct device *class_dev,
struct pvr2_sysfs *sfp;
sfp = dev_get_drvdata(class_dev);
if (!sfp) return -EINVAL;
- return scnprintf(buf,PAGE_SIZE,"%d\n",
+ return sysfs_emit(buf, "%d\n",
pvr2_hdw_v4l_get_minor_number(sfp->channel.hdw,
pvr2_v4l_type_radio));
}
@@ -602,7 +602,7 @@ static ssize_t unit_number_show(struct device *class_dev,
struct pvr2_sysfs *sfp;
sfp = dev_get_drvdata(class_dev);
if (!sfp) return -EINVAL;
- return scnprintf(buf,PAGE_SIZE,"%d\n",
+ return sysfs_emit(buf, "%d\n",
pvr2_hdw_get_unit_number(sfp->channel.hdw));
}
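
The pvrusb2 sysfs conversion above replaces scnprintf(buf, PAGE_SIZE, ...) with sysfs_emit(), which has the PAGE_SIZE bound and the sysfs buffer checks built in. A minimal show() callback using it; my_value is an illustrative attribute, not one of the pvrusb2 attributes:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t my_value_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
	{
		int val = 42;	/* stand-in for real driver state */

		/* sysfs_emit() enforces the one-page sysfs contract. */
		return sysfs_emit(buf, "%d\n", val);
	}
	static DEVICE_ATTR_RO(my_value);
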
diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
index 640737d3b8ae..8a39cac76c58 100644
--- a/drivers/media/usb/siano/smsusb.c
+++ b/drivers/media/usb/siano/smsusb.c
@@ -455,12 +455,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
rc = smscore_register_device(&params, &dev->coredev, 0, mdev);
if (rc < 0) {
pr_err("smscore_register_device(...) failed, rc %d\n", rc);
- smsusb_term_device(intf);
-#ifdef CONFIG_MEDIA_CONTROLLER_DVB
- media_device_unregister(mdev);
-#endif
- kfree(mdev);
- return rc;
+ goto err_unregister_device;
}
smscore_set_board_id(dev->coredev, board_id);
@@ -477,8 +472,7 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
rc = smsusb_start_streaming(dev);
if (rc < 0) {
pr_err("smsusb_start_streaming(...) failed\n");
- smsusb_term_device(intf);
- return rc;
+ goto err_unregister_device;
}
dev->state = SMSUSB_ACTIVE;
@@ -486,13 +480,20 @@ static int smsusb_init_device(struct usb_interface *intf, int board_id)
rc = smscore_start_device(dev->coredev);
if (rc < 0) {
pr_err("smscore_start_device(...) failed\n");
- smsusb_term_device(intf);
- return rc;
+ goto err_unregister_device;
}
pr_debug("device 0x%p created\n", dev);
return rc;
+
+err_unregister_device:
+ smsusb_term_device(intf);
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ media_device_unregister(mdev);
+#endif
+ kfree(mdev);
+ return rc;
}
static int smsusb_probe(struct usb_interface *intf,
diff --git a/drivers/media/usb/ttusb-dec/ttusbdecfe.c b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
index ea25b96b8bbf..dff6bf532ce3 100644
--- a/drivers/media/usb/ttusb-dec/ttusbdecfe.c
+++ b/drivers/media/usb/ttusb-dec/ttusbdecfe.c
@@ -76,7 +76,7 @@ static int ttusbdecfe_dvbt_read_status(struct dvb_frontend *fe,
static int ttusbdecfe_dvbt_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv;
+ struct ttusbdecfe_state *state = fe->demodulator_priv;
u8 b[] = { 0x00, 0x00, 0x00, 0x03,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01,
@@ -103,7 +103,7 @@ static int ttusbdecfe_dvbt_get_tune_settings(struct dvb_frontend* fe,
static int ttusbdecfe_dvbs_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
- struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv;
+ struct ttusbdecfe_state *state = fe->demodulator_priv;
u8 b[] = { 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00,
@@ -137,7 +137,7 @@ static int ttusbdecfe_dvbs_set_frontend(struct dvb_frontend *fe)
static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd *cmd)
{
- struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv;
+ struct ttusbdecfe_state *state = fe->demodulator_priv;
u8 b[] = { 0x00, 0xff, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00 };
@@ -158,7 +158,7 @@ static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struc
static int ttusbdecfe_dvbs_set_tone(struct dvb_frontend *fe,
enum fe_sec_tone_mode tone)
{
- struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv;
+ struct ttusbdecfe_state *state = fe->demodulator_priv;
state->hi_band = (SEC_TONE_ON == tone);
@@ -169,7 +169,7 @@ static int ttusbdecfe_dvbs_set_tone(struct dvb_frontend *fe,
static int ttusbdecfe_dvbs_set_voltage(struct dvb_frontend *fe,
enum fe_sec_voltage voltage)
{
- struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv;
+ struct ttusbdecfe_state *state = fe->demodulator_priv;
switch (voltage) {
case SEC_VOLTAGE_13:
@@ -187,7 +187,7 @@ static int ttusbdecfe_dvbs_set_voltage(struct dvb_frontend *fe,
static void ttusbdecfe_release(struct dvb_frontend* fe)
{
- struct ttusbdecfe_state* state = (struct ttusbdecfe_state*) fe->demodulator_priv;
+ struct ttusbdecfe_state *state = fe->demodulator_priv;
kfree(state);
}
diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
index 348559bc2468..f77ebd688cde 100644
--- a/drivers/media/v4l2-core/Kconfig
+++ b/drivers/media/v4l2-core/Kconfig
@@ -74,6 +74,15 @@ config V4L2_FWNODE
config V4L2_ASYNC
tristate
+config V4L2_CCI
+ tristate
+
+config V4L2_CCI_I2C
+ tristate
+ depends on I2C
+ select REGMAP_I2C
+ select V4L2_CCI
+
# Used by drivers that need Videobuf modules
config VIDEOBUF_GEN
tristate
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index 41d91bd10cf2..be2551705755 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -25,6 +25,7 @@ videodev-$(CONFIG_VIDEO_V4L2_I2C) += v4l2-i2c.o
# (e. g. LC_ALL=C sort Makefile)
obj-$(CONFIG_V4L2_ASYNC) += v4l2-async.o
+obj-$(CONFIG_V4L2_CCI) += v4l2-cci.o
obj-$(CONFIG_V4L2_FLASH_LED_CLASS) += v4l2-flash-led-class.o
obj-$(CONFIG_V4L2_FWNODE) += v4l2-fwnode.o
obj-$(CONFIG_V4L2_H264) += v4l2-h264.o
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index b16b5f4cb91e..091e8cf4114b 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -28,22 +28,22 @@
static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asc)
{
if (!n->ops || !n->ops->bound)
return 0;
- return n->ops->bound(n, subdev, asd);
+ return n->ops->bound(n, subdev, asc);
}
static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asc)
{
if (!n->ops || !n->ops->unbind)
return;
- n->ops->unbind(n, subdev, asd);
+ n->ops->unbind(n, subdev, asc);
}
static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
@@ -55,131 +55,142 @@ static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
}
static void v4l2_async_nf_call_destroy(struct v4l2_async_notifier *n,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asc)
{
if (!n->ops || !n->ops->destroy)
return;
- n->ops->destroy(asd);
+ n->ops->destroy(asc);
}
static bool match_i2c(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
+ struct v4l2_subdev *sd,
+ struct v4l2_async_match_desc *match)
{
#if IS_ENABLED(CONFIG_I2C)
struct i2c_client *client = i2c_verify_client(sd->dev);
return client &&
- asd->match.i2c.adapter_id == client->adapter->nr &&
- asd->match.i2c.address == client->addr;
+ match->i2c.adapter_id == client->adapter->nr &&
+ match->i2c.address == client->addr;
#else
return false;
#endif
}
+static struct device *notifier_dev(struct v4l2_async_notifier *notifier)
+{
+ if (notifier->sd)
+ return notifier->sd->dev;
+
+ if (notifier->v4l2_dev)
+ return notifier->v4l2_dev->dev;
+
+ return NULL;
+}
+
static bool
match_fwnode_one(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd, struct fwnode_handle *sd_fwnode,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_match_desc *match)
{
- struct fwnode_handle *other_fwnode;
- struct fwnode_handle *dev_fwnode;
- bool asd_fwnode_is_ep;
- bool sd_fwnode_is_ep;
- struct device *dev;
+ struct fwnode_handle *asd_dev_fwnode;
+ bool ret;
- /*
- * Both the subdev and the async subdev can provide either an endpoint
- * fwnode or a device fwnode. Start with the simple case of direct
- * fwnode matching.
- */
- if (sd_fwnode == asd->match.fwnode)
- return true;
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: fwnode match: need %pfw, trying %pfw\n",
+ sd_fwnode, match->fwnode);
- /*
- * Otherwise, check if the sd fwnode and the asd fwnode refer to an
- * endpoint or a device. If they're of the same type, there's no match.
- * Technically speaking this checks if the nodes refer to a connected
- * endpoint, which is the simplest check that works for both OF and
- * ACPI. This won't make a difference, as drivers should not try to
- * match unconnected endpoints.
- */
- sd_fwnode_is_ep = fwnode_graph_is_endpoint(sd_fwnode);
- asd_fwnode_is_ep = fwnode_graph_is_endpoint(asd->match.fwnode);
+ if (sd_fwnode == match->fwnode) {
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: direct match found\n");
+ return true;
+ }
- if (sd_fwnode_is_ep == asd_fwnode_is_ep)
+ if (!fwnode_graph_is_endpoint(match->fwnode)) {
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: direct match not found\n");
return false;
-
- /*
- * The sd and asd fwnodes are of different types. Get the device fwnode
- * parent of the endpoint fwnode, and compare it with the other fwnode.
- */
- if (sd_fwnode_is_ep) {
- dev_fwnode = fwnode_graph_get_port_parent(sd_fwnode);
- other_fwnode = asd->match.fwnode;
- } else {
- dev_fwnode = fwnode_graph_get_port_parent(asd->match.fwnode);
- other_fwnode = sd_fwnode;
}
- fwnode_handle_put(dev_fwnode);
+ asd_dev_fwnode = fwnode_graph_get_port_parent(match->fwnode);
- if (dev_fwnode != other_fwnode)
- return false;
+ ret = sd_fwnode == asd_dev_fwnode;
- /*
- * We have a heterogeneous match. Retrieve the struct device of the side
- * that matched on a device fwnode to print its driver name.
- */
- if (sd_fwnode_is_ep)
- dev = notifier->v4l2_dev ? notifier->v4l2_dev->dev
- : notifier->sd->dev;
- else
- dev = sd->dev;
-
- if (dev && dev->driver) {
- if (sd_fwnode_is_ep)
- dev_warn(dev, "Driver %s uses device fwnode, incorrect match may occur\n",
- dev->driver->name);
- dev_notice(dev, "Consider updating driver %s to match on endpoints\n",
- dev->driver->name);
- }
+ fwnode_handle_put(asd_dev_fwnode);
- return true;
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: device--endpoint match %sfound\n",
+ ret ? "" : "not ");
+
+ return ret;
}
static bool match_fwnode(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd, struct v4l2_async_subdev *asd)
+ struct v4l2_subdev *sd,
+ struct v4l2_async_match_desc *match)
{
- if (match_fwnode_one(notifier, sd, sd->fwnode, asd))
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: matching for notifier %pfw, sd fwnode %pfw\n",
+ dev_fwnode(notifier_dev(notifier)), sd->fwnode);
+
+ if (!list_empty(&sd->async_subdev_endpoint_list)) {
+ struct v4l2_async_subdev_endpoint *ase;
+
+ dev_dbg(sd->dev,
+ "v4l2-async: endpoint fwnode list available, looking for %pfw\n",
+ match->fwnode);
+
+ list_for_each_entry(ase, &sd->async_subdev_endpoint_list,
+ async_subdev_endpoint_entry) {
+ bool matched = ase->endpoint == match->fwnode;
+
+ dev_dbg(sd->dev,
+ "v4l2-async: endpoint-endpoint match %sfound with %pfw\n",
+ matched ? "" : "not ", ase->endpoint);
+
+ if (matched)
+ return true;
+ }
+
+ dev_dbg(sd->dev, "async: no endpoint matched\n");
+
+ return false;
+ }
+
+ if (match_fwnode_one(notifier, sd, sd->fwnode, match))
return true;
/* Also check the secondary fwnode. */
if (IS_ERR_OR_NULL(sd->fwnode->secondary))
return false;
- return match_fwnode_one(notifier, sd, sd->fwnode->secondary, asd);
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: trying secondary fwnode match\n");
+
+ return match_fwnode_one(notifier, sd, sd->fwnode->secondary, match);
}
static LIST_HEAD(subdev_list);
static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);
-static struct v4l2_async_subdev *
+static struct v4l2_async_connection *
v4l2_async_find_match(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd)
{
bool (*match)(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd, struct v4l2_async_subdev *asd);
- struct v4l2_async_subdev *asd;
+ struct v4l2_subdev *sd,
+ struct v4l2_async_match_desc *match);
+ struct v4l2_async_connection *asc;
- list_for_each_entry(asd, &notifier->waiting, list) {
+ list_for_each_entry(asc, &notifier->waiting_list, asc_entry) {
/* bus_type has been verified valid before */
- switch (asd->match_type) {
- case V4L2_ASYNC_MATCH_I2C:
+ switch (asc->match.type) {
+ case V4L2_ASYNC_MATCH_TYPE_I2C:
match = match_i2c;
break;
- case V4L2_ASYNC_MATCH_FWNODE:
+ case V4L2_ASYNC_MATCH_TYPE_FWNODE:
match = match_fwnode;
break;
default:
@@ -189,28 +200,26 @@ v4l2_async_find_match(struct v4l2_async_notifier *notifier,
}
/* match cannot be NULL here */
- if (match(notifier, sd, asd))
- return asd;
+ if (match(notifier, sd, &asc->match))
+ return asc;
}
return NULL;
}
-/* Compare two async sub-device descriptors for equivalence */
-static bool asd_equal(struct v4l2_async_subdev *asd_x,
- struct v4l2_async_subdev *asd_y)
+/* Compare two async match descriptors for equivalence */
+static bool v4l2_async_match_equal(struct v4l2_async_match_desc *match1,
+ struct v4l2_async_match_desc *match2)
{
- if (asd_x->match_type != asd_y->match_type)
+ if (match1->type != match2->type)
return false;
- switch (asd_x->match_type) {
- case V4L2_ASYNC_MATCH_I2C:
- return asd_x->match.i2c.adapter_id ==
- asd_y->match.i2c.adapter_id &&
- asd_x->match.i2c.address ==
- asd_y->match.i2c.address;
- case V4L2_ASYNC_MATCH_FWNODE:
- return asd_x->match.fwnode == asd_y->match.fwnode;
+ switch (match1->type) {
+ case V4L2_ASYNC_MATCH_TYPE_I2C:
+ return match1->i2c.adapter_id == match2->i2c.adapter_id &&
+ match1->i2c.address == match2->i2c.address;
+ case V4L2_ASYNC_MATCH_TYPE_FWNODE:
+ return match1->fwnode == match2->fwnode;
default:
break;
}
@@ -224,7 +233,7 @@ v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
{
struct v4l2_async_notifier *n;
- list_for_each_entry(n, &notifier_list, list)
+ list_for_each_entry(n, &notifier_list, notifier_entry)
if (n->sd == sd)
return n;
@@ -247,14 +256,14 @@ v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
static bool
v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
{
- struct v4l2_subdev *sd;
+ struct v4l2_async_connection *asc;
- if (!list_empty(&notifier->waiting))
+ if (!list_empty(&notifier->waiting_list))
return false;
- list_for_each_entry(sd, &notifier->done, async_list) {
+ list_for_each_entry(asc, &notifier->done_list, asc_entry) {
struct v4l2_async_notifier *subdev_notifier =
- v4l2_async_find_subdev_notifier(sd);
+ v4l2_async_find_subdev_notifier(asc->sd);
if (subdev_notifier &&
!v4l2_async_nf_can_complete(subdev_notifier))
@@ -271,22 +280,33 @@ v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
static int
v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
{
+ struct v4l2_async_notifier *__notifier = notifier;
+
/* Quick check whether there are still more sub-devices here. */
- if (!list_empty(&notifier->waiting))
+ if (!list_empty(&notifier->waiting_list))
return 0;
+ if (notifier->sd)
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: trying to complete\n");
+
/* Check the entire notifier tree; find the root notifier first. */
while (notifier->parent)
notifier = notifier->parent;
/* This is root if it has v4l2_dev. */
- if (!notifier->v4l2_dev)
+ if (!notifier->v4l2_dev) {
+ dev_dbg(notifier_dev(__notifier),
+ "v4l2-async: V4L2 device not available\n");
return 0;
+ }
/* Is everything ready? */
if (!v4l2_async_nf_can_complete(notifier))
return 0;
+ dev_dbg(notifier_dev(__notifier), "v4l2-async: complete\n");
+
return v4l2_async_nf_call_complete(notifier);
}
@@ -314,41 +334,53 @@ static int v4l2_async_create_ancillary_links(struct v4l2_async_notifier *n,
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
struct v4l2_device *v4l2_dev,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asc)
{
struct v4l2_async_notifier *subdev_notifier;
+ bool registered = false;
int ret;
- ret = v4l2_device_register_subdev(v4l2_dev, sd);
- if (ret < 0)
- return ret;
+ if (list_empty(&sd->asc_list)) {
+ ret = v4l2_device_register_subdev(v4l2_dev, sd);
+ if (ret < 0)
+ return ret;
+ registered = true;
+ }
- ret = v4l2_async_nf_call_bound(notifier, sd, asd);
+ ret = v4l2_async_nf_call_bound(notifier, sd, asc);
if (ret < 0) {
- v4l2_device_unregister_subdev(sd);
- return ret;
+ if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
+ dev_dbg(notifier_dev(notifier),
+ "failed binding %pfw (%d)\n",
+ asc->match.fwnode, ret);
+ goto err_unregister_subdev;
}
- /*
- * Depending of the function of the entities involved, we may want to
- * create links between them (for example between a sensor and its lens
- * or between a sensor's source pad and the connected device's sink
- * pad).
- */
- ret = v4l2_async_create_ancillary_links(notifier, sd);
- if (ret) {
- v4l2_async_nf_call_unbind(notifier, sd, asd);
- v4l2_device_unregister_subdev(sd);
- return ret;
+ if (registered) {
+ /*
+ * Depending on the function of the entities involved, we may
+ * want to create links between them (for example between a
+ * sensor and its lens or between a sensor's source pad and the
+ * connected device's sink pad).
+ */
+ ret = v4l2_async_create_ancillary_links(notifier, sd);
+ if (ret) {
+ if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
+ dev_dbg(notifier_dev(notifier),
+ "failed creating links for %pfw (%d)\n",
+ asc->match.fwnode, ret);
+ goto err_call_unbind;
+ }
}
- /* Remove from the waiting list */
- list_del(&asd->list);
- sd->asd = asd;
- sd->notifier = notifier;
+ list_add(&asc->asc_subdev_entry, &sd->asc_list);
+ asc->sd = sd;
+
+ /* Move from the waiting list to notifier's done */
+ list_move(&asc->asc_entry, &notifier->done_list);
- /* Move from the global subdevice list to notifier's done */
- list_move(&sd->async_list, &notifier->done);
+ dev_dbg(notifier_dev(notifier), "v4l2-async: %s bound (ret %d)\n",
+ dev_name(sd->dev), ret);
/*
* See if the sub-device has a notifier. If not, return here.
@@ -365,6 +397,16 @@ static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
subdev_notifier->parent = notifier;
return v4l2_async_nf_try_all_subdevs(subdev_notifier);
+
+err_call_unbind:
+ v4l2_async_nf_call_unbind(notifier, sd, asc);
+ list_del(&asc->asc_subdev_entry);
+
+err_unregister_subdev:
+ if (registered)
+ v4l2_device_unregister_subdev(sd);
+
+ return ret;
}
/* Test all async sub-devices in a notifier for a match. */
@@ -378,16 +420,21 @@ v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
if (!v4l2_dev)
return 0;
+ dev_dbg(notifier_dev(notifier), "v4l2-async: trying all sub-devices\n");
+
again:
list_for_each_entry(sd, &subdev_list, async_list) {
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asc;
int ret;
- asd = v4l2_async_find_match(notifier, sd);
- if (!asd)
+ asc = v4l2_async_find_match(notifier, sd);
+ if (!asc)
continue;
- ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
+ dev_dbg(notifier_dev(notifier),
+ "v4l2-async: match found, subdev %s\n", sd->name);
+
+ ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asc);
if (ret < 0)
return ret;
@@ -403,37 +450,33 @@ again:
return 0;
}
-static void v4l2_async_cleanup(struct v4l2_subdev *sd)
+static void v4l2_async_unbind_subdev_one(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_connection *asc)
{
- v4l2_device_unregister_subdev(sd);
- /*
- * Subdevice driver will reprobe and put the subdev back
- * onto the list
- */
- list_del_init(&sd->async_list);
- sd->asd = NULL;
+ list_move_tail(&asc->asc_entry, &notifier->waiting_list);
+ if (list_is_singular(&asc->asc_subdev_entry)) {
+ v4l2_async_nf_call_unbind(notifier, asc->sd, asc);
+ v4l2_device_unregister_subdev(asc->sd);
+ asc->sd = NULL;
+ }
+ list_del(&asc->asc_subdev_entry);
}
/* Unbind all sub-devices in the notifier tree. */
static void
-v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier,
- bool readd)
+v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
- struct v4l2_subdev *sd, *tmp;
+ struct v4l2_async_connection *asc, *asc_tmp;
- list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
+ list_for_each_entry_safe(asc, asc_tmp, &notifier->done_list,
+ asc_entry) {
struct v4l2_async_notifier *subdev_notifier =
- v4l2_async_find_subdev_notifier(sd);
+ v4l2_async_find_subdev_notifier(asc->sd);
if (subdev_notifier)
- v4l2_async_nf_unbind_all_subdevs(subdev_notifier, true);
-
- v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
- if (readd)
- list_add_tail(&sd->asd->list, &notifier->waiting);
- v4l2_async_cleanup(sd);
+ v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
- list_move(&sd->async_list, &subdev_list);
+ v4l2_async_unbind_subdev_one(notifier, asc);
}
notifier->parent = NULL;
@@ -441,106 +484,109 @@ v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier,
/* See if an async sub-device can be found in a notifier's lists. */
static bool
-__v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd)
+v4l2_async_nf_has_async_match_entry(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_match_desc *match)
{
- struct v4l2_async_subdev *asd_y;
- struct v4l2_subdev *sd;
+ struct v4l2_async_connection *asc;
- list_for_each_entry(asd_y, &notifier->waiting, list)
- if (asd_equal(asd, asd_y))
+ list_for_each_entry(asc, &notifier->waiting_list, asc_entry)
+ if (v4l2_async_match_equal(&asc->match, match))
return true;
- list_for_each_entry(sd, &notifier->done, async_list) {
- if (WARN_ON(!sd->asd))
- continue;
-
- if (asd_equal(asd, sd->asd))
+ list_for_each_entry(asc, &notifier->done_list, asc_entry)
+ if (v4l2_async_match_equal(&asc->match, match))
return true;
- }
return false;
}
/*
- * Find out whether an async sub-device was set up already or
- * whether it exists in a given notifier before @this_index.
- * If @this_index < 0, search the notifier's entire @asd_list.
+ * Find out whether an async sub-device was set up already or whether it exists
+ * in a given notifier.
*/
static bool
-v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd, int this_index)
+v4l2_async_nf_has_async_match(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_match_desc *match)
{
- struct v4l2_async_subdev *asd_y;
- int j = 0;
+ struct list_head *heads[] = {
+ &notifier->waiting_list,
+ &notifier->done_list,
+ };
+ unsigned int i;
lockdep_assert_held(&list_lock);
/* Check that an asd is not being added more than once. */
- list_for_each_entry(asd_y, &notifier->asd_list, asd_list) {
- if (this_index >= 0 && j++ >= this_index)
- break;
- if (asd_equal(asd, asd_y))
- return true;
+ for (i = 0; i < ARRAY_SIZE(heads); i++) {
+ struct v4l2_async_connection *asc;
+
+ list_for_each_entry(asc, heads[i], asc_entry) {
+ if (&asc->match == match)
+ continue;
+ if (v4l2_async_match_equal(&asc->match, match))
+ return true;
+ }
}
- /* Check that an asd does not exist in other notifiers. */
- list_for_each_entry(notifier, &notifier_list, list)
- if (__v4l2_async_nf_has_async_subdev(notifier, asd))
+ /* Check that an asc does not exist in other notifiers. */
+ list_for_each_entry(notifier, &notifier_list, notifier_entry)
+ if (v4l2_async_nf_has_async_match_entry(notifier, match))
return true;
return false;
}
-static int v4l2_async_nf_asd_valid(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd,
- int this_index)
+static int v4l2_async_nf_match_valid(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_match_desc *match)
{
- struct device *dev =
- notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;
-
- if (!asd)
- return -EINVAL;
+ struct device *dev = notifier_dev(notifier);
- switch (asd->match_type) {
- case V4L2_ASYNC_MATCH_I2C:
- case V4L2_ASYNC_MATCH_FWNODE:
- if (v4l2_async_nf_has_async_subdev(notifier, asd, this_index)) {
- dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
+ switch (match->type) {
+ case V4L2_ASYNC_MATCH_TYPE_I2C:
+ case V4L2_ASYNC_MATCH_TYPE_FWNODE:
+ if (v4l2_async_nf_has_async_match(notifier, match)) {
+ dev_dbg(dev, "v4l2-async: match descriptor already listed in a notifier\n");
return -EEXIST;
}
break;
default:
- dev_err(dev, "Invalid match type %u on %p\n",
- asd->match_type, asd);
+ dev_err(dev, "v4l2-async: Invalid match type %u on %p\n",
+ match->type, match);
return -EINVAL;
}
return 0;
}
-void v4l2_async_nf_init(struct v4l2_async_notifier *notifier)
+void v4l2_async_nf_init(struct v4l2_async_notifier *notifier,
+ struct v4l2_device *v4l2_dev)
{
- INIT_LIST_HEAD(&notifier->asd_list);
+ INIT_LIST_HEAD(&notifier->waiting_list);
+ INIT_LIST_HEAD(&notifier->done_list);
+ notifier->v4l2_dev = v4l2_dev;
}
EXPORT_SYMBOL(v4l2_async_nf_init);
-static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
+void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd)
{
- struct v4l2_async_subdev *asd;
- int ret, i = 0;
+ INIT_LIST_HEAD(&notifier->waiting_list);
+ INIT_LIST_HEAD(&notifier->done_list);
+ notifier->sd = sd;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_subdev_nf_init);
- INIT_LIST_HEAD(&notifier->waiting);
- INIT_LIST_HEAD(&notifier->done);
+static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
+{
+ struct v4l2_async_connection *asc;
+ int ret;
mutex_lock(&list_lock);
- list_for_each_entry(asd, &notifier->asd_list, asd_list) {
- ret = v4l2_async_nf_asd_valid(notifier, asd, i++);
+ list_for_each_entry(asc, &notifier->waiting_list, asc_entry) {
+ ret = v4l2_async_nf_match_valid(notifier, &asc->match);
if (ret)
goto err_unlock;
-
- list_add_tail(&asd->list, &notifier->waiting);
}
ret = v4l2_async_nf_try_all_subdevs(notifier);
@@ -552,7 +598,7 @@ static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
goto err_unbind;
/* Keep also completed notifiers on the list */
- list_add(&notifier->list, &notifier_list);
+ list_add(&notifier->notifier_entry, &notifier_list);
mutex_unlock(&list_lock);
@@ -562,7 +608,7 @@ err_unbind:
/*
* On failure, unbind all sub-devices registered through this notifier.
*/
- v4l2_async_nf_unbind_all_subdevs(notifier, false);
+ v4l2_async_nf_unbind_all_subdevs(notifier);
err_unlock:
mutex_unlock(&list_lock);
@@ -570,16 +616,13 @@ err_unlock:
return ret;
}
-int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
- struct v4l2_async_notifier *notifier)
+int v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
int ret;
- if (WARN_ON(!v4l2_dev || notifier->sd))
+ if (WARN_ON(!notifier->v4l2_dev == !notifier->sd))
return -EINVAL;
- notifier->v4l2_dev = v4l2_dev;
-
ret = __v4l2_async_nf_register(notifier);
if (ret)
notifier->v4l2_dev = NULL;
@@ -588,36 +631,15 @@ int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
}
EXPORT_SYMBOL(v4l2_async_nf_register);
-int v4l2_async_subdev_nf_register(struct v4l2_subdev *sd,
- struct v4l2_async_notifier *notifier)
-{
- int ret;
-
- if (WARN_ON(!sd || notifier->v4l2_dev))
- return -EINVAL;
-
- notifier->sd = sd;
-
- ret = __v4l2_async_nf_register(notifier);
- if (ret)
- notifier->sd = NULL;
-
- return ret;
-}
-EXPORT_SYMBOL(v4l2_async_subdev_nf_register);
-
static void
__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
return;
- v4l2_async_nf_unbind_all_subdevs(notifier, false);
-
- notifier->sd = NULL;
- notifier->v4l2_dev = NULL;
+ v4l2_async_nf_unbind_all_subdevs(notifier);
- list_del(&notifier->list);
+ list_del(&notifier->notifier_entry);
}
void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
@@ -632,24 +654,25 @@ EXPORT_SYMBOL(v4l2_async_nf_unregister);
static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
- struct v4l2_async_subdev *asd, *tmp;
+ struct v4l2_async_connection *asc, *tmp;
- if (!notifier || !notifier->asd_list.next)
+ if (!notifier || !notifier->waiting_list.next)
return;
- list_for_each_entry_safe(asd, tmp, &notifier->asd_list, asd_list) {
- switch (asd->match_type) {
- case V4L2_ASYNC_MATCH_FWNODE:
- fwnode_handle_put(asd->match.fwnode);
- break;
- default:
- break;
- }
+ WARN_ON(!list_empty(&notifier->done_list));
+
+ list_for_each_entry_safe(asc, tmp, &notifier->waiting_list, asc_entry) {
+ list_del(&asc->asc_entry);
+ v4l2_async_nf_call_destroy(notifier, asc);
- list_del(&asd->asd_list);
- v4l2_async_nf_call_destroy(notifier, asd);
- kfree(asd);
+ if (asc->match.type == V4L2_ASYNC_MATCH_TYPE_FWNODE)
+ fwnode_handle_put(asc->match.fwnode);
+
+ kfree(asc);
}
+
+ notifier->sd = NULL;
+ notifier->v4l2_dev = NULL;
}
void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
@@ -662,143 +685,156 @@ void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
}
EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup);
-int __v4l2_async_nf_add_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd)
+static void __v4l2_async_nf_add_connection(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_connection *asc)
{
- int ret;
-
mutex_lock(&list_lock);
- ret = v4l2_async_nf_asd_valid(notifier, asd, -1);
- if (ret)
- goto unlock;
+ list_add_tail(&asc->asc_entry, &notifier->waiting_list);
- list_add_tail(&asd->asd_list, &notifier->asd_list);
-
-unlock:
mutex_unlock(&list_lock);
- return ret;
}
-EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_subdev);
-struct v4l2_async_subdev *
+struct v4l2_async_connection *
__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
struct fwnode_handle *fwnode,
- unsigned int asd_struct_size)
+ unsigned int asc_struct_size)
{
- struct v4l2_async_subdev *asd;
- int ret;
+ struct v4l2_async_connection *asc;
- asd = kzalloc(asd_struct_size, GFP_KERNEL);
- if (!asd)
+ asc = kzalloc(asc_struct_size, GFP_KERNEL);
+ if (!asc)
return ERR_PTR(-ENOMEM);
- asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
- asd->match.fwnode = fwnode_handle_get(fwnode);
+ asc->notifier = notifier;
+ asc->match.type = V4L2_ASYNC_MATCH_TYPE_FWNODE;
+ asc->match.fwnode = fwnode_handle_get(fwnode);
- ret = __v4l2_async_nf_add_subdev(notifier, asd);
- if (ret) {
- fwnode_handle_put(fwnode);
- kfree(asd);
- return ERR_PTR(ret);
- }
+ __v4l2_async_nf_add_connection(notifier, asc);
- return asd;
+ return asc;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode);
-struct v4l2_async_subdev *
+struct v4l2_async_connection *
__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
struct fwnode_handle *endpoint,
- unsigned int asd_struct_size)
+ unsigned int asc_struct_size)
{
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asc;
struct fwnode_handle *remote;
remote = fwnode_graph_get_remote_endpoint(endpoint);
if (!remote)
return ERR_PTR(-ENOTCONN);
- asd = __v4l2_async_nf_add_fwnode(notif, remote, asd_struct_size);
+ asc = __v4l2_async_nf_add_fwnode(notif, remote, asc_struct_size);
/*
* Calling __v4l2_async_nf_add_fwnode grabs a refcount,
* so drop the one we got in fwnode_graph_get_remote_port_parent.
*/
fwnode_handle_put(remote);
- return asd;
+ return asc;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote);
-struct v4l2_async_subdev *
+struct v4l2_async_connection *
__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id,
- unsigned short address, unsigned int asd_struct_size)
+ unsigned short address, unsigned int asc_struct_size)
{
- struct v4l2_async_subdev *asd;
- int ret;
+ struct v4l2_async_connection *asc;
- asd = kzalloc(asd_struct_size, GFP_KERNEL);
- if (!asd)
+ asc = kzalloc(asc_struct_size, GFP_KERNEL);
+ if (!asc)
return ERR_PTR(-ENOMEM);
- asd->match_type = V4L2_ASYNC_MATCH_I2C;
- asd->match.i2c.adapter_id = adapter_id;
- asd->match.i2c.address = address;
+ asc->notifier = notifier;
+ asc->match.type = V4L2_ASYNC_MATCH_TYPE_I2C;
+ asc->match.i2c.adapter_id = adapter_id;
+ asc->match.i2c.address = address;
- ret = __v4l2_async_nf_add_subdev(notifier, asd);
- if (ret) {
- kfree(asd);
- return ERR_PTR(ret);
- }
+ __v4l2_async_nf_add_connection(notifier, asc);
- return asd;
+ return asc;
}
EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c);
+int v4l2_async_subdev_endpoint_add(struct v4l2_subdev *sd,
+ struct fwnode_handle *fwnode)
+{
+ struct v4l2_async_subdev_endpoint *ase;
+
+ ase = kmalloc(sizeof(*ase), GFP_KERNEL);
+ if (!ase)
+ return -ENOMEM;
+
+ ase->endpoint = fwnode;
+ list_add(&ase->async_subdev_endpoint_entry,
+ &sd->async_subdev_endpoint_list);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_async_subdev_endpoint_add);
+
+struct v4l2_async_connection *
+v4l2_async_connection_unique(struct v4l2_subdev *sd)
+{
+ if (!list_is_singular(&sd->asc_list))
+ return NULL;
+
+ return list_first_entry(&sd->asc_list,
+ struct v4l2_async_connection, asc_subdev_entry);
+}
+EXPORT_SYMBOL_GPL(v4l2_async_connection_unique);
+
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
struct v4l2_async_notifier *subdev_notifier;
struct v4l2_async_notifier *notifier;
+ struct v4l2_async_connection *asc;
int ret;
+ INIT_LIST_HEAD(&sd->asc_list);
+
/*
- * No reference taken. The reference is held by the device
- * (struct v4l2_subdev.dev), and async sub-device does not
- * exist independently of the device at any point of time.
+ * No reference taken. The reference is held by the device (struct
+ * v4l2_subdev.dev), and async sub-device does not exist independently
+ * of the device at any point of time.
+ *
+ * The async sub-device shall always be registered for its device node,
+ * not the endpoint node.
*/
- if (!sd->fwnode && sd->dev)
+ if (!sd->fwnode && sd->dev) {
sd->fwnode = dev_fwnode(sd->dev);
+ } else if (fwnode_graph_is_endpoint(sd->fwnode)) {
+ dev_warn(sd->dev, "sub-device fwnode is an endpoint!\n");
+ return -EINVAL;
+ }
mutex_lock(&list_lock);
- INIT_LIST_HEAD(&sd->async_list);
-
- list_for_each_entry(notifier, &notifier_list, list) {
+ list_for_each_entry(notifier, &notifier_list, notifier_entry) {
struct v4l2_device *v4l2_dev =
v4l2_async_nf_find_v4l2_dev(notifier);
- struct v4l2_async_subdev *asd;
if (!v4l2_dev)
continue;
- asd = v4l2_async_find_match(notifier, sd);
- if (!asd)
- continue;
+ while ((asc = v4l2_async_find_match(notifier, sd))) {
+ ret = v4l2_async_match_notify(notifier, v4l2_dev, sd,
+ asc);
+ if (ret)
+ goto err_unbind;
- ret = v4l2_async_match_notify(notifier, v4l2_dev, sd, asd);
- if (ret)
- goto err_unbind;
-
- ret = v4l2_async_nf_try_complete(notifier);
- if (ret)
- goto err_unbind;
-
- goto out_unlock;
+ ret = v4l2_async_nf_try_complete(notifier);
+ if (ret)
+ goto err_unbind;
+ }
}
/* None matched, wait for hot-plugging */
list_add(&sd->async_list, &subdev_list);
-out_unlock:
mutex_unlock(&list_lock);
return 0;
@@ -810,11 +846,10 @@ err_unbind:
*/
subdev_notifier = v4l2_async_find_subdev_notifier(sd);
if (subdev_notifier)
- v4l2_async_nf_unbind_all_subdevs(subdev_notifier, false);
+ v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
- if (sd->asd)
- v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
- v4l2_async_cleanup(sd);
+ if (asc)
+ v4l2_async_unbind_subdev_one(notifier, asc);
mutex_unlock(&list_lock);
@@ -824,6 +859,8 @@ EXPORT_SYMBOL(v4l2_async_register_subdev);
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
+ struct v4l2_async_connection *asc, *asc_tmp;
+
if (!sd->async_list.next)
return;
@@ -836,30 +873,34 @@ void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
kfree(sd->subdev_notifier);
sd->subdev_notifier = NULL;
- if (sd->asd) {
- struct v4l2_async_notifier *notifier = sd->notifier;
+ if (sd->asc_list.next) {
+ list_for_each_entry_safe(asc, asc_tmp, &sd->asc_list,
+ asc_subdev_entry) {
+ list_move(&asc->asc_entry,
+ &asc->notifier->waiting_list);
- list_add(&sd->asd->list, &notifier->waiting);
-
- v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_unbind_subdev_one(asc->notifier, asc);
+ list_del(&asc->asc_subdev_entry);
+ }
}
- v4l2_async_cleanup(sd);
+ list_del(&sd->async_list);
+ sd->async_list.next = NULL;
mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);
-static void print_waiting_subdev(struct seq_file *s,
- struct v4l2_async_subdev *asd)
+static void print_waiting_match(struct seq_file *s,
+ struct v4l2_async_match_desc *match)
{
- switch (asd->match_type) {
- case V4L2_ASYNC_MATCH_I2C:
- seq_printf(s, " [i2c] dev=%d-%04x\n", asd->match.i2c.adapter_id,
- asd->match.i2c.address);
+ switch (match->type) {
+ case V4L2_ASYNC_MATCH_TYPE_I2C:
+ seq_printf(s, " [i2c] dev=%d-%04x\n", match->i2c.adapter_id,
+ match->i2c.address);
break;
- case V4L2_ASYNC_MATCH_FWNODE: {
- struct fwnode_handle *devnode, *fwnode = asd->match.fwnode;
+ case V4L2_ASYNC_MATCH_TYPE_FWNODE: {
+ struct fwnode_handle *devnode, *fwnode = match->fwnode;
devnode = fwnode_graph_is_endpoint(fwnode) ?
fwnode_graph_get_port_parent(fwnode) :
@@ -889,14 +930,14 @@ v4l2_async_nf_name(struct v4l2_async_notifier *notifier)
static int pending_subdevs_show(struct seq_file *s, void *data)
{
struct v4l2_async_notifier *notif;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asc;
mutex_lock(&list_lock);
- list_for_each_entry(notif, &notifier_list, list) {
+ list_for_each_entry(notif, &notifier_list, notifier_entry) {
seq_printf(s, "%s:\n", v4l2_async_nf_name(notif));
- list_for_each_entry(asd, &notif->waiting, list)
- print_waiting_subdev(s, asd);
+ list_for_each_entry(asc, &notif->waiting_list, asc_entry)
+ print_waiting_match(s, &asc->match);
}
mutex_unlock(&list_lock);
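
With this rework struct v4l2_async_subdev becomes struct v4l2_async_connection: v4l2_async_nf_init() now takes the struct v4l2_device up front, and v4l2_async_nf_register() takes only the notifier. A minimal sketch of how a bridge driver might use the new shape; the single-endpoint lookup, the "my_bridge" naming and the error handling are illustrative only, not taken from this patch:

    #include <linux/err.h>
    #include <linux/property.h>
    #include <media/v4l2-async.h>
    #include <media/v4l2-device.h>
    #include <media/v4l2-fwnode.h>

    static int my_bridge_setup_notifier(struct device *dev,
                                        struct v4l2_device *v4l2_dev,
                                        struct v4l2_async_notifier *nf)
    {
            struct v4l2_async_connection *asc;
            struct fwnode_handle *ep;
            int ret;

            /* The notifier is now tied to the v4l2_device at init time. */
            v4l2_async_nf_init(nf, v4l2_dev);

            ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0, 0);
            if (!ep)
                    return -ENODEV;

            /* Connections replace the old struct v4l2_async_subdev entries. */
            asc = v4l2_async_nf_add_fwnode_remote(nf, ep,
                                                  struct v4l2_async_connection);
            fwnode_handle_put(ep);
            if (IS_ERR(asc)) {
                    v4l2_async_nf_cleanup(nf);
                    return PTR_ERR(asc);
            }

            /* Registration no longer takes the v4l2_device separately. */
            ret = v4l2_async_nf_register(nf);
            if (ret)
                    v4l2_async_nf_cleanup(nf);

            return ret;
    }
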
diff --git a/drivers/media/v4l2-core/v4l2-cci.c b/drivers/media/v4l2-core/v4l2-cci.c
new file mode 100644
index 000000000000..bc2dbec019b0
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-cci.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MIPI Camera Control Interface (CCI) register access helpers.
+ *
+ * Copyright (C) 2023 Hans de Goede <hansg@kernel.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+#include <media/v4l2-cci.h>
+
+int cci_read(struct regmap *map, u32 reg, u64 *val, int *err)
+{
+ unsigned int len;
+ u8 buf[8];
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ len = FIELD_GET(CCI_REG_WIDTH_MASK, reg);
+ reg = FIELD_GET(CCI_REG_ADDR_MASK, reg);
+
+ ret = regmap_bulk_read(map, reg, buf, len);
+ if (ret) {
+ dev_err(regmap_get_device(map), "Error reading reg 0x%4x: %d\n",
+ reg, ret);
+ goto out;
+ }
+
+ switch (len) {
+ case 1:
+ *val = buf[0];
+ break;
+ case 2:
+ *val = get_unaligned_be16(buf);
+ break;
+ case 3:
+ *val = get_unaligned_be24(buf);
+ break;
+ case 4:
+ *val = get_unaligned_be32(buf);
+ break;
+ case 8:
+ *val = get_unaligned_be64(buf);
+ break;
+ default:
+ dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n",
+ len, reg);
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cci_read);
+
+int cci_write(struct regmap *map, u32 reg, u64 val, int *err)
+{
+ unsigned int len;
+ u8 buf[8];
+ int ret;
+
+ if (err && *err)
+ return *err;
+
+ len = FIELD_GET(CCI_REG_WIDTH_MASK, reg);
+ reg = FIELD_GET(CCI_REG_ADDR_MASK, reg);
+
+ switch (len) {
+ case 1:
+ buf[0] = val;
+ break;
+ case 2:
+ put_unaligned_be16(val, buf);
+ break;
+ case 3:
+ put_unaligned_be24(val, buf);
+ break;
+ case 4:
+ put_unaligned_be32(val, buf);
+ break;
+ case 8:
+ put_unaligned_be64(val, buf);
+ break;
+ default:
+ dev_err(regmap_get_device(map), "Error invalid reg-width %u for reg 0x%04x\n",
+ len, reg);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = regmap_bulk_write(map, reg, buf, len);
+ if (ret)
+ dev_err(regmap_get_device(map), "Error writing reg 0x%4x: %d\n",
+ reg, ret);
+
+out:
+ if (ret && err)
+ *err = ret;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cci_write);
+
+int cci_update_bits(struct regmap *map, u32 reg, u64 mask, u64 val, int *err)
+{
+ u64 readval;
+ int ret;
+
+ ret = cci_read(map, reg, &readval, err);
+ if (ret)
+ return ret;
+
+ val = (readval & ~mask) | (val & mask);
+
+ return cci_write(map, reg, val, err);
+}
+EXPORT_SYMBOL_GPL(cci_update_bits);
+
+int cci_multi_reg_write(struct regmap *map, const struct cci_reg_sequence *regs,
+ unsigned int num_regs, int *err)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_regs; i++) {
+ ret = cci_write(map, regs[i].reg, regs[i].val, err);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cci_multi_reg_write);
+
+#if IS_ENABLED(CONFIG_V4L2_CCI_I2C)
+struct regmap *devm_cci_regmap_init_i2c(struct i2c_client *client,
+ int reg_addr_bits)
+{
+ struct regmap_config config = {
+ .reg_bits = reg_addr_bits,
+ .val_bits = 8,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .disable_locking = true,
+ };
+
+ return devm_regmap_init_i2c(client, &config);
+}
+EXPORT_SYMBOL_GPL(devm_cci_regmap_init_i2c);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_DESCRIPTION("MIPI Camera Control Interface (CCI) support");
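
The cci_read()/cci_write() helpers above take an optional int *err so that a series of register accesses can be chained and checked once: if *err is already set, the call returns immediately, and the first failure is preserved. A minimal usage sketch; the register names, addresses and the CCI_REG8()/CCI_REG16() width-encoding macros (expected in media/v4l2-cci.h, not shown in this diff) are assumptions for illustration:

    #include <linux/bits.h>
    #include <linux/err.h>
    #include <linux/i2c.h>
    #include <linux/regmap.h>
    #include <media/v4l2-cci.h>

    /* Hypothetical sensor registers; real addresses come from the datasheet. */
    #define MY_SENSOR_REG_MODE      CCI_REG8(0x0100)
    #define MY_SENSOR_REG_GAIN      CCI_REG16(0x0204)

    static int my_sensor_start_streaming(struct i2c_client *client, u16 gain)
    {
            struct regmap *map;
            u64 mode;
            int ret = 0;

            /* 16-bit register addresses, 8-bit values (big-endian addressing). */
            map = devm_cci_regmap_init_i2c(client, 16);
            if (IS_ERR(map))
                    return PTR_ERR(map);

            /*
             * With a non-NULL int *err the calls chain: after the first
             * failure the remaining accesses become no-ops and the first
             * error code is returned at the end.
             */
            cci_write(map, MY_SENSOR_REG_GAIN, gain, &ret);
            cci_update_bits(map, MY_SENSOR_REG_MODE, BIT(0), BIT(0), &ret);
            cci_read(map, MY_SENSOR_REG_MODE, &mode, &ret);

            return ret;
    }
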
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index bee1535b04d3..3a4b15a98e02 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -262,6 +262,10 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{ .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_Y212, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 },
+ { .format = V4L2_PIX_FMT_MT2110T, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
+ .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},
+ { .format = V4L2_PIX_FMT_MT2110R, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2,
+ .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }},
/* YUV planar formats */
{ .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 },
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 049c2f2001ea..7f181fbbb140 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -568,19 +568,29 @@ int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode,
link->local_id = fwep.id;
link->local_port = fwep.port;
link->local_node = fwnode_graph_get_port_parent(fwnode);
+ if (!link->local_node)
+ return -ENOLINK;
fwnode = fwnode_graph_get_remote_endpoint(fwnode);
- if (!fwnode) {
- fwnode_handle_put(fwnode);
- return -ENOLINK;
- }
+ if (!fwnode)
+ goto err_put_local_node;
fwnode_graph_parse_endpoint(fwnode, &fwep);
link->remote_id = fwep.id;
link->remote_port = fwep.port;
link->remote_node = fwnode_graph_get_port_parent(fwnode);
+ if (!link->remote_node)
+ goto err_put_remote_endpoint;
return 0;
+
+err_put_remote_endpoint:
+ fwnode_handle_put(fwnode);
+
+err_put_local_node:
+ fwnode_handle_put(link->local_node);
+
+ return -ENOLINK;
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_parse_link);
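
With the error paths above balanced, a successful v4l2_fwnode_parse_link() leaves the caller holding references on link.local_node and link.remote_node, while a failure leaves nothing to put. A minimal sketch of the caller side, assuming the existing v4l2_fwnode_put_link() counterpart that drops both references; the calling context is purely illustrative:

    #include <linux/property.h>
    #include <media/v4l2-fwnode.h>

    static int my_validate_link(struct fwnode_handle *endpoint)
    {
            struct v4l2_fwnode_link link;
            int ret;

            ret = v4l2_fwnode_parse_link(endpoint, &link);
            if (ret)
                    return ret;     /* -ENOLINK; no references left dangling */

            /* ... inspect link.local_port / link.remote_port here ... */

            /* Drop the local_node and remote_node references. */
            v4l2_fwnode_put_link(&link);

            return 0;
    }
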
@@ -798,103 +808,6 @@ int v4l2_fwnode_device_parse(struct device *dev,
}
EXPORT_SYMBOL_GPL(v4l2_fwnode_device_parse);
-static int
-v4l2_async_nf_fwnode_parse_endpoint(struct device *dev,
- struct v4l2_async_notifier *notifier,
- struct fwnode_handle *endpoint,
- unsigned int asd_struct_size,
- parse_endpoint_func parse_endpoint)
-{
- struct v4l2_fwnode_endpoint vep = { .bus_type = 0 };
- struct v4l2_async_subdev *asd;
- int ret;
-
- asd = kzalloc(asd_struct_size, GFP_KERNEL);
- if (!asd)
- return -ENOMEM;
-
- asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
- asd->match.fwnode =
- fwnode_graph_get_remote_port_parent(endpoint);
- if (!asd->match.fwnode) {
- dev_dbg(dev, "no remote endpoint found\n");
- ret = -ENOTCONN;
- goto out_err;
- }
-
- ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &vep);
- if (ret) {
- dev_warn(dev, "unable to parse V4L2 fwnode endpoint (%d)\n",
- ret);
- goto out_err;
- }
-
- ret = parse_endpoint ? parse_endpoint(dev, &vep, asd) : 0;
- if (ret == -ENOTCONN)
- dev_dbg(dev, "ignoring port@%u/endpoint@%u\n", vep.base.port,
- vep.base.id);
- else if (ret < 0)
- dev_warn(dev,
- "driver could not parse port@%u/endpoint@%u (%d)\n",
- vep.base.port, vep.base.id, ret);
- v4l2_fwnode_endpoint_free(&vep);
- if (ret < 0)
- goto out_err;
-
- ret = __v4l2_async_nf_add_subdev(notifier, asd);
- if (ret < 0) {
- /* not an error if asd already exists */
- if (ret == -EEXIST)
- ret = 0;
- goto out_err;
- }
-
- return 0;
-
-out_err:
- fwnode_handle_put(asd->match.fwnode);
- kfree(asd);
-
- return ret == -ENOTCONN ? 0 : ret;
-}
-
-int
-v4l2_async_nf_parse_fwnode_endpoints(struct device *dev,
- struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- parse_endpoint_func parse_endpoint)
-{
- struct fwnode_handle *fwnode;
- int ret = 0;
-
- if (WARN_ON(asd_struct_size < sizeof(struct v4l2_async_subdev)))
- return -EINVAL;
-
- fwnode_graph_for_each_endpoint(dev_fwnode(dev), fwnode) {
- struct fwnode_handle *dev_fwnode;
- bool is_available;
-
- dev_fwnode = fwnode_graph_get_port_parent(fwnode);
- is_available = fwnode_device_is_available(dev_fwnode);
- fwnode_handle_put(dev_fwnode);
- if (!is_available)
- continue;
-
-
- ret = v4l2_async_nf_fwnode_parse_endpoint(dev, notifier,
- fwnode,
- asd_struct_size,
- parse_endpoint);
- if (ret < 0)
- break;
- }
-
- fwnode_handle_put(fwnode);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(v4l2_async_nf_parse_fwnode_endpoints);
-
/*
* v4l2_fwnode_reference_parse - parse references for async sub-devices
* @dev: the device node the properties of which are parsed for references
@@ -918,10 +831,10 @@ static int v4l2_fwnode_reference_parse(struct device *dev,
!(ret = fwnode_property_get_reference_args(dev_fwnode(dev), prop,
NULL, 0, index, &args));
index++) {
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
asd = v4l2_async_nf_add_fwnode(notifier, args.fwnode,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
fwnode_handle_put(args.fwnode);
if (IS_ERR(asd)) {
/* not an error if asd already exists */
@@ -1223,10 +1136,10 @@ v4l2_fwnode_reference_parse_int_props(struct device *dev,
props,
nprops)));
index++) {
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
asd = v4l2_async_nf_add_fwnode(notifier, fwnode,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
fwnode_handle_put(fwnode);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
@@ -1302,7 +1215,7 @@ int v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd)
if (!notifier)
return -ENOMEM;
- v4l2_async_nf_init(notifier);
+ v4l2_async_subdev_nf_init(notifier, sd);
ret = v4l2_subdev_get_privacy_led(sd);
if (ret < 0)
@@ -1312,7 +1225,7 @@ int v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd)
if (ret < 0)
goto out_cleanup;
- ret = v4l2_async_subdev_nf_register(sd, notifier);
+ ret = v4l2_async_nf_register(notifier);
if (ret < 0)
goto out_cleanup;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 01ba27f2ef87..f4d9d6279094 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1508,6 +1508,8 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_QC10C: descr = "QCOM Compressed 10-bit Format"; break;
case V4L2_PIX_FMT_AJPG: descr = "Aspeed JPEG"; break;
case V4L2_PIX_FMT_AV1_FRAME: descr = "AV1 Frame"; break;
+ case V4L2_PIX_FMT_MT2110T: descr = "Mediatek 10bit Tile Mode"; break;
+ case V4L2_PIX_FMT_MT2110R: descr = "Mediatek 10bit Raster Mode"; break;
default:
if (fmt->description[0])
return;
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 2ec179cd1264..b92348ad61f6 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -200,9 +200,6 @@ static inline int check_format(struct v4l2_subdev *sd,
if (!format)
return -EINVAL;
- if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
- format->stream = 0;
-
return check_which(format->which) ? : check_pad(sd, format->pad) ? :
check_state(sd, state, format->which, format->pad, format->stream);
}
@@ -230,9 +227,6 @@ static int call_enum_mbus_code(struct v4l2_subdev *sd,
if (!code)
return -EINVAL;
- if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
- code->stream = 0;
-
return check_which(code->which) ? : check_pad(sd, code->pad) ? :
check_state(sd, state, code->which, code->pad, code->stream) ? :
sd->ops->pad->enum_mbus_code(sd, state, code);
@@ -245,9 +239,6 @@ static int call_enum_frame_size(struct v4l2_subdev *sd,
if (!fse)
return -EINVAL;
- if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
- fse->stream = 0;
-
return check_which(fse->which) ? : check_pad(sd, fse->pad) ? :
check_state(sd, state, fse->which, fse->pad, fse->stream) ? :
sd->ops->pad->enum_frame_size(sd, state, fse);
@@ -283,9 +274,6 @@ static int call_enum_frame_interval(struct v4l2_subdev *sd,
if (!fie)
return -EINVAL;
- if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
- fie->stream = 0;
-
return check_which(fie->which) ? : check_pad(sd, fie->pad) ? :
check_state(sd, state, fie->which, fie->pad, fie->stream) ? :
sd->ops->pad->enum_frame_interval(sd, state, fie);
@@ -298,9 +286,6 @@ static inline int check_selection(struct v4l2_subdev *sd,
if (!sel)
return -EINVAL;
- if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
- sel->stream = 0;
-
return check_which(sel->which) ? : check_pad(sd, sel->pad) ? :
check_state(sd, state, sel->which, sel->pad, sel->stream);
}
@@ -1467,8 +1452,20 @@ EXPORT_SYMBOL_GPL(__v4l2_subdev_init_finalize);
void v4l2_subdev_cleanup(struct v4l2_subdev *sd)
{
+ struct v4l2_async_subdev_endpoint *ase, *ase_tmp;
+
__v4l2_subdev_state_free(sd->active_state);
sd->active_state = NULL;
+
+ if (list_empty(&sd->async_subdev_endpoint_list))
+ return;
+
+ list_for_each_entry_safe(ase, ase_tmp, &sd->async_subdev_endpoint_list,
+ async_subdev_endpoint_entry) {
+ list_del(&ase->async_subdev_endpoint_entry);
+
+ kfree(ase);
+ }
}
EXPORT_SYMBOL_GPL(v4l2_subdev_cleanup);
@@ -1605,7 +1602,7 @@ EXPORT_SYMBOL_GPL(__v4l2_subdev_next_active_route);
int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
- struct v4l2_subdev_krouting *routing,
+ const struct v4l2_subdev_krouting *routing,
const struct v4l2_mbus_framefmt *fmt)
{
struct v4l2_subdev_stream_configs *stream_configs;
@@ -1992,11 +1989,16 @@ int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad,
goto done;
}
+ dev_dbg(dev, "enable streams %u:%#llx\n", pad, streams_mask);
+
/* Call the .enable_streams() operation. */
ret = v4l2_subdev_call(sd, pad, enable_streams, state, pad,
streams_mask);
- if (ret)
+ if (ret) {
+ dev_dbg(dev, "enable streams %u:%#llx failed: %d\n", pad,
+ streams_mask, ret);
goto done;
+ }
/* Mark the streams as enabled. */
for (i = 0; i < state->stream_configs.num_configs; ++i) {
@@ -2104,11 +2106,16 @@ int v4l2_subdev_disable_streams(struct v4l2_subdev *sd, u32 pad,
goto done;
}
+ dev_dbg(dev, "disable streams %u:%#llx\n", pad, streams_mask);
+
/* Call the .disable_streams() operation. */
ret = v4l2_subdev_call(sd, pad, disable_streams, state, pad,
streams_mask);
- if (ret)
+ if (ret) {
+ dev_dbg(dev, "disable streams %u:%#llx failed: %d\n", pad,
+ streams_mask, ret);
goto done;
+ }
/* Mark the streams as disabled. */
for (i = 0; i < state->stream_configs.num_configs; ++i) {
@@ -2182,6 +2189,7 @@ void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
sd->dev_priv = NULL;
sd->host_priv = NULL;
sd->privacy_led = NULL;
+ INIT_LIST_HEAD(&sd->async_subdev_endpoint_list);
#if defined(CONFIG_MEDIA_CONTROLLER)
sd->entity.name = sd->name;
sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 85be64579fc9..aea95745c73f 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1220,7 +1220,7 @@ config MFD_RC5T583
different functionality of the device.
config MFD_RK8XX
- bool
+ tristate
select MFD_CORE
config MFD_RK8XX_I2C
@@ -1371,8 +1371,9 @@ config MFD_SC27XX_PMIC
and it also adds the irq_chip parts for handling the PMIC chip events.
config RZ_MTU3
- bool "Renesas RZ/G2L MTU3a core driver"
+ tristate "Renesas RZ/G2L MTU3a core driver"
depends on (ARCH_RZG2L && OF) || COMPILE_TEST
+ select MFD_CORE
help
Select this option to enable Renesas RZ/G2L MTU3a core driver for
the Multi-Function Timer Pulse Unit 3 (MTU3a) hardware available
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 9d9e9787d5e8..15c95828b09a 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -21,7 +21,6 @@
#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/of.h>
-#include <linux/of_device.h>
/*
* Interrupt register offsets
diff --git a/drivers/mfd/acer-ec-a500.c b/drivers/mfd/acer-ec-a500.c
index feb757e90dc3..79405835ff8e 100644
--- a/drivers/mfd/acer-ec-a500.c
+++ b/drivers/mfd/acer-ec-a500.c
@@ -9,7 +9,7 @@
#include <linux/i2c.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
diff --git a/drivers/mfd/act8945a.c b/drivers/mfd/act8945a.c
index 2406fcdff5f9..4e32ac3d573e 100644
--- a/drivers/mfd/act8945a.c
+++ b/drivers/mfd/act8945a.c
@@ -10,7 +10,7 @@
#include <linux/i2c.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
static const struct mfd_cell act8945a_devs[] = {
@@ -68,7 +68,7 @@ MODULE_DEVICE_TABLE(of, act8945a_of_match);
static struct i2c_driver act8945a_i2c_driver = {
.driver = {
.name = "act8945a",
- .of_match_table = of_match_ptr(act8945a_of_match),
+ .of_match_table = act8945a_of_match,
},
.probe = act8945a_i2c_probe,
.id_table = act8945a_i2c_id,
diff --git a/drivers/mfd/altera-a10sr.c b/drivers/mfd/altera-a10sr.c
index 34ef526f4aee..d53e433ab5c1 100644
--- a/drivers/mfd/altera-a10sr.c
+++ b/drivers/mfd/altera-a10sr.c
@@ -163,7 +163,7 @@ static struct spi_driver altr_a10sr_spi_driver = {
.probe = altr_a10sr_spi_probe,
.driver = {
.name = "altr_a10sr",
- .of_match_table = of_match_ptr(altr_a10sr_spi_of_match),
+ .of_match_table = altr_a10sr_spi_of_match,
},
.id_table = altr_a10sr_spi_ids,
};
diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c
index af205813b281..0e52bd2ebd74 100644
--- a/drivers/mfd/altera-sysmgr.c
+++ b/drivers/mfd/altera-sysmgr.c
@@ -14,8 +14,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index c166fcd331f1..19a0adf8ce3d 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -15,7 +15,6 @@
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/mfd/atc260x-core.c b/drivers/mfd/atc260x-core.c
index 7c5de3ae776e..67473b58b03d 100644
--- a/drivers/mfd/atc260x-core.c
+++ b/drivers/mfd/atc260x-core.c
@@ -11,7 +11,6 @@
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#define ATC260X_CHIP_REV_MAX 31
diff --git a/drivers/mfd/atmel-hlcdc.c b/drivers/mfd/atmel-hlcdc.c
index 3c2414ba4b01..20de7f49a830 100644
--- a/drivers/mfd/atmel-hlcdc.c
+++ b/drivers/mfd/atmel-hlcdc.c
@@ -83,7 +83,6 @@ static int atmel_hlcdc_probe(struct platform_device *pdev)
struct atmel_hlcdc_regmap *hregmap;
struct device *dev = &pdev->dev;
struct atmel_hlcdc *hlcdc;
- struct resource *res;
hregmap = devm_kzalloc(dev, sizeof(*hregmap), GFP_KERNEL);
if (!hregmap)
@@ -93,8 +92,7 @@ static int atmel_hlcdc_probe(struct platform_device *pdev)
if (!hlcdc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hregmap->regs = devm_ioremap_resource(dev, res);
+ hregmap->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hregmap->regs))
return PTR_ERR(hregmap->regs);
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index c03bc5cda080..87603eeaa277 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -342,7 +342,7 @@ static const struct regmap_config axp152_regmap_config = {
.wr_table = &axp152_writeable_table,
.volatile_table = &axp152_volatile_table,
.max_register = AXP152_PWM1_DUTY_CYCLE,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config axp192_regmap_config = {
@@ -360,7 +360,7 @@ static const struct regmap_config axp20x_regmap_config = {
.wr_table = &axp20x_writeable_table,
.volatile_table = &axp20x_volatile_table,
.max_register = AXP20X_OCV(AXP20X_OCV_MAX),
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config axp22x_regmap_config = {
@@ -369,7 +369,7 @@ static const struct regmap_config axp22x_regmap_config = {
.wr_table = &axp22x_writeable_table,
.volatile_table = &axp22x_volatile_table,
.max_register = AXP22X_BATLOW_THRES1,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config axp288_regmap_config = {
@@ -378,7 +378,7 @@ static const struct regmap_config axp288_regmap_config = {
.wr_table = &axp288_writeable_table,
.volatile_table = &axp288_volatile_table,
.max_register = AXP288_FG_TUNE5,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config axp313a_regmap_config = {
@@ -396,7 +396,7 @@ static const struct regmap_config axp806_regmap_config = {
.wr_table = &axp806_writeable_table,
.volatile_table = &axp806_volatile_table,
.max_register = AXP806_REG_ADDR_EXT,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct regmap_config axp15060_regmap_config = {
@@ -405,7 +405,7 @@ static const struct regmap_config axp15060_regmap_config = {
.wr_table = &axp15060_writeable_table,
.volatile_table = &axp15060_volatile_table,
.max_register = AXP15060_IRQ2_STATE,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
#define INIT_REGMAP_IRQ(_variant, _irq, _off, _mask) \
diff --git a/drivers/mfd/bcm590xx.c b/drivers/mfd/bcm590xx.c
index 9f39b46b87f4..92eede9a5e61 100644
--- a/drivers/mfd/bcm590xx.c
+++ b/drivers/mfd/bcm590xx.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 92f4dfccc3cc..79d393b602bf 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -10,7 +10,7 @@
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/platform_data/cros_ec_chardev.h>
#include <linux/platform_data/cros_ec_commands.h>
diff --git a/drivers/mfd/cs47l15-tables.c b/drivers/mfd/cs47l15-tables.c
index 3c77f0a24e9b..59b005cc1e33 100644
--- a/drivers/mfd/cs47l15-tables.c
+++ b/drivers/mfd/cs47l15-tables.c
@@ -1249,7 +1249,7 @@ const struct regmap_config cs47l15_16bit_spi_regmap = {
.readable_reg = &cs47l15_16bit_readable_register,
.volatile_reg = &cs47l15_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l15_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l15_reg_default),
};
@@ -1264,7 +1264,7 @@ const struct regmap_config cs47l15_16bit_i2c_regmap = {
.readable_reg = &cs47l15_16bit_readable_register,
.volatile_reg = &cs47l15_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l15_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l15_reg_default),
};
@@ -1281,7 +1281,7 @@ const struct regmap_config cs47l15_32bit_spi_regmap = {
.readable_reg = &cs47l15_32bit_readable_register,
.volatile_reg = &cs47l15_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l15_32bit_spi_regmap);
@@ -1295,6 +1295,6 @@ const struct regmap_config cs47l15_32bit_i2c_regmap = {
.readable_reg = &cs47l15_32bit_readable_register,
.volatile_reg = &cs47l15_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l15_32bit_i2c_regmap);
diff --git a/drivers/mfd/cs47l24-tables.c b/drivers/mfd/cs47l24-tables.c
index c289d92a5c1d..878dfd298a17 100644
--- a/drivers/mfd/cs47l24-tables.c
+++ b/drivers/mfd/cs47l24-tables.c
@@ -1616,7 +1616,7 @@ const struct regmap_config cs47l24_spi_regmap = {
.readable_reg = cs47l24_readable_register,
.volatile_reg = cs47l24_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l24_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l24_reg_default),
};
diff --git a/drivers/mfd/cs47l35-tables.c b/drivers/mfd/cs47l35-tables.c
index a0bc6c5100d6..274f4b05850a 100644
--- a/drivers/mfd/cs47l35-tables.c
+++ b/drivers/mfd/cs47l35-tables.c
@@ -1498,7 +1498,7 @@ const struct regmap_config cs47l35_16bit_spi_regmap = {
.readable_reg = cs47l35_16bit_readable_register,
.volatile_reg = cs47l35_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l35_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l35_reg_default),
};
@@ -1515,7 +1515,7 @@ const struct regmap_config cs47l35_16bit_i2c_regmap = {
.readable_reg = cs47l35_16bit_readable_register,
.volatile_reg = cs47l35_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l35_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l35_reg_default),
};
@@ -1534,7 +1534,7 @@ const struct regmap_config cs47l35_32bit_spi_regmap = {
.readable_reg = cs47l35_32bit_readable_register,
.volatile_reg = cs47l35_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l35_32bit_spi_regmap);
@@ -1550,6 +1550,6 @@ const struct regmap_config cs47l35_32bit_i2c_regmap = {
.readable_reg = cs47l35_32bit_readable_register,
.volatile_reg = cs47l35_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l35_32bit_i2c_regmap);
diff --git a/drivers/mfd/cs47l85-tables.c b/drivers/mfd/cs47l85-tables.c
index 270d8eda3f5f..f397894827ce 100644
--- a/drivers/mfd/cs47l85-tables.c
+++ b/drivers/mfd/cs47l85-tables.c
@@ -2836,7 +2836,7 @@ const struct regmap_config cs47l85_16bit_spi_regmap = {
.readable_reg = cs47l85_16bit_readable_register,
.volatile_reg = cs47l85_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l85_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l85_reg_default),
};
@@ -2853,7 +2853,7 @@ const struct regmap_config cs47l85_16bit_i2c_regmap = {
.readable_reg = cs47l85_16bit_readable_register,
.volatile_reg = cs47l85_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l85_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l85_reg_default),
};
@@ -2872,7 +2872,7 @@ const struct regmap_config cs47l85_32bit_spi_regmap = {
.readable_reg = cs47l85_32bit_readable_register,
.volatile_reg = cs47l85_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l85_32bit_spi_regmap);
@@ -2888,6 +2888,6 @@ const struct regmap_config cs47l85_32bit_i2c_regmap = {
.readable_reg = cs47l85_32bit_readable_register,
.volatile_reg = cs47l85_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l85_32bit_i2c_regmap);
diff --git a/drivers/mfd/cs47l90-tables.c b/drivers/mfd/cs47l90-tables.c
index 7345fc09c0bb..6f9ceb36c533 100644
--- a/drivers/mfd/cs47l90-tables.c
+++ b/drivers/mfd/cs47l90-tables.c
@@ -2539,7 +2539,7 @@ const struct regmap_config cs47l90_16bit_spi_regmap = {
.readable_reg = cs47l90_16bit_readable_register,
.volatile_reg = cs47l90_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l90_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l90_reg_default),
};
@@ -2556,7 +2556,7 @@ const struct regmap_config cs47l90_16bit_i2c_regmap = {
.readable_reg = cs47l90_16bit_readable_register,
.volatile_reg = cs47l90_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l90_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l90_reg_default),
};
@@ -2575,7 +2575,7 @@ const struct regmap_config cs47l90_32bit_spi_regmap = {
.readable_reg = cs47l90_32bit_readable_register,
.volatile_reg = cs47l90_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l90_32bit_spi_regmap);
@@ -2591,6 +2591,6 @@ const struct regmap_config cs47l90_32bit_i2c_regmap = {
.readable_reg = cs47l90_32bit_readable_register,
.volatile_reg = cs47l90_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l90_32bit_i2c_regmap);
diff --git a/drivers/mfd/cs47l92-tables.c b/drivers/mfd/cs47l92-tables.c
index f296e355df4d..4d9ba865aaf6 100644
--- a/drivers/mfd/cs47l92-tables.c
+++ b/drivers/mfd/cs47l92-tables.c
@@ -1890,7 +1890,7 @@ const struct regmap_config cs47l92_16bit_spi_regmap = {
.readable_reg = &cs47l92_16bit_readable_register,
.volatile_reg = &cs47l92_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l92_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l92_reg_default),
};
@@ -1907,7 +1907,7 @@ const struct regmap_config cs47l92_16bit_i2c_regmap = {
.readable_reg = &cs47l92_16bit_readable_register,
.volatile_reg = &cs47l92_16bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = cs47l92_reg_default,
.num_reg_defaults = ARRAY_SIZE(cs47l92_reg_default),
};
@@ -1926,7 +1926,7 @@ const struct regmap_config cs47l92_32bit_spi_regmap = {
.readable_reg = &cs47l92_32bit_readable_register,
.volatile_reg = &cs47l92_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l92_32bit_spi_regmap);
@@ -1942,6 +1942,6 @@ const struct regmap_config cs47l92_32bit_i2c_regmap = {
.readable_reg = &cs47l92_32bit_readable_register,
.volatile_reg = &cs47l92_32bit_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_GPL(cs47l92_32bit_i2c_regmap);
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 541e2d47677e..fd000a21bcba 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -13,14 +13,11 @@
#include <linux/mfd/core.h>
#include <linux/i2c.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
-#ifdef CONFIG_OF
-#include <linux/of.h>
-#include <linux/of_device.h>
-#endif
/* I2C safe register check */
static inline bool i2c_safe_reg(unsigned char reg)
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index bbaf4f07f274..9a5f51b60bad 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -11,7 +11,6 @@
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mfd/da9055/core.h>
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index 48f58b6f5629..45da007d3e70 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -9,7 +9,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/interrupt.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c
index 166cd21088cd..1506d8d352b1 100644
--- a/drivers/mfd/exynos-lpass.c
+++ b/drivers/mfd/exynos-lpass.c
@@ -109,14 +109,12 @@ static int exynos_lpass_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct exynos_lpass *lpass;
void __iomem *base_top;
- struct resource *res;
lpass = devm_kzalloc(dev, sizeof(*lpass), GFP_KERNEL);
if (!lpass)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base_top = devm_ioremap_resource(dev, res);
+ base_top = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base_top))
return PTR_ERR(base_top);
diff --git a/drivers/mfd/hi6421-pmic-core.c b/drivers/mfd/hi6421-pmic-core.c
index cb5cf4a81c06..a6a890537a1e 100644
--- a/drivers/mfd/hi6421-pmic-core.c
+++ b/drivers/mfd/hi6421-pmic-core.c
@@ -59,7 +59,7 @@ static int hi6421_pmic_probe(struct platform_device *pdev)
id = of_match_device(of_hi6421_pmic_match, &pdev->dev);
if (!id)
return -EINVAL;
- type = (enum hi6421_type)id->data;
+ type = (uintptr_t)id->data;
pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
if (!pmic)
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
index a58e42ddcd0c..8feae8d8fd9d 100644
--- a/drivers/mfd/hi655x-pmic.c
+++ b/drivers/mfd/hi655x-pmic.c
@@ -16,7 +16,7 @@
#include <linux/mfd/hi655x-pmic.h>
#include <linux/module.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_platform.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -100,8 +100,7 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
return -ENOMEM;
pmic->dev = dev;
- pmic->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, pmic->res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -163,7 +162,7 @@ MODULE_DEVICE_TABLE(of, hi655x_pmic_match);
static struct platform_driver hi655x_pmic_driver = {
.driver = {
.name = "hi655x-pmic",
- .of_match_table = of_match_ptr(hi655x_pmic_match),
+ .of_match_table = hi655x_pmic_match,
},
.probe = hi655x_pmic_probe,
.remove = hi655x_pmic_remove,
diff --git a/drivers/mfd/ipaq-micro.c b/drivers/mfd/ipaq-micro.c
index 6d3968458e81..c964ea6539aa 100644
--- a/drivers/mfd/ipaq-micro.c
+++ b/drivers/mfd/ipaq-micro.c
@@ -78,8 +78,6 @@ EXPORT_SYMBOL(ipaq_micro_tx_msg);
static void micro_rx_msg(struct ipaq_micro *micro, u8 id, int len, u8 *data)
{
- int i;
-
dev_dbg(micro->dev, "RX msg: %02x, %d bytes\n", id, len);
spin_lock(&micro->lock);
@@ -131,10 +129,7 @@ static void micro_rx_msg(struct ipaq_micro *micro, u8 id, int len, u8 *data)
break;
default:
dev_err(micro->dev,
- "unknown msg %d [%d] ", id, len);
- for (i = 0; i < len; ++i)
- pr_cont("0x%02x ", data[i]);
- pr_cont("\n");
+ "unknown msg %d [%d] %*ph\n", id, len, len, data);
}
spin_unlock(&micro->lock);
}
diff --git a/drivers/mfd/iqs62x.c b/drivers/mfd/iqs62x.c
index dfe9cb79e6a1..e03b4d38fbb0 100644
--- a/drivers/mfd/iqs62x.c
+++ b/drivers/mfd/iqs62x.c
@@ -27,7 +27,7 @@
#include <linux/mfd/iqs62x.h>
#include <linux/module.h>
#include <linux/notifier.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/lochnagar-i2c.c b/drivers/mfd/lochnagar-i2c.c
index 3c8843117080..59092f839d65 100644
--- a/drivers/mfd/lochnagar-i2c.c
+++ b/drivers/mfd/lochnagar-i2c.c
@@ -379,7 +379,7 @@ static int lochnagar_i2c_probe(struct i2c_client *i2c)
static struct i2c_driver lochnagar_i2c_driver = {
.driver = {
.name = "lochnagar",
- .of_match_table = of_match_ptr(lochnagar_of_match),
+ .of_match_table = lochnagar_of_match,
.suppress_bind_attrs = true,
},
.probe = lochnagar_i2c_probe,
diff --git a/drivers/mfd/lp873x.c b/drivers/mfd/lp873x.c
index 6639f0fad4ea..de7ab7aed3c6 100644
--- a/drivers/mfd/lp873x.c
+++ b/drivers/mfd/lp873x.c
@@ -7,8 +7,8 @@
#include <linux/interrupt.h>
#include <linux/mfd/core.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/mfd/lp873x.h>
diff --git a/drivers/mfd/lp87565.c b/drivers/mfd/lp87565.c
index 88ce4d7c50a7..1b7f8349911d 100644
--- a/drivers/mfd/lp87565.c
+++ b/drivers/mfd/lp87565.c
@@ -92,7 +92,7 @@ static int lp87565_probe(struct i2c_client *client)
of_id = of_match_device(of_lp87565_match_table, &client->dev);
if (of_id)
- lp87565->dev_type = (enum lp87565_device_type)of_id->data;
+ lp87565->dev_type = (uintptr_t)of_id->data;
i2c_set_clientdata(client, lp87565);
diff --git a/drivers/mfd/madera-i2c.c b/drivers/mfd/madera-i2c.c
index 0968aa9733ac..a404ea26bc79 100644
--- a/drivers/mfd/madera-i2c.c
+++ b/drivers/mfd/madera-i2c.c
@@ -10,7 +10,6 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/mfd/madera/core.h>
diff --git a/drivers/mfd/madera-spi.c b/drivers/mfd/madera-spi.c
index da84eb50e53a..ad07ebe29e59 100644
--- a/drivers/mfd/madera-spi.c
+++ b/drivers/mfd/madera-spi.c
@@ -9,7 +9,6 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 25ed8846b7fb..1f4f5002595c 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -402,8 +402,7 @@ static int max14577_i2c_probe(struct i2c_client *i2c)
of_id = of_match_device(max14577_dt_match, &i2c->dev);
if (of_id)
- max14577->dev_type =
- (enum maxim_device_type)of_id->data;
+ max14577->dev_type = (uintptr_t)of_id->data;
} else {
max14577->dev_type = id->driver_data;
}
diff --git a/drivers/mfd/max77541.c b/drivers/mfd/max77541.c
index e147e949c2b3..10c2e274b4af 100644
--- a/drivers/mfd/max77541.c
+++ b/drivers/mfd/max77541.c
@@ -173,7 +173,7 @@ static int max77541_probe(struct i2c_client *client)
i2c_set_clientdata(client, max77541);
max77541->i2c = client;
- max77541->id = (enum max7754x_ids)device_get_match_data(dev);
+ max77541->id = (uintptr_t)device_get_match_data(dev);
if (!max77541->id)
max77541->id = (enum max7754x_ids)id->driver_data;
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index 5811ed8f4840..e63e8e47d908 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -30,7 +30,6 @@
#include <linux/mfd/max77620.h>
#include <linux/init.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index 01833086ca7d..91c286c4571c 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -20,7 +20,6 @@
#include <linux/mfd/max77686-private.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/of_device.h>
static const struct mfd_cell max77686_devs[] = {
{ .name = "max77686-pmic", },
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index b3689c13a14d..fcff0c498c0f 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -13,7 +13,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/max77693-common.h>
#include <linux/mfd/max77843-private.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
static const struct mfd_cell max77843_devs[] = {
diff --git a/drivers/mfd/max8907.c b/drivers/mfd/max8907.c
index 78b5ee688dec..8bbe7979db91 100644
--- a/drivers/mfd/max8907.c
+++ b/drivers/mfd/max8907.c
@@ -15,7 +15,6 @@
#include <linux/mfd/max8907.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index 0246bbe80354..105d79b91493 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -17,7 +17,6 @@
#include <linux/mfd/core.h>
#include <linux/mfd/max8925.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
static const struct resource bk_resources[] = {
{ 0x84, 0x84, "mode control", IORESOURCE_REG, },
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 94c09a5eab32..110bef71f208 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -11,7 +11,6 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 33a3ec5464fb..4cc426a6c767 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -12,7 +12,6 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
index de59b498c925..6bc0e755ba34 100644
--- a/drivers/mfd/mc13xxx-i2c.c
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -53,7 +53,6 @@ static const struct regmap_config mc13xxx_regmap_i2c_config = {
static int mc13xxx_i2c_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct mc13xxx *mc13xxx;
int ret;
@@ -73,13 +72,7 @@ static int mc13xxx_i2c_probe(struct i2c_client *client)
return ret;
}
- if (client->dev.of_node) {
- const struct of_device_id *of_id =
- of_match_device(mc13xxx_dt_ids, &client->dev);
- mc13xxx->variant = of_id->data;
- } else {
- mc13xxx->variant = (void *)id->driver_data;
- }
+ mc13xxx->variant = i2c_get_match_data(client);
return mc13xxx_common_init(&client->dev);
}
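
i2c_get_match_data() returns the per-device match data whether the client was bound via OF, ACPI or a legacy i2c_device_id table, which is what lets the of_node branching above go away. A rough sketch with invented names:

#include <linux/i2c.h>
#include <linux/errno.h>

struct demo_variant { int rev; };       /* invented match-data type */

static int demo_i2c_probe(struct i2c_client *client)
{
        const struct demo_variant *variant;

        /* Returns OF, ACPI or i2c_device_id match data, whichever applies. */
        variant = i2c_get_match_data(client);
        if (!variant)
                return -ENODEV;

        dev_info(&client->dev, "variant rev %d\n", variant->rev);
        return 0;
}
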
diff --git a/drivers/mfd/mt6358-irq.c b/drivers/mfd/mt6358-irq.c
index 389756436af6..49830b526ee8 100644
--- a/drivers/mfd/mt6358-irq.c
+++ b/drivers/mfd/mt6358-irq.c
@@ -3,6 +3,8 @@
// Copyright (c) 2020 MediaTek Inc.
#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/mfd/mt6357/core.h>
#include <linux/mfd/mt6357/registers.h>
#include <linux/mfd/mt6358/core.h>
@@ -11,9 +13,6 @@
#include <linux/mfd/mt6359/registers.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index f6c1f80f94a4..4449dde05021 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -6,9 +6,10 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
+#include <linux/irqdomain.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mt6323/core.h>
diff --git a/drivers/mfd/mt6397-irq.c b/drivers/mfd/mt6397-irq.c
index 72f923e47752..886745b5b607 100644
--- a/drivers/mfd/mt6397-irq.c
+++ b/drivers/mfd/mt6397-irq.c
@@ -3,10 +3,9 @@
// Copyright (c) 2019 MediaTek Inc.
#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/suspend.h>
diff --git a/drivers/mfd/mxs-lradc.c b/drivers/mfd/mxs-lradc.c
index 111d11fd25aa..21f3033d6eb5 100644
--- a/drivers/mfd/mxs-lradc.c
+++ b/drivers/mfd/mxs-lradc.c
@@ -142,7 +142,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
if (!of_id)
return -EINVAL;
- lradc->soc = (enum mxs_lradc_id)of_id->data;
+ lradc->soc = (uintptr_t)of_id->data;
lradc->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(lradc->clk)) {
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 7f5775109593..78f1bb55dbc0 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -534,7 +534,6 @@ static int usbhs_omap_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct usbhs_omap_platform_data *pdata = dev_get_platdata(dev);
struct usbhs_hcd_omap *omap;
- struct resource *res;
int ret = 0;
int i;
bool need_logic_fck;
@@ -569,8 +568,7 @@ static int usbhs_omap_probe(struct platform_device *pdev)
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- omap->uhh_base = devm_ioremap_resource(dev, res);
+ omap->uhh_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(omap->uhh_base))
return PTR_ERR(omap->uhh_base);
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 69cbc2097911..906353735c78 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -200,15 +200,13 @@ static unsigned ohci_omap3_fslsmode(enum usbhs_omap_port_mode mode)
static int usbtll_omap_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct usbtll_omap *tll;
void __iomem *base;
int i, nch, ver;
dev_dbg(dev, "starting TI HSUSB TLL Controller\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index a36f12402987..6e562bab62e4 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -18,7 +18,8 @@
#include <linux/err.h>
#include <linux/mfd/core.h>
#include <linux/mfd/palmas.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
static const struct regmap_config palmas_regmap_config[PALMAS_NUM_CLIENTS] = {
{
diff --git a/drivers/mfd/qcom-pm8008.c b/drivers/mfd/qcom-pm8008.c
index 94a8cca1d955..3ac3742f438b 100644
--- a/drivers/mfd/qcom-pm8008.c
+++ b/drivers/mfd/qcom-pm8008.c
@@ -9,7 +9,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/regmap.h>
diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
index 9a948df8c28d..07c531bd1236 100644
--- a/drivers/mfd/qcom-pm8xxx.c
+++ b/drivers/mfd/qcom-pm8xxx.c
@@ -103,8 +103,9 @@ static int
pm8xxx_config_irq(struct pm_irq_chip *chip, unsigned int bp, unsigned int cp)
{
int rc;
+ unsigned long flags;
- spin_lock(&chip->pm_irq_lock);
+ spin_lock_irqsave(&chip->pm_irq_lock, flags);
rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, bp);
if (rc) {
pr_err("Failed Selecting Block %d rc=%d\n", bp, rc);
@@ -116,7 +117,7 @@ pm8xxx_config_irq(struct pm_irq_chip *chip, unsigned int bp, unsigned int cp)
if (rc)
pr_err("Failed Configuring IRQ rc=%d\n", rc);
bail:
- spin_unlock(&chip->pm_irq_lock);
+ spin_unlock_irqrestore(&chip->pm_irq_lock, flags);
return rc;
}
@@ -321,6 +322,7 @@ static int pm8xxx_irq_get_irqchip_state(struct irq_data *d,
struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
unsigned int pmirq = irqd_to_hwirq(d);
unsigned int bits;
+ unsigned long flags;
int irq_bit;
u8 block;
int rc;
@@ -331,7 +333,7 @@ static int pm8xxx_irq_get_irqchip_state(struct irq_data *d,
block = pmirq / 8;
irq_bit = pmirq % 8;
- spin_lock(&chip->pm_irq_lock);
+ spin_lock_irqsave(&chip->pm_irq_lock, flags);
rc = regmap_write(chip->regmap, SSBI_REG_ADDR_IRQ_BLK_SEL, block);
if (rc) {
pr_err("Failed Selecting Block %d rc=%d\n", block, rc);
@@ -346,7 +348,7 @@ static int pm8xxx_irq_get_irqchip_state(struct irq_data *d,
*state = !!(bits & BIT(irq_bit));
bail:
- spin_unlock(&chip->pm_irq_lock);
+ spin_unlock_irqrestore(&chip->pm_irq_lock, flags);
return rc;
}
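
The switch to the irqsave variants makes the lock safe to take from contexts where local interrupts may or may not already be disabled: the previous interrupt state is saved and restored rather than unconditionally re-enabled. The pattern, in isolation:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(demo_lock);
static u32 demo_shared;

static void demo_update(u32 val)
{
        unsigned long flags;

        /*
         * Save the current interrupt state, disable local interrupts, and
         * restore exactly what was saved on unlock - safe whether the caller
         * runs with interrupts enabled or disabled.
         */
        spin_lock_irqsave(&demo_lock, flags);
        demo_shared = val;
        spin_unlock_irqrestore(&demo_lock, flags);
}
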
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index 545196c85b5c..da50eba10014 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -18,7 +18,7 @@
#include <linux/mfd/rave-sp.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/sched.h>
#include <linux/serdev.h>
#include <asm/unaligned.h>
diff --git a/drivers/mfd/rk8xx-core.c b/drivers/mfd/rk8xx-core.c
index e8fc9e2ab1d0..11a831e92da8 100644
--- a/drivers/mfd/rk8xx-core.c
+++ b/drivers/mfd/rk8xx-core.c
@@ -14,7 +14,7 @@
#include <linux/mfd/rk808.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index 333fef8729a5..0fe616b2db8e 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -277,7 +277,7 @@ static SIMPLE_DEV_PM_OPS(rn5t618_i2c_dev_pm_ops,
static struct i2c_driver rn5t618_i2c_driver = {
.driver = {
.name = "rn5t618",
- .of_match_table = of_match_ptr(rn5t618_of_match),
+ .of_match_table = rn5t618_of_match,
.pm = &rn5t618_i2c_dev_pm_ops,
},
.probe = rn5t618_i2c_probe,
diff --git a/drivers/mfd/rohm-bd71828.c b/drivers/mfd/rohm-bd71828.c
index 93d80a79b901..594718f7e8e1 100644
--- a/drivers/mfd/rohm-bd71828.c
+++ b/drivers/mfd/rohm-bd71828.c
@@ -15,7 +15,7 @@
#include <linux/mfd/rohm-bd71828.h>
#include <linux/mfd/rohm-generic.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/types.h>
diff --git a/drivers/mfd/rohm-bd718x7.c b/drivers/mfd/rohm-bd718x7.c
index 0b58ecc78334..4798bdf27afb 100644
--- a/drivers/mfd/rohm-bd718x7.c
+++ b/drivers/mfd/rohm-bd718x7.c
@@ -14,7 +14,7 @@
#include <linux/mfd/rohm-bd718x7.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/types.h>
diff --git a/drivers/mfd/rohm-bd9576.c b/drivers/mfd/rohm-bd9576.c
index 645673322ec0..bceac7016740 100644
--- a/drivers/mfd/rohm-bd9576.c
+++ b/drivers/mfd/rohm-bd9576.c
@@ -13,7 +13,7 @@
#include <linux/mfd/rohm-bd957x.h>
#include <linux/mfd/rohm-generic.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/types.h>
diff --git a/drivers/mfd/rsmu_i2c.c b/drivers/mfd/rsmu_i2c.c
index 26972a5aff45..06d78a1cf1cc 100644
--- a/drivers/mfd/rsmu_i2c.c
+++ b/drivers/mfd/rsmu_i2c.c
@@ -277,7 +277,7 @@ MODULE_DEVICE_TABLE(of, rsmu_i2c_of_match);
static struct i2c_driver rsmu_i2c_driver = {
.driver = {
.name = "rsmu-i2c",
- .of_match_table = of_match_ptr(rsmu_i2c_of_match),
+ .of_match_table = rsmu_i2c_of_match,
},
.probe = rsmu_i2c_probe,
.remove = rsmu_i2c_remove,
diff --git a/drivers/mfd/rsmu_spi.c b/drivers/mfd/rsmu_spi.c
index a4a595bb8d0d..ca0a1202c3ce 100644
--- a/drivers/mfd/rsmu_spi.c
+++ b/drivers/mfd/rsmu_spi.c
@@ -262,7 +262,7 @@ MODULE_DEVICE_TABLE(of, rsmu_spi_of_match);
static struct spi_driver rsmu_spi_driver = {
.driver = {
.name = "rsmu-spi",
- .of_match_table = of_match_ptr(rsmu_spi_of_match),
+ .of_match_table = rsmu_spi_of_match,
},
.probe = rsmu_spi_probe,
.remove = rsmu_spi_remove,
diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c
index 67b0a228db24..7e23ab3d5842 100644
--- a/drivers/mfd/rt5033.c
+++ b/drivers/mfd/rt5033.c
@@ -10,9 +10,9 @@
*/
#include <linux/err.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/of_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/rt5033.h>
#include <linux/mfd/rt5033-private.h>
diff --git a/drivers/mfd/rz-mtu3.c b/drivers/mfd/rz-mtu3.c
index 04006f4aa702..f3dac4a29a83 100644
--- a/drivers/mfd/rz-mtu3.c
+++ b/drivers/mfd/rz-mtu3.c
@@ -11,7 +11,9 @@
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/rz-mtu3.h>
-#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
@@ -20,7 +22,7 @@
struct rz_mtu3_priv {
void __iomem *mmio;
struct reset_control *rstc;
- raw_spinlock_t lock;
+ spinlock_t lock;
};
/******* MTU3 registers (original offset is +0x1200) *******/
@@ -174,11 +176,11 @@ void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch, u16 offset,
struct rz_mtu3_priv *priv = mtu->priv_data;
unsigned long tmdr, flags;
- raw_spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
tmdr = rz_mtu3_shared_reg_read(ch, offset);
__assign_bit(pos, &tmdr, !!val);
rz_mtu3_shared_reg_write(ch, offset, tmdr);
- raw_spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
EXPORT_SYMBOL_GPL(rz_mtu3_shared_reg_update_bit);
@@ -250,16 +252,17 @@ static void rz_mtu3_start_stop_ch(struct rz_mtu3_channel *ch, bool start)
u16 offset;
u8 bitpos;
- /* start stop register shared by multiple timer channels */
- raw_spin_lock_irqsave(&priv->lock, flags);
-
offset = rz_mtu3_get_tstr_offset(ch);
bitpos = rz_mtu3_get_tstr_bit_pos(ch);
+
+ /* start/stop register shared by multiple timer channels */
+ spin_lock_irqsave(&priv->lock, flags);
+
tstr = rz_mtu3_shared_reg_read(ch, offset);
__assign_bit(bitpos, &tstr, start);
rz_mtu3_shared_reg_write(ch, offset, tstr);
- raw_spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->lock, flags);
}
bool rz_mtu3_is_enabled(struct rz_mtu3_channel *ch)
@@ -267,21 +270,18 @@ bool rz_mtu3_is_enabled(struct rz_mtu3_channel *ch)
struct rz_mtu3 *mtu = dev_get_drvdata(ch->dev->parent);
struct rz_mtu3_priv *priv = mtu->priv_data;
unsigned long flags, tstr;
- bool ret = false;
u16 offset;
u8 bitpos;
- /* start stop register shared by multiple timer channels */
- raw_spin_lock_irqsave(&priv->lock, flags);
-
offset = rz_mtu3_get_tstr_offset(ch);
bitpos = rz_mtu3_get_tstr_bit_pos(ch);
- tstr = rz_mtu3_shared_reg_read(ch, offset);
- ret = tstr & BIT(bitpos);
- raw_spin_unlock_irqrestore(&priv->lock, flags);
+ /* start/stop register shared by multiple timer channels */
+ spin_lock_irqsave(&priv->lock, flags);
+ tstr = rz_mtu3_shared_reg_read(ch, offset);
+ spin_unlock_irqrestore(&priv->lock, flags);
- return ret;
+ return tstr & BIT(bitpos);
}
EXPORT_SYMBOL_GPL(rz_mtu3_is_enabled);
@@ -349,7 +349,7 @@ static int rz_mtu3_probe(struct platform_device *pdev)
return PTR_ERR(ddata->clk);
reset_control_deassert(priv->rstc);
- raw_spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->lock);
platform_set_drvdata(pdev, ddata);
for (i = 0; i < RZ_MTU_NUM_CHANNELS; i++) {
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index d2f631901886..a6b0d7300b2d 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -10,8 +10,6 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index d21f32cc784d..81e517cdfb27 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/mfd/core.h>
#include <linux/mfd/sc27xx-pmic.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
diff --git a/drivers/mfd/ssbi.c b/drivers/mfd/ssbi.c
index dee89db3471d..b0b0be483dbf 100644
--- a/drivers/mfd/ssbi.c
+++ b/drivers/mfd/ssbi.c
@@ -14,12 +14,12 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/ssbi.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
/* SSBI 2.0 controller registers */
#define SSBI2_CMD 0x0008
diff --git a/drivers/mfd/stm32-lptimer.c b/drivers/mfd/stm32-lptimer.c
index fa322f4412c8..b2704a9809c7 100644
--- a/drivers/mfd/stm32-lptimer.c
+++ b/drivers/mfd/stm32-lptimer.c
@@ -9,6 +9,7 @@
#include <linux/mfd/stm32-lptimer.h>
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#define STM32_LPTIM_MAX_REGISTER 0x3fc
diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
index 44ed2fce0319..732a28db80fa 100644
--- a/drivers/mfd/stm32-timers.c
+++ b/drivers/mfd/stm32-timers.c
@@ -8,6 +8,7 @@
#include <linux/mfd/stm32-timers.h>
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#define STM32_TIMERS_MAX_REGISTERS 0x3fc
@@ -226,8 +227,7 @@ static int stm32_timers_probe(struct platform_device *pdev)
if (!ddata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmio = devm_ioremap_resource(dev, res);
+ mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(mmio))
return PTR_ERR(mmio);
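
devm_platform_get_and_ioremap_resource() is the variant used when the caller still needs the struct resource (for its start address or size) in addition to the mapping. A minimal sketch with an invented driver name:

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int demo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *mmio;

        /*
         * Same as devm_platform_ioremap_resource(), but also hands back the
         * struct resource for callers that need its start address or size.
         */
        mmio = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
        if (IS_ERR(mmio))
                return PTR_ERR(mmio);

        dev_info(&pdev->dev, "mapped %pR\n", res);
        return 0;
}
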
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 1d7b401776d1..fe018bedab98 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -87,7 +87,7 @@ stmpe_i2c_probe(struct i2c_client *i2c)
dev_info(&i2c->dev, "matching on node name, compatible is preferred\n");
partnum = id->driver_data;
} else
- partnum = (enum stmpe_partnum)of_id->data;
+ partnum = (uintptr_t)of_id->data;
return stmpe_probe(&i2c_ci, partnum);
}
diff --git a/drivers/mfd/stpmic1.c b/drivers/mfd/stpmic1.c
index 3cc7492f828f..c5128fe96cc7 100644
--- a/drivers/mfd/stpmic1.c
+++ b/drivers/mfd/stpmic1.c
@@ -219,7 +219,7 @@ MODULE_DEVICE_TABLE(of, stpmic1_of_match);
static struct i2c_driver stpmic1_driver = {
.driver = {
.name = "stpmic1",
- .of_match_table = of_match_ptr(stpmic1_of_match),
+ .of_match_table = stpmic1_of_match,
.pm = pm_sleep_ptr(&stpmic1_pm),
},
.probe = stpmic1_probe,
diff --git a/drivers/mfd/sun4i-gpadc.c b/drivers/mfd/sun4i-gpadc.c
index d1cbea27b136..3029d48e982c 100644
--- a/drivers/mfd/sun4i-gpadc.c
+++ b/drivers/mfd/sun4i-gpadc.c
@@ -8,8 +8,8 @@
#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/sun4i-gpadc.h>
diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c
index 16df64e3c0be..db28eb0c8995 100644
--- a/drivers/mfd/tc3589x.c
+++ b/drivers/mfd/tc3589x.c
@@ -340,7 +340,7 @@ tc3589x_of_probe(struct device *dev, enum tc3589x_version *version)
of_id = of_match_device(tc3589x_match, dev);
if (!of_id)
return ERR_PTR(-ENODEV);
- *version = (enum tc3589x_version) of_id->data;
+ *version = (uintptr_t) of_id->data;
for_each_child_of_node(np, child) {
if (of_device_is_compatible(child, "toshiba,tc3589x-gpio"))
@@ -483,7 +483,7 @@ static struct i2c_driver tc3589x_driver = {
.driver = {
.name = "tc3589x",
.pm = pm_sleep_ptr(&tc3589x_dev_pm_ops),
- .of_match_table = of_match_ptr(tc3589x_match),
+ .of_match_table = tc3589x_match,
},
.probe = tc3589x_probe,
.remove = tc3589x_remove,
diff --git a/drivers/mfd/ti-lmu.c b/drivers/mfd/ti-lmu.c
index 4f06adad7b5e..cfc9f88b9842 100644
--- a/drivers/mfd/ti-lmu.c
+++ b/drivers/mfd/ti-lmu.c
@@ -17,7 +17,6 @@
#include <linux/mfd/ti-lmu-register.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/slab.h>
struct ti_lmu_data {
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 07825cfd8aa8..b88eb70c17b3 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -14,7 +14,7 @@
#include <linux/mfd/core.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/mfd/ti_am335x_tscadc.h>
@@ -201,8 +201,7 @@ static int ti_tscadc_probe(struct platform_device *pdev)
else
tscadc->irq = err;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tscadc->tscadc_base = devm_ioremap_resource(&pdev->dev, res);
+ tscadc->tscadc_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(tscadc->tscadc_base))
return PTR_ERR(tscadc->tscadc_base);
diff --git a/drivers/mfd/tps6507x.c b/drivers/mfd/tps6507x.c
index 9716bf703c7a..95dafb0e9f00 100644
--- a/drivers/mfd/tps6507x.c
+++ b/drivers/mfd/tps6507x.c
@@ -20,7 +20,6 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps6507x.h>
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index a35ad70755fb..9245e11219f3 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -17,7 +17,6 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tps65090.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/err.h>
#define NUM_INT_REG 2
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 60599291b315..029ecc32f078 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 619bf7adb20c..11e4e52b56be 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -15,7 +15,6 @@
#include <linux/regmap.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
diff --git a/drivers/mfd/tps6594-core.c b/drivers/mfd/tps6594-core.c
index 15f314833207..0fb9c5cf213a 100644
--- a/drivers/mfd/tps6594-core.c
+++ b/drivers/mfd/tps6594-core.c
@@ -9,7 +9,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps6594.h>
diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
index d85675a4d9a8..9ce34dfd99b3 100644
--- a/drivers/mfd/twl6040.c
+++ b/drivers/mfd/twl6040.c
@@ -16,8 +16,6 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/i2c.h>
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index 6bba39657991..f77ecc635b6f 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -1938,7 +1938,7 @@ const struct regmap_config wm5102_i2c_regmap = {
.readable_reg = wm5102_readable_register,
.volatile_reg = wm5102_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm5102_reg_default,
.num_reg_defaults = ARRAY_SIZE(wm5102_reg_default),
};
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 65b9b1d6daec..eba324875afd 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -3218,7 +3218,7 @@ const struct regmap_config wm5110_i2c_regmap = {
.readable_reg = wm5110_readable_register,
.volatile_reg = wm5110_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm5110_reg_default,
.num_reg_defaults = ARRAY_SIZE(wm5110_reg_default),
};
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index e86b6a4896a6..e7e68929275e 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -15,8 +15,7 @@
#include <linux/mfd/core.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index 997837f13180..694ddbbf0372 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -36,7 +36,7 @@ static int wm831x_i2c_probe(struct i2c_client *i2c)
dev_err(&i2c->dev, "Failed to match device\n");
return -ENODEV;
}
- type = (enum wm831x_parent)of_id->data;
+ type = (uintptr_t)of_id->data;
} else {
type = (enum wm831x_parent)id->driver_data;
}
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index 7bcddccbf155..76be7ef5c970 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -33,7 +33,7 @@ static int wm831x_spi_probe(struct spi_device *spi)
dev_err(&spi->dev, "Failed to match device\n");
return -ENODEV;
}
- type = (enum wm831x_parent)of_id->data;
+ type = (uintptr_t)of_id->data;
} else {
type = (enum wm831x_parent)id->driver_data;
}
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 1e4f1694f065..aba7af688175 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -628,7 +628,7 @@ static int wm8994_i2c_probe(struct i2c_client *i2c)
if (i2c->dev.of_node) {
of_id = of_match_device(wm8994_of_match, &i2c->dev);
if (of_id)
- wm8994->type = (enum wm8994_type)of_id->data;
+ wm8994->type = (uintptr_t)of_id->data;
} else {
wm8994->type = id->driver_data;
}
diff --git a/drivers/mfd/wm8994-regmap.c b/drivers/mfd/wm8994-regmap.c
index cd4fef7df336..ee2ed6773afd 100644
--- a/drivers/mfd/wm8994-regmap.c
+++ b/drivers/mfd/wm8994-regmap.c
@@ -1238,7 +1238,7 @@ struct regmap_config wm1811_regmap_config = {
.reg_bits = 16,
.val_bits = 16,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm1811_defaults,
.num_reg_defaults = ARRAY_SIZE(wm1811_defaults),
@@ -1253,7 +1253,7 @@ struct regmap_config wm8994_regmap_config = {
.reg_bits = 16,
.val_bits = 16,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm8994_defaults,
.num_reg_defaults = ARRAY_SIZE(wm8994_defaults),
@@ -1268,7 +1268,7 @@ struct regmap_config wm8958_regmap_config = {
.reg_bits = 16,
.val_bits = 16,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm8958_defaults,
.num_reg_defaults = ARRAY_SIZE(wm8958_defaults),
diff --git a/drivers/mfd/wm8997-tables.c b/drivers/mfd/wm8997-tables.c
index 3476787c485e..288c57b2d21e 100644
--- a/drivers/mfd/wm8997-tables.c
+++ b/drivers/mfd/wm8997-tables.c
@@ -1523,7 +1523,7 @@ const struct regmap_config wm8997_i2c_regmap = {
.readable_reg = wm8997_readable_register,
.volatile_reg = wm8997_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm8997_reg_default,
.num_reg_defaults = ARRAY_SIZE(wm8997_reg_default),
};
diff --git a/drivers/mfd/wm8998-tables.c b/drivers/mfd/wm8998-tables.c
index 9b34a6d76094..b3e6e85bee89 100644
--- a/drivers/mfd/wm8998-tables.c
+++ b/drivers/mfd/wm8998-tables.c
@@ -1556,7 +1556,7 @@ const struct regmap_config wm8998_i2c_regmap = {
.readable_reg = wm8998_readable_register,
.volatile_reg = wm8998_volatile_register,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.reg_defaults = wm8998_reg_default,
.num_reg_defaults = ARRAY_SIZE(wm8998_reg_default),
};
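
REGCACHE_MAPLE selects regmap's maple-tree register cache in place of the older rbtree cache; only the cache_type field changes and the regmap read/write API is unaffected. Roughly:

#include <linux/regmap.h>

static const struct regmap_config demo_regmap_config = {
        .reg_bits = 16,
        .val_bits = 16,
        .max_register = 0x3fff,
        /*
         * Maple-tree register cache; a drop-in replacement for
         * REGCACHE_RBTREE, the rest of the regmap API is unchanged.
         */
        .cache_type = REGCACHE_MAPLE,
};
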
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 75e427f124b2..cadd4a820c03 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -496,6 +496,7 @@ config HISI_HIKEY_USB
config OPEN_DICE
tristate "Open Profile for DICE driver"
depends on OF_RESERVED_MEM
+ depends on HAS_IOMEM
help
This driver exposes a DICE reserved memory region to userspace via
a character device. The memory region contains Compound Device
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index 7f9f562d6433..ee590c4a1537 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -212,8 +212,7 @@ static int ssc_probe(struct platform_device *pdev)
of_property_read_bool(np, "atmel,clk-from-rk-pin");
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ssc->regs = devm_ioremap_resource(&pdev->dev, regs);
+ ssc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
if (IS_ERR(ssc->regs))
return PTR_ERR(ssc->regs);
diff --git a/drivers/misc/bcm-vk/bcm_vk.h b/drivers/misc/bcm-vk/bcm_vk.h
index 25d51222eedf..386884c2a263 100644
--- a/drivers/misc/bcm-vk/bcm_vk.h
+++ b/drivers/misc/bcm-vk/bcm_vk.h
@@ -340,7 +340,7 @@ struct bcm_vk_proc_mon_info {
};
struct bcm_vk_hb_ctrl {
- struct timer_list timer;
+ struct delayed_work work;
u32 last_uptime;
u32 lost_cnt;
};
diff --git a/drivers/misc/bcm-vk/bcm_vk_msg.c b/drivers/misc/bcm-vk/bcm_vk_msg.c
index 3c081504f38c..e17d81231ea6 100644
--- a/drivers/misc/bcm-vk/bcm_vk_msg.c
+++ b/drivers/misc/bcm-vk/bcm_vk_msg.c
@@ -137,11 +137,11 @@ void bcm_vk_set_host_alert(struct bcm_vk *vk, u32 bit_mask)
#define BCM_VK_HB_TIMER_VALUE (BCM_VK_HB_TIMER_S * HZ)
#define BCM_VK_HB_LOST_MAX (27 / BCM_VK_HB_TIMER_S)
-static void bcm_vk_hb_poll(struct timer_list *t)
+static void bcm_vk_hb_poll(struct work_struct *work)
{
u32 uptime_s;
- struct bcm_vk_hb_ctrl *hb = container_of(t, struct bcm_vk_hb_ctrl,
- timer);
+ struct bcm_vk_hb_ctrl *hb = container_of(to_delayed_work(work), struct bcm_vk_hb_ctrl,
+ work);
struct bcm_vk *vk = container_of(hb, struct bcm_vk, hb_ctrl);
if (bcm_vk_drv_access_ok(vk) && hb_mon_is_on()) {
@@ -177,22 +177,22 @@ static void bcm_vk_hb_poll(struct timer_list *t)
bcm_vk_set_host_alert(vk, ERR_LOG_HOST_HB_FAIL);
}
/* re-arm timer */
- mod_timer(&hb->timer, jiffies + BCM_VK_HB_TIMER_VALUE);
+ schedule_delayed_work(&hb->work, BCM_VK_HB_TIMER_VALUE);
}
void bcm_vk_hb_init(struct bcm_vk *vk)
{
struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl;
- timer_setup(&hb->timer, bcm_vk_hb_poll, 0);
- mod_timer(&hb->timer, jiffies + BCM_VK_HB_TIMER_VALUE);
+ INIT_DELAYED_WORK(&hb->work, bcm_vk_hb_poll);
+ schedule_delayed_work(&hb->work, BCM_VK_HB_TIMER_VALUE);
}
void bcm_vk_hb_deinit(struct bcm_vk *vk)
{
struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl;
- del_timer(&hb->timer);
+ cancel_delayed_work_sync(&hb->work);
}
static void bcm_vk_msgid_bitmap_clear(struct bcm_vk *vk,
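
Moving the heartbeat from a timer to delayed work shifts the poll out of softirq context into process context, where it may sleep; arming, re-arming and teardown map one-to-one onto the workqueue calls. A standalone sketch (names and interval are illustrative):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void demo_hb_poll(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_hb_work, demo_hb_poll);

static void demo_hb_poll(struct work_struct *work)
{
        /* Runs in process context, so it may sleep if it needs to. */
        schedule_delayed_work(&demo_hb_work, 60 * HZ);  /* re-arm */
}

static void demo_hb_start(void)
{
        schedule_delayed_work(&demo_hb_work, 60 * HZ);
}

static void demo_hb_stop(void)
{
        /* Cancels the work and waits for a poll already in flight. */
        cancel_delayed_work_sync(&demo_hb_work);
}
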
diff --git a/drivers/misc/bcm-vk/bcm_vk_tty.c b/drivers/misc/bcm-vk/bcm_vk_tty.c
index 6669625ba4c8..2bce835ca43e 100644
--- a/drivers/misc/bcm-vk/bcm_vk_tty.c
+++ b/drivers/misc/bcm-vk/bcm_vk_tty.c
@@ -186,9 +186,8 @@ static void bcm_vk_tty_doorbell(struct bcm_vk *vk, u32 db_val)
VK_BAR0_REGSEG_DB_BASE + VK_BAR0_REGSEG_TTY_DB_OFFSET);
}
-static int bcm_vk_tty_write(struct tty_struct *tty,
- const unsigned char *buffer,
- int count)
+static ssize_t bcm_vk_tty_write(struct tty_struct *tty, const u8 *buffer,
+ size_t count)
{
int index;
struct bcm_vk *vk;
diff --git a/drivers/misc/cxl/base.c b/drivers/misc/cxl/base.c
index cc0caf9192dc..b054562c046e 100644
--- a/drivers/misc/cxl/base.c
+++ b/drivers/misc/cxl/base.c
@@ -7,6 +7,7 @@
#include <linux/rcupdate.h>
#include <asm/errno.h>
#include <misc/cxl-base.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include "cxl.h"
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 740c06382b83..1d1f30b5c426 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -913,15 +913,9 @@ static ssize_t idt_dbgfs_csr_write(struct file *filep, const char __user *ubuf,
return 0;
/* Copy data from User-space */
- buf = kmalloc(count + 1, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- if (copy_from_user(buf, ubuf, count)) {
- ret = -EFAULT;
- goto free_buf;
- }
- buf[count] = 0;
+ buf = memdup_user_nul(ubuf, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
/* Find position of colon in the buffer */
colon_ch = strnchr(buf, count, ':');
@@ -1294,14 +1288,15 @@ static int idt_create_sysfs_files(struct idt_89hpesx_dev *pdev)
return 0;
}
- /* Allocate memory for attribute file */
- pdev->ee_file = devm_kmalloc(dev, sizeof(*pdev->ee_file), GFP_KERNEL);
+ /*
+ * Allocate memory for attribute file and copy the declared EEPROM attr
+ * structure to change some of the fields
+ */
+ pdev->ee_file = devm_kmemdup(dev, &bin_attr_eeprom,
+ sizeof(*pdev->ee_file), GFP_KERNEL);
if (!pdev->ee_file)
return -ENOMEM;
- /* Copy the declared EEPROM attr structure to change some of fields */
- memcpy(pdev->ee_file, &bin_attr_eeprom, sizeof(*pdev->ee_file));
-
/* In case of read-only EEPROM get rid of write ability */
if (pdev->eero) {
pdev->ee_file->attr.mode &= ~0200;
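
memdup_user_nul(), used in the first hunk above, collapses the kmalloc()/copy_from_user()/NUL-terminate sequence into one call that returns an ERR_PTR on failure. A sketch of the typical write-handler shape, with invented names:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static ssize_t demo_write(const char __user *ubuf, size_t count)
{
        char *buf;

        /*
         * Allocates count + 1 bytes, copies from userspace and adds the
         * terminating NUL; returns ERR_PTR(-ENOMEM) or ERR_PTR(-EFAULT)
         * on failure.
         */
        buf = memdup_user_nul(ubuf, count);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        /* ... parse buf here ... */

        kfree(buf);
        return count;
}
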
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 9666d28037e1..a66b7c111cd5 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
@@ -756,6 +757,7 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
{
struct fastrpc_session_ctx *sess = fl->sctx;
struct fastrpc_map *map = NULL;
+ struct sg_table *table;
int err = 0;
if (!fastrpc_map_lookup(fl, fd, ppmap, true))
@@ -783,11 +785,12 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
goto attach_err;
}
- map->table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(map->table)) {
- err = PTR_ERR(map->table);
+ table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(table)) {
+ err = PTR_ERR(table);
goto map_err;
}
+ map->table = table;
if (attr & FASTRPC_ATTR_SECUREMAP) {
map->phys = sg_phys(map->table->sgl);
@@ -1322,13 +1325,18 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
return 0;
err_invoke:
if (fl->cctx->vmcount) {
- struct qcom_scm_vmperm perm;
+ u64 src_perms = 0;
+ struct qcom_scm_vmperm dst_perms;
+ u32 i;
+
+ for (i = 0; i < fl->cctx->vmcount; i++)
+ src_perms |= BIT(fl->cctx->vmperms[i].vmid);
- perm.vmid = QCOM_SCM_VMID_HLOS;
- perm.perm = QCOM_SCM_PERM_RWX;
+ dst_perms.vmid = QCOM_SCM_VMID_HLOS;
+ dst_perms.perm = QCOM_SCM_PERM_RWX;
err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
(u64)fl->cctx->remote_heap->size,
- &fl->cctx->perms, &perm, 1);
+ &src_perms, &dst_perms, 1);
if (err)
dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
@@ -1866,7 +1874,11 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
return -EINVAL;
}
- err = fastrpc_buf_alloc(fl, fl->sctx->dev, req.size, &buf);
+ if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
+ err = fastrpc_remote_heap_alloc(fl, dev, req.size, &buf);
+ else
+ err = fastrpc_buf_alloc(fl, dev, req.size, &buf);
+
if (err) {
dev_err(dev, "failed to allocate buffer\n");
return err;
@@ -1905,12 +1917,8 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
/* Add memory to static PD pool, protection thru hypervisor */
if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
- struct qcom_scm_vmperm perm;
-
- perm.vmid = QCOM_SCM_VMID_HLOS;
- perm.perm = QCOM_SCM_PERM_RWX;
- err = qcom_scm_assign_mem(buf->phys, buf->size,
- &fl->cctx->perms, &perm, 1);
+ err = qcom_scm_assign_mem(buf->phys, (u64)buf->size,
+ &fl->cctx->perms, fl->cctx->vmperms, fl->cctx->vmcount);
if (err) {
dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
buf->phys, buf->size, err);
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index b03010810b89..224a7e97cbea 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -42,7 +42,7 @@ MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
static char genwqe_driver_name[] = GENWQE_DEVNAME;
-static struct class *class_genwqe;
+
static struct dentry *debugfs_genwqe;
static struct genwqe_dev *genwqe_devices[GENWQE_CARD_NO_MAX];
@@ -105,6 +105,26 @@ static const struct pci_device_id genwqe_device_table[] = {
MODULE_DEVICE_TABLE(pci, genwqe_device_table);
/**
+ * genwqe_devnode() - Set default access mode for genwqe devices.
+ * @dev: Pointer to device (unused)
+ * @mode: Carrier to pass-back given mode (permissions)
+ *
+ * Default mode should be rw for everybody. Do not change default
+ * device name.
+ */
+static char *genwqe_devnode(const struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0666;
+ return NULL;
+}
+
+static const struct class class_genwqe = {
+ .name = GENWQE_DEVNAME,
+ .devnode = genwqe_devnode,
+};
+
+/**
* genwqe_dev_alloc() - Create and prepare a new card descriptor
*
* Return: Pointer to card descriptor, or ERR_PTR(err) on error
@@ -126,7 +146,7 @@ static struct genwqe_dev *genwqe_dev_alloc(void)
return ERR_PTR(-ENOMEM);
cd->card_idx = i;
- cd->class_genwqe = class_genwqe;
+ cd->class_genwqe = &class_genwqe;
cd->debugfs_genwqe = debugfs_genwqe;
/*
@@ -1340,35 +1360,18 @@ static struct pci_driver genwqe_driver = {
};
/**
- * genwqe_devnode() - Set default access mode for genwqe devices.
- * @dev: Pointer to device (unused)
- * @mode: Carrier to pass-back given mode (permissions)
- *
- * Default mode should be rw for everybody. Do not change default
- * device name.
- */
-static char *genwqe_devnode(const struct device *dev, umode_t *mode)
-{
- if (mode)
- *mode = 0666;
- return NULL;
-}
-
-/**
* genwqe_init_module() - Driver registration and initialization
*/
static int __init genwqe_init_module(void)
{
int rc;
- class_genwqe = class_create(GENWQE_DEVNAME);
- if (IS_ERR(class_genwqe)) {
+ rc = class_register(&class_genwqe);
+ if (rc) {
pr_err("[%s] create class failed\n", __func__);
return -ENOMEM;
}
- class_genwqe->devnode = genwqe_devnode;
-
debugfs_genwqe = debugfs_create_dir(GENWQE_DEVNAME, NULL);
rc = pci_register_driver(&genwqe_driver);
@@ -1381,7 +1384,7 @@ static int __init genwqe_init_module(void)
err_out0:
debugfs_remove(debugfs_genwqe);
- class_destroy(class_genwqe);
+ class_unregister(&class_genwqe);
return rc;
}
@@ -1392,7 +1395,7 @@ static void __exit genwqe_exit_module(void)
{
pci_unregister_driver(&genwqe_driver);
debugfs_remove(debugfs_genwqe);
- class_destroy(class_genwqe);
+ class_unregister(&class_genwqe);
}
module_init(genwqe_init_module);
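
The genwqe conversion follows the current pattern of defining a const struct class statically and registering it with class_register(), with the devnode callback set at definition time rather than patched in after class_create(). A self-contained sketch of that pattern (module and class names are made up):

#include <linux/device.h>
#include <linux/module.h>

static char *demo_devnode(const struct device *dev, umode_t *mode)
{
        if (mode)
                *mode = 0666;   /* world-accessible device nodes */
        return NULL;            /* keep the default node name */
}

static const struct class demo_class = {
        .name = "demo",
        .devnode = demo_devnode,
};

static int __init demo_init(void)
{
        /* Registers the statically defined class; no class_create(). */
        return class_register(&demo_class);
}

static void __exit demo_exit(void)
{
        class_unregister(&demo_class);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
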
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index 0e902977d35f..d700266f2cd0 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -289,7 +289,7 @@ struct genwqe_dev {
/* char device */
dev_t devnum_genwqe; /* major/minor num card */
- struct class *class_genwqe; /* reference to class object */
+ const struct class *class_genwqe; /* reference to class object */
struct device *dev; /* for device creation */
struct cdev cdev_genwqe; /* char device for card */
diff --git a/drivers/misc/hi6421v600-irq.c b/drivers/misc/hi6421v600-irq.c
index caa3de37698b..b075d803a2c2 100644
--- a/drivers/misc/hi6421v600-irq.c
+++ b/drivers/misc/hi6421v600-irq.c
@@ -244,10 +244,8 @@ static int hi6421v600_irq_probe(struct platform_device *pdev)
pmic_pdev = container_of(pmic_dev, struct platform_device, dev);
priv->irq = platform_get_irq(pmic_pdev, 0);
- if (priv->irq < 0) {
- dev_err(dev, "Error %d when getting IRQs\n", priv->irq);
+ if (priv->irq < 0)
return priv->irq;
- }
platform_set_drvdata(pdev, priv);
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 2fde8d63c5fe..f1b74d3f8958 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -25,7 +25,9 @@
#include <linux/slab.h>
#include "hpilo.h"
-static struct class *ilo_class;
+static const struct class ilo_class = {
+ .name = "iLO",
+};
static unsigned int ilo_major;
static unsigned int max_ccb = 16;
static char ilo_hwdev[MAX_ILO_DEV];
@@ -746,7 +748,7 @@ static void ilo_remove(struct pci_dev *pdev)
minor = MINOR(ilo_hw->cdev.dev);
for (i = minor; i < minor + max_ccb; i++)
- device_destroy(ilo_class, MKDEV(ilo_major, i));
+ device_destroy(&ilo_class, MKDEV(ilo_major, i));
cdev_del(&ilo_hw->cdev);
ilo_disable_interrupts(ilo_hw);
@@ -839,7 +841,7 @@ static int ilo_probe(struct pci_dev *pdev,
for (minor = 0 ; minor < max_ccb; minor++) {
struct device *dev;
- dev = device_create(ilo_class, &pdev->dev,
+ dev = device_create(&ilo_class, &pdev->dev,
MKDEV(ilo_major, minor), NULL,
"hpilo!d%dccb%d", devnum, minor);
if (IS_ERR(dev))
@@ -882,11 +884,9 @@ static int __init ilo_init(void)
int error;
dev_t dev;
- ilo_class = class_create("iLO");
- if (IS_ERR(ilo_class)) {
- error = PTR_ERR(ilo_class);
+ error = class_register(&ilo_class);
+ if (error)
goto out;
- }
error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME);
if (error)
@@ -902,7 +902,7 @@ static int __init ilo_init(void)
chr_remove:
unregister_chrdev_region(dev, MAX_OPEN);
class_destroy:
- class_destroy(ilo_class);
+ class_unregister(&ilo_class);
out:
return error;
}
@@ -911,7 +911,7 @@ static void __exit ilo_exit(void)
{
pci_unregister_driver(&ilo_driver);
unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN);
- class_destroy(ilo_class);
+ class_unregister(&ilo_class);
}
MODULE_VERSION("1.5.0");
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 299d316f1bda..49868a45c0ad 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -26,7 +26,7 @@
#include <linux/miscdevice.h>
#include <linux/pm_runtime.h>
#include <linux/atomic.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include "lis3lv02d.h"
#define DRIVER_NAME "lis3lv02d"
diff --git a/drivers/misc/mchp_pci1xxxx/Kconfig b/drivers/misc/mchp_pci1xxxx/Kconfig
index 4abb47de7219..64e457581fb4 100644
--- a/drivers/misc/mchp_pci1xxxx/Kconfig
+++ b/drivers/misc/mchp_pci1xxxx/Kconfig
@@ -2,6 +2,7 @@ config GP_PCI1XXXX
tristate "Microchip PCI1XXXX PCIe to GPIO Expander + OTP/EEPROM manager"
depends on PCI
depends on GPIOLIB
+ depends on NVMEM_SYSFS
select GPIOLIB_IRQCHIP
select AUXILIARY_BUS
help
diff --git a/drivers/misc/mchp_pci1xxxx/Makefile b/drivers/misc/mchp_pci1xxxx/Makefile
index fc4615cfe28b..ae31251dab37 100644
--- a/drivers/misc/mchp_pci1xxxx/Makefile
+++ b/drivers/misc/mchp_pci1xxxx/Makefile
@@ -1 +1 @@
-obj-$(CONFIG_GP_PCI1XXXX) := mchp_pci1xxxx_gp.o mchp_pci1xxxx_gpio.o
+obj-$(CONFIG_GP_PCI1XXXX) := mchp_pci1xxxx_gp.o mchp_pci1xxxx_gpio.o mchp_pci1xxxx_otpe2p.o
diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
new file mode 100644
index 000000000000..16695cb5e69c
--- /dev/null
+++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2022-2023 Microchip Technology Inc.
+// PCI1xxxx OTP/EEPROM driver
+
+#include <linux/auxiliary_bus.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+
+#include "mchp_pci1xxxx_gp.h"
+
+#define AUX_DRIVER_NAME "PCI1xxxxOTPE2P"
+#define EEPROM_NAME "pci1xxxx_eeprom"
+#define OTP_NAME "pci1xxxx_otp"
+
+#define PERI_PF3_SYSTEM_REG_ADDR_BASE 0x2000
+#define PERI_PF3_SYSTEM_REG_LENGTH 0x4000
+
+#define EEPROM_SIZE_BYTES 8192
+#define OTP_SIZE_BYTES 8192
+
+#define CONFIG_REG_ADDR_BASE 0
+#define EEPROM_REG_ADDR_BASE 0x0E00
+#define OTP_REG_ADDR_BASE 0x1000
+
+#define MMAP_OTP_OFFSET(x) (OTP_REG_ADDR_BASE + (x))
+#define MMAP_EEPROM_OFFSET(x) (EEPROM_REG_ADDR_BASE + (x))
+#define MMAP_CFG_OFFSET(x) (CONFIG_REG_ADDR_BASE + (x))
+
+#define EEPROM_CMD_REG 0x00
+#define EEPROM_DATA_REG 0x04
+
+#define EEPROM_CMD_EPC_WRITE (BIT(29) | BIT(28))
+#define EEPROM_CMD_EPC_TIMEOUT_BIT BIT(17)
+#define EEPROM_CMD_EPC_BUSY_BIT BIT(31)
+
+#define STATUS_READ_DELAY_US 1
+#define STATUS_READ_TIMEOUT_US 20000
+
+#define OTP_ADDR_HIGH_OFFSET 0x04
+#define OTP_ADDR_LOW_OFFSET 0x08
+#define OTP_PRGM_DATA_OFFSET 0x10
+#define OTP_PRGM_MODE_OFFSET 0x14
+#define OTP_RD_DATA_OFFSET 0x18
+#define OTP_FUNC_CMD_OFFSET 0x20
+#define OTP_CMD_GO_OFFSET 0x28
+#define OTP_PASS_FAIL_OFFSET 0x2C
+#define OTP_STATUS_OFFSET 0x30
+
+#define OTP_FUNC_RD_BIT BIT(0)
+#define OTP_FUNC_PGM_BIT BIT(1)
+#define OTP_CMD_GO_BIT BIT(0)
+#define OTP_STATUS_BUSY_BIT BIT(0)
+#define OTP_PGM_MODE_BYTE_BIT BIT(0)
+#define OTP_FAIL_BIT BIT(0)
+
+#define OTP_PWR_DN_BIT BIT(0)
+#define OTP_PWR_DN_OFFSET 0x00
+
+#define CFG_SYS_LOCK_OFFSET 0xA0
+#define CFG_SYS_LOCK_PF3 BIT(5)
+
+#define BYTE_LOW (GENMASK(7, 0))
+#define BYTE_HIGH (GENMASK(12, 8))
+
+struct pci1xxxx_otp_eeprom_device {
+ struct auxiliary_device *pdev;
+ void __iomem *reg_base;
+ struct nvmem_config nvmem_config_eeprom;
+ struct nvmem_device *nvmem_eeprom;
+ struct nvmem_config nvmem_config_otp;
+ struct nvmem_device *nvmem_otp;
+};
+
+static int set_sys_lock(struct pci1xxxx_otp_eeprom_device *priv)
+{
+ void __iomem *sys_lock = priv->reg_base +
+ MMAP_CFG_OFFSET(CFG_SYS_LOCK_OFFSET);
+ u8 data;
+
+ writel(CFG_SYS_LOCK_PF3, sys_lock);
+ data = readl(sys_lock);
+ if (data != CFG_SYS_LOCK_PF3)
+ return -EPERM;
+
+ return 0;
+}
+
+static void release_sys_lock(struct pci1xxxx_otp_eeprom_device *priv)
+{
+ void __iomem *sys_lock = priv->reg_base +
+ MMAP_CFG_OFFSET(CFG_SYS_LOCK_OFFSET);
+ writel(0, sys_lock);
+}
+
+static bool is_eeprom_responsive(struct pci1xxxx_otp_eeprom_device *priv)
+{
+ void __iomem *rb = priv->reg_base;
+ u32 regval;
+ int ret;
+
+ writel(EEPROM_CMD_EPC_TIMEOUT_BIT,
+ rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
+ writel(EEPROM_CMD_EPC_BUSY_BIT,
+ rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
+
+ /* Wait for the EPC_BUSY bit to get cleared or the timeout bit to get set */
+ ret = read_poll_timeout(readl, regval, !(regval & EEPROM_CMD_EPC_BUSY_BIT),
+ STATUS_READ_DELAY_US, STATUS_READ_TIMEOUT_US,
+ true, rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
+
+ /* Return failure if either of software or hardware timeouts happen */
+ if (ret < 0 || (!ret && (regval & EEPROM_CMD_EPC_TIMEOUT_BIT)))
+ return false;
+
+ return true;
+}
+
+static int pci1xxxx_eeprom_read(void *priv_t, unsigned int off,
+ void *buf_t, size_t count)
+{
+ struct pci1xxxx_otp_eeprom_device *priv = priv_t;
+ void __iomem *rb = priv->reg_base;
+ char *buf = buf_t;
+ u32 regval;
+ u32 byte;
+ int ret;
+
+ if (off >= priv->nvmem_config_eeprom.size)
+ return -EFAULT;
+
+ if ((off + count) > priv->nvmem_config_eeprom.size)
+ count = priv->nvmem_config_eeprom.size - off;
+
+ ret = set_sys_lock(priv);
+ if (ret)
+ return ret;
+
+ for (byte = 0; byte < count; byte++) {
+ writel(EEPROM_CMD_EPC_BUSY_BIT | (off + byte), rb +
+ MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
+
+ ret = read_poll_timeout(readl, regval,
+ !(regval & EEPROM_CMD_EPC_BUSY_BIT),
+ STATUS_READ_DELAY_US,
+ STATUS_READ_TIMEOUT_US, true,
+ rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
+ if (ret < 0 || (!ret && (regval & EEPROM_CMD_EPC_TIMEOUT_BIT))) {
+ ret = -EIO;
+ goto error;
+ }
+
+ buf[byte] = readl(rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG));
+ }
+ ret = byte;
+error:
+ release_sys_lock(priv);
+ return ret;
+}
+
+static int pci1xxxx_eeprom_write(void *priv_t, unsigned int off,
+ void *value_t, size_t count)
+{
+ struct pci1xxxx_otp_eeprom_device *priv = priv_t;
+ void __iomem *rb = priv->reg_base;
+ char *value = value_t;
+ u32 regval;
+ u32 byte;
+ int ret;
+
+ if (off >= priv->nvmem_config_eeprom.size)
+ return -EFAULT;
+
+ if ((off + count) > priv->nvmem_config_eeprom.size)
+ count = priv->nvmem_config_eeprom.size - off;
+
+ ret = set_sys_lock(priv);
+ if (ret)
+ return ret;
+
+ for (byte = 0; byte < count; byte++) {
+ writel(*(value + byte), rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG));
+ regval = EEPROM_CMD_EPC_TIMEOUT_BIT | EEPROM_CMD_EPC_WRITE |
+ (off + byte);
+ writel(regval, rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
+ writel(EEPROM_CMD_EPC_BUSY_BIT | regval,
+ rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
+
+ ret = read_poll_timeout(readl, regval,
+ !(regval & EEPROM_CMD_EPC_BUSY_BIT),
+ STATUS_READ_DELAY_US,
+ STATUS_READ_TIMEOUT_US, true,
+ rb + MMAP_EEPROM_OFFSET(EEPROM_CMD_REG));
+ if (ret < 0 || (!ret && (regval & EEPROM_CMD_EPC_TIMEOUT_BIT))) {
+ ret = -EIO;
+ goto error;
+ }
+ }
+ ret = byte;
+error:
+ release_sys_lock(priv);
+ return ret;
+}
+
+static void otp_device_set_address(struct pci1xxxx_otp_eeprom_device *priv,
+ u16 address)
+{
+ u16 lo, hi;
+
+ lo = address & BYTE_LOW;
+ hi = (address & BYTE_HIGH) >> 8;
+ writew(lo, priv->reg_base + MMAP_OTP_OFFSET(OTP_ADDR_LOW_OFFSET));
+ writew(hi, priv->reg_base + MMAP_OTP_OFFSET(OTP_ADDR_HIGH_OFFSET));
+}
+
+static int pci1xxxx_otp_read(void *priv_t, unsigned int off,
+ void *buf_t, size_t count)
+{
+ struct pci1xxxx_otp_eeprom_device *priv = priv_t;
+ void __iomem *rb = priv->reg_base;
+ char *buf = buf_t;
+ u32 regval;
+ u32 byte;
+ int ret;
+ u8 data;
+
+ if (off >= priv->nvmem_config_otp.size)
+ return -EFAULT;
+
+ if ((off + count) > priv->nvmem_config_otp.size)
+ count = priv->nvmem_config_otp.size - off;
+
+ ret = set_sys_lock(priv);
+ if (ret)
+ return ret;
+
+ for (byte = 0; byte < count; byte++) {
+ otp_device_set_address(priv, (u16)(off + byte));
+ data = readl(rb + MMAP_OTP_OFFSET(OTP_FUNC_CMD_OFFSET));
+ writel(data | OTP_FUNC_RD_BIT,
+ rb + MMAP_OTP_OFFSET(OTP_FUNC_CMD_OFFSET));
+ data = readl(rb + MMAP_OTP_OFFSET(OTP_CMD_GO_OFFSET));
+ writel(data | OTP_CMD_GO_BIT,
+ rb + MMAP_OTP_OFFSET(OTP_CMD_GO_OFFSET));
+
+ ret = read_poll_timeout(readl, regval,
+ !(regval & OTP_STATUS_BUSY_BIT),
+ STATUS_READ_DELAY_US,
+ STATUS_READ_TIMEOUT_US, true,
+ rb + MMAP_OTP_OFFSET(OTP_STATUS_OFFSET));
+
+ data = readl(rb + MMAP_OTP_OFFSET(OTP_PASS_FAIL_OFFSET));
+ if (ret < 0 || data & OTP_FAIL_BIT) {
+ ret = -EIO;
+ goto error;
+ }
+
+ buf[byte] = readl(rb + MMAP_OTP_OFFSET(OTP_RD_DATA_OFFSET));
+ }
+ ret = byte;
+error:
+ release_sys_lock(priv);
+ return ret;
+}
+
+static int pci1xxxx_otp_write(void *priv_t, unsigned int off,
+ void *value_t, size_t count)
+{
+ struct pci1xxxx_otp_eeprom_device *priv = priv_t;
+ void __iomem *rb = priv->reg_base;
+ char *value = value_t;
+ u32 regval;
+ u32 byte;
+ int ret;
+ u8 data;
+
+ if (off >= priv->nvmem_config_otp.size)
+ return -EFAULT;
+
+ if ((off + count) > priv->nvmem_config_otp.size)
+ count = priv->nvmem_config_otp.size - off;
+
+ ret = set_sys_lock(priv);
+ if (ret)
+ return ret;
+
+ for (byte = 0; byte < count; byte++) {
+ otp_device_set_address(priv, (u16)(off + byte));
+
+ /*
+ * Set OTP_PGM_MODE_BYTE command bit in OTP_PRGM_MODE register
+ * to enable Byte programming
+ */
+ data = readl(rb + MMAP_OTP_OFFSET(OTP_PRGM_MODE_OFFSET));
+ writel(data | OTP_PGM_MODE_BYTE_BIT,
+ rb + MMAP_OTP_OFFSET(OTP_PRGM_MODE_OFFSET));
+ writel(*(value + byte), rb + MMAP_OTP_OFFSET(OTP_PRGM_DATA_OFFSET));
+ data = readl(rb + MMAP_OTP_OFFSET(OTP_FUNC_CMD_OFFSET));
+ writel(data | OTP_FUNC_PGM_BIT,
+ rb + MMAP_OTP_OFFSET(OTP_FUNC_CMD_OFFSET));
+ data = readl(rb + MMAP_OTP_OFFSET(OTP_CMD_GO_OFFSET));
+ writel(data | OTP_CMD_GO_BIT,
+ rb + MMAP_OTP_OFFSET(OTP_CMD_GO_OFFSET));
+
+ ret = read_poll_timeout(readl, regval,
+ !(regval & OTP_STATUS_BUSY_BIT),
+ STATUS_READ_DELAY_US,
+ STATUS_READ_TIMEOUT_US, true,
+ rb + MMAP_OTP_OFFSET(OTP_STATUS_OFFSET));
+
+ data = readl(rb + MMAP_OTP_OFFSET(OTP_PASS_FAIL_OFFSET));
+ if (ret < 0 || data & OTP_FAIL_BIT) {
+ ret = -EIO;
+ goto error;
+ }
+ }
+ ret = byte;
+error:
+ release_sys_lock(priv);
+ return ret;
+}
+
+static int pci1xxxx_otp_eeprom_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id)
+{
+ struct auxiliary_device_wrapper *aux_dev_wrapper;
+ struct pci1xxxx_otp_eeprom_device *priv;
+ struct gp_aux_data_type *pdata;
+ int ret;
+ u8 data;
+
+ aux_dev_wrapper = container_of(aux_dev, struct auxiliary_device_wrapper,
+ aux_dev);
+ pdata = &aux_dev_wrapper->gp_aux_data;
+ if (!pdata)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&aux_dev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->pdev = aux_dev;
+
+ if (!devm_request_mem_region(&aux_dev->dev, pdata->region_start +
+ PERI_PF3_SYSTEM_REG_ADDR_BASE,
+ PERI_PF3_SYSTEM_REG_LENGTH,
+ aux_dev->name))
+ return -ENOMEM;
+
+ priv->reg_base = devm_ioremap(&aux_dev->dev, pdata->region_start +
+ PERI_PF3_SYSTEM_REG_ADDR_BASE,
+ PERI_PF3_SYSTEM_REG_LENGTH);
+ if (!priv->reg_base)
+ return -ENOMEM;
+
+ ret = set_sys_lock(priv);
+ if (ret)
+ return ret;
+
+ /* Set OTP_PWR_DN to 0 to make OTP operational */
+ data = readl(priv->reg_base + MMAP_OTP_OFFSET(OTP_PWR_DN_OFFSET));
+ writel(data & ~OTP_PWR_DN_BIT,
+ priv->reg_base + MMAP_OTP_OFFSET(OTP_PWR_DN_OFFSET));
+
+ dev_set_drvdata(&aux_dev->dev, priv);
+
+ if (is_eeprom_responsive(priv)) {
+ priv->nvmem_config_eeprom.type = NVMEM_TYPE_EEPROM;
+ priv->nvmem_config_eeprom.name = EEPROM_NAME;
+ priv->nvmem_config_eeprom.dev = &aux_dev->dev;
+ priv->nvmem_config_eeprom.owner = THIS_MODULE;
+ priv->nvmem_config_eeprom.reg_read = pci1xxxx_eeprom_read;
+ priv->nvmem_config_eeprom.reg_write = pci1xxxx_eeprom_write;
+ priv->nvmem_config_eeprom.priv = priv;
+ priv->nvmem_config_eeprom.stride = 1;
+ priv->nvmem_config_eeprom.word_size = 1;
+ priv->nvmem_config_eeprom.size = EEPROM_SIZE_BYTES;
+
+ priv->nvmem_eeprom = devm_nvmem_register(&aux_dev->dev,
+ &priv->nvmem_config_eeprom);
+ if (IS_ERR(priv->nvmem_eeprom))
+ return PTR_ERR(priv->nvmem_eeprom);
+ }
+
+ release_sys_lock(priv);
+
+ priv->nvmem_config_otp.type = NVMEM_TYPE_OTP;
+ priv->nvmem_config_otp.name = OTP_NAME;
+ priv->nvmem_config_otp.dev = &aux_dev->dev;
+ priv->nvmem_config_otp.owner = THIS_MODULE;
+ priv->nvmem_config_otp.reg_read = pci1xxxx_otp_read;
+ priv->nvmem_config_otp.reg_write = pci1xxxx_otp_write;
+ priv->nvmem_config_otp.priv = priv;
+ priv->nvmem_config_otp.stride = 1;
+ priv->nvmem_config_otp.word_size = 1;
+ priv->nvmem_config_otp.size = OTP_SIZE_BYTES;
+
+ priv->nvmem_otp = devm_nvmem_register(&aux_dev->dev,
+ &priv->nvmem_config_otp);
+ if (IS_ERR(priv->nvmem_otp))
+ return PTR_ERR(priv->nvmem_otp);
+
+ return ret;
+}
+
+static void pci1xxxx_otp_eeprom_remove(struct auxiliary_device *aux_dev)
+{
+ struct pci1xxxx_otp_eeprom_device *priv;
+ void __iomem *sys_lock;
+
+ priv = dev_get_drvdata(&aux_dev->dev);
+ sys_lock = priv->reg_base + MMAP_CFG_OFFSET(CFG_SYS_LOCK_OFFSET);
+ writel(CFG_SYS_LOCK_PF3, sys_lock);
+
+ /* Shut down OTP */
+ writel(OTP_PWR_DN_BIT,
+ priv->reg_base + MMAP_OTP_OFFSET(OTP_PWR_DN_OFFSET));
+
+ writel(0, sys_lock);
+}
+
+static const struct auxiliary_device_id pci1xxxx_otp_eeprom_auxiliary_id_table[] = {
+ {.name = "mchp_pci1xxxx_gp.gp_otp_e2p"},
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, pci1xxxx_otp_eeprom_auxiliary_id_table);
+
+static struct auxiliary_driver pci1xxxx_otp_eeprom_driver = {
+ .driver = {
+ .name = AUX_DRIVER_NAME,
+ },
+ .probe = pci1xxxx_otp_eeprom_probe,
+ .remove = pci1xxxx_otp_eeprom_remove,
+ .id_table = pci1xxxx_otp_eeprom_auxiliary_id_table
+};
+module_auxiliary_driver(pci1xxxx_otp_eeprom_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kumaravel Thiagarajan <kumaravel.thiagarajan@microchip.com>");
+MODULE_AUTHOR("Tharun Kumar P <tharunkumar.pasumarthi@microchip.com>");
+MODULE_AUTHOR("Vaibhaav Ram T.L <vaibhaavram.tl@microchip.com>");
+MODULE_DESCRIPTION("Microchip Technology Inc. PCI1xxxx OTP EEPROM Programmer");
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index b8b716faf192..2733070acf39 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -184,6 +184,7 @@ static int mei_fwver(struct mei_cl_device *cldev)
cldev->bus->fw_ver[i].hotfix = fwver->ver[i].hotfix;
cldev->bus->fw_ver[i].buildno = fwver->ver[i].buildno;
}
+ cldev->bus->fw_ver_received = 1;
return ret;
}
@@ -237,8 +238,11 @@ static void mei_gsc_mkhi_ver(struct mei_cl_device *cldev)
{
int ret;
- /* No need to enable the client if nothing is needed from it */
- if (!cldev->bus->fw_f_fw_ver_supported)
+ /*
+ * No need to enable the client if nothing is needed from it.
+	 * No need to fill in the version if it is already filled in by the fixed address client.
+ */
+ if (!cldev->bus->fw_f_fw_ver_supported || cldev->bus->fw_ver_received)
return;
ret = mei_cldev_enable(cldev);
@@ -555,8 +559,8 @@ static struct mei_fixup {
MEI_FIXUP(MEI_UUID_NFC_HCI, mei_nfc),
MEI_FIXUP(MEI_UUID_WD, mei_wd),
MEI_FIXUP(MEI_UUID_MKHIF_FIX, mei_mkhi_fix),
- MEI_FIXUP(MEI_UUID_IGSC_MKHI, mei_gsc_mkhi_ver),
MEI_FIXUP(MEI_UUID_IGSC_MKHI_FIX, mei_gsc_mkhi_fix_ver),
+ MEI_FIXUP(MEI_UUID_IGSC_MKHI, mei_gsc_mkhi_ver),
MEI_FIXUP(MEI_UUID_HDCP, whitelist),
MEI_FIXUP(MEI_UUID_ANY, vt_support),
MEI_FIXUP(MEI_UUID_PAVP, pxp_is_ready),
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 33ec6424dfee..2e65ce6bdec7 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -1329,6 +1329,7 @@ static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
mei_cl_bus_set_name(cldev);
cldev->is_added = 0;
INIT_LIST_HEAD(&cldev->bus_list);
+ device_enable_async_suspend(&cldev->dev);
return cldev;
}
diff --git a/drivers/misc/mei/gsc-me.c b/drivers/misc/mei/gsc-me.c
index e63cabd0818d..6be8f1cc052c 100644
--- a/drivers/misc/mei/gsc-me.c
+++ b/drivers/misc/mei/gsc-me.c
@@ -312,4 +312,5 @@ module_auxiliary_driver(mei_gsc_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_ALIAS("auxiliary:i915.mei-gsc");
MODULE_ALIAS("auxiliary:i915.mei-gscfi");
+MODULE_DESCRIPTION("Intel(R) Graphics System Controller");
MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index bac8852aad51..c35e005b26be 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -142,6 +142,9 @@ int mei_reset(struct mei_device *dev)
mei_hbm_reset(dev);
+ /* clean stale FW version */
+ dev->fw_ver_received = 0;
+
memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr));
if (ret) {
@@ -157,7 +160,10 @@ int mei_reset(struct mei_device *dev)
ret = mei_hw_start(dev);
if (ret) {
- dev_err(dev->dev, "hw_start failed ret = %d\n", ret);
+ char fw_sts_str[MEI_FW_STATUS_STR_SZ];
+
+ mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
+ dev_err(dev->dev, "hw_start failed ret = %d fw status = %s\n", ret, fw_sts_str);
return ret;
}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 51876da3fd65..bb4e9eabda97 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -27,7 +27,10 @@
#include "mei_dev.h"
#include "client.h"
-static struct class *mei_class;
+static const struct class mei_class = {
+ .name = "mei",
+};
+
static dev_t mei_devt;
#define MEI_MAX_DEVS MINORMASK
static DEFINE_MUTEX(mei_minor_lock);
@@ -1115,7 +1118,7 @@ void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
dev->dev_state = state;
- clsdev = class_find_device_by_devt(mei_class, dev->cdev.dev);
+ clsdev = class_find_device_by_devt(&mei_class, dev->cdev.dev);
if (clsdev) {
sysfs_notify(&clsdev->kobj, NULL, "dev_state");
put_device(clsdev);
@@ -1232,7 +1235,7 @@ int mei_register(struct mei_device *dev, struct device *parent)
goto err_dev_add;
}
- clsdev = device_create_with_groups(mei_class, parent, devno,
+ clsdev = device_create_with_groups(&mei_class, parent, devno,
dev, mei_groups,
"mei%d", dev->minor);
@@ -1264,7 +1267,7 @@ void mei_deregister(struct mei_device *dev)
mei_dbgfs_deregister(dev);
- device_destroy(mei_class, devno);
+ device_destroy(&mei_class, devno);
mei_minor_free(dev);
}
@@ -1274,12 +1277,9 @@ static int __init mei_init(void)
{
int ret;
- mei_class = class_create("mei");
- if (IS_ERR(mei_class)) {
- pr_err("couldn't create class\n");
- ret = PTR_ERR(mei_class);
- goto err;
- }
+ ret = class_register(&mei_class);
+ if (ret)
+ return ret;
ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
if (ret < 0) {
@@ -1298,15 +1298,14 @@ static int __init mei_init(void)
err_chrdev:
unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
err_class:
- class_destroy(mei_class);
-err:
+ class_unregister(&mei_class);
return ret;
}
static void __exit mei_exit(void)
{
unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
- class_destroy(mei_class);
+ class_unregister(&mei_class);
mei_cl_bus_exit();
}
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 895011b7a0bf..cdf8a2edf0b3 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -512,6 +512,7 @@ struct mei_dev_timeouts {
* @fw_ver : FW versions
*
* @fw_f_fw_ver_supported : fw feature: fw version supported
+ * @fw_ver_received : fw version received
*
* @me_clients_rwsem: rw lock over me_clients list
* @me_clients : list of FW clients
@@ -604,6 +605,7 @@ struct mei_device {
struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS];
unsigned int fw_f_fw_ver_supported:1;
+ unsigned int fw_ver_received:1;
struct rw_semaphore me_clients_rwsem;
struct list_head me_clients;
diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c
index 3bf560bbdee0..2dcb9169e404 100644
--- a/drivers/misc/mei/pxp/mei_pxp.c
+++ b/drivers/misc/mei/pxp/mei_pxp.c
@@ -40,8 +40,7 @@ mei_pxp_send_message(struct device *dev, const void *message, size_t size)
cldev = to_mei_cl_device(dev);
- /* temporary drop const qualifier till the API is fixed */
- byte = mei_cldev_send(cldev, (u8 *)message, size);
+ byte = mei_cldev_send(cldev, message, size);
if (byte < 0) {
dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
return byte;
diff --git a/drivers/misc/qcom-coincell.c b/drivers/misc/qcom-coincell.c
index 54d4f6ee8888..3c57f7429147 100644
--- a/drivers/misc/qcom-coincell.c
+++ b/drivers/misc/qcom-coincell.c
@@ -8,7 +8,6 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/regmap.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
struct qcom_coincell {
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 61209739dc43..e248c0a8882f 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -10,8 +10,8 @@
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/list_sort.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 01d2257deea4..c1a134bd8ba7 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -16,14 +16,12 @@
#include <linux/ti_wilink_st.h>
-extern void st_kim_recv(void *, const unsigned char *, long);
-void st_int_recv(void *, const unsigned char *, long);
/*
* function pointer pointing to either,
* st_kim_recv during registration to receive fw download responses
* st_int_recv after registration to receive proto stack responses
*/
-static void (*st_recv) (void *, const unsigned char *, long);
+static void (*st_recv)(void *disc_data, const u8 *ptr, size_t count);
/********************************************************************/
static void add_channel_to_table(struct st_data_s *st_gdata,
@@ -225,10 +223,8 @@ static inline void st_wakeup_ack(struct st_data_s *st_gdata,
* HCI-Events, ACL, SCO, 4 types of HCI-LL PM packets
* CH-8 packets from FM, CH-9 packets from GPS cores.
*/
-void st_int_recv(void *disc_data,
- const unsigned char *data, long count)
+static void st_int_recv(void *disc_data, const u8 *ptr, size_t count)
{
- char *ptr;
struct st_proto_s *proto;
unsigned short payload_len = 0;
int len = 0;
@@ -237,14 +233,12 @@ void st_int_recv(void *disc_data,
struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
unsigned long flags;
- ptr = (char *)data;
- /* tty_receive sent null ? */
- if (unlikely(ptr == NULL) || (st_gdata == NULL)) {
+ if (st_gdata == NULL) {
pr_err(" received null from TTY ");
return;
}
- pr_debug("count %ld rx_state %ld"
+ pr_debug("count %zu rx_state %ld"
"rx_count %ld", count, st_gdata->rx_state,
st_gdata->rx_count);
@@ -796,8 +790,8 @@ static void st_tty_close(struct tty_struct *tty)
pr_debug("%s: done ", __func__);
}
-static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
- const char *tty_flags, int count)
+static void st_tty_receive(struct tty_struct *tty, const u8 *data,
+ const u8 *tty_flags, size_t count)
{
#ifdef VERBOSE
print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE,
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index f2f6cab97c08..fe682e0553b2 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -127,21 +127,14 @@ static inline int kim_check_data_len(struct kim_data_s *kim_gdata, int len)
* have been observed to come in bursts of different
* tty_receive and hence the logic
*/
-static void kim_int_recv(struct kim_data_s *kim_gdata,
- const unsigned char *data, long count)
+static void kim_int_recv(struct kim_data_s *kim_gdata, const u8 *ptr,
+ size_t count)
{
- const unsigned char *ptr;
int len = 0;
unsigned char *plen;
pr_debug("%s", __func__);
/* Decode received bytes here */
- ptr = data;
- if (unlikely(ptr == NULL)) {
- pr_err(" received null from TTY ");
- return;
- }
-
while (count) {
if (kim_gdata->rx_count) {
len = min_t(unsigned int, kim_gdata->rx_count, count);
@@ -424,7 +417,7 @@ static long download_firmware(struct kim_data_s *kim_gdata)
* 1. response to read local version
* 2. during send/recv's of firmware download
*/
-void st_kim_recv(void *disc_data, const unsigned char *data, long count)
+void st_kim_recv(void *disc_data, const u8 *data, size_t count)
{
struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
struct kim_data_s *kim_gdata = st_gdata->kim_data;
diff --git a/drivers/misc/tps6594-esm.c b/drivers/misc/tps6594-esm.c
index 05e2c151e632..b4d67a1a24e4 100644
--- a/drivers/misc/tps6594-esm.c
+++ b/drivers/misc/tps6594-esm.c
@@ -56,8 +56,7 @@ static int tps6594_esm_probe(struct platform_device *pdev)
for (i = 0; i < pdev->num_resources; i++) {
irq = platform_get_irq_byname(pdev, pdev->resource[i].name);
if (irq < 0)
- return dev_err_probe(dev, irq, "Failed to get %s irq\n",
- pdev->resource[i].name);
+ return irq;
ret = devm_request_threaded_irq(dev, irq, NULL,
tps6594_esm_isr, IRQF_ONESHOT,
@@ -82,7 +81,7 @@ static int tps6594_esm_probe(struct platform_device *pdev)
return 0;
}
-static int tps6594_esm_remove(struct platform_device *pdev)
+static void tps6594_esm_remove(struct platform_device *pdev)
{
struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
@@ -103,8 +102,6 @@ static int tps6594_esm_remove(struct platform_device *pdev)
out:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
-
- return ret;
}
static int tps6594_esm_suspend(struct device *dev)
@@ -138,7 +135,7 @@ static struct platform_driver tps6594_esm_driver = {
.pm = pm_sleep_ptr(&tps6594_esm_pm_ops),
},
.probe = tps6594_esm_probe,
- .remove = tps6594_esm_remove,
+ .remove_new = tps6594_esm_remove,
};
module_platform_driver(tps6594_esm_driver);
diff --git a/drivers/misc/tps6594-pfsm.c b/drivers/misc/tps6594-pfsm.c
index 5223d1580807..88dcac814892 100644
--- a/drivers/misc/tps6594-pfsm.c
+++ b/drivers/misc/tps6594-pfsm.c
@@ -266,8 +266,7 @@ static int tps6594_pfsm_probe(struct platform_device *pdev)
for (i = 0 ; i < pdev->num_resources ; i++) {
irq = platform_get_irq_byname(pdev, pdev->resource[i].name);
if (irq < 0)
- return dev_err_probe(dev, irq, "Failed to get %s irq\n",
- pdev->resource[i].name);
+ return irq;
ret = devm_request_threaded_irq(dev, irq, NULL,
tps6594_pfsm_isr, IRQF_ONESHOT,
@@ -281,13 +280,11 @@ static int tps6594_pfsm_probe(struct platform_device *pdev)
return misc_register(&pfsm->miscdev);
}
-static int tps6594_pfsm_remove(struct platform_device *pdev)
+static void tps6594_pfsm_remove(struct platform_device *pdev)
{
struct tps6594_pfsm *pfsm = platform_get_drvdata(pdev);
misc_deregister(&pfsm->miscdev);
-
- return 0;
}
static struct platform_driver tps6594_pfsm_driver = {
@@ -295,7 +292,7 @@ static struct platform_driver tps6594_pfsm_driver = {
.name = "tps6594-pfsm",
},
.probe = tps6594_pfsm_probe,
- .remove = tps6594_pfsm_remove,
+ .remove_new = tps6594_pfsm_remove,
};
module_platform_driver(tps6594_pfsm_driver);
diff --git a/drivers/misc/vcpu_stall_detector.c b/drivers/misc/vcpu_stall_detector.c
index 53b5506080e1..6479c962da1a 100644
--- a/drivers/misc/vcpu_stall_detector.c
+++ b/drivers/misc/vcpu_stall_detector.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/param.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index 270ff4c5971a..94a0ee19bf20 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -15,7 +15,8 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/clk.h>
@@ -1347,7 +1348,6 @@ static int xsdfec_probe(struct platform_device *pdev)
{
struct xsdfec_dev *xsdfec;
struct device *dev;
- struct resource *res;
int err;
bool irq_enabled = true;
@@ -1363,8 +1363,7 @@ static int xsdfec_probe(struct platform_device *pdev)
return err;
dev = xsdfec->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xsdfec->regs = devm_ioremap_resource(dev, res);
+ xsdfec->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xsdfec->regs)) {
err = PTR_ERR(xsdfec->regs);
goto err_xsdfec_dev;
diff --git a/drivers/misc/xilinx_tmr_inject.c b/drivers/misc/xilinx_tmr_inject.c
index d96f6d7cd109..9fc5835bfebc 100644
--- a/drivers/misc/xilinx_tmr_inject.c
+++ b/drivers/misc/xilinx_tmr_inject.c
@@ -11,7 +11,8 @@
#include <asm/xilinx_mb_manager.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/fault-inject.h>
/* TMR Inject Register offsets */
diff --git a/drivers/misc/xilinx_tmr_manager.c b/drivers/misc/xilinx_tmr_manager.c
index 0ef55e06d3a0..03912a90fd95 100644
--- a/drivers/misc/xilinx_tmr_manager.c
+++ b/drivers/misc/xilinx_tmr_manager.c
@@ -15,7 +15,8 @@
#include <asm/xilinx_mb_manager.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
/* TMR Manager Register offsets */
#define XTMR_MANAGER_CR_OFFSET 0x0
@@ -170,8 +171,7 @@ static int xtmr_manager_probe(struct platform_device *pdev)
if (!xtmr_manager)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xtmr_manager->regs = devm_ioremap_resource(&pdev->dev, res);
+ xtmr_manager->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(xtmr_manager->regs))
return PTR_ERR(xtmr_manager->regs);
diff --git a/drivers/mmc/core/sdio_uart.c b/drivers/mmc/core/sdio_uart.c
index aa659758563f..ef38dcd3a887 100644
--- a/drivers/mmc/core/sdio_uart.c
+++ b/drivers/mmc/core/sdio_uart.c
@@ -760,8 +760,8 @@ static void sdio_uart_hangup(struct tty_struct *tty)
tty_port_hangup(&port->port);
}
-static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
- int count)
+static ssize_t sdio_uart_write(struct tty_struct *tty, const u8 *buf,
+ size_t count)
{
struct sdio_uart_port *port = tty->driver_data;
int ret;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 153fb8d0008e..df589d9b4d70 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -32,7 +32,6 @@
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
@@ -650,7 +649,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
/*
* Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
- * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
+ * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
* http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
* http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
* http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index a7714e3de887..22e73dd6118b 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -1599,7 +1599,7 @@ static void doc_unregister_sysfs(struct platform_device *pdev,
*/
static int flashcontrol_show(struct seq_file *s, void *p)
{
- struct docg3 *docg3 = (struct docg3 *)s->private;
+ struct docg3 *docg3 = s->private;
u8 fctrl;
@@ -1621,7 +1621,7 @@ DEFINE_SHOW_ATTRIBUTE(flashcontrol);
static int asic_mode_show(struct seq_file *s, void *p)
{
- struct docg3 *docg3 = (struct docg3 *)s->private;
+ struct docg3 *docg3 = s->private;
int pctrl, mode;
@@ -1658,7 +1658,7 @@ DEFINE_SHOW_ATTRIBUTE(asic_mode);
static int device_id_show(struct seq_file *s, void *p)
{
- struct docg3 *docg3 = (struct docg3 *)s->private;
+ struct docg3 *docg3 = s->private;
int id;
mutex_lock(&docg3->cascade->lock);
@@ -1672,7 +1672,7 @@ DEFINE_SHOW_ATTRIBUTE(device_id);
static int protection_show(struct seq_file *s, void *p)
{
- struct docg3 *docg3 = (struct docg3 *)s->private;
+ struct docg3 *docg3 = s->private;
int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
mutex_lock(&docg3->cascade->lock);
diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c
index 3a6ea7a6a30c..d533475fda15 100644
--- a/drivers/mtd/devices/mchp23k256.c
+++ b/drivers/mtd/devices/mchp23k256.c
@@ -15,7 +15,7 @@
#include <linux/sizes.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#define MAX_CMD_SIZE 4
diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c
index 40cd5041174c..f576e6a890e8 100644
--- a/drivers/mtd/devices/mchp48l640.c
+++ b/drivers/mtd/devices/mchp48l640.c
@@ -22,7 +22,7 @@
#include <linux/sizes.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
struct mchp48_caps {
unsigned int size;
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 1d3b2a94581f..0c1b93303618 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -13,7 +13,6 @@
#include <linux/err.h>
#include <linux/math64.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index cc17133be297..0a35e5236ae5 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -937,7 +937,6 @@ static int spear_smi_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct spear_smi_plat_data *pdata = NULL;
struct spear_smi *dev;
- struct resource *smi_base;
int irq, ret = 0;
int i;
@@ -975,9 +974,7 @@ static int spear_smi_probe(struct platform_device *pdev)
goto err;
}
- smi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- dev->io_base = devm_ioremap_resource(&pdev->dev, smi_base);
+ dev->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->io_base)) {
ret = PTR_ERR(dev->io_base);
goto err;
@@ -996,21 +993,17 @@ static int spear_smi_probe(struct platform_device *pdev)
dev->num_flashes = MAX_NUM_FLASH_CHIP;
}
- dev->clk = devm_clk_get(&pdev->dev, NULL);
+ dev->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(dev->clk)) {
ret = PTR_ERR(dev->clk);
goto err;
}
- ret = clk_prepare_enable(dev->clk);
- if (ret)
- goto err;
-
ret = devm_request_irq(&pdev->dev, irq, spear_smi_int_handler, 0,
pdev->name, dev);
if (ret) {
dev_err(&dev->pdev->dev, "SMI IRQ allocation failed\n");
- goto err_irq;
+ goto err;
}
mutex_init(&dev->lock);
@@ -1023,14 +1016,11 @@ static int spear_smi_probe(struct platform_device *pdev)
ret = spear_smi_setup_banks(pdev, i, pdata->np[i]);
if (ret) {
dev_err(&dev->pdev->dev, "bank setup failed\n");
- goto err_irq;
+ goto err;
}
}
return 0;
-
-err_irq:
- clk_disable_unprepare(dev->clk);
err:
return ret;
}
@@ -1059,8 +1049,6 @@ static int spear_smi_remove(struct platform_device *pdev)
WARN_ON(mtd_device_unregister(&flash->mtd));
}
- clk_disable_unprepare(dev->clk);
-
return 0;
}
diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c
index 3dbb1aa80bfa..95530cbbb1e0 100644
--- a/drivers/mtd/devices/st_spi_fsm.c
+++ b/drivers/mtd/devices/st_spi_fsm.c
@@ -2016,7 +2016,6 @@ static int stfsm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct flash_info *info;
- struct resource *res;
struct stfsm *fsm;
int ret;
@@ -2033,18 +2032,9 @@ static int stfsm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fsm);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Resource not found\n");
- return -ENODEV;
- }
-
- fsm->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(fsm->base)) {
- dev_err(&pdev->dev,
- "Failed to reserve memory region %pR\n", res);
+ fsm->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(fsm->base))
return PTR_ERR(fsm->base);
- }
fsm->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(fsm->clk)) {
diff --git a/drivers/mtd/lpddr/lpddr2_nvm.c b/drivers/mtd/lpddr/lpddr2_nvm.c
index e71af4c49096..f4e5174b2449 100644
--- a/drivers/mtd/lpddr/lpddr2_nvm.c
+++ b/drivers/mtd/lpddr/lpddr2_nvm.c
@@ -412,7 +412,6 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
struct map_info *map;
struct mtd_info *mtd;
struct resource *add_range;
- struct resource *control_regs;
struct pcm_int_data *pcm_data;
/* Allocate memory control_regs data structures */
@@ -452,8 +451,7 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
simple_map_init(map); /* fill with default methods */
- control_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pcm_data->ctl_regs = devm_ioremap_resource(&pdev->dev, control_regs);
+ pcm_data->ctl_regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(pcm_data->ctl_regs))
return PTR_ERR(pcm_data->ctl_regs);
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index 67a1dbfdd72c..a1da1c8973c0 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -118,11 +118,9 @@ ltq_mtd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ltq_mtd);
- ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!ltq_mtd->res) {
- dev_err(&pdev->dev, "failed to get memory resource\n");
- return -ENOENT;
- }
+ ltq_mtd->map->virt = devm_platform_get_and_ioremap_resource(pdev, 0, &ltq_mtd->res);
+ if (IS_ERR(ltq_mtd->map->virt))
+ return PTR_ERR(ltq_mtd->map->virt);
ltq_mtd->map = devm_kzalloc(&pdev->dev, sizeof(struct map_info),
GFP_KERNEL);
@@ -131,9 +129,6 @@ ltq_mtd_probe(struct platform_device *pdev)
ltq_mtd->map->phys = ltq_mtd->res->start;
ltq_mtd->map->size = resource_size(ltq_mtd->res);
- ltq_mtd->map->virt = devm_ioremap_resource(&pdev->dev, ltq_mtd->res);
- if (IS_ERR(ltq_mtd->map->virt))
- return PTR_ERR(ltq_mtd->map->virt);
ltq_mtd->map->name = ltq_map_name;
ltq_mtd->map->bankwidth = 2;
diff --git a/drivers/mtd/maps/physmap-bt1-rom.c b/drivers/mtd/maps/physmap-bt1-rom.c
index 58782cfaf71c..60dccc48f99e 100644
--- a/drivers/mtd/maps/physmap-bt1-rom.c
+++ b/drivers/mtd/maps/physmap-bt1-rom.c
@@ -14,7 +14,6 @@
#include <linux/mtd/xip.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/types.h>
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index c73854da5136..78710fbc8e7f 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -508,8 +508,7 @@ static int physmap_flash_probe(struct platform_device *dev)
for (i = 0; i < info->nmaps; i++) {
struct resource *res;
- res = platform_get_resource(dev, IORESOURCE_MEM, i);
- info->maps[i].virt = devm_ioremap_resource(&dev->dev, res);
+ info->maps[i].virt = devm_platform_get_and_ioremap_resource(dev, i, &res);
if (IS_ERR(info->maps[i].virt)) {
err = PTR_ERR(info->maps[i].virt);
goto err_out;
diff --git a/drivers/mtd/maps/physmap-gemini.c b/drivers/mtd/maps/physmap-gemini.c
index d4a46e159d38..9d3b4bf84a1a 100644
--- a/drivers/mtd/maps/physmap-gemini.c
+++ b/drivers/mtd/maps/physmap-gemini.c
@@ -8,10 +8,10 @@
*/
#include <linux/export.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mtd/map.h>
#include <linux/mtd/xip.h>
#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/bitops.h>
#include <linux/pinctrl/consumer.h>
diff --git a/drivers/mtd/maps/physmap-ixp4xx.c b/drivers/mtd/maps/physmap-ixp4xx.c
index 6a054229a8a0..c561468f95f6 100644
--- a/drivers/mtd/maps/physmap-ixp4xx.c
+++ b/drivers/mtd/maps/physmap-ixp4xx.c
@@ -11,7 +11,7 @@
*/
#include <linux/export.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/mtd/map.h>
#include <linux/mtd/xip.h>
#include "physmap-ixp4xx.h"
diff --git a/drivers/mtd/maps/physmap-ixp4xx.h b/drivers/mtd/maps/physmap-ixp4xx.h
index b0fc49b7f3ed..46824c57e58a 100644
--- a/drivers/mtd/maps/physmap-ixp4xx.h
+++ b/drivers/mtd/maps/physmap-ixp4xx.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/mtd/map.h>
#ifdef CONFIG_MTD_PHYSMAP_IXP4XX
diff --git a/drivers/mtd/maps/physmap-versatile.c b/drivers/mtd/maps/physmap-versatile.c
index a1b8b7b25f88..2e779111bf79 100644
--- a/drivers/mtd/maps/physmap-versatile.c
+++ b/drivers/mtd/maps/physmap-versatile.c
@@ -9,9 +9,9 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/mtd/map.h>
#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/bitops.h>
#include "physmap-versatile.h"
@@ -206,7 +206,7 @@ int of_flash_probe_versatile(struct platform_device *pdev,
if (!sysnp)
return -ENODEV;
- versatile_flashprot = (enum versatile_flashprot)devid->data;
+ versatile_flashprot = (uintptr_t)devid->data;
rmap = syscon_node_to_regmap(sysnp);
of_node_put(sysnp);
if (IS_ERR(rmap))
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index cedd8ef9a6bf..4c921dce7396 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -123,8 +123,7 @@ static int platram_probe(struct platform_device *pdev)
info->pdata = pdata;
/* get the resource for the memory mapping */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- info->map.virt = devm_ioremap_resource(&pdev->dev, res);
+ info->map.virt = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(info->map.virt)) {
err = PTR_ERR(info->map.virt);
goto exit_free;
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 860b19f77090..2bfdf1b7e18a 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -14,7 +14,7 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index fa476fb4dffb..9751416c2a91 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -262,7 +262,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
}
if (mtd_type_is_nand(mbd->mtd))
- pr_warn("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
+ pr_warn_ratelimited("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
mbd->tr->name, mbd->mtd->name);
/* OK, it's not open. Create cache info for it */
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 66ffc9f1ead2..ef6299af60e4 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -49,7 +49,7 @@ static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
dev->readonly = 1;
if (mtd_type_is_nand(mtd))
- pr_warn("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
+ pr_warn_ratelimited("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
tr->name, mtd->name);
if (add_mtd_blktrans_dev(dev))
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index e00b12aa5ec9..9bd661be3ae9 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -93,10 +93,39 @@ static void mtd_release(struct device *dev)
struct mtd_info *mtd = dev_get_drvdata(dev);
dev_t index = MTD_DEVT(mtd->index);
+ idr_remove(&mtd_idr, mtd->index);
+ of_node_put(mtd_get_of_node(mtd));
+
+ if (mtd_is_partition(mtd))
+ release_mtd_partition(mtd);
+
/* remove /dev/mtdXro node */
device_destroy(&mtd_class, index + 1);
}
+static void mtd_device_release(struct kref *kref)
+{
+ struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
+ bool is_partition = mtd_is_partition(mtd);
+
+ debugfs_remove_recursive(mtd->dbg.dfs_dir);
+
+ /* Try to remove the NVMEM provider */
+ nvmem_unregister(mtd->nvmem);
+
+ device_unregister(&mtd->dev);
+
+ /*
+ * Clear dev so mtd can be safely re-registered later if desired.
+	 * This should not be done for a partition, as it was already
+	 * destroyed in device_unregister().
+ */
+ if (!is_partition)
+ memset(&mtd->dev, 0, sizeof(mtd->dev));
+
+ module_put(THIS_MODULE);
+}
+
#define MTD_DEVICE_ATTR_RO(name) \
static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)
@@ -666,7 +695,7 @@ int add_mtd_device(struct mtd_info *mtd)
}
mtd->index = i;
- mtd->usecount = 0;
+ kref_init(&mtd->refcnt);
/* default value if not set by driver */
if (mtd->bitflip_threshold == 0)
@@ -779,7 +808,6 @@ int del_mtd_device(struct mtd_info *mtd)
{
int ret;
struct mtd_notifier *not;
- struct device_node *mtd_of_node;
mutex_lock(&mtd_table_mutex);
@@ -793,28 +821,8 @@ int del_mtd_device(struct mtd_info *mtd)
list_for_each_entry(not, &mtd_notifiers, list)
not->remove(mtd);
- if (mtd->usecount) {
- printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
- mtd->index, mtd->name, mtd->usecount);
- ret = -EBUSY;
- } else {
- mtd_of_node = mtd_get_of_node(mtd);
- debugfs_remove_recursive(mtd->dbg.dfs_dir);
-
- /* Try to remove the NVMEM provider */
- nvmem_unregister(mtd->nvmem);
-
- device_unregister(&mtd->dev);
-
- /* Clear dev so mtd can be safely re-registered later if desired */
- memset(&mtd->dev, 0, sizeof(mtd->dev));
-
- idr_remove(&mtd_idr, mtd->index);
- of_node_put(mtd_of_node);
-
- module_put(THIS_MODULE);
- ret = 0;
- }
+ kref_put(&mtd->refcnt, mtd_device_release);
+ ret = 0;
out_error:
mutex_unlock(&mtd_table_mutex);
@@ -1227,25 +1235,27 @@ int __get_mtd_device(struct mtd_info *mtd)
struct mtd_info *master = mtd_get_master(mtd);
int err;
- if (!try_module_get(master->owner))
- return -ENODEV;
-
if (master->_get_device) {
err = master->_get_device(mtd);
-
- if (err) {
- module_put(master->owner);
+ if (err)
return err;
- }
}
- master->usecount++;
+ if (!try_module_get(master->owner)) {
+ if (master->_put_device)
+ master->_put_device(master);
+ return -ENODEV;
+ }
- while (mtd->parent) {
- mtd->usecount++;
+ while (mtd) {
+ if (mtd != master)
+ kref_get(&mtd->refcnt);
mtd = mtd->parent;
}
+ if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ kref_get(&master->refcnt);
+
return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);
@@ -1329,18 +1339,23 @@ void __put_mtd_device(struct mtd_info *mtd)
{
struct mtd_info *master = mtd_get_master(mtd);
- while (mtd->parent) {
- --mtd->usecount;
- BUG_ON(mtd->usecount < 0);
- mtd = mtd->parent;
+ while (mtd) {
+		/* kref_put() can release mtd, so keep a reference to mtd->parent */
+ struct mtd_info *parent = mtd->parent;
+
+ if (mtd != master)
+ kref_put(&mtd->refcnt, mtd_device_release);
+ mtd = parent;
}
- master->usecount--;
+ if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
+ kref_put(&master->refcnt, mtd_device_release);
+
+ module_put(master->owner);
+	/* must be the last call, as master can be freed in _put_device */
if (master->_put_device)
master->_put_device(master);
-
- module_put(master->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index b5eefeabf310..b014861a06a6 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -12,6 +12,7 @@ int __must_check add_mtd_device(struct mtd_info *mtd);
int del_mtd_device(struct mtd_info *mtd);
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
+void release_mtd_partition(struct mtd_info *mtd);
struct mtd_partitions;
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a46affbb037d..23483db8f30c 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -32,6 +32,12 @@ static inline void free_partition(struct mtd_info *mtd)
kfree(mtd);
}
+void release_mtd_partition(struct mtd_info *mtd)
+{
+ WARN_ON(!list_empty(&mtd->part.node));
+ free_partition(mtd);
+}
+
static struct mtd_info *allocate_partition(struct mtd_info *parent,
const struct mtd_partition *part,
int partno, uint64_t cur_offset)
@@ -309,13 +315,11 @@ static int __mtd_del_partition(struct mtd_info *mtd)
sysfs_remove_files(&mtd->dev.kobj, mtd_partition_attrs);
+ list_del_init(&mtd->part.node);
err = del_mtd_device(mtd);
if (err)
return err;
- list_del(&mtd->part.node);
- free_partition(mtd);
-
return 0;
}
@@ -333,6 +337,7 @@ static int __del_mtd_partitions(struct mtd_info *mtd)
__del_mtd_partitions(child);
pr_info("Deleting %s MTD partition\n", child->name);
+ list_del_init(&child->part.node);
ret = del_mtd_device(child);
if (ret < 0) {
pr_err("Error when deleting partition \"%s\" (%d)\n",
@@ -340,9 +345,6 @@ static int __del_mtd_partitions(struct mtd_info *mtd)
err = ret;
continue;
}
-
- list_del(&child->part.node);
- free_partition(child);
}
return err;
diff --git a/drivers/mtd/nand/ecc-mxic.c b/drivers/mtd/nand/ecc-mxic.c
index 22a760e6024e..47e10945b8d2 100644
--- a/drivers/mtd/nand/ecc-mxic.c
+++ b/drivers/mtd/nand/ecc-mxic.c
@@ -18,7 +18,7 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/nand-ecc-mxic.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/mtd/nand/ecc.c b/drivers/mtd/nand/ecc.c
index 5250764cedee..8f996e8d61b8 100644
--- a/drivers/mtd/nand/ecc.c
+++ b/drivers/mtd/nand/ecc.c
@@ -95,9 +95,9 @@
#include <linux/module.h>
#include <linux/mtd/nand.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_platform.h>
static LIST_HEAD(on_host_hw_engines);
diff --git a/drivers/mtd/nand/onenand/onenand_omap2.c b/drivers/mtd/nand/onenand/onenand_omap2.c
index ff7af98604df..a12f8f3efd07 100644
--- a/drivers/mtd/nand/onenand/onenand_omap2.c
+++ b/drivers/mtd/nand/onenand/onenand_omap2.c
@@ -13,7 +13,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/omap-gpmc.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
@@ -467,12 +467,6 @@ static int omap2_onenand_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "error getting memory resource\n");
- return -EINVAL;
- }
-
r = of_property_read_u32(np, "reg", &val);
if (r) {
dev_err(dev, "reg not found in DT\n");
@@ -486,11 +480,11 @@ static int omap2_onenand_probe(struct platform_device *pdev)
init_completion(&c->irq_done);
init_completion(&c->dma_done);
c->gpmc_cs = val;
- c->phys_base = res->start;
- c->onenand.base = devm_ioremap_resource(dev, res);
+ c->onenand.base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(c->onenand.base))
return PTR_ERR(c->onenand.base);
+ c->phys_base = res->start;
c->int_gpiod = devm_gpiod_get_optional(dev, "int", GPIOD_IN);
if (IS_ERR(c->int_gpiod)) {
diff --git a/drivers/mtd/nand/onenand/onenand_samsung.c b/drivers/mtd/nand/onenand/onenand_samsung.c
index 92151aa52964..fd6890a03d55 100644
--- a/drivers/mtd/nand/onenand/onenand_samsung.c
+++ b/drivers/mtd/nand/onenand/onenand_samsung.c
@@ -860,8 +860,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
s3c_onenand_setup(mtd);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- onenand->base = devm_ioremap_resource(&pdev->dev, r);
+ onenand->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(onenand->base))
return PTR_ERR(onenand->base);
@@ -874,8 +873,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
this->options |= ONENAND_SKIP_UNLOCK_CHECK;
if (onenand->type != TYPE_S5PC110) {
- r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- onenand->ahb_addr = devm_ioremap_resource(&pdev->dev, r);
+ onenand->ahb_addr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(onenand->ahb_addr))
return PTR_ERR(onenand->ahb_addr);
@@ -895,8 +893,7 @@ static int s3c_onenand_probe(struct platform_device *pdev)
this->subpagesize = mtd->writesize;
} else { /* S5PC110 */
- r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- onenand->dma_addr = devm_ioremap_resource(&pdev->dev, r);
+ onenand->dma_addr = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(onenand->dma_addr))
return PTR_ERR(onenand->dma_addr);
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index b523354dfb00..cbf8ae85e1ae 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -160,7 +160,7 @@ config MTD_NAND_MARVELL
including:
- PXA3xx processors (NFCv1)
- 32-bit Armada platforms (XP, 37x, 38x, 39x) (NFCv2)
- - 64-bit Aramda platforms (7k, 8k) (NFCv2)
+	  - 64-bit Armada platforms (7k, 8k, ac5) (NFCv2)
config MTD_NAND_SLC_LPC32XX
tristate "NXP LPC32xx SLC NAND controller"
@@ -204,13 +204,6 @@ config MTD_NAND_BCM47XXNFLASH
registered by bcma as platform devices. This enables driver for
NAND flash memories. For now only BCM4706 is supported.
-config MTD_NAND_OXNAS
- tristate "Oxford Semiconductor NAND controller"
- depends on ARCH_OXNAS || COMPILE_TEST
- depends on HAS_IOMEM
- help
- This enables the NAND flash controller on Oxford Semiconductor SoCs.
-
config MTD_NAND_MPC5121_NFC
tristate "MPC5121 NAND controller"
depends on PPC_MPC512x
diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile
index d93e861d8ba7..25120a4afada 100644
--- a/drivers/mtd/nand/raw/Makefile
+++ b/drivers/mtd/nand/raw/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_MTD_NAND_MARVELL) += marvell_nand.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
-obj-$(CONFIG_MTD_NAND_OXNAS) += oxnas_nand.o
obj-$(CONFIG_MTD_NAND_FSL_ELBC) += fsl_elbc_nand.o
obj-$(CONFIG_MTD_NAND_FSL_IFC) += fsl_ifc_nand.o
obj-$(CONFIG_MTD_NAND_FSL_UPM) += fsl_upm.o
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c
index fa621ffa6490..919816a7aca7 100644
--- a/drivers/mtd/nand/raw/ams-delta.c
+++ b/drivers/mtd/nand/raw/ams-delta.c
@@ -22,7 +22,7 @@
#include <linux/mtd/nand-gpio.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index 906eef70cb6d..4621ec549cc7 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -1440,45 +1440,29 @@ static int anfc_probe(struct platform_device *pdev)
anfc_reset(nfc);
- nfc->controller_clk = devm_clk_get(&pdev->dev, "controller");
+ nfc->controller_clk = devm_clk_get_enabled(&pdev->dev, "controller");
if (IS_ERR(nfc->controller_clk))
return PTR_ERR(nfc->controller_clk);
- nfc->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ nfc->bus_clk = devm_clk_get_enabled(&pdev->dev, "bus");
if (IS_ERR(nfc->bus_clk))
return PTR_ERR(nfc->bus_clk);
- ret = clk_prepare_enable(nfc->controller_clk);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(nfc->bus_clk);
- if (ret)
- goto disable_controller_clk;
-
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
- goto disable_bus_clk;
+ return ret;
ret = anfc_parse_cs(nfc);
if (ret)
- goto disable_bus_clk;
+ return ret;
ret = anfc_chips_init(nfc);
if (ret)
- goto disable_bus_clk;
+ return ret;
platform_set_drvdata(pdev, nfc);
return 0;
-
-disable_bus_clk:
- clk_disable_unprepare(nfc->bus_clk);
-
-disable_controller_clk:
- clk_disable_unprepare(nfc->controller_clk);
-
- return ret;
}
static void anfc_remove(struct platform_device *pdev)
@@ -1486,9 +1470,6 @@ static void anfc_remove(struct platform_device *pdev)
struct arasan_nfc *nfc = platform_get_drvdata(pdev);
anfc_chips_cleanup(nfc);
-
- clk_disable_unprepare(nfc->bus_clk);
- clk_disable_unprepare(nfc->controller_clk);
}
static const struct of_device_id anfc_ids[] = {
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c
index 81e3d682a8cd..3f494f7c7ecb 100644
--- a/drivers/mtd/nand/raw/atmel/nand-controller.c
+++ b/drivers/mtd/nand/raw/atmel/nand-controller.c
@@ -1791,8 +1791,7 @@ atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
nand->numcs = 1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nand->cs[0].io.virt = devm_ioremap_resource(dev, res);
+ nand->cs[0].io.virt = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(nand->cs[0].io.virt))
return PTR_ERR(nand->cs[0].io.virt);
diff --git a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
index 71ddcc611f6e..9596629000f4 100644
--- a/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/bcm63138_nand.c
@@ -61,15 +61,13 @@ static int bcm63138_nand_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct bcm63138_nand_soc *priv;
struct brcmnand_soc *soc;
- struct resource *res;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
soc = &priv->soc;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-int-base");
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "nand-int-base");
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index 2e9c2e2d9c9f..440bef477930 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -272,6 +272,7 @@ struct brcmnand_controller {
const unsigned int *page_sizes;
unsigned int page_size_shift;
unsigned int max_oob;
+ u32 ecc_level_shift;
u32 features;
/* for low-power standby/resume only */
@@ -596,6 +597,34 @@ enum {
INTFC_CTLR_READY = BIT(31),
};
+/***********************************************************************
+ * NAND ACC CONTROL bitfield
+ *
+ * Some bits have remained constant throughout hardware revision, while
+ * others have shifted around.
+ ***********************************************************************/
+
+/* Constant for all versions (where supported) */
+enum {
+ /* See BRCMNAND_HAS_CACHE_MODE */
+ ACC_CONTROL_CACHE_MODE = BIT(22),
+
+ /* See BRCMNAND_HAS_PREFETCH */
+ ACC_CONTROL_PREFETCH = BIT(23),
+
+ ACC_CONTROL_PAGE_HIT = BIT(24),
+ ACC_CONTROL_WR_PREEMPT = BIT(25),
+ ACC_CONTROL_PARTIAL_PAGE = BIT(26),
+ ACC_CONTROL_RD_ERASED = BIT(27),
+ ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
+ ACC_CONTROL_WR_ECC = BIT(30),
+ ACC_CONTROL_RD_ECC = BIT(31),
+};
+
+#define ACC_CONTROL_ECC_SHIFT 16
+/* Only for v7.2 */
+#define ACC_CONTROL_ECC_EXT_SHIFT 13
+
static inline bool brcmnand_non_mmio_ops(struct brcmnand_controller *ctrl)
{
#if IS_ENABLED(CONFIG_MTD_NAND_BRCMNAND_BCMA)
@@ -737,6 +766,12 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
ctrl->features |= BRCMNAND_HAS_WP;
+	/* v7.2 has a different ECC level shift in the ACC register */
+ if (ctrl->nand_version == 0x0702)
+ ctrl->ecc_level_shift = ACC_CONTROL_ECC_EXT_SHIFT;
+ else
+ ctrl->ecc_level_shift = ACC_CONTROL_ECC_SHIFT;
+
return 0;
}
@@ -931,30 +966,6 @@ static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
return 0;
}
-/***********************************************************************
- * NAND ACC CONTROL bitfield
- *
- * Some bits have remained constant throughout hardware revision, while
- * others have shifted around.
- ***********************************************************************/
-
-/* Constant for all versions (where supported) */
-enum {
- /* See BRCMNAND_HAS_CACHE_MODE */
- ACC_CONTROL_CACHE_MODE = BIT(22),
-
- /* See BRCMNAND_HAS_PREFETCH */
- ACC_CONTROL_PREFETCH = BIT(23),
-
- ACC_CONTROL_PAGE_HIT = BIT(24),
- ACC_CONTROL_WR_PREEMPT = BIT(25),
- ACC_CONTROL_PARTIAL_PAGE = BIT(26),
- ACC_CONTROL_RD_ERASED = BIT(27),
- ACC_CONTROL_FAST_PGM_RDIN = BIT(28),
- ACC_CONTROL_WR_ECC = BIT(30),
- ACC_CONTROL_RD_ECC = BIT(31),
-};
-
static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
{
if (ctrl->nand_version == 0x0702)
@@ -967,18 +978,15 @@ static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
return GENMASK(4, 0);
}
-#define NAND_ACC_CONTROL_ECC_SHIFT 16
-#define NAND_ACC_CONTROL_ECC_EXT_SHIFT 13
-
static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
{
u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
- mask <<= NAND_ACC_CONTROL_ECC_SHIFT;
+ mask <<= ACC_CONTROL_ECC_SHIFT;
/* v7.2 includes additional ECC levels */
- if (ctrl->nand_version >= 0x0702)
- mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;
+ if (ctrl->nand_version == 0x0702)
+ mask |= 0x7 << ACC_CONTROL_ECC_EXT_SHIFT;
return mask;
}
@@ -992,8 +1000,8 @@ static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
if (en) {
acc_control |= ecc_flags; /* enable RD/WR ECC */
- acc_control |= host->hwcfg.ecc_level
- << NAND_ACC_CONTROL_ECC_SHIFT;
+ acc_control &= ~brcmnand_ecc_level_mask(ctrl);
+ acc_control |= host->hwcfg.ecc_level << ctrl->ecc_level_shift;
} else {
acc_control &= ~ecc_flags; /* disable RD/WR ECC */
acc_control &= ~brcmnand_ecc_level_mask(ctrl);
@@ -1072,6 +1080,14 @@ static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
cpu_relax();
} while (time_after(limit, jiffies));
+ /*
+	 * do a final check after timeout in case the CPU was busy and the driver
+	 * did not get enough time to perform the polling, avoiding false alarms
+ */
+ val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
+ if ((val & mask) == expected_val)
+ return 0;
+
dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
expected_val, val & mask);
@@ -1461,19 +1477,33 @@ static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
const u8 *oob, int sas, int sector_1k)
{
int tbytes = sas << sector_1k;
- int j;
+ int j, k = 0;
+ u32 last = 0xffffffff;
+ u8 *plast = (u8 *)&last;
/* Adjust OOB values for 1K sector size */
if (sector_1k && (i & 0x01))
tbytes = max(0, tbytes - (int)ctrl->max_oob);
tbytes = min_t(int, tbytes, ctrl->max_oob);
- for (j = 0; j < tbytes; j += 4)
+ /*
+	 * tbytes may not be a multiple of the word size. Make sure we don't
+	 * read past the end of the buffer and stop at the last full word.
+ */
+ for (j = 0; (j + 3) < tbytes; j += 4)
oob_reg_write(ctrl, j,
(oob[j + 0] << 24) |
(oob[j + 1] << 16) |
(oob[j + 2] << 8) |
(oob[j + 3] << 0));
+
+	/* handle the remaining bytes */
+ while (j < tbytes)
+ plast[k++] = oob[j++];
+
+ if (tbytes & 0x3)
+ oob_reg_write(ctrl, (tbytes & ~0x3), (__force u32)cpu_to_be32(last));
+
return tbytes;
}
@@ -1592,7 +1622,17 @@ static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr);
- BUG_ON(ctrl->cmd_pending != 0);
+ /*
+ * If we came here through _panic_write and there is a pending
+ * command, try to wait for it. If it times out, rather than
+ * hitting BUG_ON, just return so we don't crash while crashing.
+ */
+ if (oops_in_progress) {
+ if (ctrl->cmd_pending &&
+ bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0))
+ return;
+ } else
+ BUG_ON(ctrl->cmd_pending != 0);
ctrl->cmd_pending = cmd;
ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
@@ -1626,13 +1666,13 @@ static bool brcmstb_nand_wait_for_completion(struct nand_chip *chip)
disable_ctrl_irqs(ctrl);
sts = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY,
NAND_CTRL_RDY, 0);
- err = (sts < 0) ? true : false;
+ err = sts < 0;
} else {
unsigned long timeo = msecs_to_jiffies(
NAND_POLL_STATUS_TIMEOUT_MS);
/* wait for completion interrupt */
sts = wait_for_completion_timeout(&ctrl->done, timeo);
- err = (sts <= 0) ? true : false;
+ err = !sts;
}
return err;
@@ -1648,6 +1688,7 @@ static int brcmnand_waitfunc(struct nand_chip *chip)
if (ctrl->cmd_pending)
err = brcmstb_nand_wait_for_completion(chip);
+ ctrl->cmd_pending = 0;
if (err) {
u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
>> brcmnand_cmd_shift(ctrl);
@@ -1656,8 +1697,8 @@ static int brcmnand_waitfunc(struct nand_chip *chip)
"timeout waiting for command %#02x\n", cmd);
dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
+ return -ETIMEDOUT;
}
- ctrl->cmd_pending = 0;
return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
INTFC_FLASH_STATUS;
}
@@ -2561,7 +2602,7 @@ static int brcmnand_set_cfg(struct brcmnand_host *host,
tmp &= ~brcmnand_ecc_level_mask(ctrl);
tmp &= ~brcmnand_spare_area_mask(ctrl);
if (ctrl->nand_version >= 0x0302) {
- tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
+ tmp |= cfg->ecc_level << ctrl->ecc_level_shift;
tmp |= cfg->spare_area_size;
}
nand_writereg(ctrl, acc_control_offs, tmp);
@@ -2612,6 +2653,8 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
struct nand_chip *chip = &host->chip;
const struct nand_ecc_props *requirements =
nanddev_get_ecc_requirements(&chip->base);
+ struct nand_memory_organization *memorg =
+ nanddev_get_memorg(&chip->base);
struct brcmnand_controller *ctrl = host->ctrl;
struct brcmnand_cfg *cfg = &host->hwcfg;
char msg[128];
@@ -2633,10 +2676,11 @@ static int brcmnand_setup_dev(struct brcmnand_host *host)
if (cfg->spare_area_size > ctrl->max_oob)
cfg->spare_area_size = ctrl->max_oob;
/*
- * Set oobsize to be consistent with controller's spare_area_size, as
- * the rest is inaccessible.
+ * Set mtd and memorg oobsize to be consistent with controller's
+ * spare_area_size, as the rest is inaccessible.
*/
mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
+ memorg->oobsize = mtd->oobsize;
cfg->device_size = mtd->size;
cfg->block_size = mtd->erasesize;
@@ -3202,6 +3246,10 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
ret = brcmnand_init_cs(host, NULL);
if (ret) {
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(child);
+ goto err;
+ }
devm_kfree(dev, host);
continue; /* Try all chip-selects */
}
diff --git a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
index d32950847a62..089c70fc6edf 100644
--- a/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
+++ b/drivers/mtd/nand/raw/brcmnand/iproc_nand.c
@@ -103,7 +103,6 @@ static int iproc_nand_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct iproc_nand_soc *priv;
struct brcmnand_soc *soc;
- struct resource *res;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -112,13 +111,11 @@ static int iproc_nand_probe(struct platform_device *pdev)
spin_lock_init(&priv->idm_lock);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-idm");
- priv->idm_base = devm_ioremap_resource(dev, res);
+ priv->idm_base = devm_platform_ioremap_resource_byname(pdev, "iproc-idm");
if (IS_ERR(priv->idm_base))
return PTR_ERR(priv->idm_base);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-ext");
- priv->ext_base = devm_ioremap_resource(dev, res);
+ priv->ext_base = devm_platform_ioremap_resource_byname(pdev, "iproc-ext");
if (IS_ERR(priv->ext_base))
return PTR_ERR(priv->ext_base);
diff --git a/drivers/mtd/nand/raw/davinci_nand.c b/drivers/mtd/nand/raw/davinci_nand.c
index 415d6aaa8255..e75d81cf8c21 100644
--- a/drivers/mtd/nand/raw/davinci_nand.c
+++ b/drivers/mtd/nand/raw/davinci_nand.c
@@ -18,7 +18,6 @@
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_data/mtd-davinci.h>
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index 915047e3fbc2..edac8749bb93 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c
index fa537fee6701..20bb1e0cb5eb 100644
--- a/drivers/mtd/nand/raw/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c
@@ -8,6 +8,7 @@
*/
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c
index 7366e85c09fd..315e9d2b573d 100644
--- a/drivers/mtd/nand/raw/fsl_upm.c
+++ b/drivers/mtd/nand/raw/fsl_upm.c
@@ -13,7 +13,8 @@
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/mtd.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/fsl_lbc.h>
@@ -172,8 +173,7 @@ static int fun_probe(struct platform_device *ofdev)
if (!fun)
return -ENOMEM;
- io_res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
- fun->io_base = devm_ioremap_resource(&ofdev->dev, io_res);
+ fun->io_base = devm_platform_get_and_ioremap_resource(ofdev, 0, &io_res);
if (IS_ERR(fun->io_base))
return PTR_ERR(fun->io_base);
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index 7b4742420dfc..811982da3557 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -1066,16 +1066,12 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
host->regs_va = base + FSMC_NOR_REG_SIZE +
(host->bank * FSMC_NAND_BANK_SZ);
- host->clk = devm_clk_get(&pdev->dev, NULL);
+ host->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "failed to fetch block clock\n");
return PTR_ERR(host->clk);
}
- ret = clk_prepare_enable(host->clk);
- if (ret)
- return ret;
-
/*
* This device ID is actually a common AMBA ID as used on the
* AMBA PrimeCell bus. However it is not a PrimeCell.
@@ -1111,7 +1107,7 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
if (!host->read_dma_chan) {
dev_err(&pdev->dev, "Unable to get read dma channel\n");
ret = -ENODEV;
- goto disable_clk;
+ goto disable_fsmc;
}
host->write_dma_chan = dma_request_channel(mask, filter, NULL);
if (!host->write_dma_chan) {
@@ -1155,9 +1151,8 @@ release_dma_write_chan:
release_dma_read_chan:
if (host->mode == USE_DMA_ACCESS)
dma_release_channel(host->read_dma_chan);
-disable_clk:
+disable_fsmc:
fsmc_nand_disable(host);
- clk_disable_unprepare(host->clk);
return ret;
}
@@ -1182,7 +1177,6 @@ static void fsmc_nand_remove(struct platform_device *pdev)
dma_release_channel(host->write_dma_chan);
dma_release_channel(host->read_dma_chan);
}
- clk_disable_unprepare(host->clk);
}
}
@@ -1200,9 +1194,14 @@ static int fsmc_nand_suspend(struct device *dev)
static int fsmc_nand_resume(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
+ int ret;
if (host) {
- clk_prepare_enable(host->clk);
+ ret = clk_prepare_enable(host->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk\n");
+ return ret;
+ }
if (host->dev_timings)
fsmc_nand_setup(host, host->dev_timings);
nand_reset(&host->nand, 0);
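
Several drivers in this series (fsmc, intel, lpc32xx, mpc5121, mtk, orion) move from devm_clk_get() plus clk_prepare_enable() to devm_clk_get_enabled() (or the _optional_ variant), so the clock is disabled and unprepared automatically on probe failure and on unbind, and the explicit clk_disable_unprepare() calls in error paths and remove() can go away. A minimal before/after sketch, assuming only a struct device and an output clk pointer:

#include <linux/clk.h>
#include <linux/err.h>

/* before: two steps, each needing its own unwind */
static int probe_clock_old(struct device *dev, struct clk **out)
{
        struct clk *clk;
        int ret;

        clk = devm_clk_get(dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);
        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;
        *out = clk;
        return 0;
}

/* after: one call, devres disables/unprepares automatically */
static int probe_clock_new(struct device *dev, struct clk **out)
{
        struct clk *clk;

        clk = devm_clk_get_enabled(dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);
        *out = clk;
        return 0;
}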
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 500e7a28d2e4..e71ad2fcec23 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -13,7 +13,7 @@
#include <linux/module.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma/mxs-dma.h>
#include "gpmi-nand.h"
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index 9054559e52dd..525c34c281b6 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
index b9f135297aa0..6748226b8bd1 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c
index a9909eb08124..cb5d88f42297 100644
--- a/drivers/mtd/nand/raw/intel-nand-controller.c
+++ b/drivers/mtd/nand/raw/intel-nand-controller.c
@@ -626,16 +626,10 @@ static int ebu_nand_probe(struct platform_device *pdev)
goto err_of_node_put;
}
- ebu_host->clk = devm_clk_get(dev, NULL);
+ ebu_host->clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(ebu_host->clk)) {
ret = dev_err_probe(dev, PTR_ERR(ebu_host->clk),
- "failed to get clock\n");
- goto err_of_node_put;
- }
-
- ret = clk_prepare_enable(ebu_host->clk);
- if (ret) {
- dev_err(dev, "failed to enable clock: %d\n", ret);
+ "failed to get and enable clock\n");
goto err_of_node_put;
}
@@ -643,7 +637,7 @@ static int ebu_nand_probe(struct platform_device *pdev)
if (IS_ERR(ebu_host->dma_tx)) {
ret = dev_err_probe(dev, PTR_ERR(ebu_host->dma_tx),
"failed to request DMA tx chan!.\n");
- goto err_disable_unprepare_clk;
+ goto err_of_node_put;
}
ebu_host->dma_rx = dma_request_chan(dev, "rx");
@@ -698,8 +692,6 @@ err_clean_nand:
nand_cleanup(&ebu_host->chip);
err_cleanup_dma:
ebu_dma_cleanup(ebu_host);
-err_disable_unprepare_clk:
- clk_disable_unprepare(ebu_host->clk);
err_of_node_put:
of_node_put(chip_np);
@@ -716,7 +708,6 @@ static void ebu_nand_remove(struct platform_device *pdev)
nand_cleanup(&ebu_host->chip);
ebu_nand_disable(&ebu_host->chip);
ebu_dma_cleanup(ebu_host);
- clk_disable_unprepare(ebu_host->clk);
}
static const struct of_device_id ebu_nand_match[] = {
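
intel-nand-controller combines the same devm_clk_get_enabled() conversion with dev_err_probe(), which both returns the error code and logs the message, while staying silent (but recording the reason) when the error is -EPROBE_DEFER. A hedged sketch of that error path; the helper name is illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_clock(struct device *dev, struct clk **out)
{
        struct clk *clk;

        clk = devm_clk_get_enabled(dev, NULL);
        if (IS_ERR(clk))
                /* logs at err level, but is quiet for -EPROBE_DEFER */
                return dev_err_probe(dev, PTR_ERR(clk),
                                     "failed to get and enable clock\n");

        *out = clk;
        return 0;
}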
diff --git a/drivers/mtd/nand/raw/lpc32xx_mlc.c b/drivers/mtd/nand/raw/lpc32xx_mlc.c
index b3136ae6f4e9..488fd452611a 100644
--- a/drivers/mtd/nand/raw/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_mlc.c
@@ -695,8 +695,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
host->pdev = pdev;
- rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+ host->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &rc);
if (IS_ERR(host->io_base))
return PTR_ERR(host->io_base);
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index 3139b6107660..1c5fa855b9f2 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -836,8 +836,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
if (!host)
return -ENOMEM;
- rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+ host->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &rc);
if (IS_ERR(host->io_base))
return PTR_ERR(host->io_base);
@@ -872,15 +871,12 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
mtd->dev.parent = &pdev->dev;
/* Get NAND clock */
- host->clk = devm_clk_get(&pdev->dev, NULL);
+ host->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
dev_err(&pdev->dev, "Clock failure\n");
res = -ENOENT;
goto enable_wp;
}
- res = clk_prepare_enable(host->clk);
- if (res)
- goto enable_wp;
/* Set NAND IO addresses and command/ready functions */
chip->legacy.IO_ADDR_R = SLC_DATA(host->io_base);
@@ -908,13 +904,13 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
GFP_KERNEL);
if (host->data_buf == NULL) {
res = -ENOMEM;
- goto unprepare_clk;
+ goto enable_wp;
}
res = lpc32xx_nand_dma_setup(host);
if (res) {
res = -EIO;
- goto unprepare_clk;
+ goto enable_wp;
}
/* Find NAND device */
@@ -935,8 +931,6 @@ cleanup_nand:
nand_cleanup(chip);
release_dma:
dma_release_channel(host->dma_chan);
-unprepare_clk:
- clk_disable_unprepare(host->clk);
enable_wp:
lpc32xx_wp_enable(host);
@@ -963,7 +957,6 @@ static void lpc32xx_nand_remove(struct platform_device *pdev)
tmp &= ~SLCCFG_CE_LOW;
writel(tmp, SLC_CTRL(host->io_base));
- clk_disable_unprepare(host->clk);
lpc32xx_wp_enable(host);
}
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 30c15e4e1cc0..2c94da7a3b3a 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -77,9 +77,10 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/mtd/rawnand.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -375,6 +376,7 @@ static inline struct marvell_nand_chip_sel *to_nand_sel(struct marvell_nand_chip
* BCH error detection and correction algorithm,
* NDCB3 register has been added
* @use_dma: Use dma for data transfers
+ * @max_mode_number: Maximum timing mode supported by the controller
*/
struct marvell_nfc_caps {
unsigned int max_cs_nb;
@@ -383,6 +385,7 @@ struct marvell_nfc_caps {
bool legacy_of_bindings;
bool is_nfcv2;
bool use_dma;
+ unsigned int max_mode_number;
};
/**
@@ -2376,6 +2379,9 @@ static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr,
if (IS_ERR(sdr))
return PTR_ERR(sdr);
+ if (nfc->caps->max_mode_number && nfc->caps->max_mode_number < conf->timings.mode)
+ return -EOPNOTSUPP;
+
/*
* SDR timings are given in pico-seconds while NFC timings must be
* expressed in NAND controller clock cycles, which is half of the
@@ -3073,6 +3079,13 @@ static const struct marvell_nfc_caps marvell_armada_8k_nfc_caps = {
.is_nfcv2 = true,
};
+static const struct marvell_nfc_caps marvell_ac5_caps = {
+ .max_cs_nb = 2,
+ .max_rb_nb = 1,
+ .is_nfcv2 = true,
+ .max_mode_number = 3,
+};
+
static const struct marvell_nfc_caps marvell_armada370_nfc_caps = {
.max_cs_nb = 4,
.max_rb_nb = 2,
@@ -3122,6 +3135,10 @@ static const struct of_device_id marvell_nfc_of_ids[] = {
.data = &marvell_armada_8k_nfc_caps,
},
{
+ .compatible = "marvell,ac5-nand-controller",
+ .data = &marvell_ac5_caps,
+ },
+ {
.compatible = "marvell,armada370-nand-controller",
.data = &marvell_armada370_nfc_caps,
},
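
The marvell change adds a per-compatible max_mode_number cap so the new AC5 entry, limited to SDR timing mode 3, makes ->setup_interface() reject faster modes with -EOPNOTSUPP. The caps structure is carried in the .data field of the of_device_id table; a minimal sketch of retrieving such match data in probe, with the struct and field names only illustrative (the marvell driver's own lookup may differ):

#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/errno.h>

struct example_caps {
        unsigned int max_mode_number;   /* 0 = no limit */
};

static int example_probe(struct platform_device *pdev)
{
        const struct example_caps *caps;

        /* returns the .data of the matching of_device_id entry */
        caps = device_get_match_data(&pdev->dev);
        if (!caps)
                return -ENODEV;

        dev_info(&pdev->dev, "max timing mode: %u\n", caps->max_mode_number);
        return 0;
}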
diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
index b10011dec1e6..25e3c1cb605e 100644
--- a/drivers/mtd/nand/raw/meson_nand.c
+++ b/drivers/mtd/nand/raw/meson_nand.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/sched/task_stack.h>
#define NFC_REG_CMD 0x00
@@ -135,6 +134,7 @@ struct meson_nfc_nand_chip {
struct meson_nand_ecc {
u32 bch;
u32 strength;
+ u32 size;
};
struct meson_nfc_data {
@@ -190,7 +190,8 @@ struct meson_nfc {
};
enum {
- NFC_ECC_BCH8_1K = 2,
+ NFC_ECC_BCH8_512 = 1,
+ NFC_ECC_BCH8_1K,
NFC_ECC_BCH24_1K,
NFC_ECC_BCH30_1K,
NFC_ECC_BCH40_1K,
@@ -198,15 +199,16 @@ enum {
NFC_ECC_BCH60_1K,
};
-#define MESON_ECC_DATA(b, s) { .bch = (b), .strength = (s)}
+#define MESON_ECC_DATA(b, s, sz) { .bch = (b), .strength = (s), .size = (sz) }
static struct meson_nand_ecc meson_ecc[] = {
- MESON_ECC_DATA(NFC_ECC_BCH8_1K, 8),
- MESON_ECC_DATA(NFC_ECC_BCH24_1K, 24),
- MESON_ECC_DATA(NFC_ECC_BCH30_1K, 30),
- MESON_ECC_DATA(NFC_ECC_BCH40_1K, 40),
- MESON_ECC_DATA(NFC_ECC_BCH50_1K, 50),
- MESON_ECC_DATA(NFC_ECC_BCH60_1K, 60),
+ MESON_ECC_DATA(NFC_ECC_BCH8_512, 8, 512),
+ MESON_ECC_DATA(NFC_ECC_BCH8_1K, 8, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH24_1K, 24, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH30_1K, 30, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH40_1K, 40, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH50_1K, 50, 1024),
+ MESON_ECC_DATA(NFC_ECC_BCH60_1K, 60, 1024),
};
static int meson_nand_calc_ecc_bytes(int step_size, int strength)
@@ -224,8 +226,27 @@ static int meson_nand_calc_ecc_bytes(int step_size, int strength)
NAND_ECC_CAPS_SINGLE(meson_gxl_ecc_caps,
meson_nand_calc_ecc_bytes, 1024, 8, 24, 30, 40, 50, 60);
-NAND_ECC_CAPS_SINGLE(meson_axg_ecc_caps,
- meson_nand_calc_ecc_bytes, 1024, 8);
+
+static const int axg_stepinfo_strengths[] = { 8 };
+
+static const struct nand_ecc_step_info axg_stepinfo[] = {
+ {
+ .stepsize = 1024,
+ .strengths = axg_stepinfo_strengths,
+ .nstrengths = ARRAY_SIZE(axg_stepinfo_strengths)
+ },
+ {
+ .stepsize = 512,
+ .strengths = axg_stepinfo_strengths,
+ .nstrengths = ARRAY_SIZE(axg_stepinfo_strengths)
+ },
+};
+
+static const struct nand_ecc_caps meson_axg_ecc_caps = {
+ .stepinfos = axg_stepinfo,
+ .nstepinfos = ARRAY_SIZE(axg_stepinfo),
+ .calc_ecc_bytes = meson_nand_calc_ecc_bytes,
+};
static struct meson_nfc_nand_chip *to_meson_nand(struct nand_chip *nand)
{
@@ -400,9 +421,10 @@ static void meson_nfc_set_data_oob(struct nand_chip *nand,
}
}
-static int meson_nfc_wait_no_rb_pin(struct meson_nfc *nfc, int timeout_ms,
+static int meson_nfc_wait_no_rb_pin(struct nand_chip *nand, int timeout_ms,
bool need_cmd_read0)
{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
u32 cmd, cfg;
meson_nfc_cmd_idle(nfc, nfc->timing.twb);
@@ -414,8 +436,7 @@ static int meson_nfc_wait_no_rb_pin(struct meson_nfc *nfc, int timeout_ms,
writel(cfg, nfc->reg_base + NFC_REG_CFG);
reinit_completion(&nfc->completion);
- cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_STATUS;
- writel(cmd, nfc->reg_base + NFC_REG_CMD);
+ nand_status_op(nand, NULL);
/* use the max erase time as the maximum clock for waiting R/B */
cmd = NFC_CMD_RB | NFC_CMD_RB_INT_NO_PIN | nfc->timing.tbers_max;
@@ -425,12 +446,8 @@ static int meson_nfc_wait_no_rb_pin(struct meson_nfc *nfc, int timeout_ms,
msecs_to_jiffies(timeout_ms)))
return -ETIMEDOUT;
- if (need_cmd_read0) {
- cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_READ0;
- writel(cmd, nfc->reg_base + NFC_REG_CMD);
- meson_nfc_drain_cmd(nfc);
- meson_nfc_wait_cmd_finish(nfc, CMD_FIFO_EMPTY_TIMEOUT);
- }
+ if (need_cmd_read0)
+ nand_exit_status_op(nand);
return 0;
}
@@ -463,9 +480,11 @@ static int meson_nfc_wait_rb_pin(struct meson_nfc *nfc, int timeout_ms)
return ret;
}
-static int meson_nfc_queue_rb(struct meson_nfc *nfc, int timeout_ms,
+static int meson_nfc_queue_rb(struct nand_chip *nand, int timeout_ms,
bool need_cmd_read0)
{
+ struct meson_nfc *nfc = nand_get_controller_data(nand);
+
if (nfc->no_rb_pin) {
/* This mode is used when there is no wired R/B pin.
* It works like 'nand_soft_waitrdy()', but instead of
@@ -477,7 +496,7 @@ static int meson_nfc_queue_rb(struct meson_nfc *nfc, int timeout_ms,
* needed (for all cases except page programming - this
* is reason of 'need_cmd_read0' flag).
*/
- return meson_nfc_wait_no_rb_pin(nfc, timeout_ms,
+ return meson_nfc_wait_no_rb_pin(nand, timeout_ms,
need_cmd_read0);
} else {
return meson_nfc_wait_rb_pin(nfc, timeout_ms);
@@ -687,7 +706,7 @@ static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand,
if (in) {
nfc->cmdfifo.rw.cmd1 = cs | NFC_CMD_CLE | NAND_CMD_READSTART;
writel(nfc->cmdfifo.rw.cmd1, nfc->reg_base + NFC_REG_CMD);
- meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tR_max), true);
+ meson_nfc_queue_rb(nand, PSEC_TO_MSEC(sdr->tR_max), true);
} else {
meson_nfc_cmd_idle(nfc, nfc->timing.tadl);
}
@@ -733,7 +752,7 @@ static int meson_nfc_write_page_sub(struct nand_chip *nand,
cmd = nfc->param.chip_select | NFC_CMD_CLE | NAND_CMD_PAGEPROG;
writel(cmd, nfc->reg_base + NFC_REG_CMD);
- meson_nfc_queue_rb(nfc, PSEC_TO_MSEC(sdr->tPROG_max), false);
+ meson_nfc_queue_rb(nand, PSEC_TO_MSEC(sdr->tPROG_max), false);
meson_nfc_dma_buffer_release(nand, data_len, info_len, DMA_TO_DEVICE);
@@ -1049,7 +1068,7 @@ static int meson_nfc_exec_op(struct nand_chip *nand,
break;
case NAND_OP_WAITRDY_INSTR:
- meson_nfc_queue_rb(nfc, instr->ctx.waitrdy.timeout_ms,
+ meson_nfc_queue_rb(nand, instr->ctx.waitrdy.timeout_ms,
true);
if (instr->delay_ns)
meson_nfc_cmd_idle(nfc, delay_idle);
@@ -1259,7 +1278,8 @@ static int meson_nand_bch_mode(struct nand_chip *nand)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(meson_ecc); i++) {
- if (meson_ecc[i].strength == nand->ecc.strength) {
+ if (meson_ecc[i].strength == nand->ecc.strength &&
+ meson_ecc[i].size == nand->ecc.size) {
meson_chip->bch_mode = meson_ecc[i].bch;
return 0;
}
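
Two things happen in the meson driver: the ECC capabilities gain a 512-byte step (so BCH8 can be matched on both 512- and 1024-byte step sizes, now keyed on strength and size), and the open-coded STATUS/READ0 register writes in the no-R/B-pin wait path are replaced by the core helpers nand_status_op() and nand_exit_status_op() (the latter gets its EXPORT_SYMBOL_GPL in nand_base.c further down for exactly this reason). A minimal sketch of a status poll built on those helpers, assuming nothing beyond the rawnand API:

#include <linux/mtd/rawnand.h>
#include <linux/errno.h>

static int example_wait_ready(struct nand_chip *chip)
{
        u8 status;
        int ret;

        /* issue NAND_CMD_STATUS and read back one status byte */
        ret = nand_status_op(chip, &status);
        if (ret)
                return ret;

        /* leave status mode (READ0) so later data reads work */
        ret = nand_exit_status_op(chip);
        if (ret)
                return ret;

        return (status & NAND_STATUS_READY) ? 0 : -EBUSY;
}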
diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
index ab05ee65702c..215610f808f1 100644
--- a/drivers/mtd/nand/raw/mpc5121_nfc.c
+++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
@@ -21,10 +21,10 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/mpc5121.h>
@@ -595,8 +595,6 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
struct nand_chip *chip = mtd_to_nand(mtd);
struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
- clk_disable_unprepare(prv->clk);
-
if (prv->csreg)
iounmap(prv->csreg);
}
@@ -717,17 +715,12 @@ static int mpc5121_nfc_probe(struct platform_device *op)
}
/* Enable NFC clock */
- clk = devm_clk_get(dev, "ipg");
+ clk = devm_clk_get_enabled(dev, "ipg");
if (IS_ERR(clk)) {
- dev_err(dev, "Unable to acquire NFC clock!\n");
+ dev_err(dev, "Unable to acquire and enable NFC clock!\n");
retval = PTR_ERR(clk);
goto error;
}
- retval = clk_prepare_enable(clk);
- if (retval) {
- dev_err(dev, "Unable to enable NFC clock!\n");
- goto error;
- }
prv->clk = clk;
/* Reset NAND Flash controller */
diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
index b2fa6b2074ab..29c8bddde67f 100644
--- a/drivers/mtd/nand/raw/mtk_nand.c
+++ b/drivers/mtd/nand/raw/mtk_nand.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/iopoll.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mtd/nand-ecc-mtk.h>
/* NAND controller register definition */
@@ -1119,32 +1118,6 @@ static irqreturn_t mtk_nfc_irq(int irq, void *id)
return IRQ_HANDLED;
}
-static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
-{
- int ret;
-
- ret = clk_prepare_enable(clk->nfi_clk);
- if (ret) {
- dev_err(dev, "failed to enable nfi clk\n");
- return ret;
- }
-
- ret = clk_prepare_enable(clk->pad_clk);
- if (ret) {
- dev_err(dev, "failed to enable pad clk\n");
- clk_disable_unprepare(clk->nfi_clk);
- return ret;
- }
-
- return 0;
-}
-
-static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
-{
- clk_disable_unprepare(clk->nfi_clk);
- clk_disable_unprepare(clk->pad_clk);
-}
-
static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
struct mtd_oob_region *oob_region)
{
@@ -1546,40 +1519,36 @@ static int mtk_nfc_probe(struct platform_device *pdev)
goto release_ecc;
}
- nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
+ nfc->clk.nfi_clk = devm_clk_get_enabled(dev, "nfi_clk");
if (IS_ERR(nfc->clk.nfi_clk)) {
dev_err(dev, "no clk\n");
ret = PTR_ERR(nfc->clk.nfi_clk);
goto release_ecc;
}
- nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
+ nfc->clk.pad_clk = devm_clk_get_enabled(dev, "pad_clk");
if (IS_ERR(nfc->clk.pad_clk)) {
dev_err(dev, "no pad clk\n");
ret = PTR_ERR(nfc->clk.pad_clk);
goto release_ecc;
}
- ret = mtk_nfc_enable_clk(dev, &nfc->clk);
- if (ret)
- goto release_ecc;
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = -EINVAL;
- goto clk_disable;
+ goto release_ecc;
}
ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
if (ret) {
dev_err(dev, "failed to request nfi irq\n");
- goto clk_disable;
+ goto release_ecc;
}
ret = dma_set_mask(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "failed to set dma mask\n");
- goto clk_disable;
+ goto release_ecc;
}
platform_set_drvdata(pdev, nfc);
@@ -1587,14 +1556,11 @@ static int mtk_nfc_probe(struct platform_device *pdev)
ret = mtk_nfc_nand_chips_init(dev, nfc);
if (ret) {
dev_err(dev, "failed to init nand chips\n");
- goto clk_disable;
+ goto release_ecc;
}
return 0;
-clk_disable:
- mtk_nfc_disable_clk(&nfc->clk);
-
release_ecc:
mtk_ecc_release(nfc->ecc);
@@ -1619,7 +1585,6 @@ static void mtk_nfc_remove(struct platform_device *pdev)
}
mtk_ecc_release(nfc->ecc);
- mtk_nfc_disable_clk(&nfc->clk);
}
#ifdef CONFIG_PM_SLEEP
@@ -1627,7 +1592,8 @@ static int mtk_nfc_suspend(struct device *dev)
{
struct mtk_nfc *nfc = dev_get_drvdata(dev);
- mtk_nfc_disable_clk(&nfc->clk);
+ clk_disable_unprepare(nfc->clk.nfi_clk);
+ clk_disable_unprepare(nfc->clk.pad_clk);
return 0;
}
@@ -1642,9 +1608,18 @@ static int mtk_nfc_resume(struct device *dev)
udelay(200);
- ret = mtk_nfc_enable_clk(dev, &nfc->clk);
- if (ret)
+ ret = clk_prepare_enable(nfc->clk.nfi_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable nfi clk\n");
return ret;
+ }
+
+ ret = clk_prepare_enable(nfc->clk.pad_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable pad clk\n");
+ clk_disable_unprepare(nfc->clk.nfi_clk);
+ return ret;
+ }
/* reset NAND chip if VCC was powered off */
list_for_each_entry(chip, &nfc->chips, node) {
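
With both MTK clocks obtained through devm_clk_get_enabled(), the dedicated enable/disable helpers and their unwinding in probe/remove disappear, but system suspend/resume still has to gate the clocks by hand: devres ties the enable only to the device's bound lifetime, not to power transitions, which is why the suspend/resume callbacks now call clk_disable_unprepare()/clk_prepare_enable() directly. A hedged sketch of that split, assuming a driver-private struct with two clocks:

#include <linux/clk.h>
#include <linux/device.h>

struct example_nfc {
        struct clk *nfi_clk;
        struct clk *pad_clk;
};

static int example_suspend(struct device *dev)
{
        struct example_nfc *nfc = dev_get_drvdata(dev);

        /* gate clocks manually; devres does not act on suspend */
        clk_disable_unprepare(nfc->pad_clk);
        clk_disable_unprepare(nfc->nfi_clk);
        return 0;
}

static int example_resume(struct device *dev)
{
        struct example_nfc *nfc = dev_get_drvdata(dev);
        int ret;

        ret = clk_prepare_enable(nfc->nfi_clk);
        if (ret)
                return ret;

        ret = clk_prepare_enable(nfc->pad_clk);
        if (ret)
                clk_disable_unprepare(nfc->nfi_clk);
        return ret;
}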
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 3d4b2e8294ea..003008355b3c 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -20,7 +20,6 @@
#include <linux/irq.h>
#include <linux/completion.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#define DRIVER_NAME "mxc_nand"
@@ -1696,7 +1695,6 @@ static int mxcnd_probe(struct platform_device *pdev)
struct nand_chip *this;
struct mtd_info *mtd;
struct mxc_nand_host *host;
- struct resource *res;
int err = 0;
/* Allocate memory for MTD device structure and private data */
@@ -1740,17 +1738,15 @@ static int mxcnd_probe(struct platform_device *pdev)
this->options |= NAND_KEEP_TIMINGS;
if (host->devtype_data->needs_ip) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host->regs_ip = devm_ioremap_resource(&pdev->dev, res);
+ host->regs_ip = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host->regs_ip))
return PTR_ERR(host->regs_ip);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ host->base = devm_platform_ioremap_resource(pdev, 1);
} else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ host->base = devm_platform_ioremap_resource(pdev, 0);
}
- host->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->base))
return PTR_ERR(host->base);
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index a6af521832aa..d4b55155aeae 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -1885,6 +1885,7 @@ int nand_exit_status_op(struct nand_chip *chip)
return 0;
}
+EXPORT_SYMBOL_GPL(nand_exit_status_op);
/**
* nand_erase_op - Do an erase operation
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index 57f3db32122d..3bb32a7c6d67 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -22,8 +22,9 @@
#include <linux/mtd/ndfc.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
#define NDFC_MAX_CS 4
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index db22b3af16d8..c45bef6158e7 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -22,7 +22,7 @@
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/platform_data/elm.h>
@@ -2219,8 +2219,7 @@ static int omap_nand_probe(struct platform_device *pdev)
}
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vaddr = devm_ioremap_resource(&pdev->dev, res);
+ vaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
diff --git a/drivers/mtd/nand/raw/orion_nand.c b/drivers/mtd/nand/raw/orion_nand.c
index 7e0313889b50..2951d81614fd 100644
--- a/drivers/mtd/nand/raw/orion_nand.c
+++ b/drivers/mtd/nand/raw/orion_nand.c
@@ -169,16 +169,10 @@ static int __init orion_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
/* Not all platforms can gate the clock, so it is optional. */
- info->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ info->clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
if (IS_ERR(info->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(info->clk),
- "failed to get clock!\n");
-
- ret = clk_prepare_enable(info->clk);
- if (ret) {
- dev_err(&pdev->dev, "failed to prepare clock!\n");
- return ret;
- }
+ "failed to get and enable clock!\n");
/*
* This driver assumes that the default ECC engine should be TYPE_SOFT.
@@ -189,19 +183,13 @@ static int __init orion_nand_probe(struct platform_device *pdev)
ret = nand_scan(nc, 1);
if (ret)
- goto no_dev;
+ return ret;
mtd->name = "orion_nand";
ret = mtd_device_register(mtd, board->parts, board->nr_parts);
- if (ret) {
+ if (ret)
nand_cleanup(nc);
- goto no_dev;
- }
-
- return 0;
-no_dev:
- clk_disable_unprepare(info->clk);
return ret;
}
@@ -215,8 +203,6 @@ static void orion_nand_remove(struct platform_device *pdev)
WARN_ON(ret);
nand_cleanup(chip);
-
- clk_disable_unprepare(info->clk);
}
#ifdef CONFIG_OF
diff --git a/drivers/mtd/nand/raw/oxnas_nand.c b/drivers/mtd/nand/raw/oxnas_nand.c
deleted file mode 100644
index e3c9807df1cd..000000000000
--- a/drivers/mtd/nand/raw/oxnas_nand.c
+++ /dev/null
@@ -1,209 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Oxford Semiconductor OXNAS NAND driver
-
- * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
- * Heavily based on plat_nand.c :
- * Author: Vitaly Wool <vitalywool@gmail.com>
- * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
- */
-
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/reset.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/of.h>
-
-/* Nand commands */
-#define OXNAS_NAND_CMD_ALE BIT(18)
-#define OXNAS_NAND_CMD_CLE BIT(19)
-
-#define OXNAS_NAND_MAX_CHIPS 1
-
-struct oxnas_nand_ctrl {
- struct nand_controller base;
- void __iomem *io_base;
- struct clk *clk;
- struct nand_chip *chips[OXNAS_NAND_MAX_CHIPS];
- unsigned int nchips;
-};
-
-static uint8_t oxnas_nand_read_byte(struct nand_chip *chip)
-{
- struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
-
- return readb(oxnas->io_base);
-}
-
-static void oxnas_nand_read_buf(struct nand_chip *chip, u8 *buf, int len)
-{
- struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
-
- ioread8_rep(oxnas->io_base, buf, len);
-}
-
-static void oxnas_nand_write_buf(struct nand_chip *chip, const u8 *buf,
- int len)
-{
- struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
-
- iowrite8_rep(oxnas->io_base, buf, len);
-}
-
-/* Single CS command control */
-static void oxnas_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
- unsigned int ctrl)
-{
- struct oxnas_nand_ctrl *oxnas = nand_get_controller_data(chip);
-
- if (ctrl & NAND_CLE)
- writeb(cmd, oxnas->io_base + OXNAS_NAND_CMD_CLE);
- else if (ctrl & NAND_ALE)
- writeb(cmd, oxnas->io_base + OXNAS_NAND_CMD_ALE);
-}
-
-/*
- * Probe for the NAND device.
- */
-static int oxnas_nand_probe(struct platform_device *pdev)
-{
- struct device_node *np = pdev->dev.of_node;
- struct device_node *nand_np;
- struct oxnas_nand_ctrl *oxnas;
- struct nand_chip *chip;
- struct mtd_info *mtd;
- int count = 0;
- int err = 0;
- int i;
-
- /* Allocate memory for the device structure (and zero it) */
- oxnas = devm_kzalloc(&pdev->dev, sizeof(*oxnas),
- GFP_KERNEL);
- if (!oxnas)
- return -ENOMEM;
-
- nand_controller_init(&oxnas->base);
-
- oxnas->io_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(oxnas->io_base))
- return PTR_ERR(oxnas->io_base);
-
- oxnas->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(oxnas->clk))
- oxnas->clk = NULL;
-
- /* Only a single chip node is supported */
- count = of_get_child_count(np);
- if (count > 1)
- return -EINVAL;
-
- err = clk_prepare_enable(oxnas->clk);
- if (err)
- return err;
-
- device_reset_optional(&pdev->dev);
-
- for_each_child_of_node(np, nand_np) {
- chip = devm_kzalloc(&pdev->dev, sizeof(struct nand_chip),
- GFP_KERNEL);
- if (!chip) {
- err = -ENOMEM;
- goto err_release_child;
- }
-
- chip->controller = &oxnas->base;
-
- nand_set_flash_node(chip, nand_np);
- nand_set_controller_data(chip, oxnas);
-
- mtd = nand_to_mtd(chip);
- mtd->dev.parent = &pdev->dev;
- mtd->priv = chip;
-
- chip->legacy.cmd_ctrl = oxnas_nand_cmd_ctrl;
- chip->legacy.read_buf = oxnas_nand_read_buf;
- chip->legacy.read_byte = oxnas_nand_read_byte;
- chip->legacy.write_buf = oxnas_nand_write_buf;
- chip->legacy.chip_delay = 30;
-
- /* Scan to find existence of the device */
- err = nand_scan(chip, 1);
- if (err)
- goto err_release_child;
-
- err = mtd_device_register(mtd, NULL, 0);
- if (err)
- goto err_cleanup_nand;
-
- oxnas->chips[oxnas->nchips++] = chip;
- }
-
- /* Exit if no chips found */
- if (!oxnas->nchips) {
- err = -ENODEV;
- goto err_clk_unprepare;
- }
-
- platform_set_drvdata(pdev, oxnas);
-
- return 0;
-
-err_cleanup_nand:
- nand_cleanup(chip);
-err_release_child:
- of_node_put(nand_np);
-
- for (i = 0; i < oxnas->nchips; i++) {
- chip = oxnas->chips[i];
- WARN_ON(mtd_device_unregister(nand_to_mtd(chip)));
- nand_cleanup(chip);
- }
-
-err_clk_unprepare:
- clk_disable_unprepare(oxnas->clk);
- return err;
-}
-
-static void oxnas_nand_remove(struct platform_device *pdev)
-{
- struct oxnas_nand_ctrl *oxnas = platform_get_drvdata(pdev);
- struct nand_chip *chip;
- int i;
-
- for (i = 0; i < oxnas->nchips; i++) {
- chip = oxnas->chips[i];
- WARN_ON(mtd_device_unregister(nand_to_mtd(chip)));
- nand_cleanup(chip);
- }
-
- clk_disable_unprepare(oxnas->clk);
-}
-
-static const struct of_device_id oxnas_nand_match[] = {
- { .compatible = "oxsemi,ox820-nand" },
- {},
-};
-MODULE_DEVICE_TABLE(of, oxnas_nand_match);
-
-static struct platform_driver oxnas_nand_driver = {
- .probe = oxnas_nand_probe,
- .remove_new = oxnas_nand_remove,
- .driver = {
- .name = "oxnas_nand",
- .of_match_table = oxnas_nand_match,
- },
-};
-
-module_platform_driver(oxnas_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
-MODULE_DESCRIPTION("Oxnas NAND driver");
-MODULE_ALIAS("platform:oxnas_nand");
diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
index 28b7bd7e22eb..8da5fee321b5 100644
--- a/drivers/mtd/nand/raw/pl35x-nand-controller.c
+++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
@@ -23,9 +23,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 72d6168d8a1b..64499c1b3603 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2,19 +2,19 @@
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
-#include <linux/clk.h>
-#include <linux/slab.h>
#include <linux/bitops.h>
-#include <linux/dma/qcom_adm.h>
-#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/qcom_adm.h>
+#include <linux/dma/qcom_bam_dma.h>
#include <linux/module.h>
-#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/rawnand.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/delay.h>
-#include <linux/dma/qcom_bam_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
/* NANDc reg offsets */
#define NAND_FLASH_CMD 0x00
@@ -123,8 +123,8 @@
/* NAND_ERASED_CW_DETECT_CFG bits */
#define ERASED_CW_ECC_MASK 1
#define AUTO_DETECT_RES 0
-#define MASK_ECC (1 << ERASED_CW_ECC_MASK)
-#define RESET_ERASED_DET (1 << AUTO_DETECT_RES)
+#define MASK_ECC BIT(ERASED_CW_ECC_MASK)
+#define RESET_ERASED_DET BIT(AUTO_DETECT_RES)
#define ACTIVE_ERASED_DET (0 << AUTO_DETECT_RES)
#define CLR_ERASED_PAGE_DET (RESET_ERASED_DET | MASK_ECC)
#define SET_ERASED_PAGE_DET (ACTIVE_ERASED_DET | MASK_ECC)
@@ -157,6 +157,7 @@
#define OP_PAGE_PROGRAM_WITH_ECC 0x7
#define OP_PROGRAM_PAGE_SPARE 0x9
#define OP_BLOCK_ERASE 0xa
+#define OP_CHECK_STATUS 0xc
#define OP_FETCH_ID 0xb
#define OP_RESET_DEVICE 0xd
@@ -211,7 +212,7 @@ nandc_set_reg(chip, reg, \
/* Returns the dma address for reg read buffer */
#define reg_buf_dma_addr(chip, vaddr) \
((chip)->reg_read_dma + \
- ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf))
+ ((u8 *)(vaddr) - (u8 *)(chip)->reg_read_buf))
#define QPIC_PER_CW_CMD_ELEMENTS 32
#define QPIC_PER_CW_CMD_SGL 32
@@ -235,6 +236,8 @@ nandc_set_reg(chip, reg, \
*/
#define NAND_ERASED_CW_SET BIT(4)
+#define MAX_ADDRESS_CYCLE 5
+
/*
* This data type corresponds to the BAM transaction which will be used for all
* NAND transfers.
@@ -382,6 +385,9 @@ struct nandc_regs {
* @reg_read_pos: marker for data read in reg_read_buf
*
* @cmd1/vld: some fixed controller register values
+ *
+ * @exec_opwrite: flag to select correct number of code word
+ * while reading status
*/
struct qcom_nand_controller {
struct device *dev;
@@ -432,6 +438,7 @@ struct qcom_nand_controller {
int reg_read_pos;
u32 cmd1, vld;
+ bool exec_opwrite;
};
/*
@@ -448,6 +455,29 @@ struct qcom_nand_boot_partition {
};
/*
+ * Qcom op for each exec_op transfer
+ *
+ * @data_instr: data instruction pointer
+ * @data_instr_idx: data instruction index
+ * @rdy_timeout_ms: wait ready timeout in ms
+ * @rdy_delay_ns: Additional delay in ns
+ * @addr1_reg: Address1 register value
+ * @addr2_reg: Address2 register value
+ * @cmd_reg: CMD register value
+ * @flag: flag for misc instruction
+ */
+struct qcom_op {
+ const struct nand_op_instr *data_instr;
+ unsigned int data_instr_idx;
+ unsigned int rdy_timeout_ms;
+ unsigned int rdy_delay_ns;
+ u32 addr1_reg;
+ u32 addr2_reg;
+ u32 cmd_reg;
+ u8 flag;
+};
+
+/*
* NAND chip structure
*
* @boot_partitions: array of boot partitions where offset and size of the
@@ -1273,182 +1303,33 @@ static void config_nand_cw_write(struct nand_chip *chip)
write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
}
-/*
- * the following functions are used within chip->legacy.cmdfunc() to
- * perform different NAND_CMD_* commands
- */
-
-/* sets up descriptors for NAND_CMD_PARAM */
-static int nandc_param(struct qcom_nand_host *host)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- /*
- * NAND_CMD_PARAM is called before we know much about the FLASH chip
- * in use. we configure the controller to perform a raw read of 512
- * bytes to read onfi params
- */
- if (nandc->props->qpic_v2)
- nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ_ONFI_READ |
- PAGE_ACC | LAST_PAGE);
- else
- nandc_set_reg(chip, NAND_FLASH_CMD, OP_PAGE_READ |
- PAGE_ACC | LAST_PAGE);
-
- nandc_set_reg(chip, NAND_ADDR0, 0);
- nandc_set_reg(chip, NAND_ADDR1, 0);
- nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
- | 512 << UD_SIZE_BYTES
- | 5 << NUM_ADDR_CYCLES
- | 0 << SPARE_SIZE_BYTES);
- nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
- | 0 << CS_ACTIVE_BSY
- | 17 << BAD_BLOCK_BYTE_NUM
- | 1 << BAD_BLOCK_IN_SPARE_AREA
- | 2 << WR_RD_BSY_GAP
- | 0 << WIDE_FLASH
- | 1 << DEV0_CFG1_ECC_DISABLE);
- if (!nandc->props->qpic_v2)
- nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
-
- /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
- if (!nandc->props->qpic_v2) {
- nandc_set_reg(chip, NAND_DEV_CMD_VLD,
- (nandc->vld & ~READ_START_VLD));
- nandc_set_reg(chip, NAND_DEV_CMD1,
- (nandc->cmd1 & ~(0xFF << READ_ADDR))
- | NAND_CMD_PARAM << READ_ADDR);
- }
-
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-
- if (!nandc->props->qpic_v2) {
- nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
- nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
- }
-
- nandc_set_read_loc(chip, 0, 0, 0, 512, 1);
-
- if (!nandc->props->qpic_v2) {
- write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
- write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
- }
-
- nandc->buf_count = 512;
- memset(nandc->data_buffer, 0xff, nandc->buf_count);
-
- config_nand_single_cw_page_read(chip, false, 0);
-
- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
- nandc->buf_count, 0);
-
- /* restore CMD1 and VLD regs */
- if (!nandc->props->qpic_v2) {
- write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
- write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
- }
-
- return 0;
-}
-
-/* sets up descriptors for NAND_CMD_ERASE1 */
-static int erase_block(struct qcom_nand_host *host, int page_addr)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- nandc_set_reg(chip, NAND_FLASH_CMD,
- OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
- nandc_set_reg(chip, NAND_ADDR0, page_addr);
- nandc_set_reg(chip, NAND_ADDR1, 0);
- nandc_set_reg(chip, NAND_DEV0_CFG0,
- host->cfg0_raw & ~(7 << CW_PER_PAGE));
- nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
- nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus);
- nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus);
-
- write_reg_dma(nandc, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-
- write_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
- write_reg_dma(nandc, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
-
- return 0;
-}
-
-/* sets up descriptors for NAND_CMD_READID */
-static int read_id(struct qcom_nand_host *host, int column)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- if (column == -1)
- return 0;
-
- nandc_set_reg(chip, NAND_FLASH_CMD, OP_FETCH_ID);
- nandc_set_reg(chip, NAND_ADDR0, column);
- nandc_set_reg(chip, NAND_ADDR1, 0);
- nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
- nandc->props->is_bam ? 0 : DM_EN);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-
- write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
- read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
-
- return 0;
-}
-
-/* sets up descriptors for NAND_CMD_RESET */
-static int reset(struct qcom_nand_host *host)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- nandc_set_reg(chip, NAND_FLASH_CMD, OP_RESET_DEVICE);
- nandc_set_reg(chip, NAND_EXEC_CMD, 1);
-
- write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
-
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
-
- return 0;
-}
-
/* helpers to submit/free our list of dma descriptors */
static int submit_descs(struct qcom_nand_controller *nandc)
{
- struct desc_info *desc;
+ struct desc_info *desc, *n;
dma_cookie_t cookie = 0;
struct bam_transaction *bam_txn = nandc->bam_txn;
- int r;
+ int ret = 0;
if (nandc->props->is_bam) {
if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
- r = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
- if (r)
- return r;
+ ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+ if (ret)
+ goto err_unmap_free_desc;
}
if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
- r = prepare_bam_async_desc(nandc, nandc->tx_chan,
+ ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
DMA_PREP_INTERRUPT);
- if (r)
- return r;
+ if (ret)
+ goto err_unmap_free_desc;
}
if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
- r = prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
DMA_PREP_CMD);
- if (r)
- return r;
+ if (ret)
+ goto err_unmap_free_desc;
}
}
@@ -1470,19 +1351,17 @@ static int submit_descs(struct qcom_nand_controller *nandc)
if (!wait_for_completion_timeout(&bam_txn->txn_done,
QPIC_NAND_COMPLETION_TIMEOUT))
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
} else {
if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
}
- return 0;
-}
-
-static void free_descs(struct qcom_nand_controller *nandc)
-{
- struct desc_info *desc, *n;
-
+err_unmap_free_desc:
+ /*
+ * Unmap the dma sg_list and free the desc allocated by both
+ * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
+ */
list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
list_del(&desc->node);
@@ -1495,6 +1374,8 @@ static void free_descs(struct qcom_nand_controller *nandc)
kfree(desc);
}
+
+ return ret;
}
/* reset the register read buffer for next NAND operation */
@@ -1504,152 +1385,6 @@ static void clear_read_regs(struct qcom_nand_controller *nandc)
nandc_read_buffer_sync(nandc, false);
}
-static void pre_command(struct qcom_nand_host *host, int command)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- nandc->buf_count = 0;
- nandc->buf_start = 0;
- host->use_ecc = false;
- host->last_command = command;
-
- clear_read_regs(nandc);
-
- if (command == NAND_CMD_RESET || command == NAND_CMD_READID ||
- command == NAND_CMD_PARAM || command == NAND_CMD_ERASE1)
- clear_bam_transaction(nandc);
-}
-
-/*
- * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
- * privately maintained status byte, this status byte can be read after
- * NAND_CMD_STATUS is called
- */
-static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
- int num_cw;
- int i;
-
- num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
- nandc_read_buffer_sync(nandc, true);
-
- for (i = 0; i < num_cw; i++) {
- u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
-
- if (flash_status & FS_MPU_ERR)
- host->status &= ~NAND_STATUS_WP;
-
- if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
- (flash_status &
- FS_DEVICE_STS_ERR)))
- host->status |= NAND_STATUS_FAIL;
- }
-}
-
-static void post_command(struct qcom_nand_host *host, int command)
-{
- struct nand_chip *chip = &host->chip;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- switch (command) {
- case NAND_CMD_READID:
- nandc_read_buffer_sync(nandc, true);
- memcpy(nandc->data_buffer, nandc->reg_read_buf,
- nandc->buf_count);
- break;
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_ERASE1:
- parse_erase_write_errors(host, command);
- break;
- default:
- break;
- }
-}
-
-/*
- * Implements chip->legacy.cmdfunc. It's only used for a limited set of
- * commands. The rest of the commands wouldn't be called by upper layers.
- * For example, NAND_CMD_READOOB would never be called because we have our own
- * versions of read_oob ops for nand_ecc_ctrl.
- */
-static void qcom_nandc_command(struct nand_chip *chip, unsigned int command,
- int column, int page_addr)
-{
- struct qcom_nand_host *host = to_qcom_nand_host(chip);
- struct nand_ecc_ctrl *ecc = &chip->ecc;
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- bool wait = false;
- int ret = 0;
-
- pre_command(host, command);
-
- switch (command) {
- case NAND_CMD_RESET:
- ret = reset(host);
- wait = true;
- break;
-
- case NAND_CMD_READID:
- nandc->buf_count = 4;
- ret = read_id(host, column);
- wait = true;
- break;
-
- case NAND_CMD_PARAM:
- ret = nandc_param(host);
- wait = true;
- break;
-
- case NAND_CMD_ERASE1:
- ret = erase_block(host, page_addr);
- wait = true;
- break;
-
- case NAND_CMD_READ0:
- /* we read the entire page for now */
- WARN_ON(column != 0);
-
- host->use_ecc = true;
- set_address(host, 0, page_addr);
- update_rw_regs(host, ecc->steps, true, 0);
- break;
-
- case NAND_CMD_SEQIN:
- WARN_ON(column != 0);
- set_address(host, 0, page_addr);
- break;
-
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_STATUS:
- case NAND_CMD_NONE:
- default:
- break;
- }
-
- if (ret) {
- dev_err(nandc->dev, "failure executing command %d\n",
- command);
- free_descs(nandc);
- return;
- }
-
- if (wait) {
- ret = submit_descs(nandc);
- if (ret)
- dev_err(nandc->dev,
- "failure submitting descs for command %d\n",
- command);
- }
-
- free_descs(nandc);
-
- post_command(host, command);
-}
-
/*
* when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
* an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
@@ -1736,6 +1471,9 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
int raw_cw = cw;
nand_read_page_op(chip, page, 0, NULL, 0);
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ clear_read_regs(nandc);
host->use_ecc = false;
if (nandc->props->qpic_v2)
@@ -1786,7 +1524,6 @@ qcom_nandc_read_cw_raw(struct mtd_info *mtd, struct nand_chip *chip,
read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
ret = submit_descs(nandc);
- free_descs(nandc);
if (ret) {
dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
return ret;
@@ -1819,7 +1556,7 @@ check_for_erased_page(struct qcom_nand_host *host, u8 *data_buf,
struct mtd_info *mtd = nand_to_mtd(chip);
struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *cw_data_buf, *cw_oob_buf;
- int cw, data_size, oob_size, ret = 0;
+ int cw, data_size, oob_size, ret;
if (!data_buf)
data_buf = nand_get_data_buf(chip);
@@ -2040,8 +1777,6 @@ static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
}
ret = submit_descs(nandc);
- free_descs(nandc);
-
if (ret) {
dev_err(nandc->dev, "failure to read page/oob\n");
return ret;
@@ -2080,8 +1815,6 @@ static int copy_last_cw(struct qcom_nand_host *host, int page)
if (ret)
dev_err(nandc->dev, "failed to copy last codeword\n");
- free_descs(nandc);
-
return ret;
}
@@ -2149,17 +1882,25 @@ static void qcom_nandc_codeword_fixup(struct qcom_nand_host *host, int page)
}
/* implements ecc->read_page() */
-static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
+static int qcom_nandc_read_page(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
u8 *data_buf, *oob_buf = NULL;
if (host->nr_boot_partitions)
qcom_nandc_codeword_fixup(host, page);
nand_read_page_op(chip, page, 0, NULL, 0);
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = true;
+ clear_read_regs(nandc);
+ set_address(host, 0, page);
+ update_rw_regs(host, ecc->steps, true, 0);
+
data_buf = buf;
oob_buf = oob_required ? chip->oob_poi : NULL;
@@ -2169,7 +1910,7 @@ static int qcom_nandc_read_page(struct nand_chip *chip, uint8_t *buf,
}
/* implements ecc->read_page_raw() */
-static int qcom_nandc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+static int qcom_nandc_read_page_raw(struct nand_chip *chip, u8 *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
@@ -2215,7 +1956,7 @@ static int qcom_nandc_read_oob(struct nand_chip *chip, int page)
}
/* implements ecc->write_page() */
-static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
+static int qcom_nandc_write_page(struct nand_chip *chip, const u8 *buf,
int oob_required, int page)
{
struct qcom_nand_host *host = to_qcom_nand_host(chip);
@@ -2229,6 +1970,9 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ set_address(host, 0, page);
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
clear_read_regs(nandc);
clear_bam_transaction(nandc);
@@ -2251,7 +1995,6 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
oob_size = ecc->bytes;
}
-
write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
@@ -2276,20 +2019,17 @@ static int qcom_nandc_write_page(struct nand_chip *chip, const uint8_t *buf,
}
ret = submit_descs(nandc);
- if (ret)
+ if (ret) {
dev_err(nandc->dev, "failure to write page\n");
+ return ret;
+ }
- free_descs(nandc);
-
- if (!ret)
- ret = nand_prog_page_end_op(chip);
-
- return ret;
+ return nand_prog_page_end_op(chip);
}
/* implements ecc->write_page_raw() */
static int qcom_nandc_write_page_raw(struct nand_chip *chip,
- const uint8_t *buf, int oob_required,
+ const u8 *buf, int oob_required,
int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
@@ -2352,15 +2092,12 @@ static int qcom_nandc_write_page_raw(struct nand_chip *chip,
}
ret = submit_descs(nandc);
- if (ret)
+ if (ret) {
dev_err(nandc->dev, "failure to write raw page\n");
+ return ret;
+ }
- free_descs(nandc);
-
- if (!ret)
- ret = nand_prog_page_end_op(chip);
-
- return ret;
+ return nand_prog_page_end_op(chip);
}
/*
@@ -2404,12 +2141,9 @@ static int qcom_nandc_write_oob(struct nand_chip *chip, int page)
config_nand_cw_write(chip);
ret = submit_descs(nandc);
-
- free_descs(nandc);
-
if (ret) {
dev_err(nandc->dev, "failure to write oob\n");
- return -EIO;
+ return ret;
}
return nand_prog_page_end_op(chip);
@@ -2483,73 +2217,12 @@ static int qcom_nandc_block_markbad(struct nand_chip *chip, loff_t ofs)
config_nand_cw_write(chip);
ret = submit_descs(nandc);
-
- free_descs(nandc);
-
if (ret) {
dev_err(nandc->dev, "failure to update BBM\n");
- return -EIO;
- }
-
- return nand_prog_page_end_op(chip);
-}
-
-/*
- * the three functions below implement chip->legacy.read_byte(),
- * chip->legacy.read_buf() and chip->legacy.write_buf() respectively. these
- * aren't used for reading/writing page data, they are used for smaller data
- * like reading id, status etc
- */
-static uint8_t qcom_nandc_read_byte(struct nand_chip *chip)
-{
- struct qcom_nand_host *host = to_qcom_nand_host(chip);
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- u8 *buf = nandc->data_buffer;
- u8 ret = 0x0;
-
- if (host->last_command == NAND_CMD_STATUS) {
- ret = host->status;
-
- host->status = NAND_STATUS_READY | NAND_STATUS_WP;
-
return ret;
}
- if (nandc->buf_start < nandc->buf_count)
- ret = buf[nandc->buf_start++];
-
- return ret;
-}
-
-static void qcom_nandc_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
-{
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
-
- memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
- nandc->buf_start += real_len;
-}
-
-static void qcom_nandc_write_buf(struct nand_chip *chip, const uint8_t *buf,
- int len)
-{
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
- int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
-
- memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
-
- nandc->buf_start += real_len;
-}
-
-/* we support only one external chip for now */
-static void qcom_nandc_select_chip(struct nand_chip *chip, int chipnr)
-{
- struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
- if (chipnr <= 0)
- return;
-
- dev_warn(nandc->dev, "invalid chip select\n");
+ return nand_prog_page_end_op(chip);
}
/*
@@ -2660,7 +2333,7 @@ static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
}
static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
- struct mtd_oob_region *oobregion)
+ struct mtd_oob_region *oobregion)
{
struct nand_chip *chip = mtd_to_nand(mtd);
struct qcom_nand_host *host = to_qcom_nand_host(chip);
@@ -2685,6 +2358,7 @@ qcom_nandc_calc_ecc_bytes(int step_size, int strength)
{
return strength == 4 ? 12 : 16;
}
+
NAND_ECC_CAPS_SINGLE(qcom_nandc_ecc_caps, qcom_nandc_calc_ecc_bytes,
NANDC_STEP_SIZE, 4, 8);
@@ -2867,8 +2541,479 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
return 0;
}
+static int qcom_op_cmd_mapping(struct nand_chip *chip, u8 opcode,
+ struct qcom_op *q_op)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ int cmd;
+
+ switch (opcode) {
+ case NAND_CMD_RESET:
+ cmd = OP_RESET_DEVICE;
+ break;
+ case NAND_CMD_READID:
+ cmd = OP_FETCH_ID;
+ break;
+ case NAND_CMD_PARAM:
+ if (nandc->props->qpic_v2)
+ cmd = OP_PAGE_READ_ONFI_READ;
+ else
+ cmd = OP_PAGE_READ;
+ break;
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ cmd = OP_BLOCK_ERASE;
+ break;
+ case NAND_CMD_STATUS:
+ cmd = OP_CHECK_STATUS;
+ break;
+ case NAND_CMD_PAGEPROG:
+ cmd = OP_PROGRAM_PAGE;
+ q_op->flag = OP_PROGRAM_PAGE;
+ nandc->exec_opwrite = true;
+ break;
+ case NAND_CMD_READ0:
+ case NAND_CMD_READSTART:
+ if (host->use_ecc)
+ cmd = OP_PAGE_READ_WITH_ECC;
+ else
+ cmd = OP_PAGE_READ;
+ break;
+ default:
+ dev_err(nandc->dev, "Opcode not supported: %u\n", opcode);
+ return -EOPNOTSUPP;
+ }
+
+ return cmd;
+}
+
+/* NAND framework ->exec_op() hooks and related helpers */
+static int qcom_parse_instructions(struct nand_chip *chip,
+ const struct nand_subop *subop,
+ struct qcom_op *q_op)
+{
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id;
+ int i, ret;
+
+ for (op_id = 0; op_id < subop->ninstrs; op_id++) {
+ unsigned int offset, naddrs;
+ const u8 *addrs;
+
+ instr = &subop->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ ret = qcom_op_cmd_mapping(chip, instr->ctx.cmd.opcode, q_op);
+ if (ret < 0)
+ return ret;
+
+ q_op->cmd_reg = ret;
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_ADDR_INSTR:
+ offset = nand_subop_get_addr_start_off(subop, op_id);
+ naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
+ addrs = &instr->ctx.addr.addrs[offset];
+
+ for (i = 0; i < min_t(unsigned int, 4, naddrs); i++)
+ q_op->addr1_reg |= addrs[i] << (i * 8);
+
+ if (naddrs > 4)
+ q_op->addr2_reg |= addrs[4];
+
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_DATA_IN_INSTR:
+ q_op->data_instr = instr;
+ q_op->data_instr_idx = op_id;
+ q_op->rdy_delay_ns = instr->delay_ns;
+ fallthrough;
+ case NAND_OP_DATA_OUT_INSTR:
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+ q_op->rdy_timeout_ms = instr->ctx.waitrdy.timeout_ms;
+ q_op->rdy_delay_ns = instr->delay_ns;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static void qcom_delay_ns(unsigned int ns)
+{
+ if (!ns)
+ return;
+
+ if (ns < 10000)
+ ndelay(ns);
+ else
+ udelay(DIV_ROUND_UP(ns, 1000));
+}
+
+static int qcom_wait_rdy_poll(struct nand_chip *chip, unsigned int time_ms)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ unsigned long start = jiffies + msecs_to_jiffies(time_ms);
+ u32 flash;
+
+ nandc_read_buffer_sync(nandc, true);
+
+ do {
+ flash = le32_to_cpu(nandc->reg_read_buf[0]);
+ if (flash & FS_READY_BSY_N)
+ return 0;
+ cpu_relax();
+ } while (time_after(start, jiffies));
+
+ dev_err(nandc->dev, "Timeout waiting for device to be ready:0x%08x\n", flash);
+
+ return -ETIMEDOUT;
+}
+
+static int qcom_read_status_exec(struct nand_chip *chip,
+ const struct nand_subop *subop)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ struct qcom_op q_op = {};
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id = 0;
+ unsigned int len = 0;
+ int ret, num_cw, i;
+ u32 flash_status;
+
+ host->status = NAND_STATUS_READY | NAND_STATUS_WP;
+
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
+ return ret;
+
+ num_cw = nandc->exec_opwrite ? ecc->steps : 1;
+ nandc->exec_opwrite = false;
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+ nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting status descriptor\n");
+ goto err_out;
+ }
+
+ nandc_read_buffer_sync(nandc, true);
+
+ for (i = 0; i < num_cw; i++) {
+ flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
+
+ if (flash_status & FS_MPU_ERR)
+ host->status &= ~NAND_STATUS_WP;
+
+ if (flash_status & FS_OP_ERR ||
+ (i == (num_cw - 1) && (flash_status & FS_DEVICE_STS_ERR)))
+ host->status |= NAND_STATUS_FAIL;
+ }
+
+ flash_status = host->status;
+ instr = q_op.data_instr;
+ op_id = q_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+ memcpy(instr->ctx.data.buf.in, &flash_status, len);
+
+err_out:
+ return ret;
+}
+
+static int qcom_read_id_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_op q_op = {};
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id = 0;
+ unsigned int len = 0;
+ int ret;
+
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
+ return ret;
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+ nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
+ nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
+ nandc_set_reg(chip, NAND_FLASH_CHIP_SELECT,
+ nandc->props->is_bam ? 0 : DM_EN);
+
+ nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+
+ read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting read id descriptor\n");
+ goto err_out;
+ }
+
+ instr = q_op.data_instr;
+ op_id = q_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+
+ nandc_read_buffer_sync(nandc, true);
+ memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);
+
+err_out:
+ return ret;
+}
+
+static int qcom_misc_cmd_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_op q_op = {};
+ int ret;
+ int instrs = 1;
+
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
+ return ret;
+
+ if (q_op.flag == OP_PROGRAM_PAGE) {
+ goto wait_rdy;
+ } else if (q_op.cmd_reg == OP_BLOCK_ERASE) {
+ q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
+ nandc_set_reg(chip, NAND_ADDR0, q_op.addr1_reg);
+ nandc_set_reg(chip, NAND_ADDR1, q_op.addr2_reg);
+ nandc_set_reg(chip, NAND_DEV0_CFG0,
+ host->cfg0_raw & ~(7 << CW_PER_PAGE));
+ nandc_set_reg(chip, NAND_DEV0_CFG1, host->cfg1_raw);
+ instrs = 3;
+ } else {
+ return 0;
+ }
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+ nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+
+ write_reg_dma(nandc, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+ (q_op.cmd_reg == OP_BLOCK_ERASE) ? write_reg_dma(nandc, NAND_DEV0_CFG0,
+ 2, NAND_BAM_NEXT_SGL) : read_reg_dma(nandc,
+ NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ write_reg_dma(nandc, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting misc descriptor\n");
+ goto err_out;
+ }
+
+wait_rdy:
+ qcom_delay_ns(q_op.rdy_delay_ns);
+ ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms);
+
+err_out:
+ return ret;
+}
+
+static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_subop *subop)
+{
+ struct qcom_nand_host *host = to_qcom_nand_host(chip);
+ struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+ struct qcom_op q_op = {};
+ const struct nand_op_instr *instr = NULL;
+ unsigned int op_id = 0;
+ unsigned int len = 0;
+ int ret;
+
+ ret = qcom_parse_instructions(chip, subop, &q_op);
+ if (ret)
+ return ret;
+
+ q_op.cmd_reg |= PAGE_ACC | LAST_PAGE;
+
+ nandc->buf_count = 0;
+ nandc->buf_start = 0;
+ host->use_ecc = false;
+ clear_read_regs(nandc);
+ clear_bam_transaction(nandc);
+
+ nandc_set_reg(chip, NAND_FLASH_CMD, q_op.cmd_reg);
+
+ nandc_set_reg(chip, NAND_ADDR0, 0);
+ nandc_set_reg(chip, NAND_ADDR1, 0);
+ nandc_set_reg(chip, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
+ | 512 << UD_SIZE_BYTES
+ | 5 << NUM_ADDR_CYCLES
+ | 0 << SPARE_SIZE_BYTES);
+ nandc_set_reg(chip, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
+ | 0 << CS_ACTIVE_BSY
+ | 17 << BAD_BLOCK_BYTE_NUM
+ | 1 << BAD_BLOCK_IN_SPARE_AREA
+ | 2 << WR_RD_BSY_GAP
+ | 0 << WIDE_FLASH
+ | 1 << DEV0_CFG1_ECC_DISABLE);
+ if (!nandc->props->qpic_v2)
+ nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
+
+ /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */
+ if (!nandc->props->qpic_v2) {
+ nandc_set_reg(chip, NAND_DEV_CMD_VLD,
+ (nandc->vld & ~READ_START_VLD));
+ nandc_set_reg(chip, NAND_DEV_CMD1,
+ (nandc->cmd1 & ~(0xFF << READ_ADDR))
+ | NAND_CMD_PARAM << READ_ADDR);
+ }
+
+ nandc_set_reg(chip, NAND_EXEC_CMD, 1);
+
+ if (!nandc->props->qpic_v2) {
+ nandc_set_reg(chip, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
+ nandc_set_reg(chip, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
+ }
+
+ instr = q_op.data_instr;
+ op_id = q_op.data_instr_idx;
+ len = nand_subop_get_data_len(subop, op_id);
+
+ nandc_set_read_loc(chip, 0, 0, 0, len, 1);
+
+ if (!nandc->props->qpic_v2) {
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0);
+ write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ }
+
+ nandc->buf_count = len;
+ memset(nandc->data_buffer, 0xff, nandc->buf_count);
+
+ config_nand_single_cw_page_read(chip, false, 0);
+
+ read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+ nandc->buf_count, 0);
+
+ /* restore CMD1 and VLD regs */
+ if (!nandc->props->qpic_v2) {
+ write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1, 0);
+ write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1, NAND_BAM_NEXT_SGL);
+ }
+
+ ret = submit_descs(nandc);
+ if (ret) {
+ dev_err(nandc->dev, "failure in submitting param page descriptor\n");
+ goto err_out;
+ }
+
+ ret = qcom_wait_rdy_poll(chip, q_op.rdy_timeout_ms);
+ if (ret)
+ goto err_out;
+
+ memcpy(instr->ctx.data.buf.in, nandc->data_buffer, len);
+
+err_out:
+ return ret;
+}
+
+static const struct nand_op_parser qcom_op_parser = NAND_OP_PARSER(
+ NAND_OP_PARSER_PATTERN(
+ qcom_read_id_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 8)),
+ NAND_OP_PARSER_PATTERN(
+ qcom_read_status_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 1)),
+ NAND_OP_PARSER_PATTERN(
+ qcom_param_page_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYCLE),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
+ NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 512)),
+ NAND_OP_PARSER_PATTERN(
+ qcom_misc_cmd_type_exec,
+ NAND_OP_PARSER_PAT_CMD_ELEM(false),
+ NAND_OP_PARSER_PAT_ADDR_ELEM(true, MAX_ADDRESS_CYCLE),
+ NAND_OP_PARSER_PAT_CMD_ELEM(true),
+ NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
+ );
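Each NAND_OP_PARSER_PATTERN() above describes the shape of operations its handler can absorb: the first one, for example, takes one command cycle, up to MAX_ADDRESS_CYCLE address cycles and at most eight data-in bytes, which is what a Read ID sequence looks like. As a hedged illustration (not part of the patch, helper name made up, timings simplified), this is roughly the operation the raw NAND core would build and that the parser routes to qcom_read_id_type_exec():

#include <linux/mtd/rawnand.h>

static int example_readid(struct nand_chip *chip, u8 *id, unsigned int len)
{
	u8 addr = 0;
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READID, 0),
		NAND_OP_ADDR(1, &addr, 0),
		NAND_OP_8BIT_DATA_IN(len, id, 0),
	};
	struct nand_operation op = NAND_OPERATION(0, instrs);

	/*
	 * In practice the core issues this through its own helpers; shown
	 * here going straight to the controller hook installed below.
	 */
	return chip->controller->ops->exec_op(chip, &op, false);
}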
+
+static int qcom_check_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ const struct nand_op_instr *instr;
+ int op_id;
+
+ for (op_id = 0; op_id < op->ninstrs; op_id++) {
+ instr = &op->instrs[op_id];
+
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ if (instr->ctx.cmd.opcode != NAND_CMD_RESET &&
+ instr->ctx.cmd.opcode != NAND_CMD_READID &&
+ instr->ctx.cmd.opcode != NAND_CMD_PARAM &&
+ instr->ctx.cmd.opcode != NAND_CMD_ERASE1 &&
+ instr->ctx.cmd.opcode != NAND_CMD_ERASE2 &&
+ instr->ctx.cmd.opcode != NAND_CMD_STATUS &&
+ instr->ctx.cmd.opcode != NAND_CMD_PAGEPROG &&
+ instr->ctx.cmd.opcode != NAND_CMD_READ0 &&
+ instr->ctx.cmd.opcode != NAND_CMD_READSTART)
+ return -EOPNOTSUPP;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op, bool check_only)
+{
+ if (check_only)
+ return qcom_check_op(chip, op);
+
+ return nand_op_parser_exec_op(chip, &qcom_op_parser, op, check_only);
+}
+
static const struct nand_controller_ops qcom_nandc_ops = {
.attach_chip = qcom_nand_attach_chip,
+ .exec_op = qcom_nand_exec_op,
};
static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
@@ -2912,19 +3057,17 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
*/
nandc->buf_size = 532;
- nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
- GFP_KERNEL);
+ nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
if (!nandc->data_buffer)
return -ENOMEM;
- nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
- GFP_KERNEL);
+ nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
if (!nandc->regs)
return -ENOMEM;
- nandc->reg_read_buf = devm_kcalloc(nandc->dev,
- MAX_REG_RD, sizeof(*nandc->reg_read_buf),
- GFP_KERNEL);
+ nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
+ sizeof(*nandc->reg_read_buf),
+ GFP_KERNEL);
if (!nandc->reg_read_buf)
return -ENOMEM;
@@ -2969,7 +3112,7 @@ static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
/*
* Initially allocate BAM transaction to read ONFI param page.
* After detecting all the devices, this BAM transaction will
- * be freed and the next BAM tranasction will be allocated with
+ * be freed and the next BAM transaction will be allocated with
* maximum codeword size
*/
nandc->max_cwperpage = 1;
@@ -3135,14 +3278,6 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
mtd->owner = THIS_MODULE;
mtd->dev.parent = dev;
- chip->legacy.cmdfunc = qcom_nandc_command;
- chip->legacy.select_chip = qcom_nandc_select_chip;
- chip->legacy.read_byte = qcom_nandc_read_byte;
- chip->legacy.read_buf = qcom_nandc_read_buf;
- chip->legacy.write_buf = qcom_nandc_write_buf;
- chip->legacy.set_features = nand_get_set_features_notsupp;
- chip->legacy.get_features = nand_get_set_features_notsupp;
-
/*
* the bad block marker is readable only when we read the last codeword
* of a page with ECC disabled. currently, the nand_base and nand_bbt
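With ->exec_op() wired up, the legacy per-chip hooks deleted above (cmdfunc, select_chip, read_byte, read_buf, write_buf, get/set_features) are no longer needed: the core expresses every access as a nand_operation and hands it to the controller. A hedged sketch of the kind of sequence that used to be driven through chip->legacy.cmdfunc() and now arrives as a parsed operation (the timeout value and function name are illustrative):

#include <linux/mtd/rawnand.h>

static int example_reset(struct nand_chip *chip)
{
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_RESET, 0),
		NAND_OP_WAIT_RDY(200, 0),	/* 200 ms, illustrative */
	};
	struct nand_operation op = NAND_OPERATION(0, instrs);

	/* Matched by the CMD + WAITRDY pattern of qcom_misc_cmd_type_exec(). */
	return chip->controller->ops->exec_op(chip, &op, false);
}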
diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c
index 5a04680342c3..5bc90ffa721f 100644
--- a/drivers/mtd/nand/raw/rockchip-nand-controller.c
+++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c
@@ -15,7 +15,6 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index ac80aaf5b4e3..3d3d5c9814ff 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -26,7 +26,6 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
diff --git a/drivers/mtd/nand/raw/sh_flctl.c b/drivers/mtd/nand/raw/sh_flctl.c
index 63bf20c41719..3e5df75cbc98 100644
--- a/drivers/mtd/nand/raw/sh_flctl.c
+++ b/drivers/mtd/nand/raw/sh_flctl.c
@@ -17,7 +17,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
@@ -1124,8 +1123,7 @@ static int flctl_probe(struct platform_device *pdev)
if (!flctl)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- flctl->reg = devm_ioremap_resource(&pdev->dev, res);
+ flctl->reg = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(flctl->reg))
return PTR_ERR(flctl->reg);
flctl->fifo = res->start + 0x24; /* FLDTFIFO */
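Several hunks in this series (sh_flctl above, stm32_fmc2 and sunxi below) replace the two-step platform_get_resource() plus devm_ioremap_resource() with the combined helper. A hedged sketch of the pattern in a hypothetical probe function (my_probe() is illustrative, not from the patch):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/*
	 * One call maps memory region 0 and still hands back the struct
	 * resource, which is why sh_flctl can keep using res->start for
	 * its FIFO offset.
	 */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}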
diff --git a/drivers/mtd/nand/raw/socrates_nand.c b/drivers/mtd/nand/raw/socrates_nand.c
index a8b720ffe9e8..76d50eb9f1db 100644
--- a/drivers/mtd/nand/raw/socrates_nand.c
+++ b/drivers/mtd/nand/raw/socrates_nand.c
@@ -8,8 +8,9 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#define FPGA_NAND_CMD_MASK (0x7 << 28)
diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
index 10c11cecac08..88811139aaf5 100644
--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
@@ -1922,8 +1922,8 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
if (!(nfc->cs_assigned & BIT(chip_cs)))
continue;
- res = platform_get_resource(pdev, IORESOURCE_MEM, mem_region);
- nfc->data_base[chip_cs] = devm_ioremap_resource(dev, res);
+ nfc->data_base[chip_cs] = devm_platform_get_and_ioremap_resource(pdev,
+ mem_region, &res);
if (IS_ERR(nfc->data_base[chip_cs]))
return PTR_ERR(nfc->data_base[chip_cs]);
@@ -1951,21 +1951,17 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev)
init_completion(&nfc->complete);
- nfc->clk = devm_clk_get(nfc->cdev, NULL);
- if (IS_ERR(nfc->clk))
+ nfc->clk = devm_clk_get_enabled(nfc->cdev, NULL);
+ if (IS_ERR(nfc->clk)) {
+ dev_err(dev, "can not get and enable the clock\n");
return PTR_ERR(nfc->clk);
-
- ret = clk_prepare_enable(nfc->clk);
- if (ret) {
- dev_err(dev, "can not enable the clock\n");
- return ret;
}
rstc = devm_reset_control_get(dev, NULL);
if (IS_ERR(rstc)) {
ret = PTR_ERR(rstc);
if (ret == -EPROBE_DEFER)
- goto err_clk_disable;
+ return ret;
} else {
reset_control_assert(rstc);
reset_control_deassert(rstc);
@@ -2018,9 +2014,6 @@ err_release_dma:
sg_free_table(&nfc->dma_data_sg);
sg_free_table(&nfc->dma_ecc_sg);
-err_clk_disable:
- clk_disable_unprepare(nfc->clk);
-
return ret;
}
@@ -2045,8 +2038,6 @@ static void stm32_fmc2_nfc_remove(struct platform_device *pdev)
sg_free_table(&nfc->dma_data_sg);
sg_free_table(&nfc->dma_ecc_sg);
- clk_disable_unprepare(nfc->clk);
-
stm32_fmc2_nfc_wp_enable(nand);
}
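The clock handling in this and the following drivers moves to devm_clk_get_enabled(), which gets, prepares and enables the clock in one call and undoes all of that automatically on driver detach. That is why the error labels and the remove() paths lose their clk_disable_unprepare() calls. A hedged sketch of the pattern (the function, device and clock names are illustrative):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int my_clk_setup(struct device *dev)
{
	struct clk *clk;

	clk = devm_clk_get_enabled(dev, "ahb");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "can not get and enable the clock\n");

	/* No clk_disable_unprepare() needed anywhere: devres handles it. */
	return 0;
}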
diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c
index 9884304634f6..9abf38049d35 100644
--- a/drivers/mtd/nand/raw/sunxi_nand.c
+++ b/drivers/mtd/nand/raw/sunxi_nand.c
@@ -19,7 +19,6 @@
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
@@ -2087,8 +2086,7 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
nand_controller_init(&nfc->controller);
INIT_LIST_HEAD(&nfc->chips);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nfc->regs = devm_ioremap_resource(dev, r);
+ nfc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(nfc->regs))
return PTR_ERR(nfc->regs);
@@ -2096,37 +2094,26 @@ static int sunxi_nfc_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- nfc->ahb_clk = devm_clk_get(dev, "ahb");
+ nfc->ahb_clk = devm_clk_get_enabled(dev, "ahb");
if (IS_ERR(nfc->ahb_clk)) {
dev_err(dev, "failed to retrieve ahb clk\n");
return PTR_ERR(nfc->ahb_clk);
}
- ret = clk_prepare_enable(nfc->ahb_clk);
- if (ret)
- return ret;
-
- nfc->mod_clk = devm_clk_get(dev, "mod");
+ nfc->mod_clk = devm_clk_get_enabled(dev, "mod");
if (IS_ERR(nfc->mod_clk)) {
dev_err(dev, "failed to retrieve mod clk\n");
- ret = PTR_ERR(nfc->mod_clk);
- goto out_ahb_clk_unprepare;
+ return PTR_ERR(nfc->mod_clk);
}
- ret = clk_prepare_enable(nfc->mod_clk);
- if (ret)
- goto out_ahb_clk_unprepare;
-
nfc->reset = devm_reset_control_get_optional_exclusive(dev, "ahb");
- if (IS_ERR(nfc->reset)) {
- ret = PTR_ERR(nfc->reset);
- goto out_mod_clk_unprepare;
- }
+ if (IS_ERR(nfc->reset))
+ return PTR_ERR(nfc->reset);
ret = reset_control_deassert(nfc->reset);
if (ret) {
dev_err(dev, "reset err %d\n", ret);
- goto out_mod_clk_unprepare;
+ return ret;
}
nfc->caps = of_device_get_match_data(&pdev->dev);
@@ -2165,10 +2152,6 @@ out_release_dmac:
dma_release_channel(nfc->dmac);
out_ahb_reset_reassert:
reset_control_assert(nfc->reset);
-out_mod_clk_unprepare:
- clk_disable_unprepare(nfc->mod_clk);
-out_ahb_clk_unprepare:
- clk_disable_unprepare(nfc->ahb_clk);
return ret;
}
@@ -2183,8 +2166,6 @@ static void sunxi_nfc_remove(struct platform_device *pdev)
if (nfc->dmac)
dma_release_channel(nfc->dmac);
- clk_disable_unprepare(nfc->mod_clk);
- clk_disable_unprepare(nfc->ahb_clk);
}
static const struct sunxi_nfc_caps sunxi_nfc_a10_caps = {
diff --git a/drivers/mtd/nand/raw/vf610_nfc.c b/drivers/mtd/nand/raw/vf610_nfc.c
index 86522048e271..3f783b8f76c9 100644
--- a/drivers/mtd/nand/raw/vf610_nfc.c
+++ b/drivers/mtd/nand/raw/vf610_nfc.c
@@ -827,30 +827,24 @@ static int vf610_nfc_probe(struct platform_device *pdev)
mtd->name = DRV_NAME;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -EINVAL;
+ if (irq < 0)
+ return irq;
nfc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nfc->regs))
return PTR_ERR(nfc->regs);
- nfc->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(nfc->clk))
+ nfc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(nfc->clk)) {
+ dev_err(nfc->dev, "Unable to get and enable clock!\n");
return PTR_ERR(nfc->clk);
-
- err = clk_prepare_enable(nfc->clk);
- if (err) {
- dev_err(nfc->dev, "Unable to enable clock!\n");
- return err;
}
of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
- if (!of_id) {
- err = -ENODEV;
- goto err_disable_clk;
- }
+ if (!of_id)
+ return -ENODEV;
- nfc->variant = (enum vf610_nfc_variant)of_id->data;
+ nfc->variant = (uintptr_t)of_id->data;
for_each_available_child_of_node(nfc->dev->of_node, child) {
if (of_device_is_compatible(child, "fsl,vf610-nfc-nandcs")) {
@@ -858,9 +852,8 @@ static int vf610_nfc_probe(struct platform_device *pdev)
if (nand_get_flash_node(chip)) {
dev_err(nfc->dev,
"Only one NAND chip supported!\n");
- err = -EINVAL;
of_node_put(child);
- goto err_disable_clk;
+ return -EINVAL;
}
nand_set_flash_node(chip, child);
@@ -869,8 +862,7 @@ static int vf610_nfc_probe(struct platform_device *pdev)
if (!nand_get_flash_node(chip)) {
dev_err(nfc->dev, "NAND chip sub-node missing!\n");
- err = -ENODEV;
- goto err_disable_clk;
+ return -ENODEV;
}
chip->options |= NAND_NO_SUBPAGE_WRITE;
@@ -880,7 +872,7 @@ static int vf610_nfc_probe(struct platform_device *pdev)
err = devm_request_irq(nfc->dev, irq, vf610_nfc_irq, 0, DRV_NAME, nfc);
if (err) {
dev_err(nfc->dev, "Error requesting IRQ!\n");
- goto err_disable_clk;
+ return err;
}
vf610_nfc_preinit_controller(nfc);
@@ -892,7 +884,7 @@ static int vf610_nfc_probe(struct platform_device *pdev)
/* Scan the NAND chip */
err = nand_scan(chip, 1);
if (err)
- goto err_disable_clk;
+ return err;
platform_set_drvdata(pdev, nfc);
@@ -904,8 +896,6 @@ static int vf610_nfc_probe(struct platform_device *pdev)
err_cleanup_nand:
nand_cleanup(chip);
-err_disable_clk:
- clk_disable_unprepare(nfc->clk);
return err;
}
@@ -918,7 +908,6 @@ static void vf610_nfc_remove(struct platform_device *pdev)
ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
- clk_disable_unprepare(nfc->clk);
}
#ifdef CONFIG_PM_SLEEP
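The vf610 hunk above also tightens the interrupt lookup: platform_get_irq() returns a negative errno on failure and does not return 0 on current kernels, so checking for irq < 0 and propagating the value is the preferred form. A hedged sketch (my_get_irq() is illustrative):

#include <linux/platform_device.h>

static int my_get_irq(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* already a -errno; no need to invent -EINVAL */

	return irq;
}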
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
index 6b1e2a2bba15..51d802a165ed 100644
--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -7,7 +7,8 @@
#include <linux/mtd/rawnand.h>
#include <linux/of_gpio.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <lantiq_soc.h>
diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c
index 1a3ffb982335..31c439a557b1 100644
--- a/drivers/mtd/nand/spi/esmt.c
+++ b/drivers/mtd/nand/spi/esmt.c
@@ -121,6 +121,15 @@ static const struct spinand_info esmt_c8_spinand_table[] = {
&update_cache_variants),
0,
SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
+ SPINAND_INFO("F50D2G41KA",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51),
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&f50l1g41lb_ooblayout, NULL)),
};
static const struct spinand_manufacturer_ops esmt_spinand_manuf_ops = {
diff --git a/drivers/mtd/nand/spi/gigadevice.c b/drivers/mtd/nand/spi/gigadevice.c
index cfd7c3b26dc4..987710e09441 100644
--- a/drivers/mtd/nand/spi/gigadevice.c
+++ b/drivers/mtd/nand/spi/gigadevice.c
@@ -511,6 +511,26 @@ static const struct spinand_info gigadevice_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ5RExxH",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x21),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ4RExxH",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xc9),
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(4, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants_1gq5,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&gd5fxgqx_variant2_ooblayout,
+ gd5fxgq4uexxg_ecc_get_status)),
};
static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c
index a80427c13121..bbbcaa87c0bc 100644
--- a/drivers/mtd/nand/spi/toshiba.c
+++ b/drivers/mtd/nand/spi/toshiba.c
@@ -266,6 +266,39 @@ static const struct spinand_info toshiba_spinand_table[] = {
SPINAND_HAS_QE_BIT,
SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 1Gb (1st generation) */
+ SPINAND_INFO("TC58NYG0S3HBAI4",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA1),
+ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ 0,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 4Gb (1st generation) */
+ SPINAND_INFO("TH58NYG2S3HBAI4",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xAC),
+ NAND_MEMORG(1, 2048, 128, 64, 4096, 80, 1, 2, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
+ /* 1.8V 8Gb (1st generation) */
+ SPINAND_INFO("TH58NYG3S0HBAI6",
+ SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xA3),
+ NAND_MEMORG(1, 4096, 256, 64, 4096, 80, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_x4_variants,
+ &update_cache_x4_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&tx58cxgxsxraix_ooblayout,
+ tx58cxgxsxraix_ecc_get_status)),
};
static const struct spinand_manufacturer_ops toshiba_spinand_manuf_ops = {
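The new SPI NAND table entries above pack each chip's geometry into NAND_MEMORG(). As a hedged reading aid (the field order below is my understanding of include/linux/mtd/nand.h and is worth double-checking against the tree this applies to), the ESMT F50D2G41KA line decodes as follows:

#include <linux/mtd/nand.h>

/*
 * NAND_MEMORG(bits_per_cell, pagesize, oobsize, pages_per_eraseblock,
 *             eraseblocks_per_lun, max_bad_eraseblocks_per_lun,
 *             planes_per_lun, luns_per_target, ntargets)
 */
static const struct nand_memory_organization f50d2g41ka_memorg =
	NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1);

/*
 * SLC, 2 KiB pages with 128 B OOB, 64 pages per block, 2048 blocks per LUN,
 * up to 40 bad blocks per LUN, one plane, one LUN, one target:
 * 2048 B * 64 * 2048 = 256 MiB = 2 Gbit, matching the "2G" in the part name.
 */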
diff --git a/drivers/mtd/spi-nor/atmel.c b/drivers/mtd/spi-nor/atmel.c
index 656dd80a0be7..58968c1e7d2f 100644
--- a/drivers/mtd/spi-nor/atmel.c
+++ b/drivers/mtd/spi-nor/atmel.c
@@ -48,9 +48,11 @@ static const struct spi_nor_locking_ops at25fs_nor_locking_ops = {
.is_locked = at25fs_nor_is_locked,
};
-static void at25fs_nor_late_init(struct spi_nor *nor)
+static int at25fs_nor_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &at25fs_nor_locking_ops;
+
+ return 0;
}
static const struct spi_nor_fixups at25fs_nor_fixups = {
@@ -149,9 +151,11 @@ static const struct spi_nor_locking_ops atmel_nor_global_protection_ops = {
.is_locked = atmel_nor_is_global_protected,
};
-static void atmel_nor_global_protection_late_init(struct spi_nor *nor)
+static int atmel_nor_global_protection_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &atmel_nor_global_protection_ops;
+
+ return 0;
}
static const struct spi_nor_fixups atmel_nor_global_protection_fixups = {
diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
index 794c7b7d5c92..5d8f47ab146f 100644
--- a/drivers/mtd/spi-nor/controllers/nxp-spifi.c
+++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c
@@ -17,7 +17,6 @@
#include <linux/mtd/partitions.h>
#include <linux/mtd/spi-nor.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
@@ -395,30 +394,18 @@ static int nxp_spifi_probe(struct platform_device *pdev)
if (IS_ERR(spifi->flash_base))
return PTR_ERR(spifi->flash_base);
- spifi->clk_spifi = devm_clk_get(&pdev->dev, "spifi");
+ spifi->clk_spifi = devm_clk_get_enabled(&pdev->dev, "spifi");
if (IS_ERR(spifi->clk_spifi)) {
- dev_err(&pdev->dev, "spifi clock not found\n");
+ dev_err(&pdev->dev, "spifi clock not found or unable to enable\n");
return PTR_ERR(spifi->clk_spifi);
}
- spifi->clk_reg = devm_clk_get(&pdev->dev, "reg");
+ spifi->clk_reg = devm_clk_get_enabled(&pdev->dev, "reg");
if (IS_ERR(spifi->clk_reg)) {
- dev_err(&pdev->dev, "reg clock not found\n");
+ dev_err(&pdev->dev, "reg clock not found or unable to enable\n");
return PTR_ERR(spifi->clk_reg);
}
- ret = clk_prepare_enable(spifi->clk_reg);
- if (ret) {
- dev_err(&pdev->dev, "unable to enable reg clock\n");
- return ret;
- }
-
- ret = clk_prepare_enable(spifi->clk_spifi);
- if (ret) {
- dev_err(&pdev->dev, "unable to enable spifi clock\n");
- goto dis_clk_reg;
- }
-
spifi->dev = &pdev->dev;
platform_set_drvdata(pdev, spifi);
@@ -431,24 +418,17 @@ static int nxp_spifi_probe(struct platform_device *pdev)
flash_np = of_get_next_available_child(pdev->dev.of_node, NULL);
if (!flash_np) {
dev_err(&pdev->dev, "no SPI flash device to configure\n");
- ret = -ENODEV;
- goto dis_clks;
+ return -ENODEV;
}
ret = nxp_spifi_setup_flash(spifi, flash_np);
of_node_put(flash_np);
if (ret) {
dev_err(&pdev->dev, "unable to setup flash chip\n");
- goto dis_clks;
+ return ret;
}
return 0;
-
-dis_clks:
- clk_disable_unprepare(spifi->clk_spifi);
-dis_clk_reg:
- clk_disable_unprepare(spifi->clk_reg);
- return ret;
}
static int nxp_spifi_remove(struct platform_device *pdev)
@@ -456,8 +436,6 @@ static int nxp_spifi_remove(struct platform_device *pdev)
struct nxp_spifi *spifi = platform_get_drvdata(pdev);
mtd_device_unregister(&spifi->nor.mtd);
- clk_disable_unprepare(spifi->clk_spifi);
- clk_disable_unprepare(spifi->clk_reg);
return 0;
}
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 5f29fac8669a..1b0c6770c14e 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -870,21 +870,22 @@ static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
ret = spi_nor_read_cr(nor, &sr_cr[1]);
if (ret)
return ret;
- } else if (nor->params->quad_enable) {
+ } else if (spi_nor_get_protocol_width(nor->read_proto) == 4 &&
+ spi_nor_get_protocol_width(nor->write_proto) == 4 &&
+ nor->params->quad_enable) {
/*
* If the Status Register 2 Read command (35h) is not
* supported, we should at least be sure we don't
* change the value of the SR2 Quad Enable bit.
*
- * We can safely assume that when the Quad Enable method is
- * set, the value of the QE bit is one, as a consequence of the
- * nor->params->quad_enable() call.
+ * When the Quad Enable method is set and the buswidth is 4, we
+ * can safely assume that the value of the QE bit is one, as a
+ * consequence of the nor->params->quad_enable() call.
*
- * We can safely assume that the Quad Enable bit is present in
- * the Status Register 2 at BIT(1). According to the JESD216
- * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
- * Write Status (01h) command is available just for the cases
- * in which the QE bit is described in SR2 at BIT(1).
+ * According to the JESD216 revB standard, BFPT DWORDS[15],
+ * bits 22:20, the 16-bit Write Status (01h) command is
+ * available just for the cases in which the QE bit is
+ * described in SR2 at BIT(1).
*/
sr_cr[1] = SR2_QUAD_EN_BIT1;
} else {
@@ -2844,6 +2845,9 @@ static void spi_nor_init_flags(struct spi_nor *nor)
if (of_property_read_bool(np, "broken-flash-reset"))
nor->flags |= SNOR_F_BROKEN_RESET;
+ if (of_property_read_bool(np, "no-wp"))
+ nor->flags |= SNOR_F_NO_WP;
+
if (flags & SPI_NOR_SWP_IS_VOLATILE)
nor->flags |= SNOR_F_SWP_IS_VOLATILE;
@@ -2897,16 +2901,23 @@ static void spi_nor_init_fixup_flags(struct spi_nor *nor)
* SFDP standard, or where SFDP tables are not defined at all.
* Will replace the spi_nor_manufacturer_init_params() method.
*/
-static void spi_nor_late_init_params(struct spi_nor *nor)
+static int spi_nor_late_init_params(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
+ int ret;
if (nor->manufacturer && nor->manufacturer->fixups &&
- nor->manufacturer->fixups->late_init)
- nor->manufacturer->fixups->late_init(nor);
+ nor->manufacturer->fixups->late_init) {
+ ret = nor->manufacturer->fixups->late_init(nor);
+ if (ret)
+ return ret;
+ }
- if (nor->info->fixups && nor->info->fixups->late_init)
- nor->info->fixups->late_init(nor);
+ if (nor->info->fixups && nor->info->fixups->late_init) {
+ ret = nor->info->fixups->late_init(nor);
+ if (ret)
+ return ret;
+ }
/* Default method kept for backward compatibility. */
if (!params->set_4byte_addr_mode)
@@ -2924,6 +2935,8 @@ static void spi_nor_late_init_params(struct spi_nor *nor)
if (nor->info->n_banks > 1)
params->bank_size = div64_u64(params->size, nor->info->n_banks);
+
+ return 0;
}
/**
@@ -3082,22 +3095,20 @@ static int spi_nor_init_params(struct spi_nor *nor)
spi_nor_init_params_deprecated(nor);
}
- spi_nor_late_init_params(nor);
-
- return 0;
+ return spi_nor_late_init_params(nor);
}
-/** spi_nor_octal_dtr_enable() - enable Octal DTR I/O if needed
+/** spi_nor_set_octal_dtr() - enable or disable Octal DTR I/O.
* @nor: pointer to a 'struct spi_nor'
* @enable: whether to enable or disable Octal DTR
*
* Return: 0 on success, -errno otherwise.
*/
-static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
+static int spi_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
{
int ret;
- if (!nor->params->octal_dtr_enable)
+ if (!nor->params->set_octal_dtr)
return 0;
if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
@@ -3107,7 +3118,7 @@ static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
return 0;
- ret = nor->params->octal_dtr_enable(nor, enable);
+ ret = nor->params->set_octal_dtr(nor, enable);
if (ret)
return ret;
@@ -3168,7 +3179,7 @@ static int spi_nor_init(struct spi_nor *nor)
{
int err;
- err = spi_nor_octal_dtr_enable(nor, true);
+ err = spi_nor_set_octal_dtr(nor, true);
if (err) {
dev_dbg(nor->dev, "octal mode not supported\n");
return err;
@@ -3270,7 +3281,7 @@ static int spi_nor_suspend(struct mtd_info *mtd)
int ret;
/* Disable octal DTR mode if we enabled it. */
- ret = spi_nor_octal_dtr_enable(nor, false);
+ ret = spi_nor_set_octal_dtr(nor, false);
if (ret)
dev_err(nor->dev, "suspend() failed\n");
diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h
index 4fb5ff09c63a..9217379b9cfe 100644
--- a/drivers/mtd/spi-nor/core.h
+++ b/drivers/mtd/spi-nor/core.h
@@ -132,6 +132,7 @@ enum spi_nor_option_flags {
SNOR_F_SWP_IS_VOLATILE = BIT(13),
SNOR_F_RWW = BIT(14),
SNOR_F_ECC = BIT(15),
+ SNOR_F_NO_WP = BIT(16),
};
struct spi_nor_read_command {
@@ -363,7 +364,7 @@ struct spi_nor_otp {
* @erase_map: the erase map parsed from the SFDP Sector Map Parameter
* Table.
* @otp: SPI NOR OTP info.
- * @octal_dtr_enable: enables SPI NOR octal DTR mode.
+ * @set_octal_dtr: enables or disables SPI NOR octal DTR mode.
* @quad_enable: enables SPI NOR quad mode.
* @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode.
* @convert_addr: converts an absolute address into something the flash
@@ -377,6 +378,7 @@ struct spi_nor_otp {
* than reading the status register to indicate they
* are ready for a new command
* @locking_ops: SPI NOR locking methods.
+ * @priv: flash's private data.
*/
struct spi_nor_flash_parameter {
u64 bank_size;
@@ -397,7 +399,7 @@ struct spi_nor_flash_parameter {
struct spi_nor_erase_map erase_map;
struct spi_nor_otp otp;
- int (*octal_dtr_enable)(struct spi_nor *nor, bool enable);
+ int (*set_octal_dtr)(struct spi_nor *nor, bool enable);
int (*quad_enable)(struct spi_nor *nor);
int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable);
u32 (*convert_addr)(struct spi_nor *nor, u32 addr);
@@ -405,6 +407,7 @@ struct spi_nor_flash_parameter {
int (*ready)(struct spi_nor *nor);
const struct spi_nor_locking_ops *locking_ops;
+ void *priv;
};
/**
@@ -431,7 +434,7 @@ struct spi_nor_fixups {
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt);
int (*post_sfdp)(struct spi_nor *nor);
- void (*late_init)(struct spi_nor *nor);
+ int (*late_init)(struct spi_nor *nor);
};
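late_init now returns int so that a vendor fixup can fail probing cleanly; spi_nor_late_init_params() in core.c propagates the error, and the Spansion driver below relies on this to report a failed devm_kmalloc(). A hedged sketch of a fixup written against the new prototype (all my_* names and the opcode value are illustrative):

#include <linux/device.h>
#include <linux/mtd/spi-nor.h>

#include "core.h"

struct my_nor_params {
	u8 clear_opcode;
};

static int my_nor_late_init(struct spi_nor *nor)
{
	struct my_nor_params *priv;

	priv = devm_kmalloc(nor->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;	/* propagated up through spi_nor_init_params() */

	priv->clear_opcode = 0x30;	/* e.g. a clear-status opcode; illustrative */
	nor->params->priv = priv;

	return 0;
}

static const struct spi_nor_fixups my_nor_fixups = {
	.late_init = my_nor_late_init,
};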
/**
diff --git a/drivers/mtd/spi-nor/debugfs.c b/drivers/mtd/spi-nor/debugfs.c
index e11536fffe0f..6e163cb5b478 100644
--- a/drivers/mtd/spi-nor/debugfs.c
+++ b/drivers/mtd/spi-nor/debugfs.c
@@ -27,6 +27,7 @@ static const char *const snor_f_names[] = {
SNOR_F_NAME(SWP_IS_VOLATILE),
SNOR_F_NAME(RWW),
SNOR_F_NAME(ECC),
+ SNOR_F_NAME(NO_WP),
};
#undef SNOR_F_NAME
diff --git a/drivers/mtd/spi-nor/issi.c b/drivers/mtd/spi-nor/issi.c
index 400e2b42f45a..accdf7aa2bfd 100644
--- a/drivers/mtd/spi-nor/issi.c
+++ b/drivers/mtd/spi-nor/issi.c
@@ -29,7 +29,7 @@ static const struct spi_nor_fixups is25lp256_fixups = {
.post_bfpt = is25lp256_post_bfpt_fixups,
};
-static void pm25lv_nor_late_init(struct spi_nor *nor)
+static int pm25lv_nor_late_init(struct spi_nor *nor)
{
struct spi_nor_erase_map *map = &nor->params->erase_map;
int i;
@@ -38,6 +38,8 @@ static void pm25lv_nor_late_init(struct spi_nor *nor)
for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
if (map->erase_type[i].size == 4096)
map->erase_type[i].opcode = SPINOR_OP_BE_4K_PMC;
+
+ return 0;
}
static const struct spi_nor_fixups pm25lv_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c
index 04888258e891..eb149e517c1f 100644
--- a/drivers/mtd/spi-nor/macronix.c
+++ b/drivers/mtd/spi-nor/macronix.c
@@ -110,10 +110,12 @@ static void macronix_nor_default_init(struct spi_nor *nor)
nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
}
-static void macronix_nor_late_init(struct spi_nor *nor)
+static int macronix_nor_late_init(struct spi_nor *nor)
{
if (!nor->params->set_4byte_addr_mode)
nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_en4b_ex4b;
+
+ return 0;
}
static const struct spi_nor_fixups macronix_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c
index 4b919756a205..6ad080c52ab5 100644
--- a/drivers/mtd/spi-nor/micron-st.c
+++ b/drivers/mtd/spi-nor/micron-st.c
@@ -120,7 +120,7 @@ static int micron_st_nor_octal_dtr_dis(struct spi_nor *nor)
return 0;
}
-static int micron_st_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
+static int micron_st_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
{
return enable ? micron_st_nor_octal_dtr_en(nor) :
micron_st_nor_octal_dtr_dis(nor);
@@ -128,7 +128,7 @@ static int micron_st_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
static void mt35xu512aba_default_init(struct spi_nor *nor)
{
- nor->params->octal_dtr_enable = micron_st_nor_octal_dtr_enable;
+ nor->params->set_octal_dtr = micron_st_nor_set_octal_dtr;
}
static int mt35xu512aba_post_sfdp_fixup(struct spi_nor *nor)
@@ -429,7 +429,7 @@ static void micron_st_nor_default_init(struct spi_nor *nor)
nor->params->quad_enable = NULL;
}
-static void micron_st_nor_late_init(struct spi_nor *nor)
+static int micron_st_nor_late_init(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
@@ -438,6 +438,8 @@ static void micron_st_nor_late_init(struct spi_nor *nor)
if (!params->set_4byte_addr_mode)
params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_wren_en4b_ex4b;
+
+ return 0;
}
static const struct spi_nor_fixups micron_st_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c
index 15f9a80c10b9..709822fced86 100644
--- a/drivers/mtd/spi-nor/spansion.c
+++ b/drivers/mtd/spi-nor/spansion.c
@@ -4,14 +4,19 @@
* Copyright (C) 2014, Freescale Semiconductor, Inc.
*/
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/errno.h>
#include <linux/mtd/spi-nor.h>
#include "core.h"
/* flash_info mfr_flag. Used to clear sticky proprietary SR bits. */
#define USE_CLSR BIT(0)
+#define USE_CLPEF BIT(1)
#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */
+#define SPINOR_OP_CLPEF 0x82 /* Clear program/erase failure flags */
#define SPINOR_OP_RD_ANY_REG 0x65 /* Read any register */
#define SPINOR_OP_WR_ANY_REG 0x71 /* Write any register */
#define SPINOR_REG_CYPRESS_VREG 0x00800000
@@ -19,21 +24,16 @@
#define SPINOR_REG_CYPRESS_STR1V \
(SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_STR1)
#define SPINOR_REG_CYPRESS_CFR1 0x2
-#define SPINOR_REG_CYPRESS_CFR1V \
- (SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_CFR1)
#define SPINOR_REG_CYPRESS_CFR1_QUAD_EN BIT(1) /* Quad Enable */
#define SPINOR_REG_CYPRESS_CFR2 0x3
#define SPINOR_REG_CYPRESS_CFR2V \
(SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_CFR2)
+#define SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK GENMASK(3, 0)
#define SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24 0xb
#define SPINOR_REG_CYPRESS_CFR2_ADRBYT BIT(7)
#define SPINOR_REG_CYPRESS_CFR3 0x4
-#define SPINOR_REG_CYPRESS_CFR3V \
- (SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_CFR3)
#define SPINOR_REG_CYPRESS_CFR3_PGSZ BIT(4) /* Page size. */
#define SPINOR_REG_CYPRESS_CFR5 0x6
-#define SPINOR_REG_CYPRESS_CFR5V \
- (SPINOR_REG_CYPRESS_VREG + SPINOR_REG_CYPRESS_CFR5)
#define SPINOR_REG_CYPRESS_CFR5_BIT6 BIT(6)
#define SPINOR_REG_CYPRESS_CFR5_DDR BIT(1)
#define SPINOR_REG_CYPRESS_CFR5_OPI BIT(0)
@@ -57,22 +57,32 @@
SPI_MEM_OP_DUMMY(ndummy, 0), \
SPI_MEM_OP_DATA_IN(1, buf, 0))
-#define SPANSION_CLSR_OP \
- SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0), \
+#define SPANSION_OP(opcode) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 0), \
SPI_MEM_OP_NO_ADDR, \
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
/**
+ * struct spansion_nor_params - Spansion private parameters.
+ * @clsr: Clear Status Register or Clear Program and Erase Failure Flag
+ * opcode.
+ */
+struct spansion_nor_params {
+ u8 clsr;
+};
+
+/**
* spansion_nor_clear_sr() - Clear the Status Register.
* @nor: pointer to 'struct spi_nor'.
*/
static void spansion_nor_clear_sr(struct spi_nor *nor)
{
+ const struct spansion_nor_params *priv_params = nor->params->priv;
int ret;
if (nor->spimem) {
- struct spi_mem_op op = SPANSION_CLSR_OP;
+ struct spi_mem_op op = SPANSION_OP(priv_params->clsr);
spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);
@@ -88,11 +98,17 @@ static void spansion_nor_clear_sr(struct spi_nor *nor)
static int cypress_nor_sr_ready_and_clear_reg(struct spi_nor *nor, u64 addr)
{
+ struct spi_nor_flash_parameter *params = nor->params;
struct spi_mem_op op =
- CYPRESS_NOR_RD_ANY_REG_OP(nor->params->addr_mode_nbytes, addr,
+ CYPRESS_NOR_RD_ANY_REG_OP(params->addr_mode_nbytes, addr,
0, nor->bouncebuf);
int ret;
+ if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
+ op.dummy.nbytes = params->rdsr_dummy;
+ op.data.nbytes = 2;
+ }
+
ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
if (ret)
return ret;
@@ -141,18 +157,26 @@ static int cypress_nor_sr_ready_and_clear(struct spi_nor *nor)
return 1;
}
-static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
+static int cypress_nor_set_memlat(struct spi_nor *nor, u64 addr)
{
struct spi_mem_op op;
u8 *buf = nor->bouncebuf;
int ret;
u8 addr_mode_nbytes = nor->params->addr_mode_nbytes;
+ op = (struct spi_mem_op)
+ CYPRESS_NOR_RD_ANY_REG_OP(addr_mode_nbytes, addr, 0, buf);
+
+ ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
+ if (ret)
+ return ret;
+
/* Use 24 dummy cycles for memory array reads. */
- *buf = SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24;
+ *buf &= ~SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK;
+ *buf |= FIELD_PREP(SPINOR_REG_CYPRESS_CFR2_MEMLAT_MASK,
+ SPINOR_REG_CYPRESS_CFR2_MEMLAT_11_24);
op = (struct spi_mem_op)
- CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes,
- SPINOR_REG_CYPRESS_CFR2V, 1, buf);
+ CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes, addr, 1, buf);
ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
if (ret)
@@ -160,15 +184,41 @@ static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
nor->read_dummy = 24;
+ return 0;
+}
+
+static int cypress_nor_set_octal_dtr_bits(struct spi_nor *nor, u64 addr)
+{
+ struct spi_mem_op op;
+ u8 *buf = nor->bouncebuf;
+
/* Set the octal and DTR enable bits. */
buf[0] = SPINOR_REG_CYPRESS_CFR5_OCT_DTR_EN;
op = (struct spi_mem_op)
- CYPRESS_NOR_WR_ANY_REG_OP(addr_mode_nbytes,
- SPINOR_REG_CYPRESS_CFR5V, 1, buf);
+ CYPRESS_NOR_WR_ANY_REG_OP(nor->params->addr_mode_nbytes,
+ addr, 1, buf);
- ret = spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
- if (ret)
- return ret;
+ return spi_nor_write_any_volatile_reg(nor, &op, nor->reg_proto);
+}
+
+static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
+{
+ const struct spi_nor_flash_parameter *params = nor->params;
+ u8 *buf = nor->bouncebuf;
+ u64 addr;
+ int i, ret;
+
+ for (i = 0; i < params->n_dice; i++) {
+ addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR2;
+ ret = cypress_nor_set_memlat(nor, addr);
+ if (ret)
+ return ret;
+
+ addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR5;
+ ret = cypress_nor_set_octal_dtr_bits(nor, addr);
+ if (ret)
+ return ret;
+ }
/* Read flash ID to make sure the switch was successful. */
ret = spi_nor_read_id(nor, nor->addr_nbytes, 3, buf,
@@ -184,11 +234,10 @@ static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
return 0;
}
-static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
+static int cypress_nor_set_single_spi_bits(struct spi_nor *nor, u64 addr)
{
struct spi_mem_op op;
u8 *buf = nor->bouncebuf;
- int ret;
/*
* The register is 1-byte wide, but 1-byte transactions are not allowed
@@ -198,11 +247,23 @@ static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
buf[0] = SPINOR_REG_CYPRESS_CFR5_OCT_DTR_DS;
buf[1] = 0;
op = (struct spi_mem_op)
- CYPRESS_NOR_WR_ANY_REG_OP(nor->addr_nbytes,
- SPINOR_REG_CYPRESS_CFR5V, 2, buf);
- ret = spi_nor_write_any_volatile_reg(nor, &op, SNOR_PROTO_8_8_8_DTR);
- if (ret)
- return ret;
+ CYPRESS_NOR_WR_ANY_REG_OP(nor->addr_nbytes, addr, 2, buf);
+ return spi_nor_write_any_volatile_reg(nor, &op, SNOR_PROTO_8_8_8_DTR);
+}
+
+static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
+{
+ const struct spi_nor_flash_parameter *params = nor->params;
+ u8 *buf = nor->bouncebuf;
+ u64 addr;
+ int i, ret;
+
+ for (i = 0; i < params->n_dice; i++) {
+ addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR5;
+ ret = cypress_nor_set_single_spi_bits(nor, addr);
+ if (ret)
+ return ret;
+ }
/* Read flash ID to make sure the switch was successful. */
ret = spi_nor_read_id(nor, 0, 0, buf, SNOR_PROTO_1_1_1);
@@ -283,10 +344,6 @@ static int cypress_nor_quad_enable_volatile(struct spi_nor *nor)
u8 i;
int ret;
- if (!params->n_dice)
- return cypress_nor_quad_enable_volatile_reg(nor,
- SPINOR_REG_CYPRESS_CFR1V);
-
for (i = 0; i < params->n_dice; i++) {
addr = params->vreg_offset[i] + SPINOR_REG_CYPRESS_CFR1;
ret = cypress_nor_quad_enable_volatile_reg(nor, addr);
@@ -408,28 +465,17 @@ static int cypress_nor_set_addr_mode_nbytes(struct spi_nor *nor)
return 0;
}
-static int cypress_nor_get_page_size_single_chip(struct spi_nor *nor)
-{
- struct spi_mem_op op =
- CYPRESS_NOR_RD_ANY_REG_OP(nor->params->addr_mode_nbytes,
- SPINOR_REG_CYPRESS_CFR3V, 0,
- nor->bouncebuf);
- int ret;
-
- ret = spi_nor_read_any_reg(nor, &op, nor->reg_proto);
- if (ret)
- return ret;
-
- if (nor->bouncebuf[0] & SPINOR_REG_CYPRESS_CFR3_PGSZ)
- nor->params->page_size = 512;
- else
- nor->params->page_size = 256;
-
- return 0;
-}
-
-
-static int cypress_nor_get_page_size_mcp(struct spi_nor *nor)
+/**
+ * cypress_nor_get_page_size() - Get flash page size configuration.
+ * @nor: pointer to a 'struct spi_nor'
+ *
+ * The BFPT table advertises a 512B or 256B page size depending on part but the
+ * page size is actually configurable (with the default being 256B). Read from
+ * CFR3V[4] and set the correct size.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int cypress_nor_get_page_size(struct spi_nor *nor)
{
struct spi_mem_op op =
CYPRESS_NOR_RD_ANY_REG_OP(nor->params->addr_mode_nbytes,
@@ -459,23 +505,6 @@ static int cypress_nor_get_page_size_mcp(struct spi_nor *nor)
return 0;
}
-/**
- * cypress_nor_get_page_size() - Get flash page size configuration.
- * @nor: pointer to a 'struct spi_nor'
- *
- * The BFPT table advertises a 512B or 256B page size depending on part but the
- * page size is actually configurable (with the default being 256B). Read from
- * CFR3V[4] and set the correct size.
- *
- * Return: 0 on success, -errno otherwise.
- */
-static int cypress_nor_get_page_size(struct spi_nor *nor)
-{
- if (nor->params->n_dice)
- return cypress_nor_get_page_size_mcp(nor);
- return cypress_nor_get_page_size_single_chip(nor);
-}
-
static void cypress_nor_ecc_init(struct spi_nor *nor)
{
/*
@@ -512,25 +541,39 @@ s25fs256t_post_bfpt_fixup(struct spi_nor *nor,
if (nor->bouncebuf[0])
return -ENODEV;
- return cypress_nor_get_page_size(nor);
+ return 0;
}
static int s25fs256t_post_sfdp_fixup(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
+ /*
+ * S25FS256T does not define the SCCR map, but we would like to use the
+ * same code base for both single and multi chip package devices, thus
+ * set the vreg_offset and n_dice to be able to do so.
+ */
+ params->vreg_offset = devm_kmalloc(nor->dev, sizeof(u32), GFP_KERNEL);
+ if (!params->vreg_offset)
+ return -ENOMEM;
+
+ params->vreg_offset[0] = SPINOR_REG_CYPRESS_VREG;
+ params->n_dice = 1;
+
/* PP_1_1_4_4B is supported but missing in 4BAIT. */
params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
SPINOR_OP_PP_1_1_4_4B,
SNOR_PROTO_1_1_4);
- return 0;
+ return cypress_nor_get_page_size(nor);
}
-static void s25fs256t_late_init(struct spi_nor *nor)
+static int s25fs256t_late_init(struct spi_nor *nor)
{
cypress_nor_ecc_init(nor);
+
+ return 0;
}
static struct spi_nor_fixups s25fs256t_fixups = {
@@ -558,10 +601,20 @@ s25hx_t_post_bfpt_fixup(struct spi_nor *nor,
static int s25hx_t_post_sfdp_fixup(struct spi_nor *nor)
{
- struct spi_nor_erase_type *erase_type =
- nor->params->erase_map.erase_type;
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spi_nor_erase_type *erase_type = params->erase_map.erase_type;
unsigned int i;
+ if (!params->n_dice || !params->vreg_offset) {
+ dev_err(nor->dev, "%s failed. The volatile register offset could not be retrieved from SFDP.\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ /* The 2 Gb parts duplicate info and advertise 4 dice instead of 2. */
+ if (params->size == SZ_256M)
+ params->n_dice = 2;
+
/*
* In some parts, 3byte erase opcodes are advertised by 4BAIT.
* Convert them to 4byte erase opcodes.
@@ -579,25 +632,19 @@ static int s25hx_t_post_sfdp_fixup(struct spi_nor *nor)
}
}
- /* The 2 Gb parts duplicate info and advertise 4 dice instead of 2. */
- if (nor->params->size == SZ_256M)
- nor->params->n_dice = 2;
-
return cypress_nor_get_page_size(nor);
}
-static void s25hx_t_late_init(struct spi_nor *nor)
+static int s25hx_t_late_init(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
/* Fast Read 4B requires mode cycles */
params->reads[SNOR_CMD_READ_FAST].num_mode_clocks = 8;
-
+ params->ready = cypress_nor_sr_ready_and_clear;
cypress_nor_ecc_init(nor);
- /* Replace ready() with multi die version */
- if (params->n_dice)
- params->ready = cypress_nor_sr_ready_and_clear;
+ return 0;
}
static struct spi_nor_fixups s25hx_t_fixups = {
@@ -607,7 +654,7 @@ static struct spi_nor_fixups s25hx_t_fixups = {
};
/**
- * cypress_nor_octal_dtr_enable() - Enable octal DTR on Cypress flashes.
+ * cypress_nor_set_octal_dtr() - Enable or disable octal DTR on Cypress flashes.
* @nor: pointer to a 'struct spi_nor'
* @enable: whether to enable or disable Octal DTR
*
@@ -616,7 +663,7 @@ static struct spi_nor_fixups s25hx_t_fixups = {
*
* Return: 0 on success, -errno otherwise.
*/
-static int cypress_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
+static int cypress_nor_set_octal_dtr(struct spi_nor *nor, bool enable)
{
return enable ? cypress_nor_octal_dtr_en(nor) :
cypress_nor_octal_dtr_dis(nor);
@@ -624,22 +671,34 @@ static int cypress_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
static int s28hx_t_post_sfdp_fixup(struct spi_nor *nor)
{
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ if (!params->n_dice || !params->vreg_offset) {
+ dev_err(nor->dev, "%s failed. The volatile register offset could not be retrieved from SFDP.\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ /* The 2 Gb parts duplicate info and advertise 4 dice instead of 2. */
+ if (params->size == SZ_256M)
+ params->n_dice = 2;
+
/*
* On older versions of the flash the xSPI Profile 1.0 table has the
* 8D-8D-8D Fast Read opcode as 0x00. But it actually should be 0xEE.
*/
- if (nor->params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode == 0)
- nor->params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode =
+ if (params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode == 0)
+ params->reads[SNOR_CMD_READ_8_8_8_DTR].opcode =
SPINOR_OP_CYPRESS_RD_FAST;
/* This flash is also missing the 4-byte Page Program opcode bit. */
- spi_nor_set_pp_settings(&nor->params->page_programs[SNOR_CMD_PP],
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
/*
* Since xSPI Page Program opcode is backward compatible with
* Legacy SPI, use Legacy SPI opcode there as well.
*/
- spi_nor_set_pp_settings(&nor->params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
+ spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);
/*
@@ -647,7 +706,7 @@ static int s28hx_t_post_sfdp_fixup(struct spi_nor *nor)
* address bytes needed for Read Status Register command as 0 but the
* actual value for that is 4.
*/
- nor->params->rdsr_addr_nbytes = 4;
+ params->rdsr_addr_nbytes = 4;
return cypress_nor_get_page_size(nor);
}
@@ -656,19 +715,18 @@ static int s28hx_t_post_bfpt_fixup(struct spi_nor *nor,
const struct sfdp_parameter_header *bfpt_header,
const struct sfdp_bfpt *bfpt)
{
- int ret;
-
- ret = cypress_nor_set_addr_mode_nbytes(nor);
- if (ret)
- return ret;
-
- return 0;
+ return cypress_nor_set_addr_mode_nbytes(nor);
}
-static void s28hx_t_late_init(struct spi_nor *nor)
+static int s28hx_t_late_init(struct spi_nor *nor)
{
- nor->params->octal_dtr_enable = cypress_nor_octal_dtr_enable;
+ struct spi_nor_flash_parameter *params = nor->params;
+
+ params->set_octal_dtr = cypress_nor_set_octal_dtr;
+ params->ready = cypress_nor_sr_ready_and_clear;
cypress_nor_ecc_init(nor);
+
+ return 0;
}
static const struct spi_nor_fixups s28hx_t_fixups = {
@@ -792,47 +850,59 @@ static const struct flash_info spansion_nor_parts[] = {
FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
{ "s25fs256t", INFO6(0x342b19, 0x0f0890, 0, 0)
PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s25fs256t_fixups },
- { "s25hl512t", INFO6(0x342a1a, 0x0f0390, 256 * 1024, 256)
+ { "s25hl512t", INFO6(0x342a1a, 0x0f0390, 0, 0)
PARSE_SFDP
- MFR_FLAGS(USE_CLSR)
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s25hx_t_fixups },
- { "s25hl01gt", INFO6(0x342a1b, 0x0f0390, 256 * 1024, 512)
+ { "s25hl01gt", INFO6(0x342a1b, 0x0f0390, 0, 0)
PARSE_SFDP
- MFR_FLAGS(USE_CLSR)
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s25hx_t_fixups },
{ "s25hl02gt", INFO6(0x342a1c, 0x0f0090, 0, 0)
PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
FLAGS(NO_CHIP_ERASE)
.fixups = &s25hx_t_fixups },
- { "s25hs512t", INFO6(0x342b1a, 0x0f0390, 256 * 1024, 256)
+ { "s25hs512t", INFO6(0x342b1a, 0x0f0390, 0, 0)
PARSE_SFDP
- MFR_FLAGS(USE_CLSR)
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s25hx_t_fixups },
- { "s25hs01gt", INFO6(0x342b1b, 0x0f0390, 256 * 1024, 512)
+ { "s25hs01gt", INFO6(0x342b1b, 0x0f0390, 0, 0)
PARSE_SFDP
- MFR_FLAGS(USE_CLSR)
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s25hx_t_fixups },
{ "s25hs02gt", INFO6(0x342b1c, 0x0f0090, 0, 0)
PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
FLAGS(NO_CHIP_ERASE)
.fixups = &s25hx_t_fixups },
{ "cy15x104q", INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1)
FLAGS(SPI_NOR_NO_ERASE) },
- { "s28hl512t", INFO(0x345a1a, 0, 256 * 1024, 256)
+ { "s28hl512t", INFO(0x345a1a, 0, 0, 0)
+ PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
+ .fixups = &s28hx_t_fixups,
+ },
+ { "s28hl01gt", INFO(0x345a1b, 0, 0, 0)
PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s28hx_t_fixups,
},
- { "s28hl01gt", INFO(0x345a1b, 0, 256 * 1024, 512)
+ { "s28hs512t", INFO(0x345b1a, 0, 0, 0)
PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s28hx_t_fixups,
},
- { "s28hs512t", INFO(0x345b1a, 0, 256 * 1024, 256)
+ { "s28hs01gt", INFO(0x345b1b, 0, 0, 0)
PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s28hx_t_fixups,
},
- { "s28hs01gt", INFO(0x345b1b, 0, 256 * 1024, 512)
+ { "s28hs02gt", INFO(0x345b1c, 0, 0, 0)
PARSE_SFDP
+ MFR_FLAGS(USE_CLPEF)
.fixups = &s28hx_t_fixups,
},
};
@@ -876,17 +946,35 @@ static int spansion_nor_sr_ready_and_clear(struct spi_nor *nor)
return !(nor->bouncebuf[0] & SR_WIP);
}
-static void spansion_nor_late_init(struct spi_nor *nor)
+static int spansion_nor_late_init(struct spi_nor *nor)
{
- if (nor->params->size > SZ_16M) {
+ struct spi_nor_flash_parameter *params = nor->params;
+ struct spansion_nor_params *priv_params;
+ u8 mfr_flags = nor->info->mfr_flags;
+
+ if (params->size > SZ_16M) {
nor->flags |= SNOR_F_4B_OPCODES;
/* No small sector erase for 4-byte command set */
nor->erase_opcode = SPINOR_OP_SE;
nor->mtd.erasesize = nor->info->sector_size;
}
- if (nor->info->mfr_flags & USE_CLSR)
- nor->params->ready = spansion_nor_sr_ready_and_clear;
+ if (mfr_flags & (USE_CLSR | USE_CLPEF)) {
+ priv_params = devm_kmalloc(nor->dev, sizeof(*priv_params),
+ GFP_KERNEL);
+ if (!priv_params)
+ return -ENOMEM;
+
+ if (mfr_flags & USE_CLSR)
+ priv_params->clsr = SPINOR_OP_CLSR;
+ else if (mfr_flags & USE_CLPEF)
+ priv_params->clsr = SPINOR_OP_CLPEF;
+
+ params->priv = priv_params;
+ params->ready = spansion_nor_sr_ready_and_clear;
+ }
+
+ return 0;
}
static const struct spi_nor_fixups spansion_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
index 688eb20c763e..197d2c1101ed 100644
--- a/drivers/mtd/spi-nor/sst.c
+++ b/drivers/mtd/spi-nor/sst.c
@@ -49,9 +49,11 @@ static const struct spi_nor_locking_ops sst26vf_nor_locking_ops = {
.is_locked = sst26vf_nor_is_locked,
};
-static void sst26vf_nor_late_init(struct spi_nor *nor)
+static int sst26vf_nor_late_init(struct spi_nor *nor)
{
nor->params->locking_ops = &sst26vf_nor_locking_ops;
+
+ return 0;
}
static const struct spi_nor_fixups sst26vf_nor_fixups = {
@@ -111,6 +113,10 @@ static const struct flash_info sst_nor_parts[] = {
SPI_NOR_QUAD_READ) },
{ "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
+ { "sst26vf032b", INFO(0xbf2642, 0, 0, 0)
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
+ PARSE_SFDP
+ .fixups = &sst26vf_nor_fixups },
{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128)
FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
@@ -203,10 +209,12 @@ out:
return ret;
}
-static void sst_nor_late_init(struct spi_nor *nor)
+static int sst_nor_late_init(struct spi_nor *nor)
{
if (nor->info->mfr_flags & SST_WRITE)
nor->mtd._write = sst_nor_write;
+
+ return 0;
}
static const struct spi_nor_fixups sst_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/swp.c b/drivers/mtd/spi-nor/swp.c
index 0ba716e84377..5ab9d5324860 100644
--- a/drivers/mtd/spi-nor/swp.c
+++ b/drivers/mtd/spi-nor/swp.c
@@ -214,8 +214,13 @@ static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
status_new = (status_old & ~mask & ~tb_mask) | val;
- /* Disallow further writes if WP pin is asserted */
- status_new |= SR_SRWD;
+ /*
+ * Disallow further writes if WP# pin is neither left floating nor
+ * wrongly tied to GND (that includes internal pull-downs).
+ * WP# pin hard strapped to GND can be a valid use case.
+ */
+ if (!(nor->flags & SNOR_F_NO_WP))
+ status_new |= SR_SRWD;
if (!use_top)
status_new |= tb_mask;
diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c
index 834d6ba5ce70..cd99c9a1c568 100644
--- a/drivers/mtd/spi-nor/winbond.c
+++ b/drivers/mtd/spi-nor/winbond.c
@@ -120,8 +120,9 @@ static const struct flash_info winbond_nor_parts[] = {
NO_SFDP_FLAGS(SECT_4K) },
{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16)
NO_SFDP_FLAGS(SECT_4K) },
- { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256)
- NO_SFDP_FLAGS(SECT_4K) },
+ { "w25q128", INFO(0xef4018, 0, 0, 0)
+ PARSE_SFDP
+ FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512)
NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
.fixups = &w25q256_fixups },
@@ -216,7 +217,7 @@ static const struct spi_nor_otp_ops winbond_nor_otp_ops = {
.is_locked = spi_nor_otp_is_locked_sr2,
};
-static void winbond_nor_late_init(struct spi_nor *nor)
+static int winbond_nor_late_init(struct spi_nor *nor)
{
struct spi_nor_flash_parameter *params = nor->params;
@@ -232,6 +233,8 @@ static void winbond_nor_late_init(struct spi_nor *nor)
* from BFPT, if any.
*/
params->set_4byte_addr_mode = winbond_nor_set_4byte_addr_mode;
+
+ return 0;
}
static const struct spi_nor_fixups winbond_nor_fixups = {
diff --git a/drivers/mtd/spi-nor/xilinx.c b/drivers/mtd/spi-nor/xilinx.c
index 7175de8aa336..00d53eae5ee8 100644
--- a/drivers/mtd/spi-nor/xilinx.c
+++ b/drivers/mtd/spi-nor/xilinx.c
@@ -155,10 +155,12 @@ static int xilinx_nor_setup(struct spi_nor *nor,
return 0;
}
-static void xilinx_nor_late_init(struct spi_nor *nor)
+static int xilinx_nor_late_init(struct spi_nor *nor)
{
nor->params->setup = xilinx_nor_setup;
nor->params->ready = xilinx_nor_sr_ready;
+
+ return 0;
}
static const struct spi_nor_fixups xilinx_nor_fixups = {
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 688075859ae4..ed3a589def6b 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -159,7 +159,7 @@ static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
#endif
static void ldisc_receive(struct tty_struct *tty, const u8 *data,
- const char *flags, int count)
+ const u8 *flags, size_t count)
{
struct sk_buff *skb = NULL;
struct ser_device *ser;
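The networking line disciplines below are adjusted to the updated tty receive hook: the data and per-character flag pointers are now const u8 * and the count is a size_t. A hedged sketch of the new prototype as a driver sees it (names are illustrative and only the fields relevant here are filled in):

#include <linux/module.h>
#include <linux/tty.h>

static void my_ldisc_receive(struct tty_struct *tty, const u8 *data,
			     const u8 *flags, size_t count)
{
	size_t i;

	for (i = 0; i < count; i++) {
		if (flags && flags[i])
			continue;	/* character was flagged as bad by the UART */
		/* consume data[i] */
	}
}

static struct tty_ldisc_ops my_ldisc_ops = {
	.owner		= THIS_MODULE,
	.name		= "my_ldisc",
	.receive_buf	= my_ldisc_receive,
};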
diff --git a/drivers/net/can/can327.c b/drivers/net/can/can327.c
index dc7192ecb001..24af63961030 100644
--- a/drivers/net/can/can327.c
+++ b/drivers/net/can/can327.c
@@ -885,10 +885,10 @@ static bool can327_is_valid_rx_char(u8 c)
* This will not be re-entered while running, but other ldisc
* functions may be called in parallel.
*/
-static void can327_ldisc_rx(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count)
+static void can327_ldisc_rx(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count)
{
- struct can327 *elm = (struct can327 *)tty->disc_data;
+ struct can327 *elm = tty->disc_data;
size_t first_new_char_idx;
if (elm->uart_side_failure)
@@ -901,15 +901,17 @@ static void can327_ldisc_rx(struct tty_struct *tty, const unsigned char *cp,
*/
first_new_char_idx = elm->rxfill;
- while (count-- && elm->rxfill < CAN327_SIZE_RXBUF) {
+ while (count--) {
+ if (elm->rxfill >= CAN327_SIZE_RXBUF) {
+ netdev_err(elm->dev,
+ "Receive buffer overflowed. Bad chip or wiring? count = %zu",
+ count);
+ goto uart_failure;
+ }
if (fp && *fp++) {
netdev_err(elm->dev,
"Error in received character stream. Check your wiring.");
-
- can327_uart_side_failure(elm);
-
- spin_unlock_bh(&elm->lock);
- return;
+ goto uart_failure;
}
/* Ignore NUL characters, which the PIC microcontroller may
@@ -925,10 +927,7 @@ static void can327_ldisc_rx(struct tty_struct *tty, const unsigned char *cp,
netdev_err(elm->dev,
"Received illegal character %02x.\n",
*cp);
- can327_uart_side_failure(elm);
-
- spin_unlock_bh(&elm->lock);
- return;
+ goto uart_failure;
}
elm->rxbuf[elm->rxfill++] = *cp;
@@ -937,19 +936,13 @@ static void can327_ldisc_rx(struct tty_struct *tty, const unsigned char *cp,
cp++;
}
- if (count >= 0) {
- netdev_err(elm->dev,
- "Receive buffer overflowed. Bad chip or wiring? count = %i",
- count);
-
- can327_uart_side_failure(elm);
-
- spin_unlock_bh(&elm->lock);
- return;
- }
-
can327_parse_rxbuf(elm, first_new_char_idx);
spin_unlock_bh(&elm->lock);
+
+ return;
+uart_failure:
+ can327_uart_side_failure(elm);
+ spin_unlock_bh(&elm->lock);
}
/* Write out remaining transmit buffer.
@@ -990,7 +983,7 @@ static void can327_ldisc_tx_worker(struct work_struct *work)
/* Called by the driver when there's room for more data. */
static void can327_ldisc_tx_wakeup(struct tty_struct *tty)
{
- struct can327 *elm = (struct can327 *)tty->disc_data;
+ struct can327 *elm = tty->disc_data;
schedule_work(&elm->tx_work);
}
@@ -1067,7 +1060,7 @@ static int can327_ldisc_open(struct tty_struct *tty)
*/
static void can327_ldisc_close(struct tty_struct *tty)
{
- struct can327 *elm = (struct can327 *)tty->disc_data;
+ struct can327 *elm = tty->disc_data;
/* unregister_netdev() calls .ndo_stop() so we don't have to. */
unregister_candev(elm->dev);
@@ -1092,7 +1085,7 @@ static void can327_ldisc_close(struct tty_struct *tty)
static int can327_ldisc_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
- struct can327 *elm = (struct can327 *)tty->disc_data;
+ struct can327 *elm = tty->disc_data;
unsigned int tmp;
switch (cmd) {
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index ac86640998a8..5bca719d61f5 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -108,11 +108,6 @@ struct ems_pci_card {
#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
-#ifndef PCI_VENDOR_ID_ASIX
-#define PCI_VENDOR_ID_ASIX 0x125b
-#define PCI_DEVICE_ID_ASIX_9110 0x9110
-#define PCI_SUBVENDOR_ID_ASIX 0xa000
-#endif
#define PCI_SUBDEVICE_ID_EMS 0x4010
static const struct pci_device_id ems_pci_tbl[] = {
@@ -123,7 +118,7 @@ static const struct pci_device_id ems_pci_tbl[] = {
/* CPC-104P v2 */
{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4002},
/* CPC-PCIe v3 */
- {PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_9110, PCI_SUBVENDOR_ID_ASIX, PCI_SUBDEVICE_ID_EMS},
+ {PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_AX99100_LB, 0xa000, PCI_SUBDEVICE_ID_EMS},
{0,}
};
MODULE_DEVICE_TABLE(pci, ems_pci_tbl);
diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c
index f4db77007c13..24c6622d36bd 100644
--- a/drivers/net/can/slcan/slcan-core.c
+++ b/drivers/net/can/slcan/slcan-core.c
@@ -583,7 +583,7 @@ static void slcan_transmit(struct work_struct *work)
*/
static void slcan_write_wakeup(struct tty_struct *tty)
{
- struct slcan *sl = (struct slcan *)tty->disc_data;
+ struct slcan *sl = tty->disc_data;
schedule_work(&sl->tx_work);
}
@@ -774,11 +774,10 @@ static const struct net_device_ops slcan_netdev_ops = {
* be re-entered while running but other ldisc functions may be called
* in parallel
*/
-static void slcan_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, const char *fp,
- int count)
+static void slcan_receive_buf(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count)
{
- struct slcan *sl = (struct slcan *)tty->disc_data;
+ struct slcan *sl = tty->disc_data;
if (!netif_running(sl->dev))
return;
@@ -862,7 +861,7 @@ static int slcan_open(struct tty_struct *tty)
*/
static void slcan_close(struct tty_struct *tty)
{
- struct slcan *sl = (struct slcan *)tty->disc_data;
+ struct slcan *sl = tty->disc_data;
unregister_candev(sl->dev);
@@ -886,7 +885,7 @@ static void slcan_close(struct tty_struct *tty)
static int slcan_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
- struct slcan *sl = (struct slcan *)tty->disc_data;
+ struct slcan *sl = tty->disc_data;
unsigned int tmp;
switch (cmd) {
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 6673122266b7..42db7679c360 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2335,13 +2335,27 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
- if (dev->chip_id == KSZ8830_CHIP_ID) {
+ switch (dev->chip_id) {
+ case KSZ8830_CHIP_ID:
/* Silicon Errata Sheet (DS80000830A):
* Port 1 does not work with LinkMD Cable-Testing.
* Port 1 does not respond to received PAUSE control frames.
*/
if (!port)
return MICREL_KSZ8_P1_ERRATA;
+ break;
+ case KSZ9477_CHIP_ID:
+ /* KSZ9477 Errata DS80000754C
+ *
+ * Module 4: Energy Efficient Ethernet (EEE) feature select must
+ * be manually disabled
+ * The EEE feature is enabled by default, but it is not fully
+ * operational. It must be manually disabled through register
+ * controls. If not disabled, the PHY ports can auto-negotiate
+ * to enable EEE, and this feature can cause link drops when
+ * linked to another device supporting EEE.
+ */
+ return MICREL_NO_EEE;
}
return 0;
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index dee35ba924ad..0617d5ccd3ff 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -132,6 +132,8 @@ struct sja1105_info {
int max_frame_mem;
int num_ports;
bool multiple_cascade_ports;
+ /* Every {port, TXQ} has its own CBS shaper */
+ bool fixed_cbs_mapping;
enum dsa_tag_protocol tag_proto;
const struct sja1105_dynamic_table_ops *dyn_ops;
const struct sja1105_table_ops *static_ops;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 331bb1c6676a..a23d980d28f5 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -2115,11 +2115,36 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
}
#define BYTES_PER_KBIT (1000LL / 8)
+/* Port 0 (the uC port) does not have CBS shapers */
+#define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
+
+static int sja1105_find_cbs_shaper(struct sja1105_private *priv,
+ int port, int prio)
+{
+ int i;
+
+ if (priv->info->fixed_cbs_mapping) {
+ i = SJA1110_FIXED_CBS(port, prio);
+ if (i >= 0 && i < priv->info->num_cbs_shapers)
+ return i;
+
+ return -1;
+ }
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++)
+ if (priv->cbs[i].port == port && priv->cbs[i].prio == prio)
+ return i;
+
+ return -1;
+}
static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
{
int i;
+ if (priv->info->fixed_cbs_mapping)
+ return -1;
+
for (i = 0; i < priv->info->num_cbs_shapers; i++)
if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
return i;
@@ -2150,14 +2175,20 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
struct sja1105_cbs_entry *cbs;
+ s64 port_transmit_rate_kbps;
int index;
if (!offload->enable)
return sja1105_delete_cbs_shaper(priv, port, offload->queue);
- index = sja1105_find_unused_cbs_shaper(priv);
- if (index < 0)
- return -ENOSPC;
+ /* The user may be replacing an existing shaper */
+ index = sja1105_find_cbs_shaper(priv, port, offload->queue);
+ if (index < 0) {
+ /* That isn't the case - see if we can allocate a new one */
+ index = sja1105_find_unused_cbs_shaper(priv);
+ if (index < 0)
+ return -ENOSPC;
+ }
cbs = &priv->cbs[index];
cbs->port = port;
@@ -2167,9 +2198,17 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
*/
cbs->credit_hi = offload->hicredit;
cbs->credit_lo = abs(offload->locredit);
- /* User space is in kbits/sec, hardware in bytes/sec */
- cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
- cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
+ /* User space is in kbits/sec, while the hardware is in bytes/sec times
+ * link speed. Since the given offload->sendslope is good only for the
+ * current link speed anyway, and user space is likely to reprogram it
+ * when that changes, don't even bother to track the port's link speed,
+ * but deduce the port transmit rate from idleslope - sendslope.
+ */
+ port_transmit_rate_kbps = offload->idleslope - offload->sendslope;
+ cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT,
+ port_transmit_rate_kbps);
+ cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT),
+ port_transmit_rate_kbps);
/* Convert the negative values from 64-bit 2's complement
* to 32-bit 2's complement (for the case of 0x80000000 whose
* negative is still negative).
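
A self-contained sketch of the slope normalization performed above, keeping only the arithmetic (plain 64-bit division stands in for div_s64(), and the numbers are made up): the port rate is deduced as idleslope - sendslope, then both slopes are scaled to fractions of it.

#include <inttypes.h>
#include <stdio.h>

#define BYTES_PER_KBIT (1000LL / 8)

struct cbs_hw {
	int64_t idle_slope;
	int64_t send_slope;
};

static struct cbs_hw cbs_to_hw(int64_t idleslope_kbps, int64_t sendslope_kbps)
{
	/* sendslope = idleslope - port rate, so their difference is the rate */
	int64_t port_rate_kbps = idleslope_kbps - sendslope_kbps;
	int64_t send_abs = sendslope_kbps < 0 ? -sendslope_kbps : sendslope_kbps;
	struct cbs_hw hw;

	hw.idle_slope = idleslope_kbps * BYTES_PER_KBIT / port_rate_kbps;
	hw.send_slope = send_abs * BYTES_PER_KBIT / port_rate_kbps;
	return hw;
}

int main(void)
{
	/* 20 Mbit/s reserved on a 100 Mbit/s link: sendslope = -80000 kbit/s */
	struct cbs_hw hw = cbs_to_hw(20000, -80000);

	printf("idle_slope=%" PRId64 " send_slope=%" PRId64 "\n",
	       hw.idle_slope, hw.send_slope);
	return 0;
}
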
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index 5ce29c8057a4..834b5c1b4db0 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -781,6 +781,7 @@ const struct sja1105_info sja1110a_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -831,6 +832,7 @@ const struct sja1105_info sja1110b_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -881,6 +883,7 @@ const struct sja1105_info sja1110c_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -931,6 +934,7 @@ const struct sja1105_info sja1110d_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index f178ed9899a9..3ae8e8af8ab3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -3721,6 +3721,60 @@ struct hwrm_func_backing_store_qcaps_v2_output {
u8 valid;
};
+/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */
+struct hwrm_func_dbr_pacing_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */
+struct hwrm_func_dbr_pacing_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+#define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
+ u8 unused_0[7];
+ __le32 dbr_stat_db_fifo_reg;
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST \
+ FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
+ __le32 dbr_stat_db_fifo_reg_watermark_mask;
+ u8 dbr_stat_db_fifo_reg_watermark_shift;
+ u8 unused_1[3];
+ __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
+ u8 dbr_stat_db_fifo_reg_fifo_room_shift;
+ u8 unused_2[3];
+ __le32 dbr_throttling_aeq_arm_reg;
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST \
+ FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
+ u8 dbr_throttling_aeq_arm_reg_val;
+ u8 unused_3[7];
+ __le32 primary_nq_id;
+ __le32 pacing_threshold;
+ u8 unused_4[7];
+ u8 valid;
+};
+
/* hwrm_func_drv_if_change_input (size:192b/24B) */
struct hwrm_func_drv_if_change_input {
__le16 req_type;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 852eb449ccae..6ba2b9398633 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -345,7 +345,7 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->hw_ring_stats_size = bp->hw_ring_stats_size;
edev->pf_port_id = bp->pf.port_id;
edev->en_state = bp->state;
-
+ edev->bar0 = bp->bar0;
edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 80cbc4b6130a..6ff77f082e6c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -81,6 +81,7 @@ struct bnxt_en_dev {
* mode only. Will be
* updated in resume.
*/
+ void __iomem *bar0;
};
static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index e0a4cb7e3f50..c153dc083aff 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -1402,7 +1402,7 @@ static void enetc_fixup_clear_rss_rfs(struct pci_dev *pdev)
return;
si = enetc_psi_create(pdev);
- if (si)
+ if (!IS_ERR(si))
enetc_psi_destroy(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF,
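
The one-line fix above hinges on the ERR_PTR()/IS_ERR() convention: enetc_psi_create() reports failure with an encoded error pointer rather than NULL, so a plain NULL test always passes. A simplified userspace re-implementation of the helpers (not the kernel headers) illustrates the difference.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical creator that mirrors the "error pointer, never NULL" style. */
static void *psi_create(int fail)
{
	static int dummy;

	return fail ? ERR_PTR(-ENOMEM) : &dummy;
}

int main(void)
{
	void *si = psi_create(1);

	/* The old "if (si)" test passes even for an error pointer. */
	printf("si != NULL: %d\n", si != NULL);
	printf("IS_ERR(si): %d, err=%ld\n", IS_ERR(si), PTR_ERR(si));
	return 0;
}
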
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index ea0e38b4d9e9..f281e42a7ef9 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -570,7 +570,10 @@ static int gve_rx_append_frags(struct napi_struct *napi,
if (!skb)
return -1;
- skb_shinfo(rx->ctx.skb_tail)->frag_list = skb;
+ if (rx->ctx.skb_tail == rx->ctx.skb_head)
+ skb_shinfo(rx->ctx.skb_head)->frag_list = skb;
+ else
+ rx->ctx.skb_tail->next = skb;
rx->ctx.skb_tail = skb;
num_frags = 0;
}
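
A tiny userspace model of the list handling fixed above, with a plain struct standing in for struct sk_buff: the first appended buffer hangs off the head's frag_list pointer, and every later buffer is chained through the previous tail's next pointer.

#include <stdio.h>

struct buf {
	int id;
	struct buf *frag_list;	/* only meaningful on the head */
	struct buf *next;	/* chain of further fragments  */
};

static void append_frag(struct buf *head, struct buf **tail, struct buf *frag)
{
	if (*tail == head)
		head->frag_list = frag;	/* first fragment  */
	else
		(*tail)->next = frag;	/* later fragments */
	*tail = frag;
}

int main(void)
{
	struct buf head = { .id = 0 }, f1 = { .id = 1 }, f2 = { .id = 2 };
	struct buf *tail = &head;

	append_frag(&head, &tail, &f1);
	append_frag(&head, &tail, &f2);

	for (struct buf *b = head.frag_list; b; b = b->next)
		printf("frag %d\n", b->id);
	return 0;
}
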
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index a4b43bcd2f0c..aaf1f42624a7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -814,6 +814,7 @@ struct hnae3_tc_info {
u8 max_tc; /* Total number of TCs */
u8 num_tc; /* Total number of enabled TCs */
bool mqprio_active;
+ bool dcb_ets_active;
};
#define HNAE3_MAX_DSCP 64
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index f276b5ecb431..b8508533878b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1045,6 +1045,7 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs;
struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ struct net_device *dev = kinfo->netdev;
*pos += scnprintf(buf + *pos, len - *pos, "dev_spec:\n");
*pos += scnprintf(buf + *pos, len - *pos, "MAC entry num: %u\n",
@@ -1087,6 +1088,9 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
dev_specs->mc_mac_size);
*pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n",
dev_specs->mac_stats_num);
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "TX timeout threshold: %d seconds\n",
+ dev->watchdog_timeo / HZ);
}
static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
@@ -1411,9 +1415,9 @@ int hns3_dbg_init(struct hnae3_handle *handle)
return 0;
out:
- mutex_destroy(&handle->dbgfs_lock);
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
+ mutex_destroy(&handle->dbgfs_lock);
return ret;
}
@@ -1421,6 +1425,9 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
{
u32 i;
+ debugfs_remove_recursive(handle->hnae3_dbgfs);
+ handle->hnae3_dbgfs = NULL;
+
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
if (handle->dbgfs_buf[i]) {
kvfree(handle->dbgfs_buf[i]);
@@ -1428,8 +1435,6 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
}
mutex_destroy(&handle->dbgfs_lock);
- debugfs_remove_recursive(handle->hnae3_dbgfs);
- handle->hnae3_dbgfs = NULL;
}
void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index eac2d0573241..b4895c7b3efd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2103,8 +2103,12 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
*/
if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
!ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * hns3_nic_reclaim_desc(). Ensure that the BD valid bit
+ * is updated.
+ */
+ smp_store_release(&ring->last_to_use, ring->next_to_use);
hns3_tx_push_bd(ring, num);
- WRITE_ONCE(ring->last_to_use, ring->next_to_use);
return;
}
@@ -2115,6 +2119,11 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
return;
}
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
+ */
+ smp_store_release(&ring->last_to_use, ring->next_to_use);
+
if (ring->tqp->mem_base)
hns3_tx_mem_doorbell(ring);
else
@@ -2122,7 +2131,6 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
ring->pending_buf = 0;
- WRITE_ONCE(ring->last_to_use, ring->next_to_use);
}
static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
@@ -3308,8 +3316,6 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
-
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
@@ -3563,9 +3569,8 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
int *bytes, int *pkts, int budget)
{
- /* pair with ring->last_to_use update in hns3_tx_doorbell(),
- * smp_store_release() is not used in hns3_tx_doorbell() because
- * the doorbell operation already have the needed barrier operation.
+ /* This smp_load_acquire() pairs with smp_store_release() in
+ * hns3_tx_doorbell().
*/
int ltu = smp_load_acquire(&ring->last_to_use);
int ntc = ring->next_to_clean;
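
A minimal C11 model of the pairing described in the new comments, with userspace atomics standing in for smp_store_release()/smp_load_acquire(): once the consumer observes the released index, the descriptor data written before the release store is guaranteed to be visible.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int desc_valid;			/* stands in for the BD valid bit */
static _Atomic int last_to_use;		/* published ring index           */

static void *producer(void *arg)
{
	desc_valid = 1;			/* fill the descriptor first      */
	/* release: descriptor contents are visible before the index     */
	atomic_store_explicit(&last_to_use, 1, memory_order_release);
	return NULL;
}

static void *consumer(void *arg)
{
	/* acquire: once the new index is seen, desc_valid must be 1     */
	while (atomic_load_explicit(&last_to_use, memory_order_acquire) != 1)
		;
	printf("desc_valid=%d\n", desc_valid);
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&p, NULL, producer, NULL);
	pthread_create(&c, NULL, consumer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}
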
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 36858a72d771..682239f33082 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -773,7 +773,9 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
hns3_get_ksettings(h, cmd);
break;
case HNAE3_MEDIA_TYPE_FIBER:
- if (module_type == HNAE3_MODULE_TYPE_CR)
+ if (module_type == HNAE3_MODULE_TYPE_UNKNOWN)
+ cmd->base.port = PORT_OTHER;
+ else if (module_type == HNAE3_MODULE_TYPE_CR)
cmd->base.port = PORT_DA;
else
cmd->base.port = PORT_FIBRE;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index fad5a5ff3cda..b98301e205f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -259,7 +259,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
int ret;
if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
- hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+ h->kinfo.tc_info.mqprio_active)
return -EINVAL;
ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
@@ -275,10 +275,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
}
hclge_tm_schd_info_update(hdev, num_tc);
- if (num_tc > 1)
- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
- else
- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+ h->kinfo.tc_info.dcb_ets_active = num_tc > 1;
ret = hclge_ieee_ets_to_tm_info(hdev, ets);
if (ret)
@@ -487,7 +484,7 @@ static u8 hclge_getdcbx(struct hnae3_handle *h)
struct hclge_vport *vport = hclge_get_vport(h);
struct hclge_dev *hdev = vport->back;
- if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+ if (h->kinfo.tc_info.mqprio_active)
return 0;
return hdev->dcbx_cap;
@@ -611,7 +608,8 @@ static int hclge_setup_tc(struct hnae3_handle *h,
if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
return -EBUSY;
- if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
+ kinfo = &vport->nic.kinfo;
+ if (kinfo->tc_info.dcb_ets_active)
return -EINVAL;
ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
@@ -625,7 +623,6 @@ static int hclge_setup_tc(struct hnae3_handle *h,
if (ret)
return ret;
- kinfo = &vport->nic.kinfo;
memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
kinfo->tc_info.mqprio_active = tc > 0;
@@ -634,13 +631,6 @@ static int hclge_setup_tc(struct hnae3_handle *h,
if (ret)
goto err_out;
- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
-
- if (tc > 1)
- hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
- else
- hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
-
return hclge_notify_init_up(hdev);
err_out:
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index f01a7a9ee02c..ff3f8f424ad9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -1519,7 +1519,7 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
struct hclge_desc desc[3];
int pos = 0;
int ret, i;
- u32 *req;
+ __le32 *req;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
@@ -1544,22 +1544,22 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
tcam_msg.loc);
/* tcam_data0 ~ tcam_data1 */
- req = (u32 *)req1->tcam_data;
+ req = (__le32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", *req++);
+ "%08x\n", le32_to_cpu(*req++));
/* tcam_data2 ~ tcam_data7 */
- req = (u32 *)req2->tcam_data;
+ req = (__le32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", *req++);
+ "%08x\n", le32_to_cpu(*req++));
/* tcam_data8 ~ tcam_data12 */
- req = (u32 *)req3->tcam_data;
+ req = (__le32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", *req++);
+ "%08x\n", le32_to_cpu(*req++));
return ret;
}
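
The debugfs change above is purely a byte-order fix: the TCAM words are stored little-endian, so they must pass through le32_to_cpu() before printing, otherwise big-endian hosts dump swapped bytes. A portable sketch of what that conversion does (a simplified stand-in, not the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Assemble a CPU-order value from 4 little-endian bytes. */
static uint32_t le32_to_cpu_bytes(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* Raw bytes as the device stores 0x12345678 in little-endian order */
	const uint8_t raw[4] = { 0x78, 0x56, 0x34, 0x12 };

	printf("%08x\n", le32_to_cpu_bytes(raw));
	return 0;
}
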
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 0f50dba6cc47..8ca368424436 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -11026,6 +11026,7 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
static void hclge_info_show(struct hclge_dev *hdev)
{
+ struct hnae3_handle *handle = &hdev->vport->nic;
struct device *dev = &hdev->pdev->dev;
dev_info(dev, "PF info begin:\n");
@@ -11042,9 +11043,9 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
- hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
+ handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
dev_info(dev, "MQPRIO %s\n",
- hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
+ handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
dev_info(dev, "Default tx spare buffer size: %u\n",
hdev->tx_spare_buf_size);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index ec233ec57222..7bc2049b723d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -919,8 +919,6 @@ struct hclge_dev {
#define HCLGE_FLAG_MAIN BIT(0)
#define HCLGE_FLAG_DCB_CAPABLE BIT(1)
-#define HCLGE_FLAG_DCB_ENABLE BIT(2)
-#define HCLGE_FLAG_MQPRIO_ENABLE BIT(3)
u32 flag;
u32 pkt_buf_size; /* Total pf buf size for tx/rx */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 015b78144114..a2b759531cb7 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -34,11 +34,11 @@ struct igb_adapter;
/* TX/RX descriptor defines */
#define IGB_DEFAULT_TXD 256
#define IGB_DEFAULT_TX_WORK 128
-#define IGB_MIN_TXD 80
+#define IGB_MIN_TXD 64
#define IGB_MAX_TXD 4096
#define IGB_DEFAULT_RXD 256
-#define IGB_MIN_RXD 80
+#define IGB_MIN_RXD 64
#define IGB_MAX_RXD 4096
#define IGB_DEFAULT_ITR 3 /* dynamic */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 1ab787ed254d..13ba9c74bd84 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3933,8 +3933,9 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
- /* Virtualization features not supported on i210 family. */
- if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+ /* Virtualization features not supported on i210 and 82580 family. */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) ||
+ (hw->mac.type == e1000_82580))
return;
/* Of the below we really only want the effect of getting
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 57d39ee00b58..7b83678ba83a 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -39,11 +39,11 @@ enum latency_range {
/* Tx/Rx descriptor defines */
#define IGBVF_DEFAULT_TXD 256
#define IGBVF_MAX_TXD 4096
-#define IGBVF_MIN_TXD 80
+#define IGBVF_MIN_TXD 64
#define IGBVF_DEFAULT_RXD 256
#define IGBVF_MAX_RXD 4096
-#define IGBVF_MIN_RXD 80
+#define IGBVF_MIN_RXD 64
#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 8ebe6999a528..f48f82d5e274 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -379,11 +379,11 @@ static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
/* TX/RX descriptor defines */
#define IGC_DEFAULT_TXD 256
#define IGC_DEFAULT_TX_WORK 128
-#define IGC_MIN_TXD 80
+#define IGC_MIN_TXD 64
#define IGC_MAX_TXD 4096
#define IGC_DEFAULT_RXD 256
-#define IGC_MIN_RXD 80
+#define IGC_MIN_RXD 64
#define IGC_MAX_RXD 4096
/* Supported Rx Buffer Sizes */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index c2f68678e947..23c2f2ed2fb8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -846,6 +846,21 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return 0;
}
+static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
+ u16 *smq, u16 *smq_mask)
+{
+ struct nix_cn10k_aq_enq_req *aq_req;
+
+ if (!is_rvu_otx2(rvu)) {
+ aq_req = (struct nix_cn10k_aq_enq_req *)req;
+ *smq = aq_req->sq.smq;
+ *smq_mask = aq_req->sq_mask.smq;
+ } else {
+ *smq = req->sq.smq;
+ *smq_mask = req->sq_mask.smq;
+ }
+}
+
static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
@@ -857,6 +872,7 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
struct rvu_block *block;
struct admin_queue *aq;
struct rvu_pfvf *pfvf;
+ u16 smq, smq_mask;
void *ctx, *mask;
bool ena;
u64 cfg;
@@ -928,13 +944,14 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
if (rc)
return rc;
+ nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
/* Check if SQ pointed SMQ belongs to this PF/VF or not */
if (req->ctype == NIX_AQ_CTYPE_SQ &&
((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
(req->op == NIX_AQ_INSTOP_WRITE &&
- req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
+ req->sq_mask.ena && req->sq.ena && smq_mask))) {
if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
- pcifunc, req->sq.smq))
+ pcifunc, smq))
return NIX_AF_ERR_AQ_ENQUEUE;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index 92d3952dfa8b..feeb41693c17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -17,8 +17,10 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
- if (mlx5e_is_eswitch_flow(parse_state->flow))
+ if (mlx5e_is_eswitch_flow(parse_state->flow)) {
attr->esw_attr->split_count = attr->esw_attr->out_count;
+ parse_state->if_count = 0;
+ }
attr->flags |= MLX5_ATTR_FLAG_CT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index 291193f7120d..f63402c48028 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -294,6 +294,7 @@ parse_mirred_ovs_master(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
+ parse_state->if_count = 0;
esw_attr->out_count++;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
index 3b272bbf4c53..368a95fa77d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
@@ -98,8 +98,10 @@ tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
esw_attr->split_count = esw_attr->out_count;
+ parse_state->if_count = 0;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
index ad09a8a5f36e..2d1d4a04501b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
@@ -66,6 +66,7 @@ tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
+ parse_state->if_count = 0;
esw_attr->out_count++;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
index c8a3eaf189f6..a13c5e707b83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -166,6 +166,7 @@ tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
return err;
esw_attr->split_count = esw_attr->out_count;
+ parse_state->if_count = 0;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
index 310b99230760..f17575b09788 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
@@ -65,8 +65,10 @@ tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
- if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
attr->esw_attr->split_count = attr->esw_attr->out_count;
+ parse_state->if_count = 0;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 318083690fcd..c24828b688ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -3936,6 +3936,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
}
i_split = i + 1;
+ parse_state->if_count = 0;
list_add(&attr->list, &flow->attrs);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 6cd7d6497e10..d4cde6555063 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1276,12 +1276,19 @@ int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events)
{
+ bool pf_needed;
int ret;
+ pf_needed = mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+ esw->mode == MLX5_ESWITCH_LEGACY;
+
/* Enable PF vport */
- ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF, enabled_events);
- if (ret)
- return ret;
+ if (pf_needed) {
+ ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF,
+ enabled_events);
+ if (ret)
+ return ret;
+ }
/* Enable external host PF HCA */
ret = host_pf_enable_hca(esw->dev);
@@ -1317,7 +1324,8 @@ ec_vf_err:
ecpf_err:
host_pf_disable_hca(esw->dev);
pf_hca_err:
- mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
+ if (pf_needed)
+ mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
return ret;
}
@@ -1335,7 +1343,10 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
}
host_pf_disable_hca(esw->dev);
- mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
+
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+ esw->mode == MLX5_ESWITCH_LEGACY)
+ mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
}
static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 752fb0dfb111..b296ac52a439 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3216,26 +3216,47 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
esw_acl_ingress_ofld_cleanup(esw, vport);
}
-static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
+static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
{
- struct mlx5_vport *vport;
+ struct mlx5_vport *uplink, *manager;
+ int ret;
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
+ uplink = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ if (IS_ERR(uplink))
+ return PTR_ERR(uplink);
+
+ ret = esw_vport_create_offloads_acl_tables(esw, uplink);
+ if (ret)
+ return ret;
+
+ manager = mlx5_eswitch_get_vport(esw, esw->manager_vport);
+ if (IS_ERR(manager)) {
+ ret = PTR_ERR(manager);
+ goto err_manager;
+ }
- return esw_vport_create_offloads_acl_tables(esw, vport);
+ ret = esw_vport_create_offloads_acl_tables(esw, manager);
+ if (ret)
+ goto err_manager;
+
+ return 0;
+
+err_manager:
+ esw_vport_destroy_offloads_acl_tables(esw, uplink);
+ return ret;
}
-static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
+static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
- if (IS_ERR(vport))
- return;
+ vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
+ if (!IS_ERR(vport))
+ esw_vport_destroy_offloads_acl_tables(esw, vport);
- esw_vport_destroy_offloads_acl_tables(esw, vport);
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ if (!IS_ERR(vport))
+ esw_vport_destroy_offloads_acl_tables(esw, vport);
}
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
@@ -3280,7 +3301,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.indir = indir;
- err = esw_create_uplink_offloads_acl_tables(esw);
+ err = esw_create_offloads_acl_tables(esw);
if (err)
goto create_acl_err;
@@ -3321,7 +3342,7 @@ create_fdb_err:
create_restore_err:
esw_destroy_offloads_table(esw);
create_offloads_err:
- esw_destroy_uplink_offloads_acl_tables(esw);
+ esw_destroy_offloads_acl_tables(esw);
create_acl_err:
mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
create_indir_err:
@@ -3337,7 +3358,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw_destroy_offloads_fdb_tables(esw);
esw_destroy_restore_table(esw);
esw_destroy_offloads_table(esw);
- esw_destroy_uplink_offloads_acl_tables(esw);
+ esw_destroy_offloads_acl_tables(esw);
mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 2375cef577e4..f77a2d3ef37e 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -359,26 +359,36 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
struct efx_nic *efx = channel->efx;
struct efx_rx_buffer *rx_buf =
- efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+ efx_rx_buffer(rx_queue, channel->rx_pkt_index);
u8 *eh = efx_rx_buf_va(rx_buf);
/* Read length from the prefix if necessary. This already
* excludes the length of the prefix itself.
*/
- if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
+ if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN) {
rx_buf->len = le16_to_cpup((__le16 *)
(eh + efx->rx_packet_len_offset));
+ /* A known issue may prevent this being filled in;
+ * if that happens, just drop the packet.
+ * Must do that in the driver since passing a zero-length
+ * packet up to the stack may cause a crash.
+ */
+ if (unlikely(!rx_buf->len)) {
+ efx_free_rx_buffers(rx_queue, rx_buf,
+ channel->rx_pkt_n_frags);
+ channel->n_rx_frm_trunc++;
+ goto out;
+ }
+ }
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
if (unlikely(efx->loopback_selftest)) {
- struct efx_rx_queue *rx_queue;
-
efx_loopback_rx_packet(efx, eh, rx_buf->len);
- rx_queue = efx_channel_get_rx_queue(channel);
efx_free_rx_buffers(rx_queue, rx_buf,
channel->rx_pkt_n_frags);
goto out;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 35f4b1484029..0f28795e581c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -419,9 +419,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
return ERR_PTR(phy_mode);
plat->phy_interface = phy_mode;
- plat->mac_interface = stmmac_of_get_mac_mode(np);
- if (plat->mac_interface < 0)
- plat->mac_interface = plat->phy_interface;
+ rc = stmmac_of_get_mac_mode(np);
+ plat->mac_interface = rc < 0 ? plat->phy_interface : rc;
/* Some wrapper drivers still rely on phy_node. Let's save it while
* they are not converted to phylink. */
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 9fb567524220..6ed38a3cdd73 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -427,8 +427,8 @@ out:
* a block of 6pack data has been received, which can now be decapsulated
* and sent on to some IP layer for further processing.
*/
-static void sixpack_receive_buf(struct tty_struct *tty,
- const unsigned char *cp, const char *fp, int count)
+static void sixpack_receive_buf(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count)
{
struct sixpack *sp;
int count1;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index c251e04ae047..5f38a002bd9e 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -874,8 +874,8 @@ static int mkiss_ioctl(struct tty_struct *tty, unsigned int cmd,
* a block of data has been received, which can now be decapsulated
* and sent on to the AX.25 layer for further processing.
*/
-static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count)
+static void mkiss_receive_buf(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count)
{
struct mkiss *ax = mkiss_get(tty);
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index c3f30663070f..b7e151439c48 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1330,8 +1330,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
struct crypto_aead *tfm;
int ret;
- /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
- tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
if (IS_ERR(tfm))
return tfm;
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
index 9f9eaf896047..5bf6fdff701c 100644
--- a/drivers/net/mctp/mctp-serial.c
+++ b/drivers/net/mctp/mctp-serial.c
@@ -390,9 +390,8 @@ static void mctp_serial_push(struct mctp_serial *dev, unsigned char c)
}
}
-static void mctp_serial_tty_receive_buf(struct tty_struct *tty,
- const unsigned char *c,
- const char *f, int len)
+static void mctp_serial_tty_receive_buf(struct tty_struct *tty, const u8 *c,
+ const u8 *f, size_t len)
{
struct mctp_serial *dev = tty->disc_data;
int i;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index b6d7981b2d1e..927d3d54658e 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1800,9 +1800,6 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = {
/* Transmit waveform amplitude can be improved (1000BASE-T, 100BASE-TX, 10BASE-Te) */
{0x1c, 0x04, 0x00d0},
- /* Energy Efficient Ethernet (EEE) feature select must be manually disabled */
- {0x07, 0x3c, 0x0000},
-
/* Register settings are required to meet data sheet supply current specifications */
{0x1c, 0x13, 0x6eff},
{0x1c, 0x14, 0xe6ff},
@@ -1847,6 +1844,12 @@ static int ksz9477_config_init(struct phy_device *phydev)
return err;
}
+ /* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
+ * in this switch shall be regarded as broken.
+ */
+ if (phydev->dev_flags & MICREL_NO_EEE)
+ phydev->eee_broken_modes = -1;
+
err = genphy_restart_aneg(phydev);
if (err)
return err;
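
How the two hunks cooperate, as a userspace sketch (the flag bit, chip id and structs below are illustrative stand-ins, not the kernel definitions): the switch driver advertises a no-EEE flag for the affected chip, and the PHY driver turns that flag into "every EEE link mode is broken".

#include <stdint.h>
#include <stdio.h>

#define MICREL_NO_EEE    (1u << 1)	/* illustrative flag bit    */
#define KSZ9477_CHIP_ID  0x9477u	/* illustrative chip id     */

struct phy_dev {
	uint32_t dev_flags;
	uint32_t eee_broken_modes;
};

static uint32_t get_phy_flags(uint32_t chip_id)
{
	/* Errata DS80000754C: EEE must stay disabled on this chip. */
	return chip_id == KSZ9477_CHIP_ID ? MICREL_NO_EEE : 0;
}

static void phy_config_init(struct phy_dev *phydev)
{
	if (phydev->dev_flags & MICREL_NO_EEE)
		phydev->eee_broken_modes = ~0u;	/* mark every mode broken */
}

int main(void)
{
	struct phy_dev phy = { .dev_flags = get_phy_flags(KSZ9477_CHIP_ID) };

	phy_config_init(&phy);
	printf("eee_broken_modes=0x%08x\n", phy.eee_broken_modes);
	return 0;
}
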
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index 15a179631903..fbaaa8c102a1 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -98,7 +98,7 @@ static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
static int ppp_async_push(struct asyncppp *ap);
static void ppp_async_flush_output(struct asyncppp *ap);
static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
- const char *flags, int count);
+ const u8 *flags, int count);
static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
unsigned long arg);
static void ppp_async_process(struct tasklet_struct *t);
@@ -257,9 +257,8 @@ static void ppp_asynctty_hangup(struct tty_struct *tty)
* Pppd reads and writes packets via /dev/ppp instead.
*/
static ssize_t
-ppp_asynctty_read(struct tty_struct *tty, struct file *file,
- unsigned char *buf, size_t count,
- void **cookie, unsigned long offset)
+ppp_asynctty_read(struct tty_struct *tty, struct file *file, u8 *buf,
+ size_t count, void **cookie, unsigned long offset)
{
return -EAGAIN;
}
@@ -269,8 +268,8 @@ ppp_asynctty_read(struct tty_struct *tty, struct file *file,
* from the ppp generic stuff.
*/
static ssize_t
-ppp_asynctty_write(struct tty_struct *tty, struct file *file,
- const unsigned char *buf, size_t count)
+ppp_asynctty_write(struct tty_struct *tty, struct file *file, const u8 *buf,
+ size_t count)
{
return -EAGAIN;
}
@@ -328,17 +327,10 @@ ppp_asynctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
return err;
}
-/* No kernel lock - fine */
-static __poll_t
-ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
-{
- return 0;
-}
-
/* May sleep, don't call from interrupt level or with interrupts disabled */
static void
-ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
- const char *cflags, int count)
+ppp_asynctty_receive(struct tty_struct *tty, const u8 *buf, const u8 *cflags,
+ size_t count)
{
struct asyncppp *ap = ap_get(tty);
unsigned long flags;
@@ -378,7 +370,6 @@ static struct tty_ldisc_ops ppp_ldisc = {
.read = ppp_asynctty_read,
.write = ppp_asynctty_write,
.ioctl = ppp_asynctty_ioctl,
- .poll = ppp_asynctty_poll,
.receive_buf = ppp_asynctty_receive,
.write_wakeup = ppp_asynctty_wakeup,
};
@@ -827,8 +818,7 @@ process_input_packet(struct asyncppp *ap)
other ldisc functions but will not be re-entered */
static void
-ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
- const char *flags, int count)
+ppp_async_input(struct asyncppp *ap, const u8 *buf, const u8 *flags, int count)
{
struct sk_buff *skb;
int c, i, j, n, s, f;
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 18283b7b94bc..ebcdffdf4f0e 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -93,8 +93,8 @@ static int ppp_sync_ioctl(struct ppp_channel *chan, unsigned int cmd,
static void ppp_sync_process(struct tasklet_struct *t);
static int ppp_sync_push(struct syncppp *ap);
static void ppp_sync_flush_output(struct syncppp *ap);
-static void ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
- const char *flags, int count);
+static void ppp_sync_input(struct syncppp *ap, const u8 *buf, const u8 *flags,
+ int count);
static const struct ppp_channel_ops sync_ops = {
.start_xmit = ppp_sync_send,
@@ -255,8 +255,7 @@ static void ppp_sync_hangup(struct tty_struct *tty)
* Pppd reads and writes packets via /dev/ppp instead.
*/
static ssize_t
-ppp_sync_read(struct tty_struct *tty, struct file *file,
- unsigned char *buf, size_t count,
+ppp_sync_read(struct tty_struct *tty, struct file *file, u8 *buf, size_t count,
void **cookie, unsigned long offset)
{
return -EAGAIN;
@@ -267,8 +266,8 @@ ppp_sync_read(struct tty_struct *tty, struct file *file,
* from the ppp generic stuff.
*/
static ssize_t
-ppp_sync_write(struct tty_struct *tty, struct file *file,
- const unsigned char *buf, size_t count)
+ppp_sync_write(struct tty_struct *tty, struct file *file, const u8 *buf,
+ size_t count)
{
return -EAGAIN;
}
@@ -321,17 +320,10 @@ ppp_synctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
return err;
}
-/* No kernel lock - fine */
-static __poll_t
-ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
-{
- return 0;
-}
-
/* May sleep, don't call from interrupt level or with interrupts disabled */
static void
-ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
- const char *cflags, int count)
+ppp_sync_receive(struct tty_struct *tty, const u8 *buf, const u8 *cflags,
+ size_t count)
{
struct syncppp *ap = sp_get(tty);
unsigned long flags;
@@ -371,7 +363,6 @@ static struct tty_ldisc_ops ppp_sync_ldisc = {
.read = ppp_sync_read,
.write = ppp_sync_write,
.ioctl = ppp_synctty_ioctl,
- .poll = ppp_sync_poll,
.receive_buf = ppp_sync_receive,
.write_wakeup = ppp_sync_wakeup,
};
@@ -663,8 +654,7 @@ ppp_sync_flush_output(struct syncppp *ap)
* frame is considered to be in error and is tossed.
*/
static void
-ppp_sync_input(struct syncppp *ap, const unsigned char *buf,
- const char *flags, int count)
+ppp_sync_input(struct syncppp *ap, const u8 *buf, const u8 *flags, int count)
{
struct sk_buff *skb;
unsigned char *p;
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 6865d32270e5..e4280e37fec9 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -685,8 +685,8 @@ static void sl_setup(struct net_device *dev)
* in parallel
*/
-static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count)
+static void slip_receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp,
+ size_t count)
{
struct slip *sl = tty->disc_data;
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index ce1f6081d582..83b8452220ec 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1322,11 +1322,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
}
/* close the requested serial port */
-static int hso_serial_write(struct tty_struct *tty, const unsigned char *buf,
- int count)
+static ssize_t hso_serial_write(struct tty_struct *tty, const u8 *buf,
+ size_t count)
{
struct hso_serial *serial = tty->driver_data;
- int space, tx_bytes;
unsigned long flags;
/* sanity check */
@@ -1337,21 +1336,16 @@ static int hso_serial_write(struct tty_struct *tty, const unsigned char *buf,
spin_lock_irqsave(&serial->serial_lock, flags);
- space = serial->tx_data_length - serial->tx_buffer_count;
- tx_bytes = (count < space) ? count : space;
+ count = min_t(size_t, serial->tx_data_length - serial->tx_buffer_count,
+ count);
+ memcpy(serial->tx_buffer + serial->tx_buffer_count, buf, count);
+ serial->tx_buffer_count += count;
- if (!tx_bytes)
- goto out;
-
- memcpy(serial->tx_buffer + serial->tx_buffer_count, buf, tx_bytes);
- serial->tx_buffer_count += tx_bytes;
-
-out:
spin_unlock_irqrestore(&serial->serial_lock, flags);
hso_kick_transmit(serial);
/* done */
- return tx_bytes;
+ return count;
}
/* how much room is there for writing */
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index d43e62ebc2fc..9c6f4f83f22b 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -344,6 +344,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
struct veth_rq *rq = NULL;
+ int ret = NETDEV_TX_OK;
struct net_device *rcv;
int length = skb->len;
bool use_napi = false;
@@ -378,11 +379,12 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
drop:
atomic64_inc(&priv->dropped);
+ ret = NET_XMIT_DROP;
}
rcu_read_unlock();
- return NETDEV_TX_OK;
+ return ret;
}
static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 231ad91a919d..fe7f314d65c9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -132,6 +132,14 @@ struct virtnet_interrupt_coalesce {
u32 max_usecs;
};
+/* The dma information of pages allocated at a time. */
+struct virtnet_rq_dma {
+ dma_addr_t addr;
+ u32 ref;
+ u16 len;
+ u16 need_sync;
+};
+
/* Internal representation of a send virtqueue */
struct send_queue {
/* Virtqueue associated with this send _queue */
@@ -185,6 +193,12 @@ struct receive_queue {
char name[16];
struct xdp_rxq_info xdp_rxq;
+
+ /* Record the last dma info to free after a new page is allocated. */
+ struct virtnet_rq_dma *last_dma;
+
+ /* Do the DMA mapping ourselves */
+ bool do_dma;
};
/* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -580,6 +594,156 @@ ok:
return skb;
}
+static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
+{
+ struct page *page = virt_to_head_page(buf);
+ struct virtnet_rq_dma *dma;
+ void *head;
+ int offset;
+
+ head = page_address(page);
+
+ dma = head;
+
+ --dma->ref;
+
+ if (dma->ref) {
+ if (dma->need_sync && len) {
+ offset = buf - (head + sizeof(*dma));
+
+ virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, offset,
+ len, DMA_FROM_DEVICE);
+ }
+
+ return;
+ }
+
+ virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ put_page(page);
+}
+
+static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
+{
+ void *buf;
+
+ buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
+ if (buf && rq->do_dma)
+ virtnet_rq_unmap(rq, buf, *len);
+
+ return buf;
+}
+
+static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
+{
+ void *buf;
+
+ buf = virtqueue_detach_unused_buf(rq->vq);
+ if (buf && rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
+
+ return buf;
+}
+
+static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
+{
+ struct virtnet_rq_dma *dma;
+ dma_addr_t addr;
+ u32 offset;
+ void *head;
+
+ if (!rq->do_dma) {
+ sg_init_one(rq->sg, buf, len);
+ return;
+ }
+
+ head = page_address(rq->alloc_frag.page);
+
+ offset = buf - head;
+
+ dma = head;
+
+ addr = dma->addr - sizeof(*dma) + offset;
+
+ sg_init_table(rq->sg, 1);
+ rq->sg[0].dma_address = addr;
+ rq->sg[0].length = len;
+}
+
+static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
+{
+ struct page_frag *alloc_frag = &rq->alloc_frag;
+ struct virtnet_rq_dma *dma;
+ void *buf, *head;
+ dma_addr_t addr;
+
+ if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
+ return NULL;
+
+ head = page_address(alloc_frag->page);
+
+ if (rq->do_dma) {
+ dma = head;
+
+ /* new pages */
+ if (!alloc_frag->offset) {
+ if (rq->last_dma) {
+ /* Now, the new page is allocated, the last dma
+ * will not be used. So the dma can be unmapped
+ * if the ref is 0.
+ */
+ virtnet_rq_unmap(rq, rq->last_dma, 0);
+ rq->last_dma = NULL;
+ }
+
+ dma->len = alloc_frag->size - sizeof(*dma);
+
+ addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+ dma->len, DMA_FROM_DEVICE, 0);
+ if (virtqueue_dma_mapping_error(rq->vq, addr))
+ return NULL;
+
+ dma->addr = addr;
+ dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+
+ /* Add a reference to dma to prevent the entire dma from
+ * being released during error handling. This reference
+ * will be freed after the pages are no longer used.
+ */
+ get_page(alloc_frag->page);
+ dma->ref = 1;
+ alloc_frag->offset = sizeof(*dma);
+
+ rq->last_dma = dma;
+ }
+
+ ++dma->ref;
+ }
+
+ buf = head + alloc_frag->offset;
+
+ get_page(alloc_frag->page);
+ alloc_frag->offset += size;
+
+ return buf;
+}
+
+static void virtnet_rq_set_premapped(struct virtnet_info *vi)
+{
+ int i;
+
+ /* disable for big mode */
+ if (!vi->mergeable_rx_bufs && vi->big_packets)
+ return;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ if (virtqueue_set_dma_premapped(vi->rq[i].vq))
+ continue;
+
+ vi->rq[i].do_dma = true;
+ }
+}
+
static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
unsigned int len;
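
The heart of the new receive path above is the small header placed at the start of every page: it records the DMA mapping and a reference count, and the mapping is torn down only when the last buffer carved from that page comes back. A userspace sketch of that bookkeeping, with malloc() and a flag standing in for pages and DMA mappings:

#include <stdio.h>
#include <stdlib.h>

struct rq_dma {
	unsigned int ref;	/* buffers still outstanding           */
	int mapped;		/* stands in for the DMA mapping state */
};

static struct rq_dma *block_alloc(void)
{
	struct rq_dma *dma = calloc(1, 4096);

	dma->mapped = 1;	/* "map" the whole block once          */
	dma->ref = 1;		/* allocator's own reference           */
	return dma;
}

static void *buf_get(struct rq_dma *dma, size_t *off, size_t len)
{
	dma->ref++;		/* one reference per carved-out buffer */
	*off += len;
	return (char *)(dma + 1) + (*off - len);
}

static void buf_put(struct rq_dma *dma)
{
	if (--dma->ref == 0) {
		dma->mapped = 0;	/* "unmap" on the last reference */
		free(dma);
	}
}

int main(void)
{
	size_t off = 0;
	struct rq_dma *dma = block_alloc();
	void *a = buf_get(dma, &off, 256);
	void *b = buf_get(dma, &off, 256);

	(void)a; (void)b;
	buf_put(dma);		/* buffer a returned                    */
	buf_put(dma);		/* buffer b returned                    */
	buf_put(dma);		/* allocator reference dropped; freed   */
	printf("done\n");
	return 0;
}
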
@@ -935,7 +1099,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
void *buf;
int off;
- buf = virtqueue_get_buf(rq->vq, &buflen);
+ buf = virtnet_rq_get_buf(rq, &buflen, NULL);
if (unlikely(!buf))
goto err_buf;
@@ -1155,7 +1319,7 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
int len;
while (num_buf-- > 1) {
- buf = virtqueue_get_buf(rq->vq, &len);
+ buf = virtnet_rq_get_buf(rq, &len, NULL);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers missing\n",
dev->name, num_buf);
@@ -1263,7 +1427,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
return -EINVAL;
while (--*num_buf > 0) {
- buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
+ buf = virtnet_rq_get_buf(rq, &len, &ctx);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n",
dev->name, *num_buf,
@@ -1492,7 +1656,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
while (--num_buf) {
int num_skb_frags;
- buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
+ buf = virtnet_rq_get_buf(rq, &len, &ctx);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n",
dev->name, num_buf,
@@ -1651,7 +1815,6 @@ frame_err:
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
gfp_t gfp)
{
- struct page_frag *alloc_frag = &rq->alloc_frag;
char *buf;
unsigned int xdp_headroom = virtnet_get_headroom(vi);
void *ctx = (void *)(unsigned long)xdp_headroom;
@@ -1660,17 +1823,21 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
len = SKB_DATA_ALIGN(len) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
+
+ buf = virtnet_rq_alloc(rq, len, gfp);
+ if (unlikely(!buf))
return -ENOMEM;
- buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
- get_page(alloc_frag->page);
- alloc_frag->offset += len;
- sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
- vi->hdr_len + GOOD_PACKET_LEN);
+ virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
+ vi->hdr_len + GOOD_PACKET_LEN);
+
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
- if (err < 0)
+ if (err < 0) {
+ if (rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
+ }
+
return err;
}
@@ -1747,23 +1914,22 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
unsigned int headroom = virtnet_get_headroom(vi);
unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
- char *buf;
+ unsigned int len, hole;
void *ctx;
+ char *buf;
int err;
- unsigned int len, hole;
/* Extra tailroom is needed to satisfy XDP's assumption. This
* means rx frags coalescing won't work, but consider we've
* disabled GSO for XDP, it won't be a big issue.
*/
len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
- if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
+
+ buf = virtnet_rq_alloc(rq, len + room, gfp);
+ if (unlikely(!buf))
return -ENOMEM;
- buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
buf += headroom; /* advance address leaving hole at front of pkt */
- get_page(alloc_frag->page);
- alloc_frag->offset += len + room;
hole = alloc_frag->size - alloc_frag->offset;
if (hole < len + room) {
/* To avoid internal fragmentation, if there is very likely not
@@ -1777,11 +1943,15 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
alloc_frag->offset += hole;
}
- sg_init_one(rq->sg, buf, len);
+ virtnet_rq_init_one_sg(rq, buf, len);
+
ctx = mergeable_len_to_ctx(len + room, headroom);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
- if (err < 0)
+ if (err < 0) {
+ if (rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
+ }
return err;
}
@@ -1902,13 +2072,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
void *ctx;
while (stats.packets < budget &&
- (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
+ (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
stats.packets++;
}
} else {
while (stats.packets < budget &&
- (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+ (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
stats.packets++;
}
@@ -3808,8 +3978,11 @@ static void free_receive_page_frags(struct virtnet_info *vi)
{
int i;
for (i = 0; i < vi->max_queue_pairs; i++)
- if (vi->rq[i].alloc_frag.page)
+ if (vi->rq[i].alloc_frag.page) {
+ if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+ virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
put_page(vi->rq[i].alloc_frag.page);
+ }
}
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
@@ -3846,9 +4019,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
}
for (i = 0; i < vi->max_queue_pairs; i++) {
- struct virtqueue *vq = vi->rq[i].vq;
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
- virtnet_rq_free_unused_buf(vq, buf);
+ struct receive_queue *rq = &vi->rq[i];
+
+ while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
+ virtnet_rq_free_unused_buf(rq->vq, buf);
cond_resched();
}
}
@@ -4022,6 +4196,8 @@ static int init_vqs(struct virtnet_info *vi)
if (ret)
goto err_free;
+ virtnet_rq_set_premapped(vi);
+
cpus_read_lock();
virtnet_set_affinity(vi);
cpus_read_unlock();
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 5e5c7bf51174..1584665fe3cb 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -286,8 +286,7 @@ static bool mt76u_check_sg(struct mt76_dev *dev)
struct usb_device *udev = interface_to_usbdev(uintf);
return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
- (udev->bus->no_sg_constraint ||
- udev->speed == USB_SPEED_WIRELESS));
+ udev->bus->no_sg_constraint);
}
static int
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index dca25a0c2f33..3ae4b41c59ac 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -336,6 +336,7 @@ MODULE_DEVICE_TABLE(of, of_nxp_nci_i2c_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id acpi_id[] = {
{ "NXP1001" },
+ { "NXP1002" },
{ "NXP7471" },
{ }
};
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index da9befa3d6c4..5bc9c4874fe3 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -226,6 +226,19 @@ config NVMEM_QCOM_QFPROM
This driver can also be built as a module. If so, the module
will be called nvmem_qfprom.
+config NVMEM_QCOM_SEC_QFPROM
+ tristate "QCOM SECURE QFPROM Support"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
+ select QCOM_SCM
+ help
+ Say Y here to enable secure QFPROM support. The secure QFPROM provides access
+ functions for QFPROM data to the rest of the drivers via the nvmem interface.
+
+ This driver can also be built as a module. If so, the module will be called
+ nvmem_sec_qfprom.
+
config NVMEM_RAVE_SP_EEPROM
tristate "Rave SP EEPROM Support"
depends on RAVE_SP_CORE
@@ -247,7 +260,7 @@ config NVMEM_ROCKCHIP_EFUSE
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on HAS_IOMEM
help
- This is a simple drive to dump specified values of Rockchip SoC
+ This is a simple driver to dump specified values of Rockchip SoC
from eFuse, such as cpu-leakage.
This driver can also be built as a module. If so, the module
@@ -258,8 +271,8 @@ config NVMEM_ROCKCHIP_OTP
depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on HAS_IOMEM
help
- This is a simple drive to dump specified values of Rockchip SoC
- from otp, such as cpu-leakage.
+ This is a simple driver to dump specified values of Rockchip SoC
+ from OTP, such as cpu-leakage.
This driver can also be built as a module. If so, the module
will be called nvmem_rockchip_otp.
@@ -392,4 +405,16 @@ config NVMEM_ZYNQMP
If sure, say yes. If unsure, say no.
+config NVMEM_QORIQ_EFUSE
+ tristate "NXP QorIQ eFuse support"
+ depends on PPC_85xx || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ This driver provides read support for the eFuses (SFP) on NXP QorIQ
+ series SoCs. This includes secure boot settings, the globally unique
+ NXP ID 'FUIDR' and the OEM unique ID 'OUIDR'.
+
+ This driver can also be built as a module. If so, the module
+ will be called nvmem_qoriq_efuse.
+
endif
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index cc23ce4ffb1f..423baf089515 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -46,6 +46,8 @@ obj-$(CONFIG_NVMEM_NINTENDO_OTP) += nvmem-nintendo-otp.o
nvmem-nintendo-otp-y := nintendo-otp.o
obj-$(CONFIG_NVMEM_QCOM_QFPROM) += nvmem_qfprom.o
nvmem_qfprom-y := qfprom.o
+obj-$(CONFIG_NVMEM_QCOM_SEC_QFPROM) += nvmem_sec_qfprom.o
+nvmem_sec_qfprom-y := sec-qfprom.o
obj-$(CONFIG_NVMEM_RAVE_SP_EEPROM) += nvmem-rave-sp-eeprom.o
nvmem-rave-sp-eeprom-y := rave-sp-eeprom.o
obj-$(CONFIG_NVMEM_RMEM) += nvmem-rmem.o
@@ -77,3 +79,5 @@ obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o
nvmem-vf610-ocotp-y := vf610-ocotp.o
obj-$(CONFIG_NVMEM_ZYNQMP) += nvmem_zynqmp_nvmem.o
nvmem_zynqmp_nvmem-y := zynqmp_nvmem.o
+obj-$(CONFIG_NVMEM_QORIQ_EFUSE) += nvmem-qoriq-efuse.o
+nvmem-qoriq-efuse-y := qoriq-efuse.o
diff --git a/drivers/nvmem/bcm-ocotp.c b/drivers/nvmem/bcm-ocotp.c
index 0c1fa0c4feb2..2490f44caa40 100644
--- a/drivers/nvmem/bcm-ocotp.c
+++ b/drivers/nvmem/bcm-ocotp.c
@@ -8,7 +8,6 @@
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
/*
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
index 4567c597c87f..9737104f3b76 100644
--- a/drivers/nvmem/brcm_nvram.c
+++ b/drivers/nvmem/brcm_nvram.c
@@ -159,8 +159,7 @@ static int brcm_nvram_probe(struct platform_device *pdev)
return -ENOMEM;
priv->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 3f8c7718412b..eaf6a3fe8ca6 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -17,7 +17,6 @@
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/slab.h>
struct nvmem_device {
@@ -772,12 +771,16 @@ int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner)
list_add(&layout->node, &nvmem_layouts);
spin_unlock(&nvmem_layout_lock);
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_ADD, layout);
+
return 0;
}
EXPORT_SYMBOL_GPL(__nvmem_layout_register);
void nvmem_layout_unregister(struct nvmem_layout *layout)
{
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_REMOVE, layout);
+
spin_lock(&nvmem_layout_lock);
list_del(&layout->node);
spin_unlock(&nvmem_layout_lock);
@@ -786,10 +789,10 @@ EXPORT_SYMBOL_GPL(nvmem_layout_unregister);
static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem)
{
- struct device_node *layout_np, *np = nvmem->dev.of_node;
+ struct device_node *layout_np;
struct nvmem_layout *l, *layout = ERR_PTR(-EPROBE_DEFER);
- layout_np = of_get_child_by_name(np, "nvmem-layout");
+ layout_np = of_nvmem_layout_get_container(nvmem);
if (!layout_np)
return NULL;
@@ -998,17 +1001,17 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (rval)
goto err_remove_cells;
- dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
-
- rval = device_add(&nvmem->dev);
+ rval = nvmem_add_cells_from_fixed_layout(nvmem);
if (rval)
goto err_remove_cells;
- rval = nvmem_add_cells_from_fixed_layout(nvmem);
+ rval = nvmem_add_cells_from_layout(nvmem);
if (rval)
goto err_remove_cells;
- rval = nvmem_add_cells_from_layout(nvmem);
+ dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
+
+ rval = device_add(&nvmem->dev);
if (rval)
goto err_remove_cells;
diff --git a/drivers/nvmem/imx-iim.c b/drivers/nvmem/imx-iim.c
index c86339a7f583..f13bbd164086 100644
--- a/drivers/nvmem/imx-iim.c
+++ b/drivers/nvmem/imx-iim.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c
index f1cbbc9afeb8..cf920542f939 100644
--- a/drivers/nvmem/imx-ocotp-ele.c
+++ b/drivers/nvmem/imx-ocotp-ele.c
@@ -9,7 +9,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/nvmem/imx-ocotp-scu.c b/drivers/nvmem/imx-ocotp-scu.c
index 399e1eb8b4c1..c38d9c1c3f48 100644
--- a/drivers/nvmem/imx-ocotp-scu.c
+++ b/drivers/nvmem/imx-ocotp-scu.c
@@ -11,7 +11,7 @@
#include <linux/firmware/imx/sci.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index ab556c011f3e..a223d9537f22 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -20,7 +20,6 @@
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/nvmem/lpc18xx_otp.c b/drivers/nvmem/lpc18xx_otp.c
index 16c92ea85d49..adc9948e7b2e 100644
--- a/drivers/nvmem/lpc18xx_otp.c
+++ b/drivers/nvmem/lpc18xx_otp.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -68,14 +67,12 @@ static int lpc18xx_otp_probe(struct platform_device *pdev)
{
struct nvmem_device *nvmem;
struct lpc18xx_otp *otp;
- struct resource *res;
otp = devm_kzalloc(&pdev->dev, sizeof(*otp), GFP_KERNEL);
if (!otp)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- otp->base = devm_ioremap_resource(&pdev->dev, res);
+ otp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(otp->base))
return PTR_ERR(otp->base);
diff --git a/drivers/nvmem/meson-mx-efuse.c b/drivers/nvmem/meson-mx-efuse.c
index 13eb14316f46..d6d7aeda31f9 100644
--- a/drivers/nvmem/meson-mx-efuse.c
+++ b/drivers/nvmem/meson-mx-efuse.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -194,7 +193,6 @@ static int meson_mx_efuse_probe(struct platform_device *pdev)
{
const struct meson_mx_efuse_platform_data *drvdata;
struct meson_mx_efuse *efuse;
- struct resource *res;
drvdata = of_device_get_match_data(&pdev->dev);
if (!drvdata)
@@ -204,8 +202,7 @@ static int meson_mx_efuse_probe(struct platform_device *pdev)
if (!efuse)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- efuse->base = devm_ioremap_resource(&pdev->dev, res);
+ efuse->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(efuse->base))
return PTR_ERR(efuse->base);
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
index f822790db49e..70f2d4f2efbf 100644
--- a/drivers/nvmem/qcom-spmi-sdam.c
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -6,8 +6,8 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#define SDAM_MEM_START 0x40
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index c1e893c8a247..14814cba2dd6 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -374,8 +374,7 @@ static int qfprom_probe(struct platform_device *pdev)
return -ENOMEM;
/* The corrected section is always provided */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->qfpcorrected = devm_ioremap_resource(dev, res);
+ priv->qfpcorrected = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->qfpcorrected))
return PTR_ERR(priv->qfpcorrected);
@@ -402,12 +401,10 @@ static int qfprom_probe(struct platform_device *pdev)
priv->qfpraw = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->qfpraw))
return PTR_ERR(priv->qfpraw);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- priv->qfpconf = devm_ioremap_resource(dev, res);
+ priv->qfpconf = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(priv->qfpconf))
return PTR_ERR(priv->qfpconf);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
- priv->qfpsecurity = devm_ioremap_resource(dev, res);
+ priv->qfpsecurity = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(priv->qfpsecurity))
return PTR_ERR(priv->qfpsecurity);
@@ -427,12 +424,8 @@ static int qfprom_probe(struct platform_device *pdev)
return PTR_ERR(priv->vcc);
priv->secclk = devm_clk_get(dev, "core");
- if (IS_ERR(priv->secclk)) {
- ret = PTR_ERR(priv->secclk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "Error getting clock: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(priv->secclk))
+ return dev_err_probe(dev, PTR_ERR(priv->secclk), "Error getting clock\n");
/* Only enable writing if we have SoC data. */
if (priv->soc_data)
diff --git a/drivers/nvmem/qoriq-efuse.c b/drivers/nvmem/qoriq-efuse.c
new file mode 100644
index 000000000000..e7fd04d6dd94
--- /dev/null
+++ b/drivers/nvmem/qoriq-efuse.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Westermo Network Technologies AB
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+
+struct qoriq_efuse_priv {
+ void __iomem *base;
+};
+
+static int qoriq_efuse_read(void *context, unsigned int offset, void *val,
+ size_t bytes)
+{
+ struct qoriq_efuse_priv *priv = context;
+
+ /* .stride = 4 so offset is guaranteed to be aligned */
+ __ioread32_copy(val, priv->base + offset, bytes / 4);
+
+ /* Ignore trailing bytes (there shouldn't be any) */
+
+ return 0;
+}
+
+static int qoriq_efuse_probe(struct platform_device *pdev)
+{
+ struct nvmem_config config = {
+ .dev = &pdev->dev,
+ .read_only = true,
+ .reg_read = qoriq_efuse_read,
+ .stride = sizeof(u32),
+ .word_size = sizeof(u32),
+ .name = "qoriq_efuse_read",
+ .id = NVMEM_DEVID_AUTO,
+ .root_only = true,
+ };
+ struct qoriq_efuse_priv *priv;
+ struct nvmem_device *nvmem;
+ struct resource *res;
+
+ priv = devm_kzalloc(config.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ config.size = resource_size(res);
+ config.priv = priv;
+ nvmem = devm_nvmem_register(config.dev, &config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id qoriq_efuse_of_match[] = {
+ { .compatible = "fsl,t1023-sfp", },
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, qoriq_efuse_of_match);
+
+static struct platform_driver qoriq_efuse_driver = {
+ .probe = qoriq_efuse_probe,
+ .driver = {
+ .name = "qoriq-efuse",
+ .of_match_table = qoriq_efuse_of_match,
+ },
+};
+module_platform_driver(qoriq_efuse_driver);
+
+MODULE_AUTHOR("Richard Alpe <richard.alpe@bit42.se>");
+MODULE_DESCRIPTION("NXP QorIQ Security Fuse Processor (SFP) Reader");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/rave-sp-eeprom.c b/drivers/nvmem/rave-sp-eeprom.c
index c456011b75e8..df6a1c594b78 100644
--- a/drivers/nvmem/rave-sp-eeprom.c
+++ b/drivers/nvmem/rave-sp-eeprom.c
@@ -10,7 +10,7 @@
#include <linux/mfd/rave-sp.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index e4579de5d014..4004c5bece42 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -267,8 +267,7 @@ static int rockchip_efuse_probe(struct platform_device *pdev)
if (!efuse)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- efuse->base = devm_ioremap_resource(dev, res);
+ efuse->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(efuse->base))
return PTR_ERR(efuse->base);
diff --git a/drivers/nvmem/sc27xx-efuse.c b/drivers/nvmem/sc27xx-efuse.c
index c825fc902d10..2210da40dfbd 100644
--- a/drivers/nvmem/sc27xx-efuse.c
+++ b/drivers/nvmem/sc27xx-efuse.c
@@ -4,7 +4,6 @@
#include <linux/hwspinlock.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/nvmem-provider.h>
diff --git a/drivers/nvmem/sec-qfprom.c b/drivers/nvmem/sec-qfprom.c
new file mode 100644
index 000000000000..e48c2dc0c44b
--- /dev/null
+++ b/drivers/nvmem/sec-qfprom.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/mod_devicetable.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+/**
+ * struct sec_qfprom - structure holding secure qfprom attributes
+ *
+ * @base: starting physical address for secure qfprom corrected address space.
+ * @dev: qfprom device structure.
+ */
+struct sec_qfprom {
+ phys_addr_t base;
+ struct device *dev;
+};
+
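+/*
+ * The secure fuse region is only accessible through SCM calls, so read it
+ * one aligned 32-bit word at a time and copy out the requested bytes.
+ */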
+static int sec_qfprom_reg_read(void *context, unsigned int reg, void *_val, size_t bytes)
+{
+ struct sec_qfprom *priv = context;
+ unsigned int i;
+ u8 *val = _val;
+ u32 read_val;
+ u8 *tmp;
+
+ for (i = 0; i < bytes; i++, reg++) {
+ if (i == 0 || reg % 4 == 0) {
+ if (qcom_scm_io_readl(priv->base + (reg & ~3), &read_val)) {
+ dev_err(priv->dev, "Couldn't access fuse register\n");
+ return -EINVAL;
+ }
+ tmp = (u8 *)&read_val;
+ }
+
+ val[i] = tmp[reg & 3];
+ }
+
+ return 0;
+}
+
+static int sec_qfprom_probe(struct platform_device *pdev)
+{
+ struct nvmem_config econfig = {
+ .name = "sec-qfprom",
+ .stride = 1,
+ .word_size = 1,
+ .id = NVMEM_DEVID_AUTO,
+ .reg_read = sec_qfprom_reg_read,
+ };
+ struct device *dev = &pdev->dev;
+ struct nvmem_device *nvmem;
+ struct sec_qfprom *priv;
+ struct resource *res;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ priv->base = res->start;
+
+ econfig.size = resource_size(res);
+ econfig.dev = dev;
+ econfig.priv = priv;
+
+ priv->dev = dev;
+
+ nvmem = devm_nvmem_register(dev, &econfig);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id sec_qfprom_of_match[] = {
+ { .compatible = "qcom,sec-qfprom" },
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, sec_qfprom_of_match);
+
+static struct platform_driver qfprom_driver = {
+ .probe = sec_qfprom_probe,
+ .driver = {
+ .name = "qcom_sec_qfprom",
+ .of_match_table = sec_qfprom_of_match,
+ },
+};
+module_platform_driver(qfprom_driver);
+MODULE_DESCRIPTION("Qualcomm Secure QFPROM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/snvs_lpgpr.c b/drivers/nvmem/snvs_lpgpr.c
index 4692aa985bd6..89c27112320f 100644
--- a/drivers/nvmem/snvs_lpgpr.c
+++ b/drivers/nvmem/snvs_lpgpr.c
@@ -7,7 +7,8 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#define IMX6Q_SNVS_HPLR 0x00
diff --git a/drivers/nvmem/sprd-efuse.c b/drivers/nvmem/sprd-efuse.c
index 4f1fcbfec394..7e6e31db4baa 100644
--- a/drivers/nvmem/sprd-efuse.c
+++ b/drivers/nvmem/sprd-efuse.c
@@ -7,7 +7,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#define SPRD_EFUSE_ENABLE 0x20
diff --git a/drivers/nvmem/stm32-romem.c b/drivers/nvmem/stm32-romem.c
index 38d0bf557129..0f84044bd1ad 100644
--- a/drivers/nvmem/stm32-romem.c
+++ b/drivers/nvmem/stm32-romem.c
@@ -196,8 +196,7 @@ static int stm32_romem_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/nvmem/sunplus-ocotp.c b/drivers/nvmem/sunplus-ocotp.c
index f85350b17d67..f3a18aa0a6c7 100644
--- a/drivers/nvmem/sunplus-ocotp.c
+++ b/drivers/nvmem/sunplus-ocotp.c
@@ -13,8 +13,8 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/nvmem-provider.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
/*
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index a970f1741cc6..5d364d85347f 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/random.h>
@@ -125,7 +124,6 @@ static int sun8i_sid_read_by_reg(void *context, unsigned int offset,
static int sunxi_sid_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct nvmem_config *nvmem_cfg;
struct nvmem_device *nvmem;
struct sunxi_sid *sid;
@@ -142,8 +140,7 @@ static int sunxi_sid_probe(struct platform_device *pdev)
return -EINVAL;
sid->value_offset = cfg->value_offset;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sid->base = devm_ioremap_resource(dev, res);
+ sid->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sid->base))
return PTR_ERR(sid->base);
diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c
index ee9fd9989b6e..c4ae94af4af7 100644
--- a/drivers/nvmem/u-boot-env.c
+++ b/drivers/nvmem/u-boot-env.c
@@ -11,7 +11,7 @@
#include <linux/mtd/mtd.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -47,7 +47,7 @@ struct u_boot_env_image_broadcom {
__le32 magic;
__le32 len;
__le32 crc32;
- uint8_t data[0];
+ DECLARE_FLEX_ARRAY(uint8_t, data);
} __packed;
static int u_boot_env_read(void *context, unsigned int offset, void *val,
diff --git a/drivers/nvmem/uniphier-efuse.c b/drivers/nvmem/uniphier-efuse.c
index aca910b3b6f8..0a1dbb80537e 100644
--- a/drivers/nvmem/uniphier-efuse.c
+++ b/drivers/nvmem/uniphier-efuse.c
@@ -41,8 +41,7 @@ static int uniphier_efuse_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 3bacbaf16f42..1f236aaf7867 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2655,6 +2655,7 @@ enum parport_pc_pci_cards {
netmos_9815,
netmos_9901,
netmos_9865,
+ asix_ax99100,
quatech_sppxp100,
wch_ch382l,
};
@@ -2733,6 +2734,7 @@ static struct parport_pc_pci {
/* netmos_9815 */ { 2, { { 0, 1 }, { 2, 3 }, } },
/* netmos_9901 */ { 1, { { 0, -1 }, } },
/* netmos_9865 */ { 1, { { 0, -1 }, } },
+ /* asix_ax99100 */ { 1, { { 0, 1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
/* wch_ch382l */ { 1, { { 2, -1 }, } },
};
@@ -2823,6 +2825,9 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
0xA000, 0x1000, 0, 0, netmos_9865 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
0xA000, 0x2000, 0, 0, netmos_9865 },
+ /* ASIX AX99100 PCIe to Multi I/O Controller */
+ { PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_AX99100,
+ 0xA000, 0x2000, 0, 0, asix_ax99100 },
/* Quatech SPPXP-100 Parallel port PCI ExpressCard */
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index e72419d7e72e..dddb235dd020 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -19,6 +19,7 @@ if PCCARD
config PCMCIA
tristate "16-bit PCMCIA support"
+ depends on HAS_IOMEM
select CRC32
default y
help
diff --git a/drivers/peci/controller/Kconfig b/drivers/peci/controller/Kconfig
index 2fc5e2abb74a..4f9c245ad042 100644
--- a/drivers/peci/controller/Kconfig
+++ b/drivers/peci/controller/Kconfig
@@ -16,3 +16,19 @@ config PECI_ASPEED
This driver can also be built as a module. If so, the module will
be called peci-aspeed.
+
+config PECI_NPCM
+ tristate "Nuvoton NPCM PECI support"
+ depends on ARCH_NPCM || COMPILE_TEST
+ depends on OF
+ select REGMAP_MMIO
+ help
+ This option enables the PECI controller driver for Nuvoton NPCM7XX
+ and NPCM8XX SoCs. It allows the BMC to discover devices connected
+ to it and communicate with them using the PECI protocol.
+
+ Say Y here if you want support for the Platform Environment Control
+ Interface (PECI) bus adapter driver on the Nuvoton NPCM SoCs.
+
+ This support is also available as a module. If so, the module
+ will be called peci-npcm.
diff --git a/drivers/peci/controller/Makefile b/drivers/peci/controller/Makefile
index 022c28ef1bf0..e247449bb423 100644
--- a/drivers/peci/controller/Makefile
+++ b/drivers/peci/controller/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_PECI_ASPEED) += peci-aspeed.o
+obj-$(CONFIG_PECI_NPCM) += peci-npcm.o
diff --git a/drivers/peci/controller/peci-aspeed.c b/drivers/peci/controller/peci-aspeed.c
index 731c5d8f75c6..7fdc25afcf2f 100644
--- a/drivers/peci/controller/peci-aspeed.c
+++ b/drivers/peci/controller/peci-aspeed.c
@@ -468,7 +468,7 @@ static void aspeed_peci_property_setup(struct aspeed_peci *priv)
ASPEED_PECI_CMD_TIMEOUT_MS_DEFAULT, &priv->cmd_timeout_ms);
}
-static struct peci_controller_ops aspeed_ops = {
+static const struct peci_controller_ops aspeed_ops = {
.xfer = aspeed_peci_xfer,
};
diff --git a/drivers/peci/controller/peci-npcm.c b/drivers/peci/controller/peci-npcm.c
new file mode 100644
index 000000000000..ec613d35c796
--- /dev/null
+++ b/drivers/peci/controller/peci-npcm.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Nuvoton Technology corporation
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/peci.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+/* NPCM GCR module */
+#define NPCM_INTCR3_OFFSET 0x9C
+#define NPCM_INTCR3_PECIVSEL BIT(19)
+
+/* NPCM PECI Registers */
+#define NPCM_PECI_CTL_STS 0x00
+#define NPCM_PECI_RD_LENGTH 0x04
+#define NPCM_PECI_ADDR 0x08
+#define NPCM_PECI_CMD 0x0C
+#define NPCM_PECI_CTL2 0x10
+#define NPCM_PECI_WR_LENGTH 0x1C
+#define NPCM_PECI_PDDR 0x2C
+#define NPCM_PECI_DAT_INOUT(n) (0x100 + ((n) * 4))
+
+#define NPCM_PECI_MAX_REG 0x200
+
+/* NPCM_PECI_CTL_STS - 0x00 : Control Register */
+#define NPCM_PECI_CTRL_DONE_INT_EN BIT(6)
+#define NPCM_PECI_CTRL_ABRT_ERR BIT(4)
+#define NPCM_PECI_CTRL_CRC_ERR BIT(3)
+#define NPCM_PECI_CTRL_DONE BIT(1)
+#define NPCM_PECI_CTRL_START_BUSY BIT(0)
+
+/* NPCM_PECI_RD_LENGTH - 0x04 : Command Register */
+#define NPCM_PECI_RD_LEN_MASK GENMASK(6, 0)
+
+/* NPCM_PECI_CTL2 - 0x10 : Control Register 2 */
+#define NPCM_PECI_CTL2_MASK GENMASK(7, 6)
+
+/* NPCM_PECI_WR_LENGTH - 0x1C : Command Register */
+#define NPCM_PECI_WR_LEN_MASK GENMASK(6, 0)
+
+/* NPCM_PECI_PDDR - 0x2C : Command Register */
+#define NPCM_PECI_PDDR_MASK GENMASK(4, 0)
+
+#define NPCM_PECI_INT_MASK (NPCM_PECI_CTRL_ABRT_ERR | \
+ NPCM_PECI_CTRL_CRC_ERR | \
+ NPCM_PECI_CTRL_DONE)
+
+#define NPCM_PECI_IDLE_CHECK_TIMEOUT_USEC (50 * USEC_PER_MSEC)
+#define NPCM_PECI_IDLE_CHECK_INTERVAL_USEC (10 * USEC_PER_MSEC)
+#define NPCM_PECI_CMD_TIMEOUT_MS_DEFAULT 1000
+#define NPCM_PECI_CMD_TIMEOUT_MS_MAX 60000
+#define NPCM_PECI_HOST_NEG_BIT_RATE_DEFAULT 15
+#define NPCM_PECI_PULL_DOWN_DEFAULT 0
+
+struct npcm_peci {
+ u32 cmd_timeout_ms;
+ struct completion xfer_complete;
+ struct regmap *regmap;
+ u32 status;
+ spinlock_t lock; /* to sync completion status handling */
+ struct peci_controller *controller;
+ struct device *dev;
+ struct clk *clk;
+ int irq;
+};
+
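+/*
+ * Run a single PECI transaction: wait for the controller to go idle,
+ * program the target address and transfer lengths, load the Tx bytes,
+ * set START_BUSY and wait for the DONE interrupt before reading back
+ * the Rx data.
+ */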
+static int npcm_peci_xfer(struct peci_controller *controller, u8 addr, struct peci_request *req)
+{
+ struct npcm_peci *priv = dev_get_drvdata(controller->dev.parent);
+ unsigned long timeout = msecs_to_jiffies(priv->cmd_timeout_ms);
+ unsigned int msg_rd;
+ u32 cmd_sts;
+ int i, ret;
+
+ /* Check command sts and bus idle state */
+ ret = regmap_read_poll_timeout(priv->regmap, NPCM_PECI_CTL_STS, cmd_sts,
+ !(cmd_sts & NPCM_PECI_CTRL_START_BUSY),
+ NPCM_PECI_IDLE_CHECK_INTERVAL_USEC,
+ NPCM_PECI_IDLE_CHECK_TIMEOUT_USEC);
+ if (ret)
+ return ret; /* -ETIMEDOUT */
+
+ spin_lock_irq(&priv->lock);
+ reinit_completion(&priv->xfer_complete);
+
+ regmap_write(priv->regmap, NPCM_PECI_ADDR, addr);
+ regmap_write(priv->regmap, NPCM_PECI_RD_LENGTH, NPCM_PECI_WR_LEN_MASK & req->rx.len);
+ regmap_write(priv->regmap, NPCM_PECI_WR_LENGTH, NPCM_PECI_WR_LEN_MASK & req->tx.len);
+
+ if (req->tx.len) {
+ regmap_write(priv->regmap, NPCM_PECI_CMD, req->tx.buf[0]);
+
+ for (i = 0; i < (req->tx.len - 1); i++)
+ regmap_write(priv->regmap, NPCM_PECI_DAT_INOUT(i), req->tx.buf[i + 1]);
+ }
+
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
+ dev_dbg(priv->dev, "addr : %#02x, tx.len : %#02x, rx.len : %#02x\n",
+ addr, req->tx.len, req->rx.len);
+ print_hex_dump_bytes("TX : ", DUMP_PREFIX_NONE, req->tx.buf, req->tx.len);
+#endif
+
+ priv->status = 0;
+ regmap_update_bits(priv->regmap, NPCM_PECI_CTL_STS, NPCM_PECI_CTRL_START_BUSY,
+ NPCM_PECI_CTRL_START_BUSY);
+
+ spin_unlock_irq(&priv->lock);
+
+ ret = wait_for_completion_interruptible_timeout(&priv->xfer_complete, timeout);
+ if (ret < 0)
+ return ret;
+
+ if (ret == 0) {
+ dev_dbg(priv->dev, "timeout waiting for a response\n");
+ return -ETIMEDOUT;
+ }
+
+ spin_lock_irq(&priv->lock);
+
+ if (priv->status != NPCM_PECI_CTRL_DONE) {
+ spin_unlock_irq(&priv->lock);
+ dev_dbg(priv->dev, "no valid response, status: %#02x\n", priv->status);
+ return -EIO;
+ }
+
+ regmap_write(priv->regmap, NPCM_PECI_CMD, 0);
+
+ for (i = 0; i < req->rx.len; i++) {
+ regmap_read(priv->regmap, NPCM_PECI_DAT_INOUT(i), &msg_rd);
+ req->rx.buf[i] = (u8)msg_rd;
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
+ print_hex_dump_bytes("RX : ", DUMP_PREFIX_NONE, req->rx.buf, req->rx.len);
+#endif
+ return 0;
+}
+
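+/*
+ * Latch the status bits, acknowledge CRC/abort errors and complete the
+ * pending transfer once the DONE bit is seen.
+ */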
+static irqreturn_t npcm_peci_irq_handler(int irq, void *arg)
+{
+ struct npcm_peci *priv = arg;
+ u32 status_ack = 0;
+ u32 status;
+
+ spin_lock(&priv->lock);
+ regmap_read(priv->regmap, NPCM_PECI_CTL_STS, &status);
+ priv->status |= (status & NPCM_PECI_INT_MASK);
+
+ if (status & NPCM_PECI_CTRL_CRC_ERR)
+ status_ack |= NPCM_PECI_CTRL_CRC_ERR;
+
+ if (status & NPCM_PECI_CTRL_ABRT_ERR)
+ status_ack |= NPCM_PECI_CTRL_ABRT_ERR;
+
+ /*
+ * All commands should end with the NPCM_PECI_CTRL_DONE
+ * bit set, even in an error case.
+ */
+ if (status & NPCM_PECI_CTRL_DONE) {
+ status_ack |= NPCM_PECI_CTRL_DONE;
+ complete(&priv->xfer_complete);
+ }
+
+ regmap_write_bits(priv->regmap, NPCM_PECI_CTL_STS, NPCM_PECI_INT_MASK, status_ack);
+
+ spin_unlock(&priv->lock);
+ return IRQ_HANDLED;
+}
+
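+/*
+ * One-time controller setup: enable the reference clock, validate the
+ * command timeout, program the pull-down and bit-rate defaults, and
+ * enable the DONE interrupt once the controller is idle.
+ */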
+static int npcm_peci_init_ctrl(struct npcm_peci *priv)
+{
+ u32 cmd_sts;
+ int ret;
+
+ priv->clk = devm_clk_get_enabled(priv->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(priv->dev, "failed to get ref clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ ret = device_property_read_u32(priv->dev, "cmd-timeout-ms", &priv->cmd_timeout_ms);
+ if (ret) {
+ priv->cmd_timeout_ms = NPCM_PECI_CMD_TIMEOUT_MS_DEFAULT;
+ } else if (priv->cmd_timeout_ms > NPCM_PECI_CMD_TIMEOUT_MS_MAX ||
+ priv->cmd_timeout_ms == 0) {
+ dev_warn(priv->dev, "invalid cmd-timeout-ms: %u, falling back to: %u\n",
+ priv->cmd_timeout_ms, NPCM_PECI_CMD_TIMEOUT_MS_DEFAULT);
+
+ priv->cmd_timeout_ms = NPCM_PECI_CMD_TIMEOUT_MS_DEFAULT;
+ }
+
+ regmap_update_bits(priv->regmap, NPCM_PECI_CTL2, NPCM_PECI_CTL2_MASK,
+ NPCM_PECI_PULL_DOWN_DEFAULT << 6);
+
+ regmap_update_bits(priv->regmap, NPCM_PECI_PDDR, NPCM_PECI_PDDR_MASK,
+ NPCM_PECI_HOST_NEG_BIT_RATE_DEFAULT);
+
+ ret = regmap_read_poll_timeout(priv->regmap, NPCM_PECI_CTL_STS, cmd_sts,
+ !(cmd_sts & NPCM_PECI_CTRL_START_BUSY),
+ NPCM_PECI_IDLE_CHECK_INTERVAL_USEC,
+ NPCM_PECI_IDLE_CHECK_TIMEOUT_USEC);
+ if (ret)
+ return ret; /* -ETIMEDOUT */
+
+ /* PECI interrupt enable */
+ regmap_update_bits(priv->regmap, NPCM_PECI_CTL_STS, NPCM_PECI_CTRL_DONE_INT_EN,
+ NPCM_PECI_CTRL_DONE_INT_EN);
+
+ return 0;
+}
+
+static const struct regmap_config npcm_peci_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = NPCM_PECI_MAX_REG,
+ .fast_io = true,
+};
+
+static struct peci_controller_ops npcm_ops = {
+ .xfer = npcm_peci_xfer,
+};
+
+static int npcm_peci_probe(struct platform_device *pdev)
+{
+ struct peci_controller *controller;
+ struct npcm_peci *priv;
+ void __iomem *base;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, priv);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->regmap = devm_regmap_init_mmio(&pdev->dev, base, &npcm_peci_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ priv->irq = platform_get_irq(pdev, 0);
+ if (priv->irq < 0)
+ return priv->irq;
+
+ ret = devm_request_irq(&pdev->dev, priv->irq, npcm_peci_irq_handler,
+ 0, "peci-npcm-irq", priv);
+ if (ret)
+ return ret;
+
+ init_completion(&priv->xfer_complete);
+ spin_lock_init(&priv->lock);
+
+ ret = npcm_peci_init_ctrl(priv);
+ if (ret)
+ return ret;
+
+ controller = devm_peci_controller_add(priv->dev, &npcm_ops);
+ if (IS_ERR(controller))
+ return dev_err_probe(priv->dev, PTR_ERR(controller),
+ "failed to add npcm peci controller\n");
+
+ priv->controller = controller;
+
+ return 0;
+}
+
+static const struct of_device_id npcm_peci_of_table[] = {
+ { .compatible = "nuvoton,npcm750-peci", },
+ { .compatible = "nuvoton,npcm845-peci", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, npcm_peci_of_table);
+
+static struct platform_driver npcm_peci_driver = {
+ .probe = npcm_peci_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = npcm_peci_of_table,
+ },
+};
+module_platform_driver(npcm_peci_driver);
+
+MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
+MODULE_DESCRIPTION("NPCM PECI driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PECI);
diff --git a/drivers/peci/core.c b/drivers/peci/core.c
index 9c8cf07e51c7..0f83a9c6093b 100644
--- a/drivers/peci/core.c
+++ b/drivers/peci/core.c
@@ -44,7 +44,7 @@ int peci_controller_scan_devices(struct peci_controller *controller)
}
static struct peci_controller *peci_controller_alloc(struct device *dev,
- struct peci_controller_ops *ops)
+ const struct peci_controller_ops *ops)
{
struct peci_controller *controller;
int ret;
@@ -113,7 +113,7 @@ static void unregister_controller(void *_controller)
* Return: Pointer to the newly allocated controller or ERR_PTR() in case of failure.
*/
struct peci_controller *devm_peci_controller_add(struct device *dev,
- struct peci_controller_ops *ops)
+ const struct peci_controller_ops *ops)
{
struct peci_controller *controller;
int ret;
diff --git a/drivers/peci/cpu.c b/drivers/peci/cpu.c
index de4a7b3e5966..bd990acd92b8 100644
--- a/drivers/peci/cpu.c
+++ b/drivers/peci/cpu.c
@@ -323,6 +323,11 @@ static const struct peci_device_id peci_cpu_device_ids[] = {
.model = INTEL_FAM6_ICELAKE_D,
.data = "icxd",
},
+ { /* Sapphire Rapids Xeon */
+ .family = 6,
+ .model = INTEL_FAM6_SAPPHIRERAPIDS_X,
+ .data = "spr",
+ },
{ }
};
MODULE_DEVICE_TABLE(peci, peci_cpu_device_ids);
diff --git a/drivers/perf/arm_pmuv3.c b/drivers/perf/arm_pmuv3.c
index e5a2ac4155f6..8fcaa26f0f8a 100644
--- a/drivers/perf/arm_pmuv3.c
+++ b/drivers/perf/arm_pmuv3.c
@@ -749,6 +749,8 @@ static void armv8pmu_start(struct arm_pmu *cpu_pmu)
/* Enable all counters */
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
+
+ kvm_vcpu_pmu_resync_el0();
}
static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
diff --git a/drivers/perf/cxl_pmu.c b/drivers/perf/cxl_pmu.c
index 0a8f597e695b..365d964b0f6a 100644
--- a/drivers/perf/cxl_pmu.c
+++ b/drivers/perf/cxl_pmu.c
@@ -25,7 +25,7 @@
#include "../cxl/pmu.h"
#define CXL_PMU_CAP_REG 0x0
-#define CXL_PMU_CAP_NUM_COUNTERS_MSK GENMASK_ULL(4, 0)
+#define CXL_PMU_CAP_NUM_COUNTERS_MSK GENMASK_ULL(5, 0)
#define CXL_PMU_CAP_COUNTER_WIDTH_MSK GENMASK_ULL(15, 8)
#define CXL_PMU_CAP_NUM_EVN_CAP_REG_SUP_MSK GENMASK_ULL(24, 20)
#define CXL_PMU_CAP_FILTERS_SUP_MSK GENMASK_ULL(39, 32)
diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c
index 56897d4d4fd3..1f9a35f724f5 100644
--- a/drivers/perf/riscv_pmu.c
+++ b/drivers/perf/riscv_pmu.c
@@ -14,9 +14,81 @@
#include <linux/perf/riscv_pmu.h>
#include <linux/printk.h>
#include <linux/smp.h>
+#include <linux/sched_clock.h>
#include <asm/sbi.h>
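+/*
+ * Direct user-space reads are only allowed for hardware, hw-cache and raw
+ * events that have been flagged for user counter access at mmap time.
+ */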
+static bool riscv_perf_user_access(struct perf_event *event)
+{
+ return ((event->attr.type == PERF_TYPE_HARDWARE) ||
+ (event->attr.type == PERF_TYPE_HW_CACHE) ||
+ (event->attr.type == PERF_TYPE_RAW)) &&
+ !!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT);
+}
+
+void arch_perf_update_userpage(struct perf_event *event,
+ struct perf_event_mmap_page *userpg, u64 now)
+{
+ struct clock_read_data *rd;
+ unsigned int seq;
+ u64 ns;
+
+ userpg->cap_user_time = 0;
+ userpg->cap_user_time_zero = 0;
+ userpg->cap_user_time_short = 0;
+ userpg->cap_user_rdpmc = riscv_perf_user_access(event);
+
+#ifdef CONFIG_RISCV_PMU
+ /*
+ * The counters are 64-bit but the priv spec doesn't mandate all the
+ * bits to be implemented: that's why, counter width can vary based on
+ * the cpu vendor.
+ */
+ if (userpg->cap_user_rdpmc)
+ userpg->pmc_width = to_riscv_pmu(event->pmu)->ctr_get_width(event->hw.idx) + 1;
+#endif
+
+ do {
+ rd = sched_clock_read_begin(&seq);
+
+ userpg->time_mult = rd->mult;
+ userpg->time_shift = rd->shift;
+ userpg->time_zero = rd->epoch_ns;
+ userpg->time_cycles = rd->epoch_cyc;
+ userpg->time_mask = rd->sched_clock_mask;
+
+ /*
+ * Subtract the cycle base, such that software that
+ * doesn't know about cap_user_time_short still 'works'
+ * assuming no wraps.
+ */
+ ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
+ userpg->time_zero -= ns;
+
+ } while (sched_clock_read_retry(seq));
+
+ userpg->time_offset = userpg->time_zero - now;
+
+ /*
+ * time_shift is not expected to be greater than 31 due to
+ * the original published conversion algorithm shifting a
+ * 32-bit value (now specifies a 64-bit value) - see the
+ * perf_event_mmap_page documentation in perf_event.h.
+ */
+ if (userpg->time_shift == 32) {
+ userpg->time_shift = 31;
+ userpg->time_mult >>= 1;
+ }
+
+ /*
+ * Internal timekeeping for enabled/running/stopped times
+ * is always computed with the sched_clock.
+ */
+ userpg->cap_user_time = 1;
+ userpg->cap_user_time_zero = 1;
+ userpg->cap_user_time_short = 1;
+}
+
static unsigned long csr_read_num(int csr_num)
{
#define switchcase_csr_read(__csr_num, __val) {\
@@ -171,6 +243,8 @@ int riscv_pmu_event_set_period(struct perf_event *event)
local64_set(&hwc->prev_count, (u64)-left);
+ perf_event_update_userpage(event);
+
return overflow;
}
@@ -264,6 +338,9 @@ static int riscv_pmu_event_init(struct perf_event *event)
hwc->idx = -1;
hwc->event_base = mapped_event;
+ if (rvpmu->event_init)
+ rvpmu->event_init(event);
+
if (!is_sampling_event(event)) {
/*
* For non-sampling runs, limit the sample_period to half
@@ -280,6 +357,39 @@ static int riscv_pmu_event_init(struct perf_event *event)
return 0;
}
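+/*
+ * Return the 1-based counter index userspace should use for direct reads;
+ * 0 means direct access is not available for this event.
+ */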
+static int riscv_pmu_event_idx(struct perf_event *event)
+{
+ struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
+
+ if (!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT))
+ return 0;
+
+ if (rvpmu->csr_index)
+ return rvpmu->csr_index(event) + 1;
+
+ return 0;
+}
+
+static void riscv_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
+{
+ struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
+
+ if (rvpmu->event_mapped) {
+ rvpmu->event_mapped(event, mm);
+ perf_event_update_userpage(event);
+ }
+}
+
+static void riscv_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
+{
+ struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu);
+
+ if (rvpmu->event_unmapped) {
+ rvpmu->event_unmapped(event, mm);
+ perf_event_update_userpage(event);
+ }
+}
+
struct riscv_pmu *riscv_pmu_alloc(void)
{
struct riscv_pmu *pmu;
@@ -304,6 +414,9 @@ struct riscv_pmu *riscv_pmu_alloc(void)
}
pmu->pmu = (struct pmu) {
.event_init = riscv_pmu_event_init,
+ .event_mapped = riscv_pmu_event_mapped,
+ .event_unmapped = riscv_pmu_event_unmapped,
+ .event_idx = riscv_pmu_event_idx,
.add = riscv_pmu_add,
.del = riscv_pmu_del,
.start = riscv_pmu_start,
diff --git a/drivers/perf/riscv_pmu_legacy.c b/drivers/perf/riscv_pmu_legacy.c
index ca9e20bfc7ac..79fdd667922e 100644
--- a/drivers/perf/riscv_pmu_legacy.c
+++ b/drivers/perf/riscv_pmu_legacy.c
@@ -13,7 +13,7 @@
#include <linux/platform_device.h>
#define RISCV_PMU_LEGACY_CYCLE 0
-#define RISCV_PMU_LEGACY_INSTRET 1
+#define RISCV_PMU_LEGACY_INSTRET 2
static bool pmu_init_done;
@@ -71,6 +71,29 @@ static void pmu_legacy_ctr_start(struct perf_event *event, u64 ival)
local64_set(&hwc->prev_count, initial_val);
}
+static uint8_t pmu_legacy_csr_index(struct perf_event *event)
+{
+ return event->hw.idx;
+}
+
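+/*
+ * The legacy PMU only exposes the fixed cycle and instret counters, so
+ * user-space reads are restricted to those two events.
+ */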
+static void pmu_legacy_event_mapped(struct perf_event *event, struct mm_struct *mm)
+{
+ if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
+ event->attr.config != PERF_COUNT_HW_INSTRUCTIONS)
+ return;
+
+ event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
+}
+
+static void pmu_legacy_event_unmapped(struct perf_event *event, struct mm_struct *mm)
+{
+ if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
+ event->attr.config != PERF_COUNT_HW_INSTRUCTIONS)
+ return;
+
+ event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT;
+}
+
/*
* This is just a simple implementation to allow legacy implementations
* compatible with new RISC-V PMU driver framework.
@@ -91,6 +114,9 @@ static void pmu_legacy_init(struct riscv_pmu *pmu)
pmu->ctr_get_width = NULL;
pmu->ctr_clear_idx = NULL;
pmu->ctr_read = pmu_legacy_read_ctr;
+ pmu->event_mapped = pmu_legacy_event_mapped;
+ pmu->event_unmapped = pmu_legacy_event_unmapped;
+ pmu->csr_index = pmu_legacy_csr_index;
perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW);
}
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 4163ff517471..9a51053b1f99 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -24,6 +24,14 @@
#include <asm/sbi.h>
#include <asm/hwcap.h>
+#define SYSCTL_NO_USER_ACCESS 0
+#define SYSCTL_USER_ACCESS 1
+#define SYSCTL_LEGACY 2
+
+#define PERF_EVENT_FLAG_NO_USER_ACCESS BIT(SYSCTL_NO_USER_ACCESS)
+#define PERF_EVENT_FLAG_USER_ACCESS BIT(SYSCTL_USER_ACCESS)
+#define PERF_EVENT_FLAG_LEGACY BIT(SYSCTL_LEGACY)
+
PMU_FORMAT_ATTR(event, "config:0-47");
PMU_FORMAT_ATTR(firmware, "config:63");
@@ -43,6 +51,9 @@ static const struct attribute_group *riscv_pmu_attr_groups[] = {
NULL,
};
+/* Allow user mode access by default */
+static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS;
+
/*
* RISC-V doesn't have heterogeneous harts yet. This needs to be part of
* per_cpu in case of harts with different pmu counters
@@ -301,6 +312,11 @@ int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr)
}
EXPORT_SYMBOL_GPL(riscv_pmu_get_hpm_info);
+static uint8_t pmu_sbi_csr_index(struct perf_event *event)
+{
+ return pmu_ctr_list[event->hw.idx].csr - CSR_CYCLE;
+}
+
static unsigned long pmu_sbi_get_filter_flags(struct perf_event *event)
{
unsigned long cflags = 0;
@@ -329,18 +345,34 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);
struct sbiret ret;
int idx;
- uint64_t cbase = 0;
+ uint64_t cbase = 0, cmask = rvpmu->cmask;
unsigned long cflags = 0;
cflags = pmu_sbi_get_filter_flags(event);
+
+ /*
+ * In legacy mode, we have to force the fixed counters for those events
+ * but not in the user access mode as we want to use the other counters
+ * that support sampling/filtering.
+ */
+ if (hwc->flags & PERF_EVENT_FLAG_LEGACY) {
+ if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
+ cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
+ cmask = 1;
+ } else if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) {
+ cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
+ cmask = 1UL << (CSR_INSTRET - CSR_CYCLE);
+ }
+ }
+
/* retrieve the available counter index */
#if defined(CONFIG_32BIT)
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
- rvpmu->cmask, cflags, hwc->event_base, hwc->config,
+ cmask, cflags, hwc->event_base, hwc->config,
hwc->config >> 32);
#else
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
- rvpmu->cmask, cflags, hwc->event_base, hwc->config, 0);
+ cmask, cflags, hwc->event_base, hwc->config, 0);
#endif
if (ret.error) {
pr_debug("Not able to find a counter for event %lx config %llx\n",
@@ -474,6 +506,22 @@ static u64 pmu_sbi_ctr_read(struct perf_event *event)
return val;
}
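+/*
+ * Grant or revoke user access to an event's counter by toggling its bit
+ * in the SCOUNTEREN CSR on the local hart.
+ */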
+static void pmu_sbi_set_scounteren(void *arg)
+{
+ struct perf_event *event = (struct perf_event *)arg;
+
+ csr_write(CSR_SCOUNTEREN,
+ csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
+}
+
+static void pmu_sbi_reset_scounteren(void *arg)
+{
+ struct perf_event *event = (struct perf_event *)arg;
+
+ csr_write(CSR_SCOUNTEREN,
+ csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));
+}
+
static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
{
struct sbiret ret;
@@ -490,6 +538,10 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
if (ret.error && (ret.error != SBI_ERR_ALREADY_STARTED))
pr_err("Starting counter idx %d failed with error %d\n",
hwc->idx, sbi_err_map_linux_errno(ret.error));
+
+ if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
+ (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
+ pmu_sbi_set_scounteren((void *)event);
}
static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
@@ -497,6 +549,10 @@ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
struct sbiret ret;
struct hw_perf_event *hwc = &event->hw;
+ if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
+ (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
+ pmu_sbi_reset_scounteren((void *)event);
+
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
flag != SBI_PMU_STOP_FLAG_RESET)
@@ -704,10 +760,13 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events);
/*
- * Enable the access for CYCLE, TIME, and INSTRET CSRs from userspace,
- * as is necessary to maintain uABI compatibility.
+ * We keep enabling userspace access to CYCLE, TIME and INSTRET via the
+ * legacy option but that will be removed in the future.
*/
- csr_write(CSR_SCOUNTEREN, 0x7);
+ if (sysctl_perf_user_access == SYSCTL_LEGACY)
+ csr_write(CSR_SCOUNTEREN, 0x7);
+ else
+ csr_write(CSR_SCOUNTEREN, 0x2);
/* Stop all the counters so that they can be enabled from perf */
pmu_sbi_stop_all(pmu);
@@ -838,6 +897,121 @@ static void riscv_pmu_destroy(struct riscv_pmu *pmu)
cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
}
+static void pmu_sbi_event_init(struct perf_event *event)
+{
+ /*
+ * The permissions are set at event_init so that we do not depend
+ * on the sysctl value that can change.
+ */
+ if (sysctl_perf_user_access == SYSCTL_NO_USER_ACCESS)
+ event->hw.flags |= PERF_EVENT_FLAG_NO_USER_ACCESS;
+ else if (sysctl_perf_user_access == SYSCTL_USER_ACCESS)
+ event->hw.flags |= PERF_EVENT_FLAG_USER_ACCESS;
+ else
+ event->hw.flags |= PERF_EVENT_FLAG_LEGACY;
+}
+
+static void pmu_sbi_event_mapped(struct perf_event *event, struct mm_struct *mm)
+{
+ if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
+ return;
+
+ if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
+ if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
+ event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) {
+ return;
+ }
+ }
+
+ /*
+ * The user mmapped the event to directly access it: this is where
+ * we determine, based on sysctl_perf_user_access, whether we grant userspace
+ * direct access to this event. That means that within the same
+ * task, some events may be directly accessible and some others may not,
+ * if the user changes the value of sysctl_perf_user_access in the
+ * meantime.
+ */
+
+ event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
+
+ /*
+ * We must enable userspace access *before* advertising in the user page
+ * that it is possible to do so, in order to avoid any race.
+ * And we must notify all cpus here because threads that currently run
+ * on other cpus will try to directly access the counter too without
+ * calling pmu_sbi_ctr_start.
+ */
+ if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
+ on_each_cpu_mask(mm_cpumask(mm),
+ pmu_sbi_set_scounteren, (void *)event, 1);
+}
+
+static void pmu_sbi_event_unmapped(struct perf_event *event, struct mm_struct *mm)
+{
+ if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS)
+ return;
+
+ if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) {
+ if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES &&
+ event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) {
+ return;
+ }
+ }
+
+ /*
+ * Here we can directly remove user access since the user does not have
+ * access to the user page anymore, so we avoid the racy window where the
+ * user could have read cap_user_rdpmc as true right before we disable
+ * it.
+ */
+ event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT;
+
+ if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS)
+ on_each_cpu_mask(mm_cpumask(mm),
+ pmu_sbi_reset_scounteren, (void *)event, 1);
+}
+
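+/*
+ * Reprogram SCOUNTEREN on each hart when the sysctl changes: legacy mode
+ * exposes cycle, time and instret, otherwise only time remains visible.
+ */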
+static void riscv_pmu_update_counter_access(void *info)
+{
+ if (sysctl_perf_user_access == SYSCTL_LEGACY)
+ csr_write(CSR_SCOUNTEREN, 0x7);
+ else
+ csr_write(CSR_SCOUNTEREN, 0x2);
+}
+
+static int riscv_pmu_proc_user_access_handler(struct ctl_table *table,
+ int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ int prev = sysctl_perf_user_access;
+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ /*
+ * Test against the previous value since we clear SCOUNTEREN when
+ * sysctl_perf_user_access is set to SYSCTL_USER_ACCESS, but we should
+ * not do that if that was already the case.
+ */
+ if (ret || !write || prev == sysctl_perf_user_access)
+ return ret;
+
+ on_each_cpu(riscv_pmu_update_counter_access, NULL, 1);
+
+ return 0;
+}
+
+static struct ctl_table sbi_pmu_sysctl_table[] = {
+ {
+ .procname = "perf_user_access",
+ .data = &sysctl_perf_user_access,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = riscv_pmu_proc_user_access_handler,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
+ { }
+};
+
static int pmu_sbi_device_probe(struct platform_device *pdev)
{
struct riscv_pmu *pmu = NULL;
@@ -881,6 +1055,10 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
pmu->ctr_get_width = pmu_sbi_ctr_get_width;
pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx;
pmu->ctr_read = pmu_sbi_ctr_read;
+ pmu->event_init = pmu_sbi_event_init;
+ pmu->event_mapped = pmu_sbi_event_mapped;
+ pmu->event_unmapped = pmu_sbi_event_unmapped;
+ pmu->csr_index = pmu_sbi_csr_index;
ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node);
if (ret)
@@ -894,6 +1072,8 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
if (ret)
goto out_unregister;
+ register_sysctl("kernel", sbi_pmu_sysctl_table);
+
return 0;
out_unregister:
@@ -907,7 +1087,7 @@ out_free:
static struct platform_driver pmu_sbi_driver = {
.probe = pmu_sbi_device_probe,
.driver = {
- .name = RISCV_PMU_PDEV_NAME,
+ .name = RISCV_PMU_SBI_PDEV_NAME,
},
};
@@ -934,7 +1114,7 @@ static int __init pmu_sbi_devinit(void)
if (ret)
return ret;
- pdev = platform_device_register_simple(RISCV_PMU_PDEV_NAME, -1, NULL, 0);
+ pdev = platform_device_register_simple(RISCV_PMU_SBI_PDEV_NAME, -1, NULL, 0);
if (IS_ERR(pdev)) {
platform_driver_unregister(&pmu_sbi_driver);
return PTR_ERR(pdev);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 8dba9596408f..d1670bbe6d6b 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -87,11 +87,13 @@ source "drivers/phy/motorola/Kconfig"
source "drivers/phy/mscc/Kconfig"
source "drivers/phy/qualcomm/Kconfig"
source "drivers/phy/ralink/Kconfig"
+source "drivers/phy/realtek/Kconfig"
source "drivers/phy/renesas/Kconfig"
source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
source "drivers/phy/socionext/Kconfig"
source "drivers/phy/st/Kconfig"
+source "drivers/phy/starfive/Kconfig"
source "drivers/phy/sunplus/Kconfig"
source "drivers/phy/tegra/Kconfig"
source "drivers/phy/ti/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index 54f312c10a40..868a220ed0f6 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -26,11 +26,13 @@ obj-y += allwinner/ \
mscc/ \
qualcomm/ \
ralink/ \
+ realtek/ \
renesas/ \
rockchip/ \
samsung/ \
socionext/ \
st/ \
+ starfive/ \
sunplus/ \
tegra/ \
ti/ \
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 56d53f78d002..ec551464dd4f 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -23,8 +23,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-sun4i-usb.h>
diff --git a/drivers/phy/allwinner/phy-sun50i-usb3.c b/drivers/phy/allwinner/phy-sun50i-usb3.c
index 84055b720016..363f9a0df503 100644
--- a/drivers/phy/allwinner/phy-sun50i-usb3.c
+++ b/drivers/phy/allwinner/phy-sun50i-usb3.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
index 6e9af79e152c..08a86962d949 100644
--- a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
+++ b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
@@ -13,8 +13,8 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
index a3e1108b736d..ae898f93f97b 100644
--- a/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
+++ b/drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.c
@@ -11,6 +11,7 @@
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <dt-bindings/phy/phy.h>
diff --git a/drivers/phy/amlogic/phy-meson-axg-pcie.c b/drivers/phy/amlogic/phy-meson-axg-pcie.c
index 2299bab38e05..60be5cdc600b 100644
--- a/drivers/phy/amlogic/phy-meson-axg-pcie.c
+++ b/drivers/phy/amlogic/phy-meson-axg-pcie.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2020 Remi Pommarel <repk@triplefau.lt>
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c b/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
index cabdddbbabfd..46e5f7e7eb6c 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
@@ -13,6 +13,7 @@
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <dt-bindings/phy/phy.h>
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb2.c b/drivers/phy/amlogic/phy-meson-g12a-usb2.c
index ec2555bb83d5..0e0b5c00b676 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb2.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb2.c
@@ -14,7 +14,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
@@ -319,7 +319,7 @@ static int phy_meson_g12a_usb2_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- priv->soc_id = (enum meson_soc_id)of_device_get_match_data(&pdev->dev);
+ priv->soc_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
priv->regmap = devm_regmap_init_mmio(dev, base,
&phy_meson_g12a_usb2_regmap_conf);
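
The (uintptr_t) casts in this and several of the following PHY drivers replace direct pointer-to-enum casts on of_device_get_match_data()/of_id->data, which some compilers warn about (e.g. clang's -Wvoid-pointer-to-enum-cast). A hedged sketch of the pattern, with purely illustrative names:

  /* my_family / MY_FAMILY_* are illustrative, not from any driver here. */
  #include <stdint.h>
  #include <stdio.h>

  enum my_family { MY_FAMILY_A = 1, MY_FAMILY_B = 2 };

  int main(void)
  {
          /* Match data is carried as a pointer-sized constant. */
          const void *match_data = (const void *)(uintptr_t)MY_FAMILY_B;

          /* Casting the pointer straight to the enum type is what the
           * removed code did; going through uintptr_t first keeps the
           * conversion well defined and warning-free. */
          enum my_family family = (uintptr_t)match_data;

          printf("family = %d\n", family);
          return 0;
  }
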
diff --git a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
index d2a1da8d9e58..2712c4bd549d 100644
--- a/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
+++ b/drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
@@ -11,7 +11,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/reset.h>
diff --git a/drivers/phy/amlogic/phy-meson-gxl-usb2.c b/drivers/phy/amlogic/phy-meson-gxl-usb2.c
index db17c3448bfe..14ea89927ab1 100644
--- a/drivers/phy/amlogic/phy-meson-gxl-usb2.c
+++ b/drivers/phy/amlogic/phy-meson-gxl-usb2.c
@@ -8,8 +8,8 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
diff --git a/drivers/phy/amlogic/phy-meson8-hdmi-tx.c b/drivers/phy/amlogic/phy-meson8-hdmi-tx.c
index f9a6572c27d8..2617f7f6c2ec 100644
--- a/drivers/phy/amlogic/phy-meson8-hdmi-tx.c
+++ b/drivers/phy/amlogic/phy-meson8-hdmi-tx.c
@@ -10,7 +10,7 @@
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
diff --git a/drivers/phy/amlogic/phy-meson8b-usb2.c b/drivers/phy/amlogic/phy-meson8b-usb2.c
index dd96763911b8..d63147c41b8c 100644
--- a/drivers/phy/amlogic/phy-meson8b-usb2.c
+++ b/drivers/phy/amlogic/phy-meson8b-usb2.c
@@ -8,8 +8,8 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/reset.h>
diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
index bbfad209c890..69584b685edb 100644
--- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c
+++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
@@ -206,7 +206,7 @@ static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev)
of_id = of_match_device(bcm_ns_usb3_id_table, dev);
if (!of_id)
return -EINVAL;
- usb3->family = (enum bcm_ns_family)of_id->data;
+ usb3->family = (uintptr_t)of_id->data;
syscon_np = of_parse_phandle(dev->of_node, "usb3-dmp-syscon", 0);
err = of_address_to_resource(syscon_np, 0, &res);
diff --git a/drivers/phy/broadcom/phy-bcm-sr-usb.c b/drivers/phy/broadcom/phy-bcm-sr-usb.c
index 0002da3b5b5d..b0bd18a5df87 100644
--- a/drivers/phy/broadcom/phy-bcm-sr-usb.c
+++ b/drivers/phy/broadcom/phy-bcm-sr-usb.c
@@ -311,7 +311,7 @@ static int bcm_usb_phy_probe(struct platform_device *pdev)
of_id = of_match_node(bcm_usb_phy_of_match, dn);
if (of_id)
- version = (enum bcm_usb_phy_version)of_id->data;
+ version = (uintptr_t)of_id->data;
else
return -ENODEV;
diff --git a/drivers/phy/broadcom/phy-bcm63xx-usbh.c b/drivers/phy/broadcom/phy-bcm63xx-usbh.c
index 6c05ba8b08be..f8183dea774b 100644
--- a/drivers/phy/broadcom/phy-bcm63xx-usbh.c
+++ b/drivers/phy/broadcom/phy-bcm63xx-usbh.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 769c707d9b71..ed9e18791ec9 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -772,7 +772,7 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
of_id = of_match_node(brcm_sata_phy_of_match, dn);
if (of_id)
- priv->version = (enum brcm_sata_phy_version)of_id->data;
+ priv->version = (uintptr_t)of_id->data;
else
priv->version = BRCM_SATA_PHY_STB_28NM;
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index a4cfb777dd83..a16f0b58eb74 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -11,7 +11,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
diff --git a/drivers/phy/cadence/cdns-dphy-rx.c b/drivers/phy/cadence/cdns-dphy-rx.c
index c05b043893a9..7729cf80a9bd 100644
--- a/drivers/phy/cadence/cdns-dphy-rx.c
+++ b/drivers/phy/cadence/cdns-dphy-rx.c
@@ -7,6 +7,7 @@
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c
index 6e58012b6488..dddb66de6dba 100644
--- a/drivers/phy/cadence/cdns-dphy.c
+++ b/drivers/phy/cadence/cdns-dphy.c
@@ -9,8 +9,7 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index 7df9c79a772a..d4eb93ce8232 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -30,23 +30,34 @@
#define SIERRA_COMMON_CDB_OFFSET 0x0
#define SIERRA_MACRO_ID_REG 0x0
#define SIERRA_CMN_PLLLC_GEN_PREG 0x42
+#define SIERRA_CMN_PLLLC_FBDIV_INT_MODE0_PREG 0x43
+#define SIERRA_CMN_PLLLC_DCOCAL_CTRL_PREG 0x45
+#define SIERRA_CMN_PLLLC_INIT_PREG 0x46
+#define SIERRA_CMN_PLLLC_ITERTMR_PREG 0x47
#define SIERRA_CMN_PLLLC_MODE_PREG 0x48
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG 0x49
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG 0x4A
#define SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG 0x4B
+#define SIERRA_CMN_PLLLC_LOCKSEARCH_PREG 0x4C
#define SIERRA_CMN_PLLLC_CLK1_PREG 0x4D
+#define SIERRA_CMN_PLLLC_CLK0_PREG 0x4E
#define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG 0x4F
#define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG 0x50
#define SIERRA_CMN_PLLLC_DSMCORR_PREG 0x51
#define SIERRA_CMN_PLLLC_SS_PREG 0x52
#define SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG 0x53
#define SIERRA_CMN_PLLLC_SSTWOPT_PREG 0x54
+#define SIERRA_CMN_PLLCSM_PLLEN_TMR_PREG 0x5D
+#define SIERRA_CMN_PLLCSM_PLLPRE_TMR_PREG 0x5E
#define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG 0x62
#define SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG 0x63
+#define SIERRA_SDOSCCAL_CLK_CNT_PREG 0x6E
#define SIERRA_CMN_REFRCV_PREG 0x98
+#define SIERRA_CMN_RESCAL_CTRLA_PREG 0xA0
#define SIERRA_CMN_REFRCV1_PREG 0xB8
#define SIERRA_CMN_PLLLC1_GEN_PREG 0xC2
#define SIERRA_CMN_PLLLC1_FBDIV_INT_PREG 0xC3
+#define SIERRA_CMN_PLLLC1_DCOCAL_CTRL_PREG 0xC5
#define SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG 0xCA
#define SIERRA_CMN_PLLLC1_CLK0_PREG 0xCE
#define SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG 0xD0
@@ -86,6 +97,7 @@
#define SIERRA_DFE_BIASTRIM_PREG 0x04C
#define SIERRA_DRVCTRL_ATTEN_PREG 0x06A
#define SIERRA_DRVCTRL_BOOST_PREG 0x06F
+#define SIERRA_LANE_TX_RECEIVER_DETECT_PREG 0x071
#define SIERRA_TX_RCVDET_OVRD_PREG 0x072
#define SIERRA_CLKPATHCTRL_TMR_PREG 0x081
#define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085
@@ -101,6 +113,8 @@
#define SIERRA_CREQ_SPARE_PREG 0x096
#define SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG 0x097
#define SIERRA_CTLELUT_CTRL_PREG 0x098
+#define SIERRA_DEQ_BLK_TAU_CTRL1_PREG 0x0AC
+#define SIERRA_DEQ_BLK_TAU_CTRL4_PREG 0x0AF
#define SIERRA_DFE_ECMP_RATESEL_PREG 0x0C0
#define SIERRA_DFE_SMP_RATESEL_PREG 0x0C1
#define SIERRA_DEQ_PHALIGN_CTRL 0x0C4
@@ -129,6 +143,9 @@
#define SIERRA_DEQ_GLUT14 0x0F6
#define SIERRA_DEQ_GLUT15 0x0F7
#define SIERRA_DEQ_GLUT16 0x0F8
+#define SIERRA_POSTPRECUR_EN_CEPH_CTRL_PREG 0x0F9
+#define SIERRA_TAU_EN_CEPH2TO0_PREG 0x0FB
+#define SIERRA_TAU_EN_CEPH5TO3_PREG 0x0FC
#define SIERRA_DEQ_ALUT0 0x108
#define SIERRA_DEQ_ALUT1 0x109
#define SIERRA_DEQ_ALUT2 0x10A
@@ -143,6 +160,7 @@
#define SIERRA_DEQ_ALUT11 0x113
#define SIERRA_DEQ_ALUT12 0x114
#define SIERRA_DEQ_ALUT13 0x115
+#define SIERRA_OEPH_EN_CTRL_PREG 0x124
#define SIERRA_DEQ_DFETAP_CTRL_PREG 0x128
#define SIERRA_DEQ_DFETAP0 0x129
#define SIERRA_DEQ_DFETAP1 0x12B
@@ -157,6 +175,7 @@
#define SIERRA_DEQ_TAU_CTRL2_PREG 0x151
#define SIERRA_DEQ_TAU_CTRL3_PREG 0x152
#define SIERRA_DEQ_OPENEYE_CTRL_PREG 0x158
+#define SIERRA_DEQ_CONCUR_EPIOFFSET_MODE_PREG 0x159
#define SIERRA_DEQ_PICTRL_PREG 0x161
#define SIERRA_CPICAL_TMRVAL_MODE1_PREG 0x170
#define SIERRA_CPICAL_TMRVAL_MODE0_PREG 0x171
@@ -165,6 +184,7 @@
#define SIERRA_CPI_RESBIAS_BIN_PREG 0x17E
#define SIERRA_CPI_TRIM_PREG 0x17F
#define SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG 0x183
+#define SIERRA_CPICAL_RES_STARTCODE_MODE01_PREG 0x184
#define SIERRA_EPI_CTRL_PREG 0x187
#define SIERRA_LFPSDET_SUPPORT_PREG 0x188
#define SIERRA_LFPSFILT_NS_PREG 0x18A
@@ -176,6 +196,7 @@
#define SIERRA_RXBUFFER_CTLECTRL_PREG 0x19E
#define SIERRA_RXBUFFER_RCDFECTRL_PREG 0x19F
#define SIERRA_RXBUFFER_DFECTRL_PREG 0x1A0
+#define SIERRA_LN_SPARE_REG_PREG 0x1B0
#define SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG 0x14F
#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
@@ -2402,6 +2423,77 @@ static struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = {
.num_regs = ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
};
+/* SGMII PHY common configuration */
+static const struct cdns_reg_pairs sgmii_pma_cmn_vals[] = {
+ {0x0180, SIERRA_SDOSCCAL_CLK_CNT_PREG},
+ {0x6000, SIERRA_CMN_REFRCV_PREG},
+ {0x0031, SIERRA_CMN_RESCAL_CTRLA_PREG},
+ {0x001C, SIERRA_CMN_PLLLC_FBDIV_INT_MODE0_PREG},
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_LOCKSEARCH_PREG},
+ {0x8103, SIERRA_CMN_PLLLC_CLK0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x0027, SIERRA_CMN_PLLCSM_PLLEN_TMR_PREG},
+ {0x0062, SIERRA_CMN_PLLCSM_PLLPRE_TMR_PREG},
+ {0x0800, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_INIT_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_ITERTMR_PREG},
+ {0x0020, SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG},
+ {0x0013, SIERRA_CMN_PLLLC_DCOCAL_CTRL_PREG},
+ {0x0013, SIERRA_CMN_PLLLC1_DCOCAL_CTRL_PREG},
+};
+
+static struct cdns_sierra_vals sgmii_cmn_vals = {
+ .reg_pairs = sgmii_pma_cmn_vals,
+ .num_regs = ARRAY_SIZE(sgmii_pma_cmn_vals),
+};
+
+/* SGMII PHY lane configuration */
+static const struct cdns_reg_pairs sgmii_ln_regs[] = {
+ {0x691E, SIERRA_DET_STANDEC_D_PREG},
+ {0x0FFE, SIERRA_PSC_RX_A0_PREG},
+ {0x0104, SIERRA_PLLCTRL_FBDIV_MODE01_PREG},
+ {0x0013, SIERRA_PLLCTRL_SUBRATE_PREG},
+ {0x0106, SIERRA_PLLCTRL_GEN_D_PREG},
+ {0x5234, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
+ {0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
+ {0x00AB, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x3C0E, SIERRA_CREQ_CCLKDET_MODE01_PREG},
+ {0x3220, SIERRA_CREQ_FSMCLK_SEL_PREG},
+ {0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
+ {0x6320, SIERRA_DEQ_CONCUR_EPIOFFSET_MODE_PREG},
+ {0x0000, SIERRA_CPI_OUTBUF_RATESEL_PREG},
+ {0x15A2, SIERRA_LN_SPARE_REG_PREG},
+ {0x7900, SIERRA_DEQ_BLK_TAU_CTRL1_PREG},
+ {0x2202, SIERRA_DEQ_BLK_TAU_CTRL4_PREG},
+ {0x2206, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0005, SIERRA_LANE_TX_RECEIVER_DETECT_PREG},
+ {0x8001, SIERRA_CREQ_SPARE_PREG},
+ {0x0000, SIERRA_DEQ_CONCUR_CTRL1_PREG},
+ {0xD004, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x0101, SIERRA_DEQ_GLUT9},
+ {0x0101, SIERRA_DEQ_GLUT10},
+ {0x0101, SIERRA_DEQ_GLUT11},
+ {0x0101, SIERRA_DEQ_GLUT12},
+ {0x0000, SIERRA_DEQ_GLUT13},
+ {0x0000, SIERRA_DEQ_GLUT16},
+ {0x0000, SIERRA_POSTPRECUR_EN_CEPH_CTRL_PREG},
+ {0x0000, SIERRA_TAU_EN_CEPH2TO0_PREG},
+ {0x0003, SIERRA_TAU_EN_CEPH5TO3_PREG},
+ {0x0101, SIERRA_DEQ_ALUT8},
+ {0x0101, SIERRA_DEQ_ALUT9},
+ {0x0100, SIERRA_DEQ_ALUT10},
+ {0x0000, SIERRA_OEPH_EN_CTRL_PREG},
+ {0x5425, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x7458, SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG},
+ {0x321F, SIERRA_CPICAL_RES_STARTCODE_MODE01_PREG},
+};
+
+static struct cdns_sierra_vals sgmii_pma_ln_vals = {
+ .reg_pairs = sgmii_ln_regs,
+ .num_regs = ARRAY_SIZE(sgmii_ln_regs),
+};
+
static const struct cdns_sierra_data cdns_map_sierra = {
.id_value = SIERRA_MACRO_ID,
.block_offset_shift = 0x2,
@@ -2449,6 +2541,9 @@ static const struct cdns_sierra_data cdns_map_sierra = {
},
},
[TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sgmii_cmn_vals,
+ },
[TYPE_PCIE] = {
[NO_SSC] = &sgmii_100_no_ssc_plllc1_opt3_cmn_vals,
[EXTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_cmn_vals,
@@ -2487,6 +2582,9 @@ static const struct cdns_sierra_data cdns_map_sierra = {
},
},
[TYPE_SGMII] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &sgmii_pma_ln_vals,
+ },
[TYPE_PCIE] = {
[NO_SSC] = &sgmii_100_no_ssc_plllc1_opt3_ln_vals,
[EXTERNAL_SSC] = &sgmii_100_no_ssc_plllc1_opt3_ln_vals,
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
index 37b6b5c05be8..a75c96385c57 100644
--- a/drivers/phy/cadence/phy-cadence-torrent.c
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -17,8 +17,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -27,14 +25,11 @@
#define REF_CLK_19_2MHZ 19200000
#define REF_CLK_25MHZ 25000000
#define REF_CLK_100MHZ 100000000
+#define REF_CLK_156_25MHZ 156250000
#define MAX_NUM_LANES 4
#define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */
-#define NUM_SSC_MODE 3
-#define NUM_REF_CLK 3
-#define NUM_PHY_TYPE 6
-
#define POLL_TIMEOUT_US 5000
#define PLL_LOCK_TIMEOUT 100000
@@ -106,6 +101,7 @@
#define CMN_PLL0_HIGH_THR_M0 0x0093U
#define CMN_PLL0_DSM_DIAG_M0 0x0094U
#define CMN_PLL0_DSM_FBH_OVRD_M0 0x0095U
+#define CMN_PLL0_DSM_FBL_OVRD_M0 0x0096U
#define CMN_PLL0_SS_CTRL1_M0 0x0098U
#define CMN_PLL0_SS_CTRL2_M0 0x0099U
#define CMN_PLL0_SS_CTRL3_M0 0x009AU
@@ -196,6 +192,10 @@
#define RX_PSC_A2 0x0002U
#define RX_PSC_A3 0x0003U
#define RX_PSC_CAL 0x0006U
+#define RX_SDCAL0_INIT_TMR 0x0044U
+#define RX_SDCAL0_ITER_TMR 0x0045U
+#define RX_SDCAL1_INIT_TMR 0x004CU
+#define RX_SDCAL1_ITER_TMR 0x004DU
#define RX_CDRLF_CNFG 0x0080U
#define RX_CDRLF_CNFG3 0x0082U
#define RX_SIGDET_HL_FILT_TMR 0x0090U
@@ -294,20 +294,49 @@ enum cdns_torrent_phy_type {
TYPE_SGMII,
TYPE_QSGMII,
TYPE_USB,
+ TYPE_USXGMII,
};
enum cdns_torrent_ref_clk {
CLK_19_2_MHZ,
CLK_25_MHZ,
- CLK_100_MHZ
+ CLK_100_MHZ,
+ CLK_156_25_MHZ,
+ CLK_ANY,
};
enum cdns_torrent_ssc_mode {
NO_SSC,
EXTERNAL_SSC,
- INTERNAL_SSC
+ INTERNAL_SSC,
+ ANY_SSC,
};
+/*
+ * Unique key id for vals table entry
+ * REFCLK0_RATE | REFCLK1_RATE | LINK0_TYPE | LINK1_TYPE | SSC_TYPE
+ */
+#define REFCLK0_SHIFT 12
+#define REFCLK0_MASK GENMASK(14, 12)
+#define REFCLK1_SHIFT 9
+#define REFCLK1_MASK GENMASK(11, 9)
+#define LINK0_SHIFT 6
+#define LINK0_MASK GENMASK(8, 6)
+#define LINK1_SHIFT 3
+#define LINK1_MASK GENMASK(5, 3)
+#define SSC_SHIFT 0
+#define SSC_MASK GENMASK(2, 0)
+
+#define CDNS_TORRENT_KEY(refclk0, refclk1, link0, link1, ssc) \
+ ((((refclk0) << REFCLK0_SHIFT) & REFCLK0_MASK) | \
+ (((refclk1) << REFCLK1_SHIFT) & REFCLK1_MASK) | \
+ (((link0) << LINK0_SHIFT) & LINK0_MASK) | \
+ (((link1) << LINK1_SHIFT) & LINK1_MASK) | \
+ (((ssc) << SSC_SHIFT) & SSC_MASK))
+
+#define CDNS_TORRENT_KEY_ANYCLK(link0, link1) \
+ CDNS_TORRENT_KEY(CLK_ANY, CLK_ANY, \
+ (link0), (link1), ANY_SSC)
+
struct cdns_torrent_inst {
struct phy *phy;
u32 mlane;
@@ -394,21 +423,26 @@ struct cdns_torrent_vals {
u32 num_regs;
};
+struct cdns_torrent_vals_entry {
+ u32 key;
+ struct cdns_torrent_vals *vals;
+};
+
+struct cdns_torrent_vals_table {
+ struct cdns_torrent_vals_entry *entries;
+ u32 num_entries;
+};
+
struct cdns_torrent_data {
u8 block_offset_shift;
u8 reg_offset_shift;
- struct cdns_torrent_vals *link_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_torrent_vals *xcvr_diag_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_torrent_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
- [NUM_SSC_MODE];
- struct cdns_torrent_vals *cmn_vals[NUM_REF_CLK][NUM_PHY_TYPE]
- [NUM_PHY_TYPE][NUM_SSC_MODE];
- struct cdns_torrent_vals *tx_ln_vals[NUM_REF_CLK][NUM_PHY_TYPE]
- [NUM_PHY_TYPE][NUM_SSC_MODE];
- struct cdns_torrent_vals *rx_ln_vals[NUM_REF_CLK][NUM_PHY_TYPE]
- [NUM_PHY_TYPE][NUM_SSC_MODE];
+ struct cdns_torrent_vals_table link_cmn_vals_tbl;
+ struct cdns_torrent_vals_table xcvr_diag_vals_tbl;
+ struct cdns_torrent_vals_table pcs_cmn_vals_tbl;
+ struct cdns_torrent_vals_table phy_pma_cmn_vals_tbl;
+ struct cdns_torrent_vals_table cmn_vals_tbl;
+ struct cdns_torrent_vals_table tx_ln_vals_tbl;
+ struct cdns_torrent_vals_table rx_ln_vals_tbl;
};
struct cdns_regmap_cdb_context {
@@ -417,6 +451,24 @@ struct cdns_regmap_cdb_context {
u8 reg_offset_shift;
};
+static struct cdns_torrent_vals *cdns_torrent_get_tbl_vals(const struct cdns_torrent_vals_table *tbl,
+ enum cdns_torrent_ref_clk refclk0,
+ enum cdns_torrent_ref_clk refclk1,
+ enum cdns_torrent_phy_type link0,
+ enum cdns_torrent_phy_type link1,
+ enum cdns_torrent_ssc_mode ssc)
+{
+ int i;
+ u32 key = CDNS_TORRENT_KEY(refclk0, refclk1, link0, link1, ssc);
+
+ for (i = 0; i < tbl->num_entries; i++) {
+ if (tbl->entries[i].key == key)
+ return tbl->entries[i].vals;
+ }
+
+ return NULL;
+}
+
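
cdns_torrent_get_tbl_vals() above replaces the old multi-dimensional lookup arrays with flat tables keyed by CDNS_TORRENT_KEY(), which packs both reference clocks, both link types and the SSC mode into 3-bit fields. A standalone sketch of the packing and the linear scan follows; the enum orderings and sample entries are illustrative, only the field layout mirrors the macro.

  #include <stdint.h>
  #include <stdio.h>

  /* Illustrative enums; the driver's actual orderings may differ. */
  enum ref_clk { CLK_19_2_MHZ, CLK_25_MHZ, CLK_100_MHZ, CLK_156_25_MHZ, CLK_ANY };
  enum phy_type { TYPE_NONE, TYPE_DP, TYPE_PCIE, TYPE_SGMII, TYPE_QSGMII,
                  TYPE_USB, TYPE_USXGMII };
  enum ssc_mode { NO_SSC, EXTERNAL_SSC, INTERNAL_SSC, ANY_SSC };

  /* REFCLK0 | REFCLK1 | LINK0 | LINK1 | SSC, three bits per field. */
  static uint32_t make_key(enum ref_clk r0, enum ref_clk r1,
                           enum phy_type l0, enum phy_type l1,
                           enum ssc_mode ssc)
  {
          return ((uint32_t)r0 << 12) | ((uint32_t)r1 << 9) |
                 ((uint32_t)l0 << 6) | ((uint32_t)l1 << 3) | (uint32_t)ssc;
  }

  struct entry { uint32_t key; const char *vals; };

  int main(void)
  {
          /* Tiny stand-in for one of the *_vals_entries[] tables. */
          const struct entry table[] = {
                  { make_key(CLK_ANY, CLK_ANY, TYPE_USB, TYPE_NONE, ANY_SSC),
                    "sl_usb_link_cmn_vals" },
                  { make_key(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC),
                    "pcie_100_no_ssc_cmn_vals" },
          };
          uint32_t key = make_key(CLK_100_MHZ, CLK_100_MHZ,
                                  TYPE_PCIE, TYPE_SGMII, NO_SSC);
          unsigned int i;

          /* Same linear scan as cdns_torrent_get_tbl_vals(). */
          for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                  if (table[i].key == key)
                          printf("matched %s (key 0x%04x)\n",
                                 table[i].vals, (unsigned int)key);
          return 0;
  }
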
static int cdns_regmap_write(void *context, unsigned int reg, unsigned int val)
{
struct cdns_regmap_cdb_context *ctx = context;
@@ -644,6 +696,8 @@ static const char *cdns_torrent_get_phy_type(enum cdns_torrent_phy_type phy_type
return "QSGMII";
case TYPE_USB:
return "USB";
+ case TYPE_USXGMII:
+ return "USXGMII";
default:
return "None";
}
@@ -2244,6 +2298,7 @@ static int cdns_torrent_phy_init(struct phy *phy)
struct cdns_torrent_inst *inst = phy_get_drvdata(phy);
enum cdns_torrent_phy_type phy_type = inst->phy_type;
enum cdns_torrent_ssc_mode ssc = inst->ssc_mode;
+ struct cdns_torrent_vals *phy_pma_cmn_vals;
struct cdns_torrent_vals *pcs_cmn_vals;
struct cdns_reg_pairs *reg_pairs;
struct regmap *regmap;
@@ -2258,13 +2313,16 @@ static int cdns_torrent_phy_init(struct phy *phy)
/**
* Spread spectrum generation is not required or supported
- * for SGMII/QSGMII
+ * for SGMII/QSGMII/USXGMII
*/
- if (phy_type == TYPE_SGMII || phy_type == TYPE_QSGMII)
+ if (phy_type == TYPE_SGMII || phy_type == TYPE_QSGMII || phy_type == TYPE_USXGMII)
ssc = NO_SSC;
/* PHY configuration specific registers for single link */
- link_cmn_vals = init_data->link_cmn_vals[phy_type][TYPE_NONE][ssc];
+ link_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_type, TYPE_NONE,
+ ANY_SSC);
if (link_cmn_vals) {
reg_pairs = link_cmn_vals->reg_pairs;
num_regs = link_cmn_vals->num_regs;
@@ -2281,7 +2339,10 @@ static int cdns_torrent_phy_init(struct phy *phy)
reg_pairs[i].val);
}
- xcvr_diag_vals = init_data->xcvr_diag_vals[phy_type][TYPE_NONE][ssc];
+ xcvr_diag_vals = cdns_torrent_get_tbl_vals(&init_data->xcvr_diag_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_type, TYPE_NONE,
+ ANY_SSC);
if (xcvr_diag_vals) {
reg_pairs = xcvr_diag_vals->reg_pairs;
num_regs = xcvr_diag_vals->num_regs;
@@ -2294,7 +2355,10 @@ static int cdns_torrent_phy_init(struct phy *phy)
}
/* PHY PCS common registers configurations */
- pcs_cmn_vals = init_data->pcs_cmn_vals[phy_type][TYPE_NONE][ssc];
+ pcs_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->pcs_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_type, TYPE_NONE,
+ ANY_SSC);
if (pcs_cmn_vals) {
reg_pairs = pcs_cmn_vals->reg_pairs;
num_regs = pcs_cmn_vals->num_regs;
@@ -2304,8 +2368,25 @@ static int cdns_torrent_phy_init(struct phy *phy)
reg_pairs[i].val);
}
+ /* PHY PMA common registers configurations */
+ phy_pma_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->phy_pma_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_type, TYPE_NONE,
+ ANY_SSC);
+ if (phy_pma_cmn_vals) {
+ reg_pairs = phy_pma_cmn_vals->reg_pairs;
+ num_regs = phy_pma_cmn_vals->num_regs;
+ regmap = cdns_phy->regmap_phy_pma_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off,
+ reg_pairs[i].val);
+ }
+
/* PMA common registers configurations */
- cmn_vals = init_data->cmn_vals[ref_clk][phy_type][TYPE_NONE][ssc];
+ cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl,
+ ref_clk, ref_clk,
+ phy_type, TYPE_NONE,
+ ssc);
if (cmn_vals) {
reg_pairs = cmn_vals->reg_pairs;
num_regs = cmn_vals->num_regs;
@@ -2316,7 +2397,10 @@ static int cdns_torrent_phy_init(struct phy *phy)
}
/* PMA TX lane registers configurations */
- tx_ln_vals = init_data->tx_ln_vals[ref_clk][phy_type][TYPE_NONE][ssc];
+ tx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl,
+ ref_clk, ref_clk,
+ phy_type, TYPE_NONE,
+ ssc);
if (tx_ln_vals) {
reg_pairs = tx_ln_vals->reg_pairs;
num_regs = tx_ln_vals->num_regs;
@@ -2329,7 +2413,10 @@ static int cdns_torrent_phy_init(struct phy *phy)
}
/* PMA RX lane registers configurations */
- rx_ln_vals = init_data->rx_ln_vals[ref_clk][phy_type][TYPE_NONE][ssc];
+ rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl,
+ ref_clk, ref_clk,
+ phy_type, TYPE_NONE,
+ ssc);
if (rx_ln_vals) {
reg_pairs = rx_ln_vals->reg_pairs;
num_regs = rx_ln_vals->num_regs;
@@ -2418,7 +2505,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
* being configured, but these can be different for particular
* PHY type and are per lane.
*/
- link_cmn_vals = init_data->link_cmn_vals[phy_t1][phy_t2][ssc];
+ link_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->link_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
if (link_cmn_vals) {
reg_pairs = link_cmn_vals->reg_pairs;
num_regs = link_cmn_vals->num_regs;
@@ -2436,7 +2525,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
reg_pairs[i].val);
}
- xcvr_diag_vals = init_data->xcvr_diag_vals[phy_t1][phy_t2][ssc];
+ xcvr_diag_vals = cdns_torrent_get_tbl_vals(&init_data->xcvr_diag_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
if (xcvr_diag_vals) {
reg_pairs = xcvr_diag_vals->reg_pairs;
num_regs = xcvr_diag_vals->num_regs;
@@ -2449,7 +2540,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
}
/* PHY PCS common registers configurations */
- pcs_cmn_vals = init_data->pcs_cmn_vals[phy_t1][phy_t2][ssc];
+ pcs_cmn_vals = cdns_torrent_get_tbl_vals(&init_data->pcs_cmn_vals_tbl,
+ CLK_ANY, CLK_ANY,
+ phy_t1, phy_t2, ANY_SSC);
if (pcs_cmn_vals) {
reg_pairs = pcs_cmn_vals->reg_pairs;
num_regs = pcs_cmn_vals->num_regs;
@@ -2460,7 +2553,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
}
/* PMA common registers configurations */
- cmn_vals = init_data->cmn_vals[ref_clk][phy_t1][phy_t2][ssc];
+ cmn_vals = cdns_torrent_get_tbl_vals(&init_data->cmn_vals_tbl,
+ ref_clk, ref_clk,
+ phy_t1, phy_t2, ssc);
if (cmn_vals) {
reg_pairs = cmn_vals->reg_pairs;
num_regs = cmn_vals->num_regs;
@@ -2471,7 +2566,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
}
/* PMA TX lane registers configurations */
- tx_ln_vals = init_data->tx_ln_vals[ref_clk][phy_t1][phy_t2][ssc];
+ tx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->tx_ln_vals_tbl,
+ ref_clk, ref_clk,
+ phy_t1, phy_t2, ssc);
if (tx_ln_vals) {
reg_pairs = tx_ln_vals->reg_pairs;
num_regs = tx_ln_vals->num_regs;
@@ -2484,7 +2581,9 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
}
/* PMA RX lane registers configurations */
- rx_ln_vals = init_data->rx_ln_vals[ref_clk][phy_t1][phy_t2][ssc];
+ rx_ln_vals = cdns_torrent_get_tbl_vals(&init_data->rx_ln_vals_tbl,
+ ref_clk, ref_clk,
+ phy_t1, phy_t2, ssc);
if (rx_ln_vals) {
reg_pairs = rx_ln_vals->reg_pairs;
num_regs = rx_ln_vals->num_regs;
@@ -2617,6 +2716,9 @@ static int cdns_torrent_clk(struct cdns_torrent_phy *cdns_phy)
case REF_CLK_100MHZ:
cdns_phy->ref_clk_rate = CLK_100_MHZ;
break;
+ case REF_CLK_156_25MHZ:
+ cdns_phy->ref_clk_rate = CLK_156_25_MHZ;
+ break;
default:
dev_err(cdns_phy->dev, "Invalid Ref Clock Rate\n");
clk_disable_unprepare(cdns_phy->clk);
@@ -2736,6 +2838,9 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev)
case PHY_TYPE_USB3:
cdns_phy->phys[node].phy_type = TYPE_USB;
break;
+ case PHY_TYPE_USXGMII:
+ cdns_phy->phys[node].phy_type = TYPE_USXGMII;
+ break;
default:
dev_err(dev, "Unsupported protocol\n");
ret = -EINVAL;
@@ -2929,6 +3034,123 @@ static struct cdns_torrent_vals dp_usb_xcvr_diag_ln_vals = {
.num_regs = ARRAY_SIZE(dp_usb_xcvr_diag_ln_regs),
};
+/* TI USXGMII configuration: Enable cmn_refclk_rcv_out_en */
+static struct cdns_reg_pairs ti_usxgmii_phy_pma_cmn_regs[] = {
+ {0x0040, PHY_PMA_CMN_CTRL1},
+};
+
+static struct cdns_torrent_vals ti_usxgmii_phy_pma_cmn_vals = {
+ .reg_pairs = ti_usxgmii_phy_pma_cmn_regs,
+ .num_regs = ARRAY_SIZE(ti_usxgmii_phy_pma_cmn_regs),
+};
+
+/* Single USXGMII link configuration */
+static struct cdns_reg_pairs sl_usxgmii_link_cmn_regs[] = {
+ {0x0000, PHY_PLL_CFG},
+ {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M0}
+};
+
+static struct cdns_reg_pairs sl_usxgmii_xcvr_diag_ln_regs[] = {
+ {0x0000, XCVR_DIAG_HSCLK_SEL},
+ {0x0001, XCVR_DIAG_HSCLK_DIV},
+ {0x0001, XCVR_DIAG_PLLDRC_CTRL}
+};
+
+static struct cdns_torrent_vals sl_usxgmii_link_cmn_vals = {
+ .reg_pairs = sl_usxgmii_link_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_usxgmii_link_cmn_regs),
+};
+
+static struct cdns_torrent_vals sl_usxgmii_xcvr_diag_ln_vals = {
+ .reg_pairs = sl_usxgmii_xcvr_diag_ln_regs,
+ .num_regs = ARRAY_SIZE(sl_usxgmii_xcvr_diag_ln_regs),
+};
+
+/* Single link USXGMII, 156.25 MHz Ref clk, no SSC */
+static struct cdns_reg_pairs sl_usxgmii_156_25_no_ssc_cmn_regs[] = {
+ {0x0014, CMN_SSM_BIAS_TMR},
+ {0x0028, CMN_PLLSM0_PLLPRE_TMR},
+ {0x00A4, CMN_PLLSM0_PLLLOCK_TMR},
+ {0x0028, CMN_PLLSM1_PLLPRE_TMR},
+ {0x00A4, CMN_PLLSM1_PLLLOCK_TMR},
+ {0x0062, CMN_BGCAL_INIT_TMR},
+ {0x0062, CMN_BGCAL_ITER_TMR},
+ {0x0014, CMN_IBCAL_INIT_TMR},
+ {0x0018, CMN_TXPUCAL_INIT_TMR},
+ {0x0005, CMN_TXPUCAL_ITER_TMR},
+ {0x0018, CMN_TXPDCAL_INIT_TMR},
+ {0x0005, CMN_TXPDCAL_ITER_TMR},
+ {0x024A, CMN_RXCAL_INIT_TMR},
+ {0x0005, CMN_RXCAL_ITER_TMR},
+ {0x000B, CMN_SD_CAL_REFTIM_START},
+ {0x0132, CMN_SD_CAL_PLLCNT_START},
+ {0x0028, CMN_PDIAG_PLL1_CP_PADJ_M0},
+ {0x0014, CMN_PLL0_DSM_FBH_OVRD_M0},
+ {0x0014, CMN_PLL1_DSM_FBH_OVRD_M0},
+ {0x0005, CMN_PLL0_DSM_FBL_OVRD_M0},
+ {0x0005, CMN_PLL1_DSM_FBL_OVRD_M0},
+ {0x061B, CMN_PLL0_VCOCAL_INIT_TMR},
+ {0x061B, CMN_PLL1_VCOCAL_INIT_TMR},
+ {0x0019, CMN_PLL0_VCOCAL_ITER_TMR},
+ {0x0019, CMN_PLL1_VCOCAL_ITER_TMR},
+ {0x1354, CMN_PLL0_VCOCAL_REFTIM_START},
+ {0x1354, CMN_PLL1_VCOCAL_REFTIM_START},
+ {0x1354, CMN_PLL0_VCOCAL_PLLCNT_START},
+ {0x1354, CMN_PLL1_VCOCAL_PLLCNT_START},
+ {0x0003, CMN_PLL0_VCOCAL_TCTRL},
+ {0x0003, CMN_PLL1_VCOCAL_TCTRL},
+ {0x0138, CMN_PLL0_LOCK_REFCNT_START},
+ {0x0138, CMN_PLL1_LOCK_REFCNT_START},
+ {0x0138, CMN_PLL0_LOCK_PLLCNT_START},
+ {0x0138, CMN_PLL1_LOCK_PLLCNT_START}
+};
+
+static struct cdns_reg_pairs usxgmii_156_25_no_ssc_tx_ln_regs[] = {
+ {0x07A2, TX_RCVDET_ST_TMR},
+ {0x00F3, TX_PSC_A0},
+ {0x04A2, TX_PSC_A2},
+ {0x04A2, TX_PSC_A3},
+ {0x0000, TX_TXCC_CPOST_MULT_00},
+ {0x0000, XCVR_DIAG_PSC_OVRD}
+};
+
+static struct cdns_reg_pairs usxgmii_156_25_no_ssc_rx_ln_regs[] = {
+ {0x0014, RX_SDCAL0_INIT_TMR},
+ {0x0062, RX_SDCAL0_ITER_TMR},
+ {0x0014, RX_SDCAL1_INIT_TMR},
+ {0x0062, RX_SDCAL1_ITER_TMR},
+ {0x091D, RX_PSC_A0},
+ {0x0900, RX_PSC_A2},
+ {0x0100, RX_PSC_A3},
+ {0x0030, RX_REE_SMGM_CTRL1},
+ {0x03C7, RX_REE_GCSM1_EQENM_PH1},
+ {0x01C7, RX_REE_GCSM1_EQENM_PH2},
+ {0x0000, RX_DIAG_DFE_CTRL},
+ {0x0019, RX_REE_TAP1_CLIP},
+ {0x0019, RX_REE_TAP2TON_CLIP},
+ {0x00B9, RX_DIAG_NQST_CTRL},
+ {0x0C21, RX_DIAG_DFE_AMP_TUNE_2},
+ {0x0002, RX_DIAG_DFE_AMP_TUNE_3},
+ {0x0033, RX_DIAG_PI_RATE},
+ {0x0001, RX_DIAG_ACYA},
+ {0x018C, RX_CDRLF_CNFG}
+};
+
+static struct cdns_torrent_vals sl_usxgmii_156_25_no_ssc_cmn_vals = {
+ .reg_pairs = sl_usxgmii_156_25_no_ssc_cmn_regs,
+ .num_regs = ARRAY_SIZE(sl_usxgmii_156_25_no_ssc_cmn_regs),
+};
+
+static struct cdns_torrent_vals usxgmii_156_25_no_ssc_tx_ln_vals = {
+ .reg_pairs = usxgmii_156_25_no_ssc_tx_ln_regs,
+ .num_regs = ARRAY_SIZE(usxgmii_156_25_no_ssc_tx_ln_regs),
+};
+
+static struct cdns_torrent_vals usxgmii_156_25_no_ssc_rx_ln_vals = {
+ .reg_pairs = usxgmii_156_25_no_ssc_rx_ln_regs,
+ .num_regs = ARRAY_SIZE(usxgmii_156_25_no_ssc_rx_ln_regs),
+};
+
/* PCIe and DP link configuration */
static struct cdns_reg_pairs pcie_dp_link_cmn_regs[] = {
{0x0003, PHY_PLL_CFG},
@@ -3934,1093 +4156,401 @@ static struct cdns_torrent_vals pcie_100_no_ssc_rx_ln_vals = {
.num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_rx_ln_regs),
};
+static struct cdns_torrent_vals_entry link_cmn_vals_entries[] = {
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_NONE), &sl_dp_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_PCIE), &pcie_dp_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &usb_dp_link_cmn_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_NONE), NULL},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_SGMII), &pcie_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_QSGMII), &pcie_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USB), &pcie_usb_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_DP), &pcie_dp_link_cmn_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USB), &usb_sgmii_link_cmn_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_NONE), &sl_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_PCIE), &pcie_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USB), &usb_sgmii_link_cmn_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &sl_usb_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &pcie_usb_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_sgmii_link_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_dp_link_cmn_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &sl_usxgmii_link_cmn_vals},
+};
+
+static struct cdns_torrent_vals_entry xcvr_diag_vals_entries[] = {
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_NONE), &sl_dp_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_PCIE), &dp_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_DP, TYPE_USB), &dp_usb_xcvr_diag_ln_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_NONE), NULL},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_SGMII), &pcie_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_QSGMII), &pcie_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_USB), &pcie_usb_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_PCIE, TYPE_DP), &pcie_dp_xcvr_diag_ln_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_SGMII, TYPE_USB), &sgmii_usb_xcvr_diag_ln_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_NONE), &sl_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_PCIE), &sgmii_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_QSGMII, TYPE_USB), &sgmii_usb_xcvr_diag_ln_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &sl_usb_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_pcie_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_sgmii_xcvr_diag_ln_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_dp_xcvr_diag_ln_vals},
+
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &sl_usxgmii_xcvr_diag_ln_vals},
+};
+
+static struct cdns_torrent_vals_entry pcs_cmn_vals_entries[] = {
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_NONE), &usb_phy_pcs_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_PCIE), &usb_phy_pcs_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_SGMII), &usb_phy_pcs_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_QSGMII), &usb_phy_pcs_cmn_vals},
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USB, TYPE_DP), &usb_phy_pcs_cmn_vals},
+};
+
+static struct cdns_torrent_vals_entry cmn_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &sl_dp_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &sl_pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), &pcie_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), &pcie_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sl_sgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &sl_qsgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &sl_usb_100_no_ssc_cmn_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &sl_usb_100_int_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_cmn_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &sl_usxgmii_156_25_no_ssc_cmn_vals},
+};
+
+static struct cdns_torrent_vals_entry cdns_tx_ln_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
+};
+
+static struct cdns_torrent_vals_entry cdns_rx_ln_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &dp_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), &pcie_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &sgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &qsgmii_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_rx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_rx_ln_vals},
+};
+
static const struct cdns_torrent_data cdns_map_torrent = {
.block_offset_shift = 0x2,
.reg_offset_shift = 0x2,
- .link_cmn_vals = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_dp_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &usb_dp_link_cmn_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &pcie_dp_link_cmn_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_link_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_dp_link_cmn_vals,
- },
- },
+ .link_cmn_vals_tbl = {
+ .entries = link_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(link_cmn_vals_entries),
},
- .xcvr_diag_vals = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &dp_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &dp_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &pcie_dp_xcvr_diag_ln_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_dp_xcvr_diag_ln_vals,
- },
- },
+ .xcvr_diag_vals_tbl = {
+ .entries = xcvr_diag_vals_entries,
+ .num_entries = ARRAY_SIZE(xcvr_diag_vals_entries),
},
- .pcs_cmn_vals = {
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- },
- },
+ .pcs_cmn_vals_tbl = {
+ .entries = pcs_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(pcs_cmn_vals_entries),
},
- .cmn_vals = {
- [CLK_19_2_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_19_2_no_ssc_cmn_vals,
- },
- },
- },
- [CLK_25_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_25_no_ssc_cmn_vals,
- },
- },
- },
- [CLK_100_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &dp_100_no_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = NULL,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_100_no_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_qsgmii_100_no_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- },
- },
- },
+ .cmn_vals_tbl = {
+ .entries = cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(cmn_vals_entries),
},
- .tx_ln_vals = {
- [CLK_19_2_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_19_2_no_ssc_tx_ln_vals,
- },
- },
- },
- [CLK_25_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_25_no_ssc_tx_ln_vals,
- },
- },
- },
- [CLK_100_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &dp_100_no_ssc_tx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &dp_100_no_ssc_tx_ln_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_USB] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_DP] = {
- [NO_SSC] = NULL,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- },
- },
+ .tx_ln_vals_tbl = {
+ .entries = cdns_tx_ln_vals_entries,
+ .num_entries = ARRAY_SIZE(cdns_tx_ln_vals_entries),
},
- .rx_ln_vals = {
- [CLK_19_2_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_19_2_no_ssc_rx_ln_vals,
- },
- },
- },
- [CLK_25_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_25_no_ssc_rx_ln_vals,
- },
- },
- },
- [CLK_100_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &dp_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &dp_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- },
- },
+ .rx_ln_vals_tbl = {
+ .entries = cdns_rx_ln_vals_entries,
+ .num_entries = ARRAY_SIZE(cdns_rx_ln_vals_entries),
},
};
+static struct cdns_torrent_vals_entry j721e_phy_pma_cmn_vals_entries[] = {
+ {CDNS_TORRENT_KEY_ANYCLK(TYPE_USXGMII, TYPE_NONE), &ti_usxgmii_phy_pma_cmn_vals},
+};
+
+static struct cdns_torrent_vals_entry ti_tx_ln_vals_entries[] = {
+ {CDNS_TORRENT_KEY(CLK_19_2_MHZ, CLK_19_2_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_19_2_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_25_MHZ, CLK_25_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_25_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_NONE, NO_SSC), &sl_dp_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_PCIE, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_DP, TYPE_USB, NO_SSC), &dp_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_NONE, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_SGMII, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_QSGMII, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, NO_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, EXTERNAL_SSC), NULL},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_USB, INTERNAL_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_PCIE, TYPE_DP, NO_SSC), NULL},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_NONE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_PCIE, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, NO_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, EXTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_SGMII, TYPE_USB, INTERNAL_SSC), &ti_sgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_NONE, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_PCIE, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, NO_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, EXTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_QSGMII, TYPE_USB, INTERNAL_SSC), &ti_qsgmii_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_NONE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_PCIE, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_SGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, EXTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_QSGMII, INTERNAL_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_100_MHZ, CLK_100_MHZ, TYPE_USB, TYPE_DP, NO_SSC), &usb_100_no_ssc_tx_ln_vals},
+
+ {CDNS_TORRENT_KEY(CLK_156_25_MHZ, CLK_156_25_MHZ, TYPE_USXGMII, TYPE_NONE, NO_SSC), &usxgmii_156_25_no_ssc_tx_ln_vals},
+};
+
static const struct cdns_torrent_data ti_j721e_map_torrent = {
.block_offset_shift = 0x0,
.reg_offset_shift = 0x1,
- .link_cmn_vals = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_dp_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &usb_dp_link_cmn_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &pcie_dp_link_cmn_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_link_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_link_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &pcie_usb_link_cmn_vals,
- [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- [INTERNAL_SSC] = &pcie_usb_link_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_sgmii_link_cmn_vals,
- [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_dp_link_cmn_vals,
- },
- },
+ .link_cmn_vals_tbl = {
+ .entries = link_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(link_cmn_vals_entries),
+ },
+ .xcvr_diag_vals_tbl = {
+ .entries = xcvr_diag_vals_entries,
+ .num_entries = ARRAY_SIZE(xcvr_diag_vals_entries),
},
- .xcvr_diag_vals = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &dp_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &dp_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &pcie_dp_xcvr_diag_ln_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_usb_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_dp_xcvr_diag_ln_vals,
- },
- },
+ .pcs_cmn_vals_tbl = {
+ .entries = pcs_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(pcs_cmn_vals_entries),
},
- .pcs_cmn_vals = {
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_phy_pcs_cmn_vals,
- },
- },
+ .phy_pma_cmn_vals_tbl = {
+ .entries = j721e_phy_pma_cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(j721e_phy_pma_cmn_vals_entries),
},
- .cmn_vals = {
- [CLK_19_2_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_19_2_no_ssc_cmn_vals,
- },
- },
- },
- [CLK_25_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_25_no_ssc_cmn_vals,
- },
- },
- },
- [CLK_100_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &dp_100_no_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sl_dp_100_no_ssc_cmn_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = NULL,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_sgmii_100_no_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_qsgmii_100_no_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [EXTERNAL_SSC] = &sl_usb_100_no_ssc_cmn_vals,
- [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_100_no_ssc_cmn_vals,
- },
- },
- },
+ .cmn_vals_tbl = {
+ .entries = cmn_vals_entries,
+ .num_entries = ARRAY_SIZE(cmn_vals_entries),
},
- .tx_ln_vals = {
- [CLK_19_2_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_19_2_no_ssc_tx_ln_vals,
- },
- },
- },
- [CLK_25_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_25_no_ssc_tx_ln_vals,
- },
- },
- },
- [CLK_100_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &dp_100_no_ssc_tx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &dp_100_no_ssc_tx_ln_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_USB] = {
- [NO_SSC] = NULL,
- [EXTERNAL_SSC] = NULL,
- [INTERNAL_SSC] = NULL,
- },
- [TYPE_DP] = {
- [NO_SSC] = NULL,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &ti_sgmii_100_no_ssc_tx_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &ti_qsgmii_100_no_ssc_tx_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_100_no_ssc_tx_ln_vals,
- },
- },
- },
+ .tx_ln_vals_tbl = {
+ .entries = ti_tx_ln_vals_entries,
+ .num_entries = ARRAY_SIZE(ti_tx_ln_vals_entries),
},
- .rx_ln_vals = {
- [CLK_19_2_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_19_2_no_ssc_rx_ln_vals,
- },
- },
- },
- [CLK_25_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_25_no_ssc_rx_ln_vals,
- },
- },
- },
- [CLK_100_MHZ] = {
- [TYPE_DP] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sl_dp_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &dp_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &dp_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_PCIE] = {
- [TYPE_NONE] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_SGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_QSGMII] = {
- [TYPE_NONE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- [TYPE_USB] = {
- [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals,
- },
- },
- [TYPE_USB] = {
- [TYPE_NONE] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_PCIE] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_SGMII] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_QSGMII] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- [TYPE_DP] = {
- [NO_SSC] = &usb_100_no_ssc_rx_ln_vals,
- },
- },
- },
+ .rx_ln_vals_tbl = {
+ .entries = cdns_rx_ln_vals_entries,
+ .num_entries = ARRAY_SIZE(cdns_rx_ln_vals_entries),
},
};
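For context on the conversion above: the dense multidimensional lookup arrays indexed by [refclk][type][type][ssc] are replaced with sparse tables of keyed entries, where each key packs the reference-clock pair, the two link types sharing the PHY, and the SSC mode; unsupported combinations simply have no entry. A minimal sketch of how such a table could be searched is given below. The structure layout, helper name, and key encoding are illustrative assumptions, not necessarily the driver's actual definitions.

/* Illustrative sketch only: sparse keyed table lookup (assumed layout). */
struct cdns_torrent_vals_entry {
	u32 key;				/* packed (refclk0, refclk1, type0, type1, ssc) */
	struct cdns_torrent_vals *vals;		/* register sequence; NULL = nothing to program */
};

struct cdns_torrent_vals_table {
	struct cdns_torrent_vals_entry *entries;
	u32 num_entries;
};

/* Hypothetical helper: linear scan, returns NULL when no entry matches. */
static struct cdns_torrent_vals *
cdns_torrent_get_tbl_vals(const struct cdns_torrent_vals_table *tbl, u32 key)
{
	u32 i;

	for (i = 0; i < tbl->num_entries; i++)
		if (tbl->entries[i].key == key)
			return tbl->entries[i].vals;

	return NULL;
}

A miss (NULL) presumably behaves like the NULL slots in the old arrays: the caller skips programming that block for the unsupported combination.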
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
index d4c92498ad1e..b700f52b7b67 100644
--- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -11,7 +11,7 @@
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
index 88826ceb72f8..0b9a59d5b8f0 100644
--- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
+++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c
@@ -6,7 +6,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -394,7 +394,7 @@ static int imx8mq_usb_phy_probe(struct platform_device *pdev)
imx_phy->vbus = devm_regulator_get(dev, "vbus");
if (IS_ERR(imx_phy->vbus))
- return PTR_ERR(imx_phy->vbus);
+ return dev_err_probe(dev, PTR_ERR(imx_phy->vbus), "failed to get vbus\n");
phy_set_drvdata(imx_phy->phy, imx_phy);
diff --git a/drivers/phy/freescale/phy-fsl-lynx-28g.c b/drivers/phy/freescale/phy-fsl-lynx-28g.c
index 569f12af2aaf..4f036c77284e 100644
--- a/drivers/phy/freescale/phy-fsl-lynx-28g.c
+++ b/drivers/phy/freescale/phy-fsl-lynx-28g.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2021-2022 NXP. */
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/hisilicon/phy-hi3660-usb3.c b/drivers/phy/hisilicon/phy-hi3660-usb3.c
index 84adce9b4277..e2a09d67faed 100644
--- a/drivers/phy/hisilicon/phy-hi3660-usb3.c
+++ b/drivers/phy/hisilicon/phy-hi3660-usb3.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/hisilicon/phy-hi3670-usb3.c b/drivers/phy/hisilicon/phy-hi3670-usb3.c
index b9ffe08abaab..40d3cf128b44 100644
--- a/drivers/phy/hisilicon/phy-hi3670-usb3.c
+++ b/drivers/phy/hisilicon/phy-hi3670-usb3.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/hisilicon/phy-hi6220-usb.c b/drivers/phy/hisilicon/phy-hi6220-usb.c
index e92ba78da4c8..97bd363dfe87 100644
--- a/drivers/phy/hisilicon/phy-hi6220-usb.c
+++ b/drivers/phy/hisilicon/phy-hi6220-usb.c
@@ -5,6 +5,7 @@
*/
#include <linux/mfd/syscon.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
diff --git a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
index 6ae6d509dfdd..c138cd4807d6 100644
--- a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
+++ b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c
@@ -9,8 +9,9 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#define INNO_PHY_PORT_NUM 2
diff --git a/drivers/phy/hisilicon/phy-histb-combphy.c b/drivers/phy/hisilicon/phy-histb-combphy.c
index f1cb3e4d2add..c44588fd5a53 100644
--- a/drivers/phy/hisilicon/phy-histb-combphy.c
+++ b/drivers/phy/hisilicon/phy-histb-combphy.c
@@ -13,8 +13,9 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <dt-bindings/phy/phy.h>
diff --git a/drivers/phy/hisilicon/phy-hix5hd2-sata.c b/drivers/phy/hisilicon/phy-hix5hd2-sata.c
index b0f99a9ac857..1b26ddb4c8a7 100644
--- a/drivers/phy/hisilicon/phy-hix5hd2-sata.c
+++ b/drivers/phy/hisilicon/phy-hix5hd2-sata.c
@@ -8,6 +8,7 @@
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/ingenic/phy-ingenic-usb.c b/drivers/phy/ingenic/phy-ingenic-usb.c
index 28c28d816484..eb2721f72a4c 100644
--- a/drivers/phy/ingenic/phy-ingenic-usb.c
+++ b/drivers/phy/ingenic/phy-ingenic-usb.c
@@ -11,6 +11,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
index 29d246ea24b4..82f1ffc0b0ad 100644
--- a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
+++ b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>
diff --git a/drivers/phy/marvell/phy-armada38x-comphy.c b/drivers/phy/marvell/phy-armada38x-comphy.c
index 0fe408964334..b7d99861526a 100644
--- a/drivers/phy/marvell/phy-armada38x-comphy.c
+++ b/drivers/phy/marvell/phy-armada38x-comphy.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/marvell/phy-berlin-sata.c b/drivers/phy/marvell/phy-berlin-sata.c
index d70ba9bc42d9..f972d78372ea 100644
--- a/drivers/phy/marvell/phy-berlin-sata.c
+++ b/drivers/phy/marvell/phy-berlin-sata.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/io.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/marvell/phy-mmp3-hsic.c b/drivers/phy/marvell/phy-mmp3-hsic.c
index f2537fdcc3ab..271f1a2258ef 100644
--- a/drivers/phy/marvell/phy-mmp3-hsic.c
+++ b/drivers/phy/marvell/phy-mmp3-hsic.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/marvell/phy-mmp3-usb.c b/drivers/phy/marvell/phy-mmp3-usb.c
index 04c0bada3519..5b71deb08851 100644
--- a/drivers/phy/marvell/phy-mmp3-usb.c
+++ b/drivers/phy/marvell/phy-mmp3-usb.c
@@ -6,6 +6,7 @@
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
index d641b345afa3..24c3371e2bb2 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-comphy.c
@@ -19,6 +19,7 @@
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
index 8834436bc9db..04f4fb4bed70 100644
--- a/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
+++ b/drivers/phy/marvell/phy-mvebu-a3700-utmi.c
@@ -13,7 +13,7 @@
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index 34672e868a1e..b0dd13366598 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -11,6 +11,7 @@
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -1011,8 +1012,7 @@ static int mvebu_comphy_probe(struct platform_device *pdev)
"marvell,system-controller");
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
index aa27c7994610..4922a5f3327d 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-utmi.c
@@ -12,7 +12,7 @@
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/marvell/phy-mvebu-sata.c b/drivers/phy/marvell/phy-mvebu-sata.c
index 51a4646e2933..89a5a2b69d80 100644
--- a/drivers/phy/marvell/phy-mvebu-sata.c
+++ b/drivers/phy/marvell/phy-mvebu-sata.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/phy/phy.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
struct priv {
diff --git a/drivers/phy/marvell/phy-pxa-28nm-usb2.c b/drivers/phy/marvell/phy-pxa-28nm-usb2.c
index 1b2107f80f3a..64afb82cf70e 100644
--- a/drivers/phy/marvell/phy-pxa-28nm-usb2.c
+++ b/drivers/phy/marvell/phy-pxa-28nm-usb2.c
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/err.h>
diff --git a/drivers/phy/marvell/phy-pxa-usb.c b/drivers/phy/marvell/phy-pxa-usb.c
index ffe889893ff4..6c98eb9608e9 100644
--- a/drivers/phy/marvell/phy-pxa-usb.c
+++ b/drivers/phy/marvell/phy-pxa-usb.c
@@ -296,7 +296,7 @@ static int pxa_usb_phy_probe(struct platform_device *pdev)
of_id = of_match_node(pxa_usb_phy_of_match, dev->of_node);
if (of_id)
- pxa_usb_phy->version = (enum pxa_usb_phy_version)of_id->data;
+ pxa_usb_phy->version = (uintptr_t)of_id->data;
else
pxa_usb_phy->version = PXA_USB_PHY_MMP2;
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.h b/drivers/phy/mediatek/phy-mtk-hdmi.h
index fc2ad6a0527f..71c02d043485 100644
--- a/drivers/phy/mediatek/phy-mtk-hdmi.h
+++ b/drivers/phy/mediatek/phy-mtk-hdmi.h
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/types.h>
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
index 526c05a4af5e..065ea626093a 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
@@ -36,7 +36,7 @@ static int mtk_mipi_tx_power_on(struct phy *phy)
int ret;
/* Power up core and enable PLL */
- ret = clk_prepare_enable(mipi_tx->pll);
+ ret = clk_prepare_enable(mipi_tx->pll_hw.clk);
if (ret < 0)
return ret;
@@ -53,7 +53,7 @@ static int mtk_mipi_tx_power_off(struct phy *phy)
mipi_tx->driver_data->mipi_tx_disable_signal(phy);
/* Disable PLL and power down core */
- clk_disable_unprepare(mipi_tx->pll);
+ clk_disable_unprepare(mipi_tx->pll_hw.clk);
return 0;
}
@@ -158,9 +158,9 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
clk_init.ops = mipi_tx->driver_data->mipi_tx_clk_ops;
mipi_tx->pll_hw.init = &clk_init;
- mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw);
- if (IS_ERR(mipi_tx->pll))
- return dev_err_probe(dev, PTR_ERR(mipi_tx->pll), "Failed to register PLL\n");
+ ret = devm_clk_hw_register(dev, &mipi_tx->pll_hw);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register PLL\n");
phy = devm_phy_create(dev, NULL, &mtk_mipi_tx_ops);
if (IS_ERR(phy))
@@ -176,29 +176,19 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
mtk_mipi_tx_get_calibration_datal(mipi_tx);
- return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
- mipi_tx->pll);
-}
-
-static void mtk_mipi_tx_remove(struct platform_device *pdev)
-{
- of_clk_del_provider(pdev->dev.of_node);
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &mipi_tx->pll_hw);
}
static const struct of_device_id mtk_mipi_tx_match[] = {
- { .compatible = "mediatek,mt2701-mipi-tx",
- .data = &mt2701_mipitx_data },
- { .compatible = "mediatek,mt8173-mipi-tx",
- .data = &mt8173_mipitx_data },
- { .compatible = "mediatek,mt8183-mipi-tx",
- .data = &mt8183_mipitx_data },
- { },
+ { .compatible = "mediatek,mt2701-mipi-tx", .data = &mt2701_mipitx_data },
+ { .compatible = "mediatek,mt8173-mipi-tx", .data = &mt8173_mipitx_data },
+ { .compatible = "mediatek,mt8183-mipi-tx", .data = &mt8183_mipitx_data },
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_mipi_tx_match);
static struct platform_driver mtk_mipi_tx_driver = {
.probe = mtk_mipi_tx_probe,
- .remove_new = mtk_mipi_tx_remove,
.driver = {
.name = "mediatek-mipi-tx",
.of_match_table = mtk_mipi_tx_match,
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.h b/drivers/phy/mediatek/phy-mtk-mipi-dsi.h
index 47b60b1a7226..5d4876f1dc95 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.h
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.h
@@ -12,7 +12,6 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/slab.h>
@@ -32,7 +31,6 @@ struct mtk_mipi_tx {
u32 rt_code[5];
const struct mtk_mipitx_data *driver_data;
struct clk_hw pll_hw;
- struct clk *pll;
};
struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw);
diff --git a/drivers/phy/mediatek/phy-mtk-pcie.c b/drivers/phy/mediatek/phy-mtk-pcie.c
index 25dbd6e35722..a2f69d6c72f0 100644
--- a/drivers/phy/mediatek/phy-mtk-pcie.c
+++ b/drivers/phy/mediatek/phy-mtk-pcie.c
@@ -7,7 +7,7 @@
#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index 0d110e50bbfd..05eab9014132 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -13,8 +13,8 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/mediatek/phy-mtk-ufs.c b/drivers/phy/mediatek/phy-mtk-ufs.c
index fc19e0fa8ed5..0cb5a25b1b7a 100644
--- a/drivers/phy/mediatek/phy-mtk-ufs.c
+++ b/drivers/phy/mediatek/phy-mtk-ufs.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c
index 5487b9dd1ead..840b7f8a31c5 100644
--- a/drivers/phy/phy-can-transceiver.c
+++ b/drivers/phy/phy-can-transceiver.c
@@ -5,6 +5,7 @@
* Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com
*
*/
+#include <linux/of.h>
#include<linux/phy/phy.h>
#include<linux/platform_device.h>
#include<linux/module.h>
diff --git a/drivers/phy/phy-xgene.c b/drivers/phy/phy-xgene.c
index d0f4546648f0..1f0f908323f0 100644
--- a/drivers/phy/phy-xgene.c
+++ b/drivers/phy/phy-xgene.c
@@ -39,6 +39,7 @@
* Currently, this driver only supports Gen3 SATA mode with external clock.
*/
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/delay.h>
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index 97ca5952e34e..d891058b7c39 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -102,6 +102,16 @@ config PHY_QCOM_QMP_USB
Enable this to support the QMP USB PHY transceiver that is used
with USB3 controllers on Qualcomm chips.
+config PHY_QCOM_QMP_USB_LEGACY
+ tristate "Qualcomm QMP legacy USB PHY Driver"
+ select GENERIC_PHY
+ default n
+ help
+ Enable this legacy driver to support the QMP USB+DisplayPort Combo
+ PHY transceivers working only in USB3 mode on Qualcomm chips. This
+ driver exists only for compatibility with older device trees;
+ existing users have been migrated to the PHY_QCOM_QMP_COMBO driver.
+
endif # PHY_QCOM_QMP
config PHY_QCOM_QUSB2
@@ -133,6 +143,17 @@ config PHY_QCOM_EUSB2_REPEATER
PMICs. The repeater is paired with a Synopsys eUSB2 Phy
on Qualcomm SOCs.
+config PHY_QCOM_M31_USB
+ tristate "Qualcomm M31 HS PHY driver support"
+ depends on USB && (ARCH_QCOM || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ Enable this to support M31 HS PHY transceivers on Qualcomm chips
+ with DWC3 USB core. It handles PHY initialization, the clock
+ management required after resetting the hardware, and power
+ management. This driver is required even for peripheral-only or
+ host-only mode configurations.
+
config PHY_QCOM_USB_HS
tristate "Qualcomm USB HS PHY module"
depends on USB_ULPI_BUS
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index b030858e0f8d..ffd609ac6233 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o
obj-$(CONFIG_PHY_QCOM_EDP) += phy-qcom-edp.o
obj-$(CONFIG_PHY_QCOM_IPQ4019_USB) += phy-qcom-ipq4019-usb.o
obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
+obj-$(CONFIG_PHY_QCOM_M31_USB) += phy-qcom-m31.o
obj-$(CONFIG_PHY_QCOM_PCIE2) += phy-qcom-pcie2.o
obj-$(CONFIG_PHY_QCOM_QMP_COMBO) += phy-qcom-qmp-combo.o
@@ -11,6 +12,7 @@ obj-$(CONFIG_PHY_QCOM_QMP_PCIE) += phy-qcom-qmp-pcie.o
obj-$(CONFIG_PHY_QCOM_QMP_PCIE_8996) += phy-qcom-qmp-pcie-msm8996.o
obj-$(CONFIG_PHY_QCOM_QMP_UFS) += phy-qcom-qmp-ufs.o
obj-$(CONFIG_PHY_QCOM_QMP_USB) += phy-qcom-qmp-usb.o
+obj-$(CONFIG_PHY_QCOM_QMP_USB_LEGACY) += phy-qcom-qmp-usb-legacy.o
obj-$(CONFIG_PHY_QCOM_QUSB2) += phy-qcom-qusb2.o
obj-$(CONFIG_PHY_QCOM_SNPS_EUSB2) += phy-qcom-snps-eusb2.o
diff --git a/drivers/phy/qualcomm/phy-ath79-usb.c b/drivers/phy/qualcomm/phy-ath79-usb.c
index 09a77e556ece..f8d0199c6e78 100644
--- a/drivers/phy/qualcomm/phy-ath79-usb.c
+++ b/drivers/phy/qualcomm/phy-ath79-usb.c
@@ -5,6 +5,7 @@
* Copyright (C) 2015-2018 Alban Bedel <albeu@free.fr>
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-edp.c b/drivers/phy/qualcomm/phy-qcom-edp.c
index 5c4305df7d53..8e5078304646 100644
--- a/drivers/phy/qualcomm/phy-qcom-edp.c
+++ b/drivers/phy/qualcomm/phy-qcom-edp.c
@@ -13,8 +13,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
index 90f8543ba265..52c275fbb2a1 100644
--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
@@ -8,7 +8,6 @@
#include <linux/regulator/consumer.h>
#include <linux/regmap.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
/* eUSB2 status registers */
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
index d3e7d5e1d1b6..da6f290af722 100644
--- a/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c
@@ -13,8 +13,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_platform.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
index 7bacc527fbad..06392ed7c91b 100644
--- a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
@@ -4,7 +4,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
new file mode 100644
index 000000000000..ed08072ca032
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2014-2023, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#define USB2PHY_PORT_UTMI_CTRL1 0x40
+
+#define USB2PHY_PORT_UTMI_CTRL2 0x44
+ #define UTMI_ULPI_SEL BIT(7)
+ #define UTMI_TEST_MUX_SEL BIT(6)
+
+#define HS_PHY_CTRL_REG 0x10
+ #define UTMI_OTG_VBUS_VALID BIT(20)
+ #define SW_SESSVLD_SEL BIT(28)
+
+#define USB_PHY_UTMI_CTRL0 0x3c
+
+#define USB_PHY_UTMI_CTRL5 0x50
+ #define POR_EN BIT(1)
+
+#define USB_PHY_HS_PHY_CTRL_COMMON0 0x54
+ #define COMMONONN BIT(7)
+ #define FSEL BIT(4)
+ #define RETENABLEN BIT(3)
+ #define FREQ_24MHZ (BIT(6) | BIT(4))
+
+#define USB_PHY_HS_PHY_CTRL2 0x64
+ #define USB2_SUSPEND_N_SEL BIT(3)
+ #define USB2_SUSPEND_N BIT(2)
+ #define USB2_UTMI_CLK_EN BIT(1)
+
+#define USB_PHY_CFG0 0x94
+ #define UTMI_PHY_OVERRIDE_EN BIT(1)
+
+#define USB_PHY_REFCLK_CTRL 0xa0
+ #define CLKCORE BIT(1)
+
+#define USB2PHY_PORT_POWERDOWN 0xa4
+ #define POWER_UP BIT(0)
+ #define POWER_DOWN 0
+
+#define USB_PHY_FSEL_SEL 0xb8
+ #define FREQ_SEL BIT(0)
+
+#define USB2PHY_USB_PHY_M31_XCFGI_1 0xbc
+ #define USB2_0_TX_ENABLE BIT(2)
+
+#define USB2PHY_USB_PHY_M31_XCFGI_4 0xc8
+ #define HSTX_SLEW_RATE_565PS GENMASK(1, 0)
+ #define PLL_CHARGING_PUMP_CURRENT_35UA GENMASK(4, 3)
+ #define ODT_VALUE_38_02_OHM GENMASK(7, 6)
+
+#define USB2PHY_USB_PHY_M31_XCFGI_5 0xcc
+ #define ODT_VALUE_45_02_OHM BIT(2)
+ #define HSTX_PRE_EMPHASIS_LEVEL_0_55MA BIT(0)
+
+#define USB2PHY_USB_PHY_M31_XCFGI_11 0xe4
+ #define XCFG_COARSE_TUNE_NUM BIT(1)
+ #define XCFG_FINE_TUNE_NUM BIT(3)
+
+struct m31_phy_regs {
+ u32 off;
+ u32 val;
+ u32 delay;
+};
+
+struct m31_priv_data {
+ bool ulpi_mode;
+ const struct m31_phy_regs *regs;
+ unsigned int nregs;
+};
+
+static struct m31_phy_regs m31_ipq5332_regs[] = {
+ {
+ USB_PHY_CFG0,
+ UTMI_PHY_OVERRIDE_EN,
+ 0
+ },
+ {
+ USB_PHY_UTMI_CTRL5,
+ POR_EN,
+ 15
+ },
+ {
+ USB_PHY_FSEL_SEL,
+ FREQ_SEL,
+ 0
+ },
+ {
+ USB_PHY_HS_PHY_CTRL_COMMON0,
+ COMMONONN | FREQ_24MHZ | RETENABLEN,
+ 0
+ },
+ {
+ USB_PHY_UTMI_CTRL5,
+ POR_EN,
+ 0
+ },
+ {
+ USB_PHY_HS_PHY_CTRL2,
+ USB2_SUSPEND_N_SEL | USB2_SUSPEND_N | USB2_UTMI_CLK_EN,
+ 0
+ },
+ {
+ USB2PHY_USB_PHY_M31_XCFGI_11,
+ XCFG_COARSE_TUNE_NUM | XCFG_FINE_TUNE_NUM,
+ 0
+ },
+ {
+ USB2PHY_USB_PHY_M31_XCFGI_4,
+ HSTX_SLEW_RATE_565PS | PLL_CHARGING_PUMP_CURRENT_35UA | ODT_VALUE_38_02_OHM,
+ 0
+ },
+ {
+ USB2PHY_USB_PHY_M31_XCFGI_1,
+ USB2_0_TX_ENABLE,
+ 0
+ },
+ {
+ USB2PHY_USB_PHY_M31_XCFGI_5,
+ ODT_VALUE_45_02_OHM | HSTX_PRE_EMPHASIS_LEVEL_0_55MA,
+ 4
+ },
+ {
+ USB_PHY_UTMI_CTRL5,
+ 0x0,
+ 0
+ },
+ {
+ USB_PHY_HS_PHY_CTRL2,
+ USB2_SUSPEND_N | USB2_UTMI_CLK_EN,
+ 0
+ },
+};
+
+struct m31usb_phy {
+ struct phy *phy;
+ void __iomem *base;
+ const struct m31_phy_regs *regs;
+ int nregs;
+
+ struct regulator *vreg;
+ struct clk *clk;
+ struct reset_control *reset;
+
+ bool ulpi_mode;
+};
+
+static int m31usb_phy_init(struct phy *phy)
+{
+ struct m31usb_phy *qphy = phy_get_drvdata(phy);
+ const struct m31_phy_regs *regs = qphy->regs;
+ int i, ret;
+
+ ret = regulator_enable(qphy->vreg);
+ if (ret) {
+ dev_err(&phy->dev, "failed to enable regulator, %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(qphy->clk);
+ if (ret) {
+ regulator_disable(qphy->vreg);
+ dev_err(&phy->dev, "failed to enable cfg ahb clock, %d\n", ret);
+ return ret;
+ }
+
+ /* Perform phy reset */
+ reset_control_assert(qphy->reset);
+ udelay(5);
+ reset_control_deassert(qphy->reset);
+
+ /* configure for ULPI mode if requested */
+ if (qphy->ulpi_mode)
+ writel(0x0, qphy->base + USB2PHY_PORT_UTMI_CTRL2);
+
+ /* Enable the PHY */
+ writel(POWER_UP, qphy->base + USB2PHY_PORT_POWERDOWN);
+
+ /* Apply the PHY register initialization sequence */
+ for (i = 0; i < qphy->nregs; i++) {
+ writel(regs[i].val, qphy->base + regs[i].off);
+ if (regs[i].delay)
+ udelay(regs[i].delay);
+ }
+
+ return 0;
+}
+
+static int m31usb_phy_shutdown(struct phy *phy)
+{
+ struct m31usb_phy *qphy = phy_get_drvdata(phy);
+
+ /* Disable the PHY */
+ writel_relaxed(POWER_DOWN, qphy->base + USB2PHY_PORT_POWERDOWN);
+
+ clk_disable_unprepare(qphy->clk);
+
+ regulator_disable(qphy->vreg);
+
+ return 0;
+}
+
+static const struct phy_ops m31usb_phy_gen_ops = {
+ .power_on = m31usb_phy_init,
+ .power_off = m31usb_phy_shutdown,
+ .owner = THIS_MODULE,
+};
+
+static int m31usb_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ const struct m31_priv_data *data;
+ struct device *dev = &pdev->dev;
+ struct m31usb_phy *qphy;
+
+ qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ qphy->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(qphy->base))
+ return PTR_ERR(qphy->base);
+
+ qphy->reset = devm_reset_control_get_exclusive_by_index(dev, 0);
+ if (IS_ERR(qphy->reset))
+ return PTR_ERR(qphy->reset);
+
+ qphy->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(qphy->clk))
+ return dev_err_probe(dev, PTR_ERR(qphy->clk),
+ "failed to get clk\n");
+
+ data = of_device_get_match_data(dev);
+ qphy->regs = data->regs;
+ qphy->nregs = data->nregs;
+ qphy->ulpi_mode = data->ulpi_mode;
+
+ qphy->phy = devm_phy_create(dev, NULL, &m31usb_phy_gen_ops);
+ if (IS_ERR(qphy->phy))
+ return dev_err_probe(dev, PTR_ERR(qphy->phy),
+ "failed to create phy\n");
+
+ qphy->vreg = devm_regulator_get(dev, "vdda-phy");
+ if (IS_ERR(qphy->vreg))
+ return dev_err_probe(dev, PTR_ERR(qphy->vreg),
+ "failed to get vreg\n");
+
+ phy_set_drvdata(qphy->phy, qphy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (!IS_ERR(phy_provider))
+ dev_info(dev, "Registered M31 USB phy\n");
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct m31_priv_data m31_ipq5332_data = {
+ .ulpi_mode = false,
+ .regs = m31_ipq5332_regs,
+ .nregs = ARRAY_SIZE(m31_ipq5332_regs),
+};
+
+static const struct of_device_id m31usb_phy_id_table[] = {
+ { .compatible = "qcom,ipq5332-usb-hsphy", .data = &m31_ipq5332_data },
+ { },
+};
+MODULE_DEVICE_TABLE(of, m31usb_phy_id_table);
+
+static struct platform_driver m31usb_phy_driver = {
+ .probe = m31usb_phy_probe,
+ .driver = {
+ .name = "qcom-m31usb-phy",
+ .of_match_table = m31usb_phy_id_table,
+ },
+};
+
+module_platform_driver(m31usb_phy_driver);
+
+MODULE_DESCRIPTION("USB2 Qualcomm M31 HSPHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index bebce8c591a3..cbb28afce135 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -106,6 +105,20 @@ enum qphy_reg_layout {
QPHY_PCS_AUTONOMOUS_MODE_CTRL,
QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
QPHY_PCS_POWER_DOWN_CONTROL,
+
+ QPHY_COM_RESETSM_CNTRL,
+ QPHY_COM_C_READY_STATUS,
+ QPHY_COM_CMN_STATUS,
+ QPHY_COM_BIAS_EN_CLKBUFLR_EN,
+
+ QPHY_DP_PHY_STATUS,
+
+ QPHY_TX_TX_POL_INV,
+ QPHY_TX_TX_DRV_LVL,
+ QPHY_TX_TX_EMP_POST1_LVL,
+ QPHY_TX_HIGHZ_DRVR_EN,
+ QPHY_TX_TRANSCEIVER_BIAS_EN,
+
/* Keep last to ensure regs_layout arrays are properly initialized */
QPHY_LAYOUT_SIZE
};
@@ -117,9 +130,22 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V3_PCS_POWER_DOWN_CONTROL,
[QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL,
[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V3_PCS_LFPS_RXTERM_IRQ_CLEAR,
+
+ [QPHY_COM_RESETSM_CNTRL] = QSERDES_V3_COM_RESETSM_CNTRL,
+ [QPHY_COM_C_READY_STATUS] = QSERDES_V3_COM_C_READY_STATUS,
+ [QPHY_COM_CMN_STATUS] = QSERDES_V3_COM_CMN_STATUS,
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V3_DP_PHY_STATUS,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V3_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V3_TX_TX_DRV_LVL,
+ [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V3_TX_TX_EMP_POST1_LVL,
+ [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V3_TX_HIGHZ_DRVR_EN,
+ [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V3_TX_TRANSCEIVER_BIAS_EN,
};
-static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+static const unsigned int qmp_v45_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_SW_RESET] = QPHY_V4_PCS_SW_RESET,
[QPHY_START_CTRL] = QPHY_V4_PCS_START_CONTROL,
[QPHY_PCS_STATUS] = QPHY_V4_PCS_PCS_STATUS1,
@@ -128,6 +154,67 @@ static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
/* In PCS_USB */
[QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL,
[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+
+ [QPHY_COM_RESETSM_CNTRL] = QSERDES_V4_COM_RESETSM_CNTRL,
+ [QPHY_COM_C_READY_STATUS] = QSERDES_V4_COM_C_READY_STATUS,
+ [QPHY_COM_CMN_STATUS] = QSERDES_V4_COM_CMN_STATUS,
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V4_DP_PHY_STATUS,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V4_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V4_TX_TX_DRV_LVL,
+ [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V4_TX_TX_EMP_POST1_LVL,
+ [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V4_TX_HIGHZ_DRVR_EN,
+ [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V4_TX_TRANSCEIVER_BIAS_EN,
+};
+
+static const unsigned int qmp_v5_5nm_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V5_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V5_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V5_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V5_PCS_POWER_DOWN_CONTROL,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+
+ [QPHY_COM_RESETSM_CNTRL] = QSERDES_V5_COM_RESETSM_CNTRL,
+ [QPHY_COM_C_READY_STATUS] = QSERDES_V5_COM_C_READY_STATUS,
+ [QPHY_COM_CMN_STATUS] = QSERDES_V5_COM_CMN_STATUS,
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V5_DP_PHY_STATUS,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V5_5NM_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V5_5NM_TX_TX_DRV_LVL,
+ [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V5_5NM_TX_TX_EMP_POST1_LVL,
+ [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V5_5NM_TX_HIGHZ_DRVR_EN,
+ [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN,
+};
+
+static const unsigned int qmp_v6_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V5_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V5_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V5_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V5_PCS_POWER_DOWN_CONTROL,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+
+ [QPHY_COM_RESETSM_CNTRL] = QSERDES_V6_COM_RESETSM_CNTRL,
+ [QPHY_COM_C_READY_STATUS] = QSERDES_V6_COM_C_READY_STATUS,
+ [QPHY_COM_CMN_STATUS] = QSERDES_V6_COM_CMN_STATUS,
+ [QPHY_COM_BIAS_EN_CLKBUFLR_EN] = QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN,
+
+ [QPHY_DP_PHY_STATUS] = QSERDES_V6_DP_PHY_STATUS,
+
+ [QPHY_TX_TX_POL_INV] = QSERDES_V6_TX_TX_POL_INV,
+ [QPHY_TX_TX_DRV_LVL] = QSERDES_V6_TX_TX_DRV_LVL,
+ [QPHY_TX_TX_EMP_POST1_LVL] = QSERDES_V6_TX_TX_EMP_POST1_LVL,
+ [QPHY_TX_HIGHZ_DRVR_EN] = QSERDES_V6_TX_HIGHZ_DRVR_EN,
+ [QPHY_TX_TRANSCEIVER_BIAS_EN] = QSERDES_V6_TX_TRANSCEIVER_BIAS_EN,
};
static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
@@ -1271,9 +1358,6 @@ struct qmp_phy_cfg {
int (*calibrate_dp_phy)(struct qmp_combo *qmp);
void (*dp_aux_init)(struct qmp_combo *qmp);
- /* clock ids to be requested */
- const char * const *clk_list;
- int num_clks;
/* resets to be requested */
const char * const *reset_list;
int num_resets;
@@ -1315,6 +1399,7 @@ struct qmp_combo {
struct clk *pipe_clk;
struct clk_bulk_data *clks;
+ int num_clks;
struct reset_control_bulk_data *resets;
struct regulator_bulk_data *vregs;
@@ -1350,11 +1435,6 @@ static void qmp_v4_configure_dp_tx(struct qmp_combo *qmp);
static int qmp_v4_configure_dp_phy(struct qmp_combo *qmp);
static int qmp_v4_calibrate_dp_phy(struct qmp_combo *qmp);
-static int qmp_v5_configure_dp_phy(struct qmp_combo *qmp);
-
-static void qmp_v6_dp_aux_init(struct qmp_combo *qmp);
-static int qmp_v6_configure_dp_phy(struct qmp_combo *qmp);
-
static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
{
u32 reg;
@@ -1380,19 +1460,10 @@ static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
}
/* list of clocks required by phy */
-static const char * const qmp_v3_phy_clk_l[] = {
+static const char * const qmp_combo_phy_clk_l[] = {
"aux", "cfg_ahb", "ref", "com_aux",
};
-static const char * const qmp_v4_phy_clk_l[] = {
- "aux", "ref", "com_aux",
-};
-
-/* the primary usb3 phy on sm8250 doesn't have a ref clock */
-static const char * const qmp_v4_sm8250_usbphy_clk_l[] = {
- "aux", "ref_clk_src", "com_aux"
-};
-
/* list of resets */
static const char * const msm8996_usb3phy_reset_l[] = {
"phy", "common",
@@ -1433,6 +1504,8 @@ static const struct qmp_combo_offsets qmp_combo_offsets_v5 = {
};
static const struct qmp_phy_cfg sc7180_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v3,
+
.serdes_tbl = qmp_v3_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
.tx_tbl = qmp_v3_usb3_tx_tbl,
@@ -1466,8 +1539,6 @@ static const struct qmp_phy_cfg sc7180_usb3dpphy_cfg = {
.configure_dp_phy = qmp_v3_configure_dp_phy,
.calibrate_dp_phy = qmp_v3_calibrate_dp_phy,
- .clk_list = qmp_v3_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
.reset_list = sc7180_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -1478,6 +1549,8 @@ static const struct qmp_phy_cfg sc7180_usb3dpphy_cfg = {
};
static const struct qmp_phy_cfg sdm845_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v3,
+
.serdes_tbl = qmp_v3_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
.tx_tbl = qmp_v3_usb3_tx_tbl,
@@ -1511,8 +1584,6 @@ static const struct qmp_phy_cfg sdm845_usb3dpphy_cfg = {
.configure_dp_phy = qmp_v3_configure_dp_phy,
.calibrate_dp_phy = qmp_v3_calibrate_dp_phy,
- .clk_list = qmp_v3_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -1523,6 +1594,8 @@ static const struct qmp_phy_cfg sdm845_usb3dpphy_cfg = {
};
static const struct qmp_phy_cfg sc8180x_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v3,
+
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
.tx_tbl = sm8150_usb3_tx_tbl,
@@ -1558,13 +1631,11 @@ static const struct qmp_phy_cfg sc8180x_usb3dpphy_cfg = {
.configure_dp_phy = qmp_v4_configure_dp_phy,
.calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
+ .regs = qmp_v45_usb3phy_regs_layout,
.pcs_usb_offset = 0x300,
.has_pwrdn_delay = true,
@@ -1603,16 +1674,14 @@ static const struct qmp_phy_cfg sc8280xp_usb43dpphy_cfg = {
.dp_aux_init = qmp_v4_dp_aux_init,
.configure_dp_tx = qmp_v4_configure_dp_tx,
- .configure_dp_phy = qmp_v5_configure_dp_phy,
+ .configure_dp_phy = qmp_v4_configure_dp_phy,
.calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
+ .regs = qmp_v5_5nm_usb3phy_regs_layout,
};
static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = {
@@ -1651,8 +1720,6 @@ static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = {
.configure_dp_phy = qmp_v3_configure_dp_phy,
.calibrate_dp_phy = qmp_v3_calibrate_dp_phy,
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -1661,6 +1728,8 @@ static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = {
};
static const struct qmp_phy_cfg sm8250_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v3,
+
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
.tx_tbl = sm8250_usb3_tx_tbl,
@@ -1696,13 +1765,11 @@ static const struct qmp_phy_cfg sm8250_usb3dpphy_cfg = {
.configure_dp_phy = qmp_v4_configure_dp_phy,
.calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
- .clk_list = qmp_v4_sm8250_usbphy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
+ .regs = qmp_v45_usb3phy_regs_layout,
.pcs_usb_offset = 0x300,
.has_pwrdn_delay = true,
@@ -1746,13 +1813,11 @@ static const struct qmp_phy_cfg sm8350_usb3dpphy_cfg = {
.configure_dp_phy = qmp_v4_configure_dp_phy,
.calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
+ .regs = qmp_v45_usb3phy_regs_layout,
.has_pwrdn_delay = true,
};
@@ -1790,14 +1855,12 @@ static const struct qmp_phy_cfg sm8550_usb3dpphy_cfg = {
.swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2,
.pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
- .dp_aux_init = qmp_v6_dp_aux_init,
+ .dp_aux_init = qmp_v4_dp_aux_init,
.configure_dp_tx = qmp_v4_configure_dp_tx,
- .configure_dp_phy = qmp_v6_configure_dp_phy,
+ .configure_dp_phy = qmp_v4_configure_dp_phy,
.calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
- .regs = qmp_v4_usb3phy_regs_layout,
- .clk_list = qmp_v4_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .regs = qmp_v6_usb3phy_regs_layout,
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -1865,6 +1928,8 @@ static int qmp_combo_dp_serdes_init(struct qmp_combo *qmp)
static void qmp_v3_dp_aux_init(struct qmp_combo *qmp)
{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
@@ -1872,7 +1937,7 @@ static void qmp_v3_dp_aux_init(struct qmp_combo *qmp)
/* Turn on BIAS current for PHY/PLL */
writel(QSERDES_V3_COM_BIAS_EN | QSERDES_V3_COM_BIAS_EN_MUX |
QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL,
- qmp->dp_serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN);
+ qmp->dp_serdes + cfg->regs[QPHY_COM_BIAS_EN_CLKBUFLR_EN]);
writel(DP_PHY_PD_CTL_PSR_PWRDN, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
@@ -1886,7 +1951,7 @@ static void qmp_v3_dp_aux_init(struct qmp_combo *qmp)
QSERDES_V3_COM_BIAS_EN_MUX | QSERDES_V3_COM_CLKBUF_R_EN |
QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL |
QSERDES_V3_COM_CLKBUF_RX_DRIVE_L,
- qmp->dp_serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN);
+ qmp->dp_serdes + cfg->regs[QPHY_COM_BIAS_EN_CLKBUFLR_EN]);
writel(0x00, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG0);
writel(0x13, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG1);
@@ -1906,8 +1971,7 @@ static void qmp_v3_dp_aux_init(struct qmp_combo *qmp)
qmp->dp_dp_phy + QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK);
}
-static int qmp_combo_configure_dp_swing(struct qmp_combo *qmp,
- unsigned int drv_lvl_reg, unsigned int emp_post_reg)
+static int qmp_combo_configure_dp_swing(struct qmp_combo *qmp)
{
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
const struct qmp_phy_cfg *cfg = qmp->cfg;
@@ -1936,10 +2000,10 @@ static int qmp_combo_configure_dp_swing(struct qmp_combo *qmp,
voltage_swing_cfg |= DP_PHY_TXn_TX_DRV_LVL_MUX_EN;
pre_emphasis_cfg |= DP_PHY_TXn_TX_EMP_POST1_LVL_MUX_EN;
- writel(voltage_swing_cfg, qmp->dp_tx + drv_lvl_reg);
- writel(pre_emphasis_cfg, qmp->dp_tx + emp_post_reg);
- writel(voltage_swing_cfg, qmp->dp_tx2 + drv_lvl_reg);
- writel(pre_emphasis_cfg, qmp->dp_tx2 + emp_post_reg);
+ writel(voltage_swing_cfg, qmp->dp_tx + cfg->regs[QPHY_TX_TX_DRV_LVL]);
+ writel(pre_emphasis_cfg, qmp->dp_tx + cfg->regs[QPHY_TX_TX_EMP_POST1_LVL]);
+ writel(voltage_swing_cfg, qmp->dp_tx2 + cfg->regs[QPHY_TX_TX_DRV_LVL]);
+ writel(pre_emphasis_cfg, qmp->dp_tx2 + cfg->regs[QPHY_TX_TX_EMP_POST1_LVL]);
return 0;
}
@@ -1949,8 +2013,7 @@ static void qmp_v3_configure_dp_tx(struct qmp_combo *qmp)
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
u32 bias_en, drvr_en;
- if (qmp_combo_configure_dp_swing(qmp, QSERDES_V3_TX_TX_DRV_LVL,
- QSERDES_V3_TX_TX_EMP_POST1_LVL) < 0)
+ if (qmp_combo_configure_dp_swing(qmp) < 0)
return;
if (dp_opts->lanes == 1) {
@@ -1991,17 +2054,12 @@ static bool qmp_combo_configure_dp_mode(struct qmp_combo *qmp)
return reverse;
}
-static int qmp_v3_configure_dp_phy(struct qmp_combo *qmp)
+static int qmp_combo_configure_dp_clocks(struct qmp_combo *qmp)
{
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
- u32 phy_vco_div, status;
+ u32 phy_vco_div;
unsigned long pixel_freq;
- qmp_combo_configure_dp_mode(qmp);
-
- writel(0x05, qmp->dp_dp_phy + QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL);
- writel(0x05, qmp->dp_dp_phy + QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL);
-
switch (dp_opts->link_rate) {
case 1620:
phy_vco_div = 0x1;
@@ -2023,20 +2081,38 @@ static int qmp_v3_configure_dp_phy(struct qmp_combo *qmp)
/* Other link rates aren't supported */
return -EINVAL;
}
- writel(phy_vco_div, qmp->dp_dp_phy + QSERDES_V3_DP_PHY_VCO_DIV);
+ writel(phy_vco_div, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_VCO_DIV);
clk_set_rate(qmp->dp_link_hw.clk, dp_opts->link_rate * 100000);
clk_set_rate(qmp->dp_pixel_hw.clk, pixel_freq);
+ return 0;
+}
+
+static int qmp_v3_configure_dp_phy(struct qmp_combo *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ u32 status;
+ int ret;
+
+ qmp_combo_configure_dp_mode(qmp);
+
+ writel(0x05, qmp->dp_dp_phy + QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL);
+ writel(0x05, qmp->dp_dp_phy + QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL);
+
+ ret = qmp_combo_configure_dp_clocks(qmp);
+ if (ret)
+ return ret;
+
writel(0x04, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG2);
writel(0x01, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
writel(0x05, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
writel(0x01, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
writel(0x09, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- writel(0x20, qmp->dp_serdes + QSERDES_V3_COM_RESETSM_CNTRL);
+ writel(0x20, qmp->dp_serdes + cfg->regs[QPHY_COM_RESETSM_CNTRL]);
- if (readl_poll_timeout(qmp->dp_serdes + QSERDES_V3_COM_C_READY_STATUS,
+ if (readl_poll_timeout(qmp->dp_serdes + cfg->regs[QPHY_COM_C_READY_STATUS],
status,
((status & BIT(0)) > 0),
500,
@@ -2045,7 +2121,7 @@ static int qmp_v3_configure_dp_phy(struct qmp_combo *qmp)
writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- if (readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V3_DP_PHY_STATUS,
+ if (readl_poll_timeout(qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_STATUS],
status,
((status & BIT(1)) > 0),
500,
@@ -2056,7 +2132,7 @@ static int qmp_v3_configure_dp_phy(struct qmp_combo *qmp)
udelay(2000);
writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- return readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V3_DP_PHY_STATUS,
+ return readl_poll_timeout(qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_STATUS],
status,
((status & BIT(1)) > 0),
500,
@@ -2083,39 +2159,14 @@ static int qmp_v3_calibrate_dp_phy(struct qmp_combo *qmp)
static void qmp_v4_dp_aux_init(struct qmp_combo *qmp)
{
- writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_PSR_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
- DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
- qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
-
- /* Turn on BIAS current for PHY/PLL */
- writel(0x17, qmp->dp_serdes + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN);
-
- writel(0x00, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG0);
- writel(0x13, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG1);
- writel(0xa4, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG2);
- writel(0x00, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG3);
- writel(0x0a, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG4);
- writel(0x26, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG5);
- writel(0x0a, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG6);
- writel(0x03, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG7);
- writel(0xb7, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG8);
- writel(0x03, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG9);
- qmp->dp_aux_cfg = 0;
-
- writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK |
- PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK |
- PHY_AUX_REQ_ERR_MASK,
- qmp->dp_dp_phy + QSERDES_V4_DP_PHY_AUX_INTERRUPT_MASK);
-}
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
-static void qmp_v6_dp_aux_init(struct qmp_combo *qmp)
-{
writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_PSR_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
/* Turn on BIAS current for PHY/PLL */
- writel(0x17, qmp->dp_serdes + QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN);
+ writel(0x17, qmp->dp_serdes + cfg->regs[QPHY_COM_BIAS_EN_CLKBUFLR_EN]);
writel(0x00, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG0);
writel(0x13, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG1);
@@ -2137,26 +2188,23 @@ static void qmp_v6_dp_aux_init(struct qmp_combo *qmp)
static void qmp_v4_configure_dp_tx(struct qmp_combo *qmp)
{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
/* Program default values before writing proper values */
- writel(0x27, qmp->dp_tx + QSERDES_V4_TX_TX_DRV_LVL);
- writel(0x27, qmp->dp_tx2 + QSERDES_V4_TX_TX_DRV_LVL);
+ writel(0x27, qmp->dp_tx + cfg->regs[QPHY_TX_TX_DRV_LVL]);
+ writel(0x27, qmp->dp_tx2 + cfg->regs[QPHY_TX_TX_DRV_LVL]);
- writel(0x20, qmp->dp_tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
- writel(0x20, qmp->dp_tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
+ writel(0x20, qmp->dp_tx + cfg->regs[QPHY_TX_TX_EMP_POST1_LVL]);
+ writel(0x20, qmp->dp_tx2 + cfg->regs[QPHY_TX_TX_EMP_POST1_LVL]);
- qmp_combo_configure_dp_swing(qmp, QSERDES_V4_TX_TX_DRV_LVL,
- QSERDES_V4_TX_TX_EMP_POST1_LVL);
+ qmp_combo_configure_dp_swing(qmp);
}
-static int qmp_v456_configure_dp_phy(struct qmp_combo *qmp,
- unsigned int com_resetm_ctrl_reg,
- unsigned int com_c_ready_status_reg,
- unsigned int com_cmn_status_reg,
- unsigned int dp_phy_status_reg)
+static int qmp_v456_configure_dp_phy(struct qmp_combo *qmp)
{
- const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
- u32 phy_vco_div, status;
- unsigned long pixel_freq;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ u32 status;
+ int ret;
writel(0x0f, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_CFG_1);
@@ -2168,54 +2216,32 @@ static int qmp_v456_configure_dp_phy(struct qmp_combo *qmp,
writel(0x05, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_TX0_TX1_LANE_CTL);
writel(0x05, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_TX2_TX3_LANE_CTL);
- switch (dp_opts->link_rate) {
- case 1620:
- phy_vco_div = 0x1;
- pixel_freq = 1620000000UL / 2;
- break;
- case 2700:
- phy_vco_div = 0x1;
- pixel_freq = 2700000000UL / 2;
- break;
- case 5400:
- phy_vco_div = 0x2;
- pixel_freq = 5400000000UL / 4;
- break;
- case 8100:
- phy_vco_div = 0x0;
- pixel_freq = 8100000000UL / 6;
- break;
- default:
- /* Other link rates aren't supported */
- return -EINVAL;
- }
- writel(phy_vco_div, qmp->dp_dp_phy + QSERDES_V4_DP_PHY_VCO_DIV);
-
- clk_set_rate(qmp->dp_link_hw.clk, dp_opts->link_rate * 100000);
- clk_set_rate(qmp->dp_pixel_hw.clk, pixel_freq);
+ ret = qmp_combo_configure_dp_clocks(qmp);
+ if (ret)
+ return ret;
writel(0x01, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
writel(0x05, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
writel(0x01, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
writel(0x09, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- writel(0x20, qmp->dp_serdes + com_resetm_ctrl_reg);
+ writel(0x20, qmp->dp_serdes + cfg->regs[QPHY_COM_RESETSM_CNTRL]);
- if (readl_poll_timeout(qmp->dp_serdes + com_c_ready_status_reg,
+ if (readl_poll_timeout(qmp->dp_serdes + cfg->regs[QPHY_COM_C_READY_STATUS],
status,
((status & BIT(0)) > 0),
500,
10000))
return -ETIMEDOUT;
- if (readl_poll_timeout(qmp->dp_serdes + com_cmn_status_reg,
+ if (readl_poll_timeout(qmp->dp_serdes + cfg->regs[QPHY_COM_CMN_STATUS],
status,
((status & BIT(0)) > 0),
500,
10000))
return -ETIMEDOUT;
- if (readl_poll_timeout(qmp->dp_serdes + com_cmn_status_reg,
+ if (readl_poll_timeout(qmp->dp_serdes + cfg->regs[QPHY_COM_CMN_STATUS],
status,
((status & BIT(1)) > 0),
500,
@@ -2224,14 +2250,14 @@ static int qmp_v456_configure_dp_phy(struct qmp_combo *qmp,
writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- if (readl_poll_timeout(qmp->dp_dp_phy + dp_phy_status_reg,
+ if (readl_poll_timeout(qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_STATUS],
status,
((status & BIT(0)) > 0),
500,
10000))
return -ETIMEDOUT;
- if (readl_poll_timeout(qmp->dp_dp_phy + dp_phy_status_reg,
+ if (readl_poll_timeout(qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_STATUS],
status,
((status & BIT(1)) > 0),
500,
@@ -2243,16 +2269,14 @@ static int qmp_v456_configure_dp_phy(struct qmp_combo *qmp,
static int qmp_v4_configure_dp_phy(struct qmp_combo *qmp)
{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
bool reverse = (qmp->orientation == TYPEC_ORIENTATION_REVERSE);
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
u32 status;
int ret;
- ret = qmp_v456_configure_dp_phy(qmp, QSERDES_V4_COM_RESETSM_CNTRL,
- QSERDES_V4_COM_C_READY_STATUS,
- QSERDES_V4_COM_CMN_STATUS,
- QSERDES_V4_DP_PHY_STATUS);
+ ret = qmp_v456_configure_dp_phy(qmp);
if (ret < 0)
return ret;
@@ -2278,150 +2302,32 @@ static int qmp_v4_configure_dp_phy(struct qmp_combo *qmp)
drvr1_en = 0x10;
}
- writel(drvr0_en, qmp->dp_tx + QSERDES_V4_TX_HIGHZ_DRVR_EN);
- writel(bias0_en, qmp->dp_tx + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
- writel(drvr1_en, qmp->dp_tx2 + QSERDES_V4_TX_HIGHZ_DRVR_EN);
- writel(bias1_en, qmp->dp_tx2 + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
-
- writel(0x18, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- udelay(2000);
- writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
-
- if (readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V4_DP_PHY_STATUS,
- status,
- ((status & BIT(1)) > 0),
- 500,
- 10000))
- return -ETIMEDOUT;
-
- writel(0x0a, qmp->dp_tx + QSERDES_V4_TX_TX_POL_INV);
- writel(0x0a, qmp->dp_tx2 + QSERDES_V4_TX_TX_POL_INV);
-
- writel(0x27, qmp->dp_tx + QSERDES_V4_TX_TX_DRV_LVL);
- writel(0x27, qmp->dp_tx2 + QSERDES_V4_TX_TX_DRV_LVL);
-
- writel(0x20, qmp->dp_tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
- writel(0x20, qmp->dp_tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
-
- return 0;
-}
-
-static int qmp_v5_configure_dp_phy(struct qmp_combo *qmp)
-{
- bool reverse = (qmp->orientation == TYPEC_ORIENTATION_REVERSE);
- const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
- u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
- u32 status;
- int ret;
-
- ret = qmp_v456_configure_dp_phy(qmp, QSERDES_V4_COM_RESETSM_CNTRL,
- QSERDES_V4_COM_C_READY_STATUS,
- QSERDES_V4_COM_CMN_STATUS,
- QSERDES_V4_DP_PHY_STATUS);
- if (ret < 0)
- return ret;
-
- if (dp_opts->lanes == 1) {
- bias0_en = reverse ? 0x3e : 0x1a;
- drvr0_en = reverse ? 0x13 : 0x10;
- bias1_en = reverse ? 0x15 : 0x3e;
- drvr1_en = reverse ? 0x10 : 0x13;
- } else if (dp_opts->lanes == 2) {
- bias0_en = reverse ? 0x3f : 0x15;
- drvr0_en = 0x10;
- bias1_en = reverse ? 0x15 : 0x3f;
- drvr1_en = 0x10;
- } else {
- bias0_en = 0x3f;
- bias1_en = 0x3f;
- drvr0_en = 0x10;
- drvr1_en = 0x10;
- }
-
- writel(drvr0_en, qmp->dp_tx + QSERDES_V5_5NM_TX_HIGHZ_DRVR_EN);
- writel(bias0_en, qmp->dp_tx + QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN);
- writel(drvr1_en, qmp->dp_tx2 + QSERDES_V5_5NM_TX_HIGHZ_DRVR_EN);
- writel(bias1_en, qmp->dp_tx2 + QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN);
+ writel(drvr0_en, qmp->dp_tx + cfg->regs[QPHY_TX_HIGHZ_DRVR_EN]);
+ writel(bias0_en, qmp->dp_tx + cfg->regs[QPHY_TX_TRANSCEIVER_BIAS_EN]);
+ writel(drvr1_en, qmp->dp_tx2 + cfg->regs[QPHY_TX_HIGHZ_DRVR_EN]);
+ writel(bias1_en, qmp->dp_tx2 + cfg->regs[QPHY_TX_TRANSCEIVER_BIAS_EN]);
writel(0x18, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
udelay(2000);
writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- if (readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V4_DP_PHY_STATUS,
+ if (readl_poll_timeout(qmp->dp_dp_phy + cfg->regs[QPHY_DP_PHY_STATUS],
status,
((status & BIT(1)) > 0),
500,
10000))
return -ETIMEDOUT;
- writel(0x0a, qmp->dp_tx + QSERDES_V5_5NM_TX_TX_POL_INV);
- writel(0x0a, qmp->dp_tx2 + QSERDES_V5_5NM_TX_TX_POL_INV);
+ writel(0x0a, qmp->dp_tx + cfg->regs[QPHY_TX_TX_POL_INV]);
+ writel(0x0a, qmp->dp_tx2 + cfg->regs[QPHY_TX_TX_POL_INV]);
- writel(0x27, qmp->dp_tx + QSERDES_V5_5NM_TX_TX_DRV_LVL);
- writel(0x27, qmp->dp_tx2 + QSERDES_V5_5NM_TX_TX_DRV_LVL);
+ writel(0x27, qmp->dp_tx + cfg->regs[QPHY_TX_TX_DRV_LVL]);
+ writel(0x27, qmp->dp_tx2 + cfg->regs[QPHY_TX_TX_DRV_LVL]);
- writel(0x20, qmp->dp_tx + QSERDES_V5_5NM_TX_TX_EMP_POST1_LVL);
- writel(0x20, qmp->dp_tx2 + QSERDES_V5_5NM_TX_TX_EMP_POST1_LVL);
+ writel(0x20, qmp->dp_tx + cfg->regs[QPHY_TX_TX_EMP_POST1_LVL]);
+ writel(0x20, qmp->dp_tx2 + cfg->regs[QPHY_TX_TX_EMP_POST1_LVL]);
return 0;
-}
-
-static int qmp_v6_configure_dp_phy(struct qmp_combo *qmp)
-{
- bool reverse = (qmp->orientation == TYPEC_ORIENTATION_REVERSE);
- const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
- u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
- u32 status;
- int ret;
-
- ret = qmp_v456_configure_dp_phy(qmp, QSERDES_V6_COM_RESETSM_CNTRL,
- QSERDES_V6_COM_C_READY_STATUS,
- QSERDES_V6_COM_CMN_STATUS,
- QSERDES_V6_DP_PHY_STATUS);
- if (ret < 0)
- return ret;
-
- if (dp_opts->lanes == 1) {
- bias0_en = reverse ? 0x3e : 0x1a;
- drvr0_en = reverse ? 0x13 : 0x10;
- bias1_en = reverse ? 0x15 : 0x3e;
- drvr1_en = reverse ? 0x10 : 0x13;
- } else if (dp_opts->lanes == 2) {
- bias0_en = reverse ? 0x3f : 0x15;
- drvr0_en = 0x10;
- bias1_en = reverse ? 0x15 : 0x3f;
- drvr1_en = 0x10;
- } else {
- bias0_en = 0x3f;
- bias1_en = 0x3f;
- drvr0_en = 0x10;
- drvr1_en = 0x10;
- }
-
- writel(drvr0_en, qmp->dp_tx + QSERDES_V4_TX_HIGHZ_DRVR_EN);
- writel(bias0_en, qmp->dp_tx + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
- writel(drvr1_en, qmp->dp_tx2 + QSERDES_V4_TX_HIGHZ_DRVR_EN);
- writel(bias1_en, qmp->dp_tx2 + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
-
- writel(0x18, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- udelay(2000);
- writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
-
- if (readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V6_DP_PHY_STATUS,
- status,
- ((status & BIT(1)) > 0),
- 500,
- 10000))
- return -ETIMEDOUT;
-
- writel(0x0a, qmp->dp_tx + QSERDES_V4_TX_TX_POL_INV);
- writel(0x0a, qmp->dp_tx2 + QSERDES_V4_TX_TX_POL_INV);
-
- writel(0x27, qmp->dp_tx + QSERDES_V4_TX_TX_DRV_LVL);
- writel(0x27, qmp->dp_tx2 + QSERDES_V4_TX_TX_DRV_LVL);
-
- writel(0x20, qmp->dp_tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
- writel(0x20, qmp->dp_tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
return 0;
}
@@ -2507,7 +2413,7 @@ static int qmp_combo_com_init(struct qmp_combo *qmp, bool force)
goto err_disable_regulators;
}
- ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ ret = clk_bulk_prepare_enable(qmp->num_clks, qmp->clks);
if (ret)
goto err_assert_reset;
@@ -2557,7 +2463,7 @@ static int qmp_combo_com_exit(struct qmp_combo *qmp, bool force)
reset_control_bulk_assert(cfg->num_resets, qmp->resets);
- clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
@@ -2836,7 +2742,6 @@ static void qmp_combo_disable_autonomous_mode(struct qmp_combo *qmp)
static int __maybe_unused qmp_combo_runtime_suspend(struct device *dev)
{
struct qmp_combo *qmp = dev_get_drvdata(dev);
- const struct qmp_phy_cfg *cfg = qmp->cfg;
dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode);
@@ -2848,7 +2753,7 @@ static int __maybe_unused qmp_combo_runtime_suspend(struct device *dev)
qmp_combo_enable_autonomous_mode(qmp);
clk_disable_unprepare(qmp->pipe_clk);
- clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
return 0;
}
@@ -2856,7 +2761,6 @@ static int __maybe_unused qmp_combo_runtime_suspend(struct device *dev)
static int __maybe_unused qmp_combo_runtime_resume(struct device *dev)
{
struct qmp_combo *qmp = dev_get_drvdata(dev);
- const struct qmp_phy_cfg *cfg = qmp->cfg;
int ret = 0;
dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode);
@@ -2866,14 +2770,14 @@ static int __maybe_unused qmp_combo_runtime_resume(struct device *dev)
return 0;
}
- ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ ret = clk_bulk_prepare_enable(qmp->num_clks, qmp->clks);
if (ret)
return ret;
ret = clk_prepare_enable(qmp->pipe_clk);
if (ret) {
dev_err(dev, "pipe_clk enable failed, err=%d\n", ret);
- clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ clk_bulk_disable_unprepare(qmp->num_clks, qmp->clks);
return ret;
}
@@ -2944,9 +2848,8 @@ static int qmp_combo_reset_init(struct qmp_combo *qmp)
static int qmp_combo_clk_init(struct qmp_combo *qmp)
{
- const struct qmp_phy_cfg *cfg = qmp->cfg;
struct device *dev = qmp->dev;
- int num = cfg->num_clks;
+ int num = ARRAY_SIZE(qmp_combo_phy_clk_l);
int i;
qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
@@ -2954,9 +2857,11 @@ static int qmp_combo_clk_init(struct qmp_combo *qmp)
return -ENOMEM;
for (i = 0; i < num; i++)
- qmp->clks[i].id = cfg->clk_list[i];
+ qmp->clks[i].id = qmp_combo_phy_clk_l[i];
+
+ qmp->num_clks = num;
- return devm_clk_bulk_get(dev, num, qmp->clks);
+ return devm_clk_bulk_get_optional(dev, num, qmp->clks);
}
static void phy_clk_release_provider(void *res)
@@ -3421,6 +3326,12 @@ static int qmp_combo_parse_dt_legacy(struct qmp_combo *qmp, struct device_node *
if (ret)
return ret;
+ ret = devm_clk_bulk_get_all(qmp->dev, &qmp->clks);
+ if (ret < 0)
+ return ret;
+
+ qmp->num_clks = ret;
+
return 0;
}
@@ -3431,6 +3342,7 @@ static int qmp_combo_parse_dt(struct qmp_combo *qmp)
const struct qmp_combo_offsets *offs = cfg->offsets;
struct device *dev = qmp->dev;
void __iomem *base;
+ int ret;
if (!offs)
return -EINVAL;
@@ -3460,6 +3372,10 @@ static int qmp_combo_parse_dt(struct qmp_combo *qmp)
}
qmp->dp_dp_phy = base + offs->dp_dp_phy;
+ ret = qmp_combo_clk_init(qmp);
+ if (ret)
+ return ret;
+
qmp->pipe_clk = devm_clk_get(dev, "usb3_pipe");
if (IS_ERR(qmp->pipe_clk)) {
return dev_err_probe(dev, PTR_ERR(qmp->pipe_clk),
@@ -3508,10 +3424,6 @@ static int qmp_combo_probe(struct platform_device *pdev)
mutex_init(&qmp->phy_mutex);
- ret = qmp_combo_clk_init(qmp);
- if (ret)
- return ret;
-
ret = qmp_combo_reset_init(qmp);
if (ret)
return ret;
@@ -3603,6 +3515,10 @@ static const struct of_device_id qmp_combo_of_match_table[] = {
.data = &sc7180_usb3dpphy_cfg,
},
{
+ .compatible = "qcom,sc7280-qmp-usb3-dp-phy",
+ .data = &sm8250_usb3dpphy_cfg,
+ },
+ {
.compatible = "qcom,sc8180x-qmp-usb3-dp-phy",
.data = &sc8180x_usb3dpphy_cfg,
},
@@ -3619,6 +3535,10 @@ static const struct of_device_id qmp_combo_of_match_table[] = {
.data = &sm6350_usb3dpphy_cfg,
},
{
+ .compatible = "qcom,sm8150-qmp-usb3-dp-phy",
+ .data = &sc8180x_usb3dpphy_cfg,
+ },
+ {
.compatible = "qcom,sm8250-qmp-usb3-dp-phy",
.data = &sm8250_usb3dpphy_cfg,
},
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
index 0c603bc06e09..ab61a9c73b18 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index df505279edfd..a63ca7424974 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -13,7 +13,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
@@ -1910,6 +1909,244 @@ static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G4_FOM_EQ_CONFIG5, 0xf2),
};
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_serdes_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_rc_serdes_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0xd0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_rx_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B0, 0x9a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1, 0xb0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B4, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x29),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0x9a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B1, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B3, 0xec),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B4, 0x43),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B5, 0xdd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B6, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xf3),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B1, 0xf8),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xec),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xd6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x83),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xf5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x5e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_PHPRE_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_3, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_Q_PI_INTRINSIC_BIAS_RATE32, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_SO_GAIN_RATE3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_CNTRL1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x7c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_IDAC_SAOFFSET, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_DAC_ENABLE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_GM_CAL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2, 0x1f),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_RX, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_2, 0xf6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_3, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_EQ_CONFIG1, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3, 0x28),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_POWER_STATE_CONFIG2, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG4, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG5, 0x22),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_LANE1_INSIG_SW_CTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_G3S2_PRE_GAIN, 0x2e),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_RX_SIGDET_LVL, 0x66),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_rx_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_3, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_DAC_ENABLE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_GM_CAL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_PHPRE_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x7c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_IDAC_SAOFFSET, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_Q_PI_INTRINSIC_BIAS_RATE32, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B0, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1, 0xb0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B4, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0x9a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B1, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0x92),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B3, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B4, 0x43),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B5, 0xdd),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B6, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xf3),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B1, 0xf6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xee),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xd2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x83),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xf9),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x3d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_SO_GAIN_RATE3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_CNTRL1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x08),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_pcs_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG4, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_EQ_CONFIG5, 0x22),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_G3S2_PRE_GAIN, 0x2e),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_RX_SIGDET_LVL, 0x66),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_serdes_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x4_pcie_rc_serdes_alt_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0xd0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
+};
+
struct qmp_pcie_offsets {
u16 serdes;
u16 pcs;
@@ -1957,9 +2194,6 @@ struct qmp_phy_cfg {
const struct qmp_phy_init_tbl *serdes_4ln_tbl;
int serdes_4ln_num;
- /* clock ids to be requested */
- const char * const *clk_list;
- int num_clks;
/* resets to be requested */
const char * const *reset_list;
int num_resets;
@@ -2038,20 +2272,8 @@ static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
}
/* list of clocks required by phy */
-static const char * const ipq8074_pciephy_clk_l[] = {
- "aux", "cfg_ahb",
-};
-
-static const char * const msm8996_phy_clk_l[] = {
- "aux", "cfg_ahb", "ref",
-};
-
-static const char * const sc8280xp_pciephy_clk_l[] = {
- "aux", "cfg_ahb", "ref", "rchng",
-};
-
-static const char * const sdm845_pciephy_clk_l[] = {
- "aux", "cfg_ahb", "ref", "refgen",
+static const char * const qmp_pciephy_clk_l[] = {
+ "aux", "cfg_ahb", "ref", "refgen", "rchng", "phy_aux",
};
/* list of regulators */
@@ -2072,6 +2294,56 @@ static const char * const sdm845_pciephy_reset_l[] = {
"phy",
};
+static const struct qmp_pcie_offsets qmp_pcie_offsets_qhp = {
+ .serdes = 0,
+ .pcs = 0x1800,
+ .tx = 0x0800,
+ /* no .rx for QHP */
+};
+
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v2 = {
+ .serdes = 0,
+ .pcs = 0x0800,
+ .tx = 0x0200,
+ .rx = 0x0400,
+};
+
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v3 = {
+ .serdes = 0,
+ .pcs = 0x0800,
+ .pcs_misc = 0x0600,
+ .tx = 0x0200,
+ .rx = 0x0400,
+};
+
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v4x1 = {
+ .serdes = 0,
+ .pcs = 0x0800,
+ .pcs_misc = 0x0c00,
+ .tx = 0x0200,
+ .rx = 0x0400,
+};
+
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v4x2 = {
+ .serdes = 0,
+ .pcs = 0x0a00,
+ .pcs_misc = 0x0e00,
+ .tx = 0x0200,
+ .rx = 0x0400,
+ .tx2 = 0x0600,
+ .rx2 = 0x0800,
+};
+
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v4_20 = {
+ .serdes = 0x1000,
+ .pcs = 0x1200,
+ .pcs_misc = 0x1600,
+ .tx = 0x0000,
+ .rx = 0x0200,
+ .tx2 = 0x0800,
+ .rx2 = 0x0a00,
+};
+
static const struct qmp_pcie_offsets qmp_pcie_offsets_v5 = {
.serdes = 0,
.pcs = 0x0200,
@@ -2082,6 +2354,26 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v5 = {
.rx2 = 0x1800,
};
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v5_20 = {
+ .serdes = 0x1000,
+ .pcs = 0x1200,
+ .pcs_misc = 0x1400,
+ .tx = 0x0000,
+ .rx = 0x0200,
+ .tx2 = 0x0800,
+ .rx2 = 0x0a00,
+};
+
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v5_30 = {
+ .serdes = 0x2000,
+ .pcs = 0x2200,
+ .pcs_misc = 0x2400,
+ .tx = 0x0,
+ .rx = 0x0200,
+ .tx2 = 0x3800,
+ .rx2 = 0x3a00,
+};
+
static const struct qmp_pcie_offsets qmp_pcie_offsets_v6_20 = {
.serdes = 0x1000,
.pcs = 0x1200,
@@ -2096,6 +2388,8 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v6_20 = {
static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
.lanes = 1,
+ .offsets = &qmp_pcie_offsets_v2,
+
.tbls = {
.serdes = ipq8074_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(ipq8074_pcie_serdes_tbl),
@@ -2106,8 +2400,6 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
.pcs = ipq8074_pcie_pcs_tbl,
.pcs_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
},
- .clk_list = ipq8074_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
.reset_list = ipq8074_pciephy_reset_l,
.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
.vreg_list = NULL,
@@ -2121,6 +2413,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
.lanes = 1,
+ .offsets = &qmp_pcie_offsets_v4x1,
+
.tbls = {
.serdes = ipq8074_pcie_gen3_serdes_tbl,
.serdes_num = ARRAY_SIZE(ipq8074_pcie_gen3_serdes_tbl),
@@ -2133,8 +2427,6 @@ static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
.pcs_misc = ipq8074_pcie_gen3_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(ipq8074_pcie_gen3_pcs_misc_tbl),
},
- .clk_list = ipq8074_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
.reset_list = ipq8074_pciephy_reset_l,
.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
.vreg_list = NULL,
@@ -2150,6 +2442,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
.lanes = 1,
+ .offsets = &qmp_pcie_offsets_v4x1,
+
.tbls = {
.serdes = ipq6018_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(ipq6018_pcie_serdes_tbl),
@@ -2162,8 +2456,6 @@ static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
.pcs_misc = ipq6018_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(ipq6018_pcie_pcs_misc_tbl),
},
- .clk_list = ipq8074_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
.reset_list = ipq8074_pciephy_reset_l,
.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
.vreg_list = NULL,
@@ -2177,6 +2469,8 @@ static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
.lanes = 1,
+ .offsets = &qmp_pcie_offsets_v3,
+
.tbls = {
.serdes = sdm845_qmp_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(sdm845_qmp_pcie_serdes_tbl),
@@ -2189,8 +2483,6 @@ static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
.pcs_misc = sdm845_qmp_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sdm845_qmp_pcie_pcs_misc_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2204,6 +2496,8 @@ static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
.lanes = 1,
+ .offsets = &qmp_pcie_offsets_qhp,
+
.tbls = {
.serdes = sdm845_qhp_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(sdm845_qhp_pcie_serdes_tbl),
@@ -2212,8 +2506,6 @@ static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
.pcs = sdm845_qhp_pcie_pcs_tbl,
.pcs_num = ARRAY_SIZE(sdm845_qhp_pcie_pcs_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2227,6 +2519,8 @@ static const struct qmp_phy_cfg sdm845_qhp_pciephy_cfg = {
static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
.lanes = 1,
+ .offsets = &qmp_pcie_offsets_v4x1,
+
.tbls = {
.serdes = sm8250_qmp_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
@@ -2249,8 +2543,6 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
.pcs_misc = sm8250_qmp_gen3x1_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sm8250_qmp_gen3x1_pcie_pcs_misc_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2264,6 +2556,8 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
.lanes = 2,
+ .offsets = &qmp_pcie_offsets_v4x2,
+
.tbls = {
.serdes = sm8250_qmp_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(sm8250_qmp_pcie_serdes_tbl),
@@ -2286,8 +2580,6 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
.pcs_misc = sm8250_qmp_gen3x2_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sm8250_qmp_gen3x2_pcie_pcs_misc_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2301,6 +2593,8 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
.lanes = 1,
+ .offsets = &qmp_pcie_offsets_v3,
+
.tbls = {
.serdes = msm8998_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(msm8998_pcie_serdes_tbl),
@@ -2311,8 +2605,6 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
.pcs = msm8998_pcie_pcs_tbl,
.pcs_num = ARRAY_SIZE(msm8998_pcie_pcs_tbl),
},
- .clk_list = msm8996_phy_clk_l,
- .num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
.reset_list = ipq8074_pciephy_reset_l,
.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2328,6 +2620,8 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
.lanes = 2,
+ .offsets = &qmp_pcie_offsets_v4x2,
+
.tbls = {
.serdes = sc8180x_qmp_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(sc8180x_qmp_pcie_serdes_tbl),
@@ -2340,8 +2634,6 @@ static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
.pcs_misc = sc8180x_qmp_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sc8180x_qmp_pcie_pcs_misc_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2375,8 +2667,6 @@ static const struct qmp_phy_cfg sc8280xp_qmp_gen3x1_pciephy_cfg = {
.serdes_num = ARRAY_SIZE(sc8280xp_qmp_gen3x1_pcie_rc_serdes_tbl),
},
- .clk_list = sc8280xp_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2410,8 +2700,6 @@ static const struct qmp_phy_cfg sc8280xp_qmp_gen3x2_pciephy_cfg = {
.serdes_num = ARRAY_SIZE(sc8280xp_qmp_gen3x2_pcie_rc_serdes_tbl),
},
- .clk_list = sc8280xp_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2448,8 +2736,6 @@ static const struct qmp_phy_cfg sc8280xp_qmp_gen3x4_pciephy_cfg = {
.serdes_4ln_tbl = sc8280xp_qmp_gen3x4_pcie_serdes_4ln_tbl,
.serdes_4ln_num = ARRAY_SIZE(sc8280xp_qmp_gen3x4_pcie_serdes_4ln_tbl),
- .clk_list = sc8280xp_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2463,6 +2749,8 @@ static const struct qmp_phy_cfg sc8280xp_qmp_gen3x4_pciephy_cfg = {
static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
.lanes = 2,
+ .offsets = &qmp_pcie_offsets_v4_20,
+
.tbls = {
.serdes = sdx55_qmp_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(sdx55_qmp_pcie_serdes_tbl),
@@ -2490,8 +2778,6 @@ static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
.pcs_misc_num = ARRAY_SIZE(sdx55_qmp_pcie_ep_pcs_misc_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2527,8 +2813,6 @@ static const struct qmp_phy_cfg sm8350_qmp_gen3x1_pciephy_cfg = {
.rx_num = ARRAY_SIZE(sm8350_qmp_gen3x1_pcie_rc_rx_tbl),
},
- .clk_list = sc8280xp_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2564,8 +2848,6 @@ static const struct qmp_phy_cfg sm8350_qmp_gen3x2_pciephy_cfg = {
.pcs_num = ARRAY_SIZE(sm8350_qmp_gen3x2_pcie_rc_pcs_tbl),
},
- .clk_list = sc8280xp_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2593,8 +2875,6 @@ static const struct qmp_phy_cfg sdx65_qmp_pciephy_cfg = {
.pcs_misc = sdx65_qmp_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sdx65_qmp_pcie_pcs_misc_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2608,6 +2888,8 @@ static const struct qmp_phy_cfg sdx65_qmp_pciephy_cfg = {
static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
.lanes = 1,
+ .offsets = &qmp_pcie_offsets_v5,
+
.tbls = {
.serdes = sm8450_qmp_gen3_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(sm8450_qmp_gen3_pcie_serdes_tbl),
@@ -2628,8 +2910,6 @@ static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
.rx_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rc_rx_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2643,6 +2923,8 @@ static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
.lanes = 2,
+ .offsets = &qmp_pcie_offsets_v5_20,
+
.tbls = {
.serdes = sm8450_qmp_gen4x2_pcie_serdes_tbl,
.serdes_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl),
@@ -2670,8 +2952,6 @@ static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
.pcs_misc_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_ep_pcs_misc_tbl),
},
- .clk_list = sdm845_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2699,8 +2979,6 @@ static const struct qmp_phy_cfg sm8550_qmp_gen3x2_pciephy_cfg = {
.pcs_misc = sm8550_qmp_gen3x2_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sm8550_qmp_gen3x2_pcie_pcs_misc_tbl),
},
- .clk_list = sc8280xp_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
@@ -2730,8 +3008,6 @@ static const struct qmp_phy_cfg sm8550_qmp_gen4x2_pciephy_cfg = {
.ln_shrd = sm8550_qmp_gen4x2_pcie_ln_shrd_tbl,
.ln_shrd_num = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_ln_shrd_tbl),
},
- .clk_list = sc8280xp_pciephy_clk_l,
- .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = sm8550_qmp_phy_vreg_l,
@@ -2743,6 +3019,74 @@ static const struct qmp_phy_cfg sm8550_qmp_gen4x2_pciephy_cfg = {
.has_nocsr_reset = true,
};
+static const struct qmp_phy_cfg sa8775p_qmp_gen4x2_pciephy_cfg = {
+ .lanes = 2,
+ .offsets = &qmp_pcie_offsets_v5_20,
+
+ .tbls = {
+ .serdes = sa8775p_qmp_gen4x2_pcie_serdes_alt_tbl,
+ .serdes_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_serdes_alt_tbl),
+ .tx = sa8775p_qmp_gen4_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_tx_tbl),
+ .rx = sa8775p_qmp_gen4x2_pcie_rx_alt_tbl,
+ .rx_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_rx_alt_tbl),
+ .pcs = sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl,
+ .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_pcs_alt_tbl),
+ .pcs_misc = sa8775p_qmp_gen4_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_misc_tbl),
+ },
+
+ .tbls_rc = &(const struct qmp_phy_cfg_tbls) {
+ .serdes = sa8775p_qmp_gen4x2_pcie_rc_serdes_alt_tbl,
+ .serdes_num = ARRAY_SIZE(sa8775p_qmp_gen4x2_pcie_rc_serdes_alt_tbl),
+ .pcs_misc = sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl),
+ },
+
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_v5_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS_4_20,
+};
+
+static const struct qmp_phy_cfg sa8775p_qmp_gen4x4_pciephy_cfg = {
+ .lanes = 4,
+ .offsets = &qmp_pcie_offsets_v5_30,
+
+ .tbls = {
+ .serdes = sa8775p_qmp_gen4x4_pcie_serdes_alt_tbl,
+ .serdes_num = ARRAY_SIZE(sa8775p_qmp_gen4x4_pcie_serdes_alt_tbl),
+ .tx = sa8775p_qmp_gen4_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_tx_tbl),
+ .rx = sa8775p_qmp_gen4x4_pcie_rx_alt_tbl,
+ .rx_num = ARRAY_SIZE(sa8775p_qmp_gen4x4_pcie_rx_alt_tbl),
+ .pcs = sa8775p_qmp_gen4x4_pcie_pcs_alt_tbl,
+ .pcs_num = ARRAY_SIZE(sa8775p_qmp_gen4x4_pcie_pcs_alt_tbl),
+ .pcs_misc = sa8775p_qmp_gen4_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_pcs_misc_tbl),
+ },
+
+ .tbls_rc = &(const struct qmp_phy_cfg_tbls) {
+ .serdes = sa8775p_qmp_gen4x4_pcie_rc_serdes_alt_tbl,
+ .serdes_num = ARRAY_SIZE(sa8775p_qmp_gen4x4_pcie_rc_serdes_alt_tbl),
+ .pcs_misc = sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sa8775p_qmp_gen4_pcie_rc_pcs_misc_tbl),
+ },
+
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_v5_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS_4_20,
+};
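+
+/*
+ * For the two sa8775p configurations above, the entries in .tbls_rc are
+ * additional register writes applied on top of the common .tbls when the
+ * PHY operates in Root Complex mode.
+ */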
+
static void qmp_pcie_configure_lane(void __iomem *base,
const struct qmp_phy_init_tbl tbl[],
int num,
@@ -2855,7 +3199,7 @@ static int qmp_pcie_init(struct phy *phy)
goto err_assert_reset;
}
- ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(qmp_pciephy_clk_l), qmp->clks);
if (ret)
goto err_assert_reset;
@@ -2876,7 +3220,7 @@ static int qmp_pcie_exit(struct phy *phy)
reset_control_bulk_assert(cfg->num_resets, qmp->resets);
- clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qmp_pciephy_clk_l), qmp->clks);
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
@@ -3059,9 +3403,8 @@ static int qmp_pcie_reset_init(struct qmp_pcie *qmp)
static int qmp_pcie_clk_init(struct qmp_pcie *qmp)
{
- const struct qmp_phy_cfg *cfg = qmp->cfg;
struct device *dev = qmp->dev;
- int num = cfg->num_clks;
+ int num = ARRAY_SIZE(qmp_pciephy_clk_l);
int i;
qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
@@ -3069,9 +3412,9 @@ static int qmp_pcie_clk_init(struct qmp_pcie *qmp)
return -ENOMEM;
for (i = 0; i < num; i++)
- qmp->clks[i].id = cfg->clk_list[i];
+ qmp->clks[i].id = qmp_pciephy_clk_l[i];
- return devm_clk_bulk_get(dev, num, qmp->clks);
+ return devm_clk_bulk_get_optional(dev, num, qmp->clks);
}
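+
+/*
+ * Note on the lookup above: devm_clk_bulk_get_optional() leaves a NULL
+ * handle for any clock in qmp_pciephy_clk_l that a given SoC does not
+ * provide, and the clock framework treats NULL clocks as no-ops in
+ * clk_bulk_prepare_enable()/clk_bulk_disable_unprepare(), so one superset
+ * clock list can serve all supported platforms.
+ */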
static void phy_clk_release_provider(void *res)
@@ -3378,6 +3721,12 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
.compatible = "qcom,msm8998-qmp-pcie-phy",
.data = &msm8998_pciephy_cfg,
}, {
+ .compatible = "qcom,sa8775p-qmp-gen4x2-pcie-phy",
+ .data = &sa8775p_qmp_gen4x2_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sa8775p-qmp-gen4x4-pcie-phy",
+ .data = &sa8775p_qmp_gen4x4_pciephy_cfg,
+ }, {
.compatible = "qcom,sc8180x-qmp-pcie-phy",
.data = &sc8180x_pciephy_cfg,
}, {
@@ -3402,6 +3751,12 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
.compatible = "qcom,sdx65-qmp-gen4x2-pcie-phy",
.data = &sdx65_qmp_pciephy_cfg,
}, {
+ .compatible = "qcom,sm8150-qmp-gen3x1-pcie-phy",
+ .data = &sm8250_qmp_gen3x1_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8150-qmp-gen3x2-pcie-phy",
+ .data = &sm8250_qmp_gen3x2_pciephy_cfg,
+ }, {
.compatible = "qcom,sm8250-qmp-gen3x1-pcie-phy",
.data = &sm8250_qmp_gen3x1_pciephy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
index a3a056741fc7..cdf8c04ea078 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v5_20.h
@@ -7,6 +7,7 @@
#define QCOM_PHY_QMP_PCS_PCIE_V5_20_H_
/* Only for QMP V5_20 PHY - PCIe PCS registers */
+#define QPHY_V5_20_PCS_PCIE_POWER_STATE_CONFIG2 0x00c
#define QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x01c
#define QPHY_V5_20_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5 0x084
#define QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS 0x090
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h
index c7b12c1fb7f5..cf91154eed13 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_20.h
@@ -19,6 +19,7 @@
/* Only for QMP V5_20 PHY - RX registers */
#define QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2 0x008
#define QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3 0x00c
+#define QSERDES_V5_20_RX_UCDR_SO_GAIN_RATE3 0x01c
#define QSERDES_V5_20_RX_UCDR_PI_CONTROLS 0x020
#define QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1 0x02c
#define QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3 0x030
@@ -80,5 +81,6 @@
#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3 0x210
#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3 0x218
#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3 0x220
+#define QSERDES_V5_20_RX_Q_PI_INTRINSIC_BIAS_RATE32 0x238
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
index a69233e68f9a..8883e1de730e 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
@@ -7,6 +7,8 @@
#define QCOM_PHY_QMP_QSERDES_TXRX_USB_V6_H_
#define QSERDES_V6_TX_CLKBUF_ENABLE 0x08
+#define QSERDES_V6_TX_TX_EMP_POST1_LVL 0x0c
+#define QSERDES_V6_TX_TX_DRV_LVL 0x14
#define QSERDES_V6_TX_RESET_TSYNC_EN 0x1c
#define QSERDES_V6_TX_PRE_STALL_LDO_BOOST_EN 0x20
#define QSERDES_V6_TX_TX_BAND 0x24
@@ -15,6 +17,9 @@
#define QSERDES_V6_TX_RES_CODE_LANE_RX 0x38
#define QSERDES_V6_TX_RES_CODE_LANE_OFFSET_TX 0x3c
#define QSERDES_V6_TX_RES_CODE_LANE_OFFSET_RX 0x40
+#define QSERDES_V6_TX_TRANSCEIVER_BIAS_EN 0x54
+#define QSERDES_V6_TX_HIGHZ_DRVR_EN 0x58
+#define QSERDES_V6_TX_TX_POL_INV 0x5c
#define QSERDES_V6_TX_PARRATE_REC_DETECT_IDLE_EN 0x60
#define QSERDES_V6_TX_BIST_PATTERN7 0x7c
#define QSERDES_V6_TX_LANE_MODE_1 0x84
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index 8c877b668bb9..3927eba8e468 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -833,6 +832,8 @@ static const struct qmp_ufs_offsets qmp_ufs_offsets_v6 = {
static const struct qmp_phy_cfg msm8996_ufsphy_cfg = {
.lanes = 1,
+ .offsets = &qmp_ufs_offsets,
+
.tbls = {
.serdes = msm8996_ufsphy_serdes,
.serdes_num = ARRAY_SIZE(msm8996_ufsphy_serdes),
@@ -924,6 +925,8 @@ static const struct qmp_phy_cfg sc8280xp_ufsphy_cfg = {
static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
.lanes = 2,
+ .offsets = &qmp_ufs_offsets,
+
.tbls = {
.serdes = sdm845_ufsphy_serdes,
.serdes_num = ARRAY_SIZE(sdm845_ufsphy_serdes),
@@ -1006,6 +1009,8 @@ static const struct qmp_phy_cfg sm7150_ufsphy_cfg = {
static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
.lanes = 2,
+ .offsets = &qmp_ufs_offsets,
+
.tbls = {
.serdes = sm8150_ufsphy_serdes,
.serdes_num = ARRAY_SIZE(sm8150_ufsphy_serdes),
@@ -1038,6 +1043,8 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
static const struct qmp_phy_cfg sm8250_ufsphy_cfg = {
.lanes = 2,
+ .offsets = &qmp_ufs_offsets,
+
.tbls = {
.serdes = sm8150_ufsphy_serdes,
.serdes_num = ARRAY_SIZE(sm8150_ufsphy_serdes),
@@ -1070,6 +1077,8 @@ static const struct qmp_phy_cfg sm8250_ufsphy_cfg = {
static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
.lanes = 2,
+ .offsets = &qmp_ufs_offsets,
+
.tbls = {
.serdes = sm8350_ufsphy_serdes,
.serdes_num = ARRAY_SIZE(sm8350_ufsphy_serdes),
@@ -1102,6 +1111,8 @@ static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
.lanes = 2,
+ .offsets = &qmp_ufs_offsets,
+
.tbls = {
.serdes = sm8350_ufsphy_serdes,
.serdes_num = ARRAY_SIZE(sm8350_ufsphy_serdes),
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c
new file mode 100644
index 000000000000..cf466f6df94d
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb-legacy.c
@@ -0,0 +1,1407 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "phy-qcom-qmp.h"
+#include "phy-qcom-qmp-pcs-misc-v3.h"
+#include "phy-qcom-qmp-pcs-usb-v4.h"
+#include "phy-qcom-qmp-pcs-usb-v5.h"
+
+/* QPHY_SW_RESET bit */
+#define SW_RESET BIT(0)
+/* QPHY_POWER_DOWN_CONTROL */
+#define SW_PWRDN BIT(0)
+/* QPHY_START_CONTROL bits */
+#define SERDES_START BIT(0)
+#define PCS_START BIT(1)
+/* QPHY_PCS_STATUS bit */
+#define PHYSTATUS BIT(6)
+
+/* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */
+/* DP PHY soft reset */
+#define SW_DPPHY_RESET BIT(0)
+/* mux to select DP PHY reset control, 0: HW control, 1: software reset */
+#define SW_DPPHY_RESET_MUX BIT(1)
+/* USB3 PHY soft reset */
+#define SW_USB3PHY_RESET BIT(2)
+/* mux to select USB3 PHY reset control, 0: HW control, 1: software reset */
+#define SW_USB3PHY_RESET_MUX BIT(3)
+
+/* QPHY_V3_DP_COM_PHY_MODE_CTRL register bits */
+#define USB3_MODE BIT(0) /* enables USB3 mode */
+#define DP_MODE BIT(1) /* enables DP mode */
+
+/* QPHY_PCS_AUTONOMOUS_MODE_CTRL register bits */
+#define ARCVR_DTCT_EN BIT(0)
+#define ALFPS_DTCT_EN BIT(1)
+#define ARCVR_DTCT_EVENT_SEL BIT(4)
+
+/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
+#define IRQ_CLEAR BIT(0)
+
+/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
+#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
+
+#define PHY_INIT_COMPLETE_TIMEOUT 10000
+
+struct qmp_phy_init_tbl {
+ unsigned int offset;
+ unsigned int val;
+ /*
+ * mask of lanes for which this register is written,
+ * for cases when the second lane needs different values
+ */
+ u8 lane_mask;
+};
+
+#define QMP_PHY_INIT_CFG(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = 0xff, \
+ }
+
+#define QMP_PHY_INIT_CFG_LANE(o, v, l) \
+ { \
+ .offset = o, \
+ .val = v, \
+ .lane_mask = l, \
+ }
+
+/* set of registers whose offsets differ between PHY generations */
+enum qphy_reg_layout {
+ /* PCS registers */
+ QPHY_SW_RESET,
+ QPHY_START_CTRL,
+ QPHY_PCS_STATUS,
+ QPHY_PCS_AUTONOMOUS_MODE_CTRL,
+ QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ QPHY_PCS_POWER_DOWN_CONTROL,
+ /* Keep last to ensure regs_layout arrays are properly initialized */
+ QPHY_LAYOUT_SIZE
+};
+
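+/*
+ * Per-generation PCS register maps, indexed by the enum above so that the
+ * common code can address QPHY_SW_RESET, QPHY_START_CTRL, etc. without
+ * knowing which QMP hardware revision it is driving.
+ */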
+static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V3_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V3_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V3_PCS_PCS_STATUS,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V3_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V3_PCS_POWER_DOWN_CONTROL,
+};
+
+static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V4_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V4_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V4_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V4_PCS_POWER_DOWN_CONTROL,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+};
+
+static const unsigned int qmp_v5_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V5_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V5_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V5_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V5_PCS_POWER_DOWN_CONTROL,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+};
+
+static const struct qmp_phy_init_tbl qmp_v3_usb3_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_IPTRIM, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x94),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb3),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_pcs_tbl[] = {
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8150_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x40, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x54, 2),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0xff, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f, 2),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff, 2),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xa9),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8250_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_RX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x35),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_5, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xbb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbb),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3d, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3c, 2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xd2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x13),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
+struct qmp_usb_legacy_offsets {
+ u16 serdes;
+ u16 pcs;
+ u16 pcs_usb;
+ u16 tx;
+ u16 rx;
+};
+
+/* struct qmp_phy_cfg - per-PHY initialization config */
+struct qmp_phy_cfg {
+ int lanes;
+
+ const struct qmp_usb_legacy_offsets *offsets;
+
+ /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+ const struct qmp_phy_init_tbl *serdes_tbl;
+ int serdes_tbl_num;
+ const struct qmp_phy_init_tbl *tx_tbl;
+ int tx_tbl_num;
+ const struct qmp_phy_init_tbl *rx_tbl;
+ int rx_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_tbl;
+ int pcs_tbl_num;
+ const struct qmp_phy_init_tbl *pcs_usb_tbl;
+ int pcs_usb_tbl_num;
+
+ /* clock ids to be requested */
+ const char * const *clk_list;
+ int num_clks;
+ /* resets to be requested */
+ const char * const *reset_list;
+ int num_resets;
+ /* regulators to be requested */
+ const char * const *vreg_list;
+ int num_vregs;
+
+ /* array of registers with different offsets */
+ const unsigned int *regs;
+
+ /* Offset from PCS to PCS_USB region */
+ unsigned int pcs_usb_offset;
+};
+
+struct qmp_usb {
+ struct device *dev;
+
+ const struct qmp_phy_cfg *cfg;
+
+ void __iomem *serdes;
+ void __iomem *pcs;
+ void __iomem *pcs_misc;
+ void __iomem *pcs_usb;
+ void __iomem *tx;
+ void __iomem *rx;
+ void __iomem *tx2;
+ void __iomem *rx2;
+
+ void __iomem *dp_com;
+
+ struct clk *pipe_clk;
+ struct clk_bulk_data *clks;
+ struct reset_control_bulk_data *resets;
+ struct regulator_bulk_data *vregs;
+
+ enum phy_mode mode;
+
+ struct phy *phy;
+
+ struct clk_fixed_rate pipe_clk_fixed;
+};
+
+static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg |= val;
+ writel(reg, base + offset);
+
+ /* ensure that the above write goes through */
+ readl(base + offset);
+}
+
+static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
+{
+ u32 reg;
+
+ reg = readl(base + offset);
+ reg &= ~val;
+ writel(reg, base + offset);
+
+ /* ensure that the above write goes through */
+ readl(base + offset);
+}
+
+/* list of clocks required by phy */
+static const char * const qmp_v3_phy_clk_l[] = {
+ "aux", "cfg_ahb", "ref", "com_aux",
+};
+
+static const char * const qmp_v4_ref_phy_clk_l[] = {
+ "aux", "ref_clk_src", "ref", "com_aux",
+};
+
+/* the primary usb3 phy on sm8250 doesn't have a ref clock */
+static const char * const qmp_v4_sm8250_usbphy_clk_l[] = {
+ "aux", "ref_clk_src", "com_aux"
+};
+
+/* list of resets */
+static const char * const msm8996_usb3phy_reset_l[] = {
+ "phy", "common",
+};
+
+static const char * const sc7180_usb3phy_reset_l[] = {
+ "phy",
+};
+
+/* list of regulators */
+static const char * const qmp_phy_vreg_l[] = {
+ "vdda-phy", "vdda-pll",
+};
+
+static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
+ .lanes = 2,
+
+ .serdes_tbl = qmp_v3_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+};
+
+static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
+ .lanes = 2,
+
+ .serdes_tbl = qmp_v3_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
+ .rx_tbl = qmp_v3_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
+ .pcs_tbl = qmp_v3_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
+ .clk_list = qmp_v3_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
+ .reset_list = sc7180_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+};
+
+static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
+ .lanes = 2,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8150_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8150_usb3_tx_tbl),
+ .rx_tbl = sm8150_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8150_usb3_rx_tbl),
+ .pcs_tbl = sm8150_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8150_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_usb_tbl),
+ .clk_list = qmp_v4_ref_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_ref_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x300,
+};
+
+static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
+ .lanes = 2,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8250_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8250_usb3_tx_tbl),
+ .rx_tbl = sm8250_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8250_usb3_rx_tbl),
+ .pcs_tbl = sm8250_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8250_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_usb_tbl),
+ .clk_list = qmp_v4_sm8250_usbphy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x300,
+};
+
+static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
+ .lanes = 2,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8350_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8350_usb3_tx_tbl),
+ .rx_tbl = sm8350_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8350_usb3_rx_tbl),
+ .pcs_tbl = sm8350_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8350_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8350_usb3_pcs_usb_tbl),
+ .clk_list = qmp_v4_sm8250_usbphy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v5_usb3phy_regs_layout,
+ .pcs_usb_offset = 0x300,
+};
+
+static void qmp_usb_legacy_configure_lane(void __iomem *base,
+ const struct qmp_phy_init_tbl tbl[],
+ int num,
+ u8 lane_mask)
+{
+ int i;
+ const struct qmp_phy_init_tbl *t = tbl;
+
+ if (!t)
+ return;
+
+ for (i = 0; i < num; i++, t++) {
+ if (!(t->lane_mask & lane_mask))
+ continue;
+
+ writel(t->val, base + t->offset);
+ }
+}
+
+static void qmp_usb_legacy_configure(void __iomem *base,
+ const struct qmp_phy_init_tbl tbl[],
+ int num)
+{
+ qmp_usb_legacy_configure_lane(base, tbl, num, 0xff);
+}
+
+static int qmp_usb_legacy_serdes_init(struct qmp_usb *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *serdes = qmp->serdes;
+ const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
+ int serdes_tbl_num = cfg->serdes_tbl_num;
+
+ qmp_usb_legacy_configure(serdes, serdes_tbl, serdes_tbl_num);
+
+ return 0;
+}
+
+static void qmp_usb_legacy_init_dp_com(struct phy *phy)
+{
+ struct qmp_usb *qmp = phy_get_drvdata(phy);
+ void __iomem *dp_com = qmp->dp_com;
+
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
+ SW_PWRDN);
+ /* override hardware control for reset of the QMP PHY */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+
+ /* Default Type-C orientation, i.e. CC1 */
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02);
+
+ qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL,
+ USB3_MODE | DP_MODE);
+
+ /* bring the PCS blocks of both the QMP USB and QMP DP PHYs out of reset */
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03);
+ qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
+}
+
+static int qmp_usb_legacy_init(struct phy *phy)
+{
+ struct qmp_usb *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qmp->pcs;
+ int ret;
+
+ ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
+ if (ret) {
+ dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset assert failed\n");
+ goto err_disable_regulators;
+ }
+
+ ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets);
+ if (ret) {
+ dev_err(qmp->dev, "reset deassert failed\n");
+ goto err_disable_regulators;
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret)
+ goto err_assert_reset;
+
+ qmp_usb_legacy_init_dp_com(phy);
+
+ qphy_setbits(pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], SW_PWRDN);
+
+ return 0;
+
+err_assert_reset:
+ reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+err_disable_regulators:
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ return ret;
+}
+
+static int qmp_usb_legacy_exit(struct phy *phy)
+{
+ struct qmp_usb *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ reset_control_bulk_assert(cfg->num_resets, qmp->resets);
+
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
+
+ return 0;
+}
+
+static int qmp_usb_legacy_power_on(struct phy *phy)
+{
+ struct qmp_usb *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *tx = qmp->tx;
+ void __iomem *rx = qmp->rx;
+ void __iomem *pcs = qmp->pcs;
+ void __iomem *status;
+ unsigned int val;
+ int ret;
+
+ qmp_usb_legacy_serdes_init(qmp);
+
+ ret = clk_prepare_enable(qmp->pipe_clk);
+ if (ret) {
+ dev_err(qmp->dev, "pipe_clk enable failed err=%d\n", ret);
+ return ret;
+ }
+
+ /* Tx, Rx, and PCS configurations */
+ qmp_usb_legacy_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
+ qmp_usb_legacy_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
+
+ if (cfg->lanes >= 2) {
+ qmp_usb_legacy_configure_lane(qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
+ qmp_usb_legacy_configure_lane(qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
+ }
+
+ qmp_usb_legacy_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+
+ usleep_range(10, 20);
+
+ /* Pull PHY out of reset state */
+ qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], SERDES_START | PCS_START);
+
+ status = pcs + cfg->regs[QPHY_PCS_STATUS];
+ ret = readl_poll_timeout(status, val, !(val & PHYSTATUS), 200,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed-out\n");
+ goto err_disable_pipe_clk;
+ }
+
+ return 0;
+
+err_disable_pipe_clk:
+ clk_disable_unprepare(qmp->pipe_clk);
+
+ return ret;
+}
+
+static int qmp_usb_legacy_power_off(struct phy *phy)
+{
+ struct qmp_usb *qmp = phy_get_drvdata(phy);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ clk_disable_unprepare(qmp->pipe_clk);
+
+ /* PHY reset */
+ qphy_setbits(qmp->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+
+ /* stop SerDes and Phy-Coding-Sublayer */
+ qphy_clrbits(qmp->pcs, cfg->regs[QPHY_START_CTRL],
+ SERDES_START | PCS_START);
+
+ /* Put PHY into POWER DOWN state: active low */
+ qphy_clrbits(qmp->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+
+ return 0;
+}
+
+static int qmp_usb_legacy_enable(struct phy *phy)
+{
+ int ret;
+
+ ret = qmp_usb_legacy_init(phy);
+ if (ret)
+ return ret;
+
+ ret = qmp_usb_legacy_power_on(phy);
+ if (ret)
+ qmp_usb_legacy_exit(phy);
+
+ return ret;
+}
+
+static int qmp_usb_legacy_disable(struct phy *phy)
+{
+ int ret;
+
+ ret = qmp_usb_legacy_power_off(phy);
+ if (ret)
+ return ret;
+ return qmp_usb_legacy_exit(phy);
+}
+
+static int qmp_usb_legacy_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+ struct qmp_usb *qmp = phy_get_drvdata(phy);
+
+ qmp->mode = mode;
+
+ return 0;
+}
+
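+/*
+ * The PHY is brought up in one step: .init below runs both
+ * qmp_usb_legacy_init() and qmp_usb_legacy_power_on(), and .exit undoes
+ * both, so consumers only need phy_init() and phy_exit().
+ */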
+static const struct phy_ops qmp_usb_legacy_phy_ops = {
+ .init = qmp_usb_legacy_enable,
+ .exit = qmp_usb_legacy_disable,
+ .set_mode = qmp_usb_legacy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static void qmp_usb_legacy_enable_autonomous_mode(struct qmp_usb *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs_usb = qmp->pcs_usb ?: qmp->pcs;
+ void __iomem *pcs_misc = qmp->pcs_misc;
+ u32 intr_mask;
+
+ if (qmp->mode == PHY_MODE_USB_HOST_SS ||
+ qmp->mode == PHY_MODE_USB_DEVICE_SS)
+ intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN;
+ else
+ intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL;
+
+ /* Clear any pending interrupts status */
+ qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+
+ qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ALFPS_DTCT_EN | ARCVR_DTCT_EVENT_SEL);
+
+ /* Enable required PHY autonomous mode interrupts */
+ qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL], intr_mask);
+
+ /* Enable i/o clamp_n for autonomous mode */
+ if (pcs_misc)
+ qphy_clrbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+}
+
+static void qmp_usb_legacy_disable_autonomous_mode(struct qmp_usb *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs_usb = qmp->pcs_usb ?: qmp->pcs;
+ void __iomem *pcs_misc = qmp->pcs_misc;
+
+ /* Disable i/o clamp_n on resume for normal mode */
+ if (pcs_misc)
+ qphy_setbits(pcs_misc, QPHY_V3_PCS_MISC_CLAMP_ENABLE, CLAMP_EN);
+
+ qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_AUTONOMOUS_MODE_CTRL],
+ ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL | ALFPS_DTCT_EN);
+
+ qphy_setbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+ /* Writing 1 followed by 0 clears the interrupt */
+ qphy_clrbits(pcs_usb, cfg->regs[QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR], IRQ_CLEAR);
+}
+
+static int __maybe_unused qmp_usb_legacy_runtime_suspend(struct device *dev)
+{
+ struct qmp_usb *qmp = dev_get_drvdata(dev);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+
+ dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode);
+
+ if (!qmp->phy->init_count) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ qmp_usb_legacy_enable_autonomous_mode(qmp);
+
+ clk_disable_unprepare(qmp->pipe_clk);
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+
+ return 0;
+}
+
+static int __maybe_unused qmp_usb_legacy_runtime_resume(struct device *dev)
+{
+ struct qmp_usb *qmp = dev_get_drvdata(dev);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ int ret = 0;
+
+ dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode);
+
+ if (!qmp->phy->init_count) {
+ dev_vdbg(dev, "PHY not initialized, bailing out\n");
+ return 0;
+ }
+
+ ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(qmp->pipe_clk);
+ if (ret) {
+ dev_err(dev, "pipe_clk enable failed, err=%d\n", ret);
+ clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks);
+ return ret;
+ }
+
+ qmp_usb_legacy_disable_autonomous_mode(qmp);
+
+ return 0;
+}
+
+static const struct dev_pm_ops qmp_usb_legacy_pm_ops = {
+ SET_RUNTIME_PM_OPS(qmp_usb_legacy_runtime_suspend,
+ qmp_usb_legacy_runtime_resume, NULL)
+};
+
+static int qmp_usb_legacy_vreg_init(struct qmp_usb *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ struct device *dev = qmp->dev;
+ int num = cfg->num_vregs;
+ int i;
+
+ qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
+ if (!qmp->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->vregs[i].supply = cfg->vreg_list[i];
+
+ return devm_regulator_bulk_get(dev, num, qmp->vregs);
+}
+
+static int qmp_usb_legacy_reset_init(struct qmp_usb *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ struct device *dev = qmp->dev;
+ int i;
+ int ret;
+
+ qmp->resets = devm_kcalloc(dev, cfg->num_resets,
+ sizeof(*qmp->resets), GFP_KERNEL);
+ if (!qmp->resets)
+ return -ENOMEM;
+
+ for (i = 0; i < cfg->num_resets; i++)
+ qmp->resets[i].id = cfg->reset_list[i];
+
+ ret = devm_reset_control_bulk_get_exclusive(dev, cfg->num_resets, qmp->resets);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get resets\n");
+
+ return 0;
+}
+
+static int qmp_usb_legacy_clk_init(struct qmp_usb *qmp)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ struct device *dev = qmp->dev;
+ int num = cfg->num_clks;
+ int i;
+
+ qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL);
+ if (!qmp->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ qmp->clks[i].id = cfg->clk_list[i];
+
+ return devm_clk_bulk_get(dev, num, qmp->clks);
+}
+
+static void phy_clk_release_provider(void *res)
+{
+ of_clk_del_provider(res);
+}
+
+/*
+ * Register a fixed rate pipe clock.
+ *
+ * The <s>_pipe_clksrc generated by the PHY goes to the GCC, which gates
+ * and controls it. The <s>_pipe_clk coming out of the GCC is requested
+ * by the PHY driver for its operation.
+ * We register the <s>_pipe_clksrc here. The GCC driver takes care
+ * of assigning this <s>_pipe_clksrc as the parent of <s>_pipe_clk.
+ * Below picture shows this relationship.
+ *
+ * +---------------+
+ * | PHY block |<<---------------------------------------+
+ * | | |
+ * | +-------+ | +-----+ |
+ * I/P---^-->| PLL |---^--->pipe_clksrc--->| GCC |--->pipe_clk---+
+ * clk | +-------+ | +-----+
+ * +---------------+
+ */
+static int phy_pipe_clk_register(struct qmp_usb *qmp, struct device_node *np)
+{
+ struct clk_fixed_rate *fixed = &qmp->pipe_clk_fixed;
+ struct clk_init_data init = { };
+ int ret;
+
+ ret = of_property_read_string(np, "clock-output-names", &init.name);
+ if (ret) {
+ dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
+ return ret;
+ }
+
+ init.ops = &clk_fixed_rate_ops;
+
+ /* controllers using QMP PHYs use a 125 MHz pipe clock interface */
+ fixed->fixed_rate = 125000000;
+ fixed->hw.init = &init;
+
+ ret = devm_clk_hw_register(qmp->dev, &fixed->hw);
+ if (ret)
+ return ret;
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_simple_get, &fixed->hw);
+ if (ret)
+ return ret;
+
+ /*
+ * Roll a devm action because the clock provider is the child node, but
+ * the child node is not actually a device.
+ */
+ return devm_add_action_or_reset(qmp->dev, phy_clk_release_provider, np);
+}
+
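+/*
+ * A minimal sketch of a child PHY node consuming the clock provider
+ * registered above (illustrative only; the node name, unit address and
+ * clock name are assumptions, not taken from a specific board file):
+ *
+ *	usb_1_ssphy: phy@88e9200 {
+ *		#phy-cells = <0>;
+ *		#clock-cells = <0>;
+ *		clock-output-names = "usb3_phy_pipe_clk_src";
+ *	};
+ *
+ * phy_pipe_clk_register() reads "clock-output-names" to name the fixed-rate
+ * clock; the GCC driver then uses that clock as the parent of the pipe
+ * clock which this driver requests back.
+ */
+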
+static void __iomem *qmp_usb_legacy_iomap(struct device *dev, struct device_node *np,
+ int index, bool exclusive)
+{
+ struct resource res;
+
+ if (!exclusive) {
+ if (of_address_to_resource(np, index, &res))
+ return IOMEM_ERR_PTR(-EINVAL);
+
+ return devm_ioremap(dev, res.start, resource_size(&res));
+ }
+
+ return devm_of_iomap(dev, np, index, NULL);
+}
+
+static int qmp_usb_legacy_parse_dt_legacy(struct qmp_usb *qmp, struct device_node *np)
+{
+ struct platform_device *pdev = to_platform_device(qmp->dev);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ struct device *dev = qmp->dev;
+ bool exclusive = true;
+
+ qmp->serdes = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(qmp->serdes))
+ return PTR_ERR(qmp->serdes);
+
+ qmp->dp_com = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(qmp->dp_com))
+ return PTR_ERR(qmp->dp_com);
+
+ /*
+ * Get memory resources for the PHY:
+ * Resources are indexed as: tx -> 0, rx -> 1, pcs -> 2.
+ * For dual-lane PHYs: tx2 -> 3, rx2 -> 4, pcs_misc (optional) -> 5.
+ * For single-lane PHYs: pcs_misc (optional) -> 3.
+ */
+ qmp->tx = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(qmp->tx))
+ return PTR_ERR(qmp->tx);
+
+ qmp->rx = devm_of_iomap(dev, np, 1, NULL);
+ if (IS_ERR(qmp->rx))
+ return PTR_ERR(qmp->rx);
+
+ qmp->pcs = qmp_usb_legacy_iomap(dev, np, 2, exclusive);
+ if (IS_ERR(qmp->pcs))
+ return PTR_ERR(qmp->pcs);
+
+ if (cfg->pcs_usb_offset)
+ qmp->pcs_usb = qmp->pcs + cfg->pcs_usb_offset;
+
+ if (cfg->lanes >= 2) {
+ qmp->tx2 = devm_of_iomap(dev, np, 3, NULL);
+ if (IS_ERR(qmp->tx2))
+ return PTR_ERR(qmp->tx2);
+
+ qmp->rx2 = devm_of_iomap(dev, np, 4, NULL);
+ if (IS_ERR(qmp->rx2))
+ return PTR_ERR(qmp->rx2);
+
+ qmp->pcs_misc = devm_of_iomap(dev, np, 5, NULL);
+ } else {
+ qmp->pcs_misc = devm_of_iomap(dev, np, 3, NULL);
+ }
+
+ if (IS_ERR(qmp->pcs_misc)) {
+ dev_vdbg(dev, "PHY pcs_misc-reg not used\n");
+ qmp->pcs_misc = NULL;
+ }
+
+ qmp->pipe_clk = devm_get_clk_from_child(dev, np, NULL);
+ if (IS_ERR(qmp->pipe_clk)) {
+ return dev_err_probe(dev, PTR_ERR(qmp->pipe_clk),
+ "failed to get pipe clock\n");
+ }
+
+ return 0;
+}
+
+static int qmp_usb_legacy_parse_dt(struct qmp_usb *qmp)
+{
+ struct platform_device *pdev = to_platform_device(qmp->dev);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ const struct qmp_usb_legacy_offsets *offs = cfg->offsets;
+ struct device *dev = qmp->dev;
+ void __iomem *base;
+
+ if (!offs)
+ return -EINVAL;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ qmp->serdes = base + offs->serdes;
+ qmp->pcs = base + offs->pcs;
+ qmp->pcs_usb = base + offs->pcs_usb;
+ qmp->tx = base + offs->tx;
+ qmp->rx = base + offs->rx;
+
+ qmp->pipe_clk = devm_clk_get(dev, "pipe");
+ if (IS_ERR(qmp->pipe_clk)) {
+ return dev_err_probe(dev, PTR_ERR(qmp->pipe_clk),
+ "failed to get pipe clock\n");
+ }
+
+ return 0;
+}
+
+static int qmp_usb_legacy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct device_node *np;
+ struct qmp_usb *qmp;
+ int ret;
+
+ qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL);
+ if (!qmp)
+ return -ENOMEM;
+
+ qmp->dev = dev;
+
+ qmp->cfg = of_device_get_match_data(dev);
+ if (!qmp->cfg)
+ return -EINVAL;
+
+ ret = qmp_usb_legacy_clk_init(qmp);
+ if (ret)
+ return ret;
+
+ ret = qmp_usb_legacy_reset_init(qmp);
+ if (ret)
+ return ret;
+
+ ret = qmp_usb_legacy_vreg_init(qmp);
+ if (ret)
+ return ret;
+
+ /* Check for legacy binding with child node. */
+ np = of_get_next_available_child(dev->of_node, NULL);
+ if (np) {
+ ret = qmp_usb_legacy_parse_dt_legacy(qmp, np);
+ } else {
+ np = of_node_get(dev->of_node);
+ ret = qmp_usb_legacy_parse_dt(qmp);
+ }
+ if (ret)
+ goto err_node_put;
+
+ pm_runtime_set_active(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ goto err_node_put;
+ /*
+ * Prevent runtime pm from being ON by default. Users can enable
+ * it using power/control in sysfs.
+ */
+ pm_runtime_forbid(dev);
+
+ ret = phy_pipe_clk_register(qmp, np);
+ if (ret)
+ goto err_node_put;
+
+ qmp->phy = devm_phy_create(dev, np, &qmp_usb_legacy_phy_ops);
+ if (IS_ERR(qmp->phy)) {
+ ret = PTR_ERR(qmp->phy);
+ dev_err(dev, "failed to create PHY: %d\n", ret);
+ goto err_node_put;
+ }
+
+ phy_set_drvdata(qmp->phy, qmp);
+
+ of_node_put(np);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+
+err_node_put:
+ of_node_put(np);
+ return ret;
+}
+
+static const struct of_device_id qmp_usb_legacy_of_match_table[] = {
+ {
+ .compatible = "qcom,sc7180-qmp-usb3-phy",
+ .data = &sc7180_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sc8180x-qmp-usb3-phy",
+ .data = &sm8150_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qmp-usb3-phy",
+ .data = &qmp_v3_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8150-qmp-usb3-phy",
+ .data = &sm8150_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8250-qmp-usb3-phy",
+ .data = &sm8250_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8350-qmp-usb3-phy",
+ .data = &sm8350_usb3phy_cfg,
+ }, {
+ .compatible = "qcom,sm8450-qmp-usb3-phy",
+ .data = &sm8350_usb3phy_cfg,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qmp_usb_legacy_of_match_table);
+
+static struct platform_driver qmp_usb_legacy_driver = {
+ .probe = qmp_usb_legacy_probe,
+ .driver = {
+ .name = "qcom-qmp-usb-legacy-phy",
+ .pm = &qmp_usb_legacy_pm_ops,
+ .of_match_table = qmp_usb_legacy_of_match_table,
+ },
+};
+
+module_platform_driver(qmp_usb_legacy_driver);
+
+MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm QMP legacy USB+DP PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 466f0a56c82e..0130bb8e809a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -367,112 +366,6 @@ static const struct qmp_phy_init_tbl msm8996_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V2_PCS_POWER_STATE_CONFIG2, 0x08),
};
-static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0xea),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xc9),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x15),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_CFG, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_ADJ_PER2, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE1, 0x85),
- QMP_PHY_INIT_CFG(QSERDES_V3_COM_SSC_STEP_SIZE2, 0x07),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_HIGHZ_DRVR_EN, 0x10),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
- QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
-};
-
-static const struct qmp_phy_init_tbl qmp_v3_usb3_pcs_tbl[] = {
- /* FLL settings */
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
-
- /* Lock Det settings */
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
-
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xba),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
-
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
- QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
-};
-
static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x14),
@@ -693,117 +586,6 @@ static const struct qmp_phy_init_tbl msm8998_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
};
-static const struct qmp_phy_init_tbl sm8150_usb3_serdes_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER2, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE1_MODE1, 0xde),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_STEP_SIZE2_MODE1, 0x07),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_BUF_ENABLE, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CMN_IPTRIM, 0x20),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE0, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CP_CTRL_MODE1, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE0, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_RCTRL_MODE1, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE0, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_PLL_CCTRL_MODE1, 0x36),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP_EN, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE0, 0x14),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE0, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP1_MODE1, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE0, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DEC_START_MODE1, 0x82),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE0, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE0, 0xea),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE0, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START1_MODE1, 0xab),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START2_MODE1, 0xea),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_DIV_FRAC_START3_MODE1, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE0, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE1_MODE1, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE2_MODE1, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x01),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORECLK_DIV_MODE1, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xca),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x1e),
- QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
-};
-
-static const struct qmp_phy_init_tbl sm8150_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_PI_QEC_CTRL, 0x20),
-};
-
-static const struct qmp_phy_init_tbl sm8150_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0xbf),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xbf),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x94),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb3),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
-};
-
-static const struct qmp_phy_init_tbl sm8150_usb3_pcs_tbl[] = {
- /* Lock Det settings */
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
-
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
-};
-
-static const struct qmp_phy_init_tbl sm8150_usb3_pcs_usb_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
-};
-
static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x1a),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
@@ -915,78 +697,6 @@ static const struct qmp_phy_init_tbl sm8150_usb3_uniphy_pcs_usb_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
};
-static const struct qmp_phy_init_tbl sm8250_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_TX, 0x60),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_RX, 0x60),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RES_CODE_LANE_OFFSET_RX, 0x02),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
- QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x40, 1),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_TX_PI_QEC_CTRL, 0x54, 2),
-};
-
-static const struct qmp_phy_init_tbl sm8250_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_GAIN, 0x06),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CONTROLS, 0x99),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH1, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_THRESH2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN1, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SB2_GAIN2, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL1, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VGA_CAL_CNTRL2, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0xff, 1),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_LOW, 0x7f, 2),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0x7f, 1),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff, 2),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x97),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0xdc),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0x5c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH3, 0x7b),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0xb4),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_EN_TIMER, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_DCC_CTRL1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_GM_CAL, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_V4_RX_VTH_CODE, 0x10),
-};
-
-static const struct qmp_phy_init_tbl sm8250_usb3_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xa9),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
-};
-
-static const struct qmp_phy_init_tbl sm8250_usb3_pcs_usb_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
-};
-
static const struct qmp_phy_init_tbl sm8250_usb3_uniphy_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_TX_RCV_DETECT_LVL_2, 0x12),
QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xd5),
@@ -1148,84 +858,6 @@ static const struct qmp_phy_init_tbl sdx65_usb3_uniphy_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_ENABLES, 0x00),
};
-static const struct qmp_phy_init_tbl sm8350_usb3_tx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_TX, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_RX, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x16),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x35),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_5, 0x3f),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_RCV_DETECT_LVL_2, 0x12),
- QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
-};
-
-static const struct qmp_phy_init_tbl sm8350_usb3_rx_tbl[] = {
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xbb),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7b),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbb),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3d, 1),
- QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3c, 2),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdb),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xd2),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x13),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_EN_TIMER, 0x04),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_DCC_CTRL1, 0x0c),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_VTH_CODE, 0x10),
-};
-
-static const struct qmp_phy_init_tbl sm8350_usb3_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
- QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
-};
-
-static const struct qmp_phy_init_tbl sm8350_usb3_pcs_usb_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
-};
-
static const struct qmp_phy_init_tbl sm8350_usb3_uniphy_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xa5),
QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_2, 0x82),
@@ -1556,9 +1188,6 @@ struct qmp_phy_cfg {
/* true, if PHY needs delay after POWER_DOWN */
bool has_pwrdn_delay;
- /* true, if PHY has a separate DP_COM control block */
- bool has_phy_dp_com_ctrl;
-
/* Offset from PCS to PCS_USB region */
unsigned int pcs_usb_offset;
};
@@ -1577,8 +1206,6 @@ struct qmp_usb {
void __iomem *tx2;
void __iomem *rx2;
- void __iomem *dp_com;
-
struct clk *pipe_clk;
struct clk_bulk_data *clks;
struct reset_control_bulk_data *resets;
@@ -1632,11 +1259,6 @@ static const char * const qmp_v4_ref_phy_clk_l[] = {
"aux", "ref_clk_src", "ref", "com_aux",
};
-/* the primary usb3 phy on sm8250 doesn't have a ref clock */
-static const char * const qmp_v4_sm8250_usbphy_clk_l[] = {
- "aux", "ref_clk_src", "com_aux"
-};
-
/* usb3 phy on sdx55 doesn't have com_aux clock */
static const char * const qmp_v4_sdx55_usbphy_clk_l[] = {
"aux", "cfg_ahb", "ref"
@@ -1651,10 +1273,6 @@ static const char * const msm8996_usb3phy_reset_l[] = {
"phy", "common",
};
-static const char * const sc7180_usb3phy_reset_l[] = {
- "phy",
-};
-
static const char * const qcm2290_usb3phy_reset_l[] = {
"phy_phy", "phy",
};
@@ -1752,29 +1370,6 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.regs = qmp_v2_usb3phy_regs_layout,
};
-static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
- .lanes = 2,
-
- .serdes_tbl = qmp_v3_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
- .tx_tbl = qmp_v3_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
- .rx_tbl = qmp_v3_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
- .pcs_tbl = qmp_v3_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
- .clk_list = qmp_v3_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v3_usb3phy_regs_layout,
-
- .has_pwrdn_delay = true,
- .has_phy_dp_com_ctrl = true,
-};
-
static const struct qmp_phy_cfg sa8775p_usb3_uniphy_cfg = {
.lanes = 1,
@@ -1797,29 +1392,6 @@ static const struct qmp_phy_cfg sa8775p_usb3_uniphy_cfg = {
.regs = qmp_v5_usb3phy_regs_layout,
};
-static const struct qmp_phy_cfg sc7180_usb3phy_cfg = {
- .lanes = 2,
-
- .serdes_tbl = qmp_v3_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
- .tx_tbl = qmp_v3_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
- .rx_tbl = qmp_v3_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_rx_tbl),
- .pcs_tbl = qmp_v3_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(qmp_v3_usb3_pcs_tbl),
- .clk_list = qmp_v3_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
- .reset_list = sc7180_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v3_usb3phy_regs_layout,
-
- .has_pwrdn_delay = true,
- .has_phy_dp_com_ctrl = true,
-};
-
static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = {
.lanes = 1,
@@ -1884,32 +1456,6 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = {
.regs = qmp_v3_usb3phy_regs_layout,
};
-static const struct qmp_phy_cfg sm8150_usb3phy_cfg = {
- .lanes = 2,
-
- .serdes_tbl = sm8150_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
- .tx_tbl = sm8150_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8150_usb3_tx_tbl),
- .rx_tbl = sm8150_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8150_usb3_rx_tbl),
- .pcs_tbl = sm8150_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_tbl),
- .pcs_usb_tbl = sm8150_usb3_pcs_usb_tbl,
- .pcs_usb_tbl_num = ARRAY_SIZE(sm8150_usb3_pcs_usb_tbl),
- .clk_list = qmp_v4_ref_phy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_ref_phy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
- .pcs_usb_offset = 0x300,
-
- .has_pwrdn_delay = true,
- .has_phy_dp_com_ctrl = true,
-};
-
static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
.lanes = 1,
@@ -1935,32 +1481,6 @@ static const struct qmp_phy_cfg sm8150_usb3_uniphy_cfg = {
.has_pwrdn_delay = true,
};
-static const struct qmp_phy_cfg sm8250_usb3phy_cfg = {
- .lanes = 2,
-
- .serdes_tbl = sm8150_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
- .tx_tbl = sm8250_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8250_usb3_tx_tbl),
- .rx_tbl = sm8250_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8250_usb3_rx_tbl),
- .pcs_tbl = sm8250_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_tbl),
- .pcs_usb_tbl = sm8250_usb3_pcs_usb_tbl,
- .pcs_usb_tbl_num = ARRAY_SIZE(sm8250_usb3_pcs_usb_tbl),
- .clk_list = qmp_v4_sm8250_usbphy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
- .pcs_usb_offset = 0x300,
-
- .has_pwrdn_delay = true,
- .has_phy_dp_com_ctrl = true,
-};
-
static const struct qmp_phy_cfg sm8250_usb3_uniphy_cfg = {
.lanes = 1,
@@ -2036,32 +1556,6 @@ static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
.has_pwrdn_delay = true,
};
-static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
- .lanes = 2,
-
- .serdes_tbl = sm8150_usb3_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
- .tx_tbl = sm8350_usb3_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8350_usb3_tx_tbl),
- .rx_tbl = sm8350_usb3_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8350_usb3_rx_tbl),
- .pcs_tbl = sm8350_usb3_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_pcs_tbl),
- .pcs_usb_tbl = sm8350_usb3_pcs_usb_tbl,
- .pcs_usb_tbl_num = ARRAY_SIZE(sm8350_usb3_pcs_usb_tbl),
- .clk_list = qmp_v4_sm8250_usbphy_clk_l,
- .num_clks = ARRAY_SIZE(qmp_v4_sm8250_usbphy_clk_l),
- .reset_list = msm8996_usb3phy_reset_l,
- .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v5_usb3phy_regs_layout,
- .pcs_usb_offset = 0x300,
-
- .has_pwrdn_delay = true,
- .has_phy_dp_com_ctrl = true,
-};
-
static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
.lanes = 1,
@@ -2152,7 +1646,6 @@ static int qmp_usb_init(struct phy *phy)
struct qmp_usb *qmp = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qmp->cfg;
void __iomem *pcs = qmp->pcs;
- void __iomem *dp_com = qmp->dp_com;
int ret;
ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
@@ -2177,29 +1670,6 @@ static int qmp_usb_init(struct phy *phy)
if (ret)
goto err_assert_reset;
- if (cfg->has_phy_dp_com_ctrl) {
- qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
- SW_PWRDN);
- /* override hardware control for reset of qmp phy */
- qphy_setbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
- SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
- SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
-
- /* Default type-c orientation, i.e CC1 */
- qphy_setbits(dp_com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02);
-
- qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL,
- USB3_MODE | DP_MODE);
-
- /* bring both QMP USB and QMP DP PHYs PCS block out of reset */
- qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
- SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
- SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
-
- qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03);
- qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
- }
-
qphy_setbits(pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], SW_PWRDN);
return 0;
@@ -2582,12 +2052,6 @@ static int qmp_usb_parse_dt_legacy(struct qmp_usb *qmp, struct device_node *np)
if (IS_ERR(qmp->serdes))
return PTR_ERR(qmp->serdes);
- if (cfg->has_phy_dp_com_ctrl) {
- qmp->dp_com = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(qmp->dp_com))
- return PTR_ERR(qmp->dp_com);
- }
-
/*
* FIXME: These bindings should be fixed to not rely on overlapping
* mappings for PCS.
@@ -2780,18 +2244,9 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
.compatible = "qcom,sa8775p-qmp-usb3-uni-phy",
.data = &sa8775p_usb3_uniphy_cfg,
}, {
- .compatible = "qcom,sc7180-qmp-usb3-phy",
- .data = &sc7180_usb3phy_cfg,
- }, {
- .compatible = "qcom,sc8180x-qmp-usb3-phy",
- .data = &sm8150_usb3phy_cfg,
- }, {
.compatible = "qcom,sc8280xp-qmp-usb3-uni-phy",
.data = &sc8280xp_usb3_uniphy_cfg,
}, {
- .compatible = "qcom,sdm845-qmp-usb3-phy",
- .data = &qmp_v3_usb3phy_cfg,
- }, {
.compatible = "qcom,sdm845-qmp-usb3-uni-phy",
.data = &qmp_v3_usb3_uniphy_cfg,
}, {
@@ -2804,26 +2259,14 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
.compatible = "qcom,sm6115-qmp-usb3-phy",
.data = &qcm2290_usb3phy_cfg,
}, {
- .compatible = "qcom,sm8150-qmp-usb3-phy",
- .data = &sm8150_usb3phy_cfg,
- }, {
.compatible = "qcom,sm8150-qmp-usb3-uni-phy",
.data = &sm8150_usb3_uniphy_cfg,
}, {
- .compatible = "qcom,sm8250-qmp-usb3-phy",
- .data = &sm8250_usb3phy_cfg,
- }, {
.compatible = "qcom,sm8250-qmp-usb3-uni-phy",
.data = &sm8250_usb3_uniphy_cfg,
}, {
- .compatible = "qcom,sm8350-qmp-usb3-phy",
- .data = &sm8350_usb3phy_cfg,
- }, {
.compatible = "qcom,sm8350-qmp-usb3-uni-phy",
.data = &sm8350_usb3_uniphy_cfg,
- }, {
- .compatible = "qcom,sm8450-qmp-usb3-phy",
- .data = &sm8350_usb3phy_cfg,
},
{ },
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 7ee4b0e07d11..32d897684755 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -134,6 +134,8 @@
#define QPHY_V4_PCS_MISC_TYPEC_STATUS 0x10
#define QPHY_V4_PCS_MISC_PLACEHOLDER_STATUS 0x14
+#define QSERDES_V5_DP_PHY_STATUS 0x0dc
+
/* Only for QMP V6 PHY - DP PHY registers */
#define QSERDES_V6_DP_PHY_AUX_INTERRUPT_STATUS 0x0e0
#define QSERDES_V6_DP_PHY_STATUS 0x0e4
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index bec6e40d5280..c52655a383ce 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-snps-eusb2.c b/drivers/phy/qualcomm/phy-qcom-snps-eusb2.c
index eeaa1eb0e24b..1484691a41d5 100644
--- a/drivers/phy/qualcomm/phy-qcom-snps-eusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-snps-eusb2.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
index d0319bee01c0..eb0b0f61d98e 100644
--- a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
+++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-usb-hs.c b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
index 53e46c220a3a..98a18987f1be 100644
--- a/drivers/phy/qualcomm/phy-qcom-usb-hs.c
+++ b/drivers/phy/qualcomm/phy-qcom-usb-hs.c
@@ -7,7 +7,7 @@
#include <linux/ulpi/regs.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/reset.h>
#include <linux/extcon.h>
diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c
index 85888ab2d307..2f876f158e1d 100644
--- a/drivers/phy/ralink/phy-mt7621-pci.c
+++ b/drivers/phy/ralink/phy-mt7621-pci.c
@@ -9,8 +9,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/realtek/Kconfig b/drivers/phy/realtek/Kconfig
new file mode 100644
index 000000000000..650e20ed69af
--- /dev/null
+++ b/drivers/phy/realtek/Kconfig
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Phy drivers for Realtek platforms
+#
+config PHY_RTK_RTD_USB2PHY
+ tristate "Realtek RTD USB2 PHY Transceiver Driver"
+ depends on USB_SUPPORT
+ select GENERIC_PHY
+ select USB_PHY
+ select USB_COMMON
+ help
+ Enable this to support the Realtek SoC USB 2.0 PHY transceiver.
+ The DHC (digital home center) RTD series SoCs use the Synopsys
+ DWC3 USB IP. This driver performs the PHY parameter
+ initialization.
+
+config PHY_RTK_RTD_USB3PHY
+ tristate "Realtek RTD USB3 PHY Transceiver Driver"
+ depends on USB_SUPPORT
+ select GENERIC_PHY
+ select USB_PHY
+ select USB_COMMON
+ help
+ Enable this to support the Realtek SoC USB 3.0 PHY transceiver.
+ The DHC (digital home center) RTD series SoCs use the Synopsys
+ DWC3 USB IP. This driver performs the PHY parameter
+ initialization.
diff --git a/drivers/phy/realtek/Makefile b/drivers/phy/realtek/Makefile
new file mode 100644
index 000000000000..ed7b47ff8a26
--- /dev/null
+++ b/drivers/phy/realtek/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_RTK_RTD_USB2PHY) += phy-rtk-usb2.o
+obj-$(CONFIG_PHY_RTK_RTD_USB3PHY) += phy-rtk-usb3.o
diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c
new file mode 100644
index 000000000000..5e7ee060b404
--- /dev/null
+++ b/drivers/phy/realtek/phy-rtk-usb2.c
@@ -0,0 +1,1331 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-rtk-usb2.c RTK usb2.0 PHY driver
+ *
+ * Copyright (C) 2023 Realtek Semiconductor Corporation
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/regmap.h>
+#include <linux/sys_soc.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+#include <linux/usb.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/hcd.h>
+
+/* GUSB2PHYACCn register */
+#define PHY_NEW_REG_REQ BIT(25)
+#define PHY_VSTS_BUSY BIT(23)
+#define PHY_VCTRL_SHIFT 8
+#define PHY_REG_DATA_MASK 0xff
+
+#define GET_LOW_NIBBLE(addr) ((addr) & 0x0f)
+#define GET_HIGH_NIBBLE(addr) (((addr) & 0xf0) >> 4)
+
+#define EFUS_USB_DC_CAL_RATE 2
+#define EFUS_USB_DC_CAL_MAX 7
+
+#define EFUS_USB_DC_DIS_RATE 1
+#define EFUS_USB_DC_DIS_MAX 7
+
+#define MAX_PHY_DATA_SIZE 20
+#define OFFEST_PHY_READ 0x20
+
+#define MAX_USB_PHY_NUM 4
+#define MAX_USB_PHY_PAGE0_DATA_SIZE 16
+#define MAX_USB_PHY_PAGE1_DATA_SIZE 16
+#define MAX_USB_PHY_PAGE2_DATA_SIZE 8
+
+#define SET_PAGE_OFFSET 0xf4
+#define SET_PAGE_0 0x9b
+#define SET_PAGE_1 0xbb
+#define SET_PAGE_2 0xdb
+
+#define PAGE_START 0xe0
+#define PAGE0_0XE4 0xe4
+#define PAGE0_0XE6 0xe6
+#define PAGE0_0XE7 0xe7
+#define PAGE1_0XE0 0xe0
+#define PAGE1_0XE2 0xe2
+
+#define SENSITIVITY_CTRL (BIT(4) | BIT(5) | BIT(6))
+#define ENABLE_AUTO_SENSITIVITY_CALIBRATION BIT(2)
+#define DEFAULT_DC_DRIVING_VALUE (0x8)
+#define DEFAULT_DC_DISCONNECTION_VALUE (0x6)
+#define HS_CLK_SELECT BIT(6)
+
+struct phy_reg {
+ void __iomem *reg_wrap_vstatus;
+ void __iomem *reg_gusb2phyacc0;
+ int vstatus_index;
+};
+
+struct phy_data {
+ u8 addr;
+ u8 data;
+};
+
+struct phy_cfg {
+ int page0_size;
+ struct phy_data page0[MAX_USB_PHY_PAGE0_DATA_SIZE];
+ int page1_size;
+ struct phy_data page1[MAX_USB_PHY_PAGE1_DATA_SIZE];
+ int page2_size;
+ struct phy_data page2[MAX_USB_PHY_PAGE2_DATA_SIZE];
+
+ int num_phy;
+
+ bool check_efuse;
+ int check_efuse_version;
+#define CHECK_EFUSE_V1 1
+#define CHECK_EFUSE_V2 2
+ int efuse_dc_driving_rate;
+ int efuse_dc_disconnect_rate;
+ int dc_driving_mask;
+ int dc_disconnect_mask;
+ bool usb_dc_disconnect_at_page0;
+ int driving_updated_for_dev_dis;
+
+ bool do_toggle;
+ bool do_toggle_driving;
+ bool use_default_parameter;
+ bool is_double_sensitivity_mode;
+};
+
+struct phy_parameter {
+ struct phy_reg phy_reg;
+
+ /* Get from efuse */
+ s8 efuse_usb_dc_cal;
+ s8 efuse_usb_dc_dis;
+
+ /* Get from dts */
+ bool inverse_hstx_sync_clock;
+ u32 driving_level;
+ s32 driving_level_compensate;
+ s32 disconnection_compensate;
+};
+
+struct rtk_phy {
+ struct usb_phy phy;
+ struct device *dev;
+
+ struct phy_cfg *phy_cfg;
+ int num_phy;
+ struct phy_parameter *phy_parameter;
+
+ struct dentry *debug_dir;
+};
+
+/* map 0xE0 to 0 ... 0xE7 to 7, 0xF0 to 8 ... 0xF7 to 15 */
+static inline int page_addr_to_array_index(u8 addr)
+{
+ return (int)((((addr) - PAGE_START) & 0x7) +
+ ((((addr) - PAGE_START) & 0x10) >> 1));
+}
+
+static inline u8 array_index_to_page_addr(int index)
+{
+ return ((((index) + PAGE_START) & 0x7) +
+ ((((index) & 0x8) << 1) + PAGE_START));
+}
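/*
 * Editorial illustration, not part of this patch: the two helpers above fold
 * the 8-register pages at 0xE0..0xE7 and 0xF0..0xF7 into one 16-entry array
 * and back. With PAGE_START = 0xE0 as defined above:
 *
 *   page_addr_to_array_index(0xE4) == 4
 *   page_addr_to_array_index(0xF0) == 8
 *   array_index_to_page_addr(15)   == 0xF7
 */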
+
+#define PHY_IO_TIMEOUT_USEC (50000)
+#define PHY_IO_DELAY_US (100)
+
+static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
+{
+ int ret;
+ unsigned int val;
+
+ ret = read_poll_timeout(readl, val, ((val & mask) == result),
+ PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
+ if (ret) {
+ pr_err("%s can't program USB phy\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static char rtk_phy_read(struct phy_reg *phy_reg, char addr)
+{
+ void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
+ unsigned int val;
+ int ret = 0;
+
+ addr -= OFFEST_PHY_READ;
+
+ /* polling until VBusy == 0 */
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return (char)ret;
+
+ /* VCtrl = low nibble of addr, and set PHY_NEW_REG_REQ */
+ val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+ writel(val, reg_gusb2phyacc0);
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return (char)ret;
+
+ /* VCtrl = high nibble of addr, and set PHY_NEW_REG_REQ */
+ val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+ writel(val, reg_gusb2phyacc0);
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return (char)ret;
+
+ val = readl(reg_gusb2phyacc0);
+
+ return (char)(val & PHY_REG_DATA_MASK);
+}
+
+static int rtk_phy_write(struct phy_reg *phy_reg, char addr, char data)
+{
+ unsigned int val;
+ void __iomem *reg_wrap_vstatus = phy_reg->reg_wrap_vstatus;
+ void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
+ int shift_bits = phy_reg->vstatus_index * 8;
+ int ret = 0;
+
+ /* write data to VStatusOut2 (data output to phy) */
+ writel((u32)data << shift_bits, reg_wrap_vstatus);
+
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return ret;
+
+ /* VCtrl = low nibble of addr, set PHY_NEW_REG_REQ */
+ val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+
+ writel(val, reg_gusb2phyacc0);
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return ret;
+
+ /* VCtrl = high nibble of addr, set PHY_NEW_REG_REQ */
+ val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+
+ writel(val, reg_gusb2phyacc0);
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
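/*
 * Editorial note, not part of this patch: both accessors above drive the
 * vendor register interface the same way: poll PHY_VSTS_BUSY until it
 * clears, then issue the low nibble and the high nibble of the register
 * address through the VCtrl field with PHY_NEW_REG_REQ set, polling for
 * busy after each step. A write additionally latches the data byte into the
 * wrapper's VStatus register (shifted by vstatus_index * 8) before the
 * address sequence starts; a read returns the low byte of GUSB2PHYACC0 at
 * the end.
 */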
+
+static int rtk_phy_set_page(struct phy_reg *phy_reg, int page)
+{
+ switch (page) {
+ case 0:
+ return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_0);
+ case 1:
+ return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_1);
+ case 2:
+ return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_2);
+ default:
+ pr_err("%s error page=%d\n", __func__, page);
+ }
+
+ return -EINVAL;
+}
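/*
 * Editorial sketch, not part of this patch: a hypothetical helper showing
 * how the page-select and indirect-write primitives above combine, e.g. to
 * update the DC driving register on page 0. The function name is
 * illustrative only.
 */
static int __maybe_unused example_write_page0_e4(struct phy_reg *phy_reg, u8 value)
{
	int ret;

	/* select page 0 before touching a page 0 register */
	ret = rtk_phy_set_page(phy_reg, 0);
	if (ret)
		return ret;

	/* write the new value through the indirect access interface */
	return rtk_phy_write(phy_reg, PAGE0_0XE4, value);
}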
+
+static u8 __updated_dc_disconnect_level_page0_0xe4(struct phy_cfg *phy_cfg,
+ struct phy_parameter *phy_parameter, u8 data)
+{
+ u8 ret;
+ s32 val;
+ s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+ int offset = 4;
+
+ val = (s32)((data >> offset) & dc_disconnect_mask)
+ + phy_parameter->efuse_usb_dc_dis
+ + phy_parameter->disconnection_compensate;
+
+ if (val > dc_disconnect_mask)
+ val = dc_disconnect_mask;
+ else if (val < 0)
+ val = 0;
+
+ ret = (data & (~(dc_disconnect_mask << offset))) |
+ (val & dc_disconnect_mask) << offset;
+
+ return ret;
+}
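/*
 * Editorial worked example, not part of this patch, assuming a
 * dc_disconnect_mask of 0x7: with data = 0x68, efuse_usb_dc_dis = 0 and
 * disconnection_compensate = +3, the level held in the upper nibble is
 * ((0x68 >> 4) & 0x7) = 6, adjusted to 6 + 0 + 3 = 9 and clamped to the
 * mask value 7, so the function returns (0x08 | (7 << 4)) = 0x78.
 */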
+
+/* update the DC disconnect level at page 0 */
+static void update_dc_disconnect_level_at_page0(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, bool update)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+ struct phy_data *phy_data_page;
+ struct phy_data *phy_data;
+ u8 addr, data;
+ int offset = 4;
+ s32 dc_disconnect_mask;
+ int i;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_reg = &phy_parameter->phy_reg;
+
+ /* Set page 0 */
+ phy_data_page = phy_cfg->page0;
+ rtk_phy_set_page(phy_reg, 0);
+
+ i = page_addr_to_array_index(PAGE0_0XE4);
+ phy_data = phy_data_page + i;
+ if (!phy_data->addr) {
+ phy_data->addr = PAGE0_0XE4;
+ phy_data->data = rtk_phy_read(phy_reg, PAGE0_0XE4);
+ }
+
+ addr = phy_data->addr;
+ data = phy_data->data;
+ dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+
+ if (update)
+ data = __updated_dc_disconnect_level_page0_0xe4(phy_cfg, phy_parameter, data);
+ else
+ data = (data & ~(dc_disconnect_mask << offset)) |
+ (DEFAULT_DC_DISCONNECTION_VALUE << offset);
+
+ if (rtk_phy_write(phy_reg, addr, data))
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+}
+
+static u8 __updated_dc_disconnect_level_page1_0xe2(struct phy_cfg *phy_cfg,
+ struct phy_parameter *phy_parameter, u8 data)
+{
+ u8 ret;
+ s32 val;
+ s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+
+ if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+ val = (s32)(data & dc_disconnect_mask)
+ + phy_parameter->efuse_usb_dc_dis
+ + phy_parameter->disconnection_compensate;
+ } else { /* for CHECK_EFUSE_V2 or no efuse */
+ if (phy_parameter->efuse_usb_dc_dis)
+ val = (s32)(phy_parameter->efuse_usb_dc_dis +
+ phy_parameter->disconnection_compensate);
+ else
+ val = (s32)((data & dc_disconnect_mask) +
+ phy_parameter->disconnection_compensate);
+ }
+
+ if (val > dc_disconnect_mask)
+ val = dc_disconnect_mask;
+ else if (val < 0)
+ val = 0;
+
+ ret = (data & (~dc_disconnect_mask)) | (val & dc_disconnect_mask);
+
+ return ret;
+}
+
+/* update the DC disconnect level at page 1 */
+static void update_dc_disconnect_level_at_page1(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, bool update)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_data *phy_data_page;
+ struct phy_data *phy_data;
+ struct phy_reg *phy_reg;
+ u8 addr, data;
+ s32 dc_disconnect_mask;
+ int i;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_reg = &phy_parameter->phy_reg;
+
+ /* Set page 1 */
+ phy_data_page = phy_cfg->page1;
+ rtk_phy_set_page(phy_reg, 1);
+
+ i = page_addr_to_array_index(PAGE1_0XE2);
+ phy_data = phy_data_page + i;
+ if (!phy_data->addr) {
+ phy_data->addr = PAGE1_0XE2;
+ phy_data->data = rtk_phy_read(phy_reg, PAGE1_0XE2);
+ }
+
+ addr = phy_data->addr;
+ data = phy_data->data;
+ dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+
+ if (update)
+ data = __updated_dc_disconnect_level_page1_0xe2(phy_cfg, phy_parameter, data);
+ else
+ data = (data & ~dc_disconnect_mask) | DEFAULT_DC_DISCONNECTION_VALUE;
+
+ if (rtk_phy_write(phy_reg, addr, data))
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+}
+
+static void update_dc_disconnect_level(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, bool update)
+{
+ struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+
+ if (phy_cfg->usb_dc_disconnect_at_page0)
+ update_dc_disconnect_level_at_page0(rtk_phy, phy_parameter, update);
+ else
+ update_dc_disconnect_level_at_page1(rtk_phy, phy_parameter, update);
+}
+
+static u8 __update_dc_driving_page0_0xe4(struct phy_cfg *phy_cfg,
+ struct phy_parameter *phy_parameter, u8 data)
+{
+ s32 driving_level_compensate = phy_parameter->driving_level_compensate;
+ s32 dc_driving_mask = phy_cfg->dc_driving_mask;
+ s32 val;
+ u8 ret;
+
+ if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+ val = (s32)(data & dc_driving_mask) + driving_level_compensate
+ + phy_parameter->efuse_usb_dc_cal;
+ } else { /* for CHECK_EFUSE_V2 or no efuse */
+ if (phy_parameter->efuse_usb_dc_cal)
+ val = (s32)((phy_parameter->efuse_usb_dc_cal & dc_driving_mask)
+ + driving_level_compensate);
+ else
+ val = (s32)(data & dc_driving_mask);
+ }
+
+ if (val > dc_driving_mask)
+ val = dc_driving_mask;
+ else if (val < 0)
+ val = 0;
+
+ ret = (data & (~dc_driving_mask)) | (val & dc_driving_mask);
+
+ return ret;
+}
+
+static void update_dc_driving_level(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+
+ phy_reg = &phy_parameter->phy_reg;
+ phy_cfg = rtk_phy->phy_cfg;
+ if (!phy_cfg->page0[4].addr) {
+ rtk_phy_set_page(phy_reg, 0);
+ phy_cfg->page0[4].addr = PAGE0_0XE4;
+ phy_cfg->page0[4].data = rtk_phy_read(phy_reg, PAGE0_0XE4);
+ }
+
+ if (phy_parameter->driving_level != DEFAULT_DC_DRIVING_VALUE) {
+ u32 dc_driving_mask;
+ u8 driving_level;
+ u8 data;
+
+ data = phy_cfg->page0[4].data;
+ dc_driving_mask = phy_cfg->dc_driving_mask;
+ driving_level = data & dc_driving_mask;
+
+ dev_dbg(rtk_phy->dev, "%s driving_level=%d => dts driving_level=%d\n",
+ __func__, driving_level, phy_parameter->driving_level);
+
+ phy_cfg->page0[4].data = (data & (~dc_driving_mask)) |
+ (phy_parameter->driving_level & dc_driving_mask);
+ }
+
+ phy_cfg->page0[4].data = __update_dc_driving_page0_0xe4(phy_cfg,
+ phy_parameter,
+ phy_cfg->page0[4].data);
+}
+
+static void update_hs_clk_select(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (phy_parameter->inverse_hstx_sync_clock) {
+ if (!phy_cfg->page0[6].addr) {
+ rtk_phy_set_page(phy_reg, 0);
+ phy_cfg->page0[6].addr = PAGE0_0XE6;
+ phy_cfg->page0[6].data = rtk_phy_read(phy_reg, PAGE0_0XE6);
+ }
+
+ phy_cfg->page0[6].data = phy_cfg->page0[6].data | HS_CLK_SELECT;
+ }
+}
+
+static void do_rtk_phy_toggle(struct rtk_phy *rtk_phy,
+ int index, bool connect)
+{
+ struct phy_parameter *phy_parameter;
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+ struct phy_data *phy_data_page;
+ u8 addr, data;
+ int i;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (!phy_cfg->do_toggle)
+ goto out;
+
+ if (phy_cfg->is_double_sensitivity_mode)
+ goto do_toggle_driving;
+
+ /* Set page 0 */
+ rtk_phy_set_page(phy_reg, 0);
+
+ addr = PAGE0_0XE7;
+ data = rtk_phy_read(phy_reg, addr);
+
+ if (connect)
+ rtk_phy_write(phy_reg, addr, data & (~SENSITIVITY_CTRL));
+ else
+ rtk_phy_write(phy_reg, addr, data | (SENSITIVITY_CTRL));
+
+do_toggle_driving:
+
+ if (!phy_cfg->do_toggle_driving)
+ goto do_toggle;
+
+ /* Page 0 addr 0xE4 driving capability */
+
+ /* Set page 0 */
+ phy_data_page = phy_cfg->page0;
+ rtk_phy_set_page(phy_reg, 0);
+
+ i = page_addr_to_array_index(PAGE0_0XE4);
+ addr = phy_data_page[i].addr;
+ data = phy_data_page[i].data;
+
+ if (connect) {
+ rtk_phy_write(phy_reg, addr, data);
+ } else {
+ u8 value;
+ s32 tmp;
+ s32 driving_updated =
+ phy_cfg->driving_updated_for_dev_dis;
+ s32 dc_driving_mask = phy_cfg->dc_driving_mask;
+
+ tmp = (s32)(data & dc_driving_mask) + driving_updated;
+
+ if (tmp > dc_driving_mask)
+ tmp = dc_driving_mask;
+ else if (tmp < 0)
+ tmp = 0;
+
+ value = (data & (~dc_driving_mask)) | (tmp & dc_driving_mask);
+
+ rtk_phy_write(phy_reg, addr, value);
+ }
+
+do_toggle:
+ /* restore dc disconnect level before toggle */
+ update_dc_disconnect_level(rtk_phy, phy_parameter, false);
+
+ /* Set page 1 */
+ rtk_phy_set_page(phy_reg, 1);
+
+ addr = PAGE1_0XE0;
+ data = rtk_phy_read(phy_reg, addr);
+
+ rtk_phy_write(phy_reg, addr, data &
+ (~ENABLE_AUTO_SENSITIVITY_CALIBRATION));
+ mdelay(1);
+ rtk_phy_write(phy_reg, addr, data |
+ (ENABLE_AUTO_SENSITIVITY_CALIBRATION));
+
+ /* update dc disconnect level after toggle */
+ update_dc_disconnect_level(rtk_phy, phy_parameter, true);
+
+out:
+ return;
+}
+
+static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
+{
+ struct phy_parameter *phy_parameter;
+ struct phy_cfg *phy_cfg;
+ struct phy_data *phy_data_page;
+ struct phy_reg *phy_reg;
+ int i;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (phy_cfg->use_default_parameter) {
+ dev_dbg(rtk_phy->dev, "%s phy#%d use default parameter\n",
+ __func__, index);
+ goto do_toggle;
+ }
+
+ /* Set page 0 */
+ phy_data_page = phy_cfg->page0;
+ rtk_phy_set_page(phy_reg, 0);
+
+ for (i = 0; i < phy_cfg->page0_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = phy_data->addr;
+ u8 data = phy_data->data;
+
+ if (!addr)
+ continue;
+
+ if (rtk_phy_write(phy_reg, addr, data)) {
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page0 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+ return -EINVAL;
+ }
+ }
+
+ /* Set page 1 */
+ phy_data_page = phy_cfg->page1;
+ rtk_phy_set_page(phy_reg, 1);
+
+ for (i = 0; i < phy_cfg->page1_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = phy_data->addr;
+ u8 data = phy_data->data;
+
+ if (!addr)
+ continue;
+
+ if (rtk_phy_write(phy_reg, addr, data)) {
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+ return -EINVAL;
+ }
+ }
+
+ if (phy_cfg->page2_size == 0)
+ goto do_toggle;
+
+ /* Set page 2 */
+ phy_data_page = phy_cfg->page2;
+ rtk_phy_set_page(phy_reg, 2);
+
+ for (i = 0; i < phy_cfg->page2_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = phy_data->addr;
+ u8 data = phy_data->data;
+
+ if (!addr)
+ continue;
+
+ if (rtk_phy_write(phy_reg, addr, data)) {
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page2 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+ return -EINVAL;
+ }
+ }
+
+do_toggle:
+ do_rtk_phy_toggle(rtk_phy, index, false);
+
+ return 0;
+}
+
+static int rtk_phy_init(struct phy *phy)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+ unsigned long phy_init_time = jiffies;
+ int i, ret = 0;
+
+ if (!rtk_phy)
+ return -EINVAL;
+
+ for (i = 0; i < rtk_phy->num_phy; i++)
+ ret = do_rtk_phy_init(rtk_phy, i);
+
+ dev_dbg(rtk_phy->dev, "Initialized RTK USB 2.0 PHY (take %dms)\n",
+ jiffies_to_msecs(jiffies - phy_init_time));
+ return ret;
+}
+
+static int rtk_phy_exit(struct phy *phy)
+{
+ return 0;
+}
+
+static const struct phy_ops ops = {
+ .init = rtk_phy_init,
+ .exit = rtk_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static void rtk_phy_toggle(struct usb_phy *usb2_phy, bool connect, int port)
+{
+ int index = port;
+ struct rtk_phy *rtk_phy = NULL;
+
+ rtk_phy = dev_get_drvdata(usb2_phy->dev);
+
+ if (index > rtk_phy->num_phy) {
+ dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n",
+ __func__, index, rtk_phy->num_phy);
+ return;
+ }
+
+ do_rtk_phy_toggle(rtk_phy, index, connect);
+}
+
+static int rtk_phy_notify_port_status(struct usb_phy *x, int port,
+ u16 portstatus, u16 portchange)
+{
+ bool connect = false;
+
+ pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n",
+ __func__, port, (int)portstatus, (int)portchange);
+ if (portstatus & USB_PORT_STAT_CONNECTION)
+ connect = true;
+
+ if (portchange & USB_PORT_STAT_C_CONNECTION)
+ rtk_phy_toggle(x, connect, port);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *create_phy_debug_root(void)
+{
+ struct dentry *phy_debug_root;
+
+ phy_debug_root = debugfs_lookup("phy", usb_debug_root);
+ if (!phy_debug_root)
+ phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
+
+ return phy_debug_root;
+}
+
+static int rtk_usb2_parameter_show(struct seq_file *s, void *unused)
+{
+ struct rtk_phy *rtk_phy = s->private;
+ struct phy_cfg *phy_cfg;
+ int i, index;
+
+ phy_cfg = rtk_phy->phy_cfg;
+
+ seq_puts(s, "Property:\n");
+ seq_printf(s, " check_efuse: %s\n",
+ phy_cfg->check_efuse ? "Enable" : "Disable");
+ seq_printf(s, " check_efuse_version: %d\n",
+ phy_cfg->check_efuse_version);
+ seq_printf(s, " efuse_dc_driving_rate: %d\n",
+ phy_cfg->efuse_dc_driving_rate);
+ seq_printf(s, " dc_driving_mask: 0x%x\n",
+ phy_cfg->dc_driving_mask);
+ seq_printf(s, " efuse_dc_disconnect_rate: %d\n",
+ phy_cfg->efuse_dc_disconnect_rate);
+ seq_printf(s, " dc_disconnect_mask: 0x%x\n",
+ phy_cfg->dc_disconnect_mask);
+ seq_printf(s, " usb_dc_disconnect_at_page0: %s\n",
+ phy_cfg->usb_dc_disconnect_at_page0 ? "true" : "false");
+ seq_printf(s, " do_toggle: %s\n",
+ phy_cfg->do_toggle ? "Enable" : "Disable");
+ seq_printf(s, " do_toggle_driving: %s\n",
+ phy_cfg->do_toggle_driving ? "Enable" : "Disable");
+ seq_printf(s, " driving_updated_for_dev_dis: 0x%x\n",
+ phy_cfg->driving_updated_for_dev_dis);
+ seq_printf(s, " use_default_parameter: %s\n",
+ phy_cfg->use_default_parameter ? "Enable" : "Disable");
+ seq_printf(s, " is_double_sensitivity_mode: %s\n",
+ phy_cfg->is_double_sensitivity_mode ? "Enable" : "Disable");
+
+ for (index = 0; index < rtk_phy->num_phy; index++) {
+ struct phy_parameter *phy_parameter;
+ struct phy_reg *phy_reg;
+ struct phy_data *phy_data_page;
+
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ seq_printf(s, "PHY %d:\n", index);
+
+ seq_puts(s, "Page 0:\n");
+ /* Set page 0 */
+ phy_data_page = phy_cfg->page0;
+ rtk_phy_set_page(phy_reg, 0);
+
+ for (i = 0; i < phy_cfg->page0_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = array_index_to_page_addr(i);
+ u8 data = phy_data->data;
+ u8 value = rtk_phy_read(phy_reg, addr);
+
+ if (phy_data->addr)
+ seq_printf(s, " Page 0: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+ addr, data, value);
+ else
+ seq_printf(s, " Page 0: addr=0x%x data=none ==> read value=0x%02x\n",
+ addr, value);
+ }
+
+ seq_puts(s, "Page 1:\n");
+ /* Set page 1 */
+ phy_data_page = phy_cfg->page1;
+ rtk_phy_set_page(phy_reg, 1);
+
+ for (i = 0; i < phy_cfg->page1_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = array_index_to_page_addr(i);
+ u8 data = phy_data->data;
+ u8 value = rtk_phy_read(phy_reg, addr);
+
+ if (phy_data->addr)
+ seq_printf(s, " Page 1: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+ addr, data, value);
+ else
+ seq_printf(s, " Page 1: addr=0x%x data=none ==> read value=0x%02x\n",
+ addr, value);
+ }
+
+ if (phy_cfg->page2_size == 0)
+ goto out;
+
+ seq_puts(s, "Page 2:\n");
+ /* Set page 2 */
+ phy_data_page = phy_cfg->page2;
+ rtk_phy_set_page(phy_reg, 2);
+
+ for (i = 0; i < phy_cfg->page2_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = array_index_to_page_addr(i);
+ u8 data = phy_data->data;
+ u8 value = rtk_phy_read(phy_reg, addr);
+
+ if (phy_data->addr)
+ seq_printf(s, " Page 2: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+ addr, data, value);
+ else
+ seq_printf(s, " Page 2: addr=0x%x data=none ==> read value=0x%02x\n",
+ addr, value);
+ }
+
+out:
+ seq_puts(s, "PHY Property:\n");
+ seq_printf(s, " efuse_usb_dc_cal: %d\n",
+ (int)phy_parameter->efuse_usb_dc_cal);
+ seq_printf(s, " efuse_usb_dc_dis: %d\n",
+ (int)phy_parameter->efuse_usb_dc_dis);
+ seq_printf(s, " inverse_hstx_sync_clock: %s\n",
+ phy_parameter->inverse_hstx_sync_clock ? "Enable" : "Disable");
+ seq_printf(s, " driving_level: %d\n",
+ phy_parameter->driving_level);
+ seq_printf(s, " driving_level_compensate: %d\n",
+ phy_parameter->driving_level_compensate);
+ seq_printf(s, " disconnection_compensate: %d\n",
+ phy_parameter->disconnection_compensate);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(rtk_usb2_parameter);
+
+static inline void create_debug_files(struct rtk_phy *rtk_phy)
+{
+ struct dentry *phy_debug_root = NULL;
+
+ phy_debug_root = create_phy_debug_root();
+ if (!phy_debug_root)
+ return;
+
+ rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev),
+ phy_debug_root);
+ if (!rtk_phy->debug_dir)
+ return;
+
+ if (!debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+ &rtk_usb2_parameter_fops))
+ goto file_error;
+
+ return;
+
+file_error:
+ debugfs_remove_recursive(rtk_phy->debug_dir);
+}
+
+static inline void remove_debug_files(struct rtk_phy *rtk_phy)
+{
+ debugfs_remove_recursive(rtk_phy->debug_dir);
+}
+#else
+static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
+static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
+#endif /* CONFIG_DEBUG_FS */
+
+static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, int index)
+{
+ struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+ u8 value = 0;
+ struct nvmem_cell *cell;
+ struct soc_device_attribute rtk_soc_groot[] = {
+ { .family = "Realtek Groot",},
+ { /* empty */ } };
+
+ if (!phy_cfg->check_efuse)
+ goto out;
+
+ /* Read efuse for usb dc cal */
+ cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-cal");
+ if (IS_ERR(cell)) {
+ dev_dbg(rtk_phy->dev, "%s no usb-dc-cal: %ld\n",
+ __func__, PTR_ERR(cell));
+ } else {
+ unsigned char *buf;
+ size_t buf_size;
+
+ buf = nvmem_cell_read(cell, &buf_size);
+ if (!IS_ERR(buf)) {
+ value = buf[0] & phy_cfg->dc_driving_mask;
+ kfree(buf);
+ }
+ nvmem_cell_put(cell);
+ }
+
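+ /*
+ * V1 efuse encoding: values up to EFUS_USB_DC_CAL_MAX are a positive
+ * compensation scaled by the driving rate; larger values encode a
+ * negative compensation of the masked value.
+ */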
+ if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+ int rate = phy_cfg->efuse_dc_driving_rate;
+
+ if (value <= EFUS_USB_DC_CAL_MAX)
+ phy_parameter->efuse_usb_dc_cal = (int8_t)(value * rate);
+ else
+ phy_parameter->efuse_usb_dc_cal = -(int8_t)
+ ((EFUS_USB_DC_CAL_MAX & value) * rate);
+
+ if (soc_device_match(rtk_soc_groot)) {
+ dev_dbg(rtk_phy->dev, "For groot IC we need a workaround to adjust efuse_usb_dc_cal\n");
+
+ /* Don't multiply by dc_cal_rate=2 for positive dc cal compensation */
+ if (value <= EFUS_USB_DC_CAL_MAX)
+ phy_parameter->efuse_usb_dc_cal = (int8_t)(value);
+
+ /* Cap the dc cal compensation at 0x8 when the OTP value is 0x7 */
+ if (value == 0x7)
+ phy_parameter->efuse_usb_dc_cal = (int8_t)(value + 1);
+ }
+ } else { /* for CHECK_EFUSE_V2 */
+ phy_parameter->efuse_usb_dc_cal = value & phy_cfg->dc_driving_mask;
+ }
+
+ /* Read efuse for usb dc disconnect level */
+ value = 0;
+ cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-dis");
+ if (IS_ERR(cell)) {
+ dev_dbg(rtk_phy->dev, "%s no usb-dc-dis: %ld\n",
+ __func__, PTR_ERR(cell));
+ } else {
+ unsigned char *buf;
+ size_t buf_size;
+
+ buf = nvmem_cell_read(cell, &buf_size);
+ if (!IS_ERR(buf)) {
+ value = buf[0] & phy_cfg->dc_disconnect_mask;
+ kfree(buf);
+ }
+ nvmem_cell_put(cell);
+ }
+
+ if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+ int rate = phy_cfg->efuse_dc_disconnect_rate;
+
+ if (value <= EFUS_USB_DC_DIS_MAX)
+ phy_parameter->efuse_usb_dc_dis = (int8_t)(value * rate);
+ else
+ phy_parameter->efuse_usb_dc_dis = -(int8_t)
+ ((EFUS_USB_DC_DIS_MAX & value) * rate);
+ } else { /* for CHECK_EFUSE_V2 */
+ phy_parameter->efuse_usb_dc_dis = value & phy_cfg->dc_disconnect_mask;
+ }
+
+out:
+ return 0;
+}
+
+static int parse_phy_data(struct rtk_phy *rtk_phy)
+{
+ struct device *dev = rtk_phy->dev;
+ struct device_node *np = dev->of_node;
+ struct phy_parameter *phy_parameter;
+ int ret = 0;
+ int index;
+
+ rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
+ rtk_phy->num_phy, GFP_KERNEL);
+ if (!rtk_phy->phy_parameter)
+ return -ENOMEM;
+
+ for (index = 0; index < rtk_phy->num_phy; index++) {
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+
+ phy_parameter->phy_reg.reg_wrap_vstatus = of_iomap(np, 0);
+ phy_parameter->phy_reg.reg_gusb2phyacc0 = of_iomap(np, 1) + index;
+ phy_parameter->phy_reg.vstatus_index = index;
+
+ if (of_property_read_bool(np, "realtek,inverse-hstx-sync-clock"))
+ phy_parameter->inverse_hstx_sync_clock = true;
+ else
+ phy_parameter->inverse_hstx_sync_clock = false;
+
+ if (of_property_read_u32_index(np, "realtek,driving-level",
+ index, &phy_parameter->driving_level))
+ phy_parameter->driving_level = DEFAULT_DC_DRIVING_VALUE;
+
+ if (of_property_read_u32_index(np, "realtek,driving-level-compensate",
+ index, &phy_parameter->driving_level_compensate))
+ phy_parameter->driving_level_compensate = 0;
+
+ if (of_property_read_u32_index(np, "realtek,disconnection-compensate",
+ index, &phy_parameter->disconnection_compensate))
+ phy_parameter->disconnection_compensate = 0;
+
+ get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
+
+ update_dc_driving_level(rtk_phy, phy_parameter);
+
+ update_hs_clk_select(rtk_phy, phy_parameter);
+ }
+
+ return ret;
+}
+
+static int rtk_usb2phy_probe(struct platform_device *pdev)
+{
+ struct rtk_phy *rtk_phy;
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+ const struct phy_cfg *phy_cfg;
+ int ret = 0;
+
+ phy_cfg = of_device_get_match_data(dev);
+ if (!phy_cfg) {
+ dev_err(dev, "phy config are not assigned!\n");
+ return -EINVAL;
+ }
+
+ rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
+ if (!rtk_phy)
+ return -ENOMEM;
+
+ rtk_phy->dev = &pdev->dev;
+ rtk_phy->phy.dev = rtk_phy->dev;
+ rtk_phy->phy.label = "rtk-usb2phy";
+ rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status;
+
+ rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
+ if (!rtk_phy->phy_cfg)
+ return -ENOMEM;
+
+ memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
+
+ rtk_phy->num_phy = phy_cfg->num_phy;
+
+ ret = parse_phy_data(rtk_phy);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, rtk_phy);
+
+ generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
+ if (IS_ERR(generic_phy))
+ return PTR_ERR(generic_phy);
+
+ phy_set_drvdata(generic_phy, rtk_phy);
+
+ phy_provider = devm_of_phy_provider_register(rtk_phy->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ ret = usb_add_phy_dev(&rtk_phy->phy);
+ if (ret)
+ goto err;
+
+ create_debug_files(rtk_phy);
+
+err:
+ return ret;
+}
+
+static void rtk_usb2phy_remove(struct platform_device *pdev)
+{
+ struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
+
+ remove_debug_files(rtk_phy);
+
+ usb_remove_phy(&rtk_phy->phy);
+}
+
+static const struct phy_cfg rtd1295_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0x90},
+ [3] = {0xe3, 0x3a},
+ [4] = {0xe4, 0x68},
+ [6] = {0xe6, 0x91},
+ [13] = {0xf5, 0x81},
+ [15] = {0xf7, 0x02}, },
+ .page1_size = 8,
+ .page1 = { /* default parameter */ },
+ .page2_size = 0,
+ .page2 = { /* no parameter */ },
+ .num_phy = 1,
+ .check_efuse = false,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = false,
+};
+
+static const struct phy_cfg rtd1395_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [4] = {0xe4, 0xac},
+ [13] = {0xf5, 0x00},
+ [15] = {0xf7, 0x02}, },
+ .page1_size = 8,
+ .page1 = { /* default parameter */ },
+ .page2_size = 0,
+ .page2 = { /* no parameter */ },
+ .num_phy = 1,
+ .check_efuse = false,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = false,
+};
+
+static const struct phy_cfg rtd1395_phy_cfg_2port = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [4] = {0xe4, 0xac},
+ [13] = {0xf5, 0x00},
+ [15] = {0xf7, 0x02}, },
+ .page1_size = 8,
+ .page1 = { /* default parameter */ },
+ .page2_size = 0,
+ .page2 = { /* no parameter */ },
+ .num_phy = 2,
+ .check_efuse = false,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = false,
+};
+
+static const struct phy_cfg rtd1619_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [4] = {0xe4, 0x68}, },
+ .page1_size = 8,
+ .page1 = { /* default parameter */ },
+ .page2_size = 0,
+ .page2 = { /* no parameter */ },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = false,
+};
+
+static const struct phy_cfg rtd1319_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0x18},
+ [4] = {0xe4, 0x6a},
+ [7] = {0xe7, 0x71},
+ [13] = {0xf5, 0x15},
+ [15] = {0xf7, 0x32}, },
+ .page1_size = 8,
+ .page1 = { [3] = {0xe3, 0x44}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { [0] = {0xe0, 0x01}, },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = true,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct phy_cfg rtd1312c_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0x14},
+ [4] = {0xe4, 0x67},
+ [5] = {0xe5, 0x55}, },
+ .page1_size = 8,
+ .page1 = { [3] = {0xe3, 0x23},
+ [6] = {0xe6, 0x58}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { /* default parameter */ },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = true,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct phy_cfg rtd1619b_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0xa3},
+ [4] = {0xe4, 0x88},
+ [5] = {0xe5, 0x4f},
+ [6] = {0xe6, 0x02}, },
+ .page1_size = 8,
+ .page1 = { [3] = {0xe3, 0x64}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { [7] = {0xe7, 0x45}, },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+ .dc_driving_mask = 0x1f,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = false,
+ .do_toggle = true,
+ .do_toggle_driving = true,
+ .driving_updated_for_dev_dis = 0x8,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct phy_cfg rtd1319d_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0xa3},
+ [4] = {0xe4, 0x8e},
+ [5] = {0xe5, 0x4f},
+ [6] = {0xe6, 0x02}, },
+ .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
+ .page1 = { [14] = {0xf5, 0x1}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { [7] = {0xe7, 0x44}, },
+ .check_efuse = true,
+ .num_phy = 1,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+ .dc_driving_mask = 0x1f,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = false,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0x8,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct phy_cfg rtd1315e_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0xa3},
+ [4] = {0xe4, 0x8c},
+ [5] = {0xe5, 0x4f},
+ [6] = {0xe6, 0x02}, },
+ .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
+ .page1 = { [3] = {0xe3, 0x7f},
+ [14] = {0xf5, 0x01}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { [7] = {0xe7, 0x44}, },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V2,
+ .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+ .dc_driving_mask = 0x1f,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = false,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0x8,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct of_device_id usbphy_rtk_dt_match[] = {
+ { .compatible = "realtek,rtd1295-usb2phy", .data = &rtd1295_phy_cfg },
+ { .compatible = "realtek,rtd1312c-usb2phy", .data = &rtd1312c_phy_cfg },
+ { .compatible = "realtek,rtd1315e-usb2phy", .data = &rtd1315e_phy_cfg },
+ { .compatible = "realtek,rtd1319-usb2phy", .data = &rtd1319_phy_cfg },
+ { .compatible = "realtek,rtd1319d-usb2phy", .data = &rtd1319d_phy_cfg },
+ { .compatible = "realtek,rtd1395-usb2phy", .data = &rtd1395_phy_cfg },
+ { .compatible = "realtek,rtd1395-usb2phy-2port", .data = &rtd1395_phy_cfg_2port },
+ { .compatible = "realtek,rtd1619-usb2phy", .data = &rtd1619_phy_cfg },
+ { .compatible = "realtek,rtd1619b-usb2phy", .data = &rtd1619b_phy_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
+
+static struct platform_driver rtk_usb2phy_driver = {
+ .probe = rtk_usb2phy_probe,
+ .remove_new = rtk_usb2phy_remove,
+ .driver = {
+ .name = "rtk-usb2phy",
+ .of_match_table = usbphy_rtk_dt_match,
+ },
+};
+
+module_platform_driver(rtk_usb2phy_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform: rtk-usb2phy");
+MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
+MODULE_DESCRIPTION("Realtek usb 2.0 phy driver");
diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c
new file mode 100644
index 000000000000..7881f908aade
--- /dev/null
+++ b/drivers/phy/realtek/phy-rtk-usb3.c
@@ -0,0 +1,767 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-rtk-usb3.c RTK USB 3.0 PHY driver
+ *
+ * Copyright (C) 2023 Realtek Semiconductor Corporation
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/regmap.h>
+#include <linux/sys_soc.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/phy.h>
+
+#define USB_MDIO_CTRL_PHY_BUSY BIT(7)
+#define USB_MDIO_CTRL_PHY_WRITE BIT(0)
+#define USB_MDIO_CTRL_PHY_ADDR_SHIFT 8
+#define USB_MDIO_CTRL_PHY_DATA_SHIFT 16
+
+#define MAX_USB_PHY_DATA_SIZE 0x30
+#define PHY_ADDR_0X09 0x09
+#define PHY_ADDR_0X0B 0x0b
+#define PHY_ADDR_0X0D 0x0d
+#define PHY_ADDR_0X10 0x10
+#define PHY_ADDR_0X1F 0x1f
+#define PHY_ADDR_0X20 0x20
+#define PHY_ADDR_0X21 0x21
+#define PHY_ADDR_0X30 0x30
+
+#define REG_0X09_FORCE_CALIBRATION BIT(9)
+#define REG_0X0B_RX_OFFSET_RANGE_MASK 0xc
+#define REG_0X0D_RX_DEBUG_TEST_EN BIT(6)
+#define REG_0X10_DEBUG_MODE_SETTING 0x3c0
+#define REG_0X10_DEBUG_MODE_SETTING_MASK 0x3f8
+#define REG_0X1F_RX_OFFSET_CODE_MASK 0x1e
+
+#define USB_U3_TX_LFPS_SWING_TRIM_SHIFT 4
+#define USB_U3_TX_LFPS_SWING_TRIM_MASK 0xf
+#define AMPLITUDE_CONTROL_COARSE_MASK 0xff
+#define AMPLITUDE_CONTROL_FINE_MASK 0xffff
+#define AMPLITUDE_CONTROL_COARSE_DEFAULT 0xff
+#define AMPLITUDE_CONTROL_FINE_DEFAULT 0xffff
+
+#define PHY_ADDR_MAP_ARRAY_INDEX(addr) (addr)
+#define ARRAY_INDEX_MAP_PHY_ADDR(index) (index)
+
+struct phy_reg {
+ void __iomem *reg_mdio_ctl;
+};
+
+struct phy_data {
+ u8 addr;
+ u16 data;
+};
+
+struct phy_cfg {
+ int param_size;
+ struct phy_data param[MAX_USB_PHY_DATA_SIZE];
+
+ bool check_efuse;
+ bool do_toggle;
+ bool do_toggle_once;
+ bool use_default_parameter;
+ bool check_rx_front_end_offset;
+};
+
+struct phy_parameter {
+ struct phy_reg phy_reg;
+
+ /* Get from efuse */
+ u8 efuse_usb_u3_tx_lfps_swing_trim;
+
+ /* Get from dts */
+ u32 amplitude_control_coarse;
+ u32 amplitude_control_fine;
+};
+
+struct rtk_phy {
+ struct usb_phy phy;
+ struct device *dev;
+
+ struct phy_cfg *phy_cfg;
+ int num_phy;
+ struct phy_parameter *phy_parameter;
+
+ struct dentry *debug_dir;
+};
+
+#define PHY_IO_TIMEOUT_USEC (50000)
+#define PHY_IO_DELAY_US (100)
+
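+/* Poll @reg until (readl(reg) & mask) == result, in PHY_IO_DELAY_US steps, up to PHY_IO_TIMEOUT_USEC */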
+static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
+{
+ int ret;
+ unsigned int val;
+
+ ret = read_poll_timeout(readl, val, ((val & mask) == result),
+ PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
+ if (ret) {
+ pr_err("%s can't program USB phy\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int rtk_phy3_wait_vbusy(struct phy_reg *phy_reg)
+{
+ return utmi_wait_register(phy_reg->reg_mdio_ctl, USB_MDIO_CTRL_PHY_BUSY, 0);
+}
+
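+/*
+ * The MDIO control register multiplexes PHY register accesses: the PHY
+ * register address sits in bits [15:8], write data in bits [31:16], bit 0
+ * requests a write and bit 7 reports busy (see the USB_MDIO_CTRL_* defines).
+ */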
+static u16 rtk_phy_read(struct phy_reg *phy_reg, char addr)
+{
+ unsigned int tmp;
+ u32 value;
+
+ tmp = (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT);
+
+ writel(tmp, phy_reg->reg_mdio_ctl);
+
+ rtk_phy3_wait_vbusy(phy_reg);
+
+ value = readl(phy_reg->reg_mdio_ctl);
+ value = value >> USB_MDIO_CTRL_PHY_DATA_SHIFT;
+
+ return (u16)value;
+}
+
+static int rtk_phy_write(struct phy_reg *phy_reg, char addr, u16 data)
+{
+ unsigned int val;
+
+ val = USB_MDIO_CTRL_PHY_WRITE |
+ (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT) |
+ (data << USB_MDIO_CTRL_PHY_DATA_SHIFT);
+
+ writel(val, phy_reg->reg_mdio_ctl);
+
+ rtk_phy3_wait_vbusy(phy_reg);
+
+ return 0;
+}
+
+static void do_rtk_usb3_phy_toggle(struct rtk_phy *rtk_phy, int index, bool connect)
+{
+ struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+ struct phy_reg *phy_reg;
+ struct phy_parameter *phy_parameter;
+ struct phy_data *phy_data;
+ u8 addr;
+ u16 data;
+ int i;
+
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (!phy_cfg->do_toggle)
+ return;
+
+ i = PHY_ADDR_MAP_ARRAY_INDEX(PHY_ADDR_0X09);
+ phy_data = phy_cfg->param + i;
+ addr = phy_data->addr;
+ data = phy_data->data;
+
+ if (!addr && !data) {
+ addr = PHY_ADDR_0X09;
+ data = rtk_phy_read(phy_reg, addr);
+ phy_data->addr = addr;
+ phy_data->data = data;
+ }
+
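+ /* Pulse force-calibration: clear the bit, wait 1 ms, then set it again */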
+ rtk_phy_write(phy_reg, addr, data & (~REG_0X09_FORCE_CALIBRATION));
+ mdelay(1);
+ rtk_phy_write(phy_reg, addr, data | REG_0X09_FORCE_CALIBRATION);
+}
+
+static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+ struct phy_parameter *phy_parameter;
+ int i = 0;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (phy_cfg->use_default_parameter)
+ goto do_toggle;
+
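+ /* A zeroed entry (addr == 0 and data == 0) is an unused slot and is skipped */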
+ for (i = 0; i < phy_cfg->param_size; i++) {
+ struct phy_data *phy_data = phy_cfg->param + i;
+ u8 addr = phy_data->addr;
+ u16 data = phy_data->data;
+
+ if (!addr && !data)
+ continue;
+
+ rtk_phy_write(phy_reg, addr, data);
+ }
+
+do_toggle:
+ if (phy_cfg->do_toggle_once)
+ phy_cfg->do_toggle = true;
+
+ do_rtk_usb3_phy_toggle(rtk_phy, index, false);
+
+ if (phy_cfg->do_toggle_once) {
+ u16 check_value = 0;
+ int count = 10;
+ u16 value_0x0d, value_0x10;
+
+ /* Enable debug mode by setting 0x0D and 0x10 */
+ value_0x0d = rtk_phy_read(phy_reg, PHY_ADDR_0X0D);
+ value_0x10 = rtk_phy_read(phy_reg, PHY_ADDR_0X10);
+
+ rtk_phy_write(phy_reg, PHY_ADDR_0X0D,
+ value_0x0d | REG_0X0D_RX_DEBUG_TEST_EN);
+ rtk_phy_write(phy_reg, PHY_ADDR_0X10,
+ (value_0x10 & ~REG_0X10_DEBUG_MODE_SETTING_MASK) |
+ REG_0X10_DEBUG_MODE_SETTING);
+
+ check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
+
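+ /* Poll bit 15 of register 0x30 for up to ~10 ms as the toggle-done indication */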
+ while (!(check_value & BIT(15))) {
+ check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
+ mdelay(1);
+ if (count-- < 0)
+ break;
+ }
+
+ if (!(check_value & BIT(15)))
+ dev_info(rtk_phy->dev, "toggle fail addr=0x%02x, data=0x%04x\n",
+ PHY_ADDR_0X30, check_value);
+
+ /* Disable debug mode by restoring 0x0D and 0x10 to their defaults */
+ rtk_phy_write(phy_reg, PHY_ADDR_0X0D, value_0x0d);
+ rtk_phy_write(phy_reg, PHY_ADDR_0X10, value_0x10);
+
+ phy_cfg->do_toggle = false;
+ }
+
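+ /*
+ * If the RX offset code reads back saturated at either extreme and the
+ * offset range field in register 0x0B is not already at its maximum,
+ * bump the range by one step and redo the toggle sequence.
+ */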
+ if (phy_cfg->check_rx_front_end_offset) {
+ u16 rx_offset_code, rx_offset_range;
+ u16 code_mask = REG_0X1F_RX_OFFSET_CODE_MASK;
+ u16 range_mask = REG_0X0B_RX_OFFSET_RANGE_MASK;
+ bool do_update = false;
+
+ rx_offset_code = rtk_phy_read(phy_reg, PHY_ADDR_0X1F);
+ if (((rx_offset_code & code_mask) == 0x0) ||
+ ((rx_offset_code & code_mask) == code_mask))
+ do_update = true;
+
+ rx_offset_range = rtk_phy_read(phy_reg, PHY_ADDR_0X0B);
+ if (((rx_offset_range & range_mask) == range_mask) && do_update) {
+ dev_warn(rtk_phy->dev, "Don't update rx_offset_range (rx_offset_code=0x%x, rx_offset_range=0x%x)\n",
+ rx_offset_code, rx_offset_range);
+ do_update = false;
+ }
+
+ if (do_update) {
+ u16 tmp1, tmp2;
+
+ tmp1 = rx_offset_range & (~range_mask);
+ tmp2 = rx_offset_range & range_mask;
+ tmp2 += (1 << 2);
+ rx_offset_range = tmp1 | (tmp2 & range_mask);
+ rtk_phy_write(phy_reg, PHY_ADDR_0X0B, rx_offset_range);
+ goto do_toggle;
+ }
+ }
+
+ return 0;
+}
+
+static int rtk_phy_init(struct phy *phy)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+ int ret = 0;
+ int i;
+ unsigned long phy_init_time = jiffies;
+
+ for (i = 0; i < rtk_phy->num_phy; i++)
+ ret = do_rtk_phy_init(rtk_phy, i);
+
+ dev_dbg(rtk_phy->dev, "Initialized RTK USB 3.0 PHY (take %dms)\n",
+ jiffies_to_msecs(jiffies - phy_init_time));
+
+ return ret;
+}
+
+static int rtk_phy_exit(struct phy *phy)
+{
+ return 0;
+}
+
+static const struct phy_ops ops = {
+ .init = rtk_phy_init,
+ .exit = rtk_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static void rtk_phy_toggle(struct usb_phy *usb3_phy, bool connect, int port)
+{
+ int index = port;
+ struct rtk_phy *rtk_phy = NULL;
+
+ rtk_phy = dev_get_drvdata(usb3_phy->dev);
+
+ if (index >= rtk_phy->num_phy) {
+ dev_err(rtk_phy->dev, "%s: port=%d is out of range (num_phy=%d)\n",
+ __func__, index, rtk_phy->num_phy);
+ return;
+ }
+
+ do_rtk_usb3_phy_toggle(rtk_phy, index, connect);
+}
+
+static int rtk_phy_notify_port_status(struct usb_phy *x, int port,
+ u16 portstatus, u16 portchange)
+{
+ bool connect = false;
+
+ pr_debug("%s port=%d portstatus=0x%x portchange=0x%x\n",
+ __func__, port, (int)portstatus, (int)portchange);
+ if (portstatus & USB_PORT_STAT_CONNECTION)
+ connect = true;
+
+ if (portchange & USB_PORT_STAT_C_CONNECTION)
+ rtk_phy_toggle(x, connect, port);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *create_phy_debug_root(void)
+{
+ struct dentry *phy_debug_root;
+
+ phy_debug_root = debugfs_lookup("phy", usb_debug_root);
+ if (!phy_debug_root)
+ phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
+
+ return phy_debug_root;
+}
+
+static int rtk_usb3_parameter_show(struct seq_file *s, void *unused)
+{
+ struct rtk_phy *rtk_phy = s->private;
+ struct phy_cfg *phy_cfg;
+ int i, index;
+
+ phy_cfg = rtk_phy->phy_cfg;
+
+ seq_puts(s, "Property:\n");
+ seq_printf(s, " check_efuse: %s\n",
+ phy_cfg->check_efuse ? "Enable" : "Disable");
+ seq_printf(s, " do_toggle: %s\n",
+ phy_cfg->do_toggle ? "Enable" : "Disable");
+ seq_printf(s, " do_toggle_once: %s\n",
+ phy_cfg->do_toggle_once ? "Enable" : "Disable");
+ seq_printf(s, " use_default_parameter: %s\n",
+ phy_cfg->use_default_parameter ? "Enable" : "Disable");
+
+ for (index = 0; index < rtk_phy->num_phy; index++) {
+ struct phy_reg *phy_reg;
+ struct phy_parameter *phy_parameter;
+
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ seq_printf(s, "PHY %d:\n", index);
+
+ for (i = 0; i < phy_cfg->param_size; i++) {
+ struct phy_data *phy_data = phy_cfg->param + i;
+ u8 addr = ARRAY_INDEX_MAP_PHY_ADDR(i);
+ u16 data = phy_data->data;
+
+ if (!phy_data->addr && !data)
+ seq_printf(s, " addr = 0x%02x, data = none ==> read value = 0x%04x\n",
+ addr, rtk_phy_read(phy_reg, addr));
+ else
+ seq_printf(s, " addr = 0x%02x, data = 0x%04x ==> read value = 0x%04x\n",
+ addr, data, rtk_phy_read(phy_reg, addr));
+ }
+
+ seq_puts(s, "PHY Property:\n");
+ seq_printf(s, " efuse_usb_u3_tx_lfps_swing_trim: 0x%x\n",
+ (int)phy_parameter->efuse_usb_u3_tx_lfps_swing_trim);
+ seq_printf(s, " amplitude_control_coarse: 0x%x\n",
+ (int)phy_parameter->amplitude_control_coarse);
+ seq_printf(s, " amplitude_control_fine: 0x%x\n",
+ (int)phy_parameter->amplitude_control_fine);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(rtk_usb3_parameter);
+
+static inline void create_debug_files(struct rtk_phy *rtk_phy)
+{
+ struct dentry *phy_debug_root = NULL;
+
+ phy_debug_root = create_phy_debug_root();
+
+ if (!phy_debug_root)
+ return;
+
+ rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev), phy_debug_root);
+ if (!rtk_phy->debug_dir)
+ return;
+
+ if (!debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+ &rtk_usb3_parameter_fops))
+ goto file_error;
+
+ return;
+
+file_error:
+ debugfs_remove_recursive(rtk_phy->debug_dir);
+}
+
+static inline void remove_debug_files(struct rtk_phy *rtk_phy)
+{
+ debugfs_remove_recursive(rtk_phy->debug_dir);
+}
+#else
+static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
+static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
+#endif /* CONFIG_DEBUG_FS */
+
+static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, int index)
+{
+ struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+ u8 value = 0;
+ struct nvmem_cell *cell;
+
+ if (!phy_cfg->check_efuse)
+ goto out;
+
+ cell = nvmem_cell_get(rtk_phy->dev, "usb_u3_tx_lfps_swing_trim");
+ if (IS_ERR(cell)) {
+ dev_dbg(rtk_phy->dev, "%s no usb_u3_tx_lfps_swing_trim: %ld\n",
+ __func__, PTR_ERR(cell));
+ } else {
+ unsigned char *buf;
+ size_t buf_size;
+
+ buf = nvmem_cell_read(cell, &buf_size);
+ if (!IS_ERR(buf)) {
+ value = buf[0] & USB_U3_TX_LFPS_SWING_TRIM_MASK;
+ kfree(buf);
+ }
+ nvmem_cell_put(cell);
+ }
+
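+ /* Non-zero trim values below 0x8 are clamped up to 0x8; 0 keeps the chip default */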
+ if (value > 0 && value < 0x8)
+ phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = 0x8;
+ else
+ phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = (u8)value;
+
+out:
+ return 0;
+}
+
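+/*
+ * Fold the DT-provided coarse/fine amplitude values and the efuse LFPS swing
+ * trim into the parameter table entries for PHY registers 0x20 and 0x21, so
+ * the normal init path writes them out.
+ */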
+static void update_amplitude_control_value(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+
+ phy_reg = &phy_parameter->phy_reg;
+ phy_cfg = rtk_phy->phy_cfg;
+
+ if (phy_parameter->amplitude_control_coarse != AMPLITUDE_CONTROL_COARSE_DEFAULT) {
+ u16 val_mask = AMPLITUDE_CONTROL_COARSE_MASK;
+ u16 data;
+
+ if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
+ phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
+ data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
+ } else {
+ data = phy_cfg->param[PHY_ADDR_0X20].data;
+ }
+
+ data &= (~val_mask);
+ data |= (phy_parameter->amplitude_control_coarse & val_mask);
+
+ phy_cfg->param[PHY_ADDR_0X20].data = data;
+ }
+
+ if (phy_parameter->efuse_usb_u3_tx_lfps_swing_trim) {
+ u8 efuse_val = phy_parameter->efuse_usb_u3_tx_lfps_swing_trim;
+ u16 val_mask = USB_U3_TX_LFPS_SWING_TRIM_MASK;
+ int val_shift = USB_U3_TX_LFPS_SWING_TRIM_SHIFT;
+ u16 data;
+
+ if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
+ phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
+ data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
+ } else {
+ data = phy_cfg->param[PHY_ADDR_0X20].data;
+ }
+
+ data &= ~(val_mask << val_shift);
+ data |= ((efuse_val & val_mask) << val_shift);
+
+ phy_cfg->param[PHY_ADDR_0X20].data = data;
+ }
+
+ if (phy_parameter->amplitude_control_fine != AMPLITUDE_CONTROL_FINE_DEFAULT) {
+ u16 val_mask = AMPLITUDE_CONTROL_FINE_MASK;
+
+ if (!phy_cfg->param[PHY_ADDR_0X21].addr && !phy_cfg->param[PHY_ADDR_0X21].data)
+ phy_cfg->param[PHY_ADDR_0X21].addr = PHY_ADDR_0X21;
+
+ phy_cfg->param[PHY_ADDR_0X21].data =
+ phy_parameter->amplitude_control_fine & val_mask;
+ }
+}
+
+static int parse_phy_data(struct rtk_phy *rtk_phy)
+{
+ struct device *dev = rtk_phy->dev;
+ struct phy_parameter *phy_parameter;
+ int ret = 0;
+ int index;
+
+ rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
+ rtk_phy->num_phy, GFP_KERNEL);
+ if (!rtk_phy->phy_parameter)
+ return -ENOMEM;
+
+ for (index = 0; index < rtk_phy->num_phy; index++) {
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+
+ phy_parameter->phy_reg.reg_mdio_ctl = of_iomap(dev->of_node, 0) + index;
+
+ /* Amplitude control address 0x20 bit 0 to bit 7 */
+ if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-coarse-tuning",
+ &phy_parameter->amplitude_control_coarse))
+ phy_parameter->amplitude_control_coarse = AMPLITUDE_CONTROL_COARSE_DEFAULT;
+
+ /* Amplitude control address 0x21 bit 0 to bit 15 */
+ if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-fine-tuning",
+ &phy_parameter->amplitude_control_fine))
+ phy_parameter->amplitude_control_fine = AMPLITUDE_CONTROL_FINE_DEFAULT;
+
+ get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
+
+ update_amplitude_control_value(rtk_phy, phy_parameter);
+ }
+
+ return ret;
+}
+
+static int rtk_usb3phy_probe(struct platform_device *pdev)
+{
+ struct rtk_phy *rtk_phy;
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+ const struct phy_cfg *phy_cfg;
+ int ret;
+
+ phy_cfg = of_device_get_match_data(dev);
+ if (!phy_cfg) {
+ dev_err(dev, "phy config are not assigned!\n");
+ return -EINVAL;
+ }
+
+ rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
+ if (!rtk_phy)
+ return -ENOMEM;
+
+ rtk_phy->dev = &pdev->dev;
+ rtk_phy->phy.dev = rtk_phy->dev;
+ rtk_phy->phy.label = "rtk-usb3phy";
+ rtk_phy->phy.notify_port_status = rtk_phy_notify_port_status;
+
+ rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
+ if (!rtk_phy->phy_cfg)
+ return -ENOMEM;
+
+ memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
+
+ rtk_phy->num_phy = 1;
+
+ ret = parse_phy_data(rtk_phy);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, rtk_phy);
+
+ generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
+ if (IS_ERR(generic_phy))
+ return PTR_ERR(generic_phy);
+
+ phy_set_drvdata(generic_phy, rtk_phy);
+
+ phy_provider = devm_of_phy_provider_register(rtk_phy->dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ ret = usb_add_phy_dev(&rtk_phy->phy);
+ if (ret)
+ goto err;
+
+ create_debug_files(rtk_phy);
+
+err:
+ return ret;
+}
+
+static void rtk_usb3phy_remove(struct platform_device *pdev)
+{
+ struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
+
+ remove_debug_files(rtk_phy);
+
+ usb_remove_phy(&rtk_phy->phy);
+}
+
+static const struct phy_cfg rtd1295_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [0] = {0x01, 0x4008}, [1] = {0x01, 0xe046},
+ [2] = {0x02, 0x6046}, [3] = {0x03, 0x2779},
+ [4] = {0x04, 0x72f5}, [5] = {0x05, 0x2ad3},
+ [6] = {0x06, 0x000e}, [7] = {0x07, 0x2e00},
+ [8] = {0x08, 0x3591}, [9] = {0x09, 0x525c},
+ [10] = {0x0a, 0xa600}, [11] = {0x0b, 0xa904},
+ [12] = {0x0c, 0xc000}, [13] = {0x0d, 0xef1c},
+ [14] = {0x0e, 0x2000}, [15] = {0x0f, 0x0000},
+ [16] = {0x10, 0x000c}, [17] = {0x11, 0x4c00},
+ [18] = {0x12, 0xfc00}, [19] = {0x13, 0x0c81},
+ [20] = {0x14, 0xde01}, [21] = {0x15, 0x0000},
+ [22] = {0x16, 0x0000}, [23] = {0x17, 0x0000},
+ [24] = {0x18, 0x0000}, [25] = {0x19, 0x4004},
+ [26] = {0x1a, 0x1260}, [27] = {0x1b, 0xff00},
+ [28] = {0x1c, 0xcb00}, [29] = {0x1d, 0xa03f},
+ [30] = {0x1e, 0xc2e0}, [31] = {0x1f, 0x2807},
+ [32] = {0x20, 0x947a}, [33] = {0x21, 0x88aa},
+ [34] = {0x22, 0x0057}, [35] = {0x23, 0xab66},
+ [36] = {0x24, 0x0800}, [37] = {0x25, 0x0000},
+ [38] = {0x26, 0x040a}, [39] = {0x27, 0x01d6},
+ [40] = {0x28, 0xf8c2}, [41] = {0x29, 0x3080},
+ [42] = {0x2a, 0x3082}, [43] = {0x2b, 0x2078},
+ [44] = {0x2c, 0xffff}, [45] = {0x2d, 0xffff},
+ [46] = {0x2e, 0x0000}, [47] = {0x2f, 0x0040}, },
+ .check_efuse = false,
+ .do_toggle = true,
+ .do_toggle_once = false,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = false,
+};
+
+static const struct phy_cfg rtd1619_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [8] = {0x08, 0x3591},
+ [38] = {0x26, 0x840b},
+ [40] = {0x28, 0xf842}, },
+ .check_efuse = false,
+ .do_toggle = true,
+ .do_toggle_once = false,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = false,
+};
+
+static const struct phy_cfg rtd1319_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [1] = {0x01, 0xac86},
+ [6] = {0x06, 0x0003},
+ [9] = {0x09, 0x924c},
+ [10] = {0x0a, 0xa608},
+ [11] = {0x0b, 0xb905},
+ [14] = {0x0e, 0x2010},
+ [32] = {0x20, 0x705a},
+ [33] = {0x21, 0xf645},
+ [34] = {0x22, 0x0013},
+ [35] = {0x23, 0xcb66},
+ [41] = {0x29, 0xff00}, },
+ .check_efuse = true,
+ .do_toggle = true,
+ .do_toggle_once = false,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = false,
+};
+
+static const struct phy_cfg rtd1619b_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [1] = {0x01, 0xac8c},
+ [6] = {0x06, 0x0017},
+ [9] = {0x09, 0x724c},
+ [10] = {0x0a, 0xb610},
+ [11] = {0x0b, 0xb90d},
+ [13] = {0x0d, 0xef2a},
+ [15] = {0x0f, 0x9050},
+ [16] = {0x10, 0x000c},
+ [32] = {0x20, 0x70ff},
+ [34] = {0x22, 0x0013},
+ [35] = {0x23, 0xdb66},
+ [38] = {0x26, 0x8609},
+ [41] = {0x29, 0xff13},
+ [42] = {0x2a, 0x3070}, },
+ .check_efuse = true,
+ .do_toggle = false,
+ .do_toggle_once = true,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = false,
+};
+
+static const struct phy_cfg rtd1319d_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [1] = {0x01, 0xac89},
+ [4] = {0x04, 0xf2f5},
+ [6] = {0x06, 0x0017},
+ [9] = {0x09, 0x424c},
+ [10] = {0x0a, 0x9610},
+ [11] = {0x0b, 0x9901},
+ [12] = {0x0c, 0xf000},
+ [13] = {0x0d, 0xef2a},
+ [14] = {0x0e, 0x1000},
+ [15] = {0x0f, 0x9050},
+ [32] = {0x20, 0x7077},
+ [35] = {0x23, 0x0b62},
+ [37] = {0x25, 0x10ec},
+ [42] = {0x2a, 0x3070}, },
+ .check_efuse = true,
+ .do_toggle = false,
+ .do_toggle_once = true,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = true,
+};
+
+static const struct of_device_id usbphy_rtk_dt_match[] = {
+ { .compatible = "realtek,rtd1295-usb3phy", .data = &rtd1295_phy_cfg },
+ { .compatible = "realtek,rtd1319-usb3phy", .data = &rtd1319_phy_cfg },
+ { .compatible = "realtek,rtd1319d-usb3phy", .data = &rtd1319d_phy_cfg },
+ { .compatible = "realtek,rtd1619-usb3phy", .data = &rtd1619_phy_cfg },
+ { .compatible = "realtek,rtd1619b-usb3phy", .data = &rtd1619b_phy_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
+
+static struct platform_driver rtk_usb3phy_driver = {
+ .probe = rtk_usb3phy_probe,
+ .remove_new = rtk_usb3phy_remove,
+ .driver = {
+ .name = "rtk-usb3phy",
+ .of_match_table = usbphy_rtk_dt_match,
+ },
+};
+
+module_platform_driver(rtk_usb3phy_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform: rtk-usb3phy");
+MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
+MODULE_DESCRIPTION("Realtek usb 3.0 phy driver");
diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
index c375a4676a3d..507435af2656 100644
--- a/drivers/phy/renesas/phy-rcar-gen2.c
+++ b/drivers/phy/renesas/phy-rcar-gen2.c
@@ -16,7 +16,6 @@
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
-#include <linux/of_device.h>
#define USBHS_LPSTS 0x02
#define USBHS_UGCTRL 0x80
diff --git a/drivers/phy/renesas/phy-rcar-gen3-pcie.c b/drivers/phy/renesas/phy-rcar-gen3-pcie.c
index 9cf786a7daac..0ce7e9c94444 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-pcie.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-pcie.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index d4e2ee7e4efb..e53eace7c91e 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -15,8 +15,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/phy/renesas/r8a779f0-ether-serdes.c b/drivers/phy/renesas/r8a779f0-ether-serdes.c
index 55b7bdfc10d3..683b19bc411a 100644
--- a/drivers/phy/renesas/r8a779f0-ether-serdes.c
+++ b/drivers/phy/renesas/r8a779f0-ether-serdes.c
@@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -339,22 +340,15 @@ static int r8a779f0_eth_serdes_probe(struct platform_device *pdev)
{
struct r8a779f0_eth_serdes_drv_data *dd;
struct phy_provider *provider;
- struct resource *res;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "invalid resource\n");
- return -EINVAL;
- }
-
dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
if (!dd)
return -ENOMEM;
platform_set_drvdata(pdev, dd);
dd->pdev = pdev;
- dd->addr = devm_ioremap_resource(&pdev->dev, res);
+ dd->addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dd->addr))
return PTR_ERR(dd->addr);
diff --git a/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c b/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c
index 639452f47869..e6a768bbb9b3 100644
--- a/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c
+++ b/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c
@@ -21,7 +21,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
index 401b0aabb159..6405943a2676 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -770,6 +770,9 @@ static const struct of_device_id inno_dsidphy_of_match[] = {
}, {
.compatible = "rockchip,rk3568-dsi-dphy",
.data = &max_2_5ghz_video_phy_plat_data,
+ }, {
+ .compatible = "rockchip,rv1126-dsi-dphy",
+ .data = &max_2_5ghz_video_phy_plat_data,
},
{}
};
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
index 1e1563f5fffc..053bd62e31ba 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
@@ -15,7 +15,6 @@
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/phy/phy.h>
@@ -245,6 +244,7 @@ struct inno_hdmi_phy {
struct clk_hw hw;
struct clk *phyclk;
unsigned long pixclock;
+ unsigned long tmdsclock;
};
struct pre_pll_config {
@@ -291,31 +291,179 @@ struct inno_hdmi_phy_drv_data {
};
static const struct pre_pll_config pre_pll_cfg_table[] = {
- { 27000000, 27000000, 1, 90, 3, 2, 2, 10, 3, 3, 4, 0, 0},
- { 27000000, 33750000, 1, 90, 1, 3, 3, 10, 3, 3, 4, 0, 0},
- { 40000000, 40000000, 1, 80, 2, 2, 2, 12, 2, 2, 2, 0, 0},
- { 59341000, 59341000, 1, 98, 3, 1, 2, 1, 3, 3, 4, 0, 0xE6AE6B},
- { 59400000, 59400000, 1, 99, 3, 1, 1, 1, 3, 3, 4, 0, 0},
- { 59341000, 74176250, 1, 98, 0, 3, 3, 1, 3, 3, 4, 0, 0xE6AE6B},
- { 59400000, 74250000, 1, 99, 1, 2, 2, 1, 3, 3, 4, 0, 0},
- { 74176000, 74176000, 1, 98, 1, 2, 2, 1, 2, 3, 4, 0, 0xE6AE6B},
- { 74250000, 74250000, 1, 99, 1, 2, 2, 1, 2, 3, 4, 0, 0},
- { 74176000, 92720000, 4, 494, 1, 2, 2, 1, 3, 3, 4, 0, 0x816817},
- { 74250000, 92812500, 4, 495, 1, 2, 2, 1, 3, 3, 4, 0, 0},
- {148352000, 148352000, 1, 98, 1, 1, 1, 1, 2, 2, 2, 0, 0xE6AE6B},
- {148500000, 148500000, 1, 99, 1, 1, 1, 1, 2, 2, 2, 0, 0},
- {148352000, 185440000, 4, 494, 0, 2, 2, 1, 3, 2, 2, 0, 0x816817},
- {148500000, 185625000, 4, 495, 0, 2, 2, 1, 3, 2, 2, 0, 0},
- {296703000, 296703000, 1, 98, 0, 1, 1, 1, 0, 2, 2, 0, 0xE6AE6B},
- {297000000, 297000000, 1, 99, 0, 1, 1, 1, 0, 2, 2, 0, 0},
- {296703000, 370878750, 4, 494, 1, 2, 0, 1, 3, 1, 1, 0, 0x816817},
- {297000000, 371250000, 4, 495, 1, 2, 0, 1, 3, 1, 1, 0, 0},
- {593407000, 296703500, 1, 98, 0, 1, 1, 1, 0, 2, 1, 0, 0xE6AE6B},
- {594000000, 297000000, 1, 99, 0, 1, 1, 1, 0, 2, 1, 0, 0},
- {593407000, 370879375, 4, 494, 1, 2, 0, 1, 3, 1, 1, 1, 0x816817},
- {594000000, 371250000, 4, 495, 1, 2, 0, 1, 3, 1, 1, 1, 0},
- {593407000, 593407000, 1, 98, 0, 2, 0, 1, 0, 1, 1, 0, 0xE6AE6B},
- {594000000, 594000000, 1, 99, 0, 2, 0, 1, 0, 1, 1, 0, 0},
+ { 25175000, 25175000, 3, 125, 3, 1, 1, 1, 3, 3, 4, 0, 0xe00000},
+ { 25175000, 31468750, 1, 41, 0, 3, 3, 1, 3, 3, 4, 0, 0xf5554f},
+ { 27000000, 27000000, 1, 36, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 27000000, 33750000, 1, 45, 0, 3, 3, 1, 3, 3, 4, 0, 0x0},
+ { 31500000, 31500000, 1, 42, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 31500000, 39375000, 1, 105, 1, 3, 3, 10, 0, 3, 4, 0, 0x0},
+ { 33750000, 33750000, 1, 45, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 33750000, 42187500, 1, 169, 2, 3, 3, 15, 0, 3, 4, 0, 0x0},
+ { 35500000, 35500000, 1, 71, 2, 2, 2, 6, 0, 3, 4, 0, 0x0},
+ { 35500000, 44375000, 1, 74, 3, 1, 1, 25, 0, 1, 1, 0, 0x0},
+ { 36000000, 36000000, 1, 36, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ { 36000000, 45000000, 1, 45, 2, 1, 1, 15, 0, 1, 1, 0, 0x0},
+ { 40000000, 40000000, 1, 40, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ { 40000000, 50000000, 1, 50, 2, 1, 1, 15, 0, 1, 1, 0, 0x0},
+ { 49500000, 49500000, 1, 66, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 49500000, 61875000, 1, 165, 1, 3, 3, 10, 0, 3, 4, 0, 0x0},
+ { 50000000, 50000000, 1, 50, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ { 50000000, 62500000, 1, 125, 2, 2, 2, 15, 0, 2, 2, 0, 0x0},
+ { 54000000, 54000000, 1, 36, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ { 54000000, 67500000, 1, 45, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ { 56250000, 56250000, 1, 75, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 56250000, 70312500, 1, 117, 3, 1, 1, 25, 0, 1, 1, 0, 0x0},
+ { 59341000, 59341000, 1, 118, 2, 2, 2, 6, 0, 3, 4, 0, 0xae978d},
+ { 59341000, 74176250, 2, 148, 2, 1, 1, 15, 0, 1, 1, 0, 0x5a3d70},
+ { 59400000, 59400000, 1, 99, 3, 1, 1, 1, 3, 3, 4, 0, 0x0},
+ { 59400000, 74250000, 1, 99, 0, 3, 3, 1, 3, 3, 4, 0, 0x0},
+ { 65000000, 65000000, 1, 65, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ { 65000000, 81250000, 3, 325, 0, 3, 3, 1, 3, 3, 4, 0, 0x0},
+ { 68250000, 68250000, 1, 91, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 68250000, 85312500, 1, 142, 3, 1, 1, 25, 0, 1, 1, 0, 0x0},
+ { 71000000, 71000000, 1, 71, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ { 71000000, 88750000, 3, 355, 0, 3, 3, 1, 3, 3, 4, 0, 0x0},
+ { 72000000, 72000000, 1, 36, 2, 0, 0, 1, 1, 2, 2, 0, 0x0},
+ { 72000000, 90000000, 1, 60, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ { 73250000, 73250000, 3, 293, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 73250000, 91562500, 1, 61, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ { 74176000, 74176000, 1, 37, 2, 0, 0, 1, 1, 2, 2, 0, 0x16872b},
+ { 74176000, 92720000, 2, 185, 2, 1, 1, 15, 0, 1, 1, 0, 0x70a3d7},
+ { 74250000, 74250000, 1, 99, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 74250000, 92812500, 4, 495, 0, 3, 3, 1, 3, 3, 4, 0, 0x0},
+ { 75000000, 75000000, 1, 50, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ { 75000000, 93750000, 1, 125, 0, 3, 3, 1, 3, 3, 4, 0, 0x0},
+ { 78750000, 78750000, 1, 105, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 78750000, 98437500, 1, 164, 3, 1, 1, 25, 0, 1, 1, 0, 0x0},
+ { 79500000, 79500000, 1, 53, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ { 79500000, 99375000, 1, 199, 2, 2, 2, 15, 0, 2, 2, 0, 0x0},
+ { 83500000, 83500000, 2, 167, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ { 83500000, 104375000, 1, 104, 2, 1, 1, 15, 0, 1, 1, 0, 0x600000},
+ { 85500000, 85500000, 1, 57, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ { 85500000, 106875000, 1, 178, 3, 1, 1, 25, 0, 1, 1, 0, 0x0},
+ { 85750000, 85750000, 3, 343, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 85750000, 107187500, 1, 143, 0, 3, 3, 1, 3, 3, 4, 0, 0x0},
+ { 88750000, 88750000, 3, 355, 0, 3, 3, 1, 2, 3, 4, 0, 0x0},
+ { 88750000, 110937500, 1, 110, 2, 1, 1, 15, 0, 1, 1, 0, 0xf00000},
+ { 94500000, 94500000, 1, 63, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ { 94500000, 118125000, 1, 197, 3, 1, 1, 25, 0, 1, 1, 0, 0x0},
+ {101000000, 101000000, 1, 101, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ {101000000, 126250000, 1, 42, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {102250000, 102250000, 4, 409, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ {102250000, 127812500, 1, 128, 2, 1, 1, 15, 0, 1, 1, 0, 0x0},
+ {106500000, 106500000, 1, 71, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {106500000, 133125000, 1, 133, 2, 1, 1, 15, 0, 1, 1, 0, 0x0},
+ {108000000, 108000000, 1, 36, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {108000000, 135000000, 1, 45, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {115500000, 115500000, 1, 77, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {115500000, 144375000, 1, 48, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {117500000, 117500000, 2, 235, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ {117500000, 146875000, 1, 49, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {119000000, 119000000, 1, 119, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ {119000000, 148750000, 3, 148, 0, 1, 1, 1, 3, 1, 1, 0, 0xc00000},
+ {121750000, 121750000, 4, 487, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ {121750000, 152187500, 1, 203, 0, 3, 3, 1, 3, 3, 4, 0, 0x0},
+ {122500000, 122500000, 2, 245, 2, 1, 1, 1, 1, 3, 4, 0, 0x0},
+ {122500000, 153125000, 1, 51, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {135000000, 135000000, 1, 45, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {135000000, 168750000, 1, 169, 2, 1, 1, 15, 0, 1, 1, 0, 0x0},
+ {136750000, 136750000, 1, 68, 2, 0, 0, 1, 1, 2, 2, 0, 0x600000},
+ {136750000, 170937500, 1, 113, 0, 2, 2, 1, 3, 2, 2, 0, 0xf5554f},
+ {140250000, 140250000, 2, 187, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {140250000, 175312500, 1, 117, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {146250000, 146250000, 2, 195, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {146250000, 182812500, 1, 61, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {148250000, 148250000, 3, 222, 2, 0, 0, 1, 1, 2, 2, 0, 0x600000},
+ {148250000, 185312500, 1, 123, 0, 2, 2, 1, 3, 2, 2, 0, 0x8aaab0},
+ {148352000, 148352000, 2, 148, 2, 0, 0, 1, 1, 2, 2, 0, 0x5a1cac},
+ {148352000, 185440000, 3, 185, 0, 1, 1, 1, 3, 1, 1, 0, 0x70a3d7},
+ {148500000, 148500000, 1, 99, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {148500000, 185625000, 4, 495, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {154000000, 154000000, 1, 77, 2, 0, 0, 1, 1, 2, 2, 0, 0x0},
+ {154000000, 192500000, 1, 64, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {156000000, 156000000, 1, 52, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {156000000, 195000000, 1, 65, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {156750000, 156750000, 2, 209, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {156750000, 195937500, 1, 196, 2, 1, 1, 15, 0, 1, 1, 0, 0x0},
+ {157000000, 157000000, 2, 157, 2, 0, 0, 1, 1, 2, 2, 0, 0x0},
+ {157000000, 196250000, 1, 131, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {157500000, 157500000, 1, 105, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {157500000, 196875000, 1, 197, 2, 1, 1, 15, 0, 1, 1, 0, 0x0},
+ {162000000, 162000000, 1, 54, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {162000000, 202500000, 2, 135, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {175500000, 175500000, 1, 117, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {175500000, 219375000, 1, 73, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {179500000, 179500000, 3, 359, 0, 2, 2, 1, 0, 3, 4, 0, 0x0},
+ {179500000, 224375000, 1, 75, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {182750000, 182750000, 1, 91, 2, 0, 0, 1, 1, 2, 2, 0, 0x600000},
+ {182750000, 228437500, 1, 152, 0, 2, 2, 1, 3, 2, 2, 0, 0x4aaab0},
+ {182750000, 228437500, 1, 152, 0, 2, 2, 1, 3, 2, 2, 0, 0x4aaab0},
+ {187000000, 187000000, 2, 187, 2, 0, 0, 1, 1, 2, 2, 0, 0x0},
+ {187000000, 233750000, 1, 39, 0, 0, 0, 1, 3, 0, 0, 1, 0x0},
+ {187250000, 187250000, 3, 280, 2, 0, 0, 1, 1, 2, 2, 0, 0xe00000},
+ {187250000, 234062500, 1, 156, 0, 2, 2, 1, 3, 2, 2, 0, 0xaaab0},
+ {189000000, 189000000, 1, 63, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {189000000, 236250000, 1, 79, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {193250000, 193250000, 3, 289, 2, 0, 0, 1, 1, 2, 2, 0, 0xe00000},
+ {193250000, 241562500, 1, 161, 0, 2, 2, 1, 3, 2, 2, 0, 0xaaab0},
+ {202500000, 202500000, 2, 135, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {202500000, 253125000, 1, 169, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {204750000, 204750000, 4, 273, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {204750000, 255937500, 1, 171, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {208000000, 208000000, 1, 104, 2, 0, 0, 1, 1, 2, 2, 0, 0x0},
+ {208000000, 260000000, 1, 173, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {214750000, 214750000, 1, 107, 2, 0, 0, 1, 1, 2, 2, 0, 0x600000},
+ {214750000, 268437500, 1, 178, 0, 2, 2, 1, 3, 2, 2, 0, 0xf5554f},
+ {218250000, 218250000, 4, 291, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {218250000, 272812500, 1, 91, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {229500000, 229500000, 2, 153, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {229500000, 286875000, 1, 191, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {234000000, 234000000, 1, 39, 0, 0, 0, 1, 0, 1, 1, 0, 0x0},
+ {234000000, 292500000, 1, 195, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {241500000, 241500000, 2, 161, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {241500000, 301875000, 1, 201, 0, 2, 2, 1, 3, 2, 2, 0, 0x0},
+ {245250000, 245250000, 4, 327, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {245250000, 306562500, 1, 51, 0, 0, 0, 1, 3, 0, 0, 1, 0x0},
+ {245500000, 245500000, 4, 491, 2, 0, 0, 1, 1, 2, 2, 0, 0x0},
+ {245500000, 306875000, 1, 51, 0, 0, 0, 1, 3, 0, 0, 1, 0x0},
+ {261000000, 261000000, 1, 87, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {261000000, 326250000, 1, 109, 0, 1, 1, 1, 3, 1, 1, 0, 0x0},
+ {268250000, 268250000, 9, 402, 0, 0, 0, 1, 0, 1, 1, 0, 0x600000},
+ {268250000, 335312500, 1, 111, 0, 1, 1, 1, 3, 1, 1, 0, 0xc5554f},
+ {268500000, 268500000, 2, 179, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {268500000, 335625000, 1, 56, 0, 0, 0, 1, 3, 0, 0, 1, 0x0},
+ {281250000, 281250000, 4, 375, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {281250000, 351562500, 1, 117, 0, 3, 1, 1, 3, 1, 1, 0, 0x0},
+ {288000000, 288000000, 1, 48, 0, 0, 0, 1, 0, 1, 1, 0, 0x0},
+ {288000000, 360000000, 1, 60, 0, 2, 0, 1, 3, 0, 0, 1, 0x0},
+ {296703000, 296703000, 1, 49, 0, 0, 0, 1, 0, 1, 1, 0, 0x7353f7},
+ {296703000, 370878750, 1, 123, 0, 3, 1, 1, 3, 1, 1, 0, 0xa051eb},
+ {297000000, 297000000, 1, 99, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {297000000, 371250000, 4, 495, 0, 3, 1, 1, 3, 1, 1, 0, 0x0},
+ {312250000, 312250000, 9, 468, 0, 0, 0, 1, 0, 1, 1, 0, 0x600000},
+ {312250000, 390312500, 1, 130, 0, 3, 1, 1, 3, 1, 1, 0, 0x1aaab0},
+ {317000000, 317000000, 3, 317, 0, 1, 1, 1, 0, 2, 2, 0, 0x0},
+ {317000000, 396250000, 1, 66, 0, 2, 0, 1, 3, 0, 0, 1, 0x0},
+ {319750000, 319750000, 3, 159, 0, 0, 0, 1, 0, 1, 1, 0, 0xe00000},
+ {319750000, 399687500, 3, 199, 0, 2, 0, 1, 3, 0, 0, 1, 0xd80000},
+ {333250000, 333250000, 9, 499, 0, 0, 0, 1, 0, 1, 1, 0, 0xe00000},
+ {333250000, 416562500, 1, 138, 0, 3, 1, 1, 3, 1, 1, 0, 0xdaaab0},
+ {348500000, 348500000, 9, 522, 0, 2, 0, 1, 0, 1, 1, 0, 0xc00000},
+ {348500000, 435625000, 1, 145, 0, 3, 1, 1, 3, 1, 1, 0, 0x35554f},
+ {356500000, 356500000, 9, 534, 0, 2, 0, 1, 0, 1, 1, 0, 0xc00000},
+ {356500000, 445625000, 1, 148, 0, 3, 1, 1, 3, 1, 1, 0, 0x8aaab0},
+ {380500000, 380500000, 9, 570, 0, 2, 0, 1, 0, 1, 1, 0, 0xc00000},
+ {380500000, 475625000, 1, 158, 0, 3, 1, 1, 3, 1, 1, 0, 0x8aaab0},
+ {443250000, 443250000, 1, 73, 0, 2, 0, 1, 0, 1, 1, 0, 0xe00000},
+ {443250000, 554062500, 1, 92, 0, 2, 0, 1, 3, 0, 0, 1, 0x580000},
+ {505250000, 505250000, 9, 757, 0, 2, 0, 1, 0, 1, 1, 0, 0xe00000},
+ {552750000, 552750000, 3, 276, 0, 2, 0, 1, 0, 1, 1, 0, 0x600000},
+ {593407000, 296703500, 3, 296, 0, 1, 1, 1, 0, 1, 1, 0, 0xb41893},
+ {593407000, 370879375, 4, 494, 0, 3, 1, 1, 3, 0, 0, 1, 0x817e4a},
+ {593407000, 593407000, 3, 296, 0, 2, 0, 1, 0, 1, 1, 0, 0xb41893},
+ {594000000, 297000000, 1, 99, 0, 1, 1, 1, 0, 1, 1, 0, 0x0},
+ {594000000, 371250000, 4, 495, 0, 3, 1, 1, 3, 0, 0, 1, 0x0},
+ {594000000, 594000000, 1, 99, 0, 2, 0, 1, 0, 1, 1, 0, 0x0},
{ /* sentinel */ }
};
@@ -485,6 +633,8 @@ static int inno_hdmi_phy_power_on(struct phy *phy)
dev_dbg(inno->dev, "Inno HDMI PHY Power On\n");
+ inno->plat_data->clk_ops->set_rate(&inno->hw, inno->pixclock, 24000000);
+
ret = clk_prepare_enable(inno->phyclk);
if (ret)
return ret;
@@ -509,6 +659,8 @@ static int inno_hdmi_phy_power_off(struct phy *phy)
clk_disable_unprepare(inno->phyclk);
+ inno->tmdsclock = 0;
+
dev_dbg(inno->dev, "Inno HDMI PHY Power Off\n");
return 0;
@@ -628,6 +780,9 @@ static int inno_hdmi_phy_rk3228_clk_set_rate(struct clk_hw *hw,
dev_dbg(inno->dev, "%s rate %lu tmdsclk %lu\n",
__func__, rate, tmdsclock);
+ if (inno->pixclock == rate && inno->tmdsclock == tmdsclock)
+ return 0;
+
cfg = inno_hdmi_phy_get_pre_pll_cfg(inno, rate);
if (IS_ERR(cfg))
return PTR_ERR(cfg);
@@ -670,6 +825,7 @@ static int inno_hdmi_phy_rk3228_clk_set_rate(struct clk_hw *hw,
}
inno->pixclock = rate;
+ inno->tmdsclock = tmdsclock;
return 0;
}
@@ -714,7 +870,7 @@ unsigned long inno_hdmi_phy_rk3328_clk_recalc_rate(struct clk_hw *hw,
{
struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
unsigned long frac;
- u8 nd, no_a, no_b, no_c, no_d;
+ u8 nd, no_a, no_b, no_d;
u64 vco;
u16 nf;
@@ -737,18 +893,17 @@ unsigned long inno_hdmi_phy_rk3328_clk_recalc_rate(struct clk_hw *hw,
no_b = inno_read(inno, 0xa5) & RK3328_PRE_PLL_PCLK_DIV_B_MASK;
no_b >>= RK3328_PRE_PLL_PCLK_DIV_B_SHIFT;
no_b += 2;
- no_c = inno_read(inno, 0xa6) & RK3328_PRE_PLL_PCLK_DIV_C_MASK;
- no_c >>= RK3328_PRE_PLL_PCLK_DIV_C_SHIFT;
- no_c = 1 << no_c;
no_d = inno_read(inno, 0xa6) & RK3328_PRE_PLL_PCLK_DIV_D_MASK;
do_div(vco, (nd * (no_a == 1 ? no_b : no_a) * no_d * 2));
}
- inno->pixclock = vco;
- dev_dbg(inno->dev, "%s rate %lu\n", __func__, inno->pixclock);
+ inno->pixclock = DIV_ROUND_CLOSEST((unsigned long)vco, 1000) * 1000;
- return vco;
+ dev_dbg(inno->dev, "%s rate %lu vco %llu\n",
+ __func__, inno->pixclock, vco);
+
+ return inno->pixclock;
}
static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw,
@@ -782,6 +937,9 @@ static int inno_hdmi_phy_rk3328_clk_set_rate(struct clk_hw *hw,
dev_dbg(inno->dev, "%s rate %lu tmdsclk %lu\n",
__func__, rate, tmdsclock);
+ if (inno->pixclock == rate && inno->tmdsclock == tmdsclock)
+ return 0;
+
cfg = inno_hdmi_phy_get_pre_pll_cfg(inno, rate);
if (IS_ERR(cfg))
return PTR_ERR(cfg);
@@ -790,8 +948,8 @@ static int inno_hdmi_phy_rk3328_clk_set_rate(struct clk_hw *hw,
RK3328_PRE_PLL_POWER_DOWN);
/* Configure pre-pll */
- inno_update_bits(inno, 0xa0, RK3228_PCLK_VCO_DIV_5_MASK,
- RK3228_PCLK_VCO_DIV_5(cfg->vco_div_5_en));
+ inno_update_bits(inno, 0xa0, RK3328_PCLK_VCO_DIV_5_MASK,
+ RK3328_PCLK_VCO_DIV_5(cfg->vco_div_5_en));
inno_write(inno, 0xa1, RK3328_PRE_PLL_PRE_DIV(cfg->prediv));
val = RK3328_SPREAD_SPECTRUM_MOD_DISABLE;
@@ -821,6 +979,7 @@ static int inno_hdmi_phy_rk3328_clk_set_rate(struct clk_hw *hw,
}
inno->pixclock = rate;
+ inno->tmdsclock = tmdsclock;
return 0;
}
@@ -1021,9 +1180,10 @@ inno_hdmi_phy_rk3328_power_on(struct inno_hdmi_phy *inno,
inno_write(inno, 0xac, RK3328_POST_PLL_FB_DIV_7_0(cfg->fbdiv));
if (cfg->postdiv == 1) {
- inno_write(inno, 0xaa, RK3328_POST_PLL_REFCLK_SEL_TMDS);
inno_write(inno, 0xab, RK3328_POST_PLL_FB_DIV_8(cfg->fbdiv) |
RK3328_POST_PLL_PRE_DIV(cfg->prediv));
+ inno_write(inno, 0xaa, RK3328_POST_PLL_REFCLK_SEL_TMDS |
+ RK3328_POST_PLL_POWER_DOWN);
} else {
v = (cfg->postdiv / 2) - 1;
v &= RK3328_POST_PLL_POST_DIV_MASK;
@@ -1031,7 +1191,8 @@ inno_hdmi_phy_rk3328_power_on(struct inno_hdmi_phy *inno,
inno_write(inno, 0xab, RK3328_POST_PLL_FB_DIV_8(cfg->fbdiv) |
RK3328_POST_PLL_PRE_DIV(cfg->prediv));
inno_write(inno, 0xaa, RK3328_POST_PLL_POST_DIV_ENABLE |
- RK3328_POST_PLL_REFCLK_SEL_TMDS);
+ RK3328_POST_PLL_REFCLK_SEL_TMDS |
+ RK3328_POST_PLL_POWER_DOWN);
}
for (v = 0; v < 14; v++)
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index a0bc10aa7961..b982c3f0d4b5 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#include <linux/mfd/syscon.h>
#include <linux/usb/of.h>
#include <linux/usb/otg.h>
@@ -32,6 +33,8 @@
#define SCHEDULE_DELAY (60 * HZ)
#define OTG_SCHEDULE_DELAY (2 * HZ)
+struct rockchip_usb2phy;
+
enum rockchip_usb2phy_port_id {
USB2PHY_PORT_OTG,
USB2PHY_PORT_HOST,
@@ -116,6 +119,12 @@ struct rockchip_chg_det_reg {
* @bvalid_det_en: vbus valid rise detection enable register.
* @bvalid_det_st: vbus valid rise detection status register.
* @bvalid_det_clr: vbus valid rise detection clear register.
+ * @disfall_en: host disconnect fall edge detection enable.
+ * @disfall_st: host disconnect fall edge detection state.
+ * @disfall_clr: host disconnect fall edge detection clear.
+ * @disrise_en: host disconnect rise edge detection enable.
+ * @disrise_st: host disconnect rise edge detection state.
+ * @disrise_clr: host disconnect rise edge detection clear.
* @id_det_en: id detection enable register.
* @id_det_st: id detection state register.
* @id_det_clr: id detection clear register.
@@ -133,6 +142,12 @@ struct rockchip_usb2phy_port_cfg {
struct usb2phy_reg bvalid_det_en;
struct usb2phy_reg bvalid_det_st;
struct usb2phy_reg bvalid_det_clr;
+ struct usb2phy_reg disfall_en;
+ struct usb2phy_reg disfall_st;
+ struct usb2phy_reg disfall_clr;
+ struct usb2phy_reg disrise_en;
+ struct usb2phy_reg disrise_st;
+ struct usb2phy_reg disrise_clr;
struct usb2phy_reg id_det_en;
struct usb2phy_reg id_det_st;
struct usb2phy_reg id_det_clr;
@@ -150,6 +165,7 @@ struct rockchip_usb2phy_port_cfg {
* struct rockchip_usb2phy_cfg - usb-phy configuration.
* @reg: the address offset of grf for usb-phy config.
* @num_ports: specify how many ports that the phy has.
+ * @phy_tuning: PHY default parameter tuning callback.
* @clkout_ctl: keep on/turn off output clk of phy.
* @port_cfgs: usb-phy port configurations.
* @chg_det: charger detection registers.
@@ -157,6 +173,7 @@ struct rockchip_usb2phy_port_cfg {
struct rockchip_usb2phy_cfg {
unsigned int reg;
unsigned int num_ports;
+ int (*phy_tuning)(struct rockchip_usb2phy *rphy);
struct usb2phy_reg clkout_ctl;
const struct rockchip_usb2phy_port_cfg port_cfgs[USB2PHY_NUM_PORTS];
const struct rockchip_chg_det_reg chg_det;
@@ -168,6 +185,7 @@ struct rockchip_usb2phy_cfg {
* @port_id: flag for otg port or host port.
* @suspended: phy suspended flag.
* @vbus_attached: otg device vbus status.
+ * @host_disconnect: usb host disconnect status.
* @bvalid_irq: IRQ number assigned for vbus valid rise detection.
* @id_irq: IRQ number assigned for ID pin detection.
* @ls_irq: IRQ number assigned for linestate detection.
@@ -187,6 +205,7 @@ struct rockchip_usb2phy_port {
unsigned int port_id;
bool suspended;
bool vbus_attached;
+ bool host_disconnect;
int bvalid_irq;
int id_irq;
int ls_irq;
@@ -209,6 +228,7 @@ struct rockchip_usb2phy_port {
* @clk: clock struct of phy input clk.
* @clk480m: clock struct of phy output clk.
* @clk480m_hw: clock struct of phy output clk management.
+ * @phy_reset: phy reset control.
* @chg_state: states involved in USB charger detection.
* @chg_type: USB charger types.
* @dcd_retries: The retry count used to track Data contact
@@ -225,6 +245,7 @@ struct rockchip_usb2phy {
struct clk *clk;
struct clk *clk480m;
struct clk_hw clk480m_hw;
+ struct reset_control *phy_reset;
enum usb_chg_state chg_state;
enum power_supply_type chg_type;
u8 dcd_retries;
@@ -266,6 +287,25 @@ static inline bool property_enabled(struct regmap *base,
return tmp != reg->disable;
}
+static int rockchip_usb2phy_reset(struct rockchip_usb2phy *rphy)
+{
+ int ret;
+
+ ret = reset_control_assert(rphy->phy_reset);
+ if (ret)
+ return ret;
+
+ udelay(10);
+
+ ret = reset_control_deassert(rphy->phy_reset);
+ if (ret)
+ return ret;
+
+ usleep_range(100, 200);
+
+ return 0;
+}
+
static int rockchip_usb2phy_clk480m_prepare(struct clk_hw *hw)
{
struct rockchip_usb2phy *rphy =
@@ -405,6 +445,27 @@ static int rockchip_usb2phy_extcon_register(struct rockchip_usb2phy *rphy)
return 0;
}
+static int rockchip_usb2phy_enable_host_disc_irq(struct rockchip_usb2phy *rphy,
+ struct rockchip_usb2phy_port *rport,
+ bool en)
+{
+ int ret;
+
+ ret = property_enable(rphy->grf, &rport->port_cfg->disfall_clr, true);
+ if (ret)
+ return ret;
+
+ ret = property_enable(rphy->grf, &rport->port_cfg->disfall_en, en);
+ if (ret)
+ return ret;
+
+ ret = property_enable(rphy->grf, &rport->port_cfg->disrise_clr, true);
+ if (ret)
+ return ret;
+
+ return property_enable(rphy->grf, &rport->port_cfg->disrise_en, en);
+}
+
static int rockchip_usb2phy_init(struct phy *phy)
{
struct rockchip_usb2phy_port *rport = phy_get_drvdata(phy);
@@ -449,6 +510,15 @@ static int rockchip_usb2phy_init(struct phy *phy)
dev_dbg(&rport->phy->dev, "mode %d\n", rport->mode);
}
} else if (rport->port_id == USB2PHY_PORT_HOST) {
+ if (rport->port_cfg->disfall_en.offset) {
+ rport->host_disconnect = true;
+ ret = rockchip_usb2phy_enable_host_disc_irq(rphy, rport, true);
+ if (ret) {
+ dev_err(rphy->dev, "failed to enable disconnect irq\n");
+ goto out;
+ }
+ }
+
/* clear linestate and enable linestate detect irq */
ret = property_enable(rphy->grf,
&rport->port_cfg->ls_det_clr, true);
@@ -490,6 +560,18 @@ static int rockchip_usb2phy_power_on(struct phy *phy)
return ret;
}
+ /*
+ * On RK3588 the PHY must be reset when exiting suspend
+ * mode with common_on_n set to 1'b1 (i.e. the REFCLK_LOGIC,
+ * bias and PLL blocks are powered down for lower power
+ * consumption). If the PHY should not be reset, keep
+ * common_on_n at 1'b0 so that these blocks remain
+ * powered.
+ */
+ ret = rockchip_usb2phy_reset(rphy);
+ if (ret)
+ return ret;
+
/* waiting for the utmi_clk to become stable */
usleep_range(1500, 2000);
@@ -810,9 +892,7 @@ static void rockchip_usb2phy_sm_work(struct work_struct *work)
struct rockchip_usb2phy_port *rport =
container_of(work, struct rockchip_usb2phy_port, sm_work.work);
struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
- unsigned int sh = rport->port_cfg->utmi_hstdet.bitend -
- rport->port_cfg->utmi_hstdet.bitstart + 1;
- unsigned int ul, uhd, state;
+ unsigned int sh, ul, uhd, state;
unsigned int ul_mask, uhd_mask;
int ret;
@@ -822,18 +902,26 @@ static void rockchip_usb2phy_sm_work(struct work_struct *work)
if (ret < 0)
goto next_schedule;
- ret = regmap_read(rphy->grf, rport->port_cfg->utmi_hstdet.offset, &uhd);
- if (ret < 0)
- goto next_schedule;
-
- uhd_mask = GENMASK(rport->port_cfg->utmi_hstdet.bitend,
- rport->port_cfg->utmi_hstdet.bitstart);
ul_mask = GENMASK(rport->port_cfg->utmi_ls.bitend,
rport->port_cfg->utmi_ls.bitstart);
- /* stitch on utmi_ls and utmi_hstdet as phy state */
- state = ((uhd & uhd_mask) >> rport->port_cfg->utmi_hstdet.bitstart) |
- (((ul & ul_mask) >> rport->port_cfg->utmi_ls.bitstart) << sh);
+ if (rport->port_cfg->utmi_hstdet.offset) {
+ ret = regmap_read(rphy->grf, rport->port_cfg->utmi_hstdet.offset, &uhd);
+ if (ret < 0)
+ goto next_schedule;
+
+ uhd_mask = GENMASK(rport->port_cfg->utmi_hstdet.bitend,
+ rport->port_cfg->utmi_hstdet.bitstart);
+
+ sh = rport->port_cfg->utmi_hstdet.bitend -
+ rport->port_cfg->utmi_hstdet.bitstart + 1;
+ /* stitch on utmi_ls and utmi_hstdet as phy state */
+ state = ((uhd & uhd_mask) >> rport->port_cfg->utmi_hstdet.bitstart) |
+ (((ul & ul_mask) >> rport->port_cfg->utmi_ls.bitstart) << sh);
+ } else {
+ state = ((ul & ul_mask) >> rport->port_cfg->utmi_ls.bitstart) << 1 |
+ rport->host_disconnect;
+ }
switch (state) {
case PHY_STATE_HS_ONLINE:
@@ -966,6 +1054,31 @@ static irqreturn_t rockchip_usb2phy_otg_mux_irq(int irq, void *data)
return ret;
}
+static irqreturn_t rockchip_usb2phy_host_disc_irq(int irq, void *data)
+{
+ struct rockchip_usb2phy_port *rport = data;
+ struct rockchip_usb2phy *rphy = dev_get_drvdata(rport->phy->dev.parent);
+
+ if (!property_enabled(rphy->grf, &rport->port_cfg->disfall_st) &&
+ !property_enabled(rphy->grf, &rport->port_cfg->disrise_st))
+ return IRQ_NONE;
+
+ mutex_lock(&rport->mutex);
+
+ /* clear disconnect fall or rise detect irq pending status */
+ if (property_enabled(rphy->grf, &rport->port_cfg->disfall_st)) {
+ property_enable(rphy->grf, &rport->port_cfg->disfall_clr, true);
+ rport->host_disconnect = false;
+ } else if (property_enabled(rphy->grf, &rport->port_cfg->disrise_st)) {
+ property_enable(rphy->grf, &rport->port_cfg->disrise_clr, true);
+ rport->host_disconnect = true;
+ }
+
+ mutex_unlock(&rport->mutex);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t rockchip_usb2phy_irq(int irq, void *data)
{
struct rockchip_usb2phy *rphy = data;
@@ -978,6 +1091,10 @@ static irqreturn_t rockchip_usb2phy_irq(int irq, void *data)
if (!rport->phy)
continue;
+ if (rport->port_id == USB2PHY_PORT_HOST &&
+ rport->port_cfg->disfall_en.offset)
+ ret |= rockchip_usb2phy_host_disc_irq(irq, rport);
+
switch (rport->port_id) {
case USB2PHY_PORT_OTG:
if (rport->mode != USB_DR_MODE_HOST &&
@@ -1188,7 +1305,6 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
struct phy_provider *provider;
struct rockchip_usb2phy *rphy;
const struct rockchip_usb2phy_cfg *phy_cfgs;
- const struct of_device_id *match;
unsigned int reg;
int index, ret;
@@ -1196,12 +1312,6 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
if (!rphy)
return -ENOMEM;
- match = of_match_device(dev->driver->of_match_table, dev);
- if (!match || !match->data) {
- dev_err(dev, "phy configs are not assigned!\n");
- return -EINVAL;
- }
-
if (!dev->parent || !dev->parent->of_node) {
rphy->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,usbgrf");
if (IS_ERR(rphy->grf)) {
@@ -1233,7 +1343,7 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
}
/* support address_cells=2 */
- if (reg == 0) {
+ if (of_property_count_u32_elems(np, "reg") > 2 && reg == 0) {
if (of_property_read_u32_index(np, "reg", 1, &reg)) {
dev_err(dev, "the reg property is not assigned in %pOFn node\n",
np);
@@ -1242,45 +1352,55 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
}
rphy->dev = dev;
- phy_cfgs = match->data;
+ phy_cfgs = device_get_match_data(dev);
rphy->chg_state = USB_CHG_STATE_UNDEFINED;
rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
rphy->irq = platform_get_irq_optional(pdev, 0);
platform_set_drvdata(pdev, rphy);
+ if (!phy_cfgs)
+ return dev_err_probe(dev, -EINVAL, "phy configs are not assigned!\n");
+
ret = rockchip_usb2phy_extcon_register(rphy);
if (ret)
return ret;
/* find out a proper config which can be matched with dt. */
index = 0;
- while (phy_cfgs[index].reg) {
+ do {
if (phy_cfgs[index].reg == reg) {
rphy->phy_cfg = &phy_cfgs[index];
break;
}
++index;
- }
+ } while (phy_cfgs[index].reg);
if (!rphy->phy_cfg) {
- dev_err(dev, "no phy-config can be matched with %pOFn node\n",
- np);
+ dev_err(dev, "could not find phy config for reg=0x%08x\n", reg);
return -EINVAL;
}
- rphy->clk = of_clk_get_by_name(np, "phyclk");
- if (!IS_ERR(rphy->clk)) {
- clk_prepare_enable(rphy->clk);
- } else {
- dev_info(&pdev->dev, "no phyclk specified\n");
- rphy->clk = NULL;
+ rphy->phy_reset = devm_reset_control_get_optional(dev, "phy");
+ if (IS_ERR(rphy->phy_reset))
+ return PTR_ERR(rphy->phy_reset);
+
+ rphy->clk = devm_clk_get_optional_enabled(dev, "phyclk");
+ if (IS_ERR(rphy->clk)) {
+ return dev_err_probe(&pdev->dev, PTR_ERR(rphy->clk),
+ "failed to get phyclk\n");
}
ret = rockchip_usb2phy_clk480m_register(rphy);
if (ret) {
dev_err(dev, "failed to register 480m output clock\n");
- goto disable_clks;
+ return ret;
+ }
+
+ if (rphy->phy_cfg->phy_tuning) {
+ ret = rphy->phy_cfg->phy_tuning(rphy);
+ if (ret)
+ return ret;
}
index = 0;
@@ -1343,11 +1463,55 @@ next_child:
put_child:
of_node_put(child_np);
-disable_clks:
- if (rphy->clk) {
- clk_disable_unprepare(rphy->clk);
- clk_put(rphy->clk);
+ return ret;
+}
+
+static int rk3588_usb2phy_tuning(struct rockchip_usb2phy *rphy)
+{
+ int ret;
+ bool usb3otg = false;
+ /*
+ * utmi_termselect = 1'b1 (enable FS terminations)
+ * utmi_xcvrselect = 2'b01 (FS transceiver)
+ */
+ int suspend_cfg = 0x14;
+
+ if (rphy->phy_cfg->reg == 0x0000 || rphy->phy_cfg->reg == 0x4000) {
+ /* USB2 config for USB3_0 and USB3_1 */
+ suspend_cfg |= 0x01; /* utmi_opmode = 2'b01 (no-driving) */
+ usb3otg = true;
+ } else if (rphy->phy_cfg->reg == 0x8000 || rphy->phy_cfg->reg == 0xc000) {
+ /* USB2 config for USB2_0 and USB2_1 */
+ suspend_cfg |= 0x00; /* utmi_opmode = 2'b00 (normal) */
+ } else {
+ return -EINVAL;
}
+
+ /* Deassert SIDDQ to power on analog block */
+ ret = regmap_write(rphy->grf, 0x0008, GENMASK(29, 29) | 0x0000);
+ if (ret)
+ return ret;
+
+ /* Reset the PHY after exiting IDDQ mode */
+ ret = rockchip_usb2phy_reset(rphy);
+ if (ret)
+ return ret;
+
+ /* suspend configuration */
+ ret |= regmap_write(rphy->grf, 0x000c, GENMASK(20, 16) | suspend_cfg);
+
+ /* HS DC Voltage Level Adjustment 4'b1001 : +5.89% */
+ ret |= regmap_write(rphy->grf, 0x0004, GENMASK(27, 24) | 0x0900);
+
+ /* HS Transmitter Pre-Emphasis Current Control 2'b10 : 2x */
+ ret |= regmap_write(rphy->grf, 0x0008, GENMASK(20, 19) | 0x0010);
+
+ if (!usb3otg)
+ return ret;
+
+ /* Pullup iddig pin for USB3_0 OTG mode */
+ ret |= regmap_write(rphy->grf, 0x0010, GENMASK(17, 16) | 0x0003);
+
return ret;
}
@@ -1664,6 +1828,126 @@ static const struct rockchip_usb2phy_cfg rk3568_phy_cfgs[] = {
{ /* sentinel */ }
};
+static const struct rockchip_usb2phy_cfg rk3588_phy_cfgs[] = {
+ {
+ .reg = 0x0000,
+ .num_ports = 1,
+ .phy_tuning = rk3588_usb2phy_tuning,
+ .clkout_ctl = { 0x0000, 0, 0, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0x000c, 11, 11, 0, 1 },
+ .bvalid_det_en = { 0x0080, 1, 1, 0, 1 },
+ .bvalid_det_st = { 0x0084, 1, 1, 0, 1 },
+ .bvalid_det_clr = { 0x0088, 1, 1, 0, 1 },
+ .ls_det_en = { 0x0080, 0, 0, 0, 1 },
+ .ls_det_st = { 0x0084, 0, 0, 0, 1 },
+ .ls_det_clr = { 0x0088, 0, 0, 0, 1 },
+ .disfall_en = { 0x0080, 6, 6, 0, 1 },
+ .disfall_st = { 0x0084, 6, 6, 0, 1 },
+ .disfall_clr = { 0x0088, 6, 6, 0, 1 },
+ .disrise_en = { 0x0080, 5, 5, 0, 1 },
+ .disrise_st = { 0x0084, 5, 5, 0, 1 },
+ .disrise_clr = { 0x0088, 5, 5, 0, 1 },
+ .utmi_avalid = { 0x00c0, 7, 7, 0, 1 },
+ .utmi_bvalid = { 0x00c0, 6, 6, 0, 1 },
+ .utmi_ls = { 0x00c0, 10, 9, 0, 1 },
+ }
+ },
+ .chg_det = {
+ .cp_det = { 0x00c0, 0, 0, 0, 1 },
+ .dcp_det = { 0x00c0, 0, 0, 0, 1 },
+ .dp_det = { 0x00c0, 1, 1, 1, 0 },
+ .idm_sink_en = { 0x0008, 5, 5, 1, 0 },
+ .idp_sink_en = { 0x0008, 5, 5, 0, 1 },
+ .idp_src_en = { 0x0008, 14, 14, 0, 1 },
+ .rdm_pdwn_en = { 0x0008, 14, 14, 0, 1 },
+ .vdm_src_en = { 0x0008, 7, 6, 0, 3 },
+ .vdp_src_en = { 0x0008, 7, 6, 0, 3 },
+ },
+ },
+ {
+ .reg = 0x4000,
+ .num_ports = 1,
+ .phy_tuning = rk3588_usb2phy_tuning,
+ .clkout_ctl = { 0x0000, 0, 0, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0x000c, 11, 11, 0, 1 },
+ .bvalid_det_en = { 0x0080, 1, 1, 0, 1 },
+ .bvalid_det_st = { 0x0084, 1, 1, 0, 1 },
+ .bvalid_det_clr = { 0x0088, 1, 1, 0, 1 },
+ .ls_det_en = { 0x0080, 0, 0, 0, 1 },
+ .ls_det_st = { 0x0084, 0, 0, 0, 1 },
+ .ls_det_clr = { 0x0088, 0, 0, 0, 1 },
+ .disfall_en = { 0x0080, 6, 6, 0, 1 },
+ .disfall_st = { 0x0084, 6, 6, 0, 1 },
+ .disfall_clr = { 0x0088, 6, 6, 0, 1 },
+ .disrise_en = { 0x0080, 5, 5, 0, 1 },
+ .disrise_st = { 0x0084, 5, 5, 0, 1 },
+ .disrise_clr = { 0x0088, 5, 5, 0, 1 },
+ .utmi_avalid = { 0x00c0, 7, 7, 0, 1 },
+ .utmi_bvalid = { 0x00c0, 6, 6, 0, 1 },
+ .utmi_ls = { 0x00c0, 10, 9, 0, 1 },
+ }
+ },
+ .chg_det = {
+ .cp_det = { 0x00c0, 0, 0, 0, 1 },
+ .dcp_det = { 0x00c0, 0, 0, 0, 1 },
+ .dp_det = { 0x00c0, 1, 1, 1, 0 },
+ .idm_sink_en = { 0x0008, 5, 5, 1, 0 },
+ .idp_sink_en = { 0x0008, 5, 5, 0, 1 },
+ .idp_src_en = { 0x0008, 14, 14, 0, 1 },
+ .rdm_pdwn_en = { 0x0008, 14, 14, 0, 1 },
+ .vdm_src_en = { 0x0008, 7, 6, 0, 3 },
+ .vdp_src_en = { 0x0008, 7, 6, 0, 3 },
+ },
+ },
+ {
+ .reg = 0x8000,
+ .num_ports = 1,
+ .phy_tuning = rk3588_usb2phy_tuning,
+ .clkout_ctl = { 0x0000, 0, 0, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_HOST] = {
+ .phy_sus = { 0x0008, 2, 2, 0, 1 },
+ .ls_det_en = { 0x0080, 0, 0, 0, 1 },
+ .ls_det_st = { 0x0084, 0, 0, 0, 1 },
+ .ls_det_clr = { 0x0088, 0, 0, 0, 1 },
+ .disfall_en = { 0x0080, 6, 6, 0, 1 },
+ .disfall_st = { 0x0084, 6, 6, 0, 1 },
+ .disfall_clr = { 0x0088, 6, 6, 0, 1 },
+ .disrise_en = { 0x0080, 5, 5, 0, 1 },
+ .disrise_st = { 0x0084, 5, 5, 0, 1 },
+ .disrise_clr = { 0x0088, 5, 5, 0, 1 },
+ .utmi_ls = { 0x00c0, 10, 9, 0, 1 },
+ }
+ },
+ },
+ {
+ .reg = 0xc000,
+ .num_ports = 1,
+ .phy_tuning = rk3588_usb2phy_tuning,
+ .clkout_ctl = { 0x0000, 0, 0, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_HOST] = {
+ .phy_sus = { 0x0008, 2, 2, 0, 1 },
+ .ls_det_en = { 0x0080, 0, 0, 0, 1 },
+ .ls_det_st = { 0x0084, 0, 0, 0, 1 },
+ .ls_det_clr = { 0x0088, 0, 0, 0, 1 },
+ .disfall_en = { 0x0080, 6, 6, 0, 1 },
+ .disfall_st = { 0x0084, 6, 6, 0, 1 },
+ .disfall_clr = { 0x0088, 6, 6, 0, 1 },
+ .disrise_en = { 0x0080, 5, 5, 0, 1 },
+ .disrise_st = { 0x0084, 5, 5, 0, 1 },
+ .disrise_clr = { 0x0088, 5, 5, 0, 1 },
+ .utmi_ls = { 0x00c0, 10, 9, 0, 1 },
+ }
+ },
+ },
+ { /* sentinel */ }
+};
+
static const struct rockchip_usb2phy_cfg rv1108_phy_cfgs[] = {
{
.reg = 0x100,
@@ -1714,6 +1998,7 @@ static const struct of_device_id rockchip_usb2phy_dt_match[] = {
{ .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs },
{ .compatible = "rockchip,rk3399-usb2phy", .data = &rk3399_phy_cfgs },
{ .compatible = "rockchip,rk3568-usb2phy", .data = &rk3568_phy_cfgs },
+ { .compatible = "rockchip,rk3588-usb2phy", .data = &rk3588_phy_cfgs },
{ .compatible = "rockchip,rv1108-usb2phy", .data = &rv1108_phy_cfgs },
{}
};
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index 7b8b001e4f9e..5de5e2e97ffa 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -8,8 +8,9 @@
#include <dt-bindings/phy/phy.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/units.h>
diff --git a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
index 1d355b32ba55..121e5961ce11 100644
--- a/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
+++ b/drivers/phy/rockchip/phy-rockchip-snps-pcie3.c
@@ -12,9 +12,10 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/pcie.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index 8b1667be4915..4efcb78b0ab1 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -1116,8 +1116,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tcphy->base = devm_ioremap_resource(dev, res);
+ tcphy->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(tcphy->base))
return PTR_ERR(tcphy->base);
diff --git a/drivers/phy/samsung/phy-exynos-dp-video.c b/drivers/phy/samsung/phy-exynos-dp-video.c
index 6069fedbd8f3..a636dee07585 100644
--- a/drivers/phy/samsung/phy-exynos-dp-video.c
+++ b/drivers/phy/samsung/phy-exynos-dp-video.c
@@ -12,8 +12,6 @@
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/samsung/phy-exynos-mipi-video.c b/drivers/phy/samsung/phy-exynos-mipi-video.c
index a7f67857e5b2..592d8067e848 100644
--- a/drivers/phy/samsung/phy-exynos-mipi-video.c
+++ b/drivers/phy/samsung/phy-exynos-mipi-video.c
@@ -11,9 +11,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index ee0848fe8432..3f310b28bfff 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -14,8 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/iopoll.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
@@ -32,6 +30,7 @@
#define EXYNOS5_FSEL_19MHZ2 0x3
#define EXYNOS5_FSEL_20MHZ 0x4
#define EXYNOS5_FSEL_24MHZ 0x5
+#define EXYNOS5_FSEL_26MHZ 0x82
#define EXYNOS5_FSEL_50MHZ 0x7
/* Exynos5: USB 3.0 DRD PHY registers */
@@ -146,6 +145,34 @@
#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_62M5 (0x20 << 4)
#define LANE0_TX_DEBUG_RXDET_MEAS_TIME_96M_100M (0x40 << 4)
+/* Exynos850: USB DRD PHY registers */
+#define EXYNOS850_DRD_LINKCTRL 0x04
+#define LINKCTRL_BUS_FILTER_BYPASS(_x) ((_x) << 4)
+#define LINKCTRL_FORCE_QACT BIT(8)
+
+#define EXYNOS850_DRD_CLKRST 0x20
+#define CLKRST_LINK_SW_RST BIT(0)
+#define CLKRST_PORT_RST BIT(1)
+#define CLKRST_PHY_SW_RST BIT(3)
+
+#define EXYNOS850_DRD_UTMI 0x50
+#define UTMI_FORCE_SLEEP BIT(0)
+#define UTMI_FORCE_SUSPEND BIT(1)
+#define UTMI_DM_PULLDOWN BIT(2)
+#define UTMI_DP_PULLDOWN BIT(3)
+#define UTMI_FORCE_BVALID BIT(4)
+#define UTMI_FORCE_VBUSVALID BIT(5)
+
+#define EXYNOS850_DRD_HSP 0x54
+#define HSP_COMMONONN BIT(8)
+#define HSP_EN_UTMISUSPEND BIT(9)
+#define HSP_VBUSVLDEXT BIT(12)
+#define HSP_VBUSVLDEXTSEL BIT(13)
+#define HSP_FSV_OUT_EN BIT(24)
+
+#define EXYNOS850_DRD_HSP_TEST 0x5c
+#define HSP_TEST_SIDDQ BIT(24)
+
#define KHZ 1000
#define MHZ (KHZ * KHZ)
@@ -167,6 +194,7 @@ struct exynos5_usbdrd_phy_config {
struct exynos5_usbdrd_phy_drvdata {
const struct exynos5_usbdrd_phy_config *phy_cfg;
+ const struct phy_ops *phy_ops;
u32 pmu_offset_usbdrd0_phy;
u32 pmu_offset_usbdrd1_phy;
bool has_common_clk_gate;
@@ -245,6 +273,9 @@ static unsigned int exynos5_rate_to_clk(unsigned long rate, u32 *reg)
case 24 * MHZ:
*reg = EXYNOS5_FSEL_24MHZ;
break;
+ case 26 * MHZ:
+ *reg = EXYNOS5_FSEL_26MHZ;
+ break;
case 50 * MHZ:
*reg = EXYNOS5_FSEL_50MHZ;
break;
@@ -713,6 +744,129 @@ static const struct phy_ops exynos5_usbdrd_phy_ops = {
.owner = THIS_MODULE,
};
+static void exynos850_usbdrd_utmi_init(struct exynos5_usbdrd_phy *phy_drd)
+{
+ void __iomem *regs_base = phy_drd->reg_phy;
+ u32 reg;
+
+ /*
+ * Disable HWACG (hardware auto clock gating control). This will force
+ * the QACTIVE signal in the Q-Channel interface to HIGH, ensuring
+ * that the PHY clock is not gated by the hardware.
+ */
+ reg = readl(regs_base + EXYNOS850_DRD_LINKCTRL);
+ reg |= LINKCTRL_FORCE_QACT;
+ writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL);
+
+ /* Start PHY Reset (POR=high) */
+ reg = readl(regs_base + EXYNOS850_DRD_CLKRST);
+ reg |= CLKRST_PHY_SW_RST;
+ writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
+
+ /* Enable UTMI+ */
+ reg = readl(regs_base + EXYNOS850_DRD_UTMI);
+ reg &= ~(UTMI_FORCE_SUSPEND | UTMI_FORCE_SLEEP | UTMI_DP_PULLDOWN |
+ UTMI_DM_PULLDOWN);
+ writel(reg, regs_base + EXYNOS850_DRD_UTMI);
+
+ /* Set PHY clock and control HS PHY */
+ reg = readl(regs_base + EXYNOS850_DRD_HSP);
+ reg |= HSP_EN_UTMISUSPEND | HSP_COMMONONN;
+ writel(reg, regs_base + EXYNOS850_DRD_HSP);
+
+ /* Set VBUS Valid and D+ pull-up control by VBUS pad usage */
+ reg = readl(regs_base + EXYNOS850_DRD_LINKCTRL);
+ reg |= LINKCTRL_BUS_FILTER_BYPASS(0xf);
+ writel(reg, regs_base + EXYNOS850_DRD_LINKCTRL);
+
+ reg = readl(regs_base + EXYNOS850_DRD_UTMI);
+ reg |= UTMI_FORCE_BVALID | UTMI_FORCE_VBUSVALID;
+ writel(reg, regs_base + EXYNOS850_DRD_UTMI);
+
+ reg = readl(regs_base + EXYNOS850_DRD_HSP);
+ reg |= HSP_VBUSVLDEXT | HSP_VBUSVLDEXTSEL;
+ writel(reg, regs_base + EXYNOS850_DRD_HSP);
+
+ /* Power up PHY analog blocks */
+ reg = readl(regs_base + EXYNOS850_DRD_HSP_TEST);
+ reg &= ~HSP_TEST_SIDDQ;
+ writel(reg, regs_base + EXYNOS850_DRD_HSP_TEST);
+
+ /* Finish PHY reset (POR=low) */
+ udelay(10); /* required before doing POR=low */
+ reg = readl(regs_base + EXYNOS850_DRD_CLKRST);
+ reg &= ~(CLKRST_PHY_SW_RST | CLKRST_PORT_RST);
+ writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
+ udelay(75); /* required after POR=low for guaranteed PHY clock */
+
+ /* Disable single-ended signal output */
+ reg = readl(regs_base + EXYNOS850_DRD_HSP);
+ reg &= ~HSP_FSV_OUT_EN;
+ writel(reg, regs_base + EXYNOS850_DRD_HSP);
+}
+
+static int exynos850_usbdrd_phy_init(struct phy *phy)
+{
+ struct phy_usb_instance *inst = phy_get_drvdata(phy);
+ struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
+ int ret;
+
+ ret = clk_prepare_enable(phy_drd->clk);
+ if (ret)
+ return ret;
+
+ /* UTMI or PIPE3 specific init */
+ inst->phy_cfg->phy_init(phy_drd);
+
+ clk_disable_unprepare(phy_drd->clk);
+
+ return 0;
+}
+
+static int exynos850_usbdrd_phy_exit(struct phy *phy)
+{
+ struct phy_usb_instance *inst = phy_get_drvdata(phy);
+ struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
+ void __iomem *regs_base = phy_drd->reg_phy;
+ u32 reg;
+ int ret;
+
+ ret = clk_prepare_enable(phy_drd->clk);
+ if (ret)
+ return ret;
+
+ /* Set PHY clock and control HS PHY */
+ reg = readl(regs_base + EXYNOS850_DRD_UTMI);
+ reg &= ~(UTMI_DP_PULLDOWN | UTMI_DM_PULLDOWN);
+ reg |= UTMI_FORCE_SUSPEND | UTMI_FORCE_SLEEP;
+ writel(reg, regs_base + EXYNOS850_DRD_UTMI);
+
+ /* Power down PHY analog blocks */
+ reg = readl(regs_base + EXYNOS850_DRD_HSP_TEST);
+ reg |= HSP_TEST_SIDDQ;
+ writel(reg, regs_base + EXYNOS850_DRD_HSP_TEST);
+
+ /* Link reset */
+ reg = readl(regs_base + EXYNOS850_DRD_CLKRST);
+ reg |= CLKRST_LINK_SW_RST;
+ writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
+ udelay(10); /* required before doing POR=low */
+ reg &= ~CLKRST_LINK_SW_RST;
+ writel(reg, regs_base + EXYNOS850_DRD_CLKRST);
+
+ clk_disable_unprepare(phy_drd->clk);
+
+ return 0;
+}
+
+static const struct phy_ops exynos850_usbdrd_phy_ops = {
+ .init = exynos850_usbdrd_phy_init,
+ .exit = exynos850_usbdrd_phy_exit,
+ .power_on = exynos5_usbdrd_phy_power_on,
+ .power_off = exynos5_usbdrd_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
static int exynos5_usbdrd_phy_clk_handle(struct exynos5_usbdrd_phy *phy_drd)
{
unsigned long ref_rate;
@@ -779,8 +933,17 @@ static const struct exynos5_usbdrd_phy_config phy_cfg_exynos5[] = {
},
};
+static const struct exynos5_usbdrd_phy_config phy_cfg_exynos850[] = {
+ {
+ .id = EXYNOS5_DRDPHY_UTMI,
+ .phy_isol = exynos5_usbdrd_phy_isol,
+ .phy_init = exynos850_usbdrd_utmi_init,
+ },
+};
+
static const struct exynos5_usbdrd_phy_drvdata exynos5420_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
+ .phy_ops = &exynos5_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
.pmu_offset_usbdrd1_phy = EXYNOS5420_USBDRD1_PHY_CONTROL,
.has_common_clk_gate = true,
@@ -788,12 +951,14 @@ static const struct exynos5_usbdrd_phy_drvdata exynos5420_usbdrd_phy = {
static const struct exynos5_usbdrd_phy_drvdata exynos5250_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
+ .phy_ops = &exynos5_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
.has_common_clk_gate = true,
};
static const struct exynos5_usbdrd_phy_drvdata exynos5433_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
+ .phy_ops = &exynos5_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
.pmu_offset_usbdrd1_phy = EXYNOS5433_USBHOST30_PHY_CONTROL,
.has_common_clk_gate = false,
@@ -801,10 +966,18 @@ static const struct exynos5_usbdrd_phy_drvdata exynos5433_usbdrd_phy = {
static const struct exynos5_usbdrd_phy_drvdata exynos7_usbdrd_phy = {
.phy_cfg = phy_cfg_exynos5,
+ .phy_ops = &exynos5_usbdrd_phy_ops,
.pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
.has_common_clk_gate = false,
};
+static const struct exynos5_usbdrd_phy_drvdata exynos850_usbdrd_phy = {
+ .phy_cfg = phy_cfg_exynos850,
+ .phy_ops = &exynos850_usbdrd_phy_ops,
+ .pmu_offset_usbdrd0_phy = EXYNOS5_USBDRD_PHY_CONTROL,
+ .has_common_clk_gate = true,
+};
+
static const struct of_device_id exynos5_usbdrd_phy_of_match[] = {
{
.compatible = "samsung,exynos5250-usbdrd-phy",
@@ -818,6 +991,9 @@ static const struct of_device_id exynos5_usbdrd_phy_of_match[] = {
}, {
.compatible = "samsung,exynos7-usbdrd-phy",
.data = &exynos7_usbdrd_phy
+ }, {
+ .compatible = "samsung,exynos850-usbdrd-phy",
+ .data = &exynos850_usbdrd_phy
},
{ },
};
@@ -908,8 +1084,8 @@ static int exynos5_usbdrd_phy_probe(struct platform_device *pdev)
dev_vdbg(dev, "Creating usbdrd_phy phy\n");
for (i = 0; i < EXYNOS5_DRDPHYS_NUM; i++) {
- struct phy *phy = devm_phy_create(dev, NULL,
- &exynos5_usbdrd_phy_ops);
+ struct phy *phy = devm_phy_create(dev, NULL, drv_data->phy_ops);
+
if (IS_ERR(phy)) {
dev_err(dev, "Failed to create usbdrd_phy phy\n");
return PTR_ERR(phy);
diff --git a/drivers/phy/samsung/phy-samsung-usb2.c b/drivers/phy/samsung/phy-samsung-usb2.c
index ec2befabeea6..68a174eca0ba 100644
--- a/drivers/phy/samsung/phy-samsung-usb2.c
+++ b/drivers/phy/samsung/phy-samsung-usb2.c
@@ -10,8 +10,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
diff --git a/drivers/phy/socionext/phy-uniphier-pcie.c b/drivers/phy/socionext/phy-uniphier-pcie.c
index ebca296ef123..c19173492b79 100644
--- a/drivers/phy/socionext/phy-uniphier-pcie.c
+++ b/drivers/phy/socionext/phy-uniphier-pcie.c
@@ -11,7 +11,7 @@
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/phy/st/phy-spear1310-miphy.c b/drivers/phy/st/phy-spear1310-miphy.c
index 292413db7da4..35a9831b5161 100644
--- a/drivers/phy/st/phy-spear1310-miphy.c
+++ b/drivers/phy/st/phy-spear1310-miphy.c
@@ -13,8 +13,9 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
/* SPEAr1310 Registers */
diff --git a/drivers/phy/st/phy-spear1340-miphy.c b/drivers/phy/st/phy-spear1340-miphy.c
index c1d9ffa5a311..34a1cf21015f 100644
--- a/drivers/phy/st/phy-spear1340-miphy.c
+++ b/drivers/phy/st/phy-spear1340-miphy.c
@@ -13,8 +13,9 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
/* SPEAr1340 Registers */
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index 0a8552628cbd..d5e7e44000b5 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -12,8 +12,9 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/units.h>
diff --git a/drivers/phy/starfive/Kconfig b/drivers/phy/starfive/Kconfig
new file mode 100644
index 000000000000..9508e2143011
--- /dev/null
+++ b/drivers/phy/starfive/Kconfig
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Phy drivers for StarFive platforms
+#
+
+if ARCH_STARFIVE || COMPILE_TEST
+
+config PHY_STARFIVE_JH7110_DPHY_RX
+ tristate "StarFive JH7110 D-PHY RX support"
+ depends on HAS_IOMEM
+ select GENERIC_PHY
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ Choose this option if you have a StarFive D-PHY in your
+ system. If M is selected, the module will be called
+ phy-jh7110-dphy-rx.ko.
+
+config PHY_STARFIVE_JH7110_PCIE
+ tristate "Starfive JH7110 PCIE 2.0/USB 3.0 PHY support"
+ depends on HAS_IOMEM
+ select GENERIC_PHY
+ help
+ Enable this to support the StarFive PCIe 2.0 PHY,
+ which can also be used as a USB 3.0 PHY.
+ If M is selected, the module will be called
+ phy-jh7110-pcie.ko.
+
+config PHY_STARFIVE_JH7110_USB
+ tristate "Starfive JH7110 USB 2.0 PHY support"
+ depends on USB_SUPPORT
+ select GENERIC_PHY
+ help
+ Enable this to support the StarFive USB 2.0 PHY,
+ used with the Cadence USB controller.
+ If M is selected, the module will be called
+ phy-jh7110-usb.ko.
+
+endif # ARCH_STARFIVE || COMPILE_TEST
diff --git a/drivers/phy/starfive/Makefile b/drivers/phy/starfive/Makefile
new file mode 100644
index 000000000000..b391018b7c47
--- /dev/null
+++ b/drivers/phy/starfive/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_STARFIVE_JH7110_DPHY_RX) += phy-jh7110-dphy-rx.o
+obj-$(CONFIG_PHY_STARFIVE_JH7110_PCIE) += phy-jh7110-pcie.o
+obj-$(CONFIG_PHY_STARFIVE_JH7110_USB) += phy-jh7110-usb.o
diff --git a/drivers/phy/starfive/phy-jh7110-dphy-rx.c b/drivers/phy/starfive/phy-jh7110-dphy-rx.c
new file mode 100644
index 000000000000..037a9e0263cd
--- /dev/null
+++ b/drivers/phy/starfive/phy-jh7110-dphy-rx.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * StarFive JH7110 DPHY RX driver
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ * Author: Jack Zhu <jack.zhu@starfivetech.com>
+ * Author: Changhuang Liang <changhuang.liang@starfivetech.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#define STF_DPHY_APBCFGSAIF_SYSCFG(x) (x)
+
+#define STF_DPHY_ENABLE_CLK BIT(6)
+#define STF_DPHY_ENABLE_CLK1 BIT(7)
+#define STF_DPHY_ENABLE_LAN0 BIT(8)
+#define STF_DPHY_ENABLE_LAN1 BIT(9)
+#define STF_DPHY_ENABLE_LAN2 BIT(10)
+#define STF_DPHY_ENABLE_LAN3 BIT(11)
+#define STF_DPHY_LANE_SWAP_CLK GENMASK(22, 20)
+#define STF_DPHY_LANE_SWAP_CLK1 GENMASK(25, 23)
+#define STF_DPHY_LANE_SWAP_LAN0 GENMASK(28, 26)
+#define STF_DPHY_LANE_SWAP_LAN1 GENMASK(31, 29)
+
+#define STF_DPHY_LANE_SWAP_LAN2 GENMASK(2, 0)
+#define STF_DPHY_LANE_SWAP_LAN3 GENMASK(5, 3)
+#define STF_DPHY_PLL_CLK_SEL GENMASK(21, 12)
+#define STF_DPHY_PRECOUNTER_IN_CLK GENMASK(29, 22)
+
+#define STF_DPHY_PRECOUNTER_IN_CLK1 GENMASK(7, 0)
+#define STF_DPHY_PRECOUNTER_IN_LAN0 GENMASK(15, 8)
+#define STF_DPHY_PRECOUNTER_IN_LAN1 GENMASK(23, 16)
+#define STF_DPHY_PRECOUNTER_IN_LAN2 GENMASK(31, 24)
+
+#define STF_DPHY_PRECOUNTER_IN_LAN3 GENMASK(7, 0)
+#define STF_DPHY_RX_1C2C_SEL BIT(8)
+
+#define STF_MAP_LANES_NUM 6
+
+struct regval {
+ u32 addr;
+ u32 val;
+};
+
+struct stf_dphy_info {
+ /**
+ * @maps:
+ *
+ * Mapping table between physical lanes and logical lanes.
+ *
+ * The default order is:
+ * [clk lane 0, data lane 0, data lane 1, data lane 2, data lane 3, clk lane 1]
+ */
+ u8 maps[STF_MAP_LANES_NUM];
+};
+
+struct stf_dphy {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *cfg_clk;
+ struct clk *ref_clk;
+ struct clk *tx_clk;
+ struct reset_control *rstc;
+ struct regulator *mipi_0p9;
+ struct phy *phy;
+ const struct stf_dphy_info *info;
+};
+
+static int stf_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ struct stf_dphy *dphy = phy_get_drvdata(phy);
+ const struct stf_dphy_info *info = dphy->info;
+
+ writel(FIELD_PREP(STF_DPHY_ENABLE_CLK, 1) |
+ FIELD_PREP(STF_DPHY_ENABLE_CLK1, 1) |
+ FIELD_PREP(STF_DPHY_ENABLE_LAN0, 1) |
+ FIELD_PREP(STF_DPHY_ENABLE_LAN1, 1) |
+ FIELD_PREP(STF_DPHY_ENABLE_LAN2, 1) |
+ FIELD_PREP(STF_DPHY_ENABLE_LAN3, 1) |
+ FIELD_PREP(STF_DPHY_LANE_SWAP_CLK, info->maps[0]) |
+ FIELD_PREP(STF_DPHY_LANE_SWAP_CLK1, info->maps[5]) |
+ FIELD_PREP(STF_DPHY_LANE_SWAP_LAN0, info->maps[1]) |
+ FIELD_PREP(STF_DPHY_LANE_SWAP_LAN1, info->maps[2]),
+ dphy->regs + STF_DPHY_APBCFGSAIF_SYSCFG(188));
+
+ writel(FIELD_PREP(STF_DPHY_LANE_SWAP_LAN2, info->maps[3]) |
+ FIELD_PREP(STF_DPHY_LANE_SWAP_LAN3, info->maps[4]) |
+ FIELD_PREP(STF_DPHY_PRECOUNTER_IN_CLK, 8),
+ dphy->regs + STF_DPHY_APBCFGSAIF_SYSCFG(192));
+
+ writel(FIELD_PREP(STF_DPHY_PRECOUNTER_IN_CLK1, 8) |
+ FIELD_PREP(STF_DPHY_PRECOUNTER_IN_LAN0, 7) |
+ FIELD_PREP(STF_DPHY_PRECOUNTER_IN_LAN1, 7) |
+ FIELD_PREP(STF_DPHY_PRECOUNTER_IN_LAN2, 7),
+ dphy->regs + STF_DPHY_APBCFGSAIF_SYSCFG(196));
+
+ writel(FIELD_PREP(STF_DPHY_PRECOUNTER_IN_LAN3, 7),
+ dphy->regs + STF_DPHY_APBCFGSAIF_SYSCFG(200));
+
+ return 0;
+}
+
+static int stf_dphy_power_on(struct phy *phy)
+{
+ struct stf_dphy *dphy = phy_get_drvdata(phy);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dphy->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = regulator_enable(dphy->mipi_0p9);
+ if (ret) {
+ pm_runtime_put(dphy->dev);
+ return ret;
+ }
+
+ clk_set_rate(dphy->cfg_clk, 99000000);
+ clk_set_rate(dphy->ref_clk, 49500000);
+ clk_set_rate(dphy->tx_clk, 19800000);
+ reset_control_deassert(dphy->rstc);
+
+ return 0;
+}
+
+static int stf_dphy_power_off(struct phy *phy)
+{
+ struct stf_dphy *dphy = phy_get_drvdata(phy);
+
+ reset_control_assert(dphy->rstc);
+
+ regulator_disable(dphy->mipi_0p9);
+
+ pm_runtime_put_sync(dphy->dev);
+
+ return 0;
+}
+
+static const struct phy_ops stf_dphy_ops = {
+ .configure = stf_dphy_configure,
+ .power_on = stf_dphy_power_on,
+ .power_off = stf_dphy_power_off,
+};
+
+static int stf_dphy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct stf_dphy *dphy;
+
+ dphy = devm_kzalloc(&pdev->dev, sizeof(*dphy), GFP_KERNEL);
+ if (!dphy)
+ return -ENOMEM;
+
+ dphy->info = of_device_get_match_data(&pdev->dev);
+
+ dev_set_drvdata(&pdev->dev, dphy);
+ dphy->dev = &pdev->dev;
+
+ dphy->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dphy->regs))
+ return PTR_ERR(dphy->regs);
+
+ dphy->cfg_clk = devm_clk_get(&pdev->dev, "cfg");
+ if (IS_ERR(dphy->cfg_clk))
+ return PTR_ERR(dphy->cfg_clk);
+
+ dphy->ref_clk = devm_clk_get(&pdev->dev, "ref");
+ if (IS_ERR(dphy->ref_clk))
+ return PTR_ERR(dphy->ref_clk);
+
+ dphy->tx_clk = devm_clk_get(&pdev->dev, "tx");
+ if (IS_ERR(dphy->tx_clk))
+ return PTR_ERR(dphy->tx_clk);
+
+ dphy->rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(dphy->rstc))
+ return PTR_ERR(dphy->rstc);
+
+ dphy->mipi_0p9 = devm_regulator_get(&pdev->dev, "mipi_0p9");
+ if (IS_ERR(dphy->mipi_0p9))
+ return PTR_ERR(dphy->mipi_0p9);
+
+ dphy->phy = devm_phy_create(&pdev->dev, NULL, &stf_dphy_ops);
+ if (IS_ERR(dphy->phy)) {
+ dev_err(&pdev->dev, "Failed to create PHY\n");
+ return PTR_ERR(dphy->phy);
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ phy_set_drvdata(dphy->phy, dphy);
+ phy_provider = devm_of_phy_provider_register(&pdev->dev,
+ of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct stf_dphy_info starfive_dphy_info = {
+ .maps = {4, 0, 1, 2, 3, 5},
+};
+
+static const struct of_device_id stf_dphy_dt_ids[] = {
+ {
+ .compatible = "starfive,jh7110-dphy-rx",
+ .data = &starfive_dphy_info,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, stf_dphy_dt_ids);
+
+static struct platform_driver stf_dphy_driver = {
+ .probe = stf_dphy_probe,
+ .driver = {
+ .name = "starfive-dphy-rx",
+ .of_match_table = stf_dphy_dt_ids,
+ },
+};
+module_platform_driver(stf_dphy_driver);
+
+MODULE_AUTHOR("Jack Zhu <jack.zhu@starfivetech.com>");
+MODULE_AUTHOR("Changhuang Liang <changhuang.liang@starfivetech.com>");
+MODULE_DESCRIPTION("StarFive JH7110 DPHY RX driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/starfive/phy-jh7110-pcie.c b/drivers/phy/starfive/phy-jh7110-pcie.c
new file mode 100644
index 000000000000..734c8e007727
--- /dev/null
+++ b/drivers/phy/starfive/phy-jh7110-pcie.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * StarFive JH7110 PCIe 2.0 PHY driver
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ * Author: Minda Chen <minda.chen@starfivetech.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define PCIE_KVCO_LEVEL_OFF 0x28
+#define PCIE_USB3_PHY_PLL_CTL_OFF 0x7c
+#define PCIE_KVCO_TUNE_SIGNAL_OFF 0x80
+#define PCIE_USB3_PHY_ENABLE BIT(4)
+#define PHY_KVCO_FINE_TUNE_LEVEL 0x91
+#define PHY_KVCO_FINE_TUNE_SIGNALS 0xc
+
+#define USB_PDRSTN_SPLIT BIT(17)
+
+#define PCIE_PHY_MODE BIT(20)
+#define PCIE_PHY_MODE_MASK GENMASK(21, 20)
+#define PCIE_USB3_BUS_WIDTH_MASK GENMASK(3, 2)
+#define PCIE_USB3_BUS_WIDTH BIT(3)
+#define PCIE_USB3_RATE_MASK GENMASK(6, 5)
+#define PCIE_USB3_RX_STANDBY_MASK BIT(7)
+#define PCIE_USB3_PHY_ENABLE BIT(4)
+
+struct jh7110_pcie_phy {
+ struct phy *phy;
+ struct regmap *stg_syscon;
+ struct regmap *sys_syscon;
+ void __iomem *regs;
+ u32 sys_phy_connect;
+ u32 stg_pcie_mode;
+ u32 stg_pcie_usb;
+ enum phy_mode mode;
+};
+
+static int phy_usb3_mode_set(struct jh7110_pcie_phy *data)
+{
+ if (!data->stg_syscon || !data->sys_syscon) {
+ dev_err(&data->phy->dev, "doesn't support usb3 mode\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(data->stg_syscon, data->stg_pcie_mode,
+ PCIE_PHY_MODE_MASK, PCIE_PHY_MODE);
+ regmap_update_bits(data->stg_syscon, data->stg_pcie_usb,
+ PCIE_USB3_BUS_WIDTH_MASK, 0);
+ regmap_update_bits(data->stg_syscon, data->stg_pcie_usb,
+ PCIE_USB3_PHY_ENABLE, PCIE_USB3_PHY_ENABLE);
+
+ /* Connect usb 3.0 phy mode */
+ regmap_update_bits(data->sys_syscon, data->sys_phy_connect,
+ USB_PDRSTN_SPLIT, 0);
+
+ /* Configure spread-spectrum mode: down-spread-spectrum */
+ writel(PCIE_USB3_PHY_ENABLE, data->regs + PCIE_USB3_PHY_PLL_CTL_OFF);
+
+ return 0;
+}
+
+static void phy_pcie_mode_set(struct jh7110_pcie_phy *data)
+{
+ u32 val;
+
+ /* default is PCIe mode */
+ if (!data->stg_syscon || !data->sys_syscon)
+ return;
+
+ regmap_update_bits(data->stg_syscon, data->stg_pcie_mode,
+ PCIE_PHY_MODE_MASK, 0);
+ regmap_update_bits(data->stg_syscon, data->stg_pcie_usb,
+ PCIE_USB3_BUS_WIDTH_MASK,
+ PCIE_USB3_BUS_WIDTH);
+ regmap_update_bits(data->stg_syscon, data->stg_pcie_usb,
+ PCIE_USB3_PHY_ENABLE, 0);
+
+ regmap_update_bits(data->sys_syscon, data->sys_phy_connect,
+ USB_PDRSTN_SPLIT, 0);
+
+ val = readl(data->regs + PCIE_USB3_PHY_PLL_CTL_OFF);
+ val &= ~PCIE_USB3_PHY_ENABLE;
+ writel(val, data->regs + PCIE_USB3_PHY_PLL_CTL_OFF);
+}
+
+static void phy_kvco_gain_set(struct jh7110_pcie_phy *phy)
+{
+ /* PCIe Multi-PHY PLL KVCO gain fine-tune settings */
+ writel(PHY_KVCO_FINE_TUNE_LEVEL, phy->regs + PCIE_KVCO_LEVEL_OFF);
+ writel(PHY_KVCO_FINE_TUNE_SIGNALS, phy->regs + PCIE_KVCO_TUNE_SIGNAL_OFF);
+}
+
+static int jh7110_pcie_phy_set_mode(struct phy *_phy,
+ enum phy_mode mode, int submode)
+{
+ struct jh7110_pcie_phy *phy = phy_get_drvdata(_phy);
+ int ret;
+
+ if (mode == phy->mode)
+ return 0;
+
+ switch (mode) {
+ case PHY_MODE_USB_HOST:
+ case PHY_MODE_USB_DEVICE:
+ case PHY_MODE_USB_OTG:
+ ret = phy_usb3_mode_set(phy);
+ if (ret)
+ return ret;
+ break;
+ case PHY_MODE_PCIE:
+ phy_pcie_mode_set(phy);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(&_phy->dev, "Changing phy mode to %d\n", mode);
+ phy->mode = mode;
+
+ return 0;
+}
+
+static const struct phy_ops jh7110_pcie_phy_ops = {
+ .set_mode = jh7110_pcie_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static int jh7110_pcie_phy_probe(struct platform_device *pdev)
+{
+ struct jh7110_pcie_phy *phy;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ u32 args[2];
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(phy->regs))
+ return PTR_ERR(phy->regs);
+
+ phy->phy = devm_phy_create(dev, NULL, &jh7110_pcie_phy_ops);
+ if (IS_ERR(phy->phy))
+ return dev_err_probe(dev, PTR_ERR(phy->phy),
+ "Failed to map phy base\n");
+
+ phy->sys_syscon =
+ syscon_regmap_lookup_by_phandle_args(pdev->dev.of_node,
+ "starfive,sys-syscon",
+ 1, args);
+
+ if (!IS_ERR_OR_NULL(phy->sys_syscon))
+ phy->sys_phy_connect = args[0];
+ else
+ phy->sys_syscon = NULL;
+
+ phy->stg_syscon =
+ syscon_regmap_lookup_by_phandle_args(pdev->dev.of_node,
+ "starfive,stg-syscon",
+ 2, args);
+
+ if (!IS_ERR_OR_NULL(phy->stg_syscon)) {
+ phy->stg_pcie_mode = args[0];
+ phy->stg_pcie_usb = args[1];
+ } else {
+ phy->stg_syscon = NULL;
+ }
+
+ phy_kvco_gain_set(phy);
+
+ phy_set_drvdata(phy->phy, phy);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id jh7110_pcie_phy_of_match[] = {
+ { .compatible = "starfive,jh7110-pcie-phy" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, jh7110_pcie_phy_of_match);
+
+static struct platform_driver jh7110_pcie_phy_driver = {
+ .probe = jh7110_pcie_phy_probe,
+ .driver = {
+ .of_match_table = jh7110_pcie_phy_of_match,
+ .name = "jh7110-pcie-phy",
+ }
+};
+module_platform_driver(jh7110_pcie_phy_driver);
+
+MODULE_DESCRIPTION("StarFive JH7110 PCIe 2.0 PHY driver");
+MODULE_AUTHOR("Minda Chen <minda.chen@starfivetech.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/starfive/phy-jh7110-usb.c b/drivers/phy/starfive/phy-jh7110-usb.c
new file mode 100644
index 000000000000..633912f8a05d
--- /dev/null
+++ b/drivers/phy/starfive/phy-jh7110-usb.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * StarFive JH7110 USB 2.0 PHY driver
+ *
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ * Author: Minda Chen <minda.chen@starfivetech.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/usb/of.h>
+
+#define USB_125M_CLK_RATE 125000000
+#define USB_LS_KEEPALIVE_OFF 0x4
+#define USB_LS_KEEPALIVE_ENABLE BIT(4)
+
+struct jh7110_usb2_phy {
+ struct phy *phy;
+ void __iomem *regs;
+ struct clk *usb_125m_clk;
+ struct clk *app_125m;
+ enum phy_mode mode;
+};
+
+static void usb2_set_ls_keepalive(struct jh7110_usb2_phy *phy, bool set)
+{
+ unsigned int val;
+
+ /* In host mode, enable the low-speed keep-alive signal */
+ val = readl(phy->regs + USB_LS_KEEPALIVE_OFF);
+ if (set)
+ val |= USB_LS_KEEPALIVE_ENABLE;
+ else
+ val &= ~USB_LS_KEEPALIVE_ENABLE;
+
+ writel(val, phy->regs + USB_LS_KEEPALIVE_OFF);
+}
+
+static int usb2_phy_set_mode(struct phy *_phy,
+ enum phy_mode mode, int submode)
+{
+ struct jh7110_usb2_phy *phy = phy_get_drvdata(_phy);
+
+ switch (mode) {
+ case PHY_MODE_USB_HOST:
+ case PHY_MODE_USB_DEVICE:
+ case PHY_MODE_USB_OTG:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (mode != phy->mode) {
+ dev_dbg(&_phy->dev, "Changing phy to %d\n", mode);
+ phy->mode = mode;
+ usb2_set_ls_keepalive(phy, (mode != PHY_MODE_USB_DEVICE));
+ }
+
+ return 0;
+}
+
+static int jh7110_usb2_phy_init(struct phy *_phy)
+{
+ struct jh7110_usb2_phy *phy = phy_get_drvdata(_phy);
+ int ret;
+
+ ret = clk_set_rate(phy->usb_125m_clk, USB_125M_CLK_RATE);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(phy->app_125m);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int jh7110_usb2_phy_exit(struct phy *_phy)
+{
+ struct jh7110_usb2_phy *phy = phy_get_drvdata(_phy);
+
+ clk_disable_unprepare(phy->app_125m);
+
+ return 0;
+}
+
+static const struct phy_ops jh7110_usb2_phy_ops = {
+ .init = jh7110_usb2_phy_init,
+ .exit = jh7110_usb2_phy_exit,
+ .set_mode = usb2_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static int jh7110_usb_phy_probe(struct platform_device *pdev)
+{
+ struct jh7110_usb2_phy *phy;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->usb_125m_clk = devm_clk_get(dev, "125m");
+ if (IS_ERR(phy->usb_125m_clk))
+ return dev_err_probe(dev, PTR_ERR(phy->usb_125m_clk),
+ "Failed to get 125m clock\n");
+
+ phy->app_125m = devm_clk_get(dev, "app_125m");
+ if (IS_ERR(phy->app_125m))
+ return dev_err_probe(dev, PTR_ERR(phy->app_125m),
+ "Failed to get app 125m clock\n");
+
+ phy->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(phy->regs))
+ return dev_err_probe(dev, PTR_ERR(phy->regs),
+ "Failed to map phy base\n");
+
+ phy->phy = devm_phy_create(dev, NULL, &jh7110_usb2_phy_ops);
+ if (IS_ERR(phy->phy))
+ return dev_err_probe(dev, PTR_ERR(phy->phy),
+ "Failed to create phy\n");
+
+ phy_set_drvdata(phy->phy, phy);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id jh7110_usb_phy_of_match[] = {
+ { .compatible = "starfive,jh7110-usb-phy" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, jh7110_usb_phy_of_match);
+
+static struct platform_driver jh7110_usb_phy_driver = {
+ .probe = jh7110_usb_phy_probe,
+ .driver = {
+ .of_match_table = jh7110_usb_phy_of_match,
+ .name = "jh7110-usb-phy",
+ }
+};
+module_platform_driver(jh7110_usb_phy_driver);
+
+MODULE_DESCRIPTION("StarFive JH7110 USB 2.0 PHY driver");
+MODULE_AUTHOR("Minda Chen <minda.chen@starfivetech.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/sunplus/phy-sunplus-usb2.c b/drivers/phy/sunplus/phy-sunplus-usb2.c
index 56de41091d63..0efe74ac9c6a 100644
--- a/drivers/phy/sunplus/phy-sunplus-usb2.c
+++ b/drivers/phy/sunplus/phy-sunplus-usb2.c
@@ -16,7 +16,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/phy/tegra/phy-tegra194-p2u.c b/drivers/phy/tegra/phy-tegra194-p2u.c
index 633e6b747275..f49b417c9eb6 100644
--- a/drivers/phy/tegra/phy-tegra194-p2u.c
+++ b/drivers/phy/tegra/phy-tegra194-p2u.c
@@ -11,8 +11,8 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
#define P2U_CONTROL_CMN 0x74
#define P2U_CONTROL_CMN_ENABLE_L2_EXIT_RATE_CHANGE BIT(13)
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index a296b87dced1..142ebe0247cc 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -8,7 +8,7 @@
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/phy/tegra/xusb.h>
#include <linux/platform_device.h>
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index 6286cf25a426..555b323f45da 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -465,9 +465,12 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
priv->regmap = syscon_node_to_regmap(node->parent);
if (IS_ERR(priv->regmap)) {
- ret = PTR_ERR(priv->regmap);
- dev_err(dev, "Failed to get syscon %d\n", ret);
- return ret;
+ priv->regmap = device_node_to_regmap(node);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(dev, "Failed to get syscon %d\n", ret);
+ return ret;
+ }
}
ret = phy_gmii_sel_init_ports(priv);
diff --git a/drivers/phy/ti/phy-tusb1210.c b/drivers/phy/ti/phy-tusb1210.c
index 669c13d6e402..b4881cb34475 100644
--- a/drivers/phy/ti/phy-tusb1210.c
+++ b/drivers/phy/ti/phy-tusb1210.c
@@ -14,6 +14,7 @@
#include <linux/gpio/consumer.h>
#include <linux/phy/ulpi_phy.h>
#include <linux/power_supply.h>
+#include <linux/property.h>
#include <linux/workqueue.h>
#define TUSB1211_POWER_CONTROL 0x3d
diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
index da50732625d1..6b265992d988 100644
--- a/drivers/phy/ti/phy-twl4030-usb.c
+++ b/drivers/phy/ti/phy-twl4030-usb.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/io.h>
diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
index 8833680923a1..2559c6594cea 100644
--- a/drivers/phy/xilinx/phy-zynqmp.c
+++ b/drivers/phy/xilinx/phy-zynqmp.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <dt-bindings/phy/phy.h>
@@ -571,6 +572,10 @@ static int xpsgtr_phy_init(struct phy *phy)
mutex_lock(&gtr_dev->gtr_mutex);
+ /* Configure and enable the lane clock when phy_init() is called */
+ if (clk_prepare_enable(gtr_dev->clk[gtr_phy->lane]))
+ goto out;
+
/* Skip initialization if not required. */
if (!xpsgtr_phy_init_required(gtr_phy))
goto out;
@@ -615,9 +620,13 @@ out:
static int xpsgtr_phy_exit(struct phy *phy)
{
struct xpsgtr_phy *gtr_phy = phy_get_drvdata(phy);
+ struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
gtr_phy->skip_phy_init = false;
+ /* Disable only the clock that was enabled for this lane */
+ clk_disable_unprepare(gtr_dev->clk[gtr_phy->lane]);
+
return 0;
}
@@ -820,34 +829,23 @@ static struct phy *xpsgtr_xlate(struct device *dev,
* Power Management
*/
-static int __maybe_unused xpsgtr_suspend(struct device *dev)
+static int xpsgtr_runtime_suspend(struct device *dev)
{
struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
- unsigned int i;
/* Save the snapshot ICM_CFG registers. */
gtr_dev->saved_icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
gtr_dev->saved_icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
- for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++)
- clk_disable_unprepare(gtr_dev->clk[i]);
-
return 0;
}
-static int __maybe_unused xpsgtr_resume(struct device *dev)
+static int xpsgtr_runtime_resume(struct device *dev)
{
struct xpsgtr_dev *gtr_dev = dev_get_drvdata(dev);
unsigned int icm_cfg0, icm_cfg1;
unsigned int i;
bool skip_phy_init;
- int err;
-
- for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++) {
- err = clk_prepare_enable(gtr_dev->clk[i]);
- if (err)
- goto err_clk_put;
- }
icm_cfg0 = xpsgtr_read(gtr_dev, ICM_CFG0);
icm_cfg1 = xpsgtr_read(gtr_dev, ICM_CFG1);
@@ -868,18 +866,10 @@ static int __maybe_unused xpsgtr_resume(struct device *dev)
gtr_dev->phys[i].skip_phy_init = skip_phy_init;
return 0;
-
-err_clk_put:
- while (i--)
- clk_disable_unprepare(gtr_dev->clk[i]);
-
- return err;
}
-static const struct dev_pm_ops xpsgtr_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(xpsgtr_suspend, xpsgtr_resume)
-};
-
+static DEFINE_RUNTIME_DEV_PM_OPS(xpsgtr_pm_ops, xpsgtr_runtime_suspend,
+ xpsgtr_runtime_resume, NULL);
/*
* Probe & Platform Driver
*/
@@ -887,7 +877,6 @@ static const struct dev_pm_ops xpsgtr_pm_ops = {
static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
{
unsigned int refclk;
- int ret;
for (refclk = 0; refclk < ARRAY_SIZE(gtr_dev->refclk_sscs); ++refclk) {
unsigned long rate;
@@ -898,19 +887,14 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
snprintf(name, sizeof(name), "ref%u", refclk);
clk = devm_clk_get_optional(gtr_dev->dev, name);
if (IS_ERR(clk)) {
- ret = dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
- "Failed to get reference clock %u\n",
- refclk);
- goto err_clk_put;
+ return dev_err_probe(gtr_dev->dev, PTR_ERR(clk),
+ "Failed to get ref clock %u\n",
+ refclk);
}
if (!clk)
continue;
- ret = clk_prepare_enable(clk);
- if (ret)
- goto err_clk_put;
-
gtr_dev->clk[refclk] = clk;
/*
@@ -920,7 +904,10 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
rate = clk_get_rate(clk);
for (i = 0 ; i < ARRAY_SIZE(ssc_lookup); i++) {
- if (rate == ssc_lookup[i].refclk_rate) {
+ /* Allow an error of 100 ppm */
+ unsigned long error = ssc_lookup[i].refclk_rate / 10000;
+
+ if (abs(rate - ssc_lookup[i].refclk_rate) < error) {
gtr_dev->refclk_sscs[refclk] = &ssc_lookup[i];
break;
}
@@ -930,18 +917,11 @@ static int xpsgtr_get_ref_clocks(struct xpsgtr_dev *gtr_dev)
dev_err(gtr_dev->dev,
"Invalid rate %lu for reference clock %u\n",
rate, refclk);
- ret = -EINVAL;
- goto err_clk_put;
+ return -EINVAL;
}
}
return 0;
-
-err_clk_put:
- while (refclk--)
- clk_disable_unprepare(gtr_dev->clk[refclk]);
-
- return ret;
}
static int xpsgtr_probe(struct platform_device *pdev)
@@ -950,7 +930,6 @@ static int xpsgtr_probe(struct platform_device *pdev)
struct xpsgtr_dev *gtr_dev;
struct phy_provider *provider;
unsigned int port;
- unsigned int i;
int ret;
gtr_dev = devm_kzalloc(&pdev->dev, sizeof(*gtr_dev), GFP_KERNEL);
@@ -990,8 +969,7 @@ static int xpsgtr_probe(struct platform_device *pdev)
phy = devm_phy_create(&pdev->dev, np, &xpsgtr_phyops);
if (IS_ERR(phy)) {
dev_err(&pdev->dev, "failed to create PHY\n");
- ret = PTR_ERR(phy);
- goto err_clk_put;
+ return PTR_ERR(phy);
}
gtr_phy->phy = phy;
@@ -1002,16 +980,30 @@ static int xpsgtr_probe(struct platform_device *pdev)
provider = devm_of_phy_provider_register(&pdev->dev, xpsgtr_xlate);
if (IS_ERR(provider)) {
dev_err(&pdev->dev, "registering provider failed\n");
- ret = PTR_ERR(provider);
- goto err_clk_put;
+ return PTR_ERR(provider);
}
+
+ pm_runtime_set_active(gtr_dev->dev);
+ pm_runtime_enable(gtr_dev->dev);
+
+ ret = pm_runtime_resume_and_get(gtr_dev->dev);
+ if (ret < 0) {
+ pm_runtime_disable(gtr_dev->dev);
+ return ret;
+ }
+
return 0;
+}
-err_clk_put:
- for (i = 0; i < ARRAY_SIZE(gtr_dev->clk); i++)
- clk_disable_unprepare(gtr_dev->clk[i]);
+static int xpsgtr_remove(struct platform_device *pdev)
+{
+ struct xpsgtr_dev *gtr_dev = platform_get_drvdata(pdev);
- return ret;
+ pm_runtime_disable(gtr_dev->dev);
+ pm_runtime_put_noidle(gtr_dev->dev);
+ pm_runtime_set_suspended(gtr_dev->dev);
+
+ return 0;
}
static const struct of_device_id xpsgtr_of_match[] = {
@@ -1023,10 +1015,11 @@ MODULE_DEVICE_TABLE(of, xpsgtr_of_match);
static struct platform_driver xpsgtr_driver = {
.probe = xpsgtr_probe,
+ .remove = xpsgtr_remove,
.driver = {
.name = "xilinx-psgtr",
.of_match_table = xpsgtr_of_match,
- .pm = &xpsgtr_pm_ops,
+ .pm = pm_ptr(&xpsgtr_pm_ops),
},
};
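
The xpsgtr_get_ref_clocks() change above replaces the exact reference-clock rate
match with a 100 ppm tolerance. A minimal sketch of that comparison, using
refclk_rate_matches() and ssc_entry_rate as illustrative names rather than anything
taken from the driver:

    #include <linux/types.h>

    /* True when rate is within 100 ppm of the nominal rate from the SSC table. */
    static bool refclk_rate_matches(unsigned long rate, unsigned long ssc_entry_rate)
    {
        unsigned long error = ssc_entry_rate / 10000;   /* 100 ppm of nominal */
        unsigned long diff = rate > ssc_entry_rate ? rate - ssc_entry_rate
                                                   : ssc_entry_rate - rate;

        return diff < error;
    }
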
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index 8b7949220382..5d36fbc75e1b 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/slab.h>
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 25f9767c28e8..d0b4d3fc40ed 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -406,6 +406,27 @@ static int cros_typec_usb_safe_state(struct cros_typec_port *port)
return ret;
}
+/**
+ * cros_typec_get_cable_vdo() - Get Cable VDO of the connected cable
+ * @port: Type-C port data
+ * @svid: Standard or Vendor ID to match
+ *
+ * Returns the Cable VDO if a match is found, or 0 if no match is found.
+ */
+static int cros_typec_get_cable_vdo(struct cros_typec_port *port, u16 svid)
+{
+ struct list_head *head = &port->plug_mode_list;
+ struct cros_typec_altmode_node *node;
+ u32 ret = 0;
+
+ list_for_each_entry(node, head, list) {
+ if (node->amode->svid == svid)
+ return node->amode->vdo;
+ }
+
+ return ret;
+}
+
/*
* Spoof the VDOs that were likely communicated by the partner for TBT alt
* mode.
@@ -432,6 +453,9 @@ static int cros_typec_enable_tbt(struct cros_typec_data *typec,
/* Cable Discover Mode VDO */
data.cable_mode = TBT_MODE;
+
+ data.cable_mode |= cros_typec_get_cable_vdo(port, USB_TYPEC_TBT_SID);
+
data.cable_mode |= TBT_SET_CABLE_SPEED(pd_ctrl->cable_speed);
if (pd_ctrl->control_flags & USB_PD_CTRL_OPTICAL_CABLE)
@@ -522,8 +546,10 @@ static int cros_typec_enable_usb4(struct cros_typec_data *typec,
/* Cable Type */
if (pd_ctrl->control_flags & USB_PD_CTRL_OPTICAL_CABLE)
data.eudo |= EUDO_CABLE_TYPE_OPTICAL << EUDO_CABLE_TYPE_SHIFT;
- else if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_CABLE)
+ else if (cros_typec_get_cable_vdo(port, USB_TYPEC_TBT_SID) & TBT_CABLE_RETIMER)
data.eudo |= EUDO_CABLE_TYPE_RE_TIMER << EUDO_CABLE_TYPE_SHIFT;
+ else if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_CABLE)
+ data.eudo |= EUDO_CABLE_TYPE_RE_DRIVER << EUDO_CABLE_TYPE_SHIFT;
data.active_link_training = !!(pd_ctrl->control_flags &
USB_PD_CTRL_ACTIVE_LINK_UNIDIR);
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.c b/drivers/platform/mellanox/mlxbf-bootctl.c
index fb9f7815c6cd..4ee7bb431b7c 100644
--- a/drivers/platform/mellanox/mlxbf-bootctl.c
+++ b/drivers/platform/mellanox/mlxbf-bootctl.c
@@ -11,6 +11,7 @@
#include <linux/acpi.h>
#include <linux/arm-smccc.h>
#include <linux/delay.h>
+#include <linux/if_ether.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -79,6 +80,52 @@ static void __iomem *mlxbf_rsh_scratch_buf_data;
static const char * const mlxbf_rsh_log_level[] = {
"INFO", "WARN", "ERR", "ASSERT"};
+static DEFINE_MUTEX(icm_ops_lock);
+static DEFINE_MUTEX(os_up_lock);
+static DEFINE_MUTEX(mfg_ops_lock);
+
+/*
+ * Objects are stored within the MFG partition per type.
+ * Type 0 is not supported.
+ */
+enum {
+ MLNX_MFG_TYPE_OOB_MAC = 1,
+ MLNX_MFG_TYPE_OPN_0,
+ MLNX_MFG_TYPE_OPN_1,
+ MLNX_MFG_TYPE_OPN_2,
+ MLNX_MFG_TYPE_SKU_0,
+ MLNX_MFG_TYPE_SKU_1,
+ MLNX_MFG_TYPE_SKU_2,
+ MLNX_MFG_TYPE_MODL_0,
+ MLNX_MFG_TYPE_MODL_1,
+ MLNX_MFG_TYPE_MODL_2,
+ MLNX_MFG_TYPE_SN_0,
+ MLNX_MFG_TYPE_SN_1,
+ MLNX_MFG_TYPE_SN_2,
+ MLNX_MFG_TYPE_UUID_0,
+ MLNX_MFG_TYPE_UUID_1,
+ MLNX_MFG_TYPE_UUID_2,
+ MLNX_MFG_TYPE_UUID_3,
+ MLNX_MFG_TYPE_UUID_4,
+ MLNX_MFG_TYPE_REV,
+};
+
+#define MLNX_MFG_OPN_VAL_LEN 24
+#define MLNX_MFG_SKU_VAL_LEN 24
+#define MLNX_MFG_MODL_VAL_LEN 24
+#define MLNX_MFG_SN_VAL_LEN 24
+#define MLNX_MFG_UUID_VAL_LEN 40
+#define MLNX_MFG_REV_VAL_LEN 8
+#define MLNX_MFG_VAL_QWORD_CNT(type) \
+ (MLNX_MFG_##type##_VAL_LEN / sizeof(u64))
+
+/*
+ * The MAC address consists of 6 bytes (2 hex digits each) separated by ':'.
+ * The expected format is: "XX:XX:XX:XX:XX:XX"
+ */
+#define MLNX_MFG_OOB_MAC_FORMAT_LEN \
+ ((ETH_ALEN * 2) + (ETH_ALEN - 1))
+
/* ARM SMC call which is atomic and no need for lock. */
static int mlxbf_bootctl_smc(unsigned int smc_op, int smc_arg)
{
@@ -391,6 +438,444 @@ done:
return count;
}
+static ssize_t large_icm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_smccc_res res;
+
+ mutex_lock(&icm_ops_lock);
+ arm_smccc_smc(MLNX_HANDLE_GET_ICM_INFO, 0, 0, 0, 0,
+ 0, 0, 0, &res);
+ mutex_unlock(&icm_ops_lock);
+ if (res.a0)
+ return -EPERM;
+
+ return snprintf(buf, PAGE_SIZE, "0x%lx", res.a1);
+}
+
+static ssize_t large_icm_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct arm_smccc_res res;
+ unsigned long icm_data;
+ int err;
+
+ err = kstrtoul(buf, MLXBF_LARGE_ICMC_MAX_STRING_SIZE, &icm_data);
+ if (err)
+ return err;
+
+ if ((icm_data != 0 && icm_data < MLXBF_LARGE_ICMC_SIZE_MIN) ||
+ icm_data > MLXBF_LARGE_ICMC_SIZE_MAX || icm_data % MLXBF_LARGE_ICMC_GRANULARITY)
+ return -EPERM;
+
+ mutex_lock(&icm_ops_lock);
+ arm_smccc_smc(MLNX_HANDLE_SET_ICM_INFO, icm_data, 0, 0, 0, 0, 0, 0, &res);
+ mutex_unlock(&icm_ops_lock);
+
+ return res.a0 ? -EPERM : count;
+}
+
+static ssize_t os_up_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct arm_smccc_res res;
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ if (val != 1)
+ return -EINVAL;
+
+ mutex_lock(&os_up_lock);
+ arm_smccc_smc(MLNX_HANDLE_OS_UP, 0, 0, 0, 0, 0, 0, 0, &res);
+ mutex_unlock(&os_up_lock);
+
+ return count;
+}
+
+static ssize_t oob_mac_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct arm_smccc_res res;
+ u8 *mac_byte_ptr;
+
+ mutex_lock(&mfg_ops_lock);
+ arm_smccc_smc(MLXBF_BOOTCTL_GET_MFG_INFO, MLNX_MFG_TYPE_OOB_MAC, 0, 0, 0,
+ 0, 0, 0, &res);
+ mutex_unlock(&mfg_ops_lock);
+ if (res.a0)
+ return -EPERM;
+
+ mac_byte_ptr = (u8 *)&res.a1;
+
+ return sysfs_format_mac(buf, mac_byte_ptr, ETH_ALEN);
+}
+
+static ssize_t oob_mac_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int byte[MLNX_MFG_OOB_MAC_FORMAT_LEN] = { 0 };
+ struct arm_smccc_res res;
+ int byte_idx, len;
+ u64 mac_addr = 0;
+ u8 *mac_byte_ptr;
+
+ if ((count - 1) != MLNX_MFG_OOB_MAC_FORMAT_LEN)
+ return -EINVAL;
+
+ len = sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
+ &byte[0], &byte[1], &byte[2],
+ &byte[3], &byte[4], &byte[5]);
+ if (len != ETH_ALEN)
+ return -EINVAL;
+
+ mac_byte_ptr = (u8 *)&mac_addr;
+
+ for (byte_idx = 0; byte_idx < ETH_ALEN; byte_idx++)
+ mac_byte_ptr[byte_idx] = (u8)byte[byte_idx];
+
+ mutex_lock(&mfg_ops_lock);
+ arm_smccc_smc(MLXBF_BOOTCTL_SET_MFG_INFO, MLNX_MFG_TYPE_OOB_MAC,
+ ETH_ALEN, mac_addr, 0, 0, 0, 0, &res);
+ mutex_unlock(&mfg_ops_lock);
+
+ return res.a0 ? -EPERM : count;
+}
+
+static ssize_t opn_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 opn_data[MLNX_MFG_VAL_QWORD_CNT(OPN) + 1] = { 0 };
+ struct arm_smccc_res res;
+ int word;
+
+ mutex_lock(&mfg_ops_lock);
+ for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(OPN); word++) {
+ arm_smccc_smc(MLXBF_BOOTCTL_GET_MFG_INFO,
+ MLNX_MFG_TYPE_OPN_0 + word,
+ 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ mutex_unlock(&mfg_ops_lock);
+ return -EPERM;
+ }
+ opn_data[word] = res.a1;
+ }
+ mutex_unlock(&mfg_ops_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%s", (char *)opn_data);
+}
+
+static ssize_t opn_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u64 opn[MLNX_MFG_VAL_QWORD_CNT(OPN)] = { 0 };
+ struct arm_smccc_res res;
+ int word;
+
+ if (count > MLNX_MFG_OPN_VAL_LEN)
+ return -EINVAL;
+
+ memcpy(opn, buf, count);
+
+ mutex_lock(&mfg_ops_lock);
+ for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(OPN); word++) {
+ arm_smccc_smc(MLXBF_BOOTCTL_SET_MFG_INFO,
+ MLNX_MFG_TYPE_OPN_0 + word,
+ sizeof(u64), opn[word], 0, 0, 0, 0, &res);
+ if (res.a0) {
+ mutex_unlock(&mfg_ops_lock);
+ return -EPERM;
+ }
+ }
+ mutex_unlock(&mfg_ops_lock);
+
+ return count;
+}
+
+static ssize_t sku_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 sku_data[MLNX_MFG_VAL_QWORD_CNT(SKU) + 1] = { 0 };
+ struct arm_smccc_res res;
+ int word;
+
+ mutex_lock(&mfg_ops_lock);
+ for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(SKU); word++) {
+ arm_smccc_smc(MLXBF_BOOTCTL_GET_MFG_INFO,
+ MLNX_MFG_TYPE_SKU_0 + word,
+ 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ mutex_unlock(&mfg_ops_lock);
+ return -EPERM;
+ }
+ sku_data[word] = res.a1;
+ }
+ mutex_unlock(&mfg_ops_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%s", (char *)sku_data);
+}
+
+static ssize_t sku_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u64 sku[MLNX_MFG_VAL_QWORD_CNT(SKU)] = { 0 };
+ struct arm_smccc_res res;
+ int word;
+
+ if (count > MLNX_MFG_SKU_VAL_LEN)
+ return -EINVAL;
+
+ memcpy(sku, buf, count);
+
+ mutex_lock(&mfg_ops_lock);
+ for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(SKU); word++) {
+ arm_smccc_smc(MLXBF_BOOTCTL_SET_MFG_INFO,
+ MLNX_MFG_TYPE_SKU_0 + word,
+ sizeof(u64), sku[word], 0, 0, 0, 0, &res);
+ if (res.a0) {
+ mutex_unlock(&mfg_ops_lock);
+ return -EPERM;
+ }
+ }
+ mutex_unlock(&mfg_ops_lock);
+
+ return count;
+}
+
+static ssize_t modl_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 modl_data[MLNX_MFG_VAL_QWORD_CNT(MODL) + 1] = { 0 };
+ struct arm_smccc_res res;
+ int word;
+
+ mutex_lock(&mfg_ops_lock);
+ for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(MODL); word++) {
+ arm_smccc_smc(MLXBF_BOOTCTL_GET_MFG_INFO,
+ MLNX_MFG_TYPE_MODL_0 + word,
+ 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ mutex_unlock(&mfg_ops_lock);
+ return -EPERM;
+ }
+ modl_data[word] = res.a1;
+ }
+ mutex_unlock(&mfg_ops_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%s", (char *)modl_data);
+}
+
+static ssize_t modl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u64 modl[MLNX_MFG_VAL_QWORD_CNT(MODL)] = { 0 };
+ struct arm_smccc_res res;
+ int word;
+
+ if (count > MLNX_MFG_MODL_VAL_LEN)
+ return -EINVAL;
+
+ memcpy(modl, buf, count);
+
+ mutex_lock(&mfg_ops_lock);
+ for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(MODL); word++) {
+ arm_smccc_smc(MLXBF_BOOTCTL_SET_MFG_INFO,
+ MLNX_MFG_TYPE_MODL_0 + word,
+ sizeof(u64), modl[word], 0, 0, 0, 0, &res);
+ if (res.a0) {
+ mutex_unlock(&mfg_ops_lock);
+ return -EPERM;
+ }
+ }
+ mutex_unlock(&mfg_ops_lock);
+
+ return count;
+}
+
+static ssize_t sn_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 sn_data[MLNX_MFG_VAL_QWORD_CNT(SN) + 1] = { 0 };
+ struct arm_smccc_res res;
+ int word;
+
+ mutex_lock(&mfg_ops_lock);
+ for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(SN); word++) {
+ arm_smccc_smc(MLXBF_BOOTCTL_GET_MFG_INFO,
+ MLNX_MFG_TYPE_SN_0 + word,
+ 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ mutex_unlock(&mfg_ops_lock);
+ return -EPERM;
+ }
+ sn_data[word] = res.a1;
+ }
+ mutex_unlock(&mfg_ops_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%s", (char *)sn_data);
+}
+
+static ssize_t sn_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u64 sn[MLNX_MFG_VAL_QWORD_CNT(SN)] = { 0 };
+ struct arm_smccc_res res;
+ int word;
+
+ if (count > MLNX_MFG_SN_VAL_LEN)
+ return -EINVAL;
+
+ memcpy(sn, buf, count);
+
+ mutex_lock(&mfg_ops_lock);
+ for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(SN); word++) {
+ arm_smccc_smc(MLXBF_BOOTCTL_SET_MFG_INFO,
+ MLNX_MFG_TYPE_SN_0 + word,
+ sizeof(u64), sn[word], 0, 0, 0, 0, &res);
+ if (res.a0) {
+ mutex_unlock(&mfg_ops_lock);
+ return -EPERM;
+ }
+ }
+ mutex_unlock(&mfg_ops_lock);
+
+ return count;
+}
+
+static ssize_t uuid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 uuid_data[MLNX_MFG_VAL_QWORD_CNT(UUID) + 1] = { 0 };
+ struct arm_smccc_res res;
+ int word;
+
+ mutex_lock(&mfg_ops_lock);
+ for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(UUID); word++) {
+ arm_smccc_smc(MLXBF_BOOTCTL_GET_MFG_INFO,
+ MLNX_MFG_TYPE_UUID_0 + word,
+ 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ mutex_unlock(&mfg_ops_lock);
+ return -EPERM;
+ }
+ uuid_data[word] = res.a1;
+ }
+ mutex_unlock(&mfg_ops_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%s", (char *)uuid_data);
+}
+
+static ssize_t uuid_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u64 uuid[MLNX_MFG_VAL_QWORD_CNT(UUID)] = { 0 };
+ struct arm_smccc_res res;
+ int word;
+
+ if (count > MLNX_MFG_UUID_VAL_LEN)
+ return -EINVAL;
+
+ memcpy(uuid, buf, count);
+
+ mutex_lock(&mfg_ops_lock);
+ for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(UUID); word++) {
+ arm_smccc_smc(MLXBF_BOOTCTL_SET_MFG_INFO,
+ MLNX_MFG_TYPE_UUID_0 + word,
+ sizeof(u64), uuid[word], 0, 0, 0, 0, &res);
+ if (res.a0) {
+ mutex_unlock(&mfg_ops_lock);
+ return -EPERM;
+ }
+ }
+ mutex_unlock(&mfg_ops_lock);
+
+ return count;
+}
+
+static ssize_t rev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u64 rev_data[MLNX_MFG_VAL_QWORD_CNT(REV) + 1] = { 0 };
+ struct arm_smccc_res res;
+ int word;
+
+ mutex_lock(&mfg_ops_lock);
+ for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(REV); word++) {
+ arm_smccc_smc(MLXBF_BOOTCTL_GET_MFG_INFO,
+ MLNX_MFG_TYPE_REV + word,
+ 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0) {
+ mutex_unlock(&mfg_ops_lock);
+ return -EPERM;
+ }
+ rev_data[word] = res.a1;
+ }
+ mutex_unlock(&mfg_ops_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%s", (char *)rev_data);
+}
+
+static ssize_t rev_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ u64 rev[MLNX_MFG_VAL_QWORD_CNT(REV)] = { 0 };
+ struct arm_smccc_res res;
+ int word;
+
+ if (count > MLNX_MFG_REV_VAL_LEN)
+ return -EINVAL;
+
+ memcpy(rev, buf, count);
+
+ mutex_lock(&mfg_ops_lock);
+ for (word = 0; word < MLNX_MFG_VAL_QWORD_CNT(REV); word++) {
+ arm_smccc_smc(MLXBF_BOOTCTL_SET_MFG_INFO,
+ MLNX_MFG_TYPE_REV + word,
+ sizeof(u64), rev[word], 0, 0, 0, 0, &res);
+ if (res.a0) {
+ mutex_unlock(&mfg_ops_lock);
+ return -EPERM;
+ }
+ }
+ mutex_unlock(&mfg_ops_lock);
+
+ return count;
+}
+
+static ssize_t mfg_lock_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct arm_smccc_res res;
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ if (val != 1)
+ return -EINVAL;
+
+ mutex_lock(&mfg_ops_lock);
+ arm_smccc_smc(MLXBF_BOOTCTL_LOCK_MFG_INFO, 0, 0, 0, 0, 0, 0, 0, &res);
+ mutex_unlock(&mfg_ops_lock);
+
+ return count;
+}
+
static DEVICE_ATTR_RW(post_reset_wdog);
static DEVICE_ATTR_RW(reset_action);
static DEVICE_ATTR_RW(second_reset_action);
@@ -398,6 +883,16 @@ static DEVICE_ATTR_RO(lifecycle_state);
static DEVICE_ATTR_RO(secure_boot_fuse_state);
static DEVICE_ATTR_WO(fw_reset);
static DEVICE_ATTR_WO(rsh_log);
+static DEVICE_ATTR_RW(large_icm);
+static DEVICE_ATTR_WO(os_up);
+static DEVICE_ATTR_RW(oob_mac);
+static DEVICE_ATTR_RW(opn);
+static DEVICE_ATTR_RW(sku);
+static DEVICE_ATTR_RW(modl);
+static DEVICE_ATTR_RW(sn);
+static DEVICE_ATTR_RW(uuid);
+static DEVICE_ATTR_RW(rev);
+static DEVICE_ATTR_WO(mfg_lock);
static struct attribute *mlxbf_bootctl_attrs[] = {
&dev_attr_post_reset_wdog.attr,
@@ -407,6 +902,16 @@ static struct attribute *mlxbf_bootctl_attrs[] = {
&dev_attr_secure_boot_fuse_state.attr,
&dev_attr_fw_reset.attr,
&dev_attr_rsh_log.attr,
+ &dev_attr_large_icm.attr,
+ &dev_attr_os_up.attr,
+ &dev_attr_oob_mac.attr,
+ &dev_attr_opn.attr,
+ &dev_attr_sku.attr,
+ &dev_attr_modl.attr,
+ &dev_attr_sn.attr,
+ &dev_attr_uuid.attr,
+ &dev_attr_rev.attr,
+ &dev_attr_mfg_lock.attr,
NULL
};
diff --git a/drivers/platform/mellanox/mlxbf-bootctl.h b/drivers/platform/mellanox/mlxbf-bootctl.h
index b48243f60a59..1299750a8661 100644
--- a/drivers/platform/mellanox/mlxbf-bootctl.h
+++ b/drivers/platform/mellanox/mlxbf-bootctl.h
@@ -81,6 +81,28 @@
*/
#define MLXBF_BOOTCTL_FW_RESET 0x8200000D
+/*
+ * SMC function IDs to set, get and lock the manufacturing information
+ * stored within the eeprom.
+ */
+#define MLXBF_BOOTCTL_SET_MFG_INFO 0x8200000E
+#define MLXBF_BOOTCTL_GET_MFG_INFO 0x8200000F
+#define MLXBF_BOOTCTL_LOCK_MFG_INFO 0x82000011
+
+/*
+ * SMC function IDs to set and get the large ICM carveout size
+ * stored in the eeprom.
+ */
+#define MLNX_HANDLE_SET_ICM_INFO 0x82000012
+#define MLNX_HANDLE_GET_ICM_INFO 0x82000013
+
+#define MAX_ICM_BUFFER_SIZE 10
+
+/*
+ * SMC function ID to set the ARM boot state to up
+ */
+#define MLNX_HANDLE_OS_UP 0x82000014
+
/* SMC function IDs for SiP Service queries */
#define MLXBF_BOOTCTL_SIP_SVC_CALL_COUNT 0x8200ff00
#define MLXBF_BOOTCTL_SIP_SVC_UID 0x8200ff01
@@ -106,4 +128,9 @@
/* Additional value to disable the MLXBF_BOOTCTL_SET_SECOND_RESET_ACTION. */
#define MLXBF_BOOTCTL_NONE 0x7fffffff /* Don't change next boot action */
+#define MLXBF_LARGE_ICMC_MAX_STRING_SIZE 16
+#define MLXBF_LARGE_ICMC_SIZE_MIN 0x80
+#define MLXBF_LARGE_ICMC_SIZE_MAX 0x100000
+#define MLXBF_LARGE_ICMC_GRANULARITY MLXBF_LARGE_ICMC_SIZE_MIN
+
#endif /* __MLXBF_BOOTCTL_H__ */
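
The manufacturing-info store handlers added to mlxbf-bootctl.c above hand each field
to the SMC interface in sizeof(u64) chunks, as counted by MLNX_MFG_VAL_QWORD_CNT().
A minimal sketch of that packing for the 24-byte OPN field, with pack_opn() and the
local macro names used purely for illustration:

    #include <linux/string.h>
    #include <linux/types.h>

    #define OPN_VAL_LEN 24                              /* MLNX_MFG_OPN_VAL_LEN */
    #define OPN_QWORDS  (OPN_VAL_LEN / sizeof(u64))     /* 3 SMC data arguments */

    /* Zero-pad the user string and split it into u64 words for the SMC calls. */
    static void pack_opn(const char *buf, size_t count, u64 opn[OPN_QWORDS])
    {
        /* opn_store() has already rejected count > OPN_VAL_LEN. */
        memset(opn, 0, OPN_QWORDS * sizeof(u64));
        memcpy(opn, buf, count);
    }

By the same arithmetic, MLNX_MFG_OOB_MAC_FORMAT_LEN evaluates to (6 * 2) + 5 = 17
characters, the length of "XX:XX:XX:XX:XX:XX".
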
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index b7dcc64cd238..eb5ad35274dd 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -12,7 +12,6 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
@@ -113,7 +112,7 @@ static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
* Return if adapter number is negative. It could be in case hotplug
* event is not associated with hotplug device.
*/
- if (data->hpdev.nr < 0)
+ if (data->hpdev.nr < 0 && data->hpdev.action != MLXREG_HOTPLUG_DEVICE_NO_ACTION)
return 0;
pdata = dev_get_platdata(&priv->pdev->dev);
diff --git a/drivers/platform/mellanox/mlxreg-io.c b/drivers/platform/mellanox/mlxreg-io.c
index ddc08abf398c..83ba037408cd 100644
--- a/drivers/platform/mellanox/mlxreg-io.c
+++ b/drivers/platform/mellanox/mlxreg-io.c
@@ -11,7 +11,6 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c
index 7b9c107c17ce..75b699676ca6 100644
--- a/drivers/platform/mellanox/nvsw-sn2201.c
+++ b/drivers/platform/mellanox/nvsw-sn2201.c
@@ -84,6 +84,10 @@
#define NVSW_SN2201_MAIN_MUX_CH5_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 5)
#define NVSW_SN2201_MAIN_MUX_CH6_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 6)
#define NVSW_SN2201_MAIN_MUX_CH7_NR (NVSW_SN2201_MAIN_MUX_CH0_NR + 7)
+#define NVSW_SN2201_2ND_MUX_CH0_NR (NVSW_SN2201_MAIN_MUX_CH7_NR + 1)
+#define NVSW_SN2201_2ND_MUX_CH1_NR (NVSW_SN2201_MAIN_MUX_CH7_NR + 2)
+#define NVSW_SN2201_2ND_MUX_CH2_NR (NVSW_SN2201_MAIN_MUX_CH7_NR + 3)
+#define NVSW_SN2201_2ND_MUX_CH3_NR (NVSW_SN2201_MAIN_MUX_CH7_NR + 4)
#define NVSW_SN2201_CPLD_NR NVSW_SN2201_MAIN_MUX_CH0_NR
#define NVSW_SN2201_NR_NONE -1
@@ -425,28 +429,28 @@ static struct mlxreg_core_data nvsw_sn2201_fan_items_data[] = {
.reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET,
.mask = BIT(0),
.hpdev.brdinfo = &nvsw_sn2201_fan_devices[0],
- .hpdev.nr = NVSW_SN2201_NR_NONE,
+ .hpdev.nr = NVSW_SN2201_2ND_MUX_CH0_NR,
},
{
.label = "fan2",
.reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET,
.mask = BIT(1),
.hpdev.brdinfo = &nvsw_sn2201_fan_devices[1],
- .hpdev.nr = NVSW_SN2201_NR_NONE,
+ .hpdev.nr = NVSW_SN2201_2ND_MUX_CH1_NR,
},
{
.label = "fan3",
.reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET,
.mask = BIT(2),
.hpdev.brdinfo = &nvsw_sn2201_fan_devices[2],
- .hpdev.nr = NVSW_SN2201_NR_NONE,
+ .hpdev.nr = NVSW_SN2201_2ND_MUX_CH2_NR,
},
{
.label = "fan4",
.reg = NVSW_SN2201_FAN_PRSNT_STATUS_OFFSET,
.mask = BIT(3),
.hpdev.brdinfo = &nvsw_sn2201_fan_devices[3],
- .hpdev.nr = NVSW_SN2201_NR_NONE,
+ .hpdev.nr = NVSW_SN2201_2ND_MUX_CH3_NR,
},
};
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 49c2c4cd8d00..2a1070543391 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -965,7 +965,7 @@ config SERIAL_MULTI_INSTANTIATE
config MLX_PLATFORM
tristate "Mellanox Technologies platform support"
- depends on I2C
+ depends on ACPI && I2C && PCI
select REGMAP
help
This option enables system support for the Mellanox Technologies
@@ -1074,17 +1074,7 @@ config INTEL_SCU_IPC_UTIL
low level access for debug work and updating the firmware. Say
N unless you will be doing this on an Intel MID platform.
-config SIEMENS_SIMATIC_IPC
- tristate "Siemens Simatic IPC Class driver"
- depends on PCI
- help
- This Simatic IPC class driver is the central of several drivers. It
- is mainly used for system identification, after which drivers in other
- classes will take care of driving specifics of those machines.
- i.e. LEDs and watchdog.
-
- To compile this driver as a module, choose M here: the module
- will be called simatic-ipc.
+source "drivers/platform/x86/siemens/Kconfig"
config WINMATE_FM07_KEYS
tristate "Winmate FM07/FM07P front-panel keys driver"
@@ -1094,10 +1084,25 @@ config WINMATE_FM07_KEYS
buttons below the display. This module adds an input device
that delivers key events when these buttons are pressed.
+config SEL3350_PLATFORM
+ tristate "SEL-3350 LEDs and power supplies"
+ depends on ACPI
+ depends on GPIOLIB
+ depends on PINCTRL_BROXTON
+ select POWER_SUPPLY
+ select NEW_LEDS
+ select LEDS_CLASS
+ select LEDS_GPIO
+ help
+ Support for LEDs and power supplies on SEL-3350 computers.
+
+ To compile this driver as a module, choose M here: the module
+ will be called sel3350-platform.
+
endif # X86_PLATFORM_DEVICES
config P2SB
- bool "Primary to Sideband (P2SB) bridge access support"
+ bool
depends on PCI && X86
help
The Primary to Sideband (P2SB) bridge is an interface to some
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 52dfdf574ac2..b457de5abf7d 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -131,7 +131,10 @@ obj-$(CONFIG_INTEL_SCU_IPC_UTIL) += intel_scu_ipcutil.o
obj-$(CONFIG_X86_INTEL_LPSS) += pmc_atom.o
# Siemens Simatic Industrial PCs
-obj-$(CONFIG_SIEMENS_SIMATIC_IPC) += simatic-ipc.o
+obj-$(CONFIG_SIEMENS_SIMATIC_IPC) += siemens/
# Winmate
obj-$(CONFIG_WINMATE_FM07_KEYS) += winmate-fm07-keys.o
+
+# SEL
+obj-$(CONFIG_SEL3350_PLATFORM) += sel3350-platform.o
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
index d9685aef0887..55f3a2fc6aec 100644
--- a/drivers/platform/x86/amd/Kconfig
+++ b/drivers/platform/x86/amd/Kconfig
@@ -4,21 +4,7 @@
#
source "drivers/platform/x86/amd/pmf/Kconfig"
-
-config AMD_PMC
- tristate "AMD SoC PMC driver"
- depends on ACPI && PCI && RTC_CLASS && AMD_NB
- select SERIO
- help
- The driver provides support for AMD Power Management Controller
- primarily responsible for S2Idle transactions that are driven from
- a platform firmware running on SMU. This driver also provides a debug
- mechanism to investigate the S2Idle transactions and failures.
-
- Say Y or M here if you have a notebook powered by AMD RYZEN CPU/APU.
-
- If you choose to compile this driver as a module the module will be
- called amd-pmc.
+source "drivers/platform/x86/amd/pmc/Kconfig"
config AMD_HSMP
tristate "AMD HSMP Driver"
diff --git a/drivers/platform/x86/amd/Makefile b/drivers/platform/x86/amd/Makefile
index 65732f0a3913..f04932b7a7d1 100644
--- a/drivers/platform/x86/amd/Makefile
+++ b/drivers/platform/x86/amd/Makefile
@@ -4,8 +4,7 @@
# AMD x86 Platform-Specific Drivers
#
-amd-pmc-y := pmc.o pmc-quirks.o
-obj-$(CONFIG_AMD_PMC) += amd-pmc.o
+obj-$(CONFIG_AMD_PMC) += pmc/
amd_hsmp-y := hsmp.o
obj-$(CONFIG_AMD_HSMP) += amd_hsmp.o
obj-$(CONFIG_AMD_PMF) += pmf/
diff --git a/drivers/platform/x86/amd/pmc/Kconfig b/drivers/platform/x86/amd/pmc/Kconfig
new file mode 100644
index 000000000000..883c0a95ac0c
--- /dev/null
+++ b/drivers/platform/x86/amd/pmc/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# AMD PMC Driver
+#
+
+config AMD_PMC
+ tristate "AMD SoC PMC driver"
+ depends on ACPI && PCI && RTC_CLASS && AMD_NB
+ depends on SUSPEND
+ select SERIO
+ help
+ The driver provides support for AMD Power Management Controller
+ primarily responsible for S2Idle transactions that are driven from
+ a platform firmware running on SMU. This driver also provides a debug
+ mechanism to investigate the S2Idle transactions and failures.
+
+ Say Y or M here if you have a notebook powered by AMD RYZEN CPU/APU.
+
+ If you choose to compile this driver as a module the module will be
+ called amd-pmc.
diff --git a/drivers/platform/x86/amd/pmc/Makefile b/drivers/platform/x86/amd/pmc/Makefile
new file mode 100644
index 000000000000..4aaa29d351c9
--- /dev/null
+++ b/drivers/platform/x86/amd/pmc/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/amd/pmc
+# AMD Power Management Controller Driver
+#
+
+amd-pmc-objs := pmc.o pmc-quirks.o
+obj-$(CONFIG_AMD_PMC) += amd-pmc.o
diff --git a/drivers/platform/x86/amd/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c
index ad702463a65d..ad702463a65d 100644
--- a/drivers/platform/x86/amd/pmc-quirks.c
+++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c
diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc/pmc.c
index c1e788b67a74..c1e788b67a74 100644
--- a/drivers/platform/x86/amd/pmc.c
+++ b/drivers/platform/x86/amd/pmc/pmc.c
diff --git a/drivers/platform/x86/amd/pmc.h b/drivers/platform/x86/amd/pmc/pmc.h
index c27bd6a5642f..c27bd6a5642f 100644
--- a/drivers/platform/x86/amd/pmc.h
+++ b/drivers/platform/x86/amd/pmc/pmc.h
diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c
index 539b186e9027..bc8899e15c91 100644
--- a/drivers/platform/x86/amd/pmf/cnqf.c
+++ b/drivers/platform/x86/amd/pmf/cnqf.c
@@ -8,6 +8,7 @@
* Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
*/
+#include <linux/string_choices.h>
#include <linux/workqueue.h>
#include "pmf.h"
@@ -399,7 +400,7 @@ static ssize_t cnqf_enable_store(struct device *dev,
amd_pmf_set_sps_power_limits(pdev);
}
- dev_dbg(pdev->dev, "Received CnQF %s\n", input ? "on" : "off");
+ dev_dbg(pdev->dev, "Received CnQF %s\n", str_on_off(input));
return count;
}
@@ -409,7 +410,7 @@ static ssize_t cnqf_enable_show(struct device *dev,
{
struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%s\n", pdev->cnqf_enabled ? "on" : "off");
+ return sysfs_emit(buf, "%s\n", str_on_off(pdev->cnqf_enabled));
}
static DEVICE_ATTR_RW(cnqf_enable);
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index 57bf1a9f0e76..78ed3ee22555 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -324,7 +324,8 @@ static void amd_pmf_init_features(struct amd_pmf_dev *dev)
static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
{
- if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
+ if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
+ is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
power_supply_unreg_notifier(&dev->pwr_src_notifier);
amd_pmf_deinit_sps(dev);
}
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 8bef66a2f0ce..9f8cea5f9615 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -72,6 +72,7 @@ module_param(fnlock_default, bool, 0444);
#define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0)
+#define ASUS_MID_FAN_DESC "mid_fan"
#define ASUS_GPU_FAN_DESC "gpu_fan"
#define ASUS_FAN_DESC "cpu_fan"
#define ASUS_FAN_MFUN 0x13
@@ -112,9 +113,20 @@ module_param(fnlock_default, bool, 0444);
#define FAN_CURVE_BUF_LEN 32
#define FAN_CURVE_DEV_CPU 0x00
#define FAN_CURVE_DEV_GPU 0x01
+#define FAN_CURVE_DEV_MID 0x02
/* Mask to determine if setting temperature or percentage */
#define FAN_CURVE_PWM_MASK 0x04
+/* Limits for tunables available on ASUS ROG laptops */
+#define PPT_TOTAL_MIN 5
+#define PPT_TOTAL_MAX 250
+#define PPT_CPU_MIN 5
+#define PPT_CPU_MAX 130
+#define NVIDIA_BOOST_MIN 5
+#define NVIDIA_BOOST_MAX 25
+#define NVIDIA_TEMP_MIN 75
+#define NVIDIA_TEMP_MAX 87
+
static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
static int throttle_thermal_policy_write(struct asus_wmi *);
@@ -229,18 +241,31 @@ struct asus_wmi {
enum fan_type fan_type;
enum fan_type gpu_fan_type;
+ enum fan_type mid_fan_type;
int fan_pwm_mode;
int gpu_fan_pwm_mode;
+ int mid_fan_pwm_mode;
int agfn_pwm;
bool fan_boost_mode_available;
u8 fan_boost_mode_mask;
u8 fan_boost_mode;
+ bool charge_mode_available;
bool egpu_enable_available;
+ bool egpu_connect_available;
bool dgpu_disable_available;
bool gpu_mux_mode_available;
+ /* Tunables provided by ASUS for gaming laptops */
+ bool ppt_pl2_sppt_available;
+ bool ppt_pl1_spl_available;
+ bool ppt_apu_sppt_available;
+ bool ppt_plat_sppt_available;
+ bool ppt_fppt_available;
+ bool nv_dyn_boost_available;
+ bool nv_temp_tgt_available;
+
bool kbd_rgb_mode_available;
bool kbd_rgb_state_available;
@@ -249,7 +274,8 @@ struct asus_wmi {
bool cpu_fan_curve_available;
bool gpu_fan_curve_available;
- struct fan_curve_data custom_fan_curves[2];
+ bool mid_fan_curve_available;
+ struct fan_curve_data custom_fan_curves[3];
struct platform_profile_handler platform_profile_handler;
bool platform_profile_support;
@@ -258,6 +284,7 @@ struct asus_wmi {
bool battery_rsoc_available;
bool panel_overdrive_available;
+ bool mini_led_mode_available;
struct hotplug_slot hotplug_slot;
struct mutex hotplug_lock;
@@ -586,6 +613,22 @@ static void asus_wmi_tablet_mode_get_state(struct asus_wmi *asus)
asus_wmi_tablet_sw_report(asus, result);
}
+/* Charging mode, 1=Barrel, 2=USB ******************************************/
+static ssize_t charge_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result, value;
+
+ result = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_CHARGE_MODE, &value);
+ if (result < 0)
+ return result;
+
+ return sysfs_emit(buf, "%d\n", value & 0xff);
+}
+
+static DEVICE_ATTR_RO(charge_mode);
+
/* dGPU ********************************************************************/
static ssize_t dgpu_disable_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -622,6 +665,18 @@ static ssize_t dgpu_disable_store(struct device *dev,
if (disable > 1)
return -EINVAL;
+ if (asus->gpu_mux_mode_available) {
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
+ if (result < 0)
+ /* An error here may signal greater failure of GPU handling */
+ return result;
+ if (!result && disable) {
+ err = -ENODEV;
+ pr_warn("Can not disable dGPU when the MUX is in dGPU mode: %d\n", err);
+ return err;
+ }
+ }
+
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_DGPU, disable, &result);
if (err) {
pr_warn("Failed to set dgpu disable: %d\n", err);
@@ -670,14 +725,34 @@ static ssize_t egpu_enable_store(struct device *dev,
if (enable > 1)
return -EINVAL;
+ err = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU_CONNECTED);
+ if (err < 0) {
+ pr_warn("Failed to get egpu connection status: %d\n", err);
+ return err;
+ }
+
+ if (asus->gpu_mux_mode_available) {
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX);
+ if (result < 0) {
+ /* An error here may signal greater failure of GPU handling */
+ pr_warn("Failed to get gpu mux status: %d\n", result);
+ return result;
+ }
+ if (!result && enable) {
+ err = -ENODEV;
+ pr_warn("Can not enable eGPU when the MUX is in dGPU mode: %d\n", err);
+ return err;
+ }
+ }
+
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_EGPU, enable, &result);
if (err) {
- pr_warn("Failed to set egpu disable: %d\n", err);
+ pr_warn("Failed to set egpu state: %d\n", err);
return err;
}
if (result > 1) {
- pr_warn("Failed to set egpu disable (retval): 0x%x\n", result);
+ pr_warn("Failed to set egpu state (retval): 0x%x\n", result);
return -EIO;
}
@@ -687,6 +762,22 @@ static ssize_t egpu_enable_store(struct device *dev,
}
static DEVICE_ATTR_RW(egpu_enable);
+/* Is eGPU connected? *********************************************************/
+static ssize_t egpu_connected_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result;
+
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU_CONNECTED);
+ if (result < 0)
+ return result;
+
+ return sysfs_emit(buf, "%d\n", result);
+}
+
+static DEVICE_ATTR_RO(egpu_connected);
+
/* gpu mux switch *************************************************************/
static ssize_t gpu_mux_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -716,6 +807,30 @@ static ssize_t gpu_mux_mode_store(struct device *dev,
if (optimus > 1)
return -EINVAL;
+ if (asus->dgpu_disable_available) {
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_DGPU);
+ if (result < 0)
+ /* An error here may signal greater failure of GPU handling */
+ return result;
+ if (result && !optimus) {
+ err = -ENODEV;
+ pr_warn("Can not switch MUX to dGPU mode when dGPU is disabled: %d\n", err);
+ return err;
+ }
+ }
+
+ if (asus->egpu_enable_available) {
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_EGPU);
+ if (result < 0)
+ /* An error here may signal greater failure of GPU handling */
+ return result;
+ if (result && !optimus) {
+ err = -ENODEV;
+ pr_warn("Can not switch MUX to dGPU mode when eGPU is enabled: %d\n", err);
+ return err;
+ }
+ }
+
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_MUX, optimus, &result);
if (err) {
dev_err(dev, "Failed to set GPU MUX mode: %d\n", err);
@@ -859,6 +974,244 @@ static const struct attribute_group *kbd_rgb_mode_groups[] = {
NULL,
};
+/* Tunable: PPT: Intel=PL1, AMD=SPPT *****************************************/
+static ssize_t ppt_pl2_sppt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result, err;
+ u32 value;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtou32(buf, 10, &value);
+ if (result)
+ return result;
+
+ if (value < PPT_TOTAL_MIN || value > PPT_TOTAL_MAX)
+ return -EINVAL;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PPT_PL2_SPPT, value, &result);
+ if (err) {
+ pr_warn("Failed to set ppt_pl2_sppt: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set ppt_pl2_sppt (result): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_pl2_sppt");
+
+ return count;
+}
+static DEVICE_ATTR_WO(ppt_pl2_sppt);
+
+/* Tunable: PPT, Intel=PL1, AMD=SPL ******************************************/
+static ssize_t ppt_pl1_spl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result, err;
+ u32 value;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtou32(buf, 10, &value);
+ if (result)
+ return result;
+
+ if (value < PPT_TOTAL_MIN || value > PPT_TOTAL_MAX)
+ return -EINVAL;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PPT_PL1_SPL, value, &result);
+ if (err) {
+ pr_warn("Failed to set ppt_pl1_spl: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set ppt_pl1_spl (result): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_pl1_spl");
+
+ return count;
+}
+static DEVICE_ATTR_WO(ppt_pl1_spl);
+
+/* Tunable: PPT APU FPPT ******************************************************/
+static ssize_t ppt_fppt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result, err;
+ u32 value;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtou32(buf, 10, &value);
+ if (result)
+ return result;
+
+ if (value < PPT_TOTAL_MIN || value > PPT_TOTAL_MAX)
+ return -EINVAL;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PPT_FPPT, value, &result);
+ if (err) {
+ pr_warn("Failed to set ppt_fppt: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set ppt_fppt (result): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_fpu_sppt");
+
+ return count;
+}
+static DEVICE_ATTR_WO(ppt_fppt);
+
+/* Tunable: PPT APU SPPT *****************************************************/
+static ssize_t ppt_apu_sppt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result, err;
+ u32 value;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtou32(buf, 10, &value);
+ if (result)
+ return result;
+
+ if (value < PPT_CPU_MIN || value > PPT_CPU_MAX)
+ return -EINVAL;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PPT_APU_SPPT, value, &result);
+ if (err) {
+ pr_warn("Failed to set ppt_apu_sppt: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set ppt_apu_sppt (result): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_apu_sppt");
+
+ return count;
+}
+static DEVICE_ATTR_WO(ppt_apu_sppt);
+
+/* Tunable: PPT platform SPPT ************************************************/
+static ssize_t ppt_platform_sppt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result, err;
+ u32 value;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtou32(buf, 10, &value);
+ if (result)
+ return result;
+
+ if (value < PPT_CPU_MIN || value > PPT_CPU_MAX)
+ return -EINVAL;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PPT_PLAT_SPPT, value, &result);
+ if (err) {
+ pr_warn("Failed to set ppt_platform_sppt: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set ppt_platform_sppt (result): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "ppt_platform_sppt");
+
+ return count;
+}
+static DEVICE_ATTR_WO(ppt_platform_sppt);
+
+/* Tunable: NVIDIA dynamic boost *********************************************/
+static ssize_t nv_dynamic_boost_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result, err;
+ u32 value;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtou32(buf, 10, &value);
+ if (result)
+ return result;
+
+ if (value < NVIDIA_BOOST_MIN || value > NVIDIA_BOOST_MAX)
+ return -EINVAL;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_NV_DYN_BOOST, value, &result);
+ if (err) {
+ pr_warn("Failed to set nv_dynamic_boost: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set nv_dynamic_boost (result): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "nv_dynamic_boost");
+
+ return count;
+}
+static DEVICE_ATTR_WO(nv_dynamic_boost);
+
+/* Tunable: NVIDIA temperature target ****************************************/
+static ssize_t nv_temp_target_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result, err;
+ u32 value;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtou32(buf, 10, &value);
+ if (result)
+ return result;
+
+ if (value < NVIDIA_TEMP_MIN || value > NVIDIA_TEMP_MAX)
+ return -EINVAL;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_NV_THERM_TARGET, value, &result);
+ if (err) {
+ pr_warn("Failed to set nv_temp_target: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set nv_temp_target (result): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "nv_temp_target");
+
+ return count;
+}
+static DEVICE_ATTR_WO(nv_temp_target);
+
/* Battery ********************************************************************/
/* The battery maximum charging percentage */
@@ -1734,6 +2087,54 @@ static ssize_t panel_od_store(struct device *dev,
}
static DEVICE_ATTR_RW(panel_od);
+/* Mini-LED mode **************************************************************/
+static ssize_t mini_led_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int result;
+
+ result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_MINI_LED_MODE);
+ if (result < 0)
+ return result;
+
+ return sysfs_emit(buf, "%d\n", result);
+}
+
+static ssize_t mini_led_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int result, err;
+ u32 mode;
+
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ result = kstrtou32(buf, 10, &mode);
+ if (result)
+ return result;
+
+ if (mode > 1)
+ return -EINVAL;
+
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_MINI_LED_MODE, mode, &result);
+
+ if (err) {
+ pr_warn("Failed to set mini-LED: %d\n", err);
+ return err;
+ }
+
+ if (result > 1) {
+ pr_warn("Failed to set mini-LED mode (result): 0x%x\n", result);
+ return -EIO;
+ }
+
+ sysfs_notify(&asus->platform_device->dev.kobj, NULL, "mini_led_mode");
+
+ return count;
+}
+static DEVICE_ATTR_RW(mini_led_mode);
+
/* Quirks *********************************************************************/
static void asus_wmi_set_xusb2pr(struct asus_wmi *asus)
@@ -2070,6 +2471,8 @@ static ssize_t pwm1_enable_store(struct device *dev,
asus->custom_fan_curves[FAN_CURVE_DEV_CPU].enabled = false;
if (asus->gpu_fan_curve_available)
asus->custom_fan_curves[FAN_CURVE_DEV_GPU].enabled = false;
+ if (asus->mid_fan_curve_available)
+ asus->custom_fan_curves[FAN_CURVE_DEV_MID].enabled = false;
return count;
}
@@ -2122,6 +2525,31 @@ static ssize_t fan2_label_show(struct device *dev,
return sysfs_emit(buf, "%s\n", ASUS_GPU_FAN_DESC);
}
+/* Middle/Center fan on modern ROG laptops */
+static ssize_t fan3_input_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int value;
+ int ret;
+
+ ret = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_MID_FAN_CTRL, &value);
+ if (ret < 0)
+ return ret;
+
+ value &= 0xffff;
+
+ return sysfs_emit(buf, "%d\n", value * 100);
+}
+
+static ssize_t fan3_label_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", ASUS_MID_FAN_DESC);
+}
+
static ssize_t pwm2_enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -2168,6 +2596,52 @@ static ssize_t pwm2_enable_store(struct device *dev,
return count;
}
+static ssize_t pwm3_enable_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", asus->mid_fan_pwm_mode);
+}
+
+static ssize_t pwm3_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct asus_wmi *asus = dev_get_drvdata(dev);
+ int state;
+ int value;
+ int ret;
+ u32 retval;
+
+ ret = kstrtouint(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ switch (state) { /* standard documented hwmon values */
+ case ASUS_FAN_CTRL_FULLSPEED:
+ value = 1;
+ break;
+ case ASUS_FAN_CTRL_AUTO:
+ value = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = asus_wmi_set_devstate(ASUS_WMI_DEVID_MID_FAN_CTRL,
+ value, &retval);
+ if (ret)
+ return ret;
+
+ if (retval != 1)
+ return -EIO;
+
+ asus->mid_fan_pwm_mode = state;
+ return count;
+}
+
/* Fan1 */
static DEVICE_ATTR_RW(pwm1);
static DEVICE_ATTR_RW(pwm1_enable);
@@ -2177,6 +2651,10 @@ static DEVICE_ATTR_RO(fan1_label);
static DEVICE_ATTR_RW(pwm2_enable);
static DEVICE_ATTR_RO(fan2_input);
static DEVICE_ATTR_RO(fan2_label);
+/* Fan3 - Middle/center fan */
+static DEVICE_ATTR_RW(pwm3_enable);
+static DEVICE_ATTR_RO(fan3_input);
+static DEVICE_ATTR_RO(fan3_label);
/* Temperature */
static DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL);
@@ -2185,10 +2663,13 @@ static struct attribute *hwmon_attributes[] = {
&dev_attr_pwm1.attr,
&dev_attr_pwm1_enable.attr,
&dev_attr_pwm2_enable.attr,
+ &dev_attr_pwm3_enable.attr,
&dev_attr_fan1_input.attr,
&dev_attr_fan1_label.attr,
&dev_attr_fan2_input.attr,
&dev_attr_fan2_label.attr,
+ &dev_attr_fan3_input.attr,
+ &dev_attr_fan3_label.attr,
&dev_attr_temp1_input.attr,
NULL
@@ -2214,6 +2695,11 @@ static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
|| attr == &dev_attr_pwm2_enable.attr) {
if (asus->gpu_fan_type == FAN_TYPE_NONE)
return 0;
+ } else if (attr == &dev_attr_fan3_input.attr
+ || attr == &dev_attr_fan3_label.attr
+ || attr == &dev_attr_pwm3_enable.attr) {
+ if (asus->mid_fan_type == FAN_TYPE_NONE)
+ return 0;
} else if (attr == &dev_attr_temp1_input.attr) {
int err = asus_wmi_get_devstate(asus,
ASUS_WMI_DEVID_THERMAL_CTRL,
@@ -2257,6 +2743,7 @@ static int asus_wmi_hwmon_init(struct asus_wmi *asus)
static int asus_wmi_fan_init(struct asus_wmi *asus)
{
asus->gpu_fan_type = FAN_TYPE_NONE;
+ asus->mid_fan_type = FAN_TYPE_NONE;
asus->fan_type = FAN_TYPE_NONE;
asus->agfn_pwm = -1;
@@ -2271,6 +2758,10 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_FAN_CTRL))
asus->gpu_fan_type = FAN_TYPE_SPEC83;
+ /* Some models also have a center/middle fan */
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MID_FAN_CTRL))
+ asus->mid_fan_type = FAN_TYPE_SPEC83;
+
if (asus->fan_type == FAN_TYPE_NONE)
return -ENODEV;
@@ -2418,9 +2909,8 @@ static int fan_curve_get_factory_default(struct asus_wmi *asus, u32 fan_dev)
{
struct fan_curve_data *curves;
u8 buf[FAN_CURVE_BUF_LEN];
- int fan_idx = 0;
+ int err, fan_idx;
u8 mode = 0;
- int err;
if (asus->throttle_thermal_policy_available)
mode = asus->throttle_thermal_policy_mode;
@@ -2430,10 +2920,6 @@ static int fan_curve_get_factory_default(struct asus_wmi *asus, u32 fan_dev)
else if (mode == 1)
mode = 2;
- if (fan_dev == ASUS_WMI_DEVID_GPU_FAN_CURVE)
- fan_idx = FAN_CURVE_DEV_GPU;
-
- curves = &asus->custom_fan_curves[fan_idx];
err = asus_wmi_evaluate_method_buf(asus->dsts_id, fan_dev, mode, buf,
FAN_CURVE_BUF_LEN);
if (err) {
@@ -2441,9 +2927,17 @@ static int fan_curve_get_factory_default(struct asus_wmi *asus, u32 fan_dev)
return err;
}
- fan_curve_copy_from_buf(curves, buf);
+ fan_idx = FAN_CURVE_DEV_CPU;
+ if (fan_dev == ASUS_WMI_DEVID_GPU_FAN_CURVE)
+ fan_idx = FAN_CURVE_DEV_GPU;
+
+ if (fan_dev == ASUS_WMI_DEVID_MID_FAN_CURVE)
+ fan_idx = FAN_CURVE_DEV_MID;
+
+ curves = &asus->custom_fan_curves[fan_idx];
curves->device_id = fan_dev;
+ fan_curve_copy_from_buf(curves, buf);
return 0;
}
@@ -2473,7 +2967,7 @@ static struct fan_curve_data *fan_curve_attr_select(struct asus_wmi *asus,
{
int index = to_sensor_dev_attr(attr)->index;
- return &asus->custom_fan_curves[index & FAN_CURVE_DEV_GPU];
+ return &asus->custom_fan_curves[index];
}
/* Determine which fan the attribute is for if SENSOR_ATTR_2 */
@@ -2482,7 +2976,7 @@ static struct fan_curve_data *fan_curve_attr_2_select(struct asus_wmi *asus,
{
int nr = to_sensor_dev_attr_2(attr)->nr;
- return &asus->custom_fan_curves[nr & FAN_CURVE_DEV_GPU];
+ return &asus->custom_fan_curves[nr & ~FAN_CURVE_PWM_MASK];
}
static ssize_t fan_curve_show(struct device *dev,
@@ -2491,13 +2985,13 @@ static ssize_t fan_curve_show(struct device *dev,
struct sensor_device_attribute_2 *dev_attr = to_sensor_dev_attr_2(attr);
struct asus_wmi *asus = dev_get_drvdata(dev);
struct fan_curve_data *data;
- int value, index, nr;
+ int value, pwm, index;
data = fan_curve_attr_2_select(asus, attr);
+ pwm = dev_attr->nr & FAN_CURVE_PWM_MASK;
index = dev_attr->index;
- nr = dev_attr->nr;
- if (nr & FAN_CURVE_PWM_MASK)
+ if (pwm)
value = data->percents[index];
else
value = data->temps[index];
@@ -2540,23 +3034,21 @@ static ssize_t fan_curve_store(struct device *dev,
struct sensor_device_attribute_2 *dev_attr = to_sensor_dev_attr_2(attr);
struct asus_wmi *asus = dev_get_drvdata(dev);
struct fan_curve_data *data;
+ int err, pwm, index;
u8 value;
- int err;
-
- int pwm = dev_attr->nr & FAN_CURVE_PWM_MASK;
- int index = dev_attr->index;
data = fan_curve_attr_2_select(asus, attr);
+ pwm = dev_attr->nr & FAN_CURVE_PWM_MASK;
+ index = dev_attr->index;
err = kstrtou8(buf, 10, &value);
if (err < 0)
return err;
- if (pwm) {
+ if (pwm)
data->percents[index] = value;
- } else {
+ else
data->temps[index] = value;
- }
/*
* Mark as disabled so the user has to explicitly enable to apply a
@@ -2669,7 +3161,7 @@ static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point8_temp, fan_curve,
FAN_CURVE_DEV_CPU, 7);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point1_pwm, fan_curve,
- FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 0);
+ FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 0);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point2_pwm, fan_curve,
FAN_CURVE_DEV_CPU | FAN_CURVE_PWM_MASK, 1);
static SENSOR_DEVICE_ATTR_2_RW(pwm1_auto_point3_pwm, fan_curve,
@@ -2721,6 +3213,42 @@ static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point7_pwm, fan_curve,
static SENSOR_DEVICE_ATTR_2_RW(pwm2_auto_point8_pwm, fan_curve,
FAN_CURVE_DEV_GPU | FAN_CURVE_PWM_MASK, 7);
+/* MID */
+static SENSOR_DEVICE_ATTR_RW(pwm3_enable, fan_curve_enable, FAN_CURVE_DEV_MID);
+static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point1_temp, fan_curve,
+ FAN_CURVE_DEV_MID, 0);
+static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point2_temp, fan_curve,
+ FAN_CURVE_DEV_MID, 1);
+static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point3_temp, fan_curve,
+ FAN_CURVE_DEV_MID, 2);
+static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point4_temp, fan_curve,
+ FAN_CURVE_DEV_MID, 3);
+static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point5_temp, fan_curve,
+ FAN_CURVE_DEV_MID, 4);
+static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point6_temp, fan_curve,
+ FAN_CURVE_DEV_MID, 5);
+static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point7_temp, fan_curve,
+ FAN_CURVE_DEV_MID, 6);
+static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point8_temp, fan_curve,
+ FAN_CURVE_DEV_MID, 7);
+
+static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point1_pwm, fan_curve,
+ FAN_CURVE_DEV_MID | FAN_CURVE_PWM_MASK, 0);
+static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point2_pwm, fan_curve,
+ FAN_CURVE_DEV_MID | FAN_CURVE_PWM_MASK, 1);
+static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point3_pwm, fan_curve,
+ FAN_CURVE_DEV_MID | FAN_CURVE_PWM_MASK, 2);
+static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point4_pwm, fan_curve,
+ FAN_CURVE_DEV_MID | FAN_CURVE_PWM_MASK, 3);
+static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point5_pwm, fan_curve,
+ FAN_CURVE_DEV_MID | FAN_CURVE_PWM_MASK, 4);
+static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point6_pwm, fan_curve,
+ FAN_CURVE_DEV_MID | FAN_CURVE_PWM_MASK, 5);
+static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point7_pwm, fan_curve,
+ FAN_CURVE_DEV_MID | FAN_CURVE_PWM_MASK, 6);
+static SENSOR_DEVICE_ATTR_2_RW(pwm3_auto_point8_pwm, fan_curve,
+ FAN_CURVE_DEV_MID | FAN_CURVE_PWM_MASK, 7);
+
static struct attribute *asus_fan_curve_attr[] = {
/* CPU */
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
@@ -2758,6 +3286,24 @@ static struct attribute *asus_fan_curve_attr[] = {
&sensor_dev_attr_pwm2_auto_point6_pwm.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point7_pwm.dev_attr.attr,
&sensor_dev_attr_pwm2_auto_point8_pwm.dev_attr.attr,
+ /* MID */
+ &sensor_dev_attr_pwm3_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point4_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point5_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point6_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point7_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point8_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point5_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point6_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point7_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point8_pwm.dev_attr.attr,
NULL
};
@@ -2777,6 +3323,9 @@ static umode_t asus_fan_curve_is_visible(struct kobject *kobj,
if (asus->gpu_fan_curve_available && attr->name[3] == '2')
return 0644;
+ if (asus->mid_fan_curve_available && attr->name[3] == '3')
+ return 0644;
+
return 0;
}
@@ -2806,7 +3355,14 @@ static int asus_wmi_custom_fan_curve_init(struct asus_wmi *asus)
if (err)
return err;
- if (!asus->cpu_fan_curve_available && !asus->gpu_fan_curve_available)
+ err = fan_curve_check_present(asus, &asus->mid_fan_curve_available,
+ ASUS_WMI_DEVID_MID_FAN_CURVE);
+ if (err)
+ return err;
+
+ if (!asus->cpu_fan_curve_available
+ && !asus->gpu_fan_curve_available
+ && !asus->mid_fan_curve_available)
return 0;
hwmon = devm_hwmon_device_register_with_groups(
@@ -2875,6 +3431,8 @@ static int throttle_thermal_policy_write(struct asus_wmi *asus)
asus->custom_fan_curves[FAN_CURVE_DEV_CPU].enabled = false;
if (asus->gpu_fan_curve_available)
asus->custom_fan_curves[FAN_CURVE_DEV_GPU].enabled = false;
+ if (asus->mid_fan_curve_available)
+ asus->custom_fan_curves[FAN_CURVE_DEV_MID].enabled = false;
return 0;
}
@@ -3472,14 +4030,24 @@ static struct attribute *platform_attributes[] = {
&dev_attr_camera.attr,
&dev_attr_cardr.attr,
&dev_attr_touchpad.attr,
+ &dev_attr_charge_mode.attr,
&dev_attr_egpu_enable.attr,
+ &dev_attr_egpu_connected.attr,
&dev_attr_dgpu_disable.attr,
&dev_attr_gpu_mux_mode.attr,
&dev_attr_lid_resume.attr,
&dev_attr_als_enable.attr,
&dev_attr_fan_boost_mode.attr,
&dev_attr_throttle_thermal_policy.attr,
+ &dev_attr_ppt_pl2_sppt.attr,
+ &dev_attr_ppt_pl1_spl.attr,
+ &dev_attr_ppt_fppt.attr,
+ &dev_attr_ppt_apu_sppt.attr,
+ &dev_attr_ppt_platform_sppt.attr,
+ &dev_attr_nv_dynamic_boost.attr,
+ &dev_attr_nv_temp_target.attr,
&dev_attr_panel_od.attr,
+ &dev_attr_mini_led_mode.attr,
NULL
};
@@ -3501,8 +4069,12 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
devid = ASUS_WMI_DEVID_LID_RESUME;
else if (attr == &dev_attr_als_enable.attr)
devid = ASUS_WMI_DEVID_ALS_ENABLE;
+ else if (attr == &dev_attr_charge_mode.attr)
+ ok = asus->charge_mode_available;
else if (attr == &dev_attr_egpu_enable.attr)
ok = asus->egpu_enable_available;
+ else if (attr == &dev_attr_egpu_connected.attr)
+ ok = asus->egpu_connect_available;
else if (attr == &dev_attr_dgpu_disable.attr)
ok = asus->dgpu_disable_available;
else if (attr == &dev_attr_gpu_mux_mode.attr)
@@ -3511,8 +4083,24 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
ok = asus->fan_boost_mode_available;
else if (attr == &dev_attr_throttle_thermal_policy.attr)
ok = asus->throttle_thermal_policy_available;
+ else if (attr == &dev_attr_ppt_pl2_sppt.attr)
+ ok = asus->ppt_pl2_sppt_available;
+ else if (attr == &dev_attr_ppt_pl1_spl.attr)
+ ok = asus->ppt_pl1_spl_available;
+ else if (attr == &dev_attr_ppt_fppt.attr)
+ ok = asus->ppt_fppt_available;
+ else if (attr == &dev_attr_ppt_apu_sppt.attr)
+ ok = asus->ppt_apu_sppt_available;
+ else if (attr == &dev_attr_ppt_platform_sppt.attr)
+ ok = asus->ppt_plat_sppt_available;
+ else if (attr == &dev_attr_nv_dynamic_boost.attr)
+ ok = asus->nv_dyn_boost_available;
+ else if (attr == &dev_attr_nv_temp_target.attr)
+ ok = asus->nv_temp_tgt_available;
else if (attr == &dev_attr_panel_od.attr)
ok = asus->panel_overdrive_available;
+ else if (attr == &dev_attr_mini_led_mode.attr)
+ ok = asus->mini_led_mode_available;
if (devid != -1)
ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
@@ -3767,12 +4355,22 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_platform;
+ asus->charge_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CHARGE_MODE);
asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU);
+ asus->egpu_connect_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU_CONNECTED);
asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU);
asus->gpu_mux_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX);
asus->kbd_rgb_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE);
asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE);
+ asus->ppt_pl2_sppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_PL2_SPPT);
+ asus->ppt_pl1_spl_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_PL1_SPL);
+ asus->ppt_fppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_FPPT);
+ asus->ppt_apu_sppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_APU_SPPT);
+ asus->ppt_plat_sppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_PLAT_SPPT);
+ asus->nv_dyn_boost_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_NV_DYN_BOOST);
+ asus->nv_temp_tgt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_NV_THERM_TARGET);
asus->panel_overdrive_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PANEL_OD);
+ asus->mini_led_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE);
err = fan_boost_mode_check_present(asus);
if (err)
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
index b68dd11cb892..b929b4f82420 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
@@ -393,6 +393,7 @@ static int init_bios_attributes(int attr_type, const char *guid)
struct kobject *attr_name_kobj; //individual attribute names
union acpi_object *obj = NULL;
union acpi_object *elements;
+ struct kobject *duplicate;
struct kset *tmp_set;
int min_elements;
@@ -451,9 +452,11 @@ static int init_bios_attributes(int attr_type, const char *guid)
else
tmp_set = wmi_priv.main_dir_kset;
- if (kset_find_obj(tmp_set, elements[ATTR_NAME].string.pointer)) {
- pr_debug("duplicate attribute name found - %s\n",
- elements[ATTR_NAME].string.pointer);
+ duplicate = kset_find_obj(tmp_set, elements[ATTR_NAME].string.pointer);
+ if (duplicate) {
+ pr_debug("Duplicate attribute name found - %s\n",
+ elements[ATTR_NAME].string.pointer);
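+ /* kset_find_obj() returned a new reference; drop it */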
+ kobject_put(duplicate);
goto nextobj;
}
diff --git a/drivers/platform/x86/hp/Kconfig b/drivers/platform/x86/hp/Kconfig
index ae165955311c..7fef4f12e498 100644
--- a/drivers/platform/x86/hp/Kconfig
+++ b/drivers/platform/x86/hp/Kconfig
@@ -60,4 +60,20 @@ config TC1100_WMI
This is a driver for the WMI extensions (wireless and bluetooth power
control) of the HP Compaq TC1100 tablet.
+config HP_BIOSCFG
+ tristate "HP BIOS Configuration Driver"
+ default m
+ depends on ACPI_WMI
+ select NLS
+ select FW_ATTR_CLASS
+ help
+ This driver enables administrators to securely manage BIOS settings
+ using digital certificates and public-key cryptography that eliminate
+ the need for passwords for both remote and local management. It supports
+ changing BIOS settings on many HP machines from 2018 and newer without
+ the use of any additional software.
+
+ To compile this driver as a module, choose M here: the module will
+ be called hp-bioscfg.
+
endif # X86_PLATFORM_DRIVERS_HP
diff --git a/drivers/platform/x86/hp/Makefile b/drivers/platform/x86/hp/Makefile
index db1eed4cd7c7..e4f908a61acf 100644
--- a/drivers/platform/x86/hp/Makefile
+++ b/drivers/platform/x86/hp/Makefile
@@ -8,3 +8,4 @@
obj-$(CONFIG_HP_ACCEL) += hp_accel.o
obj-$(CONFIG_HP_WMI) += hp-wmi.o
obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
+obj-$(CONFIG_HP_BIOSCFG) += hp-bioscfg/
diff --git a/drivers/platform/x86/hp/hp-bioscfg/Makefile b/drivers/platform/x86/hp/hp-bioscfg/Makefile
new file mode 100644
index 000000000000..67be0d917753
--- /dev/null
+++ b/drivers/platform/x86/hp/hp-bioscfg/Makefile
@@ -0,0 +1,11 @@
+obj-$(CONFIG_HP_BIOSCFG) := hp-bioscfg.o
+
+hp-bioscfg-objs := bioscfg.o \
+ biosattr-interface.o \
+ enum-attributes.o \
+ int-attributes.o \
+ order-list-attributes.o \
+ passwdobj-attributes.o \
+ spmobj-attributes.o \
+ string-attributes.o \
+ surestart-attributes.o
diff --git a/drivers/platform/x86/hp/hp-bioscfg/biosattr-interface.c b/drivers/platform/x86/hp/hp-bioscfg/biosattr-interface.c
new file mode 100644
index 000000000000..dea54f35b8b5
--- /dev/null
+++ b/drivers/platform/x86/hp/hp-bioscfg/biosattr-interface.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions corresponding to methods under BIOS interface GUID
+ * for use with hp-bioscfg driver.
+ *
+ * Copyright (c) 2022 Hewlett-Packard Inc.
+ */
+
+#include <linux/wmi.h>
+#include "bioscfg.h"
+
+/*
+ * The struct bios_args buffer is dynamically allocated. New WMI command types
+ * were introduced that exceed the 128-byte data size, so the code that handles
+ * the data size allocation scheme is kept in the hp_wmi_perform_query() function.
+ */
+struct bios_args {
+ u32 signature;
+ u32 command;
+ u32 commandtype;
+ u32 datasize;
+ u8 data[];
+};
+
+/**
+ * hp_set_attribute
+ *
+ * @a_name: The attribute name
+ * @a_value: The attribute value
+ *
+ * Sets an attribute to a new value
+ *
+ * Returns zero on success
+ * -ENODEV if device is not found
+ * -EINVAL if the instance of the 'Setup Admin' password is not found.
+ * -ENOMEM unable to allocate memory
+ */
+int hp_set_attribute(const char *a_name, const char *a_value)
+{
+ int security_area_size;
+ int a_name_size, a_value_size;
+ u16 *buffer = NULL;
+ u16 *start;
+ int buffer_size, instance, ret;
+ char *auth_token_choice;
+
+ mutex_lock(&bioscfg_drv.mutex);
+
+ instance = hp_get_password_instance_for_type(SETUP_PASSWD);
+ if (instance < 0) {
+ ret = -EINVAL;
+ goto out_set_attribute;
+ }
+
+ /* Select which credential to use: the SPM auth token if present, otherwise the setup password */
+ if (bioscfg_drv.spm_data.auth_token)
+ auth_token_choice = bioscfg_drv.spm_data.auth_token;
+ else
+ auth_token_choice = bioscfg_drv.password_data[instance].current_password;
+
+ a_name_size = hp_calculate_string_buffer(a_name);
+ a_value_size = hp_calculate_string_buffer(a_value);
+ security_area_size = hp_calculate_security_buffer(auth_token_choice);
+ buffer_size = a_name_size + a_value_size + security_area_size;
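+ /*
+ * The WMI input buffer built below holds the attribute name and value
+ * as length-prefixed UTF-16 strings, followed by the security
+ * (authentication) buffer.
+ */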
+
+ buffer = kmalloc(buffer_size + 1, GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ goto out_set_attribute;
+ }
+
+ /* build variables to set */
+ start = buffer;
+ start = hp_ascii_to_utf16_unicode(start, a_name);
+ if (!start) {
+ ret = -EINVAL;
+ goto out_set_attribute;
+ }
+
+ start = hp_ascii_to_utf16_unicode(start, a_value);
+ if (!start) {
+ ret = -EINVAL;
+ goto out_set_attribute;
+ }
+
+ ret = hp_populate_security_buffer(start, auth_token_choice);
+ if (ret < 0)
+ goto out_set_attribute;
+
+ ret = hp_wmi_set_bios_setting(buffer, buffer_size);
+
+out_set_attribute:
+ kfree(buffer);
+ mutex_unlock(&bioscfg_drv.mutex);
+ return ret;
+}
+
+/**
+ * hp_wmi_perform_query
+ *
+ * @query: The commandtype (enum hp_wmi_commandtype)
+ * @command: The command (enum hp_wmi_command)
+ * @buffer: Buffer used as input and/or output
+ * @insize: Size of input buffer
+ * @outsize: Size of output buffer
+ *
+ * Returns zero on success
+ * an HP WMI query-specific error code (which is positive)
+ * -EINVAL if the query was not successful at all
+ * -EINVAL if the output data size exceeds the supplied buffer size
+ *
+ * Note: The buffer size must at least be the maximum of the input and output
+ * size. E.g. the battery info query is defined to have a 1-byte input
+ * and a 128-byte output. The caller would do:
+ * buffer = kzalloc(128, GFP_KERNEL);
+ * ret = hp_wmi_perform_query(HPWMI_BATTERY_QUERY, HPWMI_READ,
+ * buffer, 1, 128)
+ */
+int hp_wmi_perform_query(int query, enum hp_wmi_command command, void *buffer,
+ u32 insize, u32 outsize)
+{
+ struct acpi_buffer input, output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct bios_return *bios_return;
+ union acpi_object *obj = NULL;
+ struct bios_args *args = NULL;
+ int mid, actual_outsize, ret;
+ size_t bios_args_size;
+
+ mid = hp_encode_outsize_for_pvsz(outsize);
+ if (WARN_ON(mid < 0))
+ return mid;
+
+ bios_args_size = struct_size(args, data, insize);
+ args = kmalloc(bios_args_size, GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+
+ input.length = bios_args_size;
+ input.pointer = args;
+
+ /* BIOS expects 'SECU' in hex as the signature value */
+ args->signature = 0x55434553;
+ args->command = command;
+ args->commandtype = query;
+ args->datasize = insize;
+ memcpy(args->data, buffer, flex_array_size(args, data, insize));
+
+ ret = wmi_evaluate_method(HP_WMI_BIOS_GUID, 0, mid, &input, &output);
+ if (ret)
+ goto out_free;
+
+ obj = output.pointer;
+ if (!obj) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ if (obj->type != ACPI_TYPE_BUFFER ||
+ obj->buffer.length < sizeof(*bios_return)) {
+ pr_warn("query 0x%x returned wrong type or too small buffer\n", query);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ bios_return = (struct bios_return *)obj->buffer.pointer;
+ ret = bios_return->return_code;
+ if (ret) {
+ if (ret != INVALID_CMD_VALUE && ret != INVALID_CMD_TYPE)
+ pr_warn("query 0x%x returned error 0x%x\n", query, ret);
+ goto out_free;
+ }
+
+ /* Ignore output data of zero size */
+ if (!outsize)
+ goto out_free;
+
+ actual_outsize = min_t(u32, outsize, obj->buffer.length - sizeof(*bios_return));
+ memcpy_and_pad(buffer, outsize, obj->buffer.pointer + sizeof(*bios_return),
+ actual_outsize, 0);
+
+out_free:
+ ret = hp_wmi_error_and_message(ret);
+
+ kfree(obj);
+ kfree(args);
+ return ret;
+}
+
+static void *utf16_empty_string(u16 *p)
+{
+ *p++ = 2;
+ *p++ = 0x00;
+ return p;
+}
+
+/**
+ * hp_ascii_to_utf16_unicode - Convert ascii string to UTF-16 unicode
+ *
+ * The BIOS supports UTF-16 characters that are 2 bytes long. Variable-length
+ * multi-byte encodings are not supported.
+ *
+ * @p: Unicode buffer address
+ * @str: string to convert to unicode
+ *
+ * Returns a void pointer to the buffer string
+ */
+void *hp_ascii_to_utf16_unicode(u16 *p, const u8 *str)
+{
+ int len = strlen(str);
+ int ret;
+
+ /*
+ * Add null character when reading an empty string
+ * "02 00 00 00"
+ */
+ if (len == 0)
+ return utf16_empty_string(p);
+
+ /* Store the string length in bytes (len * 2) in the leading length field */
+ *p++ = len * 2;
+ ret = utf8s_to_utf16s(str, strlen(str), UTF16_HOST_ENDIAN, p, len);
+ if (ret < 0) {
+ dev_err(bioscfg_drv.class_dev, "UTF16 conversion failed\n");
+ return NULL;
+ }
+
+ if (ret * sizeof(u16) > U16_MAX) {
+ dev_err(bioscfg_drv.class_dev, "Error string too long\n");
+ return NULL;
+ }
+
+ p += len;
+ return p;
+}
+
+/**
+ * hp_wmi_set_bios_setting - Set setting's value in BIOS
+ *
+ * @input_buffer: Input buffer address
+ * @input_size: Input buffer size
+ *
+ * Returns: Count of unicode characters written to BIOS if successful, otherwise
+ * -ENOMEM unable to allocate memory
+ * -EINVAL buffer not allocated or too small
+ */
+int hp_wmi_set_bios_setting(u16 *input_buffer, u32 input_size)
+{
+ union acpi_object *obj;
+ struct acpi_buffer input = {input_size, input_buffer};
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ int ret;
+
+ ret = wmi_evaluate_method(HP_WMI_SET_BIOS_SETTING_GUID, 0, 1, &input, &output);
+
+ obj = output.pointer;
+ if (!obj)
+ return -EINVAL;
+
+ if (obj->type != ACPI_TYPE_INTEGER) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ ret = obj->integer.value;
+ if (ret) {
+ ret = hp_wmi_error_and_message(ret);
+ goto out_free;
+ }
+
+out_free:
+ kfree(obj);
+ return ret;
+}
+
+static int hp_attr_set_interface_probe(struct wmi_device *wdev, const void *context)
+{
+ mutex_lock(&bioscfg_drv.mutex);
+ mutex_unlock(&bioscfg_drv.mutex);
+ return 0;
+}
+
+static void hp_attr_set_interface_remove(struct wmi_device *wdev)
+{
+ mutex_lock(&bioscfg_drv.mutex);
+ mutex_unlock(&bioscfg_drv.mutex);
+}
+
+static const struct wmi_device_id hp_attr_set_interface_id_table[] = {
+ { .guid_string = HP_WMI_BIOS_GUID},
+ { }
+};
+
+static struct wmi_driver hp_attr_set_interface_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = hp_attr_set_interface_probe,
+ .remove = hp_attr_set_interface_remove,
+ .id_table = hp_attr_set_interface_id_table,
+};
+
+int hp_init_attr_set_interface(void)
+{
+ return wmi_driver_register(&hp_attr_set_interface_driver);
+}
+
+void hp_exit_attr_set_interface(void)
+{
+ wmi_driver_unregister(&hp_attr_set_interface_driver);
+}
+
+MODULE_DEVICE_TABLE(wmi, hp_attr_set_interface_id_table);
diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
new file mode 100644
index 000000000000..8c4f9e12f018
--- /dev/null
+++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c
@@ -0,0 +1,1063 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common methods for use with hp-bioscfg driver
+ *
+ * Copyright (c) 2022 HP Development Company, L.P.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/wmi.h>
+#include "bioscfg.h"
+#include "../../firmware_attributes_class.h"
+#include <linux/nls.h>
+#include <linux/errno.h>
+
+MODULE_AUTHOR("Jorge Lopez <jorge.lopez2@hp.com>");
+MODULE_DESCRIPTION("HP BIOS Configuration Driver");
+MODULE_LICENSE("GPL");
+
+struct bioscfg_priv bioscfg_drv = {
+ .mutex = __MUTEX_INITIALIZER(bioscfg_drv.mutex),
+};
+
+static struct class *fw_attr_class;
+
+ssize_t display_name_language_code_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", LANG_CODE_STR);
+}
+
+struct kobj_attribute common_display_langcode =
+ __ATTR_RO(display_name_language_code);
+
+int hp_get_integer_from_buffer(u8 **buffer, u32 *buffer_size, u32 *integer)
+{
+ int *ptr = PTR_ALIGN((int *)*buffer, sizeof(int));
+
+ /* Ensure there is enough space remaining to read the integer */
+ if (*buffer_size < sizeof(int))
+ return -EINVAL;
+
+ *integer = *(ptr++);
+ *buffer = (u8 *)ptr;
+ *buffer_size -= sizeof(int);
+
+ return 0;
+}
+
+int hp_get_string_from_buffer(u8 **buffer, u32 *buffer_size, char *dst, u32 dst_size)
+{
+ u16 *src = (u16 *)*buffer;
+ u16 src_size;
+
+ u16 size;
+ int i;
+ int conv_dst_size;
+
+ if (*buffer_size < sizeof(u16))
+ return -EINVAL;
+
+ src_size = *(src++);
+ /* size value in u16 chars */
+ size = src_size / sizeof(u16);
+
+ /* Ensure there is enough space remaining to read and convert
+ * the string
+ */
+ if (*buffer_size < src_size)
+ return -EINVAL;
+
+ for (i = 0; i < size; i++)
+ if (src[i] == '\\' ||
+ src[i] == '\r' ||
+ src[i] == '\n' ||
+ src[i] == '\t')
+ size++;
+
+ /*
+ * Conversion is limited to the maximum number of bytes
+ * the destination string can hold.
+ */
+ conv_dst_size = size;
+ if (size > dst_size)
+ conv_dst_size = dst_size - 1;
+
+ /*
+ * convert from UTF-16 unicode to ASCII
+ */
+ utf16s_to_utf8s(src, src_size, UTF16_HOST_ENDIAN, dst, conv_dst_size);
+ dst[conv_dst_size] = 0;
+
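+ /*
+ * Escape special characters: backslash, CR, LF and TAB are emitted as
+ * two-character sequences ("\\", "\r", "\n", "\t") and double quotes
+ * are replaced with single quotes.
+ */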
+ for (i = 0; i < conv_dst_size; i++) {
+ if (*src == '\\' ||
+ *src == '\r' ||
+ *src == '\n' ||
+ *src == '\t') {
+ dst[i++] = '\\';
+ if (i == conv_dst_size)
+ break;
+ }
+
+ if (*src == '\r')
+ dst[i] = 'r';
+ else if (*src == '\n')
+ dst[i] = 'n';
+ else if (*src == '\t')
+ dst[i] = 't';
+ else if (*src == '"')
+ dst[i] = '\'';
+ else
+ dst[i] = *src;
+ src++;
+ }
+
+ *buffer = (u8 *)src;
+ *buffer_size -= size * sizeof(u16);
+
+ return size;
+}
+
+int hp_get_common_data_from_buffer(u8 **buffer_ptr, u32 *buffer_size,
+ struct common_data *common_data)
+{
+ int ret = 0;
+ int reqs;
+
+ // PATH:
+ ret = hp_get_string_from_buffer(buffer_ptr, buffer_size, common_data->path,
+ sizeof(common_data->path));
+ if (ret < 0)
+ goto common_exit;
+
+ // IS_READONLY:
+ ret = hp_get_integer_from_buffer(buffer_ptr, buffer_size,
+ &common_data->is_readonly);
+ if (ret < 0)
+ goto common_exit;
+
+ //DISPLAY_IN_UI:
+ ret = hp_get_integer_from_buffer(buffer_ptr, buffer_size,
+ &common_data->display_in_ui);
+ if (ret < 0)
+ goto common_exit;
+
+ // REQUIRES_PHYSICAL_PRESENCE:
+ ret = hp_get_integer_from_buffer(buffer_ptr, buffer_size,
+ &common_data->requires_physical_presence);
+ if (ret < 0)
+ goto common_exit;
+
+ // SEQUENCE:
+ ret = hp_get_integer_from_buffer(buffer_ptr, buffer_size,
+ &common_data->sequence);
+ if (ret < 0)
+ goto common_exit;
+
+ // PREREQUISITES_SIZE:
+ ret = hp_get_integer_from_buffer(buffer_ptr, buffer_size,
+ &common_data->prerequisites_size);
+ if (ret < 0)
+ goto common_exit;
+
+ if (common_data->prerequisites_size > MAX_PREREQUISITES_SIZE) {
+ /* Report a message and limit prerequisite size to maximum value */
+ pr_warn("Prerequisites size value exceeded the maximum number of elements supported or data may be malformed\n");
+ common_data->prerequisites_size = MAX_PREREQUISITES_SIZE;
+ }
+
+ // PREREQUISITES:
+ for (reqs = 0; reqs < common_data->prerequisites_size; reqs++) {
+ ret = hp_get_string_from_buffer(buffer_ptr, buffer_size,
+ common_data->prerequisites[reqs],
+ sizeof(common_data->prerequisites[reqs]));
+ if (ret < 0)
+ break;
+ }
+
+ // SECURITY_LEVEL:
+ ret = hp_get_integer_from_buffer(buffer_ptr, buffer_size,
+ &common_data->security_level);
+
+common_exit:
+ return ret;
+}
+
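+/*
+ * hp_enforce_single_line_input() - Strip a single trailing newline from a
+ * sysfs write and reject input that contains an embedded newline.
+ */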
+int hp_enforce_single_line_input(char *buf, size_t count)
+{
+ char *p;
+
+ p = memchr(buf, '\n', count);
+
+ if (p == buf + count - 1)
+ *p = '\0'; /* strip trailing newline */
+ else if (p)
+ return -EINVAL; /* enforce single line input */
+
+ return 0;
+}
+
+/* Set pending reboot value and generate KOBJ_NAME event */
+void hp_set_reboot_and_signal_event(void)
+{
+ bioscfg_drv.pending_reboot = true;
+ kobject_uevent(&bioscfg_drv.class_dev->kobj, KOBJ_CHANGE);
+}
+
+/**
+ * hp_calculate_string_buffer() - determines size of string buffer for
+ * use with BIOS communication
+ *
+ * @str: the string to calculate based upon
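+ *
+ * For example, a 3-character string such as "ABC" needs
+ * sizeof(u16) + 3 * sizeof(u16) = 8 bytes.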
+ */
+size_t hp_calculate_string_buffer(const char *str)
+{
+ size_t length = strlen(str);
+
+ /* BIOS expects 4 bytes when an empty string is found */
+ if (length == 0)
+ return 4;
+
+ /* u16 length field + one UTF16 char for each input char */
+ return sizeof(u16) + strlen(str) * sizeof(u16);
+}
+
+int hp_wmi_error_and_message(int error_code)
+{
+ char *error_msg = NULL;
+ int ret;
+
+ switch (error_code) {
+ case SUCCESS:
+ error_msg = "Success";
+ ret = 0;
+ break;
+ case CMD_FAILED:
+ error_msg = "Command failed";
+ ret = -EINVAL;
+ break;
+ case INVALID_SIGN:
+ error_msg = "Invalid signature";
+ ret = -EINVAL;
+ break;
+ case INVALID_CMD_VALUE:
+ error_msg = "Invalid command value/Feature not supported";
+ ret = -EOPNOTSUPP;
+ break;
+ case INVALID_CMD_TYPE:
+ error_msg = "Invalid command type";
+ ret = -EINVAL;
+ break;
+ case INVALID_DATA_SIZE:
+ error_msg = "Invalid data size";
+ ret = -EINVAL;
+ break;
+ case INVALID_CMD_PARAM:
+ error_msg = "Invalid command parameter";
+ ret = -EINVAL;
+ break;
+ case ENCRYP_CMD_REQUIRED:
+ error_msg = "Secure/encrypted command required";
+ ret = -EACCES;
+ break;
+ case NO_SECURE_SESSION:
+ error_msg = "No secure session established";
+ ret = -EACCES;
+ break;
+ case SECURE_SESSION_FOUND:
+ error_msg = "Secure session already established";
+ ret = -EACCES;
+ break;
+ case SECURE_SESSION_FAILED:
+ error_msg = "Secure session failed";
+ ret = -EIO;
+ break;
+ case AUTH_FAILED:
+ error_msg = "Other permission/Authentication failed";
+ ret = -EACCES;
+ break;
+ case INVALID_BIOS_AUTH:
+ error_msg = "Invalid BIOS administrator password";
+ ret = -EINVAL;
+ break;
+ case NONCE_DID_NOT_MATCH:
+ error_msg = "Nonce did not match";
+ ret = -EINVAL;
+ break;
+ case GENERIC_ERROR:
+ error_msg = "Generic/Other error";
+ ret = -EIO;
+ break;
+ case BIOS_ADMIN_POLICY_NOT_MET:
+ error_msg = "BIOS Admin password does not meet password policy requirements";
+ ret = -EINVAL;
+ break;
+ case BIOS_ADMIN_NOT_SET:
+ error_msg = "BIOS Setup password is not set";
+ ret = -EPERM;
+ break;
+ case P21_NO_PROVISIONED:
+ error_msg = "P21 is not provisioned";
+ ret = -EPERM;
+ break;
+ case P21_PROVISION_IN_PROGRESS:
+ error_msg = "P21 is already provisioned or provisioning is in progress and a signing key has already been sent";
+ ret = -EINPROGRESS;
+ break;
+ case P21_IN_USE:
+ error_msg = "P21 in use (cannot deprovision)";
+ ret = -EPERM;
+ break;
+ case HEP_NOT_ACTIVE:
+ error_msg = "HEP not activated";
+ ret = -EPERM;
+ break;
+ case HEP_ALREADY_SET:
+ error_msg = "HEP Transport already set";
+ ret = -EINVAL;
+ break;
+ case HEP_CHECK_STATE:
+ error_msg = "Check the current HEP state";
+ ret = -EINVAL;
+ break;
+ default:
+ error_msg = "Generic/Other error";
+ ret = -EIO;
+ break;
+ }
+
+ if (error_code)
+ pr_warn_ratelimited("Returned error 0x%x, \"%s\"\n", error_code, error_msg);
+
+ return ret;
+}
+
+static ssize_t pending_reboot_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", bioscfg_drv.pending_reboot);
+}
+
+static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
+
+/*
+ * create_attributes_level_sysfs_files() - Creates pending_reboot attributes
+ */
+static int create_attributes_level_sysfs_files(void)
+{
+ return sysfs_create_file(&bioscfg_drv.main_dir_kset->kobj,
+ &pending_reboot.attr);
+}
+
+static void attr_name_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static const struct kobj_type attr_name_ktype = {
+ .release = attr_name_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/**
+ * hp_get_wmiobj_pointer() - Get Content of WMI block for particular instance
+ *
+ * @instance_id: WMI instance ID
+ * @guid_string: WMI GUID (in str form)
+ *
+ * Fetches the content for WMI block (instance_id) under GUID (guid_string)
+ * Caller must kfree the returned object
+ */
+union acpi_object *hp_get_wmiobj_pointer(int instance_id, const char *guid_string)
+{
+ struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+
+ status = wmi_query_block(guid_string, instance_id, &out);
+ return ACPI_SUCCESS(status) ? (union acpi_object *)out.pointer : NULL;
+}
+
+/**
+ * hp_get_instance_count() - Compute total number of instances under guid_string
+ *
+ * @guid_string: WMI GUID (in string form)
+ */
+int hp_get_instance_count(const char *guid_string)
+{
+ union acpi_object *wmi_obj = NULL;
+ int i = 0;
+
+ do {
+ kfree(wmi_obj);
+ wmi_obj = hp_get_wmiobj_pointer(i, guid_string);
+ i++;
+ } while (wmi_obj);
+
+ return i - 1;
+}
+
+/**
+ * hp_alloc_attributes_data() - Allocate attributes data for a particular type
+ *
+ * @attr_type: Attribute type to allocate
+ */
+static int hp_alloc_attributes_data(int attr_type)
+{
+ switch (attr_type) {
+ case HPWMI_STRING_TYPE:
+ return hp_alloc_string_data();
+
+ case HPWMI_INTEGER_TYPE:
+ return hp_alloc_integer_data();
+
+ case HPWMI_ENUMERATION_TYPE:
+ return hp_alloc_enumeration_data();
+
+ case HPWMI_ORDERED_LIST_TYPE:
+ return hp_alloc_ordered_list_data();
+
+ case HPWMI_PASSWORD_TYPE:
+ return hp_alloc_password_data();
+
+ default:
+ return 0;
+ }
+}
+
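+/*
+ * hp_convert_hexstr_to_str() - Convert a BIOS-provided hex string, a sequence
+ * of "0xHH" tokens five characters apart, into a newly allocated C string.
+ * Special characters are escaped while converting.
+ */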
+int hp_convert_hexstr_to_str(const char *input, u32 input_len, char **str, int *len)
+{
+ int ret = 0;
+ int new_len = 0;
+ char tmp[] = "0x00";
+ char *new_str = NULL;
+ long ch;
+ int i;
+
+ if (input_len <= 0 || !input || !str || !len)
+ return -EINVAL;
+
+ *len = 0;
+ *str = NULL;
+
+ new_str = kmalloc(input_len, GFP_KERNEL);
+ if (!new_str)
+ return -ENOMEM;
+
+ for (i = 0; i < input_len; i += 5) {
+ strncpy(tmp, input + i, strlen(tmp));
+ if (kstrtol(tmp, 16, &ch) == 0) {
+ // escape char
+ if (ch == '\\' ||
+ ch == '\r' ||
+ ch == '\n' || ch == '\t') {
+ if (ch == '\r')
+ ch = 'r';
+ else if (ch == '\n')
+ ch = 'n';
+ else if (ch == '\t')
+ ch = 't';
+ new_str[new_len++] = '\\';
+ }
+ new_str[new_len++] = ch;
+ if (ch == '\0')
+ break;
+ }
+ }
+
+ if (new_len) {
+ new_str[new_len] = '\0';
+ *str = krealloc(new_str, (new_len + 1) * sizeof(char),
+ GFP_KERNEL);
+ if (*str)
+ *len = new_len;
+ else
+ ret = -ENOMEM;
+ } else {
+ ret = -EFAULT;
+ }
+
+ if (ret)
+ kfree(new_str);
+ return ret;
+}
+
+/* map output size to the corresponding WMI method id */
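+/*
+ * outsize 0 maps to method id 1, 1..4 to 2, 5..128 to 3, 129..1024 to 4,
+ * 1025..4096 to 5; anything larger is rejected with -EINVAL.
+ */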
+int hp_encode_outsize_for_pvsz(int outsize)
+{
+ if (outsize > 4096)
+ return -EINVAL;
+ if (outsize > 1024)
+ return 5;
+ if (outsize > 128)
+ return 4;
+ if (outsize > 4)
+ return 3;
+ if (outsize > 0)
+ return 2;
+ return 1;
+}
+
+/*
+ * Update the friendly display name for several attributes associated with
+ * 'Scheduled Power-On'
+ */
+void hp_friendly_user_name_update(char *path, const char *attr_name,
+ char *attr_display, int attr_size)
+{
+ if (strstr(path, SCHEDULE_POWER_ON))
+ snprintf(attr_display, attr_size, "%s - %s", SCHEDULE_POWER_ON, attr_name);
+ else
+ strscpy(attr_display, attr_name, attr_size);
+}
+
+/**
+ * hp_update_attribute_permissions() - Update attributes permissions when
+ * isReadOnly value is 1
+ *
+ * @is_readonly: bool value to indicate if it is a read-only attribute.
+ * @current_val: kobj_attribute corresponding to attribute.
+ *
+ */
+void hp_update_attribute_permissions(bool is_readonly, struct kobj_attribute *current_val)
+{
+ current_val->attr.mode = is_readonly ? 0444 : 0644;
+}
+
+/**
+ * destroy_attribute_objs() - Free a kset of kobjects
+ * @kset: The kset to destroy
+ *
+ * Frees the kobjects created for each attribute_name under the attribute type kset
+ */
+static void destroy_attribute_objs(struct kset *kset)
+{
+ struct kobject *pos, *next;
+
+ list_for_each_entry_safe(pos, next, &kset->list, entry)
+ kobject_put(pos);
+}
+
+/**
+ * release_attributes_data() - Clean-up all sysfs directories and files created
+ */
+static void release_attributes_data(void)
+{
+ mutex_lock(&bioscfg_drv.mutex);
+
+ hp_exit_string_attributes();
+ hp_exit_integer_attributes();
+ hp_exit_enumeration_attributes();
+ hp_exit_ordered_list_attributes();
+ hp_exit_password_attributes();
+ hp_exit_sure_start_attributes();
+ hp_exit_secure_platform_attributes();
+
+ if (bioscfg_drv.authentication_dir_kset) {
+ destroy_attribute_objs(bioscfg_drv.authentication_dir_kset);
+ kset_unregister(bioscfg_drv.authentication_dir_kset);
+ bioscfg_drv.authentication_dir_kset = NULL;
+ }
+ if (bioscfg_drv.main_dir_kset) {
+ sysfs_remove_file(&bioscfg_drv.main_dir_kset->kobj, &pending_reboot.attr);
+ destroy_attribute_objs(bioscfg_drv.main_dir_kset);
+ kset_unregister(bioscfg_drv.main_dir_kset);
+ bioscfg_drv.main_dir_kset = NULL;
+ }
+ mutex_unlock(&bioscfg_drv.mutex);
+}
+
+/**
+ * hp_add_other_attributes() - Initialize HP custom attributes not
+ * reported by BIOS and required to support Secure Platform and Sure
+ * Start.
+ *
+ * @attr_type: Custom HP attribute not reported by BIOS
+ *
+ * Initializes both attribute types: Secure Platform and Sure Start.
+ * Populates each attribute type's respective properties under
+ * sysfs files.
+ *
+ * Returns zero (0) if successful. Otherwise, a negative value.
+ */
+static int hp_add_other_attributes(int attr_type)
+{
+ struct kobject *attr_name_kobj;
+ union acpi_object *obj = NULL;
+ int ret;
+ char *attr_name;
+
+ mutex_lock(&bioscfg_drv.mutex);
+
+ attr_name_kobj = kzalloc(sizeof(*attr_name_kobj), GFP_KERNEL);
+ if (!attr_name_kobj) {
+ ret = -ENOMEM;
+ goto err_other_attr_init;
+ }
+
+ /* Check if attribute type is supported */
+ switch (attr_type) {
+ case HPWMI_SECURE_PLATFORM_TYPE:
+ attr_name_kobj->kset = bioscfg_drv.authentication_dir_kset;
+ attr_name = SPM_STR;
+ break;
+
+ case HPWMI_SURE_START_TYPE:
+ attr_name_kobj->kset = bioscfg_drv.main_dir_kset;
+ attr_name = SURE_START_STR;
+ break;
+
+ default:
+ pr_err("Error: Unknown attr_type: %d\n", attr_type);
+ ret = -EINVAL;
+ goto err_other_attr_init;
+ }
+
+ ret = kobject_init_and_add(attr_name_kobj, &attr_name_ktype,
+ NULL, "%s", attr_name);
+ if (ret) {
+ pr_err("Error encountered [%d]\n", ret);
+ kobject_put(attr_name_kobj);
+ goto err_other_attr_init;
+ }
+
+ /* Populate attribute data */
+ switch (attr_type) {
+ case HPWMI_SECURE_PLATFORM_TYPE:
+ ret = hp_populate_secure_platform_data(attr_name_kobj);
+ if (ret)
+ goto err_other_attr_init;
+ break;
+
+ case HPWMI_SURE_START_TYPE:
+ ret = hp_populate_sure_start_data(attr_name_kobj);
+ if (ret)
+ goto err_other_attr_init;
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto err_other_attr_init;
+ }
+
+ mutex_unlock(&bioscfg_drv.mutex);
+ return 0;
+
+err_other_attr_init:
+ mutex_unlock(&bioscfg_drv.mutex);
+ kfree(obj);
+ return ret;
+}
+
+static int hp_init_bios_package_attribute(enum hp_wmi_data_type attr_type,
+ union acpi_object *obj,
+ const char *guid, int min_elements,
+ int instance_id)
+{
+ struct kobject *attr_name_kobj;
+ union acpi_object *elements;
+ struct kset *temp_kset;
+
+ char *str_value = NULL;
+ int str_len;
+ int ret = 0;
+
+ /* Take action appropriate to each ACPI TYPE */
+ if (obj->package.count < min_elements) {
+ pr_err("ACPI-package does not have enough elements: %d < %d\n",
+ obj->package.count, min_elements);
+ goto pack_attr_exit;
+ }
+
+ elements = obj->package.elements;
+
+ /* sanity checking */
+ if (elements[NAME].type != ACPI_TYPE_STRING) {
+ pr_debug("incorrect element type\n");
+ goto pack_attr_exit;
+ }
+ if (strlen(elements[NAME].string.pointer) == 0) {
+ pr_debug("empty attribute found\n");
+ goto pack_attr_exit;
+ }
+
+ if (attr_type == HPWMI_PASSWORD_TYPE)
+ temp_kset = bioscfg_drv.authentication_dir_kset;
+ else
+ temp_kset = bioscfg_drv.main_dir_kset;
+
+ /* convert attribute name to string */
+ ret = hp_convert_hexstr_to_str(elements[NAME].string.pointer,
+ elements[NAME].string.length,
+ &str_value, &str_len);
+
+ if (ret) {
+ pr_debug("Failed to populate integer package data. Error [0%0x]\n",
+ ret);
+ kfree(str_value);
+ return ret;
+ }
+
+ /* All duplicate attributes found are ignored */
+ if (kset_find_obj(temp_kset, str_value)) {
+ pr_debug("Duplicate attribute name found - %s\n", str_value);
+ goto pack_attr_exit;
+ }
+
+ /* build attribute */
+ attr_name_kobj = kzalloc(sizeof(*attr_name_kobj), GFP_KERNEL);
+ if (!attr_name_kobj) {
+ ret = -ENOMEM;
+ goto pack_attr_exit;
+ }
+
+ attr_name_kobj->kset = temp_kset;
+
+ ret = kobject_init_and_add(attr_name_kobj, &attr_name_ktype,
+ NULL, "%s", str_value);
+
+ if (ret) {
+ kobject_put(attr_name_kobj);
+ goto pack_attr_exit;
+ }
+
+ /* enumerate all of these attributes */
+ switch (attr_type) {
+ case HPWMI_STRING_TYPE:
+ ret = hp_populate_string_package_data(elements,
+ instance_id,
+ attr_name_kobj);
+ break;
+ case HPWMI_INTEGER_TYPE:
+ ret = hp_populate_integer_package_data(elements,
+ instance_id,
+ attr_name_kobj);
+ break;
+ case HPWMI_ENUMERATION_TYPE:
+ ret = hp_populate_enumeration_package_data(elements,
+ instance_id,
+ attr_name_kobj);
+ break;
+ case HPWMI_ORDERED_LIST_TYPE:
+ ret = hp_populate_ordered_list_package_data(elements,
+ instance_id,
+ attr_name_kobj);
+ break;
+ case HPWMI_PASSWORD_TYPE:
+ ret = hp_populate_password_package_data(elements,
+ instance_id,
+ attr_name_kobj);
+ break;
+ default:
+ pr_debug("Unknown attribute type found: 0x%x\n", attr_type);
+ break;
+ }
+
+pack_attr_exit:
+ kfree(str_value);
+ return ret;
+}
+
+static int hp_init_bios_buffer_attribute(enum hp_wmi_data_type attr_type,
+ union acpi_object *obj,
+ const char *guid, int min_elements,
+ int instance_id)
+{
+ struct kobject *attr_name_kobj;
+ struct kset *temp_kset;
+ char str[MAX_BUFF_SIZE];
+
+ char *temp_str = NULL;
+ char *str_value = NULL;
+ u8 *buffer_ptr = NULL;
+ int buffer_size;
+ int ret = 0;
+
+ buffer_size = obj->buffer.length;
+ buffer_ptr = obj->buffer.pointer;
+
+ ret = hp_get_string_from_buffer(&buffer_ptr,
+ &buffer_size, str, MAX_BUFF_SIZE);
+
+ if (ret < 0)
+ goto buff_attr_exit;
+
+ if (attr_type == HPWMI_PASSWORD_TYPE ||
+ attr_type == HPWMI_SECURE_PLATFORM_TYPE)
+ temp_kset = bioscfg_drv.authentication_dir_kset;
+ else
+ temp_kset = bioscfg_drv.main_dir_kset;
+
+ /* All duplicate attributes found are ignored */
+ if (kset_find_obj(temp_kset, str)) {
+ pr_debug("Duplicate attribute name found - %s\n", str);
+ goto buff_attr_exit;
+ }
+
+ /* build attribute */
+ attr_name_kobj = kzalloc(sizeof(*attr_name_kobj), GFP_KERNEL);
+ if (!attr_name_kobj) {
+ ret = -ENOMEM;
+ goto buff_attr_exit;
+ }
+
+ attr_name_kobj->kset = temp_kset;
+
+ temp_str = str;
+ if (attr_type == HPWMI_SECURE_PLATFORM_TYPE)
+ temp_str = "SPM";
+
+ ret = kobject_init_and_add(attr_name_kobj,
+ &attr_name_ktype, NULL, "%s", temp_str);
+ if (ret) {
+ kobject_put(attr_name_kobj);
+ goto buff_attr_exit;
+ }
+
+ /* enumerate all of these attributes */
+ switch (attr_type) {
+ case HPWMI_STRING_TYPE:
+ ret = hp_populate_string_buffer_data(buffer_ptr,
+ &buffer_size,
+ instance_id,
+ attr_name_kobj);
+ break;
+ case HPWMI_INTEGER_TYPE:
+ ret = hp_populate_integer_buffer_data(buffer_ptr,
+ &buffer_size,
+ instance_id,
+ attr_name_kobj);
+ break;
+ case HPWMI_ENUMERATION_TYPE:
+ ret = hp_populate_enumeration_buffer_data(buffer_ptr,
+ &buffer_size,
+ instance_id,
+ attr_name_kobj);
+ break;
+ case HPWMI_ORDERED_LIST_TYPE:
+ ret = hp_populate_ordered_list_buffer_data(buffer_ptr,
+ &buffer_size,
+ instance_id,
+ attr_name_kobj);
+ break;
+ case HPWMI_PASSWORD_TYPE:
+ ret = hp_populate_password_buffer_data(buffer_ptr,
+ &buffer_size,
+ instance_id,
+ attr_name_kobj);
+ break;
+ default:
+ pr_debug("Unknown attribute type found: 0x%x\n", attr_type);
+ break;
+ }
+
+buff_attr_exit:
+ kfree(str_value);
+ return ret;
+}
+
+/**
+ * hp_init_bios_attributes() - Initialize all attributes for a type
+ * @attr_type: The attribute type to initialize
+ * @guid: The WMI GUID associated with this type to initialize
+ *
+ * Initializes all five attribute types: enumeration, integer,
+ * string, password and ordered list. Populates each attribute type's
+ * respective properties under sysfs files.
+ */
+static int hp_init_bios_attributes(enum hp_wmi_data_type attr_type, const char *guid)
+{
+ union acpi_object *obj = NULL;
+ int min_elements;
+
+ /* instance_id needs to be reset for each attribute type GUID;
+ * instance IDs are unique within a GUID but not across GUIDs
+ */
+ int instance_id = 0;
+ int cur_instance_id = instance_id;
+ int ret = 0;
+
+ ret = hp_alloc_attributes_data(attr_type);
+ if (ret)
+ return ret;
+
+ switch (attr_type) {
+ case HPWMI_STRING_TYPE:
+ min_elements = STR_ELEM_CNT;
+ break;
+ case HPWMI_INTEGER_TYPE:
+ min_elements = INT_ELEM_CNT;
+ break;
+ case HPWMI_ENUMERATION_TYPE:
+ min_elements = ENUM_ELEM_CNT;
+ break;
+ case HPWMI_ORDERED_LIST_TYPE:
+ min_elements = ORD_ELEM_CNT;
+ break;
+ case HPWMI_PASSWORD_TYPE:
+ min_elements = PSWD_ELEM_CNT;
+ break;
+ default:
+ pr_err("Error: Unknown attr_type: %d\n", attr_type);
+ return -EINVAL;
+ }
+
+ /* need to use specific instance_id and guid combination to get right data */
+ obj = hp_get_wmiobj_pointer(instance_id, guid);
+ if (!obj)
+ return -ENODEV;
+
+ mutex_lock(&bioscfg_drv.mutex);
+ while (obj) {
+ /* Take action appropriate to each ACPI TYPE */
+ if (obj->type == ACPI_TYPE_PACKAGE) {
+ ret = hp_init_bios_package_attribute(attr_type, obj,
+ guid, min_elements,
+ cur_instance_id);
+
+ } else if (obj->type == ACPI_TYPE_BUFFER) {
+ ret = hp_init_bios_buffer_attribute(attr_type, obj,
+ guid, min_elements,
+ cur_instance_id);
+
+ } else {
+ pr_err("Expected ACPI-package or buffer type, got: %d\n",
+ obj->type);
+ ret = -EIO;
+ goto err_attr_init;
+ }
+
+ /*
+ * A failure reported for one attribute must not
+ * stop processing of the remaining attribute values.
+ */
+ if (ret >= 0)
+ cur_instance_id++;
+
+ kfree(obj);
+ instance_id++;
+ obj = hp_get_wmiobj_pointer(instance_id, guid);
+ }
+
+err_attr_init:
+ mutex_unlock(&bioscfg_drv.mutex);
+ kfree(obj);
+ return ret;
+}
+
+static int __init hp_init(void)
+{
+ int ret;
+ int hp_bios_capable = wmi_has_guid(HP_WMI_BIOS_GUID);
+ int set_bios_settings = wmi_has_guid(HP_WMI_SET_BIOS_SETTING_GUID);
+
+ if (!hp_bios_capable) {
+ pr_err("Unable to run on non-HP system\n");
+ return -ENODEV;
+ }
+
+ if (!set_bios_settings) {
+ pr_err("Unable to set BIOS settings on HP systems\n");
+ return -ENODEV;
+ }
+
+ ret = hp_init_attr_set_interface();
+ if (ret)
+ return ret;
+
+ ret = fw_attributes_class_get(&fw_attr_class);
+ if (ret)
+ goto err_unregister_class;
+
+ bioscfg_drv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
+ NULL, "%s", DRIVER_NAME);
+ if (IS_ERR(bioscfg_drv.class_dev)) {
+ ret = PTR_ERR(bioscfg_drv.class_dev);
+ goto err_unregister_class;
+ }
+
+ bioscfg_drv.main_dir_kset = kset_create_and_add("attributes", NULL,
+ &bioscfg_drv.class_dev->kobj);
+ if (!bioscfg_drv.main_dir_kset) {
+ ret = -ENOMEM;
+ pr_debug("Failed to create and add attributes\n");
+ goto err_destroy_classdev;
+ }
+
+ bioscfg_drv.authentication_dir_kset = kset_create_and_add("authentication", NULL,
+ &bioscfg_drv.class_dev->kobj);
+ if (!bioscfg_drv.authentication_dir_kset) {
+ ret = -ENOMEM;
+ pr_debug("Failed to create and add authentication\n");
+ goto err_release_attributes_data;
+ }
+
+ /*
+ * sysfs level attributes.
+ * - pending_reboot
+ */
+ ret = create_attributes_level_sysfs_files();
+ if (ret)
+ pr_debug("Failed to create sysfs level attributes\n");
+
+ ret = hp_init_bios_attributes(HPWMI_STRING_TYPE, HP_WMI_BIOS_STRING_GUID);
+ if (ret)
+ pr_debug("Failed to populate string type attributes\n");
+
+ ret = hp_init_bios_attributes(HPWMI_INTEGER_TYPE, HP_WMI_BIOS_INTEGER_GUID);
+ if (ret)
+ pr_debug("Failed to populate integer type attributes\n");
+
+ ret = hp_init_bios_attributes(HPWMI_ENUMERATION_TYPE, HP_WMI_BIOS_ENUMERATION_GUID);
+ if (ret)
+ pr_debug("Failed to populate enumeration type attributes\n");
+
+ ret = hp_init_bios_attributes(HPWMI_ORDERED_LIST_TYPE, HP_WMI_BIOS_ORDERED_LIST_GUID);
+ if (ret)
+ pr_debug("Failed to populate ordered list object type attributes\n");
+
+ ret = hp_init_bios_attributes(HPWMI_PASSWORD_TYPE, HP_WMI_BIOS_PASSWORD_GUID);
+ if (ret)
+ pr_debug("Failed to populate password object type attributes\n");
+
+ bioscfg_drv.spm_data.attr_name_kobj = NULL;
+ ret = hp_add_other_attributes(HPWMI_SECURE_PLATFORM_TYPE);
+ if (ret)
+ pr_debug("Failed to populate secure platform object type attribute\n");
+
+ bioscfg_drv.sure_start_attr_kobj = NULL;
+ ret = hp_add_other_attributes(HPWMI_SURE_START_TYPE);
+ if (ret)
+ pr_debug("Failed to populate sure start object type attribute\n");
+
+ return 0;
+
+err_release_attributes_data:
+ release_attributes_data();
+
+err_destroy_classdev:
+ device_destroy(fw_attr_class, MKDEV(0, 0));
+
+err_unregister_class:
+ fw_attributes_class_put();
+ hp_exit_attr_set_interface();
+
+ return ret;
+}
+
+static void __exit hp_exit(void)
+{
+ release_attributes_data();
+ device_destroy(fw_attr_class, MKDEV(0, 0));
+
+ fw_attributes_class_put();
+ hp_exit_attr_set_interface();
+}
+
+module_init(hp_init);
+module_exit(hp_exit);
diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.h b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.h
new file mode 100644
index 000000000000..3166ef328eba
--- /dev/null
+++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.h
@@ -0,0 +1,487 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Definitions for kernel modules using hp_bioscfg driver
+ *
+ * Copyright (c) 2022 HP Development Company, L.P.
+ */
+
+#ifndef _HP_BIOSCFG_H_
+#define _HP_BIOSCFG_H_
+
+#include <linux/wmi.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/nls.h>
+
+#define DRIVER_NAME "hp-bioscfg"
+
+#define MAX_BUFF_SIZE 512
+#define MAX_KEY_MOD_SIZE 256
+#define MAX_PASSWD_SIZE 64
+#define MAX_PREREQUISITES_SIZE 20
+#define MAX_REQ_ELEM_SIZE 128
+#define MAX_VALUES_SIZE 16
+#define MAX_ENCODINGS_SIZE 16
+#define MAX_ELEMENTS_SIZE 16
+
+#define SPM_STR_DESC "Secure Platform Management"
+#define SPM_STR "SPM"
+#define SURE_START_DESC "Sure Start"
+#define SURE_START_STR "Sure_Start"
+#define SETUP_PASSWD "Setup Password"
+#define POWER_ON_PASSWD "Power-On Password"
+
+#define LANG_CODE_STR "en_US.UTF-8"
+#define SCHEDULE_POWER_ON "Scheduled Power-On"
+
+#define COMMA_SEP ","
+#define SEMICOLON_SEP ";"
+
+/* Sure Admin Functions */
+
+#define UTF_PREFIX "<utf-16/>"
+#define BEAM_PREFIX "<BEAM/>"
+
+enum mechanism_values {
+ PASSWORD = 0x00,
+ SIGNING_KEY = 0x01,
+ ENDORSEMENT_KEY = 0x02,
+};
+
+#define BIOS_ADMIN "bios-admin"
+#define POWER_ON "power-on"
+#define BIOS_SPM "enhanced-bios-auth"
+
+#define PASSWD_MECHANISM_TYPES "password"
+
+#define HP_WMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
+
+#define HP_WMI_BIOS_STRING_GUID "988D08E3-68F4-4c35-AF3E-6A1B8106F83C"
+#define HP_WMI_BIOS_INTEGER_GUID "8232DE3D-663D-4327-A8F4-E293ADB9BF05"
+#define HP_WMI_BIOS_ENUMERATION_GUID "2D114B49-2DFB-4130-B8FE-4A3C09E75133"
+#define HP_WMI_BIOS_ORDERED_LIST_GUID "14EA9746-CE1F-4098-A0E0-7045CB4DA745"
+#define HP_WMI_BIOS_PASSWORD_GUID "322F2028-0F84-4901-988E-015176049E2D"
+#define HP_WMI_SET_BIOS_SETTING_GUID "1F4C91EB-DC5C-460b-951D-C7CB9B4B8D5E"
+
+enum hp_wmi_spm_commandtype {
+ HPWMI_SECUREPLATFORM_GET_STATE = 0x10,
+ HPWMI_SECUREPLATFORM_SET_KEK = 0x11,
+ HPWMI_SECUREPLATFORM_SET_SK = 0x12,
+};
+
+enum hp_wmi_surestart_commandtype {
+ HPWMI_SURESTART_GET_LOG_COUNT = 0x01,
+ HPWMI_SURESTART_GET_LOG = 0x02,
+};
+
+enum hp_wmi_command {
+ HPWMI_READ = 0x01,
+ HPWMI_WRITE = 0x02,
+ HPWMI_ODM = 0x03,
+ HPWMI_SURESTART = 0x20006,
+ HPWMI_GM = 0x20008,
+ HPWMI_SECUREPLATFORM = 0x20010,
+};
+
+struct bios_return {
+ u32 sigpass;
+ u32 return_code;
+};
+
+enum wmi_error_values {
+ SUCCESS = 0x00,
+ CMD_FAILED = 0x01,
+ INVALID_SIGN = 0x02,
+ INVALID_CMD_VALUE = 0x03,
+ INVALID_CMD_TYPE = 0x04,
+ INVALID_DATA_SIZE = 0x05,
+ INVALID_CMD_PARAM = 0x06,
+ ENCRYP_CMD_REQUIRED = 0x07,
+ NO_SECURE_SESSION = 0x08,
+ SECURE_SESSION_FOUND = 0x09,
+ SECURE_SESSION_FAILED = 0x0A,
+ AUTH_FAILED = 0x0B,
+ INVALID_BIOS_AUTH = 0x0E,
+ NONCE_DID_NOT_MATCH = 0x18,
+ GENERIC_ERROR = 0x1C,
+ BIOS_ADMIN_POLICY_NOT_MET = 0x28,
+ BIOS_ADMIN_NOT_SET = 0x38,
+ P21_NO_PROVISIONED = 0x1000,
+ P21_PROVISION_IN_PROGRESS = 0x1001,
+ P21_IN_USE = 0x1002,
+ HEP_NOT_ACTIVE = 0x1004,
+ HEP_ALREADY_SET = 0x1006,
+ HEP_CHECK_STATE = 0x1007,
+};
+
+struct common_data {
+ u8 display_name[MAX_BUFF_SIZE];
+ u8 path[MAX_BUFF_SIZE];
+ u32 is_readonly;
+ u32 display_in_ui;
+ u32 requires_physical_presence;
+ u32 sequence;
+ u32 prerequisites_size;
+ u8 prerequisites[MAX_PREREQUISITES_SIZE][MAX_BUFF_SIZE];
+ u32 security_level;
+};
+
+struct string_data {
+ struct common_data common;
+ struct kobject *attr_name_kobj;
+ u8 current_value[MAX_BUFF_SIZE];
+ u8 new_value[MAX_BUFF_SIZE];
+ u32 min_length;
+ u32 max_length;
+};
+
+struct integer_data {
+ struct common_data common;
+ struct kobject *attr_name_kobj;
+ u32 current_value;
+ u32 new_value;
+ u32 lower_bound;
+ u32 upper_bound;
+ u32 scalar_increment;
+};
+
+struct enumeration_data {
+ struct common_data common;
+ struct kobject *attr_name_kobj;
+ u8 current_value[MAX_BUFF_SIZE];
+ u8 new_value[MAX_BUFF_SIZE];
+ u32 possible_values_size;
+ u8 possible_values[MAX_VALUES_SIZE][MAX_BUFF_SIZE];
+};
+
+struct ordered_list_data {
+ struct common_data common;
+ struct kobject *attr_name_kobj;
+ u8 current_value[MAX_BUFF_SIZE];
+ u8 new_value[MAX_BUFF_SIZE];
+ u32 elements_size;
+ u8 elements[MAX_ELEMENTS_SIZE][MAX_BUFF_SIZE];
+};
+
+struct password_data {
+ struct common_data common;
+ struct kobject *attr_name_kobj;
+ u8 current_password[MAX_PASSWD_SIZE];
+ u8 new_password[MAX_PASSWD_SIZE];
+ u32 min_password_length;
+ u32 max_password_length;
+ u32 encodings_size;
+ u8 encodings[MAX_ENCODINGS_SIZE][MAX_BUFF_SIZE];
+ bool is_enabled;
+
+ /*
+ * 'role' identifies the type of authentication.
+ * Two known types are bios-admin and power-on.
+ * 'bios-admin' represents BIOS administrator password
+ * 'power-on' represents a password required to use the system
+ */
+ u32 role;
+
+ /*
+ * 'mechanism' represents the means of authentication.
+ * Only supported type currently is "password"
+ */
+ u32 mechanism;
+};
+
+struct secure_platform_data {
+ struct kobject *attr_name_kobj;
+ u8 attribute_name[MAX_BUFF_SIZE];
+ u8 *endorsement_key;
+ u8 *signing_key;
+ u8 *auth_token;
+ bool is_enabled;
+ u32 mechanism;
+};
+
+struct bioscfg_priv {
+ struct kset *authentication_dir_kset;
+ struct kset *main_dir_kset;
+ struct device *class_dev;
+ struct string_data *string_data;
+ u32 string_instances_count;
+ struct integer_data *integer_data;
+ u32 integer_instances_count;
+ struct enumeration_data *enumeration_data;
+ u32 enumeration_instances_count;
+ struct ordered_list_data *ordered_list_data;
+ u32 ordered_list_instances_count;
+ struct password_data *password_data;
+ u32 password_instances_count;
+
+ struct kobject *sure_start_attr_kobj;
+ struct secure_platform_data spm_data;
+ u8 display_name_language_code[MAX_BUFF_SIZE];
+ bool pending_reboot;
+ struct mutex mutex;
+};
+
+/* global structure used by multiple WMI interfaces */
+extern struct bioscfg_priv bioscfg_drv;
+
+enum hp_wmi_data_type {
+ HPWMI_STRING_TYPE,
+ HPWMI_INTEGER_TYPE,
+ HPWMI_ENUMERATION_TYPE,
+ HPWMI_ORDERED_LIST_TYPE,
+ HPWMI_PASSWORD_TYPE,
+ HPWMI_SECURE_PLATFORM_TYPE,
+ HPWMI_SURE_START_TYPE,
+};
+
+enum hp_wmi_data_elements {
+ /* Common elements */
+ NAME = 0,
+ VALUE = 1,
+ PATH = 2,
+ IS_READONLY = 3,
+ DISPLAY_IN_UI = 4,
+ REQUIRES_PHYSICAL_PRESENCE = 5,
+ SEQUENCE = 6,
+ PREREQUISITES_SIZE = 7,
+ PREREQUISITES = 8,
+ SECURITY_LEVEL = 9,
+
+ /* String elements */
+ STR_MIN_LENGTH = 10,
+ STR_MAX_LENGTH = 11,
+ STR_ELEM_CNT = 12,
+
+ /* Integer elements */
+ INT_LOWER_BOUND = 10,
+ INT_UPPER_BOUND = 11,
+ INT_SCALAR_INCREMENT = 12,
+ INT_ELEM_CNT = 13,
+
+ /* Enumeration elements */
+ ENUM_CURRENT_VALUE = 10,
+ ENUM_SIZE = 11,
+ ENUM_POSSIBLE_VALUES = 12,
+ ENUM_ELEM_CNT = 13,
+
+ /* Ordered list elements */
+ ORD_LIST_SIZE = 10,
+ ORD_LIST_ELEMENTS = 11,
+ ORD_ELEM_CNT = 12,
+
+ /* Password elements */
+ PSWD_MIN_LENGTH = 10,
+ PSWD_MAX_LENGTH = 11,
+ PSWD_SIZE = 12,
+ PSWD_ENCODINGS = 13,
+ PSWD_IS_SET = 14,
+ PSWD_ELEM_CNT = 15,
+};
+
+#define GET_INSTANCE_ID(type) \
+ static int get_##type##_instance_id(struct kobject *kobj) \
+ { \
+ int i; \
+ \
+ for (i = 0; i <= bioscfg_drv.type##_instances_count; i++) { \
+ if (!strcmp(kobj->name, bioscfg_drv.type##_data[i].attr_name_kobj->name)) \
+ return i; \
+ } \
+ return -EIO; \
+ }
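+
+/*
+ * For example, GET_INSTANCE_ID(enumeration) defines
+ * get_enumeration_instance_id(), which maps a kobject back to its index
+ * in bioscfg_drv.enumeration_data[].
+ */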
+
+#define ATTRIBUTE_S_PROPERTY_SHOW(name, type) \
+ static ssize_t name##_show(struct kobject *kobj, struct kobj_attribute *attr, \
+ char *buf) \
+ { \
+ int i = get_##type##_instance_id(kobj); \
+ if (i >= 0) \
+ return sysfs_emit(buf, "%s\n", bioscfg_drv.type##_data[i].name); \
+ return -EIO; \
+ }
+
+#define ATTRIBUTE_N_PROPERTY_SHOW(name, type) \
+ static ssize_t name##_show(struct kobject *kobj, struct kobj_attribute *attr, \
+ char *buf) \
+ { \
+ int i = get_##type##_instance_id(kobj); \
+ if (i >= 0) \
+ return sysfs_emit(buf, "%d\n", bioscfg_drv.type##_data[i].name); \
+ return -EIO; \
+ }
+
+#define ATTRIBUTE_PROPERTY_STORE(curr_val, type) \
+ static ssize_t curr_val##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ char *attr_value = NULL; \
+ int i; \
+ int ret = -EIO; \
+ \
+ attr_value = kstrdup(buf, GFP_KERNEL); \
+ if (!attr_value) \
+ return -ENOMEM; \
+ \
+ ret = hp_enforce_single_line_input(attr_value, count); \
+ if (!ret) { \
+ i = get_##type##_instance_id(kobj); \
+ if (i >= 0) \
+ ret = validate_##type##_input(i, attr_value); \
+ } \
+ if (!ret) \
+ ret = hp_set_attribute(kobj->name, attr_value); \
+ if (!ret) { \
+ update_##type##_value(i, attr_value); \
+ if (bioscfg_drv.type##_data[i].common.requires_physical_presence) \
+ hp_set_reboot_and_signal_event(); \
+ } \
+ hp_clear_all_credentials(); \
+ kfree(attr_value); \
+ \
+ return ret ? ret : count; \
+ }
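+
+/*
+ * The generated _store() helpers all follow the same sequence: enforce a
+ * single-line input, validate it against the instance data, write it to
+ * BIOS through hp_set_attribute(), update the cached value, signal a
+ * pending reboot when physical presence is required, and finally clear
+ * all cached credentials.
+ */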
+
+#define ATTRIBUTE_SPM_N_PROPERTY_SHOW(name, type) \
+ static ssize_t name##_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
+ { \
+ return sysfs_emit(buf, "%d\n", bioscfg_drv.type##_data.name); \
+ }
+
+#define ATTRIBUTE_SPM_S_PROPERTY_SHOW(name, type) \
+ static ssize_t name##_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
+ { \
+ return sysfs_emit(buf, "%s\n", bioscfg_drv.type##_data.name); \
+ }
+
+#define ATTRIBUTE_VALUES_PROPERTY_SHOW(name, type, sep) \
+ static ssize_t name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ int i; \
+ int len = 0; \
+ int instance_id = get_##type##_instance_id(kobj); \
+ \
+ if (instance_id < 0) \
+ return 0; \
+ \
+ for (i = 0; i < bioscfg_drv.type##_data[instance_id].name##_size; i++) { \
+ if (i) \
+ len += sysfs_emit_at(buf, len, "%s", sep); \
+ \
+ len += sysfs_emit_at(buf, len, "%s", \
+ bioscfg_drv.type##_data[instance_id].name[i]); \
+ } \
+ len += sysfs_emit_at(buf, len, "\n"); \
+ return len; \
+ }
+
+#define ATTRIBUTE_S_COMMON_PROPERTY_SHOW(name, type) \
+ static ssize_t name##_show(struct kobject *kobj, struct kobj_attribute *attr, \
+ char *buf) \
+ { \
+ int i = get_##type##_instance_id(kobj); \
+ if (i >= 0) \
+ return sysfs_emit(buf, "%s\n", bioscfg_drv.type##_data[i].common.name); \
+ return -EIO; \
+ }
+
+extern struct kobj_attribute common_display_langcode;
+
+/* Prototypes */
+
+/* String attributes */
+int hp_populate_string_buffer_data(u8 *buffer_ptr, u32 *buffer_size,
+ int instance_id,
+ struct kobject *attr_name_kobj);
+int hp_alloc_string_data(void);
+void hp_exit_string_attributes(void);
+int hp_populate_string_package_data(union acpi_object *str_obj,
+ int instance_id,
+ struct kobject *attr_name_kobj);
+
+/* Integer attributes */
+int hp_populate_integer_buffer_data(u8 *buffer_ptr, u32 *buffer_size,
+ int instance_id,
+ struct kobject *attr_name_kobj);
+int hp_alloc_integer_data(void);
+void hp_exit_integer_attributes(void);
+int hp_populate_integer_package_data(union acpi_object *integer_obj,
+ int instance_id,
+ struct kobject *attr_name_kobj);
+
+/* Enumeration attributes */
+int hp_populate_enumeration_buffer_data(u8 *buffer_ptr, u32 *buffer_size,
+ int instance_id,
+ struct kobject *attr_name_kobj);
+int hp_alloc_enumeration_data(void);
+void hp_exit_enumeration_attributes(void);
+int hp_populate_enumeration_package_data(union acpi_object *enum_obj,
+ int instance_id,
+ struct kobject *attr_name_kobj);
+
+/* Ordered list */
+int hp_populate_ordered_list_buffer_data(u8 *buffer_ptr,
+ u32 *buffer_size,
+ int instance_id,
+ struct kobject *attr_name_kobj);
+int hp_alloc_ordered_list_data(void);
+void hp_exit_ordered_list_attributes(void);
+int hp_populate_ordered_list_package_data(union acpi_object *order_obj,
+ int instance_id,
+ struct kobject *attr_name_kobj);
+
+/* Password authentication attributes */
+int hp_populate_password_buffer_data(u8 *buffer_ptr, u32 *buffer_size,
+ int instance_id,
+ struct kobject *attr_name_kobj);
+int hp_populate_password_package_data(union acpi_object *password_obj,
+ int instance_id,
+ struct kobject *attr_name_kobj);
+int hp_alloc_password_data(void);
+int hp_get_password_instance_for_type(const char *name);
+int hp_clear_all_credentials(void);
+int hp_set_attribute(const char *a_name, const char *a_value);
+
+/* SPM attributes */
+void hp_exit_password_attributes(void);
+void hp_exit_secure_platform_attributes(void);
+int hp_populate_secure_platform_data(struct kobject *attr_name_kobj);
+int hp_populate_security_buffer(u16 *buffer, const char *authentication);
+
+/* Bios Attributes interface */
+int hp_wmi_set_bios_setting(u16 *input_buffer, u32 input_size);
+int hp_wmi_perform_query(int query, enum hp_wmi_command command,
+ void *buffer, u32 insize, u32 outsize);
+
+/* Sure Start attributes */
+void hp_exit_sure_start_attributes(void);
+int hp_populate_sure_start_data(struct kobject *attr_name_kobj);
+
+/* Bioscfg */
+
+void hp_exit_attr_set_interface(void);
+int hp_init_attr_set_interface(void);
+size_t hp_calculate_string_buffer(const char *str);
+size_t hp_calculate_security_buffer(const char *authentication);
+void *hp_ascii_to_utf16_unicode(u16 *p, const u8 *str);
+int hp_get_integer_from_buffer(u8 **buffer, u32 *buffer_size, u32 *integer);
+int hp_get_string_from_buffer(u8 **buffer, u32 *buffer_size, char *dst, u32 dst_size);
+int hp_convert_hexstr_to_str(const char *input, u32 input_len, char **str, int *len);
+int hp_encode_outsize_for_pvsz(int outsize);
+int hp_enforce_single_line_input(char *buf, size_t count);
+void hp_set_reboot_and_signal_event(void);
+ssize_t display_name_language_code_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf);
+union acpi_object *hp_get_wmiobj_pointer(int instance_id, const char *guid_string);
+int hp_get_instance_count(const char *guid_string);
+void hp_update_attribute_permissions(bool isreadonly, struct kobj_attribute *current_val);
+void hp_friendly_user_name_update(char *path, const char *attr_name,
+ char *attr_display, int attr_size);
+int hp_wmi_error_and_message(int error_code);
+int hp_get_common_data_from_buffer(u8 **buffer_ptr, u32 *buffer_size, struct common_data *common);
+
+#endif
diff --git a/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c
new file mode 100644
index 000000000000..a2402d31c146
--- /dev/null
+++ b/drivers/platform/x86/hp/hp-bioscfg/enum-attributes.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions corresponding to enumeration type attributes under
+ * BIOS Enumeration GUID for use with hp-bioscfg driver.
+ *
+ * Copyright (c) 2022 HP Development Company, L.P.
+ */
+
+#include "bioscfg.h"
+
+GET_INSTANCE_ID(enumeration);
+
+static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int instance_id = get_enumeration_instance_id(kobj);
+
+ if (instance_id < 0)
+ return -EIO;
+
+ return sysfs_emit(buf, "%s\n",
+ bioscfg_drv.enumeration_data[instance_id].current_value);
+}
+
+/**
+ * validate_enumeration_input() -
+ * Validate input of current_value against possible values
+ *
+ * @instance_id: The instance on which input is validated
+ * @buf: Input value
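+ *
+ * Returns: 0 if @buf matches one of the possible values, -EIO if the
+ * attribute is read-only, -EINVAL otherwise.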
+ */
+static int validate_enumeration_input(int instance_id, const char *buf)
+{
+ int i;
+ int found = 0;
+ struct enumeration_data *enum_data = &bioscfg_drv.enumeration_data[instance_id];
+
+ /* Is it a read-only attribute? */
+ if (enum_data->common.is_readonly)
+ return -EIO;
+
+ for (i = 0; i < enum_data->possible_values_size && !found; i++)
+ if (!strcmp(enum_data->possible_values[i], buf))
+ found = 1;
+
+ if (!found)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void update_enumeration_value(int instance_id, char *attr_value)
+{
+ struct enumeration_data *enum_data = &bioscfg_drv.enumeration_data[instance_id];
+
+ strscpy(enum_data->current_value,
+ attr_value,
+ sizeof(enum_data->current_value));
+}
+
+ATTRIBUTE_S_COMMON_PROPERTY_SHOW(display_name, enumeration);
+static struct kobj_attribute enumeration_display_name =
+ __ATTR_RO(display_name);
+
+ATTRIBUTE_PROPERTY_STORE(current_value, enumeration);
+static struct kobj_attribute enumeration_current_val =
+ __ATTR_RW(current_value);
+
+ATTRIBUTE_VALUES_PROPERTY_SHOW(possible_values, enumeration, SEMICOLON_SEP);
+static struct kobj_attribute enumeration_poss_val =
+ __ATTR_RO(possible_values);
+
+static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "enumeration\n");
+}
+
+static struct kobj_attribute enumeration_type =
+ __ATTR_RO(type);
+
+static struct attribute *enumeration_attrs[] = {
+ &common_display_langcode.attr,
+ &enumeration_display_name.attr,
+ &enumeration_current_val.attr,
+ &enumeration_poss_val.attr,
+ &enumeration_type.attr,
+ NULL
+};
+
+static const struct attribute_group enumeration_attr_group = {
+ .attrs = enumeration_attrs,
+};
+
+int hp_alloc_enumeration_data(void)
+{
+ bioscfg_drv.enumeration_instances_count =
+ hp_get_instance_count(HP_WMI_BIOS_ENUMERATION_GUID);
+
+ bioscfg_drv.enumeration_data = kcalloc(bioscfg_drv.enumeration_instances_count,
+ sizeof(*bioscfg_drv.enumeration_data), GFP_KERNEL);
+ if (!bioscfg_drv.enumeration_data) {
+ bioscfg_drv.enumeration_instances_count = 0;
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* Expected Values types associated with each element */
+static const acpi_object_type expected_enum_types[] = {
+ [NAME] = ACPI_TYPE_STRING,
+ [VALUE] = ACPI_TYPE_STRING,
+ [PATH] = ACPI_TYPE_STRING,
+ [IS_READONLY] = ACPI_TYPE_INTEGER,
+ [DISPLAY_IN_UI] = ACPI_TYPE_INTEGER,
+ [REQUIRES_PHYSICAL_PRESENCE] = ACPI_TYPE_INTEGER,
+ [SEQUENCE] = ACPI_TYPE_INTEGER,
+ [PREREQUISITES_SIZE] = ACPI_TYPE_INTEGER,
+ [PREREQUISITES] = ACPI_TYPE_STRING,
+ [SECURITY_LEVEL] = ACPI_TYPE_INTEGER,
+ [ENUM_CURRENT_VALUE] = ACPI_TYPE_STRING,
+ [ENUM_SIZE] = ACPI_TYPE_INTEGER,
+ [ENUM_POSSIBLE_VALUES] = ACPI_TYPE_STRING,
+};
+
+static int hp_populate_enumeration_elements_from_package(union acpi_object *enum_obj,
+ int enum_obj_count,
+ int instance_id)
+{
+ char *str_value = NULL;
+ int value_len;
+ u32 size = 0;
+ u32 int_value = 0;
+ int elem = 0;
+ int reqs;
+ int pos_values;
+ int ret;
+ int eloc;
+ struct enumeration_data *enum_data = &bioscfg_drv.enumeration_data[instance_id];
+
+ for (elem = 1, eloc = 1; elem < enum_obj_count; elem++, eloc++) {
+ /* ONLY look at the first ENUM_ELEM_CNT elements */
+ if (eloc == ENUM_ELEM_CNT)
+ goto exit_enumeration_package;
+
+ switch (enum_obj[elem].type) {
+ case ACPI_TYPE_STRING:
+ if (elem != PREREQUISITES && elem != ENUM_POSSIBLE_VALUES) {
+ ret = hp_convert_hexstr_to_str(enum_obj[elem].string.pointer,
+ enum_obj[elem].string.length,
+ &str_value, &value_len);
+ if (ret)
+ return -EINVAL;
+ }
+ break;
+ case ACPI_TYPE_INTEGER:
+ int_value = (u32)enum_obj[elem].integer.value;
+ break;
+ default:
+ pr_warn("Unsupported object type [%d]\n", enum_obj[elem].type);
+ continue;
+ }
+
+ /* Check that both expected and read object type match */
+ if (expected_enum_types[eloc] != enum_obj[elem].type) {
+ pr_err("Error expected type %d for elem %d, but got type %d instead\n",
+ expected_enum_types[eloc], elem, enum_obj[elem].type);
+ kfree(str_value);
+ return -EIO;
+ }
+
+ /* Assign appropriate element value to corresponding field */
+ switch (eloc) {
+ case NAME:
+ case VALUE:
+ break;
+ case PATH:
+ strscpy(enum_data->common.path, str_value,
+ sizeof(enum_data->common.path));
+ break;
+ case IS_READONLY:
+ enum_data->common.is_readonly = int_value;
+ break;
+ case DISPLAY_IN_UI:
+ enum_data->common.display_in_ui = int_value;
+ break;
+ case REQUIRES_PHYSICAL_PRESENCE:
+ enum_data->common.requires_physical_presence = int_value;
+ break;
+ case SEQUENCE:
+ enum_data->common.sequence = int_value;
+ break;
+ case PREREQUISITES_SIZE:
+ if (int_value > MAX_PREREQUISITES_SIZE) {
+ pr_warn("Prerequisites size value exceeded the maximum number of elements supported or data may be malformed\n");
+ int_value = MAX_PREREQUISITES_SIZE;
+ }
+ enum_data->common.prerequisites_size = int_value;
+
+ /*
+ * This step is needed to keep the expected
+ * element list pointing to the right obj[elem].type
+ * when the size is zero. PREREQUISITES
+ * object is omitted by BIOS when the size is
+ * zero.
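+ *
+ * For example, when the size is zero the next package object is
+ * already SECURITY_LEVEL, so bumping eloc keeps
+ * expected_enum_types[eloc] aligned with it.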
+ */
+ if (int_value == 0)
+ eloc++;
+ break;
+
+ case PREREQUISITES:
+ size = min_t(u32, enum_data->common.prerequisites_size, MAX_PREREQUISITES_SIZE);
+ for (reqs = 0; reqs < size; reqs++) {
+ if (elem >= enum_obj_count) {
+ pr_err("Error enum-objects package is too small\n");
+ return -EINVAL;
+ }
+
+ ret = hp_convert_hexstr_to_str(enum_obj[elem + reqs].string.pointer,
+ enum_obj[elem + reqs].string.length,
+ &str_value, &value_len);
+
+ if (ret)
+ return -EINVAL;
+
+ strscpy(enum_data->common.prerequisites[reqs],
+ str_value,
+ sizeof(enum_data->common.prerequisites[reqs]));
+
+ kfree(str_value);
+ str_value = NULL;
+ }
+ break;
+
+ case SECURITY_LEVEL:
+ enum_data->common.security_level = int_value;
+ break;
+
+ case ENUM_CURRENT_VALUE:
+ strscpy(enum_data->current_value,
+ str_value, sizeof(enum_data->current_value));
+ break;
+ case ENUM_SIZE:
+ if (int_value > MAX_VALUES_SIZE) {
+ pr_warn("Possible number values size value exceeded the maximum number of elements supported or data may be malformed\n");
+ int_value = MAX_VALUES_SIZE;
+ }
+ enum_data->possible_values_size = int_value;
+
+ /*
+ * This step is needed to keep the expected
+ * element list pointing to the right obj[elem].type
+ * when the size is zero. POSSIBLE_VALUES
+ * object is omitted by BIOS when the size is zero.
+ */
+ if (int_value == 0)
+ eloc++;
+ break;
+
+ case ENUM_POSSIBLE_VALUES:
+ size = enum_data->possible_values_size;
+
+ for (pos_values = 0; pos_values < size && pos_values < MAX_VALUES_SIZE;
+ pos_values++) {
+ if (elem >= enum_obj_count) {
+ pr_err("Error enum-objects package is too small\n");
+ return -EINVAL;
+ }
+
+ ret = hp_convert_hexstr_to_str(enum_obj[elem + pos_values].string.pointer,
+ enum_obj[elem + pos_values].string.length,
+ &str_value, &value_len);
+
+ if (ret)
+ return -EINVAL;
+
+ /*
+ * ignore strings when possible values size
+ * is greater than MAX_VALUES_SIZE
+ */
+ if (size < MAX_VALUES_SIZE)
+ strscpy(enum_data->possible_values[pos_values],
+ str_value,
+ sizeof(enum_data->possible_values[pos_values]));
+
+ kfree(str_value);
+ str_value = NULL;
+ }
+ break;
+ default:
+ pr_warn("Invalid element: %d found in Enumeration attribute or data may be malformed\n", elem);
+ break;
+ }
+
+ kfree(str_value);
+ str_value = NULL;
+ }
+
+exit_enumeration_package:
+ kfree(str_value);
+ return 0;
+}
+
+/**
+ * hp_populate_enumeration_package_data() -
+ * Populate all properties of an instance under enumeration attribute
+ *
+ * @enum_obj: ACPI object with enumeration data
+ * @instance_id: The instance to enumerate
+ * @attr_name_kobj: The parent kernel object
+ */
+int hp_populate_enumeration_package_data(union acpi_object *enum_obj,
+ int instance_id,
+ struct kobject *attr_name_kobj)
+{
+ struct enumeration_data *enum_data = &bioscfg_drv.enumeration_data[instance_id];
+
+ enum_data->attr_name_kobj = attr_name_kobj;
+
+ hp_populate_enumeration_elements_from_package(enum_obj,
+ enum_obj->package.count,
+ instance_id);
+ hp_update_attribute_permissions(enum_data->common.is_readonly,
+ &enumeration_current_val);
+ /*
+ * Several attributes have names such as "MONDAY". A friendlier
+ * user name is generated to make the name more descriptive.
+ */
+ hp_friendly_user_name_update(enum_data->common.path,
+ attr_name_kobj->name,
+ enum_data->common.display_name,
+ sizeof(enum_data->common.display_name));
+ return sysfs_create_group(attr_name_kobj, &enumeration_attr_group);
+}
+
+static int hp_populate_enumeration_elements_from_buffer(u8 *buffer_ptr, u32 *buffer_size,
+ int instance_id)
+{
+ int values;
+ struct enumeration_data *enum_data = &bioscfg_drv.enumeration_data[instance_id];
+ int ret = 0;
+
+ /*
+ * Only data relevant to this driver and its functionality is
+ * read. BIOS defines the order in which each element is
+ * read. Element 0 data is not relevant to this driver and is
+ * ignored. For clarity, the element names used below (e.g.
+ * DISPLAY_IN_UI) follow the order in which the data is read,
+ * and each name matches the variable where the data is stored.
+ *
+ * In an earlier implementation, reported errors were ignored,
+ * leaving the data uninitialized. Since it is not possible to
+ * determine whether data read from BIOS is valid, these
+ * functions may return an error without validating the data
+ * itself.
+ */
+
+ // VALUE:
+ ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size, enum_data->current_value,
+ sizeof(enum_data->current_value));
+ if (ret < 0)
+ goto buffer_exit;
+
+ // COMMON:
+ ret = hp_get_common_data_from_buffer(&buffer_ptr, buffer_size, &enum_data->common);
+ if (ret < 0)
+ goto buffer_exit;
+
+ // ENUM_CURRENT_VALUE:
+ ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size,
+ enum_data->current_value,
+ sizeof(enum_data->current_value));
+ if (ret < 0)
+ goto buffer_exit;
+
+ // ENUM_SIZE:
+ ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
+ &enum_data->possible_values_size);
+
+ if (enum_data->possible_values_size > MAX_VALUES_SIZE) {
+ /* Report a message and limit possible values size to maximum value */
+ pr_warn("Enum Possible size value exceeded the maximum number of elements supported or data may be malformed\n");
+ enum_data->possible_values_size = MAX_VALUES_SIZE;
+ }
+
+ // ENUM_POSSIBLE_VALUES:
+ for (values = 0; values < enum_data->possible_values_size; values++) {
+ ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size,
+ enum_data->possible_values[values],
+ sizeof(enum_data->possible_values[values]));
+ if (ret < 0)
+ break;
+ }
+
+buffer_exit:
+ return ret;
+}
+
+/**
+ * hp_populate_enumeration_buffer_data() -
+ * Populate all properties of an instance under enumeration attribute
+ *
+ * @buffer_ptr: Buffer pointer
+ * @buffer_size: Buffer size
+ * @instance_id: The instance to enumerate
+ * @attr_name_kobj: The parent kernel object
+ */
+int hp_populate_enumeration_buffer_data(u8 *buffer_ptr, u32 *buffer_size,
+ int instance_id,
+ struct kobject *attr_name_kobj)
+{
+ struct enumeration_data *enum_data = &bioscfg_drv.enumeration_data[instance_id];
+ int ret = 0;
+
+ enum_data->attr_name_kobj = attr_name_kobj;
+
+ /* Populate enumeration elements */
+ ret = hp_populate_enumeration_elements_from_buffer(buffer_ptr, buffer_size,
+ instance_id);
+ if (ret < 0)
+ return ret;
+
+ hp_update_attribute_permissions(enum_data->common.is_readonly,
+ &enumeration_current_val);
+ /*
+ * Several attributes have names such as "MONDAY". A friendlier
+ * user name is generated to make the name more descriptive.
+ */
+ hp_friendly_user_name_update(enum_data->common.path,
+ attr_name_kobj->name,
+ enum_data->common.display_name,
+ sizeof(enum_data->common.display_name));
+
+ return sysfs_create_group(attr_name_kobj, &enumeration_attr_group);
+}
+
+/**
+ * hp_exit_enumeration_attributes() - Clear all attribute data
+ *
+ * Clears all data allocated for this group of attributes
+ */
+void hp_exit_enumeration_attributes(void)
+{
+ int instance_id;
+
+ for (instance_id = 0; instance_id < bioscfg_drv.enumeration_instances_count;
+ instance_id++) {
+ struct enumeration_data *enum_data = &bioscfg_drv.enumeration_data[instance_id];
+ struct kobject *attr_name_kobj = enum_data->attr_name_kobj;
+
+ if (attr_name_kobj)
+ sysfs_remove_group(attr_name_kobj, &enumeration_attr_group);
+ }
+ bioscfg_drv.enumeration_instances_count = 0;
+
+ kfree(bioscfg_drv.enumeration_data);
+ bioscfg_drv.enumeration_data = NULL;
+}
diff --git a/drivers/platform/x86/hp/hp-bioscfg/int-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/int-attributes.c
new file mode 100644
index 000000000000..86b7ac63fec2
--- /dev/null
+++ b/drivers/platform/x86/hp/hp-bioscfg/int-attributes.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions corresponding to integer type attributes under
+ * BIOS Enumeration GUID for use with hp-bioscfg driver.
+ *
+ * Copyright (c) 2022 Hewlett-Packard Inc.
+ */
+
+#include "bioscfg.h"
+
+GET_INSTANCE_ID(integer);
+
+static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int instance_id = get_integer_instance_id(kobj);
+
+ if (instance_id < 0)
+ return -EIO;
+
+ return sysfs_emit(buf, "%d\n",
+ bioscfg_drv.integer_data[instance_id].current_value);
+}
+
+/**
+ * validate_integer_input() -
+ * Validate input of current_value against lower and upper bound
+ *
+ * @instance_id: The instance on which input is validated
+ * @buf: Input value
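+ *
+ * Returns: 0 on success, -EIO if the attribute is read-only, a
+ * kstrtoint() error for malformed input, or -ERANGE when the value
+ * falls outside [lower_bound, upper_bound].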
+ */
+static int validate_integer_input(int instance_id, char *buf)
+{
+ int in_val;
+ int ret;
+ struct integer_data *integer_data = &bioscfg_drv.integer_data[instance_id];
+
+ /* BIOS treats it as a read-only attribute */
+ if (integer_data->common.is_readonly)
+ return -EIO;
+
+ ret = kstrtoint(buf, 10, &in_val);
+ if (ret < 0)
+ return ret;
+
+ if (in_val < integer_data->lower_bound ||
+ in_val > integer_data->upper_bound)
+ return -ERANGE;
+
+ return 0;
+}
+
+static void update_integer_value(int instance_id, char *attr_value)
+{
+ int in_val;
+ int ret;
+ struct integer_data *integer_data = &bioscfg_drv.integer_data[instance_id];
+
+ ret = kstrtoint(attr_value, 10, &in_val);
+ if (ret == 0)
+ integer_data->current_value = in_val;
+ else
+ pr_warn("Invalid integer value found: %s\n", attr_value);
+}
+
+ATTRIBUTE_S_COMMON_PROPERTY_SHOW(display_name, integer);
+static struct kobj_attribute integer_display_name =
+ __ATTR_RO(display_name);
+
+ATTRIBUTE_PROPERTY_STORE(current_value, integer);
+static struct kobj_attribute integer_current_val =
+ __ATTR_RW_MODE(current_value, 0644);
+
+ATTRIBUTE_N_PROPERTY_SHOW(lower_bound, integer);
+static struct kobj_attribute integer_lower_bound =
+ __ATTR_RO(lower_bound);
+
+ATTRIBUTE_N_PROPERTY_SHOW(upper_bound, integer);
+static struct kobj_attribute integer_upper_bound =
+ __ATTR_RO(upper_bound);
+
+ATTRIBUTE_N_PROPERTY_SHOW(scalar_increment, integer);
+static struct kobj_attribute integer_scalar_increment =
+ __ATTR_RO(scalar_increment);
+
+static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "integer\n");
+}
+
+static struct kobj_attribute integer_type =
+ __ATTR_RO(type);
+
+static struct attribute *integer_attrs[] = {
+ &common_display_langcode.attr,
+ &integer_display_name.attr,
+ &integer_current_val.attr,
+ &integer_lower_bound.attr,
+ &integer_upper_bound.attr,
+ &integer_scalar_increment.attr,
+ &integer_type.attr,
+ NULL
+};
+
+static const struct attribute_group integer_attr_group = {
+ .attrs = integer_attrs,
+};
+
+int hp_alloc_integer_data(void)
+{
+ bioscfg_drv.integer_instances_count = hp_get_instance_count(HP_WMI_BIOS_INTEGER_GUID);
+ bioscfg_drv.integer_data = kcalloc(bioscfg_drv.integer_instances_count,
+ sizeof(*bioscfg_drv.integer_data), GFP_KERNEL);
+
+ if (!bioscfg_drv.integer_data) {
+ bioscfg_drv.integer_instances_count = 0;
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* Expected Values types associated with each element */
+static const acpi_object_type expected_integer_types[] = {
+ [NAME] = ACPI_TYPE_STRING,
+ [VALUE] = ACPI_TYPE_STRING,
+ [PATH] = ACPI_TYPE_STRING,
+ [IS_READONLY] = ACPI_TYPE_INTEGER,
+ [DISPLAY_IN_UI] = ACPI_TYPE_INTEGER,
+ [REQUIRES_PHYSICAL_PRESENCE] = ACPI_TYPE_INTEGER,
+ [SEQUENCE] = ACPI_TYPE_INTEGER,
+ [PREREQUISITES_SIZE] = ACPI_TYPE_INTEGER,
+ [PREREQUISITES] = ACPI_TYPE_STRING,
+ [SECURITY_LEVEL] = ACPI_TYPE_INTEGER,
+ [INT_LOWER_BOUND] = ACPI_TYPE_INTEGER,
+ [INT_UPPER_BOUND] = ACPI_TYPE_INTEGER,
+ [INT_SCALAR_INCREMENT] = ACPI_TYPE_INTEGER,
+};
+
+static int hp_populate_integer_elements_from_package(union acpi_object *integer_obj,
+ int integer_obj_count,
+ int instance_id)
+{
+ char *str_value = NULL;
+ int value_len;
+ int ret;
+ u32 int_value = 0;
+ int elem;
+ int reqs;
+ int eloc;
+ int size;
+ struct integer_data *integer_data = &bioscfg_drv.integer_data[instance_id];
+
+ if (!integer_obj)
+ return -EINVAL;
+
+ for (elem = 1, eloc = 1; elem < integer_obj_count; elem++, eloc++) {
+ /* ONLY look at the first INTEGER_ELEM_CNT elements */
+ if (eloc == INT_ELEM_CNT)
+ goto exit_integer_package;
+
+ switch (integer_obj[elem].type) {
+ case ACPI_TYPE_STRING:
+ if (elem != PREREQUISITES) {
+ ret = hp_convert_hexstr_to_str(integer_obj[elem].string.pointer,
+ integer_obj[elem].string.length,
+ &str_value, &value_len);
+ if (ret)
+ continue;
+ }
+ break;
+ case ACPI_TYPE_INTEGER:
+ int_value = (u32)integer_obj[elem].integer.value;
+ break;
+ default:
+ pr_warn("Unsupported object type [%d]\n", integer_obj[elem].type);
+ continue;
+ }
+ /* Check that both expected and read object type match */
+ if (expected_integer_types[eloc] != integer_obj[elem].type) {
+ pr_err("Error expected type %d for elem %d, but got type %d instead\n",
+ expected_integer_types[eloc], elem, integer_obj[elem].type);
+ kfree(str_value);
+ return -EIO;
+ }
+ /* Assign appropriate element value to corresponding field*/
+ switch (eloc) {
+ case VALUE:
+ ret = kstrtoint(str_value, 10, &int_value);
+ if (ret)
+ continue;
+
+ integer_data->current_value = int_value;
+ break;
+ case PATH:
+ strscpy(integer_data->common.path, str_value,
+ sizeof(integer_data->common.path));
+ break;
+ case IS_READONLY:
+ integer_data->common.is_readonly = int_value;
+ break;
+ case DISPLAY_IN_UI:
+ integer_data->common.display_in_ui = int_value;
+ break;
+ case REQUIRES_PHYSICAL_PRESENCE:
+ integer_data->common.requires_physical_presence = int_value;
+ break;
+ case SEQUENCE:
+ integer_data->common.sequence = int_value;
+ break;
+ case PREREQUISITES_SIZE:
+ if (int_value > MAX_PREREQUISITES_SIZE) {
+ pr_warn("Prerequisites size value exceeded the maximum number of elements supported or data may be malformed\n");
+ int_value = MAX_PREREQUISITES_SIZE;
+ }
+ integer_data->common.prerequisites_size = int_value;
+
+ /*
+ * This step is needed to keep the expected
+ * element list pointing to the right obj[elem].type
+ * when the size is zero. PREREQUISITES
+ * object is omitted by BIOS when the size is
+ * zero.
+ */
+ if (integer_data->common.prerequisites_size == 0)
+ eloc++;
+ break;
+ case PREREQUISITES:
+ size = min_t(u32, integer_data->common.prerequisites_size, MAX_PREREQUISITES_SIZE);
+
+ for (reqs = 0; reqs < size; reqs++) {
+ if (elem >= integer_obj_count) {
+ pr_err("Error elem-objects package is too small\n");
+ return -EINVAL;
+ }
+
+ ret = hp_convert_hexstr_to_str(integer_obj[elem + reqs].string.pointer,
+ integer_obj[elem + reqs].string.length,
+ &str_value, &value_len);
+
+ if (ret)
+ continue;
+
+ strscpy(integer_data->common.prerequisites[reqs],
+ str_value,
+ sizeof(integer_data->common.prerequisites[reqs]));
+ kfree(str_value);
+ str_value = NULL;
+ }
+ break;
+
+ case SECURITY_LEVEL:
+ integer_data->common.security_level = int_value;
+ break;
+ case INT_LOWER_BOUND:
+ integer_data->lower_bound = int_value;
+ break;
+ case INT_UPPER_BOUND:
+ integer_data->upper_bound = int_value;
+ break;
+ case INT_SCALAR_INCREMENT:
+ integer_data->scalar_increment = int_value;
+ break;
+ default:
+ pr_warn("Invalid element: %d found in Integer attribute or data may be malformed\n", elem);
+ break;
+ }
+
+ kfree(str_value);
+ str_value = NULL;
+ }
+exit_integer_package:
+ kfree(str_value);
+ return 0;
+}
+
+/**
+ * hp_populate_integer_package_data() -
+ * Populate all properties of an instance under integer attribute
+ *
+ * @integer_obj: ACPI object with integer data
+ * @instance_id: The instance to enumerate
+ * @attr_name_kobj: The parent kernel object
+ */
+int hp_populate_integer_package_data(union acpi_object *integer_obj,
+ int instance_id,
+ struct kobject *attr_name_kobj)
+{
+ struct integer_data *integer_data = &bioscfg_drv.integer_data[instance_id];
+
+ integer_data->attr_name_kobj = attr_name_kobj;
+ hp_populate_integer_elements_from_package(integer_obj,
+ integer_obj->package.count,
+ instance_id);
+ hp_update_attribute_permissions(integer_data->common.is_readonly,
+ &integer_current_val);
+ hp_friendly_user_name_update(integer_data->common.path,
+ attr_name_kobj->name,
+ integer_data->common.display_name,
+ sizeof(integer_data->common.display_name));
+ return sysfs_create_group(attr_name_kobj, &integer_attr_group);
+}
+
+static int hp_populate_integer_elements_from_buffer(u8 *buffer_ptr, u32 *buffer_size,
+ int instance_id)
+{
+ char *dst = NULL;
+ int dst_size = *buffer_size / sizeof(u16);
+ struct integer_data *integer_data = &bioscfg_drv.integer_data[instance_id];
+ int ret = 0;
+
+ dst = kcalloc(dst_size, sizeof(char), GFP_KERNEL);
+ if (!dst)
+ return -ENOMEM;
+
+ /*
+ * Only data relevant to this driver and its functionality is
+ * read. BIOS defines the order in which each element is
+ * read. Element 0 data is not relevant to this driver and is
+ * ignored. For clarity, the element names used below (e.g.
+ * DISPLAY_IN_UI) follow the order in which the data is read,
+ * and each name matches the variable where the data is stored.
+ *
+ * In an earlier implementation, reported errors were ignored,
+ * leaving the data uninitialized. Since it is not possible to
+ * determine whether data read from BIOS is valid, these
+ * functions may return an error without validating the data
+ * itself.
+ */
+
+ // VALUE:
+ integer_data->current_value = 0;
+
+ hp_get_string_from_buffer(&buffer_ptr, buffer_size, dst, dst_size);
+ ret = kstrtoint(dst, 10, &integer_data->current_value);
+ if (ret)
+ pr_warn("Unable to convert string to integer: %s\n", dst);
+ kfree(dst);
+
+ // COMMON:
+ ret = hp_get_common_data_from_buffer(&buffer_ptr, buffer_size, &integer_data->common);
+ if (ret < 0)
+ goto buffer_exit;
+
+ // INT_LOWER_BOUND:
+ ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
+ &integer_data->lower_bound);
+ if (ret < 0)
+ goto buffer_exit;
+
+ // INT_UPPER_BOUND:
+ ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
+ &integer_data->upper_bound);
+ if (ret < 0)
+ goto buffer_exit;
+
+ // INT_SCALAR_INCREMENT:
+ ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
+ &integer_data->scalar_increment);
+
+buffer_exit:
+ return ret;
+}
+
+/**
+ * hp_populate_integer_buffer_data() -
+ * Populate all properties of an instance under integer attribute
+ *
+ * @buffer_ptr: Buffer pointer
+ * @buffer_size: Buffer size
+ * @instance_id: The instance to enumerate
+ * @attr_name_kobj: The parent kernel object
+ */
+int hp_populate_integer_buffer_data(u8 *buffer_ptr, u32 *buffer_size, int instance_id,
+ struct kobject *attr_name_kobj)
+{
+ struct integer_data *integer_data = &bioscfg_drv.integer_data[instance_id];
+ int ret = 0;
+
+ integer_data->attr_name_kobj = attr_name_kobj;
+
+ /* Populate integer elements */
+ ret = hp_populate_integer_elements_from_buffer(buffer_ptr, buffer_size,
+ instance_id);
+ if (ret < 0)
+ return ret;
+
+ hp_update_attribute_permissions(integer_data->common.is_readonly,
+ &integer_current_val);
+ hp_friendly_user_name_update(integer_data->common.path,
+ attr_name_kobj->name,
+ integer_data->common.display_name,
+ sizeof(integer_data->common.display_name));
+
+ return sysfs_create_group(attr_name_kobj, &integer_attr_group);
+}
+
+/**
+ * hp_exit_integer_attributes() - Clear all attribute data
+ *
+ * Clears all data allocated for this group of attributes
+ */
+void hp_exit_integer_attributes(void)
+{
+ int instance_id;
+
+ for (instance_id = 0; instance_id < bioscfg_drv.integer_instances_count;
+ instance_id++) {
+ struct kobject *attr_name_kobj =
+ bioscfg_drv.integer_data[instance_id].attr_name_kobj;
+
+ if (attr_name_kobj)
+ sysfs_remove_group(attr_name_kobj, &integer_attr_group);
+ }
+ bioscfg_drv.integer_instances_count = 0;
+
+ kfree(bioscfg_drv.integer_data);
+ bioscfg_drv.integer_data = NULL;
+}
diff --git a/drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c
new file mode 100644
index 000000000000..1ff09dfb7d7e
--- /dev/null
+++ b/drivers/platform/x86/hp/hp-bioscfg/order-list-attributes.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions corresponding to ordered list type attributes under
+ * BIOS ORDERED LIST GUID for use with hp-bioscfg driver.
+ *
+ * Copyright (c) 2022 HP Development Company, L.P.
+ */
+
+#include "bioscfg.h"
+
+GET_INSTANCE_ID(ordered_list);
+
+static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int instance_id = get_ordered_list_instance_id(kobj);
+
+ if (instance_id < 0)
+ return -EIO;
+
+ return sysfs_emit(buf, "%s\n",
+ bioscfg_drv.ordered_list_data[instance_id].current_value);
+}
+
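+/*
+ * Replace every occurrence of *repl_char with *repl_with in the
+ * NUL-terminated string held in buffer.
+ */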
+static int replace_char_str(u8 *buffer, char *repl_char, char *repl_with)
+{
+ char *src = buffer;
+ int buflen = strlen(buffer);
+ int item;
+
+ if (buflen < 1)
+ return -EINVAL;
+
+ for (item = 0; item < buflen; item++)
+ if (src[item] == *repl_char)
+ src[item] = *repl_with;
+
+ return 0;
+}
+
+/**
+ * validate_ordered_list_input() -
+ * Validate input of current_value against possible values
+ *
+ * @instance: The instance on which input is validated
+ * @buf: Input value
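+ *
+ * Returns: 0 on success or -EINVAL when @buf is empty; the values
+ * themselves are validated by BIOS.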
+ */
+static int validate_ordered_list_input(int instance, char *buf)
+{
+ /* Validation is done by BIOS. This function only converts
+ * semicolons to commas; BIOS uses commas as separators
+ * when reporting ordered-list values.
+ */
+ return replace_char_str(buf, SEMICOLON_SEP, COMMA_SEP);
+}
+
+static void update_ordered_list_value(int instance, char *attr_value)
+{
+ struct ordered_list_data *ordered_list_data = &bioscfg_drv.ordered_list_data[instance];
+
+ strscpy(ordered_list_data->current_value,
+ attr_value,
+ sizeof(ordered_list_data->current_value));
+}
+
+ATTRIBUTE_S_COMMON_PROPERTY_SHOW(display_name, ordered_list);
+static struct kobj_attribute ordered_list_display_name =
+ __ATTR_RO(display_name);
+
+ATTRIBUTE_PROPERTY_STORE(current_value, ordered_list);
+static struct kobj_attribute ordered_list_current_val =
+ __ATTR_RW_MODE(current_value, 0644);
+
+ATTRIBUTE_VALUES_PROPERTY_SHOW(elements, ordered_list, SEMICOLON_SEP);
+static struct kobj_attribute ordered_list_elements_val =
+ __ATTR_RO(elements);
+
+static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "ordered-list\n");
+}
+
+static struct kobj_attribute ordered_list_type =
+ __ATTR_RO(type);
+
+static struct attribute *ordered_list_attrs[] = {
+ &common_display_langcode.attr,
+ &ordered_list_display_name.attr,
+ &ordered_list_current_val.attr,
+ &ordered_list_elements_val.attr,
+ &ordered_list_type.attr,
+ NULL
+};
+
+static const struct attribute_group ordered_list_attr_group = {
+ .attrs = ordered_list_attrs,
+};
+
+int hp_alloc_ordered_list_data(void)
+{
+ bioscfg_drv.ordered_list_instances_count =
+ hp_get_instance_count(HP_WMI_BIOS_ORDERED_LIST_GUID);
+ bioscfg_drv.ordered_list_data = kcalloc(bioscfg_drv.ordered_list_instances_count,
+ sizeof(*bioscfg_drv.ordered_list_data),
+ GFP_KERNEL);
+ if (!bioscfg_drv.ordered_list_data) {
+ bioscfg_drv.ordered_list_instances_count = 0;
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* Expected Values types associated with each element */
+static const acpi_object_type expected_order_types[] = {
+ [NAME] = ACPI_TYPE_STRING,
+ [VALUE] = ACPI_TYPE_STRING,
+ [PATH] = ACPI_TYPE_STRING,
+ [IS_READONLY] = ACPI_TYPE_INTEGER,
+ [DISPLAY_IN_UI] = ACPI_TYPE_INTEGER,
+ [REQUIRES_PHYSICAL_PRESENCE] = ACPI_TYPE_INTEGER,
+ [SEQUENCE] = ACPI_TYPE_INTEGER,
+ [PREREQUISITES_SIZE] = ACPI_TYPE_INTEGER,
+ [PREREQUISITES] = ACPI_TYPE_STRING,
+ [SECURITY_LEVEL] = ACPI_TYPE_INTEGER,
+ [ORD_LIST_SIZE] = ACPI_TYPE_INTEGER,
+ [ORD_LIST_ELEMENTS] = ACPI_TYPE_STRING,
+};
+
+static int hp_populate_ordered_list_elements_from_package(union acpi_object *order_obj,
+ int order_obj_count,
+ int instance_id)
+{
+ char *str_value = NULL;
+ int value_len = 0;
+ int ret;
+ u32 size;
+ u32 int_value = 0;
+ int elem;
+ int olist_elem;
+ int reqs;
+ int eloc;
+ char *tmpstr = NULL;
+ char *part_tmp = NULL;
+ int tmp_len = 0;
+ char *part = NULL;
+ struct ordered_list_data *ordered_list_data = &bioscfg_drv.ordered_list_data[instance_id];
+
+ if (!order_obj)
+ return -EINVAL;
+
+ for (elem = 1, eloc = 1; eloc < ORD_ELEM_CNT; elem++, eloc++) {
+
+ switch (order_obj[elem].type) {
+ case ACPI_TYPE_STRING:
+ if (elem != PREREQUISITES && elem != ORD_LIST_ELEMENTS) {
+ ret = hp_convert_hexstr_to_str(order_obj[elem].string.pointer,
+ order_obj[elem].string.length,
+ &str_value, &value_len);
+ if (ret)
+ continue;
+ }
+ break;
+ case ACPI_TYPE_INTEGER:
+ int_value = (u32)order_obj[elem].integer.value;
+ break;
+ default:
+ pr_warn("Unsupported object type [%d]\n", order_obj[elem].type);
+ continue;
+ }
+
+ /* Check that both expected and read object type match */
+ if (expected_order_types[eloc] != order_obj[elem].type) {
+ pr_err("Error expected type %d for elem %d, but got type %d instead\n",
+ expected_order_types[eloc], elem, order_obj[elem].type);
+ kfree(str_value);
+ return -EIO;
+ }
+
+ /* Assign appropriate element value to corresponding field*/
+ switch (eloc) {
+ case VALUE:
+ strscpy(ordered_list_data->current_value,
+ str_value, sizeof(ordered_list_data->current_value));
+ replace_char_str(ordered_list_data->current_value, COMMA_SEP, SEMICOLON_SEP);
+ break;
+ case PATH:
+ strscpy(ordered_list_data->common.path, str_value,
+ sizeof(ordered_list_data->common.path));
+ break;
+ case IS_READONLY:
+ ordered_list_data->common.is_readonly = int_value;
+ break;
+ case DISPLAY_IN_UI:
+ ordered_list_data->common.display_in_ui = int_value;
+ break;
+ case REQUIRES_PHYSICAL_PRESENCE:
+ ordered_list_data->common.requires_physical_presence = int_value;
+ break;
+ case SEQUENCE:
+ ordered_list_data->common.sequence = int_value;
+ break;
+ case PREREQUISITES_SIZE:
+ if (int_value > MAX_PREREQUISITES_SIZE) {
+ pr_warn("Prerequisites size value exceeded the maximum number of elements supported or data may be malformed\n");
+ int_value = MAX_PREREQUISITES_SIZE;
+ }
+ ordered_list_data->common.prerequisites_size = int_value;
+
+ /*
+ * This step is needed to keep the expected
+ * element list pointing to the right obj[elem].type
+ * when the size is zero. PREREQUISITES
+ * object is omitted by BIOS when the size is
+ * zero.
+ */
+ if (int_value == 0)
+ eloc++;
+ break;
+ case PREREQUISITES:
+ size = min_t(u32, ordered_list_data->common.prerequisites_size,
+ MAX_PREREQUISITES_SIZE);
+ for (reqs = 0; reqs < size; reqs++) {
+ ret = hp_convert_hexstr_to_str(order_obj[elem + reqs].string.pointer,
+ order_obj[elem + reqs].string.length,
+ &str_value, &value_len);
+
+ if (ret)
+ continue;
+
+ strscpy(ordered_list_data->common.prerequisites[reqs],
+ str_value,
+ sizeof(ordered_list_data->common.prerequisites[reqs]));
+
+ kfree(str_value);
+ str_value = NULL;
+ }
+ break;
+
+ case SECURITY_LEVEL:
+ ordered_list_data->common.security_level = int_value;
+ break;
+
+ case ORD_LIST_SIZE:
+ if (int_value > MAX_ELEMENTS_SIZE) {
+ pr_warn("Order List size value exceeded the maximum number of elements supported or data may be malformed\n");
+ int_value = MAX_ELEMENTS_SIZE;
+ }
+ ordered_list_data->elements_size = int_value;
+
+ /*
+ * This step is needed to keep the expected
+ * element list pointing to the right obj[elem].type
+ * when the size is zero. ORD_LIST_ELEMENTS
+ * object is omitted by BIOS when the size is
+ * zero.
+ */
+ if (int_value == 0)
+ eloc++;
+ break;
+ case ORD_LIST_ELEMENTS:
+
+ /*
+ * Ordered list data is stored in hex, comma-separated format.
+ * Convert the data and split it to show each element.
+ */
+ ret = hp_convert_hexstr_to_str(str_value, value_len, &tmpstr, &tmp_len);
+ if (ret)
+ goto exit_list;
+
+ part_tmp = tmpstr;
+ part = strsep(&part_tmp, COMMA_SEP);
+
+ for (olist_elem = 0; olist_elem < MAX_ELEMENTS_SIZE && part; olist_elem++) {
+ strscpy(ordered_list_data->elements[olist_elem],
+ part,
+ sizeof(ordered_list_data->elements[olist_elem]));
+ part = strsep(&part_tmp, COMMA_SEP);
+ }
+ ordered_list_data->elements_size = olist_elem;
+
+ kfree(str_value);
+ str_value = NULL;
+ break;
+ default:
+ pr_warn("Invalid element: %d found in Ordered_List attribute or data may be malformed\n", elem);
+ break;
+ }
+ kfree(tmpstr);
+ tmpstr = NULL;
+ kfree(str_value);
+ str_value = NULL;
+ }
+
+exit_list:
+ kfree(tmpstr);
+ kfree(str_value);
+ return 0;
+}
+
+/**
+ * hp_populate_ordered_list_package_data() -
+ * Populate all properties of an instance under ordered_list attribute
+ *
+ * @order_obj: ACPI object with ordered_list data
+ * @instance_id: The instance to enumerate
+ * @attr_name_kobj: The parent kernel object
+ */
+int hp_populate_ordered_list_package_data(union acpi_object *order_obj, int instance_id,
+ struct kobject *attr_name_kobj)
+{
+ struct ordered_list_data *ordered_list_data = &bioscfg_drv.ordered_list_data[instance_id];
+
+ ordered_list_data->attr_name_kobj = attr_name_kobj;
+
+ hp_populate_ordered_list_elements_from_package(order_obj,
+ order_obj->package.count,
+ instance_id);
+ hp_update_attribute_permissions(ordered_list_data->common.is_readonly,
+ &ordered_list_current_val);
+ hp_friendly_user_name_update(ordered_list_data->common.path,
+ attr_name_kobj->name,
+ ordered_list_data->common.display_name,
+ sizeof(ordered_list_data->common.display_name));
+ return sysfs_create_group(attr_name_kobj, &ordered_list_attr_group);
+}
+
+static int hp_populate_ordered_list_elements_from_buffer(u8 *buffer_ptr, u32 *buffer_size,
+ int instance_id)
+{
+ int values;
+ struct ordered_list_data *ordered_list_data = &bioscfg_drv.ordered_list_data[instance_id];
+ int ret = 0;
+
+ /*
+ * Only data relevant to this driver and its functionality is
+ * read. BIOS defines the order in which each element is
+ * read. Element 0 data is not relevant to this driver and is
+ * ignored. For clarity, the element names used below (e.g.
+ * DISPLAY_IN_UI) follow the order in which the data is read,
+ * and each name matches the variable where the data is stored.
+ *
+ * In an earlier implementation, reported errors were ignored,
+ * leaving the data uninitialized. Since it is not possible to
+ * determine whether data read from BIOS is valid, these
+ * functions may return an error without validating the data
+ * itself.
+ */
+
+ // VALUE:
+ ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size, ordered_list_data->current_value,
+ sizeof(ordered_list_data->current_value));
+ if (ret < 0)
+ goto buffer_exit;
+
+ replace_char_str(ordered_list_data->current_value, COMMA_SEP, SEMICOLON_SEP);
+
+ // COMMON:
+ ret = hp_get_common_data_from_buffer(&buffer_ptr, buffer_size,
+ &ordered_list_data->common);
+ if (ret < 0)
+ goto buffer_exit;
+
+ // ORD_LIST_SIZE:
+ ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
+ &ordered_list_data->elements_size);
+
+ if (ordered_list_data->elements_size > MAX_ELEMENTS_SIZE) {
+ /* Report a message and limit elements size to maximum value */
+ pr_warn("Ordered List size value exceeded the maximum number of elements supported or data may be malformed\n");
+ ordered_list_data->elements_size = MAX_ELEMENTS_SIZE;
+ }
+
+ // ORD_LIST_ELEMENTS:
+ for (values = 0; values < ordered_list_data->elements_size; values++) {
+ ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size,
+ ordered_list_data->elements[values],
+ sizeof(ordered_list_data->elements[values]));
+ if (ret < 0)
+ break;
+ }
+
+buffer_exit:
+ return ret;
+}
+
+/**
+ * hp_populate_ordered_list_buffer_data() - Populate all properties of an
+ * instance under ordered list attribute
+ *
+ * @buffer_ptr: Buffer pointer
+ * @buffer_size: Buffer size
+ * @instance_id: The instance to enumerate
+ * @attr_name_kobj: The parent kernel object
+ */
+int hp_populate_ordered_list_buffer_data(u8 *buffer_ptr, u32 *buffer_size, int instance_id,
+ struct kobject *attr_name_kobj)
+{
+ struct ordered_list_data *ordered_list_data = &bioscfg_drv.ordered_list_data[instance_id];
+ int ret = 0;
+
+ ordered_list_data->attr_name_kobj = attr_name_kobj;
+
+ /* Populate ordered list elements */
+ ret = hp_populate_ordered_list_elements_from_buffer(buffer_ptr, buffer_size,
+ instance_id);
+ if (ret < 0)
+ return ret;
+
+ hp_update_attribute_permissions(ordered_list_data->common.is_readonly,
+ &ordered_list_current_val);
+ hp_friendly_user_name_update(ordered_list_data->common.path,
+ attr_name_kobj->name,
+ ordered_list_data->common.display_name,
+ sizeof(ordered_list_data->common.display_name));
+
+ return sysfs_create_group(attr_name_kobj, &ordered_list_attr_group);
+}
+
+/**
+ * hp_exit_ordered_list_attributes() - Clear all attribute data
+ *
+ * Clears all data allocated for this group of attributes
+ */
+void hp_exit_ordered_list_attributes(void)
+{
+ int instance_id;
+
+ for (instance_id = 0; instance_id < bioscfg_drv.ordered_list_instances_count;
+ instance_id++) {
+ struct kobject *attr_name_kobj =
+ bioscfg_drv.ordered_list_data[instance_id].attr_name_kobj;
+
+ if (attr_name_kobj)
+ sysfs_remove_group(attr_name_kobj,
+ &ordered_list_attr_group);
+ }
+ bioscfg_drv.ordered_list_instances_count = 0;
+
+ kfree(bioscfg_drv.ordered_list_data);
+ bioscfg_drv.ordered_list_data = NULL;
+}
diff --git a/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c
new file mode 100644
index 000000000000..03d0188804ba
--- /dev/null
+++ b/drivers/platform/x86/hp/hp-bioscfg/passwdobj-attributes.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions corresponding to password object type attributes under
+ * BIOS PASSWORD for use with hp-bioscfg driver.
+ *
+ * Copyright (c) 2022 HP Development Company, L.P.
+ */
+
+#include "bioscfg.h"
+#include <asm-generic/posix_types.h>
+
+GET_INSTANCE_ID(password);
+/*
+ * Clear all passwords copied to memory for a particular
+ * authentication instance
+ */
+static int clear_passwords(const int instance)
+{
+ struct password_data *password_data = &bioscfg_drv.password_data[instance];
+
+ if (!password_data->is_enabled)
+ return 0;
+
+ memset(password_data->current_password,
+ 0, sizeof(password_data->current_password));
+ memset(password_data->new_password,
+ 0, sizeof(password_data->new_password));
+
+ return 0;
+}
+
+/*
+ * Clear all credentials copied to memory for both Power-ON and Setup
+ * BIOS instances
+ */
+int hp_clear_all_credentials(void)
+{
+ int count = bioscfg_drv.password_instances_count;
+ int instance;
+
+ /* clear all passwords */
+ for (instance = 0; instance < count; instance++)
+ clear_passwords(instance);
+
+ /* clear auth_token */
+ kfree(bioscfg_drv.spm_data.auth_token);
+ bioscfg_drv.spm_data.auth_token = NULL;
+
+ return 0;
+}
+
+int hp_get_password_instance_for_type(const char *name)
+{
+ int count = bioscfg_drv.password_instances_count;
+ int instance;
+
+ for (instance = 0; instance < count; instance++)
+ if (!strcmp(bioscfg_drv.password_data[instance].common.display_name, name))
+ return instance;
+
+ return -EINVAL;
+}
+
+static int validate_password_input(int instance_id, const char *buf)
+{
+ int length;
+ struct password_data *password_data = &bioscfg_drv.password_data[instance_id];
+
+ length = strlen(buf);
+ if (buf[length - 1] == '\n')
+ length--;
+
+ if (length > MAX_PASSWD_SIZE)
+ return INVALID_BIOS_AUTH;
+
+ if (password_data->min_password_length > length ||
+ password_data->max_password_length < length)
+ return INVALID_BIOS_AUTH;
+ return SUCCESS;
+}
+
+ATTRIBUTE_N_PROPERTY_SHOW(is_enabled, password);
+static struct kobj_attribute password_is_password_set = __ATTR_RO(is_enabled);
+
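+/*
+ * Common store helper for current_password and new_password;
+ * @is_current selects which of the two password buffers receives
+ * the validated input.
+ */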
+static int store_password_instance(struct kobject *kobj, const char *buf,
+ size_t count, bool is_current)
+{
+ char *buf_cp;
+ int id, ret = 0;
+
+ buf_cp = kstrdup(buf, GFP_KERNEL);
+ if (!buf_cp)
+ return -ENOMEM;
+
+ ret = hp_enforce_single_line_input(buf_cp, count);
+ if (!ret) {
+ id = get_password_instance_id(kobj);
+
+ if (id >= 0)
+ ret = validate_password_input(id, buf_cp);
+ }
+
+ if (!ret) {
+ if (is_current)
+ strscpy(bioscfg_drv.password_data[id].current_password,
+ buf_cp,
+ sizeof(bioscfg_drv.password_data[id].current_password));
+ else
+ strscpy(bioscfg_drv.password_data[id].new_password,
+ buf_cp,
+ sizeof(bioscfg_drv.password_data[id].new_password));
+ }
+
+ kfree(buf_cp);
+ return ret < 0 ? ret : count;
+}
+
+static ssize_t current_password_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ return store_password_instance(kobj, buf, count, true);
+}
+
+static struct kobj_attribute password_current_password = __ATTR_WO(current_password);
+
+static ssize_t new_password_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ return store_password_instance(kobj, buf, count, false);
+}
+
+static struct kobj_attribute password_new_password = __ATTR_WO(new_password);
+
+ATTRIBUTE_N_PROPERTY_SHOW(min_password_length, password);
+static struct kobj_attribute password_min_password_length = __ATTR_RO(min_password_length);
+
+ATTRIBUTE_N_PROPERTY_SHOW(max_password_length, password);
+static struct kobj_attribute password_max_password_length = __ATTR_RO(max_password_length);
+
+static ssize_t role_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ if (!strcmp(kobj->name, SETUP_PASSWD))
+ return sysfs_emit(buf, "%s\n", BIOS_ADMIN);
+
+ if (!strcmp(kobj->name, POWER_ON_PASSWD))
+ return sysfs_emit(buf, "%s\n", POWER_ON);
+
+ return -EIO;
+}
+
+static struct kobj_attribute password_role = __ATTR_RO(role);
+
+static ssize_t mechanism_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int i = get_password_instance_id(kobj);
+
+ if (i < 0)
+ return i;
+
+ if (bioscfg_drv.password_data[i].mechanism != PASSWORD)
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%s\n", PASSWD_MECHANISM_TYPES);
+}
+
+static struct kobj_attribute password_mechanism = __ATTR_RO(mechanism);
+
+ATTRIBUTE_VALUES_PROPERTY_SHOW(encodings, password, SEMICOLON_SEP);
+static struct kobj_attribute password_encodings_val = __ATTR_RO(encodings);
+
+static struct attribute *password_attrs[] = {
+ &password_is_password_set.attr,
+ &password_min_password_length.attr,
+ &password_max_password_length.attr,
+ &password_current_password.attr,
+ &password_new_password.attr,
+ &password_role.attr,
+ &password_mechanism.attr,
+ &password_encodings_val.attr,
+ NULL
+};
+
+static const struct attribute_group password_attr_group = {
+ .attrs = password_attrs
+};
+
+int hp_alloc_password_data(void)
+{
+ bioscfg_drv.password_instances_count = hp_get_instance_count(HP_WMI_BIOS_PASSWORD_GUID);
+ bioscfg_drv.password_data = kcalloc(bioscfg_drv.password_instances_count,
+ sizeof(*bioscfg_drv.password_data), GFP_KERNEL);
+ if (!bioscfg_drv.password_data) {
+ bioscfg_drv.password_instances_count = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Expected Values types associated with each element */
+static const acpi_object_type expected_password_types[] = {
+ [NAME] = ACPI_TYPE_STRING,
+ [VALUE] = ACPI_TYPE_STRING,
+ [PATH] = ACPI_TYPE_STRING,
+ [IS_READONLY] = ACPI_TYPE_INTEGER,
+ [DISPLAY_IN_UI] = ACPI_TYPE_INTEGER,
+ [REQUIRES_PHYSICAL_PRESENCE] = ACPI_TYPE_INTEGER,
+ [SEQUENCE] = ACPI_TYPE_INTEGER,
+ [PREREQUISITES_SIZE] = ACPI_TYPE_INTEGER,
+ [PREREQUISITES] = ACPI_TYPE_STRING,
+ [SECURITY_LEVEL] = ACPI_TYPE_INTEGER,
+ [PSWD_MIN_LENGTH] = ACPI_TYPE_INTEGER,
+ [PSWD_MAX_LENGTH] = ACPI_TYPE_INTEGER,
+ [PSWD_SIZE] = ACPI_TYPE_INTEGER,
+ [PSWD_ENCODINGS] = ACPI_TYPE_STRING,
+ [PSWD_IS_SET] = ACPI_TYPE_INTEGER,
+};
+
+static int hp_populate_password_elements_from_package(union acpi_object *password_obj,
+ int password_obj_count,
+ int instance_id)
+{
+ char *str_value = NULL;
+ int value_len;
+ int ret;
+ u32 size;
+ u32 int_value = 0;
+ int elem;
+ int reqs;
+ int eloc;
+ int pos_values;
+ struct password_data *password_data = &bioscfg_drv.password_data[instance_id];
+
+ if (!password_obj)
+ return -EINVAL;
+
+ for (elem = 1, eloc = 1; elem < password_obj_count; elem++, eloc++) {
+ /* ONLY look at the first PASSWORD_ELEM_CNT elements */
+ if (eloc == PSWD_ELEM_CNT)
+ goto exit_package;
+
+ switch (password_obj[elem].type) {
+ case ACPI_TYPE_STRING:
+ if (PREREQUISITES != elem && PSWD_ENCODINGS != elem) {
+ ret = hp_convert_hexstr_to_str(password_obj[elem].string.pointer,
+ password_obj[elem].string.length,
+ &str_value, &value_len);
+ if (ret)
+ continue;
+ }
+ break;
+ case ACPI_TYPE_INTEGER:
+ int_value = (u32)password_obj[elem].integer.value;
+ break;
+ default:
+ pr_warn("Unsupported object type [%d]\n", password_obj[elem].type);
+ continue;
+ }
+
+ /* Check that both expected and read object type match */
+ if (expected_password_types[eloc] != password_obj[elem].type) {
+ pr_err("Error expected type %d for elem %d, but got type %d instead\n",
+ expected_password_types[eloc], elem, password_obj[elem].type);
+ kfree(str_value);
+ return -EIO;
+ }
+
+ /* Assign appropriate element value to corresponding field*/
+ switch (eloc) {
+ case VALUE:
+ break;
+ case PATH:
+ strscpy(password_data->common.path, str_value,
+ sizeof(password_data->common.path));
+ break;
+ case IS_READONLY:
+ password_data->common.is_readonly = int_value;
+ break;
+ case DISPLAY_IN_UI:
+ password_data->common.display_in_ui = int_value;
+ break;
+ case REQUIRES_PHYSICAL_PRESENCE:
+ password_data->common.requires_physical_presence = int_value;
+ break;
+ case SEQUENCE:
+ password_data->common.sequence = int_value;
+ break;
+ case PREREQUISITES_SIZE:
+ if (int_value > MAX_PREREQUISITES_SIZE) {
+ pr_warn("Prerequisites size value exceeded the maximum number of elements supported or data may be malformed\n");
+ int_value = MAX_PREREQUISITES_SIZE;
+ }
+ password_data->common.prerequisites_size = int_value;
+
+ /* This step is needed to keep the expected
+ * element list pointing to the right obj[elem].type
+ * when the size is zero. PREREQUISITES
+ * object is omitted by BIOS when the size is
+ * zero.
+ */
+ if (int_value == 0)
+ eloc++;
+ break;
+ case PREREQUISITES:
+ size = min_t(u32, password_data->common.prerequisites_size,
+ MAX_PREREQUISITES_SIZE);
+
+ for (reqs = 0; reqs < size; reqs++) {
+ ret = hp_convert_hexstr_to_str(password_obj[elem + reqs].string.pointer,
+ password_obj[elem + reqs].string.length,
+ &str_value, &value_len);
+
+ if (ret)
+ break;
+
+ strscpy(password_data->common.prerequisites[reqs],
+ str_value,
+ sizeof(password_data->common.prerequisites[reqs]));
+
+ kfree(str_value);
+ str_value = NULL;
+
+ }
+ break;
+ case SECURITY_LEVEL:
+ password_data->common.security_level = int_value;
+ break;
+ case PSWD_MIN_LENGTH:
+ password_data->min_password_length = int_value;
+ break;
+ case PSWD_MAX_LENGTH:
+ password_data->max_password_length = int_value;
+ break;
+ case PSWD_SIZE:
+
+ if (int_value > MAX_ENCODINGS_SIZE) {
+ pr_warn("Password Encoding size value exceeded the maximum number of elements supported or data may be malformed\n");
+ int_value = MAX_ENCODINGS_SIZE;
+ }
+ password_data->encodings_size = int_value;
+
+ /* This step is needed to keep the expected
+ * element list pointing to the right obj[elem].type
+ * when the size is zero. PSWD_ENCODINGS
+ * object is omitted by BIOS when the size is
+ * zero.
+ */
+ if (int_value == 0)
+ eloc++;
+ break;
+ case PSWD_ENCODINGS:
+ size = min_t(u32, password_data->encodings_size, MAX_ENCODINGS_SIZE);
+ for (pos_values = 0; pos_values < size; pos_values++) {
+ ret = hp_convert_hexstr_to_str(password_obj[elem + pos_values].string.pointer,
+ password_obj[elem + pos_values].string.length,
+ &str_value, &value_len);
+ if (ret)
+ break;
+
+ strscpy(password_data->encodings[pos_values],
+ str_value,
+ sizeof(password_data->encodings[pos_values]));
+ kfree(str_value);
+ str_value = NULL;
+
+ }
+ break;
+ case PSWD_IS_SET:
+ password_data->is_enabled = int_value;
+ break;
+ default:
+ pr_warn("Invalid element: %d found in Password attribute or data may be malformed\n", elem);
+ break;
+ }
+
+ kfree(str_value);
+ str_value = NULL;
+ }
+
+exit_package:
+ kfree(str_value);
+ return 0;
+}
+
+/**
+ * hp_populate_password_package_data() -
+ * Populate all properties for an instance under password attribute
+ *
+ * @password_obj: ACPI object with password data
+ * @instance_id: The instance to enumerate
+ * @attr_name_kobj: The parent kernel object
+ */
+int hp_populate_password_package_data(union acpi_object *password_obj, int instance_id,
+ struct kobject *attr_name_kobj)
+{
+ struct password_data *password_data = &bioscfg_drv.password_data[instance_id];
+
+ password_data->attr_name_kobj = attr_name_kobj;
+
+ hp_populate_password_elements_from_package(password_obj,
+ password_obj->package.count,
+ instance_id);
+
+ hp_friendly_user_name_update(password_data->common.path,
+ attr_name_kobj->name,
+ password_data->common.display_name,
+ sizeof(password_data->common.display_name));
+
+ if (!strcmp(attr_name_kobj->name, SETUP_PASSWD))
+ return sysfs_create_group(attr_name_kobj, &password_attr_group);
+
+ return sysfs_create_group(attr_name_kobj, &password_attr_group);
+}
+
+static int hp_populate_password_elements_from_buffer(u8 *buffer_ptr, u32 *buffer_size,
+ int instance_id)
+{
+ int values;
+ int isreadonly;
+ struct password_data *password_data = &bioscfg_drv.password_data[instance_id];
+ int ret = 0;
+
+ /*
+ * Only data relevant to this driver and its functionality is
+ * read. BIOS defines the order in which each element is
+ * read. Element 0 data is not relevant to this driver and is
+ * ignored. For clarity, the element names used below (e.g.
+ * DISPLAY_IN_UI) follow the order in which the data is read,
+ * and each name matches the variable where the data is stored.
+ *
+ * In an earlier implementation, reported errors were ignored,
+ * leaving the data uninitialized. Since it is not possible to
+ * determine whether data read from BIOS is valid, these
+ * functions may return an error without validating the data
+ * itself.
+ */
+
+ // VALUE:
+ ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size, password_data->current_password,
+ sizeof(password_data->current_password));
+ if (ret < 0)
+ goto buffer_exit;
+
+ // COMMON:
+ ret = hp_get_common_data_from_buffer(&buffer_ptr, buffer_size,
+ &password_data->common);
+ if (ret < 0)
+ goto buffer_exit;
+
+ // PSWD_MIN_LENGTH:
+ ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
+ &password_data->min_password_length);
+ if (ret < 0)
+ goto buffer_exit;
+
+ // PSWD_MAX_LENGTH:
+ ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
+ &password_data->max_password_length);
+ if (ret < 0)
+ goto buffer_exit;
+
+ // PSWD_SIZE:
+ ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
+ &password_data->encodings_size);
+ if (ret < 0)
+ goto buffer_exit;
+
+ if (password_data->encodings_size > MAX_ENCODINGS_SIZE) {
+ /* Report a message and limit possible values size to maximum value */
+ pr_warn("Password Encoding size value exceeded the maximum number of elements supported or data may be malformed\n");
+ password_data->encodings_size = MAX_ENCODINGS_SIZE;
+ }
+
+ // PSWD_ENCODINGS:
+ for (values = 0; values < password_data->encodings_size; values++) {
+ ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size,
+ password_data->encodings[values],
+ sizeof(password_data->encodings[values]));
+ if (ret < 0)
+ break;
+ }
+
+ // PSWD_IS_SET:
+ ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size, &isreadonly);
+ if (ret < 0)
+ goto buffer_exit;
+
+ password_data->is_enabled = isreadonly ? true : false;
+
+buffer_exit:
+ return ret;
+}
+
+/**
+ * hp_populate_password_buffer_data() -
+ * Populate all properties for an instance under password object attribute
+ *
+ * @buffer_ptr: Buffer pointer
+ * @buffer_size: Buffer size
+ * @instance_id: The instance to enumerate
+ * @attr_name_kobj: The parent kernel object
+ */
+int hp_populate_password_buffer_data(u8 *buffer_ptr, u32 *buffer_size, int instance_id,
+ struct kobject *attr_name_kobj)
+{
+ struct password_data *password_data = &bioscfg_drv.password_data[instance_id];
+ int ret = 0;
+
+ password_data->attr_name_kobj = attr_name_kobj;
+
+ /* Populate Password attributes */
+ ret = hp_populate_password_elements_from_buffer(buffer_ptr, buffer_size,
+ instance_id);
+ if (ret < 0)
+ return ret;
+
+ hp_friendly_user_name_update(password_data->common.path,
+ attr_name_kobj->name,
+ password_data->common.display_name,
+ sizeof(password_data->common.display_name));
+ if (!strcmp(attr_name_kobj->name, SETUP_PASSWD))
+ return sysfs_create_group(attr_name_kobj, &password_attr_group);
+
+ return sysfs_create_group(attr_name_kobj, &password_attr_group);
+}
+
+/**
+ * hp_exit_password_attributes() - Clear all attribute data
+ *
+ * Clears all data allocated for this group of attributes
+ */
+void hp_exit_password_attributes(void)
+{
+ int instance_id;
+
+ for (instance_id = 0; instance_id < bioscfg_drv.password_instances_count;
+ instance_id++) {
+ struct kobject *attr_name_kobj =
+ bioscfg_drv.password_data[instance_id].attr_name_kobj;
+
+ if (attr_name_kobj) {
+ if (!strcmp(attr_name_kobj->name, SETUP_PASSWD))
+ sysfs_remove_group(attr_name_kobj,
+ &password_attr_group);
+ else
+ sysfs_remove_group(attr_name_kobj,
+ &password_attr_group);
+ }
+ }
+ bioscfg_drv.password_instances_count = 0;
+ kfree(bioscfg_drv.password_data);
+ bioscfg_drv.password_data = NULL;
+}
diff --git a/drivers/platform/x86/hp/hp-bioscfg/spmobj-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/spmobj-attributes.c
new file mode 100644
index 000000000000..86f90238750c
--- /dev/null
+++ b/drivers/platform/x86/hp/hp-bioscfg/spmobj-attributes.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions corresponding to secure platform management object type
+ * attributes under BIOS PASSWORD for use with hp-bioscfg driver
+ *
+ * Copyright (c) 2022 HP Development Company, L.P.
+ */
+
+#include "bioscfg.h"
+
+static const char * const spm_state_types[] = {
+ "not provisioned",
+ "provisioned",
+ "provisioning in progress",
+};
+
+static const char * const spm_mechanism_types[] = {
+ "not provisioned",
+ "signing-key",
+ "endorsement-key",
+};
+
+struct secureplatform_provisioning_data {
+ u8 state;
+ u8 version[2];
+ u8 reserved1;
+ u32 features;
+ u32 nonce;
+ u8 reserved2[28];
+ u8 sk_mod[MAX_KEY_MOD_SIZE];
+ u8 kek_mod[MAX_KEY_MOD_SIZE];
+};
+
+/**
+ * hp_calculate_security_buffer() - determines size of security buffer
+ * for authentication scheme
+ *
+ * @authentication: the authentication content
+ *
+ * Currently only supported type is Admin password
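+ *
+ * For an N-character plain-text password the returned size is
+ * (1 + strlen(UTF_PREFIX) + N) * sizeof(u16); strings already
+ * carrying BEAM_PREFIX skip the UTF_PREFIX portion.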
+ */
+size_t hp_calculate_security_buffer(const char *authentication)
+{
+ size_t size, authlen;
+
+ if (!authentication)
+ return sizeof(u16) * 2;
+
+ authlen = strlen(authentication);
+ if (!authlen)
+ return sizeof(u16) * 2;
+
+ size = sizeof(u16) + authlen * sizeof(u16);
+ if (!strstarts(authentication, BEAM_PREFIX))
+ size += strlen(UTF_PREFIX) * sizeof(u16);
+
+ return size;
+}
+
+/**
+ * hp_populate_security_buffer() - builds a security buffer for
+ * authentication scheme
+ *
+ * @authbuf: the security buffer
+ * @authentication: the authentication content
+ *
+ * Currently only supported type is PLAIN TEXT
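+ *
+ * @authbuf is expected to be at least
+ * hp_calculate_security_buffer(@authentication) bytes long, since both
+ * helpers describe the same encoding.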
+ */
+int hp_populate_security_buffer(u16 *authbuf, const char *authentication)
+{
+ u16 *auth = authbuf;
+ char *strprefix = NULL;
+ int ret = 0;
+
+ if (strstarts(authentication, BEAM_PREFIX)) {
+ /*
+ * BEAM_PREFIX is appended to authbuf when a signature
+ * is provided and Sure Admin is enabled in BIOS.
+ */
+ /* BEAM_PREFIX found, convert part to unicode */
+ auth = hp_ascii_to_utf16_unicode(auth, authentication);
+ if (!auth)
+ return -EINVAL;
+
+ } else {
+ /*
+ * The UTF-16 prefix is appended to authbuf when a BIOS
+ * admin password is configured in BIOS.
+ */
+
+ /* append UTF_PREFIX to part and then convert it to unicode */
+ strprefix = kasprintf(GFP_KERNEL, "%s%s", UTF_PREFIX,
+ authentication);
+ if (!strprefix)
+ return -ENOMEM;
+
+ auth = hp_ascii_to_utf16_unicode(auth, strprefix);
+ kfree(strprefix);
+
+ if (!auth) {
+ ret = -EINVAL;
+ goto out_buffer;
+ }
+ }
+
+out_buffer:
+ return ret;
+}
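The two helpers above are meant to be paired: hp_calculate_security_buffer() sizes the buffer and hp_populate_security_buffer() fills it. The following is a hedged sketch of such a caller; the wrapper name, the kzalloc-based allocation, and the error flow are illustrative and not taken from the driver itself.

static int example_build_auth_buffer(const char *password, u16 **out, size_t *out_size)
{
	/* Size first, then allocate and populate (illustrative pairing). */
	size_t size = hp_calculate_security_buffer(password);
	u16 *buf = kzalloc(size, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = hp_populate_security_buffer(buf, password);
	if (ret) {
		kfree(buf);
		return ret;
	}

	*out = buf;
	*out_size = size;
	return 0;
}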
+
+static ssize_t update_spm_state(void)
+{
+ struct secureplatform_provisioning_data data;
+ int ret;
+
+ ret = hp_wmi_perform_query(HPWMI_SECUREPLATFORM_GET_STATE,
+ HPWMI_SECUREPLATFORM, &data, 0,
+ sizeof(data));
+ if (ret < 0)
+ return ret;
+
+ bioscfg_drv.spm_data.mechanism = data.state;
+ if (bioscfg_drv.spm_data.mechanism)
+ bioscfg_drv.spm_data.is_enabled = 1;
+
+ return 0;
+}
+
+static ssize_t statusbin(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ struct secureplatform_provisioning_data *buf)
+{
+ int ret = hp_wmi_perform_query(HPWMI_SECUREPLATFORM_GET_STATE,
+ HPWMI_SECUREPLATFORM, buf, 0,
+ sizeof(*buf));
+
+ if (ret < 0)
+ return ret;
+
+ return sizeof(struct secureplatform_provisioning_data);
+}
+
+/*
+ * status_show - Reads SPM status
+ */
+static ssize_t status_show(struct kobject *kobj, struct kobj_attribute
+ *attr, char *buf)
+{
+ int ret, i;
+ int len = 0;
+ struct secureplatform_provisioning_data data;
+
+ ret = statusbin(kobj, attr, &data);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * 'status' is a read-only file that returns ASCII text in
+ * JSON format reporting the status information.
+ *
+ * "State": "not provisioned | provisioned | provisioning in progress ",
+ * "Version": " Major. Minor ",
+ * "Nonce": <16-bit unsigned number display in base 10>,
+ * "FeaturesInUse": <16-bit unsigned number display in base 10>,
+ * "EndorsementKeyMod": "<256 bytes in base64>",
+ * "SigningKeyMod": "<256 bytes in base64>"
+ */
+
+ len += sysfs_emit_at(buf, len, "{\n");
+ len += sysfs_emit_at(buf, len, "\t\"State\": \"%s\",\n",
+ spm_state_types[data.state]);
+ len += sysfs_emit_at(buf, len, "\t\"Version\": \"%d.%d\"",
+ data.version[0], data.version[1]);
+
+ /*
+ * state == 0 means secure platform management
+ * feature is not configured in BIOS.
+ */
+ if (data.state == 0) {
+ len += sysfs_emit_at(buf, len, "\n");
+ goto status_exit;
+ } else {
+ len += sysfs_emit_at(buf, len, ",\n");
+ }
+
+ len += sysfs_emit_at(buf, len, "\t\"Nonce\": %d,\n", data.nonce);
+ len += sysfs_emit_at(buf, len, "\t\"FeaturesInUse\": %d,\n", data.features);
+ len += sysfs_emit_at(buf, len, "\t\"EndorsementKeyMod\": \"");
+
+ for (i = 255; i >= 0; i--)
+ len += sysfs_emit_at(buf, len, " %u", data.kek_mod[i]);
+
+ len += sysfs_emit_at(buf, len, " \",\n");
+ len += sysfs_emit_at(buf, len, "\t\"SigningKeyMod\": \"");
+
+ for (i = 255; i >= 0; i--)
+ len += sysfs_emit_at(buf, len, " %u", data.sk_mod[i]);
+
+ /* Return buf contents */
+ len += sysfs_emit_at(buf, len, " \"\n");
+
+status_exit:
+ len += sysfs_emit_at(buf, len, "}\n");
+
+ return len;
+}
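Because 'status' is an ordinary read-only sysfs attribute, it can be consumed with plain file I/O. Below is a hypothetical userspace reader; the sysfs path is assumed and depends on where the firmware-attributes class exposes this driver's attributes.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char line[256];
	/* Path assumed; adjust to the actual firmware-attributes location. */
	FILE *f = fopen("/sys/class/firmware-attributes/hp-bioscfg/attributes/SPM/status", "r");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}

	/* The attribute emits ASCII text in the JSON-like layout shown above. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return EXIT_SUCCESS;
}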
+
+static struct kobj_attribute password_spm_status = __ATTR_RO(status);
+
+ATTRIBUTE_SPM_N_PROPERTY_SHOW(is_enabled, spm);
+static struct kobj_attribute password_spm_is_key_enabled = __ATTR_RO(is_enabled);
+
+static ssize_t key_mechanism_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n",
+ spm_mechanism_types[bioscfg_drv.spm_data.mechanism]);
+}
+
+static struct kobj_attribute password_spm_key_mechanism = __ATTR_RO(key_mechanism);
+
+static ssize_t sk_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ int length;
+
+ length = count;
+ if (buf[length - 1] == '\n')
+ length--;
+
+ /* allocate space and copy current signing key */
+ bioscfg_drv.spm_data.signing_key = kmemdup(buf, length, GFP_KERNEL);
+ if (!bioscfg_drv.spm_data.signing_key)
+ return -ENOMEM;
+
+ /* submit signing key payload */
+ ret = hp_wmi_perform_query(HPWMI_SECUREPLATFORM_SET_SK,
+ HPWMI_SECUREPLATFORM,
+ (void *)bioscfg_drv.spm_data.signing_key,
+ count, 0);
+
+ if (!ret) {
+ bioscfg_drv.spm_data.mechanism = SIGNING_KEY;
+ hp_set_reboot_and_signal_event();
+ }
+
+ kfree(bioscfg_drv.spm_data.signing_key);
+ bioscfg_drv.spm_data.signing_key = NULL;
+
+ return ret ? ret : count;
+}
+
+static struct kobj_attribute password_spm_signing_key = __ATTR_WO(sk);
+
+static ssize_t kek_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ int length;
+
+ length = count;
+ if (buf[length - 1] == '\n')
+ length--;
+
+ /* allocate space and copy current endorsement key */
+ bioscfg_drv.spm_data.endorsement_key = kmemdup(buf, length, GFP_KERNEL);
+ if (!bioscfg_drv.spm_data.endorsement_key) {
+ ret = -ENOMEM;
+ goto exit_kek;
+ }
+
+ ret = hp_wmi_perform_query(HPWMI_SECUREPLATFORM_SET_KEK,
+ HPWMI_SECUREPLATFORM,
+ (void *)bioscfg_drv.spm_data.endorsement_key,
+ count, 0);
+
+ if (!ret) {
+ bioscfg_drv.spm_data.mechanism = ENDORSEMENT_KEY;
+ hp_set_reboot_and_signal_event();
+ }
+
+exit_kek:
+ kfree(bioscfg_drv.spm_data.endorsement_key);
+ bioscfg_drv.spm_data.endorsement_key = NULL;
+
+ return ret ? ret : count;
+}
+
+static struct kobj_attribute password_spm_endorsement_key = __ATTR_WO(kek);
+
+static ssize_t role_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", BIOS_SPM);
+}
+
+static struct kobj_attribute password_spm_role = __ATTR_RO(role);
+
+static ssize_t auth_token_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret = 0;
+ int length;
+
+ length = count;
+ if (buf[length - 1] == '\n')
+ length--;
+
+ /* allocate space and copy current auth token */
+ bioscfg_drv.spm_data.auth_token = kmemdup(buf, length, GFP_KERNEL);
+ if (!bioscfg_drv.spm_data.auth_token) {
+ ret = -ENOMEM;
+ goto exit_token;
+ }
+
+ return count;
+
+exit_token:
+ kfree(bioscfg_drv.spm_data.auth_token);
+ bioscfg_drv.spm_data.auth_token = NULL;
+
+ return ret;
+}
+
+static struct kobj_attribute password_spm_auth_token = __ATTR_WO(auth_token);
+
+static struct attribute *secure_platform_attrs[] = {
+ &password_spm_is_key_enabled.attr,
+ &password_spm_signing_key.attr,
+ &password_spm_endorsement_key.attr,
+ &password_spm_key_mechanism.attr,
+ &password_spm_status.attr,
+ &password_spm_role.attr,
+ &password_spm_auth_token.attr,
+ NULL,
+};
+
+static const struct attribute_group secure_platform_attr_group = {
+ .attrs = secure_platform_attrs,
+};
+
+void hp_exit_secure_platform_attributes(void)
+{
+ /* remove secure platform sysfs entry and free key data */
+
+ kfree(bioscfg_drv.spm_data.endorsement_key);
+ bioscfg_drv.spm_data.endorsement_key = NULL;
+
+ kfree(bioscfg_drv.spm_data.signing_key);
+ bioscfg_drv.spm_data.signing_key = NULL;
+
+ kfree(bioscfg_drv.spm_data.auth_token);
+ bioscfg_drv.spm_data.auth_token = NULL;
+
+ if (bioscfg_drv.spm_data.attr_name_kobj)
+ sysfs_remove_group(bioscfg_drv.spm_data.attr_name_kobj,
+ &secure_platform_attr_group);
+}
+
+int hp_populate_secure_platform_data(struct kobject *attr_name_kobj)
+{
+ /* Populate data for Secure Platform Management */
+ bioscfg_drv.spm_data.attr_name_kobj = attr_name_kobj;
+
+ strscpy(bioscfg_drv.spm_data.attribute_name, SPM_STR,
+ sizeof(bioscfg_drv.spm_data.attribute_name));
+
+ bioscfg_drv.spm_data.is_enabled = 0;
+ bioscfg_drv.spm_data.mechanism = 0;
+ bioscfg_drv.pending_reboot = false;
+ update_spm_state();
+
+ bioscfg_drv.spm_data.endorsement_key = NULL;
+ bioscfg_drv.spm_data.signing_key = NULL;
+ bioscfg_drv.spm_data.auth_token = NULL;
+
+ return sysfs_create_group(attr_name_kobj, &secure_platform_attr_group);
+}
diff --git a/drivers/platform/x86/hp/hp-bioscfg/string-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/string-attributes.c
new file mode 100644
index 000000000000..f0c20070094d
--- /dev/null
+++ b/drivers/platform/x86/hp/hp-bioscfg/string-attributes.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions corresponding to string type attributes under
+ * HP_WMI_BIOS_STRING_GUID for use with hp-bioscfg driver.
+ *
+ * Copyright (c) 2022 HP Development Company, L.P.
+ */
+
+#include "bioscfg.h"
+
+#define WMI_STRING_TYPE "HPBIOS_BIOSString"
+
+GET_INSTANCE_ID(string);
+
+static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int instance_id = get_string_instance_id(kobj);
+
+ if (instance_id < 0)
+ return -EIO;
+
+ return sysfs_emit(buf, "%s\n",
+ bioscfg_drv.string_data[instance_id].current_value);
+}
+
+/**
+ * validate_string_input() -
+ * Validate input of current_value against min and max lengths
+ *
+ * @instance_id: The instance on which input is validated
+ * @buf: Input value
+ */
+static int validate_string_input(int instance_id, const char *buf)
+{
+ int in_len = strlen(buf);
+ struct string_data *string_data = &bioscfg_drv.string_data[instance_id];
+
+ /* BIOS treats it as a read only attribute */
+ if (string_data->common.is_readonly)
+ return -EIO;
+
+ if (in_len < string_data->min_length || in_len > string_data->max_length)
+ return -ERANGE;
+
+ return 0;
+}
+
+static void update_string_value(int instance_id, char *attr_value)
+{
+ struct string_data *string_data = &bioscfg_drv.string_data[instance_id];
+
+ /* Write settings to BIOS */
+ strscpy(string_data->current_value, attr_value, sizeof(string_data->current_value));
+}
+
+/*
+ * ATTRIBUTE_S_COMMON_PROPERTY_SHOW(display_name_language_code, string);
+ * static struct kobj_attribute string_display_langcode =
+ * __ATTR_RO(display_name_language_code);
+ */
+
+ATTRIBUTE_S_COMMON_PROPERTY_SHOW(display_name, string);
+static struct kobj_attribute string_display_name =
+ __ATTR_RO(display_name);
+
+ATTRIBUTE_PROPERTY_STORE(current_value, string);
+static struct kobj_attribute string_current_val =
+ __ATTR_RW_MODE(current_value, 0644);
+
+ATTRIBUTE_N_PROPERTY_SHOW(min_length, string);
+static struct kobj_attribute string_min_length =
+ __ATTR_RO(min_length);
+
+ATTRIBUTE_N_PROPERTY_SHOW(max_length, string);
+static struct kobj_attribute string_max_length =
+ __ATTR_RO(max_length);
+
+static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "string\n");
+}
+
+static struct kobj_attribute string_type =
+ __ATTR_RO(type);
+
+static struct attribute *string_attrs[] = {
+ &common_display_langcode.attr,
+ &string_display_name.attr,
+ &string_current_val.attr,
+ &string_min_length.attr,
+ &string_max_length.attr,
+ &string_type.attr,
+ NULL
+};
+
+static const struct attribute_group string_attr_group = {
+ .attrs = string_attrs,
+};
+
+int hp_alloc_string_data(void)
+{
+ bioscfg_drv.string_instances_count = hp_get_instance_count(HP_WMI_BIOS_STRING_GUID);
+ bioscfg_drv.string_data = kcalloc(bioscfg_drv.string_instances_count,
+ sizeof(*bioscfg_drv.string_data), GFP_KERNEL);
+ if (!bioscfg_drv.string_data) {
+ bioscfg_drv.string_instances_count = 0;
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* Expected Values types associated with each element */
+static const acpi_object_type expected_string_types[] = {
+ [NAME] = ACPI_TYPE_STRING,
+ [VALUE] = ACPI_TYPE_STRING,
+ [PATH] = ACPI_TYPE_STRING,
+ [IS_READONLY] = ACPI_TYPE_INTEGER,
+ [DISPLAY_IN_UI] = ACPI_TYPE_INTEGER,
+ [REQUIRES_PHYSICAL_PRESENCE] = ACPI_TYPE_INTEGER,
+ [SEQUENCE] = ACPI_TYPE_INTEGER,
+ [PREREQUISITES_SIZE] = ACPI_TYPE_INTEGER,
+ [PREREQUISITES] = ACPI_TYPE_STRING,
+ [SECURITY_LEVEL] = ACPI_TYPE_INTEGER,
+ [STR_MIN_LENGTH] = ACPI_TYPE_INTEGER,
+ [STR_MAX_LENGTH] = ACPI_TYPE_INTEGER,
+};
+
+static int hp_populate_string_elements_from_package(union acpi_object *string_obj,
+ int string_obj_count,
+ int instance_id)
+{
+ char *str_value = NULL;
+ int value_len;
+ int ret = 0;
+ u32 int_value = 0;
+ int elem;
+ int reqs;
+ int eloc;
+ int size;
+ struct string_data *string_data = &bioscfg_drv.string_data[instance_id];
+
+ if (!string_obj)
+ return -EINVAL;
+
+ for (elem = 1, eloc = 1; elem < string_obj_count; elem++, eloc++) {
+ /* ONLY look at the first STR_ELEM_CNT elements */
+ if (eloc == STR_ELEM_CNT)
+ goto exit_string_package;
+
+ switch (string_obj[elem].type) {
+ case ACPI_TYPE_STRING:
+ if (elem != PREREQUISITES) {
+ ret = hp_convert_hexstr_to_str(string_obj[elem].string.pointer,
+ string_obj[elem].string.length,
+ &str_value, &value_len);
+
+ if (ret)
+ continue;
+ }
+ break;
+ case ACPI_TYPE_INTEGER:
+ int_value = (u32)string_obj[elem].integer.value;
+ break;
+ default:
+ pr_warn("Unsupported object type [%d]\n", string_obj[elem].type);
+ continue;
+ }
+
+ /* Check that both expected and read object type match */
+ if (expected_string_types[eloc] != string_obj[elem].type) {
+ pr_err("Error expected type %d for elem %d, but got type %d instead\n",
+ expected_string_types[eloc], elem, string_obj[elem].type);
+ kfree(str_value);
+ return -EIO;
+ }
+
+ /* Assign appropriate element value to corresponding field */
+ switch (eloc) {
+ case VALUE:
+ strscpy(string_data->current_value,
+ str_value, sizeof(string_data->current_value));
+ break;
+ case PATH:
+ strscpy(string_data->common.path, str_value,
+ sizeof(string_data->common.path));
+ break;
+ case IS_READONLY:
+ string_data->common.is_readonly = int_value;
+ break;
+ case DISPLAY_IN_UI:
+ string_data->common.display_in_ui = int_value;
+ break;
+ case REQUIRES_PHYSICAL_PRESENCE:
+ string_data->common.requires_physical_presence = int_value;
+ break;
+ case SEQUENCE:
+ string_data->common.sequence = int_value;
+ break;
+ case PREREQUISITES_SIZE:
+ if (int_value > MAX_PREREQUISITES_SIZE) {
+ pr_warn("Prerequisites size value exceeded the maximum number of elements supported or data may be malformed\n");
+ int_value = MAX_PREREQUISITES_SIZE;
+ }
+ string_data->common.prerequisites_size = int_value;
+
+ /*
+ * This step is needed to keep the expected
+ * element list pointing to the right obj[elem].type
+ * when the size is zero. PREREQUISITES
+ * object is omitted by BIOS when the size is
+ * zero.
+ */
+ if (string_data->common.prerequisites_size == 0)
+ eloc++;
+ break;
+ case PREREQUISITES:
+ size = min_t(u32, string_data->common.prerequisites_size,
+ MAX_PREREQUISITES_SIZE);
+
+ for (reqs = 0; reqs < size; reqs++) {
+ if (elem >= string_obj_count) {
+ pr_err("Error elem-objects package is too small\n");
+ return -EINVAL;
+ }
+
+ ret = hp_convert_hexstr_to_str(string_obj[elem + reqs].string.pointer,
+ string_obj[elem + reqs].string.length,
+ &str_value, &value_len);
+
+ if (ret)
+ continue;
+
+ strscpy(string_data->common.prerequisites[reqs],
+ str_value,
+ sizeof(string_data->common.prerequisites[reqs]));
+ kfree(str_value);
+ str_value = NULL;
+ }
+ break;
+
+ case SECURITY_LEVEL:
+ string_data->common.security_level = int_value;
+ break;
+ case STR_MIN_LENGTH:
+ string_data->min_length = int_value;
+ break;
+ case STR_MAX_LENGTH:
+ string_data->max_length = int_value;
+ break;
+ default:
+ pr_warn("Invalid element: %d found in String attribute or data may be malformed\n", elem);
+ break;
+ }
+
+ kfree(str_value);
+ str_value = NULL;
+ }
+
+exit_string_package:
+ kfree(str_value);
+ return 0;
+}
+
+/**
+ * hp_populate_string_package_data() -
+ * Populate all properties of an instance under string attribute
+ *
+ * @string_obj: ACPI object with string data
+ * @instance_id: The instance to enumerate
+ * @attr_name_kobj: The parent kernel object
+ */
+int hp_populate_string_package_data(union acpi_object *string_obj,
+ int instance_id,
+ struct kobject *attr_name_kobj)
+{
+ struct string_data *string_data = &bioscfg_drv.string_data[instance_id];
+
+ string_data->attr_name_kobj = attr_name_kobj;
+
+ hp_populate_string_elements_from_package(string_obj,
+ string_obj->package.count,
+ instance_id);
+
+ hp_update_attribute_permissions(string_data->common.is_readonly,
+ &string_current_val);
+ hp_friendly_user_name_update(string_data->common.path,
+ attr_name_kobj->name,
+ string_data->common.display_name,
+ sizeof(string_data->common.display_name));
+ return sysfs_create_group(attr_name_kobj, &string_attr_group);
+}
+
+static int hp_populate_string_elements_from_buffer(u8 *buffer_ptr, u32 *buffer_size,
+ int instance_id)
+{
+ int ret = 0;
+ struct string_data *string_data = &bioscfg_drv.string_data[instance_id];
+
+ /*
+ * Only data relevant to this driver and its functionality is
+ * read. BIOS defines the order in which each element is
+ * read; element 0 is not relevant to this driver and is
+ * ignored. For clarity, the element names used below (such as
+ * DISPLAY_IN_UI) reflect that read order and match the
+ * variables where the data is stored.
+ *
+ * In an earlier implementation, reported errors were ignored,
+ * leaving the data uninitialized. It is not possible to
+ * determine whether data read from BIOS is valid, so these
+ * functions may return an error without validating the data
+ * itself.
+ */
+
+ // VALUE:
+ ret = hp_get_string_from_buffer(&buffer_ptr, buffer_size, string_data->current_value,
+ sizeof(string_data->current_value));
+ if (ret < 0)
+ goto buffer_exit;
+
+ // COMMON:
+ ret = hp_get_common_data_from_buffer(&buffer_ptr, buffer_size, &string_data->common);
+ if (ret < 0)
+ goto buffer_exit;
+
+ // STR_MIN_LENGTH:
+ ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
+ &string_data->min_length);
+ if (ret < 0)
+ goto buffer_exit;
+
+ // STR_MAX_LENGTH:
+ ret = hp_get_integer_from_buffer(&buffer_ptr, buffer_size,
+ &string_data->max_length);
+
+buffer_exit:
+
+ return ret;
+}
+
+/**
+ * hp_populate_string_buffer_data() -
+ * Populate all properties of an instance under string attribute
+ *
+ * @buffer_ptr: Buffer pointer
+ * @buffer_size: Buffer size
+ * @instance_id: The instance to enumerate
+ * @attr_name_kobj: The parent kernel object
+ */
+int hp_populate_string_buffer_data(u8 *buffer_ptr, u32 *buffer_size,
+ int instance_id,
+ struct kobject *attr_name_kobj)
+{
+ struct string_data *string_data = &bioscfg_drv.string_data[instance_id];
+ int ret = 0;
+
+ string_data->attr_name_kobj = attr_name_kobj;
+
+ ret = hp_populate_string_elements_from_buffer(buffer_ptr, buffer_size,
+ instance_id);
+ if (ret < 0)
+ return ret;
+
+ hp_update_attribute_permissions(string_data->common.is_readonly,
+ &string_current_val);
+ hp_friendly_user_name_update(string_data->common.path,
+ attr_name_kobj->name,
+ string_data->common.display_name,
+ sizeof(string_data->common.display_name));
+
+ return sysfs_create_group(attr_name_kobj, &string_attr_group);
+}
+
+/**
+ * hp_exit_string_attributes() - Clear all attribute data
+ *
+ * Clears all data allocated for this group of attributes
+ */
+void hp_exit_string_attributes(void)
+{
+ int instance_id;
+
+ for (instance_id = 0; instance_id < bioscfg_drv.string_instances_count;
+ instance_id++) {
+ struct kobject *attr_name_kobj =
+ bioscfg_drv.string_data[instance_id].attr_name_kobj;
+
+ if (attr_name_kobj)
+ sysfs_remove_group(attr_name_kobj, &string_attr_group);
+ }
+ bioscfg_drv.string_instances_count = 0;
+
+ kfree(bioscfg_drv.string_data);
+ bioscfg_drv.string_data = NULL;
+}
diff --git a/drivers/platform/x86/hp/hp-bioscfg/surestart-attributes.c b/drivers/platform/x86/hp/hp-bioscfg/surestart-attributes.c
new file mode 100644
index 000000000000..b57e42f29282
--- /dev/null
+++ b/drivers/platform/x86/hp/hp-bioscfg/surestart-attributes.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions corresponding to sure start object type attributes under
+ * BIOS for use with hp-bioscfg driver
+ *
+ * Copyright (c) 2022 HP Development Company, L.P.
+ */
+
+#include "bioscfg.h"
+#include <linux/types.h>
+
+/*
+ * Maximum number of log entries supported when the log entry size is
+ * 16 bytes. This value is calculated by dividing the page size (4096)
+ * by the log entry size.
+ */
+#define LOG_MAX_ENTRIES 254
+
+/*
+ * Current log entry size. This size will change in the future. The
+ * driver reads a total of 128 bytes for each log entry provided by
+ * BIOS, but only the first 16 bytes are used.
+ */
+#define LOG_ENTRY_SIZE 16
+
+/*
+ * audit_log_entry_count_show - Reports the number of
+ * existing audit log entries available
+ * to be read
+ */
+static ssize_t audit_log_entry_count_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int ret;
+ u32 count = 0;
+
+ ret = hp_wmi_perform_query(HPWMI_SURESTART_GET_LOG_COUNT,
+ HPWMI_SURESTART,
+ &count, 1, sizeof(count));
+
+ if (ret < 0)
+ return ret;
+
+ return sysfs_emit(buf, "%d,%d,%d\n", count, LOG_ENTRY_SIZE,
+ LOG_MAX_ENTRIES);
+}
+
+/*
+ * audit_log_entries_show() - Return all entries found in log file
+ */
+static ssize_t audit_log_entries_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int ret;
+ int i;
+ u32 count = 0;
+ u8 audit_log_buffer[128];
+
+ // Get the number of event logs
+ ret = hp_wmi_perform_query(HPWMI_SURESTART_GET_LOG_COUNT,
+ HPWMI_SURESTART,
+ &count, 1, sizeof(count));
+
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The show() API will not work if the audit logs ever grow
+ * beyond 4KB
+ */
+ if (count * LOG_ENTRY_SIZE > PAGE_SIZE)
+ return -EIO;
+
+ /*
+ * We are guaranteed the buffer is 4KB so today all the event
+ * logs will fit
+ */
+ for (i = 0; i < count; i++) {
+ audit_log_buffer[0] = i + 1;
+
+ /*
+ * Read one audit log entry at a time. The input buffer
+ * selects the audit log entry to be read: on input,
+ * byte 0 holds the entry number, counted from the
+ * beginning (1..254). Entry number 1 is the newest
+ * entry, whereas the highest entry number (the number
+ * of entries) is the oldest entry.
+ */
+ ret = hp_wmi_perform_query(HPWMI_SURESTART_GET_LOG,
+ HPWMI_SURESTART,
+ audit_log_buffer, 1, 128);
+
+ if (ret < 0 || (LOG_ENTRY_SIZE * i) > PAGE_SIZE) {
+ /*
+ * Encountered a failure while reading
+ * individual logs. Only a partial list of
+ * audit log entries will be returned.
+ */
+ break;
+ } else {
+ memcpy(buf, audit_log_buffer, LOG_ENTRY_SIZE);
+ buf += LOG_ENTRY_SIZE;
+ }
+ }
+
+ return i * LOG_ENTRY_SIZE;
+}
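Taken together, audit_log_entry_count reports "<count>,<entry size>,<max entries>" as text while audit_log_entries returns count * 16 bytes of binary data. A hypothetical userspace consumer is sketched below; the sysfs directory name is assumed and error handling is kept minimal.

#include <stdio.h>
#include <stdlib.h>

#define SURE_START_DIR "/sys/class/firmware-attributes/hp-bioscfg/attributes/Sure_Start"

int main(void)
{
	unsigned int count = 0, entry_size = 0, max_entries = 0;
	unsigned char *log;
	size_t i, total;
	FILE *f;

	f = fopen(SURE_START_DIR "/audit_log_entry_count", "r");
	if (!f || fscanf(f, "%u,%u,%u", &count, &entry_size, &max_entries) != 3) {
		perror("audit_log_entry_count");
		return EXIT_FAILURE;
	}
	fclose(f);

	if (!count || !entry_size)
		return EXIT_SUCCESS;

	total = (size_t)count * entry_size;
	log = malloc(total);
	if (!log)
		return EXIT_FAILURE;

	f = fopen(SURE_START_DIR "/audit_log_entries", "r");
	if (!f || fread(log, 1, total, f) != total) {
		perror("audit_log_entries");
		free(log);
		return EXIT_FAILURE;
	}
	fclose(f);

	/* Dump each entry as hex; entry 1 (the newest) comes first. */
	for (i = 0; i < total; i++)
		printf("%02x%c", log[i], ((i + 1) % entry_size) ? ' ' : '\n');

	free(log);
	return EXIT_SUCCESS;
}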
+
+static struct kobj_attribute sure_start_audit_log_entry_count = __ATTR_RO(audit_log_entry_count);
+static struct kobj_attribute sure_start_audit_log_entries = __ATTR_RO(audit_log_entries);
+
+static struct attribute *sure_start_attrs[] = {
+ &sure_start_audit_log_entry_count.attr,
+ &sure_start_audit_log_entries.attr,
+ NULL
+};
+
+static const struct attribute_group sure_start_attr_group = {
+ .attrs = sure_start_attrs,
+};
+
+void hp_exit_sure_start_attributes(void)
+{
+ sysfs_remove_group(bioscfg_drv.sure_start_attr_kobj,
+ &sure_start_attr_group);
+}
+
+int hp_populate_sure_start_data(struct kobject *attr_name_kobj)
+{
+ bioscfg_drv.sure_start_attr_kobj = attr_name_kobj;
+ return sysfs_create_group(attr_name_kobj, &sure_start_attr_group);
+}
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 6d9297c1d96c..ac037540acfc 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -10,6 +10,7 @@
#include <linux/acpi.h>
#include <linux/backlight.h>
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
@@ -85,6 +86,31 @@ enum {
SALS_FNLOCK_OFF = 0xf,
};
+/*
+ * These correspond to the number of supported states - 1.
+ * Future keyboard types may need a new system if there is a collision.
+ * KBD_BL_TRISTATE_AUTO has no way to report or set the auto state,
+ * so it effectively has 3 states but needs to handle 4.
+ */
+enum {
+ KBD_BL_STANDARD = 1,
+ KBD_BL_TRISTATE = 2,
+ KBD_BL_TRISTATE_AUTO = 3,
+};
+
+#define KBD_BL_QUERY_TYPE 0x1
+#define KBD_BL_TRISTATE_TYPE 0x5
+#define KBD_BL_TRISTATE_AUTO_TYPE 0x7
+
+#define KBD_BL_COMMAND_GET 0x2
+#define KBD_BL_COMMAND_SET 0x3
+#define KBD_BL_COMMAND_TYPE GENMASK(7, 4)
+
+#define KBD_BL_GET_BRIGHTNESS GENMASK(15, 1)
+#define KBD_BL_SET_BRIGHTNESS GENMASK(19, 16)
+
+#define KBD_BL_KBLC_CHANGED_EVENT 12
+
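The masks above describe how a single KBLC argument word is packed: the command occupies the low nibble, the keyboard type bits 7:4, the brightness to set bits 19:16, and a GET reply carries the brightness in bits 15:1. The standalone model below uses plain shifts in place of FIELD_PREP()/FIELD_GET() to show that packing; the numeric example values are illustrative only.

#include <stdio.h>

#define CMD_GET			0x2UL	/* KBD_BL_COMMAND_GET */
#define CMD_SET			0x3UL	/* KBD_BL_COMMAND_SET */
#define TYPE_SHIFT		4	/* KBD_BL_COMMAND_TYPE is GENMASK(7, 4) */
#define SET_BRIGHTNESS_SHIFT	16	/* KBD_BL_SET_BRIGHTNESS is GENMASK(19, 16) */
#define GET_BRIGHTNESS_SHIFT	1	/* KBD_BL_GET_BRIGHTNESS is GENMASK(15, 1) */
#define GET_BRIGHTNESS_MASK	0x7fffUL

int main(void)
{
	unsigned long type = 0x5;	/* KBD_BL_TRISTATE_TYPE */

	/* GET argument: command in bits 3:0, keyboard type in bits 7:4. */
	unsigned long get_cmd = (type << TYPE_SHIFT) | CMD_GET;

	/* SET argument for brightness level 2 adds the level in bits 19:16. */
	unsigned long set_cmd = (2UL << SET_BRIGHTNESS_SHIFT) |
				(type << TYPE_SHIFT) | CMD_SET;

	/* Decoding an (illustrative) GET reply extracts bits 15:1. */
	unsigned long reply = 0x4;
	unsigned long level = (reply >> GET_BRIGHTNESS_SHIFT) & GET_BRIGHTNESS_MASK;

	printf("get=0x%lx set=0x%lx level=%lu\n", get_cmd, set_cmd, level);
	return 0;
}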
struct ideapad_dytc_priv {
enum platform_profile_option current_profile;
struct platform_profile_handler pprof;
@@ -122,6 +148,7 @@ struct ideapad_private {
} features;
struct {
bool initialized;
+ int type;
struct led_classdev led;
unsigned int last_brightness;
} kbd_bl;
@@ -242,6 +269,16 @@ static int exec_sals(acpi_handle handle, unsigned long arg)
return exec_simple_method(handle, "SALS", arg);
}
+static int exec_kblc(acpi_handle handle, unsigned long arg)
+{
+ return exec_simple_method(handle, "KBLC", arg);
+}
+
+static int eval_kblc(acpi_handle handle, unsigned long cmd, unsigned long *res)
+{
+ return eval_int_with_arg(handle, "KBLC", cmd, res);
+}
+
static int eval_dytc(acpi_handle handle, unsigned long cmd, unsigned long *res)
{
return eval_int_with_arg(handle, "DYTC", cmd, res);
@@ -1275,16 +1312,47 @@ static void ideapad_backlight_notify_brightness(struct ideapad_private *priv)
/*
* keyboard backlight
*/
+static int ideapad_kbd_bl_check_tristate(int type)
+{
+ return (type == KBD_BL_TRISTATE) || (type == KBD_BL_TRISTATE_AUTO);
+}
+
static int ideapad_kbd_bl_brightness_get(struct ideapad_private *priv)
{
- unsigned long hals;
+ unsigned long value;
int err;
- err = eval_hals(priv->adev->handle, &hals);
+ if (ideapad_kbd_bl_check_tristate(priv->kbd_bl.type)) {
+ err = eval_kblc(priv->adev->handle,
+ FIELD_PREP(KBD_BL_COMMAND_TYPE, priv->kbd_bl.type) |
+ KBD_BL_COMMAND_GET,
+ &value);
+
+ if (err)
+ return err;
+
+ /* Convert returned value to brightness level */
+ value = FIELD_GET(KBD_BL_GET_BRIGHTNESS, value);
+
+ /* Off, low or high */
+ if (value <= priv->kbd_bl.led.max_brightness)
+ return value;
+
+ /* Auto, report as off */
+ if (value == priv->kbd_bl.led.max_brightness + 1)
+ return 0;
+
+ /* Unknown value */
+ dev_warn(&priv->platform_device->dev,
+ "Unknown keyboard backlight value: %lu", value);
+ return -EINVAL;
+ }
+
+ err = eval_hals(priv->adev->handle, &value);
if (err)
return err;
- return !!test_bit(HALS_KBD_BL_STATE_BIT, &hals);
+ return !!test_bit(HALS_KBD_BL_STATE_BIT, &value);
}
static enum led_brightness ideapad_kbd_bl_led_cdev_brightness_get(struct led_classdev *led_cdev)
@@ -1296,7 +1364,21 @@ static enum led_brightness ideapad_kbd_bl_led_cdev_brightness_get(struct led_cla
static int ideapad_kbd_bl_brightness_set(struct ideapad_private *priv, unsigned int brightness)
{
- int err = exec_sals(priv->adev->handle, brightness ? SALS_KBD_BL_ON : SALS_KBD_BL_OFF);
+ int err;
+ unsigned long value;
+ int type = priv->kbd_bl.type;
+
+ if (ideapad_kbd_bl_check_tristate(type)) {
+ if (brightness > priv->kbd_bl.led.max_brightness)
+ return -EINVAL;
+
+ value = FIELD_PREP(KBD_BL_SET_BRIGHTNESS, brightness) |
+ FIELD_PREP(KBD_BL_COMMAND_TYPE, type) |
+ KBD_BL_COMMAND_SET;
+ err = exec_kblc(priv->adev->handle, value);
+ } else {
+ err = exec_sals(priv->adev->handle, brightness ? SALS_KBD_BL_ON : SALS_KBD_BL_OFF);
+ }
if (err)
return err;
@@ -1349,8 +1431,13 @@ static int ideapad_kbd_bl_init(struct ideapad_private *priv)
priv->kbd_bl.last_brightness = brightness;
+ if (ideapad_kbd_bl_check_tristate(priv->kbd_bl.type)) {
+ priv->kbd_bl.led.max_brightness = 2;
+ } else {
+ priv->kbd_bl.led.max_brightness = 1;
+ }
+
priv->kbd_bl.led.name = "platform::" LED_FUNCTION_KBD_BACKLIGHT;
- priv->kbd_bl.led.max_brightness = 1;
priv->kbd_bl.led.brightness_get = ideapad_kbd_bl_led_cdev_brightness_get;
priv->kbd_bl.led.brightness_set_blocking = ideapad_kbd_bl_led_cdev_brightness_set;
priv->kbd_bl.led.flags = LED_BRIGHT_HW_CHANGED;
@@ -1461,6 +1548,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
case 2:
ideapad_backlight_notify_power(priv);
break;
+ case KBD_BL_KBLC_CHANGED_EVENT:
case 1:
/*
* Some IdeaPads report event 1 every ~20
@@ -1562,13 +1650,31 @@ static void ideapad_check_features(struct ideapad_private *priv)
if (test_bit(HALS_FNLOCK_SUPPORT_BIT, &val))
priv->features.fn_lock = true;
- if (test_bit(HALS_KBD_BL_SUPPORT_BIT, &val))
+ if (test_bit(HALS_KBD_BL_SUPPORT_BIT, &val)) {
priv->features.kbd_bl = true;
+ priv->kbd_bl.type = KBD_BL_STANDARD;
+ }
if (test_bit(HALS_USB_CHARGING_SUPPORT_BIT, &val))
priv->features.usb_charging = true;
}
}
+
+ if (acpi_has_method(handle, "KBLC")) {
+ if (!eval_kblc(priv->adev->handle, KBD_BL_QUERY_TYPE, &val)) {
+ if (val == KBD_BL_TRISTATE_TYPE) {
+ priv->features.kbd_bl = true;
+ priv->kbd_bl.type = KBD_BL_TRISTATE;
+ } else if (val == KBD_BL_TRISTATE_AUTO_TYPE) {
+ priv->features.kbd_bl = true;
+ priv->kbd_bl.type = KBD_BL_TRISTATE_AUTO;
+ } else {
+ dev_warn(&priv->platform_device->dev,
+ "Unknown keyboard type: %lu",
+ val);
+ }
+ }
+ }
}
#if IS_ENABLED(CONFIG_ACPI_WMI)
diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c
index d1fd6e69401c..0a95736d97e4 100644
--- a/drivers/platform/x86/intel/tpmi.c
+++ b/drivers/platform/x86/intel/tpmi.c
@@ -47,10 +47,17 @@
*/
#include <linux/auxiliary_bus.h>
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
#include <linux/intel_tpmi.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/security.h>
+#include <linux/sizes.h>
+#include <linux/string_helpers.h>
#include "vsec.h"
@@ -83,12 +90,14 @@ struct intel_tpmi_pfs_entry {
* @vsec_offset: Starting MMIO address for this feature in bytes. Essentially
* this offset = "Address" from VSEC header + PFS Capability
* offset for this feature entry.
+ * @vsec_dev: Pointer to intel_vsec_device structure for this TPMI device
*
* Represents TPMI instance information for one TPMI ID.
*/
struct intel_tpmi_pm_feature {
struct intel_tpmi_pfs_entry pfs_header;
unsigned int vsec_offset;
+ struct intel_vsec_device *vsec_dev;
};
/**
@@ -98,6 +107,8 @@ struct intel_tpmi_pm_feature {
* @feature_count: Number of TPMI of TPMI instances pointed by tpmi_features
* @pfs_start: Start of PFS offset for the TPMI instances in this device
* @plat_info: Stores platform info which can be used by the client drivers
+ * @tpmi_control_mem: Memory mapped IO for getting control information
+ * @dbgfs_dir: debugfs entry pointer
*
* Stores the information for all TPMI devices enumerated from a single PCI device.
*/
@@ -107,6 +118,8 @@ struct intel_tpmi_info {
int feature_count;
u64 pfs_start;
struct intel_tpmi_plat_info plat_info;
+ void __iomem *tpmi_control_mem;
+ struct dentry *dbgfs_dir;
};
/**
@@ -139,9 +152,19 @@ enum intel_tpmi_id {
TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */
TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */
TPMI_ID_SST = 5, /* Speed Select Technology */
+ TPMI_CONTROL_ID = 0x80, /* Special ID for getting feature status */
TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */
};
+/*
+ * The size from hardware is in u32 units. The size comes from trusted hardware,
+ * but it is better to verify it for pre-silicon platforms. Set the size to 0 when invalid.
+ */
+#define TPMI_GET_SINGLE_ENTRY_SIZE(pfs) \
+({ \
+ pfs->pfs_header.entry_size > SZ_1K ? 0 : pfs->pfs_header.entry_size << 2; \
+})
+
/* Used during auxbus device creation */
static DEFINE_IDA(intel_vsec_tpmi_ida);
@@ -175,6 +198,349 @@ struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int
}
EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_at_index, INTEL_TPMI);
+/* TPMI Control Interface */
+
+#define TPMI_CONTROL_STATUS_OFFSET 0x00
+#define TPMI_COMMAND_OFFSET 0x08
+
+/*
+ * The spec calls for a maximum of 1 second to get ownership in the
+ * worst case. Poll at 10 ms intervals and repeat for up to 1 second.
+ */
+#define TPMI_CONTROL_TIMEOUT_US (10 * USEC_PER_MSEC)
+#define TPMI_CONTROL_TIMEOUT_MAX_US (1 * USEC_PER_SEC)
+
+#define TPMI_RB_TIMEOUT_US (10 * USEC_PER_MSEC)
+#define TPMI_RB_TIMEOUT_MAX_US USEC_PER_SEC
+
+/* TPMI Control status register defines */
+
+#define TPMI_CONTROL_STATUS_RB BIT_ULL(0)
+
+#define TPMI_CONTROL_STATUS_OWNER GENMASK_ULL(5, 4)
+#define TPMI_OWNER_NONE 0
+#define TPMI_OWNER_IN_BAND 1
+
+#define TPMI_CONTROL_STATUS_CPL BIT_ULL(6)
+#define TPMI_CONTROL_STATUS_RESULT GENMASK_ULL(15, 8)
+#define TPMI_CONTROL_STATUS_LEN GENMASK_ULL(31, 16)
+
+#define TPMI_CMD_PKT_LEN 2
+#define TPMI_CMD_STATUS_SUCCESS 0x40
+
+/* TPMI command data registers */
+#define TMPI_CONTROL_DATA_CMD GENMASK_ULL(7, 0)
+#define TMPI_CONTROL_DATA_VAL GENMASK_ULL(63, 32)
+#define TPMI_CONTROL_DATA_VAL_FEATURE GENMASK_ULL(48, 40)
+
+/* Command to send via control interface */
+#define TPMI_CONTROL_GET_STATE_CMD 0x10
+
+#define TPMI_CONTROL_CMD_MASK GENMASK_ULL(48, 40)
+
+#define TPMI_CMD_LEN_MASK GENMASK_ULL(18, 16)
+
+#define TPMI_STATE_DISABLED BIT_ULL(0)
+#define TPMI_STATE_LOCKED BIT_ULL(31)
+
+/* Mutex to complete get feature status without interruption */
+static DEFINE_MUTEX(tpmi_dev_lock);
+
+static int tpmi_wait_for_owner(struct intel_tpmi_info *tpmi_info, u8 owner)
+{
+ u64 control;
+
+ return readq_poll_timeout(tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET,
+ control, owner == FIELD_GET(TPMI_CONTROL_STATUS_OWNER, control),
+ TPMI_CONTROL_TIMEOUT_US, TPMI_CONTROL_TIMEOUT_MAX_US);
+}
+
+static int tpmi_read_feature_status(struct intel_tpmi_info *tpmi_info, int feature_id,
+ int *locked, int *disabled)
+{
+ u64 control, data;
+ int ret;
+
+ if (!tpmi_info->tpmi_control_mem)
+ return -EFAULT;
+
+ mutex_lock(&tpmi_dev_lock);
+
+ /* Wait for owner bit set to 0 (none) */
+ ret = tpmi_wait_for_owner(tpmi_info, TPMI_OWNER_NONE);
+ if (ret)
+ goto err_unlock;
+
+ /* set command id to 0x10 for TPMI_GET_STATE */
+ data = FIELD_PREP(TMPI_CONTROL_DATA_CMD, TPMI_CONTROL_GET_STATE_CMD);
+
+ /* 32 bits for DATA offset and +8 for feature_id field */
+ data |= FIELD_PREP(TPMI_CONTROL_DATA_VAL_FEATURE, feature_id);
+
+ /* Write at command offset for qword access */
+ writeq(data, tpmi_info->tpmi_control_mem + TPMI_COMMAND_OFFSET);
+
+ /* Wait for owner bit set to in-band */
+ ret = tpmi_wait_for_owner(tpmi_info, TPMI_OWNER_IN_BAND);
+ if (ret)
+ goto err_unlock;
+
+ /* Set Run Busy and packet length of 2 dwords */
+ control = TPMI_CONTROL_STATUS_RB;
+ control |= FIELD_PREP(TPMI_CONTROL_STATUS_LEN, TPMI_CMD_PKT_LEN);
+
+ /* Write at status offset for qword access */
+ writeq(control, tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET);
+
+ /* Wait for Run Busy clear */
+ ret = readq_poll_timeout(tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET,
+ control, !(control & TPMI_CONTROL_STATUS_RB),
+ TPMI_RB_TIMEOUT_US, TPMI_RB_TIMEOUT_MAX_US);
+ if (ret)
+ goto done_proc;
+
+ control = FIELD_GET(TPMI_CONTROL_STATUS_RESULT, control);
+ if (control != TPMI_CMD_STATUS_SUCCESS) {
+ ret = -EBUSY;
+ goto done_proc;
+ }
+
+ /* Response is ready */
+ data = readq(tpmi_info->tpmi_control_mem + TPMI_COMMAND_OFFSET);
+ data = FIELD_GET(TMPI_CONTROL_DATA_VAL, data);
+
+ *disabled = 0;
+ *locked = 0;
+
+ if (!(data & TPMI_STATE_DISABLED))
+ *disabled = 1;
+
+ if (data & TPMI_STATE_LOCKED)
+ *locked = 1;
+
+ ret = 0;
+
+done_proc:
+ /* Set CPL "completion" bit */
+ writeq(TPMI_CONTROL_STATUS_CPL, tpmi_info->tpmi_control_mem + TPMI_CONTROL_STATUS_OFFSET);
+
+err_unlock:
+ mutex_unlock(&tpmi_dev_lock);
+
+ return ret;
+}
+
+int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id,
+ int *locked, int *disabled)
+{
+ struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(auxdev->dev.parent);
+ struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(&intel_vsec_dev->auxdev);
+
+ return tpmi_read_feature_status(tpmi_info, feature_id, locked, disabled);
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_feature_status, INTEL_TPMI);
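tpmi_get_feature_status() is exported in the INTEL_TPMI namespace so TPMI feature drivers can ask whether firmware has disabled or locked their feature before exposing it. A hedged sketch of such a caller follows; the probe function is illustrative, the feature ID literal simply mirrors TPMI_ID_UNCORE from the enum above, and the prototype is assumed to be available to feature drivers via <linux/intel_tpmi.h>.

#include <linux/auxiliary_bus.h>
#include <linux/intel_tpmi.h>

#define EXAMPLE_TPMI_ID_UNCORE	2	/* mirrors TPMI_ID_UNCORE above */

static int example_feature_probe(struct auxiliary_device *auxdev,
				 const struct auxiliary_device_id *id)
{
	int locked, disabled, ret;

	ret = tpmi_get_feature_status(auxdev, EXAMPLE_TPMI_ID_UNCORE,
				      &locked, &disabled);
	if (ret)
		return ret;

	/* Do not bind when firmware reports the feature as disabled. */
	if (disabled)
		return -ENODEV;

	/* A locked feature may still be usable read-only; driver policy decides. */
	if (locked)
		dev_info(&auxdev->dev, "feature is locked\n");

	return 0;
}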
+
+static int tpmi_pfs_dbg_show(struct seq_file *s, void *unused)
+{
+ struct intel_tpmi_info *tpmi_info = s->private;
+ struct intel_tpmi_pm_feature *pfs;
+ int locked, disabled, ret, i;
+
+ seq_printf(s, "tpmi PFS start offset 0x:%llx\n", tpmi_info->pfs_start);
+ seq_puts(s, "tpmi_id\t\tentries\t\tsize\t\tcap_offset\tattribute\tvsec_offset\tlocked\tdisabled\n");
+ for (i = 0; i < tpmi_info->feature_count; ++i) {
+ pfs = &tpmi_info->tpmi_features[i];
+ ret = tpmi_read_feature_status(tpmi_info, pfs->pfs_header.tpmi_id, &locked,
+ &disabled);
+ if (ret) {
+ locked = 'U';
+ disabled = 'U';
+ } else {
+ disabled = disabled ? 'Y' : 'N';
+ locked = locked ? 'Y' : 'N';
+ }
+ seq_printf(s, "0x%02x\t\t0x%02x\t\t0x%04x\t\t0x%04x\t\t0x%02x\t\t0x%08x\t%c\t%c\n",
+ pfs->pfs_header.tpmi_id, pfs->pfs_header.num_entries,
+ pfs->pfs_header.entry_size, pfs->pfs_header.cap_offset,
+ pfs->pfs_header.attribute, pfs->vsec_offset, locked, disabled);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(tpmi_pfs_dbg);
+
+#define MEM_DUMP_COLUMN_COUNT 8
+
+static int tpmi_mem_dump_show(struct seq_file *s, void *unused)
+{
+ size_t row_size = MEM_DUMP_COLUMN_COUNT * sizeof(u32);
+ struct intel_tpmi_pm_feature *pfs = s->private;
+ int count, ret = 0;
+ void __iomem *mem;
+ u32 off, size;
+ u8 *buffer;
+
+ size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
+ if (!size)
+ return -EIO;
+
+ buffer = kmalloc(size, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ off = pfs->vsec_offset;
+
+ mutex_lock(&tpmi_dev_lock);
+
+ for (count = 0; count < pfs->pfs_header.num_entries; ++count) {
+ seq_printf(s, "TPMI Instance:%d offset:0x%x\n", count, off);
+
+ mem = ioremap(off, size);
+ if (!mem) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ memcpy_fromio(buffer, mem, size);
+
+ seq_hex_dump(s, " ", DUMP_PREFIX_OFFSET, row_size, sizeof(u32), buffer, size,
+ false);
+
+ iounmap(mem);
+
+ off += size;
+ }
+
+ mutex_unlock(&tpmi_dev_lock);
+
+ kfree(buffer);
+
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(tpmi_mem_dump);
+
+static ssize_t mem_write(struct file *file, const char __user *userbuf, size_t len, loff_t *ppos)
+{
+ struct seq_file *m = file->private_data;
+ struct intel_tpmi_pm_feature *pfs = m->private;
+ u32 addr, value, punit, size;
+ u32 num_elems, *array;
+ void __iomem *mem;
+ int ret;
+
+ size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
+ if (!size)
+ return -EIO;
+
+ ret = parse_int_array_user(userbuf, len, (int **)&array);
+ if (ret < 0)
+ return ret;
+
+ num_elems = *array;
+ if (num_elems != 3) {
+ ret = -EINVAL;
+ goto exit_write;
+ }
+
+ punit = array[1];
+ addr = array[2];
+ value = array[3];
+
+ if (punit >= pfs->pfs_header.num_entries) {
+ ret = -EINVAL;
+ goto exit_write;
+ }
+
+ if (addr >= size) {
+ ret = -EINVAL;
+ goto exit_write;
+ }
+
+ mutex_lock(&tpmi_dev_lock);
+
+ mem = ioremap(pfs->vsec_offset + punit * size, size);
+ if (!mem) {
+ ret = -ENOMEM;
+ goto unlock_mem_write;
+ }
+
+ writel(value, mem + addr);
+
+ iounmap(mem);
+
+ ret = len;
+
+unlock_mem_write:
+ mutex_unlock(&tpmi_dev_lock);
+
+exit_write:
+ kfree(array);
+
+ return ret;
+}
+
+static int mem_write_show(struct seq_file *s, void *unused)
+{
+ return 0;
+}
+
+static int mem_write_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mem_write_show, inode->i_private);
+}
+
+static const struct file_operations mem_write_ops = {
+ .open = mem_write_open,
+ .read = seq_read,
+ .write = mem_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
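A write to mem_write is parsed by parse_int_array_user() as comma-separated integers: the punit instance, the register offset within that instance, and the 32-bit value to write; mem_dump in the same directory can be read back to verify the result. The sketch below is a hypothetical userspace writer; the debugfs path is assumed (the directory names come from tpmi_dbgfs_register() below), and debugfs registration only happens when /dev/mem lockdown is off and CAP_SYS_RAWIO is available, as checked in intel_vsec_tpmi_init() below.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Path assumed; substitute the PCI device and TPMI ID on the target. */
	const char *path = "/sys/kernel/debug/tpmi-0000:00:0a.0/tpmi-id-02/mem_write";
	unsigned int punit = 0, offset = 24, value = 1;	/* illustrative values */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}

	/* parse_int_array_user() expects comma-separated integers. */
	fprintf(f, "%u,%u,%u", punit, offset, value);

	if (fclose(f)) {
		perror("write");
		return EXIT_FAILURE;
	}

	return EXIT_SUCCESS;
}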
+
+#define tpmi_to_dev(info) (&info->vsec_dev->pcidev->dev)
+
+static void tpmi_dbgfs_register(struct intel_tpmi_info *tpmi_info)
+{
+ char name[64];
+ int i;
+
+ snprintf(name, sizeof(name), "tpmi-%s", dev_name(tpmi_to_dev(tpmi_info)));
+ tpmi_info->dbgfs_dir = debugfs_create_dir(name, NULL);
+
+ debugfs_create_file("pfs_dump", 0444, tpmi_info->dbgfs_dir, tpmi_info, &tpmi_pfs_dbg_fops);
+
+ for (i = 0; i < tpmi_info->feature_count; ++i) {
+ struct intel_tpmi_pm_feature *pfs;
+ struct dentry *dir;
+
+ pfs = &tpmi_info->tpmi_features[i];
+ snprintf(name, sizeof(name), "tpmi-id-%02x", pfs->pfs_header.tpmi_id);
+ dir = debugfs_create_dir(name, tpmi_info->dbgfs_dir);
+
+ debugfs_create_file("mem_dump", 0444, dir, pfs, &tpmi_mem_dump_fops);
+ debugfs_create_file("mem_write", 0644, dir, pfs, &mem_write_ops);
+ }
+}
+
+static void tpmi_set_control_base(struct auxiliary_device *auxdev,
+ struct intel_tpmi_info *tpmi_info,
+ struct intel_tpmi_pm_feature *pfs)
+{
+ void __iomem *mem;
+ u32 size;
+
+ size = TPMI_GET_SINGLE_ENTRY_SIZE(pfs);
+ if (!size)
+ return;
+
+ mem = devm_ioremap(&auxdev->dev, pfs->vsec_offset, size);
+ if (!mem)
+ return;
+
+ /* mem is pointing to TPMI CONTROL base */
+ tpmi_info->tpmi_control_mem = mem;
+}
+
static const char *intel_tpmi_name(enum intel_tpmi_id id)
{
switch (id) {
@@ -316,7 +682,7 @@ static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev)
struct pci_dev *pci_dev = vsec_dev->pcidev;
struct intel_tpmi_info *tpmi_info;
u64 pfs_start = 0;
- int i;
+ int ret, i;
tpmi_info = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_info), GFP_KERNEL);
if (!tpmi_info)
@@ -339,6 +705,7 @@ static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev)
int size, ret;
pfs = &tpmi_info->tpmi_features[i];
+ pfs->vsec_dev = vsec_dev;
res = &vsec_dev->resource[i];
if (!res)
@@ -367,13 +734,29 @@ static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev)
*/
if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID)
tpmi_process_info(tpmi_info, pfs);
+
+ if (pfs->pfs_header.tpmi_id == TPMI_CONTROL_ID)
+ tpmi_set_control_base(auxdev, tpmi_info, pfs);
}
tpmi_info->pfs_start = pfs_start;
auxiliary_set_drvdata(auxdev, tpmi_info);
- return tpmi_create_devices(tpmi_info);
+ ret = tpmi_create_devices(tpmi_info);
+ if (ret)
+ return ret;
+
+ /*
+ * Allow debugfs when the security policy allows it. Everything this
+ * debugfs interface provides can also be done via /dev/mem access. If
+ * the /dev/mem interface is locked down, don't allow debugfs to present
+ * any information. Also check for CAP_SYS_RAWIO, as the /dev/mem
+ * interface does.
+ */
+ if (!security_locked_down(LOCKDOWN_DEV_MEM) && capable(CAP_SYS_RAWIO))
+ tpmi_dbgfs_register(tpmi_info);
+
+ return 0;
}
static int tpmi_probe(struct auxiliary_device *auxdev,
@@ -382,11 +765,12 @@ static int tpmi_probe(struct auxiliary_device *auxdev,
return intel_vsec_tpmi_init(auxdev);
}
-/*
- * Remove callback is not needed currently as there is no
- * cleanup required. All memory allocs are device managed. All
- * devices created by this modules are also device managed.
- */
+static void tpmi_remove(struct auxiliary_device *auxdev)
+{
+ struct intel_tpmi_info *tpmi_info = auxiliary_get_drvdata(auxdev);
+
+ debugfs_remove_recursive(tpmi_info->dbgfs_dir);
+}
static const struct auxiliary_device_id tpmi_id_table[] = {
{ .name = "intel_vsec.tpmi" },
@@ -397,6 +781,7 @@ MODULE_DEVICE_TABLE(auxiliary, tpmi_id_table);
static struct auxiliary_driver tpmi_aux_driver = {
.id_table = tpmi_id_table,
.probe = tpmi_probe,
+ .remove = tpmi_remove,
};
module_auxiliary_driver(tpmi_aux_driver);
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 7d33977d9c60..3d96dbf79a72 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -12,6 +12,7 @@
#include <linux/i2c-mux.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/platform_data/i2c-mux-reg.h>
#include <linux/platform_data/mlxreg.h>
@@ -35,6 +36,7 @@
#define MLXPLAT_CPLD_LPC_REG_CPLD3_PN1_OFFSET 0x09
#define MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET 0x0a
#define MLXPLAT_CPLD_LPC_REG_CPLD4_PN1_OFFSET 0x0b
+#define MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET 0x17
#define MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET 0x19
#define MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET 0x1c
#define MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET 0x1d
@@ -62,6 +64,7 @@
#define MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET 0x37
#define MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET 0x3a
#define MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET 0x3b
+#define MLXPLAT_CPLD_LPC_REG_FU_CAP_OFFSET 0x3c
#define MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET 0x40
#define MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET 0x41
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET 0x42
@@ -94,6 +97,9 @@
#define MLXPLAT_CPLD_LPC_REG_FAN_OFFSET 0x88
#define MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET 0x89
#define MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET 0x8a
+#define MLXPLAT_CPLD_LPC_REG_CPLD5_VER_OFFSET 0x8e
+#define MLXPLAT_CPLD_LPC_REG_CPLD5_PN_OFFSET 0x8f
+#define MLXPLAT_CPLD_LPC_REG_CPLD5_PN1_OFFSET 0x90
#define MLXPLAT_CPLD_LPC_REG_EROT_OFFSET 0x91
#define MLXPLAT_CPLD_LPC_REG_EROT_EVENT_OFFSET 0x92
#define MLXPLAT_CPLD_LPC_REG_EROT_MASK_OFFSET 0x93
@@ -128,6 +134,7 @@
#define MLXPLAT_CPLD_LPC_REG_DBG4_OFFSET 0xb9
#define MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET 0xc2
#define MLXPLAT_CPLD_LPC_REG_SPI_CHNL_SELECT 0xc3
+#define MLXPLAT_CPLD_LPC_REG_CPLD5_MVER_OFFSET 0xc4
#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET 0xc7
#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET 0xc8
#define MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET 0xc9
@@ -236,6 +243,7 @@
#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4)
#define MLXPLAT_CPLD_GWP_MASK GENMASK(0, 0)
#define MLXPLAT_CPLD_EROT_MASK GENMASK(1, 0)
+#define MLXPLAT_CPLD_FU_CAP_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PWR_BUTTON_MASK BIT(0)
#define MLXPLAT_CPLD_LATCH_RST_MASK BIT(6)
#define MLXPLAT_CPLD_THERMAL1_PDB_MASK BIT(3)
@@ -248,6 +256,7 @@
MLXPLAT_CPLD_PWM_PG_MASK)
#define MLXPLAT_CPLD_I2C_CAP_BIT 0x04
#define MLXPLAT_CPLD_I2C_CAP_MASK GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT)
+#define MLXPLAT_CPLD_SYS_RESET_MASK BIT(0)
/* Masks for aggregation for comex carriers */
#define MLXPLAT_CPLD_AGGR_MASK_CARRIER BIT(1)
@@ -259,6 +268,7 @@
#define MLXPLAT_CPLD_LPC_LC_MASK GENMASK(7, 0)
#define MLXPLAT_CPLD_HALT_MASK BIT(3)
+#define MLXPLAT_CPLD_RESET_MASK GENMASK(7, 1)
/* Default I2C parent bus number */
#define MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR 1
@@ -322,6 +332,12 @@
#define MLXPLAT_I2C_MAIN_BUS_NOTIFIED 0x01
#define MLXPLAT_I2C_MAIN_BUS_HANDLE_CREATED 0x02
+/* Lattice FPGA PCI configuration */
+#define PCI_VENDOR_ID_LATTICE 0x1204
+#define PCI_DEVICE_ID_LATTICE_I2C_BRIDGE 0x9c2f
+#define PCI_DEVICE_ID_LATTICE_JTAG_BRIDGE 0x9c30
+#define PCI_DEVICE_ID_LATTICE_LPC_BRIDGE 0x9c32
+
/* mlxplat_priv - platform private data
* @pdev_i2c - i2c controller platform device
* @pdev_mux - array of mux platform devices
@@ -334,6 +350,7 @@
* @hotplug_resources: system hotplug resources
* @hotplug_resources_size: size of system hotplug resources
* @hi2c_main_init_status: init status of I2C main bus
+ * @irq_fpga: FPGA IRQ number
*/
struct mlxplat_priv {
struct platform_device *pdev_i2c;
@@ -347,10 +364,12 @@ struct mlxplat_priv {
struct resource *hotplug_resources;
unsigned int hotplug_resources_size;
u8 i2c_main_init_status;
+ int irq_fpga;
};
static struct platform_device *mlxplat_dev;
static int mlxplat_i2c_main_complition_notify(void *handle, int id);
+static void __iomem *i2c_bridge_addr, *jtag_bridge_addr;
/* Regions for LPC I2C controller and LPC base register space */
static const struct resource mlxplat_lpc_resources[] = {
@@ -435,6 +454,7 @@ static struct i2c_mux_reg_platform_data mlxplat_default_mux_data[] = {
static int mlxplat_max_adap_num;
static int mlxplat_mux_num;
static struct i2c_mux_reg_platform_data *mlxplat_mux_data;
+static struct notifier_block *mlxplat_reboot_nb;
/* Platform extended mux data */
static struct i2c_mux_reg_platform_data mlxplat_extended_mux_data[] = {
@@ -2355,8 +2375,11 @@ static int
mlxplat_mlxcpld_l1_switch_pwr_events_handler(void *handle, enum mlxreg_hotplug_kind kind,
u8 action)
{
- dev_info(&mlxplat_dev->dev, "System shutdown due to short press of power button");
- kernel_power_off();
+ if (action) {
+ dev_info(&mlxplat_dev->dev, "System shutdown due to short press of power button");
+ kernel_power_off();
+ }
+
return 0;
}
@@ -2371,6 +2394,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_l1_switch_pwr_events_items_data[]
.reg = MLXPLAT_CPLD_LPC_REG_PWRB_OFFSET,
.mask = MLXPLAT_CPLD_PWR_BUTTON_MASK,
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_l1_switch_pwr_events_notifier,
},
};
@@ -2431,6 +2455,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_l1_switch_health_events_items_dat
.reg = MLXPLAT_CPLD_LPC_REG_BRD_OFFSET,
.mask = MLXPLAT_CPLD_INTRUSION_MASK,
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
.hpdev.notifier = &mlxplat_mlxcpld_l1_switch_intrusion_events_notifier,
},
{
@@ -3428,6 +3453,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "cpld5_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD5_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
.label = "cpld1_pn",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
.bit = GENMASK(15, 0),
@@ -3456,6 +3487,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.regnum = 2,
},
{
+ .label = "cpld5_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD5_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
.label = "cpld1_version_min",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
.bit = GENMASK(7, 0),
@@ -3480,6 +3518,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "cpld5_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD5_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
.label = "asic_reset",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
@@ -3555,9 +3599,9 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
- .label = "reset_from_comex",
+ .label = "reset_swb_dc_dc_pwr_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
- .mask = GENMASK(7, 0) & ~BIT(4),
+ .mask = GENMASK(7, 0) & ~BIT(3),
.mode = 0444,
},
{
@@ -3579,6 +3623,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "reset_sw_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
.label = "reset_comex_pwr_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
@@ -3681,6 +3731,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0200,
},
{
+ .label = "jtag_cap",
+ .reg = MLXPLAT_CPLD_LPC_REG_FU_CAP_OFFSET,
+ .mask = MLXPLAT_CPLD_FU_CAP_MASK,
+ .bit = 1,
+ .mode = 0444,
+ },
+ {
.label = "jtag_enable",
.reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
@@ -3793,6 +3850,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "lid_open",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
.label = "clk_brd1_boot_fail",
.reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
@@ -4432,6 +4495,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_chassis_blade_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "reset_long_pwr_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
.label = "pwr_cycle",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(2),
@@ -4905,6 +4974,7 @@ static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type3[] = {
static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
+ case MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED2_OFFSET:
@@ -4923,6 +4993,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET:
case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FU_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET:
@@ -5001,6 +5072,7 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD5_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD1_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET:
@@ -5009,6 +5081,9 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_CPLD3_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_PN1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD5_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD5_PN1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET:
@@ -5034,6 +5109,7 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FU_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET:
@@ -5119,6 +5195,7 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD5_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET:
@@ -5160,6 +5237,7 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD5_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD1_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET:
@@ -5168,6 +5246,9 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_CPLD3_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_PN1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD5_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD5_PN1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET:
@@ -5191,6 +5272,7 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FU_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET:
@@ -5270,6 +5352,7 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD5_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET:
@@ -5469,13 +5552,50 @@ static struct mlxreg_core_platform_data *mlxplat_fan;
static struct mlxreg_core_platform_data
*mlxplat_wd_data[MLXPLAT_CPLD_WD_MAX_DEVS];
static const struct regmap_config *mlxplat_regmap_config;
+static struct pci_dev *lpc_bridge;
+static struct pci_dev *i2c_bridge;
+static struct pci_dev *jtag_bridge;
+
+/* Platform default reset function */
+static int mlxplat_reboot_notifier(struct notifier_block *nb, unsigned long action, void *unused)
+{
+ struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
+ u32 regval;
+ int ret;
+
+ ret = regmap_read(priv->regmap, MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET, &regval);
+
+ if (action == SYS_RESTART && !ret && regval & MLXPLAT_CPLD_SYS_RESET_MASK)
+ regmap_write(priv->regmap, MLXPLAT_CPLD_LPC_REG_RESET_GP1_OFFSET,
+ MLXPLAT_CPLD_RESET_MASK);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block mlxplat_reboot_default_nb = {
+ .notifier_call = mlxplat_reboot_notifier,
+};
/* Platform default poweroff function */
static void mlxplat_poweroff(void)
{
struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
+ if (mlxplat_reboot_nb)
+ unregister_reboot_notifier(mlxplat_reboot_nb);
regmap_write(priv->regmap, MLXPLAT_CPLD_LPC_REG_GP1_OFFSET, MLXPLAT_CPLD_HALT_MASK);
+ kernel_halt();
+}
+
+static int __init mlxplat_register_platform_device(void)
+{
+ mlxplat_dev = platform_device_register_simple(MLX_PLAT_DEVICE_NAME, -1,
+ mlxplat_lpc_resources,
+ ARRAY_SIZE(mlxplat_lpc_resources));
+ if (IS_ERR(mlxplat_dev))
+ return PTR_ERR(mlxplat_dev);
+ else
+ return 1;
}
static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
@@ -5498,7 +5618,7 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
- return 1;
+ return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_default_wc_matched(const struct dmi_system_id *dmi)
@@ -5521,7 +5641,7 @@ static int __init mlxplat_dmi_default_wc_matched(const struct dmi_system_id *dmi
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
- return 1;
+ return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_default_eth_wc_blade_matched(const struct dmi_system_id *dmi)
@@ -5546,7 +5666,7 @@ static int __init mlxplat_dmi_default_eth_wc_blade_matched(const struct dmi_syst
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng;
- return 1;
+ return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
@@ -5569,7 +5689,7 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
- return 1;
+ return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
@@ -5592,7 +5712,7 @@ static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
- return 1;
+ return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
@@ -5615,7 +5735,7 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
- return 1;
+ return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
@@ -5641,7 +5761,7 @@ static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi)
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng;
- return 1;
+ return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_comex_matched(const struct dmi_system_id *dmi)
@@ -5666,7 +5786,7 @@ static int __init mlxplat_dmi_comex_matched(const struct dmi_system_id *dmi)
mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_comex;
- return 1;
+ return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_ng400_matched(const struct dmi_system_id *dmi)
@@ -5692,7 +5812,7 @@ static int __init mlxplat_dmi_ng400_matched(const struct dmi_system_id *dmi)
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
- return 1;
+ return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_modular_matched(const struct dmi_system_id *dmi)
@@ -5712,7 +5832,7 @@ static int __init mlxplat_dmi_modular_matched(const struct dmi_system_id *dmi)
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_eth_modular;
- return 1;
+ return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_chassis_blade_matched(const struct dmi_system_id *dmi)
@@ -5734,7 +5854,7 @@ static int __init mlxplat_dmi_chassis_blade_matched(const struct dmi_system_id *
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
- return 1;
+ return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_rack_switch_matched(const struct dmi_system_id *dmi)
@@ -5755,7 +5875,7 @@ static int __init mlxplat_dmi_rack_switch_matched(const struct dmi_system_id *dm
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_rack_switch;
- return 1;
+ return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_ng800_matched(const struct dmi_system_id *dmi)
@@ -5776,7 +5896,7 @@ static int __init mlxplat_dmi_ng800_matched(const struct dmi_system_id *dmi)
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
- return 1;
+ return mlxplat_register_platform_device();
}
static int __init mlxplat_dmi_l1_switch_matched(const struct dmi_system_id *dmi)
@@ -5797,8 +5917,9 @@ static int __init mlxplat_dmi_l1_switch_matched(const struct dmi_system_id *dmi)
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_rack_switch;
pm_power_off = mlxplat_poweroff;
+ mlxplat_reboot_nb = &mlxplat_reboot_default_nb;
- return 1;
+ return mlxplat_register_platform_device();
}
static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
@@ -6042,12 +6163,6 @@ static int mlxplat_lpc_cpld_device_init(struct resource **hotplug_resources,
{
int err;
- mlxplat_dev = platform_device_register_simple(MLX_PLAT_DEVICE_NAME, PLATFORM_DEVID_NONE,
- mlxplat_lpc_resources,
- ARRAY_SIZE(mlxplat_lpc_resources));
- if (IS_ERR(mlxplat_dev))
- return PTR_ERR(mlxplat_dev);
-
mlxplat_mlxcpld_regmap_ctx.base = devm_ioport_map(&mlxplat_dev->dev,
mlxplat_lpc_resources[1].start, 1);
if (!mlxplat_mlxcpld_regmap_ctx.base) {
@@ -6061,24 +6176,138 @@ static int mlxplat_lpc_cpld_device_init(struct resource **hotplug_resources,
return 0;
fail_devm_ioport_map:
- platform_device_unregister(mlxplat_dev);
return err;
}
static void mlxplat_lpc_cpld_device_exit(void)
{
- platform_device_unregister(mlxplat_dev);
+}
+
+static int
+mlxplat_pci_fpga_device_init(unsigned int device, const char *res_name, struct pci_dev **pci_bridge,
+ void __iomem **pci_bridge_addr)
+{
+ void __iomem *pci_mem_addr;
+ struct pci_dev *pci_dev;
+ int err;
+
+ pci_dev = pci_get_device(PCI_VENDOR_ID_LATTICE, device, NULL);
+ if (!pci_dev)
+ return -ENODEV;
+
+ err = pci_enable_device(pci_dev);
+ if (err) {
+ dev_err(&pci_dev->dev, "pci_enable_device failed with error %d\n", err);
+ goto fail_pci_enable_device;
+ }
+
+ err = pci_request_region(pci_dev, 0, res_name);
+ if (err) {
+ dev_err(&pci_dev->dev, "pci_request_regions failed with error %d\n", err);
+ goto fail_pci_request_regions;
+ }
+
+ err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pci_dev->dev, "dma_set_mask failed with error %d\n", err);
+ goto fail_pci_set_dma_mask;
+ }
+ }
+
+ pci_set_master(pci_dev);
+
+ pci_mem_addr = devm_ioremap(&pci_dev->dev, pci_resource_start(pci_dev, 0),
+ pci_resource_len(pci_dev, 0));
+ if (!pci_mem_addr) {
+ dev_err(&mlxplat_dev->dev, "ioremap failed\n");
+ err = -EIO;
+ goto fail_ioremap;
+ }
+
+ *pci_bridge = pci_dev;
+ *pci_bridge_addr = pci_mem_addr;
+
+ return 0;
+
+fail_ioremap:
+fail_pci_set_dma_mask:
+ pci_release_regions(pci_dev);
+fail_pci_request_regions:
+ pci_disable_device(pci_dev);
+fail_pci_enable_device:
+ return err;
+}
+
+static void
+mlxplat_pci_fpga_device_exit(struct pci_dev *pci_bridge,
+ void __iomem *pci_bridge_addr)
+{
+ iounmap(pci_bridge_addr);
+ pci_release_regions(pci_bridge);
+ pci_disable_device(pci_bridge);
+}
+
+static int
+mlxplat_pci_fpga_devices_init(struct resource **hotplug_resources,
+ unsigned int *hotplug_resources_size)
+{
+ int err;
+
+ err = mlxplat_pci_fpga_device_init(PCI_DEVICE_ID_LATTICE_LPC_BRIDGE,
+ "mlxplat_lpc_bridge", &lpc_bridge,
+ &mlxplat_mlxcpld_regmap_ctx.base);
+ if (err)
+ goto mlxplat_pci_fpga_device_init_lpc_fail;
+
+ err = mlxplat_pci_fpga_device_init(PCI_DEVICE_ID_LATTICE_I2C_BRIDGE,
+ "mlxplat_i2c_bridge", &i2c_bridge,
+ &i2c_bridge_addr);
+ if (err)
+ goto mlxplat_pci_fpga_device_init_i2c_fail;
+
+ err = mlxplat_pci_fpga_device_init(PCI_DEVICE_ID_LATTICE_JTAG_BRIDGE,
+ "mlxplat_jtag_bridge", &jtag_bridge,
+ &jtag_bridge_addr);
+ if (err)
+ goto mlxplat_pci_fpga_device_init_jtag_fail;
+
+ return 0;
+
+mlxplat_pci_fpga_device_init_jtag_fail:
+ mlxplat_pci_fpga_device_exit(i2c_bridge, i2c_bridge_addr);
+mlxplat_pci_fpga_device_init_i2c_fail:
+ mlxplat_pci_fpga_device_exit(lpc_bridge, mlxplat_mlxcpld_regmap_ctx.base);
+mlxplat_pci_fpga_device_init_lpc_fail:
+ return err;
+}
+
+static void mlxplat_pci_fpga_devices_exit(void)
+{
+ mlxplat_pci_fpga_device_exit(jtag_bridge, jtag_bridge_addr);
+ mlxplat_pci_fpga_device_exit(i2c_bridge, i2c_bridge_addr);
+ mlxplat_pci_fpga_device_exit(lpc_bridge, mlxplat_mlxcpld_regmap_ctx.base);
}
static int
mlxplat_pre_init(struct resource **hotplug_resources, unsigned int *hotplug_resources_size)
{
- return mlxplat_lpc_cpld_device_init(hotplug_resources, hotplug_resources_size);
+ int err;
+
+ err = mlxplat_pci_fpga_devices_init(hotplug_resources, hotplug_resources_size);
+ if (err == -ENODEV)
+ return mlxplat_lpc_cpld_device_init(hotplug_resources, hotplug_resources_size);
+
+ return err;
}
static void mlxplat_post_exit(void)
{
- mlxplat_lpc_cpld_device_exit();
+ if (lpc_bridge)
+ mlxplat_pci_fpga_devices_exit();
+ else
+ mlxplat_lpc_cpld_device_exit();
}
static int mlxplat_post_init(struct mlxplat_priv *priv)
@@ -6088,6 +6317,8 @@ static int mlxplat_post_init(struct mlxplat_priv *priv)
/* Add hotplug driver */
if (mlxplat_hotplug) {
mlxplat_hotplug->regmap = priv->regmap;
+ if (priv->irq_fpga)
+ mlxplat_hotplug->irq = priv->irq_fpga;
priv->pdev_hotplug =
platform_device_register_resndata(&mlxplat_dev->dev,
"mlxreg-hotplug", PLATFORM_DEVID_NONE,
@@ -6201,7 +6432,7 @@ mlxplat_i2c_mux_complition_notify(void *handle, struct i2c_adapter *parent,
return mlxplat_post_init(priv);
}
-static int mlxplat_i2c_mux_topolgy_init(struct mlxplat_priv *priv)
+static int mlxplat_i2c_mux_topology_init(struct mlxplat_priv *priv)
{
int i, err;
@@ -6230,7 +6461,7 @@ fail_platform_mux_register:
return err;
}
-static void mlxplat_i2c_mux_topolgy_exit(struct mlxplat_priv *priv)
+static void mlxplat_i2c_mux_topology_exit(struct mlxplat_priv *priv)
{
int i;
@@ -6244,7 +6475,7 @@ static int mlxplat_i2c_main_complition_notify(void *handle, int id)
{
struct mlxplat_priv *priv = handle;
- return mlxplat_i2c_mux_topolgy_init(priv);
+ return mlxplat_i2c_mux_topology_init(priv);
}
static int mlxplat_i2c_main_init(struct mlxplat_priv *priv)
@@ -6262,6 +6493,9 @@ static int mlxplat_i2c_main_init(struct mlxplat_priv *priv)
mlxplat_i2c->regmap = priv->regmap;
mlxplat_i2c->handle = priv;
+ /* Set mapped base address of I2C-LPC bridge over PCIe */
+ if (lpc_bridge)
+ mlxplat_i2c->addr = i2c_bridge_addr;
priv->pdev_i2c = platform_device_register_resndata(&mlxplat_dev->dev, "i2c_mlxcpld",
nr, priv->hotplug_resources,
priv->hotplug_resources_size,
@@ -6272,14 +6506,14 @@ static int mlxplat_i2c_main_init(struct mlxplat_priv *priv)
}
if (priv->i2c_main_init_status == MLXPLAT_I2C_MAIN_BUS_NOTIFIED) {
- err = mlxplat_i2c_mux_topolgy_init(priv);
+ err = mlxplat_i2c_mux_topology_init(priv);
if (err)
- goto fail_mlxplat_i2c_mux_topolgy_init;
+ goto fail_mlxplat_i2c_mux_topology_init;
}
return 0;
-fail_mlxplat_i2c_mux_topolgy_init:
+fail_mlxplat_i2c_mux_topology_init:
fail_platform_i2c_register:
fail_mlxplat_mlxcpld_verify_bus_topology:
return err;
@@ -6287,20 +6521,26 @@ fail_mlxplat_mlxcpld_verify_bus_topology:
static void mlxplat_i2c_main_exit(struct mlxplat_priv *priv)
{
- mlxplat_i2c_mux_topolgy_exit(priv);
+ mlxplat_i2c_mux_topology_exit(priv);
if (priv->pdev_i2c)
platform_device_unregister(priv->pdev_i2c);
}
-static int __init mlxplat_init(void)
+static int mlxplat_probe(struct platform_device *pdev)
{
- unsigned int hotplug_resources_size;
- struct resource *hotplug_resources;
+ unsigned int hotplug_resources_size = 0;
+ struct resource *hotplug_resources = NULL;
+ struct acpi_device *acpi_dev;
struct mlxplat_priv *priv;
- int i, err;
-
- if (!dmi_check_system(mlxplat_dmi_table))
- return -ENODEV;
+ int irq_fpga = 0, i, err;
+
+ acpi_dev = ACPI_COMPANION(&pdev->dev);
+ if (acpi_dev) {
+ irq_fpga = acpi_dev_gpio_irq_get(acpi_dev, 0);
+ if (irq_fpga < 0)
+ return -ENODEV;
+ mlxplat_dev = pdev;
+ }
err = mlxplat_pre_init(&hotplug_resources, &hotplug_resources_size);
if (err)
@@ -6315,6 +6555,7 @@ static int __init mlxplat_init(void)
platform_set_drvdata(mlxplat_dev, priv);
priv->hotplug_resources = hotplug_resources;
priv->hotplug_resources_size = hotplug_resources_size;
+ priv->irq_fpga = irq_fpga;
if (!mlxplat_regmap_config)
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config;
@@ -6346,8 +6587,15 @@ static int __init mlxplat_init(void)
if (err)
goto fail_regcache_sync;
+ if (mlxplat_reboot_nb) {
+ err = register_reboot_notifier(mlxplat_reboot_nb);
+ if (err)
+ goto fail_register_reboot_notifier;
+ }
+
return 0;
+fail_register_reboot_notifier:
fail_regcache_sync:
mlxplat_pre_exit(priv);
fail_mlxplat_i2c_main_init:
@@ -6357,17 +6605,57 @@ fail_alloc:
return err;
}
-module_init(mlxplat_init);
-static void __exit mlxplat_exit(void)
+static int mlxplat_remove(struct platform_device *pdev)
{
struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
if (pm_power_off)
pm_power_off = NULL;
+ if (mlxplat_reboot_nb)
+ unregister_reboot_notifier(mlxplat_reboot_nb);
mlxplat_pre_exit(priv);
mlxplat_i2c_main_exit(priv);
mlxplat_post_exit();
+ return 0;
+}
+
+static const struct acpi_device_id mlxplat_acpi_table[] = {
+ { "MLNXBF49", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, mlxplat_acpi_table);
+
+static struct platform_driver mlxplat_driver = {
+ .driver = {
+ .name = "mlxplat",
+ .acpi_match_table = mlxplat_acpi_table,
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+ },
+ .probe = mlxplat_probe,
+ .remove = mlxplat_remove,
+};
+
+static int __init mlxplat_init(void)
+{
+ int err;
+
+ if (!dmi_check_system(mlxplat_dmi_table))
+ return -ENODEV;
+
+ err = platform_driver_register(&mlxplat_driver);
+ if (err)
+ return err;
+ return 0;
+}
+module_init(mlxplat_init);
+
+static void __exit mlxplat_exit(void)
+{
+ if (mlxplat_dev)
+ platform_device_unregister(mlxplat_dev);
+
+ platform_driver_unregister(&mlxplat_driver);
}
module_exit(mlxplat_exit);
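For readers unfamiliar with the reboot-notifier API used by mlxplat_reboot_notifier() above, here is a minimal, self-contained sketch of the pattern; names are purely illustrative and this is not part of the patch:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

/* Minimal sketch: run platform code just before a restart. The chain passes
 * SYS_RESTART, SYS_HALT or SYS_POWER_OFF in @action; returning NOTIFY_DONE
 * lets the remaining notifiers in the chain run.
 */
static int demo_reboot_notifier(struct notifier_block *nb, unsigned long action,
				void *unused)
{
	if (action == SYS_RESTART)
		pr_info("demo: preparing hardware for restart\n");

	return NOTIFY_DONE;
}

static struct notifier_block demo_reboot_nb = {
	.notifier_call = demo_reboot_notifier,
};

static int __init demo_init(void)
{
	return register_reboot_notifier(&demo_reboot_nb);
}

static void __exit demo_exit(void)
{
	unregister_reboot_notifier(&demo_reboot_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");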
diff --git a/drivers/platform/x86/sel3350-platform.c b/drivers/platform/x86/sel3350-platform.c
new file mode 100644
index 000000000000..fa267d0d3778
--- /dev/null
+++ b/drivers/platform/x86/sel3350-platform.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+/*
+ * Copyright 2023 Schweitzer Engineering Laboratories, Inc.
+ * 2350 NE Hopkins Court, Pullman, WA 99163 USA
+ *
+ * Platform support for the b2093 mainboard used in SEL-3350 computers.
+ * Consumes GPIO from the SoC to provide standard LED and power supply
+ * devices.
+ */
+
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+
+/* Broxton communities */
+#define BXT_NW "INT3452:01"
+#define BXT_W "INT3452:02"
+#define BXT_SW "INT3452:03"
+
+#define B2093_GPIO_ACPI_ID "SEL0003"
+
+#define SEL_PS_A "sel_ps_a"
+#define SEL_PS_A_DETECT "sel_ps_a_detect"
+#define SEL_PS_A_GOOD "sel_ps_a_good"
+#define SEL_PS_B "sel_ps_b"
+#define SEL_PS_B_DETECT "sel_ps_b_detect"
+#define SEL_PS_B_GOOD "sel_ps_b_good"
+
+/* LEDs */
+static const struct gpio_led sel3350_leds[] = {
+ { .name = "sel:green:aux1" },
+ { .name = "sel:green:aux2" },
+ { .name = "sel:green:aux3" },
+ { .name = "sel:green:aux4" },
+ { .name = "sel:red:alarm" },
+ { .name = "sel:green:enabled",
+ .default_state = LEDS_GPIO_DEFSTATE_ON },
+ { .name = "sel:red:aux1" },
+ { .name = "sel:red:aux2" },
+ { .name = "sel:red:aux3" },
+ { .name = "sel:red:aux4" },
+};
+
+static const struct gpio_led_platform_data sel3350_leds_pdata = {
+ .num_leds = ARRAY_SIZE(sel3350_leds),
+ .leds = sel3350_leds,
+};
+
+/* Map GPIOs to LEDs */
+static struct gpiod_lookup_table sel3350_leds_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX(BXT_NW, 49, NULL, 0, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(BXT_NW, 50, NULL, 1, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(BXT_NW, 51, NULL, 2, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(BXT_NW, 52, NULL, 3, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(BXT_W, 20, NULL, 4, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(BXT_W, 21, NULL, 5, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(BXT_SW, 37, NULL, 6, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(BXT_SW, 38, NULL, 7, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(BXT_SW, 39, NULL, 8, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX(BXT_SW, 40, NULL, 9, GPIO_ACTIVE_HIGH),
+ {},
+ }
+};
+
+/* Map GPIOs to power supplies */
+static struct gpiod_lookup_table sel3350_gpios_table = {
+ .dev_id = B2093_GPIO_ACPI_ID ":00",
+ .table = {
+ GPIO_LOOKUP(BXT_NW, 44, SEL_PS_A_DETECT, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP(BXT_NW, 45, SEL_PS_A_GOOD, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP(BXT_NW, 46, SEL_PS_B_DETECT, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP(BXT_NW, 47, SEL_PS_B_GOOD, GPIO_ACTIVE_LOW),
+ {},
+ }
+};
+
+/* Power Supplies */
+
+struct sel3350_power_cfg_data {
+ struct gpio_desc *ps_detect;
+ struct gpio_desc *ps_good;
+};
+
+static int sel3350_power_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct sel3350_power_cfg_data *data = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (gpiod_get_value(data->ps_detect)) {
+ if (gpiod_get_value(data->ps_good))
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ } else {
+ val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = gpiod_get_value(data->ps_detect);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = gpiod_get_value(data->ps_good);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static const enum power_supply_property sel3350_power_properties[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static const struct power_supply_desc sel3350_ps_a_desc = {
+ .name = SEL_PS_A,
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = sel3350_power_properties,
+ .num_properties = ARRAY_SIZE(sel3350_power_properties),
+ .get_property = sel3350_power_get_property,
+};
+
+static const struct power_supply_desc sel3350_ps_b_desc = {
+ .name = SEL_PS_B,
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .properties = sel3350_power_properties,
+ .num_properties = ARRAY_SIZE(sel3350_power_properties),
+ .get_property = sel3350_power_get_property,
+};
+
+struct sel3350_data {
+ struct platform_device *leds_pdev;
+ struct power_supply *ps_a;
+ struct power_supply *ps_b;
+ struct sel3350_power_cfg_data ps_a_cfg_data;
+ struct sel3350_power_cfg_data ps_b_cfg_data;
+};
+
+static int sel3350_probe(struct platform_device *pdev)
+{
+ int rs;
+ struct sel3350_data *sel3350;
+ struct power_supply_config ps_cfg = {};
+
+ sel3350 = devm_kzalloc(&pdev->dev, sizeof(struct sel3350_data), GFP_KERNEL);
+ if (!sel3350)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sel3350);
+
+ gpiod_add_lookup_table(&sel3350_leds_table);
+ gpiod_add_lookup_table(&sel3350_gpios_table);
+
+ sel3350->leds_pdev = platform_device_register_data(
+ NULL,
+ "leds-gpio",
+ PLATFORM_DEVID_NONE,
+ &sel3350_leds_pdata,
+ sizeof(sel3350_leds_pdata));
+ if (IS_ERR(sel3350->leds_pdev)) {
+ rs = PTR_ERR(sel3350->leds_pdev);
+ dev_err(&pdev->dev, "Failed registering platform device: %d\n", rs);
+ goto err_platform;
+ }
+
+ /* Power Supply A */
+ sel3350->ps_a_cfg_data.ps_detect = devm_gpiod_get(&pdev->dev,
+ SEL_PS_A_DETECT,
+ GPIOD_IN);
+ sel3350->ps_a_cfg_data.ps_good = devm_gpiod_get(&pdev->dev,
+ SEL_PS_A_GOOD,
+ GPIOD_IN);
+ ps_cfg.drv_data = &sel3350->ps_a_cfg_data;
+ sel3350->ps_a = devm_power_supply_register(&pdev->dev,
+ &sel3350_ps_a_desc,
+ &ps_cfg);
+ if (IS_ERR(sel3350->ps_a)) {
+ rs = PTR_ERR(sel3350->ps_a);
+ dev_err(&pdev->dev, "Failed registering power supply A: %d\n", rs);
+ goto err_ps;
+ }
+
+ /* Power Supply B */
+ sel3350->ps_b_cfg_data.ps_detect = devm_gpiod_get(&pdev->dev,
+ SEL_PS_B_DETECT,
+ GPIOD_IN);
+ sel3350->ps_b_cfg_data.ps_good = devm_gpiod_get(&pdev->dev,
+ SEL_PS_B_GOOD,
+ GPIOD_IN);
+ ps_cfg.drv_data = &sel3350->ps_b_cfg_data;
+ sel3350->ps_b = devm_power_supply_register(&pdev->dev,
+ &sel3350_ps_b_desc,
+ &ps_cfg);
+ if (IS_ERR(sel3350->ps_b)) {
+ rs = PTR_ERR(sel3350->ps_b);
+ dev_err(&pdev->dev, "Failed registering power supply B: %d\n", rs);
+ goto err_ps;
+ }
+
+ return 0;
+
+err_ps:
+ platform_device_unregister(sel3350->leds_pdev);
+err_platform:
+ gpiod_remove_lookup_table(&sel3350_gpios_table);
+ gpiod_remove_lookup_table(&sel3350_leds_table);
+
+ return rs;
+}
+
+static int sel3350_remove(struct platform_device *pdev)
+{
+ struct sel3350_data *sel3350 = platform_get_drvdata(pdev);
+
+ platform_device_unregister(sel3350->leds_pdev);
+ gpiod_remove_lookup_table(&sel3350_gpios_table);
+ gpiod_remove_lookup_table(&sel3350_leds_table);
+
+ return 0;
+}
+
+static const struct acpi_device_id sel3350_device_ids[] = {
+ { B2093_GPIO_ACPI_ID, 0 },
+ { "", 0 },
+};
+MODULE_DEVICE_TABLE(acpi, sel3350_device_ids);
+
+static struct platform_driver sel3350_platform_driver = {
+ .probe = sel3350_probe,
+ .remove = sel3350_remove,
+ .driver = {
+ .name = "sel3350-platform",
+ .acpi_match_table = sel3350_device_ids,
+ },
+};
+module_platform_driver(sel3350_platform_driver);
+
+MODULE_AUTHOR("Schweitzer Engineering Laboratories");
+MODULE_DESCRIPTION("SEL-3350 platform driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_SOFTDEP("pre: pinctrl_broxton leds-gpio");
diff --git a/drivers/platform/x86/siemens/Kconfig b/drivers/platform/x86/siemens/Kconfig
new file mode 100644
index 000000000000..33d028c26bf8
--- /dev/null
+++ b/drivers/platform/x86/siemens/Kconfig
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Siemens X86 Platform Specific Drivers
+#
+
+config SIEMENS_SIMATIC_IPC
+ tristate "Siemens Simatic IPC Class driver"
+ help
+ This Simatic IPC class driver is the central one of several drivers.
+ It is mainly used for system identification; drivers in other classes
+ then take care of machine specifics such as LEDs and the watchdog.
+
+ To compile this driver as a module, choose M here: the module
+ will be called simatic-ipc.
+
+config SIEMENS_SIMATIC_IPC_BATT
+ tristate "CMOS battery driver for Siemens Simatic IPCs"
+ default SIEMENS_SIMATIC_IPC
+ depends on HWMON
+ depends on SIEMENS_SIMATIC_IPC
+ help
+ This option enables support for monitoring the voltage of the CMOS
+ batteries of several Industrial PCs from Siemens.
+
+ To compile this driver as a module, choose M here: the module
+ will be called simatic-ipc-batt.
+
+config SIEMENS_SIMATIC_IPC_BATT_APOLLOLAKE
+ tristate "CMOS Battery monitoring for Simatic IPCs based on Apollo Lake GPIO"
+ default SIEMENS_SIMATIC_IPC_BATT
+ depends on PINCTRL_BROXTON
+ depends on SIEMENS_SIMATIC_IPC_BATT
+ help
+ This option enables CMOS battery monitoring for Simatic Industrial PCs
+ from Siemens based on Apollo Lake GPIO.
+
+ To compile this driver as a module, choose M here: the module
+ will be called simatic-ipc-batt-apollolake.
+
+config SIEMENS_SIMATIC_IPC_BATT_ELKHARTLAKE
+ tristate "CMOS Battery monitoring for Simatic IPCs based on Elkhart Lake GPIO"
+ default SIEMENS_SIMATIC_IPC_BATT
+ depends on PINCTRL_ELKHARTLAKE
+ depends on SIEMENS_SIMATIC_IPC_BATT
+ help
+ This option enables CMOS battery monitoring for Simatic Industrial PCs
+ from Siemens based on Elkhart Lake GPIO.
+
+ To compile this driver as a module, choose M here: the module
+ will be called simatic-ipc-batt-elkhartlake.
+
+config SIEMENS_SIMATIC_IPC_BATT_F7188X
+ tristate "CMOS Battery monitoring for Simatic IPCs based on Nuvoton GPIO"
+ default SIEMENS_SIMATIC_IPC_BATT
+ depends on GPIO_F7188X
+ depends on PINCTRL_ALDERLAKE
+ depends on SIEMENS_SIMATIC_IPC_BATT
+ help
+ This option enables CMOS battery monitoring for Simatic Industrial PCs
+ from Siemens based on Nuvoton GPIO.
+
+ To compile this driver as a module, choose M here: the module
+ will be called simatic-ipc-batt-f7188x.
diff --git a/drivers/platform/x86/siemens/Makefile b/drivers/platform/x86/siemens/Makefile
new file mode 100644
index 000000000000..2b384b4cb8ba
--- /dev/null
+++ b/drivers/platform/x86/siemens/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for linux/drivers/platform/x86/siemens
+# Siemens x86 Platform-Specific Drivers
+#
+
+obj-$(CONFIG_SIEMENS_SIMATIC_IPC) += simatic-ipc.o
+obj-$(CONFIG_SIEMENS_SIMATIC_IPC_BATT) += simatic-ipc-batt.o
+obj-$(CONFIG_SIEMENS_SIMATIC_IPC_BATT_APOLLOLAKE) += simatic-ipc-batt-apollolake.o
+obj-$(CONFIG_SIEMENS_SIMATIC_IPC_BATT_ELKHARTLAKE) += simatic-ipc-batt-elkhartlake.o
+obj-$(CONFIG_SIEMENS_SIMATIC_IPC_BATT_F7188X) += simatic-ipc-batt-f7188x.o
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c b/drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c
new file mode 100644
index 000000000000..8a67979d8f96
--- /dev/null
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt-apollolake.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC driver for CMOS battery monitoring
+ *
+ * Copyright (c) Siemens AG, 2023
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ */
+
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "simatic-ipc-batt.h"
+
+static struct gpiod_lookup_table simatic_ipc_batt_gpio_table_127e = {
+ .table = {
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 55, NULL, 0, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 61, NULL, 1, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("apollolake-pinctrl.1", 41, NULL, 2, GPIO_ACTIVE_HIGH),
+ {} /* Terminating entry */
+ },
+};
+
+static int simatic_ipc_batt_apollolake_remove(struct platform_device *pdev)
+{
+ return simatic_ipc_batt_remove(pdev, &simatic_ipc_batt_gpio_table_127e);
+}
+
+static int simatic_ipc_batt_apollolake_probe(struct platform_device *pdev)
+{
+ return simatic_ipc_batt_probe(pdev, &simatic_ipc_batt_gpio_table_127e);
+}
+
+static struct platform_driver simatic_ipc_batt_driver = {
+ .probe = simatic_ipc_batt_apollolake_probe,
+ .remove = simatic_ipc_batt_apollolake_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+
+module_platform_driver(simatic_ipc_batt_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_SOFTDEP("pre: simatic-ipc-batt platform:apollolake-pinctrl");
+MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c b/drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c
new file mode 100644
index 000000000000..607d033911a2
--- /dev/null
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt-elkhartlake.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC driver for CMOS battery monitoring
+ *
+ * Copyright (c) Siemens AG, 2023
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ */
+
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "simatic-ipc-batt.h"
+
+static struct gpiod_lookup_table simatic_ipc_batt_gpio_table_bx_21a = {
+ .table = {
+ GPIO_LOOKUP_IDX("INTC1020:04", 18, NULL, 0, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("INTC1020:04", 19, NULL, 1, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("INTC1020:01", 66, NULL, 2, GPIO_ACTIVE_HIGH),
+ {} /* Terminating entry */
+ },
+};
+
+static int simatic_ipc_batt_elkhartlake_remove(struct platform_device *pdev)
+{
+ return simatic_ipc_batt_remove(pdev, &simatic_ipc_batt_gpio_table_bx_21a);
+}
+
+static int simatic_ipc_batt_elkhartlake_probe(struct platform_device *pdev)
+{
+ return simatic_ipc_batt_probe(pdev, &simatic_ipc_batt_gpio_table_bx_21a);
+}
+
+static struct platform_driver simatic_ipc_batt_driver = {
+ .probe = simatic_ipc_batt_elkhartlake_probe,
+ .remove = simatic_ipc_batt_elkhartlake_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+
+module_platform_driver(simatic_ipc_batt_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_SOFTDEP("pre: simatic-ipc-batt platform:elkhartlake-pinctrl");
+MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c b/drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c
new file mode 100644
index 000000000000..a66107e0fe1e
--- /dev/null
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt-f7188x.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC driver for CMOS battery monitoring
+ *
+ * Copyright (c) Siemens AG, 2023
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ */
+
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/x86/simatic-ipc-base.h>
+
+#include "simatic-ipc-batt.h"
+
+static struct gpiod_lookup_table *batt_lookup_table;
+
+static struct gpiod_lookup_table simatic_ipc_batt_gpio_table_227g = {
+ .table = {
+ GPIO_LOOKUP_IDX("gpio-f7188x-7", 6, NULL, 0, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("gpio-f7188x-7", 5, NULL, 1, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("INTC1020:01", 66, NULL, 2, GPIO_ACTIVE_HIGH),
+ {} /* Terminating entry */
+ },
+};
+
+static struct gpiod_lookup_table simatic_ipc_batt_gpio_table_bx_39a = {
+ .table = {
+ GPIO_LOOKUP_IDX("gpio-f7188x-6", 4, NULL, 0, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("gpio-f7188x-6", 3, NULL, 1, GPIO_ACTIVE_HIGH),
+ {} /* Terminating entry */
+ },
+};
+
+static struct gpiod_lookup_table simatic_ipc_batt_gpio_table_bx_59a = {
+ .table = {
+ GPIO_LOOKUP_IDX("gpio-f7188x-7", 6, NULL, 0, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("gpio-f7188x-7", 5, NULL, 1, GPIO_ACTIVE_HIGH),
+ GPIO_LOOKUP_IDX("INTC1056:00", 438, NULL, 2, GPIO_ACTIVE_HIGH),
+ {} /* Terminating entry */
+ }
+};
+
+static int simatic_ipc_batt_f7188x_remove(struct platform_device *pdev)
+{
+ return simatic_ipc_batt_remove(pdev, batt_lookup_table);
+}
+
+static int simatic_ipc_batt_f7188x_probe(struct platform_device *pdev)
+{
+ const struct simatic_ipc_platform *plat = pdev->dev.platform_data;
+
+ switch (plat->devmode) {
+ case SIMATIC_IPC_DEVICE_227G:
+ batt_lookup_table = &simatic_ipc_batt_gpio_table_227g;
+ break;
+ case SIMATIC_IPC_DEVICE_BX_39A:
+ batt_lookup_table = &simatic_ipc_batt_gpio_table_bx_39a;
+ break;
+ case SIMATIC_IPC_DEVICE_BX_59A:
+ batt_lookup_table = &simatic_ipc_batt_gpio_table_bx_59a;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return simatic_ipc_batt_probe(pdev, batt_lookup_table);
+}
+
+static struct platform_driver simatic_ipc_batt_driver = {
+ .probe = simatic_ipc_batt_f7188x_probe,
+ .remove = simatic_ipc_batt_f7188x_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+
+module_platform_driver(simatic_ipc_batt_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_SOFTDEP("pre: simatic-ipc-batt gpio_f7188x platform:elkhartlake-pinctrl platform:alderlake-pinctrl");
+MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt.c b/drivers/platform/x86/siemens/simatic-ipc-batt.c
new file mode 100644
index 000000000000..ef28c806b383
--- /dev/null
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC driver for CMOS battery monitoring
+ *
+ * Copyright (c) Siemens AG, 2023
+ *
+ * Authors:
+ * Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+ * Henning Schild <henning.schild@siemens.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/x86/simatic-ipc-base.h>
+#include <linux/sizes.h>
+
+#include "simatic-ipc-batt.h"
+
+#define BATT_DELAY_MS (1000 * 60 * 60 * 24) /* 24 h delay */
+
+#define SIMATIC_IPC_BATT_LEVEL_FULL 3000
+#define SIMATIC_IPC_BATT_LEVEL_CRIT 2750
+#define SIMATIC_IPC_BATT_LEVEL_EMPTY 0
+
+static struct simatic_ipc_batt {
+ u8 devmode;
+ long current_state;
+ struct gpio_desc *gpios[3];
+ unsigned long last_updated_jiffies;
+} priv;
+
+static long simatic_ipc_batt_read_gpio(void)
+{
+ long r = SIMATIC_IPC_BATT_LEVEL_FULL;
+
+ if (priv.gpios[2]) {
+ gpiod_set_value(priv.gpios[2], 1);
+ msleep(150);
+ }
+
+ if (gpiod_get_value_cansleep(priv.gpios[0]))
+ r = SIMATIC_IPC_BATT_LEVEL_EMPTY;
+ else if (gpiod_get_value_cansleep(priv.gpios[1]))
+ r = SIMATIC_IPC_BATT_LEVEL_CRIT;
+
+ if (priv.gpios[2])
+ gpiod_set_value(priv.gpios[2], 0);
+
+ return r;
+}
+
+#define SIMATIC_IPC_BATT_PORT_BASE 0x404D
+static struct resource simatic_ipc_batt_io_res =
+ DEFINE_RES_IO_NAMED(SIMATIC_IPC_BATT_PORT_BASE, SZ_1, KBUILD_MODNAME);
+
+static long simatic_ipc_batt_read_io(struct device *dev)
+{
+ long r = SIMATIC_IPC_BATT_LEVEL_FULL;
+ struct resource *res = &simatic_ipc_batt_io_res;
+ u8 val;
+
+ if (!request_muxed_region(res->start, resource_size(res), res->name)) {
+ dev_err(dev, "Unable to register IO resource at %pR\n", res);
+ return -EBUSY;
+ }
+
+ val = inb(SIMATIC_IPC_BATT_PORT_BASE);
+ release_region(simatic_ipc_batt_io_res.start, resource_size(&simatic_ipc_batt_io_res));
+
+ if (val & (1 << 7))
+ r = SIMATIC_IPC_BATT_LEVEL_EMPTY;
+ else if (val & (1 << 6))
+ r = SIMATIC_IPC_BATT_LEVEL_CRIT;
+
+ return r;
+}
+
+static long simatic_ipc_batt_read_value(struct device *dev)
+{
+ unsigned long next_update;
+
+ next_update = priv.last_updated_jiffies + msecs_to_jiffies(BATT_DELAY_MS);
+ if (time_after(jiffies, next_update) || !priv.last_updated_jiffies) {
+ if (priv.devmode == SIMATIC_IPC_DEVICE_227E)
+ priv.current_state = simatic_ipc_batt_read_io(dev);
+ else
+ priv.current_state = simatic_ipc_batt_read_gpio();
+
+ priv.last_updated_jiffies = jiffies;
+ if (priv.current_state < SIMATIC_IPC_BATT_LEVEL_FULL)
+ dev_warn(dev, "CMOS battery needs to be replaced.\n");
+ }
+
+ return priv.current_state;
+}
+
+static int simatic_ipc_batt_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_in_input:
+ *val = simatic_ipc_batt_read_value(dev);
+ break;
+ case hwmon_in_lcrit:
+ *val = SIMATIC_IPC_BATT_LEVEL_CRIT;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static umode_t simatic_ipc_batt_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (attr == hwmon_in_input || attr == hwmon_in_lcrit)
+ return 0444;
+
+ return 0;
+}
+
+static const struct hwmon_ops simatic_ipc_batt_ops = {
+ .is_visible = simatic_ipc_batt_is_visible,
+ .read = simatic_ipc_batt_read,
+};
+
+static const struct hwmon_channel_info *simatic_ipc_batt_info[] = {
+ HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LCRIT),
+ NULL
+};
+
+static const struct hwmon_chip_info simatic_ipc_batt_chip_info = {
+ .ops = &simatic_ipc_batt_ops,
+ .info = simatic_ipc_batt_info,
+};
+
+int simatic_ipc_batt_remove(struct platform_device *pdev, struct gpiod_lookup_table *table)
+{
+ gpiod_remove_lookup_table(table);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(simatic_ipc_batt_remove);
+
+int simatic_ipc_batt_probe(struct platform_device *pdev, struct gpiod_lookup_table *table)
+{
+ struct simatic_ipc_platform *plat;
+ struct device *dev = &pdev->dev;
+ struct device *hwmon_dev;
+ unsigned long flags;
+ int err;
+
+ plat = pdev->dev.platform_data;
+ priv.devmode = plat->devmode;
+
+ switch (priv.devmode) {
+ case SIMATIC_IPC_DEVICE_127E:
+ case SIMATIC_IPC_DEVICE_227G:
+ case SIMATIC_IPC_DEVICE_BX_39A:
+ case SIMATIC_IPC_DEVICE_BX_21A:
+ case SIMATIC_IPC_DEVICE_BX_59A:
+ table->dev_id = dev_name(dev);
+ gpiod_add_lookup_table(table);
+ break;
+ case SIMATIC_IPC_DEVICE_227E:
+ goto nogpio;
+ default:
+ return -ENODEV;
+ }
+
+ priv.gpios[0] = devm_gpiod_get_index(dev, "CMOSBattery empty", 0, GPIOD_IN);
+ if (IS_ERR(priv.gpios[0])) {
+ err = PTR_ERR(priv.gpios[0]);
+ priv.gpios[0] = NULL;
+ goto out;
+ }
+ priv.gpios[1] = devm_gpiod_get_index(dev, "CMOSBattery low", 1, GPIOD_IN);
+ if (IS_ERR(priv.gpios[1])) {
+ err = PTR_ERR(priv.gpios[1]);
+ priv.gpios[1] = NULL;
+ goto out;
+ }
+
+ if (table->table[2].key) {
+ flags = GPIOD_OUT_HIGH;
+ if (priv.devmode == SIMATIC_IPC_DEVICE_BX_21A ||
+ priv.devmode == SIMATIC_IPC_DEVICE_BX_59A)
+ flags = GPIOD_OUT_LOW;
+ priv.gpios[2] = devm_gpiod_get_index(dev, "CMOSBattery meter", 2, flags);
+ if (IS_ERR(priv.gpios[2])) {
+ err = PTR_ERR(priv.gpios[2]);
+ priv.gpios[2] = NULL;
+ goto out;
+ }
+ } else {
+ priv.gpios[2] = NULL;
+ }
+
+nogpio:
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, KBUILD_MODNAME,
+ &priv,
+ &simatic_ipc_batt_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev)) {
+ err = PTR_ERR(hwmon_dev);
+ goto out;
+ }
+
+ /* warn about aging battery even if userspace never reads hwmon */
+ simatic_ipc_batt_read_value(dev);
+
+ return 0;
+out:
+ simatic_ipc_batt_remove(pdev, table);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(simatic_ipc_batt_probe);
+
+static int simatic_ipc_batt_io_remove(struct platform_device *pdev)
+{
+ return simatic_ipc_batt_remove(pdev, NULL);
+}
+
+static int simatic_ipc_batt_io_probe(struct platform_device *pdev)
+{
+ return simatic_ipc_batt_probe(pdev, NULL);
+}
+
+static struct platform_driver simatic_ipc_batt_driver = {
+ .probe = simatic_ipc_batt_io_probe,
+ .remove = simatic_ipc_batt_io_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+
+module_platform_driver(simatic_ipc_batt_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");
diff --git a/drivers/platform/x86/siemens/simatic-ipc-batt.h b/drivers/platform/x86/siemens/simatic-ipc-batt.h
new file mode 100644
index 000000000000..4545cd3e3026
--- /dev/null
+++ b/drivers/platform/x86/siemens/simatic-ipc-batt.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Siemens SIMATIC IPC driver for CMOS battery monitoring
+ *
+ * Copyright (c) Siemens AG, 2023
+ *
+ * Author:
+ * Henning Schild <henning.schild@siemens.com>
+ */
+
+#ifndef _SIMATIC_IPC_BATT_H
+#define _SIMATIC_IPC_BATT_H
+
+int simatic_ipc_batt_probe(struct platform_device *pdev,
+ struct gpiod_lookup_table *table);
+
+int simatic_ipc_batt_remove(struct platform_device *pdev,
+ struct gpiod_lookup_table *table);
+
+#endif /* _SIMATIC_IPC_BATT_H */
diff --git a/drivers/platform/x86/siemens/simatic-ipc.c b/drivers/platform/x86/siemens/simatic-ipc.c
new file mode 100644
index 000000000000..8ca6e277fa03
--- /dev/null
+++ b/drivers/platform/x86/siemens/simatic-ipc.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Siemens SIMATIC IPC platform driver
+ *
+ * Copyright (c) Siemens AG, 2018-2023
+ *
+ * Authors:
+ * Henning Schild <henning.schild@siemens.com>
+ * Jan Kiszka <jan.kiszka@siemens.com>
+ * Gerd Haeussler <gerd.haeussler.ext@siemens.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/dmi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/x86/simatic-ipc.h>
+#include <linux/platform_device.h>
+
+static struct platform_device *ipc_led_platform_device;
+static struct platform_device *ipc_wdt_platform_device;
+static struct platform_device *ipc_batt_platform_device;
+
+static const struct dmi_system_id simatic_ipc_whitelist[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
+ },
+ },
+ {}
+};
+
+static struct simatic_ipc_platform platform_data;
+
+#define SIMATIC_IPC_MAX_EXTRA_MODULES 2
+
+static struct {
+ u32 station_id;
+ u8 led_mode;
+ u8 wdt_mode;
+ u8 batt_mode;
+ char *extra_modules[SIMATIC_IPC_MAX_EXTRA_MODULES];
+} device_modes[] = {
+ {SIMATIC_IPC_IPC127E,
+ SIMATIC_IPC_DEVICE_127E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_127E,
+ { "emc1403", NULL }},
+ {SIMATIC_IPC_IPC227D,
+ SIMATIC_IPC_DEVICE_227D, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_NONE,
+ { "emc1403", NULL }},
+ {SIMATIC_IPC_IPC227E,
+ SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_227E, SIMATIC_IPC_DEVICE_227E,
+ { "emc1403", NULL }},
+ {SIMATIC_IPC_IPC227G,
+ SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227G,
+ { "nct6775", "w83627hf_wdt" }},
+ {SIMATIC_IPC_IPC277G,
+ SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227G,
+ { "nct6775", "w83627hf_wdt" }},
+ {SIMATIC_IPC_IPC277E,
+ SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227E, SIMATIC_IPC_DEVICE_227E,
+ { "emc1403", NULL }},
+ {SIMATIC_IPC_IPC427D,
+ SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_NONE,
+ { "emc1403", NULL }},
+ {SIMATIC_IPC_IPC427E,
+ SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE,
+ { "emc1403", NULL }},
+ {SIMATIC_IPC_IPC477E,
+ SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE,
+ { "emc1403", NULL }},
+ {SIMATIC_IPC_IPCBX_39A,
+ SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_BX_39A,
+ { "nct6775", "w83627hf_wdt" }},
+ {SIMATIC_IPC_IPCPX_39A,
+ SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_BX_39A,
+ { "nct6775", "w83627hf_wdt" }},
+ {SIMATIC_IPC_IPCBX_21A,
+ SIMATIC_IPC_DEVICE_BX_21A, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_BX_21A,
+ { "emc1403", NULL }},
+ {SIMATIC_IPC_IPCBX_56A,
+ SIMATIC_IPC_DEVICE_BX_59A, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_BX_59A,
+ { "emc1403", "w83627hf_wdt" }},
+ {SIMATIC_IPC_IPCBX_59A,
+ SIMATIC_IPC_DEVICE_BX_59A, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_BX_59A,
+ { "emc1403", "w83627hf_wdt" }},
+};
+
+static int register_platform_devices(u32 station_id)
+{
+ u8 ledmode = SIMATIC_IPC_DEVICE_NONE;
+ u8 wdtmode = SIMATIC_IPC_DEVICE_NONE;
+ u8 battmode = SIMATIC_IPC_DEVICE_NONE;
+ char *pdevname;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(device_modes); i++) {
+ if (device_modes[i].station_id == station_id) {
+ ledmode = device_modes[i].led_mode;
+ wdtmode = device_modes[i].wdt_mode;
+ battmode = device_modes[i].batt_mode;
+ break;
+ }
+ }
+
+ if (battmode != SIMATIC_IPC_DEVICE_NONE) {
+ pdevname = KBUILD_MODNAME "_batt";
+ if (battmode == SIMATIC_IPC_DEVICE_127E)
+ pdevname = KBUILD_MODNAME "_batt_apollolake";
+ if (battmode == SIMATIC_IPC_DEVICE_BX_21A)
+ pdevname = KBUILD_MODNAME "_batt_elkhartlake";
+ if (battmode == SIMATIC_IPC_DEVICE_227G ||
+ battmode == SIMATIC_IPC_DEVICE_BX_39A ||
+ battmode == SIMATIC_IPC_DEVICE_BX_59A)
+ pdevname = KBUILD_MODNAME "_batt_f7188x";
+ platform_data.devmode = battmode;
+ ipc_batt_platform_device =
+ platform_device_register_data(NULL, pdevname,
+ PLATFORM_DEVID_NONE, &platform_data,
+ sizeof(struct simatic_ipc_platform));
+ if (IS_ERR(ipc_batt_platform_device))
+ return PTR_ERR(ipc_batt_platform_device);
+
+ pr_debug("device=%s created\n",
+ ipc_batt_platform_device->name);
+ }
+
+ if (ledmode != SIMATIC_IPC_DEVICE_NONE) {
+ pdevname = KBUILD_MODNAME "_leds";
+ if (ledmode == SIMATIC_IPC_DEVICE_127E)
+ pdevname = KBUILD_MODNAME "_leds_gpio_apollolake";
+ if (ledmode == SIMATIC_IPC_DEVICE_227G || ledmode == SIMATIC_IPC_DEVICE_BX_59A)
+ pdevname = KBUILD_MODNAME "_leds_gpio_f7188x";
+ if (ledmode == SIMATIC_IPC_DEVICE_BX_21A)
+ pdevname = KBUILD_MODNAME "_leds_gpio_elkhartlake";
+ platform_data.devmode = ledmode;
+ ipc_led_platform_device =
+ platform_device_register_data(NULL,
+ pdevname, PLATFORM_DEVID_NONE,
+ &platform_data,
+ sizeof(struct simatic_ipc_platform));
+ if (IS_ERR(ipc_led_platform_device))
+ return PTR_ERR(ipc_led_platform_device);
+
+ pr_debug("device=%s created\n",
+ ipc_led_platform_device->name);
+ }
+
+ if (wdtmode != SIMATIC_IPC_DEVICE_NONE) {
+ platform_data.devmode = wdtmode;
+ ipc_wdt_platform_device =
+ platform_device_register_data(NULL,
+ KBUILD_MODNAME "_wdt", PLATFORM_DEVID_NONE,
+ &platform_data,
+ sizeof(struct simatic_ipc_platform));
+ if (IS_ERR(ipc_wdt_platform_device))
+ return PTR_ERR(ipc_wdt_platform_device);
+
+ pr_debug("device=%s created\n",
+ ipc_wdt_platform_device->name);
+ }
+
+ if (ledmode == SIMATIC_IPC_DEVICE_NONE &&
+ wdtmode == SIMATIC_IPC_DEVICE_NONE &&
+ battmode == SIMATIC_IPC_DEVICE_NONE) {
+ pr_warn("unsupported IPC detected, station id=%08x\n",
+ station_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void request_additional_modules(u32 station_id)
+{
+ char **extra_modules = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(device_modes); i++) {
+ if (device_modes[i].station_id == station_id) {
+ extra_modules = device_modes[i].extra_modules;
+ break;
+ }
+ }
+
+ if (!extra_modules)
+ return;
+
+ for (i = 0; i < SIMATIC_IPC_MAX_EXTRA_MODULES; i++) {
+ if (extra_modules[i])
+ request_module(extra_modules[i]);
+ else
+ break;
+ }
+}
+
+static int __init simatic_ipc_init_module(void)
+{
+ const struct dmi_system_id *match;
+ u32 station_id;
+ int err;
+
+ match = dmi_first_match(simatic_ipc_whitelist);
+ if (!match)
+ return 0;
+
+ err = dmi_walk(simatic_ipc_find_dmi_entry_helper, &station_id);
+
+ if (err || station_id == SIMATIC_IPC_INVALID_STATION_ID) {
+ pr_warn("DMI entry %d not found\n", SIMATIC_IPC_DMI_ENTRY_OEM);
+ return 0;
+ }
+
+ request_additional_modules(station_id);
+
+ return register_platform_devices(station_id);
+}
+
+static void __exit simatic_ipc_exit_module(void)
+{
+ platform_device_unregister(ipc_led_platform_device);
+ ipc_led_platform_device = NULL;
+
+ platform_device_unregister(ipc_wdt_platform_device);
+ ipc_wdt_platform_device = NULL;
+
+ platform_device_unregister(ipc_batt_platform_device);
+ ipc_batt_platform_device = NULL;
+}
+
+module_init(simatic_ipc_init_module);
+module_exit(simatic_ipc_exit_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Gerd Haeussler <gerd.haeussler.ext@siemens.com>");
+MODULE_ALIAS("dmi:*:svnSIEMENSAG:*");
diff --git a/drivers/platform/x86/simatic-ipc.c b/drivers/platform/x86/simatic-ipc.c
deleted file mode 100644
index c773995b230d..000000000000
--- a/drivers/platform/x86/simatic-ipc.c
+++ /dev/null
@@ -1,151 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Siemens SIMATIC IPC platform driver
- *
- * Copyright (c) Siemens AG, 2018-2021
- *
- * Authors:
- * Henning Schild <henning.schild@siemens.com>
- * Jan Kiszka <jan.kiszka@siemens.com>
- * Gerd Haeussler <gerd.haeussler.ext@siemens.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/dmi.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/platform_data/x86/simatic-ipc.h>
-#include <linux/platform_device.h>
-
-static struct platform_device *ipc_led_platform_device;
-static struct platform_device *ipc_wdt_platform_device;
-
-static const struct dmi_system_id simatic_ipc_whitelist[] = {
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SIEMENS AG"),
- },
- },
- {}
-};
-
-static struct simatic_ipc_platform platform_data;
-
-static struct {
- u32 station_id;
- u8 led_mode;
- u8 wdt_mode;
-} device_modes[] = {
- {SIMATIC_IPC_IPC127E, SIMATIC_IPC_DEVICE_127E, SIMATIC_IPC_DEVICE_NONE},
- {SIMATIC_IPC_IPC227D, SIMATIC_IPC_DEVICE_227D, SIMATIC_IPC_DEVICE_NONE},
- {SIMATIC_IPC_IPC227E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_227E},
- {SIMATIC_IPC_IPC227G, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
- {SIMATIC_IPC_IPC277E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227E},
- {SIMATIC_IPC_IPC427D, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE},
- {SIMATIC_IPC_IPC427E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_427E},
- {SIMATIC_IPC_IPC477E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_427E},
- {SIMATIC_IPC_IPCBX_39A, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
- {SIMATIC_IPC_IPCPX_39A, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227G},
-};
-
-static int register_platform_devices(u32 station_id)
-{
- u8 ledmode = SIMATIC_IPC_DEVICE_NONE;
- u8 wdtmode = SIMATIC_IPC_DEVICE_NONE;
- char *pdevname = KBUILD_MODNAME "_leds";
- int i;
-
- platform_data.devmode = SIMATIC_IPC_DEVICE_NONE;
-
- for (i = 0; i < ARRAY_SIZE(device_modes); i++) {
- if (device_modes[i].station_id == station_id) {
- ledmode = device_modes[i].led_mode;
- wdtmode = device_modes[i].wdt_mode;
- break;
- }
- }
-
- if (ledmode != SIMATIC_IPC_DEVICE_NONE) {
- if (ledmode == SIMATIC_IPC_DEVICE_127E)
- pdevname = KBUILD_MODNAME "_leds_gpio_apollolake";
- if (ledmode == SIMATIC_IPC_DEVICE_227G)
- pdevname = KBUILD_MODNAME "_leds_gpio_f7188x";
- platform_data.devmode = ledmode;
- ipc_led_platform_device =
- platform_device_register_data(NULL,
- pdevname, PLATFORM_DEVID_NONE,
- &platform_data,
- sizeof(struct simatic_ipc_platform));
- if (IS_ERR(ipc_led_platform_device))
- return PTR_ERR(ipc_led_platform_device);
-
- pr_debug("device=%s created\n",
- ipc_led_platform_device->name);
- }
-
- if (wdtmode == SIMATIC_IPC_DEVICE_227G) {
- request_module("w83627hf_wdt");
- return 0;
- }
-
- if (wdtmode != SIMATIC_IPC_DEVICE_NONE) {
- platform_data.devmode = wdtmode;
- ipc_wdt_platform_device =
- platform_device_register_data(NULL,
- KBUILD_MODNAME "_wdt", PLATFORM_DEVID_NONE,
- &platform_data,
- sizeof(struct simatic_ipc_platform));
- if (IS_ERR(ipc_wdt_platform_device))
- return PTR_ERR(ipc_wdt_platform_device);
-
- pr_debug("device=%s created\n",
- ipc_wdt_platform_device->name);
- }
-
- if (ledmode == SIMATIC_IPC_DEVICE_NONE &&
- wdtmode == SIMATIC_IPC_DEVICE_NONE) {
- pr_warn("unsupported IPC detected, station id=%08x\n",
- station_id);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int __init simatic_ipc_init_module(void)
-{
- const struct dmi_system_id *match;
- u32 station_id;
- int err;
-
- match = dmi_first_match(simatic_ipc_whitelist);
- if (!match)
- return 0;
-
- err = dmi_walk(simatic_ipc_find_dmi_entry_helper, &station_id);
-
- if (err || station_id == SIMATIC_IPC_INVALID_STATION_ID) {
- pr_warn("DMI entry %d not found\n", SIMATIC_IPC_DMI_ENTRY_OEM);
- return 0;
- }
-
- return register_platform_devices(station_id);
-}
-
-static void __exit simatic_ipc_exit_module(void)
-{
- platform_device_unregister(ipc_led_platform_device);
- ipc_led_platform_device = NULL;
-
- platform_device_unregister(ipc_wdt_platform_device);
- ipc_wdt_platform_device = NULL;
-}
-
-module_init(simatic_ipc_init_module);
-module_exit(simatic_ipc_exit_module);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Gerd Haeussler <gerd.haeussler.ext@siemens.com>");
-MODULE_ALIAS("dmi:*:svnSIEMENSAG:*");
diff --git a/drivers/platform/x86/system76_acpi.c b/drivers/platform/x86/system76_acpi.c
index fc4708fa6ebe..3da753b3d00d 100644
--- a/drivers/platform/x86/system76_acpi.c
+++ b/drivers/platform/x86/system76_acpi.c
@@ -2,7 +2,7 @@
/*
* System76 ACPI Driver
*
- * Copyright (C) 2019 System76
+ * Copyright (C) 2023 System76
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -24,6 +24,12 @@
#include <acpi/battery.h>
+enum kbled_type {
+ KBLED_NONE,
+ KBLED_WHITE,
+ KBLED_RGB,
+};
+
struct system76_data {
struct acpi_device *acpi_dev;
struct led_classdev ap_led;
@@ -36,6 +42,7 @@ struct system76_data {
union acpi_object *ntmp;
struct input_dev *input;
bool has_open_ec;
+ enum kbled_type kbled_type;
};
static const struct acpi_device_id device_ids[] = {
@@ -327,7 +334,11 @@ static int kb_led_set(struct led_classdev *led, enum led_brightness value)
data = container_of(led, struct system76_data, kb_led);
data->kb_brightness = value;
- return system76_set(data, "SKBL", (int)data->kb_brightness);
+ if (acpi_has_method(acpi_device_handle(data->acpi_dev), "GKBK")) {
+ return system76_set(data, "SKBB", (int)data->kb_brightness);
+ } else {
+ return system76_set(data, "SKBL", (int)data->kb_brightness);
+ }
}
// Get the last set keyboard LED color
@@ -399,7 +410,12 @@ static void kb_led_hotkey_hardware(struct system76_data *data)
{
int value;
- value = system76_get(data, "GKBL");
+ if (acpi_has_method(acpi_device_handle(data->acpi_dev), "GKBK")) {
+ value = system76_get(data, "GKBB");
+ } else {
+ value = system76_get(data, "GKBL");
+ }
+
if (value < 0)
return;
data->kb_brightness = value;
@@ -459,8 +475,9 @@ static void kb_led_hotkey_color(struct system76_data *data)
{
int i;
- if (data->kb_color < 0)
+ if (data->kbled_type != KBLED_RGB)
return;
+
if (data->kb_brightness > 0) {
for (i = 0; i < ARRAY_SIZE(kb_colors); i++) {
if (kb_colors[i] == data->kb_color)
@@ -687,19 +704,46 @@ static int system76_add(struct acpi_device *acpi_dev)
data->kb_led.flags = LED_BRIGHT_HW_CHANGED | LED_CORE_SUSPENDRESUME;
data->kb_led.brightness_get = kb_led_get;
data->kb_led.brightness_set_blocking = kb_led_set;
- if (acpi_has_method(acpi_device_handle(data->acpi_dev), "SKBC")) {
- data->kb_led.max_brightness = 255;
- data->kb_led.groups = system76_kb_led_color_groups;
- data->kb_toggle_brightness = 72;
- data->kb_color = 0xffffff;
- system76_set(data, "SKBC", data->kb_color);
+ if (acpi_has_method(acpi_device_handle(data->acpi_dev), "GKBK")) {
+ // Use the new ACPI methods
+ data->kbled_type = system76_get(data, "GKBK");
+
+ switch (data->kbled_type) {
+ case KBLED_NONE:
+ // Nothing to do: Device will not be registered.
+ break;
+ case KBLED_WHITE:
+ data->kb_led.max_brightness = 255;
+ data->kb_toggle_brightness = 72;
+ break;
+ case KBLED_RGB:
+ data->kb_led.max_brightness = 255;
+ data->kb_led.groups = system76_kb_led_color_groups;
+ data->kb_toggle_brightness = 72;
+ data->kb_color = 0xffffff;
+ system76_set(data, "SKBC", data->kb_color);
+ break;
+ }
} else {
- data->kb_led.max_brightness = 5;
- data->kb_color = -1;
+ // Use the old ACPI methods
+ if (acpi_has_method(acpi_device_handle(data->acpi_dev), "SKBC")) {
+ data->kbled_type = KBLED_RGB;
+ data->kb_led.max_brightness = 255;
+ data->kb_led.groups = system76_kb_led_color_groups;
+ data->kb_toggle_brightness = 72;
+ data->kb_color = 0xffffff;
+ system76_set(data, "SKBC", data->kb_color);
+ } else {
+ data->kbled_type = KBLED_WHITE;
+ data->kb_led.max_brightness = 5;
+ }
+ }
+
+ if (data->kbled_type != KBLED_NONE) {
+ err = devm_led_classdev_register(&acpi_dev->dev, &data->kb_led);
+ if (err)
+ return err;
}
- err = devm_led_classdev_register(&acpi_dev->dev, &data->kb_led);
- if (err)
- return err;
data->input = devm_input_allocate_device(&acpi_dev->dev);
if (!data->input)
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index ad460417f901..d70c89d32534 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -50,6 +50,7 @@
#include <linux/kthread.h>
#include <linux/leds.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nvram.h>
@@ -907,16 +908,9 @@ static ssize_t dispatch_proc_write(struct file *file,
if (count > PAGE_SIZE - 1)
return -EINVAL;
- kernbuf = kmalloc(count + 1, GFP_KERNEL);
- if (!kernbuf)
- return -ENOMEM;
-
- if (copy_from_user(kernbuf, userbuf, count)) {
- kfree(kernbuf);
- return -EFAULT;
- }
-
- kernbuf[count] = 0;
+ kernbuf = memdup_user_nul(userbuf, count);
+ if (IS_ERR(kernbuf))
+ return PTR_ERR(kernbuf);
ret = ibm->write(kernbuf);
if (ret == 0)
ret = count;
@@ -2066,11 +2060,11 @@ static int hotkey_get_tablet_mode(int *status)
* hotkey_acpi_mask accordingly. Also resets any bits
* from hotkey_user_mask that are unavailable to be
* delivered (shadow requirement of the userspace ABI).
- *
- * Call with hotkey_mutex held
*/
static int hotkey_mask_get(void)
{
+ lockdep_assert_held(&hotkey_mutex);
+
if (tp_features.hotkey_mask) {
u32 m = 0;
@@ -2106,8 +2100,6 @@ static void hotkey_mask_warn_incomplete_mask(void)
* Also calls hotkey_mask_get to update hotkey_acpi_mask.
*
* NOTE: does not set bits in hotkey_user_mask, but may reset them.
- *
- * Call with hotkey_mutex held
*/
static int hotkey_mask_set(u32 mask)
{
@@ -2116,6 +2108,8 @@ static int hotkey_mask_set(u32 mask)
const u32 fwmask = mask & ~hotkey_source_mask;
+ lockdep_assert_held(&hotkey_mutex);
+
if (tp_features.hotkey_mask) {
for (i = 0; i < 32; i++) {
if (!acpi_evalf(hkey_handle,
@@ -2147,13 +2141,13 @@ static int hotkey_mask_set(u32 mask)
/*
* Sets hotkey_user_mask and tries to set the firmware mask
- *
- * Call with hotkey_mutex held
*/
static int hotkey_user_mask_set(const u32 mask)
{
int rc;
+ lockdep_assert_held(&hotkey_mutex);
+
/* Give people a chance to notice they are doing something that
* is bound to go boom on their users sooner or later */
if (!tp_warned.hotkey_mask_ff &&
@@ -2514,21 +2508,23 @@ exit:
return 0;
}
-/* call with hotkey_mutex held */
static void hotkey_poll_stop_sync(void)
{
+ lockdep_assert_held(&hotkey_mutex);
+
if (tpacpi_hotkey_task) {
kthread_stop(tpacpi_hotkey_task);
tpacpi_hotkey_task = NULL;
}
}
-/* call with hotkey_mutex held */
static void hotkey_poll_setup(const bool may_warn)
{
const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask;
const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask;
+ lockdep_assert_held(&hotkey_mutex);
+
if (hotkey_poll_freq > 0 &&
(poll_driver_mask ||
(poll_user_mask && tpacpi_inputdev->users > 0))) {
@@ -2557,9 +2553,10 @@ static void hotkey_poll_setup_safe(const bool may_warn)
mutex_unlock(&hotkey_mutex);
}
-/* call with hotkey_mutex held */
static void hotkey_poll_set_freq(unsigned int freq)
{
+ lockdep_assert_held(&hotkey_mutex);
+
if (!freq)
hotkey_poll_stop_sync();
@@ -3473,7 +3470,9 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
if (tp_features.hotkey_mask) {
/* hotkey_source_mask *must* be zero for
* the first hotkey_mask_get to return hotkey_orig_mask */
+ mutex_lock(&hotkey_mutex);
res = hotkey_mask_get();
+ mutex_unlock(&hotkey_mutex);
if (res)
return res;
@@ -3572,9 +3571,11 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
hotkey_exit();
return res;
}
+ mutex_lock(&hotkey_mutex);
res = hotkey_mask_set(((hotkey_all_mask & ~hotkey_reserved_mask)
| hotkey_driver_mask)
& ~hotkey_source_mask);
+ mutex_unlock(&hotkey_mutex);
if (res < 0 && res != -ENXIO) {
hotkey_exit();
return res;
@@ -6528,12 +6529,13 @@ static unsigned int brightness_enable = 2; /* 2 = auto, 0 = no, 1 = yes */
static struct mutex brightness_mutex;
-/* NVRAM brightness access,
- * call with brightness_mutex held! */
+/* NVRAM brightness access */
static unsigned int tpacpi_brightness_nvram_get(void)
{
u8 lnvram;
+ lockdep_assert_held(&brightness_mutex);
+
lnvram = (nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS)
& TP_NVRAM_MASK_LEVEL_BRIGHTNESS)
>> TP_NVRAM_POS_LEVEL_BRIGHTNESS;
@@ -6581,11 +6583,12 @@ unlock:
}
-/* call with brightness_mutex held! */
static int tpacpi_brightness_get_raw(int *status)
{
u8 lec = 0;
+ lockdep_assert_held(&brightness_mutex);
+
switch (brightness_mode) {
case TPACPI_BRGHT_MODE_UCMS_STEP:
*status = tpacpi_brightness_nvram_get();
@@ -6601,12 +6604,13 @@ static int tpacpi_brightness_get_raw(int *status)
}
}
-/* call with brightness_mutex held! */
/* do NOT call with illegal backlight level value */
static int tpacpi_brightness_set_ec(unsigned int value)
{
u8 lec = 0;
+ lockdep_assert_held(&brightness_mutex);
+
if (unlikely(!acpi_ec_read(TP_EC_BACKLIGHT, &lec)))
return -EIO;
@@ -6618,12 +6622,13 @@ static int tpacpi_brightness_set_ec(unsigned int value)
return 0;
}
-/* call with brightness_mutex held! */
static int tpacpi_brightness_set_ucmsstep(unsigned int value)
{
int cmos_cmd, inc;
unsigned int current_value, i;
+ lockdep_assert_held(&brightness_mutex);
+
current_value = tpacpi_brightness_nvram_get();
if (value == current_value)
@@ -8072,11 +8077,10 @@ static bool fan_select_fan2(void)
return true;
}
-/*
- * Call with fan_mutex held
- */
static void fan_update_desired_level(u8 status)
{
+ lockdep_assert_held(&fan_mutex);
+
if ((status &
(TP_EC_FAN_AUTO | TP_EC_FAN_FULLSPEED)) == 0) {
if (status > 7)
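The thinkpad_acpi hunks above trade the "call with hotkey_mutex/brightness_mutex/fan_mutex held" comments for lockdep_assert_held() and fold the open-coded /proc write copy into memdup_user_nul(). As a rough, self-contained sketch of the assertion pattern (the names example_mutex and example_get are made up, not from the driver):

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);
static int example_state;

/* Must be called with example_mutex held. */
static int example_get(void)
{
        /* no-op without CONFIG_LOCKDEP; warns at runtime if the caller forgot to lock */
        lockdep_assert_held(&example_mutex);

        return example_state;
}

With lockdep enabled, a caller that skips mutex_lock(&example_mutex) now triggers a warning instead of silently racing, which is also why the two hotkey_init() call sites above gained explicit lock/unlock pairs around hotkey_mask_get() and hotkey_mask_set().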
diff --git a/drivers/platform/x86/wmi-bmof.c b/drivers/platform/x86/wmi-bmof.c
index 80137afb9753..644d2fd889c0 100644
--- a/drivers/platform/x86/wmi-bmof.c
+++ b/drivers/platform/x86/wmi-bmof.c
@@ -25,25 +25,13 @@ struct bmof_priv {
struct bin_attribute bmof_bin_attr;
};
-static ssize_t
-read_bmof(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
+static ssize_t read_bmof(struct file *filp, struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
{
- struct bmof_priv *priv =
- container_of(attr, struct bmof_priv, bmof_bin_attr);
+ struct bmof_priv *priv = container_of(attr, struct bmof_priv, bmof_bin_attr);
- if (off < 0)
- return -EINVAL;
-
- if (off >= priv->bmofdata->buffer.length)
- return 0;
-
- if (count > priv->bmofdata->buffer.length - off)
- count = priv->bmofdata->buffer.length - off;
-
- memcpy(buf, priv->bmofdata->buffer.pointer + off, count);
- return count;
+ return memory_read_from_buffer(buf, count, &off, priv->bmofdata->buffer.pointer,
+ priv->bmofdata->buffer.length);
}
static int wmi_bmof_probe(struct wmi_device *wdev, const void *context)
@@ -75,7 +63,7 @@ static int wmi_bmof_probe(struct wmi_device *wdev, const void *context)
priv->bmof_bin_attr.read = read_bmof;
priv->bmof_bin_attr.size = priv->bmofdata->buffer.length;
- ret = sysfs_create_bin_file(&wdev->dev.kobj, &priv->bmof_bin_attr);
+ ret = device_create_bin_file(&wdev->dev, &priv->bmof_bin_attr);
if (ret)
goto err_free;
@@ -90,7 +78,7 @@ static void wmi_bmof_remove(struct wmi_device *wdev)
{
struct bmof_priv *priv = dev_get_drvdata(&wdev->dev);
- sysfs_remove_bin_file(&wdev->dev.kobj, &priv->bmof_bin_attr);
+ device_remove_bin_file(&wdev->dev, &priv->bmof_bin_attr);
kfree(priv->bmofdata);
}
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 4aa466c945e2..0b69fb7bafd8 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -1309,8 +1309,8 @@ static int psy_register_thermal(struct power_supply *psy)
struct thermal_zone_params tzp = {
.no_hwmon = IS_ENABLED(CONFIG_POWER_SUPPLY_HWMON)
};
- psy->tzd = thermal_zone_device_register(psy->desc->name,
- 0, 0, psy, &psy_tzd_ops, &tzp, 0, 0);
+ psy->tzd = thermal_tripless_zone_device_register(psy->desc->name,
+ psy, &psy_tzd_ops, &tzp);
if (IS_ERR(psy->tzd))
return PTR_ERR(psy->tzd);
ret = thermal_zone_device_enable(psy->tzd);
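power_supply_core switches to thermal_tripless_zone_device_register(), meant for zones that only report a temperature and expose no trip points. A hedged sketch of the same call shape (example_get_temp, example_tz_ops and "example_zone" are illustrative, not from the driver):

#include <linux/err.h>
#include <linux/thermal.h>

static int example_get_temp(struct thermal_zone_device *tzd, int *temp)
{
        *temp = 25000;  /* millidegrees Celsius */
        return 0;
}

static struct thermal_zone_device_ops example_tz_ops = {
        .get_temp = example_get_temp,
};

static int example_register(void)
{
        struct thermal_zone_params tzp = { .no_hwmon = true };
        struct thermal_zone_device *tzd;

        tzd = thermal_tripless_zone_device_register("example_zone", NULL,
                                                     &example_tz_ops, &tzp);
        if (IS_ERR(tzd))
                return PTR_ERR(tzd);

        return thermal_zone_device_enable(tzd);
}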
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 5c2e6d5eea2a..40a2cc649c79 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -658,8 +658,6 @@ static struct rapl_primitive_info rpi_msr[NR_RAPL_PRIMITIVES] = {
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
[PL2_CLAMP] = PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
- [PL4_ENABLE] = PRIMITIVE_INFO_INIT(PL4_ENABLE, POWER_LIMIT4_MASK, 0,
- RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0),
[TIME_WINDOW1] = PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
[TIME_WINDOW2] = PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
@@ -1458,7 +1456,7 @@ static void rapl_detect_powerlimit(struct rapl_domain *rd)
}
}
- if (rapl_read_pl_data(rd, i, PL_ENABLE, false, &val64))
+ if (rapl_read_pl_data(rd, i, PL_LIMIT, false, &val64))
rd->rpl[i].name = NULL;
}
}
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 6210babb0741..8ebcddf91f7b 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -505,7 +505,7 @@ config PWM_ROCKCHIP
config PWM_RZ_MTU3
tristate "Renesas RZ/G2L MTU3a PWM Timer support"
- depends on RZ_MTU3 || COMPILE_TEST
+ depends on RZ_MTU3
depends on HAS_IOMEM
help
This driver exposes the MTU3a PWM Timer controller found in Renesas
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 3dacceaef4a9..dc66e3405bf5 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -8,8 +8,8 @@
#include <linux/acpi.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pwm.h>
-#include <linux/radix-tree.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/err.h>
@@ -127,28 +127,28 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
}
struct pwm_device *
-of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args)
+of_pwm_xlate_with_flags(struct pwm_chip *chip, const struct of_phandle_args *args)
{
struct pwm_device *pwm;
- if (pc->of_pwm_n_cells < 2)
+ if (chip->of_pwm_n_cells < 2)
return ERR_PTR(-EINVAL);
/* flags in the third cell are optional */
if (args->args_count < 2)
return ERR_PTR(-EINVAL);
- if (args->args[0] >= pc->npwm)
+ if (args->args[0] >= chip->npwm)
return ERR_PTR(-EINVAL);
- pwm = pwm_request_from_chip(pc, args->args[0], NULL);
+ pwm = pwm_request_from_chip(chip, args->args[0], NULL);
if (IS_ERR(pwm))
return pwm;
pwm->args.period = args->args[1];
pwm->args.polarity = PWM_POLARITY_NORMAL;
- if (pc->of_pwm_n_cells >= 3) {
+ if (chip->of_pwm_n_cells >= 3) {
if (args->args_count > 2 && args->args[2] & PWM_POLARITY_INVERTED)
pwm->args.polarity = PWM_POLARITY_INVERSED;
}
@@ -158,18 +158,18 @@ of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args)
EXPORT_SYMBOL_GPL(of_pwm_xlate_with_flags);
struct pwm_device *
-of_pwm_single_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
+of_pwm_single_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
{
struct pwm_device *pwm;
- if (pc->of_pwm_n_cells < 1)
+ if (chip->of_pwm_n_cells < 1)
return ERR_PTR(-EINVAL);
/* validate that one cell is specified, optionally with flags */
if (args->args_count != 1 && args->args_count != 2)
return ERR_PTR(-EINVAL);
- pwm = pwm_request_from_chip(pc, 0, NULL);
+ pwm = pwm_request_from_chip(chip, 0, NULL);
if (IS_ERR(pwm))
return pwm;
@@ -312,22 +312,19 @@ EXPORT_SYMBOL_GPL(pwmchip_add);
* pwmchip_remove() - remove a PWM chip
* @chip: the PWM chip to remove
*
- * Removes a PWM chip. This function may return busy if the PWM chip provides
- * a PWM device that is still requested.
- *
- * Returns: 0 on success or a negative error code on failure.
+ * Removes a PWM chip.
*/
void pwmchip_remove(struct pwm_chip *chip)
{
pwmchip_sysfs_unexport(chip);
+ if (IS_ENABLED(CONFIG_OF))
+ of_pwmchip_remove(chip);
+
mutex_lock(&pwm_lock);
list_del_init(&chip->list);
- if (IS_ENABLED(CONFIG_OF))
- of_pwmchip_remove(chip);
-
free_pwms(chip);
mutex_unlock(&pwm_lock);
@@ -692,7 +689,7 @@ static struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np,
struct pwm_device *pwm = NULL;
struct of_phandle_args args;
struct device_link *dl;
- struct pwm_chip *pc;
+ struct pwm_chip *chip;
int index = 0;
int err;
@@ -709,16 +706,16 @@ static struct pwm_device *of_pwm_get(struct device *dev, struct device_node *np,
return ERR_PTR(err);
}
- pc = fwnode_to_pwmchip(of_fwnode_handle(args.np));
- if (IS_ERR(pc)) {
- if (PTR_ERR(pc) != -EPROBE_DEFER)
+ chip = fwnode_to_pwmchip(of_fwnode_handle(args.np));
+ if (IS_ERR(chip)) {
+ if (PTR_ERR(chip) != -EPROBE_DEFER)
pr_err("%s(): PWM chip not found\n", __func__);
- pwm = ERR_CAST(pc);
+ pwm = ERR_CAST(chip);
goto put;
}
- pwm = pc->of_xlate(pc, &args);
+ pwm = chip->of_xlate(chip, &args);
if (IS_ERR(pwm))
goto put;
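In the of_pwm_xlate_with_flags() path reworked above, cell 0 selects the PWM on the chip, cell 1 lands in pwm->args.period, and an optional third cell may switch pwm->args.polarity to PWM_POLARITY_INVERSED. Consumers usually pick those reference args up through pwm_init_state(); a small sketch under hypothetical names (example_consumer_probe, dev):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pwm.h>

static int example_consumer_probe(struct device *dev)
{
        struct pwm_device *pwm;
        struct pwm_state state;

        pwm = devm_pwm_get(dev, NULL);          /* resolves the consumer's "pwms" phandle */
        if (IS_ERR(pwm))
                return PTR_ERR(pwm);

        pwm_init_state(pwm, &state);            /* period/polarity seeded from pwm->args */
        state.duty_cycle = state.period / 2;
        state.enabled = true;

        return pwm_apply_state(pwm, &state);
}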
diff --git a/drivers/pwm/pwm-apple.c b/drivers/pwm/pwm-apple.c
index a38a62edd713..8e7d67fb5fbe 100644
--- a/drivers/pwm/pwm-apple.c
+++ b/drivers/pwm/pwm-apple.c
@@ -12,6 +12,7 @@
* - When APPLE_PWM_CTRL is set to 0, the output is constant low
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c
index 96a709a9d49a..e271d920151e 100644
--- a/drivers/pwm/pwm-atmel-hlcdc.c
+++ b/drivers/pwm/pwm-atmel-hlcdc.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/mfd/atmel-hlcdc.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
@@ -38,11 +39,11 @@ static inline struct atmel_hlcdc_pwm *to_atmel_hlcdc_pwm(struct pwm_chip *chip)
return container_of(chip, struct atmel_hlcdc_pwm, chip);
}
-static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
+static int atmel_hlcdc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
- struct atmel_hlcdc_pwm *chip = to_atmel_hlcdc_pwm(c);
- struct atmel_hlcdc *hlcdc = chip->hlcdc;
+ struct atmel_hlcdc_pwm *atmel = to_atmel_hlcdc_pwm(chip);
+ struct atmel_hlcdc *hlcdc = atmel->hlcdc;
unsigned int status;
int ret;
@@ -54,7 +55,7 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
u32 pwmcfg;
int pres;
- if (!chip->errata || !chip->errata->slow_clk_erratum) {
+ if (!atmel->errata || !atmel->errata->slow_clk_erratum) {
clk_freq = clk_get_rate(new_clk);
if (!clk_freq)
return -EINVAL;
@@ -64,7 +65,7 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
}
/* Errata: cannot use slow clk on some IP revisions */
- if ((chip->errata && chip->errata->slow_clk_erratum) ||
+ if ((atmel->errata && atmel->errata->slow_clk_erratum) ||
clk_period_ns > state->period) {
new_clk = hlcdc->sys_clk;
clk_freq = clk_get_rate(new_clk);
@@ -77,8 +78,8 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
for (pres = 0; pres <= ATMEL_HLCDC_PWMPS_MAX; pres++) {
/* Errata: cannot divide by 1 on some IP revisions */
- if (!pres && chip->errata &&
- chip->errata->div1_clk_erratum)
+ if (!pres && atmel->errata &&
+ atmel->errata->div1_clk_erratum)
continue;
if ((clk_period_ns << pres) >= state->period)
@@ -90,7 +91,7 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
pwmcfg = ATMEL_HLCDC_PWMPS(pres);
- if (new_clk != chip->cur_clk) {
+ if (new_clk != atmel->cur_clk) {
u32 gencfg = 0;
int ret;
@@ -98,8 +99,8 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
if (ret)
return ret;
- clk_disable_unprepare(chip->cur_clk);
- chip->cur_clk = new_clk;
+ clk_disable_unprepare(atmel->cur_clk);
+ atmel->cur_clk = new_clk;
if (new_clk == hlcdc->sys_clk)
gencfg = ATMEL_HLCDC_CLKPWMSEL;
@@ -160,8 +161,8 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *c, struct pwm_device *pwm,
if (ret)
return ret;
- clk_disable_unprepare(chip->cur_clk);
- chip->cur_clk = NULL;
+ clk_disable_unprepare(atmel->cur_clk);
+ atmel->cur_clk = NULL;
}
return 0;
@@ -183,31 +184,32 @@ static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = {
#ifdef CONFIG_PM_SLEEP
static int atmel_hlcdc_pwm_suspend(struct device *dev)
{
- struct atmel_hlcdc_pwm *chip = dev_get_drvdata(dev);
+ struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev);
/* Keep the periph clock enabled if the PWM is still running. */
- if (pwm_is_enabled(&chip->chip.pwms[0]))
- clk_disable_unprepare(chip->hlcdc->periph_clk);
+ if (pwm_is_enabled(&atmel->chip.pwms[0]))
+ clk_disable_unprepare(atmel->hlcdc->periph_clk);
return 0;
}
static int atmel_hlcdc_pwm_resume(struct device *dev)
{
- struct atmel_hlcdc_pwm *chip = dev_get_drvdata(dev);
+ struct atmel_hlcdc_pwm *atmel = dev_get_drvdata(dev);
struct pwm_state state;
int ret;
- pwm_get_state(&chip->chip.pwms[0], &state);
+ pwm_get_state(&atmel->chip.pwms[0], &state);
/* Re-enable the periph clock it was stopped during suspend. */
if (!state.enabled) {
- ret = clk_prepare_enable(chip->hlcdc->periph_clk);
+ ret = clk_prepare_enable(atmel->hlcdc->periph_clk);
if (ret)
return ret;
}
- return atmel_hlcdc_pwm_apply(&chip->chip, &chip->chip.pwms[0], &state);
+ return atmel_hlcdc_pwm_apply(&atmel->chip, &atmel->chip.pwms[0],
+ &state);
}
#endif
@@ -244,14 +246,14 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct device *dev = &pdev->dev;
- struct atmel_hlcdc_pwm *chip;
+ struct atmel_hlcdc_pwm *atmel;
struct atmel_hlcdc *hlcdc;
int ret;
hlcdc = dev_get_drvdata(dev->parent);
- chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
+ atmel = devm_kzalloc(dev, sizeof(*atmel), GFP_KERNEL);
+ if (!atmel)
return -ENOMEM;
ret = clk_prepare_enable(hlcdc->periph_clk);
@@ -260,31 +262,31 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
match = of_match_node(atmel_hlcdc_dt_ids, dev->parent->of_node);
if (match)
- chip->errata = match->data;
+ atmel->errata = match->data;
- chip->hlcdc = hlcdc;
- chip->chip.ops = &atmel_hlcdc_pwm_ops;
- chip->chip.dev = dev;
- chip->chip.npwm = 1;
+ atmel->hlcdc = hlcdc;
+ atmel->chip.ops = &atmel_hlcdc_pwm_ops;
+ atmel->chip.dev = dev;
+ atmel->chip.npwm = 1;
- ret = pwmchip_add(&chip->chip);
+ ret = pwmchip_add(&atmel->chip);
if (ret) {
clk_disable_unprepare(hlcdc->periph_clk);
return ret;
}
- platform_set_drvdata(pdev, chip);
+ platform_set_drvdata(pdev, atmel);
return 0;
}
static void atmel_hlcdc_pwm_remove(struct platform_device *pdev)
{
- struct atmel_hlcdc_pwm *chip = platform_get_drvdata(pdev);
+ struct atmel_hlcdc_pwm *atmel = platform_get_drvdata(pdev);
- pwmchip_remove(&chip->chip);
+ pwmchip_remove(&atmel->chip);
- clk_disable_unprepare(chip->hlcdc->periph_clk);
+ clk_disable_unprepare(atmel->hlcdc->periph_clk);
}
static const struct of_device_id atmel_hlcdc_pwm_dt_ids[] = {
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 4a116dc44f6e..c00dd37c5fbd 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -19,8 +19,7 @@
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <soc/at91/atmel_tcb.h>
@@ -34,7 +33,6 @@
ATMEL_TC_BEEVT | ATMEL_TC_BSWTRG)
struct atmel_tcb_pwm_device {
- enum pwm_polarity polarity; /* PWM polarity */
unsigned div; /* PWM clock divider */
unsigned duty; /* PWM duty expressed in clk cycles */
unsigned period; /* PWM period expressed in clk cycles */
@@ -57,7 +55,7 @@ struct atmel_tcb_pwm_chip {
struct clk *clk;
struct clk *gclk;
struct clk *slow_clk;
- struct atmel_tcb_pwm_device *pwms[NPWM];
+ struct atmel_tcb_pwm_device pwms[NPWM];
struct atmel_tcb_channel bkup;
};
@@ -68,37 +66,18 @@ static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
return container_of(chip, struct atmel_tcb_pwm_chip, chip);
}
-static int atmel_tcb_pwm_set_polarity(struct pwm_chip *chip,
- struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
- struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
-
- tcbpwm->polarity = polarity;
-
- return 0;
-}
-
static int atmel_tcb_pwm_request(struct pwm_chip *chip,
struct pwm_device *pwm)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
- struct atmel_tcb_pwm_device *tcbpwm;
+ struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
unsigned cmr;
int ret;
- tcbpwm = devm_kzalloc(chip->dev, sizeof(*tcbpwm), GFP_KERNEL);
- if (!tcbpwm)
- return -ENOMEM;
-
ret = clk_prepare_enable(tcbpwmc->clk);
- if (ret) {
- devm_kfree(chip->dev, tcbpwm);
+ if (ret)
return ret;
- }
- tcbpwm->polarity = PWM_POLARITY_NORMAL;
tcbpwm->duty = 0;
tcbpwm->period = 0;
tcbpwm->div = 0;
@@ -131,27 +110,22 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), cmr);
spin_unlock(&tcbpwmc->lock);
- tcbpwmc->pwms[pwm->hwpwm] = tcbpwm;
-
return 0;
}
static void atmel_tcb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
- struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
clk_disable_unprepare(tcbpwmc->clk);
- tcbpwmc->pwms[pwm->hwpwm] = NULL;
- devm_kfree(chip->dev, tcbpwm);
}
-static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
- struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
+ struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
unsigned cmr;
- enum pwm_polarity polarity = tcbpwm->polarity;
/*
* If duty is 0 the timer will be stopped and we have to
@@ -203,12 +177,12 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
spin_unlock(&tcbpwmc->lock);
}
-static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm,
+ enum pwm_polarity polarity)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
- struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
+ struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
u32 cmr;
- enum pwm_polarity polarity = tcbpwm->polarity;
/*
* If duty is 0 the timer will be stopped and we have to
@@ -291,7 +265,7 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_ns, int period_ns)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
- struct atmel_tcb_pwm_device *tcbpwm = tcbpwmc->pwms[pwm->hwpwm];
+ struct atmel_tcb_pwm_device *tcbpwm = &tcbpwmc->pwms[pwm->hwpwm];
struct atmel_tcb_pwm_device *atcbpwm = NULL;
int i = 0;
int slowclk = 0;
@@ -338,9 +312,9 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
period = div_u64(period_ns, min);
if (pwm->hwpwm == 0)
- atcbpwm = tcbpwmc->pwms[1];
+ atcbpwm = &tcbpwmc->pwms[1];
else
- atcbpwm = tcbpwmc->pwms[0];
+ atcbpwm = &tcbpwmc->pwms[0];
/*
* PWM devices provided by the TCB driver are grouped by 2.
@@ -371,11 +345,8 @@ static int atmel_tcb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
int duty_cycle, period;
int ret;
- /* This function only sets a flag in driver data */
- atmel_tcb_pwm_set_polarity(chip, pwm, state->polarity);
-
if (!state->enabled) {
- atmel_tcb_pwm_disable(chip, pwm);
+ atmel_tcb_pwm_disable(chip, pwm, state->polarity);
return 0;
}
@@ -386,7 +357,7 @@ static int atmel_tcb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (ret)
return ret;
- return atmel_tcb_pwm_enable(chip, pwm);
+ return atmel_tcb_pwm_enable(chip, pwm, state->polarity);
}
static const struct pwm_ops atmel_tcb_pwm_ops = {
@@ -422,13 +393,14 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
struct atmel_tcb_pwm_chip *tcbpwm;
const struct atmel_tcb_config *config;
struct device_node *np = pdev->dev.of_node;
- struct regmap *regmap;
- struct clk *clk, *gclk = NULL;
- struct clk *slow_clk;
char clk_name[] = "t0_clk";
int err;
int channel;
+ tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
+ if (tcbpwm == NULL)
+ return -ENOMEM;
+
err = of_property_read_u32(np, "reg", &channel);
if (err < 0) {
dev_err(&pdev->dev,
@@ -437,49 +409,43 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
return err;
}
- regmap = syscon_node_to_regmap(np->parent);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
+ tcbpwm->regmap = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(tcbpwm->regmap))
+ return PTR_ERR(tcbpwm->regmap);
- slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
- if (IS_ERR(slow_clk))
- return PTR_ERR(slow_clk);
+ tcbpwm->slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
+ if (IS_ERR(tcbpwm->slow_clk))
+ return PTR_ERR(tcbpwm->slow_clk);
clk_name[1] += channel;
- clk = of_clk_get_by_name(np->parent, clk_name);
- if (IS_ERR(clk))
- clk = of_clk_get_by_name(np->parent, "t0_clk");
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ tcbpwm->clk = of_clk_get_by_name(np->parent, clk_name);
+ if (IS_ERR(tcbpwm->clk))
+ tcbpwm->clk = of_clk_get_by_name(np->parent, "t0_clk");
+ if (IS_ERR(tcbpwm->clk)) {
+ err = PTR_ERR(tcbpwm->clk);
+ goto err_slow_clk;
+ }
match = of_match_node(atmel_tcb_of_match, np->parent);
config = match->data;
if (config->has_gclk) {
- gclk = of_clk_get_by_name(np->parent, "gclk");
- if (IS_ERR(gclk))
- return PTR_ERR(gclk);
- }
-
- tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
- if (tcbpwm == NULL) {
- err = -ENOMEM;
- goto err_slow_clk;
+ tcbpwm->gclk = of_clk_get_by_name(np->parent, "gclk");
+ if (IS_ERR(tcbpwm->gclk)) {
+ err = PTR_ERR(tcbpwm->gclk);
+ goto err_clk;
+ }
}
tcbpwm->chip.dev = &pdev->dev;
tcbpwm->chip.ops = &atmel_tcb_pwm_ops;
tcbpwm->chip.npwm = NPWM;
tcbpwm->channel = channel;
- tcbpwm->regmap = regmap;
- tcbpwm->clk = clk;
- tcbpwm->gclk = gclk;
- tcbpwm->slow_clk = slow_clk;
tcbpwm->width = config->counter_width;
- err = clk_prepare_enable(slow_clk);
+ err = clk_prepare_enable(tcbpwm->slow_clk);
if (err)
- goto err_slow_clk;
+ goto err_gclk;
spin_lock_init(&tcbpwm->lock);
@@ -494,8 +460,14 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
err_disable_clk:
clk_disable_unprepare(tcbpwm->slow_clk);
+err_gclk:
+ clk_put(tcbpwm->gclk);
+
+err_clk:
+ clk_put(tcbpwm->clk);
+
err_slow_clk:
- clk_put(slow_clk);
+ clk_put(tcbpwm->slow_clk);
return err;
}
@@ -507,8 +479,9 @@ static void atmel_tcb_pwm_remove(struct platform_device *pdev)
pwmchip_remove(&tcbpwm->chip);
clk_disable_unprepare(tcbpwm->slow_clk);
- clk_put(tcbpwm->slow_clk);
+ clk_put(tcbpwm->gclk);
clk_put(tcbpwm->clk);
+ clk_put(tcbpwm->slow_clk);
}
static const struct of_device_id atmel_tcb_pwm_dt_ids[] = {
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 5f7d286871cf..1f73325d1bea 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -25,7 +25,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
@@ -36,7 +35,7 @@
#define PWM_SR 0x0C
#define PWM_ISR 0x1C
/* Bit field in SR */
-#define PWM_SR_ALL_CH_ON 0x0F
+#define PWM_SR_ALL_CH_MASK 0x0F
/* The following register is PWM channel related registers */
#define PWM_CH_REG_OFFSET 0x200
@@ -464,6 +463,42 @@ static const struct of_device_id atmel_pwm_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids);
+static int atmel_pwm_enable_clk_if_on(struct atmel_pwm_chip *atmel_pwm, bool on)
+{
+ unsigned int i, cnt = 0;
+ unsigned long sr;
+ int ret = 0;
+
+ sr = atmel_pwm_readl(atmel_pwm, PWM_SR) & PWM_SR_ALL_CH_MASK;
+ if (!sr)
+ return 0;
+
+ cnt = bitmap_weight(&sr, atmel_pwm->chip.npwm);
+
+ if (!on)
+ goto disable_clk;
+
+ for (i = 0; i < cnt; i++) {
+ ret = clk_enable(atmel_pwm->clk);
+ if (ret) {
+ dev_err(atmel_pwm->chip.dev,
+ "failed to enable clock for pwm %pe\n",
+ ERR_PTR(ret));
+
+ cnt = i;
+ goto disable_clk;
+ }
+ }
+
+ return 0;
+
+disable_clk:
+ while (cnt--)
+ clk_disable(atmel_pwm->clk);
+
+ return ret;
+}
+
static int atmel_pwm_probe(struct platform_device *pdev)
{
struct atmel_pwm_chip *atmel_pwm;
@@ -482,51 +517,39 @@ static int atmel_pwm_probe(struct platform_device *pdev)
if (IS_ERR(atmel_pwm->base))
return PTR_ERR(atmel_pwm->base);
- atmel_pwm->clk = devm_clk_get(&pdev->dev, NULL);
+ atmel_pwm->clk = devm_clk_get_prepared(&pdev->dev, NULL);
if (IS_ERR(atmel_pwm->clk))
- return PTR_ERR(atmel_pwm->clk);
-
- ret = clk_prepare(atmel_pwm->clk);
- if (ret) {
- dev_err(&pdev->dev, "failed to prepare PWM clock\n");
- return ret;
- }
+ return dev_err_probe(&pdev->dev, PTR_ERR(atmel_pwm->clk),
+ "failed to get prepared PWM clock\n");
atmel_pwm->chip.dev = &pdev->dev;
atmel_pwm->chip.ops = &atmel_pwm_ops;
atmel_pwm->chip.npwm = 4;
- ret = pwmchip_add(&atmel_pwm->chip);
+ ret = atmel_pwm_enable_clk_if_on(atmel_pwm, true);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_pwmchip_add(&pdev->dev, &atmel_pwm->chip);
if (ret < 0) {
- dev_err(&pdev->dev, "failed to add PWM chip %d\n", ret);
- goto unprepare_clk;
+ dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
+ goto disable_clk;
}
- platform_set_drvdata(pdev, atmel_pwm);
+ return 0;
- return ret;
+disable_clk:
+ atmel_pwm_enable_clk_if_on(atmel_pwm, false);
-unprepare_clk:
- clk_unprepare(atmel_pwm->clk);
return ret;
}
-static void atmel_pwm_remove(struct platform_device *pdev)
-{
- struct atmel_pwm_chip *atmel_pwm = platform_get_drvdata(pdev);
-
- pwmchip_remove(&atmel_pwm->chip);
-
- clk_unprepare(atmel_pwm->clk);
-}
-
static struct platform_driver atmel_pwm_driver = {
.driver = {
.name = "atmel-pwm",
.of_match_table = of_match_ptr(atmel_pwm_dt_ids),
},
.probe = atmel_pwm_probe,
- .remove_new = atmel_pwm_remove,
};
module_platform_driver(atmel_pwm_driver);
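The pwm-atmel probe rewrite is what lets the remove callback disappear: devm_clk_get_prepared() and devm_pwmchip_add() undo themselves on unbind, and dev_err_probe() collapses the print-then-return error pattern. A condensed sketch with a made-up example_pwm driver; the stub apply callback only marks where a real driver would program the hardware:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>

struct example_pwm {
        struct pwm_chip chip;
        struct clk *clk;
};

static int example_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                             const struct pwm_state *state)
{
        return 0;       /* stub; a real driver writes the period/duty registers here */
}

static const struct pwm_ops example_pwm_ops = {
        .apply = example_pwm_apply,
};

static int example_pwm_probe(struct platform_device *pdev)
{
        struct example_pwm *priv;
        int ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* the clock is unprepared automatically when the device unbinds */
        priv->clk = devm_clk_get_prepared(&pdev->dev, NULL);
        if (IS_ERR(priv->clk))
                return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
                                     "failed to get prepared clock\n");

        priv->chip.dev = &pdev->dev;
        priv->chip.ops = &example_pwm_ops;
        priv->chip.npwm = 4;

        /* the chip is removed in reverse order on unbind, so no .remove_new */
        ret = devm_pwmchip_add(&pdev->dev, &priv->chip);
        if (ret)
                return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");

        return 0;
}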
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 4fa6e249e4cf..e5b00cc9f7a7 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -61,9 +61,9 @@ struct kona_pwmc {
struct clk *clk;
};
-static inline struct kona_pwmc *to_kona_pwmc(struct pwm_chip *_chip)
+static inline struct kona_pwmc *to_kona_pwmc(struct pwm_chip *chip)
{
- return container_of(_chip, struct kona_pwmc, chip);
+ return container_of(chip, struct kona_pwmc, chip);
}
/*
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index 0c5992a046b2..0971c666afd1 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -13,6 +13,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
index 4703b4a0b6e4..b9f063dc6b5f 100644
--- a/drivers/pwm/pwm-crc.c
+++ b/drivers/pwm/pwm-crc.c
@@ -34,9 +34,9 @@ struct crystalcove_pwm {
struct regmap *regmap;
};
-static inline struct crystalcove_pwm *to_crc_pwm(struct pwm_chip *pc)
+static inline struct crystalcove_pwm *to_crc_pwm(struct pwm_chip *chip)
{
- return container_of(pc, struct crystalcove_pwm, chip);
+ return container_of(chip, struct crystalcove_pwm, chip);
}
static int crc_pwm_calc_clk_div(int period_ns)
diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c
index 74e863aa1d8d..baaac0c33aa0 100644
--- a/drivers/pwm/pwm-cros-ec.c
+++ b/drivers/pwm/pwm-cros-ec.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
@@ -37,9 +38,9 @@ struct cros_ec_pwm {
u16 duty_cycle;
};
-static inline struct cros_ec_pwm_device *pwm_to_cros_ec_pwm(struct pwm_chip *c)
+static inline struct cros_ec_pwm_device *pwm_to_cros_ec_pwm(struct pwm_chip *chip)
{
- return container_of(c, struct cros_ec_pwm_device, chip);
+ return container_of(chip, struct cros_ec_pwm_device, chip);
}
static int cros_ec_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
@@ -218,14 +219,14 @@ static int cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
}
static struct pwm_device *
-cros_ec_pwm_xlate(struct pwm_chip *pc, const struct of_phandle_args *args)
+cros_ec_pwm_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
{
struct pwm_device *pwm;
- if (args->args[0] >= pc->npwm)
+ if (args->args[0] >= chip->npwm)
return ERR_PTR(-EINVAL);
- pwm = pwm_request_from_chip(pc, args->args[0], NULL);
+ pwm = pwm_request_from_chip(chip, args->args[0], NULL);
if (IS_ERR(pwm))
return pwm;
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 5caadbd6194e..b7c6045c5d08 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -11,8 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pwm.h>
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index b95df1a96127..f7ba6fe9a349 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -10,7 +10,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/reset.h>
diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c
index 1f2eb1c8ff6c..0651983bed19 100644
--- a/drivers/pwm/pwm-imx1.c
+++ b/drivers/pwm/pwm-imx1.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 3b7067f6cd0d..ef1293f2a897 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -15,7 +15,7 @@
#include <linux/mfd/ingenic-tcu.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 35675e4058c6..4b133a17f4be 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -10,6 +10,7 @@
#include <linux/err.h>
#include <linux/mfd/lp3943.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
@@ -24,9 +25,9 @@ struct lp3943_pwm {
struct lp3943_platform_data *pdata;
};
-static inline struct lp3943_pwm *to_lp3943_pwm(struct pwm_chip *_chip)
+static inline struct lp3943_pwm *to_lp3943_pwm(struct pwm_chip *chip)
{
- return container_of(_chip, struct lp3943_pwm, chip);
+ return container_of(chip, struct lp3943_pwm, chip);
}
static struct lp3943_pwm_map *
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index b9bf5b366f4b..7a19a840bca5 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -22,6 +22,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
@@ -366,30 +367,21 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
if (IS_ERR(lpc18xx_pwm->base))
return PTR_ERR(lpc18xx_pwm->base);
- lpc18xx_pwm->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
+ lpc18xx_pwm->pwm_clk = devm_clk_get_enabled(&pdev->dev, "pwm");
if (IS_ERR(lpc18xx_pwm->pwm_clk))
return dev_err_probe(&pdev->dev, PTR_ERR(lpc18xx_pwm->pwm_clk),
"failed to get pwm clock\n");
- ret = clk_prepare_enable(lpc18xx_pwm->pwm_clk);
- if (ret < 0)
- return dev_err_probe(&pdev->dev, ret,
- "could not prepare or enable pwm clock\n");
-
lpc18xx_pwm->clk_rate = clk_get_rate(lpc18xx_pwm->pwm_clk);
- if (!lpc18xx_pwm->clk_rate) {
- ret = dev_err_probe(&pdev->dev,
- -EINVAL, "pwm clock has no frequency\n");
- goto disable_pwmclk;
- }
+ if (!lpc18xx_pwm->clk_rate)
+ return dev_err_probe(&pdev->dev,
+ -EINVAL, "pwm clock has no frequency\n");
/*
* If clkrate is too fast, the calculations in .apply() might overflow.
*/
- if (lpc18xx_pwm->clk_rate > NSEC_PER_SEC) {
- ret = dev_err_probe(&pdev->dev, -EINVAL, "pwm clock to fast\n");
- goto disable_pwmclk;
- }
+ if (lpc18xx_pwm->clk_rate > NSEC_PER_SEC)
+ return dev_err_probe(&pdev->dev, -EINVAL, "pwm clock too fast\n");
mutex_init(&lpc18xx_pwm->res_lock);
mutex_init(&lpc18xx_pwm->period_lock);
@@ -435,18 +427,12 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL, val);
ret = pwmchip_add(&lpc18xx_pwm->chip);
- if (ret < 0) {
- dev_err_probe(&pdev->dev, ret, "pwmchip_add failed\n");
- goto disable_pwmclk;
- }
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "pwmchip_add failed\n");
platform_set_drvdata(pdev, lpc18xx_pwm);
return 0;
-
-disable_pwmclk:
- clk_disable_unprepare(lpc18xx_pwm->pwm_clk);
- return ret;
}
static void lpc18xx_pwm_remove(struct platform_device *pdev)
@@ -459,8 +445,6 @@ static void lpc18xx_pwm_remove(struct platform_device *pdev)
val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL,
val | LPC18XX_PWM_CTRL_HALT);
-
- clk_disable_unprepare(lpc18xx_pwm->pwm_clk);
}
static struct platform_driver lpc18xx_pwm_driver = {
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 86a0ea0f6955..806f0bb3ad6d 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -51,10 +51,10 @@ static int lpc32xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (duty_cycles > 255)
duty_cycles = 255;
- val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+ val = readl(lpc32xx->base);
val &= ~0xFFFF;
val |= (period_cycles << 8) | duty_cycles;
- writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+ writel(val, lpc32xx->base);
return 0;
}
@@ -69,9 +69,9 @@ static int lpc32xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
if (ret)
return ret;
- val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+ val = readl(lpc32xx->base);
val |= PWM_ENABLE;
- writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+ writel(val, lpc32xx->base);
return 0;
}
@@ -81,9 +81,9 @@ static void lpc32xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
struct lpc32xx_pwm_chip *lpc32xx = to_lpc32xx_pwm_chip(chip);
u32 val;
- val = readl(lpc32xx->base + (pwm->hwpwm << 2));
+ val = readl(lpc32xx->base);
val &= ~PWM_ENABLE;
- writel(val, lpc32xx->base + (pwm->hwpwm << 2));
+ writel(val, lpc32xx->base);
clk_disable_unprepare(lpc32xx->clk);
}
@@ -141,9 +141,9 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
lpc32xx->chip.npwm = 1;
/* If PWM is disabled, configure the output to the default value */
- val = readl(lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
+ val = readl(lpc32xx->base);
val &= ~PWM_PIN_LEVEL;
- writel(val, lpc32xx->base + (lpc32xx->chip.pwms[0].hwpwm << 2));
+ writel(val, lpc32xx->base);
ret = devm_pwmchip_add(&pdev->dev, &lpc32xx->chip);
if (ret < 0) {
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index 7a51d210a877..6adb0ed01906 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 22f54db3ae8e..25519cddc2a9 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -37,7 +37,6 @@
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
diff --git a/drivers/pwm/pwm-microchip-core.c b/drivers/pwm/pwm-microchip-core.c
index 8750b57684a9..e7525c98105e 100644
--- a/drivers/pwm/pwm-microchip-core.c
+++ b/drivers/pwm/pwm-microchip-core.c
@@ -37,7 +37,7 @@
#include <linux/math.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 2401b6733241..a83bd6e18b07 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -11,7 +11,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
diff --git a/drivers/pwm/pwm-ntxec.c b/drivers/pwm/pwm-ntxec.c
index ab63b081df53..7514ea384ec5 100644
--- a/drivers/pwm/pwm-ntxec.c
+++ b/drivers/pwm/pwm-ntxec.c
@@ -24,7 +24,6 @@
#include <linux/types.h>
struct ntxec_pwm {
- struct device *dev;
struct ntxec *ec;
struct pwm_chip chip;
};
@@ -141,14 +140,13 @@ static int ntxec_pwm_probe(struct platform_device *pdev)
struct ntxec_pwm *priv;
struct pwm_chip *chip;
- pdev->dev.of_node = pdev->dev.parent->of_node;
+ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->ec = ec;
- priv->dev = &pdev->dev;
chip = &priv->chip;
chip->dev = &pdev->dev;
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index 762429d5647f..1e475ed10180 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -15,6 +15,7 @@
* input clock (PWMCR_SD is set) and the output is driven to inactive.
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -156,13 +157,6 @@ MODULE_DEVICE_TABLE(of, pwm_of_match);
#define pwm_of_match NULL
#endif
-static const struct platform_device_id *pxa_pwm_get_id_dt(struct device *dev)
-{
- const struct of_device_id *id = of_match_device(pwm_of_match, dev);
-
- return id ? id->data : NULL;
-}
-
static int pwm_probe(struct platform_device *pdev)
{
const struct platform_device_id *id = platform_get_device_id(pdev);
@@ -170,7 +164,7 @@ static int pwm_probe(struct platform_device *pdev)
int ret = 0;
if (IS_ENABLED(CONFIG_OF) && id == NULL)
- id = pxa_pwm_get_id_dt(&pdev->dev);
+ id = of_device_get_match_data(&pdev->dev);
if (id == NULL)
return -EINVAL;
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index c1a1f2d864b5..03ee18fb82d5 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -52,9 +52,9 @@ struct rockchip_pwm_data {
u32 enable_conf;
};
-static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c)
+static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *chip)
{
- return container_of(c, struct rockchip_pwm_chip, chip);
+ return container_of(chip, struct rockchip_pwm_chip, chip);
}
static int rockchip_pwm_get_state(struct pwm_chip *chip,
diff --git a/drivers/pwm/pwm-rz-mtu3.c b/drivers/pwm/pwm-rz-mtu3.c
index bed8bd671e37..a56cecb0e46e 100644
--- a/drivers/pwm/pwm-rz-mtu3.c
+++ b/drivers/pwm/pwm-rz-mtu3.c
@@ -40,7 +40,7 @@
* struct rz_mtu3_channel_io_map - MTU3 pwm channel map
*
* @base_pwm_number: First PWM of a channel
- * @num: number of IOs on the HW channel.
+ * @num_channel_ios: number of IOs on the HW channel.
*/
struct rz_mtu3_channel_io_map {
u8 base_pwm_number;
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index ae49d67ab2b1..eabddb7c7820 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -13,6 +13,7 @@
*/
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
@@ -51,9 +52,9 @@ struct pwm_sifive_ddata {
};
static inline
-struct pwm_sifive_ddata *pwm_sifive_chip_to_ddata(struct pwm_chip *c)
+struct pwm_sifive_ddata *pwm_sifive_chip_to_ddata(struct pwm_chip *chip)
{
- return container_of(c, struct pwm_sifive_ddata, chip);
+ return container_of(chip, struct pwm_sifive_ddata, chip);
}
static int pwm_sifive_request(struct pwm_chip *chip, struct pwm_device *pwm)
diff --git a/drivers/pwm/pwm-sl28cpld.c b/drivers/pwm/pwm-sl28cpld.c
index e64900ad4ba1..9e42e3a74ad6 100644
--- a/drivers/pwm/pwm-sl28cpld.c
+++ b/drivers/pwm/pwm-sl28cpld.c
@@ -38,6 +38,7 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/pwm.h>
#include <linux/regmap.h>
@@ -80,12 +81,15 @@
regmap_write((priv)->regmap, (priv)->offset + (reg), (val))
struct sl28cpld_pwm {
- struct pwm_chip pwm_chip;
+ struct pwm_chip chip;
struct regmap *regmap;
u32 offset;
};
-#define sl28cpld_pwm_from_chip(_chip) \
- container_of(_chip, struct sl28cpld_pwm, pwm_chip)
+
+static inline struct sl28cpld_pwm *sl28cpld_pwm_from_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct sl28cpld_pwm, chip);
+}
static int sl28cpld_pwm_get_state(struct pwm_chip *chip,
struct pwm_device *pwm,
@@ -228,12 +232,12 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
}
/* Initialize the pwm_chip structure */
- chip = &priv->pwm_chip;
+ chip = &priv->chip;
chip->dev = &pdev->dev;
chip->ops = &sl28cpld_pwm_ops;
chip->npwm = 1;
- ret = devm_pwmchip_add(&pdev->dev, &priv->pwm_chip);
+ ret = devm_pwmchip_add(&pdev->dev, chip);
if (ret) {
dev_err(&pdev->dev, "failed to add PWM chip (%pe)",
ERR_PTR(ret));
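Several of the pwm hunks above (kona, crc, cros-ec, sifive, rockchip, sl28cpld) settle on the same chip-to-driver-data helper; sl28cpld additionally turns its macro into a static inline, which type-checks the chip argument. The general shape, shown with a hypothetical sketch_pwm struct:

#include <linux/container_of.h>
#include <linux/pwm.h>

struct sketch_pwm {
        struct pwm_chip chip;
        void *regs;     /* illustrative private state */
};

static inline struct sketch_pwm *to_sketch_pwm(struct pwm_chip *chip)
{
        /* valid because chip is embedded inside struct sketch_pwm */
        return container_of(chip, struct sketch_pwm, chip);
}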
diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c
index d43a6fa3f4e0..1499c8c1fe37 100644
--- a/drivers/pwm/pwm-sprd.c
+++ b/drivers/pwm/pwm-sprd.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c
index 62e397aeb9aa..3d6be7749e23 100644
--- a/drivers/pwm/pwm-stm32.c
+++ b/drivers/pwm/pwm-stm32.c
@@ -637,7 +637,7 @@ static int stm32_pwm_probe(struct platform_device *pdev)
priv->chip.ops = &stm32pwm_ops;
priv->chip.npwm = stm32_pwm_detect_channels(priv);
- ret = pwmchip_add(&priv->chip);
+ ret = devm_pwmchip_add(dev, &priv->chip);
if (ret < 0)
return ret;
@@ -646,17 +646,6 @@ static int stm32_pwm_probe(struct platform_device *pdev)
return 0;
}
-static void stm32_pwm_remove(struct platform_device *pdev)
-{
- struct stm32_pwm *priv = platform_get_drvdata(pdev);
- unsigned int i;
-
- for (i = 0; i < priv->chip.npwm; i++)
- pwm_disable(&priv->chip.pwms[i]);
-
- pwmchip_remove(&priv->chip);
-}
-
static int __maybe_unused stm32_pwm_suspend(struct device *dev)
{
struct stm32_pwm *priv = dev_get_drvdata(dev);
@@ -701,7 +690,6 @@ MODULE_DEVICE_TABLE(of, stm32_pwm_of_match);
static struct platform_driver stm32_pwm_driver = {
.probe = stm32_pwm_probe,
- .remove_new = stm32_pwm_remove,
.driver = {
.name = "stm32-pwm",
.of_match_table = stm32_pwm_of_match,
diff --git a/drivers/pwm/pwm-stmpe.c b/drivers/pwm/pwm-stmpe.c
index 5d4a4762ce0c..e205405c4828 100644
--- a/drivers/pwm/pwm-stmpe.c
+++ b/drivers/pwm/pwm-stmpe.c
@@ -61,8 +61,8 @@ static int stmpe_24xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
return 0;
}
-static void stmpe_24xx_pwm_disable(struct pwm_chip *chip,
- struct pwm_device *pwm)
+static int stmpe_24xx_pwm_disable(struct pwm_chip *chip,
+ struct pwm_device *pwm)
{
struct stmpe_pwm *stmpe_pwm = to_stmpe_pwm(chip);
u8 value;
@@ -72,17 +72,16 @@ static void stmpe_24xx_pwm_disable(struct pwm_chip *chip,
if (ret < 0) {
dev_err(chip->dev, "error reading PWM#%u control\n",
pwm->hwpwm);
- return;
+ return ret;
}
value = ret & ~BIT(pwm->hwpwm);
ret = stmpe_reg_write(stmpe_pwm->stmpe, STMPE24XX_PWMCS, value);
- if (ret) {
+ if (ret)
dev_err(chip->dev, "error writing PWM#%u control\n",
pwm->hwpwm);
- return;
- }
+ return ret;
}
/* STMPE 24xx PWM instructions */
@@ -111,7 +110,9 @@ static int stmpe_24xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
/* Make sure we are disabled */
if (pwm_is_enabled(pwm)) {
- stmpe_24xx_pwm_disable(chip, pwm);
+ ret = stmpe_24xx_pwm_disable(chip, pwm);
+ if (ret)
+ return ret;
} else {
/* Connect the PWM to the pin */
pin = pwm->hwpwm;
@@ -269,7 +270,7 @@ static int stmpe_24xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (!state->enabled) {
if (pwm->state.enabled)
- stmpe_24xx_pwm_disable(chip, pwm);
+ return stmpe_24xx_pwm_disable(chip, pwm);
return 0;
}
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index a8790a8fc53e..c84fcf1a13dc 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -17,7 +17,6 @@
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/reset.h>
diff --git a/drivers/pwm/pwm-sunplus.c b/drivers/pwm/pwm-sunplus.c
index d6ebe9f03b35..7705c7b86c3a 100644
--- a/drivers/pwm/pwm-sunplus.c
+++ b/drivers/pwm/pwm-sunplus.c
@@ -23,6 +23,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 5810abf66e2a..a169a34e0778 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -41,7 +41,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pm_opp.h>
#include <linux/pwm.h>
#include <linux/platform_device.h>
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 109449956307..8c94b266c1b2 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -12,7 +12,7 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
/* ECAP registers and bits definitions */
#define CAP1 0x08
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index bb3959ace6b4..ecbfd7e954ec 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -12,7 +12,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
/* EHRPWM registers and bits definitions */
diff --git a/drivers/pwm/pwm-visconti.c b/drivers/pwm/pwm-visconti.c
index e3fb79b3e2a7..7f7591a2384c 100644
--- a/drivers/pwm/pwm-visconti.c
+++ b/drivers/pwm/pwm-visconti.c
@@ -21,7 +21,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index d2c48fd98706..6d46db51daac 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -6,6 +6,7 @@
* Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
*/
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -18,10 +19,6 @@
#include <asm/div64.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
-
/*
* SoC architecture allocates register space for 4 PWMs but only
* 2 are currently implemented.
diff --git a/drivers/regulator/tps6287x-regulator.c b/drivers/regulator/tps6287x-regulator.c
index d022184a8e7d..9b7c3d77789e 100644
--- a/drivers/regulator/tps6287x-regulator.c
+++ b/drivers/regulator/tps6287x-regulator.c
@@ -119,7 +119,7 @@ static struct regulator_desc tps6287x_reg = {
.ramp_mask = TPS6287X_CTRL1_VRAMP,
.ramp_delay_table = tps6287x_ramp_table,
.n_ramp_values = ARRAY_SIZE(tps6287x_ramp_table),
- .n_voltages = 256,
+ .n_voltages = 256 * ARRAY_SIZE(tps6287x_voltage_ranges),
.linear_ranges = tps6287x_voltage_ranges,
.n_linear_ranges = ARRAY_SIZE(tps6287x_voltage_ranges),
.linear_range_selectors_bitfield = tps6287x_voltage_range_sel,
diff --git a/drivers/regulator/tps6594-regulator.c b/drivers/regulator/tps6594-regulator.c
index 25ef102c8270..b7f0c8779757 100644
--- a/drivers/regulator/tps6594-regulator.c
+++ b/drivers/regulator/tps6594-regulator.c
@@ -384,21 +384,19 @@ static int tps6594_request_reg_irqs(struct platform_device *pdev,
if (irq < 0)
return -EINVAL;
- irq_data[*irq_idx + j].dev = tps->dev;
- irq_data[*irq_idx + j].type = irq_type;
- irq_data[*irq_idx + j].rdev = rdev;
+ irq_data[*irq_idx].dev = tps->dev;
+ irq_data[*irq_idx].type = irq_type;
+ irq_data[*irq_idx].rdev = rdev;
error = devm_request_threaded_irq(tps->dev, irq, NULL,
- tps6594_regulator_irq_handler,
- IRQF_ONESHOT,
- irq_type->irq_name,
- &irq_data[*irq_idx]);
- (*irq_idx)++;
+ tps6594_regulator_irq_handler, IRQF_ONESHOT,
+ irq_type->irq_name, &irq_data[*irq_idx]);
if (error) {
dev_err(tps->dev, "tps6594 failed to request %s IRQ %d: %d\n",
irq_type->irq_name, irq, error);
return error;
}
+ (*irq_idx)++;
}
return 0;
}
@@ -420,8 +418,8 @@ static int tps6594_regulator_probe(struct platform_device *pdev)
int error, i, irq, multi, delta;
int irq_idx = 0;
int buck_idx = 0;
- int ext_reg_irq_nb = 2;
-
+ size_t ext_reg_irq_nb = 2;
+ size_t reg_irq_nb;
enum {
MULTI_BUCK12,
MULTI_BUCK123,
@@ -484,15 +482,16 @@ static int tps6594_regulator_probe(struct platform_device *pdev)
}
}
- if (tps->chip_id == LP8764)
+ if (tps->chip_id == LP8764) {
/* There is only 4 buck on LP8764 */
buck_configured[4] = 1;
+ reg_irq_nb = size_mul(REGS_INT_NB, (BUCK_NB - 1));
+ } else {
+ reg_irq_nb = size_mul(REGS_INT_NB, (size_add(BUCK_NB, LDO_NB)));
+ }
- irq_data = devm_kmalloc_array(tps->dev,
- REGS_INT_NB * sizeof(struct tps6594_regulator_irq_data),
- ARRAY_SIZE(tps6594_bucks_irq_types) +
- ARRAY_SIZE(tps6594_ldos_irq_types),
- GFP_KERNEL);
+ irq_data = devm_kmalloc_array(tps->dev, reg_irq_nb,
+ sizeof(struct tps6594_regulator_irq_data), GFP_KERNEL);
if (!irq_data)
return -ENOMEM;
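The tps6594 allocation above is sized with the saturating helpers from <linux/overflow.h>: size_add() and size_mul() return SIZE_MAX when the arithmetic would wrap, and kmalloc_array-style allocators then refuse the request instead of over-allocating. A small stand-alone sketch (dev, nregs, nbucks and nldos are placeholders for the driver's values):

#include <linux/device.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static void *example_alloc_irq_data(struct device *dev, size_t nregs,
                                    size_t nbucks, size_t nldos,
                                    size_t elem_size)
{
        size_t n = size_mul(nregs, size_add(nbucks, nldos));

        /* a saturated n (SIZE_MAX) makes the allocation fail cleanly */
        return devm_kmalloc_array(dev, n, elem_size, GFP_KERNEL);
}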
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index d95fa5586189..8fcda9b74545 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -12,8 +12,7 @@
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index f9874fc5a80f..8bb293b9f327 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -13,9 +13,9 @@
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
@@ -40,6 +40,12 @@
#define IMX7D_M4_STOP (IMX7D_ENABLE_M4 | IMX7D_SW_M4C_RST | \
IMX7D_SW_M4C_NON_SCLR_RST)
+#define IMX8M_M7_STOP (IMX7D_ENABLE_M4 | IMX7D_SW_M4C_RST)
+#define IMX8M_M7_POLL IMX7D_ENABLE_M4
+
+#define IMX8M_GPR22 0x58
+#define IMX8M_GPR22_CM7_CPUWAIT BIT(0)
+
/* Address: 0x020D8000 */
#define IMX6SX_SRC_SCR 0x00
#define IMX6SX_ENABLE_M4 BIT(22)
@@ -91,6 +97,7 @@ static int imx_rproc_detach_pd(struct rproc *rproc);
struct imx_rproc {
struct device *dev;
struct regmap *regmap;
+ struct regmap *gpr;
struct rproc *rproc;
const struct imx_rproc_dcfg *dcfg;
struct imx_rproc_mem mem[IMX_RPROC_MEM_MAX];
@@ -285,6 +292,18 @@ static const struct imx_rproc_att imx_rproc_att_imx6sx[] = {
{ 0x80000000, 0x80000000, 0x60000000, 0 },
};
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn_mmio = {
+ .src_reg = IMX7D_SRC_SCR,
+ .src_mask = IMX7D_M4_RST_MASK,
+ .src_start = IMX7D_M4_START,
+ .src_stop = IMX8M_M7_STOP,
+ .gpr_reg = IMX8M_GPR22,
+ .gpr_wait = IMX8M_GPR22_CM7_CPUWAIT,
+ .att = imx_rproc_att_imx8mn,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
+ .method = IMX_RPROC_MMIO,
+};
+
static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn = {
.att = imx_rproc_att_imx8mn,
.att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
@@ -365,8 +384,14 @@ static int imx_rproc_start(struct rproc *rproc)
switch (dcfg->method) {
case IMX_RPROC_MMIO:
- ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
- dcfg->src_start);
+ if (priv->gpr) {
+ ret = regmap_clear_bits(priv->gpr, dcfg->gpr_reg,
+ dcfg->gpr_wait);
+ } else {
+ ret = regmap_update_bits(priv->regmap, dcfg->src_reg,
+ dcfg->src_mask,
+ dcfg->src_start);
+ }
break;
case IMX_RPROC_SMC:
arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_START, 0, 0, 0, 0, 0, 0, &res);
@@ -395,6 +420,16 @@ static int imx_rproc_stop(struct rproc *rproc)
switch (dcfg->method) {
case IMX_RPROC_MMIO:
+ if (priv->gpr) {
+ ret = regmap_set_bits(priv->gpr, dcfg->gpr_reg,
+ dcfg->gpr_wait);
+ if (ret) {
+ dev_err(priv->dev,
+ "Failed to quiescence M4 platform!\n");
+ return ret;
+ }
+ }
+
ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
dcfg->src_stop);
break;
@@ -725,13 +760,22 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
return 0;
}
+static int imx_rproc_notified_idr_cb(int id, void *ptr, void *data)
+{
+ struct rproc *rproc = data;
+
+ rproc_vq_interrupt(rproc, id);
+
+ return 0;
+}
+
static void imx_rproc_vq_work(struct work_struct *work)
{
struct imx_rproc *priv = container_of(work, struct imx_rproc,
rproc_work);
+ struct rproc *rproc = priv->rproc;
- rproc_vq_interrupt(priv->rproc, 0);
- rproc_vq_interrupt(priv->rproc, 1);
+ idr_for_each(&rproc->notifyids, imx_rproc_notified_idr_cb, rproc);
}
static void imx_rproc_rx_callback(struct mbox_client *cl, void *msg)
@@ -983,6 +1027,10 @@ static int imx_rproc_detect_mode(struct imx_rproc *priv)
break;
}
+ priv->gpr = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,iomuxc-gpr");
+ if (IS_ERR(priv->gpr))
+ priv->gpr = NULL;
+
regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
if (IS_ERR(regmap)) {
dev_err(dev, "failed to find syscon\n");
@@ -992,6 +1040,19 @@ static int imx_rproc_detect_mode(struct imx_rproc *priv)
priv->regmap = regmap;
regmap_attach_dev(dev, regmap, &config);
+ if (priv->gpr) {
+ ret = regmap_read(priv->gpr, dcfg->gpr_reg, &val);
+ if (val & dcfg->gpr_wait) {
+ /*
+ * After cold boot, the CM indicates it's in wait
+ * state, but not fully powered off. Power it off
+ * fully so firmware can be loaded into it.
+ */
+ imx_rproc_stop(priv->rproc);
+ return 0;
+ }
+ }
+
ret = regmap_read(regmap, dcfg->src_reg, &val);
if (ret) {
dev_err(dev, "Failed to read src\n");
@@ -1133,6 +1194,8 @@ static const struct of_device_id imx_rproc_of_match[] = {
{ .compatible = "fsl,imx8mm-cm4", .data = &imx_rproc_cfg_imx8mq },
{ .compatible = "fsl,imx8mn-cm7", .data = &imx_rproc_cfg_imx8mn },
{ .compatible = "fsl,imx8mp-cm7", .data = &imx_rproc_cfg_imx8mn },
+ { .compatible = "fsl,imx8mn-cm7-mmio", .data = &imx_rproc_cfg_imx8mn_mmio },
+ { .compatible = "fsl,imx8mp-cm7-mmio", .data = &imx_rproc_cfg_imx8mn_mmio },
{ .compatible = "fsl,imx8qxp-cm4", .data = &imx_rproc_cfg_imx8qxp },
{ .compatible = "fsl,imx8qm-cm4", .data = &imx_rproc_cfg_imx8qm },
{ .compatible = "fsl,imx8ulp-cm33", .data = &imx_rproc_cfg_imx8ulp },
diff --git a/drivers/remoteproc/imx_rproc.h b/drivers/remoteproc/imx_rproc.h
index 1c7e2127c758..79a1b8956d14 100644
--- a/drivers/remoteproc/imx_rproc.h
+++ b/drivers/remoteproc/imx_rproc.h
@@ -31,6 +31,8 @@ struct imx_rproc_dcfg {
u32 src_mask;
u32 src_start;
u32 src_stop;
+ u32 gpr_reg;
+ u32 gpr_wait;
const struct imx_rproc_att *att;
size_t att_size;
enum imx_rproc_method method;
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 82ed90f03d91..8f50ab80e56f 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -19,7 +19,8 @@
#include <linux/clk/ti.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
index 2874c8d324f7..327f0c7ee3d6 100644
--- a/drivers/remoteproc/pru_rproc.c
+++ b/drivers/remoteproc/pru_rproc.c
@@ -16,8 +16,9 @@
#include <linux/debugfs.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/remoteproc/pruss.h>
#include <linux/pruss_driver.h>
#include <linux/remoteproc.h>
@@ -109,6 +110,7 @@ struct pru_private_data {
* @dbg_single_step: debug state variable to set PRU into single step mode
* @dbg_continuous: debug state variable to restore PRU execution mode
* @evt_count: number of mapped events
+ * @gpmux_save: saved value for gpmux config
*/
struct pru_rproc {
int id;
@@ -127,6 +129,7 @@ struct pru_rproc {
u32 dbg_single_step;
u32 dbg_continuous;
u8 evt_count;
+ u8 gpmux_save;
};
static inline u32 pru_control_read_reg(struct pru_rproc *pru, unsigned int reg)
@@ -228,6 +231,7 @@ struct rproc *pru_rproc_get(struct device_node *np, int index,
struct device *dev;
const char *fw_name;
int ret;
+ u32 mux;
rproc = __pru_rproc_get(np, index);
if (IS_ERR(rproc))
@@ -252,6 +256,23 @@ struct rproc *pru_rproc_get(struct device_node *np, int index,
if (pru_id)
*pru_id = pru->id;
+ ret = pruss_cfg_get_gpmux(pru->pruss, pru->id, &pru->gpmux_save);
+ if (ret) {
+ dev_err(dev, "failed to get cfg gpmux: %d\n", ret);
+ goto err;
+ }
+
+ /* A missing or unreadable "ti,pruss-gp-mux-sel" property is acceptable for backward compatibility */
+ ret = of_property_read_u32_index(np, "ti,pruss-gp-mux-sel", index,
+ &mux);
+ if (!ret) {
+ ret = pruss_cfg_set_gpmux(pru->pruss, pru->id, mux);
+ if (ret) {
+ dev_err(dev, "failed to set cfg gpmux: %d\n", ret);
+ goto err;
+ }
+ }
+
ret = of_property_read_string_index(np, "firmware-name", index,
&fw_name);
if (!ret) {
@@ -290,6 +311,8 @@ void pru_rproc_put(struct rproc *rproc)
pru = rproc->priv;
+ pruss_cfg_set_gpmux(pru->pruss, pru->id, pru->gpmux_save);
+
pru_rproc_set_firmware(rproc, NULL);
mutex_lock(&pru->lock);
diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c
index a0d4238492e9..03e5f5d533eb 100644
--- a/drivers/remoteproc/qcom_common.c
+++ b/drivers/remoteproc/qcom_common.c
@@ -29,9 +29,9 @@
#define MAX_NUM_OF_SS 10
#define MAX_REGION_NAME_LENGTH 16
#define SBL_MINIDUMP_SMEM_ID 602
-#define MD_REGION_VALID ('V' << 24 | 'A' << 16 | 'L' << 8 | 'I' << 0)
-#define MD_SS_ENCR_DONE ('D' << 24 | 'O' << 16 | 'N' << 8 | 'E' << 0)
-#define MD_SS_ENABLED ('E' << 24 | 'N' << 16 | 'B' << 8 | 'L' << 0)
+#define MINIDUMP_REGION_VALID ('V' << 24 | 'A' << 16 | 'L' << 8 | 'I' << 0)
+#define MINIDUMP_SS_ENCR_DONE ('D' << 24 | 'O' << 16 | 'N' << 8 | 'E' << 0)
+#define MINIDUMP_SS_ENABLED ('E' << 24 | 'N' << 16 | 'B' << 8 | 'L' << 0)
/**
* struct minidump_region - Minidump region
@@ -125,7 +125,7 @@ static int qcom_add_minidump_segments(struct rproc *rproc, struct minidump_subsy
for (i = 0; i < seg_cnt; i++) {
memcpy_fromio(&region, ptr + i, sizeof(region));
- if (le32_to_cpu(region.valid) == MD_REGION_VALID) {
+ if (le32_to_cpu(region.valid) == MINIDUMP_REGION_VALID) {
name = kstrndup(region.name, MAX_REGION_NAME_LENGTH - 1, GFP_KERNEL);
if (!name) {
iounmap(ptr);
@@ -168,12 +168,21 @@ void qcom_minidump(struct rproc *rproc, unsigned int minidump_id,
*/
if (subsystem->regions_baseptr == 0 ||
le32_to_cpu(subsystem->status) != 1 ||
- le32_to_cpu(subsystem->enabled) != MD_SS_ENABLED ||
- le32_to_cpu(subsystem->encryption_status) != MD_SS_ENCR_DONE) {
+ le32_to_cpu(subsystem->enabled) != MINIDUMP_SS_ENABLED) {
+ return rproc_coredump(rproc);
+ }
+
+ if (le32_to_cpu(subsystem->encryption_status) != MINIDUMP_SS_ENCR_DONE) {
dev_err(&rproc->dev, "Minidump not ready, skipping\n");
return;
}
+ /*
+ * Clear out the dump segments populated by parse_fw before
+ * re-populating them with minidump segments.
+ */
+ rproc_coredump_cleanup(rproc);
+
ret = qcom_add_minidump_segments(rproc, subsystem, rproc_dumpfn_t);
if (ret) {
dev_err(&rproc->dev, "Failed with error: %d while adding minidump entries\n", ret);
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 6777a3bd6226..6c67514cc493 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -14,8 +14,8 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -637,28 +637,26 @@ static int adsp_init_mmio(struct qcom_adsp *adsp,
static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
{
+ struct reserved_mem *rmem = NULL;
struct device_node *node;
- struct resource r;
- int ret;
node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0);
- if (!node) {
- dev_err(adsp->dev, "no memory-region specified\n");
+ if (node)
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+
+ if (!rmem) {
+ dev_err(adsp->dev, "unable to resolve memory-region\n");
return -EINVAL;
}
- ret = of_address_to_resource(node, 0, &r);
- of_node_put(node);
- if (ret)
- return ret;
-
- adsp->mem_phys = adsp->mem_reloc = r.start;
- adsp->mem_size = resource_size(&r);
+ adsp->mem_phys = adsp->mem_reloc = rmem->base;
+ adsp->mem_size = rmem->size;
adsp->mem_region = devm_ioremap_wc(adsp->dev,
adsp->mem_phys, adsp->mem_size);
if (!adsp->mem_region) {
dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n",
- &r.start, adsp->mem_size);
+ &rmem->base, adsp->mem_size);
return -EBUSY;
}
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 70bffc9f33f6..22fe7b5f5236 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -15,9 +15,9 @@
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_reserved_mem.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -71,6 +71,7 @@
#define QDSP6SS_MEM_PWR_CTL 0x0B0
#define QDSP6V6SS_MEM_PWR_CTL 0x034
#define QDSP6SS_STRAP_ACC 0x110
+#define QDSP6V62SS_BHS_STATUS 0x0C4
/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG 0x0
@@ -123,6 +124,7 @@
#define QDSP6v56_CLAMP_QMC_MEM BIT(22)
#define QDSP6SS_XO_CBCR 0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
+#define QDSP6v55_BHS_EN_REST_ACK BIT(0)
/* QDSP6v65 parameters */
#define QDSP6SS_CORE_CBCR 0x20
@@ -130,6 +132,7 @@
#define QDSP6SS_BOOT_CORE_START 0x400
#define QDSP6SS_BOOT_CMD 0x404
#define BOOT_FSM_TIMEOUT 10000
+#define BHS_CHECK_MAX_LOOPS 200
struct reg_info {
struct regulator *reg;
@@ -250,6 +253,7 @@ enum {
MSS_MSM8998,
MSS_SC7180,
MSS_SC7280,
+ MSS_SDM660,
MSS_SDM845,
};
@@ -700,7 +704,8 @@ static int q6v5proc_reset(struct q6v5 *qproc)
} else if (qproc->version == MSS_MSM8909 ||
qproc->version == MSS_MSM8953 ||
qproc->version == MSS_MSM8996 ||
- qproc->version == MSS_MSM8998) {
+ qproc->version == MSS_MSM8998 ||
+ qproc->version == MSS_SDM660) {
if (qproc->version != MSS_MSM8909 &&
qproc->version != MSS_MSM8953)
@@ -734,6 +739,16 @@ static int q6v5proc_reset(struct q6v5 *qproc)
val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
udelay(1);
+ if (qproc->version == MSS_SDM660) {
+ ret = readl_relaxed_poll_timeout(qproc->reg_base + QDSP6V62SS_BHS_STATUS,
+ i, (i & QDSP6v55_BHS_EN_REST_ACK),
+ 1, BHS_CHECK_MAX_LOOPS);
+ if (ret == -ETIMEDOUT) {
+ dev_err(qproc->dev, "BHS_EN_REST_ACK not set!\n");
+ return -ETIMEDOUT;
+ }
+ }
+
/* Put LDO in bypass mode */
val |= QDSP6v56_LDO_BYP;
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
@@ -756,7 +771,7 @@ static int q6v5proc_reset(struct q6v5 *qproc)
mem_pwr_ctl = QDSP6SS_MEM_PWR_CTL;
i = 19;
} else {
- /* MSS_MSM8998 */
+ /* MSS_MSM8998, MSS_SDM660 */
mem_pwr_ctl = QDSP6V6SS_MEM_PWR_CTL;
i = 28;
}
@@ -1875,8 +1890,6 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
struct device_node *child;
struct reserved_mem *rmem;
struct device_node *node;
- struct resource r;
- int ret;
/*
* In the absence of mba/mpss sub-child, extract the mba and mpss
@@ -1891,15 +1904,20 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
of_node_put(child);
}
- ret = of_address_to_resource(node, 0, &r);
+ if (!node) {
+ dev_err(qproc->dev, "no mba memory-region specified\n");
+ return -EINVAL;
+ }
+
+ rmem = of_reserved_mem_lookup(node);
of_node_put(node);
- if (ret) {
+ if (!rmem) {
dev_err(qproc->dev, "unable to resolve mba region\n");
- return ret;
+ return -EINVAL;
}
- qproc->mba_phys = r.start;
- qproc->mba_size = resource_size(&r);
+ qproc->mba_phys = rmem->base;
+ qproc->mba_size = rmem->size;
if (!child) {
node = of_parse_phandle(qproc->dev->of_node,
@@ -1910,15 +1928,20 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
of_node_put(child);
}
- ret = of_address_to_resource(node, 0, &r);
+ if (!node) {
+ dev_err(qproc->dev, "no mpss memory-region specified\n");
+ return -EINVAL;
+ }
+
+ rmem = of_reserved_mem_lookup(node);
of_node_put(node);
- if (ret) {
+ if (!rmem) {
dev_err(qproc->dev, "unable to resolve mpss region\n");
- return ret;
+ return -EINVAL;
}
- qproc->mpss_phys = qproc->mpss_reloc = r.start;
- qproc->mpss_size = resource_size(&r);
+ qproc->mpss_phys = qproc->mpss_reloc = rmem->base;
+ qproc->mpss_size = rmem->size;
if (!child) {
node = of_parse_phandle(qproc->dev->of_node, "memory-region", 2);
@@ -2191,6 +2214,37 @@ static const struct rproc_hexagon_res sc7280_mss = {
.version = MSS_SC7280,
};
+static const struct rproc_hexagon_res sdm660_mss = {
+ .hexagon_mba_image = "mba.mbn",
+ .proxy_clk_names = (char*[]){
+ "xo",
+ "qdss",
+ "mem",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "gpll0_mss",
+ "mnoc_axi",
+ "snoc_axi",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ NULL
+ },
+ .need_mem_protection = true,
+ .has_alt_reset = false,
+ .has_mba_logs = false,
+ .has_spare_reg = false,
+ .has_qaccept_regs = false,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
+ .version = MSS_SDM660,
+};
+
static const struct rproc_hexagon_res sdm845_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_clk_names = (char*[]){
@@ -2473,6 +2527,7 @@ static const struct of_device_id q6v5_of_match[] = {
{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
{ .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
{ .compatible = "qcom,sc7280-mss-pil", .data = &sc7280_mss},
+ { .compatible = "qcom,sdm660-mss-pil", .data = &sdm660_mss},
{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
{ },
};
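Roughly what the readl_relaxed_poll_timeout() call added for SDM660 above boils down to (illustrative only; the real helper in <linux/iopoll.h> also does ktime bookkeeping and a final re-read after the deadline). The constants are the ones defined in this patch: the register is polled every microsecond for at most BHS_CHECK_MAX_LOOPS microseconds.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Poll the BHS status register until the reset ack bit is set, ~200us max */
static int q6v5_wait_bhs_ack(void __iomem *reg_base)
{
	unsigned int us;
	u32 val;

	for (us = 0; us < BHS_CHECK_MAX_LOOPS; us++) {
		val = readl_relaxed(reg_base + QDSP6V62SS_BHS_STATUS);
		if (val & QDSP6v55_BHS_EN_REST_ACK)
			return 0;
		udelay(1);
	}

	return -ETIMEDOUT;
}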
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 3153d82037e7..b5447dd2dd35 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -13,8 +13,9 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -425,6 +426,7 @@ static const struct rproc_ops adsp_minidump_ops = {
.start = adsp_start,
.stop = adsp_stop,
.da_to_va = adsp_da_to_va,
+ .parse_fw = qcom_register_dump_segments,
.load = adsp_load,
.panic = adsp_panic,
.coredump = adsp_minidump,
@@ -533,9 +535,8 @@ static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds,
static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
{
+ struct reserved_mem *rmem;
struct device_node *node;
- struct resource r;
- int ret;
node = of_parse_phandle(adsp->dev->of_node, "memory-region", 0);
if (!node) {
@@ -543,17 +544,19 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
return -EINVAL;
}
- ret = of_address_to_resource(node, 0, &r);
+ rmem = of_reserved_mem_lookup(node);
of_node_put(node);
- if (ret)
- return ret;
+ if (!rmem) {
+ dev_err(adsp->dev, "unable to resolve memory-region\n");
+ return -EINVAL;
+ }
- adsp->mem_phys = adsp->mem_reloc = r.start;
- adsp->mem_size = resource_size(&r);
+ adsp->mem_phys = adsp->mem_reloc = rmem->base;
+ adsp->mem_size = rmem->size;
adsp->mem_region = devm_ioremap_wc(adsp->dev, adsp->mem_phys, adsp->mem_size);
if (!adsp->mem_region) {
dev_err(adsp->dev, "unable to map memory region: %pa+%zx\n",
- &r.start, adsp->mem_size);
+ &rmem->base, adsp->mem_size);
return -EBUSY;
}
@@ -566,16 +569,19 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
return -EINVAL;
}
- ret = of_address_to_resource(node, 0, &r);
- if (ret)
- return ret;
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+ if (!rmem) {
+ dev_err(adsp->dev, "unable to resolve dtb memory-region\n");
+ return -EINVAL;
+ }
- adsp->dtb_mem_phys = adsp->dtb_mem_reloc = r.start;
- adsp->dtb_mem_size = resource_size(&r);
+ adsp->dtb_mem_phys = adsp->dtb_mem_reloc = rmem->base;
+ adsp->dtb_mem_size = rmem->size;
adsp->dtb_mem_region = devm_ioremap_wc(adsp->dev, adsp->dtb_mem_phys, adsp->dtb_mem_size);
if (!adsp->dtb_mem_region) {
dev_err(adsp->dev, "unable to map dtb memory region: %pa+%zx\n",
- &r.start, adsp->dtb_mem_size);
+ &rmem->base, adsp->dtb_mem_size);
return -EBUSY;
}
@@ -584,29 +590,28 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
static int adsp_assign_memory_region(struct qcom_adsp *adsp)
{
+ struct reserved_mem *rmem = NULL;
struct qcom_scm_vmperm perm;
struct device_node *node;
- struct resource r;
int ret;
if (!adsp->region_assign_idx)
return 0;
node = of_parse_phandle(adsp->dev->of_node, "memory-region", adsp->region_assign_idx);
- if (!node) {
- dev_err(adsp->dev, "missing shareable memory-region\n");
+ if (node)
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+ if (!rmem) {
+ dev_err(adsp->dev, "unable to resolve shareable memory-region\n");
return -EINVAL;
}
- ret = of_address_to_resource(node, 0, &r);
- if (ret)
- return ret;
-
perm.vmid = QCOM_SCM_VMID_MSS_MSA;
perm.perm = QCOM_SCM_PERM_RW;
- adsp->region_assign_phys = r.start;
- adsp->region_assign_size = resource_size(&r);
+ adsp->region_assign_phys = rmem->base;
+ adsp->region_assign_size = rmem->size;
adsp->region_assign_perms = BIT(QCOM_SCM_VMID_HLOS);
ret = qcom_scm_assign_mem(adsp->region_assign_phys,
@@ -1012,7 +1017,7 @@ static const struct adsp_data sc8180x_mpss_resource = {
.ssctl_id = 0x12,
};
-static const struct adsp_data slpi_resource_init = {
+static const struct adsp_data msm8996_slpi_resource_init = {
.crash_reason_smem = 424,
.firmware_name = "slpi.mdt",
.pas_id = 12,
@@ -1026,7 +1031,7 @@ static const struct adsp_data slpi_resource_init = {
.ssctl_id = 0x16,
};
-static const struct adsp_data sm8150_slpi_resource = {
+static const struct adsp_data sdm845_slpi_resource_init = {
.crash_reason_smem = 424,
.firmware_name = "slpi.mdt",
.pas_id = 12,
@@ -1042,38 +1047,6 @@ static const struct adsp_data sm8150_slpi_resource = {
.ssctl_id = 0x16,
};
-static const struct adsp_data sm8250_slpi_resource = {
- .crash_reason_smem = 424,
- .firmware_name = "slpi.mdt",
- .pas_id = 12,
- .auto_boot = true,
- .proxy_pd_names = (char*[]){
- "lcx",
- "lmx",
- NULL
- },
- .load_state = "slpi",
- .ssr_name = "dsps",
- .sysmon_name = "slpi",
- .ssctl_id = 0x16,
-};
-
-static const struct adsp_data sm8350_slpi_resource = {
- .crash_reason_smem = 424,
- .firmware_name = "slpi.mdt",
- .pas_id = 12,
- .auto_boot = true,
- .proxy_pd_names = (char*[]){
- "lcx",
- "lmx",
- NULL
- },
- .load_state = "slpi",
- .ssr_name = "dsps",
- .sysmon_name = "slpi",
- .ssctl_id = 0x16,
-};
-
static const struct adsp_data wcss_resource_init = {
.crash_reason_smem = 421,
.firmware_name = "wcnss.mdt",
@@ -1182,9 +1155,9 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8996-adsp-pil", .data = &msm8996_adsp_resource},
- { .compatible = "qcom,msm8996-slpi-pil", .data = &slpi_resource_init},
+ { .compatible = "qcom,msm8996-slpi-pil", .data = &msm8996_slpi_resource_init},
{ .compatible = "qcom,msm8998-adsp-pas", .data = &msm8996_adsp_resource},
- { .compatible = "qcom,msm8998-slpi-pas", .data = &slpi_resource_init},
+ { .compatible = "qcom,msm8998-slpi-pas", .data = &msm8996_slpi_resource_init},
{ .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
{ .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
{ .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
@@ -1199,6 +1172,7 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sdm660-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sdm845-adsp-pas", .data = &sdm845_adsp_resource_init},
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init},
+ { .compatible = "qcom,sdm845-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource},
{ .compatible = "qcom,sm6115-adsp-pas", .data = &adsp_resource_init},
{ .compatible = "qcom,sm6115-cdsp-pas", .data = &cdsp_resource_init},
@@ -1209,17 +1183,17 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
{ .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
{ .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},
- { .compatible = "qcom,sm8150-slpi-pas", .data = &sm8150_slpi_resource},
+ { .compatible = "qcom,sm8150-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sm8250-adsp-pas", .data = &sm8250_adsp_resource},
{ .compatible = "qcom,sm8250-cdsp-pas", .data = &sm8250_cdsp_resource},
- { .compatible = "qcom,sm8250-slpi-pas", .data = &sm8250_slpi_resource},
+ { .compatible = "qcom,sm8250-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sm8350-adsp-pas", .data = &sm8350_adsp_resource},
{ .compatible = "qcom,sm8350-cdsp-pas", .data = &sm8350_cdsp_resource},
- { .compatible = "qcom,sm8350-slpi-pas", .data = &sm8350_slpi_resource},
+ { .compatible = "qcom,sm8350-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sm8350-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sm8450-adsp-pas", .data = &sm8350_adsp_resource},
{ .compatible = "qcom,sm8450-cdsp-pas", .data = &sm8350_cdsp_resource},
- { .compatible = "qcom,sm8450-slpi-pas", .data = &sm8350_slpi_resource},
+ { .compatible = "qcom,sm8450-slpi-pas", .data = &sdm845_slpi_resource_init},
{ .compatible = "qcom,sm8450-mpss-pas", .data = &sm8450_mpss_resource},
{ .compatible = "qcom,sm8550-adsp-pas", .data = &sm8550_adsp_resource},
{ .compatible = "qcom,sm8550-cdsp-pas", .data = &sm8550_cdsp_resource},
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index b437044aa126..cff1fa07d1de 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -837,8 +837,7 @@ static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss,
return -ENOMEM;
if (wcss->version == WCSS_IPQ8074) {
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
- wcss->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+ wcss->rmb_base = devm_platform_ioremap_resource_byname(pdev, "rmb");
if (IS_ERR(wcss->rmb_base))
return PTR_ERR(wcss->rmb_base);
}
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index 746f56b4bafb..c24e4a882873 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -9,7 +9,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/remoteproc/qcom_rproc.h>
#include <linux/rpmsg.h>
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 1ed0647bc962..90de22c81da9 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -14,8 +14,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -506,27 +506,25 @@ static int wcnss_request_irq(struct qcom_wcnss *wcnss,
static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
{
+ struct reserved_mem *rmem = NULL;
struct device_node *node;
- struct resource r;
- int ret;
node = of_parse_phandle(wcnss->dev->of_node, "memory-region", 0);
- if (!node) {
- dev_err(wcnss->dev, "no memory-region specified\n");
+ if (node)
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+
+ if (!rmem) {
+ dev_err(wcnss->dev, "unable to resolve memory-region\n");
return -EINVAL;
}
- ret = of_address_to_resource(node, 0, &r);
- of_node_put(node);
- if (ret)
- return ret;
-
- wcnss->mem_phys = wcnss->mem_reloc = r.start;
- wcnss->mem_size = resource_size(&r);
+ wcnss->mem_phys = wcnss->mem_reloc = rmem->base;
+ wcnss->mem_size = rmem->size;
wcnss->mem_region = devm_ioremap_wc(wcnss->dev, wcnss->mem_phys, wcnss->mem_size);
if (!wcnss->mem_region) {
dev_err(wcnss->dev, "unable to map memory region: %pa+%zx\n",
- &r.start, wcnss->mem_size);
+ &rmem->base, wcnss->mem_size);
return -EBUSY;
}
@@ -538,7 +536,6 @@ static int wcnss_probe(struct platform_device *pdev)
const char *fw_name = WCNSS_FIRMWARE_NAME;
const struct wcnss_data *data;
struct qcom_wcnss *wcnss;
- struct resource *res;
struct rproc *rproc;
void __iomem *mmio;
int ret;
@@ -576,8 +573,7 @@ static int wcnss_probe(struct platform_device *pdev)
mutex_init(&wcnss->iris_lock);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pmu");
- mmio = devm_ioremap_resource(&pdev->dev, res);
+ mmio = devm_platform_ioremap_resource_byname(pdev, "pmu");
if (IS_ERR(mmio)) {
ret = PTR_ERR(mmio);
goto free_rproc;
diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
index 09720ddddc85..dd36fd077911 100644
--- a/drivers/remoteproc/qcom_wcnss_iris.c
+++ b/drivers/remoteproc/qcom_wcnss_iris.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/remoteproc/rcar_rproc.c b/drivers/remoteproc/rcar_rproc.c
index 90e8769d5624..cc17e8421f65 100644
--- a/drivers/remoteproc/rcar_rproc.c
+++ b/drivers/remoteproc/rcar_rproc.c
@@ -5,8 +5,9 @@
#include <linux/limits.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c
index bc0e1603a7a3..6ede8c0c93ad 100644
--- a/drivers/remoteproc/remoteproc_coredump.c
+++ b/drivers/remoteproc/remoteproc_coredump.c
@@ -32,6 +32,7 @@ void rproc_coredump_cleanup(struct rproc *rproc)
kfree(entry);
}
}
+EXPORT_SYMBOL_GPL(rproc_coredump_cleanup);
/**
* rproc_coredump_add_segment() - add segment of device memory to coredump
@@ -327,6 +328,7 @@ void rproc_coredump(struct rproc *rproc)
*/
wait_for_completion(&dump_state.dump_done);
}
+EXPORT_SYMBOL_GPL(rproc_coredump);
/**
* rproc_coredump_using_sections() - perform coredump using section headers
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index d4dbb8d1d80c..f62a82d71dfa 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -76,10 +76,6 @@ extern struct class rproc_class;
int rproc_init_sysfs(void);
void rproc_exit_sysfs(void);
-/* from remoteproc_coredump.c */
-void rproc_coredump_cleanup(struct rproc *rproc);
-void rproc_coredump(struct rproc *rproc);
-
#ifdef CONFIG_REMOTEPROC_CDEV
void rproc_init_cdev(void);
void rproc_exit_cdev(void);
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
index 4ed9467897e5..d17719384c16 100644
--- a/drivers/remoteproc/st_slim_rproc.c
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/remoteproc.h>
#include <linux/remoteproc/st_slim_rproc.h>
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index cf073bac79f7..9d9b13530f78 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -12,9 +12,9 @@
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
@@ -921,7 +921,7 @@ static void stm32_rproc_remove(struct platform_device *pdev)
rproc_free(rproc);
}
-static int __maybe_unused stm32_rproc_suspend(struct device *dev)
+static int stm32_rproc_suspend(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
struct stm32_rproc *ddata = rproc->priv;
@@ -932,7 +932,7 @@ static int __maybe_unused stm32_rproc_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused stm32_rproc_resume(struct device *dev)
+static int stm32_rproc_resume(struct device *dev)
{
struct rproc *rproc = dev_get_drvdata(dev);
struct stm32_rproc *ddata = rproc->priv;
@@ -943,16 +943,16 @@ static int __maybe_unused stm32_rproc_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
- stm32_rproc_suspend, stm32_rproc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(stm32_rproc_pm_ops,
+ stm32_rproc_suspend, stm32_rproc_resume);
static struct platform_driver stm32_rproc_driver = {
.probe = stm32_rproc_probe,
.remove_new = stm32_rproc_remove,
.driver = {
.name = "stm32-rproc",
- .pm = &stm32_rproc_pm_ops,
- .of_match_table = of_match_ptr(stm32_rproc_match),
+ .pm = pm_ptr(&stm32_rproc_pm_ops),
+ .of_match_table = stm32_rproc_match,
},
};
module_platform_driver(stm32_rproc_driver);
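The stm32 change follows the generic pattern sketched below (illustrative, not part of this patch): DEFINE_SIMPLE_DEV_PM_OPS() keeps the callbacks referenced from the ops table even when CONFIG_PM is disabled, and pm_ptr() lets the compiler discard the whole thing, so the __maybe_unused annotations are no longer needed.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev) { return 0; }
static int demo_resume(struct device *dev) { return 0; }

/* Always defined; the callbacks stay referenced, so no __maybe_unused is needed */
static DEFINE_SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo-rproc",
		/* pm_ptr() compiles to NULL when CONFIG_PM is off */
		.pm = pm_ptr(&demo_pm_ops),
	},
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");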
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index ec626a37fef6..ef8415a7cd54 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -9,7 +9,7 @@
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_reserved_mem.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 23fe44d4d7a5..ad3415a3851b 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -12,9 +12,10 @@
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
+#include <linux/of_platform.h>
#include <linux/omap-mailbox.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index 120dc7d2dac1..36a55f7ffa64 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -12,7 +12,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 1beb40a1d3df..82d460ff4777 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -200,9 +200,15 @@ static const struct rpmsg_endpoint_ops glink_endpoint_ops;
#define GLINK_CMD_TX_DATA_CONT 12
#define GLINK_CMD_READ_NOTIF 13
#define GLINK_CMD_RX_DONE_W_REUSE 14
+#define GLINK_CMD_SIGNALS 15
#define GLINK_FEATURE_INTENTLESS BIT(1)
+#define NATIVE_DTR_SIG NATIVE_DSR_SIG
+#define NATIVE_DSR_SIG BIT(31)
+#define NATIVE_RTS_SIG NATIVE_CTS_SIG
+#define NATIVE_CTS_SIG BIT(30)
+
static void qcom_glink_rx_done_work(struct work_struct *work);
static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
@@ -221,6 +227,10 @@ static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
channel->glink = glink;
channel->name = kstrdup(name, GFP_KERNEL);
+ if (!channel->name) {
+ kfree(channel);
+ return ERR_PTR(-ENOMEM);
+ }
init_completion(&channel->open_req);
init_completion(&channel->open_ack);
@@ -1025,6 +1035,52 @@ static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
return 0;
}
+/**
+ * qcom_glink_set_flow_control() - request the remote side to pause or resume transmission
+ * @ept: rpmsg endpoint for the channel
+ * @pause: true to ask the remote to pause transmission
+ * @dst: destination address of the endpoint
+ *
+ * Return: 0 on success or a negative errno on failure.
+ */
+static int qcom_glink_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst)
+{
+ struct glink_channel *channel = to_glink_channel(ept);
+ struct qcom_glink *glink = channel->glink;
+ struct glink_msg msg;
+ u32 sigs = 0;
+
+ if (pause)
+ sigs |= NATIVE_DTR_SIG | NATIVE_RTS_SIG;
+
+ msg.cmd = cpu_to_le16(GLINK_CMD_SIGNALS);
+ msg.param1 = cpu_to_le16(channel->lcid);
+ msg.param2 = cpu_to_le32(sigs);
+
+ return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
+}
+
+static void qcom_glink_handle_signals(struct qcom_glink *glink,
+ unsigned int rcid, unsigned int sigs)
+{
+ struct glink_channel *channel;
+ unsigned long flags;
+ bool enable;
+
+ spin_lock_irqsave(&glink->idr_lock, flags);
+ channel = idr_find(&glink->rcids, rcid);
+ spin_unlock_irqrestore(&glink->idr_lock, flags);
+ if (!channel) {
+ dev_err(glink->dev, "signal for non-existing channel\n");
+ return;
+ }
+
+ enable = sigs & NATIVE_DSR_SIG || sigs & NATIVE_CTS_SIG;
+
+ if (channel->ept.flow_cb)
+ channel->ept.flow_cb(channel->ept.rpdev, channel->ept.priv, enable);
+}
+
void qcom_glink_native_rx(struct qcom_glink *glink)
{
struct glink_msg msg;
@@ -1086,6 +1142,10 @@ void qcom_glink_native_rx(struct qcom_glink *glink)
qcom_glink_handle_intent_req_ack(glink, param1, param2);
qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
break;
+ case GLINK_CMD_SIGNALS:
+ qcom_glink_handle_signals(glink, param1, param2);
+ qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
+ break;
default:
dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd);
ret = -EINVAL;
@@ -1446,6 +1506,7 @@ static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
.sendto = qcom_glink_sendto,
.trysend = qcom_glink_trysend,
.trysendto = qcom_glink_trysendto,
+ .set_flow_control = qcom_glink_set_flow_control,
};
static void qcom_glink_rpdev_release(struct device *dev)
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index a271fceb16f4..09833ad05da7 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -52,6 +52,8 @@ static DEFINE_IDA(rpmsg_minor_ida);
* @readq: wait object for incoming queue
* @default_ept: set to channel default endpoint if the default endpoint should be re-used
* on device open to prevent endpoint address update.
* @remote_flow_restricted: true if the remote has requested that flow be restricted
* @remote_flow_updated: true if a flow-control notification has been received and not yet read
*/
struct rpmsg_eptdev {
struct device dev;
@@ -68,6 +70,8 @@ struct rpmsg_eptdev {
struct sk_buff_head queue;
wait_queue_head_t readq;
+ bool remote_flow_restricted;
+ bool remote_flow_updated;
};
int rpmsg_chrdev_eptdev_destroy(struct device *dev, void *data)
@@ -116,6 +120,18 @@ static int rpmsg_ept_cb(struct rpmsg_device *rpdev, void *buf, int len,
return 0;
}
+static int rpmsg_ept_flow_cb(struct rpmsg_device *rpdev, void *priv, bool enable)
+{
+ struct rpmsg_eptdev *eptdev = priv;
+
+ eptdev->remote_flow_restricted = enable;
+ eptdev->remote_flow_updated = true;
+
+ wake_up_interruptible(&eptdev->readq);
+
+ return 0;
+}
+
static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
{
struct rpmsg_eptdev *eptdev = cdev_to_eptdev(inode->i_cdev);
@@ -152,6 +168,7 @@ static int rpmsg_eptdev_open(struct inode *inode, struct file *filp)
return -EINVAL;
}
+ ept->flow_cb = rpmsg_ept_flow_cb;
eptdev->ept = ept;
filp->private_data = eptdev;
mutex_unlock(&eptdev->ept_lock);
@@ -172,6 +189,7 @@ static int rpmsg_eptdev_release(struct inode *inode, struct file *filp)
eptdev->ept = NULL;
}
mutex_unlock(&eptdev->ept_lock);
+ eptdev->remote_flow_updated = false;
/* Discard all SKBs */
skb_queue_purge(&eptdev->queue);
@@ -285,6 +303,9 @@ static __poll_t rpmsg_eptdev_poll(struct file *filp, poll_table *wait)
if (!skb_queue_empty(&eptdev->queue))
mask |= EPOLLIN | EPOLLRDNORM;
+ if (eptdev->remote_flow_updated)
+ mask |= EPOLLPRI;
+
mutex_lock(&eptdev->ept_lock);
mask |= rpmsg_poll(eptdev->ept, filp, wait);
mutex_unlock(&eptdev->ept_lock);
@@ -297,14 +318,35 @@ static long rpmsg_eptdev_ioctl(struct file *fp, unsigned int cmd,
{
struct rpmsg_eptdev *eptdev = fp->private_data;
- if (cmd != RPMSG_DESTROY_EPT_IOCTL)
- return -EINVAL;
+ bool set;
+ int ret;
- /* Don't allow to destroy a default endpoint. */
- if (eptdev->default_ept)
- return -EINVAL;
+ switch (cmd) {
+ case RPMSG_GET_OUTGOING_FLOWCONTROL:
+ eptdev->remote_flow_updated = false;
+ ret = put_user(eptdev->remote_flow_restricted, (int __user *)arg);
+ break;
+ case RPMSG_SET_INCOMING_FLOWCONTROL:
+ if (arg > 1) {
+ ret = -EINVAL;
+ break;
+ }
+ set = !!arg;
+ ret = rpmsg_set_flow_control(eptdev->ept, set, eptdev->chinfo.dst);
+ break;
+ case RPMSG_DESTROY_EPT_IOCTL:
+ /* Don't allow to destroy a default endpoint. */
+ if (eptdev->default_ept) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = rpmsg_chrdev_eptdev_destroy(&eptdev->dev, NULL);
+ break;
+ default:
+ ret = -EINVAL;
+ }
- return rpmsg_chrdev_eptdev_destroy(&eptdev->dev, NULL);
+ return ret;
}
static const struct file_operations rpmsg_eptdev_fops = {
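A minimal user-space sketch of the new ioctls (illustrative; it assumes the RPMSG_GET_OUTGOING_FLOWCONTROL / RPMSG_SET_INCOMING_FLOWCONTROL definitions from the uapi <linux/rpmsg.h> added elsewhere in this series, and a hypothetical /dev/rpmsg0 endpoint node):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rpmsg.h>

int main(void)
{
	struct pollfd pfd;
	int restricted = 0;
	int fd;

	fd = open("/dev/rpmsg0", O_RDWR);	/* hypothetical endpoint node */
	if (fd < 0)
		return 1;

	/* Ask the remote side to stop sending to this endpoint */
	if (ioctl(fd, RPMSG_SET_INCOMING_FLOWCONTROL, 1))
		perror("RPMSG_SET_INCOMING_FLOWCONTROL");

	/* POLLPRI is raised when the remote updates our outgoing flow state */
	pfd.fd = fd;
	pfd.events = POLLPRI;
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLPRI)) {
		if (!ioctl(fd, RPMSG_GET_OUTGOING_FLOWCONTROL, &restricted))
			printf("outgoing flow restricted: %d\n", restricted);
	}

	close(fd);
	return 0;
}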
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 5039df757127..32b550c91d9f 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -331,6 +331,25 @@ int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
EXPORT_SYMBOL(rpmsg_trysend_offchannel);
/**
+ * rpmsg_set_flow_control() - request the remote to pause/resume transmission
+ * @ept: the rpmsg endpoint
+ * @pause: true to ask the remote to pause transmission, false to resume
+ * @dst: destination address of the endpoint
+ *
+ * Return: 0 on success and an appropriate error value on failure.
+ */
+int rpmsg_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst)
+{
+ if (WARN_ON(!ept))
+ return -EINVAL;
+ if (!ept->ops->set_flow_control)
+ return -EOPNOTSUPP;
+
+ return ept->ops->set_flow_control(ept, pause, dst);
+}
+EXPORT_SYMBOL_GPL(rpmsg_set_flow_control);
+
+/**
* rpmsg_get_mtu() - get maximum transmission buffer size for sending message.
* @ept: the rpmsg endpoint
*
@@ -539,6 +558,8 @@ static int rpmsg_dev_probe(struct device *dev)
rpdev->ept = ept;
rpdev->src = ept->addr;
+
+ ept->flow_cb = rpdrv->flowcontrol;
}
err = rpdrv->probe(rpdev);
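On the kernel side, a client driver consumes this through the flowcontrol hook that rpmsg_dev_probe() now wires into the endpoint. A minimal sketch (illustrative only; the struct rpmsg_driver member is assumed from elsewhere in this series, and the driver/channel names are made up):

#include <linux/module.h>
#include <linux/rpmsg.h>

/* Called when the remote announces a flow-control change for this endpoint */
static int demo_flowcontrol(struct rpmsg_device *rpdev, void *priv, bool enable)
{
	dev_info(&rpdev->dev, "remote flow %s\n", enable ? "restricted" : "released");
	return 0;
}

static int demo_cb(struct rpmsg_device *rpdev, void *data, int len,
		   void *priv, u32 src)
{
	/* Example: ask the remote to pause transmission towards us */
	return rpmsg_set_flow_control(rpdev->ept, true, rpdev->dst);
}

static int demo_probe(struct rpmsg_device *rpdev)
{
	return 0;
}

static struct rpmsg_driver demo_rpmsg_driver = {
	.drv.name	= "rpmsg_flow_demo",
	.probe		= demo_probe,
	.callback	= demo_cb,
	.flowcontrol	= demo_flowcontrol,
};
module_rpmsg_driver(demo_rpmsg_driver);
MODULE_LICENSE("GPL");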
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index 39b646d0d40d..b950d6f790a3 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -55,6 +55,7 @@ struct rpmsg_device_ops {
* @trysendto: see @rpmsg_trysendto(), optional
* @trysend_offchannel: see @rpmsg_trysend_offchannel(), optional
* @poll: see @rpmsg_poll(), optional
+ * @set_flow_control: see @rpmsg_set_flow_control(), optional
* @get_mtu: see @rpmsg_get_mtu(), optional
*
* Indirection table for the operations that a rpmsg backend should implement.
@@ -75,6 +76,7 @@ struct rpmsg_endpoint_ops {
void *data, int len);
__poll_t (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait);
+ int (*set_flow_control)(struct rpmsg_endpoint *ept, bool pause, u32 dst);
ssize_t (*get_mtu)(struct rpmsg_endpoint *ept);
};
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 05f4b2d66290..d7502433c78a 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -904,9 +904,9 @@ config RTC_DRV_PCF2127
select REGMAP_SPI if SPI_MASTER
select WATCHDOG_CORE if WATCHDOG
help
- If you say yes here you get support for the NXP PCF2127/29 RTC
+ If you say yes here you get support for the NXP PCF2127/29/31 RTC
chips with integrated quartz crystal for industrial applications.
- Both chips also have watchdog timer and tamper switch detection
+ These chips also have watchdog timer and tamper switch detection
features.
PCF2127 has an additional feature of 512 bytes battery backed
@@ -1196,6 +1196,7 @@ config RTC_DRV_MSM6242
config RTC_DRV_BQ4802
tristate "TI BQ4802"
depends on HAS_IOMEM && HAS_IOPORT
+ depends on SPARC || COMPILE_TEST
help
If you say Y here you will get support for the TI
BQ4802 RTC chip.
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 499d89150afc..1b63111cdda2 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -376,7 +376,7 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
err = rtc_valid_tm(&alarm->time);
done:
- if (err)
+ if (err && alarm->enabled)
dev_warn(&rtc->dev, "invalid alarm value: %ptR\n",
&alarm->time);
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index e08d3181bd2a..fde2b8054c2e 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -15,7 +15,7 @@
#include <linux/i2c.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/rtc.h>
#include <linux/watchdog.h>
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index b4139c200676..569c1054d6b0 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -11,7 +11,6 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
@@ -474,7 +473,6 @@ static const struct armada38x_rtc_data armada8k_data = {
.alarm = ALARM2,
};
-#ifdef CONFIG_OF
static const struct of_device_id armada38x_rtc_of_match_table[] = {
{
.compatible = "marvell,armada-380-rtc",
@@ -487,7 +485,6 @@ static const struct of_device_id armada38x_rtc_of_match_table[] = {
{}
};
MODULE_DEVICE_TABLE(of, armada38x_rtc_of_match_table);
-#endif
static __init int armada38x_rtc_probe(struct platform_device *pdev)
{
@@ -577,7 +574,7 @@ static struct platform_driver armada38x_rtc_driver = {
.driver = {
.name = "armada38x-rtc",
.pm = &armada38x_rtc_pm_ops,
- .of_match_table = of_match_ptr(armada38x_rtc_of_match_table),
+ .of_match_table = armada38x_rtc_of_match_table,
},
};
diff --git a/drivers/rtc/rtc-aspeed.c b/drivers/rtc/rtc-aspeed.c
index a93352ed3aec..880b015eebaf 100644
--- a/drivers/rtc/rtc-aspeed.c
+++ b/drivers/rtc/rtc-aspeed.c
@@ -118,7 +118,7 @@ MODULE_DEVICE_TABLE(of, aspeed_rtc_match);
static struct platform_driver aspeed_rtc_driver = {
.driver = {
.name = "aspeed-rtc",
- .of_match_table = of_match_ptr(aspeed_rtc_match),
+ .of_match_table = aspeed_rtc_match,
},
};
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index e9d17232d0a8..add4f71d7b3b 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -22,7 +22,6 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
@@ -642,7 +641,7 @@ static struct platform_driver at91_rtc_driver = {
.driver = {
.name = "at91_rtc",
.pm = &at91_rtc_pm_ops,
- .of_match_table = of_match_ptr(at91_rtc_dt_ids),
+ .of_match_table = at91_rtc_dt_ids,
},
};
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 610f27dfc462..f93bee96e362 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -534,7 +534,7 @@ static struct platform_driver at91_rtc_driver = {
.driver = {
.name = "rtc-at91sam9",
.pm = &at91_rtc_pm_ops,
- .of_match_table = of_match_ptr(at91_rtc_dt_ids),
+ .of_match_table = at91_rtc_dt_ids,
},
};
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index c9416fe8542d..228fb2d11c70 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -913,6 +913,10 @@ static inline void cmos_check_acpi_rtc_status(struct device *dev,
#define INITSECTION __init
#endif
+#define SECS_PER_DAY (24 * 60 * 60)
+#define SECS_PER_MONTH (28 * SECS_PER_DAY)
+#define SECS_PER_YEAR (365 * SECS_PER_DAY)
+
static int INITSECTION
cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
{
@@ -1019,6 +1023,13 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup0;
}
+ if (cmos_rtc.mon_alrm)
+ cmos_rtc.rtc->alarm_offset_max = SECS_PER_YEAR - 1;
+ else if (cmos_rtc.day_alrm)
+ cmos_rtc.rtc->alarm_offset_max = SECS_PER_MONTH - 1;
+ else
+ cmos_rtc.rtc->alarm_offset_max = SECS_PER_DAY - 1;
+
rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
if (!mc146818_does_rtc_work()) {
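A sketch of how an alarm_offset_max limit is expected to be enforced before a driver's set_alarm op runs (an illustrative assumption about the core behaviour, not code taken from this patch):

#include <linux/errno.h>
#include <linux/rtc.h>

/* Reject alarms scheduled further out than the hardware window allows */
static int demo_check_alarm_offset(struct rtc_device *rtc, time64_t now,
				   time64_t scheduled)
{
	if (rtc->alarm_offset_max && scheduled - now > rtc->alarm_offset_max)
		return -ERANGE;

	return 0;
}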
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index 998ab8606f0b..0cd397c04ff0 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -182,21 +182,15 @@ static int cros_ec_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
ret = cros_ec_rtc_set(cros_ec, EC_CMD_RTC_SET_ALARM, alarm_offset);
if (ret < 0) {
- if (ret == -EINVAL && alarm_offset >= SECS_PER_DAY) {
- /*
- * RTC chips on some older Chromebooks can only handle
- * alarms up to 24h in the future. Try to set an alarm
- * below that limit to avoid suspend failures.
- */
- ret = cros_ec_rtc_set(cros_ec, EC_CMD_RTC_SET_ALARM,
- SECS_PER_DAY - 1);
- }
-
- if (ret < 0) {
- dev_err(dev, "error setting alarm in %u seconds: %d\n",
- alarm_offset, ret);
- return ret;
- }
+ dev_err(dev, "error setting alarm in %u seconds: %d\n",
+ alarm_offset, ret);
+ /*
+ * The EC code returns -EINVAL if the alarm time is too
+ * far in the future. Convert it to -ERANGE, which the RTC core expects.
+ */
+ if (ret == -EINVAL)
+ ret = -ERANGE;
+ return ret;
}
return 0;
@@ -355,6 +349,20 @@ static int cros_ec_rtc_probe(struct platform_device *pdev)
cros_ec_rtc->rtc->ops = &cros_ec_rtc_ops;
cros_ec_rtc->rtc->range_max = U32_MAX;
+ /*
+ * The RTC on some older Chromebooks can only handle alarms less than
+ * 24 hours in the future. The only way to find out is to try to set an
+ * alarm further in the future. If that fails, assume the RTC
+ * connected to the EC is limited to an alarm window of less
+ * than 24 hours.
+ */
+ ret = cros_ec_rtc_set(cros_ec, EC_CMD_RTC_SET_ALARM, SECS_PER_DAY * 2);
+ if (ret == -EINVAL)
+ cros_ec_rtc->rtc->alarm_offset_max = SECS_PER_DAY - 1;
+
+ (void)cros_ec_rtc_set(cros_ec, EC_CMD_RTC_SET_ALARM,
+ EC_RTC_ALARM_CLEAR);
+
ret = devm_rtc_register_device(cros_ec_rtc->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index ee2efb496174..2f5d60622564 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/slab.h>
@@ -496,6 +497,12 @@ static int da9063_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n",
irq_alarm, ret);
+ ret = dev_pm_set_wake_irq(&pdev->dev, irq_alarm);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "Failed to set IRQ %d as a wake IRQ: %d\n",
+ irq_alarm, ret);
+
device_init_wakeup(&pdev->dev, true);
return devm_rtc_register_device(rtc->rtc_dev);
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index ed9360486953..d4de401548b4 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -336,8 +336,8 @@ static int ds1305_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
/* make sure alarm fires within the next 24 hours */
if (later <= now)
return -EINVAL;
- if ((later - now) > 24 * 60 * 60)
- return -EDOM;
+ if ((later - now) > ds1305->rtc->alarm_offset_max)
+ return -ERANGE;
/* disable alarm if needed */
if (ds1305->ctrl[0] & DS1305_AEI0) {
@@ -691,6 +691,7 @@ static int ds1305_probe(struct spi_device *spi)
ds1305->rtc->ops = &ds1305_ops;
ds1305->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
ds1305->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ ds1305->rtc->alarm_offset_max = 24 * 60 * 60;
ds1305_nvmem_cfg.priv = ds1305;
status = devm_rtc_register_device(ds1305->rtc);
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index cb5acecc11aa..506b7d1c2397 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -1744,7 +1744,7 @@ static int ds1307_probe(struct i2c_client *client)
match = device_get_match_data(&client->dev);
if (match) {
- ds1307->type = (enum ds_type)match;
+ ds1307->type = (uintptr_t)match;
chip = &chips[ds1307->type];
} else if (id) {
chip = &chips[id->driver_data];
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index a5026b0514e7..6ae8b9a294fe 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -16,7 +16,6 @@
#include <linux/jiffies.h>
#include <linux/rtc.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/module.h>
diff --git a/drivers/rtc/rtc-ds2404.c b/drivers/rtc/rtc-ds2404.c
index 0480f592307e..3231fd9f61da 100644
--- a/drivers/rtc/rtc-ds2404.c
+++ b/drivers/rtc/rtc-ds2404.c
@@ -7,9 +7,8 @@
#include <linux/rtc.h>
#include <linux/types.h>
#include <linux/bcd.h>
-#include <linux/platform_data/rtc-ds2404.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/io.h>
@@ -27,164 +26,140 @@
#define DS2404_CLK 1
#define DS2404_DQ 2
-struct ds2404_gpio {
- const char *name;
- unsigned int gpio;
-};
-
struct ds2404 {
- struct ds2404_gpio *gpio;
+ struct device *dev;
+ struct gpio_desc *rst_gpiod;
+ struct gpio_desc *clk_gpiod;
+ struct gpio_desc *dq_gpiod;
struct rtc_device *rtc;
};
-static struct ds2404_gpio ds2404_gpio[] = {
- { "RTC RST", 0 },
- { "RTC CLK", 0 },
- { "RTC DQ", 0 },
-};
-
-static int ds2404_gpio_map(struct ds2404 *chip, struct platform_device *pdev,
- struct ds2404_platform_data *pdata)
+static int ds2404_gpio_map(struct ds2404 *chip, struct platform_device *pdev)
{
- int i, err;
-
- ds2404_gpio[DS2404_RST].gpio = pdata->gpio_rst;
- ds2404_gpio[DS2404_CLK].gpio = pdata->gpio_clk;
- ds2404_gpio[DS2404_DQ].gpio = pdata->gpio_dq;
-
- for (i = 0; i < ARRAY_SIZE(ds2404_gpio); i++) {
- err = gpio_request(ds2404_gpio[i].gpio, ds2404_gpio[i].name);
- if (err) {
- dev_err(&pdev->dev, "error mapping gpio %s: %d\n",
- ds2404_gpio[i].name, err);
- goto err_request;
- }
- if (i != DS2404_DQ)
- gpio_direction_output(ds2404_gpio[i].gpio, 1);
- }
+ struct device *dev = &pdev->dev;
- chip->gpio = ds2404_gpio;
- return 0;
+ /* This de-asserts RESET; the GPIO must be declared GPIOD_ACTIVE_LOW in the board description */
+ chip->rst_gpiod = devm_gpiod_get(dev, "rst", GPIOD_OUT_LOW);
+ if (IS_ERR(chip->rst_gpiod))
+ return PTR_ERR(chip->rst_gpiod);
-err_request:
- while (--i >= 0)
- gpio_free(ds2404_gpio[i].gpio);
- return err;
-}
+ chip->clk_gpiod = devm_gpiod_get(dev, "clk", GPIOD_OUT_HIGH);
+ if (IS_ERR(chip->clk_gpiod))
+ return PTR_ERR(chip->clk_gpiod);
-static void ds2404_gpio_unmap(void *data)
-{
- int i;
+ chip->dq_gpiod = devm_gpiod_get(dev, "dq", GPIOD_ASIS);
+ if (IS_ERR(chip->dq_gpiod))
+ return PTR_ERR(chip->dq_gpiod);
- for (i = 0; i < ARRAY_SIZE(ds2404_gpio); i++)
- gpio_free(ds2404_gpio[i].gpio);
+ return 0;
}
-static void ds2404_reset(struct device *dev)
+static void ds2404_reset(struct ds2404 *chip)
{
- gpio_set_value(ds2404_gpio[DS2404_RST].gpio, 0);
+ gpiod_set_value(chip->rst_gpiod, 1);
udelay(1000);
- gpio_set_value(ds2404_gpio[DS2404_RST].gpio, 1);
- gpio_set_value(ds2404_gpio[DS2404_CLK].gpio, 0);
- gpio_direction_output(ds2404_gpio[DS2404_DQ].gpio, 0);
+ gpiod_set_value(chip->rst_gpiod, 0);
+ gpiod_set_value(chip->clk_gpiod, 0);
+ gpiod_direction_output(chip->dq_gpiod, 0);
udelay(10);
}
-static void ds2404_write_byte(struct device *dev, u8 byte)
+static void ds2404_write_byte(struct ds2404 *chip, u8 byte)
{
int i;
- gpio_direction_output(ds2404_gpio[DS2404_DQ].gpio, 1);
+ gpiod_direction_output(chip->dq_gpiod, 1);
for (i = 0; i < 8; i++) {
- gpio_set_value(ds2404_gpio[DS2404_DQ].gpio, byte & (1 << i));
+ gpiod_set_value(chip->dq_gpiod, byte & (1 << i));
udelay(10);
- gpio_set_value(ds2404_gpio[DS2404_CLK].gpio, 1);
+ gpiod_set_value(chip->clk_gpiod, 1);
udelay(10);
- gpio_set_value(ds2404_gpio[DS2404_CLK].gpio, 0);
+ gpiod_set_value(chip->clk_gpiod, 0);
udelay(10);
}
}
-static u8 ds2404_read_byte(struct device *dev)
+static u8 ds2404_read_byte(struct ds2404 *chip)
{
int i;
u8 ret = 0;
- gpio_direction_input(ds2404_gpio[DS2404_DQ].gpio);
+ gpiod_direction_input(chip->dq_gpiod);
for (i = 0; i < 8; i++) {
- gpio_set_value(ds2404_gpio[DS2404_CLK].gpio, 0);
+ gpiod_set_value(chip->clk_gpiod, 0);
udelay(10);
- if (gpio_get_value(ds2404_gpio[DS2404_DQ].gpio))
+ if (gpiod_get_value(chip->dq_gpiod))
ret |= 1 << i;
- gpio_set_value(ds2404_gpio[DS2404_CLK].gpio, 1);
+ gpiod_set_value(chip->clk_gpiod, 1);
udelay(10);
}
return ret;
}
-static void ds2404_read_memory(struct device *dev, u16 offset,
+static void ds2404_read_memory(struct ds2404 *chip, u16 offset,
int length, u8 *out)
{
- ds2404_reset(dev);
- ds2404_write_byte(dev, DS2404_READ_MEMORY_CMD);
- ds2404_write_byte(dev, offset & 0xff);
- ds2404_write_byte(dev, (offset >> 8) & 0xff);
+ ds2404_reset(chip);
+ ds2404_write_byte(chip, DS2404_READ_MEMORY_CMD);
+ ds2404_write_byte(chip, offset & 0xff);
+ ds2404_write_byte(chip, (offset >> 8) & 0xff);
while (length--)
- *out++ = ds2404_read_byte(dev);
+ *out++ = ds2404_read_byte(chip);
}
-static void ds2404_write_memory(struct device *dev, u16 offset,
+static void ds2404_write_memory(struct ds2404 *chip, u16 offset,
int length, u8 *out)
{
int i;
u8 ta01, ta02, es;
- ds2404_reset(dev);
- ds2404_write_byte(dev, DS2404_WRITE_SCRATCHPAD_CMD);
- ds2404_write_byte(dev, offset & 0xff);
- ds2404_write_byte(dev, (offset >> 8) & 0xff);
+ ds2404_reset(chip);
+ ds2404_write_byte(chip, DS2404_WRITE_SCRATCHPAD_CMD);
+ ds2404_write_byte(chip, offset & 0xff);
+ ds2404_write_byte(chip, (offset >> 8) & 0xff);
for (i = 0; i < length; i++)
- ds2404_write_byte(dev, out[i]);
+ ds2404_write_byte(chip, out[i]);
- ds2404_reset(dev);
- ds2404_write_byte(dev, DS2404_READ_SCRATCHPAD_CMD);
+ ds2404_reset(chip);
+ ds2404_write_byte(chip, DS2404_READ_SCRATCHPAD_CMD);
- ta01 = ds2404_read_byte(dev);
- ta02 = ds2404_read_byte(dev);
- es = ds2404_read_byte(dev);
+ ta01 = ds2404_read_byte(chip);
+ ta02 = ds2404_read_byte(chip);
+ es = ds2404_read_byte(chip);
for (i = 0; i < length; i++) {
- if (out[i] != ds2404_read_byte(dev)) {
- dev_err(dev, "read invalid data\n");
+ if (out[i] != ds2404_read_byte(chip)) {
+ dev_err(chip->dev, "read invalid data\n");
return;
}
}
- ds2404_reset(dev);
- ds2404_write_byte(dev, DS2404_COPY_SCRATCHPAD_CMD);
- ds2404_write_byte(dev, ta01);
- ds2404_write_byte(dev, ta02);
- ds2404_write_byte(dev, es);
+ ds2404_reset(chip);
+ ds2404_write_byte(chip, DS2404_COPY_SCRATCHPAD_CMD);
+ ds2404_write_byte(chip, ta01);
+ ds2404_write_byte(chip, ta02);
+ ds2404_write_byte(chip, es);
- gpio_direction_input(ds2404_gpio[DS2404_DQ].gpio);
- while (gpio_get_value(ds2404_gpio[DS2404_DQ].gpio))
+ while (gpiod_get_value(chip->dq_gpiod))
;
}
-static void ds2404_enable_osc(struct device *dev)
+static void ds2404_enable_osc(struct ds2404 *chip)
{
u8 in[1] = { 0x10 }; /* enable oscillator */
- ds2404_write_memory(dev, 0x201, 1, in);
+
+ ds2404_write_memory(chip, 0x201, 1, in);
}
static int ds2404_read_time(struct device *dev, struct rtc_time *dt)
{
+ struct ds2404 *chip = dev_get_drvdata(dev);
unsigned long time = 0;
__le32 hw_time = 0;
- ds2404_read_memory(dev, 0x203, 4, (u8 *)&hw_time);
+ ds2404_read_memory(chip, 0x203, 4, (u8 *)&hw_time);
time = le32_to_cpu(hw_time);
rtc_time64_to_tm(time, dt);
@@ -193,8 +168,9 @@ static int ds2404_read_time(struct device *dev, struct rtc_time *dt)
static int ds2404_set_time(struct device *dev, struct rtc_time *dt)
{
+ struct ds2404 *chip = dev_get_drvdata(dev);
u32 time = cpu_to_le32(rtc_tm_to_time64(dt));
- ds2404_write_memory(dev, 0x203, 4, (u8 *)&time);
+ ds2404_write_memory(chip, 0x203, 4, (u8 *)&time);
return 0;
}
@@ -205,7 +181,6 @@ static const struct rtc_class_ops ds2404_rtc_ops = {
static int rtc_probe(struct platform_device *pdev)
{
- struct ds2404_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct ds2404 *chip;
int retval = -EBUSY;
@@ -213,22 +188,16 @@ static int rtc_probe(struct platform_device *pdev)
if (!chip)
return -ENOMEM;
+ chip->dev = &pdev->dev;
+
chip->rtc = devm_rtc_allocate_device(&pdev->dev);
if (IS_ERR(chip->rtc))
return PTR_ERR(chip->rtc);
- retval = ds2404_gpio_map(chip, pdev, pdata);
+ retval = ds2404_gpio_map(chip, pdev);
if (retval)
return retval;
- retval = devm_add_action_or_reset(&pdev->dev, ds2404_gpio_unmap, chip);
- if (retval)
- return retval;
-
- dev_info(&pdev->dev, "using GPIOs RST:%d, CLK:%d, DQ:%d\n",
- chip->gpio[DS2404_RST].gpio, chip->gpio[DS2404_CLK].gpio,
- chip->gpio[DS2404_DQ].gpio);
-
platform_set_drvdata(pdev, chip);
chip->rtc->ops = &ds2404_rtc_ops;
@@ -238,7 +207,7 @@ static int rtc_probe(struct platform_device *pdev)
if (retval)
return retval;
- ds2404_enable_osc(&pdev->dev);
+ ds2404_enable_osc(chip);
return 0;
}
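With the platform data gone, a non-DT board would hand the three lines to the driver through a gpiod lookup table instead. A sketch under assumed chip label, offsets and dev_id; only the "rst"/"clk"/"dq" con_ids come from the driver above.

#include <linux/gpio/machine.h>

/* Board-file sketch: chip label, offsets and dev_id are made up */
static struct gpiod_lookup_table ds2404_gpiod_table = {
	.dev_id = "ds2404",
	.table = {
		GPIO_LOOKUP("gpiochip0", 17, "rst", GPIO_ACTIVE_LOW),
		GPIO_LOOKUP("gpiochip0", 18, "clk", GPIO_ACTIVE_HIGH),
		GPIO_LOOKUP("gpiochip0", 19, "dq",  GPIO_ACTIVE_HIGH),
		{ }
	},
};

/* Registered from board init code before the device probes: */
/* gpiod_add_lookup_table(&ds2404_gpiod_table); */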
diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
index 3d7c4077fe1c..a72c4ad0cec6 100644
--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
+++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
@@ -11,11 +11,8 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/fsl/ftm.h>
#include <linux/rtc.h>
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index a613257d1574..4eef7afcc8bc 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -9,6 +9,8 @@
*/
#include <linux/bcd.h>
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/hwmon.h>
#include <linux/i2c.h>
@@ -31,6 +33,8 @@
#define ISL12022_REG_SR 0x07
#define ISL12022_REG_INT 0x08
+#define ISL12022_REG_PWR_VBAT 0x0a
+
#define ISL12022_REG_BETA 0x0d
#define ISL12022_REG_TEMP_L 0x28
@@ -41,6 +45,12 @@
#define ISL12022_SR_LBAT75 (1 << 1)
#define ISL12022_INT_WRTC (1 << 6)
+#define ISL12022_INT_FO_MASK GENMASK(3, 0)
+#define ISL12022_INT_FO_OFF 0x0
+#define ISL12022_INT_FO_32K 0x1
+
+#define ISL12022_REG_VB85_MASK GENMASK(5, 3)
+#define ISL12022_REG_VB75_MASK GENMASK(2, 0)
#define ISL12022_BETA_TSE (1 << 7)
@@ -141,12 +151,6 @@ static int isl12022_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (ret)
return ret;
- if (buf[ISL12022_REG_SR] & (ISL12022_SR_LBAT85 | ISL12022_SR_LBAT75)) {
- dev_warn(dev,
- "voltage dropped below %u%%, date and time is not reliable.\n",
- buf[ISL12022_REG_SR] & ISL12022_SR_LBAT85 ? 85 : 75);
- }
-
dev_dbg(dev,
"raw data is sec=%02x, min=%02x, hr=%02x, mday=%02x, mon=%02x, year=%02x, wday=%02x, sr=%02x, int=%02x",
buf[ISL12022_REG_SC],
@@ -204,7 +208,34 @@ static int isl12022_rtc_set_time(struct device *dev, struct rtc_time *tm)
return regmap_bulk_write(regmap, ISL12022_REG_SC, buf, sizeof(buf));
}
+static int isl12022_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ u32 user, val;
+ int ret;
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ ret = regmap_read(regmap, ISL12022_REG_SR, &val);
+ if (ret)
+ return ret;
+
+ user = 0;
+ if (val & ISL12022_SR_LBAT85)
+ user |= RTC_VL_BACKUP_LOW;
+
+ if (val & ISL12022_SR_LBAT75)
+ user |= RTC_VL_BACKUP_EMPTY;
+
+ return put_user(user, (u32 __user *)arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
static const struct rtc_class_ops isl12022_rtc_ops = {
+ .ioctl = isl12022_rtc_ioctl,
.read_time = isl12022_rtc_read_time,
.set_time = isl12022_rtc_set_time,
};
@@ -215,10 +246,88 @@ static const struct regmap_config regmap_config = {
.use_single_write = true,
};
+static int isl12022_register_clock(struct device *dev)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ struct clk_hw *hw;
+ int ret;
+
+ if (!device_property_present(dev, "#clock-cells")) {
+ /*
+ * Disabling the F_OUT pin reduces the power
+ * consumption in battery mode by ~25%.
+ */
+ regmap_update_bits(regmap, ISL12022_REG_INT, ISL12022_INT_FO_MASK,
+ ISL12022_INT_FO_OFF);
+
+ return 0;
+ }
+
+ if (!IS_ENABLED(CONFIG_COMMON_CLK))
+ return 0;
+
+ /*
+ * For now, only support a fixed clock of 32768Hz (the reset default).
+ */
+ ret = regmap_update_bits(regmap, ISL12022_REG_INT,
+ ISL12022_INT_FO_MASK, ISL12022_INT_FO_32K);
+ if (ret)
+ return ret;
+
+ hw = devm_clk_hw_register_fixed_rate(dev, "isl12022", NULL, 0, 32768);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
+}
+
+static const u32 trip_levels[2][7] = {
+ { 2125000, 2295000, 2550000, 2805000, 3060000, 4250000, 4675000 },
+ { 1875000, 2025000, 2250000, 2475000, 2700000, 3750000, 4125000 },
+};
+
+static void isl12022_set_trip_levels(struct device *dev)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+ u32 levels[2] = {0, 0};
+ int ret, i, j, x[2];
+ u8 val, mask;
+
+ device_property_read_u32_array(dev, "isil,battery-trip-levels-microvolt",
+ levels, 2);
+
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < ARRAY_SIZE(trip_levels[i]) - 1; j++) {
+ if (levels[i] <= trip_levels[i][j])
+ break;
+ }
+ x[i] = j;
+ }
+
+ val = FIELD_PREP(ISL12022_REG_VB85_MASK, x[0]) |
+ FIELD_PREP(ISL12022_REG_VB75_MASK, x[1]);
+ mask = ISL12022_REG_VB85_MASK | ISL12022_REG_VB75_MASK;
+
+ ret = regmap_update_bits(regmap, ISL12022_REG_PWR_VBAT, mask, val);
+ if (ret)
+ dev_warn(dev, "unable to set battery alarm levels: %d\n", ret);
+
+ /*
+ * Force a write of the TSE bit in the BETA register, in order
+ * to trigger an update of the LBAT75 and LBAT85 bits in the
+ * status register. In battery backup mode, those bits have
+ * another meaning, so without this, they may contain stale
+ * values for up to a minute after power-on.
+ */
+ regmap_write_bits(regmap, ISL12022_REG_BETA,
+ ISL12022_BETA_TSE, ISL12022_BETA_TSE);
+}
+
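/*
 * A standalone sketch of the trip-level selection above: for each of
 * the two thresholds (85% and 75% detection), the first table entry
 * that is greater than or equal to the requested microvolt value wins,
 * and its index is what lands in the VB85/VB75 register fields. When
 * the DT property is absent, the request stays at 0 and index 0 (the
 * lowest threshold) is selected. The demo_* names are illustrative.
 */
#include <stdio.h>
#include <stdint.h>

static const uint32_t demo_vb85_levels[7] = {
	2125000, 2295000, 2550000, 2805000, 3060000, 4250000, 4675000
};

static int demo_pick_index(const uint32_t *levels, uint32_t request_uv)
{
	int j;

	for (j = 0; j < 7 - 1; j++)
		if (request_uv <= levels[j])
			break;
	return j;	/* falls back to the last entry if nothing matches */
}

int main(void)
{
	/* 3.0 V request -> first level >= 3.0 V is 3.06 V at index 4. */
	printf("%d\n", demo_pick_index(demo_vb85_levels, 3000000));
	/* Absent property -> request 0 -> index 0 (2.125 V). */
	printf("%d\n", demo_pick_index(demo_vb85_levels, 0));
	return 0;
}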
static int isl12022_probe(struct i2c_client *client)
{
struct rtc_device *rtc;
struct regmap *regmap;
+ int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
@@ -231,6 +340,11 @@ static int isl12022_probe(struct i2c_client *client)
dev_set_drvdata(&client->dev, regmap);
+ ret = isl12022_register_clock(&client->dev);
+ if (ret)
+ return ret;
+
+ isl12022_set_trip_levels(&client->dev);
isl12022_hwmon_register(&client->dev);
rtc = devm_rtc_allocate_device(&client->dev);
diff --git a/drivers/rtc/rtc-isl12026.c b/drivers/rtc/rtc-isl12026.c
index 5abff5d348ac..2aabb9151d4c 100644
--- a/drivers/rtc/rtc-isl12026.c
+++ b/drivers/rtc/rtc-isl12026.c
@@ -11,7 +11,6 @@
#include <linux/mutex.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>
@@ -429,7 +428,7 @@ static void isl12026_force_power_modes(struct i2c_client *client)
}
}
-static int isl12026_probe_new(struct i2c_client *client)
+static int isl12026_probe(struct i2c_client *client)
{
struct isl12026 *priv;
int ret;
@@ -490,7 +489,7 @@ static struct i2c_driver isl12026_driver = {
.name = "rtc-isl12026",
.of_match_table = isl12026_dt_match,
},
- .probe = isl12026_probe_new,
+ .probe = isl12026_probe,
.remove = isl12026_remove,
};
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index b0712b4e3648..e50c23ee1646 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -9,7 +9,7 @@
#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/rtc.h>
@@ -188,7 +188,7 @@ isl1208_i2c_validate_client(struct i2c_client *client)
static int isl1208_set_xtoscb(struct i2c_client *client, int sr, int xtosb_val)
{
/* Do nothing if bit is already set to desired value */
- if ((sr & ISL1208_REG_SR_XTOSCB) == xtosb_val)
+ if (!!(sr & ISL1208_REG_SR_XTOSCB) == xtosb_val)
return 0;
if (xtosb_val)
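/*
 * A standalone sketch of why the double negation above matters: with a
 * single-bit mask (the value below is a stand-in assumed for the
 * demo), the raw masked value compares as e.g. 0x40 and never equals a
 * requested value of 1, so the "already set" early return never fired
 * before this change.
 */
#include <assert.h>

#define DEMO_XTOSCB	(1 << 6)	/* stand-in for the oscillator bit */

static int demo_needs_update(int sr, int want)
{
	return !!(sr & DEMO_XTOSCB) != want;
}

int main(void)
{
	assert(!demo_needs_update(DEMO_XTOSCB, 1));	/* bit set, want set: skip write */
	assert(demo_needs_update(0, 1));		/* bit clear, want set: write */
	assert(!demo_needs_update(0, 0));		/* bit clear, want clear: skip write */
	return 0;
}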
@@ -862,17 +862,9 @@ isl1208_probe(struct i2c_client *client)
i2c_set_clientdata(client, isl1208);
/* Determine which chip we have */
- if (client->dev.of_node) {
- isl1208->config = of_device_get_match_data(&client->dev);
- if (!isl1208->config)
- return -ENODEV;
- } else {
- const struct i2c_device_id *id = i2c_match_id(isl1208_id, client);
-
- if (!id)
- return -ENODEV;
- isl1208->config = (struct isl1208_config *)id->driver_data;
- }
+ isl1208->config = i2c_get_match_data(client);
+ if (!isl1208->config)
+ return -ENODEV;
rc = isl1208_clk_present(client, "xin");
if (rc < 0)
@@ -952,7 +944,6 @@ isl1208_probe(struct i2c_client *client)
rc = isl1208_setup_irq(client, client->irq);
if (rc)
return rc;
-
} else {
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, isl1208->rtc->features);
}
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 36453b008139..bafa7d1b9b88 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -11,7 +11,7 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/property.h>
@@ -349,7 +349,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
if (!rtc)
return -ENOMEM;
- rtc->type = (enum jz4740_rtc_type)device_get_match_data(dev);
+ rtc->type = (uintptr_t)device_get_match_data(dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
diff --git a/drivers/rtc/rtc-lpc24xx.c b/drivers/rtc/rtc-lpc24xx.c
index a4612e543f35..df17c48ff086 100644
--- a/drivers/rtc/rtc-lpc24xx.c
+++ b/drivers/rtc/rtc-lpc24xx.c
@@ -9,9 +9,8 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 3cc5151e0986..866489ad56d6 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -17,7 +17,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/mutex.h>
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 481c9525b1dd..dd4a62e2d39c 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -11,6 +11,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/bcd.h>
@@ -269,9 +270,16 @@ static int m48t86_rtc_probe(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id m48t86_rtc_of_ids[] = {
+ { .compatible = "st,m48t86" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, m48t86_rtc_of_ids);
+
static struct platform_driver m48t86_rtc_platform_driver = {
.driver = {
.name = "rtc-m48t86",
+ .of_match_table = m48t86_rtc_of_ids,
},
.probe = m48t86_rtc_probe,
};
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 07df43e4c4d0..28858fcaea8f 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -11,10 +11,8 @@
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 1d297af80f87..1617063669cc 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -9,7 +9,7 @@
#include <linux/mfd/mt6397/core.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
diff --git a/drivers/rtc/rtc-mt7622.c b/drivers/rtc/rtc-mt7622.c
index 81857a457c32..094c649fc137 100644
--- a/drivers/rtc/rtc-mt7622.c
+++ b/drivers/rtc/rtc-mt7622.c
@@ -7,9 +7,9 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 762cf03345f1..dbb935dbbd8a 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -11,7 +11,6 @@
#include <linux/pm_wakeirq.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#define RTC_INPUT_CLK_32768HZ (0x00 << 5)
#define RTC_INPUT_CLK_32000HZ (0x01 << 5)
diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
index a4e3f924837e..ed4e606be8e5 100644
--- a/drivers/rtc/rtc-nct3018y.c
+++ b/drivers/rtc/rtc-nct3018y.c
@@ -538,7 +538,7 @@ MODULE_DEVICE_TABLE(of, nct3018y_of_match);
static struct i2c_driver nct3018y_driver = {
.driver = {
.name = "rtc-nct3018y",
- .of_match_table = of_match_ptr(nct3018y_of_match),
+ .of_match_table = nct3018y_of_match,
},
.probe = nct3018y_probe,
.id_table = nct3018y_id,
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 8ae4d7824ec9..5b10ab06cd2e 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -747,12 +747,12 @@ static int omap_rtc_probe(struct platform_device *pdev)
}
rtc->irq_timer = platform_get_irq(pdev, 0);
- if (rtc->irq_timer <= 0)
- return -ENOENT;
+ if (rtc->irq_timer < 0)
+ return rtc->irq_timer;
rtc->irq_alarm = platform_get_irq(pdev, 1);
- if (rtc->irq_alarm <= 0)
- return -ENOENT;
+ if (rtc->irq_alarm < 0)
+ return rtc->irq_alarm;
rtc->clk = devm_clk_get(&pdev->dev, "ext-clk");
if (!IS_ERR(rtc->clk))
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index ee03b04b74ba..9c04c4e1a49c 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * An I2C and SPI driver for the NXP PCF2127/29 RTC
+ * An I2C and SPI driver for the NXP PCF2127/29/31 RTC
* Copyright 2013 Til-Technologies
*
* Author: Renaud Cerrato <r.cerrato@til-technologies.fr>
@@ -8,9 +8,13 @@
* Watchdog and tamper functions
* Author: Bruno Thomsen <bruno.thomsen@gmail.com>
*
+ * PCF2131 support
+ * Author: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+ *
* based on the other drivers in this same directory.
*
- * Datasheet: https://www.nxp.com/docs/en/data-sheet/PCF2127.pdf
+ * Datasheets: https://www.nxp.com/docs/en/data-sheet/PCF2127.pdf
+ * https://www.nxp.com/docs/en/data-sheet/PCF2131DS.pdf
*/
#include <linux/i2c.h>
@@ -21,6 +25,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/watchdog.h>
@@ -28,6 +33,7 @@
#define PCF2127_REG_CTRL1 0x00
#define PCF2127_BIT_CTRL1_POR_OVRD BIT(3)
#define PCF2127_BIT_CTRL1_TSF1 BIT(4)
+#define PCF2127_BIT_CTRL1_STOP BIT(5)
/* Control register 2 */
#define PCF2127_REG_CTRL2 0x01
#define PCF2127_BIT_CTRL2_AIE BIT(1)
@@ -43,20 +49,10 @@
#define PCF2127_BIT_CTRL3_BF BIT(3)
#define PCF2127_BIT_CTRL3_BTSE BIT(4)
/* Time and date registers */
-#define PCF2127_REG_SC 0x03
+#define PCF2127_REG_TIME_BASE 0x03
#define PCF2127_BIT_SC_OSF BIT(7)
-#define PCF2127_REG_MN 0x04
-#define PCF2127_REG_HR 0x05
-#define PCF2127_REG_DM 0x06
-#define PCF2127_REG_DW 0x07
-#define PCF2127_REG_MO 0x08
-#define PCF2127_REG_YR 0x09
/* Alarm registers */
-#define PCF2127_REG_ALARM_SC 0x0A
-#define PCF2127_REG_ALARM_MN 0x0B
-#define PCF2127_REG_ALARM_HR 0x0C
-#define PCF2127_REG_ALARM_DM 0x0D
-#define PCF2127_REG_ALARM_DW 0x0E
+#define PCF2127_REG_ALARM_BASE 0x0A
#define PCF2127_BIT_ALARM_AE BIT(7)
/* CLKOUT control register */
#define PCF2127_REG_CLKOUT 0x0f
@@ -68,21 +64,15 @@
#define PCF2127_BIT_WD_CTL_CD0 BIT(6)
#define PCF2127_BIT_WD_CTL_CD1 BIT(7)
#define PCF2127_REG_WD_VAL 0x11
-/* Tamper timestamp registers */
-#define PCF2127_REG_TS_CTRL 0x12
+/* Tamper timestamp1 registers */
+#define PCF2127_REG_TS1_BASE 0x12
#define PCF2127_BIT_TS_CTRL_TSOFF BIT(6)
#define PCF2127_BIT_TS_CTRL_TSM BIT(7)
-#define PCF2127_REG_TS_SC 0x13
-#define PCF2127_REG_TS_MN 0x14
-#define PCF2127_REG_TS_HR 0x15
-#define PCF2127_REG_TS_DM 0x16
-#define PCF2127_REG_TS_MO 0x17
-#define PCF2127_REG_TS_YR 0x18
/*
* RAM registers
* PCF2127 has 512 bytes general-purpose static RAM (SRAM) that is
* battery backed and can survive a power outage.
- * PCF2129 doesn't have this feature.
+ * PCF2129/31 doesn't have this feature.
*/
#define PCF2127_REG_RAM_ADDR_MSB 0x1A
#define PCF2127_REG_RAM_WRT_CMD 0x1C
@@ -90,9 +80,14 @@
/* Watchdog timer value constants */
#define PCF2127_WD_VAL_STOP 0
-#define PCF2127_WD_VAL_MIN 2
-#define PCF2127_WD_VAL_MAX 255
-#define PCF2127_WD_VAL_DEFAULT 60
+/* PCF2127/29 watchdog timer value constants */
+#define PCF2127_WD_CLOCK_HZ_X1000 1000 /* 1Hz */
+#define PCF2127_WD_MIN_HW_HEARTBEAT_MS 500
+/* PCF2131 watchdog timer value constants */
+#define PCF2131_WD_CLOCK_HZ_X1000 250 /* 1/4Hz */
+#define PCF2131_WD_MIN_HW_HEARTBEAT_MS 4000
+
+#define PCF2127_WD_DEFAULT_TIMEOUT_S 60
/* Mask for currently enabled interrupts */
#define PCF2127_CTRL1_IRQ_MASK (PCF2127_BIT_CTRL1_TSF1)
@@ -101,13 +96,117 @@
PCF2127_BIT_CTRL2_WDTF | \
PCF2127_BIT_CTRL2_TSF2)
+#define PCF2127_MAX_TS_SUPPORTED 4
+
+/* Control register 4 */
+#define PCF2131_REG_CTRL4 0x03
+#define PCF2131_BIT_CTRL4_TSF4 BIT(4)
+#define PCF2131_BIT_CTRL4_TSF3 BIT(5)
+#define PCF2131_BIT_CTRL4_TSF2 BIT(6)
+#define PCF2131_BIT_CTRL4_TSF1 BIT(7)
+/* Control register 5 */
+#define PCF2131_REG_CTRL5 0x04
+#define PCF2131_BIT_CTRL5_TSIE4 BIT(4)
+#define PCF2131_BIT_CTRL5_TSIE3 BIT(5)
+#define PCF2131_BIT_CTRL5_TSIE2 BIT(6)
+#define PCF2131_BIT_CTRL5_TSIE1 BIT(7)
+/* Software reset register */
+#define PCF2131_REG_SR_RESET 0x05
+#define PCF2131_SR_RESET_READ_PATTERN (BIT(2) | BIT(5))
+#define PCF2131_SR_RESET_CPR_CMD (PCF2131_SR_RESET_READ_PATTERN | BIT(7))
+/* Time and date registers */
+#define PCF2131_REG_TIME_BASE 0x07
+/* Alarm registers */
+#define PCF2131_REG_ALARM_BASE 0x0E
+/* CLKOUT control register */
+#define PCF2131_REG_CLKOUT 0x13
+/* Watchdog registers */
+#define PCF2131_REG_WD_CTL 0x35
+#define PCF2131_REG_WD_VAL 0x36
+/* Tamper timestamp1 registers */
+#define PCF2131_REG_TS1_BASE 0x14
+/* Tamper timestamp2 registers */
+#define PCF2131_REG_TS2_BASE 0x1B
+/* Tamper timestamp3 registers */
+#define PCF2131_REG_TS3_BASE 0x22
+/* Tamper timestamp4 registers */
+#define PCF2131_REG_TS4_BASE 0x29
+/* Interrupt mask registers */
+#define PCF2131_REG_INT_A_MASK1 0x31
+#define PCF2131_REG_INT_A_MASK2 0x32
+#define PCF2131_REG_INT_B_MASK1 0x33
+#define PCF2131_REG_INT_B_MASK2 0x34
+#define PCF2131_BIT_INT_BLIE BIT(0)
+#define PCF2131_BIT_INT_BIE BIT(1)
+#define PCF2131_BIT_INT_AIE BIT(2)
+#define PCF2131_BIT_INT_WD_CD BIT(3)
+#define PCF2131_BIT_INT_SI BIT(4)
+#define PCF2131_BIT_INT_MI BIT(5)
+#define PCF2131_CTRL2_IRQ_MASK ( \
+ PCF2127_BIT_CTRL2_AF | \
+ PCF2127_BIT_CTRL2_WDTF)
+#define PCF2131_CTRL4_IRQ_MASK ( \
+ PCF2131_BIT_CTRL4_TSF4 | \
+ PCF2131_BIT_CTRL4_TSF3 | \
+ PCF2131_BIT_CTRL4_TSF2 | \
+ PCF2131_BIT_CTRL4_TSF1)
+
+enum pcf21xx_type {
+ PCF2127,
+ PCF2129,
+ PCF2131,
+ PCF21XX_LAST_ID
+};
+
+struct pcf21xx_ts_config {
+ u8 reg_base; /* Base register to read timestamp values. */
+
+ /*
+ * If the TS input pin is driven to GND, an interrupt can be generated
+ * (supported by all variants).
+ */
+ u8 gnd_detect_reg; /* Interrupt control register address. */
+ u8 gnd_detect_bit; /* Interrupt bit. */
+
+ /*
+ * If the TS input pin is driven to an intermediate level between GND
+ * and supply, an interrupt can be generated (optional feature depending
+ * on variant).
+ */
+ u8 inter_detect_reg; /* Interrupt control register address. */
+ u8 inter_detect_bit; /* Interrupt bit. */
+
+ u8 ie_reg; /* Interrupt enable control register. */
+ u8 ie_bit; /* Interrupt enable bit. */
+};
+
+struct pcf21xx_config {
+ int type; /* IC variant */
+ int max_register;
+ unsigned int has_nvmem:1;
+ unsigned int has_bit_wd_ctl_cd0:1;
+ unsigned int wd_val_reg_readable:1; /* If watchdog value register can be read. */
+ unsigned int has_int_a_b:1; /* PCF2131 supports two interrupt outputs. */
+ u8 reg_time_base; /* Time/date base register. */
+ u8 regs_alarm_base; /* Alarm function base registers. */
+ u8 reg_wd_ctl; /* Watchdog control register. */
+ u8 reg_wd_val; /* Watchdog value register. */
+ u8 reg_clkout; /* Clkout register. */
+ int wdd_clock_hz_x1000; /* Watchdog clock in Hz multiplied by 1000 */
+ int wdd_min_hw_heartbeat_ms;
+ unsigned int ts_count;
+ struct pcf21xx_ts_config ts[PCF2127_MAX_TS_SUPPORTED];
+ struct attribute_group attribute_group;
+};
+
struct pcf2127 {
struct rtc_device *rtc;
struct watchdog_device wdd;
struct regmap *regmap;
- time64_t ts;
- bool ts_valid;
+ const struct pcf21xx_config *cfg;
bool irq_enabled;
+ time64_t ts[PCF2127_MAX_TS_SUPPORTED]; /* Timestamp values. */
+ bool ts_valid[PCF2127_MAX_TS_SUPPORTED]; /* Timestamp valid indication. */
};
/*
@@ -117,27 +216,22 @@ struct pcf2127 {
static int pcf2127_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
- unsigned char buf[10];
+ unsigned char buf[7];
int ret;
/*
* Avoid reading CTRL2 register as it causes WD_VAL register
* value to reset to 0 which means watchdog is stopped.
*/
- ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_CTRL3,
- (buf + PCF2127_REG_CTRL3),
- ARRAY_SIZE(buf) - PCF2127_REG_CTRL3);
+ ret = regmap_bulk_read(pcf2127->regmap, pcf2127->cfg->reg_time_base,
+ buf, sizeof(buf));
if (ret) {
dev_err(dev, "%s: read error\n", __func__);
return ret;
}
- if (buf[PCF2127_REG_CTRL3] & PCF2127_BIT_CTRL3_BLF)
- dev_info(dev,
- "low voltage detected, check/replace RTC battery.\n");
-
/* Clock integrity is not guaranteed when OSF flag is set. */
- if (buf[PCF2127_REG_SC] & PCF2127_BIT_SC_OSF) {
+ if (buf[0] & PCF2127_BIT_SC_OSF) {
/*
* no need clear the flag here,
* it will be cleared once the new date is saved
@@ -148,20 +242,17 @@ static int pcf2127_rtc_read_time(struct device *dev, struct rtc_time *tm)
}
dev_dbg(dev,
- "%s: raw data is cr3=%02x, sec=%02x, min=%02x, hr=%02x, "
+ "%s: raw data is sec=%02x, min=%02x, hr=%02x, "
"mday=%02x, wday=%02x, mon=%02x, year=%02x\n",
- __func__, buf[PCF2127_REG_CTRL3], buf[PCF2127_REG_SC],
- buf[PCF2127_REG_MN], buf[PCF2127_REG_HR],
- buf[PCF2127_REG_DM], buf[PCF2127_REG_DW],
- buf[PCF2127_REG_MO], buf[PCF2127_REG_YR]);
-
- tm->tm_sec = bcd2bin(buf[PCF2127_REG_SC] & 0x7F);
- tm->tm_min = bcd2bin(buf[PCF2127_REG_MN] & 0x7F);
- tm->tm_hour = bcd2bin(buf[PCF2127_REG_HR] & 0x3F); /* rtc hr 0-23 */
- tm->tm_mday = bcd2bin(buf[PCF2127_REG_DM] & 0x3F);
- tm->tm_wday = buf[PCF2127_REG_DW] & 0x07;
- tm->tm_mon = bcd2bin(buf[PCF2127_REG_MO] & 0x1F) - 1; /* rtc mn 1-12 */
- tm->tm_year = bcd2bin(buf[PCF2127_REG_YR]);
+ __func__, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
+
+ tm->tm_sec = bcd2bin(buf[0] & 0x7F);
+ tm->tm_min = bcd2bin(buf[1] & 0x7F);
+ tm->tm_hour = bcd2bin(buf[2] & 0x3F);
+ tm->tm_mday = bcd2bin(buf[3] & 0x3F);
+ tm->tm_wday = buf[4] & 0x07;
+ tm->tm_mon = bcd2bin(buf[5] & 0x1F) - 1;
+ tm->tm_year = bcd2bin(buf[6]);
tm->tm_year += 100;
dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
@@ -198,14 +289,45 @@ static int pcf2127_rtc_set_time(struct device *dev, struct rtc_time *tm)
/* year */
buf[i++] = bin2bcd(tm->tm_year - 100);
- /* write register's data */
- err = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_SC, buf, i);
+ /* Write access to time registers:
+ * PCF2127/29: no special action required.
+ * PCF2131: requires setting the STOP and CPR bits. STOP bit needs to
+ * be cleared after time registers are updated.
+ */
+ if (pcf2127->cfg->type == PCF2131) {
+ err = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL1,
+ PCF2127_BIT_CTRL1_STOP,
+ PCF2127_BIT_CTRL1_STOP);
+ if (err) {
+ dev_dbg(dev, "setting STOP bit failed\n");
+ return err;
+ }
+
+ err = regmap_write(pcf2127->regmap, PCF2131_REG_SR_RESET,
+ PCF2131_SR_RESET_CPR_CMD);
+ if (err) {
+ dev_dbg(dev, "sending CPR cmd failed\n");
+ return err;
+ }
+ }
+
+ /* write time register's data */
+ err = regmap_bulk_write(pcf2127->regmap, pcf2127->cfg->reg_time_base, buf, i);
if (err) {
- dev_err(dev,
- "%s: err=%d", __func__, err);
+ dev_dbg(dev, "%s: err=%d", __func__, err);
return err;
}
+ if (pcf2127->cfg->type == PCF2131) {
+ /* Clear STOP bit (PCF2131 only) after write is completed. */
+ err = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL1,
+ PCF2127_BIT_CTRL1_STOP, 0);
+ if (err) {
+ dev_dbg(dev, "clearing STOP bit failed\n");
+ return err;
+ }
+ }
+
return 0;
}
@@ -275,9 +397,16 @@ static int pcf2127_nvmem_write(void *priv, unsigned int offset,
static int pcf2127_wdt_ping(struct watchdog_device *wdd)
{
+ int wd_val;
struct pcf2127 *pcf2127 = watchdog_get_drvdata(wdd);
- return regmap_write(pcf2127->regmap, PCF2127_REG_WD_VAL, wdd->timeout);
+ /*
+ * Compute counter value of WATCHDG_TIM_VAL to obtain desired period
+ * in seconds, depending on the source clock frequency.
+ */
+ wd_val = ((wdd->timeout * pcf2127->cfg->wdd_clock_hz_x1000) / 1000) + 1;
+
+ return regmap_write(pcf2127->regmap, pcf2127->cfg->reg_wd_val, wd_val);
}
/*
@@ -311,7 +440,7 @@ static int pcf2127_wdt_stop(struct watchdog_device *wdd)
{
struct pcf2127 *pcf2127 = watchdog_get_drvdata(wdd);
- return regmap_write(pcf2127->regmap, PCF2127_REG_WD_VAL,
+ return regmap_write(pcf2127->regmap, pcf2127->cfg->reg_wd_val,
PCF2127_WD_VAL_STOP);
}
@@ -339,9 +468,25 @@ static const struct watchdog_ops pcf2127_watchdog_ops = {
.set_timeout = pcf2127_wdt_set_timeout,
};
+/*
+ * Compute watchdog period, t, in seconds, from the WATCHDG_TIM_VAL register
+ * value, n, and the clock frequency, f1000, in Hz x 1000.
+ *
+ * The PCF2127/29 datasheet gives t as:
+ * t = n / f
+ * The PCF2131 datasheet gives t as:
+ * t = (n - 1) / f
+ * For both variants, the watchdog is triggered when the WATCHDG_TIM_VAL reaches
+ * the value 1, and not zero. Consequently, the equation from the PCF2131
+ * datasheet seems to be the correct one for both variants.
+ */
+static int pcf2127_watchdog_get_period(int n, int f1000)
+{
+ return (1000 * (n - 1)) / f1000;
+}
+
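/*
 * A standalone check of the two conversions above: the ping path loads
 * n = timeout * f + 1 into WATCHDG_TIM_VAL and the helper recovers
 * t = (n - 1) / f, so they are inverses whenever the division is
 * exact. Values use the same x1000 fixed-point clock representation as
 * the driver; the demo_* names are illustrative only.
 */
#include <assert.h>

static int demo_get_period(int n, int f1000)
{
	return (1000 * (n - 1)) / f1000;
}

static int demo_get_wd_val(int timeout_s, int f1000)
{
	return ((timeout_s * f1000) / 1000) + 1;
}

int main(void)
{
	/* PCF2127/29: 1 Hz watchdog clock, 60 s timeout -> WD_VAL = 61. */
	assert(demo_get_wd_val(60, 1000) == 61);
	assert(demo_get_period(61, 1000) == 60);
	/* PCF2131: 1/4 Hz watchdog clock, 60 s timeout -> WD_VAL = 16. */
	assert(demo_get_wd_val(60, 250) == 16);
	assert(demo_get_period(16, 250) == 60);
	/* Register limits 2..255 give the min/max timeouts computed above. */
	assert(demo_get_period(2, 1000) == 1 && demo_get_period(255, 1000) == 254);
	return 0;
}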
static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
{
- u32 wdd_timeout;
int ret;
if (!IS_ENABLED(CONFIG_WATCHDOG) ||
@@ -351,21 +496,35 @@ static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
pcf2127->wdd.parent = dev;
pcf2127->wdd.info = &pcf2127_wdt_info;
pcf2127->wdd.ops = &pcf2127_watchdog_ops;
- pcf2127->wdd.min_timeout = PCF2127_WD_VAL_MIN;
- pcf2127->wdd.max_timeout = PCF2127_WD_VAL_MAX;
- pcf2127->wdd.timeout = PCF2127_WD_VAL_DEFAULT;
- pcf2127->wdd.min_hw_heartbeat_ms = 500;
+
+ pcf2127->wdd.min_timeout =
+ pcf2127_watchdog_get_period(
+ 2, pcf2127->cfg->wdd_clock_hz_x1000);
+ pcf2127->wdd.max_timeout =
+ pcf2127_watchdog_get_period(
+ 255, pcf2127->cfg->wdd_clock_hz_x1000);
+ pcf2127->wdd.timeout = PCF2127_WD_DEFAULT_TIMEOUT_S;
+
+ dev_dbg(dev, "%s clock = %d Hz / 1000\n", __func__,
+ pcf2127->cfg->wdd_clock_hz_x1000);
+
+ pcf2127->wdd.min_hw_heartbeat_ms = pcf2127->cfg->wdd_min_hw_heartbeat_ms;
pcf2127->wdd.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
watchdog_set_drvdata(&pcf2127->wdd, pcf2127);
/* Test if watchdog timer is started by bootloader */
- ret = regmap_read(pcf2127->regmap, PCF2127_REG_WD_VAL, &wdd_timeout);
- if (ret)
- return ret;
+ if (pcf2127->cfg->wd_val_reg_readable) {
+ u32 wdd_timeout;
- if (wdd_timeout)
- set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status);
+ ret = regmap_read(pcf2127->regmap, pcf2127->cfg->reg_wd_val,
+ &wdd_timeout);
+ if (ret)
+ return ret;
+
+ if (wdd_timeout)
+ set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status);
+ }
return devm_watchdog_register_device(dev, &pcf2127->wdd);
}
@@ -386,8 +545,8 @@ static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (ret)
return ret;
- ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_ALARM_SC, buf,
- sizeof(buf));
+ ret = regmap_bulk_read(pcf2127->regmap, pcf2127->cfg->regs_alarm_base,
+ buf, sizeof(buf));
if (ret)
return ret;
@@ -437,8 +596,8 @@ static int pcf2127_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
buf[3] = bin2bcd(alrm->time.tm_mday);
buf[4] = PCF2127_BIT_ALARM_AE; /* Do not match on week day */
- ret = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_ALARM_SC, buf,
- sizeof(buf));
+ ret = regmap_bulk_write(pcf2127->regmap, pcf2127->cfg->regs_alarm_base,
+ buf, sizeof(buf));
if (ret)
return ret;
@@ -446,38 +605,35 @@ static int pcf2127_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
}
/*
- * This function reads ctrl2 register, caller is responsible for calling
- * pcf2127_wdt_active_ping()
+ * This function reads the data of one timestamp function; the caller is
+ * responsible for calling pcf2127_wdt_active_ping()
*/
-static int pcf2127_rtc_ts_read(struct device *dev, time64_t *ts)
+static int pcf2127_rtc_ts_read(struct device *dev, time64_t *ts,
+ int ts_id)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
struct rtc_time tm;
int ret;
- unsigned char data[25];
+ unsigned char data[7];
- ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_CTRL1, data,
- sizeof(data));
+ ret = regmap_bulk_read(pcf2127->regmap, pcf2127->cfg->ts[ts_id].reg_base,
+ data, sizeof(data));
if (ret) {
dev_err(dev, "%s: read error ret=%d\n", __func__, ret);
return ret;
}
dev_dbg(dev,
- "%s: raw data is cr1=%02x, cr2=%02x, cr3=%02x, ts_sc=%02x, ts_mn=%02x, ts_hr=%02x, ts_dm=%02x, ts_mo=%02x, ts_yr=%02x\n",
- __func__, data[PCF2127_REG_CTRL1], data[PCF2127_REG_CTRL2],
- data[PCF2127_REG_CTRL3], data[PCF2127_REG_TS_SC],
- data[PCF2127_REG_TS_MN], data[PCF2127_REG_TS_HR],
- data[PCF2127_REG_TS_DM], data[PCF2127_REG_TS_MO],
- data[PCF2127_REG_TS_YR]);
-
- tm.tm_sec = bcd2bin(data[PCF2127_REG_TS_SC] & 0x7F);
- tm.tm_min = bcd2bin(data[PCF2127_REG_TS_MN] & 0x7F);
- tm.tm_hour = bcd2bin(data[PCF2127_REG_TS_HR] & 0x3F);
- tm.tm_mday = bcd2bin(data[PCF2127_REG_TS_DM] & 0x3F);
+ "%s: raw data is ts_sc=%02x, ts_mn=%02x, ts_hr=%02x, ts_dm=%02x, ts_mo=%02x, ts_yr=%02x\n",
+ __func__, data[1], data[2], data[3], data[4], data[5], data[6]);
+
+ tm.tm_sec = bcd2bin(data[1] & 0x7F);
+ tm.tm_min = bcd2bin(data[2] & 0x7F);
+ tm.tm_hour = bcd2bin(data[3] & 0x3F);
+ tm.tm_mday = bcd2bin(data[4] & 0x3F);
/* TS_MO register (month) value range: 1-12 */
- tm.tm_mon = bcd2bin(data[PCF2127_REG_TS_MO] & 0x1F) - 1;
- tm.tm_year = bcd2bin(data[PCF2127_REG_TS_YR]);
+ tm.tm_mon = bcd2bin(data[5] & 0x1F) - 1;
+ tm.tm_year = bcd2bin(data[6]);
if (tm.tm_year < 70)
tm.tm_year += 100; /* assume we are in 1970...2069 */
@@ -491,47 +647,84 @@ static int pcf2127_rtc_ts_read(struct device *dev, time64_t *ts)
return 0;
};
-static void pcf2127_rtc_ts_snapshot(struct device *dev)
+static void pcf2127_rtc_ts_snapshot(struct device *dev, int ts_id)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
int ret;
+ if (ts_id >= pcf2127->cfg->ts_count)
+ return;
+
/* Let userspace read the first timestamp */
- if (pcf2127->ts_valid)
+ if (pcf2127->ts_valid[ts_id])
return;
- ret = pcf2127_rtc_ts_read(dev, &pcf2127->ts);
+ ret = pcf2127_rtc_ts_read(dev, &pcf2127->ts[ts_id], ts_id);
if (!ret)
- pcf2127->ts_valid = true;
+ pcf2127->ts_valid[ts_id] = true;
}
static irqreturn_t pcf2127_rtc_irq(int irq, void *dev)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
- unsigned int ctrl1, ctrl2;
+ unsigned int ctrl2;
int ret = 0;
- ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL1, &ctrl1);
- if (ret)
- return IRQ_NONE;
-
ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
if (ret)
return IRQ_NONE;
- if (!(ctrl1 & PCF2127_CTRL1_IRQ_MASK || ctrl2 & PCF2127_CTRL2_IRQ_MASK))
- return IRQ_NONE;
+ if (pcf2127->cfg->ts_count == 1) {
+ /* PCF2127/29 */
+ unsigned int ctrl1;
+
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL1, &ctrl1);
+ if (ret)
+ return IRQ_NONE;
+
+ if (!(ctrl1 & PCF2127_CTRL1_IRQ_MASK || ctrl2 & PCF2127_CTRL2_IRQ_MASK))
+ return IRQ_NONE;
+
+ if (ctrl1 & PCF2127_BIT_CTRL1_TSF1 || ctrl2 & PCF2127_BIT_CTRL2_TSF2)
+ pcf2127_rtc_ts_snapshot(dev, 0);
+
+ if (ctrl1 & PCF2127_CTRL1_IRQ_MASK)
+ regmap_write(pcf2127->regmap, PCF2127_REG_CTRL1,
+ ctrl1 & ~PCF2127_CTRL1_IRQ_MASK);
+
+ if (ctrl2 & PCF2127_CTRL2_IRQ_MASK)
+ regmap_write(pcf2127->regmap, PCF2127_REG_CTRL2,
+ ctrl2 & ~PCF2127_CTRL2_IRQ_MASK);
+ } else {
+ /* PCF2131. */
+ unsigned int ctrl4;
+
+ ret = regmap_read(pcf2127->regmap, PCF2131_REG_CTRL4, &ctrl4);
+ if (ret)
+ return IRQ_NONE;
+
+ if (!(ctrl4 & PCF2131_CTRL4_IRQ_MASK || ctrl2 & PCF2131_CTRL2_IRQ_MASK))
+ return IRQ_NONE;
- if (ctrl1 & PCF2127_BIT_CTRL1_TSF1 || ctrl2 & PCF2127_BIT_CTRL2_TSF2)
- pcf2127_rtc_ts_snapshot(dev);
+ if (ctrl4 & PCF2131_CTRL4_IRQ_MASK) {
+ int i;
+ int tsf_bit = PCF2131_BIT_CTRL4_TSF1; /* Start at bit 7. */
- if (ctrl1 & PCF2127_CTRL1_IRQ_MASK)
- regmap_write(pcf2127->regmap, PCF2127_REG_CTRL1,
- ctrl1 & ~PCF2127_CTRL1_IRQ_MASK);
+ for (i = 0; i < pcf2127->cfg->ts_count; i++) {
+ if (ctrl4 & tsf_bit)
+ pcf2127_rtc_ts_snapshot(dev, i);
- if (ctrl2 & PCF2127_CTRL2_IRQ_MASK)
- regmap_write(pcf2127->regmap, PCF2127_REG_CTRL2,
- ctrl2 & ~PCF2127_CTRL2_IRQ_MASK);
+ tsf_bit = tsf_bit >> 1;
+ }
+
+ regmap_write(pcf2127->regmap, PCF2131_REG_CTRL4,
+ ctrl4 & ~PCF2131_CTRL4_IRQ_MASK);
+ }
+
+ if (ctrl2 & PCF2131_CTRL2_IRQ_MASK)
+ regmap_write(pcf2127->regmap, PCF2127_REG_CTRL2,
+ ctrl2 & ~PCF2131_CTRL2_IRQ_MASK);
+ }
if (ctrl2 & PCF2127_BIT_CTRL2_AF)
rtc_update_irq(pcf2127->rtc, 1, RTC_IRQF | RTC_AF);
@@ -552,28 +745,41 @@ static const struct rtc_class_ops pcf2127_rtc_ops = {
/* sysfs interface */
-static ssize_t timestamp0_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t timestamp_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count, int ts_id)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev->parent);
int ret;
+ if (ts_id >= pcf2127->cfg->ts_count)
+ return 0;
+
if (pcf2127->irq_enabled) {
- pcf2127->ts_valid = false;
+ pcf2127->ts_valid[ts_id] = false;
} else {
- ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL1,
- PCF2127_BIT_CTRL1_TSF1, 0);
+ /* Always clear GND interrupt bit. */
+ ret = regmap_update_bits(pcf2127->regmap,
+ pcf2127->cfg->ts[ts_id].gnd_detect_reg,
+ pcf2127->cfg->ts[ts_id].gnd_detect_bit,
+ 0);
+
if (ret) {
- dev_err(dev, "%s: update ctrl1 ret=%d\n", __func__, ret);
+ dev_err(dev, "%s: update TS gnd detect ret=%d\n", __func__, ret);
return ret;
}
- ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL2,
- PCF2127_BIT_CTRL2_TSF2, 0);
- if (ret) {
- dev_err(dev, "%s: update ctrl2 ret=%d\n", __func__, ret);
- return ret;
+ if (pcf2127->cfg->ts[ts_id].inter_detect_bit) {
+ /* Clear intermediate level interrupt bit if supported. */
+ ret = regmap_update_bits(pcf2127->regmap,
+ pcf2127->cfg->ts[ts_id].inter_detect_reg,
+ pcf2127->cfg->ts[ts_id].inter_detect_bit,
+ 0);
+ if (ret) {
+ dev_err(dev, "%s: update TS intermediate level detect ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
}
ret = pcf2127_wdt_active_ping(&pcf2127->wdd);
@@ -582,34 +788,84 @@ static ssize_t timestamp0_store(struct device *dev,
}
return count;
+}
+
+static ssize_t timestamp0_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return timestamp_store(dev, attr, buf, count, 0);
};
-static ssize_t timestamp0_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t timestamp1_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return timestamp_store(dev, attr, buf, count, 1);
+};
+
+static ssize_t timestamp2_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return timestamp_store(dev, attr, buf, count, 2);
+};
+
+static ssize_t timestamp3_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ return timestamp_store(dev, attr, buf, count, 3);
+};
+
+static ssize_t timestamp_show(struct device *dev,
+ struct device_attribute *attr, char *buf,
+ int ts_id)
{
struct pcf2127 *pcf2127 = dev_get_drvdata(dev->parent);
- unsigned int ctrl1, ctrl2;
int ret;
time64_t ts;
+ if (ts_id >= pcf2127->cfg->ts_count)
+ return 0;
+
if (pcf2127->irq_enabled) {
- if (!pcf2127->ts_valid)
+ if (!pcf2127->ts_valid[ts_id])
return 0;
- ts = pcf2127->ts;
+ ts = pcf2127->ts[ts_id];
} else {
- ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL1, &ctrl1);
- if (ret)
- return 0;
+ u8 valid_low = 0;
+ u8 valid_inter = 0;
+ unsigned int ctrl;
- ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2);
+ /* Check if TS input pin is driven to GND, supported by all
+ * variants.
+ */
+ ret = regmap_read(pcf2127->regmap,
+ pcf2127->cfg->ts[ts_id].gnd_detect_reg,
+ &ctrl);
if (ret)
return 0;
- if (!(ctrl1 & PCF2127_BIT_CTRL1_TSF1) &&
- !(ctrl2 & PCF2127_BIT_CTRL2_TSF2))
+ valid_low = ctrl & pcf2127->cfg->ts[ts_id].gnd_detect_bit;
+
+ if (pcf2127->cfg->ts[ts_id].inter_detect_bit) {
+ /* Check if TS input pin is driven to intermediate level
+ * between GND and supply, if supported by variant.
+ */
+ ret = regmap_read(pcf2127->regmap,
+ pcf2127->cfg->ts[ts_id].inter_detect_reg,
+ &ctrl);
+ if (ret)
+ return 0;
+
+ valid_inter = ctrl & pcf2127->cfg->ts[ts_id].inter_detect_bit;
+ }
+
+ if (!valid_low && !valid_inter)
return 0;
- ret = pcf2127_rtc_ts_read(dev->parent, &ts);
+ ret = pcf2127_rtc_ts_read(dev->parent, &ts, ts_id);
if (ret)
return 0;
@@ -618,21 +874,227 @@ static ssize_t timestamp0_show(struct device *dev,
return ret;
}
return sprintf(buf, "%llu\n", (unsigned long long)ts);
+}
+
+static ssize_t timestamp0_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return timestamp_show(dev, attr, buf, 0);
+};
+
+static ssize_t timestamp1_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return timestamp_show(dev, attr, buf, 1);
+};
+
+static ssize_t timestamp2_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return timestamp_show(dev, attr, buf, 2);
+};
+
+static ssize_t timestamp3_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return timestamp_show(dev, attr, buf, 3);
};
static DEVICE_ATTR_RW(timestamp0);
+static DEVICE_ATTR_RW(timestamp1);
+static DEVICE_ATTR_RW(timestamp2);
+static DEVICE_ATTR_RW(timestamp3);
static struct attribute *pcf2127_attrs[] = {
&dev_attr_timestamp0.attr,
NULL
};
-static const struct attribute_group pcf2127_attr_group = {
- .attrs = pcf2127_attrs,
+static struct attribute *pcf2131_attrs[] = {
+ &dev_attr_timestamp0.attr,
+ &dev_attr_timestamp1.attr,
+ &dev_attr_timestamp2.attr,
+ &dev_attr_timestamp3.attr,
+ NULL
};
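/*
 * A user-space sketch of the timestamp attributes declared above: each
 * timestamp%d file reads back the latest captured tamper time as
 * seconds since the epoch (empty if nothing was captured), and writing
 * to it clears the flags so the next event can be recorded. The sysfs
 * path is an assumption for a typical rtc0 instance.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/rtc/rtc0/timestamp0";	/* assumed path */
	char buf[32] = "";
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f) && buf[0] != '\0')
		printf("tamper event at %s", buf);	/* seconds since epoch */
	fclose(f);

	f = fopen(path, "w");	/* any write re-arms the timestamp */
	if (f) {
		fputs("0\n", f);
		fclose(f);
	}
	return 0;
}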
+static struct pcf21xx_config pcf21xx_cfg[] = {
+ [PCF2127] = {
+ .type = PCF2127,
+ .max_register = 0x1d,
+ .has_nvmem = 1,
+ .has_bit_wd_ctl_cd0 = 1,
+ .wd_val_reg_readable = 1,
+ .has_int_a_b = 0,
+ .reg_time_base = PCF2127_REG_TIME_BASE,
+ .regs_alarm_base = PCF2127_REG_ALARM_BASE,
+ .reg_wd_ctl = PCF2127_REG_WD_CTL,
+ .reg_wd_val = PCF2127_REG_WD_VAL,
+ .reg_clkout = PCF2127_REG_CLKOUT,
+ .wdd_clock_hz_x1000 = PCF2127_WD_CLOCK_HZ_X1000,
+ .wdd_min_hw_heartbeat_ms = PCF2127_WD_MIN_HW_HEARTBEAT_MS,
+ .ts_count = 1,
+ .ts[0] = {
+ .reg_base = PCF2127_REG_TS1_BASE,
+ .gnd_detect_reg = PCF2127_REG_CTRL1,
+ .gnd_detect_bit = PCF2127_BIT_CTRL1_TSF1,
+ .inter_detect_reg = PCF2127_REG_CTRL2,
+ .inter_detect_bit = PCF2127_BIT_CTRL2_TSF2,
+ .ie_reg = PCF2127_REG_CTRL2,
+ .ie_bit = PCF2127_BIT_CTRL2_TSIE,
+ },
+ .attribute_group = {
+ .attrs = pcf2127_attrs,
+ },
+ },
+ [PCF2129] = {
+ .type = PCF2129,
+ .max_register = 0x19,
+ .has_nvmem = 0,
+ .has_bit_wd_ctl_cd0 = 0,
+ .wd_val_reg_readable = 1,
+ .has_int_a_b = 0,
+ .reg_time_base = PCF2127_REG_TIME_BASE,
+ .regs_alarm_base = PCF2127_REG_ALARM_BASE,
+ .reg_wd_ctl = PCF2127_REG_WD_CTL,
+ .reg_wd_val = PCF2127_REG_WD_VAL,
+ .reg_clkout = PCF2127_REG_CLKOUT,
+ .wdd_clock_hz_x1000 = PCF2127_WD_CLOCK_HZ_X1000,
+ .wdd_min_hw_heartbeat_ms = PCF2127_WD_MIN_HW_HEARTBEAT_MS,
+ .ts_count = 1,
+ .ts[0] = {
+ .reg_base = PCF2127_REG_TS1_BASE,
+ .gnd_detect_reg = PCF2127_REG_CTRL1,
+ .gnd_detect_bit = PCF2127_BIT_CTRL1_TSF1,
+ .inter_detect_reg = PCF2127_REG_CTRL2,
+ .inter_detect_bit = PCF2127_BIT_CTRL2_TSF2,
+ .ie_reg = PCF2127_REG_CTRL2,
+ .ie_bit = PCF2127_BIT_CTRL2_TSIE,
+ },
+ .attribute_group = {
+ .attrs = pcf2127_attrs,
+ },
+ },
+ [PCF2131] = {
+ .type = PCF2131,
+ .max_register = 0x36,
+ .has_nvmem = 0,
+ .has_bit_wd_ctl_cd0 = 0,
+ .wd_val_reg_readable = 0,
+ .has_int_a_b = 1,
+ .reg_time_base = PCF2131_REG_TIME_BASE,
+ .regs_alarm_base = PCF2131_REG_ALARM_BASE,
+ .reg_wd_ctl = PCF2131_REG_WD_CTL,
+ .reg_wd_val = PCF2131_REG_WD_VAL,
+ .reg_clkout = PCF2131_REG_CLKOUT,
+ .wdd_clock_hz_x1000 = PCF2131_WD_CLOCK_HZ_X1000,
+ .wdd_min_hw_heartbeat_ms = PCF2131_WD_MIN_HW_HEARTBEAT_MS,
+ .ts_count = 4,
+ .ts[0] = {
+ .reg_base = PCF2131_REG_TS1_BASE,
+ .gnd_detect_reg = PCF2131_REG_CTRL4,
+ .gnd_detect_bit = PCF2131_BIT_CTRL4_TSF1,
+ .inter_detect_bit = 0,
+ .ie_reg = PCF2131_REG_CTRL5,
+ .ie_bit = PCF2131_BIT_CTRL5_TSIE1,
+ },
+ .ts[1] = {
+ .reg_base = PCF2131_REG_TS2_BASE,
+ .gnd_detect_reg = PCF2131_REG_CTRL4,
+ .gnd_detect_bit = PCF2131_BIT_CTRL4_TSF2,
+ .inter_detect_bit = 0,
+ .ie_reg = PCF2131_REG_CTRL5,
+ .ie_bit = PCF2131_BIT_CTRL5_TSIE2,
+ },
+ .ts[2] = {
+ .reg_base = PCF2131_REG_TS3_BASE,
+ .gnd_detect_reg = PCF2131_REG_CTRL4,
+ .gnd_detect_bit = PCF2131_BIT_CTRL4_TSF3,
+ .inter_detect_bit = 0,
+ .ie_reg = PCF2131_REG_CTRL5,
+ .ie_bit = PCF2131_BIT_CTRL5_TSIE3,
+ },
+ .ts[3] = {
+ .reg_base = PCF2131_REG_TS4_BASE,
+ .gnd_detect_reg = PCF2131_REG_CTRL4,
+ .gnd_detect_bit = PCF2131_BIT_CTRL4_TSF4,
+ .inter_detect_bit = 0,
+ .ie_reg = PCF2131_REG_CTRL5,
+ .ie_bit = PCF2131_BIT_CTRL5_TSIE4,
+ },
+ .attribute_group = {
+ .attrs = pcf2131_attrs,
+ },
+ },
+};
+
+/*
+ * Enable timestamp function and corresponding interrupt(s).
+ */
+static int pcf2127_enable_ts(struct device *dev, int ts_id)
+{
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+ int ret;
+
+ if (ts_id >= pcf2127->cfg->ts_count) {
+ dev_err(dev, "%s: invalid tamper detection ID (%d)\n",
+ __func__, ts_id);
+ return -EINVAL;
+ }
+
+ /* Enable timestamp function. */
+ ret = regmap_update_bits(pcf2127->regmap,
+ pcf2127->cfg->ts[ts_id].reg_base,
+ PCF2127_BIT_TS_CTRL_TSOFF |
+ PCF2127_BIT_TS_CTRL_TSM,
+ PCF2127_BIT_TS_CTRL_TSM);
+ if (ret) {
+ dev_err(dev, "%s: tamper detection config (ts%d_ctrl) failed\n",
+ __func__, ts_id);
+ return ret;
+ }
+
+ /*
+ * Enable interrupt generation when TSF timestamp flag is set.
+ * Interrupt signals are open-drain outputs and can be left floating if
+ * unused.
+ */
+ ret = regmap_update_bits(pcf2127->regmap, pcf2127->cfg->ts[ts_id].ie_reg,
+ pcf2127->cfg->ts[ts_id].ie_bit,
+ pcf2127->cfg->ts[ts_id].ie_bit);
+ if (ret) {
+ dev_err(dev, "%s: tamper detection TSIE%d config failed\n",
+ __func__, ts_id);
+ return ret;
+ }
+
+ return ret;
+}
+
+/* Route all interrupt sources to INT A pin. */
+static int pcf2127_configure_interrupt_pins(struct device *dev)
+{
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+ int ret;
+
+ /* Mask bits need to be cleared to enable corresponding
+ * interrupt source.
+ */
+ ret = regmap_write(pcf2127->regmap,
+ PCF2131_REG_INT_A_MASK1, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(pcf2127->regmap,
+ PCF2131_REG_INT_A_MASK2, 0);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static int pcf2127_probe(struct device *dev, struct regmap *regmap,
- int alarm_irq, const char *name, bool is_pcf2127)
+ int alarm_irq, const struct pcf21xx_config *config)
{
struct pcf2127 *pcf2127;
int ret = 0;
@@ -645,6 +1107,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
return -ENOMEM;
pcf2127->regmap = regmap;
+ pcf2127->cfg = config;
dev_set_drvdata(dev, pcf2127);
@@ -656,8 +1119,16 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
pcf2127->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pcf2127->rtc->range_max = RTC_TIMESTAMP_END_2099;
pcf2127->rtc->set_start_time = true; /* Sets actual start to 1970 */
- set_bit(RTC_FEATURE_ALARM_RES_2S, pcf2127->rtc->features);
- clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf2127->rtc->features);
+
+ /*
+ * PCF2127/29 do not work correctly when setting alarms at 1s intervals.
+ * PCF2131 is ok.
+ */
+ if (pcf2127->cfg->type == PCF2127 || pcf2127->cfg->type == PCF2129) {
+ set_bit(RTC_FEATURE_ALARM_RES_2S, pcf2127->rtc->features);
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, pcf2127->rtc->features);
+ }
+
clear_bit(RTC_FEATURE_ALARM, pcf2127->rtc->features);
if (alarm_irq > 0) {
@@ -688,7 +1159,16 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
set_bit(RTC_FEATURE_ALARM, pcf2127->rtc->features);
}
- if (is_pcf2127) {
+ if (pcf2127->cfg->has_int_a_b) {
+ /* Configure int A/B pins, independently of alarm_irq. */
+ ret = pcf2127_configure_interrupt_pins(dev);
+ if (ret) {
+ dev_err(dev, "failed to configure interrupt pins\n");
+ return ret;
+ }
+ }
+
+ if (pcf2127->cfg->has_nvmem) {
struct nvmem_config nvmem_cfg = {
.priv = pcf2127,
.reg_read = pcf2127_nvmem_read,
@@ -703,15 +1183,17 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
* The "Power-On Reset Override" facility prevents the RTC to do a reset
* after power on. For normal operation the PORO must be disabled.
*/
- regmap_clear_bits(pcf2127->regmap, PCF2127_REG_CTRL1,
+ ret = regmap_clear_bits(pcf2127->regmap, PCF2127_REG_CTRL1,
PCF2127_BIT_CTRL1_POR_OVRD);
+ if (ret < 0)
+ return ret;
- ret = regmap_read(pcf2127->regmap, PCF2127_REG_CLKOUT, &val);
+ ret = regmap_read(pcf2127->regmap, pcf2127->cfg->reg_clkout, &val);
if (ret < 0)
return ret;
if (!(val & PCF2127_BIT_CLKOUT_OTPR)) {
- ret = regmap_set_bits(pcf2127->regmap, PCF2127_REG_CLKOUT,
+ ret = regmap_set_bits(pcf2127->regmap, pcf2127->cfg->reg_clkout,
PCF2127_BIT_CLKOUT_OTPR);
if (ret < 0)
return ret;
@@ -721,20 +1203,20 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
/*
* Watchdog timer enabled and reset pin /RST activated when timed out.
- * Select 1Hz clock source for watchdog timer.
+ * Select 1Hz clock source for watchdog timer (1/4Hz for PCF2131).
* Note: Countdown timer disabled and not available.
- * For pca2129, pcf2129, only bit[7] is for Symbol WD_CD
+ * For pca2129, pcf2129 and pcf2131, only bit[7] is for Symbol WD_CD
* of register watchdg_tim_ctl. The bit[6] is labeled
* as T. Bits labeled as T must always be written with
* logic 0.
*/
- ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_WD_CTL,
+ ret = regmap_update_bits(pcf2127->regmap, pcf2127->cfg->reg_wd_ctl,
PCF2127_BIT_WD_CTL_CD1 |
PCF2127_BIT_WD_CTL_CD0 |
PCF2127_BIT_WD_CTL_TF1 |
PCF2127_BIT_WD_CTL_TF0,
PCF2127_BIT_WD_CTL_CD1 |
- (is_pcf2127 ? PCF2127_BIT_WD_CTL_CD0 : 0) |
+ (pcf2127->cfg->has_bit_wd_ctl_cd0 ? PCF2127_BIT_WD_CTL_CD0 : 0) |
PCF2127_BIT_WD_CTL_TF1);
if (ret) {
dev_err(dev, "%s: watchdog config (wd_ctl) failed\n", __func__);
@@ -760,34 +1242,15 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
}
/*
- * Enable timestamp function and store timestamp of first trigger
- * event until TSF1 and TSF2 interrupt flags are cleared.
- */
- ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_TS_CTRL,
- PCF2127_BIT_TS_CTRL_TSOFF |
- PCF2127_BIT_TS_CTRL_TSM,
- PCF2127_BIT_TS_CTRL_TSM);
- if (ret) {
- dev_err(dev, "%s: tamper detection config (ts_ctrl) failed\n",
- __func__);
- return ret;
- }
-
- /*
- * Enable interrupt generation when TSF1 or TSF2 timestamp flags
- * are set. Interrupt signal is an open-drain output and can be
- * left floating if unused.
+ * Enable timestamp functions 1 to 4.
*/
- ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL2,
- PCF2127_BIT_CTRL2_TSIE,
- PCF2127_BIT_CTRL2_TSIE);
- if (ret) {
- dev_err(dev, "%s: tamper detection config (ctrl2) failed\n",
- __func__);
- return ret;
+ for (int i = 0; i < pcf2127->cfg->ts_count; i++) {
+ ret = pcf2127_enable_ts(dev, i);
+ if (ret)
+ return ret;
}
- ret = rtc_add_group(pcf2127->rtc, &pcf2127_attr_group);
+ ret = rtc_add_group(pcf2127->rtc, &pcf2127->cfg->attribute_group);
if (ret) {
dev_err(dev, "%s: tamper sysfs registering failed\n",
__func__);
@@ -799,9 +1262,10 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
#ifdef CONFIG_OF
static const struct of_device_id pcf2127_of_match[] = {
- { .compatible = "nxp,pcf2127" },
- { .compatible = "nxp,pcf2129" },
- { .compatible = "nxp,pca2129" },
+ { .compatible = "nxp,pcf2127", .data = &pcf21xx_cfg[PCF2127] },
+ { .compatible = "nxp,pcf2129", .data = &pcf21xx_cfg[PCF2129] },
+ { .compatible = "nxp,pca2129", .data = &pcf21xx_cfg[PCF2129] },
+ { .compatible = "nxp,pcf2131", .data = &pcf21xx_cfg[PCF2131] },
{}
};
MODULE_DEVICE_TABLE(of, pcf2127_of_match);
@@ -886,26 +1350,41 @@ static const struct regmap_bus pcf2127_i2c_regmap = {
static struct i2c_driver pcf2127_i2c_driver;
static const struct i2c_device_id pcf2127_i2c_id[] = {
- { "pcf2127", 1 },
- { "pcf2129", 0 },
- { "pca2129", 0 },
+ { "pcf2127", PCF2127 },
+ { "pcf2129", PCF2129 },
+ { "pca2129", PCF2129 },
+ { "pcf2131", PCF2131 },
{ }
};
MODULE_DEVICE_TABLE(i2c, pcf2127_i2c_id);
static int pcf2127_i2c_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_match_id(pcf2127_i2c_id, client);
struct regmap *regmap;
- static const struct regmap_config config = {
+ static struct regmap_config config = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = 0x1d,
};
+ const struct pcf21xx_config *variant;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
return -ENODEV;
+ if (client->dev.of_node) {
+ variant = of_device_get_match_data(&client->dev);
+ if (!variant)
+ return -ENODEV;
+ } else {
+ enum pcf21xx_type type =
+ i2c_match_id(pcf2127_i2c_id, client)->driver_data;
+
+ if (type >= PCF21XX_LAST_ID)
+ return -ENODEV;
+ variant = &pcf21xx_cfg[type];
+ }
+
+ config.max_register = variant->max_register;
+
regmap = devm_regmap_init(&client->dev, &pcf2127_i2c_regmap,
&client->dev, &config);
if (IS_ERR(regmap)) {
@@ -914,8 +1393,7 @@ static int pcf2127_i2c_probe(struct i2c_client *client)
return PTR_ERR(regmap);
}
- return pcf2127_probe(&client->dev, regmap, client->irq,
- pcf2127_i2c_driver.driver.name, id->driver_data);
+ return pcf2127_probe(&client->dev, regmap, client->irq, variant);
}
static struct i2c_driver pcf2127_i2c_driver = {
@@ -953,17 +1431,32 @@ static void pcf2127_i2c_unregister_driver(void)
#if IS_ENABLED(CONFIG_SPI_MASTER)
static struct spi_driver pcf2127_spi_driver;
+static const struct spi_device_id pcf2127_spi_id[];
static int pcf2127_spi_probe(struct spi_device *spi)
{
- static const struct regmap_config config = {
+ static struct regmap_config config = {
.reg_bits = 8,
.val_bits = 8,
.read_flag_mask = 0xa0,
.write_flag_mask = 0x20,
- .max_register = 0x1d,
};
struct regmap *regmap;
+ const struct pcf21xx_config *variant;
+
+ if (spi->dev.of_node) {
+ variant = of_device_get_match_data(&spi->dev);
+ if (!variant)
+ return -ENODEV;
+ } else {
+ enum pcf21xx_type type = spi_get_device_id(spi)->driver_data;
+
+ if (type >= PCF21XX_LAST_ID)
+ return -ENODEV;
+ variant = &pcf21xx_cfg[type];
+ }
+
+ config.max_register = variant->max_register;
regmap = devm_regmap_init_spi(spi, &config);
if (IS_ERR(regmap)) {
@@ -972,15 +1465,14 @@ static int pcf2127_spi_probe(struct spi_device *spi)
return PTR_ERR(regmap);
}
- return pcf2127_probe(&spi->dev, regmap, spi->irq,
- pcf2127_spi_driver.driver.name,
- spi_get_device_id(spi)->driver_data);
+ return pcf2127_probe(&spi->dev, regmap, spi->irq, variant);
}
static const struct spi_device_id pcf2127_spi_id[] = {
- { "pcf2127", 1 },
- { "pcf2129", 0 },
- { "pca2129", 0 },
+ { "pcf2127", PCF2127 },
+ { "pcf2129", PCF2129 },
+ { "pca2129", PCF2129 },
+ { "pcf2131", PCF2131 },
{ }
};
MODULE_DEVICE_TABLE(spi, pcf2127_spi_id);
@@ -1045,5 +1537,5 @@ static void __exit pcf2127_exit(void)
module_exit(pcf2127_exit)
MODULE_AUTHOR("Renaud Cerrato <r.cerrato@til-technologies.fr>");
-MODULE_DESCRIPTION("NXP PCF2127/29 RTC driver");
+MODULE_DESCRIPTION("NXP PCF2127/29/31 RTC driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index e517abfaee2a..fdbc07f14036 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -14,7 +14,7 @@
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
@@ -514,49 +514,40 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
}
#endif
-enum pcf85063_type {
- PCF85063,
- PCF85063TP,
- PCF85063A,
- RV8263,
- PCF85063_LAST_ID
+static const struct pcf85063_config config_pcf85063 = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0a,
+ },
};
-static struct pcf85063_config pcf85063_cfg[] = {
- [PCF85063] = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x0a,
- },
- },
- [PCF85063TP] = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x0a,
- },
- },
- [PCF85063A] = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x11,
- },
- .has_alarms = 1,
+static const struct pcf85063_config config_pcf85063tp = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0a,
},
- [RV8263] = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x11,
- },
- .has_alarms = 1,
- .force_cap_7000 = 1,
+};
+
+static const struct pcf85063_config config_pcf85063a = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x11,
},
+ .has_alarms = 1,
};
-static const struct i2c_device_id pcf85063_ids[];
+static const struct pcf85063_config config_rv8263 = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x11,
+ },
+ .has_alarms = 1,
+ .force_cap_7000 = 1,
+};
static int pcf85063_probe(struct i2c_client *client)
{
@@ -579,17 +570,9 @@ static int pcf85063_probe(struct i2c_client *client)
if (!pcf85063)
return -ENOMEM;
- if (client->dev.of_node) {
- config = of_device_get_match_data(&client->dev);
- if (!config)
- return -ENODEV;
- } else {
- enum pcf85063_type type =
- i2c_match_id(pcf85063_ids, client)->driver_data;
- if (type >= PCF85063_LAST_ID)
- return -ENODEV;
- config = &pcf85063_cfg[type];
- }
+ config = i2c_get_match_data(client);
+ if (!config)
+ return -ENODEV;
pcf85063->regmap = devm_regmap_init_i2c(client, &config->regmap);
if (IS_ERR(pcf85063->regmap))
@@ -655,22 +638,22 @@ static int pcf85063_probe(struct i2c_client *client)
}
static const struct i2c_device_id pcf85063_ids[] = {
- { "pca85073a", PCF85063A },
- { "pcf85063", PCF85063 },
- { "pcf85063tp", PCF85063TP },
- { "pcf85063a", PCF85063A },
- { "rv8263", RV8263 },
+ { "pca85073a", .driver_data = (kernel_ulong_t)&config_pcf85063a },
+ { "pcf85063", .driver_data = (kernel_ulong_t)&config_pcf85063 },
+ { "pcf85063tp", .driver_data = (kernel_ulong_t)&config_pcf85063tp },
+ { "pcf85063a", .driver_data = (kernel_ulong_t)&config_pcf85063a },
+ { "rv8263", .driver_data = (kernel_ulong_t)&config_rv8263 },
{}
};
MODULE_DEVICE_TABLE(i2c, pcf85063_ids);
#ifdef CONFIG_OF
static const struct of_device_id pcf85063_of_match[] = {
- { .compatible = "nxp,pca85073a", .data = &pcf85063_cfg[PCF85063A] },
- { .compatible = "nxp,pcf85063", .data = &pcf85063_cfg[PCF85063] },
- { .compatible = "nxp,pcf85063tp", .data = &pcf85063_cfg[PCF85063TP] },
- { .compatible = "nxp,pcf85063a", .data = &pcf85063_cfg[PCF85063A] },
- { .compatible = "microcrystal,rv8263", .data = &pcf85063_cfg[RV8263] },
+ { .compatible = "nxp,pca85073a", .data = &config_pcf85063a },
+ { .compatible = "nxp,pcf85063", .data = &config_pcf85063 },
+ { .compatible = "nxp,pcf85063tp", .data = &config_pcf85063tp },
+ { .compatible = "nxp,pcf85063a", .data = &config_pcf85063a },
+ { .compatible = "microcrystal,rv8263", .data = &config_rv8263 },
{}
};
MODULE_DEVICE_TABLE(of, pcf85063_of_match);
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
index 65b8b1338dbb..06194674d71c 100644
--- a/drivers/rtc/rtc-pcf85363.c
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -15,7 +15,6 @@
#include <linux/errno.h>
#include <linux/bcd.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
/*
@@ -403,6 +402,7 @@ static int pcf85363_probe(struct i2c_client *client)
},
};
int ret, i, err;
+ bool wakeup_source;
if (data)
config = data;
@@ -432,25 +432,36 @@ static int pcf85363_probe(struct i2c_client *client)
pcf85363->rtc->ops = &rtc_ops;
pcf85363->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pcf85363->rtc->range_max = RTC_TIMESTAMP_END_2099;
- clear_bit(RTC_FEATURE_ALARM, pcf85363->rtc->features);
+
+ wakeup_source = device_property_read_bool(&client->dev,
+ "wakeup-source");
+ if (client->irq > 0 || wakeup_source) {
+ regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
+ regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
+ PIN_IO_INTA_OUT, PIN_IO_INTAPM);
+ }
if (client->irq > 0) {
unsigned long irqflags = IRQF_TRIGGER_LOW;
if (dev_fwnode(&client->dev))
irqflags = 0;
-
- regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
- regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
- PIN_IO_INTA_OUT, PIN_IO_INTAPM);
ret = devm_request_threaded_irq(&client->dev, client->irq,
NULL, pcf85363_rtc_handle_irq,
irqflags | IRQF_ONESHOT,
"pcf85363", client);
- if (ret)
- dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n");
- else
- set_bit(RTC_FEATURE_ALARM, pcf85363->rtc->features);
+ if (ret) {
+ dev_warn(&client->dev,
+ "unable to request IRQ, alarms disabled\n");
+ client->irq = 0;
+ }
+ }
+
+ if (client->irq > 0 || wakeup_source) {
+ device_init_wakeup(&client->dev, true);
+ set_bit(RTC_FEATURE_ALARM, pcf85363->rtc->features);
+ } else {
+ clear_bit(RTC_FEATURE_ALARM, pcf85363->rtc->features);
}
ret = devm_rtc_register_device(pcf85363->rtc);
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index eeacf480cf36..e400c78252e8 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -14,7 +14,6 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include "rtc-sa1100.h"
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index a5a6c8772ecd..f8fab0205f8c 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -12,7 +12,7 @@
#include <linux/bcd.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
/*
* Ricoh has a family of I2C based RTCs, which differ only slightly from
@@ -826,8 +826,7 @@ static int rs5c372_probe(struct i2c_client *client)
rs5c372->client = client;
i2c_set_clientdata(client, rs5c372);
if (client->dev.of_node) {
- rs5c372->type = (enum rtc_type)
- of_device_get_match_data(&client->dev);
+ rs5c372->type = (uintptr_t)of_device_get_match_data(&client->dev);
} else {
const struct i2c_device_id *id = i2c_match_id(rs5c372_id, client);
rs5c372->type = id->driver_data;
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index 076e56f4e01a..2f001c59c61d 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -17,7 +17,7 @@
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
@@ -855,11 +855,68 @@ static const struct regmap_config regmap_config = {
.max_register = 0x37,
};
+static u8 rv3028_set_trickle_charger(struct rv3028_data *rv3028,
+ struct i2c_client *client)
+{
+ int ret, val_old, val;
+ u32 ohms, chargeable;
+
+ ret = regmap_read(rv3028->regmap, RV3028_BACKUP, &val_old);
+ if (ret < 0)
+ return ret;
+
+ /* mask out only trickle charger bits */
+ val_old = val_old & (RV3028_BACKUP_TCE | RV3028_BACKUP_TCR_MASK);
+ val = val_old;
+
+ /* setup trickle charger */
+ if (!device_property_read_u32(&client->dev, "trickle-resistor-ohms",
+ &ohms)) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rv3028_trickle_resistors); i++)
+ if (ohms == rv3028_trickle_resistors[i])
+ break;
+
+ if (i < ARRAY_SIZE(rv3028_trickle_resistors)) {
+ /* enable trickle charger and its resistor */
+ val = RV3028_BACKUP_TCE | i;
+ } else {
+ dev_warn(&client->dev, "invalid trickle resistor value\n");
+ }
+ }
+
+ if (!device_property_read_u32(&client->dev, "aux-voltage-chargeable",
+ &chargeable)) {
+ switch (chargeable) {
+ case 0:
+ val &= ~RV3028_BACKUP_TCE;
+ break;
+ case 1:
+ val |= RV3028_BACKUP_TCE;
+ break;
+ default:
+ dev_warn(&client->dev,
+ "unsupported aux-voltage-chargeable value\n");
+ break;
+ }
+ }
+
+ /* only update EEPROM if changes are necessary */
+ if (val_old != val) {
+ ret = rv3028_update_cfg(rv3028, RV3028_BACKUP, RV3028_BACKUP_TCE |
+ RV3028_BACKUP_TCR_MASK, val);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
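/*
 * A standalone sketch of how the BACKUP register value above is built:
 * a matching "trickle-resistor-ohms" picks an index into the resistor
 * table and sets the enable bit, and "aux-voltage-chargeable" can then
 * force the enable bit on or off; the EEPROM is only rewritten when
 * the result differs from what is already stored. The table contents
 * and bit position below are assumptions for the demo.
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_TCE	0x20	/* stand-in for the trickle-charge enable bit */

static const uint32_t demo_resistors[] = { 3000, 5000, 9000, 15000 };

static uint8_t demo_backup_val(uint32_t ohms, int chargeable)
{
	uint8_t val = 0;
	unsigned int i;

	for (i = 0; i < sizeof(demo_resistors) / sizeof(demo_resistors[0]); i++)
		if (ohms == demo_resistors[i])
			val = DEMO_TCE | i;	/* enable + resistor index */

	if (chargeable == 0)		/* property explicitly set to 0 */
		val &= (uint8_t)~DEMO_TCE;
	else if (chargeable == 1)	/* property explicitly set to 1 */
		val |= DEMO_TCE;

	return val;
}

int main(void)
{
	assert(demo_backup_val(9000, 1) == (DEMO_TCE | 2));
	assert(demo_backup_val(9000, 0) == 2);
	assert(demo_backup_val(9000, -1) == (DEMO_TCE | 2));	/* -1: property absent */
	return 0;
}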
static int rv3028_probe(struct i2c_client *client)
{
struct rv3028_data *rv3028;
int ret, status;
- u32 ohms;
struct nvmem_config nvmem_cfg = {
.name = "rv3028_nvram",
.word_size = 1,
@@ -937,24 +994,9 @@ static int rv3028_probe(struct i2c_client *client)
if (ret)
return ret;
- /* setup trickle charger */
- if (!device_property_read_u32(&client->dev, "trickle-resistor-ohms",
- &ohms)) {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(rv3028_trickle_resistors); i++)
- if (ohms == rv3028_trickle_resistors[i])
- break;
-
- if (i < ARRAY_SIZE(rv3028_trickle_resistors)) {
- ret = rv3028_update_cfg(rv3028, RV3028_BACKUP, RV3028_BACKUP_TCE |
- RV3028_BACKUP_TCR_MASK, RV3028_BACKUP_TCE | i);
- if (ret)
- return ret;
- } else {
- dev_warn(&client->dev, "invalid trickle resistor value\n");
- }
- }
+ ret = rv3028_set_trickle_charger(rv3028, client);
+ if (ret)
+ return ret;
ret = rtc_add_group(rv3028->rtc, &rv3028_attr_group);
if (ret)
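
The rv3028 hunk above moves the trickle-charger setup into a helper that reads RV3028_BACKUP, keeps only the TCE/TCR bits, applies the DT properties, and writes the EEPROM-backed register only when the value actually changed. A minimal standalone sketch of that bit handling follows; the mask values 0x20 and 0x03 are illustrative assumptions, not the driver's real RV3028_BACKUP_* definitions.

#include <stdio.h>
#include <stdint.h>

#define BACKUP_TCE      0x20   /* assumed: trickle charge enable bit */
#define BACKUP_TCR_MASK 0x03   /* assumed: trickle charge resistor select */

int main(void)
{
	uint8_t backup = 0x1c;                                /* raw register read */
	uint8_t old = backup & (BACKUP_TCE | BACKUP_TCR_MASK); /* charger bits only */
	uint8_t val = BACKUP_TCE | 1;                          /* enable, resistor index 1 */

	/* only touch the EEPROM-backed register when something changed */
	if (old != val)
		printf("would update backup: 0x%02x -> 0x%02x\n",
		       (unsigned int)old, (unsigned int)val);
	return 0;
}
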
diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c
index 6b8eb2039a33..35b2e36b426a 100644
--- a/drivers/rtc/rtc-rv3032.c
+++ b/drivers/rtc/rtc-rv3032.c
@@ -19,7 +19,7 @@
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 98679cae13e8..1a3ec1bb5b81 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -15,7 +15,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/rtc.h>
#define RV8803_I2C_TRY_COUNT 4
@@ -645,8 +645,7 @@ static int rv8803_probe(struct i2c_client *client)
mutex_init(&rv8803->flags_lock);
rv8803->client = client;
if (client->dev.of_node) {
- rv8803->type = (enum rv8803_type)
- of_device_get_match_data(&client->dev);
+ rv8803->type = (uintptr_t)of_device_get_match_data(&client->dev);
} else {
const struct i2c_device_id *id = i2c_match_id(rv8803_id, client);
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index 8702db6096ba..834274db8c3f 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -13,7 +13,6 @@
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/i2c.h>
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index 82881fd2e14a..48efd61a114d 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -13,7 +13,6 @@
#include <linux/i2c.h>
#include <linux/bcd.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
#include <linux/log2.h>
diff --git a/drivers/rtc/rtc-rzn1.c b/drivers/rtc/rtc-rzn1.c
index dca736caba85..56ebbd4d0481 100644
--- a/drivers/rtc/rtc-rzn1.c
+++ b/drivers/rtc/rtc-rzn1.c
@@ -15,7 +15,7 @@
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rtc.h>
@@ -227,7 +227,7 @@ static int rzn1_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return ret;
/* We cannot set alarms more than one week ahead */
- farest = rtc_tm_to_time64(&tm_now) + (7 * 86400);
+ farest = rtc_tm_to_time64(&tm_now) + rtc->rtcdev->alarm_offset_max;
alarm = rtc_tm_to_time64(tm);
if (time_after(alarm, farest))
return -ERANGE;
@@ -351,6 +351,7 @@ static int rzn1_rtc_probe(struct platform_device *pdev)
rtc->rtcdev->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->rtcdev->range_max = RTC_TIMESTAMP_END_2099;
+ rtc->rtcdev->alarm_offset_max = 7 * 86400;
rtc->rtcdev->ops = &rzn1_rtc_ops;
set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtcdev->features);
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtcdev->features);
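
The rzn1 change above replaces the hard-coded one-week limit with the core's alarm_offset_max field and checks the requested alarm against it. A minimal userspace sketch of the same bound check, assuming the one-week value the driver sets (7 * 86400 seconds); this is not kernel code.

#include <stdio.h>
#include <time.h>
#include <stdbool.h>

static bool alarm_in_range(time_t now, time_t alarm, long alarm_offset_max)
{
	/* alarms may only be set up to alarm_offset_max seconds ahead */
	return alarm > now && alarm <= now + alarm_offset_max;
}

int main(void)
{
	time_t now = time(NULL);
	long max = 7 * 86400;                                    /* one week */
	printf("%d\n", alarm_in_range(now, now + 3 * 86400, max)); /* 1: accepted */
	printf("%d\n", alarm_in_range(now, now + 8 * 86400, max)); /* 0: -ERANGE */
	return 0;
}
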
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 70e1a18e5efd..282238818f63 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -23,7 +23,6 @@
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/uaccess.h>
#include <linux/io.h>
diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
index 3d36e11cff80..76753c71d92e 100644
--- a/drivers/rtc/rtc-stm32.c
+++ b/drivers/rtc/rtc-stm32.c
@@ -6,11 +6,13 @@
#include <linux/bcd.h>
#include <linux/clk.h>
+#include <linux/errno.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/rtc.h>
@@ -89,6 +91,9 @@
/* Max STM32 RTC register offset is 0x3FC */
#define UNDEF_REG 0xFFFF
+/* STM32 RTC driver time helpers */
+#define SEC_PER_DAY (24 * 60 * 60)
+
struct stm32_rtc;
struct stm32_rtc_registers {
@@ -114,6 +119,7 @@ struct stm32_rtc_data {
void (*clear_events)(struct stm32_rtc *rtc, unsigned int flags);
bool has_pclk;
bool need_dbp;
+ bool need_accuracy;
};
struct stm32_rtc {
@@ -158,10 +164,9 @@ static int stm32_rtc_enter_init_mode(struct stm32_rtc *rtc)
* slowest rtc_ck frequency may be 32kHz and highest should be
* 1MHz, we poll every 10 us with a timeout of 100ms.
*/
- return readl_relaxed_poll_timeout_atomic(
- rtc->base + regs->isr,
- isr, (isr & STM32_RTC_ISR_INITF),
- 10, 100000);
+ return readl_relaxed_poll_timeout_atomic(rtc->base + regs->isr, isr,
+ (isr & STM32_RTC_ISR_INITF),
+ 10, 100000);
}
return 0;
@@ -425,40 +430,42 @@ static int stm32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
-static int stm32_rtc_valid_alrm(struct stm32_rtc *rtc, struct rtc_time *tm)
+static int stm32_rtc_valid_alrm(struct device *dev, struct rtc_time *tm)
{
- const struct stm32_rtc_registers *regs = &rtc->data->regs;
- int cur_day, cur_mon, cur_year, cur_hour, cur_min, cur_sec;
- unsigned int dr = readl_relaxed(rtc->base + regs->dr);
- unsigned int tr = readl_relaxed(rtc->base + regs->tr);
-
- cur_day = (dr & STM32_RTC_DR_DATE) >> STM32_RTC_DR_DATE_SHIFT;
- cur_mon = (dr & STM32_RTC_DR_MONTH) >> STM32_RTC_DR_MONTH_SHIFT;
- cur_year = (dr & STM32_RTC_DR_YEAR) >> STM32_RTC_DR_YEAR_SHIFT;
- cur_sec = (tr & STM32_RTC_TR_SEC) >> STM32_RTC_TR_SEC_SHIFT;
- cur_min = (tr & STM32_RTC_TR_MIN) >> STM32_RTC_TR_MIN_SHIFT;
- cur_hour = (tr & STM32_RTC_TR_HOUR) >> STM32_RTC_TR_HOUR_SHIFT;
+ static struct rtc_time now;
+ time64_t max_alarm_time64;
+ int max_day_forward;
+ int next_month;
+ int next_year;
/*
* Assuming current date is M-D-Y H:M:S.
* RTC alarm can't be set on a specific month and year.
* So the valid alarm range is:
* M-D-Y H:M:S < alarm <= (M+1)-D-Y H:M:S
- * with a specific case for December...
*/
- if ((((tm->tm_year > cur_year) &&
- (tm->tm_mon == 0x1) && (cur_mon == 0x12)) ||
- ((tm->tm_year == cur_year) &&
- (tm->tm_mon <= cur_mon + 1))) &&
- ((tm->tm_mday > cur_day) ||
- ((tm->tm_mday == cur_day) &&
- ((tm->tm_hour > cur_hour) ||
- ((tm->tm_hour == cur_hour) && (tm->tm_min > cur_min)) ||
- ((tm->tm_hour == cur_hour) && (tm->tm_min == cur_min) &&
- (tm->tm_sec >= cur_sec))))))
- return 0;
+ stm32_rtc_read_time(dev, &now);
+
+ /*
+ * Find the next month and the year of the next month.
+ * Note: tm_mon and next_month are from 0 to 11
+ */
+ next_month = now.tm_mon + 1;
+ if (next_month == 12) {
+ next_month = 0;
+ next_year = now.tm_year + 1;
+ } else {
+ next_year = now.tm_year;
+ }
- return -EINVAL;
+ /* Find the maximum limit of alarm in days. */
+ max_day_forward = rtc_month_days(now.tm_mon, now.tm_year)
+ - now.tm_mday
+ + min(rtc_month_days(next_month, next_year), now.tm_mday);
+
+ /* Convert to timestamp and compare the alarm time and its upper limit */
+ max_alarm_time64 = rtc_tm_to_time64(&now) + max_day_forward * SEC_PER_DAY;
+ return rtc_tm_to_time64(tm) <= max_alarm_time64 ? 0 : -EINVAL;
}
static int stm32_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
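
The rewritten stm32_rtc_valid_alrm() above bounds the alarm by "same day of month, next month". A worked example of that arithmetic, assuming a simple non-leap-year month table in place of rtc_month_days(); the March 31st case shows why the limit can be less than a full month.

#include <stdio.h>

static const int month_days[12] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int tm_mon = 2, tm_mday = 31;          /* March 31st (tm_mon is 0..11) */
	int next_month = (tm_mon + 1) % 12;    /* April */

	/* days left in March (0) plus min(days in April, 31) = 30 days */
	int max_day_forward = month_days[tm_mon] - tm_mday
			    + MIN(month_days[next_month], tm_mday);

	printf("alarm may be at most %d days ahead\n", max_day_forward); /* 30 */
	return 0;
}
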
@@ -469,17 +476,17 @@ static int stm32_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
unsigned int cr, isr, alrmar;
int ret = 0;
- tm2bcd(tm);
-
/*
* RTC alarm can't be set on a specific date, unless this date is
* up to the same day of month next month.
*/
- if (stm32_rtc_valid_alrm(rtc, tm) < 0) {
+ if (stm32_rtc_valid_alrm(dev, tm) < 0) {
dev_err(dev, "Alarm can be set only on upcoming month.\n");
return -EINVAL;
}
+ tm2bcd(tm);
+
alrmar = 0;
/* tm_year and tm_mon are not used because not supported by RTC */
alrmar |= (tm->tm_mday << STM32_RTC_ALRMXR_DATE_SHIFT) &
@@ -545,6 +552,7 @@ static void stm32_rtc_clear_events(struct stm32_rtc *rtc,
static const struct stm32_rtc_data stm32_rtc_data = {
.has_pclk = false,
.need_dbp = true,
+ .need_accuracy = false,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -566,6 +574,7 @@ static const struct stm32_rtc_data stm32_rtc_data = {
static const struct stm32_rtc_data stm32h7_rtc_data = {
.has_pclk = true,
.need_dbp = true,
+ .need_accuracy = false,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -596,6 +605,7 @@ static void stm32mp1_rtc_clear_events(struct stm32_rtc *rtc,
static const struct stm32_rtc_data stm32mp1_data = {
.has_pclk = true,
.need_dbp = false,
+ .need_accuracy = true,
.regs = {
.tr = 0x00,
.dr = 0x04,
@@ -628,7 +638,7 @@ static int stm32_rtc_init(struct platform_device *pdev,
const struct stm32_rtc_registers *regs = &rtc->data->regs;
unsigned int prer, pred_a, pred_s, pred_a_max, pred_s_max, cr;
unsigned int rate;
- int ret = 0;
+ int ret;
rate = clk_get_rate(rtc->rtc_ck);
@@ -636,18 +646,32 @@ static int stm32_rtc_init(struct platform_device *pdev,
pred_a_max = STM32_RTC_PRER_PRED_A >> STM32_RTC_PRER_PRED_A_SHIFT;
pred_s_max = STM32_RTC_PRER_PRED_S >> STM32_RTC_PRER_PRED_S_SHIFT;
- for (pred_a = pred_a_max; pred_a + 1 > 0; pred_a--) {
- pred_s = (rate / (pred_a + 1)) - 1;
+ if (rate > (pred_a_max + 1) * (pred_s_max + 1)) {
+ dev_err(&pdev->dev, "rtc_ck rate is too high: %dHz\n", rate);
+ return -EINVAL;
+ }
+
+ if (rtc->data->need_accuracy) {
+ for (pred_a = 0; pred_a <= pred_a_max; pred_a++) {
+ pred_s = (rate / (pred_a + 1)) - 1;
+
+ if (pred_s <= pred_s_max && ((pred_s + 1) * (pred_a + 1)) == rate)
+ break;
+ }
+ } else {
+ for (pred_a = pred_a_max; pred_a + 1 > 0; pred_a--) {
+ pred_s = (rate / (pred_a + 1)) - 1;
- if (((pred_s + 1) * (pred_a + 1)) == rate)
- break;
+ if (((pred_s + 1) * (pred_a + 1)) == rate)
+ break;
+ }
}
/*
* Can't find a 1Hz, so give priority to RTC power consumption
* by choosing the higher possible value for prediv_a
*/
- if ((pred_s > pred_s_max) || (pred_a > pred_a_max)) {
+ if (pred_s > pred_s_max || pred_a > pred_a_max) {
pred_a = pred_a_max;
pred_s = (rate / (pred_a + 1)) - 1;
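
The prescaler hunk above adds an "accuracy" search that prefers the smallest PREDIV_A, while the existing path keeps preferring the largest one for lower power. A standalone sketch of both searches for a 32768 Hz rtc_ck, assuming the usual 7-bit PREDIV_A and 15-bit PREDIV_S limits.

#include <stdio.h>

int main(void)
{
	unsigned int rate = 32768, pred_a_max = 127, pred_s_max = 32767;
	unsigned int pred_a, pred_s;

	/* accuracy mode: smallest PREDIV_A (finest sub-second resolution) */
	for (pred_a = 0; pred_a <= pred_a_max; pred_a++) {
		pred_s = rate / (pred_a + 1) - 1;
		if (pred_s <= pred_s_max && (pred_s + 1) * (pred_a + 1) == rate)
			break;
	}
	printf("accuracy: A=%u S=%u\n", pred_a, pred_s);   /* A=0 S=32767 */

	/* power mode: largest PREDIV_A (lower prescaler clock) */
	for (pred_a = pred_a_max; pred_a + 1 > 0; pred_a--) {
		pred_s = rate / (pred_a + 1) - 1;
		if ((pred_s + 1) * (pred_a + 1) == rate)
			break;
	}
	printf("power:    A=%u S=%u\n", pred_a, pred_s);   /* A=127 S=255 */
	return 0;
}
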
@@ -656,6 +680,20 @@ static int stm32_rtc_init(struct platform_device *pdev,
"fast" : "slow");
}
+ cr = readl_relaxed(rtc->base + regs->cr);
+
+ prer = readl_relaxed(rtc->base + regs->prer);
+ prer &= STM32_RTC_PRER_PRED_S | STM32_RTC_PRER_PRED_A;
+
+ pred_s = (pred_s << STM32_RTC_PRER_PRED_S_SHIFT) &
+ STM32_RTC_PRER_PRED_S;
+ pred_a = (pred_a << STM32_RTC_PRER_PRED_A_SHIFT) &
+ STM32_RTC_PRER_PRED_A;
+
+ /* quit if there is nothing to initialize */
+ if ((cr & STM32_RTC_CR_FMT) == 0 && prer == (pred_s | pred_a))
+ return 0;
+
stm32_rtc_wpr_unlock(rtc);
ret = stm32_rtc_enter_init_mode(rtc);
@@ -665,13 +703,10 @@ static int stm32_rtc_init(struct platform_device *pdev,
goto end;
}
- prer = (pred_s << STM32_RTC_PRER_PRED_S_SHIFT) & STM32_RTC_PRER_PRED_S;
- writel_relaxed(prer, rtc->base + regs->prer);
- prer |= (pred_a << STM32_RTC_PRER_PRED_A_SHIFT) & STM32_RTC_PRER_PRED_A;
- writel_relaxed(prer, rtc->base + regs->prer);
+ writel_relaxed(pred_s, rtc->base + regs->prer);
+ writel_relaxed(pred_a | pred_s, rtc->base + regs->prer);
/* Force 24h time format */
- cr = readl_relaxed(rtc->base + regs->cr);
cr &= ~STM32_RTC_CR_FMT;
writel_relaxed(cr, rtc->base + regs->cr);
@@ -730,16 +765,13 @@ static int stm32_rtc_probe(struct platform_device *pdev)
rtc->rtc_ck = devm_clk_get(&pdev->dev, NULL);
} else {
rtc->pclk = devm_clk_get(&pdev->dev, "pclk");
- if (IS_ERR(rtc->pclk)) {
- dev_err(&pdev->dev, "no pclk clock");
- return PTR_ERR(rtc->pclk);
- }
+ if (IS_ERR(rtc->pclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rtc->pclk), "no pclk clock");
+
rtc->rtc_ck = devm_clk_get(&pdev->dev, "rtc_ck");
}
- if (IS_ERR(rtc->rtc_ck)) {
- dev_err(&pdev->dev, "no rtc_ck clock");
- return PTR_ERR(rtc->rtc_ck);
- }
+ if (IS_ERR(rtc->rtc_ck))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rtc->rtc_ck), "no rtc_ck clock");
if (rtc->data->has_pclk) {
ret = clk_prepare_enable(rtc->pclk);
@@ -859,7 +891,6 @@ static void stm32_rtc_remove(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, false);
}
-#ifdef CONFIG_PM_SLEEP
static int stm32_rtc_suspend(struct device *dev)
{
struct stm32_rtc *rtc = dev_get_drvdata(dev);
@@ -890,10 +921,10 @@ static int stm32_rtc_resume(struct device *dev)
return ret;
}
-#endif
-static SIMPLE_DEV_PM_OPS(stm32_rtc_pm_ops,
- stm32_rtc_suspend, stm32_rtc_resume);
+static const struct dev_pm_ops stm32_rtc_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(stm32_rtc_suspend, stm32_rtc_resume)
+};
static struct platform_driver stm32_rtc_driver = {
.probe = stm32_rtc_probe,
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 6f11b745f34d..7566d0a44af8 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -18,7 +18,6 @@
#include <linux/delay.h>
#include <linux/rtc.h>
#include <linux/slab.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/stmp_device.h>
#include <linux/stmp3xxx_rtc_wdt.h>
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index 71548dd59a3a..8e0c66906103 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>
@@ -847,8 +846,6 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
if (ret)
return ret;
- dev_info(&pdev->dev, "RTC enabled\n");
-
return 0;
}
diff --git a/drivers/rtc/rtc-sunplus.c b/drivers/rtc/rtc-sunplus.c
index f33dc301f301..20c7e97c2fc8 100644
--- a/drivers/rtc/rtc-sunplus.c
+++ b/drivers/rtc/rtc-sunplus.c
@@ -244,7 +244,7 @@ static int sp_rtc_probe(struct platform_device *plat_dev)
sp_rtc->irq = platform_get_irq(plat_dev, 0);
if (sp_rtc->irq < 0)
- return dev_err_probe(&plat_dev->dev, sp_rtc->irq, "platform_get_irq failed\n");
+ return sp_rtc->irq;
ret = devm_request_irq(&plat_dev->dev, sp_rtc->irq, sp_rtc_irq_handler,
IRQF_TRIGGER_RISING, "rtc irq", plat_dev);
diff --git a/drivers/rtc/rtc-sunxi.c b/drivers/rtc/rtc-sunxi.c
index 5d019e3a835a..5cab9953c44f 100644
--- a/drivers/rtc/rtc-sunxi.c
+++ b/drivers/rtc/rtc-sunxi.c
@@ -14,8 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/types.h>
diff --git a/drivers/rtc/rtc-ti-k3.c b/drivers/rtc/rtc-ti-k3.c
index 0d90fe923355..ec759d8f7023 100644
--- a/drivers/rtc/rtc-ti-k3.c
+++ b/drivers/rtc/rtc-ti-k3.c
@@ -9,7 +9,7 @@
#include <linux/delay.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>
#include <linux/property.h>
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index 9f14e2475747..20faf08c254c 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -252,6 +252,7 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
rtc->rtc->ops = &tps6586x_rtc_ops;
rtc->rtc->range_max = (1ULL << 30) - 1; /* 30-bit seconds */
+ rtc->rtc->alarm_offset_max = ALM1_VALID_RANGE_IN_SEC;
rtc->rtc->start_secs = mktime64(2009, 1, 1, 0, 0, 0);
rtc->rtc->set_start_time = true;
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index 75e4c2d777b9..411ff66c0468 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -406,11 +406,8 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tps_rtc);
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_warn(&pdev->dev, "Wake up is not possible as irq = %d\n",
- irq);
- return -ENXIO;
- }
+ if (irq < 0)
+ return irq;
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
tps65910_rtc_interrupt, IRQF_TRIGGER_LOW,
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 81b36948c2fa..13f8ce08243c 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -487,11 +487,24 @@ static const struct rtc_class_ops twl_rtc_ops = {
.alarm_irq_enable = twl_rtc_alarm_irq_enable,
};
+static int twl_nvram_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ return twl_i2c_read((long)priv, val, offset, bytes);
+}
+
+static int twl_nvram_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ return twl_i2c_write((long)priv, val, offset, bytes);
+}
+
/*----------------------------------------------------------------------*/
static int twl_rtc_probe(struct platform_device *pdev)
{
struct twl_rtc *twl_rtc;
+ struct nvmem_config nvmem_cfg;
struct device_node *np = pdev->dev.of_node;
int ret = -EINVAL;
int irq = platform_get_irq(pdev, 0);
@@ -542,7 +555,6 @@ static int twl_rtc_probe(struct platform_device *pdev)
REG_INT_MSK_STS_A);
}
- dev_info(&pdev->dev, "Enabling TWL-RTC\n");
ret = twl_rtc_write_u8(twl_rtc, BIT_RTC_CTRL_REG_STOP_RTC_M,
REG_RTC_CTRL_REG);
if (ret < 0)
@@ -564,11 +576,8 @@ static int twl_rtc_probe(struct platform_device *pdev)
twl_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&twl_rtc_ops, THIS_MODULE);
- if (IS_ERR(twl_rtc->rtc)) {
- dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
- PTR_ERR(twl_rtc->rtc));
+ if (IS_ERR(twl_rtc->rtc))
return PTR_ERR(twl_rtc->rtc);
- }
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
twl_rtc_interrupt,
@@ -579,6 +588,30 @@ static int twl_rtc_probe(struct platform_device *pdev)
return ret;
}
+ memset(&nvmem_cfg, 0, sizeof(nvmem_cfg));
+ nvmem_cfg.name = "twl-secured-";
+ nvmem_cfg.type = NVMEM_TYPE_BATTERY_BACKED;
+ nvmem_cfg.reg_read = twl_nvram_read,
+ nvmem_cfg.reg_write = twl_nvram_write,
+ nvmem_cfg.word_size = 1;
+ nvmem_cfg.stride = 1;
+ if (twl_class_is_4030()) {
+ /* 20 bytes SECURED_REG area */
+ nvmem_cfg.size = 20;
+ nvmem_cfg.priv = (void *)TWL_MODULE_SECURED_REG;
+ devm_rtc_nvmem_register(twl_rtc->rtc, &nvmem_cfg);
+ /* 8 bytes BACKUP area */
+ nvmem_cfg.name = "twl-backup-";
+ nvmem_cfg.size = 8;
+ nvmem_cfg.priv = (void *)TWL4030_MODULE_BACKUP;
+ devm_rtc_nvmem_register(twl_rtc->rtc, &nvmem_cfg);
+ } else {
+ /* 8 bytes SECURED_REG area */
+ nvmem_cfg.size = 8;
+ nvmem_cfg.priv = (void *)TWL_MODULE_SECURED_REG;
+ devm_rtc_nvmem_register(twl_rtc->rtc, &nvmem_cfg);
+ }
+
return 0;
}
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index 947f8071803f..3c773cff2b39 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -386,8 +386,6 @@ static int wm8350_rtc_probe(struct platform_device *pdev)
/* enable the RTC if it's not already enabled */
power5 = wm8350_reg_read(wm8350, WM8350_POWER_MGMT_5);
if (!(power5 & WM8350_RTC_TICK_ENA)) {
- dev_info(wm8350->dev, "Starting RTC\n");
-
wm8350_reg_unlock(wm8350);
ret = wm8350_set_bits(wm8350, WM8350_POWER_MGMT_5,
@@ -426,11 +424,8 @@ static int wm8350_rtc_probe(struct platform_device *pdev)
wm_rtc->rtc = devm_rtc_device_register(&pdev->dev, "wm8350",
&wm8350_rtc_ops, THIS_MODULE);
- if (IS_ERR(wm_rtc->rtc)) {
- ret = PTR_ERR(wm_rtc->rtc);
- dev_err(&pdev->dev, "failed to register RTC: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(wm_rtc->rtc))
+ return PTR_ERR(wm_rtc->rtc);
ret = wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
wm8350_rtc_update_handler, 0,
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 620fab01b710..c4e36650c426 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1378,16 +1378,12 @@ static ssize_t dasd_vendor_show(struct device *dev,
static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
-#define UID_STRLEN ( /* vendor */ 3 + 1 + /* serial */ 14 + 1 +\
- /* SSID */ 4 + 1 + /* unit addr */ 2 + 1 +\
- /* vduit */ 32 + 1)
-
static ssize_t
dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
+ char uid_string[DASD_UID_STRLEN];
struct dasd_device *device;
struct dasd_uid uid;
- char uid_string[UID_STRLEN];
char ua_string[3];
device = dasd_device_from_cdev(to_ccwdev(dev));
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 8587e423169e..bd89b032968a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1079,12 +1079,12 @@ static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
create_uid(conf, &uid);
if (strlen(uid.vduit) > 0)
- snprintf(print_uid, sizeof(*print_uid),
+ snprintf(print_uid, DASD_UID_STRLEN,
"%s.%s.%04x.%02x.%s",
uid.vendor, uid.serial, uid.ssid,
uid.real_unit_addr, uid.vduit);
else
- snprintf(print_uid, sizeof(*print_uid),
+ snprintf(print_uid, DASD_UID_STRLEN,
"%s.%s.%04x.%02x",
uid.vendor, uid.serial, uid.ssid,
uid.real_unit_addr);
@@ -1093,8 +1093,8 @@ static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
static int dasd_eckd_check_cabling(struct dasd_device *device,
void *conf_data, __u8 lpm)
{
+ char print_path_uid[DASD_UID_STRLEN], print_device_uid[DASD_UID_STRLEN];
struct dasd_eckd_private *private = device->private;
- char print_path_uid[60], print_device_uid[60];
struct dasd_conf path_conf;
path_conf.data = conf_data;
@@ -1293,9 +1293,9 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
struct dasd_conf_data *conf_data;
+ char print_uid[DASD_UID_STRLEN];
struct dasd_conf path_conf;
unsigned long flags;
- char print_uid[60];
int rc, pos;
opm = 0;
@@ -5855,8 +5855,8 @@ static void dasd_eckd_dump_sense(struct dasd_device *device,
static int dasd_eckd_reload_device(struct dasd_device *device)
{
struct dasd_eckd_private *private = device->private;
+ char print_uid[DASD_UID_STRLEN];
int rc, old_base;
- char print_uid[60];
struct dasd_uid uid;
unsigned long flags;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 0aa56351da72..8a4dbe9d7741 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -259,6 +259,10 @@ struct dasd_uid {
char vduit[33];
};
+#define DASD_UID_STRLEN ( /* vendor */ 3 + 1 + /* serial */ 14 + 1 + \
+ /* SSID */ 4 + 1 + /* unit addr */ 2 + 1 + \
+ /* vduit */ 32 + 1)
+
/*
* PPRC Status data
*/
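
The dasd hunks above replace several hard-coded 60-byte UID buffers with the shared DASD_UID_STRLEN macro. A quick standalone check that the macro (copied verbatim from the hunk) evaluates to the same 60 bytes:

#include <stdio.h>

#define DASD_UID_STRLEN ( /* vendor */ 3 + 1 + /* serial */ 14 + 1 + \
			  /* SSID */ 4 + 1 + /* unit addr */ 2 + 1 + \
			  /* vduit */ 32 + 1)

int main(void)
{
	char uid_string[DASD_UID_STRLEN];
	printf("%zu\n", sizeof(uid_string));   /* prints 60 */
	return 0;
}
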
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 06bcb6c78909..4b7ecd4fd431 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -411,13 +411,13 @@ removeseg:
segment_unload(entry->segment_name);
}
list_del(&dev_info->lh);
+ up_write(&dcssblk_devices_sem);
dax_remove_host(dev_info->gd);
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
del_gendisk(dev_info->gd);
put_disk(dev_info->gd);
- up_write(&dcssblk_devices_sem);
if (device_remove_file_self(dev, attr)) {
device_unregister(dev);
@@ -790,18 +790,17 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
}
list_del(&dev_info->lh);
+ /* unload all related segments */
+ list_for_each_entry(entry, &dev_info->seg_list, lh)
+ segment_unload(entry->segment_name);
+ up_write(&dcssblk_devices_sem);
+
dax_remove_host(dev_info->gd);
kill_dax(dev_info->dax_dev);
put_dax(dev_info->dax_dev);
del_gendisk(dev_info->gd);
put_disk(dev_info->gd);
- /* unload all related segments */
- list_for_each_entry(entry, &dev_info->seg_list, lh)
- segment_unload(entry->segment_name);
-
- up_write(&dcssblk_devices_sem);
-
device_unregister(&dev_info->dev);
put_device(&dev_info->dev);
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index a1fef666c9b0..99361618c31f 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -1021,8 +1021,8 @@ static unsigned int tty3215_write_room(struct tty_struct *tty)
/*
* String write routine for 3215 ttys
*/
-static int tty3215_write(struct tty_struct *tty,
- const unsigned char *buf, int count)
+static ssize_t tty3215_write(struct tty_struct *tty, const u8 *buf,
+ size_t count)
{
handle_write(tty->driver_data, buf, count);
return count;
@@ -1031,7 +1031,7 @@ static int tty3215_write(struct tty_struct *tty,
/*
* Put character routine for 3215 ttys
*/
-static int tty3215_put_char(struct tty_struct *tty, unsigned char ch)
+static int tty3215_put_char(struct tty_struct *tty, u8 ch)
{
struct raw3215_info *raw = tty->driver_data;
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index d9983550062d..363315fa1666 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -1803,8 +1803,8 @@ static void tty3270_do_write(struct tty3270 *tp, struct tty_struct *tty,
/*
* String write routine for 3270 ttys
*/
-static int tty3270_write(struct tty_struct *tty,
- const unsigned char *buf, int count)
+static ssize_t tty3270_write(struct tty_struct *tty, const u8 *buf,
+ size_t count)
{
struct tty3270 *tp;
@@ -1822,7 +1822,7 @@ static int tty3270_write(struct tty_struct *tty,
/*
* Put single characters to the ttys character buffer
*/
-static int tty3270_put_char(struct tty_struct *tty, unsigned char ch)
+static int tty3270_put_char(struct tty_struct *tty, u8 ch)
{
struct tty3270 *tp;
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 9fa92e45e0ee..7207a7f5842a 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -111,7 +111,7 @@ static inline unsigned long mon_mca_end(struct mon_msg *monmsg)
static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index)
{
- return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index);
+ return *((u8 *)__va(mon_mca_start(monmsg)) + monmsg->mca_offset + index);
}
static inline u32 mon_mca_size(struct mon_msg *monmsg)
@@ -121,12 +121,12 @@ static inline u32 mon_mca_size(struct mon_msg *monmsg)
static inline u32 mon_rec_start(struct mon_msg *monmsg)
{
- return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4));
+ return *((u32 *)(__va(mon_mca_start(monmsg)) + monmsg->mca_offset + 4));
}
static inline u32 mon_rec_end(struct mon_msg *monmsg)
{
- return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
+ return *((u32 *)(__va(mon_mca_start(monmsg)) + monmsg->mca_offset + 8));
}
static int mon_check_mca(struct mon_msg *monmsg)
@@ -392,8 +392,7 @@ static ssize_t mon_read(struct file *filp, char __user *data,
mce_start = mon_mca_start(monmsg) + monmsg->mca_offset;
if ((monmsg->pos >= mce_start) && (monmsg->pos < mce_start + 12)) {
count = min(count, (size_t) mce_start + 12 - monmsg->pos);
- ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
- count);
+ ret = copy_to_user(data, __va(monmsg->pos), count);
if (ret)
return -EFAULT;
monmsg->pos += count;
@@ -406,8 +405,7 @@ static ssize_t mon_read(struct file *filp, char __user *data,
if (monmsg->pos <= mon_rec_end(monmsg)) {
count = min(count, (size_t) mon_rec_end(monmsg) - monmsg->pos
+ 1);
- ret = copy_to_user(data, (void *) (unsigned long) monmsg->pos,
- count);
+ ret = copy_to_user(data, __va(monmsg->pos), count);
if (ret)
return -EFAULT;
monmsg->pos += count;
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index 971fbb52740b..892c18d2f87e 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -48,7 +48,7 @@ static struct sclp_buffer *sclp_ttybuf;
static struct timer_list sclp_tty_timer;
static struct tty_port sclp_port;
-static unsigned char sclp_tty_chars[SCLP_TTY_BUF_SIZE];
+static u8 sclp_tty_chars[SCLP_TTY_BUF_SIZE];
static unsigned short int sclp_tty_chars_count;
struct tty_driver *sclp_tty_driver;
@@ -168,7 +168,7 @@ sclp_tty_timeout(struct timer_list *unused)
/*
* Write a string to the sclp tty.
*/
-static int sclp_tty_write_string(const unsigned char *str, int count, int may_fail)
+static int sclp_tty_write_string(const u8 *str, int count, int may_fail)
{
unsigned long flags;
void *page;
@@ -229,8 +229,8 @@ out:
* tty device. The characters may come from user space or kernel space. This
* routine will return the number of characters actually accepted for writing.
*/
-static int
-sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+static ssize_t
+sclp_tty_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
if (sclp_tty_chars_count > 0) {
sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count, 0);
@@ -250,7 +250,7 @@ sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
* sclp_write() without final '\n' - will be written.
*/
static int
-sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
+sclp_tty_put_char(struct tty_struct *tty, u8 ch)
{
sclp_tty_chars[sclp_tty_chars_count++] = ch;
if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index a32f34a1c6d2..218ae604f737 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -462,8 +462,8 @@ out:
* user space or kernel space. This routine will return the
* number of characters actually accepted for writing.
*/
-static int
-sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
+static ssize_t
+sclp_vt220_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
return __sclp_vt220_write(buf, count, 1, 0, 1);
}
@@ -579,7 +579,7 @@ sclp_vt220_close(struct tty_struct *tty, struct file *filp)
* done stuffing characters into the driver.
*/
static int
-sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
+sclp_vt220_put_char(struct tty_struct *tty, u8 ch)
{
return __sclp_vt220_write(&ch, 1, 0, 0, 1);
}
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 34967e67249e..a108f2bf5b33 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -49,8 +49,6 @@ int register_adapter_interrupt(struct airq_struct *airq)
return -ENOMEM;
airq->flags |= AIRQ_PTR_ALLOCATED;
}
- if (!airq->lsi_mask)
- airq->lsi_mask = 0xff;
snprintf(dbf_txt, sizeof(dbf_txt), "rairq:%p", airq);
CIO_TRACE_EVENT(4, dbf_txt);
isc_register(airq->isc);
@@ -98,7 +96,7 @@ static irqreturn_t do_airq_interrupt(int irq, void *dummy)
head = &airq_lists[tpi_info->isc];
rcu_read_lock();
hlist_for_each_entry_rcu(airq, head, list)
- if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
+ if (*airq->lsi_ptr != 0)
airq->handler(airq, tpi_info);
rcu_read_unlock();
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 4b23c9f7f3e5..ce04caa7913f 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -366,7 +366,6 @@ static int zcdn_create(const char *name)
{
dev_t devt;
int i, rc = 0;
- char nodename[ZCDN_MAX_NAME];
struct zcdn_device *zcdndev;
if (mutex_lock_interruptible(&ap_perms_mutex))
@@ -407,13 +406,11 @@ static int zcdn_create(const char *name)
zcdndev->device.devt = devt;
zcdndev->device.groups = zcdn_dev_attr_groups;
if (name[0])
- strncpy(nodename, name, sizeof(nodename));
+ rc = dev_set_name(&zcdndev->device, "%s", name);
else
- snprintf(nodename, sizeof(nodename),
- ZCRYPT_NAME "_%d", (int)MINOR(devt));
- nodename[sizeof(nodename) - 1] = '\0';
- if (dev_set_name(&zcdndev->device, nodename)) {
- rc = -EINVAL;
+ rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt));
+ if (rc) {
+ kfree(zcdndev);
goto unlockout;
}
rc = device_register(&zcdndev->device);
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 02922768b129..ac67576301bf 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -250,7 +250,6 @@ static struct airq_info *new_airq_info(int index)
info->airq.handler = virtio_airq_handler;
info->summary_indicator_idx = index;
info->airq.lsi_ptr = get_summary_indicator(info);
- info->airq.lsi_mask = 0xff;
info->airq.isc = VIRTIO_AIRQ_ISC;
rc = register_adapter_interrupt(&info->airq);
if (rc) {
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4962ce989113..695a57d894cd 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -836,7 +836,7 @@ config SCSI_IMM
config SCSI_IZIP_EPP16
bool "ppa/imm option - Use slow (but safe) EPP-16"
- depends on SCSI_PPA || SCSI_IMM
+ depends on SCSI_IMM
help
EPP (Enhanced Parallel Port) is a standard for parallel ports which
allows them to act as expansion buses that can handle up to 64
diff --git a/drivers/scsi/aic7xxx/aicasm/Makefile b/drivers/scsi/aic7xxx/aicasm/Makefile
index 243adb0a38d1..a3f2357a3f08 100644
--- a/drivers/scsi/aic7xxx/aicasm/Makefile
+++ b/drivers/scsi/aic7xxx/aicasm/Makefile
@@ -61,23 +61,11 @@ $(OUTDIR)/aicdb.h:
clean:
rm -f $(clean-files)
-# Create a dependency chain in generated files
-# to avoid concurrent invocations of the single
-# rule that builds them all.
-$(OUTDIR)/aicasm_gram.c: $(OUTDIR)/aicasm_gram.h
$(OUTDIR)/aicasm_gram.c $(OUTDIR)/aicasm_gram.h: aicasm_gram.y
- $(YACC) $(YFLAGS) -b $(<:.y=) $<
- mv $(<:.y=).tab.c $(OUTDIR)/$(<:.y=.c)
- mv $(<:.y=).tab.h $(OUTDIR)/$(<:.y=.h)
-
-# Create a dependency chain in generated files
-# to avoid concurrent invocations of the single
-# rule that builds them all.
-$(OUTDIR)/aicasm_macro_gram.c: $(OUTDIR)/aicasm_macro_gram.h
+ $(YACC) $(YFLAGS) -b $(<:.y=) $< -o $(OUTDIR)/$(<:.y=.c)
+
$(OUTDIR)/aicasm_macro_gram.c $(OUTDIR)/aicasm_macro_gram.h: aicasm_macro_gram.y
- $(YACC) $(YFLAGS) -b $(<:.y=) -p mm $<
- mv $(<:.y=).tab.c $(OUTDIR)/$(<:.y=.c)
- mv $(<:.y=).tab.h $(OUTDIR)/$(<:.y=.h)
+ $(YACC) $(YFLAGS) -b $(<:.y=) -p mm $< -o $(OUTDIR)/$(<:.y=.c)
$(OUTDIR)/aicasm_scan.c: aicasm_scan.l
$(LEX) $(LFLAGS) -o $@ $<
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
index 975fcfcc0d8f..2b44eb5702eb 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -52,6 +52,7 @@
#include <stdlib.h>
#include <string.h>
#include <sysexits.h>
+#include <ctype.h>
#include "aicasm_symbol.h"
#include "aicasm.h"
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 3dd110143471..9dda296c0152 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -28,7 +28,7 @@ static int asd_get_user_sas_addr(struct asd_ha_struct *asd_ha)
if (asd_ha->hw_prof.sas_addr[0])
return 0;
- return sas_request_addr(asd_ha->sas_ha.core.shost,
+ return sas_request_addr(asd_ha->sas_ha.shost,
asd_ha->hw_prof.sas_addr);
}
@@ -72,10 +72,8 @@ static int asd_init_phy(struct asd_phy *phy)
struct asd_sas_phy *sas_phy = &phy->sas_phy;
sas_phy->enabled = 1;
- sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
sas_phy->tproto = 0;
- sas_phy->type = PHY_TYPE_PHYSICAL;
sas_phy->role = PHY_ROLE_INITIATOR;
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index f7f81f6c3fbf..8a3340d8d7ad 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -667,7 +667,6 @@ static int asd_register_sas_ha(struct asd_ha_struct *asd_ha)
}
asd_ha->sas_ha.sas_ha_name = (char *) asd_ha->name;
- asd_ha->sas_ha.lldd_module = THIS_MODULE;
asd_ha->sas_ha.sas_addr = &asd_ha->hw_prof.sas_addr[0];
for (i = 0; i < ASD_MAX_PHYS; i++) {
@@ -688,8 +687,8 @@ static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha)
err = sas_unregister_ha(&asd_ha->sas_ha);
- sas_remove_host(asd_ha->sas_ha.core.shost);
- scsi_host_put(asd_ha->sas_ha.core.shost);
+ sas_remove_host(asd_ha->sas_ha.shost);
+ scsi_host_put(asd_ha->sas_ha.shost);
kfree(asd_ha->sas_ha.sas_phy);
kfree(asd_ha->sas_ha.sas_port);
@@ -739,7 +738,7 @@ static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
asd_printk("found %s, device %s\n", asd_ha->name, pci_name(dev));
SHOST_TO_SAS_HA(shost) = &asd_ha->sas_ha;
- asd_ha->sas_ha.core.shost = shost;
+ asd_ha->sas_ha.shost = shost;
shost->transportt = aic94xx_transport_template;
shost->max_id = ~0;
shost->max_lun = ~0;
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 7f0208300110..4bfd03724ad6 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -388,14 +388,9 @@ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
flags |= data_dir_flags[task->data_dir];
scb->ata_task.ata_flags = flags;
- scb->ata_task.retry_count = task->ata_task.retry_count;
+ scb->ata_task.retry_count = 0;
- flags = 0;
- if (task->ata_task.set_affil_pol)
- flags |= SET_AFFIL_POLICY;
- if (task->ata_task.stp_affil_pol)
- flags |= STP_AFFIL_POLICY;
- scb->ata_task.flags = flags;
+ scb->ata_task.flags = 0;
}
ascb->tasklet_complete = asd_task_tasklet_complete;
@@ -485,9 +480,6 @@ static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8);
- if (task->ssp_task.enable_first_burst)
- scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK;
- scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3);
scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 2cd12c7f06c6..a66221c3b72f 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1715,14 +1715,14 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
arcmsr_flush_adapter_cache(acb);
}
-static int arcmsr_module_init(void)
+static int __init arcmsr_module_init(void)
{
int error = 0;
error = pci_register_driver(&arcmsr_pci_driver);
return error;
}
-static void arcmsr_module_exit(void)
+static void __exit arcmsr_module_exit(void)
{
pci_unregister_driver(&arcmsr_pci_driver);
}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 8aeaddc93b16..8d374ae863ba 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -450,6 +450,10 @@ int beiscsi_iface_set_param(struct Scsi_Host *shost,
}
nla_for_each_attr(attrib, data, dt_len, rm_len) {
+ /* ignore nla_type as it is never used */
+ if (nla_len(attrib) < sizeof(*iface_param))
+ return -EINVAL;
+
iface_param = nla_data(attrib);
if (iface_param->param_type != ISCSI_NET_PARAM)
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index a12d693065ce..1091aa428533 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -800,7 +800,7 @@ struct fc_rscn_pl_s {
u8 command;
u8 pagelen;
__be16 payldlen;
- struct fc_rscn_event_s event[1];
+ struct fc_rscn_event_s event[];
};
/*
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index 773c84af784c..52303e8c716d 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1051,7 +1051,7 @@ fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
rscn->event[0].format = FC_RSCN_FORMAT_PORTID;
rscn->event[0].portid = s_id;
- return sizeof(struct fc_rscn_pl_s);
+ return struct_size(rscn, event, 1);
}
u16
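
The bfa hunks above turn event[1] into a flexible array and switch the length calculation to struct_size(). A simplified sketch of why that matters, using stand-in types (the real structures use big-endian fields) and the open-coded equivalent of struct_size(rscn, event, 1): once 'event' is a flexible array, sizeof() of the struct no longer includes any elements.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct fc_rscn_event_s { uint8_t format; uint32_t portid; };

struct fc_rscn_pl_s {
	uint8_t  command;
	uint8_t  pagelen;
	uint16_t payldlen;
	struct fc_rscn_event_s event[];   /* flexible array member */
};

int main(void)
{
	/* open-coded equivalent of struct_size(rscn, event, 1) */
	size_t len = sizeof(struct fc_rscn_pl_s) + 1 * sizeof(struct fc_rscn_event_s);

	printf("header %zu + one event %zu = %zu bytes\n",
	       sizeof(struct fc_rscn_pl_s), sizeof(struct fc_rscn_event_s), len);
	return 0;
}
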
diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c
index 8f96049f62dd..5e7fb110bc3f 100644
--- a/drivers/scsi/elx/libefc_sli/sli4.c
+++ b/drivers/scsi/elx/libefc_sli/sli4.c
@@ -2317,12 +2317,8 @@ sli_xmit_bls_rsp64_wqe(struct sli4 *sli, void *buf,
SLI4_GENERIC_CONTEXT_VPI << SLI4_BLS_RSP_WQE_CT_SHFT;
bls->context_tag = cpu_to_le16(params->vpi);
- if (params->s_id != U32_MAX)
- bls->local_n_port_id_dword |=
- cpu_to_le32(params->s_id & 0x00ffffff);
- else
- bls->local_n_port_id_dword |=
- cpu_to_le32(params->s_id & 0x00ffffff);
+ bls->local_n_port_id_dword |=
+ cpu_to_le32(params->s_id & 0x00ffffff);
dw_ridflags = (dw_ridflags & ~SLI4_BLS_RSP_RID) |
(params->d_id & SLI4_BLS_RSP_RID);
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 5c8d1ba3f8f3..19eee108db02 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -319,16 +319,17 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *sel;
struct fcoe_fcf *fcf;
+ unsigned long flags;
mutex_lock(&fip->ctlr_mutex);
- spin_lock_bh(&fip->ctlr_lock);
+ spin_lock_irqsave(&fip->ctlr_lock, flags);
kfree_skb(fip->flogi_req);
fip->flogi_req = NULL;
list_for_each_entry(fcf, &fip->fcfs, list)
fcf->flogi_sent = 0;
- spin_unlock_bh(&fip->ctlr_lock);
+ spin_unlock_irqrestore(&fip->ctlr_lock, flags);
sel = fip->sel_fcf;
if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
@@ -699,6 +700,7 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
{
struct fc_frame *fp;
struct fc_frame_header *fh;
+ unsigned long flags;
u16 old_xid;
u8 op;
u8 mac[ETH_ALEN];
@@ -732,11 +734,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
op = FIP_DT_FLOGI;
if (fip->mode == FIP_MODE_VN2VN)
break;
- spin_lock_bh(&fip->ctlr_lock);
+ spin_lock_irqsave(&fip->ctlr_lock, flags);
kfree_skb(fip->flogi_req);
fip->flogi_req = skb;
fip->flogi_req_send = 1;
- spin_unlock_bh(&fip->ctlr_lock);
+ spin_unlock_irqrestore(&fip->ctlr_lock, flags);
schedule_work(&fip->timer_work);
return -EINPROGRESS;
case ELS_FDISC:
@@ -1705,10 +1707,11 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
+ unsigned long flags;
int error;
mutex_lock(&fip->ctlr_mutex);
- spin_lock_bh(&fip->ctlr_lock);
+ spin_lock_irqsave(&fip->ctlr_lock, flags);
LIBFCOE_FIP_DBG(fip, "re-sending FLOGI - reselect\n");
fcf = fcoe_ctlr_select(fip);
if (!fcf || fcf->flogi_sent) {
@@ -1719,7 +1722,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
fcoe_ctlr_solicit(fip, NULL);
error = fcoe_ctlr_flogi_send_locked(fip);
}
- spin_unlock_bh(&fip->ctlr_lock);
+ spin_unlock_irqrestore(&fip->ctlr_lock, flags);
mutex_unlock(&fip->ctlr_mutex);
return error;
}
@@ -1736,8 +1739,9 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
{
struct fcoe_fcf *fcf;
+ unsigned long flags;
- spin_lock_bh(&fip->ctlr_lock);
+ spin_lock_irqsave(&fip->ctlr_lock, flags);
fcf = fip->sel_fcf;
if (!fcf || !fip->flogi_req_send)
goto unlock;
@@ -1764,7 +1768,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
} else /* XXX */
LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
unlock:
- spin_unlock_bh(&fip->ctlr_lock);
+ spin_unlock_irqrestore(&fip->ctlr_lock, flags);
}
/**
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index d2eddad099a2..0420bfe9bd42 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -50,11 +50,6 @@ static irqreturn_t gvp11_intr(int irq, void *data)
static int gvp11_xfer_mask = 0;
-void gvp11_setup(char *str, int *ints)
-{
- gvp11_xfer_mask = ints[1];
-}
-
static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
{
struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 8f22ece957bd..9472b9743aef 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -787,7 +787,7 @@ static int hisi_sas_init_device(struct domain_device *device)
* However we don't need to issue a hard reset here for these
* reasons:
* a. When probing the device, libsas/libata already issues a
- * hard reset in sas_probe_sata() -> ata_sas_async_probe().
+ * hard reset in sas_probe_sata() -> ata_port_probe().
* Note that in hisi_sas_debug_I_T_nexus_reset() we take care
* to issue a hard reset by checking the dev status (== INIT).
* b. When resetting the controller, this is simply unnecessary.
@@ -1018,10 +1018,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
- sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
sas_phy->tproto = 0;
- sas_phy->type = PHY_TYPE_PHYSICAL;
sas_phy->role = PHY_ROLE_INITIATOR;
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
@@ -1065,23 +1063,18 @@ EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
- struct sas_ha_struct *sas_ha = sas_phy->ha;
- struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
struct hisi_sas_phy *phy = sas_phy->lldd_phy;
struct asd_sas_port *sas_port = sas_phy->port;
struct hisi_sas_port *port;
- unsigned long flags;
if (!sas_port)
return;
port = to_hisi_sas_port(sas_port);
- spin_lock_irqsave(&hisi_hba->lock, flags);
port->port_attached = 1;
port->id = phy->port_id;
phy->port = port;
sas_port->lldd_port = port;
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
@@ -2519,10 +2512,9 @@ int hisi_sas_probe(struct platform_device *pdev,
sha->sas_ha_name = DRV_NAME;
sha->dev = hisi_hba->dev;
- sha->lldd_module = THIS_MODULE;
sha->sas_addr = &hisi_hba->sas_addr[0];
sha->num_phys = hisi_hba->n_phy;
- sha->core.shost = hisi_hba->shost;
+ sha->shost = hisi_hba->shost;
for (i = 0; i < hisi_hba->n_phy; i++) {
sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
@@ -2564,12 +2556,12 @@ void hisi_sas_remove(struct platform_device *pdev)
{
struct sas_ha_struct *sha = platform_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
- struct Scsi_Host *shost = sha->core.shost;
+ struct Scsi_Host *shost = sha->shost;
del_timer_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
- sas_remove_host(sha->core.shost);
+ sas_remove_host(shost);
hisi_sas_free(hisi_hba);
scsi_host_put(shost);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 94fbbceddc2e..3c555579f9a1 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -960,7 +960,7 @@ static void prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
struct sas_tmf_task *tmf = slot->tmf;
int has_data = 0, priority = !!tmf;
- u8 *buf_cmd, fburst = 0;
+ u8 *buf_cmd;
u32 dw1, dw2;
/* create header */
@@ -1018,16 +1018,11 @@ static void prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
sizeof(struct ssp_frame_hdr);
- if (task->ssp_task.enable_first_burst) {
- fburst = (1 << 7);
- dw2 |= 1 << CMD_HDR_FIRST_BURST_OFF;
- }
hdr->dw2 = cpu_to_le32(dw2);
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (!tmf) {
- buf_cmd[9] = fburst | task->ssp_task.task_attr |
- (task->ssp_task.task_prio << 3);
+ buf_cmd[9] = task->ssp_task.task_attr;
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
} else {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 87d8e408ccd1..73b378837da7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1798,8 +1798,7 @@ static void prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (!tmf) {
- buf_cmd[9] = task->ssp_task.task_attr |
- (task->ssp_task.task_prio << 3);
+ buf_cmd[9] = task->ssp_task.task_attr;
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
} else {
@@ -2026,6 +2025,11 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
u16 dma_tx_err_type = le16_to_cpu(err_record->dma_tx_err_type);
u16 sipc_rx_err_type = le16_to_cpu(err_record->sipc_rx_err_type);
u32 dma_rx_err_type = le32_to_cpu(err_record->dma_rx_err_type);
+ struct hisi_sas_complete_v2_hdr *complete_queue =
+ hisi_hba->complete_hdr[slot->cmplt_queue];
+ struct hisi_sas_complete_v2_hdr *complete_hdr =
+ &complete_queue[slot->cmplt_queue_slot];
+ u32 dw0 = le32_to_cpu(complete_hdr->dw0);
int error = -1;
if (err_phase == 1) {
@@ -2310,7 +2314,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
break;
}
}
- hisi_sas_sata_done(task, slot);
+ if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
+ hisi_sas_sata_done(task, slot);
}
break;
default:
@@ -2443,7 +2448,8 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
{
ts->stat = SAS_SAM_STAT_GOOD;
- hisi_sas_sata_done(task, slot);
+ if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
+ hisi_sas_sata_done(task, slot);
break;
}
default:
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 20e1607c6282..bbb64ee6afd7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -1326,7 +1326,7 @@ static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (!tmf) {
- buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
+ buf_cmd[9] = ssp_task->task_attr;
memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
} else {
buf_cmd[10] = tmf->tmf;
@@ -2257,7 +2257,8 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
ts->stat = SAS_OPEN_REJECT;
ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
}
- hisi_sas_sata_done(task, slot);
+ if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
+ hisi_sas_sata_done(task, slot);
break;
case SAS_PROTOCOL_SMP:
ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
@@ -2384,7 +2385,8 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
ts->stat = SAS_SAM_STAT_GOOD;
- hisi_sas_sata_done(task, slot);
+ if (dw0 & CMPLT_HDR_RSPNS_XFRD_MSK)
+ hisi_sas_sata_done(task, slot);
break;
default:
ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
@@ -3104,21 +3106,25 @@ static const struct hisi_sas_debugfs_reg debugfs_ras_reg = {
static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
{
- set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
-
- hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
+ struct Scsi_Host *shost = hisi_hba->shost;
+ scsi_block_requests(shost);
wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000);
+ set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
hisi_sas_sync_cqs(hisi_hba);
+ hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
}
static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
{
+ struct Scsi_Host *shost = hisi_hba->shost;
+
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
(u32)((1ULL << hisi_hba->queue_count) - 1));
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
+ scsi_unblock_requests(shost);
}
static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
@@ -4576,7 +4582,7 @@ static int debugfs_fifo_data_v3_hw_show(struct seq_file *s, void *p)
debugfs_read_fifo_data_v3_hw(phy);
debugfs_show_row_32_v3_hw(s, 0, HISI_SAS_FIFO_DATA_DW_SIZE * 4,
- phy->fifo.rd_data);
+ (__le32 *)phy->fifo.rd_data);
return 0;
}
@@ -4950,7 +4956,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sha->sas_phy = arr_phy;
sha->sas_port = arr_port;
- sha->core.shost = shost;
+ sha->shost = shost;
sha->lldd_ha = hisi_hba;
shost->transportt = hisi_sas_stt;
@@ -4967,7 +4973,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sha->sas_ha_name = DRV_NAME;
sha->dev = dev;
- sha->lldd_module = THIS_MODULE;
sha->sas_addr = &hisi_hba->sas_addr[0];
sha->num_phys = hisi_hba->n_phy;
@@ -5055,14 +5060,14 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct device *dev = &pdev->dev;
struct sas_ha_struct *sha = dev_get_drvdata(dev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
- struct Scsi_Host *shost = sha->core.shost;
+ struct Scsi_Host *shost = sha->shost;
pm_runtime_get_noresume(dev);
del_timer_sync(&hisi_hba->timer);
sas_unregister_ha(sha);
flush_workqueue(hisi_hba->wq);
- sas_remove_host(sha->core.shost);
+ sas_remove_host(shost);
hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
hisi_sas_free(hisi_hba);
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 198edf03f929..d7f51b84f3c7 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -537,7 +537,7 @@ EXPORT_SYMBOL(scsi_host_alloc);
static int __scsi_host_match(struct device *dev, const void *data)
{
struct Scsi_Host *p;
- const unsigned short *hostnum = data;
+ const unsigned int *hostnum = data;
p = class_to_shost(dev);
return p->host_no == *hostnum;
@@ -554,7 +554,7 @@ static int __scsi_host_match(struct device *dev, const void *data)
* that scsi_host_get() took. The put_device() below dropped
* the reference from class_find_device().
**/
-struct Scsi_Host *scsi_host_lookup(unsigned short hostnum)
+struct Scsi_Host *scsi_host_lookup(unsigned int hostnum)
{
struct device *cdev;
struct Scsi_Host *shost = NULL;
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 6bc3f022630a..52388374cf31 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -306,7 +306,7 @@ static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
{
- return ihost->sas_ha.core.shost;
+ return ihost->sas_ha.shost;
}
#define for_each_isci_host(id, ihost, pdev) \
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index ac1e04b86d8f..6277162a028b 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -250,7 +250,6 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
return -ENOMEM;
sas_ha->sas_ha_name = DRV_NAME;
- sas_ha->lldd_module = THIS_MODULE;
sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0];
for (i = 0; i < SCI_MAX_PHYS; i++) {
@@ -264,9 +263,7 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
sas_ha->strict_wide_ports = 1;
- sas_register_ha(sas_ha);
-
- return 0;
+ return sas_register_ha(sas_ha);
}
static void isci_unregister(struct isci_host *isci_host)
@@ -575,7 +572,7 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
goto err_shost;
SHOST_TO_SAS_HA(shost) = &ihost->sas_ha;
- ihost->sas_ha.core.shost = shost;
+ ihost->sas_ha.shost = shost;
shost->transportt = isci_transport_template;
shost->max_id = ~0;
@@ -730,7 +727,7 @@ static int isci_resume(struct device *dev)
sas_prep_resume_ha(&ihost->sas_ha);
isci_host_init(ihost);
- isci_host_start(ihost->sas_ha.core.shost);
+ isci_host_start(ihost->sas_ha.shost);
wait_for_start(ihost);
sas_resume_ha(&ihost->sas_ha);
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index aa8787343e83..743a3c64b0da 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -1404,10 +1404,8 @@ void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
iphy->sas_phy.ha = &ihost->sas_ha;
iphy->sas_phy.lldd_phy = iphy;
iphy->sas_phy.enabled = 1;
- iphy->sas_phy.class = SAS;
iphy->sas_phy.iproto = SAS_PROTOCOL_ALL;
iphy->sas_phy.tproto = 0;
- iphy->sas_phy.type = PHY_TYPE_PHYSICAL;
iphy->sas_phy.role = PHY_ROLE_INITIATOR;
iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED;
iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN;
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 6370cdbfba08..a7b3243b471d 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -180,7 +180,7 @@ static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
cmd_iu->_r_a = 0;
cmd_iu->_r_b = 0;
cmd_iu->en_fburst = 0; /* unsupported */
- cmd_iu->task_prio = task->ssp_task.task_prio;
+ cmd_iu->task_prio = 0;
cmd_iu->task_attr = task->ssp_task.task_attr;
cmd_iu->_r_c = 0;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 77714a495cbb..12e2653846e3 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -162,7 +162,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct domain_device *dev = ap->private_data;
struct sas_ha_struct *sas_ha = dev->port->ha;
- struct Scsi_Host *host = sas_ha->core.shost;
+ struct Scsi_Host *host = sas_ha->shost;
struct sas_internal *i = to_sas_internal(host->transportt);
/* TODO: we should try to remove that unlock */
@@ -201,12 +201,14 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
task->data_dir = qc->dma_dir;
}
task->scatter = qc->sg;
- task->ata_task.retry_count = 1;
qc->lldd_task = task;
task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol);
task->ata_task.dma_xfer = ata_is_dma(qc->tf.protocol);
+ if (qc->flags & ATA_QCFLAG_RESULT_TF)
+ task->ata_task.return_fis_on_success = 1;
+
if (qc->scsicmd)
ASSIGN_SAS_TASK(qc->scsicmd, task);
@@ -235,7 +237,7 @@ static void sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
{
- return to_sas_internal(dev->port->ha->core.shost->transportt);
+ return to_sas_internal(dev->port->ha->shost->transportt);
}
static int sas_get_ata_command_set(struct domain_device *dev)
@@ -565,8 +567,6 @@ static struct ata_port_operations sas_sata_ops = {
.qc_prep = ata_noop_qc_prep,
.qc_issue = sas_ata_qc_issue,
.qc_fill_rtf = sas_ata_qc_fill_rtf,
- .port_start = ata_sas_port_start,
- .port_stop = ata_sas_port_stop,
.set_dmamode = sas_ata_set_dmamode,
.sched_eh = sas_ata_sched_eh,
.end_eh = sas_ata_end_eh,
@@ -584,7 +584,7 @@ static struct ata_port_info sata_port_info = {
int sas_ata_init(struct domain_device *found_dev)
{
struct sas_ha_struct *ha = found_dev->port->ha;
- struct Scsi_Host *shost = ha->core.shost;
+ struct Scsi_Host *shost = ha->shost;
struct ata_host *ata_host;
struct ata_port *ap;
int rc;
@@ -607,9 +607,6 @@ int sas_ata_init(struct domain_device *found_dev)
ap->private_data = found_dev;
ap->cbl = ATA_CBL_SATA;
ap->scsi_host = shost;
- rc = ata_sas_port_init(ap);
- if (rc)
- goto destroy_port;
rc = ata_sas_tport_add(ata_host->dev, ap);
if (rc)
@@ -621,7 +618,7 @@ int sas_ata_init(struct domain_device *found_dev)
return 0;
destroy_port:
- ata_sas_port_destroy(ap);
+ kfree(ap);
free_host:
ata_host_put(ata_host);
return rc;
@@ -655,7 +652,7 @@ void sas_probe_sata(struct asd_sas_port *port)
if (!dev_is_sata(dev))
continue;
- ata_sas_async_probe(dev->sata_dev.ap);
+ ata_port_probe(dev->sata_dev.ap);
}
mutex_unlock(&port->ha->disco_mutex);
@@ -822,7 +819,7 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie)
struct sas_ha_struct *ha = dev->port->ha;
sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n");
- ata_scsi_port_error_handler(ha->core.shost, ap);
+ ata_scsi_port_error_handler(ha->shost, ap);
sas_put_device(dev);
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 8c6afe724944..ff7b63b10aeb 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -170,7 +170,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
{
int res = 0;
struct sas_ha_struct *sas_ha = dev->port->ha;
- struct Scsi_Host *shost = sas_ha->core.shost;
+ struct Scsi_Host *shost = sas_ha->shost;
struct sas_internal *i = to_sas_internal(shost->transportt);
if (!i->dft->lldd_dev_found)
@@ -192,7 +192,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
void sas_notify_lldd_dev_gone(struct domain_device *dev)
{
struct sas_ha_struct *sas_ha = dev->port->ha;
- struct Scsi_Host *shost = sas_ha->core.shost;
+ struct Scsi_Host *shost = sas_ha->shost;
struct sas_internal *i = to_sas_internal(shost->transportt);
if (!i->dft->lldd_dev_gone)
@@ -234,7 +234,7 @@ static void sas_suspend_devices(struct work_struct *work)
struct domain_device *dev;
struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
- struct Scsi_Host *shost = port->ha->core.shost;
+ struct Scsi_Host *shost = port->ha->shost;
struct sas_internal *si = to_sas_internal(shost->transportt);
clear_bit(DISCE_SUSPEND, &port->disc.pending);
@@ -301,7 +301,7 @@ void sas_free_device(struct kref *kref)
if (dev_is_sata(dev) && dev->sata_dev.ap) {
ata_sas_tport_delete(dev->sata_dev.ap);
- ata_sas_port_destroy(dev->sata_dev.ap);
+ kfree(dev->sata_dev.ap);
ata_host_put(dev->sata_dev.ata_host);
dev->sata_dev.ata_host = NULL;
dev->sata_dev.ap = NULL;
@@ -373,7 +373,7 @@ static bool sas_abort_cmd(struct request *req, void *data)
static void sas_abort_device_scsi_cmds(struct domain_device *dev)
{
struct sas_ha_struct *sas_ha = dev->port->ha;
- struct Scsi_Host *shost = sas_ha->core.shost;
+ struct Scsi_Host *shost = sas_ha->shost;
if (dev_is_expander(dev->dev_type))
return;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index adcac57aaee6..a2204674b680 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -37,7 +37,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
int res, retry;
struct sas_task *task = NULL;
struct sas_internal *i =
- to_sas_internal(dev->port->ha->core.shost->transportt);
+ to_sas_internal(dev->port->ha->shost->transportt);
struct sas_ha_struct *ha = dev->port->ha;
pm_runtime_get_sync(ha->dev);
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index 32cdc969b736..2ecb8535634c 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -114,7 +114,7 @@ static int sas_host_smp_write_gpio(struct sas_ha_struct *sas_ha, u8 *resp_data,
u8 reg_type, u8 reg_index, u8 reg_count,
u8 *req_data)
{
- struct sas_internal *i = to_sas_internal(sas_ha->core.shost->transportt);
+ struct sas_internal *i = to_sas_internal(sas_ha->shost->transportt);
int written;
if (i->dft->lldd_write_gpio == NULL) {
@@ -182,7 +182,7 @@ static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
enum sas_linkrate max, u8 *resp_data)
{
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
struct sas_phy_linkrates rates;
struct asd_sas_phy *asd_phy;
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index f2c05ebeb72f..8586dc79f2a0 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -183,7 +183,7 @@ static int sas_get_linkerrors(struct sas_phy *phy)
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
return i->dft->lldd_control_phy(asd_phy, PHY_FUNC_GET_EVENTS, NULL);
}
@@ -232,7 +232,7 @@ static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
if (!hard_reset && sas_try_ata_reset(asd_phy) == 0)
return 0;
@@ -266,7 +266,7 @@ int sas_phy_enable(struct sas_phy *phy, int enable)
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
if (enable)
ret = transport_sas_phy_reset(phy, 0);
@@ -303,7 +303,7 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset)
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
} else {
@@ -339,7 +339,7 @@ int sas_set_phy_speed(struct sas_phy *phy,
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE,
rates);
@@ -438,7 +438,7 @@ static void _sas_resume_ha(struct sas_ha_struct *ha, bool drain)
/* all phys are back up or timed out, turn on i/o so we can
* flush out disks that did not return
*/
- scsi_unblock_requests(ha->core.shost);
+ scsi_unblock_requests(ha->shost);
if (drain)
sas_drain_work(ha);
clear_bit(SAS_HA_RESUMING, &ha->state);
@@ -468,7 +468,7 @@ void sas_suspend_ha(struct sas_ha_struct *ha)
int i;
sas_disable_events(ha);
- scsi_block_requests(ha->core.shost);
+ scsi_block_requests(ha->shost);
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_port *port = ha->sas_port[i];
@@ -641,7 +641,7 @@ struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy,
struct asd_sas_event *event;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
event = kmem_cache_zalloc(sas_event_cache, gfp_flags);
if (!event)
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 6f593fa69b58..a6dc7dc07fce 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -41,13 +41,7 @@ struct sas_phy_data {
void sas_scsi_recover_host(struct Scsi_Host *shost);
-int sas_show_class(enum sas_class class, char *buf);
-int sas_show_proto(enum sas_protocol proto, char *buf);
-int sas_show_linkrate(enum sas_linkrate linkrate, char *buf);
-int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
-
int sas_register_phys(struct sas_ha_struct *sas_ha);
-void sas_unregister_phys(struct sas_ha_struct *sas_ha);
struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy, gfp_t gfp_flags);
void sas_free_event(struct asd_sas_event *event);
@@ -91,7 +85,6 @@ int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
u8 *sas_addr, enum sas_device_type *type);
int sas_try_ata_reset(struct asd_sas_phy *phy);
-void sas_hae_reset(struct work_struct *work);
void sas_free_device(struct kref *kref);
void sas_destruct_devices(struct asd_sas_port *port);
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index a0d592d11dfb..57494ac97076 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -38,7 +38,7 @@ static void sas_phye_oob_error(struct work_struct *work)
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
sas_deform_port(phy, 1);
@@ -66,7 +66,7 @@ static void sas_phye_spinup_hold(struct work_struct *work)
struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
phy->error = 0;
i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
@@ -95,7 +95,7 @@ static void sas_phye_shutdown(struct work_struct *work)
struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
if (phy->enabled) {
int ret;
@@ -131,7 +131,7 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
spin_lock_init(&phy->sas_prim_lock);
phy->frame_rcvd_size = 0;
- phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, i);
+ phy->phy = sas_phy_alloc(&sas_ha->shost->shost_gendev, i);
if (!phy->phy)
return -ENOMEM;
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 11599c0e3fc3..e3f2ed913419 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -28,7 +28,7 @@ static void sas_resume_port(struct asd_sas_phy *phy)
struct domain_device *dev, *n;
struct asd_sas_port *port = phy->port;
struct sas_ha_struct *sas_ha = phy->ha;
- struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt);
+ struct sas_internal *si = to_sas_internal(sas_ha->shost->transportt);
if (si->dft->lldd_port_formed)
si->dft->lldd_port_formed(phy);
@@ -83,7 +83,6 @@ static void sas_form_port_add_phy(struct asd_sas_port *port,
memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE);
if (*(u64 *)port->attached_sas_addr == 0) {
- port->class = phy->class;
memcpy(port->attached_sas_addr, phy->attached_sas_addr,
SAS_ADDR_SIZE);
port->iproto = phy->iproto;
@@ -109,7 +108,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
struct asd_sas_port *port = phy->port;
struct domain_device *port_dev = NULL;
struct sas_internal *si =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
unsigned long flags;
if (port) {
@@ -212,7 +211,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
struct sas_internal *si =
- to_sas_internal(sas_ha->core.shost->transportt);
+ to_sas_internal(sas_ha->shost->transportt);
struct domain_device *dev;
unsigned long flags;
@@ -249,7 +248,6 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
INIT_LIST_HEAD(&port->phy_list);
memset(port->sas_addr, 0, SAS_ADDR_SIZE);
memset(port->attached_sas_addr, 0, SAS_ADDR_SIZE);
- port->class = 0;
port->iproto = 0;
port->tproto = 0;
port->oob_mode = 0;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 94c5f14f3c16..0c103f4523b8 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -142,7 +142,6 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
task->dev = dev;
task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
- task->ssp_task.retry_count = 1;
int_to_scsilun(cmd->device->lun, &lun);
memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
@@ -279,7 +278,7 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
unsigned long flags;
int i, res;
struct sas_internal *si =
- to_sas_internal(task->dev->port->ha->core.shost->transportt);
+ to_sas_internal(task->dev->port->ha->shost->transportt);
for (i = 0; i < 5; i++) {
pr_notice("%s: aborting task 0x%p\n", __func__, task);
@@ -327,7 +326,7 @@ static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
int res = TMF_RESP_FUNC_FAILED;
struct scsi_lun lun;
struct sas_internal *i =
- to_sas_internal(dev->port->ha->core.shost->transportt);
+ to_sas_internal(dev->port->ha->shost->transportt);
int_to_scsilun(cmd->device->lun, &lun);
@@ -355,7 +354,7 @@ static int sas_recover_I_T(struct domain_device *dev)
{
int res = TMF_RESP_FUNC_FAILED;
struct sas_internal *i =
- to_sas_internal(dev->port->ha->core.shost->transportt);
+ to_sas_internal(dev->port->ha->shost->transportt);
pr_notice("I_T nexus reset for dev %016llx\n",
SAS_ADDR(dev->sas_addr));
@@ -410,7 +409,7 @@ static void sas_wait_eh(struct domain_device *dev)
spin_unlock_irq(&ha->lock);
/* make sure SCSI EH is complete */
- if (scsi_host_in_recovery(ha->core.shost)) {
+ if (scsi_host_in_recovery(ha->shost)) {
msleep(10);
goto retry;
}
@@ -440,7 +439,7 @@ static int sas_queue_reset(struct domain_device *dev, int reset_type,
set_bit(SAS_DEV_EH_PENDING, &dev->state);
set_bit(reset_type, &dev->state);
int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
- scsi_schedule_eh(ha->core.shost);
+ scsi_schedule_eh(ha->shost);
}
spin_unlock_irq(&ha->lock);
@@ -925,7 +924,7 @@ static int sas_execute_internal_abort(struct domain_device *device,
unsigned int qid, void *data)
{
struct sas_ha_struct *ha = device->port->ha;
- struct sas_internal *i = to_sas_internal(ha->core.shost->transportt);
+ struct sas_internal *i = to_sas_internal(ha->shost->transportt);
struct sas_task *task = NULL;
int res, retry;
@@ -1015,7 +1014,7 @@ int sas_execute_tmf(struct domain_device *device, void *parameter,
{
struct sas_task *task;
struct sas_internal *i =
- to_sas_internal(device->port->ha->core.shost->transportt);
+ to_sas_internal(device->port->ha->shost->transportt);
int res, retry;
for (retry = 0; retry < TASK_RETRY; retry++) {
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 9a8963684369..af15f7a22d25 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -309,7 +309,6 @@ struct lpfc_hba;
#define LPFC_VMID_TIMER 300 /* timer interval in seconds */
#define LPFC_MAX_VMID_SIZE 256
-#define LPFC_COMPRESS_VMID_SIZE 16
union lpfc_vmid_io_tag {
u32 app_id; /* App Id vmid */
@@ -667,7 +666,7 @@ struct lpfc_vport {
uint32_t cfg_first_burst_size;
uint32_t dev_loss_tmo_changed;
/* VMID parameters */
- u8 lpfc_vmid_host_uuid[LPFC_COMPRESS_VMID_SIZE];
+ u8 lpfc_vmid_host_uuid[16];
u32 max_vmid; /* maximum VMIDs allowed per port */
u32 cur_vmid_cnt; /* Current VMID count */
#define LPFC_MIN_VMID 4
@@ -872,6 +871,7 @@ enum lpfc_irq_chann_mode {
enum lpfc_hba_bit_flags {
FABRIC_COMANDS_BLOCKED,
HBA_PCI_ERR,
+ MBX_TMO_ERR,
};
struct lpfc_hba {
@@ -1709,6 +1709,25 @@ lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
return cpu_it;
}
/**
+ * lpfc_next_present_cpu - Finds next present CPU after n
+ * @n: the cpu prior to search
+ *
+ * Note: If there is no next present cpu, fall back to the first present cpu.
+ *
+ **/
+static inline unsigned int lpfc_next_present_cpu(int n)
+{
+ unsigned int cpu;
+
+ cpu = cpumask_next(n, cpu_present_mask);
+
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(cpu_present_mask);
+
+ return cpu;
+}
+
+/**
* lpfc_sli4_mod_hba_eq_delay - update EQ delay
* @phba: Pointer to HBA context object.
* @q: The Event Queue to update.
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 21c7ecd3ede5..b1c9107d3408 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2127,11 +2127,12 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
uint32_t *mrpi, uint32_t *arpi,
uint32_t *mvpi, uint32_t *avpi)
{
- struct lpfc_mbx_read_config *rd_config;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
int rc = 0;
- uint32_t max_vpi;
+ struct lpfc_sli4_hba *sli4_hba;
+ struct lpfc_max_cfg_param *max_cfg_param;
+ u16 rsrc_ext_cnt, rsrc_ext_size, max_vpi;
/*
* prevent udev from issuing mailbox commands until the port is
@@ -2167,31 +2168,65 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
}
if (phba->sli_rev == LPFC_SLI_REV4) {
- rd_config = &pmboxq->u.mqe.un.rd_config;
- if (mrpi)
- *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
- if (arpi)
- *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
- phba->sli4_hba.max_cfg_param.rpi_used;
- if (mxri)
- *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
- if (axri)
- *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
- phba->sli4_hba.max_cfg_param.xri_used;
+ sli4_hba = &phba->sli4_hba;
+ max_cfg_param = &sli4_hba->max_cfg_param;
+
+ /* Normally, extents are not used */
+ if (!phba->sli4_hba.extents_in_use) {
+ if (mrpi)
+ *mrpi = max_cfg_param->max_rpi;
+ if (mxri)
+ *mxri = max_cfg_param->max_xri;
+ if (mvpi) {
+ max_vpi = max_cfg_param->max_vpi;
+
+ /* Limit the max we support */
+ if (max_vpi > LPFC_MAX_VPI)
+ max_vpi = LPFC_MAX_VPI;
+ *mvpi = max_vpi;
+ }
+ } else { /* Extents in use */
+ if (mrpi) {
+ if (lpfc_sli4_get_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_RPI,
+ &rsrc_ext_cnt,
+ &rsrc_ext_size)) {
+ rc = 0;
+ goto free_pmboxq;
+ }
+
+ *mrpi = rsrc_ext_cnt * rsrc_ext_size;
+ }
- /* Account for differences with SLI-3. Get vpi count from
- * mailbox data and subtract one for max vpi value.
- */
- max_vpi = (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) > 0) ?
- (bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) - 1) : 0;
+ if (mxri) {
+ if (lpfc_sli4_get_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_XRI,
+ &rsrc_ext_cnt,
+ &rsrc_ext_size)) {
+ rc = 0;
+ goto free_pmboxq;
+ }
- /* Limit the max we support */
- if (max_vpi > LPFC_MAX_VPI)
- max_vpi = LPFC_MAX_VPI;
- if (mvpi)
- *mvpi = max_vpi;
- if (avpi)
- *avpi = max_vpi - phba->sli4_hba.max_cfg_param.vpi_used;
+ *mxri = rsrc_ext_cnt * rsrc_ext_size;
+ }
+
+ if (mvpi) {
+ if (lpfc_sli4_get_avail_extnt_rsrc(phba,
+ LPFC_RSC_TYPE_FCOE_VPI,
+ &rsrc_ext_cnt,
+ &rsrc_ext_size)) {
+ rc = 0;
+ goto free_pmboxq;
+ }
+
+ max_vpi = rsrc_ext_cnt * rsrc_ext_size;
+
+ /* Limit the max we support */
+ if (max_vpi > LPFC_MAX_VPI)
+ max_vpi = LPFC_MAX_VPI;
+ *mvpi = max_vpi;
+ }
+ }
} else {
if (mrpi)
*mrpi = pmb->un.varRdConfig.max_rpi;
@@ -2212,8 +2247,12 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
}
}
+ /* Success */
+ rc = 1;
+
+free_pmboxq:
mempool_free(pmboxq, phba->mbox_mem_pool);
- return 1;
+ return rc;
}
/**
@@ -2265,10 +2304,19 @@ lpfc_used_rpi_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- uint32_t cnt, acnt;
+ struct lpfc_sli4_hba *sli4_hba;
+ struct lpfc_max_cfg_param *max_cfg_param;
+ u32 cnt = 0, acnt = 0;
- if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
- return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ sli4_hba = &phba->sli4_hba;
+ max_cfg_param = &sli4_hba->max_cfg_param;
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ max_cfg_param->rpi_used);
+ } else {
+ if (lpfc_get_hba_info(phba, NULL, NULL, &cnt, &acnt, NULL, NULL))
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ }
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
@@ -2321,10 +2369,19 @@ lpfc_used_xri_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- uint32_t cnt, acnt;
+ struct lpfc_sli4_hba *sli4_hba;
+ struct lpfc_max_cfg_param *max_cfg_param;
+ u32 cnt = 0, acnt = 0;
- if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
- return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ sli4_hba = &phba->sli4_hba;
+ max_cfg_param = &sli4_hba->max_cfg_param;
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ max_cfg_param->xri_used);
+ } else {
+ if (lpfc_get_hba_info(phba, &cnt, &acnt, NULL, NULL, NULL, NULL))
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ }
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
@@ -2377,10 +2434,19 @@ lpfc_used_vpi_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
- uint32_t cnt, acnt;
+ struct lpfc_sli4_hba *sli4_hba;
+ struct lpfc_max_cfg_param *max_cfg_param;
+ u32 cnt = 0, acnt = 0;
- if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
- return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ sli4_hba = &phba->sli4_hba;
+ max_cfg_param = &sli4_hba->max_cfg_param;
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ max_cfg_param->vpi_used);
+ } else {
+ if (lpfc_get_hba_info(phba, NULL, NULL, NULL, NULL, &cnt, &acnt))
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (cnt - acnt));
+ }
return scnprintf(buf, PAGE_SIZE, "Unknown\n");
}
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 474834f313a7..baae1f8279e0 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1557,7 +1557,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK)
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
- lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_DISCOVERY | LOG_NODE,
"3064 Setting ndlp x%px, DID x%06x "
"with FC4 x%08x, Data: x%08x x%08x "
"%d\n",
@@ -1568,14 +1569,21 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE &&
ndlp->nlp_fc4_type) {
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
-
- lpfc_nlp_set_state(vport, ndlp,
- NLP_STE_PRLI_ISSUE);
- lpfc_issue_els_prli(vport, ndlp, 0);
+ /* This is a fabric topology so if discovery
+ * started with an unsolicited PLOGI, don't
+ * send a PRLI. Targets don't issue PLOGI or
+ * PRLI when acting as a target. Likely this is
+ * an initiator function.
+ */
+ if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
+ lpfc_nlp_set_state(vport, ndlp,
+ NLP_STE_PRLI_ISSUE);
+ lpfc_issue_els_prli(vport, ndlp, 0);
+ }
} else if (!ndlp->nlp_fc4_type) {
/* If fc4 type is still unknown, then LOGO */
lpfc_printf_vlog(vport, KERN_INFO,
- LOG_DISCOVERY,
+ LOG_DISCOVERY | LOG_NODE,
"6443 Sending LOGO ndlp x%px,"
"DID x%06x with fc4_type: "
"x%08x, state: %d\n",
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2bad9954c355..54e47f268235 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1041,7 +1041,7 @@ stop_rr_fcf_flogi:
!(ndlp->fc4_xpt_flags & SCSI_XPT_REGD))
lpfc_nlp_put(ndlp);
- lpfc_printf_vlog(vport, KERN_WARNING, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
"0150 FLOGI failure Status:x%x/x%x "
"xri x%x TMO:x%x refcnt %d\n",
ulp_status, ulp_word4, cmdiocb->sli4_xritag,
@@ -1091,7 +1091,6 @@ stop_rr_fcf_flogi:
if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
lpfc_issue_reg_vfi(vport);
- lpfc_nlp_put(ndlp);
goto out;
}
goto flogifail;
@@ -1332,7 +1331,8 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (phba->cfg_vmid_priority_tagging) {
sp->cmn.priority_tagging = 1;
/* lpfc_vmid_host_uuid is combination of wwpn and wwnn */
- if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid)) {
+ if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0,
+ sizeof(vport->lpfc_vmid_host_uuid))) {
memcpy(vport->lpfc_vmid_host_uuid, phba->wwpn,
sizeof(phba->wwpn));
memcpy(&vport->lpfc_vmid_host_uuid[8], phba->wwnn,
@@ -2377,10 +2377,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* PRLI failed */
lpfc_printf_vlog(vport, mode, loglevel,
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
- "data: x%x x%x\n",
+ "data: x%x x%x x%x\n",
ndlp->nlp_DID, ulp_status,
ulp_word4, ndlp->nlp_state,
- ndlp->fc4_prli_sent);
+ ndlp->fc4_prli_sent, ndlp->nlp_flag);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if (!lpfc_error_lost_link(vport, ulp_status, ulp_word4))
@@ -2391,14 +2391,16 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* mismatch typically caused by an RSCN. Skip any
* processing to allow recovery.
*/
- if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
- ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) {
+ if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) ||
+ (ndlp->nlp_state == NLP_STE_NPR_NODE &&
+ ndlp->nlp_flag & NLP_DELAY_TMO)) {
lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
- "2784 PRLI cmpl: state mismatch "
+ "2784 PRLI cmpl: Allow Node recovery "
"DID x%06x nstate x%x nflag x%x\n",
ndlp->nlp_DID, ndlp->nlp_state,
ndlp->nlp_flag);
- goto out;
+ goto out;
}
/*
@@ -6166,11 +6168,25 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
npr->TaskRetryIdReq = 1;
}
npr->acceptRspCode = PRLI_REQ_EXECUTED;
- npr->estabImagePair = 1;
+
+ /* Set image pair for complementary pairs only. */
+ if (ndlp->nlp_type & NLP_FCP_TARGET)
+ npr->estabImagePair = 1;
+ else
+ npr->estabImagePair = 0;
npr->readXferRdyDis = 1;
npr->ConfmComplAllowed = 1;
npr->prliType = PRLI_FCP_TYPE;
npr->initiatorFunc = 1;
+
+ /* Xmit PRLI ACC response tag <ulpIoTag> */
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
+ "6014 FCP issue PRLI ACC imgpair %d "
+ "retry %d task %d\n",
+ npr->estabImagePair,
+ npr->Retry, npr->TaskRetryIdReq);
+
} else if (prli_fc4_req == PRLI_NVME_TYPE) {
/* Respond with an NVME PRLI Type */
npr_nvme = (struct lpfc_nvme_prli *) pcmd;
@@ -9588,11 +9604,13 @@ void
lpfc_els_flush_cmd(struct lpfc_vport *vport)
{
LIST_HEAD(abort_list);
+ LIST_HEAD(cancel_list);
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *tmp_iocb, *piocb;
u32 ulp_command;
unsigned long iflags = 0;
+ bool mbx_tmo_err;
lpfc_fabric_abort_vport(vport);
@@ -9614,15 +9632,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
if (phba->sli_rev == LPFC_SLI_REV4)
spin_lock(&pring->ring_lock);
+ mbx_tmo_err = test_bit(MBX_TMO_ERR, &phba->bit_flags);
/* First we need to issue aborts to outstanding cmds on txcmpl */
list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
- if (piocb->cmd_flag & LPFC_IO_LIBDFC)
+ if (piocb->cmd_flag & LPFC_IO_LIBDFC && !mbx_tmo_err)
continue;
if (piocb->vport != vport)
continue;
- if (piocb->cmd_flag & LPFC_DRIVER_ABORTED)
+ if (piocb->cmd_flag & LPFC_DRIVER_ABORTED && !mbx_tmo_err)
continue;
/* On the ELS ring we can have ELS_REQUESTs or
@@ -9641,8 +9660,8 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
*/
if (phba->link_state == LPFC_LINK_DOWN)
piocb->cmd_cmpl = lpfc_cmpl_els_link_down;
- }
- if (ulp_command == CMD_GEN_REQUEST64_CR)
+ } else if (ulp_command == CMD_GEN_REQUEST64_CR ||
+ mbx_tmo_err)
list_add_tail(&piocb->dlist, &abort_list);
}
@@ -9654,11 +9673,19 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
spin_lock_irqsave(&phba->hbalock, iflags);
list_del_init(&piocb->dlist);
- lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
+ if (mbx_tmo_err)
+ list_move_tail(&piocb->list, &cancel_list);
+ else
+ lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
+
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
- /* Make sure HBA is alive */
- lpfc_issue_hb_tmo(phba);
+ if (!list_empty(&cancel_list))
+ lpfc_sli_cancel_iocbs(phba, &cancel_list, IOSTAT_LOCAL_REJECT,
+ IOERR_SLI_ABORTED);
+ else
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
if (!list_empty(&abort_list))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -12331,9 +12358,10 @@ lpfc_vmid_uvem(struct lpfc_vport *vport,
elsiocb->vmid_tag.vmid_context = vmid_context;
pcmd = (u8 *)elsiocb->cmd_dmabuf->virt;
- if (uuid_is_null((uuid_t *)vport->lpfc_vmid_host_uuid))
+ if (!memchr_inv(vport->lpfc_vmid_host_uuid, 0,
+ sizeof(vport->lpfc_vmid_host_uuid)))
memcpy(vport->lpfc_vmid_host_uuid, vmid->host_vmid,
- LPFC_COMPRESS_VMID_SIZE);
+ sizeof(vport->lpfc_vmid_host_uuid));
*((u32 *)(pcmd)) = ELS_CMD_UVEM;
len = (u32 *)(pcmd + 4);
@@ -12343,13 +12371,13 @@ lpfc_vmid_uvem(struct lpfc_vport *vport,
vem_id_desc->tag = be32_to_cpu(VEM_ID_DESC_TAG);
vem_id_desc->length = be32_to_cpu(LPFC_UVEM_VEM_ID_DESC_SIZE);
memcpy(vem_id_desc->vem_id, vport->lpfc_vmid_host_uuid,
- LPFC_COMPRESS_VMID_SIZE);
+ sizeof(vem_id_desc->vem_id));
inst_desc = (struct instantiated_ve_desc *)(pcmd + 32);
inst_desc->tag = be32_to_cpu(INSTANTIATED_VE_DESC_TAG);
inst_desc->length = be32_to_cpu(LPFC_UVEM_VE_MAP_DESC_SIZE);
memcpy(inst_desc->global_vem_id, vmid->host_vmid,
- LPFC_COMPRESS_VMID_SIZE);
+ sizeof(inst_desc->global_vem_id));
bf_set(lpfc_instantiated_nport_id, inst_desc, vport->fc_myDID);
bf_set(lpfc_instantiated_local_id, inst_desc,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index fdd7f69d87ef..51afb60859eb 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -169,29 +169,44 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
"3181 dev_loss_callbk x%06x, rport x%px flg x%x "
- "load_flag x%x refcnt %d state %d xpt x%x\n",
+ "load_flag x%x refcnt %u state %d xpt x%x\n",
ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
vport->load_flag, kref_read(&ndlp->kref),
ndlp->nlp_state, ndlp->fc4_xpt_flags);
- /* Don't schedule a worker thread event if the vport is going down.
- * The teardown process cleans up the node via lpfc_drop_node.
- */
+ /* Don't schedule a worker thread event if the vport is going down. */
if (vport->load_flag & FC_UNLOADING) {
- ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL;
+ spin_lock_irqsave(&ndlp->lock, iflags);
ndlp->rport = NULL;
- ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
- /* clear the NLP_XPT_REGD if the node is not registered
- * with nvme-fc
+ /* The scsi_transport is done with the rport so lpfc cannot
+ * call to unregister. Remove the scsi transport reference
+ * and clean up the SCSI transport node details.
*/
- if (ndlp->fc4_xpt_flags == NLP_XPT_REGD)
- ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ if (ndlp->fc4_xpt_flags & (NLP_XPT_REGD | SCSI_XPT_REGD)) {
+ ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+
+ /* NVME transport-registered rports need the
+ * NLP_XPT_REGD flag to complete an unregister.
+ */
+ if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD))
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_nlp_put(ndlp);
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ }
- /* Remove the node reference from remote_port_add now.
- * The driver will not call remote_port_delete.
+ /* Only 1 thread can drop the initial node reference. If
+ * another thread has set NLP_DROPPED, this thread is done.
*/
- lpfc_nlp_put(ndlp);
+ if (!(ndlp->nlp_flag & NLP_DROPPED)) {
+ ndlp->nlp_flag |= NLP_DROPPED;
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
+ lpfc_nlp_put(ndlp);
+ spin_lock_irqsave(&ndlp->lock, iflags);
+ }
+
+ spin_unlock_irqrestore(&ndlp->lock, iflags);
return;
}
@@ -4686,7 +4701,8 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
spin_lock_irqsave(&ndlp->lock, iflags);
if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
spin_unlock_irqrestore(&ndlp->lock, iflags);
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
"0999 %s Not regd: ndlp x%px rport x%px DID "
"x%x FLG x%x XPT x%x\n",
__func__, ndlp, ndlp->rport, ndlp->nlp_DID,
@@ -4702,9 +4718,10 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
vport->phba->nport_event_cnt++;
lpfc_unregister_remote_port(ndlp);
} else if (!ndlp->rport) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
"1999 %s NDLP in devloss x%px DID x%x FLG x%x"
- " XPT x%x refcnt %d\n",
+ " XPT x%x refcnt %u\n",
__func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
ndlp->fc4_xpt_flags,
kref_read(&ndlp->kref));
@@ -4954,22 +4971,29 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
/*
* Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
- * be used if we wish to issue the "last" lpfc_nlp_put() to remove
- * the ndlp from the vport. The ndlp marked as UNUSED on the list
- * until ALL other outstanding threads have completed. We check
- * that the ndlp not already in the UNUSED state before we proceed.
+ * be used when lpfc wants to remove the "last" lpfc_nlp_put() to
+ * release the ndlp from the vport when conditions are correct.
*/
if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
return;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
- ndlp->nlp_flag |= NLP_DROPPED;
if (vport->phba->sli_rev == LPFC_SLI_REV4) {
lpfc_cleanup_vports_rrqs(vport, ndlp);
lpfc_unreg_rpi(vport, ndlp);
}
- lpfc_nlp_put(ndlp);
- return;
+ /* NLP_DROPPED means another thread already removed the initial
+ * reference from lpfc_nlp_init. If set, don't drop it again and
+ * introduce an imbalance.
+ */
+ spin_lock_irq(&ndlp->lock);
+ if (!(ndlp->nlp_flag & NLP_DROPPED)) {
+ ndlp->nlp_flag |= NLP_DROPPED;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_nlp_put(ndlp);
+ return;
+ }
+ spin_unlock_irq(&ndlp->lock);
}
/*
@@ -5757,8 +5781,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
(NLP_FCP_TARGET | NLP_NVME_TARGET)))
return NULL;
- lpfc_disc_state_machine(vport, ndlp, NULL,
- NLP_EVT_DEVICE_RECOVERY);
+ if (ndlp->nlp_state > NLP_STE_UNUSED_NODE &&
+ ndlp->nlp_state < NLP_STE_PRLI_ISSUE) {
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ }
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index aaea3e31944d..2108b4cb7815 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -764,6 +764,8 @@ typedef struct _PRLI { /* Structure is in Big Endian format */
#define PRLI_PREDEF_CONFIG 0x5
#define PRLI_PARTIAL_SUCCESS 0x6
#define PRLI_INVALID_PAGE_CNT 0x7
+#define PRLI_INV_SRV_PARM 0x8
+
uint8_t word0Reserved3; /* FC Parm Word 0, bit 0:7 */
uint32_t origProcAssoc; /* FC Parm Word 1, bit 0:31 */
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 3221a934066b..9e59c050103d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2123,7 +2123,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
en_rn_msg = false;
} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3144 Port Down: Debug Dump\n");
else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
@@ -7550,6 +7550,8 @@ lpfc_disable_pci_dev(struct lpfc_hba *phba)
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
+ int rc = 0;
+
/* If resets are disabled then set error state and return. */
if (!phba->cfg_enable_hba_reset) {
phba->link_state = LPFC_HBA_ERROR;
@@ -7560,13 +7562,25 @@ lpfc_reset_hba(struct lpfc_hba *phba)
if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
} else {
+ if (test_bit(MBX_TMO_ERR, &phba->bit_flags)) {
+ /* Perform a PCI function reset to start from clean */
+ rc = lpfc_pci_function_reset(phba);
+ lpfc_els_flush_all_cmd(phba);
+ }
lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
lpfc_sli_flush_io_rings(phba);
}
lpfc_offline(phba);
- lpfc_sli_brdrestart(phba);
- lpfc_online(phba);
- lpfc_unblock_mgmt_io(phba);
+ clear_bit(MBX_TMO_ERR, &phba->bit_flags);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "8888 PCI function reset failed rc %x\n",
+ rc);
+ } else {
+ lpfc_sli_brdrestart(phba);
+ lpfc_online(phba);
+ lpfc_unblock_mgmt_io(phba);
+ }
}
/**
@@ -12498,10 +12512,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
(new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
(new_cpup->phys_id == cpup->phys_id))
goto found_same;
- new_cpu = cpumask_next(
- new_cpu, cpu_present_mask);
- if (new_cpu >= nr_cpu_ids)
- new_cpu = first_cpu;
+ new_cpu = lpfc_next_present_cpu(new_cpu);
}
/* At this point, we leave the CPU as unassigned */
continue;
@@ -12513,9 +12524,7 @@ found_same:
* chance of having multiple unassigned CPU entries
* selecting the same IRQ.
*/
- start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu >= nr_cpu_ids)
- start_cpu = first_cpu;
+ start_cpu = lpfc_next_present_cpu(new_cpu);
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3337 Set Affinity: CPU %d "
@@ -12548,10 +12557,7 @@ found_same:
if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
(new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
goto found_any;
- new_cpu = cpumask_next(
- new_cpu, cpu_present_mask);
- if (new_cpu >= nr_cpu_ids)
- new_cpu = first_cpu;
+ new_cpu = lpfc_next_present_cpu(new_cpu);
}
/* We should never leave an entry unassigned */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -12567,9 +12573,7 @@ found_any:
* chance of having multiple unassigned CPU entries
* selecting the same IRQ.
*/
- start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu >= nr_cpu_ids)
- start_cpu = first_cpu;
+ start_cpu = lpfc_next_present_cpu(new_cpu);
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3338 Set Affinity: CPU %d "
@@ -12640,9 +12644,7 @@ found_any:
new_cpup->core_id == cpup->core_id) {
goto found_hdwq;
}
- new_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (new_cpu >= nr_cpu_ids)
- new_cpu = first_cpu;
+ new_cpu = lpfc_next_present_cpu(new_cpu);
}
/* If we can't match both phys_id and core_id,
@@ -12654,10 +12656,7 @@ found_any:
if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
new_cpup->phys_id == cpup->phys_id)
goto found_hdwq;
-
- new_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (new_cpu >= nr_cpu_ids)
- new_cpu = first_cpu;
+ new_cpu = lpfc_next_present_cpu(new_cpu);
}
/* Otherwise just round robin on cfg_hdw_queue */
@@ -12666,9 +12665,7 @@ found_any:
goto logit;
found_hdwq:
/* We found an available entry, copy the IRQ info */
- start_cpu = cpumask_next(new_cpu, cpu_present_mask);
- if (start_cpu >= nr_cpu_ids)
- start_cpu = first_cpu;
+ start_cpu = lpfc_next_present_cpu(new_cpu);
cpup->hdwq = new_cpup->hdwq;
logit:
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index b86ff9fcdf0c..1eb7f7e60bba 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -879,23 +879,34 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(shost->host_lock);
lpfc_retry_pport_discovery(phba);
}
- } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
- ((ndlp->nlp_type & NLP_FCP_TARGET) ||
- (ndlp->nlp_type & NLP_NVME_TARGET) ||
- (vport->fc_flag & FC_PT2PT))) ||
- (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
- /* Only try to re-login if this is NOT a Fabric Node
- * AND the remote NPORT is a FCP/NVME Target or we
- * are in pt2pt mode. NLP_STE_ADISC_ISSUE is a special
- * case for LOGO as a response to ADISC behavior.
- */
- mod_timer(&ndlp->nlp_delayfunc,
- jiffies + msecs_to_jiffies(1000 * 1));
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag |= NLP_DELAY_TMO;
- spin_unlock_irq(&ndlp->lock);
-
- ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ } else {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_ELS | LOG_DISCOVERY,
+ "3203 LOGO recover nport x%06x state x%x "
+ "ntype x%x fc_flag x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_type, vport->fc_flag);
+
+ /* Special cases for rports that recover post LOGO. */
+ if ((!(ndlp->nlp_type == NLP_FABRIC) &&
+ (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) ||
+ vport->fc_flag & FC_PT2PT)) ||
+ (ndlp->nlp_state >= NLP_STE_ADISC_ISSUE ||
+ ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) {
+ mod_timer(&ndlp->nlp_delayfunc,
+ jiffies + msecs_to_jiffies(1000 * 1));
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag |= NLP_DELAY_TMO;
+ spin_unlock_irq(&ndlp->lock);
+ ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_NODE | LOG_ELS | LOG_DISCOVERY,
+ "3204 Start nlpdelay on DID x%06x "
+ "nflag x%x lastels x%x ref cnt %u",
+ ndlp->nlp_DID, ndlp->nlp_flag,
+ ndlp->nlp_last_elscmd,
+ kref_read(&ndlp->kref));
+ }
}
out:
/* Unregister from backend, could have been skipped due to ADISC */
@@ -1854,7 +1865,6 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;
LPFC_MBOXQ_t *mb;
LPFC_MBOXQ_t *nextmb;
- struct lpfc_nodelist *ns_ndlp;
cmdiocb = (struct lpfc_iocbq *) arg;
@@ -1882,13 +1892,6 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
}
spin_unlock_irq(&phba->hbalock);
- /* software abort if any GID_FT is outstanding */
- if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) {
- ns_ndlp = lpfc_findnode_did(vport, NameServer_DID);
- if (ns_ndlp)
- lpfc_els_abort(phba, ns_ndlp);
- }
-
lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO);
return ndlp->nlp_state;
}
@@ -2148,6 +2151,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
struct lpfc_nvme_prli *nvpr;
void *temp_ptr;
u32 ulp_status;
+ bool acc_imode_sps = false;
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->rsp_iocb;
@@ -2182,22 +2186,32 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
goto out_err;
}
- if (npr && (npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
- (npr->prliType == PRLI_FCP_TYPE)) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
- "6028 FCP NPR PRLI Cmpl Init %d Target %d\n",
- npr->initiatorFunc,
- npr->targetFunc);
- if (npr->initiatorFunc)
- ndlp->nlp_type |= NLP_FCP_INITIATOR;
- if (npr->targetFunc) {
- ndlp->nlp_type |= NLP_FCP_TARGET;
- if (npr->writeXferRdyDis)
- ndlp->nlp_flag |= NLP_FIRSTBURST;
+ if (npr && npr->prliType == PRLI_FCP_TYPE) {
+ lpfc_printf_vlog(vport, KERN_INFO,
+ LOG_ELS | LOG_NODE | LOG_DISCOVERY,
+ "6028 FCP NPR PRLI Cmpl Init %d Target %d "
+ "EIP %d AccCode x%x\n",
+ npr->initiatorFunc, npr->targetFunc,
+ npr->estabImagePair, npr->acceptRspCode);
+
+ if (npr->acceptRspCode == PRLI_INV_SRV_PARM) {
+ /* Strict initiators don't establish an image pair. */
+ if (npr->initiatorFunc && !npr->targetFunc &&
+ !npr->estabImagePair)
+ acc_imode_sps = true;
}
- if (npr->Retry)
- ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+ if (npr->acceptRspCode == PRLI_REQ_EXECUTED || acc_imode_sps) {
+ if (npr->initiatorFunc)
+ ndlp->nlp_type |= NLP_FCP_INITIATOR;
+ if (npr->targetFunc) {
+ ndlp->nlp_type |= NLP_FCP_TARGET;
+ if (npr->writeXferRdyDis)
+ ndlp->nlp_flag |= NLP_FIRSTBURST;
+ }
+ if (npr->Retry)
+ ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
+ }
} else if (nvpr &&
(bf_get_be32(prli_acc_rsp_code, nvpr) ==
PRLI_REQ_EXECUTED) &&
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 8db7cb99903d..39acbcb7ec66 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1864,7 +1864,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_fcpreq_priv *freqpriv;
unsigned long flags;
int ret_val;
- struct nvme_fc_cmd_iu *cp;
/* Validate pointers. LLDD fault handling with transport does
* have timing races.
@@ -1988,16 +1987,10 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
return;
}
- /*
- * Get Command Id from cmd to plug into response. This
- * code is not needed in the next NVME Transport drop.
- */
- cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
"6138 Transport Abort NVME Request Issued for "
- "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n",
- nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode,
- cp->sqe.common.command_id);
+ "ox_id x%x\n",
+ nvmereq_wqe->sli4_xritag);
return;
out_unlock:
@@ -2510,8 +2503,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
lpfc_printf_vlog(vport, KERN_ERR,
LOG_TRACE_EVENT,
"6031 RemotePort Registration failed "
- "err: %d, DID x%06x\n",
- ret, ndlp->nlp_DID);
+ "err: %d, DID x%06x ref %u\n",
+ ret, ndlp->nlp_DID, kref_read(&ndlp->kref));
+ lpfc_nlp_put(ndlp);
}
return ret;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index dff4584d338b..425328d9c2d8 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1620,10 +1620,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
cpu = cpumask_first(cpu_present_mask);
continue;
}
- cpu = cpumask_next(cpu, cpu_present_mask);
- if (cpu == nr_cpu_ids)
- cpu = cpumask_first(cpu_present_mask);
-
+ cpu = lpfc_next_present_cpu(cpu);
}
for_each_present_cpu(i) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 58d10f8f75a7..4dfadf254a72 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -3935,6 +3935,8 @@ void lpfc_poll_eratt(struct timer_list *t)
uint64_t sli_intr, cnt;
phba = from_timer(phba, t, eratt_poll);
+ if (!(phba->hba_flag & HBA_SETUP))
+ return;
/* Here we will also keep track of interrupts per sec of the hba */
sli_intr = phba->sli.slistat.sli_intr;
@@ -7693,7 +7695,9 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
spin_unlock_irq(&phba->hbalock);
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "3161 Failure to post sgl to port.\n");
+ "3161 Failure to post sgl to port,status %x "
+ "blkcnt %d totalcnt %d postcnt %d\n",
+ status, block_cnt, total_cnt, post_cnt);
return -EIO;
}
@@ -8478,6 +8482,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
spin_unlock_irq(&phba->hbalock);
}
}
+ phba->hba_flag &= ~HBA_SETUP;
lpfc_sli4_dip(phba);
@@ -9282,6 +9287,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
* would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
* it to fail all outstanding SCSI IO.
*/
+ set_bit(MBX_TMO_ERR, &phba->bit_flags);
spin_lock_irq(&phba->pport->work_port_lock);
phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
spin_unlock_irq(&phba->pport->work_port_lock);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 6f35491aed0f..13a547277f97 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.13"
+#define LPFC_DRIVER_VERSION "14.2.0.14"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index ef2b6380e19a..bc867da650b6 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -438,7 +438,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
// set up PCI related soft state and other pre-known parameters
- adapter->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ adapter->unique_id = pci_dev_id(pdev);
adapter->irq = pdev->irq;
adapter->pdev = pdev;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 050eed8e2684..b9d46dcb5210 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -7518,7 +7518,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
*/
instance->pdev = pdev;
instance->host = host;
- instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ instance->unique_id = pci_dev_id(pdev);
instance->init_id = MEGASAS_DEFAULT_INIT_ID;
megasas_set_adapter_type(instance);
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index 2fc196499c89..35f81af40f51 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -1482,7 +1482,7 @@ struct mpi3_security_page0 {
#define MPI3_SECURITY1_KEY_RECORD_MAX 1
#endif
#ifndef MPI3_SECURITY1_PAD_MAX
-#define MPI3_SECURITY1_PAD_MAX 1
+#define MPI3_SECURITY1_PAD_MAX 4
#endif
union mpi3_security1_key_data {
__le32 dword[128];
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index f5e9c2309ce6..1e4a60fc655f 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -600,6 +600,7 @@ struct mpi3_event_data_pcie_error_threshold {
__le16 threshold_count;
__le16 attached_dev_handle;
__le16 reserved12;
+ __le32 reserved14;
};
#define MPI3_EVENT_PCI_ERROR_RC_THRESHOLD_EXCEEDED (0x00)
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index 441cfc2c7f09..1e0a3dcaf723 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -18,7 +18,7 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (27)
+#define MPI3_VERSION_UNIT (28)
#define MPI3_VERSION_DEV (0)
#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 0afb687402e1..ae98d15c30b1 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -55,8 +55,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.4.1.0.0"
-#define MPI3MR_DRIVER_RELDATE "16-March-2023"
+#define MPI3MR_DRIVER_VERSION "8.5.0.0.0"
+#define MPI3MR_DRIVER_RELDATE "24-July-2023"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -66,11 +66,12 @@ extern atomic64_t event_counter;
#define MPI3MR_NAME_LENGTH 32
#define IOCNAME "%s: "
-#define MPI3MR_MAX_SECTORS 2048
+#define MPI3MR_DEFAULT_MAX_IO_SIZE (1 * 1024 * 1024)
/* Definitions for internal SGL and Chain SGL buffers */
#define MPI3MR_PAGE_SIZE_4K 4096
-#define MPI3MR_SG_DEPTH (MPI3MR_PAGE_SIZE_4K / sizeof(struct mpi3_sge_common))
+#define MPI3MR_DEFAULT_SGL_ENTRIES 256
+#define MPI3MR_MAX_SGL_ENTRIES 2048
/* Definitions for MAX values for shost */
#define MPI3MR_MAX_CMDS_LUN 128
@@ -206,6 +207,9 @@ extern atomic64_t event_counter;
*/
#define MPI3MR_MAX_APP_XFER_SECTORS (2048 + 512)
+#define MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS 256
+#define MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS 2048
+
/**
* struct mpi3mr_nvme_pt_sge - Structure to store SGEs for NVMe
* Encapsulated commands.
@@ -323,6 +327,7 @@ struct mpi3mr_ioc_facts {
u16 max_perids;
u16 max_pds;
u16 max_sasexpanders;
+ u32 max_data_length;
u16 max_sasinitiators;
u16 max_enclosures;
u16 max_pcie_switches;
@@ -676,6 +681,7 @@ enum mpi3mr_dev_state {
* @io_unit_port: IO Unit port ID
* @non_stl: Is this device not to be attached with SAS TL
* @io_throttle_enabled: I/O throttling needed or not
+ * @wslen: Write same max length
* @q_depth: Device specific Queue Depth
* @wwid: World wide ID
* @enclosure_logical_id: Enclosure logical identifier
@@ -698,6 +704,7 @@ struct mpi3mr_tgt_dev {
u8 io_unit_port;
u8 non_stl;
u8 io_throttle_enabled;
+ u16 wslen;
u16 q_depth;
u64 wwid;
u64 enclosure_logical_id;
@@ -751,6 +758,8 @@ static inline void mpi3mr_tgtdev_put(struct mpi3mr_tgt_dev *s)
* @dev_removed: Device removed in the Firmware
* @dev_removedelay: Device is waiting to be removed in FW
* @dev_type: Device type
+ * @dev_nvme_dif: Device is NVMe DIF enabled
+ * @wslen: Write same max length
* @io_throttle_enabled: I/O throttling needed or not
* @io_divert: Flag indicates io divert is on or off for the dev
* @throttle_group: Pointer to throttle group info
@@ -767,6 +776,8 @@ struct mpi3mr_stgt_priv_data {
u8 dev_removed;
u8 dev_removedelay;
u8 dev_type;
+ u8 dev_nvme_dif;
+ u16 wslen;
u8 io_throttle_enabled;
u8 io_divert;
struct mpi3mr_throttle_group_info *throttle_group;
@@ -782,12 +793,14 @@ struct mpi3mr_stgt_priv_data {
* @ncq_prio_enable: NCQ priority enable for SATA device
* @pend_count: Counter to track pending I/Os during error
* handling
+ * @wslen: Write same max length
*/
struct mpi3mr_sdev_priv_data {
struct mpi3mr_stgt_priv_data *tgt_priv_data;
u32 lun_id;
u8 ncq_prio_enable;
u32 pend_count;
+ u16 wslen;
};
/**
@@ -959,6 +972,7 @@ struct scmd_priv {
* @stop_drv_processing: Stop all command processing
* @device_refresh_on: Don't process the events until devices are refreshed
* @max_host_ios: Maximum host I/O count
+ * @max_sgl_entries: Max SGL entries per I/O
* @chain_buf_count: Chain buffer count
* @chain_buf_pool: Chain buffer pool
* @chain_sgl_list: Chain SGL list
@@ -1129,6 +1143,7 @@ struct mpi3mr_ioc {
u16 max_host_ios;
spinlock_t tgtdev_lock;
struct list_head tgtdev_list;
+ u16 max_sgl_entries;
u32 chain_buf_count;
struct dma_pool *chain_buf_pool;
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index 5fa07d6ee5b8..f039f1d98647 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -1163,6 +1163,12 @@ mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
return -EPERM;
}
+ if (mrioc->shost->max_sectors != (mrioc->facts.max_data_length / 512))
+ ioc_err(mrioc, "Warning: The maximum data transfer length\n"
+ "\tchanged after reset: previous(%d), new(%d),\n"
+ "the driver cannot change this at run time\n",
+ mrioc->shost->max_sectors * 512, mrioc->facts.max_data_length);
+
if ((mrioc->sas_transport_enabled) && (mrioc->facts.ioc_capabilities &
MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED))
ioc_err(mrioc,
@@ -2343,8 +2349,8 @@ static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
mrioc->init_cmds.is_waiting = 0;
if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_TSU_TIMEOUT);
retval = -1;
goto out_unlock;
}
@@ -2856,6 +2862,7 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
le16_to_cpu(facts_data->max_pcie_switches);
mrioc->facts.max_sasexpanders =
le16_to_cpu(facts_data->max_sas_expanders);
+ mrioc->facts.max_data_length = le16_to_cpu(facts_data->max_data_length);
mrioc->facts.max_sasinitiators =
le16_to_cpu(facts_data->max_sas_initiators);
mrioc->facts.max_enclosures = le16_to_cpu(facts_data->max_enclosures);
@@ -2893,13 +2900,18 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.io_throttle_high =
le16_to_cpu(facts_data->io_throttle_high);
+ if (mrioc->facts.max_data_length ==
+ MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
+ mrioc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
+ else
+ mrioc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
/* Store in 512b block count */
if (mrioc->facts.io_throttle_data_length)
mrioc->io_throttle_data_length =
(mrioc->facts.io_throttle_data_length * 2 * 4);
else
/* set the length to 1MB + 1K to disable throttle */
- mrioc->io_throttle_data_length = MPI3MR_MAX_SECTORS + 2;
+ mrioc->io_throttle_data_length = (mrioc->facts.max_data_length / 512) + 2;
mrioc->io_throttle_high = (mrioc->facts.io_throttle_high * 2 * 1024);
mrioc->io_throttle_low = (mrioc->facts.io_throttle_low * 2 * 1024);
@@ -2914,9 +2926,9 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
mrioc->facts.sge_mod_shift);
- ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
+ ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x max_data_len (%d)\n",
mrioc->facts.dma_mask, (facts_flags &
- MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
+ MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK), mrioc->facts.max_data_length);
ioc_info(mrioc,
"max_dev_per_throttle_group(%d), max_throttle_groups(%d)\n",
mrioc->facts.max_dev_per_tg, mrioc->facts.max_io_throttle_group);
@@ -3359,8 +3371,8 @@ int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_EVTACK_TIMEOUT);
retval = -1;
goto out_unlock;
}
@@ -3414,7 +3426,14 @@ static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
if (!mrioc->chain_sgl_list)
goto out_failed;
- sz = MPI3MR_PAGE_SIZE_4K;
+ if (mrioc->max_sgl_entries > (mrioc->facts.max_data_length /
+ MPI3MR_PAGE_SIZE_4K))
+ mrioc->max_sgl_entries = mrioc->facts.max_data_length /
+ MPI3MR_PAGE_SIZE_4K;
+ sz = mrioc->max_sgl_entries * sizeof(struct mpi3_sge_common);
+ ioc_info(mrioc, "number of sgl entries=%d chain buffer size=%dKB\n",
+ mrioc->max_sgl_entries, sz/1024);
+
mrioc->chain_buf_pool = dma_pool_create("chain_buf pool",
&mrioc->pdev->dev, sz, 16, 0);
if (!mrioc->chain_buf_pool) {
@@ -3813,7 +3832,7 @@ retry_init:
}
mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
-
+ mrioc->shost->max_sectors = mrioc->facts.max_data_length / 512;
mrioc->num_io_throttle_group = mrioc->facts.max_io_throttle_group;
atomic_set(&mrioc->pend_large_data_sz, 0);
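For reference, a minimal sketch of the unit handling the facts-processing hunks above rely on: the IOC reports max_data_length in 4 KiB units (or as "not reported", in which case the driver default applies), and the SCSI-layer max_sectors value is that length expressed in 512-byte sectors. Everything except the MPI3MR_*/MPI3_* macros below is invented for illustration.

/* Illustration only, not driver code: mirrors the conversions done in
 * mpi3mr_process_factsdata() and when setting shost->max_sectors.
 */
static u32 example_max_sectors(u16 reported_4k_units)
{
	u32 max_data_length;

	if (reported_4k_units == MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
		max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;	/* bytes */
	else
		max_data_length = reported_4k_units * MPI3MR_PAGE_SIZE_4K;

	return max_data_length / 512;	/* max_sectors is in 512-byte units */
}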
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index d627355303d7..89ba015c5d7e 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -33,6 +33,12 @@ static int logging_level;
module_param(logging_level, int, 0);
MODULE_PARM_DESC(logging_level,
" bits for enabling additional logging info (default=0)");
+static int max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
+module_param(max_sgl_entries, int, 0444);
+MODULE_PARM_DESC(max_sgl_entries,
+ "Preferred max number of SG entries to be used for a single I/O\n"
+ "The actual value will be determined by the driver\n"
+ "(Minimum=256, Maximum=2048, default=256)");
/* Forward declarations*/
static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
@@ -424,6 +430,7 @@ void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc)
tgt_priv->io_throttle_enabled = 0;
tgt_priv->io_divert = 0;
tgt_priv->throttle_group = NULL;
+ tgt_priv->wslen = 0;
if (tgtdev->host_exposed)
atomic_set(&tgt_priv->block_io, 1);
}
@@ -1034,6 +1041,19 @@ mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc)
{
struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next;
+ struct mpi3mr_stgt_priv_data *tgt_priv;
+
+ dprint_reset(mrioc, "refresh target devices: check for removals\n");
+ list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
+ list) {
+ if ((tgtdev->dev_handle == MPI3MR_INVALID_DEV_HANDLE) &&
+ tgtdev->host_exposed && tgtdev->starget &&
+ tgtdev->starget->hostdata) {
+ tgt_priv = tgtdev->starget->hostdata;
+ tgt_priv->dev_removed = 1;
+ atomic_set(&tgt_priv->block_io, 0);
+ }
+ }
list_for_each_entry_safe(tgtdev, tgtdev_next, &mrioc->tgtdev_list,
list) {
@@ -1102,6 +1122,18 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
tgtdev->io_throttle_enabled =
(flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
+ switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
+ case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
+ tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_256_BLKS;
+ break;
+ case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
+ tgtdev->wslen = MPI3MR_WRITE_SAME_MAX_LEN_2048_BLKS;
+ break;
+ case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
+ default:
+ tgtdev->wslen = 0;
+ break;
+ }
if (tgtdev->starget && tgtdev->starget->hostdata) {
scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
@@ -1113,6 +1145,7 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
tgtdev->io_throttle_enabled;
if (is_added == true)
atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ scsi_tgt_priv_data->wslen = tgtdev->wslen;
}
switch (dev_pg0->access_status) {
@@ -3413,7 +3446,7 @@ static int mpi3mr_prepare_sg_scmd(struct mpi3mr_ioc *mrioc,
scsi_bufflen(scmd));
return -ENOMEM;
}
- if (sges_left > MPI3MR_SG_DEPTH) {
+ if (sges_left > mrioc->max_sgl_entries) {
sdev_printk(KERN_ERR, scmd->device,
"scsi_dma_map returned unsupported sge count %d!\n",
sges_left);
@@ -3934,6 +3967,48 @@ void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout)
}
/**
+ * mpi3mr_setup_divert_ws - Setup Divert IO flag for write same
+ * @mrioc: Adapter instance reference
+ * @scmd: SCSI command reference
+ * @scsiio_req: MPI3 SCSI IO request
+ * @scsiio_flags: Pointer to MPI3 SCSI IO Flags
+ * @wslen: write same max length
+ *
+ * Gets the unmap, ndob and number of blocks values from the WRITE
+ * SAME SCSI I/O and, based on them, sets the divert I/O flag and
+ * the reason for diverting the I/O to the firmware.
+ *
+ * Return: Nothing
+ */
+static inline void mpi3mr_setup_divert_ws(struct mpi3mr_ioc *mrioc,
+ struct scsi_cmnd *scmd, struct mpi3_scsi_io_request *scsiio_req,
+ u32 *scsiio_flags, u16 wslen)
+{
+ u8 unmap = 0, ndob = 0;
+ u8 opcode = scmd->cmnd[0];
+ u32 num_blocks = 0;
+ u16 sa = (scmd->cmnd[8] << 8) | (scmd->cmnd[9]);
+
+ if (opcode == WRITE_SAME_16) {
+ unmap = scmd->cmnd[1] & 0x08;
+ ndob = scmd->cmnd[1] & 0x01;
+ num_blocks = get_unaligned_be32(scmd->cmnd + 10);
+ } else if ((opcode == VARIABLE_LENGTH_CMD) && (sa == WRITE_SAME_32)) {
+ unmap = scmd->cmnd[10] & 0x08;
+ ndob = scmd->cmnd[10] & 0x01;
+ num_blocks = get_unaligned_be32(scmd->cmnd + 28);
+ } else
+ return;
+
+ if ((unmap) && (ndob) && (num_blocks > wslen)) {
+ scsiio_req->msg_flags |=
+ MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
+ *scsiio_flags |=
+ MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE;
+ }
+}
+
+/**
* mpi3mr_eh_host_reset - Host reset error handling callback
* @scmd: SCSI command reference
*
@@ -4430,7 +4505,6 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
unsigned long flags;
int retval = 0;
struct sas_rphy *rphy = NULL;
- bool update_stgt_priv_data = false;
scsi_tgt_priv_data = kzalloc(sizeof(*scsi_tgt_priv_data), GFP_KERNEL);
if (!scsi_tgt_priv_data)
@@ -4439,39 +4513,50 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
starget->hostdata = scsi_tgt_priv_data;
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
-
if (starget->channel == mrioc->scsi_device_channel) {
tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
- if (tgt_dev && !tgt_dev->is_hidden)
- update_stgt_priv_data = true;
- else
+ if (tgt_dev && !tgt_dev->is_hidden) {
+ scsi_tgt_priv_data->starget = starget;
+ scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
+ scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
+ scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
+ scsi_tgt_priv_data->tgt_dev = tgt_dev;
+ tgt_dev->starget = starget;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ retval = 0;
+ if ((tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
+ ((tgt_dev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
+ ((tgt_dev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK) !=
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0))
+ scsi_tgt_priv_data->dev_nvme_dif = 1;
+ scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
+ scsi_tgt_priv_data->wslen = tgt_dev->wslen;
+ if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
+ scsi_tgt_priv_data->throttle_group = tgt_dev->dev_spec.vd_inf.tg;
+ } else
retval = -ENXIO;
} else if (mrioc->sas_transport_enabled && !starget->channel) {
rphy = dev_to_rphy(starget->dev.parent);
tgt_dev = __mpi3mr_get_tgtdev_by_addr_and_rphy(mrioc,
rphy->identify.sas_address, rphy);
if (tgt_dev && !tgt_dev->is_hidden && !tgt_dev->non_stl &&
- (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA))
- update_stgt_priv_data = true;
- else
+ (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_SAS_SATA)) {
+ scsi_tgt_priv_data->starget = starget;
+ scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
+ scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
+ scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
+ scsi_tgt_priv_data->tgt_dev = tgt_dev;
+ scsi_tgt_priv_data->io_throttle_enabled = tgt_dev->io_throttle_enabled;
+ scsi_tgt_priv_data->wslen = tgt_dev->wslen;
+ tgt_dev->starget = starget;
+ atomic_set(&scsi_tgt_priv_data->block_io, 0);
+ retval = 0;
+ } else
retval = -ENXIO;
}
-
- if (update_stgt_priv_data) {
- scsi_tgt_priv_data->starget = starget;
- scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
- scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
- scsi_tgt_priv_data->dev_type = tgt_dev->dev_type;
- scsi_tgt_priv_data->tgt_dev = tgt_dev;
- tgt_dev->starget = starget;
- atomic_set(&scsi_tgt_priv_data->block_io, 0);
- retval = 0;
- scsi_tgt_priv_data->io_throttle_enabled =
- tgt_dev->io_throttle_enabled;
- if (tgt_dev->dev_type == MPI3_DEVICE_DEVFORM_VD)
- scsi_tgt_priv_data->throttle_group =
- tgt_dev->dev_spec.vd_inf.tg;
- }
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
return retval;
@@ -4732,6 +4817,10 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
mpi3mr_setup_eedp(mrioc, scmd, scsiio_req);
+ if (stgt_priv_data->wslen)
+ mpi3mr_setup_divert_ws(mrioc, scmd, scsiio_req, &scsiio_flags,
+ stgt_priv_data->wslen);
+
memcpy(scsiio_req->cdb.cdb32, scmd->cmnd, scmd->cmd_len);
scsiio_req->data_length = cpu_to_le32(scsi_bufflen(scmd));
scsiio_req->dev_handle = cpu_to_le16(dev_handle);
@@ -4818,10 +4907,10 @@ static const struct scsi_host_template mpi3mr_driver_template = {
.no_write_same = 1,
.can_queue = 1,
.this_id = -1,
- .sg_tablesize = MPI3MR_SG_DEPTH,
+ .sg_tablesize = MPI3MR_DEFAULT_SGL_ENTRIES,
/* max xfer supported is 1M (2K in 512 byte sized sectors)
*/
- .max_sectors = 2048,
+ .max_sectors = (MPI3MR_DEFAULT_MAX_IO_SIZE / 512),
.cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
.max_segment_size = 0xffffffff,
.track_queue_depth = 1,
@@ -5004,6 +5093,16 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mrioc->pdev = pdev;
mrioc->stop_bsgs = 1;
+ mrioc->max_sgl_entries = max_sgl_entries;
+ if (max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES)
+ mrioc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES;
+ else if (max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES)
+ mrioc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
+ else {
+ mrioc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES;
+ mrioc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES;
+ }
+
/* init shost parameters */
shost->max_cmd_len = MPI3MR_MAX_CDB_LENGTH;
shost->max_lun = -1;
@@ -5068,7 +5167,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->nr_maps = 3;
shost->can_queue = mrioc->max_host_ios;
- shost->sg_tablesize = MPI3MR_SG_DEPTH;
+ shost->sg_tablesize = mrioc->max_sgl_entries;
shost->max_id = mrioc->facts.max_perids + 1;
retval = scsi_add_host(shost, &pdev->dev);
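A compact restatement of how the new max_sgl_entries module parameter is applied in mpi3mr_probe() above, assuming MPI3MR_DEFAULT_SGL_ENTRIES is 256 and MPI3MR_MAX_SGL_ENTRIES is 2048 as the parameter description states; the helper name is invented for illustration.

static u16 example_sgl_entries(int requested)
{
	if (requested > MPI3MR_MAX_SGL_ENTRIES)
		return MPI3MR_MAX_SGL_ENTRIES;
	if (requested < MPI3MR_DEFAULT_SGL_ENTRIES)
		return MPI3MR_DEFAULT_SGL_ENTRIES;

	/* round down to a multiple of MPI3MR_DEFAULT_SGL_ENTRIES */
	return (requested / MPI3MR_DEFAULT_SGL_ENTRIES) *
	       MPI3MR_DEFAULT_SGL_ENTRIES;
}

So a request of 1000 would be trimmed to 768 and anything above 2048 capped at 2048; the result is what scsi_add_host() then sees via shost->sg_tablesize.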
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 49e2a5e7ce54..43ebb331e216 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -84,10 +84,8 @@ static void mvs_phy_init(struct mvs_info *mvi, int phy_id)
phy->port = NULL;
timer_setup(&phy->timer, NULL, 0);
sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
- sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
sas_phy->tproto = 0;
- sas_phy->type = PHY_TYPE_PHYSICAL;
sas_phy->role = PHY_ROLE_INITIATOR;
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
@@ -416,7 +414,7 @@ static int mvs_prep_sas_ha_init(struct Scsi_Host *shost,
sha->sas_phy = arr_phy;
sha->sas_port = arr_port;
- sha->core.shost = shost;
+ sha->shost = shost;
sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
if (!sha->lldd_ha)
@@ -458,7 +456,6 @@ static void mvs_post_sas_ha_init(struct Scsi_Host *shost,
sha->sas_ha_name = DRV_NAME;
sha->dev = mvi->dev;
- sha->lldd_module = THIS_MODULE;
sha->sas_addr = &mvi->sas_addr[0];
sha->num_phys = nr_core * chip_info->n_phy;
@@ -473,7 +470,7 @@ static void mvs_post_sas_ha_init(struct Scsi_Host *shost,
shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
shost->can_queue = can_queue;
mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
- sha->core.shost = mvi->shost;
+ sha->shost = mvi->shost;
}
static void mvs_init_sas_add(struct mvs_info *mvi)
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 9978c424214c..1444b1f1c4c8 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -564,7 +564,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
void *buf_prd;
struct ssp_frame_hdr *ssp_hdr;
void *buf_tmp;
- u8 *buf_cmd, *buf_oaf, fburst = 0;
+ u8 *buf_cmd, *buf_oaf;
dma_addr_t buf_tmp_dma;
u32 flags;
u32 resp_len, req_len, i, tag = tei->tag;
@@ -582,10 +582,6 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
(phy_mask << TXQ_PHY_SHIFT));
flags = MCH_RETRY;
- if (task->ssp_task.enable_first_burst) {
- flags |= MCH_FBURST;
- fburst = (1 << 7);
- }
if (is_tmf)
flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
else
@@ -667,8 +663,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
memcpy(buf_cmd, &task->ssp_task.LUN, 8);
if (ssp_hdr->frame_type != SSP_TASK) {
- buf_cmd[9] = fburst | task->ssp_task.task_attr |
- (task->ssp_task.task_prio << 3);
+ buf_cmd[9] = task->ssp_task.task_attr;
memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
} else{
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 73aa7059b556..97f9d2fa6429 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2490,7 +2490,7 @@ static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
mhba->pdev = pdev;
mhba->shost = host;
- mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
+ mhba->unique_id = pci_dev_id(pdev);
ret = mvumi_init_fw(mhba);
if (ret)
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 73cd25f30ca5..33053db5a713 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4053,9 +4053,6 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
ssp_cmd.tag = cpu_to_le32(tag);
- if (task->ssp_task.enable_first_burst)
- ssp_cmd.ssp_iu.efb_prio_attr |= 0x80;
- ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
@@ -4095,7 +4092,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
u32 hdr_tag, ncg_tag = 0;
u64 phys_addr;
u32 ATAP = 0x0;
- u32 dir;
+ u32 dir, retfis = 0;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
@@ -4124,8 +4121,11 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.tag = cpu_to_le32(tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
- sata_cmd.ncqtag_atap_dir_m =
- cpu_to_le32(((ncg_tag & 0xff)<<16)|((ATAP & 0x3f) << 10) | dir);
+ if (task->ata_task.return_fis_on_success)
+ retfis = 1;
+ sata_cmd.retfis_ncqtag_atap_dir_m =
+ cpu_to_le32((retfis << 24) | ((ncg_tag & 0xff) << 16) |
+ ((ATAP & 0x3f) << 10) | dir);
sata_cmd.sata_fis = task->ata_task.fis;
if (likely(!task->ata_task.device_control_reg_update))
sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 961d0465b923..fc2127dcb58d 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -515,7 +515,7 @@ struct sata_start_req {
__le32 tag;
__le32 device_id;
__le32 data_len;
- __le32 ncqtag_atap_dir_m;
+ __le32 retfis_ncqtag_atap_dir_m;
struct host_to_dev_fis sata_fis;
u32 reserved1;
u32 reserved2;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 4995e1ef4e0e..5e5ce1e74c3b 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -162,10 +162,8 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
- sas_phy->class = SAS;
sas_phy->iproto = SAS_PROTOCOL_ALL;
sas_phy->tproto = 0;
- sas_phy->type = PHY_TYPE_PHYSICAL;
sas_phy->role = PHY_ROLE_INITIATOR;
sas_phy->oob_mode = OOB_NOT_CONNECTED;
sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
@@ -654,10 +652,9 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
sha->sas_ha_name = DRV_NAME;
sha->dev = pm8001_ha->dev;
sha->strict_wide_ports = 1;
- sha->lldd_module = THIS_MODULE;
sha->sas_addr = &pm8001_ha->sas_addr[0];
sha->num_phys = chip_info->n_phy;
- sha->core.shost = shost;
+ sha->shost = shost;
}
/**
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 953572fc0d9e..2fadd353f1c1 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -702,8 +702,6 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha,
void *piomb);
int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha, void *piomb);
int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb);
-struct sas_task *pm8001_alloc_task(void);
-void pm8001_free_task(struct sas_task *task);
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag);
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
u32 device_id);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 39a12ee94a72..f6857632dc7c 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -4316,9 +4316,6 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len);
ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id);
ssp_cmd.tag = cpu_to_le32(tag);
- if (task->ssp_task.enable_first_burst)
- ssp_cmd.ssp_iu.efb_prio_attr = 0x80;
- ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3);
ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);
memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,
task->ssp_task.cmd->cmd_len);
@@ -4457,7 +4454,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
u64 phys_addr, end_addr;
u32 end_addr_high, end_addr_low;
u32 ATAP = 0x0;
- u32 dir;
+ u32 dir, retfis = 0;
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
@@ -4487,7 +4484,8 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
sata_cmd.tag = cpu_to_le32(tag);
sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id);
sata_cmd.data_len = cpu_to_le32(task->total_xfer_len);
-
+ if (task->ata_task.return_fis_on_success)
+ retfis = 1;
sata_cmd.sata_fis = task->ata_task.fis;
if (likely(!task->ata_task.device_control_reg_update))
sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */
@@ -4500,12 +4498,10 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
"Encryption enabled.Sending Encrypt SATA cmd 0x%x\n",
sata_cmd.sata_fis.command);
opc = OPC_INB_SATA_DIF_ENC_IO;
-
- /* set encryption bit */
- sata_cmd.ncqtag_atap_dir_m_dad =
- cpu_to_le32(((ncg_tag & 0xff)<<16)|
- ((ATAP & 0x3f) << 10) | 0x20 | dir);
- /* dad (bit 0-1) is 0 */
+ /* set encryption bit; dad (bits 0-1) is 0 */
+ sata_cmd.retfis_ncqtag_atap_dir_m_dad =
+ cpu_to_le32((retfis << 24) | ((ncg_tag & 0xff) << 16) |
+ ((ATAP & 0x3f) << 10) | 0x20 | dir);
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter,
@@ -4568,11 +4564,10 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
pm8001_dbg(pm8001_ha, IO,
"Sending Normal SATA command 0x%x inb %x\n",
sata_cmd.sata_fis.command, q_index);
- /* dad (bit 0-1) is 0 */
- sata_cmd.ncqtag_atap_dir_m_dad =
- cpu_to_le32(((ncg_tag & 0xff)<<16) |
- ((ATAP & 0x3f) << 10) | dir);
-
+ /* dad (bits 0-1) is 0 */
+ sata_cmd.retfis_ncqtag_atap_dir_m_dad =
+ cpu_to_le32((retfis << 24) | ((ncg_tag & 0xff) << 16) |
+ ((ATAP & 0x3f) << 10) | dir);
/* fill in PRD (scatter/gather) table, if any */
if (task->num_scatter > 1) {
pm8001_chip_make_sg(task->scatter,
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index acf6e3005b84..eb8fd37b2066 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -731,7 +731,7 @@ struct sata_start_req {
__le32 tag;
__le32 device_id;
__le32 data_len;
- __le32 ncqtag_atap_dir_m_dad;
+ __le32 retfis_ncqtag_atap_dir_m_dad;
struct host_to_dev_fis sata_fis;
u32 reserved1;
u32 reserved2; /* dword 11. rsvd for normal I/O. */
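Only the field packing visible in the pm8001/pm80xx hunks is known here; as a hedged sketch, the helper below spells out the bit layout used when filling retfis_ncqtag_atap_dir_m_dad. The helper and its name are not part of the driver, and the direction/mode bits are passed through as already computed by the caller.

static inline __le32 example_pack_sata_dw(u8 retfis, u8 ncq_tag, u8 atap,
					  u32 dir, bool encrypt)
{
	u32 w = ((u32)(retfis & 0x1) << 24) |	/* return FIS on success */
		((u32)(ncq_tag & 0xff) << 16) |	/* NCQ tag */
		((u32)(atap & 0x3f) << 10) |	/* ATA protocol */
		(encrypt ? 0x20 : 0) |		/* encryption bit */
		dir;				/* direction/mode bits */

	return cpu_to_le32(w);	/* dad (bits 0-1) stays 0 for normal I/O */
}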
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 9415a4819470..50dc30051f22 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3584,8 +3584,7 @@ static ssize_t pmcraid_show_adapter_id(
struct Scsi_Host *shost = class_to_shost(dev);
struct pmcraid_instance *pinstance =
(struct pmcraid_instance *)shost->hostdata;
- u32 adapter_id = (pinstance->pdev->bus->number << 8) |
- pinstance->pdev->devfn;
+ u32 adapter_id = pci_dev_id(pinstance->pdev);
u32 aen_group = pmcraid_event_family.id;
return snprintf(buf, PAGE_SIZE,
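The mvumi and pmcraid hunks replace the open-coded bus/devfn combination with pci_dev_id() from <linux/pci.h>. A minimal illustration of the equivalence; the function below exists only for demonstration.

static u16 example_pci_id(struct pci_dev *pdev)
{
	/* identical results: bus number in the high byte,
	 * device/function number in the low byte
	 */
	u16 old_style = (pdev->bus->number << 8) | pdev->devfn;
	u16 new_style = pci_dev_id(pdev);

	WARN_ON(old_style != new_style);
	return new_style;
}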
diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c
index 909c49541984..19f0b93fa3d8 100644
--- a/drivers/scsi/ppa.c
+++ b/drivers/scsi/ppa.c
@@ -45,6 +45,11 @@ typedef struct {
#include "ppa.h"
+static unsigned int mode = PPA_AUTODETECT;
+module_param(mode, uint, 0644);
+MODULE_PARM_DESC(mode, "Transfer mode (0 = Autodetect, 1 = SPP 4-bit, "
+ "2 = SPP 8-bit, 3 = EPP 8-bit, 4 = EPP 16-bit, 5 = EPP 32-bit)");
+
static struct scsi_pointer *ppa_scsi_pointer(struct scsi_cmnd *cmd)
{
return scsi_cmd_priv(cmd);
@@ -157,7 +162,7 @@ static int ppa_show_info(struct seq_file *m, struct Scsi_Host *host)
return 0;
}
-static int device_check(ppa_struct *dev);
+static int device_check(ppa_struct *dev, bool autodetect);
#if PPA_DEBUG > 0
#define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\
@@ -302,13 +307,10 @@ static int ppa_out(ppa_struct *dev, char *buffer, int len)
case PPA_EPP_8:
epp_reset(ppb);
w_ctr(ppb, 0x4);
-#ifdef CONFIG_SCSI_IZIP_EPP16
- if (!(((long) buffer | len) & 0x01))
- outsw(ppb + 4, buffer, len >> 1);
-#else
- if (!(((long) buffer | len) & 0x03))
+ if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03))
outsl(ppb + 4, buffer, len >> 2);
-#endif
+ else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01))
+ outsw(ppb + 4, buffer, len >> 1);
else
outsb(ppb + 4, buffer, len);
w_ctr(ppb, 0xc);
@@ -355,13 +357,10 @@ static int ppa_in(ppa_struct *dev, char *buffer, int len)
case PPA_EPP_8:
epp_reset(ppb);
w_ctr(ppb, 0x24);
-#ifdef CONFIG_SCSI_IZIP_EPP16
- if (!(((long) buffer | len) & 0x01))
- insw(ppb + 4, buffer, len >> 1);
-#else
- if (!(((long) buffer | len) & 0x03))
+ if (dev->mode == PPA_EPP_32 && !(((long) buffer | len) & 0x03))
insl(ppb + 4, buffer, len >> 2);
-#endif
+ else if (dev->mode == PPA_EPP_16 && !(((long) buffer | len) & 0x01))
+ insw(ppb + 4, buffer, len >> 1);
else
insb(ppb + 4, buffer, len);
w_ctr(ppb, 0x2c);
@@ -469,6 +468,27 @@ static int ppa_init(ppa_struct *dev)
{
int retv;
unsigned short ppb = dev->base;
+ bool autodetect = dev->mode == PPA_AUTODETECT;
+
+ if (autodetect) {
+ int modes = dev->dev->port->modes;
+ int ppb_hi = dev->dev->port->base_hi;
+
+ /* Mode detection works up the chain of speed
+ * This avoids a nasty if-then-else-if-... tree
+ */
+ dev->mode = PPA_NIBBLE;
+
+ if (modes & PARPORT_MODE_TRISTATE)
+ dev->mode = PPA_PS2;
+
+ if (modes & PARPORT_MODE_ECP) {
+ w_ecr(ppb_hi, 0x20);
+ dev->mode = PPA_PS2;
+ }
+ if ((modes & PARPORT_MODE_EPP) && (modes & PARPORT_MODE_ECP))
+ w_ecr(ppb_hi, 0x80);
+ }
ppa_disconnect(dev);
ppa_connect(dev, CONNECT_NORMAL);
@@ -492,7 +512,7 @@ static int ppa_init(ppa_struct *dev)
if (retv)
return -EIO;
- return device_check(dev);
+ return device_check(dev, autodetect);
}
static inline int ppa_send_command(struct scsi_cmnd *cmd)
@@ -637,7 +657,7 @@ static void ppa_interrupt(struct work_struct *work)
case DID_OK:
break;
case DID_NO_CONNECT:
- printk(KERN_DEBUG "ppa: no device at SCSI ID %i\n", cmd->device->target);
+ printk(KERN_DEBUG "ppa: no device at SCSI ID %i\n", scmd_id(cmd));
break;
case DID_BUS_BUSY:
printk(KERN_DEBUG "ppa: BUS BUSY - EPP timeout detected\n");
@@ -883,7 +903,7 @@ static int ppa_reset(struct scsi_cmnd *cmd)
return SUCCESS;
}
-static int device_check(ppa_struct *dev)
+static int device_check(ppa_struct *dev, bool autodetect)
{
/* This routine looks for a device and then attempts to use EPP
to send a command. If all goes as planned then EPP is available. */
@@ -895,8 +915,8 @@ static int device_check(ppa_struct *dev)
old_mode = dev->mode;
for (loop = 0; loop < 8; loop++) {
/* Attempt to use EPP for Test Unit Ready */
- if ((ppb & 0x0007) == 0x0000)
- dev->mode = PPA_EPP_32;
+ if (autodetect && (ppb & 0x0007) == 0x0000)
+ dev->mode = PPA_EPP_8;
second_pass:
ppa_connect(dev, CONNECT_EPP_MAYBE);
@@ -924,7 +944,7 @@ second_pass:
udelay(1000);
ppa_disconnect(dev);
udelay(1000);
- if (dev->mode == PPA_EPP_32) {
+ if (dev->mode != old_mode) {
dev->mode = old_mode;
goto second_pass;
}
@@ -947,7 +967,7 @@ second_pass:
udelay(1000);
ppa_disconnect(dev);
udelay(1000);
- if (dev->mode == PPA_EPP_32) {
+ if (dev->mode != old_mode) {
dev->mode = old_mode;
goto second_pass;
}
@@ -1026,7 +1046,6 @@ static int __ppa_attach(struct parport *pb)
DEFINE_WAIT(wait);
ppa_struct *dev, *temp;
int ports;
- int modes, ppb, ppb_hi;
int err = -ENOMEM;
struct pardev_cb ppa_cb;
@@ -1034,7 +1053,7 @@ static int __ppa_attach(struct parport *pb)
if (!dev)
return -ENOMEM;
dev->base = -1;
- dev->mode = PPA_AUTODETECT;
+ dev->mode = mode < PPA_UNKNOWN ? mode : PPA_AUTODETECT;
dev->recon_tmo = PPA_RECON_TMO;
init_waitqueue_head(&waiting);
temp = find_parent();
@@ -1069,25 +1088,8 @@ static int __ppa_attach(struct parport *pb)
}
dev->waiting = NULL;
finish_wait(&waiting, &wait);
- ppb = dev->base = dev->dev->port->base;
- ppb_hi = dev->dev->port->base_hi;
- w_ctr(ppb, 0x0c);
- modes = dev->dev->port->modes;
-
- /* Mode detection works up the chain of speed
- * This avoids a nasty if-then-else-if-... tree
- */
- dev->mode = PPA_NIBBLE;
-
- if (modes & PARPORT_MODE_TRISTATE)
- dev->mode = PPA_PS2;
-
- if (modes & PARPORT_MODE_ECP) {
- w_ecr(ppb_hi, 0x20);
- dev->mode = PPA_PS2;
- }
- if ((modes & PARPORT_MODE_EPP) && (modes & PARPORT_MODE_ECP))
- w_ecr(ppb_hi, 0x80);
+ dev->base = dev->dev->port->base;
+ w_ctr(dev->base, 0x0c);
/* Done configuration */
diff --git a/drivers/scsi/ppa.h b/drivers/scsi/ppa.h
index 6a1f8a2d70eb..098bcf7b9eb4 100644
--- a/drivers/scsi/ppa.h
+++ b/drivers/scsi/ppa.h
@@ -107,11 +107,7 @@ static char *PPA_MODE_STRING[] =
"PS/2",
"EPP 8 bit",
"EPP 16 bit",
-#ifdef CONFIG_SCSI_IZIP_EPP16
- "EPP 16 bit",
-#else
"EPP 32 bit",
-#endif
"Unknown"};
/* other options */
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index f4d81127239e..5ec2b817c694 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -59,6 +59,8 @@ extern uint qedf_debug;
#define QEDF_LOG_NOTICE 0x40000000 /* Notice logs */
#define QEDF_LOG_WARN 0x80000000 /* Warning logs */
+#define QEDF_DEBUGFS_LOG_LEN (2 * PAGE_SIZE)
+
/* Debug context structure */
struct qedf_dbg_ctx {
unsigned int host_no;
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index a3ed681c8ce3..451fd236bfd0 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -8,6 +8,7 @@
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/module.h>
+#include <linux/vmalloc.h>
#include "qedf.h"
#include "qedf_dbg.h"
@@ -98,7 +99,9 @@ static ssize_t
qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
loff_t *ppos)
{
+ ssize_t ret;
size_t cnt = 0;
+ char *cbuf;
int id;
struct qedf_fastpath *fp = NULL;
struct qedf_dbg_ctx *qedf_dbg =
@@ -108,19 +111,25 @@ qedf_dbg_fp_int_cmd_read(struct file *filp, char __user *buffer, size_t count,
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
- cnt = sprintf(buffer, "\nFastpath I/O completions\n\n");
+ cbuf = vmalloc(QEDF_DEBUGFS_LOG_LEN);
+ if (!cbuf)
+ return 0;
+
+ cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt, "\nFastpath I/O completions\n\n");
for (id = 0; id < qedf->num_queues; id++) {
fp = &(qedf->fp_array[id]);
if (fp->sb_id == QEDF_SB_ID_NULL)
continue;
- cnt += sprintf((buffer + cnt), "#%d: %lu\n", id,
- fp->completions);
+ cnt += scnprintf(cbuf + cnt, QEDF_DEBUGFS_LOG_LEN - cnt,
+ "#%d: %lu\n", id, fp->completions);
}
- cnt = min_t(int, count, cnt - *ppos);
- *ppos += cnt;
- return cnt;
+ ret = simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
+
+ vfree(cbuf);
+
+ return ret;
}
static ssize_t
@@ -138,15 +147,14 @@ qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count,
loff_t *ppos)
{
int cnt;
+ char cbuf[32];
struct qedf_dbg_ctx *qedf_dbg =
(struct qedf_dbg_ctx *)filp->private_data;
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "debug mask=0x%x\n", qedf_debug);
- cnt = sprintf(buffer, "debug mask = 0x%x\n", qedf_debug);
+ cnt = scnprintf(cbuf, sizeof(cbuf), "debug mask = 0x%x\n", qedf_debug);
- cnt = min_t(int, count, cnt - *ppos);
- *ppos += cnt;
- return cnt;
+ return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
}
static ssize_t
@@ -185,18 +193,17 @@ qedf_dbg_stop_io_on_error_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
int cnt;
+ char cbuf[7];
struct qedf_dbg_ctx *qedf_dbg =
(struct qedf_dbg_ctx *)filp->private_data;
struct qedf_ctx *qedf = container_of(qedf_dbg,
struct qedf_ctx, dbg_ctx);
QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "entered\n");
- cnt = sprintf(buffer, "%s\n",
+ cnt = scnprintf(cbuf, sizeof(cbuf), "%s\n",
qedf->stop_io_on_error ? "true" : "false");
- cnt = min_t(int, count, cnt - *ppos);
- *ppos += cnt;
- return cnt;
+ return simple_read_from_buffer(buffer, count, ppos, cbuf, cnt);
}
static ssize_t
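The qedf debugfs hunks above all move from formatting with sprintf() directly into the caller's buffer to formatting into a driver-owned buffer and copying it out with simple_read_from_buffer(), which bounds the copy and maintains *ppos. A minimal sketch of that pattern with a made-up attribute; the 32-byte buffer size is an assumption.

static ssize_t example_attr_read(struct file *filp, char __user *ubuf,
				 size_t count, loff_t *ppos)
{
	char buf[32];
	int len;

	/* scnprintf() cannot overrun buf and returns the bytes written */
	len = scnprintf(buf, sizeof(buf), "value = 0x%x\n", 0x1234);

	/* copies at most count bytes starting at *ppos and advances *ppos */
	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}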
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 4ae38305c15a..336b8c665cb4 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -466,6 +466,7 @@ static inline be_id_t port_id_to_be_id(port_id_t port_id)
}
struct tmf_arg {
+ struct list_head tmf_elem;
struct qla_qpair *qpair;
struct fc_port *fcport;
struct scsi_qla_host *vha;
@@ -2541,7 +2542,6 @@ enum rscn_addr_format {
typedef struct fc_port {
struct list_head list;
struct scsi_qla_host *vha;
- struct list_head tmf_pending;
unsigned int conf_compl_supported:1;
unsigned int deleted:2;
@@ -2562,9 +2562,6 @@ typedef struct fc_port {
unsigned int do_prli_nvme:1;
uint8_t nvme_flag;
- uint8_t active_tmf;
-#define MAX_ACTIVE_TMF 8
-
uint8_t node_name[WWN_SIZE];
uint8_t port_name[WWN_SIZE];
port_id_t d_id;
@@ -4656,6 +4653,8 @@ struct qla_hw_data {
uint32_t flt_region_aux_img_status_sec;
};
uint8_t active_image;
+ uint8_t active_tmf;
+#define MAX_ACTIVE_TMF 8
/* Needed for BEACON */
uint16_t beacon_blink_led;
@@ -4670,6 +4669,8 @@ struct qla_hw_data {
struct qla_msix_entry *msix_entries;
+ struct list_head tmf_pending;
+ struct list_head tmf_active;
struct list_head vp_list; /* list of VP */
unsigned long vp_idx_map[(MAX_MULTI_ID_FABRIC / 8) /
sizeof(unsigned long)];
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index ba7831f24734..816c0b9ecd0e 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -48,8 +48,6 @@ extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool);
extern void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha,
struct els_plogi *els_plogi);
-extern void qla2x00_update_fcports(scsi_qla_host_t *);
-
extern int qla2x00_abort_isp(scsi_qla_host_t *);
extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
extern void qla2x00_quiesce_io(scsi_qla_host_t *);
@@ -143,6 +141,7 @@ void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess);
void qla_edif_clear_appdata(struct scsi_qla_host *vha,
struct fc_port *fcport);
const char *sc_to_str(uint16_t cmd);
+void qla_adjust_iocb_limit(scsi_qla_host_t *vha);
/*
* Global Data in qla_os.c source file.
@@ -205,8 +204,6 @@ extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *,
- fc_port_t *, uint16_t *);
extern int qla2x00_set_exlogins_buffer(struct scsi_qla_host *);
extern void qla2x00_free_exlogin_buffer(struct qla_hw_data *);
extern int qla2x00_set_exchoffld_buffer(struct scsi_qla_host *);
@@ -216,7 +213,6 @@ extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
extern struct scsi_qla_host *qla2x00_create_host(const struct scsi_host_template *,
struct qla_hw_data *);
-extern void qla2x00_free_host(struct scsi_qla_host *);
extern void qla2x00_relogin(struct scsi_qla_host *);
extern void qla2x00_do_work(struct scsi_qla_host *);
extern void qla2x00_free_fcports(struct scsi_qla_host *);
@@ -238,13 +234,10 @@ extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);
extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
-extern void qla_eeh_work(struct work_struct *);
extern void qla2x00_sp_compl(srb_t *sp, int);
extern void qla2xxx_qpair_sp_free_dma(srb_t *sp);
extern void qla2xxx_qpair_sp_compl(srb_t *sp, int);
extern void qla24xx_sched_upd_fcport(fc_port_t *);
-void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
- uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
@@ -728,7 +721,6 @@ int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
int qla2x00_mgmt_svr_login(scsi_qla_host_t *);
-void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea);
int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool);
int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *);
void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
@@ -851,7 +843,6 @@ extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
/* Interrupt related */
extern irqreturn_t qla82xx_intr_handler(int, void *);
-extern irqreturn_t qla82xx_msi_handler(int, void *);
extern irqreturn_t qla82xx_msix_default(int, void *);
extern irqreturn_t qla82xx_msix_rsp_q(int, void *);
extern void qla82xx_enable_intrs(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 367fba27fe69..df623de89255 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -508,6 +508,7 @@ static
void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
struct fc_port *fcport = ea->fcport;
+ unsigned long flags;
ql_dbg(ql_dbg_disc, vha, 0x20d2,
"%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
@@ -522,9 +523,15 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
ql_dbg(ql_dbg_disc, vha, 0x2066,
"%s %8phC: adisc fail: post delete\n",
__func__, ea->fcport->port_name);
+
+ spin_lock_irqsave(&vha->work_lock, flags);
/* deleted = 0 & logout_on_delete = force fw cleanup */
- fcport->deleted = 0;
+ if (fcport->deleted == QLA_SESS_DELETED)
+ fcport->deleted = 0;
+
fcport->logout_on_delete = 1;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
qlt_schedule_sess_for_deletion(ea->fcport);
return;
}
@@ -1134,7 +1141,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
u16 *mb;
if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
- return rval;
+ goto done;
ql_dbg(ql_dbg_disc, vha, 0x20d9,
"Async-gnlist WWPN %8phC \n", fcport->port_name);
@@ -1188,8 +1195,9 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
done_free_sp:
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
+ fcport->flags &= ~(FCF_ASYNC_SENT);
done:
- fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT);
+ fcport->flags &= ~(FCF_ASYNC_ACTIVE);
return rval;
}
@@ -1446,7 +1454,6 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
ea->fcport->login_gen++;
- ea->fcport->deleted = 0;
ea->fcport->logout_on_delete = 1;
if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) {
@@ -1996,12 +2003,11 @@ qla2x00_tmf_iocb_timeout(void *data)
int rc, h;
unsigned long flags;
- if (sp->type == SRB_MARKER) {
- complete(&tmf->u.tmf.comp);
- return;
- }
+ if (sp->type == SRB_MARKER)
+ rc = QLA_FUNCTION_FAILED;
+ else
+ rc = qla24xx_async_abort_cmd(sp, false);
- rc = qla24xx_async_abort_cmd(sp, false);
if (rc) {
spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
@@ -2032,10 +2038,14 @@ static void qla_marker_sp_done(srb_t *sp, int res)
complete(&tmf->u.tmf.comp);
}
-#define START_SP_W_RETRIES(_sp, _rval) \
+#define START_SP_W_RETRIES(_sp, _rval, _chip_gen, _login_gen) \
{\
int cnt = 5; \
do { \
+ if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\
+ _rval = EINVAL; \
+ break; \
+ } \
_rval = qla2x00_start_sp(_sp); \
if (_rval == EAGAIN) \
msleep(1); \
@@ -2058,6 +2068,7 @@ qla26xx_marker(struct tmf_arg *arg)
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
fc_port_t *fcport = arg->fcport;
+ u32 chip_gen, login_gen;
if (TMF_NOT_READY(arg->fcport)) {
ql_dbg(ql_dbg_taskm, vha, 0x8039,
@@ -2067,6 +2078,9 @@ qla26xx_marker(struct tmf_arg *arg)
return QLA_SUSPENDED;
}
+ chip_gen = vha->hw->chip_reset;
+ login_gen = fcport->login_gen;
+
/* ref: INIT */
sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
if (!sp)
@@ -2084,7 +2098,7 @@ qla26xx_marker(struct tmf_arg *arg)
tm_iocb->u.tmf.loop_id = fcport->loop_id;
tm_iocb->u.tmf.vp_index = vha->vp_idx;
- START_SP_W_RETRIES(sp, rval);
+ START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);
ql_dbg(ql_dbg_taskm, vha, 0x8006,
"Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n",
@@ -2123,6 +2137,17 @@ static void qla2x00_tmf_sp_done(srb_t *sp, int res)
complete(&tmf->u.tmf.comp);
}
+static int qla_tmf_wait(struct tmf_arg *arg)
+{
+ /* only two types of error handling reach here: LUN or target reset */
+ if (arg->flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET | TCF_CLEAR_TASK_SET))
+ return qla2x00_eh_wait_for_pending_commands(arg->vha,
+ arg->fcport->d_id.b24, arg->lun, WAIT_LUN);
+ else
+ return qla2x00_eh_wait_for_pending_commands(arg->vha,
+ arg->fcport->d_id.b24, arg->lun, WAIT_TARGET);
+}
+
static int
__qla2x00_async_tm_cmd(struct tmf_arg *arg)
{
@@ -2130,8 +2155,9 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
struct srb_iocb *tm_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
-
fc_port_t *fcport = arg->fcport;
+ u32 chip_gen, login_gen;
+ u64 jif;
if (TMF_NOT_READY(arg->fcport)) {
ql_dbg(ql_dbg_taskm, vha, 0x8032,
@@ -2141,6 +2167,9 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
return QLA_SUSPENDED;
}
+ chip_gen = vha->hw->chip_reset;
+ login_gen = fcport->login_gen;
+
/* ref: INIT */
sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
if (!sp)
@@ -2158,7 +2187,7 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
tm_iocb->u.tmf.flags = arg->flags;
tm_iocb->u.tmf.lun = arg->lun;
- START_SP_W_RETRIES(sp, rval);
+ START_SP_W_RETRIES(sp, rval, chip_gen, login_gen);
ql_dbg(ql_dbg_taskm, vha, 0x802f,
"Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n",
@@ -2176,8 +2205,24 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg)
"TM IOCB failed (%x).\n", rval);
}
- if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw))
- rval = qla26xx_marker(arg);
+ if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) {
+ jif = jiffies;
+ if (qla_tmf_wait(arg)) {
+ ql_log(ql_log_info, vha, 0x803e,
+ "Waited %u ms Nexus=%ld:%06x:%llu.\n",
+ jiffies_to_msecs(jiffies - jif), vha->host_no,
+ fcport->d_id.b24, arg->lun);
+ }
+
+ if (chip_gen == vha->hw->chip_reset && login_gen == fcport->login_gen) {
+ rval = qla26xx_marker(arg);
+ } else {
+ ql_log(ql_log_info, vha, 0x803e,
+ "Skip Marker due to disruption. Nexus=%ld:%06x:%llu.\n",
+ vha->host_no, fcport->d_id.b24, arg->lun);
+ rval = QLA_FUNCTION_FAILED;
+ }
+ }
done_free_sp:
/* ref: INIT */
@@ -2186,30 +2231,42 @@ done:
return rval;
}
-static void qla_put_tmf(fc_port_t *fcport)
+static void qla_put_tmf(struct tmf_arg *arg)
{
- struct scsi_qla_host *vha = fcport->vha;
+ struct scsi_qla_host *vha = arg->vha;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- fcport->active_tmf--;
+ ha->active_tmf--;
+ list_del(&arg->tmf_elem);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
static
-int qla_get_tmf(fc_port_t *fcport)
+int qla_get_tmf(struct tmf_arg *arg)
{
- struct scsi_qla_host *vha = fcport->vha;
+ struct scsi_qla_host *vha = arg->vha;
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
+ fc_port_t *fcport = arg->fcport;
int rc = 0;
- LIST_HEAD(tmf_elem);
+ struct tmf_arg *t;
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- list_add_tail(&tmf_elem, &fcport->tmf_pending);
+ list_for_each_entry(t, &ha->tmf_active, tmf_elem) {
+ if (t->fcport == arg->fcport && t->lun == arg->lun) {
+ /* reject duplicate TMF */
+ ql_log(ql_log_warn, vha, 0x802c,
+ "found duplicate TMF. Nexus=%ld:%06x:%llu.\n",
+ vha->host_no, fcport->d_id.b24, arg->lun);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ return -EINVAL;
+ }
+ }
- while (fcport->active_tmf >= MAX_ACTIVE_TMF) {
+ list_add_tail(&arg->tmf_elem, &ha->tmf_pending);
+ while (ha->active_tmf >= MAX_ACTIVE_TMF) {
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
msleep(1);
@@ -2221,15 +2278,17 @@ int qla_get_tmf(fc_port_t *fcport)
rc = EIO;
break;
}
- if (fcport->active_tmf < MAX_ACTIVE_TMF &&
- list_is_first(&tmf_elem, &fcport->tmf_pending))
+ if (ha->active_tmf < MAX_ACTIVE_TMF &&
+ list_is_first(&arg->tmf_elem, &ha->tmf_pending))
break;
}
- list_del(&tmf_elem);
+ list_del(&arg->tmf_elem);
- if (!rc)
- fcport->active_tmf++;
+ if (!rc) {
+ ha->active_tmf++;
+ list_add_tail(&arg->tmf_elem, &ha->tmf_active);
+ }
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
@@ -2241,9 +2300,8 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
uint32_t tag)
{
struct scsi_qla_host *vha = fcport->vha;
- struct qla_qpair *qpair;
struct tmf_arg a;
- int i, rval = QLA_SUCCESS;
+ int rval = QLA_SUCCESS;
if (TMF_NOT_READY(fcport))
return QLA_SUSPENDED;
@@ -2251,47 +2309,22 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun,
a.vha = fcport->vha;
a.fcport = fcport;
a.lun = lun;
+ a.flags = flags;
+ INIT_LIST_HEAD(&a.tmf_elem);
+
if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) {
a.modifier = MK_SYNC_ID_LUN;
-
- if (qla_get_tmf(fcport))
- return QLA_FUNCTION_FAILED;
} else {
a.modifier = MK_SYNC_ID;
}
- if (vha->hw->mqenable) {
- for (i = 0; i < vha->hw->num_qpairs; i++) {
- qpair = vha->hw->queue_pair_map[i];
- if (!qpair)
- continue;
-
- if (TMF_NOT_READY(fcport)) {
- ql_log(ql_log_warn, vha, 0x8026,
- "Unable to send TM due to disruption.\n");
- rval = QLA_SUSPENDED;
- break;
- }
-
- a.qpair = qpair;
- a.flags = flags|TCF_NOTMCMD_TO_TARGET;
- rval = __qla2x00_async_tm_cmd(&a);
- if (rval)
- break;
- }
- }
-
- if (rval)
- goto bailout;
+ if (qla_get_tmf(&a))
+ return QLA_FUNCTION_FAILED;
a.qpair = vha->hw->base_qpair;
- a.flags = flags;
rval = __qla2x00_async_tm_cmd(&a);
-bailout:
- if (a.modifier == MK_SYNC_ID_LUN)
- qla_put_tmf(fcport);
-
+ qla_put_tmf(&a);
return rval;
}
@@ -4147,41 +4180,55 @@ out:
return ha->flags.lr_detected;
}
-void qla_init_iocb_limit(scsi_qla_host_t *vha)
+static void __qla_adjust_iocb_limit(struct qla_qpair *qpair)
{
- u16 i, num_qps;
- u32 limit;
- struct qla_hw_data *ha = vha->hw;
+ u8 num_qps;
+ u16 limit;
+ struct qla_hw_data *ha = qpair->vha->hw;
num_qps = ha->num_qpairs + 1;
limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
- ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
- ha->base_qpair->fwres.iocbs_limit = limit;
- ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
- ha->base_qpair->fwres.iocbs_used = 0;
+ qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
+ qpair->fwres.iocbs_limit = limit;
+ qpair->fwres.iocbs_qp_limit = limit / num_qps;
+
+ qpair->fwres.exch_total = ha->orig_fw_xcb_count;
+ qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
+ QLA_IOCB_PCT_LIMIT) / 100;
+}
+
+void qla_init_iocb_limit(scsi_qla_host_t *vha)
+{
+ u8 i;
+ struct qla_hw_data *ha = vha->hw;
- ha->base_qpair->fwres.exch_total = ha->orig_fw_xcb_count;
- ha->base_qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
- QLA_IOCB_PCT_LIMIT) / 100;
+ __qla_adjust_iocb_limit(ha->base_qpair);
+ ha->base_qpair->fwres.iocbs_used = 0;
ha->base_qpair->fwres.exch_used = 0;
for (i = 0; i < ha->max_qpairs; i++) {
if (ha->queue_pair_map[i]) {
- ha->queue_pair_map[i]->fwres.iocbs_total =
- ha->orig_fw_iocb_count;
- ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
- ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
- limit / num_qps;
+ __qla_adjust_iocb_limit(ha->queue_pair_map[i]);
ha->queue_pair_map[i]->fwres.iocbs_used = 0;
- ha->queue_pair_map[i]->fwres.exch_total = ha->orig_fw_xcb_count;
- ha->queue_pair_map[i]->fwres.exch_limit =
- (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
ha->queue_pair_map[i]->fwres.exch_used = 0;
}
}
}
+void qla_adjust_iocb_limit(scsi_qla_host_t *vha)
+{
+ u8 i;
+ struct qla_hw_data *ha = vha->hw;
+
+ __qla_adjust_iocb_limit(ha->base_qpair);
+
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i])
+ __qla_adjust_iocb_limit(ha->queue_pair_map[i]);
+ }
+}
+
/**
* qla2x00_setup_chip() - Load and start RISC firmware.
* @vha: HA context
@@ -4777,15 +4824,16 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
if (ha->flags.edif_enabled)
mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD);
+ QLA_FW_STARTED(ha);
rval = qla2x00_init_firmware(vha, ha->init_cb_size);
next_check:
if (rval) {
+ QLA_FW_STOPPED(ha);
ql_log(ql_log_fatal, vha, 0x00d2,
"Init Firmware **** FAILED ****.\n");
} else {
ql_dbg(ql_dbg_init, vha, 0x00d3,
"Init Firmware -- success.\n");
- QLA_FW_STARTED(ha);
vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
}
@@ -5506,7 +5554,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
- INIT_LIST_HEAD(&fcport->tmf_pending);
INIT_LIST_HEAD(&fcport->sess_cmd_list);
spin_lock_init(&fcport->sess_cmd_lock);
@@ -6090,6 +6137,8 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
+ unsigned long flags;
+
if (IS_SW_RESV_ADDR(fcport->d_id))
return;
@@ -6099,7 +6148,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT);
fcport->login_retry = vha->hw->login_retry_count;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+
+ spin_lock_irqsave(&vha->work_lock, flags);
fcport->deleted = 0;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
if (vha->hw->current_topology == ISP_CFG_NL)
fcport->logout_on_delete = 0;
else
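Factoring the resource math into __qla_adjust_iocb_limit() above lets the driver recompute the per-queue-pair shares when queue pairs are added later (see the qla_nvme.c hunk further below). As a worked example of the arithmetic only, with invented numbers, since the percentage constant and firmware counts are adapter specific:

/* Illustration only, not driver code. */
static void example_iocb_limits(u16 fw_iocb_count, u8 extra_qpairs, u8 pct)
{
	u16 num_qps = extra_qpairs + 1;		/* plus the base qpair */
	u16 limit = (fw_iocb_count * pct) / 100;

	/* e.g. 2048 firmware IOCBs, 85%, 3 extra qpairs:
	 * limit = 1740, per-qpair share = 435
	 */
	pr_info("adapter limit %u, per-qpair limit %u\n",
		limit, limit / num_qps);
}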
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 730d8609276c..1ee9b7d5fc15 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -3882,6 +3882,7 @@ qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
{
mrk->entry_type = MARKER_TYPE;
mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier;
+ mrk->handle = make_handle(sp->qpair->req->id, sp->handle);
if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) {
mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id);
int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 656700f79325..1f42a413b598 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1121,8 +1121,12 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
unsigned long flags;
fc_port_t *fcport = NULL;
- if (!vha->hw->flags.fw_started)
+ if (!vha->hw->flags.fw_started) {
+ ql_log(ql_log_warn, vha, 0x50ff,
+ "Dropping AEN - %04x %04x %04x %04x.\n",
+ mb[0], mb[1], mb[2], mb[3]);
return;
+ }
/* Setup to process RIO completion. */
handle_cnt = 0;
@@ -2539,7 +2543,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
case CS_PORT_BUSY:
case CS_INCOMPLETE:
case CS_PORT_UNAVAILABLE:
- case CS_TIMEOUT:
case CS_RESET:
if (atomic_read(&fcport->state) == FCS_ONLINE) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 254fd4c64262..b05f93037875 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2213,6 +2213,9 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
"Entered %s.\n", __func__);
+ if (!ha->flags.fw_started)
+ return QLA_FUNCTION_FAILED;
+
mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
mcp->out_mb = MBX_0;
if (IS_FWI2_CAPABLE(vha->hw))
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 86e85f2f4782..9941b38eac93 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -132,6 +132,7 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
"Failed to allocate qpair\n");
return -EINVAL;
}
+ qla_adjust_iocb_limit(vha);
}
*handle = qpair;
@@ -667,7 +668,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
rval = qla2x00_start_nvme_mq(sp);
if (rval != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x212d,
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d,
"qla2x00_start_nvme_mq failed = %d\n", rval);
sp->priv = NULL;
priv->sp = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 877e4f446709..03bc3a0b45b6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1488,8 +1488,9 @@ qla2xxx_eh_device_reset(struct scsi_cmnd *cmd)
goto eh_reset_failed;
}
err = 3;
- if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
- sdev->lun, WAIT_LUN) != QLA_SUCCESS) {
+ if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24,
+ cmd->device->lun,
+ WAIT_LUN) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800d,
"wait for pending cmds failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
@@ -1555,8 +1556,8 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd)
goto eh_reset_failed;
}
err = 3;
- if (qla2x00_eh_wait_for_pending_commands(vha, sdev->id,
- 0, WAIT_TARGET) != QLA_SUCCESS) {
+ if (qla2x00_eh_wait_for_pending_commands(vha, fcport->d_id.b24, 0,
+ WAIT_TARGET) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800d,
"wait for pending cmds failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
@@ -3009,6 +3010,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
atomic_set(&ha->num_pend_mbx_stage3, 0);
atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
+ INIT_LIST_HEAD(&ha->tmf_pending);
+ INIT_LIST_HEAD(&ha->tmf_active);
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 5258b07687a9..2b815a9928ea 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1068,10 +1068,6 @@ void qlt_free_session_done(struct work_struct *work)
(struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO);
}
- spin_lock_irqsave(&vha->work_lock, flags);
- sess->flags &= ~FCF_ASYNC_SENT;
- spin_unlock_irqrestore(&vha->work_lock, flags);
-
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (sess->se_sess) {
sess->se_sess = NULL;
@@ -1081,7 +1077,6 @@ void qlt_free_session_done(struct work_struct *work)
qla2x00_set_fcport_disc_state(sess, DSC_DELETED);
sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
- sess->deleted = QLA_SESS_DELETED;
if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) {
vha->fcport_count--;
@@ -1133,10 +1128,15 @@ void qlt_free_session_done(struct work_struct *work)
sess->explicit_logout = 0;
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
- sess->free_pending = 0;
qla2x00_dfs_remove_rport(vha, sess);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ sess->flags &= ~FCF_ASYNC_SENT;
+ sess->deleted = QLA_SESS_DELETED;
+ sess->free_pending = 0;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
ql_dbg(ql_dbg_disc, vha, 0xf001,
"Unregistration of sess %p %8phC finished fcp_cnt %d\n",
sess, sess->port_name, vha->fcport_count);
@@ -1185,12 +1185,12 @@ void qlt_unreg_sess(struct fc_port *sess)
* management from being sent.
*/
sess->flags |= FCF_ASYNC_SENT;
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
spin_unlock_irqrestore(&sess->vha->work_lock, flags);
if (sess->se_sess)
vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
- sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND);
sess->last_rscn_gen = sess->rscn_gen;
sess->last_login_gen = sess->login_gen;
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index e3771923b0d7..81bdf6b03241 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.08.400-k"
+#define QLA2XXX_VERSION "10.02.08.500-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 8
-#define QLA_DRIVER_BETA_VER 400
+#define QLA_DRIVER_BETA_VER 500
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index b2a3988e1e15..675332e49a7b 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -968,6 +968,11 @@ static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len)
memset(&chap_rec, 0, sizeof(chap_rec));
nla_for_each_attr(attr, data, len, rem) {
+ if (nla_len(attr) < sizeof(*param_info)) {
+ rc = -EINVAL;
+ goto exit_set_chap;
+ }
+
param_info = nla_data(attr);
switch (param_info->param) {
@@ -2750,6 +2755,11 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
}
nla_for_each_attr(attr, data, len, rem) {
+ if (nla_len(attr) < sizeof(*iface_param)) {
+ rval = -EINVAL;
+ goto exit_init_fw_cb;
+ }
+
iface_param = nla_data(attr);
if (iface_param->param_type == ISCSI_NET_PARAM) {
@@ -8104,6 +8114,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
memset((void *)&chap_tbl, 0, sizeof(chap_tbl));
nla_for_each_attr(attr, data, len, rem) {
+ if (nla_len(attr) < sizeof(*fnode_param)) {
+ rc = -EINVAL;
+ goto exit_set_param;
+ }
+
fnode_param = nla_data(attr);
switch (fnode_param->param) {
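The three ql4_os.c hunks above add the same guard before every nla_data() dereference. A minimal sketch of the pattern with a hypothetical payload structure (requires <net/netlink.h>); the point is that nla_len() must cover the whole structure before any of its fields are read.

struct example_param {
	u32 param;
	u32 value;
};

static int example_parse_attrs(struct nlattr *head, int len)
{
	struct example_param *p;
	struct nlattr *attr;
	int rem;

	nla_for_each_attr(attr, head, len, rem) {
		if (nla_len(attr) < sizeof(*p))
			return -EINVAL;		/* truncated attribute */

		p = nla_data(attr);
		/* ... safe to use p->param and p->value here ... */
	}
	return 0;
}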
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 1e8fbd457248..3b95f7a6216f 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -28,7 +28,7 @@
#include <linux/jiffies.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/firmware.h>
#include <linux/pgtable.h>
@@ -843,7 +843,7 @@ static int qpti_map_queues(struct qlogicpti *qpti)
return 0;
}
-const char *qlogicpti_info(struct Scsi_Host *host)
+static const char *qlogicpti_info(struct Scsi_Host *host)
{
static char buf[80];
struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata;
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index f42388ecb024..118855576ca8 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -103,7 +103,6 @@ bool scsi_noretry_cmd(struct scsi_cmnd *scmd);
void scsi_eh_done(struct scsi_cmnd *scmd);
/* scsi_lib.c */
-extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
extern void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd);
extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
@@ -155,7 +154,6 @@ extern int scsi_sysfs_add_host(struct Scsi_Host *);
extern int scsi_sysfs_register(void);
extern void scsi_sysfs_unregister(void);
extern void scsi_sysfs_device_initialize(struct scsi_device *);
-extern int scsi_sysfs_target_initialize(struct scsi_device *);
extern struct scsi_transport_template blank_transport_template;
extern void __scsi_remove_device(struct scsi_device *);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index e527ece12453..3075b2ddf7a6 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -3014,14 +3014,15 @@ iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev
}
static int
-iscsi_if_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+iscsi_if_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen)
{
char *data = (char*)ev + sizeof(*ev);
struct iscsi_cls_conn *conn;
struct iscsi_cls_session *session;
int err = 0, value = 0, state;
- if (ev->u.set_param.len > PAGE_SIZE)
+ if (ev->u.set_param.len > rlen ||
+ ev->u.set_param.len > PAGE_SIZE)
return -EINVAL;
session = iscsi_session_lookup(ev->u.set_param.sid);
@@ -3029,6 +3030,10 @@ iscsi_if_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
if (!conn || !session)
return -EINVAL;
+ /* data will be treated as a NUL-terminated string; check its length */
+ if (strlen(data) > ev->u.set_param.len)
+ return -EINVAL;
+
switch (ev->u.set_param.param) {
case ISCSI_PARAM_SESS_RECOVERY_TMO:
sscanf(data, "%d", &value);
@@ -3118,7 +3123,7 @@ put_ep:
static int
iscsi_if_transport_ep(struct iscsi_transport *transport,
- struct iscsi_uevent *ev, int msg_type)
+ struct iscsi_uevent *ev, int msg_type, u32 rlen)
{
struct iscsi_endpoint *ep;
int rc = 0;
@@ -3126,7 +3131,10 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
switch (msg_type) {
case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
- rc = iscsi_if_ep_connect(transport, ev, msg_type);
+ if (rlen < sizeof(struct sockaddr))
+ rc = -EINVAL;
+ else
+ rc = iscsi_if_ep_connect(transport, ev, msg_type);
break;
case ISCSI_UEVENT_TRANSPORT_EP_POLL:
if (!transport->ep_poll)
@@ -3150,12 +3158,15 @@ iscsi_if_transport_ep(struct iscsi_transport *transport,
static int
iscsi_tgt_dscvr(struct iscsi_transport *transport,
- struct iscsi_uevent *ev)
+ struct iscsi_uevent *ev, u32 rlen)
{
struct Scsi_Host *shost;
struct sockaddr *dst_addr;
int err;
+ if (rlen < sizeof(*dst_addr))
+ return -EINVAL;
+
if (!transport->tgt_dscvr)
return -EINVAL;
@@ -3176,7 +3187,7 @@ iscsi_tgt_dscvr(struct iscsi_transport *transport,
static int
iscsi_set_host_param(struct iscsi_transport *transport,
- struct iscsi_uevent *ev)
+ struct iscsi_uevent *ev, u32 rlen)
{
char *data = (char*)ev + sizeof(*ev);
struct Scsi_Host *shost;
@@ -3185,7 +3196,8 @@ iscsi_set_host_param(struct iscsi_transport *transport,
if (!transport->set_host_param)
return -ENOSYS;
- if (ev->u.set_host_param.len > PAGE_SIZE)
+ if (ev->u.set_host_param.len > rlen ||
+ ev->u.set_host_param.len > PAGE_SIZE)
return -EINVAL;
shost = scsi_host_lookup(ev->u.set_host_param.host_no);
@@ -3195,6 +3207,10 @@ iscsi_set_host_param(struct iscsi_transport *transport,
return -ENODEV;
}
+ /* see similar check in iscsi_if_set_param() */
+ if (strlen(data) > ev->u.set_host_param.len)
+ return -EINVAL;
+
err = transport->set_host_param(shost, ev->u.set_host_param.param,
data, ev->u.set_host_param.len);
scsi_host_put(shost);
@@ -3202,12 +3218,15 @@ iscsi_set_host_param(struct iscsi_transport *transport,
}
static int
-iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen)
{
struct Scsi_Host *shost;
struct iscsi_path *params;
int err;
+ if (rlen < sizeof(*params))
+ return -EINVAL;
+
if (!transport->set_path)
return -ENOSYS;
@@ -3267,12 +3286,15 @@ iscsi_set_iface_params(struct iscsi_transport *transport,
}
static int
-iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev, u32 rlen)
{
struct Scsi_Host *shost;
struct sockaddr *dst_addr;
int err;
+ if (rlen < sizeof(*dst_addr))
+ return -EINVAL;
+
if (!transport->send_ping)
return -ENOSYS;
@@ -3770,13 +3792,12 @@ exit_host_stats:
}
static int iscsi_if_transport_conn(struct iscsi_transport *transport,
- struct nlmsghdr *nlh)
+ struct nlmsghdr *nlh, u32 pdu_len)
{
struct iscsi_uevent *ev = nlmsg_data(nlh);
struct iscsi_cls_session *session;
struct iscsi_cls_conn *conn = NULL;
struct iscsi_endpoint *ep;
- uint32_t pdu_len;
int err = 0;
switch (nlh->nlmsg_type) {
@@ -3861,8 +3882,6 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
break;
case ISCSI_UEVENT_SEND_PDU:
- pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
-
if ((ev->u.send_pdu.hdr_size > pdu_len) ||
(ev->u.send_pdu.data_size > (pdu_len - ev->u.send_pdu.hdr_size))) {
err = -EINVAL;
@@ -3892,6 +3911,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
struct iscsi_internal *priv;
struct iscsi_cls_session *session;
struct iscsi_endpoint *ep = NULL;
+ u32 rlen;
if (!netlink_capable(skb, CAP_SYS_ADMIN))
return -EPERM;
@@ -3911,6 +3931,13 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
portid = NETLINK_CB(skb).portid;
+ /*
+ * Even though the remaining payload may not be a nlattr (it may be
+ * an address or something else), calculate the remaining length here
+ * to simplify the length checks that follow.
+ */
+ rlen = nlmsg_attrlen(nlh, sizeof(*ev));
+
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_CREATE_SESSION:
err = iscsi_if_create_session(priv, ep, ev,
@@ -3967,7 +3994,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
err = -EINVAL;
break;
case ISCSI_UEVENT_SET_PARAM:
- err = iscsi_if_set_param(transport, ev);
+ err = iscsi_if_set_param(transport, ev, rlen);
break;
case ISCSI_UEVENT_CREATE_CONN:
case ISCSI_UEVENT_DESTROY_CONN:
@@ -3975,7 +4002,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_START_CONN:
case ISCSI_UEVENT_BIND_CONN:
case ISCSI_UEVENT_SEND_PDU:
- err = iscsi_if_transport_conn(transport, nlh);
+ err = iscsi_if_transport_conn(transport, nlh, rlen);
break;
case ISCSI_UEVENT_GET_STATS:
err = iscsi_if_get_stats(transport, nlh);
@@ -3984,23 +4011,22 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
case ISCSI_UEVENT_TRANSPORT_EP_POLL:
case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
- err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
+ err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type, rlen);
break;
case ISCSI_UEVENT_TGT_DSCVR:
- err = iscsi_tgt_dscvr(transport, ev);
+ err = iscsi_tgt_dscvr(transport, ev, rlen);
break;
case ISCSI_UEVENT_SET_HOST_PARAM:
- err = iscsi_set_host_param(transport, ev);
+ err = iscsi_set_host_param(transport, ev, rlen);
break;
case ISCSI_UEVENT_PATH_UPDATE:
- err = iscsi_set_path(transport, ev);
+ err = iscsi_set_path(transport, ev, rlen);
break;
case ISCSI_UEVENT_SET_IFACE_PARAMS:
- err = iscsi_set_iface_params(transport, ev,
- nlmsg_attrlen(nlh, sizeof(*ev)));
+ err = iscsi_set_iface_params(transport, ev, rlen);
break;
case ISCSI_UEVENT_PING:
- err = iscsi_send_ping(transport, ev);
+ err = iscsi_send_ping(transport, ev, rlen);
break;
case ISCSI_UEVENT_GET_CHAP:
err = iscsi_get_chap(transport, nlh);
@@ -4009,13 +4035,10 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
err = iscsi_delete_chap(transport, ev);
break;
case ISCSI_UEVENT_SET_FLASHNODE_PARAMS:
- err = iscsi_set_flashnode_param(transport, ev,
- nlmsg_attrlen(nlh,
- sizeof(*ev)));
+ err = iscsi_set_flashnode_param(transport, ev, rlen);
break;
case ISCSI_UEVENT_NEW_FLASHNODE:
- err = iscsi_new_flashnode(transport, ev,
- nlmsg_attrlen(nlh, sizeof(*ev)));
+ err = iscsi_new_flashnode(transport, ev, rlen);
break;
case ISCSI_UEVENT_DEL_FLASHNODE:
err = iscsi_del_flashnode(transport, ev);
@@ -4030,8 +4053,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
err = iscsi_logout_flashnode_sid(transport, ev);
break;
case ISCSI_UEVENT_SET_CHAP:
- err = iscsi_set_chap(transport, ev,
- nlmsg_attrlen(nlh, sizeof(*ev)));
+ err = iscsi_set_chap(transport, ev, rlen);
break;
case ISCSI_UEVENT_GET_HOST_STATS:
err = iscsi_get_host_stats(transport, nlh);
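The common thread in the scsi_transport_iscsi.c changes is that iscsi_if_recv_msg() now computes the remaining payload length (rlen) once, via nlmsg_attrlen(), and every handler bounds the user-supplied length by it before use. A compact sketch of that shape under hypothetical names (demo_uevent, demo_recv), not the transport class code itself:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <net/netlink.h>

/* hypothetical fixed header carried in the netlink payload */
struct demo_uevent {
	u32 len;		/* length claimed by user space */
};

static int demo_recv(struct nlmsghdr *nlh)
{
	struct demo_uevent *ev = nlmsg_data(nlh);
	char *data = (char *)ev + sizeof(*ev);
	u32 rlen = nlmsg_attrlen(nlh, sizeof(*ev));	/* bytes after *ev */

	/* never trust the claimed length beyond what was actually received */
	if (ev->len > rlen || ev->len > PAGE_SIZE)
		return -EINVAL;

	/* the payload is treated as a NUL-terminated string */
	if (strlen(data) > ev->len)
		return -EINVAL;

	/* ... act on data ... */
	return 0;
}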
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 047ffaf7d42a..068625556dda 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -316,6 +316,9 @@ enum storvsc_request_type {
#define SRB_STATUS_ABORTED 0x02
#define SRB_STATUS_ERROR 0x04
#define SRB_STATUS_INVALID_REQUEST 0x06
+#define SRB_STATUS_TIMEOUT 0x09
+#define SRB_STATUS_SELECTION_TIMEOUT 0x0A
+#define SRB_STATUS_BUS_RESET 0x0E
#define SRB_STATUS_DATA_OVERRUN 0x12
#define SRB_STATUS_INVALID_LUN 0x20
#define SRB_STATUS_INTERNAL_ERROR 0x30
@@ -981,6 +984,10 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
case SRB_STATUS_ABORTED:
case SRB_STATUS_INVALID_REQUEST:
case SRB_STATUS_INTERNAL_ERROR:
+ case SRB_STATUS_TIMEOUT:
+ case SRB_STATUS_SELECTION_TIMEOUT:
+ case SRB_STATUS_BUS_RESET:
+ case SRB_STATUS_DATA_OVERRUN:
if (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID) {
/* Check for capacity change */
if ((asc == 0x2a) && (ascq == 0x9)) {
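The storvsc hunk routes additional SRB status codes through the existing error path, where sense data is consulted only when the autosense-valid bit accompanies the status. A hypothetical helper showing that shape; the DEMO_ constants, including the assumed 0x80 autosense bit, are illustrative and not the driver's definitions:

#include <linux/types.h>

#define DEMO_STATUS_AUTOSENSE_VALID	0x80	/* assumed flag bit */

static bool demo_srb_is_handled_error(u8 srb_status)
{
	switch (srb_status & ~DEMO_STATUS_AUTOSENSE_VALID) {
	case 0x02:	/* ABORTED */
	case 0x04:	/* ERROR */
	case 0x06:	/* INVALID_REQUEST */
	case 0x09:	/* TIMEOUT */
	case 0x0A:	/* SELECTION_TIMEOUT */
	case 0x0E:	/* BUS_RESET */
	case 0x12:	/* DATA_OVERRUN */
	case 0x30:	/* INTERNAL_ERROR */
		return true;
	default:
		return false;
	}
}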
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index d06e933191a2..afa9d02a33ec 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -12,7 +12,8 @@
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <asm/irq.h>
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 1cc2281cb370..1720031f35a3 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -13,7 +13,6 @@
#include "sysfs_local.h"
static DEFINE_IDA(sdw_bus_ida);
-static DEFINE_IDA(sdw_peripheral_ida);
static int sdw_get_id(struct sdw_bus *bus)
{
@@ -194,8 +193,8 @@ static int sdw_delete_slave(struct device *dev, void *data)
if (slave->dev_num) { /* clear dev_num if assigned */
clear_bit(slave->dev_num, bus->assigned);
- if (bus->dev_num_ida_min)
- ida_free(&sdw_peripheral_ida, slave->dev_num);
+ if (bus->ops && bus->ops->put_device_num)
+ bus->ops->put_device_num(bus, slave);
}
list_del_init(&slave->node);
mutex_unlock(&bus->bus_lock);
@@ -739,16 +738,15 @@ EXPORT_SYMBOL(sdw_compare_devid);
/* called with bus_lock held */
static int sdw_get_device_num(struct sdw_slave *slave)
{
+ struct sdw_bus *bus = slave->bus;
int bit;
- if (slave->bus->dev_num_ida_min) {
- bit = ida_alloc_range(&sdw_peripheral_ida,
- slave->bus->dev_num_ida_min, SDW_MAX_DEVICES,
- GFP_KERNEL);
+ if (bus->ops && bus->ops->get_device_num) {
+ bit = bus->ops->get_device_num(bus, slave);
if (bit < 0)
goto err;
} else {
- bit = find_first_zero_bit(slave->bus->assigned, SDW_MAX_DEVICES);
+ bit = find_first_zero_bit(bus->assigned, SDW_MAX_DEVICES);
if (bit == SDW_MAX_DEVICES) {
bit = -ENODEV;
goto err;
@@ -759,7 +757,7 @@ static int sdw_get_device_num(struct sdw_slave *slave)
* Do not update dev_num in Slave data structure here,
* Update once program dev_num is successful
*/
- set_bit(bit, slave->bus->assigned);
+ set_bit(bit, bus->assigned);
err:
return bit;
@@ -810,7 +808,7 @@ static int sdw_assign_device_num(struct sdw_slave *slave)
slave->dev_num = slave->dev_num_sticky;
if (bus->ops && bus->ops->new_peripheral_assigned)
- bus->ops->new_peripheral_assigned(bus, dev_num);
+ bus->ops->new_peripheral_assigned(bus, slave, dev_num);
return 0;
}
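With the global sdw_peripheral_ida removed, device-number policy moves behind two new bus callbacks; the core falls back to its bitmap scan only when a controller does not supply them. A sketch of a controller filling in the hooks, where the demo_* bodies are placeholders for controller-specific policy:

#include <linux/soundwire/sdw.h>

static int demo_get_device_num(struct sdw_bus *bus, struct sdw_slave *slave)
{
	/* controller-specific allocation, e.g. an IDA range per wake class */
	return 1;
}

static void demo_put_device_num(struct sdw_bus *bus, struct sdw_slave *slave)
{
	/* release whatever demo_get_device_num() handed out */
}

static struct sdw_master_ops demo_master_ops = {
	.get_device_num = demo_get_device_num,
	.put_device_num = demo_put_device_num,
};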
diff --git a/drivers/soundwire/intel_ace2x.c b/drivers/soundwire/intel_ace2x.c
index 1be0bea5f40f..a9d25ae0b73f 100644
--- a/drivers/soundwire/intel_ace2x.c
+++ b/drivers/soundwire/intel_ace2x.c
@@ -10,6 +10,7 @@
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_intel.h>
+#include <sound/pcm_params.h>
#include <sound/hda-mlink.h>
#include "cadence_master.h"
#include "bus.h"
@@ -191,10 +192,292 @@ static bool intel_check_cmdsync_unlocked(struct sdw_intel *sdw)
return hdac_bus_eml_sdw_check_cmdsync_unlocked(sdw->link_res->hbus);
}
+/* DAI callbacks */
+static int intel_params_stream(struct sdw_intel *sdw,
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai,
+ struct snd_pcm_hw_params *hw_params,
+ int link_id, int alh_stream_id)
+{
+ struct sdw_intel_link_res *res = sdw->link_res;
+ struct sdw_intel_stream_params_data params_data;
+
+ params_data.substream = substream;
+ params_data.dai = dai;
+ params_data.hw_params = hw_params;
+ params_data.link_id = link_id;
+ params_data.alh_stream_id = alh_stream_id;
+
+ if (res->ops && res->ops->params_stream && res->dev)
+ return res->ops->params_stream(res->dev,
+ &params_data);
+ return -EIO;
+}
+
+static int intel_free_stream(struct sdw_intel *sdw,
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai,
+ int link_id)
+
+{
+ struct sdw_intel_link_res *res = sdw->link_res;
+ struct sdw_intel_stream_free_data free_data;
+
+ free_data.substream = substream;
+ free_data.dai = dai;
+ free_data.link_id = link_id;
+
+ if (res->ops && res->ops->free_stream && res->dev)
+ return res->ops->free_stream(res->dev,
+ &free_data);
+
+ return 0;
+}
+
/*
* DAI operations
*/
+static int intel_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_cdns_dai_runtime *dai_runtime;
+ struct sdw_cdns_pdi *pdi;
+ struct sdw_stream_config sconfig;
+ struct sdw_port_config *pconfig;
+ int ch, dir;
+ int ret;
+
+ dai_runtime = cdns->dai_runtime_array[dai->id];
+ if (!dai_runtime)
+ return -EIO;
+
+ ch = params_channels(params);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ dir = SDW_DATA_DIR_RX;
+ else
+ dir = SDW_DATA_DIR_TX;
+
+ pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
+
+ if (!pdi) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* the SHIM will be configured in the callback functions */
+
+ sdw_cdns_config_stream(cdns, ch, dir, pdi);
+
+ /* store pdi and state, may be needed in prepare step */
+ dai_runtime->paused = false;
+ dai_runtime->suspended = false;
+ dai_runtime->pdi = pdi;
+
+ /* Inform DSP about PDI stream number */
+ ret = intel_params_stream(sdw, substream, dai, params,
+ sdw->instance,
+ pdi->intel_alh_id);
+ if (ret)
+ goto error;
+
+ sconfig.direction = dir;
+ sconfig.ch_count = ch;
+ sconfig.frame_rate = params_rate(params);
+ sconfig.type = dai_runtime->stream_type;
+
+ sconfig.bps = snd_pcm_format_width(params_format(params));
+
+ /* Port configuration */
+ pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
+ if (!pconfig) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ pconfig->num = pdi->num;
+ pconfig->ch_mask = (1 << ch) - 1;
+
+ ret = sdw_stream_add_master(&cdns->bus, &sconfig,
+ pconfig, 1, dai_runtime->stream);
+ if (ret)
+ dev_err(cdns->dev, "add master to stream failed:%d\n", ret);
+
+ kfree(pconfig);
+error:
+ return ret;
+}
+
+static int intel_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_cdns_dai_runtime *dai_runtime;
+ int ch, dir;
+ int ret = 0;
+
+ dai_runtime = cdns->dai_runtime_array[dai->id];
+ if (!dai_runtime) {
+ dev_err(dai->dev, "failed to get dai runtime in %s\n",
+ __func__);
+ return -EIO;
+ }
+
+ if (dai_runtime->suspended) {
+ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+ struct snd_pcm_hw_params *hw_params;
+
+ hw_params = &rtd->dpcm[substream->stream].hw_params;
+
+ dai_runtime->suspended = false;
+
+ /*
+ * .prepare() is called after system resume, where we
+ * need to reinitialize the SHIM/ALH/Cadence IP.
+ * .prepare() is also called to deal with underflows,
+ * but in those cases we cannot touch ALH/SHIM
+ * registers
+ */
+
+ /* configure stream */
+ ch = params_channels(hw_params);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ dir = SDW_DATA_DIR_RX;
+ else
+ dir = SDW_DATA_DIR_TX;
+
+ /* the SHIM will be configured in the callback functions */
+
+ sdw_cdns_config_stream(cdns, ch, dir, dai_runtime->pdi);
+
+ /* Inform DSP about PDI stream number */
+ ret = intel_params_stream(sdw, substream, dai,
+ hw_params,
+ sdw->instance,
+ dai_runtime->pdi->intel_alh_id);
+ }
+
+ return ret;
+}
+
+static int
+intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+{
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_cdns_dai_runtime *dai_runtime;
+ int ret;
+
+ dai_runtime = cdns->dai_runtime_array[dai->id];
+ if (!dai_runtime)
+ return -EIO;
+
+ /*
+ * The sdw stream state will transition to RELEASED when stream->
+ * master_list is empty. So the stream state will transition to
+ * DEPREPARED for the first cpu-dai and to RELEASED for the last
+ * cpu-dai.
+ */
+ ret = sdw_stream_remove_master(&cdns->bus, dai_runtime->stream);
+ if (ret < 0) {
+ dev_err(dai->dev, "remove master from stream %s failed: %d\n",
+ dai_runtime->stream->name, ret);
+ return ret;
+ }
+
+ ret = intel_free_stream(sdw, substream, dai, sdw->instance);
+ if (ret < 0) {
+ dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
+ return ret;
+ }
+
+ dai_runtime->pdi = NULL;
+
+ return 0;
+}
+
+static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
+ void *stream, int direction)
+{
+ return cdns_set_sdw_stream(dai, stream, direction);
+}
+
+static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
+ int direction)
+{
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_cdns_dai_runtime *dai_runtime;
+
+ dai_runtime = cdns->dai_runtime_array[dai->id];
+ if (!dai_runtime)
+ return ERR_PTR(-EINVAL);
+
+ return dai_runtime->stream;
+}
+
+static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
+{
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_intel_link_res *res = sdw->link_res;
+ struct sdw_cdns_dai_runtime *dai_runtime;
+ int ret = 0;
+
+ /*
+ * The .trigger callback is used to program HDaudio DMA and send required IPC to audio
+ * firmware.
+ */
+ if (res->ops && res->ops->trigger) {
+ ret = res->ops->trigger(substream, cmd, dai);
+ if (ret < 0)
+ return ret;
+ }
+
+ dai_runtime = cdns->dai_runtime_array[dai->id];
+ if (!dai_runtime) {
+ dev_err(dai->dev, "failed to get dai runtime in %s\n",
+ __func__);
+ return -EIO;
+ }
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+
+ /*
+ * The .prepare callback is used to deal with xruns and resume operations.
+ * In the case of xruns, the DMAs and SHIM registers cannot be touched,
+ * but for resume operations the DMAs and SHIM registers need to be initialized.
+ * The .trigger callback is used to track the suspend case only.
+ */
+
+ dai_runtime->suspended = true;
+
+ break;
+
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ dai_runtime->paused = true;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ dai_runtime->paused = false;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
+ .hw_params = intel_hw_params,
+ .prepare = intel_prepare,
+ .hw_free = intel_hw_free,
+ .trigger = intel_trigger,
+ .set_stream = intel_pcm_set_sdw_stream,
+ .get_stream = intel_get_sdw_stream,
};
static const struct snd_soc_component_driver dai_component = {
diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c
index 0daa6ca9a224..7f15e3549e53 100644
--- a/drivers/soundwire/intel_auxdevice.c
+++ b/drivers/soundwire/intel_auxdevice.c
@@ -23,9 +23,6 @@
#include "intel.h"
#include "intel_auxdevice.h"
-/* IDA min selected to avoid conflicts with HDaudio/iDISP SDI values */
-#define INTEL_DEV_NUM_IDA_MIN 4
-
#define INTEL_MASTER_SUSPEND_DELAY_MS 3000
/*
@@ -44,6 +41,39 @@ static int md_flags;
module_param_named(sdw_md_flags, md_flags, int, 0444);
MODULE_PARM_DESC(sdw_md_flags, "SoundWire Intel Master device flags (0x0 all off)");
+struct wake_capable_part {
+ const u16 mfg_id;
+ const u16 part_id;
+};
+
+static struct wake_capable_part wake_capable_list[] = {
+ {0x025d, 0x5682},
+ {0x025d, 0x700},
+ {0x025d, 0x711},
+ {0x025d, 0x1712},
+ {0x025d, 0x1713},
+ {0x025d, 0x1716},
+ {0x025d, 0x1717},
+ {0x025d, 0x712},
+ {0x025d, 0x713},
+ {0x025d, 0x714},
+ {0x025d, 0x715},
+ {0x025d, 0x716},
+ {0x025d, 0x717},
+ {0x025d, 0x722},
+};
+
+static bool is_wake_capable(struct sdw_slave *slave)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wake_capable_list); i++)
+ if (slave->id.part_id == wake_capable_list[i].part_id &&
+ slave->id.mfg_id == wake_capable_list[i].mfg_id)
+ return true;
+ return false;
+}
+
static int generic_pre_bank_switch(struct sdw_bus *bus)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
@@ -60,18 +90,32 @@ static int generic_post_bank_switch(struct sdw_bus *bus)
return sdw->link_res->hw_ops->post_bank_switch(sdw);
}
-static void generic_new_peripheral_assigned(struct sdw_bus *bus, int dev_num)
+static void generic_new_peripheral_assigned(struct sdw_bus *bus,
+ struct sdw_slave *slave,
+ int dev_num)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
struct sdw_intel *sdw = cdns_to_intel(cdns);
+ int dev_num_min;
+ int dev_num_max;
+ bool wake_capable = slave->prop.wake_capable || is_wake_capable(slave);
+
+ if (wake_capable) {
+ dev_num_min = SDW_INTEL_DEV_NUM_IDA_MIN;
+ dev_num_max = SDW_MAX_DEVICES;
+ } else {
+ dev_num_min = 1;
+ dev_num_max = SDW_INTEL_DEV_NUM_IDA_MIN - 1;
+ }
/* paranoia check, this should never happen */
- if (dev_num < INTEL_DEV_NUM_IDA_MIN || dev_num > SDW_MAX_DEVICES) {
- dev_err(bus->dev, "%s: invalid dev_num %d\n", __func__, dev_num);
+ if (dev_num < dev_num_min || dev_num > dev_num_max) {
+ dev_err(bus->dev, "%s: invalid dev_num %d, wake supported %d\n",
+ __func__, dev_num, slave->prop.wake_capable);
return;
}
- if (sdw->link_res->hw_ops->program_sdi)
+ if (sdw->link_res->hw_ops->program_sdi && wake_capable)
sdw->link_res->hw_ops->program_sdi(sdw, dev_num);
}
@@ -123,6 +167,30 @@ static int intel_prop_read(struct sdw_bus *bus)
return 0;
}
+static DEFINE_IDA(intel_peripheral_ida);
+
+static int intel_get_device_num_ida(struct sdw_bus *bus, struct sdw_slave *slave)
+{
+ int bit;
+
+ if (slave->prop.wake_capable || is_wake_capable(slave))
+ return ida_alloc_range(&intel_peripheral_ida,
+ SDW_INTEL_DEV_NUM_IDA_MIN, SDW_MAX_DEVICES,
+ GFP_KERNEL);
+
+ bit = find_first_zero_bit(slave->bus->assigned, SDW_MAX_DEVICES);
+ if (bit == SDW_MAX_DEVICES)
+ return -ENODEV;
+
+ return bit;
+}
+
+static void intel_put_device_num_ida(struct sdw_bus *bus, struct sdw_slave *slave)
+{
+ if (slave->prop.wake_capable || is_wake_capable(slave))
+ ida_free(&intel_peripheral_ida, slave->dev_num);
+}
+
static struct sdw_master_ops sdw_intel_ops = {
.read_prop = intel_prop_read,
.override_adr = sdw_dmi_override_adr,
@@ -132,6 +200,8 @@ static struct sdw_master_ops sdw_intel_ops = {
.pre_bank_switch = generic_pre_bank_switch,
.post_bank_switch = generic_post_bank_switch,
.read_ping_status = cdns_read_ping_status,
+ .get_device_num = intel_get_device_num_ida,
+ .put_device_num = intel_put_device_num_ida,
.new_peripheral_assigned = generic_new_peripheral_assigned,
};
@@ -165,7 +235,6 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
cdns->msg_count = 0;
bus->link_id = auxdev->id;
- bus->dev_num_ida_min = INTEL_DEV_NUM_IDA_MIN;
bus->clk_stop_timeout = 1;
sdw_cdns_probe(cdns);
@@ -248,13 +317,6 @@ int intel_link_startup(struct auxiliary_device *auxdev)
sdw_intel_debugfs_init(sdw);
- /* start bus */
- ret = sdw_intel_start_bus(sdw);
- if (ret) {
- dev_err(dev, "bus start failed: %d\n", ret);
- goto err_power_up;
- }
-
/* Enable runtime PM */
if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME)) {
pm_runtime_set_autosuspend_delay(dev,
@@ -264,6 +326,15 @@ int intel_link_startup(struct auxiliary_device *auxdev)
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+
+ pm_runtime_resume(bus->dev);
+ }
+
+ /* start bus */
+ ret = sdw_intel_start_bus(sdw);
+ if (ret) {
+ dev_err(dev, "bus start failed: %d\n", ret);
+ goto err_pm_runtime;
}
clock_stop_quirks = sdw->link_res->clock_stop_quirks;
@@ -293,12 +364,18 @@ int intel_link_startup(struct auxiliary_device *auxdev)
* with a delay. A more complete solution would require the
* definition of Master properties.
*/
- if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
+ if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE)) {
+ pm_runtime_mark_last_busy(bus->dev);
+ pm_runtime_mark_last_busy(dev);
pm_runtime_idle(dev);
+ }
sdw->startup_done = true;
return 0;
+err_pm_runtime:
+ if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME))
+ pm_runtime_disable(dev);
err_power_up:
sdw_intel_link_power_down(sdw);
err_init:
@@ -552,6 +629,8 @@ static int __maybe_unused intel_resume(struct device *dev)
pm_runtime_mark_last_busy(dev);
pm_runtime_enable(dev);
+ pm_runtime_resume(bus->dev);
+
link_flags = md_flags >> (bus->link_id * 8);
if (!(link_flags & SDW_INTEL_MASTER_DISABLE_PM_RUNTIME_IDLE))
@@ -587,6 +666,7 @@ static int __maybe_unused intel_resume(struct device *dev)
* counters and delay the pm_runtime suspend by several
* seconds, by when all enumeration should be complete.
*/
+ pm_runtime_mark_last_busy(bus->dev);
pm_runtime_mark_last_busy(dev);
return 0;
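On the Intel side the new callbacks split the device-number space: wake-capable parts draw from an IDA-managed upper range while everything else keeps the low numbers found by a bitmap scan. A standalone sketch of that allocation scheme, with assumed DEMO_ limits standing in for the SoundWire constants:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(demo_ida);
static DECLARE_BITMAP(demo_assigned, 16);

/* assumed split point: numbers >= DEMO_IDA_MIN are reserved for wake */
#define DEMO_IDA_MIN	4
#define DEMO_MAX	11

static int demo_alloc_dev_num(bool wake_capable)
{
	int bit;

	if (wake_capable)
		return ida_alloc_range(&demo_ida, DEMO_IDA_MIN, DEMO_MAX,
				       GFP_KERNEL);

	/* non wake-capable parts take the first free low number */
	bit = find_first_zero_bit(demo_assigned, DEMO_IDA_MIN);
	if (bit == DEMO_IDA_MIN)
		return -ENODEV;
	set_bit(bit, demo_assigned);
	return bit;
}

static void demo_free_dev_num(bool wake_capable, int dev_num)
{
	if (wake_capable)
		ida_free(&demo_ida, dev_num);
	else
		clear_bit(dev_num, demo_assigned);
}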
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index c029e4d53573..55be9f4b8d59 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -10,7 +10,6 @@
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index 3f5b1556ece0..fddc63309773 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -106,6 +106,7 @@ struct sun6i_spi {
struct reset_control *rstc;
struct completion done;
+ struct completion dma_rx_done;
const u8 *tx_buf;
u8 *rx_buf;
@@ -200,6 +201,13 @@ static size_t sun6i_spi_max_transfer_size(struct spi_device *spi)
return SUN6I_MAX_XFER_SIZE - 1;
}
+static void sun6i_spi_dma_rx_cb(void *param)
+{
+ struct sun6i_spi *sspi = param;
+
+ complete(&sspi->dma_rx_done);
+}
+
static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
struct spi_transfer *tfr)
{
@@ -211,7 +219,7 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
struct dma_slave_config rxconf = {
.direction = DMA_DEV_TO_MEM,
.src_addr = sspi->dma_addr_rx,
- .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.src_maxburst = 8,
};
@@ -224,6 +232,8 @@ static int sun6i_spi_prepare_dma(struct sun6i_spi *sspi,
DMA_PREP_INTERRUPT);
if (!rxdesc)
return -EINVAL;
+ rxdesc->callback_param = sspi;
+ rxdesc->callback = sun6i_spi_dma_rx_cb;
}
txdesc = NULL;
@@ -279,6 +289,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
return -EINVAL;
reinit_completion(&sspi->done);
+ reinit_completion(&sspi->dma_rx_done);
sspi->tx_buf = tfr->tx_buf;
sspi->rx_buf = tfr->rx_buf;
sspi->len = tfr->len;
@@ -479,6 +490,22 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
start = jiffies;
timeout = wait_for_completion_timeout(&sspi->done,
msecs_to_jiffies(tx_time));
+
+ if (!use_dma) {
+ sun6i_spi_drain_fifo(sspi);
+ } else {
+ if (timeout && rx_len) {
+ /*
+ * Even though RX on the peripheral side has finished,
+ * the RX DMA might still be in flight
+ */
+ timeout = wait_for_completion_timeout(&sspi->dma_rx_done,
+ timeout);
+ if (!timeout)
+ dev_warn(&master->dev, "RX DMA timeout\n");
+ }
+ }
+
end = jiffies;
if (!timeout) {
dev_warn(&master->dev,
@@ -506,7 +533,6 @@ static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
/* Transfer complete */
if (status & SUN6I_INT_CTL_TC) {
sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC);
- sun6i_spi_drain_fifo(sspi);
complete(&sspi->done);
return IRQ_HANDLED;
}
@@ -665,6 +691,7 @@ static int sun6i_spi_probe(struct platform_device *pdev)
}
init_completion(&sspi->done);
+ init_completion(&sspi->dma_rx_done);
sspi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(sspi->rstc)) {
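The sun6i change adds a second completion that is signalled from the dmaengine RX callback, so the transfer path waits first for the controller's transfer-complete interrupt and then for the RX descriptor to actually drain. A self-contained sketch of the two-completion scheme, using a hypothetical demo_spi structure:

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>

struct demo_spi {
	struct completion done;		/* controller reports TC */
	struct completion dma_rx_done;	/* RX DMA descriptor finished */
};

static void demo_dma_rx_cb(void *param)
{
	struct demo_spi *spi = param;

	complete(&spi->dma_rx_done);
}

static int demo_wait_for_transfer(struct demo_spi *spi, unsigned long tmo)
{
	if (!wait_for_completion_timeout(&spi->done, tmo))
		return -ETIMEDOUT;
	/* the controller is done, but the RX DMA may still be in flight */
	if (!wait_for_completion_timeout(&spi->dma_rx_done, tmo))
		return -ETIMEDOUT;
	return 0;
}

The callback would be attached to the RX descriptor via rxdesc->callback / rxdesc->callback_param before dmaengine_submit(), as the hunk above does.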
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index 98db47cb4fa4..727b956aa231 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -15,6 +15,8 @@
*/
#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/device.h>
@@ -32,10 +34,6 @@
#include <linux/jiffies.h>
#include <linux/miscdevice.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-
/* ----------------------------
* driver parameters
* ----------------------------
@@ -839,16 +837,8 @@ static int axis_fifo_probe(struct platform_device *pdev)
* ----------------------------
*/
- /* get iospace for the device */
- r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r_mem) {
- dev_err(fifo->dt_device, "invalid address\n");
- rc = -ENODEV;
- goto err_initial;
- }
-
- /* request physical memory */
- fifo->base_addr = devm_ioremap_resource(fifo->dt_device, r_mem);
+ /* get iospace for the device and request physical memory */
+ fifo->base_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &r_mem);
if (IS_ERR(fifo->base_addr)) {
rc = PTR_ERR(fifo->base_addr);
goto err_initial;
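The axis-fifo hunk collapses the platform_get_resource() plus devm_ioremap_resource() pair into one call. A hypothetical probe fragment showing the replacement:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* one call looks up region 0, requests it and maps it */
	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* res still describes the region if its start or size are needed */
	return 0;
}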
diff --git a/drivers/staging/fieldbus/anybuss/arcx-anybus.c b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
index f135b9f52c8d..6f69758a8b27 100644
--- a/drivers/staging/fieldbus/anybuss/arcx-anybus.c
+++ b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
@@ -156,8 +156,8 @@ create_anybus_host(struct platform_device *pdev, int idx)
if (IS_ERR(ops.regmap))
return ERR_CAST(ops.regmap);
ops.irq = platform_get_irq(pdev, idx);
- if (ops.irq <= 0)
- return ERR_PTR(-EINVAL);
+ if (ops.irq < 0)
+ return ERR_PTR(ops.irq);
return devm_anybuss_host_common_probe(&pdev->dev, &ops);
}
@@ -343,7 +343,7 @@ static struct platform_driver controller_driver = {
.remove_new = controller_remove,
.driver = {
.name = "arcx-anybus-controller",
- .of_match_table = of_match_ptr(controller_of_match),
+ .of_match_table = controller_of_match,
},
};
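The arcx-anybus hunk stops collapsing platform_get_irq() failures into -EINVAL and propagates the reported errno instead. A hypothetical fragment of the same pattern; on current kernels the call returns a negative errno on failure and never 0:

#include <linux/err.h>
#include <linux/platform_device.h>

static void *demo_create_host(struct platform_device *pdev, int idx)
{
	int irq = platform_get_irq(pdev, idx);

	if (irq < 0)
		return ERR_PTR(irq);	/* keep the original error code */

	/* ... register the host using irq ... */
	return NULL;	/* placeholder for the real host pointer */
}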
diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
index e1a84d6020f4..32b2e817ff04 100644
--- a/drivers/staging/gdm724x/gdm_tty.c
+++ b/drivers/staging/gdm724x/gdm_tty.c
@@ -149,22 +149,17 @@ static void gdm_tty_send_complete(void *arg)
tty_port_tty_wakeup(&gdm->port);
}
-static int gdm_tty_write(struct tty_struct *tty, const unsigned char *buf,
- int len)
+static ssize_t gdm_tty_write(struct tty_struct *tty, const u8 *buf, size_t len)
{
struct gdm *gdm = tty->driver_data;
- int remain = len;
- int sent_len = 0;
- int sending_len = 0;
+ size_t remain = len;
+ size_t sent_len = 0;
if (!gdm_tty_ready(gdm))
return -ENODEV;
- if (!len)
- return 0;
-
- while (1) {
- sending_len = min(MUX_TX_MAX_SIZE, remain);
+ while (remain) {
+ size_t sending_len = min_t(size_t, MUX_TX_MAX_SIZE, remain);
gdm->tty_dev->send_func(gdm->tty_dev->priv_dev,
(void *)(buf + sent_len),
sending_len,
@@ -173,8 +168,6 @@ static int gdm_tty_write(struct tty_struct *tty, const unsigned char *buf,
gdm);
sent_len += sending_len;
remain -= sending_len;
- if (remain <= 0)
- break;
}
return len;
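With the tty write op now taking size_t/ssize_t, the old signed "remain <= 0" exit test no longer applies, so the rewritten loop simply runs while bytes remain and slices off at most one chunk per pass. A standalone sketch of that loop; DEMO_MAX_CHUNK and the send callback are hypothetical:

#include <linux/minmax.h>
#include <linux/types.h>

#define DEMO_MAX_CHUNK	2048	/* assumed per-call limit */

static ssize_t demo_write(const u8 *buf, size_t len,
			  void (*send)(const u8 *, size_t))
{
	size_t remain = len;
	size_t sent = 0;

	while (remain) {
		size_t chunk = min_t(size_t, DEMO_MAX_CHUNK, remain);

		send(buf + sent, chunk);
		sent += chunk;
		remain -= chunk;
	}

	return len;
}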
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
index ebe835f25d13..891b75327d7f 100644
--- a/drivers/staging/greybus/arche-platform.c
+++ b/drivers/staging/greybus/arche-platform.c
@@ -20,6 +20,7 @@
#include <linux/suspend.h>
#include <linux/time.h>
#include <linux/greybus.h>
+#include <linux/of.h>
#include "arche_platform.h"
#if IS_ENABLED(CONFIG_USB_HSIC_USB3613)
diff --git a/drivers/staging/greybus/fw-core.c b/drivers/staging/greybus/fw-core.c
index 57bebf24636b..0fb15a60412f 100644
--- a/drivers/staging/greybus/fw-core.c
+++ b/drivers/staging/greybus/fw-core.c
@@ -89,7 +89,7 @@ static int gb_fw_core_probe(struct gb_bundle *bundle,
}
connection = gb_connection_create(bundle, cport_id,
- gb_fw_mgmt_request_handler);
+ gb_fw_mgmt_request_handler);
if (IS_ERR(connection)) {
ret = PTR_ERR(connection);
dev_err(&bundle->dev,
@@ -110,7 +110,7 @@ static int gb_fw_core_probe(struct gb_bundle *bundle,
}
connection = gb_connection_create(bundle, cport_id,
- gb_fw_download_request_handler);
+ gb_fw_download_request_handler);
if (IS_ERR(connection)) {
dev_err(&bundle->dev, "failed to create download connection (%ld)\n",
PTR_ERR(connection));
diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c
index 88da1d796f13..57cc1960d059 100644
--- a/drivers/staging/greybus/pwm.c
+++ b/drivers/staging/greybus/pwm.c
@@ -19,7 +19,6 @@ struct gb_pwm_chip {
u8 pwm_max; /* max pwm number */
struct pwm_chip chip;
- struct pwm_chip *pwm;
};
static inline struct gb_pwm_chip *pwm_chip_to_gb_pwm_chip(struct pwm_chip *chip)
@@ -267,7 +266,7 @@ static int gb_pwm_probe(struct gbphy_device *gbphy_dev,
{
struct gb_connection *connection;
struct gb_pwm_chip *pwmc;
- struct pwm_chip *pwm;
+ struct pwm_chip *chip;
int ret;
pwmc = kzalloc(sizeof(*pwmc), GFP_KERNEL);
@@ -295,13 +294,13 @@ static int gb_pwm_probe(struct gbphy_device *gbphy_dev,
if (ret)
goto exit_connection_disable;
- pwm = &pwmc->chip;
+ chip = &pwmc->chip;
- pwm->dev = &gbphy_dev->dev;
- pwm->ops = &gb_pwm_ops;
- pwm->npwm = pwmc->pwm_max + 1;
+ chip->dev = &gbphy_dev->dev;
+ chip->ops = &gb_pwm_ops;
+ chip->npwm = pwmc->pwm_max + 1;
- ret = pwmchip_add(pwm);
+ ret = pwmchip_add(chip);
if (ret) {
dev_err(&gbphy_dev->dev,
"failed to register PWM: %d\n", ret);
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 20a34599859f..999ce613dca8 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -427,8 +427,7 @@ static void gb_tty_hangup(struct tty_struct *tty)
tty_port_hangup(&gb_tty->port);
}
-static int gb_tty_write(struct tty_struct *tty, const unsigned char *buf,
- int count)
+static ssize_t gb_tty_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct gb_tty *gb_tty = tty->driver_data;
diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig
index e9b168ba97bf..5d8917160d41 100644
--- a/drivers/staging/media/atomisp/Kconfig
+++ b/drivers/staging/media/atomisp/Kconfig
@@ -12,9 +12,12 @@ menuconfig INTEL_ATOMISP
config VIDEO_ATOMISP
tristate "Intel Atom Image Signal Processor Driver"
depends on VIDEO_DEV && INTEL_ATOMISP
+ depends on MEDIA_PCI_SUPPORT
depends on PMIC_OPREGION
+ depends on I2C
select V4L2_FWNODE
select IOSF_MBI
+ select IPU_BRIDGE
select VIDEOBUF2_VMALLOC
select VIDEO_V4L2_SUBDEV_API
help
diff --git a/drivers/staging/media/atomisp/i2c/Kconfig b/drivers/staging/media/atomisp/i2c/Kconfig
index e726101b24e4..2d4165cda2f1 100644
--- a/drivers/staging/media/atomisp/i2c/Kconfig
+++ b/drivers/staging/media/atomisp/i2c/Kconfig
@@ -57,18 +57,6 @@ config VIDEO_ATOMISP_GC0310
This is a Video4Linux2 sensor-level driver for the Galaxycore
GC0310 0.3MP sensor.
-config VIDEO_ATOMISP_OV2680
- tristate "Omnivision OV2680 sensor support"
- depends on ACPI
- depends on I2C && VIDEO_DEV
- help
- This is a Video4Linux2 sensor-level driver for the Omnivision
- OV2680 raw camera.
-
- ov2680 is a 2M raw sensor.
-
- It currently only works with the atomisp driver.
-
config VIDEO_ATOMISP_OV5693
tristate "Omnivision ov5693 sensor support"
depends on ACPI
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile
index 8d022986e199..fc55af5f3422 100644
--- a/drivers/staging/media/atomisp/i2c/Makefile
+++ b/drivers/staging/media/atomisp/i2c/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_VIDEO_ATOMISP_OV5693) += ov5693/
obj-$(CONFIG_VIDEO_ATOMISP_MT9M114) += atomisp-mt9m114.o
obj-$(CONFIG_VIDEO_ATOMISP_GC2235) += atomisp-gc2235.o
obj-$(CONFIG_VIDEO_ATOMISP_OV2722) += atomisp-ov2722.o
-obj-$(CONFIG_VIDEO_ATOMISP_OV2680) += atomisp-ov2680.o
obj-$(CONFIG_VIDEO_ATOMISP_GC0310) += atomisp-gc0310.o
obj-$(CONFIG_VIDEO_ATOMISP_MSRLIST_HELPER) += atomisp-libmsrlisthelper.o
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
deleted file mode 100644
index 4cc2839937af..000000000000
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
+++ /dev/null
@@ -1,849 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for OmniVision OV2680 1080p HD camera sensor.
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- * Copyright (c) 2023 Hans de Goede <hdegoede@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/acpi.h>
-#include <linux/device.h>
-#include <linux/gpio/consumer.h>
-#include <linux/gpio/machine.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/pm_runtime.h>
-#include <linux/types.h>
-
-#include <media/ov_16bit_addr_reg_helpers.h>
-#include <media/v4l2-device.h>
-
-#include "ov2680.h"
-
-static const struct v4l2_rect ov2680_default_crop = {
- .left = OV2680_ACTIVE_START_LEFT,
- .top = OV2680_ACTIVE_START_TOP,
- .width = OV2680_ACTIVE_WIDTH,
- .height = OV2680_ACTIVE_HEIGHT,
-};
-
-static int ov2680_write_reg_array(struct i2c_client *client,
- const struct ov2680_reg *reglist)
-{
- const struct ov2680_reg *next = reglist;
- int ret;
-
- for (; next->reg != 0; next++) {
- ret = ov_write_reg8(client, next->reg, next->val);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static void ov2680_set_bayer_order(struct ov2680_dev *sensor, struct v4l2_mbus_framefmt *fmt)
-{
- static const int ov2680_hv_flip_bayer_order[] = {
- MEDIA_BUS_FMT_SBGGR10_1X10,
- MEDIA_BUS_FMT_SGRBG10_1X10,
- MEDIA_BUS_FMT_SGBRG10_1X10,
- MEDIA_BUS_FMT_SRGGB10_1X10,
- };
- int hv_flip = 0;
-
- if (sensor->ctrls.vflip->val)
- hv_flip += 1;
-
- if (sensor->ctrls.hflip->val)
- hv_flip += 2;
-
- fmt->code = ov2680_hv_flip_bayer_order[hv_flip];
-}
-
-static int ov2680_set_vflip(struct ov2680_dev *sensor, s32 val)
-{
- int ret;
-
- if (sensor->is_streaming)
- return -EBUSY;
-
- ret = ov_update_reg(sensor->client, OV2680_REG_FORMAT1, BIT(2), val ? BIT(2) : 0);
- if (ret < 0)
- return ret;
-
- ov2680_set_bayer_order(sensor, &sensor->mode.fmt);
- return 0;
-}
-
-static int ov2680_set_hflip(struct ov2680_dev *sensor, s32 val)
-{
- int ret;
-
- if (sensor->is_streaming)
- return -EBUSY;
-
- ret = ov_update_reg(sensor->client, OV2680_REG_FORMAT2, BIT(2), val ? BIT(2) : 0);
- if (ret < 0)
- return ret;
-
- ov2680_set_bayer_order(sensor, &sensor->mode.fmt);
- return 0;
-}
-
-static int ov2680_exposure_set(struct ov2680_dev *sensor, u32 exp)
-{
- return ov_write_reg24(sensor->client, OV2680_REG_EXPOSURE_PK_HIGH, exp << 4);
-}
-
-static int ov2680_gain_set(struct ov2680_dev *sensor, u32 gain)
-{
- return ov_write_reg16(sensor->client, OV2680_REG_GAIN_PK, gain);
-}
-
-static int ov2680_test_pattern_set(struct ov2680_dev *sensor, int value)
-{
- int ret;
-
- if (!value)
- return ov_update_reg(sensor->client, OV2680_REG_ISP_CTRL00, BIT(7), 0);
-
- ret = ov_update_reg(sensor->client, OV2680_REG_ISP_CTRL00, 0x03, value - 1);
- if (ret < 0)
- return ret;
-
- ret = ov_update_reg(sensor->client, OV2680_REG_ISP_CTRL00, BIT(7), BIT(7));
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static int ov2680_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct v4l2_subdev *sd = ctrl_to_sd(ctrl);
- struct ov2680_dev *sensor = to_ov2680_sensor(sd);
- int ret;
-
- /* Only apply changes to the controls if the device is powered up */
- if (!pm_runtime_get_if_in_use(sensor->sd.dev)) {
- ov2680_set_bayer_order(sensor, &sensor->mode.fmt);
- return 0;
- }
-
- switch (ctrl->id) {
- case V4L2_CID_VFLIP:
- ret = ov2680_set_vflip(sensor, ctrl->val);
- break;
- case V4L2_CID_HFLIP:
- ret = ov2680_set_hflip(sensor, ctrl->val);
- break;
- case V4L2_CID_EXPOSURE:
- ret = ov2680_exposure_set(sensor, ctrl->val);
- break;
- case V4L2_CID_GAIN:
- ret = ov2680_gain_set(sensor, ctrl->val);
- break;
- case V4L2_CID_TEST_PATTERN:
- ret = ov2680_test_pattern_set(sensor, ctrl->val);
- break;
- default:
- ret = -EINVAL;
- }
-
- pm_runtime_put(sensor->sd.dev);
- return ret;
-}
-
-static const struct v4l2_ctrl_ops ov2680_ctrl_ops = {
- .s_ctrl = ov2680_s_ctrl,
-};
-
-static int ov2680_init_registers(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- ret = ov_write_reg8(client, OV2680_SW_RESET, 0x01);
-
- /* Wait for sensor reset */
- usleep_range(1000, 2000);
-
- ret |= ov2680_write_reg_array(client, ov2680_global_setting);
-
- return ret;
-}
-
-static struct v4l2_mbus_framefmt *
-__ov2680_get_pad_format(struct ov2680_dev *sensor, struct v4l2_subdev_state *state,
- unsigned int pad, enum v4l2_subdev_format_whence which)
-{
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_format(&sensor->sd, state, pad);
-
- return &sensor->mode.fmt;
-}
-
-static struct v4l2_rect *
-__ov2680_get_pad_crop(struct ov2680_dev *sensor, struct v4l2_subdev_state *state,
- unsigned int pad, enum v4l2_subdev_format_whence which)
-{
- if (which == V4L2_SUBDEV_FORMAT_TRY)
- return v4l2_subdev_get_try_crop(&sensor->sd, state, pad);
-
- return &sensor->mode.crop;
-}
-
-static void ov2680_fill_format(struct ov2680_dev *sensor,
- struct v4l2_mbus_framefmt *fmt,
- unsigned int width, unsigned int height)
-{
- memset(fmt, 0, sizeof(*fmt));
- fmt->width = width;
- fmt->height = height;
- fmt->field = V4L2_FIELD_NONE;
- ov2680_set_bayer_order(sensor, fmt);
-}
-
-static void ov2680_calc_mode(struct ov2680_dev *sensor)
-{
- int width = sensor->mode.fmt.width;
- int height = sensor->mode.fmt.height;
- int orig_width = width;
- int orig_height = height;
-
- if (width <= (sensor->mode.crop.width / 2) &&
- height <= (sensor->mode.crop.height / 2)) {
- sensor->mode.binning = true;
- width *= 2;
- height *= 2;
- } else {
- sensor->mode.binning = false;
- }
-
- sensor->mode.h_start =
- (sensor->mode.crop.left + (sensor->mode.crop.width - width) / 2) & ~1;
- sensor->mode.v_start =
- (sensor->mode.crop.top + (sensor->mode.crop.height - height) / 2) & ~1;
- sensor->mode.h_end = min(sensor->mode.h_start + width + OV2680_END_MARGIN - 1,
- OV2680_NATIVE_WIDTH - 1);
- sensor->mode.v_end = min(sensor->mode.v_start + height + OV2680_END_MARGIN - 1,
- OV2680_NATIVE_HEIGHT - 1);
- sensor->mode.h_output_size = orig_width;
- sensor->mode.v_output_size = orig_height;
- sensor->mode.hts = OV2680_PIXELS_PER_LINE;
- sensor->mode.vts = OV2680_LINES_PER_FRAME;
-}
-
-static int ov2680_set_mode(struct ov2680_dev *sensor)
-{
- struct i2c_client *client = sensor->client;
- u8 sensor_ctrl_0a, inc, fmt1, fmt2;
- int ret;
-
- if (sensor->mode.binning) {
- sensor_ctrl_0a = 0x23;
- inc = 0x31;
- fmt1 = 0xc2;
- fmt2 = 0x01;
- } else {
- sensor_ctrl_0a = 0x21;
- inc = 0x11;
- fmt1 = 0xc0;
- fmt2 = 0x00;
- }
-
- ret = ov_write_reg8(client, OV2680_REG_SENSOR_CTRL_0A, sensor_ctrl_0a);
- if (ret)
- return ret;
-
- ret = ov_write_reg16(client, OV2680_HORIZONTAL_START_H, sensor->mode.h_start);
- if (ret)
- return ret;
-
- ret = ov_write_reg16(client, OV2680_VERTICAL_START_H, sensor->mode.v_start);
- if (ret)
- return ret;
-
- ret = ov_write_reg16(client, OV2680_HORIZONTAL_END_H, sensor->mode.h_end);
- if (ret)
- return ret;
-
- ret = ov_write_reg16(client, OV2680_VERTICAL_END_H, sensor->mode.v_end);
- if (ret)
- return ret;
-
- ret = ov_write_reg16(client, OV2680_HORIZONTAL_OUTPUT_SIZE_H,
- sensor->mode.h_output_size);
- if (ret)
- return ret;
-
- ret = ov_write_reg16(client, OV2680_VERTICAL_OUTPUT_SIZE_H,
- sensor->mode.v_output_size);
- if (ret)
- return ret;
-
- ret = ov_write_reg16(client, OV2680_HTS, sensor->mode.hts);
- if (ret)
- return ret;
-
- ret = ov_write_reg16(client, OV2680_VTS, sensor->mode.vts);
- if (ret)
- return ret;
-
- ret = ov_write_reg16(client, OV2680_ISP_X_WIN, 0);
- if (ret)
- return ret;
-
- ret = ov_write_reg16(client, OV2680_ISP_Y_WIN, 0);
- if (ret)
- return ret;
-
- ret = ov_write_reg8(client, OV2680_X_INC, inc);
- if (ret)
- return ret;
-
- ret = ov_write_reg8(client, OV2680_Y_INC, inc);
- if (ret)
- return ret;
-
- ret = ov_write_reg16(client, OV2680_X_WIN, sensor->mode.h_output_size);
- if (ret)
- return ret;
-
- ret = ov_write_reg16(client, OV2680_Y_WIN, sensor->mode.v_output_size);
- if (ret)
- return ret;
-
- ret = ov_write_reg8(client, OV2680_REG_FORMAT1, fmt1);
- if (ret)
- return ret;
-
- ret = ov_write_reg8(client, OV2680_REG_FORMAT2, fmt2);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int ov2680_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct ov2680_dev *sensor = to_ov2680_sensor(sd);
- struct v4l2_mbus_framefmt *fmt;
- const struct v4l2_rect *crop;
- unsigned int width, height;
-
- crop = __ov2680_get_pad_crop(sensor, sd_state, format->pad, format->which);
-
- /* Limit set_fmt max size to crop width / height */
- width = clamp_t(unsigned int, ALIGN(format->format.width, 2),
- OV2680_MIN_CROP_WIDTH, crop->width);
- height = clamp_t(unsigned int, ALIGN(format->format.height, 2),
- OV2680_MIN_CROP_HEIGHT, crop->height);
-
- fmt = __ov2680_get_pad_format(sensor, sd_state, format->pad, format->which);
- ov2680_fill_format(sensor, fmt, width, height);
-
- format->format = *fmt;
-
- if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- return 0;
-
- mutex_lock(&sensor->lock);
- ov2680_calc_mode(sensor);
- mutex_unlock(&sensor->lock);
- return 0;
-}
-
-static int ov2680_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct ov2680_dev *sensor = to_ov2680_sensor(sd);
- struct v4l2_mbus_framefmt *fmt;
-
- fmt = __ov2680_get_pad_format(sensor, sd_state, format->pad, format->which);
- format->format = *fmt;
- return 0;
-}
-
-static int ov2680_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state,
- struct v4l2_subdev_selection *sel)
-{
- struct ov2680_dev *sensor = to_ov2680_sensor(sd);
-
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP:
- mutex_lock(&sensor->lock);
- sel->r = *__ov2680_get_pad_crop(sensor, state, sel->pad, sel->which);
- mutex_unlock(&sensor->lock);
- break;
- case V4L2_SEL_TGT_NATIVE_SIZE:
- case V4L2_SEL_TGT_CROP_BOUNDS:
- sel->r.top = 0;
- sel->r.left = 0;
- sel->r.width = OV2680_NATIVE_WIDTH;
- sel->r.height = OV2680_NATIVE_HEIGHT;
- break;
- case V4L2_SEL_TGT_CROP_DEFAULT:
- sel->r.top = OV2680_ACTIVE_START_TOP;
- sel->r.left = OV2680_ACTIVE_START_LEFT;
- sel->r.width = OV2680_ACTIVE_WIDTH;
- sel->r.height = OV2680_ACTIVE_HEIGHT;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ov2680_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *state,
- struct v4l2_subdev_selection *sel)
-{
- struct ov2680_dev *sensor = to_ov2680_sensor(sd);
- struct v4l2_mbus_framefmt *format;
- struct v4l2_rect *__crop;
- struct v4l2_rect rect;
-
- if (sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
-
- /*
- * Clamp the boundaries of the crop rectangle to the size of the sensor
- * pixel array. Align to multiples of 2 to ensure Bayer pattern isn't
- * disrupted.
- */
- rect.left = clamp(ALIGN(sel->r.left, 2), OV2680_NATIVE_START_LEFT,
- OV2680_NATIVE_WIDTH);
- rect.top = clamp(ALIGN(sel->r.top, 2), OV2680_NATIVE_START_TOP,
- OV2680_NATIVE_HEIGHT);
- rect.width = clamp_t(unsigned int, ALIGN(sel->r.width, 2),
- OV2680_MIN_CROP_WIDTH, OV2680_NATIVE_WIDTH);
- rect.height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
- OV2680_MIN_CROP_HEIGHT, OV2680_NATIVE_HEIGHT);
-
- /* Make sure the crop rectangle isn't outside the bounds of the array */
- rect.width = min_t(unsigned int, rect.width,
- OV2680_NATIVE_WIDTH - rect.left);
- rect.height = min_t(unsigned int, rect.height,
- OV2680_NATIVE_HEIGHT - rect.top);
-
- __crop = __ov2680_get_pad_crop(sensor, state, sel->pad, sel->which);
-
- if (rect.width != __crop->width || rect.height != __crop->height) {
- /*
- * Reset the output image size if the crop rectangle size has
- * been modified.
- */
- format = __ov2680_get_pad_format(sensor, state, sel->pad, sel->which);
- format->width = rect.width;
- format->height = rect.height;
- }
-
- *__crop = rect;
- sel->r = rect;
-
- return 0;
-}
-
-static int ov2680_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
-{
- struct v4l2_subdev_format fmt = {
- .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
- : V4L2_SUBDEV_FORMAT_ACTIVE,
- .format = {
- .width = 800,
- .height = 600,
- },
- };
-
- sd_state->pads[0].try_crop = ov2680_default_crop;
-
- return ov2680_set_fmt(sd, sd_state, &fmt);
-}
-
-static int ov2680_detect(struct i2c_client *client)
-{
- struct i2c_adapter *adapter = client->adapter;
- u32 high = 0, low = 0;
- int ret;
- u16 id;
- u8 revision;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
- return -ENODEV;
-
- ret = ov_read_reg8(client, OV2680_SC_CMMN_CHIP_ID_H, &high);
- if (ret) {
- dev_err(&client->dev, "sensor_id_high read failed (%d)\n", ret);
- return -ENODEV;
- }
- ret = ov_read_reg8(client, OV2680_SC_CMMN_CHIP_ID_L, &low);
- id = ((((u16)high) << 8) | (u16)low);
-
- if (id != OV2680_ID) {
- dev_err(&client->dev, "sensor ID error 0x%x\n", id);
- return -ENODEV;
- }
-
- ret = ov_read_reg8(client, OV2680_SC_CMMN_SUB_ID, &high);
- revision = (u8)high & 0x0f;
-
- dev_info(&client->dev, "sensor_revision id = 0x%x, rev= %d\n",
- id, revision);
-
- return 0;
-}
-
-static int ov2680_s_stream(struct v4l2_subdev *sd, int enable)
-{
- struct ov2680_dev *sensor = to_ov2680_sensor(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret = 0;
-
- mutex_lock(&sensor->lock);
-
- if (sensor->is_streaming == enable) {
- dev_warn(&client->dev, "stream already %s\n", enable ? "started" : "stopped");
- goto error_unlock;
- }
-
- if (enable) {
- ret = pm_runtime_get_sync(sensor->sd.dev);
- if (ret < 0)
- goto error_power_down;
-
- ret = ov2680_set_mode(sensor);
- if (ret)
- goto error_power_down;
-
- /* Restore value of all ctrls */
- ret = __v4l2_ctrl_handler_setup(&sensor->ctrls.handler);
- if (ret)
- goto error_power_down;
-
- ret = ov_write_reg8(client, OV2680_SW_STREAM, OV2680_START_STREAMING);
- if (ret)
- goto error_power_down;
- } else {
- ov_write_reg8(client, OV2680_SW_STREAM, OV2680_STOP_STREAMING);
- pm_runtime_put(sensor->sd.dev);
- }
-
- sensor->is_streaming = enable;
- v4l2_ctrl_activate(sensor->ctrls.vflip, !enable);
- v4l2_ctrl_activate(sensor->ctrls.hflip, !enable);
-
- mutex_unlock(&sensor->lock);
- return 0;
-
-error_power_down:
- pm_runtime_put(sensor->sd.dev);
- sensor->is_streaming = false;
-error_unlock:
- mutex_unlock(&sensor->lock);
- return ret;
-}
-
-static int ov2680_s_config(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- ret = pm_runtime_get_sync(&client->dev);
- if (ret < 0) {
- dev_err(&client->dev, "ov2680 power-up err.\n");
- goto fail_power_on;
- }
-
- /* config & detect sensor */
- ret = ov2680_detect(client);
- if (ret)
- dev_err(&client->dev, "ov2680_detect err s_config.\n");
-
-fail_power_on:
- pm_runtime_put(&client->dev);
- return ret;
-}
-
-static int ov2680_g_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_frame_interval *interval)
-{
- interval->interval.numerator = 1;
- interval->interval.denominator = OV2680_FPS;
- return 0;
-}
-
-static int ov2680_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- /* We support only a single format */
- if (code->index)
- return -EINVAL;
-
- code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
- return 0;
-}
-
-static int ov2680_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_size_enum *fse)
-{
- static const struct v4l2_frmsize_discrete ov2680_frame_sizes[] = {
- { 1616, 1216 },
- { 1616, 1096 },
- { 1616, 916 },
- { 1456, 1096 },
- { 1296, 976 },
- { 1296, 736 },
- { 784, 592 },
- { 656, 496 },
- };
- int index = fse->index;
-
- if (index >= ARRAY_SIZE(ov2680_frame_sizes))
- return -EINVAL;
-
- fse->min_width = ov2680_frame_sizes[index].width;
- fse->min_height = ov2680_frame_sizes[index].height;
- fse->max_width = ov2680_frame_sizes[index].width;
- fse->max_height = ov2680_frame_sizes[index].height;
-
- return 0;
-}
-
-static int ov2680_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_interval_enum *fie)
-{
- /* Only 1 framerate */
- if (fie->index)
- return -EINVAL;
-
- fie->interval.numerator = 1;
- fie->interval.denominator = OV2680_FPS;
- return 0;
-}
-
-static int ov2680_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
-{
- *frames = OV2680_SKIP_FRAMES;
- return 0;
-}
-
-static const struct v4l2_subdev_video_ops ov2680_video_ops = {
- .s_stream = ov2680_s_stream,
- .g_frame_interval = ov2680_g_frame_interval,
-};
-
-static const struct v4l2_subdev_sensor_ops ov2680_sensor_ops = {
- .g_skip_frames = ov2680_g_skip_frames,
-};
-
-static const struct v4l2_subdev_pad_ops ov2680_pad_ops = {
- .init_cfg = ov2680_init_cfg,
- .enum_mbus_code = ov2680_enum_mbus_code,
- .enum_frame_size = ov2680_enum_frame_size,
- .enum_frame_interval = ov2680_enum_frame_interval,
- .get_fmt = ov2680_get_fmt,
- .set_fmt = ov2680_set_fmt,
- .get_selection = ov2680_get_selection,
- .set_selection = ov2680_set_selection,
-};
-
-static const struct v4l2_subdev_ops ov2680_ops = {
- .video = &ov2680_video_ops,
- .pad = &ov2680_pad_ops,
- .sensor = &ov2680_sensor_ops,
-};
-
-static int ov2680_init_controls(struct ov2680_dev *sensor)
-{
- static const char * const test_pattern_menu[] = {
- "Disabled",
- "Color Bars",
- "Random Data",
- "Square",
- "Black Image",
- };
- const struct v4l2_ctrl_ops *ops = &ov2680_ctrl_ops;
- struct ov2680_ctrls *ctrls = &sensor->ctrls;
- struct v4l2_ctrl_handler *hdl = &ctrls->handler;
- int exp_max = OV2680_LINES_PER_FRAME - OV2680_INTEGRATION_TIME_MARGIN;
-
- v4l2_ctrl_handler_init(hdl, 4);
-
- hdl->lock = &sensor->lock;
-
- ctrls->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
- ctrls->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
- ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
- 0, exp_max, 1, exp_max);
- ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, 0, 1023, 1, 250);
- ctrls->test_pattern =
- v4l2_ctrl_new_std_menu_items(hdl,
- &ov2680_ctrl_ops, V4L2_CID_TEST_PATTERN,
- ARRAY_SIZE(test_pattern_menu) - 1,
- 0, 0, test_pattern_menu);
-
- ctrls->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
- ctrls->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
-
- if (hdl->error)
- return hdl->error;
-
- sensor->sd.ctrl_handler = hdl;
- return 0;
-}
-
-static void ov2680_remove(struct i2c_client *client)
-{
- struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct ov2680_dev *sensor = to_ov2680_sensor(sd);
-
- dev_dbg(&client->dev, "ov2680_remove...\n");
-
- v4l2_async_unregister_subdev(&sensor->sd);
- media_entity_cleanup(&sensor->sd.entity);
- v4l2_ctrl_handler_free(&sensor->ctrls.handler);
- mutex_destroy(&sensor->lock);
- fwnode_handle_put(sensor->ep_fwnode);
- pm_runtime_disable(&client->dev);
-}
-
-static int ov2680_probe(struct i2c_client *client)
-{
- struct device *dev = &client->dev;
- struct ov2680_dev *sensor;
- int ret;
-
- sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
- if (!sensor)
- return -ENOMEM;
-
- mutex_init(&sensor->lock);
-
- sensor->client = client;
- v4l2_i2c_subdev_init(&sensor->sd, client, &ov2680_ops);
-
- /*
- * Sometimes the fwnode graph is initialized by the bridge driver.
- * Bridge drivers doing this may also add GPIO mappings, wait for this.
- */
- sensor->ep_fwnode = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL);
- if (!sensor->ep_fwnode)
- return dev_err_probe(dev, -EPROBE_DEFER, "waiting for fwnode graph endpoint\n");
-
- sensor->powerdown = devm_gpiod_get_optional(dev, "powerdown", GPIOD_OUT_HIGH);
- if (IS_ERR(sensor->powerdown)) {
- fwnode_handle_put(sensor->ep_fwnode);
- return dev_err_probe(dev, PTR_ERR(sensor->powerdown), "getting powerdown GPIO\n");
- }
-
- pm_runtime_set_suspended(dev);
- pm_runtime_enable(dev);
- pm_runtime_set_autosuspend_delay(dev, 1000);
- pm_runtime_use_autosuspend(dev);
-
- ret = ov2680_s_config(&sensor->sd);
- if (ret) {
- ov2680_remove(client);
- return ret;
- }
-
- sensor->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
- sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
- sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
- sensor->sd.fwnode = sensor->ep_fwnode;
-
- ret = ov2680_init_controls(sensor);
- if (ret) {
- ov2680_remove(client);
- return ret;
- }
-
- ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad);
- if (ret) {
- ov2680_remove(client);
- return ret;
- }
-
- sensor->mode.crop = ov2680_default_crop;
- ov2680_fill_format(sensor, &sensor->mode.fmt, OV2680_NATIVE_WIDTH, OV2680_NATIVE_HEIGHT);
- ov2680_calc_mode(sensor);
-
- ret = v4l2_async_register_subdev_sensor(&sensor->sd);
- if (ret) {
- ov2680_remove(client);
- return ret;
- }
-
- return 0;
-}
-
-static int ov2680_suspend(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov2680_dev *sensor = to_ov2680_sensor(sd);
-
- gpiod_set_value_cansleep(sensor->powerdown, 1);
- return 0;
-}
-
-static int ov2680_resume(struct device *dev)
-{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov2680_dev *sensor = to_ov2680_sensor(sd);
-
- /* according to DS, at least 5ms is needed after DOVDD (enabled by ACPI) */
- usleep_range(5000, 6000);
-
- gpiod_set_value_cansleep(sensor->powerdown, 0);
-
- /* according to DS, 20ms is needed between PWDN and i2c access */
- msleep(20);
-
- ov2680_init_registers(sd);
- return 0;
-}
-
-static DEFINE_RUNTIME_DEV_PM_OPS(ov2680_pm_ops, ov2680_suspend, ov2680_resume, NULL);
-
-static const struct acpi_device_id ov2680_acpi_match[] = {
- {"XXOV2680"},
- {"OVTI2680"},
- {},
-};
-MODULE_DEVICE_TABLE(acpi, ov2680_acpi_match);
-
-static struct i2c_driver ov2680_driver = {
- .driver = {
- .name = "ov2680",
- .pm = pm_sleep_ptr(&ov2680_pm_ops),
- .acpi_match_table = ov2680_acpi_match,
- },
- .probe = ov2680_probe,
- .remove = ov2680_remove,
-};
-module_i2c_driver(ov2680_driver);
-
-MODULE_AUTHOR("Jacky Wang <Jacky_wang@ovt.com>");
-MODULE_DESCRIPTION("A low-level driver for OmniVision 2680 sensors");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/ov2680.h b/drivers/staging/media/atomisp/i2c/ov2680.h
deleted file mode 100644
index d032af245674..000000000000
--- a/drivers/staging/media/atomisp/i2c/ov2680.h
+++ /dev/null
@@ -1,249 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for OmniVision OV2680 5M camera sensor.
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- *
- */
-
-#ifndef __OV2680_H__
-#define __OV2680_H__
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
-#include <linux/videodev2.h>
-#include <linux/spinlock.h>
-#include <media/v4l2-subdev.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-#include <linux/v4l2-mediabus.h>
-#include <media/media-entity.h>
-
-#define OV2680_NATIVE_WIDTH 1616
-#define OV2680_NATIVE_HEIGHT 1216
-#define OV2680_NATIVE_START_LEFT 0
-#define OV2680_NATIVE_START_TOP 0
-#define OV2680_ACTIVE_WIDTH 1600
-#define OV2680_ACTIVE_HEIGHT 1200
-#define OV2680_ACTIVE_START_LEFT 8
-#define OV2680_ACTIVE_START_TOP 8
-#define OV2680_MIN_CROP_WIDTH 2
-#define OV2680_MIN_CROP_HEIGHT 2
-
-/* 1704 * 1294 * 30fps = 66MHz pixel clock */
-#define OV2680_PIXELS_PER_LINE 1704
-#define OV2680_LINES_PER_FRAME 1294
-#define OV2680_FPS 30
-#define OV2680_SKIP_FRAMES 3
-
-/* If possible send 16 extra rows / lines to the ISP as padding */
-#define OV2680_END_MARGIN 16
-
-#define OV2680_FOCAL_LENGTH_NUM 334 /*3.34mm*/
-
-#define OV2680_INTEGRATION_TIME_MARGIN 8
-#define OV2680_ID 0x2680
-
-/*
- * OV2680 System control registers
- */
-#define OV2680_SW_SLEEP 0x0100
-#define OV2680_SW_RESET 0x0103
-#define OV2680_SW_STREAM 0x0100
-
-#define OV2680_SC_CMMN_CHIP_ID_H 0x300A
-#define OV2680_SC_CMMN_CHIP_ID_L 0x300B
-#define OV2680_SC_CMMN_SCCB_ID 0x302B /* 0x300C*/
-#define OV2680_SC_CMMN_SUB_ID 0x302A /* process, version*/
-
-#define OV2680_GROUP_ACCESS 0x3208 /*Bit[7:4] Group control, Bit[3:0] Group ID*/
-
-#define OV2680_REG_EXPOSURE_PK_HIGH 0x3500
-#define OV2680_REG_GAIN_PK 0x350a
-
-#define OV2680_REG_SENSOR_CTRL_0A 0x370a
-
-#define OV2680_HORIZONTAL_START_H 0x3800 /* Bit[11:8] */
-#define OV2680_HORIZONTAL_START_L 0x3801 /* Bit[7:0] */
-#define OV2680_VERTICAL_START_H 0x3802 /* Bit[11:8] */
-#define OV2680_VERTICAL_START_L 0x3803 /* Bit[7:0] */
-#define OV2680_HORIZONTAL_END_H 0x3804 /* Bit[11:8] */
-#define OV2680_HORIZONTAL_END_L 0x3805 /* Bit[7:0] */
-#define OV2680_VERTICAL_END_H 0x3806 /* Bit[11:8] */
-#define OV2680_VERTICAL_END_L 0x3807 /* Bit[7:0] */
-#define OV2680_HORIZONTAL_OUTPUT_SIZE_H 0x3808 /* Bit[11:8] */
-#define OV2680_HORIZONTAL_OUTPUT_SIZE_L 0x3809 /* Bit[7:0] */
-#define OV2680_VERTICAL_OUTPUT_SIZE_H 0x380a /* Bit[11:8] */
-#define OV2680_VERTICAL_OUTPUT_SIZE_L 0x380b /* Bit[7:0] */
-#define OV2680_HTS 0x380c
-#define OV2680_VTS 0x380e
-#define OV2680_ISP_X_WIN 0x3810
-#define OV2680_ISP_Y_WIN 0x3812
-#define OV2680_X_INC 0x3814
-#define OV2680_Y_INC 0x3815
-
-#define OV2680_FRAME_OFF_NUM 0x4202
-
-/*Flip/Mirror*/
-#define OV2680_REG_FORMAT1 0x3820
-#define OV2680_REG_FORMAT2 0x3821
-
-#define OV2680_MWB_RED_GAIN_H 0x5004/*0x3400*/
-#define OV2680_MWB_GREEN_GAIN_H 0x5006/*0x3402*/
-#define OV2680_MWB_BLUE_GAIN_H 0x5008/*0x3404*/
-#define OV2680_MWB_GAIN_MAX 0x0fff
-
-#define OV2680_REG_ISP_CTRL00 0x5080
-
-#define OV2680_X_WIN 0x5704
-#define OV2680_Y_WIN 0x5706
-#define OV2680_WIN_CONTROL 0x5708
-
-#define OV2680_START_STREAMING 0x01
-#define OV2680_STOP_STREAMING 0x00
-
-/*
- * ov2680 device structure.
- */
-struct ov2680_dev {
- struct v4l2_subdev sd;
- struct media_pad pad;
- /* Protect against concurrent changes to controls */
- struct mutex lock;
- struct i2c_client *client;
- struct gpio_desc *powerdown;
- struct fwnode_handle *ep_fwnode;
- bool is_streaming;
-
- struct ov2680_mode {
- struct v4l2_rect crop;
- struct v4l2_mbus_framefmt fmt;
- bool binning;
- u16 h_start;
- u16 v_start;
- u16 h_end;
- u16 v_end;
- u16 h_output_size;
- u16 v_output_size;
- u16 hts;
- u16 vts;
- } mode;
-
- struct ov2680_ctrls {
- struct v4l2_ctrl_handler handler;
- struct v4l2_ctrl *hflip;
- struct v4l2_ctrl *vflip;
- struct v4l2_ctrl *exposure;
- struct v4l2_ctrl *gain;
- struct v4l2_ctrl *test_pattern;
- } ctrls;
-};
-
-/**
- * struct ov2680_reg - MI sensor register format
- * @type: type of the register
- * @reg: 16-bit offset to register
- * @val: 8/16/32-bit register value
- *
- * Define a structure for sensor register initialization values
- */
-struct ov2680_reg {
- u16 reg;
- u32 val; /* @set value for read/mod/write, @mask */
-};
-
-#define to_ov2680_sensor(x) container_of(x, struct ov2680_dev, sd)
-
-static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl)
-{
- struct ov2680_dev *sensor =
- container_of(ctrl->handler, struct ov2680_dev, ctrls.handler);
-
- return &sensor->sd;
-}
-
-static struct ov2680_reg const ov2680_global_setting[] = {
- /* MIPI PHY, 0x10 -> 0x1c enable bp_c_hs_en_lat and bp_d_hs_en_lat */
- {0x3016, 0x1c},
-
- /* PLL MULT bits 0-7, datasheet default 0x37 for 24MHz extclk, use 0x45 for 19.2 Mhz extclk */
- {0x3082, 0x45},
-
- /* R MANUAL set exposure (0x01) and gain (0x02) to manual (hw does not do auto) */
- {0x3503, 0x03},
-
- /* Analog control register tweaks */
- {0x3603, 0x39}, /* Reset value 0x99 */
- {0x3604, 0x24}, /* Reset value 0x74 */
- {0x3621, 0x37}, /* Reset value 0x44 */
-
- /* Sensor control register tweaks */
- {0x3701, 0x64}, /* Reset value 0x61 */
- {0x3705, 0x3c}, /* Reset value 0x21 */
- {0x370c, 0x50}, /* Reset value 0x10 */
- {0x370d, 0xc0}, /* Reset value 0x00 */
- {0x3718, 0x88}, /* Reset value 0x80 */
-
- /* PSRAM tweaks */
- {0x3781, 0x80}, /* Reset value 0x00 */
- {0x3784, 0x0c}, /* Reset value 0x00, based on OV2680_R1A_AM10.ovt */
- {0x3789, 0x60}, /* Reset value 0x50 */
-
- /* BLC CTRL00 0x01 -> 0x81 set avg_weight to 8 */
- {0x4000, 0x81},
-
- /* Set black level compensation range to 0 - 3 (default 0 - 11) */
- {0x4008, 0x00},
- {0x4009, 0x03},
-
- /* VFIFO R2 0x00 -> 0x02 set Frame reset enable */
- {0x4602, 0x02},
-
- /* MIPI ctrl CLK PREPARE MIN change from 0x26 (38) -> 0x36 (54) */
- {0x481f, 0x36},
-
- /* MIPI ctrl CLK LPX P MIN change from 0x32 (50) -> 0x36 (54) */
- {0x4825, 0x36},
-
- /* R ISP CTRL2 0x20 -> 0x30, set sof_sel bit */
- {0x5002, 0x30},
-
- /*
- * Window CONTROL 0x00 -> 0x01, enable manual window control,
- * this is necessary for full size flip and mirror support.
- */
- {0x5708, 0x01},
-
- /*
- * DPC CTRL0 0x14 -> 0x3e, set enable_tail, enable_3x3_cluster
- * and enable_general_tail bits based OV2680_R1A_AM10.ovt.
- */
- {0x5780, 0x3e},
-
- /* DPC MORE CONNECTION CASE THRE 0x0c (12) -> 0x02 (2) */
- {0x5788, 0x02},
-
- /* DPC GAIN LIST1 0x0f (15) -> 0x08 (8) */
- {0x578e, 0x08},
-
- /* DPC GAIN LIST2 0x3f (63) -> 0x0c (12) */
- {0x578f, 0x0c},
-
- /* DPC THRE RATIO 0x04 (4) -> 0x00 (0) */
- {0x5792, 0x00},
-
- {}
-};
-
-#endif
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index e27f9dc8e7aa..0803b296e9ac 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -3001,12 +3001,6 @@ void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe)
bool need_to_enqueue_buffer = false;
int i;
- if (!asd) {
- dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
- __func__, pipe->vdev.name);
- return;
- }
-
lockdep_assert_held(&asd->isp->mutex);
/*
@@ -3068,12 +3062,6 @@ int atomisp_set_parameters(struct video_device *vdev,
struct atomisp_css_params *css_param = &asd->params.css_param;
int ret;
- if (!asd) {
- dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
- __func__, vdev->name);
- return -EINVAL;
- }
-
lockdep_assert_held(&asd->isp->mutex);
if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
@@ -4067,12 +4055,6 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev,
const struct atomisp_in_fmt_conv *fc = NULL;
int ret, i;
- if (!asd) {
- dev_err(isp->dev, "%s(): asd is NULL, device is %s\n",
- __func__, vdev->name);
- return -EINVAL;
- }
-
isp_sink_crop = atomisp_subdev_get_rect(
&asd->subdev, NULL, V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SINK, V4L2_SEL_TGT_CROP);
@@ -4280,12 +4262,6 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_p
(struct atomisp_input_stream_info *)ffmt->reserved;
int ret;
- if (!asd) {
- dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n",
- __func__, vdev->name);
- return -EINVAL;
- }
-
format = atomisp_get_format_bridge(f->pixelformat);
if (!format)
return -EINVAL;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
index b13d1cb4668d..b97ec85aa0ba 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
@@ -613,9 +613,6 @@ static void __apply_additional_pipe_config(
static bool is_pipe_valid_to_current_run_mode(struct atomisp_sub_device *asd,
enum ia_css_pipe_id pipe_id)
{
- if (!asd)
- return false;
-
if (pipe_id == IA_CSS_PIPE_ID_YUVPP)
return true;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2.h b/drivers/staging/media/atomisp/pci/atomisp_csi2.h
index 16ddb3ab2963..8a112acba1e0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_csi2.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2.h
@@ -30,9 +30,6 @@
#define CSI2_PAD_SOURCE 1
#define CSI2_PADS_NUM 2
-#define CSI2_MAX_LANES 4
-#define CSI2_MAX_LINK_FREQS 3
-
#define CSI2_MAX_ACPI_GPIOS 2u
struct acpi_device;
@@ -55,70 +52,6 @@ struct atomisp_csi2_acpi_gpio_parsing_data {
unsigned int map_count;
};
-enum atomisp_csi2_sensor_swnodes {
- SWNODE_SENSOR,
- SWNODE_SENSOR_PORT,
- SWNODE_SENSOR_ENDPOINT,
- SWNODE_CSI2_PORT,
- SWNODE_CSI2_ENDPOINT,
- SWNODE_COUNT
-};
-
-struct atomisp_csi2_property_names {
- char clock_frequency[16];
- char rotation[9];
- char bus_type[9];
- char data_lanes[11];
- char remote_endpoint[16];
- char link_frequencies[17];
-};
-
-struct atomisp_csi2_node_names {
- char port[7];
- char endpoint[11];
- char remote_port[7];
-};
-
-struct atomisp_csi2_sensor_config {
- const char *hid;
- int lanes;
- int nr_link_freqs;
- u64 link_freqs[CSI2_MAX_LINK_FREQS];
-};
-
-struct atomisp_csi2_sensor {
- /* Append port in "-%u" format as suffix of HID */
- char name[ACPI_ID_LEN + 4];
- struct acpi_device *adev;
- int port;
- int lanes;
-
- /* SWNODE_COUNT + 1 for terminating NULL */
- const struct software_node *group[SWNODE_COUNT + 1];
- struct software_node swnodes[SWNODE_COUNT];
- struct atomisp_csi2_node_names node_names;
- struct atomisp_csi2_property_names prop_names;
- /* "clock-frequency", "rotation" + terminating entry */
- struct property_entry dev_properties[3];
- /* "bus-type", "data-lanes", "remote-endpoint" + "link-freq" + terminating entry */
- struct property_entry ep_properties[5];
- /* "data-lanes", "remote-endpoint" + terminating entry */
- struct property_entry csi2_properties[3];
- struct software_node_ref_args local_ref[1];
- struct software_node_ref_args remote_ref[1];
- struct software_node_ref_args vcm_ref[1];
- /* GPIO mappings storage */
- struct atomisp_csi2_acpi_gpio_map gpio_map;
-};
-
-struct atomisp_csi2_bridge {
- struct software_node csi2_node;
- char csi2_node_name[14];
- u32 data_lanes[CSI2_MAX_LANES];
- unsigned int n_sensors;
- struct atomisp_csi2_sensor sensors[ATOMISP_CAMERA_NR_PORTS];
-};
-
struct atomisp_mipi_csi2_device {
struct v4l2_subdev subdev;
struct media_pad pads[CSI2_PADS_NUM];
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c b/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
index 0d12ba78d9c1..03940c11505f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
@@ -14,31 +14,14 @@
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/property.h>
+
+#include <media/ipu-bridge.h>
#include <media/v4l2-fwnode.h>
#include "atomisp_cmd.h"
#include "atomisp_csi2.h"
#include "atomisp_internal.h"
-#define NODE_SENSOR(_HID, _PROPS) \
- ((const struct software_node) { \
- .name = _HID, \
- .properties = _PROPS, \
- })
-
-#define NODE_PORT(_PORT, _SENSOR_NODE) \
- ((const struct software_node) { \
- .name = _PORT, \
- .parent = _SENSOR_NODE, \
- })
-
-#define NODE_ENDPOINT(_EP, _PORT, _PROPS) \
- ((const struct software_node) { \
- .name = _EP, \
- .parent = _PORT, \
- .properties = _PROPS, \
- })
-
#define PMC_CLK_RATE_19_2MHZ 19200000
/*
@@ -84,20 +67,27 @@ static const guid_t atomisp_dsm_guid =
0x97, 0xb9, 0x88, 0x2a, 0x68, 0x60, 0xa4, 0xbe);
/*
- * Extend this array with ACPI Hardware IDs of sensors known to be working
- * plus the default number of links + link-frequencies.
- *
- * Do not add an entry for a sensor that is not actually supported,
- * or which have not yet been converted to work without atomisp_gmin
- * power-management and with v4l2-async probing.
+ * 75c9a639-5c8a-4a00-9f48-a9c3b5da789f
+ * This _DSM GUID returns a string giving the VCM type e.g. "AD5823".
*/
-static const struct atomisp_csi2_sensor_config supported_sensors[] = {
- /* GalaxyCore GC0310 */
- { "INT0310", 1 },
- /* Omnivision OV2680 */
- { "OVTI2680", 1 },
+static const guid_t vcm_dsm_guid =
+ GUID_INIT(0x75c9a639, 0x5c8a, 0x4a00,
+ 0x9f, 0x48, 0xa9, 0xc3, 0xb5, 0xda, 0x78, 0x9f);
+
+struct atomisp_sensor_config {
+ int lanes;
+ bool vcm;
};
+#define ATOMISP_SENSOR_CONFIG(_HID, _LANES, _VCM) \
+{ \
+ .id = _HID, \
+ .driver_data = (long)&((const struct atomisp_sensor_config) { \
+ .lanes = _LANES, \
+ .vcm = _VCM, \
+ }) \
+}
+
/*
* gmin_cfg parsing code. This is a cleaned up version of the gmin_cfg parsing
* code from atomisp_gmin_platform.c.
@@ -151,7 +141,8 @@ static char *gmin_cfg_get_dsm(struct acpi_device *adev, const char *key)
if (!val)
break;
- acpi_handle_info(adev->handle, "Using DSM entry %s=%s\n", key, val);
+ acpi_handle_info(adev->handle, "%s: Using DSM entry %s=%s\n",
+ dev_name(&adev->dev), key, val);
break;
}
}
@@ -176,7 +167,8 @@ static char *gmin_cfg_get_dmi_override(struct acpi_device *adev, const char *key
if (strcmp(key, gv->key))
continue;
- acpi_handle_info(adev->handle, "Using DMI entry %s=%s\n", key, gv->val);
+ acpi_handle_info(adev->handle, "%s: Using DMI entry %s=%s\n",
+ dev_name(&adev->dev), key, gv->val);
return kstrdup(gv->val, GFP_KERNEL);
}
@@ -212,7 +204,8 @@ static int gmin_cfg_get_int(struct acpi_device *adev, const char *key, int defau
return int_val;
out_use_default:
- acpi_handle_info(adev->handle, "Using default %s=%d\n", key, default_val);
+ acpi_handle_info(adev->handle, "%s: Using default %s=%d\n",
+ dev_name(&adev->dev), key, default_val);
return default_val;
}
@@ -255,7 +248,8 @@ static int atomisp_csi2_get_pmc_clk_nr_from_acpi_pr0(struct acpi_device *adev)
ACPI_FREE(buffer.pointer);
if (ret < 0)
- acpi_handle_warn(adev->handle, "Could not find PMC clk in _PR0\n");
+ acpi_handle_warn(adev->handle, "%s: Could not find PMC clk in _PR0\n",
+ dev_name(&adev->dev));
return ret;
}
@@ -274,7 +268,8 @@ static int atomisp_csi2_set_pmc_clk_freq(struct acpi_device *adev, int clock_num
clk = clk_get(NULL, name);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- acpi_handle_err(adev->handle, "Error getting clk %s:%d\n", name, ret);
+ acpi_handle_err(adev->handle, "%s: Error getting clk %s: %d\n",
+ dev_name(&adev->dev), name, ret);
return ret;
}
@@ -288,7 +283,8 @@ static int atomisp_csi2_set_pmc_clk_freq(struct acpi_device *adev, int clock_num
if (!ret)
ret = clk_set_rate(clk, PMC_CLK_RATE_19_2MHZ);
if (ret)
- acpi_handle_err(adev->handle, "Error setting clk-rate for %s:%d\n", name, ret);
+ acpi_handle_err(adev->handle, "%s: Error setting clk-rate for %s: %d\n",
+ dev_name(&adev->dev), name, ret);
clk_put(clk);
return ret;
@@ -337,7 +333,8 @@ static int atomisp_csi2_handle_acpi_gpio_res(struct acpi_resource *ares, void *_
if (i == data->settings_count) {
acpi_handle_warn(data->adev->handle,
- "Could not find DSM GPIO settings for pin %u\n", pin);
+ "%s: Could not find DSM GPIO settings for pin %u\n",
+ dev_name(&data->adev->dev), pin);
return 1;
}
@@ -349,7 +346,8 @@ static int atomisp_csi2_handle_acpi_gpio_res(struct acpi_resource *ares, void *_
name = "powerdown-gpios";
break;
default:
- acpi_handle_warn(data->adev->handle, "Unknown GPIO type 0x%02lx for pin %u\n",
+ acpi_handle_warn(data->adev->handle, "%s: Unknown GPIO type 0x%02lx for pin %u\n",
+ dev_name(&data->adev->dev),
INTEL_GPIO_DSM_TYPE(settings), pin);
return 1;
}
@@ -374,7 +372,8 @@ static int atomisp_csi2_handle_acpi_gpio_res(struct acpi_resource *ares, void *_
data->map->mapping[i].size = 1;
data->map_count++;
- acpi_handle_info(data->adev->handle, "%s crs %d %s pin %u active-%s\n", name,
+ acpi_handle_info(data->adev->handle, "%s: %s crs %d %s pin %u active-%s\n",
+ dev_name(&data->adev->dev), name,
data->res_count - 1, agpio->resource_source.string_ptr,
pin, active_low ? "low" : "high");
@@ -400,8 +399,7 @@ static int atomisp_csi2_handle_acpi_gpio_res(struct acpi_resource *ares, void *_
* the INT3472 discrete.c code and there is some overlap, but there are
* enough differences that it is difficult to share the code.
*/
-static int atomisp_csi2_add_gpio_mappings(struct atomisp_csi2_sensor *sensor,
- struct acpi_device *adev)
+static int atomisp_csi2_add_gpio_mappings(struct acpi_device *adev)
{
struct atomisp_csi2_acpi_gpio_parsing_data data = { };
LIST_HEAD(resource_list);
@@ -412,7 +410,8 @@ static int atomisp_csi2_add_gpio_mappings(struct atomisp_csi2_sensor *sensor,
obj = acpi_evaluate_dsm_typed(adev->handle, &intel_sensor_module_guid,
0x00, 1, NULL, ACPI_TYPE_STRING);
if (obj) {
- acpi_handle_info(adev->handle, "Sensor module id: '%s'\n", obj->string.pointer);
+ acpi_handle_info(adev->handle, "%s: Sensor module id: '%s'\n",
+ dev_name(&adev->dev), obj->string.pointer);
ACPI_FREE(obj);
}
@@ -426,7 +425,8 @@ static int atomisp_csi2_add_gpio_mappings(struct atomisp_csi2_sensor *sensor,
&intel_sensor_gpio_info_guid, 0x00, 1,
NULL, ACPI_TYPE_INTEGER);
if (!obj) {
- acpi_handle_err(adev->handle, "No _DSM entry for GPIO pin count\n");
+ acpi_handle_err(adev->handle, "%s: No _DSM entry for GPIO pin count\n",
+ dev_name(&adev->dev));
return -EIO;
}
@@ -434,7 +434,9 @@ static int atomisp_csi2_add_gpio_mappings(struct atomisp_csi2_sensor *sensor,
ACPI_FREE(obj);
if (data.settings_count > CSI2_MAX_ACPI_GPIOS) {
- acpi_handle_err(adev->handle, "Too many GPIOs %u > %u\n", data.settings_count, CSI2_MAX_ACPI_GPIOS);
+ acpi_handle_err(adev->handle, "%s: Too many GPIOs %u > %u\n",
+ dev_name(&adev->dev), data.settings_count,
+ CSI2_MAX_ACPI_GPIOS);
return -EOVERFLOW;
}
@@ -448,7 +450,8 @@ static int atomisp_csi2_add_gpio_mappings(struct atomisp_csi2_sensor *sensor,
0x00, i + 2,
NULL, ACPI_TYPE_INTEGER);
if (!obj) {
- acpi_handle_err(adev->handle, "No _DSM entry for pin %u\n", i);
+ acpi_handle_err(adev->handle, "%s: No _DSM entry for pin %u\n",
+ dev_name(&adev->dev), i);
return -EIO;
}
@@ -463,15 +466,19 @@ static int atomisp_csi2_add_gpio_mappings(struct atomisp_csi2_sensor *sensor,
INTEL_GPIO_DSM_PIN(data.settings[j]))
continue;
- acpi_handle_err(adev->handle, "Duplicate pin number %lu\n",
+ acpi_handle_err(adev->handle, "%s: Duplicate pin number %lu\n",
+ dev_name(&adev->dev),
INTEL_GPIO_DSM_PIN(data.settings[i]));
return -EIO;
}
}
+ data.map = kzalloc(sizeof(*data.map), GFP_KERNEL);
+ if (!data.map)
+ return -ENOMEM;
+
/* Now parse the ACPI resources and build the lookup table */
data.adev = adev;
- data.map = &sensor->gpio_map;
ret = acpi_dev_get_resources(adev, &resource_list,
atomisp_csi2_handle_acpi_gpio_res, &data);
if (ret < 0)
@@ -481,230 +488,105 @@ static int atomisp_csi2_add_gpio_mappings(struct atomisp_csi2_sensor *sensor,
if (data.map_count != data.settings_count ||
data.res_count != data.settings_count)
- acpi_handle_warn(adev->handle, "ACPI GPIO resources vs DSM GPIO-info count mismatch (dsm: %d res: %d map %d\n",
- data.settings_count, data.res_count, data.map_count);
+ acpi_handle_warn(adev->handle, "%s: ACPI GPIO resources vs DSM GPIO-info count mismatch (dsm: %d res: %d map %d\n",
+ dev_name(&adev->dev), data.settings_count,
+ data.res_count, data.map_count);
ret = acpi_dev_add_driver_gpios(adev, data.map->mapping);
if (ret)
- acpi_handle_err(adev->handle, "Error adding driver GPIOs: %d\n", ret);
+ acpi_handle_err(adev->handle, "%s: Error adding driver GPIOs: %d\n",
+ dev_name(&adev->dev), ret);
return ret;
}
-static const struct atomisp_csi2_property_names prop_names = {
- .clock_frequency = "clock-frequency",
- .rotation = "rotation",
- .bus_type = "bus-type",
- .data_lanes = "data-lanes",
- .remote_endpoint = "remote-endpoint",
- .link_frequencies = "link-frequencies",
-};
-
-static void atomisp_csi2_create_fwnode_properties(struct atomisp_csi2_sensor *sensor,
- struct atomisp_csi2_bridge *bridge,
- const struct atomisp_csi2_sensor_config *cfg)
+static char *atomisp_csi2_get_vcm_type(struct acpi_device *adev)
{
- sensor->prop_names = prop_names;
-
- sensor->local_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_CSI2_ENDPOINT]);
- sensor->remote_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_SENSOR_ENDPOINT]);
-
- sensor->dev_properties[0] = PROPERTY_ENTRY_U32(sensor->prop_names.clock_frequency,
- PMC_CLK_RATE_19_2MHZ);
- sensor->dev_properties[1] = PROPERTY_ENTRY_U32(sensor->prop_names.rotation, 0);
-
- sensor->ep_properties[0] = PROPERTY_ENTRY_U32(sensor->prop_names.bus_type,
- V4L2_FWNODE_BUS_TYPE_CSI2_DPHY);
- sensor->ep_properties[1] = PROPERTY_ENTRY_U32_ARRAY_LEN(sensor->prop_names.data_lanes,
- bridge->data_lanes,
- sensor->lanes);
- sensor->ep_properties[2] = PROPERTY_ENTRY_REF_ARRAY(sensor->prop_names.remote_endpoint,
- sensor->local_ref);
- if (cfg->nr_link_freqs > 0)
- sensor->ep_properties[3] =
- PROPERTY_ENTRY_U64_ARRAY_LEN(sensor->prop_names.link_frequencies,
- cfg->link_freqs, cfg->nr_link_freqs);
-
- sensor->csi2_properties[0] = PROPERTY_ENTRY_U32_ARRAY_LEN(sensor->prop_names.data_lanes,
- bridge->data_lanes,
- sensor->lanes);
- sensor->csi2_properties[1] = PROPERTY_ENTRY_REF_ARRAY(sensor->prop_names.remote_endpoint,
- sensor->remote_ref);
-}
+ union acpi_object *obj;
+ char *vcm_type;
-static void atomisp_csi2_init_swnode_names(struct atomisp_csi2_sensor *sensor)
-{
- snprintf(sensor->node_names.remote_port,
- sizeof(sensor->node_names.remote_port),
- SWNODE_GRAPH_PORT_NAME_FMT, sensor->port);
- snprintf(sensor->node_names.port,
- sizeof(sensor->node_names.port),
- SWNODE_GRAPH_PORT_NAME_FMT, 0); /* Always port 0 */
- snprintf(sensor->node_names.endpoint,
- sizeof(sensor->node_names.endpoint),
- SWNODE_GRAPH_ENDPOINT_NAME_FMT, 0); /* And endpoint 0 */
-}
+ obj = acpi_evaluate_dsm_typed(adev->handle, &vcm_dsm_guid, 0, 0,
+ NULL, ACPI_TYPE_STRING);
+ if (!obj)
+ return NULL;
-static void atomisp_csi2_init_swnode_group(struct atomisp_csi2_sensor *sensor)
-{
- struct software_node *nodes = sensor->swnodes;
+ vcm_type = kstrdup(obj->string.pointer, GFP_KERNEL);
+ ACPI_FREE(obj);
- sensor->group[SWNODE_SENSOR] = &nodes[SWNODE_SENSOR];
- sensor->group[SWNODE_SENSOR_PORT] = &nodes[SWNODE_SENSOR_PORT];
- sensor->group[SWNODE_SENSOR_ENDPOINT] = &nodes[SWNODE_SENSOR_ENDPOINT];
- sensor->group[SWNODE_CSI2_PORT] = &nodes[SWNODE_CSI2_PORT];
- sensor->group[SWNODE_CSI2_ENDPOINT] = &nodes[SWNODE_CSI2_ENDPOINT];
-}
+ if (!vcm_type)
+ return NULL;
-static void atomisp_csi2_create_connection_swnodes(struct atomisp_csi2_bridge *bridge,
- struct atomisp_csi2_sensor *sensor)
-{
- struct software_node *nodes = sensor->swnodes;
-
- atomisp_csi2_init_swnode_names(sensor);
-
- nodes[SWNODE_SENSOR] = NODE_SENSOR(sensor->name,
- sensor->dev_properties);
- nodes[SWNODE_SENSOR_PORT] = NODE_PORT(sensor->node_names.port,
- &nodes[SWNODE_SENSOR]);
- nodes[SWNODE_SENSOR_ENDPOINT] = NODE_ENDPOINT(sensor->node_names.endpoint,
- &nodes[SWNODE_SENSOR_PORT],
- sensor->ep_properties);
- nodes[SWNODE_CSI2_PORT] = NODE_PORT(sensor->node_names.remote_port,
- &bridge->csi2_node);
- nodes[SWNODE_CSI2_ENDPOINT] = NODE_ENDPOINT(sensor->node_names.endpoint,
- &nodes[SWNODE_CSI2_PORT],
- sensor->csi2_properties);
-
- atomisp_csi2_init_swnode_group(sensor);
+ string_lower(vcm_type, vcm_type);
+ return vcm_type;
}
-static void atomisp_csi2_unregister_sensors(struct atomisp_csi2_bridge *bridge)
-{
- struct atomisp_csi2_sensor *sensor;
- unsigned int i;
-
- for (i = 0; i < bridge->n_sensors; i++) {
- sensor = &bridge->sensors[i];
- software_node_unregister_node_group(sensor->group);
- acpi_dev_remove_driver_gpios(sensor->adev);
- acpi_dev_put(sensor->adev);
- }
-}
+static const struct acpi_device_id atomisp_sensor_configs[] = {
+ ATOMISP_SENSOR_CONFIG("INT33BE", 2, true), /* OV5693 */
+ {}
+};
-static int atomisp_csi2_connect_sensor(const struct atomisp_csi2_sensor_config *cfg,
- struct atomisp_csi2_bridge *bridge,
- struct atomisp_device *isp)
+static int atomisp_csi2_parse_sensor_fwnode(struct acpi_device *adev,
+ struct ipu_sensor *sensor)
{
- struct fwnode_handle *fwnode, *primary;
- struct atomisp_csi2_sensor *sensor;
- struct acpi_device *adev;
+ const struct acpi_device_id *id;
int ret, clock_num;
+ bool vcm = false;
+ int lanes = 1;
- for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
- if (!adev->status.enabled)
- continue;
-
- if (bridge->n_sensors >= ATOMISP_CAMERA_NR_PORTS) {
- dev_err(isp->dev, "Exceeded available CSI2 ports\n");
- ret = -EOVERFLOW;
- goto err_put_adev;
- }
-
- sensor = &bridge->sensors[bridge->n_sensors];
-
- /*
- * ACPI takes care of turning the PMC clock on and off, but on BYT
- * the clock defaults to 25 MHz instead of the expected 19.2 MHz.
- * Get the PMC-clock number from ACPI _PR0 method and set it to 19.2 MHz.
- * The PMC-clock number is also used to determine the default CSI port.
- */
- clock_num = atomisp_csi2_get_pmc_clk_nr_from_acpi_pr0(adev);
-
- ret = atomisp_csi2_set_pmc_clk_freq(adev, clock_num);
- if (ret)
- goto err_put_adev;
-
- sensor->port = atomisp_csi2_get_port(adev, clock_num);
- if (sensor->port >= ATOMISP_CAMERA_NR_PORTS) {
- acpi_handle_err(adev->handle, "Invalid port: %d\n", sensor->port);
- ret = -EINVAL;
- goto err_put_adev;
- }
-
- sensor->lanes = gmin_cfg_get_int(adev, "CsiLanes", cfg->lanes);
- if (sensor->lanes > CSI2_MAX_LANES) {
- acpi_handle_err(adev->handle, "Invalid number of lanes: %d\n", sensor->lanes);
- ret = -EINVAL;
- goto err_put_adev;
- }
-
- ret = atomisp_csi2_add_gpio_mappings(sensor, adev);
- if (ret)
- goto err_put_adev;
+ id = acpi_match_acpi_device(atomisp_sensor_configs, adev);
+ if (id) {
+ struct atomisp_sensor_config *cfg =
+ (struct atomisp_sensor_config *)id->driver_data;
- snprintf(sensor->name, sizeof(sensor->name), "%s-%u",
- cfg->hid, sensor->port);
-
- atomisp_csi2_create_fwnode_properties(sensor, bridge, cfg);
- atomisp_csi2_create_connection_swnodes(bridge, sensor);
-
- ret = software_node_register_node_group(sensor->group);
- if (ret)
- goto err_remove_mappings;
-
- fwnode = software_node_fwnode(&sensor->swnodes[SWNODE_SENSOR]);
- if (!fwnode) {
- ret = -ENODEV;
- goto err_free_swnodes;
- }
+ lanes = cfg->lanes;
+ vcm = cfg->vcm;
+ }
- sensor->adev = acpi_dev_get(adev);
+ /*
+ * ACPI takes care of turning the PMC clock on and off, but on BYT
+ * the clock defaults to 25 MHz instead of the expected 19.2 MHz.
+ * Get the PMC-clock number from ACPI _PR0 method and set it to 19.2 MHz.
+ * The PMC-clock number is also used to determine the default CSI port.
+ */
+ clock_num = atomisp_csi2_get_pmc_clk_nr_from_acpi_pr0(adev);
- primary = acpi_fwnode_handle(adev);
- primary->secondary = fwnode;
+ ret = atomisp_csi2_set_pmc_clk_freq(adev, clock_num);
+ if (ret)
+ return ret;
- bridge->n_sensors++;
+ sensor->link = atomisp_csi2_get_port(adev, clock_num);
+ if (sensor->link >= ATOMISP_CAMERA_NR_PORTS) {
+ acpi_handle_err(adev->handle, "%s: Invalid port: %u\n",
+ dev_name(&adev->dev), sensor->link);
+ return -EINVAL;
}
- return 0;
-
-err_free_swnodes:
- software_node_unregister_node_group(sensor->group);
-err_remove_mappings:
- acpi_dev_remove_driver_gpios(adev);
-err_put_adev:
- acpi_dev_put(adev);
- return ret;
-}
+ sensor->lanes = gmin_cfg_get_int(adev, "CsiLanes", lanes);
+ if (sensor->lanes > IPU_MAX_LANES) {
+ acpi_handle_err(adev->handle, "%s: Invalid lane-count: %d\n",
+ dev_name(&adev->dev), sensor->lanes);
+ return -EINVAL;
+ }
-static int atomisp_csi2_connect_sensors(struct atomisp_csi2_bridge *bridge,
- struct atomisp_device *isp)
-{
- unsigned int i;
- int ret;
+ ret = atomisp_csi2_add_gpio_mappings(adev);
+ if (ret)
+ return ret;
- for (i = 0; i < ARRAY_SIZE(supported_sensors); i++) {
- const struct atomisp_csi2_sensor_config *cfg = &supported_sensors[i];
+ sensor->mclkspeed = PMC_CLK_RATE_19_2MHZ;
+ sensor->rotation = 0;
+ sensor->orientation = (sensor->link == 1) ?
+ V4L2_FWNODE_ORIENTATION_BACK : V4L2_FWNODE_ORIENTATION_FRONT;
- ret = atomisp_csi2_connect_sensor(cfg, bridge, isp);
- if (ret)
- goto err_unregister_sensors;
- }
+ if (vcm)
+ sensor->vcm_type = atomisp_csi2_get_vcm_type(adev);
return 0;
-
-err_unregister_sensors:
- atomisp_csi2_unregister_sensors(bridge);
- return ret;
}
int atomisp_csi2_bridge_init(struct atomisp_device *isp)
{
- struct atomisp_csi2_bridge *bridge;
struct device *dev = isp->dev;
struct fwnode_handle *fwnode;
- int i, ret;
/*
* This function is intended to run only once and then leave
@@ -716,58 +598,13 @@ int atomisp_csi2_bridge_init(struct atomisp_device *isp)
if (fwnode && fwnode->secondary)
return 0;
- bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
- if (!bridge)
- return -ENOMEM;
-
- strscpy(bridge->csi2_node_name, "atomisp-csi2", sizeof(bridge->csi2_node_name));
- bridge->csi2_node.name = bridge->csi2_node_name;
-
- ret = software_node_register(&bridge->csi2_node);
- if (ret < 0) {
- dev_err(dev, "Failed to register the CSI2 HID node\n");
- goto err_free_bridge;
- }
-
- /*
- * Map the lane arrangement, which is fixed for the ISP2 (meaning we
- * only need one, rather than one per sensor). We include it as a
- * member of the bridge struct rather than a global variable so
- * that it survives if the module is unloaded along with the rest of
- * the struct.
- */
- for (i = 0; i < CSI2_MAX_LANES; i++)
- bridge->data_lanes[i] = i + 1;
-
- ret = atomisp_csi2_connect_sensors(bridge, isp);
- if (ret || bridge->n_sensors == 0)
- goto err_unregister_csi2;
-
- fwnode = software_node_fwnode(&bridge->csi2_node);
- if (!fwnode) {
- dev_err(dev, "Error getting fwnode from csi2 software_node\n");
- ret = -ENODEV;
- goto err_unregister_sensors;
- }
-
- set_secondary_fwnode(dev, fwnode);
-
- return 0;
-
-err_unregister_sensors:
- atomisp_csi2_unregister_sensors(bridge);
-err_unregister_csi2:
- software_node_unregister(&bridge->csi2_node);
-err_free_bridge:
- kfree(bridge);
-
- return ret;
+ return ipu_bridge_init(dev, atomisp_csi2_parse_sensor_fwnode);
}
/******* V4L2 sub-device asynchronous registration callbacks***********/
struct sensor_async_subdev {
- struct v4l2_async_subdev asd;
+ struct v4l2_async_connection asd;
int port;
};
@@ -777,10 +614,11 @@ struct sensor_async_subdev {
/* .bound() notifier callback when a match is found */
static int atomisp_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct atomisp_device *isp = notifier_to_atomisp(notifier);
struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
+ int ret;
if (s_asd->port >= ATOMISP_CAMERA_NR_PORTS) {
dev_err(isp->dev, "port %d not supported\n", s_asd->port);
@@ -792,6 +630,10 @@ static int atomisp_notifier_bound(struct v4l2_async_notifier *notifier,
return -EBUSY;
}
+ ret = ipu_bridge_instantiate_vcm(sd->dev);
+ if (ret)
+ return ret;
+
isp->sensor_subdevs[s_asd->port] = sd;
return 0;
}
@@ -799,7 +641,7 @@ static int atomisp_notifier_bound(struct v4l2_async_notifier *notifier,
/* The .unbind callback */
static void atomisp_notifier_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct atomisp_device *isp = notifier_to_atomisp(notifier);
struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
@@ -825,7 +667,7 @@ int atomisp_csi2_bridge_parse_firmware(struct atomisp_device *isp)
{
int i, mipi_port, ret;
- v4l2_async_nf_init(&isp->notifier);
+ v4l2_async_nf_init(&isp->notifier, &isp->v4l2_dev);
isp->notifier.ops = &atomisp_async_ops;
for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index c43b916a006e..0d0329f5e4ad 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -1508,7 +1508,7 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
isp->firmware = NULL;
isp->css_env.isp_css_fw.data = NULL;
- err = v4l2_async_nf_register(&isp->v4l2_dev, &isp->notifier);
+ err = v4l2_async_nf_register(&isp->notifier);
if (err) {
dev_err(isp->dev, "failed to register async notifier : %d\n", err);
goto css_init_fail;
@@ -1615,3 +1615,4 @@ MODULE_AUTHOR("Wen Wang <wen.w.wang@intel.com>");
MODULE_AUTHOR("Xiaolin Zhang <xiaolin.zhang@intel.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel ATOM Platform ISP Driver");
+MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);
diff --git a/drivers/staging/media/atomisp/pci/sh_css_mipi.c b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
index b20acaab0595..ced21dedf7ac 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_mipi.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_mipi.c
@@ -351,15 +351,6 @@ allocate_mipi_frames(struct ia_css_pipe *pipe,
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"allocate_mipi_frames(%p) enter:\n", pipe);
- assert(pipe);
- assert(pipe->stream);
- if ((!pipe) || (!pipe->stream)) {
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
- "allocate_mipi_frames(%p) exit: pipe or stream is null.\n",
- pipe);
- return -EINVAL;
- }
-
if (IS_ISP2401 && pipe->stream->config.online) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"allocate_mipi_frames(%p) exit: no buffers needed for 2401 pipe mode.\n",
@@ -557,13 +548,6 @@ send_mipi_frames(struct ia_css_pipe *pipe)
IA_CSS_ENTER_PRIVATE("pipe=%p", pipe);
- assert(pipe);
- assert(pipe->stream);
- if (!pipe || !pipe->stream) {
- IA_CSS_ERROR("pipe or stream is null");
- return -EINVAL;
- }
-
/* multi stream video needs mipi buffers */
/* nothing to be done in other cases. */
if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
diff --git a/drivers/staging/media/atomisp/pci/sh_css_param_dvs.c b/drivers/staging/media/atomisp/pci/sh_css_param_dvs.c
index ff0082d02af3..5174bc210ae1 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_param_dvs.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_param_dvs.c
@@ -202,9 +202,6 @@ generate_dvs_6axis_table_from_config(struct ia_css_dvs_6axis_config
void
free_dvs_6axis_table(struct ia_css_dvs_6axis_config **dvs_6axis_config)
{
- assert(dvs_6axis_config);
- assert(*dvs_6axis_config);
-
if ((dvs_6axis_config) && (*dvs_6axis_config)) {
IA_CSS_ENTER_PRIVATE("dvs_6axis_config %p", (*dvs_6axis_config));
if ((*dvs_6axis_config)->xcoords_y) {
diff --git a/drivers/staging/media/atomisp/pci/sh_css_sp.c b/drivers/staging/media/atomisp/pci/sh_css_sp.c
index 297e1b981720..f35c745c22c0 100644
--- a/drivers/staging/media/atomisp/pci/sh_css_sp.c
+++ b/drivers/staging/media/atomisp/pci/sh_css_sp.c
@@ -51,6 +51,7 @@
#include "ia_css_event.h"
#include "mmu_device.h"
#include "ia_css_spctrl.h"
+#include "atomisp_internal.h"
#ifndef offsetof
#define offsetof(T, x) ((unsigned int)&(((T *)0)->x))
@@ -1212,14 +1213,15 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
struct ia_css_binary *first_binary = NULL;
struct ia_css_pipe *pipe = NULL;
unsigned int num;
-
enum ia_css_pipe_id pipe_id = id;
unsigned int thread_id;
u8 if_config_index, tmp_if_config_index;
- assert(me);
-
- assert(me->stages);
+ if (!me->stages) {
+ dev_err(atomisp_dev, "%s called on a pipeline without stages\n",
+ __func__);
+ return; /* FIXME should be able to return an error */
+ }
first_binary = me->stages->binary;
@@ -1252,8 +1254,8 @@ sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
} /* if (first_binary != NULL) */
/* Signal the host immediately after start for SP_ISYS_COPY only */
- if ((me->num_stages == 1) && me->stages &&
- (me->stages->sp_func == IA_CSS_PIPELINE_ISYS_COPY))
+ if (me->num_stages == 1 &&
+ me->stages->sp_func == IA_CSS_PIPELINE_ISYS_COPY)
sh_css_sp_group.config.no_isp_sync = true;
/* Init stage data */
diff --git a/drivers/staging/media/deprecated/atmel/atmel-isc-base.c b/drivers/staging/media/deprecated/atmel/atmel-isc-base.c
index 61c5afa58142..f5d963904201 100644
--- a/drivers/staging/media/deprecated/atmel/atmel-isc-base.c
+++ b/drivers/staging/media/deprecated/atmel/atmel-isc-base.c
@@ -1727,7 +1727,7 @@ static int isc_ctrl_init(struct isc_device *isc)
static int isc_async_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct isc_device *isc = container_of(notifier->v4l2_dev,
struct isc_device, v4l2_dev);
@@ -1746,7 +1746,7 @@ static int isc_async_bound(struct v4l2_async_notifier *notifier,
static void isc_async_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct isc_device *isc = container_of(notifier->v4l2_dev,
struct isc_device, v4l2_dev);
diff --git a/drivers/staging/media/deprecated/atmel/atmel-isc.h b/drivers/staging/media/deprecated/atmel/atmel-isc.h
index dfc030b5a08f..31767ea74be6 100644
--- a/drivers/staging/media/deprecated/atmel/atmel-isc.h
+++ b/drivers/staging/media/deprecated/atmel/atmel-isc.h
@@ -44,7 +44,7 @@ struct isc_buffer {
struct isc_subdev_entity {
struct v4l2_subdev *sd;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct device_node *epn;
struct v4l2_async_notifier notifier;
diff --git a/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c b/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c
index cc86ebcc76af..31b2b48085c5 100644
--- a/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c
+++ b/drivers/staging/media/deprecated/atmel/atmel-sama5d2-isc.c
@@ -503,15 +503,15 @@ static int atmel_isc_probe(struct platform_device *pdev)
}
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct fwnode_handle *fwnode =
of_fwnode_handle(subdev_entity->epn);
- v4l2_async_nf_init(&subdev_entity->notifier);
+ v4l2_async_nf_init(&subdev_entity->notifier, &isc->v4l2_dev);
asd = v4l2_async_nf_add_fwnode_remote(&subdev_entity->notifier,
fwnode,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
of_node_put(subdev_entity->epn);
subdev_entity->epn = NULL;
@@ -523,8 +523,7 @@ static int atmel_isc_probe(struct platform_device *pdev)
subdev_entity->notifier.ops = &atmel_isc_async_ops;
- ret = v4l2_async_nf_register(&isc->v4l2_dev,
- &subdev_entity->notifier);
+ ret = v4l2_async_nf_register(&subdev_entity->notifier);
if (ret) {
dev_err(dev, "fail to register async notifier\n");
goto cleanup_subdev;
diff --git a/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c b/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c
index 68ef3374d25e..020034f631f5 100644
--- a/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c
+++ b/drivers/staging/media/deprecated/atmel/atmel-sama7g5-isc.c
@@ -493,15 +493,15 @@ static int microchip_xisc_probe(struct platform_device *pdev)
}
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct fwnode_handle *fwnode =
of_fwnode_handle(subdev_entity->epn);
- v4l2_async_nf_init(&subdev_entity->notifier);
+ v4l2_async_nf_init(&subdev_entity->notifier, &isc->v4l2_dev);
asd = v4l2_async_nf_add_fwnode_remote(&subdev_entity->notifier,
fwnode,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
of_node_put(subdev_entity->epn);
subdev_entity->epn = NULL;
@@ -513,8 +513,7 @@ static int microchip_xisc_probe(struct platform_device *pdev)
subdev_entity->notifier.ops = &atmel_isc_async_ops;
- ret = v4l2_async_nf_register(&isc->v4l2_dev,
- &subdev_entity->notifier);
+ ret = v4l2_async_nf_register(&subdev_entity->notifier);
if (ret) {
dev_err(dev, "fail to register async notifier\n");
goto cleanup_subdev;
diff --git a/drivers/staging/media/imx/Kconfig b/drivers/staging/media/imx/Kconfig
index 21fd79515042..426310e1ea5b 100644
--- a/drivers/staging/media/imx/Kconfig
+++ b/drivers/staging/media/imx/Kconfig
@@ -1,10 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
config VIDEO_IMX_MEDIA
- tristate "i.MX5/6 V4L2 media core driver"
+ tristate "i.MX5/6 V4L2 media drivers"
depends on ARCH_MXC || COMPILE_TEST
depends on HAS_DMA
depends on VIDEO_DEV
depends on VIDEO_DEV
+ depends on IMX_IPUV3_CORE
select MEDIA_CONTROLLER
select V4L2_FWNODE
select V4L2_MEM2MEM_DEV
@@ -12,26 +13,4 @@ config VIDEO_IMX_MEDIA
select VIDEO_V4L2_SUBDEV_API
help
Say yes here to enable support for video4linux media controller
- driver for the i.MX5/6 SOC.
-
-if VIDEO_IMX_MEDIA
-menu "i.MX5/6/7/8 Media Sub devices"
-
-config VIDEO_IMX_CSI
- tristate "i.MX5/6 Camera Sensor Interface driver"
- depends on IMX_IPUV3_CORE
- default y
- help
- A video4linux camera sensor interface driver for i.MX5/6.
-endmenu
-endif
-
-config VIDEO_IMX8MQ_MIPI_CSI2
- tristate "NXP i.MX8MQ MIPI CSI-2 receiver"
- depends on ARCH_MXC || COMPILE_TEST
- depends on VIDEO_DEV
- select MEDIA_CONTROLLER
- select V4L2_FWNODE
- select VIDEO_V4L2_SUBDEV_API
- help
- V4L2 driver for the MIPI CSI-2 receiver found in the i.MX8MQ SoC.
+ drivers for the i.MX5/6 SOC.
diff --git a/drivers/staging/media/imx/Makefile b/drivers/staging/media/imx/Makefile
index 906a422aa656..330e0825f506 100644
--- a/drivers/staging/media/imx/Makefile
+++ b/drivers/staging/media/imx/Makefile
@@ -9,9 +9,6 @@ imx6-media-objs := imx-media-dev.o imx-media-internal-sd.o \
imx6-media-csi-objs := imx-media-csi.o imx-media-fim.o
obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx-media-common.o
-
-obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-media.o
-obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-media-csi.o
-obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-mipi-csi2.o
-
-obj-$(CONFIG_VIDEO_IMX8MQ_MIPI_CSI2) += imx8mq-mipi-csi2.o
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx6-media.o
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx6-media-csi.o
+obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx6-mipi-csi2.o
diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
index 4364df27c6d2..4846078315ff 100644
--- a/drivers/staging/media/imx/imx-media-capture.c
+++ b/drivers/staging/media/imx/imx-media-capture.c
@@ -7,7 +7,6 @@
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index 097171bb930d..dda1ebc34692 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -1892,7 +1892,7 @@ static const struct v4l2_subdev_internal_ops csi_internal_ops = {
static int imx_csi_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct csi_priv *priv = notifier_to_dev(notifier);
struct media_pad *sink = &priv->sd.entity.pads[CSI_SINK_PAD];
@@ -1913,12 +1913,12 @@ static const struct v4l2_async_notifier_operations csi_notify_ops = {
static int imx_csi_async_register(struct csi_priv *priv)
{
- struct v4l2_async_subdev *asd = NULL;
+ struct v4l2_async_connection *asd = NULL;
struct fwnode_handle *ep;
unsigned int port;
int ret;
- v4l2_async_nf_init(&priv->notifier);
+ v4l2_async_subdev_nf_init(&priv->notifier, &priv->sd);
/* get this CSI's port id */
ret = fwnode_property_read_u32(dev_fwnode(priv->dev), "reg", &port);
@@ -1930,7 +1930,7 @@ static int imx_csi_async_register(struct csi_priv *priv)
FWNODE_GRAPH_ENDPOINT_NEXT);
if (ep) {
asd = v4l2_async_nf_add_fwnode_remote(&priv->notifier, ep,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
fwnode_handle_put(ep);
@@ -1944,7 +1944,7 @@ static int imx_csi_async_register(struct csi_priv *priv)
priv->notifier.ops = &csi_notify_ops;
- ret = v4l2_async_subdev_nf_register(&priv->sd, &priv->notifier);
+ ret = v4l2_async_nf_register(&priv->notifier);
if (ret)
return ret;
diff --git a/drivers/staging/media/imx/imx-media-dev-common.c b/drivers/staging/media/imx/imx-media-dev-common.c
index 991820a8500f..0d0ee8627a2d 100644
--- a/drivers/staging/media/imx/imx-media-dev-common.c
+++ b/drivers/staging/media/imx/imx-media-dev-common.c
@@ -6,8 +6,6 @@
* Copyright (c) 2016 Mentor Graphics Inc.
*/
-#include <linux/of_graph.h>
-#include <linux/of_platform.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
@@ -367,7 +365,7 @@ struct imx_media_dev *imx_media_dev_init(struct device *dev,
INIT_LIST_HEAD(&imxmd->vdev_list);
- v4l2_async_nf_init(&imxmd->notifier);
+ v4l2_async_nf_init(&imxmd->notifier, &imxmd->v4l2_dev);
return imxmd;
@@ -384,14 +382,14 @@ int imx_media_dev_notifier_register(struct imx_media_dev *imxmd,
int ret;
/* no subdevs? just bail */
- if (list_empty(&imxmd->notifier.asd_list)) {
+ if (list_empty(&imxmd->notifier.waiting_list)) {
v4l2_err(&imxmd->v4l2_dev, "no subdevs\n");
return -ENODEV;
}
/* prepare the async subdev notifier and register it */
imxmd->notifier.ops = ops ? ops : &imx_media_notifier_ops;
- ret = v4l2_async_nf_register(&imxmd->v4l2_dev, &imxmd->notifier);
+ ret = v4l2_async_nf_register(&imxmd->notifier);
if (ret) {
v4l2_err(&imxmd->v4l2_dev,
"v4l2_async_nf_register failed with %d\n", ret);
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index c80113905069..be54dca11465 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -20,7 +20,7 @@ static inline struct imx_media_dev *notifier2dev(struct v4l2_async_notifier *n)
/* async subdev bound notifier */
static int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct imx_media_dev *imxmd = notifier2dev(notifier);
int ret;
diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
index 92a99010c150..118bff988bc7 100644
--- a/drivers/staging/media/imx/imx-media-of.c
+++ b/drivers/staging/media/imx/imx-media-of.c
@@ -19,7 +19,7 @@
static int imx_media_of_add_csi(struct imx_media_dev *imxmd,
struct device_node *csi_np)
{
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
int ret = 0;
if (!of_device_is_available(csi_np)) {
@@ -31,7 +31,7 @@ static int imx_media_of_add_csi(struct imx_media_dev *imxmd,
/* add CSI fwnode to async notifier */
asd = v4l2_async_nf_add_fwnode(&imxmd->notifier,
of_fwnode_handle(csi_np),
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
if (ret == -EEXIST)
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index ab565b4e29ec..b2d8476d83a0 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -636,7 +636,7 @@ static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
static int csi2_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct csi2_dev *csi2 = notifier_to_dev(notifier);
struct media_pad *sink = &csi2->sd.entity.pads[CSI2_SINK_PAD];
@@ -659,7 +659,7 @@ static int csi2_notify_bound(struct v4l2_async_notifier *notifier,
static void csi2_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct csi2_dev *csi2 = notifier_to_dev(notifier);
@@ -676,11 +676,11 @@ static int csi2_async_register(struct csi2_dev *csi2)
struct v4l2_fwnode_endpoint vep = {
.bus_type = V4L2_MBUS_CSI2_DPHY,
};
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct fwnode_handle *ep;
int ret;
- v4l2_async_nf_init(&csi2->notifier);
+ v4l2_async_subdev_nf_init(&csi2->notifier, &csi2->sd);
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi2->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
@@ -697,7 +697,7 @@ static int csi2_async_register(struct csi2_dev *csi2)
dev_dbg(csi2->dev, "flags: 0x%08x\n", vep.bus.mipi_csi2.flags);
asd = v4l2_async_nf_add_fwnode_remote(&csi2->notifier, ep,
- struct v4l2_async_subdev);
+ struct v4l2_async_connection);
fwnode_handle_put(ep);
if (IS_ERR(asd))
@@ -705,7 +705,7 @@ static int csi2_async_register(struct csi2_dev *csi2)
csi2->notifier.ops = &csi2_notify_ops;
- ret = v4l2_async_subdev_nf_register(&csi2->sd, &csi2->notifier);
+ ret = v4l2_async_nf_register(&csi2->notifier);
if (ret)
return ret;
diff --git a/drivers/staging/media/ipu3/ipu3-css.c b/drivers/staging/media/ipu3/ipu3-css.c
index 8c70497d744c..9c10f1474c35 100644
--- a/drivers/staging/media/ipu3/ipu3-css.c
+++ b/drivers/staging/media/ipu3/ipu3-css.c
@@ -1193,14 +1193,14 @@ static int imgu_css_binary_preallocate(struct imgu_css *css, unsigned int pipe)
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
if (!imgu_dmamap_alloc(imgu,
- &css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].
- mem[i], CSS_BDS_SIZE))
+ &css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i],
+ CSS_BDS_SIZE))
goto out_of_memory;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
if (!imgu_dmamap_alloc(imgu,
- &css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].
- mem[i], CSS_GDC_SIZE))
+ &css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].mem[i],
+ CSS_GDC_SIZE))
goto out_of_memory;
return 0;
@@ -1428,13 +1428,11 @@ static int imgu_css_map_init(struct imgu_css *css, unsigned int pipe)
for (p = 0; p < IPU3_CSS_PIPE_ID_NUM; p++)
for (i = 0; i < IMGU_ABI_MAX_STAGES; i++) {
if (!imgu_dmamap_alloc(imgu,
- &css_pipe->
- xmem_sp_stage_ptrs[p][i],
+ &css_pipe->xmem_sp_stage_ptrs[p][i],
sizeof(struct imgu_abi_sp_stage)))
return -ENOMEM;
if (!imgu_dmamap_alloc(imgu,
- &css_pipe->
- xmem_isp_stage_ptrs[p][i],
+ &css_pipe->xmem_isp_stage_ptrs[p][i],
sizeof(struct imgu_abi_isp_stage)))
return -ENOMEM;
}
diff --git a/drivers/staging/media/meson/vdec/esparser.c b/drivers/staging/media/meson/vdec/esparser.c
index 7b15fc54efe4..4632346f04a9 100644
--- a/drivers/staging/media/meson/vdec/esparser.c
+++ b/drivers/staging/media/meson/vdec/esparser.c
@@ -11,7 +11,6 @@
#include <linux/ioctl.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/interrupt.h>
#include <media/videobuf2-dma-contig.h>
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
index 5ca4b1200831..219185aaa588 100644
--- a/drivers/staging/media/meson/vdec/vdec.c
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -4,7 +4,7 @@
* Author: Maxime Jourdan <mjourdan@baylibre.com>
*/
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
diff --git a/drivers/staging/media/meson/vdec/vdec_platform.c b/drivers/staging/media/meson/vdec/vdec_platform.c
index 88c9d72e1c83..70c9fd7c8bc5 100644
--- a/drivers/staging/media/meson/vdec/vdec_platform.c
+++ b/drivers/staging/media/meson/vdec/vdec_platform.c
@@ -280,3 +280,12 @@ const struct vdec_platform vdec_platform_sm1 = {
.num_formats = ARRAY_SIZE(vdec_formats_sm1),
.revision = VDEC_REVISION_SM1,
};
+
+MODULE_FIRMWARE("meson/vdec/g12a_h264.bin");
+MODULE_FIRMWARE("meson/vdec/g12a_vp9.bin");
+MODULE_FIRMWARE("meson/vdec/gxbb_h264.bin");
+MODULE_FIRMWARE("meson/vdec/gxl_h264.bin");
+MODULE_FIRMWARE("meson/vdec/gxl_mpeg12.bin");
+MODULE_FIRMWARE("meson/vdec/gxl_vp9.bin");
+MODULE_FIRMWARE("meson/vdec/gxm_h264.bin");
+MODULE_FIRMWARE("meson/vdec/sm1_vp9_mmu.bin");
diff --git a/drivers/staging/media/rkvdec/rkvdec-vp9.c b/drivers/staging/media/rkvdec/rkvdec-vp9.c
index cfae99b40ccb..0e7e16f20eeb 100644
--- a/drivers/staging/media/rkvdec/rkvdec-vp9.c
+++ b/drivers/staging/media/rkvdec/rkvdec-vp9.c
@@ -227,7 +227,6 @@ static void init_intra_only_probs(struct rkvdec_ctx *ctx,
}
}
}
-
}
for (i = 0; i < sizeof(v4l2_vp9_kf_uv_mode_prob); ++i) {
diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
index 134e2b9fa7d9..84a41792cb4b 100644
--- a/drivers/staging/media/rkvdec/rkvdec.c
+++ b/drivers/staging/media/rkvdec/rkvdec.c
@@ -120,7 +120,7 @@ static const struct rkvdec_coded_fmt_desc rkvdec_coded_fmts[] = {
.max_width = 4096,
.step_width = 16,
.min_height = 48,
- .max_height = 2304,
+ .max_height = 2560,
.step_height = 16,
},
.ctrls = &rkvdec_h264_ctrls,
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index fa86a658fdc6..b696bf884cbd 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -14,8 +14,8 @@
*/
#include <linux/platform_device.h>
+#include <linux/of.h>
#include <linux/of_reserved_mem.h>
-#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c
index 0dc75adbd9d8..5c0a45394cba 100644
--- a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp.c
@@ -10,7 +10,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -541,7 +540,7 @@ static struct platform_driver sun6i_isp_platform_driver = {
.remove_new = sun6i_isp_remove,
.driver = {
.name = SUN6I_ISP_NAME,
- .of_match_table = of_match_ptr(sun6i_isp_of_match),
+ .of_match_table = sun6i_isp_of_match,
.pm = &sun6i_isp_pm_ops,
},
};
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.c b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.c
index 1ca4673df2b3..ccbb530aa2e2 100644
--- a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.c
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.c
@@ -395,7 +395,7 @@ static int sun6i_isp_proc_link(struct sun6i_isp_device *isp_dev,
static int sun6i_isp_proc_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *remote_subdev,
- struct v4l2_async_subdev *async_subdev)
+ struct v4l2_async_connection *async_subdev)
{
struct sun6i_isp_device *isp_dev =
container_of(notifier, struct sun6i_isp_device, proc.notifier);
@@ -536,7 +536,7 @@ int sun6i_isp_proc_setup(struct sun6i_isp_device *isp_dev)
/* V4L2 Async */
- v4l2_async_nf_init(notifier);
+ v4l2_async_nf_init(notifier, v4l2_dev);
notifier->ops = &sun6i_isp_proc_notifier_ops;
sun6i_isp_proc_source_setup(isp_dev, &proc->source_csi0,
@@ -544,7 +544,7 @@ int sun6i_isp_proc_setup(struct sun6i_isp_device *isp_dev)
sun6i_isp_proc_source_setup(isp_dev, &proc->source_csi1,
SUN6I_ISP_PORT_CSI1);
- ret = v4l2_async_nf_register(v4l2_dev, notifier);
+ ret = v4l2_async_nf_register(notifier);
if (ret) {
v4l2_err(v4l2_dev,
"failed to register v4l2 async notifier: %d\n", ret);
diff --git a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.h b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.h
index c5c274e21ad5..db6738a39147 100644
--- a/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.h
+++ b/drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_proc.h
@@ -34,7 +34,7 @@ struct sun6i_isp_proc_source {
};
struct sun6i_isp_proc_async_subdev {
- struct v4l2_async_subdev async_subdev;
+ struct v4l2_async_connection async_subdev;
struct sun6i_isp_proc_source *source;
};
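Drivers that carry per-link state embed the async object as the first member of their own structure (the v4l2_async_nf_add_* helpers allocate sizeof(type) and expect the connection to come first) and recover it with container_of() in the bound callback; the rename from v4l2_async_subdev to v4l2_async_connection keeps that pattern intact. A minimal sketch with hypothetical names:

    struct example_async_conn {
            struct v4l2_async_connection asc;  /* must be the first member */
            struct example_source *source;
    };

    static int example_notifier_bound(struct v4l2_async_notifier *notifier,
                                      struct v4l2_subdev *remote_sd,
                                      struct v4l2_async_connection *asc)
    {
            struct example_async_conn *conn =
                    container_of(asc, struct example_async_conn, asc);

            conn->source->subdev = remote_sd;
            return 0;
    }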
diff --git a/drivers/staging/media/tegra-video/csi.c b/drivers/staging/media/tegra-video/csi.c
index 052172017b3b..e79657920dc8 100644
--- a/drivers/staging/media/tegra-video/csi.c
+++ b/drivers/staging/media/tegra-video/csi.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
index 79284c3b6cae..e98b3010520e 100644
--- a/drivers/staging/media/tegra-video/vi.c
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -11,8 +11,8 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_graph.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
@@ -40,7 +40,7 @@
* @subdev: V4L2 subdev
*/
struct tegra_vi_graph_entity {
- struct v4l2_async_subdev asd;
+ struct v4l2_async_connection asd;
struct media_entity *entity;
struct v4l2_subdev *subdev;
};
@@ -58,7 +58,7 @@ to_tegra_channel_buffer(struct vb2_v4l2_buffer *vb)
}
static inline struct tegra_vi_graph_entity *
-to_tegra_vi_graph_entity(struct v4l2_async_subdev *asd)
+to_tegra_vi_graph_entity(struct v4l2_async_connection *asd)
{
return container_of(asd, struct tegra_vi_graph_entity, asd);
}
@@ -1181,7 +1181,7 @@ static int tegra_channel_init(struct tegra_vi_channel *chan)
}
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
- v4l2_async_nf_init(&chan->notifier);
+ v4l2_async_nf_init(&chan->notifier, &vid->v4l2_dev);
return 0;
@@ -1462,9 +1462,9 @@ tegra_vi_graph_find_entity(struct tegra_vi_channel *chan,
const struct fwnode_handle *fwnode)
{
struct tegra_vi_graph_entity *entity;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
- list_for_each_entry(asd, &chan->notifier.asd_list, asd_list) {
+ list_for_each_entry(asd, &chan->notifier.done_list, asc_entry) {
entity = to_tegra_vi_graph_entity(asd);
if (entity->asd.match.fwnode == fwnode)
return entity;
@@ -1578,7 +1578,7 @@ create_link:
static int tegra_vi_graph_notify_complete(struct v4l2_async_notifier *notifier)
{
struct tegra_vi_graph_entity *entity;
- struct v4l2_async_subdev *asd;
+ struct v4l2_async_connection *asd;
struct v4l2_subdev *subdev;
struct tegra_vi_channel *chan;
struct tegra_vi *vi;
@@ -1608,7 +1608,7 @@ static int tegra_vi_graph_notify_complete(struct v4l2_async_notifier *notifier)
}
/* create links between the entities */
- list_for_each_entry(asd, &chan->notifier.asd_list, asd_list) {
+ list_for_each_entry(asd, &chan->notifier.done_list, asc_entry) {
entity = to_tegra_vi_graph_entity(asd);
ret = tegra_vi_graph_build(chan, entity);
if (ret < 0)
@@ -1651,7 +1651,7 @@ unregister_video:
static int tegra_vi_graph_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+ struct v4l2_async_connection *asd)
{
struct tegra_vi_graph_entity *entity;
struct tegra_vi *vi;
@@ -1748,7 +1748,6 @@ cleanup:
static int tegra_vi_graph_init(struct tegra_vi *vi)
{
- struct tegra_video_device *vid = dev_get_drvdata(vi->client.host);
struct tegra_vi_channel *chan;
struct fwnode_handle *fwnode = dev_fwnode(vi->dev);
int ret;
@@ -1775,11 +1774,11 @@ static int tegra_vi_graph_init(struct tegra_vi *vi)
ret = tegra_vi_graph_parse_one(chan, remote);
fwnode_handle_put(remote);
- if (ret < 0 || list_empty(&chan->notifier.asd_list))
+ if (ret < 0 || list_empty(&chan->notifier.waiting_list))
continue;
chan->notifier.ops = &tegra_vi_async_ops;
- ret = v4l2_async_nf_register(&vid->v4l2_dev, &chan->notifier);
+ ret = v4l2_async_nf_register(&chan->notifier);
if (ret < 0) {
dev_err(vi->dev,
"failed to register channel %d notifier: %d\n",
diff --git a/drivers/staging/media/tegra-video/vip.c b/drivers/staging/media/tegra-video/vip.c
index a1ab886acc18..191ecd19a6a7 100644
--- a/drivers/staging/media/tegra-video/vip.c
+++ b/drivers/staging/media/tegra-video/vip.c
@@ -13,13 +13,13 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-fwnode.h>
#include "vip.h"
+#include "video.h"
static inline struct tegra_vip *host1x_client_to_vip(struct host1x_client *client)
{
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index 44d3252d4612..ed6a9cc88541 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -8,7 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
-#include <linux/of_platform.h>
#include <linux/printk.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -21,6 +20,7 @@
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/most.h>
+#include <linux/of.h>
#include "hal.h"
#include "errors.h"
#include "sysfs.h"
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index 220e157d4a5e..58887619b83f 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -31,7 +31,6 @@
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index e5925899402c..58e90b7772ef 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -59,10 +59,6 @@ static void _rtl92e_update_msr(struct net_device *dev)
if (priv->rtllib->link_state == MAC80211_LINKED)
msr |= MSR_LINK_ADHOC;
break;
- case IW_MODE_MASTER:
- if (priv->rtllib->link_state == MAC80211_LINKED)
- msr |= MSR_LINK_MASTER;
- break;
default:
break;
}
@@ -419,10 +415,7 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
rtl92e_init_adaptive_rate(dev);
- if (priv->reg_chnl_plan == 0xf)
- priv->chnl_plan = priv->eeprom_chnl_plan;
- else
- priv->chnl_plan = priv->reg_chnl_plan;
+ priv->chnl_plan = priv->eeprom_chnl_plan;
switch (priv->eeprom_customer_id) {
case EEPROM_CID_NetCore:
@@ -1915,7 +1908,7 @@ void rtl92e_enable_tx(struct net_device *dev)
rtl92e_writel(dev, TX_DESC_BASE[i], priv->tx_ring[i].dma);
}
-void rtl92e_ack_irq(struct net_device *dev, u32 *p_inta, u32 *p_intb)
+void rtl92e_ack_irq(struct net_device *dev, u32 *p_inta)
{
*p_inta = rtl92e_readl(dev, ISR);
rtl92e_writel(dev, ISR, *p_inta);
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
index fa3b71dbb091..11366fda4ec3 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
@@ -13,7 +13,7 @@ bool rtl92e_is_halfn_supported_by_ap(struct net_device *dev);
bool rtl92e_get_nmode_support_by_sec(struct net_device *dev);
bool rtl92e_is_tx_stuck(struct net_device *dev);
bool rtl92e_is_rx_stuck(struct net_device *dev);
-void rtl92e_ack_irq(struct net_device *dev, u32 *p_inta, u32 *p_intb);
+void rtl92e_ack_irq(struct net_device *dev, u32 *p_inta);
void rtl92e_enable_rx(struct net_device *dev);
void rtl92e_enable_tx(struct net_device *dev);
void rtl92e_enable_irq(struct net_device *dev);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index 6d9e5c27017d..944cc73fb2b6 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -49,8 +49,7 @@ void rtl92e_enable_hw_security_config(struct net_device *dev)
}
void rtl92e_set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
- u16 KeyType, const u8 *MacAddr, u8 DefaultKey,
- u32 *KeyContent, u8 is_mesh)
+ u16 KeyType, const u8 *MacAddr, u32 *KeyContent)
{
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
@@ -58,14 +57,12 @@ void rtl92e_set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
if (EntryNo >= TOTAL_CAM_ENTRY)
return;
- if (!is_mesh) {
- ieee->swcamtable[EntryNo].bused = true;
- ieee->swcamtable[EntryNo].key_index = KeyIndex;
- ieee->swcamtable[EntryNo].key_type = KeyType;
- memcpy(ieee->swcamtable[EntryNo].macaddr, MacAddr, 6);
- ieee->swcamtable[EntryNo].useDK = DefaultKey;
- memcpy(ieee->swcamtable[EntryNo].key_buf, (u8 *)KeyContent, 16);
- }
+ ieee->swcamtable[EntryNo].bused = true;
+ ieee->swcamtable[EntryNo].key_index = KeyIndex;
+ ieee->swcamtable[EntryNo].key_type = KeyType;
+ memcpy(ieee->swcamtable[EntryNo].macaddr, MacAddr, 6);
+ ieee->swcamtable[EntryNo].useDK = 0;
+ memcpy(ieee->swcamtable[EntryNo].key_buf, (u8 *)KeyContent, 16);
}
void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
index 1ebd92e27441..bd33ef105107 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
@@ -19,8 +19,7 @@ void rtl92e_set_key(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
u16 KeyType, const u8 *MacAddr, u8 DefaultKey,
u32 *KeyContent);
void rtl92e_set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
- u16 KeyType, const u8 *MacAddr, u8 DefaultKey,
- u32 *KeyContent, u8 is_mesh);
+ u16 KeyType, const u8 *MacAddr, u32 *KeyContent);
void rtl92e_cam_restore(struct net_device *dev);
#endif
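The DefaultKey and is_mesh arguments were passed as 0 by the remaining callers, so the software CAM helper drops them and hard-codes useDK to 0; call sites shrink accordingly. A minimal sketch of the updated signature and one illustrative call (values as in the WEP path above):

    void rtl92e_set_swcam(struct net_device *dev, u8 EntryNo, u8 KeyIndex,
                          u16 KeyType, const u8 *MacAddr, u32 *KeyContent);

    /* e.g. installing a WEP104 key at index key_idx: */
    rtl92e_set_swcam(dev, key_idx, key_idx, KEY_TYPE_WEP104,
                     zero_addr[key_idx], hwkey);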
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 4447489a16ea..50eb8f3494ec 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -618,8 +618,6 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
(&priv->rtllib->pwr_save_ctrl);
bool init_status;
- priv->bdisable_nic = false;
-
priv->up = 1;
priv->rtllib->ieee_up = 1;
@@ -760,13 +758,11 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
priv->up_first_time = 1;
priv->blinked_ingpio = false;
priv->being_init_adapter = false;
- priv->bdisable_nic = false;
priv->txringcount = 64;
priv->rxbuffersize = 9100;
priv->rxringcount = MAX_RX_COUNT;
priv->irq_enabled = 0;
priv->chan = 1;
- priv->reg_chnl_plan = 0xf;
priv->rtllib->mode = WIRELESS_MODE_AUTO;
priv->rtllib->iw_mode = IW_MODE_INFRA;
priv->rtllib->net_promiscuous_md = false;
@@ -778,7 +774,6 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
priv->retry_data = DEFAULT_RETRY_DATA;
priv->rtllib->rts = DEFAULT_RTS_THRESHOLD;
priv->rtllib->rate = 110;
- priv->rtllib->short_slot = 1;
priv->promisc = (dev->flags & IFF_PROMISC) ? 1 : 0;
priv->bcck_in_ch14 = false;
priv->cck_present_attn = 0;
@@ -804,15 +799,9 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
priv->rtllib->iw_mode = IW_MODE_INFRA;
priv->rtllib->active_scan = 1;
priv->rtllib->be_scan_inprogress = false;
- priv->rtllib->modulation = RTLLIB_CCK_MODULATION |
- RTLLIB_OFDM_MODULATION;
- priv->rtllib->host_encrypt = 1;
- priv->rtllib->host_decrypt = 1;
priv->rtllib->fts = DEFAULT_FRAG_THRESHOLD;
- priv->card_type = PCI;
-
priv->fw_info = vzalloc(sizeof(struct rt_firmware));
if (!priv->fw_info)
netdev_err(dev,
@@ -1504,12 +1493,6 @@ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
int idx;
u32 fwinfo_size = 0;
- if (priv->bdisable_nic) {
- netdev_warn(dev, "%s: Nic is disabled! Can't tx packet.\n",
- __func__);
- return skb->len;
- }
-
priv->rtllib->bAwakePktSent = true;
fwinfo_size = sizeof(struct tx_fwinfo_8190pci);
@@ -1990,16 +1973,13 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
struct r8192_priv *priv = rtllib_priv(dev);
unsigned long flags;
u32 inta;
- u32 intb;
-
- intb = 0;
if (priv->irq_enabled == 0)
goto done;
spin_lock_irqsave(&priv->irq_th_lock, flags);
- rtl92e_ack_irq(dev, &inta, &intb);
+ rtl92e_ack_irq(dev, &inta);
if (!inta) {
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
@@ -2251,20 +2231,17 @@ bool rtl92e_enable_nic(struct net_device *dev)
if (!priv->up) {
netdev_warn(dev, "%s(): Driver is already down!\n", __func__);
- priv->bdisable_nic = false;
return false;
}
init_status = rtl92e_start_adapter(dev);
if (!init_status) {
netdev_warn(dev, "%s(): Initialization failed!\n", __func__);
- priv->bdisable_nic = false;
return false;
}
RT_CLEAR_PS_LEVEL(psc, RT_RF_OFF_LEVL_HALT_NIC);
rtl92e_irq_enable(dev);
- priv->bdisable_nic = false;
return init_status;
}
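With the second interrupt status word gone, rtl92e_ack_irq() reads and acknowledges only ISR, and the handler no longer declares the unused intb. A condensed sketch of the handler shape after this change (error paths and the dispatch body are elided for illustration):

    static irqreturn_t example_irq(int irq, void *netdev)
    {
            struct net_device *dev = netdev;
            struct r8192_priv *priv = rtllib_priv(dev);
            unsigned long flags;
            u32 inta;

            spin_lock_irqsave(&priv->irq_th_lock, flags);
            rtl92e_ack_irq(dev, &inta);      /* single status word now */
            spin_unlock_irqrestore(&priv->irq_th_lock, flags);

            if (!inta)
                    return IRQ_HANDLED;
            /* ... dispatch on inta bits ... */
            return IRQ_HANDLED;
    }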
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index ec9e454299a8..fa82a0667813 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -259,12 +259,6 @@ struct r8192_priv {
u8 polling_timer_on;
/**********************************************************/
-
- enum card_type {
- PCI, MINIPCI,
- CARDBUS, USB
- } card_type;
-
struct work_struct qos_activate;
short promisc;
@@ -322,13 +316,11 @@ struct r8192_priv {
bool tx_pwr_data_read_from_eeprom;
- u16 reg_chnl_plan;
u16 chnl_plan;
u8 hw_rf_off_action;
bool rf_change_in_progress;
bool set_rf_pwr_state_in_progress;
- bool bdisable_nic;
u8 cck_pwr_enl;
u16 tssi_13dBm;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 37c275cac40b..dbf765d601b3 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -277,8 +277,8 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
struct rt_hi_throughput *ht_info = priv->rtllib->ht_info;
struct rate_adaptive *pra = &priv->rate_adaptive;
- u32 currentRATR, targetRATR = 0;
- u32 LowRSSIThreshForRA = 0, HighRSSIThreshForRA = 0;
+ u32 current_ratr, target_ratr = 0;
+ u32 low_rssi_thresh_for_ra = 0, high_rssi_thresh_for_ra = 0;
bool bshort_gi_enabled = false;
static u8 ping_rssi_state;
@@ -319,30 +319,30 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
((bshort_gi_enabled) ? BIT31 : 0);
if (pra->ratr_state == DM_RATR_STA_HIGH) {
- HighRSSIThreshForRA = pra->high2low_rssi_thresh_for_ra;
- LowRSSIThreshForRA = (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) ?
+ high_rssi_thresh_for_ra = pra->high2low_rssi_thresh_for_ra;
+ low_rssi_thresh_for_ra = (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) ?
(pra->low_rssi_thresh_for_ra40M) : (pra->low_rssi_thresh_for_ra20M);
} else if (pra->ratr_state == DM_RATR_STA_LOW) {
- HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
- LowRSSIThreshForRA = (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) ?
+ high_rssi_thresh_for_ra = pra->high_rssi_thresh_for_ra;
+ low_rssi_thresh_for_ra = (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) ?
(pra->low2high_rssi_thresh_for_ra40M) : (pra->low2high_rssi_thresh_for_ra20M);
} else {
- HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
- LowRSSIThreshForRA = (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) ?
+ high_rssi_thresh_for_ra = pra->high_rssi_thresh_for_ra;
+ low_rssi_thresh_for_ra = (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) ?
(pra->low_rssi_thresh_for_ra40M) : (pra->low_rssi_thresh_for_ra20M);
}
if (priv->undecorated_smoothed_pwdb >=
- (long)HighRSSIThreshForRA) {
+ (long)high_rssi_thresh_for_ra) {
pra->ratr_state = DM_RATR_STA_HIGH;
- targetRATR = pra->upper_rssi_threshold_ratr;
+ target_ratr = pra->upper_rssi_threshold_ratr;
} else if (priv->undecorated_smoothed_pwdb >=
- (long)LowRSSIThreshForRA) {
+ (long)low_rssi_thresh_for_ra) {
pra->ratr_state = DM_RATR_STA_MIDDLE;
- targetRATR = pra->middle_rssi_threshold_ratr;
+ target_ratr = pra->middle_rssi_threshold_ratr;
} else {
pra->ratr_state = DM_RATR_STA_LOW;
- targetRATR = pra->low_rssi_threshold_ratr;
+ target_ratr = pra->low_rssi_threshold_ratr;
}
if (pra->ping_rssi_enable) {
@@ -352,7 +352,7 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
(long)pra->ping_rssi_thresh_for_ra) ||
ping_rssi_state) {
pra->ratr_state = DM_RATR_STA_LOW;
- targetRATR = pra->ping_rssi_ratr;
+ target_ratr = pra->ping_rssi_ratr;
ping_rssi_state = 1;
}
} else {
@@ -361,18 +361,18 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
}
if (priv->rtllib->GetHalfNmodeSupportByAPsHandler(dev))
- targetRATR &= 0xf00fffff;
+ target_ratr &= 0xf00fffff;
- currentRATR = rtl92e_readl(dev, RATR0);
- if (targetRATR != currentRATR) {
+ current_ratr = rtl92e_readl(dev, RATR0);
+ if (target_ratr != current_ratr) {
u32 ratr_value;
- ratr_value = targetRATR;
+ ratr_value = target_ratr;
ratr_value &= ~(RATE_ALL_OFDM_2SS);
rtl92e_writel(dev, RATR0, ratr_value);
rtl92e_writeb(dev, UFWP, 1);
- pra->last_ratr = targetRATR;
+ pra->last_ratr = target_ratr;
}
} else {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index 1c49d5da68eb..598bfc0ff3d1 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -118,8 +118,7 @@ void rtl92e_ips_enter(struct net_device *dev)
rt_state = priv->rtllib->rf_power_state;
if (rt_state == rf_on && !psc->bSwRfProcessing &&
- (priv->rtllib->link_state != MAC80211_LINKED) &&
- (priv->rtllib->iw_mode != IW_MODE_MASTER)) {
+ (priv->rtllib->link_state != MAC80211_LINKED)) {
psc->eInactivePowerState = rf_off;
_rtl92e_ps_update_rf_state(dev);
}
@@ -210,8 +209,7 @@ void rtl92e_leisure_ps_enter(struct net_device *dev)
if (!((priv->rtllib->iw_mode == IW_MODE_INFRA) &&
(priv->rtllib->link_state == MAC80211_LINKED))
- || (priv->rtllib->iw_mode == IW_MODE_ADHOC) ||
- (priv->rtllib->iw_mode == IW_MODE_MASTER))
+ || (priv->rtllib->iw_mode == IW_MODE_ADHOC))
return;
if (psc->bLeisurePs) {
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 88975dc804c6..189798852568 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -428,7 +428,7 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_BACKUP);
- rtllib_start_scan_syncro(priv->rtllib, 0);
+ rtllib_start_scan_syncro(priv->rtllib);
ieee->ScanOperationBackupHandler(ieee->dev, SCAN_OPT_RESTORE);
}
@@ -712,7 +712,7 @@ static int _rtl92e_wx_set_enc(struct net_device *dev,
rtl92e_set_key(dev, key_idx, key_idx, KEY_TYPE_WEP104,
zero_addr[key_idx], 0, hwkey);
rtl92e_set_swcam(dev, key_idx, key_idx, KEY_TYPE_WEP104,
- zero_addr[key_idx], 0, hwkey, 0);
+ zero_addr[key_idx], hwkey);
} else {
netdev_info(dev,
"wrong type in WEP, not WEP40 and WEP104\n");
@@ -857,21 +857,19 @@ static int _rtl92e_wx_set_encode_ext(struct net_device *dev,
if (ext->key_len == 13)
ieee->pairwise_key_type = alg = KEY_TYPE_WEP104;
rtl92e_set_key(dev, idx, idx, alg, zero, 0, key);
- rtl92e_set_swcam(dev, idx, idx, alg, zero, 0, key, 0);
+ rtl92e_set_swcam(dev, idx, idx, alg, zero, key);
} else if (group) {
ieee->group_key_type = alg;
rtl92e_set_key(dev, idx, idx, alg, broadcast_addr, 0,
key);
- rtl92e_set_swcam(dev, idx, idx, alg, broadcast_addr, 0,
- key, 0);
+ rtl92e_set_swcam(dev, idx, idx, alg, broadcast_addr, key);
} else {
if ((ieee->pairwise_key_type == KEY_TYPE_CCMP) &&
ieee->ht_info->bCurrentHTSupport)
rtl92e_writeb(dev, 0x173, 1);
rtl92e_set_key(dev, 4, idx, alg,
(u8 *)ieee->ap_mac_addr, 0, key);
- rtl92e_set_swcam(dev, 4, idx, alg,
- (u8 *)ieee->ap_mac_addr, 0, key, 0);
+ rtl92e_set_swcam(dev, 4, idx, alg, (u8 *)ieee->ap_mac_addr, key);
}
}
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index acc19514bca6..0e3372868f97 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -10,51 +10,51 @@
#include "rtllib.h"
#include "rtl819x_BA.h"
-static void ActivateBAEntry(struct ba_record *pBA, u16 Time)
+static void activate_ba_entry(struct ba_record *pBA, u16 Time)
{
pBA->b_valid = true;
if (Time != 0)
mod_timer(&pBA->timer, jiffies + msecs_to_jiffies(Time));
}
-static void DeActivateBAEntry(struct rtllib_device *ieee, struct ba_record *pBA)
+static void deactivate_ba_entry(struct rtllib_device *ieee, struct ba_record *pBA)
{
pBA->b_valid = false;
del_timer_sync(&pBA->timer);
}
-static u8 TxTsDeleteBA(struct rtllib_device *ieee, struct tx_ts_record *pTxTs)
+static u8 tx_ts_delete_ba(struct rtllib_device *ieee, struct tx_ts_record *pTxTs)
{
struct ba_record *pAdmittedBa = &pTxTs->TxAdmittedBARecord;
struct ba_record *pPendingBa = &pTxTs->TxPendingBARecord;
u8 bSendDELBA = false;
if (pPendingBa->b_valid) {
- DeActivateBAEntry(ieee, pPendingBa);
+ deactivate_ba_entry(ieee, pPendingBa);
bSendDELBA = true;
}
if (pAdmittedBa->b_valid) {
- DeActivateBAEntry(ieee, pAdmittedBa);
+ deactivate_ba_entry(ieee, pAdmittedBa);
bSendDELBA = true;
}
return bSendDELBA;
}
-static u8 RxTsDeleteBA(struct rtllib_device *ieee, struct rx_ts_record *pRxTs)
+static u8 rx_ts_delete_ba(struct rtllib_device *ieee, struct rx_ts_record *pRxTs)
{
struct ba_record *pBa = &pRxTs->rx_admitted_ba_record;
u8 bSendDELBA = false;
if (pBa->b_valid) {
- DeActivateBAEntry(ieee, pBa);
+ deactivate_ba_entry(ieee, pBa);
bSendDELBA = true;
}
return bSendDELBA;
}
-void ResetBaEntry(struct ba_record *pBA)
+void rtllib_reset_ba_entry(struct ba_record *pBA)
{
pBA->b_valid = false;
pBA->ba_param_set.short_data = 0;
@@ -270,7 +270,7 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
rtllib_FlushRxTsPendingPkts(ieee, pTS);
- DeActivateBAEntry(ieee, pBA);
+ deactivate_ba_entry(ieee, pBA);
pBA->dialog_token = *pDialogToken;
pBA->ba_param_set = *pBaParamSet;
pBA->ba_timeout_value = *pBaTimeoutVal;
@@ -282,7 +282,7 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
else
pBA->ba_param_set.field.buffer_size = 32;
- ActivateBAEntry(pBA, 0);
+ activate_ba_entry(pBA, 0);
rtllib_send_ADDBARsp(ieee, dst, pBA, ADDBA_STATUS_SUCCESS);
return 0;
@@ -363,13 +363,13 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
netdev_dbg(ieee->dev,
"%s(): Recv ADDBA Rsp. BA is admitted! Status code:%X\n",
__func__, *pStatusCode);
- DeActivateBAEntry(ieee, pPendingBA);
+ deactivate_ba_entry(ieee, pPendingBA);
}
if (*pStatusCode == ADDBA_STATUS_SUCCESS) {
if (pBaParamSet->field.ba_policy == BA_POLICY_DELAYED) {
pTS->bAddBaReqDelayed = true;
- DeActivateBAEntry(ieee, pAdmittedBA);
+ deactivate_ba_entry(ieee, pAdmittedBA);
ReasonCode = DELBA_REASON_END_BA;
goto OnADDBARsp_Reject;
}
@@ -378,8 +378,8 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
pAdmittedBA->ba_timeout_value = *pBaTimeoutVal;
pAdmittedBA->ba_start_seq_ctrl = pPendingBA->ba_start_seq_ctrl;
pAdmittedBA->ba_param_set = *pBaParamSet;
- DeActivateBAEntry(ieee, pAdmittedBA);
- ActivateBAEntry(pAdmittedBA, *pBaTimeoutVal);
+ deactivate_ba_entry(ieee, pAdmittedBA);
+ activate_ba_entry(pAdmittedBA, *pBaTimeoutVal);
} else {
pTS->bAddBaReqDelayed = true;
pTS->bDisable_AddBa = true;
@@ -441,7 +441,7 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
return -1;
}
- RxTsDeleteBA(ieee, pRxTs);
+ rx_ts_delete_ba(ieee, pRxTs);
} else {
struct tx_ts_record *pTxTs;
@@ -456,20 +456,20 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
pTxTs->bAddBaReqInProgress = false;
pTxTs->bAddBaReqDelayed = false;
del_timer_sync(&pTxTs->TsAddBaTimer);
- TxTsDeleteBA(ieee, pTxTs);
+ tx_ts_delete_ba(ieee, pTxTs);
}
return 0;
}
-void TsInitAddBA(struct rtllib_device *ieee, struct tx_ts_record *pTS,
- u8 Policy, u8 bOverwritePending)
+void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *pTS,
+ u8 Policy, u8 bOverwritePending)
{
struct ba_record *pBA = &pTS->TxPendingBARecord;
if (pBA->b_valid && !bOverwritePending)
return;
- DeActivateBAEntry(ieee, pBA);
+ deactivate_ba_entry(ieee, pBA);
pBA->dialog_token++;
pBA->ba_param_set.field.amsdu_support = 0;
@@ -479,20 +479,20 @@ void TsInitAddBA(struct rtllib_device *ieee, struct tx_ts_record *pTS,
pBA->ba_timeout_value = 0;
pBA->ba_start_seq_ctrl.field.seq_num = (pTS->TxCurSeq + 3) % 4096;
- ActivateBAEntry(pBA, BA_SETUP_TIMEOUT);
+ activate_ba_entry(pBA, BA_SETUP_TIMEOUT);
rtllib_send_ADDBAReq(ieee, pTS->TsCommonInfo.Addr, pBA);
}
-void TsInitDelBA(struct rtllib_device *ieee,
- struct ts_common_info *pTsCommonInfo,
- enum tr_select TxRxSelect)
+void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
+ struct ts_common_info *pTsCommonInfo,
+ enum tr_select TxRxSelect)
{
if (TxRxSelect == TX_DIR) {
struct tx_ts_record *pTxTs =
(struct tx_ts_record *)pTsCommonInfo;
- if (TxTsDeleteBA(ieee, pTxTs))
+ if (tx_ts_delete_ba(ieee, pTxTs))
rtllib_send_DELBA(ieee, pTsCommonInfo->Addr,
(pTxTs->TxAdmittedBARecord.b_valid) ?
(&pTxTs->TxAdmittedBARecord) :
@@ -501,14 +501,14 @@ void TsInitDelBA(struct rtllib_device *ieee,
} else if (TxRxSelect == RX_DIR) {
struct rx_ts_record *pRxTs =
(struct rx_ts_record *)pTsCommonInfo;
- if (RxTsDeleteBA(ieee, pRxTs))
+ if (rx_ts_delete_ba(ieee, pRxTs))
rtllib_send_DELBA(ieee, pTsCommonInfo->Addr,
&pRxTs->rx_admitted_ba_record,
TxRxSelect, DELBA_REASON_END_BA);
}
}
-void BaSetupTimeOut(struct timer_list *t)
+void rtllib_ba_setup_timeout(struct timer_list *t)
{
struct tx_ts_record *pTxTs = from_timer(pTxTs, t,
TxPendingBARecord.timer);
@@ -518,26 +518,26 @@ void BaSetupTimeOut(struct timer_list *t)
pTxTs->TxPendingBARecord.b_valid = false;
}
-void TxBaInactTimeout(struct timer_list *t)
+void rtllib_tx_ba_inact_timeout(struct timer_list *t)
{
struct tx_ts_record *pTxTs = from_timer(pTxTs, t,
TxAdmittedBARecord.timer);
struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
TxTsRecord[pTxTs->num]);
- TxTsDeleteBA(ieee, pTxTs);
+ tx_ts_delete_ba(ieee, pTxTs);
rtllib_send_DELBA(ieee, pTxTs->TsCommonInfo.Addr,
&pTxTs->TxAdmittedBARecord, TX_DIR,
DELBA_REASON_TIMEOUT);
}
-void RxBaInactTimeout(struct timer_list *t)
+void rtllib_rx_ba_inact_timeout(struct timer_list *t)
{
struct rx_ts_record *pRxTs = from_timer(pRxTs, t,
rx_admitted_ba_record.timer);
struct rtllib_device *ieee = container_of(pRxTs, struct rtllib_device,
RxTsRecord[pRxTs->num]);
- RxTsDeleteBA(ieee, pRxTs);
+ rx_ts_delete_ba(ieee, pRxTs);
rtllib_send_DELBA(ieee, pRxTs->ts_common_info.Addr,
&pRxTs->rx_admitted_ba_record, RX_DIR,
DELBA_REASON_TIMEOUT);
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index f9fa3f2bb728..f19feea46158 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -363,8 +363,7 @@ void HTConstructInfoElement(struct rtllib_device *ieee, u8 *posHTInfo,
}
memset(posHTInfo, 0, *len);
- if ((ieee->iw_mode == IW_MODE_ADHOC) ||
- (ieee->iw_mode == IW_MODE_MASTER)) {
+ if (ieee->iw_mode == IW_MODE_ADHOC) {
pHTInfoEle->ControlChl = ieee->current_network.channel;
pHTInfoEle->ExtChlOffset = ((!pHT->bRegBW40MHz) ?
HT_EXTCHNL_OFFSET_NO_EXT :
diff --git a/drivers/staging/rtl8192e/rtl819x_TS.h b/drivers/staging/rtl8192e/rtl819x_TS.h
index 37760d0bc35d..0e851d4221a7 100644
--- a/drivers/staging/rtl8192e/rtl819x_TS.h
+++ b/drivers/staging/rtl8192e/rtl819x_TS.h
@@ -19,8 +19,6 @@ enum tr_select {
struct ts_common_info {
struct list_head List;
- struct timer_list SetupTimer;
- struct timer_list InactTimer;
u8 Addr[ETH_ALEN];
union tspec_body TSpec;
union qos_tclas TClass[TCLAS_NUM];
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index c61fdf73c572..419ff72f2ba7 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -8,14 +8,6 @@
#include <linux/etherdevice.h>
#include "rtl819x_TS.h"
-static void TsSetupTimeOut(struct timer_list *unused)
-{
-}
-
-static void TsInactTimeout(struct timer_list *unused)
-{
-}
-
static void RxPktPendingTimeout(struct timer_list *t)
{
struct rx_ts_record *pRxTs = from_timer(pRxTs, t,
@@ -96,7 +88,7 @@ static void TsAddBaProcess(struct timer_list *t)
struct rtllib_device *ieee = container_of(pTxTs, struct rtllib_device,
TxTsRecord[num]);
- TsInitAddBA(ieee, pTxTs, BA_POLICY_IMMEDIATE, false);
+ rtllib_ts_init_add_ba(ieee, pTxTs, BA_POLICY_IMMEDIATE, false);
netdev_dbg(ieee->dev, "%s(): ADDBA Req is started\n", __func__);
}
@@ -117,8 +109,8 @@ static void ResetTxTsEntry(struct tx_ts_record *pTS)
pTS->bAddBaReqDelayed = false;
pTS->bUsingBa = false;
pTS->bDisable_AddBa = false;
- ResetBaEntry(&pTS->TxAdmittedBARecord);
- ResetBaEntry(&pTS->TxPendingBARecord);
+ rtllib_reset_ba_entry(&pTS->TxAdmittedBARecord);
+ rtllib_reset_ba_entry(&pTS->TxPendingBARecord);
}
static void ResetRxTsEntry(struct rx_ts_record *pTS)
@@ -126,7 +118,7 @@ static void ResetRxTsEntry(struct rx_ts_record *pTS)
ResetTsCommonInfo(&pTS->ts_common_info);
pTS->rx_indicate_seq = 0xffff;
pTS->rx_timeout_indicate_seq = 0xffff;
- ResetBaEntry(&pTS->rx_admitted_ba_record);
+ rtllib_reset_ba_entry(&pTS->rx_admitted_ba_record);
}
void TSInitialize(struct rtllib_device *ieee)
@@ -142,18 +134,12 @@ void TSInitialize(struct rtllib_device *ieee)
for (count = 0; count < TOTAL_TS_NUM; count++) {
pTxTS->num = count;
- timer_setup(&pTxTS->TsCommonInfo.SetupTimer, TsSetupTimeOut,
- 0);
-
- timer_setup(&pTxTS->TsCommonInfo.InactTimer, TsInactTimeout,
- 0);
-
timer_setup(&pTxTS->TsAddBaTimer, TsAddBaProcess, 0);
- timer_setup(&pTxTS->TxPendingBARecord.timer, BaSetupTimeOut,
+ timer_setup(&pTxTS->TxPendingBARecord.timer, rtllib_ba_setup_timeout,
0);
timer_setup(&pTxTS->TxAdmittedBARecord.timer,
- TxBaInactTimeout, 0);
+ rtllib_tx_ba_inact_timeout, 0);
ResetTxTsEntry(pTxTS);
list_add_tail(&pTxTS->TsCommonInfo.List,
@@ -167,15 +153,8 @@ void TSInitialize(struct rtllib_device *ieee)
for (count = 0; count < TOTAL_TS_NUM; count++) {
pRxTS->num = count;
INIT_LIST_HEAD(&pRxTS->rx_pending_pkt_list);
-
- timer_setup(&pRxTS->ts_common_info.SetupTimer, TsSetupTimeOut,
- 0);
-
- timer_setup(&pRxTS->ts_common_info.InactTimer, TsInactTimeout,
- 0);
-
timer_setup(&pRxTS->rx_admitted_ba_record.timer,
- RxBaInactTimeout, 0);
+ rtllib_rx_ba_inact_timeout, 0);
timer_setup(&pRxTS->rx_pkt_pending_timer, RxPktPendingTimeout, 0);
@@ -194,17 +173,6 @@ void TSInitialize(struct rtllib_device *ieee)
}
}
-static void AdmitTS(struct rtllib_device *ieee,
- struct ts_common_info *pTsCommonInfo, u32 InactTime)
-{
- del_timer_sync(&pTsCommonInfo->SetupTimer);
- del_timer_sync(&pTsCommonInfo->InactTimer);
-
- if (InactTime != 0)
- mod_timer(&pTsCommonInfo->InactTimer, jiffies +
- msecs_to_jiffies(InactTime));
-}
-
static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
u8 *Addr, u8 TID,
enum tr_select TxRxSelect)
@@ -214,15 +182,7 @@ static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
struct list_head *psearch_list;
struct ts_common_info *pRet = NULL;
- if (ieee->iw_mode == IW_MODE_MASTER) {
- if (TxRxSelect == TX_DIR) {
- search_dir[DIR_DOWN] = true;
- search_dir[DIR_BI_DIR] = true;
- } else {
- search_dir[DIR_UP] = true;
- search_dir[DIR_BI_DIR] = true;
- }
- } else if (ieee->iw_mode == IW_MODE_ADHOC) {
+ if (ieee->iw_mode == IW_MODE_ADHOC) {
if (TxRxSelect == TX_DIR)
search_dir[DIR_UP] = true;
else
@@ -343,9 +303,7 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
(&ieee->Tx_TS_Admit_List) :
(&ieee->Rx_TS_Admit_List);
- Dir = (ieee->iw_mode == IW_MODE_MASTER) ?
- ((TxRxSelect == TX_DIR) ? DIR_DOWN : DIR_UP) :
- ((TxRxSelect == TX_DIR) ? DIR_UP : DIR_DOWN);
+ Dir = ((TxRxSelect == TX_DIR) ? DIR_UP : DIR_DOWN);
if (!list_empty(pUnusedList)) {
(*ppTS) = list_entry(pUnusedList->next,
@@ -379,7 +337,6 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
pTSInfo->field.ucSchedule = 0;
MakeTSEntry(*ppTS, Addr, &TSpec, NULL, 0, 0);
- AdmitTS(ieee, *ppTS, 0);
list_add_tail(&((*ppTS)->List), pAddmitList);
return true;
@@ -394,9 +351,7 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
static void RemoveTsEntry(struct rtllib_device *ieee,
struct ts_common_info *pTs, enum tr_select TxRxSelect)
{
- del_timer_sync(&pTs->SetupTimer);
- del_timer_sync(&pTs->InactTimer);
- TsInitDelBA(ieee, pTs, TxRxSelect);
+ rtllib_ts_init_del_ba(ieee, pTs, TxRxSelect);
if (TxRxSelect == RX_DIR) {
struct rx_reorder_entry *pRxReorderEntry;
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index e3ce4431d460..bfa4dbf94d60 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -89,10 +89,6 @@ static inline void *netdev_priv_rsl(struct net_device *dev)
#define HIGH_QUEUE 7
#define BEACON_QUEUE 8
-#ifndef IW_MODE_MESH
-#define IW_MODE_MESH 7
-#endif
-
#define IE_CISCO_FLAG_POSITION 0x08
#define SUPPORT_CKIP_MIC 0x08
#define SUPPORT_CKIP_PK 0x10
@@ -818,7 +814,7 @@ struct rtllib_txb {
u16 reserved;
__le16 frag_size;
__le16 payload_size;
- struct sk_buff *fragments[];
+ struct sk_buff *fragments[] __counted_by(nr_frags);
};
#define MAX_SUBFRAME_COUNT 64
@@ -1440,10 +1436,6 @@ struct rtllib_device {
* WEP key changes
*/
- /* If the host performs {en,de}cryption, then set to 1 */
- int host_encrypt;
- int host_decrypt;
-
int ieee802_1x; /* is IEEE 802.1X used */
/* WPA data */
@@ -1490,9 +1482,7 @@ struct rtllib_device {
enum rtl_link_state link_state;
- int short_slot;
int mode; /* A, B, G */
- int modulation; /* CCK, OFDM */
/* used for forcing the ibss workqueue to terminate
* without wait for the syncro scan to terminate
@@ -1893,7 +1883,7 @@ void rtllib_disassociate(struct rtllib_device *ieee);
void rtllib_stop_scan(struct rtllib_device *ieee);
bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan);
void rtllib_stop_scan_syncro(struct rtllib_device *ieee);
-void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh);
+void rtllib_start_scan_syncro(struct rtllib_device *ieee);
void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee, short pwr);
void rtllib_sta_ps_send_pspoll_frame(struct rtllib_device *ieee);
void rtllib_start_protocol(struct rtllib_device *ieee);
@@ -2008,15 +1998,15 @@ u16 TxCountToDataRate(struct rtllib_device *ieee, u8 nDataRate);
int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb);
int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb);
int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb);
-void TsInitAddBA(struct rtllib_device *ieee, struct tx_ts_record *pTS,
- u8 Policy, u8 bOverwritePending);
-void TsInitDelBA(struct rtllib_device *ieee,
- struct ts_common_info *pTsCommonInfo,
- enum tr_select TxRxSelect);
-void BaSetupTimeOut(struct timer_list *t);
-void TxBaInactTimeout(struct timer_list *t);
-void RxBaInactTimeout(struct timer_list *t);
-void ResetBaEntry(struct ba_record *pBA);
+void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *pTS,
+ u8 Policy, u8 bOverwritePending);
+void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
+ struct ts_common_info *pTsCommonInfo,
+ enum tr_select TxRxSelect);
+void rtllib_ba_setup_timeout(struct timer_list *t);
+void rtllib_tx_ba_inact_timeout(struct timer_list *t);
+void rtllib_rx_ba_inact_timeout(struct timer_list *t);
+void rtllib_reset_ba_entry(struct ba_record *pBA);
bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, u8 *Addr,
u8 TID, enum tr_select TxRxSelect, bool bAddNewTs);
void TSInitialize(struct rtllib_device *ieee);
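The fragments[] flexible array gains a __counted_by(nr_frags) annotation, telling the compiler (and FORTIFY/UBSAN bounds checking on toolchains that support the attribute) that nr_frags holds the element count, so out-of-bounds indexing through the array can be caught; the counter must be set before the array is touched. A minimal sketch of the allocation pattern this enables, with illustrative names:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct example_txb {
            u8 nr_frags;
            struct sk_buff *fragments[] __counted_by(nr_frags);
    };

    static struct example_txb *example_txb_alloc(u8 nr, gfp_t gfp)
    {
            struct example_txb *txb;

            txb = kzalloc(struct_size(txb, fragments, nr), gfp);
            if (!txb)
                    return NULL;
            txb->nr_frags = nr;  /* set the counter before indexing fragments[] */
            return txb;
    }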
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index d6a4d6b4ec57..2416e0c60255 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -97,9 +97,6 @@ struct net_device *alloc_rtllib(int sizeof_priv)
ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
ieee->open_wep = 1;
- /* Default to enabling full open WEP with host based encrypt/decrypt */
- ieee->host_encrypt = 1;
- ieee->host_decrypt = 1;
ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
ieee->rtllib_ap_sec_type = rtllib_ap_sec_type;
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 91dd3c373aef..40e7bbb17c0d 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -1013,17 +1013,15 @@ static int rtllib_rx_data_filter(struct rtllib_device *ieee, u16 fc,
}
}
- if (ieee->iw_mode != IW_MODE_MESH) {
- /* packets from our adapter are dropped (echo) */
- if (!memcmp(src, ieee->dev->dev_addr, ETH_ALEN))
- return -1;
+ /* packets from our adapter are dropped (echo) */
+ if (!memcmp(src, ieee->dev->dev_addr, ETH_ALEN))
+ return -1;
- /* {broad,multi}cast packets to our BSS go through */
- if (is_multicast_ether_addr(dst)) {
- if (memcmp(bssid, ieee->current_network.bssid,
- ETH_ALEN))
- return -1;
- }
+ /* {broad,multi}cast packets to our BSS go through */
+ if (is_multicast_ether_addr(dst)) {
+ if (memcmp(bssid, ieee->current_network.bssid,
+ ETH_ALEN))
+ return -1;
}
return 0;
}
@@ -1035,29 +1033,27 @@ static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
u16 fc = le16_to_cpu(hdr->frame_ctl);
int idx = 0;
- if (ieee->host_decrypt) {
- if (skb->len >= hdrlen + 3)
- idx = skb->data[hdrlen + 3] >> 6;
+ if (skb->len >= hdrlen + 3)
+ idx = skb->data[hdrlen + 3] >> 6;
- *crypt = ieee->crypt_info.crypt[idx];
- /* allow NULL decrypt to indicate an station specific override
- * for default encryption
+ *crypt = ieee->crypt_info.crypt[idx];
+ /* allow NULL decrypt to indicate an station specific override
+ * for default encryption
+ */
+ if (*crypt && ((*crypt)->ops == NULL ||
+ (*crypt)->ops->decrypt_mpdu == NULL))
+ *crypt = NULL;
+
+ if (!*crypt && (fc & RTLLIB_FCTL_WEP)) {
+ /* This seems to be triggered by some (multicast?)
+ * frames from other than current BSS, so just drop the
+ * frames silently instead of filling system log with
+ * these reports.
*/
- if (*crypt && ((*crypt)->ops == NULL ||
- (*crypt)->ops->decrypt_mpdu == NULL))
- *crypt = NULL;
-
- if (!*crypt && (fc & RTLLIB_FCTL_WEP)) {
- /* This seems to be triggered by some (multicast?)
- * frames from other than current BSS, so just drop the
- * frames silently instead of filling system log with
- * these reports.
- */
- netdev_dbg(ieee->dev,
- "Decryption failed (not set) (SA= %pM)\n",
- hdr->addr2);
- return -1;
- }
+ netdev_dbg(ieee->dev,
+ "Decryption failed (not set) (SA= %pM)\n",
+ hdr->addr2);
+ return -1;
}
return 0;
@@ -1083,7 +1079,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
ieee->need_sw_enc = 0;
keyidx = rtllib_rx_frame_decrypt(ieee, skb, crypt);
- if (ieee->host_decrypt && (fc & RTLLIB_FCTL_WEP) && (keyidx < 0)) {
+ if ((fc & RTLLIB_FCTL_WEP) && (keyidx < 0)) {
netdev_info(ieee->dev, "%s: decrypt frame error\n", __func__);
return -1;
}
@@ -1147,7 +1143,7 @@ static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
/* skb: hdr + (possible reassembled) full MSDU payload; possibly still
* encrypted/authenticated
*/
- if (ieee->host_decrypt && (fc & RTLLIB_FCTL_WEP) &&
+ if ((fc & RTLLIB_FCTL_WEP) &&
rtllib_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) {
netdev_info(ieee->dev, "%s: ==>decrypt msdu error\n", __func__);
return -1;
@@ -1447,12 +1443,6 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
return 0;
}
-static int rtllib_rx_Master(struct rtllib_device *ieee, struct sk_buff *skb,
- struct rtllib_rx_stats *rx_stats)
-{
- return 0;
-}
-
static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb,
struct rtllib_rx_stats *rx_stats)
{
@@ -1481,12 +1471,6 @@ static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb,
return 1;
}
-static int rtllib_rx_Mesh(struct rtllib_device *ieee, struct sk_buff *skb,
- struct rtllib_rx_stats *rx_stats)
-{
- return 0;
-}
-
/* All received frames are sent to this function. @skb contains the frame in
* IEEE 802.11 format, i.e., in the format it was sent over air.
* This function is called only as a tasklet (software IRQ).
@@ -1510,16 +1494,9 @@ int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
case IW_MODE_INFRA:
ret = rtllib_rx_InfraAdhoc(ieee, skb, rx_stats);
break;
- case IW_MODE_MASTER:
- case IW_MODE_REPEAT:
- ret = rtllib_rx_Master(ieee, skb, rx_stats);
- break;
case IW_MODE_MONITOR:
ret = rtllib_rx_Monitor(ieee, skb, rx_stats);
break;
- case IW_MODE_MESH:
- ret = rtllib_rx_Mesh(ieee, skb, rx_stats);
- break;
default:
netdev_info(ieee->dev, "%s: ERR iw mode!!!\n", __func__);
break;
@@ -2698,8 +2675,7 @@ static void rtllib_rx_mgt(struct rtllib_device *ieee,
netdev_dbg(ieee->dev, "received PROBE REQUEST (%d)\n",
WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)));
if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) &&
- ((ieee->iw_mode == IW_MODE_ADHOC ||
- ieee->iw_mode == IW_MODE_MASTER) &&
+ (ieee->iw_mode == IW_MODE_ADHOC &&
ieee->link_state == MAC80211_LINKED))
rtllib_rx_probe_rq(ieee, skb);
break;
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index 425d4acbcdf0..de1702491191 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -35,12 +35,8 @@ static unsigned int rtllib_MFIE_rate_len(struct rtllib_device *ieee)
{
unsigned int rate_len = 0;
- if (ieee->modulation & RTLLIB_CCK_MODULATION)
- rate_len = RTLLIB_CCK_RATE_LEN + 2;
-
- if (ieee->modulation & RTLLIB_OFDM_MODULATION)
-
- rate_len += RTLLIB_OFDM_RATE_LEN + 2;
+ rate_len = RTLLIB_CCK_RATE_LEN + 2;
+ rate_len += RTLLIB_OFDM_RATE_LEN + 2;
return rate_len;
}
@@ -53,14 +49,12 @@ static void rtllib_MFIE_Brate(struct rtllib_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
- if (ieee->modulation & RTLLIB_CCK_MODULATION) {
- *tag++ = MFIE_TYPE_RATES;
- *tag++ = 4;
- *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_1MB;
- *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_2MB;
- *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_5MB;
- *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_11MB;
- }
+ *tag++ = MFIE_TYPE_RATES;
+ *tag++ = 4;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_1MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_2MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_5MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_11MB;
/* We may add an option for custom rates that specific HW
* might support
@@ -72,18 +66,17 @@ static void rtllib_MFIE_Grate(struct rtllib_device *ieee, u8 **tag_p)
{
u8 *tag = *tag_p;
- if (ieee->modulation & RTLLIB_OFDM_MODULATION) {
- *tag++ = MFIE_TYPE_RATES_EX;
- *tag++ = 8;
- *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_6MB;
- *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_9MB;
- *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_12MB;
- *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_18MB;
- *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_24MB;
- *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_36MB;
- *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_48MB;
- *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_54MB;
- }
+ *tag++ = MFIE_TYPE_RATES_EX;
+ *tag++ = 8;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_6MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_9MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_12MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_18MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_24MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_36MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_48MB;
+ *tag++ = RTLLIB_BASIC_RATE_MASK | RTLLIB_OFDM_RATE_54MB;
+
/* We may add an option for custom rates that specific HW might
* support
*/
@@ -456,7 +449,7 @@ void rtllib_DisableIntelPromiscuousMode(struct net_device *dev,
}
EXPORT_SYMBOL(rtllib_DisableIntelPromiscuousMode);
-static void rtllib_send_probe(struct rtllib_device *ieee, u8 is_mesh)
+static void rtllib_send_probe(struct rtllib_device *ieee)
{
struct sk_buff *skb;
@@ -467,12 +460,12 @@ static void rtllib_send_probe(struct rtllib_device *ieee, u8 is_mesh)
}
}
-static void rtllib_send_probe_requests(struct rtllib_device *ieee, u8 is_mesh)
+static void rtllib_send_probe_requests(struct rtllib_device *ieee)
{
if (ieee->active_scan && (ieee->softmac_features &
IEEE_SOFTMAC_PROBERQ)) {
- rtllib_send_probe(ieee, 0);
- rtllib_send_probe(ieee, 0);
+ rtllib_send_probe(ieee);
+ rtllib_send_probe(ieee);
}
}
@@ -485,7 +478,7 @@ static void rtllib_update_active_chan_map(struct rtllib_device *ieee)
/* this performs syncro scan blocking the caller until all channels
* in the allowed channel map has been checked.
*/
-static void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
+static void rtllib_softmac_scan_syncro(struct rtllib_device *ieee)
{
union iwreq_data wrqu;
short ch = 0;
@@ -532,7 +525,7 @@ static void rtllib_softmac_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
ieee->set_chan(ieee->dev, ch);
if (ieee->active_channel_map[ch] == 1)
- rtllib_send_probe_requests(ieee, 0);
+ rtllib_send_probe_requests(ieee);
/* this prevent excessive time wait when we
* need to wait for a syncro scan to end..
@@ -594,7 +587,7 @@ static void rtllib_softmac_scan_wq(void *data)
ieee->set_chan(ieee->dev, ieee->current_network.channel);
if (ieee->active_channel_map[ieee->current_network.channel] == 1)
- rtllib_send_probe_requests(ieee, 0);
+ rtllib_send_probe_requests(ieee);
schedule_delayed_work(&ieee->softmac_scan_wq,
msecs_to_jiffies(RTLLIB_SOFTMAC_SCAN_TIME));
@@ -714,7 +707,7 @@ static void rtllib_start_scan(struct rtllib_device *ieee)
}
/* called with wx_mutex held */
-void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
+void rtllib_start_scan_syncro(struct rtllib_device *ieee)
{
if (IS_DOT11D_ENABLE(ieee)) {
if (IS_COUNTRY_IE_VALID(ieee))
@@ -722,7 +715,7 @@ void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
}
ieee->sync_scan_hurryup = 0;
if (ieee->softmac_features & IEEE_SOFTMAC_SCAN)
- rtllib_softmac_scan_syncro(ieee, is_mesh);
+ rtllib_softmac_scan_syncro(ieee);
}
EXPORT_SYMBOL(rtllib_start_scan_syncro);
@@ -814,7 +807,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
}
crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
- encrypt = ieee->host_encrypt && crypt && crypt->ops &&
+ encrypt = crypt && crypt->ops &&
((strcmp(crypt->ops->name, "R-WEP") == 0 || wpa_ie_len));
if (ieee->ht_info->bCurrentHTSupport) {
tmp_ht_cap_buf = (u8 *)&(ieee->ht_info->SelfHTCap);
@@ -859,8 +852,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
cpu_to_le16(ieee->current_network.capability &
WLAN_CAPABILITY_SHORT_PREAMBLE);
- if (ieee->short_slot && (ieee->current_network.capability &
- WLAN_CAPABILITY_SHORT_SLOT_TIME))
+ if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
beacon_buf->capability |=
cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
@@ -917,91 +909,6 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee,
return skb;
}
-static struct sk_buff *rtllib_assoc_resp(struct rtllib_device *ieee, u8 *dest)
-{
- struct sk_buff *skb;
- u8 *tag;
-
- struct lib80211_crypt_data *crypt;
- struct rtllib_assoc_response_frame *assoc;
- short encrypt;
-
- unsigned int rate_len = rtllib_MFIE_rate_len(ieee);
- int len = sizeof(struct rtllib_assoc_response_frame) + rate_len +
- ieee->tx_headroom;
-
- skb = dev_alloc_skb(len);
-
- if (!skb)
- return NULL;
-
- skb_reserve(skb, ieee->tx_headroom);
-
- assoc = skb_put(skb, sizeof(struct rtllib_assoc_response_frame));
-
- assoc->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_ASSOC_RESP);
- ether_addr_copy(assoc->header.addr1, dest);
- ether_addr_copy(assoc->header.addr3, ieee->dev->dev_addr);
- ether_addr_copy(assoc->header.addr2, ieee->dev->dev_addr);
- assoc->capability = cpu_to_le16(ieee->iw_mode == IW_MODE_MASTER ?
- WLAN_CAPABILITY_ESS : WLAN_CAPABILITY_IBSS);
-
- if (ieee->short_slot)
- assoc->capability |=
- cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
-
- if (ieee->host_encrypt)
- crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
- else
- crypt = NULL;
-
- encrypt = (crypt && crypt->ops);
-
- if (encrypt)
- assoc->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
-
- assoc->status = 0;
- assoc->aid = cpu_to_le16(ieee->assoc_id);
- if (ieee->assoc_id == 0x2007)
- ieee->assoc_id = 0;
- else
- ieee->assoc_id++;
-
- tag = skb_put(skb, rate_len);
- rtllib_MFIE_Brate(ieee, &tag);
- rtllib_MFIE_Grate(ieee, &tag);
-
- return skb;
-}
-
-static struct sk_buff *rtllib_auth_resp(struct rtllib_device *ieee, int status,
- u8 *dest)
-{
- struct sk_buff *skb = NULL;
- struct rtllib_authentication *auth;
- int len = ieee->tx_headroom + sizeof(struct rtllib_authentication) + 1;
-
- skb = dev_alloc_skb(len);
- if (!skb)
- return NULL;
-
- skb->len = sizeof(struct rtllib_authentication);
-
- skb_reserve(skb, ieee->tx_headroom);
-
- auth = skb_put(skb, sizeof(struct rtllib_authentication));
-
- auth->status = cpu_to_le16(status);
- auth->transaction = cpu_to_le16(2);
- auth->algorithm = cpu_to_le16(WLAN_AUTH_OPEN);
-
- ether_addr_copy(auth->header.addr3, ieee->dev->dev_addr);
- ether_addr_copy(auth->header.addr2, ieee->dev->dev_addr);
- ether_addr_copy(auth->header.addr1, dest);
- auth->header.frame_ctl = cpu_to_le16(RTLLIB_STYPE_AUTH);
- return skb;
-}
-
static struct sk_buff *rtllib_null_func(struct rtllib_device *ieee, short pwr)
{
struct sk_buff *skb;
@@ -1049,22 +956,6 @@ static struct sk_buff *rtllib_pspoll_func(struct rtllib_device *ieee)
return skb;
}
-static void rtllib_resp_to_assoc_rq(struct rtllib_device *ieee, u8 *dest)
-{
- struct sk_buff *buf = rtllib_assoc_resp(ieee, dest);
-
- if (buf)
- softmac_mgmt_xmit(buf, ieee);
-}
-
-static void rtllib_resp_to_auth(struct rtllib_device *ieee, int s, u8 *dest)
-{
- struct sk_buff *buf = rtllib_auth_resp(ieee, s, dest);
-
- if (buf)
- softmac_mgmt_xmit(buf, ieee);
-}
-
static void rtllib_resp_to_probe(struct rtllib_device *ieee, u8 *dest)
{
struct sk_buff *buf = rtllib_probe_resp(ieee, dest);
@@ -1122,7 +1013,7 @@ rtllib_association_req(struct rtllib_network *beacon,
crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
if (crypt != NULL)
- encrypt = ieee->host_encrypt && crypt && crypt->ops &&
+ encrypt = crypt && crypt->ops &&
((strcmp(crypt->ops->name, "R-WEP") == 0 ||
wpa_ie_len));
else
@@ -1200,8 +1091,7 @@ rtllib_association_req(struct rtllib_network *beacon,
if (beacon->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
- if (ieee->short_slot &&
- (beacon->capability & WLAN_CAPABILITY_SHORT_SLOT_TIME))
+ if (beacon->capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
hdr->capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
hdr->listen_interval = cpu_to_le16(beacon->listen_interval);
@@ -1465,8 +1355,7 @@ static void rtllib_associate_complete_wq(void *data)
netif_carrier_on(ieee->dev);
ieee->is_roaming = false;
- if (rtllib_is_54g(&ieee->current_network) &&
- (ieee->modulation & RTLLIB_OFDM_MODULATION)) {
+ if (rtllib_is_54g(&ieee->current_network)) {
ieee->rate = 108;
netdev_info(ieee->dev, "Using G rates:%d\n", ieee->rate);
} else {
@@ -1652,9 +1541,7 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
schedule_delayed_work(
&ieee->associate_procedure_wq, 0);
} else {
- if (rtllib_is_54g(&ieee->current_network) &&
- (ieee->modulation &
- RTLLIB_OFDM_MODULATION)) {
+ if (rtllib_is_54g(&ieee->current_network)) {
ieee->rate = 108;
ieee->set_wireless_mode(ieee->dev, WIRELESS_MODE_G);
netdev_info(ieee->dev,
@@ -1726,25 +1613,6 @@ static inline int auth_parse(struct net_device *dev, struct sk_buff *skb,
return 0;
}
-static int auth_rq_parse(struct net_device *dev, struct sk_buff *skb, u8 *dest)
-{
- struct rtllib_authentication *a;
-
- if (skb->len < (sizeof(struct rtllib_authentication) -
- sizeof(struct rtllib_info_element))) {
- netdev_dbg(dev, "invalid len in auth request: %d\n", skb->len);
- return -1;
- }
- a = (struct rtllib_authentication *)skb->data;
-
- ether_addr_copy(dest, a->header.addr2);
-
- if (le16_to_cpu(a->algorithm) != WLAN_AUTH_OPEN)
- return WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
-
- return WLAN_STATUS_SUCCESS;
-}
-
static short probe_rq_parse(struct rtllib_device *ieee, struct sk_buff *skb,
u8 *src)
{
@@ -1791,23 +1659,6 @@ static short probe_rq_parse(struct rtllib_device *ieee, struct sk_buff *skb,
return !strncmp(ssid, ieee->current_network.ssid, ssidlen);
}
-static int assoc_rq_parse(struct net_device *dev, struct sk_buff *skb, u8 *dest)
-{
- struct rtllib_assoc_request_frame *a;
-
- if (skb->len < (sizeof(struct rtllib_assoc_request_frame) -
- sizeof(struct rtllib_info_element))) {
- netdev_dbg(dev, "invalid len in auth request:%d\n", skb->len);
- return -1;
- }
-
- a = (struct rtllib_assoc_request_frame *)skb->data;
-
- ether_addr_copy(dest, a->header.addr2);
-
- return 0;
-}
-
static inline u16 assoc_parse(struct rtllib_device *ieee, struct sk_buff *skb,
int *aid)
{
@@ -1848,31 +1699,6 @@ void rtllib_rx_probe_rq(struct rtllib_device *ieee, struct sk_buff *skb)
}
}
-static inline void rtllib_rx_auth_rq(struct rtllib_device *ieee,
- struct sk_buff *skb)
-{
- u8 dest[ETH_ALEN];
- int status;
-
- ieee->softmac_stats.rx_auth_rq++;
-
- status = auth_rq_parse(ieee->dev, skb, dest);
- if (status != -1)
- rtllib_resp_to_auth(ieee, status, dest);
-}
-
-static inline void rtllib_rx_assoc_rq(struct rtllib_device *ieee,
- struct sk_buff *skb)
-{
- u8 dest[ETH_ALEN];
-
- ieee->softmac_stats.rx_ass_rq++;
- if (assoc_rq_parse(ieee->dev, skb, dest) != -1)
- rtllib_resp_to_assoc_rq(ieee, dest);
-
- netdev_info(ieee->dev, "New client associated: %pM\n", dest);
-}
-
void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee, short pwr)
{
struct sk_buff *buf = rtllib_null_func(ieee, pwr);
@@ -2265,8 +2091,6 @@ rtllib_rx_auth(struct rtllib_device *ieee, struct sk_buff *skb,
netdev_dbg(ieee->dev,
"Received authentication response");
rtllib_rx_auth_resp(ieee, skb);
- } else if (ieee->iw_mode == IW_MODE_MASTER) {
- rtllib_rx_auth_rq(ieee, skb);
}
}
return 0;
@@ -2326,9 +2150,6 @@ inline int rtllib_rx_frame_softmac(struct rtllib_device *ieee,
break;
case RTLLIB_STYPE_ASSOC_REQ:
case RTLLIB_STYPE_REASSOC_REQ:
- if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
- ieee->iw_mode == IW_MODE_MASTER)
- rtllib_rx_assoc_rq(ieee, skb);
break;
case RTLLIB_STYPE_AUTH:
rtllib_rx_auth(ieee, skb, rx_stats);
@@ -2442,30 +2263,6 @@ void rtllib_wake_all_queues(struct rtllib_device *ieee)
netif_tx_wake_all_queues(ieee->dev);
}
-/* called in user context only */
-static void rtllib_start_master_bss(struct rtllib_device *ieee)
-{
- ieee->assoc_id = 1;
-
- if (ieee->current_network.ssid_len == 0) {
- strncpy(ieee->current_network.ssid,
- RTLLIB_DEFAULT_TX_ESSID,
- IW_ESSID_MAX_SIZE);
-
- ieee->current_network.ssid_len =
- strlen(RTLLIB_DEFAULT_TX_ESSID);
- ieee->ssid_set = 1;
- }
-
- ether_addr_copy(ieee->current_network.bssid, ieee->dev->dev_addr);
-
- ieee->set_chan(ieee->dev, ieee->current_network.channel);
- ieee->link_state = MAC80211_LINKED;
- ieee->link_change(ieee->dev);
- notify_wx_assoc_event(ieee);
- netif_carrier_on(ieee->dev);
-}
-
static void rtllib_start_monitor_mode(struct rtllib_device *ieee)
{
/* reset hardware status */
@@ -2517,7 +2314,7 @@ static void rtllib_start_ibss_wq(void *data)
* associated.
*/
if (ieee->link_state == MAC80211_NOLINK)
- rtllib_start_scan_syncro(ieee, 0);
+ rtllib_start_scan_syncro(ieee);
/* the network definitively is not here.. create a new cell */
if (ieee->link_state == MAC80211_NOLINK) {
@@ -2526,47 +2323,34 @@ static void rtllib_start_ibss_wq(void *data)
if (!ieee->wap_set)
eth_random_addr(ieee->current_network.bssid);
- if (ieee->modulation & RTLLIB_CCK_MODULATION) {
- ieee->current_network.rates_len = 4;
-
- ieee->current_network.rates[0] =
- RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_1MB;
- ieee->current_network.rates[1] =
- RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_2MB;
- ieee->current_network.rates[2] =
- RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_5MB;
- ieee->current_network.rates[3] =
- RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_11MB;
-
- } else {
- ieee->current_network.rates_len = 0;
- }
-
- if (ieee->modulation & RTLLIB_OFDM_MODULATION) {
- ieee->current_network.rates_ex_len = 8;
-
- ieee->current_network.rates_ex[0] =
- RTLLIB_OFDM_RATE_6MB;
- ieee->current_network.rates_ex[1] =
- RTLLIB_OFDM_RATE_9MB;
- ieee->current_network.rates_ex[2] =
- RTLLIB_OFDM_RATE_12MB;
- ieee->current_network.rates_ex[3] =
- RTLLIB_OFDM_RATE_18MB;
- ieee->current_network.rates_ex[4] =
- RTLLIB_OFDM_RATE_24MB;
- ieee->current_network.rates_ex[5] =
- RTLLIB_OFDM_RATE_36MB;
- ieee->current_network.rates_ex[6] =
- RTLLIB_OFDM_RATE_48MB;
- ieee->current_network.rates_ex[7] =
- RTLLIB_OFDM_RATE_54MB;
-
- ieee->rate = 108;
- } else {
- ieee->current_network.rates_ex_len = 0;
- ieee->rate = 22;
- }
+ ieee->current_network.rates_len = 4;
+ ieee->current_network.rates[0] =
+ RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_1MB;
+ ieee->current_network.rates[1] =
+ RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_2MB;
+ ieee->current_network.rates[2] =
+ RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_5MB;
+ ieee->current_network.rates[3] =
+ RTLLIB_BASIC_RATE_MASK | RTLLIB_CCK_RATE_11MB;
+
+ ieee->current_network.rates_ex_len = 8;
+ ieee->current_network.rates_ex[0] =
+ RTLLIB_OFDM_RATE_6MB;
+ ieee->current_network.rates_ex[1] =
+ RTLLIB_OFDM_RATE_9MB;
+ ieee->current_network.rates_ex[2] =
+ RTLLIB_OFDM_RATE_12MB;
+ ieee->current_network.rates_ex[3] =
+ RTLLIB_OFDM_RATE_18MB;
+ ieee->current_network.rates_ex[4] =
+ RTLLIB_OFDM_RATE_24MB;
+ ieee->current_network.rates_ex[5] =
+ RTLLIB_OFDM_RATE_36MB;
+ ieee->current_network.rates_ex[6] =
+ RTLLIB_OFDM_RATE_48MB;
+ ieee->current_network.rates_ex[7] =
+ RTLLIB_OFDM_RATE_54MB;
+ ieee->rate = 108;
ieee->current_network.qos_data.supported = 0;
ieee->set_wireless_mode(ieee->dev, WIRELESS_MODE_G);
@@ -2837,9 +2621,6 @@ void rtllib_start_protocol(struct rtllib_device *ieee)
case IW_MODE_ADHOC:
rtllib_start_ibss(ieee);
break;
- case IW_MODE_MASTER:
- rtllib_start_master_bss(ieee);
- break;
case IW_MODE_MONITOR:
rtllib_start_monitor_mode(ieee);
break;
@@ -3018,8 +2799,7 @@ u8 rtllib_ap_sec_type(struct rtllib_device *ieee)
crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
encrypt = (ieee->current_network.capability & WLAN_CAPABILITY_PRIVACY)
- || (ieee->host_encrypt && crypt && crypt->ops &&
- (strcmp(crypt->ops->name, "R-WEP") == 0));
+ || (crypt && crypt->ops && (strcmp(crypt->ops->name, "R-WEP") == 0));
/* simply judge */
if (encrypt && (wpa_ie_len == 0)) {
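Note on the rtllib_softmac.c hunks above: with master (AP) mode removed, the ad-hoc start path now advertises the CCK and OFDM rate sets unconditionally and pins ieee->rate at 108. The rate bytes follow the usual IEEE 802.11 convention of 500 kb/s units (the same convention behind the RTLLIB_*_RATE_* defines), so the sketch below is purely illustrative arithmetic; the macro is not part of the driver.

/* Illustrative only: rate values are in 500 kb/s units. */
#define RATE_500KBPS_TO_MBPS(r)   ((r) / 2)
/* ieee->rate = 108  ->  54 Mb/s, the top OFDM rate, now always used.      */
/* The removed CCK-only fallback used 22  ->  11 Mb/s.                     */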
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 2de63d1f2009..0b690f0ffeef 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -51,8 +51,7 @@ int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
ieee->current_network.channel = fwrq->m;
ieee->set_chan(ieee->dev, ieee->current_network.channel);
- if (ieee->iw_mode == IW_MODE_ADHOC ||
- ieee->iw_mode == IW_MODE_MASTER)
+ if (ieee->iw_mode == IW_MODE_ADHOC)
if (ieee->link_state == MAC80211_LINKED) {
rtllib_stop_send_beacons(ieee);
rtllib_start_send_beacons(ieee);
@@ -125,10 +124,6 @@ int rtllib_wx_set_wap(struct rtllib_device *ieee,
mutex_lock(&ieee->wx_mutex);
/* use ifconfig hw ether */
- if (ieee->iw_mode == IW_MODE_MASTER) {
- ret = -1;
- goto out;
- }
if (temp->sa_family != ARPHRD_ETHER) {
ret = -EINVAL;
@@ -310,7 +305,7 @@ void rtllib_wx_sync_scan_wq(void *data)
mutex_lock(&ieee->wx_mutex);
if (!(ieee->softmac_features & IEEE_SOFTMAC_SCAN)) {
- rtllib_start_scan_syncro(ieee, 0);
+ rtllib_start_scan_syncro(ieee);
goto out;
}
@@ -339,7 +334,7 @@ void rtllib_wx_sync_scan_wq(void *data)
HT_EXTCHNL_OFFSET_NO_EXT);
}
- rtllib_start_scan_syncro(ieee, 0);
+ rtllib_start_scan_syncro(ieee);
if (b40M) {
if (chan_offset == HT_EXTCHNL_OFFSET_UPPER)
@@ -366,7 +361,7 @@ void rtllib_wx_sync_scan_wq(void *data)
ieee->link_detect_info.NumRecvBcnInPeriod = 1;
ieee->link_detect_info.NumRecvDataInPeriod = 1;
}
- if (ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER)
+ if (ieee->iw_mode == IW_MODE_ADHOC)
rtllib_start_send_beacons(ieee);
rtllib_wake_all_queues(ieee);
@@ -487,11 +482,9 @@ EXPORT_SYMBOL(rtllib_wx_set_rawtx);
int rtllib_wx_get_name(struct rtllib_device *ieee, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
- const char *b = ieee->modulation & RTLLIB_CCK_MODULATION ? "b" : "";
- const char *g = ieee->modulation & RTLLIB_OFDM_MODULATION ? "g" : "";
const char *n = ieee->mode & (WIRELESS_MODE_N_24G) ? "n" : "";
- scnprintf(wrqu->name, sizeof(wrqu->name), "802.11%s%s%s", b, g, n);
+ scnprintf(wrqu->name, sizeof(wrqu->name), "802.11bg%s", n);
return 0;
}
EXPORT_SYMBOL(rtllib_wx_get_name);
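Since CCK and OFDM are now unconditional, rtllib_wx_get_name() can only report two strings; a small sketch mirroring the + lines above (illustrative, not new driver code):

/* Reported PHY name after this change: "802.11bg", or "802.11bgn" when
 * the 2.4 GHz HT mode is active. */
const char *n = (ieee->mode & WIRELESS_MODE_N_24G) ? "n" : "";
scnprintf(wrqu->name, sizeof(wrqu->name), "802.11bg%s", n);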
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index ec038ef806c3..4199aee930f0 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -463,8 +463,6 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee,
}
if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
tcb_desc->bUseShortPreamble = true;
- if (ieee->iw_mode == IW_MODE_MASTER)
- goto NO_PROTECTION;
return;
NO_PROTECTION:
tcb_desc->bRTSEnable = false;
@@ -635,8 +633,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
skb->priority = rtllib_classify(skb, IsAmsdu);
crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
- encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
- ieee->host_encrypt && crypt && crypt->ops;
+ encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) && crypt && crypt->ops;
if (!encrypt && ieee->ieee802_1x &&
ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
stats->tx_dropped++;
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index ca09367005e1..5da8ac401df0 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -1121,10 +1121,12 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
/* skb: hdr + (possibly fragmented, possibly encrypted) payload */
- if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
- (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) {
- netdev_dbg(ieee->dev, "decrypt frame error\n");
- goto rx_dropped;
+ if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP)) {
+ keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt);
+ if (keyidx < 0) {
+ netdev_dbg(ieee->dev, "decrypt frame error\n");
+ goto rx_dropped;
+ }
}
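The ieee80211_rx() change just above is purely stylistic: the assignment is hoisted out of the if-condition, as kernel style prefers. A minimal sketch of the pattern, with placeholder names rather than the driver's real identifiers:

/* Before: assignment buried inside the condition. */
if (need_decrypt && (keyidx = decrypt_frame(skb, crypt)) < 0)
	goto rx_dropped;

/* After: same behavior, written as plain statements. */
if (need_decrypt) {
	keyidx = decrypt_frame(skb, crypt);
	if (keyidx < 0)
		goto rx_dropped;
}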
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index a2f3645be0cc..b18e6d9c832b 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -327,6 +327,7 @@ int r8712_init_drv_sw(struct _adapter *padapter)
mp871xinit(padapter);
init_default_value(padapter);
r8712_InitSwLeds(padapter);
+ mutex_init(&padapter->mutex_start);
return 0;
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 37364d3101e2..df05213f922f 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -567,7 +567,6 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
if (rtl871x_load_fw(padapter))
goto deinit_drv_sw;
init_completion(&padapter->rx_filter_ready);
- mutex_init(&padapter->mutex_start);
return 0;
deinit_drv_sw:
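The two rtl8712 hunks move mutex_init() for mutex_start from the USB probe routine into r8712_init_drv_sw(). The likely intent (an assumption, not stated in the diff) is that the lock becomes valid as soon as the software state exists, so any later error-unwind or teardown code cannot touch an uninitialized mutex. A generic sketch of that hazard, with placeholder names only:

/* Placeholder names, not the driver's API: if the mutex were initialized
 * only after firmware load, the unwind path below could lock an
 * uninitialized mutex. */
static int probe_sketch(struct adapter_sketch *padapter)
{
	int err;

	err = init_sw_state(padapter);   /* now also does mutex_init() */
	if (err)
		return err;

	err = load_firmware(padapter);
	if (err)
		goto free_sw;            /* teardown code may take the mutex */

	return 0;

free_sw:
	free_sw_state(padapter);
	return err;
}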
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index d30d6e6bcd07..e4063713fecc 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -1238,7 +1238,6 @@ void rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
struct sta_priv *pstapriv = &padapter->stapriv;
struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
struct __queue *pacl_node_q = &pacl_list->acl_node_q;
- u8 baddr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; /* Baddr is used for clearing acl_list */
spin_lock_bh(&(pacl_node_q->lock));
@@ -1248,7 +1247,7 @@ void rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
if (
!memcmp(paclnode->addr, addr, ETH_ALEN) ||
- !memcmp(baddr, addr, ETH_ALEN)
+ is_broadcast_ether_addr(addr)
) {
if (paclnode->valid) {
paclnode->valid = false;
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index 7e2c61c75150..b221913733fb 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -226,9 +226,8 @@ struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr)
{
struct list_head *phead, *plist;
struct wlan_network *pnetwork = NULL;
- u8 zero_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
- if (!memcmp(zero_addr, addr, ETH_ALEN)) {
+ if (is_zero_ether_addr(addr)) {
pnetwork = NULL;
goto exit;
}
@@ -2513,7 +2512,7 @@ void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitfr
struct sta_info *psta;
struct ht_priv *phtpriv;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
- s32 bmcst = IS_MCAST(pattrib->ra);
+ s32 bmcst = is_multicast_ether_addr(pattrib->ra);
/* if (bmcst || (padapter->mlmepriv.LinkDetectInfo.bTxBusyTraffic == false)) */
if (bmcst || (padapter->mlmepriv.LinkDetectInfo.NumTxOkInPeriod < 100))
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 1148c9829890..985683767a40 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -421,13 +421,12 @@ void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, union recv_frame *precv_frame)
{
- u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
u8 *pframe = precv_frame->u.hdr.rx_data;
if (ptable->func) {
/* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
- memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
+ !is_broadcast_ether_addr(GetAddr1Ptr(pframe)))
return;
ptable->func(padapter, precv_frame);
@@ -439,7 +438,6 @@ void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
int index;
struct mlme_handler *ptable;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
u8 *pframe = precv_frame->u.hdr.rx_data;
struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(pframe));
struct dvobj_priv *psdpriv = padapter->dvobj;
@@ -450,7 +448,7 @@ void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
/* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
- memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN)) {
+ !is_broadcast_ether_addr(GetAddr1Ptr(pframe))) {
return;
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 7c7b6495965f..0eadc23a7d54 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -317,7 +317,7 @@ static signed int recvframe_chkmic(struct adapter *adapter, union recv_frame *p
if (prxattrib->encrypt == _TKIP_) {
/* calculate mic code */
if (stainfo) {
- if (IS_MCAST(prxattrib->ra)) {
+ if (is_multicast_ether_addr(prxattrib->ra)) {
/* mickey =&psecuritypriv->dot118021XGrprxmickey.skey[0]; */
/* iv = precvframe->u.hdr.rx_data+prxattrib->hdrlen; */
/* rxdata_key_idx =(((iv[3])>>6)&0x3) ; */
@@ -352,18 +352,18 @@ static signed int recvframe_chkmic(struct adapter *adapter, union recv_frame *p
if (bmic_err == true) {
/* double check key_index for some timing issue , */
/* cannot compare with psecuritypriv->dot118021XGrpKeyid also cause timing issue */
- if ((IS_MCAST(prxattrib->ra) == true) && (prxattrib->key_index != pmlmeinfo->key_index))
+ if ((is_multicast_ether_addr(prxattrib->ra) == true) && (prxattrib->key_index != pmlmeinfo->key_index))
brpt_micerror = false;
if (prxattrib->bdecrypted && brpt_micerror)
- rtw_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra));
+ rtw_handle_tkip_mic_err(adapter, (u8)is_multicast_ether_addr(prxattrib->ra));
res = _FAIL;
} else {
/* mic checked ok */
if (!psecuritypriv->bcheck_grpkey &&
- IS_MCAST(prxattrib->ra))
+ is_multicast_ether_addr(prxattrib->ra))
psecuritypriv->bcheck_grpkey = true;
}
}
@@ -625,7 +625,7 @@ static void count_rx_stats(struct adapter *padapter, union recv_frame *prframe,
padapter->mlmepriv.LinkDetectInfo.NumRxOkInPeriod++;
- if ((!MacAddr_isBcst(pattrib->dst)) && (!IS_MCAST(pattrib->dst)))
+ if ((!is_broadcast_ether_addr(pattrib->dst)) && (!is_multicast_ether_addr(pattrib->dst)))
padapter->mlmepriv.LinkDetectInfo.NumRxUnicastOkInPeriod++;
if (sta)
@@ -654,7 +654,7 @@ static signed int sta2sta_data_frame(struct adapter *adapter, union recv_frame *
u8 *mybssid = get_bssid(pmlmepriv);
u8 *myhwaddr = myid(&adapter->eeprompriv);
u8 *sta_addr = NULL;
- signed int bmcast = IS_MCAST(pattrib->dst);
+ signed int bmcast = is_multicast_ether_addr(pattrib->dst);
if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
(check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
@@ -670,9 +670,9 @@ static signed int sta2sta_data_frame(struct adapter *adapter, union recv_frame *
goto exit;
}
- if (!memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- !memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
+ if (is_zero_ether_addr(pattrib->bssid) ||
+ is_zero_ether_addr(mybssid) ||
+ (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
ret = _FAIL;
goto exit;
}
@@ -690,7 +690,7 @@ static signed int sta2sta_data_frame(struct adapter *adapter, union recv_frame *
} else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
if (bmcast) {
/* For AP mode, if DA == MCAST, then BSSID should be also MCAST */
- if (!IS_MCAST(pattrib->bssid)) {
+ if (!is_multicast_ether_addr(pattrib->bssid)) {
ret = _FAIL;
goto exit;
}
@@ -741,7 +741,7 @@ static signed int ap2sta_data_frame(struct adapter *adapter, union recv_frame *p
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
u8 *mybssid = get_bssid(pmlmepriv);
u8 *myhwaddr = myid(&adapter->eeprompriv);
- signed int bmcast = IS_MCAST(pattrib->dst);
+ signed int bmcast = is_multicast_ether_addr(pattrib->dst);
if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) &&
(check_fwstate(pmlmepriv, _FW_LINKED) == true ||
@@ -762,9 +762,9 @@ static signed int ap2sta_data_frame(struct adapter *adapter, union recv_frame *p
/* check BSSID */
- if (!memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- !memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
- (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
+ if (is_zero_ether_addr(pattrib->bssid) ||
+ is_zero_ether_addr(mybssid) ||
+ (memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
if (!bmcast)
issue_deauth(adapter, pattrib->bssid, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
@@ -1329,7 +1329,7 @@ static signed int validate_recv_data_frame(struct adapter *adapter, union recv_f
}
if (pattrib->privacy) {
- GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, IS_MCAST(pattrib->ra));
+ GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, is_multicast_ether_addr(pattrib->ra));
SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt);
} else {
@@ -1354,7 +1354,7 @@ static signed int validate_80211w_mgmt(struct adapter *adapter, union recv_frame
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) && check_fwstate(pmlmepriv, _FW_LINKED) &&
adapter->securitypriv.binstallBIPkey == true) {
/* unicast management frame decrypt */
- if (pattrib->privacy && !(IS_MCAST(GetAddr1Ptr(ptr))) &&
+ if (pattrib->privacy && !(is_multicast_ether_addr(GetAddr1Ptr(ptr))) &&
(subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC || subtype == WIFI_ACTION)) {
u8 *mgmt_DATA;
u32 data_len = 0;
@@ -1381,7 +1381,7 @@ static signed int validate_80211w_mgmt(struct adapter *adapter, union recv_frame
kfree(mgmt_DATA);
if (!precv_frame)
goto validate_80211w_fail;
- } else if (IS_MCAST(GetAddr1Ptr(ptr)) &&
+ } else if (is_multicast_ether_addr(GetAddr1Ptr(ptr)) &&
(subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC)) {
signed int BIP_ret = _SUCCESS;
/* verify BIP MME IE of broadcast/multicast de-auth/disassoc packet */
@@ -2041,7 +2041,7 @@ static int recv_func(struct adapter *padapter, union recv_frame *rframe)
/* check if need to enqueue into uc_swdec_pending_queue*/
if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
- !IS_MCAST(prxattrib->ra) && prxattrib->encrypt > 0 &&
+ !is_multicast_ether_addr(prxattrib->ra) && prxattrib->encrypt > 0 &&
(prxattrib->bdecrypted == 0 || psecuritypriv->sw_decrypt == true) &&
psecuritypriv->ndisauthtype == Ndis802_11AuthModeWPAPSK &&
!psecuritypriv->busetkipkey) {
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index ac731415f733..7ecdaa2eeaf3 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -486,7 +486,7 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
if (pattrib->encrypt == _TKIP_) {
{
- if (IS_MCAST(pattrib->ra))
+ if (is_multicast_ether_addr(pattrib->ra))
prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
else
prwskey = pattrib->dot118021x_UncstKey.skey;
@@ -554,7 +554,7 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
if (prxattrib->encrypt == _TKIP_) {
stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
if (stainfo) {
- if (IS_MCAST(prxattrib->ra)) {
+ if (is_multicast_ether_addr(prxattrib->ra)) {
static unsigned long start;
static u32 no_gkey_bc_cnt;
static u32 no_gkey_mc_cnt;
@@ -1051,7 +1051,7 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
/* 4 start to encrypt each fragment */
if (pattrib->encrypt == _AES_) {
- if (IS_MCAST(pattrib->ra))
+ if (is_multicast_ether_addr(pattrib->ra))
prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
else
prwskey = pattrib->dot118021x_UncstKey.skey;
@@ -1305,7 +1305,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
if (prxattrib->encrypt == _AES_) {
stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
if (stainfo) {
- if (IS_MCAST(prxattrib->ra)) {
+ if (is_multicast_ether_addr(prxattrib->ra)) {
static unsigned long start;
static u32 no_gkey_bc_cnt;
static u32 no_gkey_mc_cnt;
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index c7de81f21bec..1593980d2c6a 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -471,7 +471,7 @@ struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
if (!hwaddr)
return NULL;
- if (IS_MCAST(hwaddr))
+ if (is_multicast_ether_addr(hwaddr))
addr = bc_addr;
else
addr = hwaddr;
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index ba39c8b1a9ae..7fac9ca3e9a0 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -1779,10 +1779,9 @@ void adaptive_early_32k(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
void rtw_alloc_macid(struct adapter *padapter, struct sta_info *psta)
{
int i;
- u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
- if (!memcmp(psta->hwaddr, bc_addr, ETH_ALEN))
+ if (is_broadcast_ether_addr(psta->hwaddr))
return;
if (!memcmp(psta->hwaddr, myid(&padapter->eeprompriv), ETH_ALEN)) {
@@ -1807,10 +1806,9 @@ void rtw_alloc_macid(struct adapter *padapter, struct sta_info *psta)
void rtw_release_macid(struct adapter *padapter, struct sta_info *psta)
{
- u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
- if (!memcmp(psta->hwaddr, bc_addr, ETH_ALEN))
+ if (is_broadcast_ether_addr(psta->hwaddr))
return;
if (!memcmp(psta->hwaddr, myid(&padapter->eeprompriv), ETH_ALEN))
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index a22512633d1b..b1965ec0181f 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -473,7 +473,7 @@ static s32 update_attrib_sec_info(struct adapter *padapter, struct pkt_attrib *p
signed int res = _SUCCESS;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
- signed int bmcast = IS_MCAST(pattrib->ra);
+ signed int bmcast = is_multicast_ether_addr(pattrib->ra);
memset(pattrib->dot118021x_UncstKey.skey, 0, 16);
memset(pattrib->dot11tkiptxmickey.skey, 0, 16);
@@ -691,7 +691,7 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
else if (pattrib->dhcp_pkt == 1)
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SPECIAL_PACKET, 1);
- bmcast = IS_MCAST(pattrib->ra);
+ bmcast = is_multicast_ether_addr(pattrib->ra);
/* get sta_info */
if (bmcast) {
@@ -765,7 +765,7 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
u8 hw_hdr_offset = 0;
- signed int bmcst = IS_MCAST(pattrib->ra);
+ signed int bmcst = is_multicast_ether_addr(pattrib->ra);
hw_hdr_offset = TXDESC_OFFSET;
@@ -1035,7 +1035,7 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
u8 *pbuf_start;
- s32 bmcst = IS_MCAST(pattrib->ra);
+ s32 bmcst = is_multicast_ether_addr(pattrib->ra);
s32 res = _SUCCESS;
if (!pxmitframe->buf_addr)
@@ -1143,7 +1143,7 @@ s32 rtw_mgmt_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, s
u8 subtype;
struct sta_info *psta = NULL;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
- s32 bmcst = IS_MCAST(pattrib->ra);
+ s32 bmcst = is_multicast_ether_addr(pattrib->ra);
u8 *BIP_AAD = NULL;
u8 *MGMT_body = NULL;
@@ -2016,7 +2016,7 @@ signed int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct x
struct sta_priv *pstapriv = &padapter->stapriv;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- signed int bmcst = IS_MCAST(pattrib->ra);
+ signed int bmcst = is_multicast_ether_addr(pattrib->ra);
bool update_tim = false;
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == false)
diff --git a/drivers/staging/rtl8723bs/hal/hal_intf.c b/drivers/staging/rtl8723bs/hal/hal_intf.c
index 6bb0ff8d7c78..7e3db8d3c910 100644
--- a/drivers/staging/rtl8723bs/hal/hal_intf.c
+++ b/drivers/staging/rtl8723bs/hal/hal_intf.c
@@ -217,7 +217,7 @@ s32 rtw_hal_mgnt_xmit(struct adapter *padapter, struct xmit_frame *pmgntframe)
/* memcpy(pmgntframe->attrib.ra, pwlanhdr->addr1, ETH_ALEN); */
if (padapter->securitypriv.binstallBIPkey == true) {
- if (IS_MCAST(pmgntframe->attrib.ra)) {
+ if (is_multicast_ether_addr(pmgntframe->attrib.ra)) {
pmgntframe->attrib.encrypt = _BIP_;
/* pmgntframe->attrib.bswenc = true; */
} else {
diff --git a/drivers/staging/rtl8723bs/hal/odm.c b/drivers/staging/rtl8723bs/hal/odm.c
index 31f65d817899..ea3b4cd32360 100644
--- a/drivers/staging/rtl8723bs/hal/odm.c
+++ b/drivers/staging/rtl8723bs/hal/odm.c
@@ -429,7 +429,7 @@ static void odm_RefreshRateAdaptiveMaskCE(struct dm_odm_t *pDM_Odm)
PSTA_INFO_T pstat = pDM_Odm->pODM_StaInfo[i];
if (IS_STA_VALID(pstat)) {
- if (IS_MCAST(pstat->hwaddr)) /* if (psta->mac_id == 1) */
+ if (is_multicast_ether_addr(pstat->hwaddr)) /* if (psta->mac_id == 1) */
continue;
if (true == ODM_RAStateCheck(pDM_Odm, pstat->rssi_stat.UndecoratedSmoothedPWDB, false, &pstat->rssi_level)) {
@@ -576,7 +576,7 @@ static void odm_RSSIMonitorCheckCE(struct dm_odm_t *pDM_Odm)
for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
psta = pDM_Odm->pODM_StaInfo[i];
if (IS_STA_VALID(psta)) {
- if (IS_MCAST(psta->hwaddr)) /* if (psta->mac_id == 1) */
+ if (is_multicast_ether_addr(psta->hwaddr)) /* if (psta->mac_id == 1) */
continue;
if (psta->rssi_stat.UndecoratedSmoothedPWDB == (-1))
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index 1e9e1089032b..c5219a4a4919 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -2609,7 +2609,7 @@ static void rtl8723b_fill_default_txdesc(
pmlmeinfo = &(pmlmeext->mlmext_info);
pattrib = &pxmitframe->attrib;
- bmcst = IS_MCAST(pattrib->ra);
+ bmcst = is_multicast_ether_addr(pattrib->ra);
ptxdesc = (struct txdesc_8723b *)pbuf;
diff --git a/drivers/staging/rtl8723bs/include/wifi.h b/drivers/staging/rtl8723bs/include/wifi.h
index f03e26818d45..53f9411fcc4c 100644
--- a/drivers/staging/rtl8723bs/include/wifi.h
+++ b/drivers/staging/rtl8723bs/include/wifi.h
@@ -211,21 +211,6 @@ enum {
#define GetAddr4Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 24))
-#define MacAddr_isBcst(addr) \
- (\
- ((addr[0] == 0xff) && (addr[1] == 0xff) && \
- (addr[2] == 0xff) && (addr[3] == 0xff) && \
- (addr[4] == 0xff) && (addr[5] == 0xff)) ? true : false \
-)
-
-static inline int IS_MCAST(unsigned char *da)
-{
- if ((*da) & 0x01)
- return true;
- else
- return false;
-}
-
static inline unsigned char *rtl8723bs_get_ra(unsigned char *pframe)
{
unsigned char *ra;
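The wifi.h hunk above deletes the driver-private MacAddr_isBcst() macro and IS_MCAST() helper; the call sites converted earlier in this patch rely on the generic helpers from <linux/etherdevice.h> instead. For reference, a paraphrase of what those helpers check (the in-tree versions may use word-sized loads on some architectures, but the semantics are the same):

#include <linux/types.h>

/* Paraphrased from include/linux/etherdevice.h -- illustrative only. */
static inline bool is_multicast_ether_addr(const u8 *addr)
{
	return addr[0] & 0x01;			/* I/G bit set => group address */
}

static inline bool is_broadcast_ether_addr(const u8 *addr)
{
	return (addr[0] & addr[1] & addr[2] &
		addr[3] & addr[4] & addr[5]) == 0xff;	/* all six octets 0xff */
}

static inline bool is_zero_ether_addr(const u8 *addr)
{
	return !(addr[0] | addr[1] | addr[2] |
		 addr[3] | addr[4] | addr[5]);		/* all six octets zero */
}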
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 2ae7843abdf7..af155fca39b8 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -95,14 +95,14 @@ static struct ieee80211_channel rtw_2ghz_channels[] = {
static void rtw_2g_channels_init(struct ieee80211_channel *channels)
{
memcpy((void *)channels, (void *)rtw_2ghz_channels,
- sizeof(struct ieee80211_channel)*RTW_2G_CHANNELS_NUM
+ sizeof(struct ieee80211_channel) * RTW_2G_CHANNELS_NUM
);
}
static void rtw_2g_rates_init(struct ieee80211_rate *rates)
{
memcpy(rates, rtw_g_rates,
- sizeof(struct ieee80211_rate)*RTW_G_RATES_NUM
+ sizeof(struct ieee80211_rate) * RTW_G_RATES_NUM
);
}
@@ -126,8 +126,8 @@ static struct ieee80211_supported_band *rtw_spt_band_alloc(
if (!spt_band)
goto exit;
- spt_band->channels = (struct ieee80211_channel *)(((u8 *)spt_band)+sizeof(struct ieee80211_supported_band));
- spt_band->bitrates = (struct ieee80211_rate *)(((u8 *)spt_band->channels)+sizeof(struct ieee80211_channel)*n_channels);
+ spt_band->channels = (struct ieee80211_channel *)(((u8 *)spt_band) + sizeof(struct ieee80211_supported_band));
+ spt_band->bitrates = (struct ieee80211_rate *)(((u8 *)spt_band->channels) + sizeof(struct ieee80211_channel) * n_channels);
spt_band->band = band;
spt_band->n_channels = n_channels;
spt_band->n_bitrates = n_bitrates;
@@ -247,10 +247,10 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
u32 wpsielen = 0;
u8 *wpsie = NULL;
- wpsie = rtw_get_wps_ie(pnetwork->network.ies+_FIXED_IE_LENGTH_, pnetwork->network.ie_length-_FIXED_IE_LENGTH_, NULL, &wpsielen);
+ wpsie = rtw_get_wps_ie(pnetwork->network.ies + _FIXED_IE_LENGTH_, pnetwork->network.ie_length - _FIXED_IE_LENGTH_, NULL, &wpsielen);
if (wpsie && wpsielen > 0)
- psr = rtw_get_wps_attr_content(wpsie, wpsielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
+ psr = rtw_get_wps_attr_content(wpsie, wpsielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
if (sr != 0) {
/* it means under processing WPS */
@@ -266,7 +266,6 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
}
/* spin_unlock_bh(&pwdev_priv->scan_req_lock); */
-
channel = pnetwork->network.configuration.ds_config;
freq = rtw_ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
@@ -276,10 +275,10 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
/* We've set wiphy's signal_type as CFG80211_SIGNAL_TYPE_MBM: signal strength in mBm (100*dBm) */
if (check_fwstate(pmlmepriv, _FW_LINKED) == true &&
- is_same_network(&pmlmepriv->cur_network.network, &pnetwork->network, 0)) {
- notify_signal = 100*translate_percentage_to_dbm(padapter->recvpriv.signal_strength);/* dbm */
+ is_same_network(&pmlmepriv->cur_network.network, &pnetwork->network, 0)) {
+ notify_signal = 100 * translate_percentage_to_dbm(padapter->recvpriv.signal_strength);/* dbm */
} else {
- notify_signal = 100*translate_percentage_to_dbm(pnetwork->network.phy_info.signal_strength);/* dbm */
+ notify_signal = 100 * translate_percentage_to_dbm(pnetwork->network.phy_info.signal_strength);/* dbm */
}
buf = kzalloc(MAX_BSSINFO_LEN, GFP_ATOMIC);
@@ -314,7 +313,7 @@ struct cfg80211_bss *rtw_cfg80211_inform_bss(struct adapter *padapter, struct wl
*((__le64 *)pbuf) = cpu_to_le64(notify_timestamp);
bss = cfg80211_inform_bss_frame(wiphy, notify_channel, (struct ieee80211_mgmt *)buf,
- len, notify_signal, GFP_ATOMIC);
+ len, notify_signal, GFP_ATOMIC);
if (unlikely(!bss))
goto exit;
@@ -346,9 +345,9 @@ int rtw_cfg80211_check_bss(struct adapter *padapter)
notify_channel = ieee80211_get_channel(padapter->rtw_wdev->wiphy, freq);
bss = cfg80211_get_bss(padapter->rtw_wdev->wiphy, notify_channel,
- pnetwork->mac_address, pnetwork->ssid.ssid,
- pnetwork->ssid.ssid_length,
- IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+ pnetwork->mac_address, pnetwork->ssid.ssid,
+ pnetwork->ssid.ssid_length,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
cfg80211_put_bss(padapter->rtw_wdev->wiphy, bss);
@@ -449,20 +448,20 @@ check_bss:
roam_info.links[0].channel = notify_channel;
roam_info.links[0].bssid = cur_network->network.mac_address;
roam_info.req_ie =
- pmlmepriv->assoc_req+sizeof(struct ieee80211_hdr_3addr)+2;
+ pmlmepriv->assoc_req + sizeof(struct ieee80211_hdr_3addr) + 2;
roam_info.req_ie_len =
- pmlmepriv->assoc_req_len-sizeof(struct ieee80211_hdr_3addr)-2;
+ pmlmepriv->assoc_req_len - sizeof(struct ieee80211_hdr_3addr) - 2;
roam_info.resp_ie =
- pmlmepriv->assoc_rsp+sizeof(struct ieee80211_hdr_3addr)+6;
+ pmlmepriv->assoc_rsp + sizeof(struct ieee80211_hdr_3addr) + 6;
roam_info.resp_ie_len =
- pmlmepriv->assoc_rsp_len-sizeof(struct ieee80211_hdr_3addr)-6;
+ pmlmepriv->assoc_rsp_len - sizeof(struct ieee80211_hdr_3addr) - 6;
cfg80211_roamed(padapter->pnetdev, &roam_info, GFP_ATOMIC);
} else {
cfg80211_connect_result(padapter->pnetdev, cur_network->network.mac_address
- , pmlmepriv->assoc_req+sizeof(struct ieee80211_hdr_3addr)+2
- , pmlmepriv->assoc_req_len-sizeof(struct ieee80211_hdr_3addr)-2
- , pmlmepriv->assoc_rsp+sizeof(struct ieee80211_hdr_3addr)+6
- , pmlmepriv->assoc_rsp_len-sizeof(struct ieee80211_hdr_3addr)-6
+ , pmlmepriv->assoc_req + sizeof(struct ieee80211_hdr_3addr) + 2
+ , pmlmepriv->assoc_req_len - sizeof(struct ieee80211_hdr_3addr) - 2
+ , pmlmepriv->assoc_rsp + sizeof(struct ieee80211_hdr_3addr) + 6
+ , pmlmepriv->assoc_rsp_len - sizeof(struct ieee80211_hdr_3addr) - 6
, WLAN_STATUS_SUCCESS, GFP_ATOMIC);
}
}
@@ -487,7 +486,7 @@ void rtw_cfg80211_indicate_disconnect(struct adapter *padapter)
NULL, 0, true, GFP_ATOMIC);
} else {
cfg80211_connect_result(padapter->pnetdev, NULL, NULL, 0, NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_ATOMIC/*GFP_KERNEL*/);
+ WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_ATOMIC/*GFP_KERNEL*/);
}
}
}
@@ -708,7 +707,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
param->u.crypt.err = 0;
param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
- if (param_len < (u32) ((u8 *) param->u.crypt.key - (u8 *) param) + param->u.crypt.key_len) {
+ if (param_len < (u32)((u8 *)param->u.crypt.key - (u8 *)param) + param->u.crypt.key_len) {
ret = -EINVAL;
goto exit;
}
@@ -778,7 +777,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
psta->ieee8021x_blocked = false;
if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
+ (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
}
@@ -827,7 +826,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
pbcmc_sta->ieee8021x_blocked = false;
if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
+ (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
}
}
@@ -945,9 +944,9 @@ static int cfg80211_rtw_del_key(struct wiphy *wiphy, struct net_device *ndev,
}
static int cfg80211_rtw_set_default_key(struct wiphy *wiphy,
- struct net_device *ndev, int link_id, u8 key_index
- , bool unicast, bool multicast
- )
+ struct net_device *ndev, int link_id,
+ u8 key_index, bool unicast,
+ bool multicast)
{
struct adapter *padapter = rtw_netdev_priv(ndev);
struct security_priv *psecuritypriv = &padapter->securitypriv;
@@ -1019,10 +1018,10 @@ static int cfg80211_rtw_get_station(struct wiphy *wiphy,
}
/* for Ad-Hoc/AP mode */
- if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)
- || check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)
- || check_fwstate(pmlmepriv, WIFI_AP_STATE))
- && check_fwstate(pmlmepriv, _FW_LINKED)) {
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_AP_STATE)) &&
+ check_fwstate(pmlmepriv, _FW_LINKED)) {
/* TODO: should acquire station info... */
}
@@ -1122,9 +1121,10 @@ void rtw_cfg80211_unlink_bss(struct adapter *padapter, struct wlan_network *pnet
struct wlan_bssid_ex *select_network = &pnetwork->network;
bss = cfg80211_get_bss(wiphy, NULL/*notify_channel*/,
- select_network->mac_address, select_network->ssid.ssid,
- select_network->ssid.ssid_length, IEEE80211_BSS_TYPE_ANY,
- IEEE80211_PRIVACY_ANY);
+ select_network->mac_address,
+ select_network->ssid.ssid,
+ select_network->ssid.ssid_length,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
if (bss) {
cfg80211_unlink_bss(wiphy, bss);
@@ -1219,7 +1219,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
spin_unlock_bh(&pwdev_priv->scan_req_lock);
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
- if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS|_FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true) {
+ if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS | _FW_UNDER_SURVEY | _FW_UNDER_LINKING) == true) {
need_indicate_scan_done = true;
goto check_need_indicate_scan_done;
}
@@ -1273,7 +1273,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
}
/* parsing channels, n_channels */
- memset(ch, 0, sizeof(struct rtw_ieee80211_channel)*RTW_CHANNEL_SCAN_AMOUNT);
+ memset(ch, 0, sizeof(struct rtw_ieee80211_channel) * RTW_CHANNEL_SCAN_AMOUNT);
for (i = 0; i < request->n_channels && i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
ch[i].hw_value = request->channels[i]->hw_value;
ch[i].flags = request->channels[i]->flags;
@@ -1287,7 +1287,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
} else if (request->n_channels <= 4) {
for (j = request->n_channels - 1; j >= 0; j--)
for (i = 0; i < survey_times; i++)
- memcpy(&ch[j*survey_times+i], &ch[j], sizeof(struct rtw_ieee80211_channel));
+ memcpy(&ch[j * survey_times + i], &ch[j], sizeof(struct rtw_ieee80211_channel));
_status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT, ch, survey_times * request->n_channels);
} else {
_status = rtw_sitesurvey_cmd(padapter, ssid, RTW_SSID_SCAN_AMOUNT, NULL, 0);
@@ -1329,7 +1329,7 @@ static int rtw_cfg80211_set_wpa_version(struct security_priv *psecuritypriv, u32
}
static int rtw_cfg80211_set_auth_type(struct security_priv *psecuritypriv,
- enum nl80211_auth_type sme_auth_type)
+ enum nl80211_auth_type sme_auth_type)
{
switch (sme_auth_type) {
case NL80211_AUTHTYPE_AUTOMATIC:
@@ -1436,7 +1436,7 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
goto exit;
}
- if (ielen > MAX_WPA_IE_LEN+MAX_WPS_IE_LEN+MAX_P2P_IE_LEN) {
+ if (ielen > MAX_WPA_IE_LEN + MAX_WPS_IE_LEN + MAX_P2P_IE_LEN) {
ret = -EINVAL;
goto exit;
}
@@ -1456,19 +1456,19 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
pwpa = rtw_get_wpa_ie(buf, &wpa_ielen, ielen);
if (pwpa && wpa_ielen > 0) {
- if (rtw_parse_wpa_ie(pwpa, wpa_ielen+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ if (rtw_parse_wpa_ie(pwpa, wpa_ielen + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK;
- memcpy(padapter->securitypriv.supplicant_ie, &pwpa[0], wpa_ielen+2);
+ memcpy(padapter->securitypriv.supplicant_ie, &pwpa[0], wpa_ielen + 2);
}
}
pwpa2 = rtw_get_wpa2_ie(buf, &wpa2_ielen, ielen);
if (pwpa2 && wpa2_ielen > 0) {
- if (rtw_parse_wpa2_ie(pwpa2, wpa2_ielen+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ if (rtw_parse_wpa2_ie(pwpa2, wpa2_ielen + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK;
- memcpy(padapter->securitypriv.supplicant_ie, &pwpa2[0], wpa2_ielen+2);
+ memcpy(padapter->securitypriv.supplicant_ie, &pwpa2[0], wpa2_ielen + 2);
}
}
@@ -1634,7 +1634,7 @@ leave_ibss:
}
static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_connect_params *sme)
+ struct cfg80211_connect_params *sme)
{
int ret = 0;
enum ndis_802_11_authentication_mode authmode;
@@ -1709,7 +1709,7 @@ static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
/* For WEP Shared auth */
if ((psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_Shared ||
- psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_Auto) && sme->key) {
+ psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_Auto) && sme->key) {
u32 wep_key_idx, wep_key_len, wep_total_len;
struct ndis_802_11_wep *pwep = NULL;
@@ -1807,15 +1807,14 @@ static int cfg80211_rtw_disconnect(struct wiphy *wiphy, struct net_device *ndev,
}
static int cfg80211_rtw_set_txpower(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- enum nl80211_tx_power_setting type, int mbm)
+ struct wireless_dev *wdev,
+ enum nl80211_tx_power_setting type, int mbm)
{
return 0;
}
static int cfg80211_rtw_get_txpower(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- int *dbm)
+ struct wireless_dev *wdev, int *dbm)
{
*dbm = (12);
@@ -1851,9 +1850,8 @@ static int cfg80211_rtw_set_pmksa(struct wiphy *wiphy,
u8 index, blInserted = false;
struct adapter *padapter = rtw_netdev_priv(ndev);
struct security_priv *psecuritypriv = &padapter->securitypriv;
- u8 strZeroMacAddress[ETH_ALEN] = { 0x00 };
- if (!memcmp((u8 *)pmksa->bssid, strZeroMacAddress, ETH_ALEN))
+ if (is_zero_ether_addr((u8 *)pmksa->bssid))
return -EINVAL;
blInserted = false;
@@ -1863,7 +1861,7 @@ static int cfg80211_rtw_set_pmksa(struct wiphy *wiphy,
if (!memcmp(psecuritypriv->PMKIDList[index].Bssid, (u8 *)pmksa->bssid, ETH_ALEN)) {
memcpy(psecuritypriv->PMKIDList[index].PMKID, (u8 *)pmksa->pmkid, WLAN_PMKID_LEN);
psecuritypriv->PMKIDList[index].bUsed = true;
- psecuritypriv->PMKIDIndex = index+1;
+ psecuritypriv->PMKIDIndex = index + 1;
blInserted = true;
break;
}
@@ -2064,8 +2062,8 @@ static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struc
_rtw_xmit_entry(skb, padapter->pnetdev);
return NETDEV_TX_OK;
- } else if ((frame_control & (IEEE80211_FCTL_FTYPE|IEEE80211_FCTL_STYPE)) ==
- (IEEE80211_FTYPE_MGMT|IEEE80211_STYPE_ACTION)) {
+ } else if ((frame_control & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION)) {
/* only for action frames */
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
@@ -2174,7 +2172,7 @@ static int rtw_cfg80211_add_monitor_if(struct adapter *padapter, char *name, str
goto out;
*ndev = pwdev_priv->pmon_ndev = mon_ndev;
- memcpy(pwdev_priv->ifname_mon, name, IFNAMSIZ+1);
+ memcpy(pwdev_priv->ifname_mon, name, IFNAMSIZ + 1);
out:
if (ret && mon_wdev) {
@@ -2228,7 +2226,7 @@ static struct wireless_dev *
}
static int cfg80211_rtw_del_virtual_intf(struct wiphy *wiphy,
- struct wireless_dev *wdev
+ struct wireless_dev *wdev
)
{
struct net_device *ndev = wdev_to_ndev(wdev);
@@ -2268,14 +2266,14 @@ static int rtw_add_beacon(struct adapter *adapter, const u8 *head, size_t head_l
if (head_len < 24)
return -EINVAL;
- pbuf = rtw_zmalloc(head_len+tail_len);
+ pbuf = rtw_zmalloc(head_len + tail_len);
if (!pbuf)
return -ENOMEM;
- memcpy(pbuf, (void *)head+24, head_len-24);/* 24 =beacon header len. */
- memcpy(pbuf+head_len-24, (void *)tail, tail_len);
+ memcpy(pbuf, (void *)head + 24, head_len - 24);/* 24 =beacon header len. */
+ memcpy(pbuf + head_len - 24, (void *)tail, tail_len);
- len = head_len+tail_len-24;
+ len = head_len + tail_len - 24;
/* check wps ie if inclued */
rtw_get_wps_ie(pbuf + _FIXED_IE_LENGTH_, len - _FIXED_IE_LENGTH_, NULL, &wps_ielen);
@@ -2295,13 +2293,14 @@ static int rtw_add_beacon(struct adapter *adapter, const u8 *head, size_t head_l
}
static int cfg80211_rtw_start_ap(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_ap_settings *settings)
+ struct cfg80211_ap_settings *settings)
{
int ret = 0;
struct adapter *adapter = rtw_netdev_priv(ndev);
- ret = rtw_add_beacon(adapter, settings->beacon.head, settings->beacon.head_len,
- settings->beacon.tail, settings->beacon.tail_len);
+ ret = rtw_add_beacon(adapter, settings->beacon.head,
+ settings->beacon.head_len, settings->beacon.tail,
+ settings->beacon.tail_len);
adapter->mlmeextpriv.mlmext_info.hidden_ssid_mode = settings->hidden_ssid;
@@ -2318,8 +2317,9 @@ static int cfg80211_rtw_start_ap(struct wiphy *wiphy, struct net_device *ndev,
return ret;
}
-static int cfg80211_rtw_change_beacon(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_beacon_data *info)
+static int cfg80211_rtw_change_beacon(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct cfg80211_beacon_data *info)
{
struct adapter *adapter = rtw_netdev_priv(ndev);
@@ -2332,9 +2332,10 @@ static int cfg80211_rtw_stop_ap(struct wiphy *wiphy, struct net_device *ndev,
return 0;
}
-static int cfg80211_rtw_add_station(struct wiphy *wiphy, struct net_device *ndev,
- const u8 *mac,
- struct station_parameters *params)
+static int cfg80211_rtw_add_station(struct wiphy *wiphy,
+ struct net_device *ndev,
+ const u8 *mac,
+ struct station_parameters *params)
{
return 0;
}
@@ -2396,8 +2397,10 @@ static int cfg80211_rtw_del_station(struct wiphy *wiphy, struct net_device *ndev
return ret;
}
-static int cfg80211_rtw_change_station(struct wiphy *wiphy, struct net_device *ndev,
- const u8 *mac, struct station_parameters *params)
+static int cfg80211_rtw_change_station(struct wiphy *wiphy,
+ struct net_device *ndev,
+ const u8 *mac,
+ struct station_parameters *params)
{
return 0;
}
@@ -2422,8 +2425,10 @@ static struct sta_info *rtw_sta_info_get_by_idx(const int idx, struct sta_priv *
return psta;
}
-static int cfg80211_rtw_dump_station(struct wiphy *wiphy, struct net_device *ndev,
- int idx, u8 *mac, struct station_info *sinfo)
+static int cfg80211_rtw_dump_station(struct wiphy *wiphy,
+ struct net_device *ndev,
+ int idx, u8 *mac,
+ struct station_info *sinfo)
{
int ret = 0;
struct adapter *padapter = rtw_netdev_priv(ndev);
@@ -2445,8 +2450,9 @@ exit:
return ret;
}
-static int cfg80211_rtw_change_bss(struct wiphy *wiphy, struct net_device *ndev,
- struct bss_parameters *params)
+static int cfg80211_rtw_change_bss(struct wiphy *wiphy,
+ struct net_device *ndev,
+ struct bss_parameters *params)
{
return 0;
}
@@ -2529,10 +2535,9 @@ exit:
return ret;
}
-static int cfg80211_rtw_mgmt_tx(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- struct cfg80211_mgmt_tx_params *params,
- u64 *cookie)
+static int cfg80211_rtw_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ struct cfg80211_mgmt_tx_params *params,
+ u64 *cookie)
{
struct net_device *ndev = wdev_to_ndev(wdev);
struct ieee80211_channel *chan = params->chan;
@@ -2558,7 +2563,7 @@ static int cfg80211_rtw_mgmt_tx(struct wiphy *wiphy,
pwdev_priv = adapter_wdev_data(padapter);
/* cookie generation */
- *cookie = (unsigned long) buf;
+ *cookie = (unsigned long)buf;
/* indicate ack before issue frame to avoid racing with rsp frame */
rtw_cfg80211_mgmt_tx_status(padapter, *cookie, buf, len, ack, GFP_KERNEL);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 40a3157fb735..c81b30f1f1b0 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -13,7 +13,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
-#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV+30)
+#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
static int wpa_set_auth_algs(struct net_device *dev, u32 value)
{
@@ -40,7 +40,6 @@ static int wpa_set_auth_algs(struct net_device *dev, u32 value)
}
return ret;
-
}
static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
@@ -80,7 +79,6 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
}
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
-
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
@@ -127,7 +125,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
goto exit;
}
- memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->key_material, pwep->key_length);
+ memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0], pwep->key_material, pwep->key_length);
psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->key_length;
rtw_set_key(padapter, psecuritypriv, wep_key_idx, 0, true);
}
@@ -149,7 +147,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
psta->ieee8021x_blocked = false;
if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
+ (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
}
@@ -158,8 +156,8 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */
/* DEBUG_ERR(("\nset key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len)); */
- memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
- memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
+ memcpy(psta->dot11tkiptxmickey.skey, &param->u.crypt.key[16], 8);
+ memcpy(psta->dot11tkiprxmickey.skey, &param->u.crypt.key[24], 8);
padapter->securitypriv.busetkipkey = false;
/* _set_timer(&padapter->securitypriv.tkip_timer, 50); */
@@ -171,8 +169,8 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/* only TKIP group key need to install this */
if (param->u.crypt.key_len > 16) {
- memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
- memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[16], 8);
+ memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[24], 8);
}
padapter->securitypriv.binstallGrpkey = true;
@@ -202,7 +200,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
pbcmc_sta->ieee8021x_blocked = false;
if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
- (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
+ (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled)) {
pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
}
}
@@ -317,18 +315,18 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
while (cnt < ielen) {
eid = buf[cnt];
- if ((eid == WLAN_EID_VENDOR_SPECIFIC) && (!memcmp(&buf[cnt+2], wps_oui, 4))) {
- padapter->securitypriv.wps_ie_len = ((buf[cnt+1]+2) < MAX_WPS_IE_LEN) ? (buf[cnt+1]+2):MAX_WPS_IE_LEN;
+ if ((eid == WLAN_EID_VENDOR_SPECIFIC) && (!memcmp(&buf[cnt + 2], wps_oui, 4))) {
+ padapter->securitypriv.wps_ie_len = ((buf[cnt + 1] + 2) < MAX_WPS_IE_LEN) ? (buf[cnt + 1] + 2) : MAX_WPS_IE_LEN;
memcpy(padapter->securitypriv.wps_ie, &buf[cnt], padapter->securitypriv.wps_ie_len);
set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS);
- cnt += buf[cnt+1]+2;
+ cnt += buf[cnt + 1] + 2;
break;
} else {
- cnt += buf[cnt+1]+2; /* goto next */
+ cnt += buf[cnt + 1] + 2; /* goto next */
}
}
}
@@ -336,8 +334,8 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
/* TKIP and AES disallow multicast packets until installing group key */
if (padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_ ||
- padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_WTMIC_ ||
- padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)
+ padapter->securitypriv.dot11PrivacyAlgrthm == _TKIP_WTMIC_ ||
+ padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)
/* WPS open need to enable multicast */
/* check_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS) == true) */
rtw_hal_set_hwreg(padapter, HW_VAR_OFF_RCR_AM, null_addr);
@@ -361,7 +359,7 @@ static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
/* ret = ieee80211_wpa_enable(ieee, value); */
- switch ((value)&0xff) {
+ switch ((value) & 0xff) {
case 1: /* WPA */
padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK; /* WPA_PSK */
padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
@@ -392,7 +390,6 @@ static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
* be set.
*/
break;
-
}
case IEEE_PARAM_PRIVACY_INVOKED:
@@ -426,17 +423,12 @@ static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
default:
-
-
ret = -EOPNOTSUPP;
-
break;
-
}
return ret;
-
}
static int wpa_mlme(struct net_device *dev, u32 command, u32 reason)
@@ -465,7 +457,6 @@ static int wpa_mlme(struct net_device *dev, u32 command, u32 reason)
}
return ret;
-
}
static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
@@ -488,7 +479,6 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
}
switch (param->cmd) {
-
case IEEE_CMD_SET_WPA_PARAM:
ret = wpa_set_param(dev, param->u.wpa_param.name, param->u.wpa_param.value);
break;
@@ -509,7 +499,6 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
default:
ret = -EOPNOTSUPP;
break;
-
}
if (ret == 0 && copy_to_user(p->pointer, param, p->length))
@@ -529,7 +518,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
struct sta_info *psta = NULL, *pbcmc_sta = NULL;
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
struct sta_priv *pstapriv = &padapter->stapriv;
char *txkey = padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey;
char *rxkey = padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey;
@@ -570,7 +559,6 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
goto exit;
}
-
if (strcmp(param->u.crypt.alg, "WEP") == 0 && !psta) {
wep_key_idx = param->u.crypt.idx;
wep_key_len = param->u.crypt.key_len;
@@ -580,7 +568,6 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
goto exit;
}
-
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material);
@@ -591,7 +578,6 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
pwep->key_length = wep_key_len;
pwep->length = wep_total_len;
-
}
pwep->key_index = wep_key_idx;
@@ -609,10 +595,9 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
}
-
psecuritypriv->dot11PrivacyKeyIndex = wep_key_idx;
- memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->key_material, pwep->key_length);
+ memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0], pwep->key_material, pwep->key_length);
psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->key_length;
@@ -621,7 +606,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
/* don't update "psecuritypriv->dot11PrivacyAlgrthm" and */
/* psecuritypriv->dot11PrivacyKeyIndex =keyid", but can rtw_set_key to cam */
- memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->key_material, pwep->key_length);
+ memcpy(&psecuritypriv->dot11DefKey[wep_key_idx].skey[0], pwep->key_material, pwep->key_length);
psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->key_length;
@@ -629,10 +614,8 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
}
goto exit;
-
}
-
if (!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) { /* group key */
if (param->u.crypt.set_tx == 1) {
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
@@ -640,7 +623,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
- psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
} else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
@@ -649,13 +632,12 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
/* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
/* set mic key */
- memcpy(txkey, &(param->u.crypt.key[16]), 8);
- memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ memcpy(txkey, &param->u.crypt.key[16], 8);
+ memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &param->u.crypt.key[24], 8);
psecuritypriv->busetkipkey = true;
- }
- else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
+ } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
psecuritypriv->dot118021XGrpPrivacy = _AES_;
memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
@@ -679,7 +661,6 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
}
goto exit;
-
}
if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) { /* psk/802_1x */
@@ -696,13 +677,12 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
/* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
/* set mic key */
- memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
- memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
+ memcpy(psta->dot11tkiptxmickey.skey, &param->u.crypt.key[16], 8);
+ memcpy(psta->dot11tkiprxmickey.skey, &param->u.crypt.key[24], 8);
psecuritypriv->busetkipkey = true;
} else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
-
psta->dot118021XPrivacy = _AES_;
} else {
psta->dot118021XPrivacy = _NO_PRIVACY_;
@@ -726,8 +706,8 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
/* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
/* set mic key */
- memcpy(txkey, &(param->u.crypt.key[16]), 8);
- memcpy(rxkey, &(param->u.crypt.key[24]), 8);
+ memcpy(txkey, &param->u.crypt.key[16], 8);
+ memcpy(rxkey, &param->u.crypt.key[24], 8);
psecuritypriv->busetkipkey = true;
@@ -760,14 +740,13 @@ exit:
kfree(pwep);
return ret;
-
}
static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int len)
{
int ret = 0;
struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
unsigned char *pbuf = param->u.bcn_ie.buf;
@@ -779,15 +758,12 @@ static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int
if ((pstapriv->max_num_sta > NUM_STA) || (pstapriv->max_num_sta <= 0))
pstapriv->max_num_sta = NUM_STA;
-
- if (rtw_check_beacon_data(padapter, pbuf, (len-12-2)) == _SUCCESS)/* 12 = param header, 2:no packed */
+ if (rtw_check_beacon_data(padapter, pbuf, (len - 12 - 2)) == _SUCCESS)/* 12 = param header, 2:no packed */
ret = 0;
else
ret = -EINVAL;
-
return ret;
-
}
static void rtw_hostapd_sta_flush(struct net_device *dev)
@@ -808,10 +784,10 @@ static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
int ret = 0;
struct sta_info *psta = NULL;
struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
- if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
+ if (check_fwstate(pmlmepriv, (_FW_LINKED | WIFI_AP_STATE)) != true)
return -EINVAL;
if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
@@ -838,9 +814,8 @@ static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
memcpy(psta->bssrateset, param->u.add_sta.tx_supp_rates, 16);
-
/* check wmm cap. */
- if (WLAN_STA_WME&flags)
+ if (WLAN_STA_WME & flags)
psta->qos_option = 1;
else
psta->qos_option = 0;
@@ -849,7 +824,7 @@ static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
psta->qos_option = 0;
/* check 802.11n ht cap. */
- if (WLAN_STA_HT&flags) {
+ if (WLAN_STA_HT & flags) {
psta->htpriv.ht_option = true;
psta->qos_option = 1;
memcpy((void *)&psta->htpriv.ht_cap, (void *)&param->u.add_sta.ht_cap, sizeof(struct ieee80211_ht_cap));
@@ -857,18 +832,16 @@ static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
psta->htpriv.ht_option = false;
}
- if (pmlmepriv->htpriv.ht_option == false)
+ if (!pmlmepriv->htpriv.ht_option)
psta->htpriv.ht_option = false;
update_sta_info_apmode(padapter, psta);
-
} else {
ret = -ENOMEM;
}
return ret;
-
}
static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
@@ -876,10 +849,10 @@ static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
int ret = 0;
struct sta_info *psta = NULL;
struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
- if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
+ if (check_fwstate(pmlmepriv, (_FW_LINKED | WIFI_AP_STATE)) != true)
return -EINVAL;
if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
@@ -897,18 +870,15 @@ static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
list_del_init(&psta->asoc_list);
pstapriv->asoc_list_cnt--;
updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
-
}
spin_unlock_bh(&pstapriv->asoc_list_lock);
associated_clients_update(padapter, updated);
psta = NULL;
-
}
return ret;
-
}
static int rtw_ioctl_get_sta_data(struct net_device *dev, struct ieee_param *param, int len)
@@ -916,12 +886,12 @@ static int rtw_ioctl_get_sta_data(struct net_device *dev, struct ieee_param *par
int ret = 0;
struct sta_info *psta = NULL;
struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
struct ieee_param_ex *param_ex = (struct ieee_param_ex *)param;
struct sta_data *psta_data = (struct sta_data *)param_ex->data;
- if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
+ if (check_fwstate(pmlmepriv, (_FW_LINKED | WIFI_AP_STATE)) != true)
return -EINVAL;
if (param_ex->sta_addr[0] == 0xff && param_ex->sta_addr[1] == 0xff &&
@@ -963,13 +933,11 @@ static int rtw_ioctl_get_sta_data(struct net_device *dev, struct ieee_param *par
psta_data->tx_bytes = psta->sta_stats.tx_bytes;
psta_data->tx_drops = psta->sta_stats.tx_drops;
-
} else {
ret = -1;
}
return ret;
-
}
static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
@@ -977,10 +945,10 @@ static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
int ret = 0;
struct sta_info *psta = NULL;
struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
- if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
+ if (check_fwstate(pmlmepriv, (_FW_LINKED | WIFI_AP_STATE)) != true)
return -EINVAL;
if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
@@ -997,7 +965,7 @@ static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
wpa_ie_len = psta->wpa_ie[1];
- copy_len = ((wpa_ie_len+2) > sizeof(psta->wpa_ie)) ? (sizeof(psta->wpa_ie)):(wpa_ie_len+2);
+ copy_len = ((wpa_ie_len + 2) > sizeof(psta->wpa_ie)) ? (sizeof(psta->wpa_ie)) : (wpa_ie_len + 2);
param->u.wpa_ie.len = copy_len;
@@ -1008,7 +976,6 @@ static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
}
return ret;
-
}
static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param, int len)
@@ -1016,15 +983,14 @@ static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param,
int ret = 0;
unsigned char wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
- struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
int ie_len;
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
return -EINVAL;
- ie_len = len-12-2;/* 12 = param header, 2:no packed */
-
+ ie_len = len - 12 - 2;/* 12 = param header, 2:no packed */
kfree(pmlmepriv->wps_beacon_ie);
pmlmepriv->wps_beacon_ie = NULL;
@@ -1042,23 +1008,20 @@ static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param,
pmlmeext->bstart_bss = true;
}
-
return ret;
-
}
static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *param, int len)
{
int ret = 0;
struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int ie_len;
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
return -EINVAL;
- ie_len = len-12-2;/* 12 = param header, 2:no packed */
-
+ ie_len = len - 12 - 2;/* 12 = param header, 2:no packed */
kfree(pmlmepriv->wps_probe_resp_ie);
pmlmepriv->wps_probe_resp_ie = NULL;
@@ -1072,23 +1035,20 @@ static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *par
memcpy(pmlmepriv->wps_probe_resp_ie, param->u.bcn_ie.buf, ie_len);
}
-
return ret;
-
}
static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *param, int len)
{
int ret = 0;
struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
int ie_len;
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
return -EINVAL;
- ie_len = len-12-2;/* 12 = param header, 2:no packed */
-
+ ie_len = len - 12 - 2;/* 12 = param header, 2:no packed */
kfree(pmlmepriv->wps_assoc_resp_ie);
pmlmepriv->wps_assoc_resp_ie = NULL;
@@ -1102,18 +1062,16 @@ static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *par
memcpy(pmlmepriv->wps_assoc_resp_ie, param->u.bcn_ie.buf, ie_len);
}
-
return ret;
-
}
static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param, int len)
{
int ret = 0;
struct adapter *adapter = rtw_netdev_priv(dev);
- struct mlme_priv *mlmepriv = &(adapter->mlmepriv);
- struct mlme_ext_priv *mlmeext = &(adapter->mlmeextpriv);
- struct mlme_ext_info *mlmeinfo = &(mlmeext->mlmext_info);
+ struct mlme_priv *mlmepriv = &adapter->mlmepriv;
+ struct mlme_ext_priv *mlmeext = &adapter->mlmeextpriv;
+ struct mlme_ext_info *mlmeinfo = &mlmeext->mlmext_info;
int ie_len;
u8 *ssid_ie;
char ssid[NDIS_802_11_LENGTH_SSID + 1];
@@ -1128,14 +1086,14 @@ static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param,
mlmeinfo->hidden_ssid_mode = ignore_broadcast_ssid = param->u.bcn_ie.reserved[1];
- ie_len = len-12-2;/* 12 = param header, 2:no packed */
+ ie_len = len - 12 - 2;/* 12 = param header, 2:no packed */
ssid_ie = rtw_get_ie(param->u.bcn_ie.buf, WLAN_EID_SSID, &ssid_len, ie_len);
if (ssid_ie && ssid_len > 0 && ssid_len <= NDIS_802_11_LENGTH_SSID) {
struct wlan_bssid_ex *pbss_network = &mlmepriv->cur_network.network;
struct wlan_bssid_ex *pbss_network_ext = &mlmeinfo->network;
- memcpy(ssid, ssid_ie+2, ssid_len);
+ memcpy(ssid, ssid_ie + 2, ssid_len);
ssid[ssid_len] = 0x0;
memcpy(pbss_network->ssid.ssid, (void *)ssid, ssid_len);
@@ -1150,7 +1108,7 @@ static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param,
static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *param, int len)
{
struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
return -EINVAL;
@@ -1163,13 +1121,12 @@ static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *p
rtw_acl_remove_sta(padapter, param->sta_addr);
return 0;
-
}
static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *param, int len)
{
struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
return -EINVAL;
@@ -1181,14 +1138,13 @@ static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *para
}
return rtw_acl_add_sta(padapter, param->sta_addr);
-
}
static int rtw_ioctl_set_macaddr_acl(struct net_device *dev, struct ieee_param *param, int len)
{
int ret = 0;
struct adapter *padapter = rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
return -EINVAL;
@@ -1205,9 +1161,9 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
struct adapter *padapter = rtw_netdev_priv(dev);
/*
- * this function is expect to call in master mode, which allows no power saving
- * so, we just check hw_init_completed
- */
+ * this function is expected to be called in master mode, which allows no power saving,
+ * so we just check hw_init_completed
+ */
if (!padapter->hw_init_completed)
return -EPERM;
@@ -1312,7 +1268,6 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
default:
ret = -EOPNOTSUPP;
break;
-
}
if (ret == 0 && copy_to_user(p->pointer, param, p->length))
diff --git a/drivers/staging/rtl8723bs/os_dep/recv_linux.c b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
index 88a69c7ca8f2..4d28b300b235 100644
--- a/drivers/staging/rtl8723bs/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/recv_linux.c
@@ -101,7 +101,7 @@ void rtw_os_recv_indicate_pkt(struct adapter *padapter, struct sk_buff *pkt, str
struct sk_buff *pskb2 = NULL;
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
- int bmcast = IS_MCAST(pattrib->dst);
+ int bmcast = is_multicast_ether_addr(pattrib->dst);
if (memcmp(pattrib->dst, myid(&padapter->eeprompriv), ETH_ALEN)) {
if (bmcast) {
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index 4b7122add51a..74c4f476b3a4 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -4501,8 +4501,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
sd_card->sd_lock_notify = 1;
if (sd_lock_state &&
(sd_card->sd_lock_status & SD_LOCK_1BIT_MODE)) {
- sd_card->sd_lock_status |= (
- SD_UNLOCK_POW_ON | SD_SDR_RST);
+ sd_card->sd_lock_status |= (SD_UNLOCK_POW_ON | SD_SDR_RST);
if (CHK_SD(sd_card)) {
retval = reset_sd(chip);
if (retval != STATUS_SUCCESS) {
diff --git a/drivers/staging/sm750fb/ddk750_dvi.c b/drivers/staging/sm750fb/ddk750_dvi.c
index e0c7ff3352bf..8b81e8642f9e 100644
--- a/drivers/staging/sm750fb/ddk750_dvi.c
+++ b/drivers/staging/sm750fb/ddk750_dvi.c
@@ -14,7 +14,7 @@
static struct dvi_ctrl_device dcft_supported_dvi_controller[] = {
#ifdef DVI_CTRL_SII164
{
- .init = sii164InitChip,
+ .init = sii164_init_chip,
.get_vendor_id = sii164_get_vendor_id,
.get_device_id = sii164GetDeviceID,
#ifdef SII164_FULL_FUNCTIONS
diff --git a/drivers/staging/sm750fb/ddk750_sii164.c b/drivers/staging/sm750fb/ddk750_sii164.c
index 3da1796cd7aa..2532b60245ac 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.c
+++ b/drivers/staging/sm750fb/ddk750_sii164.c
@@ -72,7 +72,7 @@ unsigned short sii164GetDeviceID(void)
*/
/*
- * sii164InitChip
+ * sii164_init_chip
* This function initializes and detects the DVI controller chip.
*
* Input:
@@ -118,16 +118,16 @@ unsigned short sii164GetDeviceID(void)
* 0 - Success
* -1 - Fail.
*/
-long sii164InitChip(unsigned char edge_select,
- unsigned char bus_select,
- unsigned char dual_edge_clk_select,
- unsigned char hsync_enable,
- unsigned char vsync_enable,
- unsigned char deskew_enable,
- unsigned char deskew_setting,
- unsigned char continuous_sync_enable,
- unsigned char pll_filter_enable,
- unsigned char pll_filter_value)
+long sii164_init_chip(unsigned char edge_select,
+ unsigned char bus_select,
+ unsigned char dual_edge_clk_select,
+ unsigned char hsync_enable,
+ unsigned char vsync_enable,
+ unsigned char deskew_enable,
+ unsigned char deskew_setting,
+ unsigned char continuous_sync_enable,
+ unsigned char pll_filter_enable,
+ unsigned char pll_filter_value)
{
unsigned char config;
diff --git a/drivers/staging/sm750fb/ddk750_sii164.h b/drivers/staging/sm750fb/ddk750_sii164.h
index ca330f6a43e2..71a7c1cb42c4 100644
--- a/drivers/staging/sm750fb/ddk750_sii164.h
+++ b/drivers/staging/sm750fb/ddk750_sii164.h
@@ -16,16 +16,16 @@ enum sii164_hot_plug_mode {
};
/* Silicon Image SiI164 chip prototype */
-long sii164InitChip(unsigned char edgeSelect,
- unsigned char busSelect,
- unsigned char dualEdgeClkSelect,
- unsigned char hsyncEnable,
- unsigned char vsyncEnable,
- unsigned char deskewEnable,
- unsigned char deskewSetting,
- unsigned char continuousSyncEnable,
- unsigned char pllFilterEnable,
- unsigned char pllFilterValue);
+long sii164_init_chip(unsigned char edgeSelect,
+ unsigned char busSelect,
+ unsigned char dualEdgeClkSelect,
+ unsigned char hsyncEnable,
+ unsigned char vsyncEnable,
+ unsigned char deskewEnable,
+ unsigned char deskewSetting,
+ unsigned char continuousSyncEnable,
+ unsigned char pllFilterEnable,
+ unsigned char pllFilterValue);
unsigned short sii164_get_vendor_id(void);
unsigned short sii164GetDeviceID(void);
diff --git a/drivers/staging/vme_user/vme.c b/drivers/staging/vme_user/vme.c
index b5555683a069..d0366dd3f2b1 100644
--- a/drivers/staging/vme_user/vme.c
+++ b/drivers/staging/vme_user/vme.c
@@ -79,7 +79,7 @@ static struct vme_bridge *find_bridge(struct vme_resource *resource)
* Return: Virtual address of allocation on success, NULL on failure.
*/
void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
- dma_addr_t *dma)
+ dma_addr_t *dma)
{
struct vme_bridge *bridge;
@@ -119,7 +119,7 @@ EXPORT_SYMBOL(vme_alloc_consistent);
* Free previously allocated block of contiguous memory.
*/
void vme_free_consistent(struct vme_resource *resource, size_t size,
- void *vaddr, dma_addr_t dma)
+ void *vaddr, dma_addr_t dma)
{
struct vme_bridge *bridge;
@@ -169,14 +169,14 @@ size_t vme_get_size(struct vme_resource *resource)
switch (resource->type) {
case VME_MASTER:
retval = vme_master_get(resource, &enabled, &base, &size,
- &aspace, &cycle, &dwidth);
+ &aspace, &cycle, &dwidth);
if (retval)
return 0;
return size;
case VME_SLAVE:
retval = vme_slave_get(resource, &enabled, &base, &size,
- &buf_base, &aspace, &cycle);
+ &buf_base, &aspace, &cycle);
if (retval)
return 0;
@@ -279,7 +279,7 @@ static u32 vme_get_aspace(int am)
* Return: Pointer to VME resource on success, NULL on failure.
*/
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
- u32 cycle)
+ u32 cycle)
{
struct vme_bridge *bridge;
struct list_head *slave_pos = NULL;
@@ -296,7 +296,7 @@ struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
/* Loop through slave resources */
list_for_each(slave_pos, &bridge->slave_resources) {
slave_image = list_entry(slave_pos,
- struct vme_slave_resource, list);
+ struct vme_slave_resource, list);
if (!slave_image) {
printk(KERN_ERR "Registered NULL Slave resource\n");
@@ -306,9 +306,8 @@ struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
/* Find an unlocked and compatible image */
mutex_lock(&slave_image->mtx);
if (((slave_image->address_attr & address) == address) &&
- ((slave_image->cycle_attr & cycle) == cycle) &&
- (slave_image->locked == 0)) {
-
+ ((slave_image->cycle_attr & cycle) == cycle) &&
+ (slave_image->locked == 0)) {
slave_image->locked = 1;
mutex_unlock(&slave_image->mtx);
allocated_image = slave_image;
@@ -359,8 +358,8 @@ EXPORT_SYMBOL(vme_slave_request);
* returned.
*/
int vme_slave_set(struct vme_resource *resource, int enabled,
- unsigned long long vme_base, unsigned long long size,
- dma_addr_t buf_base, u32 aspace, u32 cycle)
+ unsigned long long vme_base, unsigned long long size,
+ dma_addr_t buf_base, u32 aspace, u32 cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_slave_resource *image;
@@ -379,7 +378,7 @@ int vme_slave_set(struct vme_resource *resource, int enabled,
}
if (!(((image->address_attr & aspace) == aspace) &&
- ((image->cycle_attr & cycle) == cycle))) {
+ ((image->cycle_attr & cycle) == cycle))) {
printk(KERN_ERR "Invalid attributes\n");
return -EINVAL;
}
@@ -409,8 +408,8 @@ EXPORT_SYMBOL(vme_slave_set);
* device or if an invalid resource has been provided.
*/
int vme_slave_get(struct vme_resource *resource, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
+ unsigned long long *vme_base, unsigned long long *size,
+ dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_slave_resource *image;
@@ -448,7 +447,7 @@ void vme_slave_free(struct vme_resource *resource)
}
slave_image = list_entry(resource->entry, struct vme_slave_resource,
- list);
+ list);
if (!slave_image) {
printk(KERN_ERR "Can't find slave resource\n");
return;
@@ -480,7 +479,7 @@ EXPORT_SYMBOL(vme_slave_free);
* Return: Pointer to VME resource on success, NULL on failure.
*/
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
- u32 cycle, u32 dwidth)
+ u32 cycle, u32 dwidth)
{
struct vme_bridge *bridge;
struct list_head *master_pos = NULL;
@@ -497,7 +496,7 @@ struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
/* Loop through master resources */
list_for_each(master_pos, &bridge->master_resources) {
master_image = list_entry(master_pos,
- struct vme_master_resource, list);
+ struct vme_master_resource, list);
if (!master_image) {
printk(KERN_WARNING "Registered NULL master resource\n");
@@ -507,10 +506,9 @@ struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
/* Find an unlocked and compatible image */
spin_lock(&master_image->lock);
if (((master_image->address_attr & address) == address) &&
- ((master_image->cycle_attr & cycle) == cycle) &&
- ((master_image->width_attr & dwidth) == dwidth) &&
- (master_image->locked == 0)) {
-
+ ((master_image->cycle_attr & cycle) == cycle) &&
+ ((master_image->width_attr & dwidth) == dwidth) &&
+ (master_image->locked == 0)) {
master_image->locked = 1;
spin_unlock(&master_image->lock);
allocated_image = master_image;
@@ -563,8 +561,8 @@ EXPORT_SYMBOL(vme_master_request);
* returned.
*/
int vme_master_set(struct vme_resource *resource, int enabled,
- unsigned long long vme_base, unsigned long long size, u32 aspace,
- u32 cycle, u32 dwidth)
+ unsigned long long vme_base, unsigned long long size,
+ u32 aspace, u32 cycle, u32 dwidth)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
@@ -583,8 +581,8 @@ int vme_master_set(struct vme_resource *resource, int enabled,
}
if (!(((image->address_attr & aspace) == aspace) &&
- ((image->cycle_attr & cycle) == cycle) &&
- ((image->width_attr & dwidth) == dwidth))) {
+ ((image->cycle_attr & cycle) == cycle) &&
+ ((image->width_attr & dwidth) == dwidth))) {
printk(KERN_WARNING "Invalid attributes\n");
return -EINVAL;
}
@@ -614,8 +612,8 @@ EXPORT_SYMBOL(vme_master_set);
* device or if an invalid resource has been provided.
*/
int vme_master_get(struct vme_resource *resource, int *enabled,
- unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
- u32 *cycle, u32 *dwidth)
+ unsigned long long *vme_base, unsigned long long *size,
+ u32 *aspace, u32 *cycle, u32 *dwidth)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
@@ -653,7 +651,7 @@ EXPORT_SYMBOL(vme_master_get);
* returned.
*/
ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
- loff_t offset)
+ loff_t offset)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
@@ -682,7 +680,6 @@ ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
count = length - offset;
return bridge->master_read(image, buf, count, offset);
-
}
EXPORT_SYMBOL(vme_master_read);
@@ -702,7 +699,7 @@ EXPORT_SYMBOL(vme_master_read);
* returned.
*/
ssize_t vme_master_write(struct vme_resource *resource, void *buf,
- size_t count, loff_t offset)
+ size_t count, loff_t offset)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
@@ -754,7 +751,7 @@ EXPORT_SYMBOL(vme_master_write);
* errors may also be returned.
*/
unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
- unsigned int compare, unsigned int swap, loff_t offset)
+ unsigned int compare, unsigned int swap, loff_t offset)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
@@ -828,7 +825,7 @@ void vme_master_free(struct vme_resource *resource)
}
master_image = list_entry(resource->entry, struct vme_master_resource,
- list);
+ list);
if (!master_image) {
printk(KERN_ERR "Can't find master resource\n");
return;
@@ -877,7 +874,7 @@ struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
/* Loop through DMA resources */
list_for_each(dma_pos, &bridge->dma_resources) {
dma_ctrlr = list_entry(dma_pos,
- struct vme_dma_resource, list);
+ struct vme_dma_resource, list);
if (!dma_ctrlr) {
printk(KERN_ERR "Registered NULL DMA resource\n");
continue;
@@ -886,8 +883,7 @@ struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
/* Find an unlocked and compatible controller */
mutex_lock(&dma_ctrlr->mtx);
if (((dma_ctrlr->route_attr & route) == route) &&
- (dma_ctrlr->locked == 0)) {
-
+ (dma_ctrlr->locked == 0)) {
dma_ctrlr->locked = 1;
mutex_unlock(&dma_ctrlr->mtx);
allocated_ctrlr = dma_ctrlr;
@@ -1045,7 +1041,7 @@ EXPORT_SYMBOL(vme_dma_pci_attribute);
* Return: Pointer to VME DMA attribute, NULL on failure.
*/
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
- u32 aspace, u32 cycle, u32 dwidth)
+ u32 aspace, u32 cycle, u32 dwidth)
{
struct vme_dma_attr *attributes;
struct vme_dma_vme *vme_attr;
@@ -1107,7 +1103,7 @@ EXPORT_SYMBOL(vme_dma_free_attribute);
* Hardware specific errors also possible.
*/
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
- struct vme_dma_attr *dest, size_t count)
+ struct vme_dma_attr *dest, size_t count)
{
struct vme_bridge *bridge = list->parent->parent;
int retval;
@@ -1271,9 +1267,8 @@ void vme_bus_error_handler(struct vme_bridge *bridge,
}
EXPORT_SYMBOL(vme_bus_error_handler);
-struct vme_error_handler *vme_register_error_handler(
- struct vme_bridge *bridge, u32 aspace,
- unsigned long long address, size_t len)
+struct vme_error_handler *vme_register_error_handler(struct vme_bridge *bridge, u32 aspace,
+ unsigned long long address, size_t len)
{
struct vme_error_handler *handler;
@@ -1331,8 +1326,8 @@ EXPORT_SYMBOL(vme_irq_handler);
* already in use. Hardware specific errors also possible.
*/
int vme_irq_request(struct vme_dev *vdev, int level, int statid,
- void (*callback)(int, int, void *),
- void *priv_data)
+ void (*callback)(int, int, void *),
+ void *priv_data)
{
struct vme_bridge *bridge;
@@ -1479,7 +1474,7 @@ struct vme_resource *vme_lm_request(struct vme_dev *vdev)
/* Loop through LM resources */
list_for_each(lm_pos, &bridge->lm_resources) {
lm = list_entry(lm_pos,
- struct vme_lm_resource, list);
+ struct vme_lm_resource, list);
if (!lm) {
printk(KERN_ERR "Registered NULL Location Monitor resource\n");
continue;
@@ -1561,7 +1556,7 @@ EXPORT_SYMBOL(vme_lm_count);
* errors may also be returned.
*/
int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
- u32 aspace, u32 cycle)
+ u32 aspace, u32 cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_lm_resource *lm;
@@ -1597,7 +1592,7 @@ EXPORT_SYMBOL(vme_lm_set);
* errors may also be returned.
*/
int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
- u32 *aspace, u32 *cycle)
+ u32 *aspace, u32 *cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_lm_resource *lm;
@@ -1634,7 +1629,7 @@ EXPORT_SYMBOL(vme_lm_get);
* errors may also be returned.
*/
int vme_lm_attach(struct vme_resource *resource, int monitor,
- void (*callback)(void *), void *data)
+ void (*callback)(void *), void *data)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_lm_resource *lm;
@@ -1841,7 +1836,8 @@ EXPORT_SYMBOL(vme_unregister_bridge);
/* - Driver Registration --------------------------------------------------- */
static int __vme_register_driver_bus(struct vme_driver *drv,
- struct vme_bridge *bridge, unsigned int ndevs)
+ struct vme_bridge *bridge,
+ unsigned int ndevs)
{
int err;
unsigned int i;
@@ -1861,7 +1857,7 @@ static int __vme_register_driver_bus(struct vme_driver *drv,
vdev->dev.parent = bridge->parent;
vdev->dev.bus = &vme_bus_type;
dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
- vdev->num);
+ vdev->num);
err = device_register(&vdev->dev);
if (err)
diff --git a/drivers/staging/vme_user/vme_bridge.h b/drivers/staging/vme_user/vme_bridge.h
index 11df0a5e7f7b..9bdc41bb6602 100644
--- a/drivers/staging/vme_user/vme_bridge.h
+++ b/drivers/staging/vme_user/vme_bridge.h
@@ -128,28 +128,24 @@ struct vme_bridge {
struct mutex irq_mtx;
/* Slave Functions */
- int (*slave_get)(struct vme_slave_resource *, int *,
- unsigned long long *, unsigned long long *, dma_addr_t *,
- u32 *, u32 *);
+ int (*slave_get)(struct vme_slave_resource *, int *, unsigned long long *,
+ unsigned long long *, dma_addr_t *, u32 *, u32 *);
int (*slave_set)(struct vme_slave_resource *, int, unsigned long long,
- unsigned long long, dma_addr_t, u32, u32);
+ unsigned long long, dma_addr_t, u32, u32);
/* Master Functions */
- int (*master_get)(struct vme_master_resource *, int *,
- unsigned long long *, unsigned long long *, u32 *, u32 *,
- u32 *);
- int (*master_set)(struct vme_master_resource *, int,
- unsigned long long, unsigned long long, u32, u32, u32);
- ssize_t (*master_read)(struct vme_master_resource *, void *, size_t,
- loff_t);
- ssize_t (*master_write)(struct vme_master_resource *, void *, size_t,
- loff_t);
+ int (*master_get)(struct vme_master_resource *, int *, unsigned long long *,
+ unsigned long long *, u32 *, u32 *, u32 *);
+ int (*master_set)(struct vme_master_resource *, int, unsigned long long,
+ unsigned long long, u32, u32, u32);
+ ssize_t (*master_read)(struct vme_master_resource *, void *, size_t, loff_t);
+ ssize_t (*master_write)(struct vme_master_resource *, void *, size_t, loff_t);
unsigned int (*master_rmw)(struct vme_master_resource *, unsigned int,
- unsigned int, unsigned int, loff_t);
+ unsigned int, unsigned int, loff_t);
/* DMA Functions */
int (*dma_list_add)(struct vme_dma_list *, struct vme_dma_attr *,
- struct vme_dma_attr *, size_t);
+ struct vme_dma_attr *, size_t);
int (*dma_list_exec)(struct vme_dma_list *);
int (*dma_list_empty)(struct vme_dma_list *);
@@ -159,32 +155,26 @@ struct vme_bridge {
/* Location monitor functions */
int (*lm_set)(struct vme_lm_resource *, unsigned long long, u32, u32);
- int (*lm_get)(struct vme_lm_resource *, unsigned long long *, u32 *,
- u32 *);
- int (*lm_attach)(struct vme_lm_resource *, int,
- void (*callback)(void *), void *);
+ int (*lm_get)(struct vme_lm_resource *, unsigned long long *, u32 *, u32 *);
+ int (*lm_attach)(struct vme_lm_resource *, int, void (*callback)(void *), void *);
int (*lm_detach)(struct vme_lm_resource *, int);
/* CR/CSR space functions */
int (*slot_get)(struct vme_bridge *);
/* Bridge parent interface */
- void *(*alloc_consistent)(struct device *dev, size_t size,
- dma_addr_t *dma);
- void (*free_consistent)(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma);
+ void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *dma);
+ void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t dma);
};
-void vme_bus_error_handler(struct vme_bridge *bridge,
- unsigned long long address, int am);
+void vme_bus_error_handler(struct vme_bridge *bridge, unsigned long long address, int am);
void vme_irq_handler(struct vme_bridge *, int, int);
struct vme_bridge *vme_init_bridge(struct vme_bridge *);
int vme_register_bridge(struct vme_bridge *);
void vme_unregister_bridge(struct vme_bridge *);
-struct vme_error_handler *vme_register_error_handler(
- struct vme_bridge *bridge, u32 aspace,
- unsigned long long address, size_t len);
+struct vme_error_handler *vme_register_error_handler(struct vme_bridge *bridge, u32 aspace,
+ unsigned long long address, size_t len);
void vme_unregister_error_handler(struct vme_error_handler *handler);
#endif /* _VME_BRIDGE_H_ */
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 0e135af8316b..696d4dd03aa2 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -499,7 +499,7 @@ static const unsigned char by_vt3253_init_tab_rfmd[CB_VT3253_INIT_FOR_RFMD][2] =
};
#define CB_VT3253B0_INIT_FOR_RFMD 256
-static const unsigned char byVT3253B0_RFMD[CB_VT3253B0_INIT_FOR_RFMD][2] = {
+static const unsigned char vt3253b0_rfmd[CB_VT3253B0_INIT_FOR_RFMD][2] = {
{0x00, 0x31},
{0x01, 0x00},
{0x02, 0x00},
@@ -2005,8 +2005,8 @@ bool bb_vt3253_init(struct vnt_private *priv)
} else {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_RFMD; ii++)
result &= bb_write_embedded(priv,
- byVT3253B0_RFMD[ii][0],
- byVT3253B0_RFMD[ii][1]);
+ vt3253b0_rfmd[ii][0],
+ vt3253b0_rfmd[ii][1]);
for (ii = 0; ii < CB_VT3253B0_AGC_FOR_RFMD2959; ii++)
result &= bb_write_embedded(priv,
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index ee5ca4db74dc..1b89d401a7eb 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -49,7 +49,7 @@
* Parameters:
* In:
* iobase - I/O base address
- * byContntOffset - address of EEPROM
+ * contnt_offset - address of EEPROM
* Out:
* none
*
@@ -57,7 +57,7 @@
*
*/
unsigned char SROMbyReadEmbedded(void __iomem *iobase,
- unsigned char byContntOffset)
+ unsigned char contnt_offset)
{
unsigned short wDelay, wNoACK;
unsigned char byWait;
@@ -70,7 +70,7 @@ unsigned char SROMbyReadEmbedded(void __iomem *iobase,
iowrite8(byOrg & (~I2MCFG_NORETRY), iobase + MAC_REG_I2MCFG);
for (wNoACK = 0; wNoACK < W_MAX_I2CRETRY; wNoACK++) {
iowrite8(EEP_I2C_DEV_ID, iobase + MAC_REG_I2MTGID);
- iowrite8(byContntOffset, iobase + MAC_REG_I2MTGAD);
+ iowrite8(contnt_offset, iobase + MAC_REG_I2MTGAD);
/* issue read command */
iowrite8(I2MCSR_EEMR, iobase + MAC_REG_I2MCSR);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 5d0f51822414..1cff6052e820 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -45,9 +45,9 @@ static ssize_t lio_target_np_driver_show(struct config_item *item, char *page,
tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
if (tpg_np_new)
- rb = sprintf(page, "1\n");
+ rb = sysfs_emit(page, "1\n");
else
- rb = sprintf(page, "0\n");
+ rb = sysfs_emit(page, "0\n");
return rb;
}
@@ -282,7 +282,7 @@ static ssize_t iscsi_nacl_attrib_##name##_show(struct config_item *item,\
{ \
struct se_node_acl *se_nacl = attrib_to_nacl(item); \
struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl); \
- return sprintf(page, "%u\n", nacl->node_attrib.name); \
+ return sysfs_emit(page, "%u\n", nacl->node_attrib.name); \
} \
\
static ssize_t iscsi_nacl_attrib_##name##_store(struct config_item *item,\
@@ -320,7 +320,7 @@ static ssize_t iscsi_nacl_attrib_authentication_show(struct config_item *item,
struct se_node_acl *se_nacl = attrib_to_nacl(item);
struct iscsi_node_acl *nacl = to_iscsi_nacl(se_nacl);
- return sprintf(page, "%d\n", nacl->node_attrib.authentication);
+ return sysfs_emit(page, "%d\n", nacl->node_attrib.authentication);
}
static ssize_t iscsi_nacl_attrib_authentication_store(struct config_item *item,
@@ -533,102 +533,102 @@ static ssize_t lio_target_nacl_info_show(struct config_item *item, char *page)
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (!se_sess) {
- rb += sprintf(page+rb, "No active iSCSI Session for Initiator"
+ rb += sysfs_emit_at(page, rb, "No active iSCSI Session for Initiator"
" Endpoint: %s\n", se_nacl->initiatorname);
} else {
sess = se_sess->fabric_sess_ptr;
- rb += sprintf(page+rb, "InitiatorName: %s\n",
+ rb += sysfs_emit_at(page, rb, "InitiatorName: %s\n",
sess->sess_ops->InitiatorName);
- rb += sprintf(page+rb, "InitiatorAlias: %s\n",
+ rb += sysfs_emit_at(page, rb, "InitiatorAlias: %s\n",
sess->sess_ops->InitiatorAlias);
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"LIO Session ID: %u ISID: 0x%6ph TSIH: %hu ",
sess->sid, sess->isid, sess->tsih);
- rb += sprintf(page+rb, "SessionType: %s\n",
+ rb += sysfs_emit_at(page, rb, "SessionType: %s\n",
(sess->sess_ops->SessionType) ?
"Discovery" : "Normal");
- rb += sprintf(page+rb, "Session State: ");
+ rb += sysfs_emit_at(page, rb, "Session State: ");
switch (sess->session_state) {
case TARG_SESS_STATE_FREE:
- rb += sprintf(page+rb, "TARG_SESS_FREE\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_FREE\n");
break;
case TARG_SESS_STATE_ACTIVE:
- rb += sprintf(page+rb, "TARG_SESS_STATE_ACTIVE\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_ACTIVE\n");
break;
case TARG_SESS_STATE_LOGGED_IN:
- rb += sprintf(page+rb, "TARG_SESS_STATE_LOGGED_IN\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_LOGGED_IN\n");
break;
case TARG_SESS_STATE_FAILED:
- rb += sprintf(page+rb, "TARG_SESS_STATE_FAILED\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_FAILED\n");
break;
case TARG_SESS_STATE_IN_CONTINUE:
- rb += sprintf(page+rb, "TARG_SESS_STATE_IN_CONTINUE\n");
+ rb += sysfs_emit_at(page, rb, "TARG_SESS_STATE_IN_CONTINUE\n");
break;
default:
- rb += sprintf(page+rb, "ERROR: Unknown Session"
+ rb += sysfs_emit_at(page, rb, "ERROR: Unknown Session"
" State!\n");
break;
}
- rb += sprintf(page+rb, "---------------------[iSCSI Session"
+ rb += sysfs_emit_at(page, rb, "---------------------[iSCSI Session"
" Values]-----------------------\n");
- rb += sprintf(page+rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
+ rb += sysfs_emit_at(page, rb, " CmdSN/WR : CmdSN/WC : ExpCmdSN"
" : MaxCmdSN : ITT : TTT\n");
max_cmd_sn = (u32) atomic_read(&sess->max_cmd_sn);
- rb += sprintf(page+rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
+ rb += sysfs_emit_at(page, rb, " 0x%08x 0x%08x 0x%08x 0x%08x"
" 0x%08x 0x%08x\n",
sess->cmdsn_window,
(max_cmd_sn - sess->exp_cmd_sn) + 1,
sess->exp_cmd_sn, max_cmd_sn,
sess->init_task_tag, sess->targ_xfer_tag);
- rb += sprintf(page+rb, "----------------------[iSCSI"
+ rb += sysfs_emit_at(page, rb, "----------------------[iSCSI"
" Connections]-------------------------\n");
spin_lock(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
- rb += sprintf(page+rb, "CID: %hu Connection"
+ rb += sysfs_emit_at(page, rb, "CID: %hu Connection"
" State: ", conn->cid);
switch (conn->conn_state) {
case TARG_CONN_STATE_FREE:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_FREE\n");
break;
case TARG_CONN_STATE_XPT_UP:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_XPT_UP\n");
break;
case TARG_CONN_STATE_IN_LOGIN:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_IN_LOGIN\n");
break;
case TARG_CONN_STATE_LOGGED_IN:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_LOGGED_IN\n");
break;
case TARG_CONN_STATE_IN_LOGOUT:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_IN_LOGOUT\n");
break;
case TARG_CONN_STATE_LOGOUT_REQUESTED:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_LOGOUT_REQUESTED\n");
break;
case TARG_CONN_STATE_CLEANUP_WAIT:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"TARG_CONN_STATE_CLEANUP_WAIT\n");
break;
default:
- rb += sprintf(page+rb,
+ rb += sysfs_emit_at(page, rb,
"ERROR: Unknown Connection State!\n");
break;
}
- rb += sprintf(page+rb, " Address %pISc %s", &conn->login_sockaddr,
+ rb += sysfs_emit_at(page, rb, " Address %pISc %s", &conn->login_sockaddr,
(conn->network_transport == ISCSI_TCP) ?
"TCP" : "SCTP");
- rb += sprintf(page+rb, " StatSN: 0x%08x\n",
+ rb += sysfs_emit_at(page, rb, " StatSN: 0x%08x\n",
conn->stat_sn);
}
spin_unlock(&sess->conn_lock);
@@ -641,7 +641,7 @@ static ssize_t lio_target_nacl_info_show(struct config_item *item, char *page)
static ssize_t lio_target_nacl_cmdsn_depth_show(struct config_item *item,
char *page)
{
- return sprintf(page, "%u\n", acl_to_nacl(item)->queue_depth);
+ return sysfs_emit(page, "%u\n", acl_to_nacl(item)->queue_depth);
}
static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item,
@@ -750,7 +750,7 @@ static ssize_t iscsi_tpg_attrib_##name##_show(struct config_item *item, \
if (iscsit_get_tpg(tpg) < 0) \
return -EINVAL; \
\
- rb = sprintf(page, "%u\n", tpg->tpg_attrib.name); \
+ rb = sysfs_emit(page, "%u\n", tpg->tpg_attrib.name); \
iscsit_put_tpg(tpg); \
return rb; \
} \
@@ -783,7 +783,6 @@ CONFIGFS_ATTR(iscsi_tpg_attrib_, name)
DEF_TPG_ATTRIB(authentication);
DEF_TPG_ATTRIB(login_timeout);
-DEF_TPG_ATTRIB(netif_timeout);
DEF_TPG_ATTRIB(generate_node_acls);
DEF_TPG_ATTRIB(default_cmdsn_depth);
DEF_TPG_ATTRIB(cache_dynamic_acls);
@@ -799,7 +798,6 @@ DEF_TPG_ATTRIB(login_keys_workaround);
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_attr_authentication,
&iscsi_tpg_attrib_attr_login_timeout,
- &iscsi_tpg_attrib_attr_netif_timeout,
&iscsi_tpg_attrib_attr_generate_node_acls,
&iscsi_tpg_attrib_attr_default_cmdsn_depth,
&iscsi_tpg_attrib_attr_cache_dynamic_acls,
@@ -1138,7 +1136,7 @@ static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
static ssize_t lio_target_wwn_lio_version_show(struct config_item *item,
char *page)
{
- return sprintf(page, "Datera Inc. iSCSI Target "ISCSIT_VERSION"\n");
+ return sysfs_emit(page, "Datera Inc. iSCSI Target %s\n", ISCSIT_VERSION);
}
CONFIGFS_ATTR_RO(lio_target_wwn_, lio_version);
@@ -1146,7 +1144,7 @@ CONFIGFS_ATTR_RO(lio_target_wwn_, lio_version);
static ssize_t lio_target_wwn_cpus_allowed_list_show(
struct config_item *item, char *page)
{
- return sprintf(page, "%*pbl\n",
+ return sysfs_emit(page, "%*pbl\n",
cpumask_pr_args(iscsit_global->allowed_cpumask));
}
@@ -1283,7 +1281,7 @@ static ssize_t iscsi_disc_enforce_discovery_auth_show(struct config_item *item,
{
struct iscsi_node_auth *discovery_auth = &iscsit_global->discovery_acl.node_auth;
- return sprintf(page, "%d\n", discovery_auth->enforce_discovery_auth);
+ return sysfs_emit(page, "%d\n", discovery_auth->enforce_discovery_auth);
}
static ssize_t iscsi_disc_enforce_discovery_auth_store(struct config_item *item,
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 3cac1aafef68..f7bac98fd4fe 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -211,7 +211,6 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
a->authentication = TA_AUTHENTICATION;
a->login_timeout = TA_LOGIN_TIMEOUT;
- a->netif_timeout = TA_NETIF_TIMEOUT;
a->default_cmdsn_depth = TA_DEFAULT_CMDSN_DEPTH;
a->generate_node_acls = TA_GENERATE_NODE_ACLS;
a->cache_dynamic_acls = TA_CACHE_DYNAMIC_ACLS;
@@ -666,31 +665,6 @@ int iscsit_ta_login_timeout(
return 0;
}
-int iscsit_ta_netif_timeout(
- struct iscsi_portal_group *tpg,
- u32 netif_timeout)
-{
- struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
-
- if (netif_timeout > TA_NETIF_TIMEOUT_MAX) {
- pr_err("Requested Network Interface Timeout %u larger"
- " than maximum %u\n", netif_timeout,
- TA_NETIF_TIMEOUT_MAX);
- return -EINVAL;
- } else if (netif_timeout < TA_NETIF_TIMEOUT_MIN) {
- pr_err("Requested Network Interface Timeout %u smaller"
- " than minimum %u\n", netif_timeout,
- TA_NETIF_TIMEOUT_MIN);
- return -EINVAL;
- }
-
- a->netif_timeout = netif_timeout;
- pr_debug("Set Network Interface Timeout to %u for"
- " Target Portal Group %hu\n", a->netif_timeout, tpg->tpgt);
-
- return 0;
-}
-
int iscsit_ta_generate_node_acls(
struct iscsi_portal_group *tpg,
u32 flag)
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 839e45362776..71d067f62177 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -38,7 +38,6 @@ extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
struct iscsi_tpg_np *);
extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32);
extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32);
-extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32);
extern int iscsit_ta_generate_node_acls(struct iscsi_portal_group *, u32);
extern int iscsit_ta_default_cmdsn_depth(struct iscsi_portal_group *, u32);
extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index a7050f63b7cc..a6a06a5f7483 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -739,11 +739,16 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (data_direction == DMA_TO_DEVICE) {
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+
+ /*
+ * Set bits to indicate WRITE_ODIRECT so we are not throttled
+ * by WBT.
+ */
+ opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
/*
* Force writethrough using REQ_FUA if a volatile write cache
* is not enabled, or if initiator set the Force Unit Access bit.
*/
- opf = REQ_OP_WRITE;
miter_dir = SG_MITER_TO_SG;
if (bdev_fua(ib_dev->ibd_bd)) {
if (cmd->se_cmd_flags & SCF_FUA)
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 19a4b33cb564..c81a00fbca7d 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -510,4 +510,16 @@ config KHADAS_MCU_FAN_THERMAL
If you say yes here you get support for the FAN controlled
by the Microcontroller found on the Khadas VIM boards.
+config LOONGSON2_THERMAL
+ tristate "Loongson-2 SoC series thermal driver"
+ depends on LOONGARCH || COMPILE_TEST
+ depends on OF
+ help
+ Support for the thermal sensor found on Loongson-2 SoC series
+ platforms. The driver implements the get_temp and set_trips
+ operations, which read the current temperature of the node and
+ program the temperature window that triggers an interrupt: the
+ interrupt fires when the temperature rises above the high
+ threshold or drops below the low threshold.
+
endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 058664bc3ec0..c934cab309ae 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -63,3 +63,4 @@ obj-$(CONFIG_UNIPHIER_THERMAL) += uniphier_thermal.o
obj-$(CONFIG_AMLOGIC_THERMAL) += amlogic_thermal.o
obj-$(CONFIG_SPRD_THERMAL) += sprd_thermal.o
obj-$(CONFIG_KHADAS_MCU_FAN_THERMAL) += khadas_mcu_fan.o
+obj-$(CONFIG_LOONGSON2_THERMAL) += loongson2_thermal.o
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 9f6dc4fc9112..f00765bfc22e 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -876,8 +876,9 @@ static int armada_thermal_probe(struct platform_device *pdev)
/* Wait the sensors to be valid */
armada_wait_sensor_validity(priv);
- tz = thermal_zone_device_register(priv->zone_name, 0, 0, priv,
- &legacy_ops, NULL, 0, 0);
+ tz = thermal_tripless_zone_device_register(priv->zone_name,
+ priv, &legacy_ops,
+ NULL);
if (IS_ERR(tz)) {
dev_err(&pdev->dev,
"Failed to register thermal zone device\n");
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
index 0b73abdaa792..9674e5ffcfa2 100644
--- a/drivers/thermal/broadcom/brcmstb_thermal.c
+++ b/drivers/thermal/broadcom/brcmstb_thermal.c
@@ -334,7 +334,6 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
return PTR_ERR(priv->tmon_base);
priv->dev = &pdev->dev;
- platform_set_drvdata(pdev, priv);
of_ops = priv->temp_params->of_ops;
thermal = devm_thermal_of_zone_register(&pdev->dev, 0, priv,
diff --git a/drivers/thermal/broadcom/sr-thermal.c b/drivers/thermal/broadcom/sr-thermal.c
index 747915890022..9a29dfd4c7fe 100644
--- a/drivers/thermal/broadcom/sr-thermal.c
+++ b/drivers/thermal/broadcom/sr-thermal.c
@@ -91,7 +91,6 @@ static int sr_thermal_probe(struct platform_device *pdev)
dev_dbg(dev, "thermal sensor %d registered\n", i);
}
- platform_set_drvdata(pdev, sr_thermal);
return 0;
}
diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
index fca5c2c93bf9..576f88b6a1b3 100644
--- a/drivers/thermal/db8500_thermal.c
+++ b/drivers/thermal/db8500_thermal.c
@@ -229,7 +229,7 @@ MODULE_DEVICE_TABLE(of, db8500_thermal_match);
static struct platform_driver db8500_thermal_driver = {
.driver = {
.name = "db8500-thermal",
- .of_match_table = of_match_ptr(db8500_thermal_match),
+ .of_match_table = db8500_thermal_match,
},
.probe = db8500_thermal_probe,
.suspend = db8500_thermal_suspend,
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
index 9954040d1d2c..7a18cb960bee 100644
--- a/drivers/thermal/dove_thermal.c
+++ b/drivers/thermal/dove_thermal.c
@@ -139,8 +139,8 @@ static int dove_thermal_probe(struct platform_device *pdev)
return ret;
}
- thermal = thermal_zone_device_register("dove_thermal", 0, 0,
- priv, &ops, NULL, 0, 0);
+ thermal = thermal_tripless_zone_device_register("dove_thermal", priv,
+ &ops, NULL);
if (IS_ERR(thermal)) {
dev_err(&pdev->dev,
"Failed to register thermal zone device\n");
diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c
index e89b11b3f2b9..14111ccf6e4c 100644
--- a/drivers/thermal/imx8mm_thermal.c
+++ b/drivers/thermal/imx8mm_thermal.c
@@ -178,10 +178,8 @@ static int imx8mm_tmu_probe_set_calib_v1(struct platform_device *pdev,
int ret;
ret = nvmem_cell_read_u32(&pdev->dev, "calib", &ana0);
- if (ret) {
- dev_warn(dev, "Failed to read OCOTP nvmem cell (%d).\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read OCOTP nvmem cell\n");
writel(FIELD_PREP(TASR_BUF_VREF_MASK,
FIELD_GET(ANA0_BUF_VREF_MASK, ana0)) |
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index ddd600820f68..ffc2871a021c 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -609,9 +609,9 @@ static int int3400_thermal_probe(struct platform_device *pdev)
evaluate_odvp(priv);
- priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
- priv, &int3400_thermal_ops,
- &int3400_thermal_params, 0, 0);
+ priv->thermal = thermal_tripless_zone_device_register("INT3400 Thermal", priv,
+ &int3400_thermal_ops,
+ &int3400_thermal_params);
if (IS_ERR(priv->thermal)) {
result = PTR_ERR(priv->thermal);
goto free_art_trt;
diff --git a/drivers/thermal/k3_bandgap.c b/drivers/thermal/k3_bandgap.c
index 68f59b3735d3..4a918c1e92f9 100644
--- a/drivers/thermal/k3_bandgap.c
+++ b/drivers/thermal/k3_bandgap.c
@@ -225,7 +225,6 @@ static int k3_bandgap_probe(struct platform_device *pdev)
devm_thermal_add_hwmon_sysfs(dev, data[id].tzd);
}
- platform_set_drvdata(pdev, bgp);
return 0;
diff --git a/drivers/thermal/k3_j72xx_bandgap.c b/drivers/thermal/k3_j72xx_bandgap.c
index a5a0fc9b9356..2fc799b07b90 100644
--- a/drivers/thermal/k3_j72xx_bandgap.c
+++ b/drivers/thermal/k3_j72xx_bandgap.c
@@ -502,8 +502,6 @@ static int k3_j72xx_bandgap_probe(struct platform_device *pdev)
writel(K3_VTM_ANYMAXT_OUTRG_ALERT_EN, data[0].bgp->cfg2_base +
K3_VTM_MISC_CTRL_OFFSET);
- platform_set_drvdata(pdev, bgp);
-
print_look_up_table(dev, ref_table);
/*
* Now that the derived_table has the appropriate look up values
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
index 668747bd86ef..acb10d24256d 100644
--- a/drivers/thermal/kirkwood_thermal.c
+++ b/drivers/thermal/kirkwood_thermal.c
@@ -71,8 +71,8 @@ static int kirkwood_thermal_probe(struct platform_device *pdev)
if (IS_ERR(priv->sensor))
return PTR_ERR(priv->sensor);
- thermal = thermal_zone_device_register("kirkwood_thermal", 0, 0,
- priv, &ops, NULL, 0, 0);
+ thermal = thermal_tripless_zone_device_register("kirkwood_thermal",
+ priv, &ops, NULL);
if (IS_ERR(thermal)) {
dev_err(&pdev->dev,
"Failed to register thermal zone device\n");
diff --git a/drivers/thermal/loongson2_thermal.c b/drivers/thermal/loongson2_thermal.c
new file mode 100644
index 000000000000..133098dc0854
--- /dev/null
+++ b/drivers/thermal/loongson2_thermal.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Author: zhanghongchen <zhanghongchen@loongson.cn>
+ * Yinbo Zhu <zhuyinbo@loongson.cn>
+ * Copyright (C) 2022-2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+#include <linux/units.h>
+#include "thermal_hwmon.h"
+
+#define LOONGSON2_MAX_SENSOR_SEL_NUM 3
+
+#define LOONGSON2_THSENS_CTRL_HI_REG 0x0
+#define LOONGSON2_THSENS_CTRL_LOW_REG 0x8
+#define LOONGSON2_THSENS_STATUS_REG 0x10
+#define LOONGSON2_THSENS_OUT_REG 0x14
+
+#define LOONGSON2_THSENS_INT_LO BIT(0)
+#define LOONGSON2_THSENS_INT_HIGH BIT(1)
+#define LOONGSON2_THSENS_OUT_MASK 0xFF
+
+struct loongson2_thermal_chip_data {
+ unsigned int thermal_sensor_sel;
+};
+
+struct loongson2_thermal_data {
+ void __iomem *regs;
+ const struct loongson2_thermal_chip_data *chip_data;
+};
+
+static int loongson2_thermal_set(struct loongson2_thermal_data *data,
+ int low, int high, bool enable)
+{
+ u64 reg_ctrl = 0;
+ int reg_off = data->chip_data->thermal_sensor_sel * 2;
+
+ low = clamp(-40, low, high);
+ high = clamp(125, low, high);
+
+ low += HECTO;
+ high += HECTO;
+
+ reg_ctrl = low;
+ reg_ctrl |= enable ? 0x100 : 0;
+ writew(reg_ctrl, data->regs + LOONGSON2_THSENS_CTRL_LOW_REG + reg_off);
+
+ reg_ctrl = high;
+ reg_ctrl |= enable ? 0x100 : 0;
+ writew(reg_ctrl, data->regs + LOONGSON2_THSENS_CTRL_HI_REG + reg_off);
+
+ return 0;
+}
+
+static int loongson2_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
+{
+ u32 reg_val;
+ struct loongson2_thermal_data *data = thermal_zone_device_priv(tz);
+
+ reg_val = readl(data->regs + LOONGSON2_THSENS_OUT_REG);
+ *temp = ((reg_val & LOONGSON2_THSENS_OUT_MASK) - HECTO) * KILO;
+
+ return 0;
+}
+
+static irqreturn_t loongson2_thermal_irq_thread(int irq, void *dev)
+{
+ struct thermal_zone_device *tzd = dev;
+ struct loongson2_thermal_data *data = thermal_zone_device_priv(tzd);
+
+ writeb(LOONGSON2_THSENS_INT_LO | LOONGSON2_THSENS_INT_HIGH, data->regs +
+ LOONGSON2_THSENS_STATUS_REG);
+
+ thermal_zone_device_update(tzd, THERMAL_EVENT_UNSPECIFIED);
+
+ return IRQ_HANDLED;
+}
+
+static int loongson2_thermal_set_trips(struct thermal_zone_device *tz, int low, int high)
+{
+ struct loongson2_thermal_data *data = thermal_zone_device_priv(tz);
+
+ return loongson2_thermal_set(data, low/MILLI, high/MILLI, true);
+}
+
+static const struct thermal_zone_device_ops loongson2_of_thermal_ops = {
+ .get_temp = loongson2_thermal_get_temp,
+ .set_trips = loongson2_thermal_set_trips,
+};
+
+static int loongson2_thermal_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct loongson2_thermal_data *data;
+ struct thermal_zone_device *tzd;
+ int ret, irq, i;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->chip_data = device_get_match_data(dev);
+
+ data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(data->regs))
+ return PTR_ERR(data->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ writeb(LOONGSON2_THSENS_INT_LO | LOONGSON2_THSENS_INT_HIGH, data->regs +
+ LOONGSON2_THSENS_STATUS_REG);
+
+ loongson2_thermal_set(data, 0, 0, false);
+
+ for (i = 0; i <= LOONGSON2_MAX_SENSOR_SEL_NUM; i++) {
+ tzd = devm_thermal_of_zone_register(dev, i, data,
+ &loongson2_of_thermal_ops);
+
+ if (!IS_ERR(tzd))
+ break;
+
+ if (PTR_ERR(tzd) == -ENODEV)
+ continue;
+
+ return dev_err_probe(dev, PTR_ERR(tzd), "failed to register\n");
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, loongson2_thermal_irq_thread,
+ IRQF_ONESHOT, "loongson2_thermal", tzd);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to request alarm irq\n");
+
+ devm_thermal_add_hwmon_sysfs(dev, tzd);
+
+ return 0;
+}
+
+static const struct loongson2_thermal_chip_data loongson2_thermal_ls2k1000_data = {
+ .thermal_sensor_sel = 0,
+};
+
+static const struct of_device_id of_loongson2_thermal_match[] = {
+ {
+ .compatible = "loongson,ls2k1000-thermal",
+ .data = &loongson2_thermal_ls2k1000_data,
+ },
+ { /* end */ }
+};
+MODULE_DEVICE_TABLE(of, of_loongson2_thermal_match);
+
+static struct platform_driver loongson2_thermal_driver = {
+ .driver = {
+ .name = "loongson2_thermal",
+ .of_match_table = of_loongson2_thermal_match,
+ },
+ .probe = loongson2_thermal_probe,
+};
+module_platform_driver(loongson2_thermal_driver);
+
+MODULE_DESCRIPTION("Loongson2 thermal driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/max77620_thermal.c b/drivers/thermal/max77620_thermal.c
index 61c7622d9945..919b6ee208d8 100644
--- a/drivers/thermal/max77620_thermal.c
+++ b/drivers/thermal/max77620_thermal.c
@@ -139,8 +139,6 @@ static int max77620_thermal_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, mtherm);
-
return 0;
}
diff --git a/drivers/thermal/mediatek/auxadc_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c
index c537aed71017..843214d30bd8 100644
--- a/drivers/thermal/mediatek/auxadc_thermal.c
+++ b/drivers/thermal/mediatek/auxadc_thermal.c
@@ -1282,8 +1282,6 @@ static int mtk_thermal_probe(struct platform_device *pdev)
mtk_thermal_init_bank(mt, i, apmixed_phys_base,
auxadc_phys_base, ctrl_id);
- platform_set_drvdata(pdev, mt);
-
tzdev = devm_thermal_of_zone_register(&pdev->dev, 0, mt,
&mtk_thermal_ops);
if (IS_ERR(tzdev))
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index 054c965ae5e1..effd9b00a424 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -58,14 +58,19 @@
#define LVTS_PROTTC(__base) (__base + 0x00CC)
#define LVTS_CLKEN(__base) (__base + 0x00E4)
-#define LVTS_PERIOD_UNIT ((118 * 1000) / (256 * 38))
-#define LVTS_GROUP_INTERVAL 1
-#define LVTS_FILTER_INTERVAL 1
-#define LVTS_SENSOR_INTERVAL 1
-#define LVTS_HW_FILTER 0x2
+#define LVTS_PERIOD_UNIT 0
+#define LVTS_GROUP_INTERVAL 0
+#define LVTS_FILTER_INTERVAL 0
+#define LVTS_SENSOR_INTERVAL 0
+#define LVTS_HW_FILTER 0x0
#define LVTS_TSSEL_CONF 0x13121110
#define LVTS_CALSCALE_CONF 0x300
-#define LVTS_MONINT_CONF 0x9FBF7BDE
+#define LVTS_MONINT_CONF 0x8300318C
+
+#define LVTS_MONINT_OFFSET_SENSOR0 0xC
+#define LVTS_MONINT_OFFSET_SENSOR1 0x180
+#define LVTS_MONINT_OFFSET_SENSOR2 0x3000
+#define LVTS_MONINT_OFFSET_SENSOR3 0x3000000
#define LVTS_INT_SENSOR0 0x0009001F
#define LVTS_INT_SENSOR1 0x001203E0
@@ -81,8 +86,13 @@
#define LVTS_MSR_IMMEDIATE_MODE 0
#define LVTS_MSR_FILTERED_MODE 1
+#define LVTS_MSR_READ_TIMEOUT_US 400
+#define LVTS_MSR_READ_WAIT_US (LVTS_MSR_READ_TIMEOUT_US / 2)
+
#define LVTS_HW_SHUTDOWN_MT8195 105000
+#define LVTS_MINIMUM_THRESHOLD 20000
+
static int golden_temp = LVTS_GOLDEN_TEMP_DEFAULT;
static int coeff_b = LVTS_COEFF_B;
@@ -110,6 +120,8 @@ struct lvts_sensor {
void __iomem *base;
int id;
int dt_id;
+ int low_thresh;
+ int high_thresh;
};
struct lvts_ctrl {
@@ -119,6 +131,8 @@ struct lvts_ctrl {
int num_lvts_sensor;
int mode;
void __iomem *base;
+ int low_thresh;
+ int high_thresh;
};
struct lvts_domain {
@@ -190,7 +204,7 @@ static int lvts_debugfs_init(struct device *dev, struct lvts_domain *lvts_td)
int i;
lvts_td->dom_dentry = debugfs_create_dir(dev_name(dev), NULL);
- if (!lvts_td->dom_dentry)
+ if (IS_ERR(lvts_td->dom_dentry))
return 0;
for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
@@ -257,6 +271,7 @@ static int lvts_get_temp(struct thermal_zone_device *tz, int *temp)
struct lvts_sensor *lvts_sensor = thermal_zone_device_priv(tz);
void __iomem *msr = lvts_sensor->msr;
u32 value;
+ int rc;
/*
* Measurement registers:
@@ -269,7 +284,8 @@ static int lvts_get_temp(struct thermal_zone_device *tz, int *temp)
* 16 : Valid temperature
* 15-0 : Raw temperature
*/
- value = readl(msr);
+ rc = readl_poll_timeout(msr, value, value & BIT(16),
+ LVTS_MSR_READ_WAIT_US, LVTS_MSR_READ_TIMEOUT_US);
/*
* As the thermal zone temperature will be read before the
@@ -282,7 +298,7 @@ static int lvts_get_temp(struct thermal_zone_device *tz, int *temp)
* functioning temperature and directly jump to a system
* shutdown.
*/
- if (!(value & BIT(16)))
+ if (rc)
return -EAGAIN;
*temp = lvts_raw_to_temp(value & 0xFFFF);
@@ -290,32 +306,84 @@ static int lvts_get_temp(struct thermal_zone_device *tz, int *temp)
return 0;
}
+static void lvts_update_irq_mask(struct lvts_ctrl *lvts_ctrl)
+{
+ u32 masks[] = {
+ LVTS_MONINT_OFFSET_SENSOR0,
+ LVTS_MONINT_OFFSET_SENSOR1,
+ LVTS_MONINT_OFFSET_SENSOR2,
+ LVTS_MONINT_OFFSET_SENSOR3,
+ };
+ u32 value = 0;
+ int i;
+
+ value = readl(LVTS_MONINT(lvts_ctrl->base));
+
+ for (i = 0; i < ARRAY_SIZE(masks); i++) {
+ if (lvts_ctrl->sensors[i].high_thresh == lvts_ctrl->high_thresh
+ && lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh)
+ value |= masks[i];
+ else
+ value &= ~masks[i];
+ }
+
+ writel(value, LVTS_MONINT(lvts_ctrl->base));
+}
+
+static bool lvts_should_update_thresh(struct lvts_ctrl *lvts_ctrl, int high)
+{
+ int i;
+
+ if (high > lvts_ctrl->high_thresh)
+ return true;
+
+ for (i = 0; i < lvts_ctrl->num_lvts_sensor; i++)
+ if (lvts_ctrl->sensors[i].high_thresh == lvts_ctrl->high_thresh
+ && lvts_ctrl->sensors[i].low_thresh == lvts_ctrl->low_thresh)
+ return false;
+
+ return true;
+}
+
static int lvts_set_trips(struct thermal_zone_device *tz, int low, int high)
{
struct lvts_sensor *lvts_sensor = thermal_zone_device_priv(tz);
+ struct lvts_ctrl *lvts_ctrl = container_of(lvts_sensor, struct lvts_ctrl, sensors[lvts_sensor->id]);
void __iomem *base = lvts_sensor->base;
- u32 raw_low = lvts_temp_to_raw(low);
+ u32 raw_low = lvts_temp_to_raw(low != -INT_MAX ? low : LVTS_MINIMUM_THRESHOLD);
u32 raw_high = lvts_temp_to_raw(high);
+ bool should_update_thresh;
+
+ lvts_sensor->low_thresh = low;
+ lvts_sensor->high_thresh = high;
+
+ should_update_thresh = lvts_should_update_thresh(lvts_ctrl, high);
+ if (should_update_thresh) {
+ lvts_ctrl->high_thresh = high;
+ lvts_ctrl->low_thresh = low;
+ }
+ lvts_update_irq_mask(lvts_ctrl);
+
+ if (!should_update_thresh)
+ return 0;
/*
- * Hot to normal temperature threshold
+ * Low offset temperature threshold
*
- * LVTS_H2NTHRE
+ * LVTS_OFFSETL
*
* Bits:
*
* 14-0 : Raw temperature for threshold
*/
- if (low != -INT_MAX) {
- pr_debug("%s: Setting low limit temperature interrupt: %d\n",
- thermal_zone_device_type(tz), low);
- writel(raw_low, LVTS_H2NTHRE(base));
- }
+ pr_debug("%s: Setting low limit temperature interrupt: %d\n",
+ thermal_zone_device_type(tz), low);
+ writel(raw_low, LVTS_OFFSETL(base));
/*
- * Hot temperature threshold
+ * High offset temperature threshold
*
- * LVTS_HTHRE
+ * LVTS_OFFSETH
*
* Bits:
*
@@ -323,7 +391,7 @@ static int lvts_set_trips(struct thermal_zone_device *tz, int low, int high)
*/
pr_debug("%s: Setting high limit temperature interrupt: %d\n",
thermal_zone_device_type(tz), high);
- writel(raw_high, LVTS_HTHRE(base));
+ writel(raw_high, LVTS_OFFSETH(base));
return 0;
}
@@ -451,7 +519,7 @@ static irqreturn_t lvts_irq_handler(int irq, void *data)
for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
- aux = lvts_ctrl_irq_handler(lvts_td->lvts_ctrl);
+ aux = lvts_ctrl_irq_handler(&lvts_td->lvts_ctrl[i]);
if (aux != IRQ_HANDLED)
continue;
@@ -521,6 +589,9 @@ static int lvts_sensor_init(struct device *dev, struct lvts_ctrl *lvts_ctrl,
*/
lvts_sensor[i].msr = lvts_ctrl_data->mode == LVTS_MSR_IMMEDIATE_MODE ?
imm_regs[i] : msr_regs[i];
+
+ lvts_sensor[i].low_thresh = INT_MIN;
+ lvts_sensor[i].high_thresh = INT_MIN;
};
lvts_ctrl->num_lvts_sensor = lvts_ctrl_data->num_lvts_sensor;
@@ -688,6 +759,9 @@ static int lvts_ctrl_init(struct device *dev, struct lvts_domain *lvts_td,
*/
lvts_ctrl[i].hw_tshut_raw_temp =
lvts_temp_to_raw(lvts_data->lvts_ctrl[i].hw_tshut_temp);
+
+ lvts_ctrl[i].low_thresh = INT_MIN;
+ lvts_ctrl[i].high_thresh = INT_MIN;
}
/*
@@ -897,24 +971,6 @@ static int lvts_ctrl_configure(struct device *dev, struct lvts_ctrl *lvts_ctrl)
writel(value, LVTS_MSRCTL0(lvts_ctrl->base));
/*
- * LVTS_MSRCTL1 : Measurement control
- *
- * Bits:
- *
- * 9: Ignore MSRCTL0 config and do immediate measurement on sensor3
- * 6: Ignore MSRCTL0 config and do immediate measurement on sensor2
- * 5: Ignore MSRCTL0 config and do immediate measurement on sensor1
- * 4: Ignore MSRCTL0 config and do immediate measurement on sensor0
- *
- * That configuration will ignore the filtering and the delays
- * introduced below in MONCTL1 and MONCTL2
- */
- if (lvts_ctrl->mode == LVTS_MSR_IMMEDIATE_MODE) {
- value = BIT(9) | BIT(6) | BIT(5) | BIT(4);
- writel(value, LVTS_MSRCTL1(lvts_ctrl->base));
- }
-
- /*
* LVTS_MONCTL1 : Period unit and group interval configuration
*
* The clock source of LVTS thermal controller is 26MHz.
@@ -979,6 +1035,15 @@ static int lvts_ctrl_start(struct device *dev, struct lvts_ctrl *lvts_ctrl)
struct thermal_zone_device *tz;
u32 sensor_map = 0;
int i;
+ /*
+ * Bitmaps to enable each sensor on immediate and filtered modes, as
+ * described in MSRCTL1 and MONCTL0 registers below, respectively.
+ */
+ u32 sensor_imm_bitmap[] = { BIT(4), BIT(5), BIT(6), BIT(9) };
+ u32 sensor_filt_bitmap[] = { BIT(0), BIT(1), BIT(2), BIT(3) };
+
+ u32 *sensor_bitmap = lvts_ctrl->mode == LVTS_MSR_IMMEDIATE_MODE ?
+ sensor_imm_bitmap : sensor_filt_bitmap;
for (i = 0; i < lvts_ctrl->num_lvts_sensor; i++) {
@@ -1016,20 +1081,38 @@ static int lvts_ctrl_start(struct device *dev, struct lvts_ctrl *lvts_ctrl)
* map, so we can enable the temperature monitoring in
* the hardware thermal controller.
*/
- sensor_map |= BIT(i);
+ sensor_map |= sensor_bitmap[i];
}
/*
- * Bits:
- * 9: Single point access flow
- * 0-3: Enable sensing point 0-3
- *
* The initialization of the thermal zones gives us
* which sensor point to enable. If any thermal zone
* was not described in the device tree, it won't be
* enabled here in the sensor map.
*/
- writel(sensor_map | BIT(9), LVTS_MONCTL0(lvts_ctrl->base));
+ if (lvts_ctrl->mode == LVTS_MSR_IMMEDIATE_MODE) {
+ /*
+ * LVTS_MSRCTL1 : Measurement control
+ *
+ * Bits:
+ *
+ * 9: Ignore MSRCTL0 config and do immediate measurement on sensor3
+ * 6: Ignore MSRCTL0 config and do immediate measurement on sensor2
+ * 5: Ignore MSRCTL0 config and do immediate measurement on sensor1
+ * 4: Ignore MSRCTL0 config and do immediate measurement on sensor0
+ *
+ * That configuration will ignore the filtering and the delays
+ * introduced in MONCTL1 and MONCTL2
+ */
+ writel(sensor_map, LVTS_MSRCTL1(lvts_ctrl->base));
+ } else {
+ /*
+ * Bits:
+ * 9: Single point access flow
+ * 0-3: Enable sensing point 0-3
+ */
+ writel(sensor_map | BIT(9), LVTS_MONCTL0(lvts_ctrl->base));
+ }
return 0;
}
@@ -1138,7 +1221,7 @@ static int lvts_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return dev_err_probe(dev, irq, "No irq resource\n");
+ return irq;
ret = lvts_domain_init(dev, lvts_td, lvts_data);
if (ret)
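
The lvts_get_temp() hunk above swaps a single readl() plus validity check for readl_poll_timeout(), which re-reads the register until a condition holds or a timeout expires. A short sketch of the idiom follows; the register layout, field names and the 200/400 microsecond values are assumptions for illustration only.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define DEMO_MSR_VALID		BIT(16)		/* assumed "sample valid" bit */
#define DEMO_MSR_RAW		GENMASK(15, 0)	/* assumed raw temperature field */

/* Poll @msr every 200us, give up after 400us; 0 on success, -ETIMEDOUT otherwise. */
static int demo_read_raw_temp(void __iomem *msr, u32 *raw)
{
	u32 value;
	int ret;

	ret = readl_poll_timeout(msr, value, value & DEMO_MSR_VALID, 200, 400);
	if (ret)
		return ret;	/* caller may retry, e.g. by returning -EAGAIN */

	*raw = value & DEMO_MSR_RAW;
	return 0;
}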
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index a941b4241b0a..87c09f62ee81 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -23,7 +23,7 @@
#define BIT_APPEND 0x3
-struct tsens_legacy_calibration_format tsens_8916_nvmem = {
+static struct tsens_legacy_calibration_format tsens_8916_nvmem = {
.base_len = 7,
.base_shift = 3,
.sp_len = 5,
@@ -39,7 +39,7 @@ struct tsens_legacy_calibration_format tsens_8916_nvmem = {
},
};
-struct tsens_legacy_calibration_format tsens_8974_nvmem = {
+static struct tsens_legacy_calibration_format tsens_8974_nvmem = {
.base_len = 8,
.base_shift = 2,
.sp_len = 6,
@@ -61,7 +61,7 @@ struct tsens_legacy_calibration_format tsens_8974_nvmem = {
},
};
-struct tsens_legacy_calibration_format tsens_8974_backup_nvmem = {
+static struct tsens_legacy_calibration_format tsens_8974_backup_nvmem = {
.base_len = 8,
.base_shift = 2,
.sp_len = 6,
diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
index 51322430f1fe..dc1c4ae2d8b0 100644
--- a/drivers/thermal/qcom/tsens-v1.c
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -21,7 +21,7 @@
#define TM_HIGH_LOW_INT_STATUS_OFF 0x0088
#define TM_HIGH_LOW_Sn_INT_THRESHOLD_OFF 0x0090
-struct tsens_legacy_calibration_format tsens_qcs404_nvmem = {
+static struct tsens_legacy_calibration_format tsens_qcs404_nvmem = {
.base_len = 8,
.base_shift = 2,
.sp_len = 6,
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 58f4d8f7a3fd..e5bc2c82010f 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -887,7 +887,7 @@ static int exynos_map_dt_data(struct platform_device *pdev)
return -EADDRNOTAVAIL;
}
- data->soc = (enum soc_type)of_device_get_match_data(&pdev->dev);
+ data->soc = (uintptr_t)of_device_get_match_data(&pdev->dev);
switch (data->soc) {
case SOC_ARCH_EXYNOS4210:
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index 6e78616a576e..96d99289799a 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -122,8 +122,8 @@ static int spear_thermal_probe(struct platform_device *pdev)
stdev->flags = val;
writel_relaxed(stdev->flags, stdev->thermal_base);
- spear_thermal = thermal_zone_device_register("spear_thermal", 0, 0,
- stdev, &ops, NULL, 0, 0);
+ spear_thermal = thermal_tripless_zone_device_register("spear_thermal",
+ stdev, &ops, NULL);
if (IS_ERR(spear_thermal)) {
dev_err(&pdev->dev, "thermal zone device is NULL\n");
ret = PTR_ERR(spear_thermal);
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
index cca16d632d9f..f989b55a8aa8 100644
--- a/drivers/thermal/sun8i_thermal.c
+++ b/drivers/thermal/sun8i_thermal.c
@@ -56,8 +56,6 @@
#define SUN50I_H6_THS_PC_TEMP_PERIOD(x) ((GENMASK(19, 0) & (x)) << 12)
#define SUN50I_H6_THS_DATA_IRQ_STS(x) BIT(x)
-/* millidegree celsius */
-
struct tsensor {
struct ths_device *tmdev;
struct thermal_zone_device *tzd;
@@ -286,7 +284,7 @@ static int sun8i_ths_calibrate(struct ths_device *tmdev)
size_t callen;
int ret = 0;
- calcell = devm_nvmem_cell_get(dev, "calibration");
+ calcell = nvmem_cell_get(dev, "calibration");
if (IS_ERR(calcell)) {
if (PTR_ERR(calcell) == -EPROBE_DEFER)
return -EPROBE_DEFER;
@@ -316,6 +314,8 @@ static int sun8i_ths_calibrate(struct ths_device *tmdev)
kfree(caldata);
out:
+ if (!IS_ERR(calcell))
+ nvmem_cell_put(calcell);
return ret;
}
@@ -489,8 +489,6 @@ static int sun8i_ths_probe(struct platform_device *pdev)
if (!tmdev->chip)
return -EINVAL;
- platform_set_drvdata(pdev, tmdev);
-
ret = sun8i_ths_resource_init(tmdev);
if (ret)
return ret;
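
The sun8i calibration hunk above replaces devm_nvmem_cell_get() with a plain nvmem_cell_get()/nvmem_cell_put() pair, so the cell reference is dropped once the calibration blob has been consumed rather than held until unbind. The usual shape of that pattern is sketched below; the cell name and the consumer function are illustrative.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

static int demo_load_calibration(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t len;
	void *blob;

	cell = nvmem_cell_get(dev, "calibration");
	if (IS_ERR(cell))
		return PTR_ERR(cell);		/* may be -EPROBE_DEFER */

	blob = nvmem_cell_read(cell, &len);	/* kmalloc'ed copy of the cell */
	nvmem_cell_put(cell);			/* done with the cell itself */
	if (IS_ERR(blob))
		return PTR_ERR(blob);

	/* ... parse and apply the calibration data here ... */

	kfree(blob);
	return 0;
}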
diff --git a/drivers/thermal/tegra/tegra-bpmp-thermal.c b/drivers/thermal/tegra/tegra-bpmp-thermal.c
index a2879d624945..4ffc3bb3bf35 100644
--- a/drivers/thermal/tegra/tegra-bpmp-thermal.c
+++ b/drivers/thermal/tegra/tegra-bpmp-thermal.c
@@ -167,19 +167,69 @@ static int tegra_bpmp_thermal_get_num_zones(struct tegra_bpmp *bpmp,
return 0;
}
+static int tegra_bpmp_thermal_trips_supported(struct tegra_bpmp *bpmp, bool *supported)
+{
+ struct mrq_thermal_host_to_bpmp_request req;
+ union mrq_thermal_bpmp_to_host_response reply;
+ struct tegra_bpmp_message msg;
+ int err;
+
+ memset(&req, 0, sizeof(req));
+ req.type = CMD_THERMAL_QUERY_ABI;
+ req.query_abi.type = CMD_THERMAL_SET_TRIP;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.mrq = MRQ_THERMAL;
+ msg.tx.data = &req;
+ msg.tx.size = sizeof(req);
+ msg.rx.data = &reply;
+ msg.rx.size = sizeof(reply);
+
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err)
+ return err;
+
+ if (msg.rx.ret == 0) {
+ *supported = true;
+ return 0;
+ } else if (msg.rx.ret == -BPMP_ENODEV) {
+ *supported = false;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
static const struct thermal_zone_device_ops tegra_bpmp_of_thermal_ops = {
.get_temp = tegra_bpmp_thermal_get_temp,
.set_trips = tegra_bpmp_thermal_set_trips,
};
+static const struct thermal_zone_device_ops tegra_bpmp_of_thermal_ops_notrips = {
+ .get_temp = tegra_bpmp_thermal_get_temp,
+};
+
static int tegra_bpmp_thermal_probe(struct platform_device *pdev)
{
struct tegra_bpmp *bpmp = dev_get_drvdata(pdev->dev.parent);
+ const struct thermal_zone_device_ops *thermal_ops;
struct tegra_bpmp_thermal *tegra;
struct thermal_zone_device *tzd;
unsigned int i, max_num_zones;
+ bool supported;
int err;
+ err = tegra_bpmp_thermal_trips_supported(bpmp, &supported);
+ if (err) {
+ dev_err(&pdev->dev, "failed to determine if trip points are supported\n");
+ return err;
+ }
+
+ if (supported)
+ thermal_ops = &tegra_bpmp_of_thermal_ops;
+ else
+ thermal_ops = &tegra_bpmp_of_thermal_ops_notrips;
+
tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
if (!tegra)
return -ENOMEM;
@@ -222,7 +272,7 @@ static int tegra_bpmp_thermal_probe(struct platform_device *pdev)
}
tzd = devm_thermal_of_zone_register(
- &pdev->dev, i, zone, &tegra_bpmp_of_thermal_ops);
+ &pdev->dev, i, zone, thermal_ops);
if (IS_ERR(tzd)) {
if (PTR_ERR(tzd) == -EPROBE_DEFER)
return -EPROBE_DEFER;
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index f4f1a04f8c0f..1717e4a19dcb 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -142,7 +142,6 @@ static int gadc_thermal_probe(struct platform_device *pdev)
return ret;
gti->dev = &pdev->dev;
- platform_set_drvdata(pdev, gti);
gti->tz_dev = devm_thermal_of_zone_register(&pdev->dev, 0, gti,
&gadc_thermal_ops);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index a59700593d32..8717a3343512 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1266,7 +1266,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
return ERR_PTR(-EINVAL);
}
- if (num_trips > 0 && (!ops->get_trip_type || !ops->get_trip_temp) && !trips)
+ if (num_trips > 0 && !trips)
return ERR_PTR(-EINVAL);
if (!thermal_class)
@@ -1389,16 +1389,16 @@ free_tz:
}
EXPORT_SYMBOL_GPL(thermal_zone_device_register_with_trips);
-struct thermal_zone_device *thermal_zone_device_register(const char *type, int ntrips, int mask,
- void *devdata, struct thermal_zone_device_ops *ops,
- const struct thermal_zone_params *tzp, int passive_delay,
- int polling_delay)
+struct thermal_zone_device *thermal_tripless_zone_device_register(
+ const char *type,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp)
{
- return thermal_zone_device_register_with_trips(type, NULL, ntrips, mask,
- devdata, ops, tzp,
- passive_delay, polling_delay);
+ return thermal_zone_device_register_with_trips(type, NULL, 0, 0, devdata,
+ ops, tzp, 0, 0);
}
-EXPORT_SYMBOL_GPL(thermal_zone_device_register);
+EXPORT_SYMBOL_GPL(thermal_tripless_zone_device_register);
void *thermal_zone_device_priv(struct thermal_zone_device *tzd)
{
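
With the rename above, a driver that has no trip points calls the new helper directly instead of passing ntrips=0 and mask=0, as the spear_thermal conversion earlier in this series does. A hedged usage sketch (the demo_* names and the constant temperature are assumptions):

#include <linux/err.h>
#include <linux/thermal.h>

static int demo_get_temp(struct thermal_zone_device *tz, int *temp)
{
	*temp = 42000;	/* fixed 42 degrees C, purely illustrative */
	return 0;
}

static struct thermal_zone_device_ops demo_ops = {
	.get_temp = demo_get_temp,
};

static struct thermal_zone_device *demo_register(void *priv)
{
	/* type, driver data, ops, zone params (none here) */
	return thermal_tripless_zone_device_register("demo_thermal", priv,
						     &demo_ops, NULL);
}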
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 04513f9fbfa1..de884bea28b6 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -70,7 +70,7 @@ static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev)
void thermal_cdev_update(struct thermal_cooling_device *);
void __thermal_cdev_update(struct thermal_cooling_device *cdev);
-int get_tz_trend(struct thermal_zone_device *tz, int trip);
+int get_tz_trend(struct thermal_zone_device *tz, int trip_index);
struct thermal_instance *
get_thermal_instance(struct thermal_zone_device *tz,
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index cfba0965a22d..4d66372c9629 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -22,8 +22,9 @@
#include "thermal_core.h"
#include "thermal_trace.h"
-int get_tz_trend(struct thermal_zone_device *tz, int trip)
+int get_tz_trend(struct thermal_zone_device *tz, int trip_index)
{
+ struct thermal_trip *trip = tz->trips ? &tz->trips[trip_index] : NULL;
enum thermal_trend trend;
if (tz->emul_temperature || !tz->ops->get_trend ||
diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
index 53115cfdfd42..024e2e365a26 100644
--- a/drivers/thermal/thermal_trip.c
+++ b/drivers/thermal/thermal_trip.c
@@ -101,29 +101,11 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz)
int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
struct thermal_trip *trip)
{
- int ret;
-
- if (!tz || trip_id < 0 || trip_id >= tz->num_trips || !trip)
+ if (!tz || !tz->trips || trip_id < 0 || trip_id >= tz->num_trips || !trip)
return -EINVAL;
- if (tz->trips) {
- *trip = tz->trips[trip_id];
- return 0;
- }
-
- if (tz->ops->get_trip_hyst) {
- ret = tz->ops->get_trip_hyst(tz, trip_id, &trip->hysteresis);
- if (ret)
- return ret;
- } else {
- trip->hysteresis = 0;
- }
-
- ret = tz->ops->get_trip_temp(tz, trip_id, &trip->temperature);
- if (ret)
- return ret;
-
- return tz->ops->get_trip_type(tz, trip_id, &trip->type);
+ *trip = tz->trips[trip_id];
+ return 0;
}
EXPORT_SYMBOL_GPL(__thermal_zone_get_trip);
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index a1c9a1530183..0c2eb9c6e58b 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -314,7 +314,7 @@ int ti_bandgap_adc_to_mcelsius(struct ti_bandgap *bgp, int adc_val, int *t)
*/
static inline int ti_bandgap_validate(struct ti_bandgap *bgp, int id)
{
- if (!bgp || IS_ERR(bgp)) {
+ if (IS_ERR_OR_NULL(bgp)) {
pr_err("%s: invalid bandgap pointer\n", __func__);
return -EINVAL;
}
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index d414a4b7a94a..6ba2613627e1 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -109,7 +109,8 @@ static inline int __ti_thermal_get_temp(struct thermal_zone_device *tz, int *tem
return ret;
}
-static int __ti_thermal_get_trend(struct thermal_zone_device *tz, int trip, enum thermal_trend *trend)
+static int __ti_thermal_get_trend(struct thermal_zone_device *tz,
+ struct thermal_trip *trip, enum thermal_trend *trend)
{
struct ti_thermal_data *data = thermal_zone_device_priv(tz);
struct ti_bandgap *bgp;
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index 38fefd0e5268..c9b6bb46111c 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -12,7 +12,7 @@
#include "tb.h"
static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
- void **return_value)
+ void **ret)
{
struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
struct fwnode_handle *fwnode;
@@ -84,6 +84,7 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
if (link) {
dev_dbg(&nhi->pdev->dev, "created link from %s\n",
dev_name(&pdev->dev));
+ *(bool *)ret = true;
} else {
dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
dev_name(&pdev->dev));
@@ -104,22 +105,29 @@ out_put:
* Goes over the ACPI namespace finding tunneled ports that refer to the
* @nhi ACPI node. For each reference a device link is added. The link
* is automatically removed by the driver core.
+ *
+ * Returns %true if at least one link was created.
*/
-void tb_acpi_add_links(struct tb_nhi *nhi)
+bool tb_acpi_add_links(struct tb_nhi *nhi)
{
acpi_status status;
+ bool ret = false;
if (!has_acpi_companion(&nhi->pdev->dev))
- return;
+ return false;
/*
* Find all devices that have usb4-host-controller interface
* property that references to this NHI.
*/
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 32,
- tb_acpi_add_link, NULL, nhi, NULL);
- if (ACPI_FAILURE(status))
+ tb_acpi_add_link, NULL, nhi, (void **)&ret);
+ if (ACPI_FAILURE(status)) {
dev_warn(&nhi->pdev->dev, "failed to enumerate tunneled ports\n");
+ return false;
+ }
+
+ return ret;
}
/**
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 7ea63bb31714..43171cc1cc2d 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -2188,46 +2188,47 @@ struct device_type tb_switch_type = {
static int tb_switch_get_generation(struct tb_switch *sw)
{
- switch (sw->config.device_id) {
- case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
- case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
- case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
- case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
- case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
- case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
- case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
- case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
- return 1;
-
- case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
- case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
- case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
- return 2;
-
- case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
- case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
- case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
- case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
- case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
- case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
- case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
- case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
- case PCI_DEVICE_ID_INTEL_ICL_NHI0:
- case PCI_DEVICE_ID_INTEL_ICL_NHI1:
- return 3;
+ if (tb_switch_is_usb4(sw))
+ return 4;
- default:
- if (tb_switch_is_usb4(sw))
- return 4;
+ if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
+ case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
+ case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
+ case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
+ case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
+ case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
+ case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
+ return 1;
- /*
- * For unknown switches assume generation to be 1 to be
- * on the safe side.
- */
- tb_sw_warn(sw, "unsupported switch device id %#x\n",
- sw->config.device_id);
- return 1;
+ case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
+ return 2;
+
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
+ case PCI_DEVICE_ID_INTEL_ICL_NHI0:
+ case PCI_DEVICE_ID_INTEL_ICL_NHI1:
+ return 3;
+ }
}
+
+ /*
+ * For unknown switches assume generation to be 1 to be on the
+ * safe side.
+ */
+ tb_sw_warn(sw, "unsupported switch device id %#x\n",
+ sw->config.device_id);
+ return 1;
}
static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 3fb4553a6442..dd0a1ef8cf12 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -2368,12 +2368,13 @@ static const struct tb_cm_ops tb_cm_ops = {
* downstream ports and the NHI so that the device core will make sure
* NHI is resumed first before the rest.
*/
-static void tb_apple_add_links(struct tb_nhi *nhi)
+static bool tb_apple_add_links(struct tb_nhi *nhi)
{
struct pci_dev *upstream, *pdev;
+ bool ret;
if (!x86_apple_machine)
- return;
+ return false;
switch (nhi->pdev->device) {
case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
@@ -2382,26 +2383,27 @@ static void tb_apple_add_links(struct tb_nhi *nhi)
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
break;
default:
- return;
+ return false;
}
upstream = pci_upstream_bridge(nhi->pdev);
while (upstream) {
if (!pci_is_pcie(upstream))
- return;
+ return false;
if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
break;
upstream = pci_upstream_bridge(upstream);
}
if (!upstream)
- return;
+ return false;
/*
* For each hotplug downstream port, add a device link
* back to NHI so that PCIe tunnels can be re-established after
* sleep.
*/
+ ret = false;
for_each_pci_bridge(pdev, upstream->subordinate) {
const struct device_link *link;
@@ -2417,11 +2419,14 @@ static void tb_apple_add_links(struct tb_nhi *nhi)
if (link) {
dev_dbg(&nhi->pdev->dev, "created link from %s\n",
dev_name(&pdev->dev));
+ ret = true;
} else {
dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
dev_name(&pdev->dev));
}
}
+
+ return ret;
}
struct tb *tb_probe(struct tb_nhi *nhi)
@@ -2448,8 +2453,13 @@ struct tb *tb_probe(struct tb_nhi *nhi)
tb_dbg(tb, "using software connection manager\n");
- tb_apple_add_links(nhi);
- tb_acpi_add_links(nhi);
+ /*
+ * Device links are needed to make sure we establish tunnels
+ * before the PCIe/USB stack is resumed so complain here if we
+ * found them missing.
+ */
+ if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
+ tb_warn(tb, "device links to tunneled native ports are missing!\n");
return tb;
}
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 57a9b272cb94..d2a55ad2fd3e 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -1333,7 +1333,7 @@ static inline bool usb4_port_device_is_offline(const struct usb4_port *usb4)
void tb_check_quirks(struct tb_switch *sw);
#ifdef CONFIG_ACPI
-void tb_acpi_add_links(struct tb_nhi *nhi);
+bool tb_acpi_add_links(struct tb_nhi *nhi);
bool tb_acpi_is_native(void);
bool tb_acpi_may_tunnel_usb3(void);
@@ -1346,7 +1346,7 @@ void tb_acpi_exit(void);
int tb_acpi_power_on_retimers(struct tb_port *port);
int tb_acpi_power_off_retimers(struct tb_port *port);
#else
-static inline void tb_acpi_add_links(struct tb_nhi *nhi) { }
+static inline bool tb_acpi_add_links(struct tb_nhi *nhi) { return false; }
static inline bool tb_acpi_is_native(void) { return true; }
static inline bool tb_acpi_may_tunnel_usb3(void) { return true; }
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
index 0dfd1e083994..747f88703d5c 100644
--- a/drivers/thunderbolt/tmu.c
+++ b/drivers/thunderbolt/tmu.c
@@ -19,7 +19,7 @@ static const unsigned int tmu_rates[] = {
[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = 16,
};
-const struct {
+static const struct {
unsigned int freq_meas_window;
unsigned int avg_const;
unsigned int delta_avg_const;
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 069de553127c..5646dc6242cd 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -239,6 +239,7 @@ config MOXA_SMARTIO
config SYNCLINK_GT
tristate "SyncLink GT/AC support"
depends on SERIAL_NONSTANDARD && PCI
+ depends on BROKEN
help
Support for SyncLink GT and SyncLink AC families of
synchronous and asynchronous serial adapters
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index c06ad0a0744b..785558c65ae8 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -696,7 +696,7 @@ static void change_speed(struct tty_struct *tty, struct serial_state *info,
local_irq_restore(flags);
}
-static int rs_put_char(struct tty_struct *tty, unsigned char ch)
+static int rs_put_char(struct tty_struct *tty, u8 ch)
{
struct serial_state *info;
unsigned long flags;
@@ -741,7 +741,7 @@ static void rs_flush_chars(struct tty_struct *tty)
local_irq_restore(flags);
}
-static int rs_write(struct tty_struct * tty, const unsigned char *buf, int count)
+static ssize_t rs_write(struct tty_struct * tty, const u8 *buf, size_t count)
{
int c, ret = 0;
struct serial_state *info = tty->driver_data;
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index 8595483f4697..a067628e01c8 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -466,8 +466,8 @@ static irqreturn_t ehv_bc_tty_tx_isr(int irq, void *data)
* ehv_bc_tty_write_room() will never lie, so the tty layer will never send us
* too much data.
*/
-static int ehv_bc_tty_write(struct tty_struct *ttys, const unsigned char *s,
- int count)
+static ssize_t ehv_bc_tty_write(struct tty_struct *ttys, const u8 *s,
+ size_t count)
{
struct ehv_bc_data *bc = ttys->driver_data;
unsigned long flags;
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index d02de3f0326f..4591f940b7a1 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -125,8 +125,7 @@ static void goldfish_tty_rw(struct goldfish_tty *qtty,
}
}
-static void goldfish_tty_do_write(int line, const char *buf,
- unsigned int count)
+static void goldfish_tty_do_write(int line, const u8 *buf, unsigned int count)
{
struct goldfish_tty *qtty = &goldfish_ttys[line];
unsigned long address = (unsigned long)(void *)buf;
@@ -186,8 +185,8 @@ static void goldfish_tty_hangup(struct tty_struct *tty)
tty_port_hangup(tty->port);
}
-static int goldfish_tty_write(struct tty_struct *tty, const unsigned char *buf,
- int count)
+static ssize_t goldfish_tty_write(struct tty_struct *tty, const u8 *buf,
+ size_t count)
{
goldfish_tty_do_write(tty->index, buf, count);
return count;
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 10c10cfdf92a..959fae54ca39 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -496,11 +496,11 @@ static int hvc_push(struct hvc_struct *hp)
return n;
}
-static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count)
+static ssize_t hvc_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct hvc_struct *hp = tty->driver_data;
unsigned long flags;
- int rsize, written = 0;
+ size_t rsize, written = 0;
/* This write was probably executed during a tty close. */
if (!hp)
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 794c7b18aa06..992e199e0ea8 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -14,7 +14,7 @@
#include <linux/console.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 1de1a09bf82d..d29fdfe9d93d 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -1257,15 +1257,14 @@ static void hvcs_hangup(struct tty_struct * tty)
* tty_hangup will allow hvcs_write time to complete execution before it
* terminates our device.
*/
-static int hvcs_write(struct tty_struct *tty,
- const unsigned char *buf, int count)
+static ssize_t hvcs_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct hvcs_struct *hvcsd = tty->driver_data;
unsigned int unit_address;
const unsigned char *charbuf;
unsigned long flags;
- int total_sent = 0;
- int tosend = 0;
+ size_t total_sent = 0;
+ size_t tosend = 0;
int result = 0;
/*
@@ -1300,7 +1299,8 @@ static int hvcs_write(struct tty_struct *tty,
unit_address = hvcsd->vdev->unit_address;
while (count > 0) {
- tosend = min(count, (HVCS_BUFF_LEN - hvcsd->chars_in_buffer));
+ tosend = min_t(size_t, count,
+ (HVCS_BUFF_LEN - hvcsd->chars_in_buffer));
/*
* No more space, this probably means that the last call to
* hvcs_write() didn't succeed and the buffer was filled up.
diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
index a200d01eceed..a94068bce76f 100644
--- a/drivers/tty/hvc/hvsi.c
+++ b/drivers/tty/hvc/hvsi.c
@@ -904,14 +904,13 @@ static unsigned int hvsi_chars_in_buffer(struct tty_struct *tty)
return hp->n_outbuf;
}
-static int hvsi_write(struct tty_struct *tty,
- const unsigned char *buf, int count)
+static ssize_t hvsi_write(struct tty_struct *tty, const u8 *source,
+ size_t count)
{
struct hvsi_struct *hp = tty->driver_data;
- const char *source = buf;
unsigned long flags;
- int total = 0;
- int origcount = count;
+ size_t total = 0;
+ size_t origcount = count;
spin_lock_irqsave(&hp->lock, flags);
@@ -929,7 +928,7 @@ static int hvsi_write(struct tty_struct *tty,
* will see there is no room in outbuf and return.
*/
while ((count > 0) && (hvsi_write_room(tty) > 0)) {
- int chunksize = min_t(int, count, hvsi_write_room(tty));
+ size_t chunksize = min_t(size_t, count, hvsi_write_room(tty));
BUG_ON(hp->n_outbuf < 0);
memcpy(hp->outbuf + hp->n_outbuf, source, chunksize);
@@ -953,8 +952,8 @@ out:
spin_unlock_irqrestore(&hp->lock, flags);
if (total != origcount)
- pr_debug("%s: wanted %i, only wrote %i\n", __func__, origcount,
- total);
+ pr_debug("%s: wanted %zu, only wrote %zu\n", __func__,
+ origcount, total);
return total;
}
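
The hvsi change above is one instance of the tree-wide move of tty write paths from int/unsigned char to ssize_t/size_t/u8. Below is a hedged sketch of what a driver's write op looks like after the conversion; the demo_port FIFO is an assumption used only to show the min_t(size_t, ...) clamping that replaces the old signed arithmetic.

#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/types.h>

#define DEMO_XMIT_SIZE	64

struct demo_port {
	u8	xmit_buf[DEMO_XMIT_SIZE];
	size_t	xmit_cnt;
};

static ssize_t demo_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
	struct demo_port *port = tty->driver_data;
	size_t chunk;

	/* Clamp without the old int casts; both operands are size_t here. */
	chunk = min_t(size_t, count, DEMO_XMIT_SIZE - port->xmit_cnt);

	memcpy(port->xmit_buf + port->xmit_cnt, buf, chunk);
	port->xmit_cnt += chunk;

	return chunk;	/* bytes accepted, as a non-negative ssize_t */
}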
diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c
index f5d3e68f5750..001ec318a918 100644
--- a/drivers/tty/ipwireless/hardware.c
+++ b/drivers/tty/ipwireless/hardware.c
@@ -1292,7 +1292,7 @@ static void *alloc_ctrl_packet(int header_size,
}
int ipwireless_send_packet(struct ipw_hardware *hw, unsigned int channel_idx,
- const unsigned char *data, unsigned int length,
+ const u8 *data, unsigned int length,
void (*callback) (void *cb, unsigned int length),
void *callback_data)
{
diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
index 9edd5ae17580..b6de40815fb9 100644
--- a/drivers/tty/ipwireless/tty.c
+++ b/drivers/tty/ipwireless/tty.c
@@ -186,8 +186,8 @@ static void ipw_write_packet_sent_callback(void *callback_data,
tty->tx_bytes_queued -= packet_length;
}
-static int ipw_write(struct tty_struct *linux_tty,
- const unsigned char *buf, int count)
+static ssize_t ipw_write(struct tty_struct *linux_tty, const u8 *buf,
+ size_t count)
{
struct ipw_tty *tty = linux_tty->driver_data;
int room, ret;
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index e81701a66429..369ec71c24ef 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -796,8 +796,8 @@ static void mips_ejtag_fdc_tty_hangup(struct tty_struct *tty)
tty_port_hangup(tty->port);
}
-static int mips_ejtag_fdc_tty_write(struct tty_struct *tty,
- const unsigned char *buf, int total)
+static ssize_t mips_ejtag_fdc_tty_write(struct tty_struct *tty, const u8 *buf,
+ size_t total)
{
int count, block;
struct mips_ejtag_fdc_tty_port *dport = tty->driver_data;
@@ -816,7 +816,7 @@ static int mips_ejtag_fdc_tty_write(struct tty_struct *tty,
*/
spin_lock(&dport->xmit_lock);
/* Work out how many bytes we can write to the xmit buffer */
- total = min(total, (int)(priv->xmit_size - dport->xmit_cnt));
+ total = min_t(size_t, total, priv->xmit_size - dport->xmit_cnt);
atomic_add(total, &priv->xmit_total);
dport->xmit_cnt += total;
/* Write the actual bytes (may need splitting if it wraps) */
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index 42fa4c878b2e..bf3f87ba3a92 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -487,7 +487,7 @@ module_param(ttymajor, int, 0);
*/
static int moxa_open(struct tty_struct *, struct file *);
static void moxa_close(struct tty_struct *, struct file *);
-static int moxa_write(struct tty_struct *, const unsigned char *, int);
+static ssize_t moxa_write(struct tty_struct *, const u8 *, size_t);
static unsigned int moxa_write_room(struct tty_struct *);
static void moxa_flush_buffer(struct tty_struct *);
static unsigned int moxa_chars_in_buffer(struct tty_struct *);
@@ -1499,8 +1499,7 @@ static void moxa_close(struct tty_struct *tty, struct file *filp)
tty_port_close(&ch->port, tty, filp);
}
-static int moxa_write(struct tty_struct *tty,
- const unsigned char *buf, int count)
+static ssize_t moxa_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct moxa_port *ch = tty->driver_data;
unsigned long flags;
@@ -2164,8 +2163,7 @@ static int MoxaPortLineStatus(struct moxa_port *port)
return val;
}
-static int MoxaPortWriteData(struct tty_struct *tty,
- const unsigned char *buffer, int len)
+static int MoxaPortWriteData(struct tty_struct *tty, const u8 *buffer, int len)
{
struct moxa_port *port = tty->driver_data;
void __iomem *baseAddr, *ofsAddr, *ofs;
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 10855e66fda1..10aa4ed38793 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -901,7 +901,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
tty_port_close(tty->port, tty, filp);
}
-static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int count)
+static ssize_t mxser_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct mxser_port *info = tty->driver_data;
unsigned long flags;
@@ -920,7 +920,7 @@ static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int cou
return written;
}
-static int mxser_put_char(struct tty_struct *tty, unsigned char ch)
+static int mxser_put_char(struct tty_struct *tty, u8 ch)
{
struct mxser_port *info = tty->driver_data;
unsigned long flags;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 739f522cb893..b3550ff9c494 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -339,6 +339,7 @@ struct gsm_mux {
unsigned long bad_fcs;
unsigned long malformed;
unsigned long io_error;
+ unsigned long open_error;
unsigned long bad_size;
unsigned long unsupported;
};
@@ -1450,15 +1451,16 @@ static int gsm_control_command(struct gsm_mux *gsm, int cmd, const u8 *data,
int dlen)
{
struct gsm_msg *msg;
+ struct gsm_dlci *dlci = gsm->dlci[0];
- msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->dlci[0]->ftype);
+ msg = gsm_data_alloc(gsm, 0, dlen + 2, dlci->ftype);
if (msg == NULL)
return -ENOMEM;
msg->data[0] = (cmd << 1) | CR | EA; /* Set C/R */
msg->data[1] = (dlen << 1) | EA;
memcpy(msg->data + 2, data, dlen);
- gsm_data_queue(gsm->dlci[0], msg);
+ gsm_data_queue(dlci, msg);
return 0;
}
@@ -1477,14 +1479,15 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, const u8 *data,
int dlen)
{
struct gsm_msg *msg;
+ struct gsm_dlci *dlci = gsm->dlci[0];
- msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->dlci[0]->ftype);
+ msg = gsm_data_alloc(gsm, 0, dlen + 2, dlci->ftype);
if (msg == NULL)
return;
msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */
msg->data[1] = (dlen << 1) | EA;
memcpy(msg->data + 2, data, dlen);
- gsm_data_queue(gsm->dlci[0], msg);
+ gsm_data_queue(dlci, msg);
}
/**
@@ -1589,6 +1592,7 @@ static int gsm_process_negotiation(struct gsm_mux *gsm, unsigned int addr,
if (debug & DBG_ERRORS)
pr_info("%s unsupported I frame request in PN\n",
__func__);
+ gsm->unsupported++;
return -EINVAL;
default:
if (debug & DBG_ERRORS)
@@ -1730,25 +1734,32 @@ static void gsm_control_negotiation(struct gsm_mux *gsm, unsigned int cr,
struct gsm_dlci *dlci;
struct gsm_dlci_param_bits *params;
- if (dlen < sizeof(struct gsm_dlci_param_bits))
+ if (dlen < sizeof(struct gsm_dlci_param_bits)) {
+ gsm->open_error++;
return;
+ }
/* Invalid DLCI? */
params = (struct gsm_dlci_param_bits *)data;
addr = FIELD_GET(PN_D_FIELD_DLCI, params->d_bits);
- if (addr == 0 || addr >= NUM_DLCI || !gsm->dlci[addr])
+ if (addr == 0 || addr >= NUM_DLCI || !gsm->dlci[addr]) {
+ gsm->open_error++;
return;
+ }
dlci = gsm->dlci[addr];
/* Too late for parameter negotiation? */
- if ((!cr && dlci->state == DLCI_OPENING) || dlci->state == DLCI_OPEN)
+ if ((!cr && dlci->state == DLCI_OPENING) || dlci->state == DLCI_OPEN) {
+ gsm->open_error++;
return;
+ }
/* Process the received parameters */
if (gsm_process_negotiation(gsm, addr, cr, params) != 0) {
/* Negotiation failed. Close the link. */
if (debug & DBG_ERRORS)
pr_info("%s PN failed\n", __func__);
+ gsm->open_error++;
gsm_dlci_close(dlci);
return;
}
@@ -1768,6 +1779,7 @@ static void gsm_control_negotiation(struct gsm_mux *gsm, unsigned int cr,
} else {
if (debug & DBG_ERRORS)
pr_info("%s PN in invalid state\n", __func__);
+ gsm->open_error++;
}
}
@@ -1888,6 +1900,8 @@ static void gsm_control_message(struct gsm_mux *gsm, unsigned int command,
/* Optional unsupported commands */
case CMD_RPN: /* Remote port negotiation */
case CMD_SNC: /* Service negotiation command */
+ gsm->unsupported++;
+ fallthrough;
default:
/* Reply to bad commands with an NSC */
buf[0] = command;
@@ -2221,6 +2235,7 @@ static void gsm_dlci_t1(struct timer_list *t)
dlci->retries--;
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
} else {
+ gsm->open_error++;
gsm_dlci_begin_close(dlci); /* prevent half open link */
}
break;
@@ -2236,6 +2251,7 @@ static void gsm_dlci_t1(struct timer_list *t)
dlci->mode = DLCI_MODE_ADM;
gsm_dlci_open(dlci);
} else {
+ gsm->open_error++;
gsm_dlci_begin_close(dlci); /* prevent half open link */
}
@@ -2444,8 +2460,10 @@ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
data += dlen;
/* Malformed command? */
- if (clen > len)
+ if (clen > len) {
+ dlci->gsm->malformed++;
return;
+ }
if (command & 1)
gsm_control_message(dlci->gsm, command, data, clen);
@@ -2532,6 +2550,8 @@ static int gsm_dlci_config(struct gsm_dlci *dlci, struct gsm_dlci_config *dc, in
return -EINVAL;
if (dc->k > 7)
return -EINVAL;
+ if (dc->flags & ~GSM_FL_RESTART) /* allow future extensions */
+ return -EINVAL;
/*
* See what is needed for reconfiguration
@@ -2546,6 +2566,8 @@ static int gsm_dlci_config(struct gsm_dlci *dlci, struct gsm_dlci_config *dc, in
/* Requires care */
if (dc->priority != dlci->prio)
need_restart = true;
+ if (dc->flags & GSM_FL_RESTART)
+ need_restart = true;
if ((open && gsm->wait_config) || need_restart)
need_open = true;
@@ -2753,12 +2775,16 @@ static void gsm_queue(struct gsm_mux *gsm)
switch (gsm->control) {
case SABM|PF:
- if (cr == 1)
+ if (cr == 1) {
+ gsm->open_error++;
goto invalid;
+ }
if (dlci == NULL)
dlci = gsm_dlci_alloc(gsm, address);
- if (dlci == NULL)
+ if (dlci == NULL) {
+ gsm->open_error++;
return;
+ }
if (dlci->dead)
gsm_response(gsm, address, DM|PF);
else {
@@ -3276,7 +3302,6 @@ static void gsm_copy_config_values(struct gsm_mux *gsm,
static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
{
- int ret = 0;
int need_close = 0;
int need_restart = 0;
@@ -3355,7 +3380,7 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
* and removing from the mux array
*/
if (gsm->dead) {
- ret = gsm_activate_mux(gsm);
+ int ret = gsm_activate_mux(gsm);
if (ret)
return ret;
if (gsm->initiator)
@@ -3374,6 +3399,7 @@ static void gsm_copy_config_ext_values(struct gsm_mux *gsm,
static int gsm_config_ext(struct gsm_mux *gsm, struct gsm_config_ext *ce)
{
+ bool need_restart = false;
unsigned int i;
/*
@@ -3383,6 +3409,20 @@ static int gsm_config_ext(struct gsm_mux *gsm, struct gsm_config_ext *ce)
for (i = 0; i < ARRAY_SIZE(ce->reserved); i++)
if (ce->reserved[i])
return -EINVAL;
+ if (ce->flags & ~GSM_FL_RESTART)
+ return -EINVAL;
+
+ /* Requires care */
+ if (ce->flags & GSM_FL_RESTART)
+ need_restart = true;
+
+ /*
+ * Close down what is needed, restart and initiate the new
+ * configuration. On the first time there is no DLCI[0]
+ * and closing or cleaning up is not necessary.
+ */
+ if (need_restart)
+ gsm_cleanup_mux(gsm, true);
/*
* Setup the new configuration values
@@ -3390,6 +3430,14 @@ static int gsm_config_ext(struct gsm_mux *gsm, struct gsm_config_ext *ce)
gsm->wait_config = ce->wait_config ? true : false;
gsm->keep_alive = ce->keep_alive;
+ if (gsm->dead) {
+ int ret = gsm_activate_mux(gsm);
+ if (ret)
+ return ret;
+ if (gsm->initiator)
+ gsm_dlci_begin_open(gsm->dlci[0]);
+ }
+
return 0;
}
@@ -3490,8 +3538,8 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
gsm->tty = NULL;
}
-static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count)
+static void gsmld_receive_buf(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count)
{
struct gsm_mux *gsm = tty->disc_data;
char flags = TTY_NORMAL;
@@ -3577,6 +3625,9 @@ static int gsmld_open(struct tty_struct *tty)
{
struct gsm_mux *gsm;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
if (tty->ops->write == NULL)
return -EINVAL;
@@ -3636,9 +3687,8 @@ static void gsmld_write_wakeup(struct tty_struct *tty)
* This code must be sure never to sleep through a hangup.
*/
-static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
- unsigned char *buf, size_t nr,
- void **cookie, unsigned long offset)
+static ssize_t gsmld_read(struct tty_struct *tty, struct file *file, u8 *buf,
+ size_t nr, void **cookie, unsigned long offset)
{
return -EOPNOTSUPP;
}
@@ -3658,7 +3708,7 @@ static ssize_t gsmld_read(struct tty_struct *tty, struct file *file,
*/
static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
- const unsigned char *buf, size_t nr)
+ const u8 *buf, size_t nr)
{
struct gsm_mux *gsm = tty->disc_data;
unsigned long flags;
@@ -4254,8 +4304,7 @@ static void gsmtty_hangup(struct tty_struct *tty)
gsm_dlci_begin_close(dlci);
}
-static int gsmtty_write(struct tty_struct *tty, const unsigned char *buf,
- int len)
+static ssize_t gsmtty_write(struct tty_struct *tty, const u8 *buf, size_t len)
{
int sent;
struct gsm_dlci *dlci = tty->driver_data;
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 46b09bfb6f3a..a670419efe79 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -369,13 +369,13 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
* Called by tty low level driver when receive data is available. Data is
* interpreted as one HDLC frame.
*/
-static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
- const char *flags, int count)
+static void n_hdlc_tty_receive(struct tty_struct *tty, const u8 *data,
+ const u8 *flags, size_t count)
{
register struct n_hdlc *n_hdlc = tty->disc_data;
register struct n_hdlc_buf *buf;
- pr_debug("%s() called count=%d\n", __func__, count);
+ pr_debug("%s() called count=%zu\n", __func__, count);
if (count > maxframe) {
pr_debug("rx count>maxframesize, data discarded\n");
@@ -425,8 +425,8 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
* Returns the number of bytes returned or error code.
*/
static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
- __u8 *kbuf, size_t nr,
- void **cookie, unsigned long offset)
+ u8 *kbuf, size_t nr, void **cookie,
+ unsigned long offset)
{
struct n_hdlc *n_hdlc = tty->disc_data;
int ret = 0;
@@ -518,7 +518,7 @@ done_with_rbuf:
* Returns the number of bytes written (or error code).
*/
static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
- const unsigned char *data, size_t count)
+ const u8 *data, size_t count)
{
struct n_hdlc *n_hdlc = tty->disc_data;
int error = 0;
diff --git a/drivers/tty/n_null.c b/drivers/tty/n_null.c
index f913b665af72..5a429d923eb3 100644
--- a/drivers/tty/n_null.c
+++ b/drivers/tty/n_null.c
@@ -10,43 +10,24 @@
* Copyright (C) Intel 2017
*/
-static int n_null_open(struct tty_struct *tty)
-{
- return 0;
-}
-
-static void n_null_close(struct tty_struct *tty)
-{
-}
-
-static ssize_t n_null_read(struct tty_struct *tty, struct file *file,
- unsigned char *buf, size_t nr,
- void **cookie, unsigned long offset)
+static ssize_t n_null_read(struct tty_struct *tty, struct file *file, u8 *buf,
+ size_t nr, void **cookie, unsigned long offset)
{
return -EOPNOTSUPP;
}
static ssize_t n_null_write(struct tty_struct *tty, struct file *file,
- const unsigned char *buf, size_t nr)
+ const u8 *buf, size_t nr)
{
return -EOPNOTSUPP;
}
-static void n_null_receivebuf(struct tty_struct *tty,
- const unsigned char *cp, const char *fp,
- int cnt)
-{
-}
-
static struct tty_ldisc_ops null_ldisc = {
.owner = THIS_MODULE,
.num = N_NULL,
.name = "n_null",
- .open = n_null_open,
- .close = n_null_close,
.read = n_null_read,
.write = n_null_write,
- .receive_buf = n_null_receivebuf
};
static int __init n_null_init(void)
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 552e8a741562..6c9a408d67cd 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -99,7 +99,7 @@ struct n_tty_data {
/* private to n_tty_receive_overrun (single-threaded) */
unsigned long overrun_time;
- int num_overrun;
+ unsigned int num_overrun;
/* non-atomic */
bool no_room;
@@ -109,9 +109,9 @@ struct n_tty_data {
unsigned char push:1;
/* shared by producer and consumer */
- char read_buf[N_TTY_BUF_SIZE];
+ u8 read_buf[N_TTY_BUF_SIZE];
DECLARE_BITMAP(read_flags, N_TTY_BUF_SIZE);
- unsigned char echo_buf[N_TTY_BUF_SIZE];
+ u8 echo_buf[N_TTY_BUF_SIZE];
/* consumer-published */
size_t read_tail;
@@ -136,38 +136,36 @@ static inline size_t read_cnt(struct n_tty_data *ldata)
return ldata->read_head - ldata->read_tail;
}
-static inline unsigned char read_buf(struct n_tty_data *ldata, size_t i)
+static inline u8 read_buf(struct n_tty_data *ldata, size_t i)
{
- return ldata->read_buf[i & (N_TTY_BUF_SIZE - 1)];
+ return ldata->read_buf[MASK(i)];
}
-static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i)
+static inline u8 *read_buf_addr(struct n_tty_data *ldata, size_t i)
{
- return &ldata->read_buf[i & (N_TTY_BUF_SIZE - 1)];
+ return &ldata->read_buf[MASK(i)];
}
-static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i)
+static inline u8 echo_buf(struct n_tty_data *ldata, size_t i)
{
smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
- return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
+ return ldata->echo_buf[MASK(i)];
}
-static inline unsigned char *echo_buf_addr(struct n_tty_data *ldata, size_t i)
+static inline u8 *echo_buf_addr(struct n_tty_data *ldata, size_t i)
{
- return &ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
+ return &ldata->echo_buf[MASK(i)];
}
/* If we are not echoing the data, perhaps this is a secret so erase it */
-static void zero_buffer(struct tty_struct *tty, u8 *buffer, int size)
+static void zero_buffer(const struct tty_struct *tty, u8 *buffer, size_t size)
{
- bool icanon = !!L_ICANON(tty);
- bool no_echo = !L_ECHO(tty);
-
- if (icanon && no_echo)
- memset(buffer, 0x00, size);
+ if (L_ICANON(tty) && !L_ECHO(tty))
+ memset(buffer, 0, size);
}
-static void tty_copy(struct tty_struct *tty, void *to, size_t tail, size_t n)
+static void tty_copy(const struct tty_struct *tty, void *to, size_t tail,
+ size_t n)
{
struct n_tty_data *ldata = tty->disc_data;
size_t size = N_TTY_BUF_SIZE - tail;
@@ -198,7 +196,7 @@ static void tty_copy(struct tty_struct *tty, void *to, size_t tail, size_t n)
* * n_tty_read()/consumer path:
* holds non-exclusive %termios_rwsem
*/
-static void n_tty_kick_worker(struct tty_struct *tty)
+static void n_tty_kick_worker(const struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -218,16 +216,12 @@ static void n_tty_kick_worker(struct tty_struct *tty)
}
}
-static ssize_t chars_in_buffer(struct tty_struct *tty)
+static ssize_t chars_in_buffer(const struct tty_struct *tty)
{
- struct n_tty_data *ldata = tty->disc_data;
- ssize_t n = 0;
+ const struct n_tty_data *ldata = tty->disc_data;
+ size_t head = ldata->icanon ? ldata->canon_head : ldata->commit_head;
- if (!ldata->icanon)
- n = ldata->commit_head - ldata->read_tail;
- else
- n = ldata->canon_head - ldata->read_tail;
- return n;
+ return head - ldata->read_tail;
}
/**
@@ -309,7 +303,7 @@ static void n_tty_check_unthrottle(struct tty_struct *tty)
* * n_tty_receive_buf()/producer path:
* caller holds non-exclusive %termios_rwsem
*/
-static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
+static inline void put_tty_queue(u8 c, struct n_tty_data *ldata)
{
*read_buf_addr(ldata, ldata->read_head) = c;
ldata->read_head++;
@@ -383,7 +377,7 @@ static void n_tty_flush_buffer(struct tty_struct *tty)
* character. We use this to correctly compute the on-screen size of the
* character when printing.
*/
-static inline int is_utf8_continuation(unsigned char c)
+static inline int is_utf8_continuation(u8 c)
{
return (c & 0xc0) == 0x80;
}
@@ -396,7 +390,7 @@ static inline int is_utf8_continuation(unsigned char c)
* Returns: true if the utf8 character @c is a multibyte continuation character
* and the terminal is in unicode mode.
*/
-static inline int is_continuation(unsigned char c, struct tty_struct *tty)
+static inline int is_continuation(u8 c, const struct tty_struct *tty)
{
return I_IUTF8(tty) && is_utf8_continuation(c);
}
@@ -420,7 +414,7 @@ static inline int is_continuation(unsigned char c, struct tty_struct *tty)
* Locking: should be called under the %output_lock to protect the column state
* and space left in the buffer.
*/
-static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
+static int do_output_char(u8 c, struct tty_struct *tty, int space)
{
struct n_tty_data *ldata = tty->disc_data;
int spaces;
@@ -494,7 +488,7 @@ static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
* Locking: %output_lock to protect column state and space left (also, this is
*called from n_tty_write() under the tty layer write lock).
*/
-static int process_output(unsigned char c, struct tty_struct *tty)
+static int process_output(u8 c, struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
int space, retval;
@@ -530,12 +524,12 @@ static int process_output(unsigned char c, struct tty_struct *tty)
* called from n_tty_write() under the tty layer write lock).
*/
static ssize_t process_output_block(struct tty_struct *tty,
- const unsigned char *buf, unsigned int nr)
+ const u8 *buf, unsigned int nr)
{
struct n_tty_data *ldata = tty->disc_data;
int space;
int i;
- const unsigned char *cp;
+ const u8 *cp;
mutex_lock(&ldata->output_lock);
@@ -548,7 +542,7 @@ static ssize_t process_output_block(struct tty_struct *tty,
nr = space;
for (i = 0, cp = buf; i < nr; i++, cp++) {
- unsigned char c = *cp;
+ u8 c = *cp;
switch (c) {
case '\n':
@@ -588,6 +582,100 @@ break_out:
return i;
}
+static int n_tty_process_echo_ops(struct tty_struct *tty, size_t *tail,
+ int space)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ u8 op;
+
+ /*
+ * Since add_echo_byte() is called without holding output_lock, we
+ * might see only a portion of a multi-byte operation.
+ */
+ if (MASK(ldata->echo_commit) == MASK(*tail + 1))
+ return -ENODATA;
+
+ /*
+ * If the buffer byte is the start of a multi-byte operation, get the
+ * next byte, which is either the op code or a control character value.
+ */
+ op = echo_buf(ldata, *tail + 1);
+
+ switch (op) {
+ case ECHO_OP_ERASE_TAB: {
+ unsigned int num_chars, num_bs;
+
+ if (MASK(ldata->echo_commit) == MASK(*tail + 2))
+ return -ENODATA;
+
+ num_chars = echo_buf(ldata, *tail + 2);
+
+ /*
+ * Determine how many columns to go back in order to erase the
+ * tab. This depends on the number of columns used by other
+ * characters within the tab area. If this (modulo 8) count is
+ * from the start of input rather than from a previous tab, we
+ * offset by canon column. Otherwise, tab spacing is normal.
+ */
+ if (!(num_chars & 0x80))
+ num_chars += ldata->canon_column;
+ num_bs = 8 - (num_chars & 7);
+
+ if (num_bs > space)
+ return -ENOSPC;
+
+ space -= num_bs;
+ while (num_bs--) {
+ tty_put_char(tty, '\b');
+ if (ldata->column > 0)
+ ldata->column--;
+ }
+ *tail += 3;
+ break;
+ }
+ case ECHO_OP_SET_CANON_COL:
+ ldata->canon_column = ldata->column;
+ *tail += 2;
+ break;
+
+ case ECHO_OP_MOVE_BACK_COL:
+ if (ldata->column > 0)
+ ldata->column--;
+ *tail += 2;
+ break;
+
+ case ECHO_OP_START:
+ /* This is an escaped echo op start code */
+ if (!space)
+ return -ENOSPC;
+
+ tty_put_char(tty, ECHO_OP_START);
+ ldata->column++;
+ space--;
+ *tail += 2;
+ break;
+
+ default:
+ /*
+ * If the op is not a special byte code, it is a ctrl char
+ * tagged to be echoed as "^X" (where X is the letter
+ * representing the control char). Note that we must ensure
+ * there is enough space for the whole ctrl pair.
+ */
+ if (space < 2)
+ return -ENOSPC;
+
+ tty_put_char(tty, '^');
+ tty_put_char(tty, op ^ 0100);
+ ldata->column += 2;
+ space -= 2;
+ *tail += 2;
+ break;
+ }
+
+ return space;
+}
+
/**
* __process_echoes - write pending echo characters
* @tty: terminal device
@@ -615,7 +703,7 @@ static size_t __process_echoes(struct tty_struct *tty)
struct n_tty_data *ldata = tty->disc_data;
int space, old_space;
size_t tail;
- unsigned char c;
+ u8 c;
old_space = space = tty_write_room(tty);
@@ -623,104 +711,12 @@ static size_t __process_echoes(struct tty_struct *tty)
while (MASK(ldata->echo_commit) != MASK(tail)) {
c = echo_buf(ldata, tail);
if (c == ECHO_OP_START) {
- unsigned char op;
- bool space_left = true;
-
- /*
- * Since add_echo_byte() is called without holding
- * output_lock, we might see only portion of multi-byte
- * operation.
- */
- if (MASK(ldata->echo_commit) == MASK(tail + 1))
+ int ret = n_tty_process_echo_ops(tty, &tail, space);
+ if (ret == -ENODATA)
goto not_yet_stored;
- /*
- * If the buffer byte is the start of a multi-byte
- * operation, get the next byte, which is either the
- * op code or a control character value.
- */
- op = echo_buf(ldata, tail + 1);
-
- switch (op) {
- case ECHO_OP_ERASE_TAB: {
- unsigned int num_chars, num_bs;
-
- if (MASK(ldata->echo_commit) == MASK(tail + 2))
- goto not_yet_stored;
- num_chars = echo_buf(ldata, tail + 2);
-
- /*
- * Determine how many columns to go back
- * in order to erase the tab.
- * This depends on the number of columns
- * used by other characters within the tab
- * area. If this (modulo 8) count is from
- * the start of input rather than from a
- * previous tab, we offset by canon column.
- * Otherwise, tab spacing is normal.
- */
- if (!(num_chars & 0x80))
- num_chars += ldata->canon_column;
- num_bs = 8 - (num_chars & 7);
-
- if (num_bs > space) {
- space_left = false;
- break;
- }
- space -= num_bs;
- while (num_bs--) {
- tty_put_char(tty, '\b');
- if (ldata->column > 0)
- ldata->column--;
- }
- tail += 3;
- break;
- }
- case ECHO_OP_SET_CANON_COL:
- ldata->canon_column = ldata->column;
- tail += 2;
- break;
-
- case ECHO_OP_MOVE_BACK_COL:
- if (ldata->column > 0)
- ldata->column--;
- tail += 2;
- break;
-
- case ECHO_OP_START:
- /* This is an escaped echo op start code */
- if (!space) {
- space_left = false;
- break;
- }
- tty_put_char(tty, ECHO_OP_START);
- ldata->column++;
- space--;
- tail += 2;
- break;
-
- default:
- /*
- * If the op is not a special byte code,
- * it is a ctrl char tagged to be echoed
- * as "^X" (where X is the letter
- * representing the control char).
- * Note that we must ensure there is
- * enough space for the whole ctrl pair.
- *
- */
- if (space < 2) {
- space_left = false;
- break;
- }
- tty_put_char(tty, '^');
- tty_put_char(tty, op ^ 0100);
- ldata->column += 2;
- space -= 2;
- tail += 2;
- }
-
- if (!space_left)
+ if (ret < 0)
break;
+ space = ret;
} else {
if (O_OPOST(tty)) {
int retval = do_output_char(c, tty, space);
@@ -824,7 +820,7 @@ static void flush_echoes(struct tty_struct *tty)
*
* Add a character or operation byte to the echo buffer.
*/
-static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
+static inline void add_echo_byte(u8 c, struct n_tty_data *ldata)
{
*echo_buf_addr(ldata, ldata->echo_head) = c;
smp_wmb(); /* Matches smp_rmb() in echo_buf(). */
@@ -895,7 +891,7 @@ static void echo_erase_tab(unsigned int num_chars, int after_tab,
*
* This variant does not treat control characters specially.
*/
-static void echo_char_raw(unsigned char c, struct n_tty_data *ldata)
+static void echo_char_raw(u8 c, struct n_tty_data *ldata)
{
if (c == ECHO_OP_START) {
add_echo_byte(ECHO_OP_START, ldata);
@@ -916,7 +912,7 @@ static void echo_char_raw(unsigned char c, struct n_tty_data *ldata)
* This variant tags control characters to be echoed as "^X" (where X is the
* letter representing the control char).
*/
-static void echo_char(unsigned char c, struct tty_struct *tty)
+static void echo_char(u8 c, const struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -954,7 +950,7 @@ static inline void finish_erasing(struct n_tty_data *ldata)
* Locking: n_tty_receive_buf()/producer path:
* caller holds non-exclusive %termios_rwsem
*/
-static void eraser(unsigned char c, struct tty_struct *tty)
+static void eraser(u8 c, const struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
enum { ERASE, WERASE, KILL } kill_type;
@@ -1170,14 +1166,13 @@ static void n_tty_receive_break(struct tty_struct *tty)
* Called from the receive_buf path so single threaded. Does not need locking
* as num_overrun and overrun_time are function private.
*/
-static void n_tty_receive_overrun(struct tty_struct *tty)
+static void n_tty_receive_overrun(const struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
ldata->num_overrun++;
- if (time_after(jiffies, ldata->overrun_time + HZ) ||
- time_after(ldata->overrun_time, jiffies)) {
- tty_warn(tty, "%d input overrun(s)\n", ldata->num_overrun);
+ if (time_is_before_jiffies(ldata->overrun_time + HZ)) {
+ tty_warn(tty, "%u input overrun(s)\n", ldata->num_overrun);
ldata->overrun_time = jiffies;
ldata->num_overrun = 0;
}
@@ -1194,7 +1189,8 @@ static void n_tty_receive_overrun(struct tty_struct *tty)
* Locking: n_tty_receive_buf()/producer path:
* caller holds non-exclusive %termios_rwsem
*/
-static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
+static void n_tty_receive_parity_error(const struct tty_struct *tty,
+ u8 c)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1212,7 +1208,7 @@ static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
}
static void
-n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
+n_tty_receive_signal_char(struct tty_struct *tty, int signal, u8 c)
{
isig(signal, tty);
if (I_IXON(tty))
@@ -1224,7 +1220,7 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
process_echoes(tty);
}
-static bool n_tty_is_char_flow_ctrl(struct tty_struct *tty, unsigned char c)
+static bool n_tty_is_char_flow_ctrl(struct tty_struct *tty, u8 c)
{
return c == START_CHAR(tty) || c == STOP_CHAR(tty);
}
@@ -1244,7 +1240,7 @@ static bool n_tty_is_char_flow_ctrl(struct tty_struct *tty, unsigned char c)
* Returns true if @c is consumed as flow-control character, the character
* must not be treated as normal character.
*/
-static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c,
+static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, u8 c,
bool lookahead_done)
{
if (!n_tty_is_char_flow_ctrl(tty, c))
@@ -1264,7 +1260,103 @@ static bool n_tty_receive_char_flow_ctrl(struct tty_struct *tty, unsigned char c
return true;
}
-static void n_tty_receive_char_special(struct tty_struct *tty, unsigned char c,
+static void n_tty_receive_handle_newline(struct tty_struct *tty, u8 c)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+
+ set_bit(MASK(ldata->read_head), ldata->read_flags);
+ put_tty_queue(c, ldata);
+ smp_store_release(&ldata->canon_head, ldata->read_head);
+ kill_fasync(&tty->fasync, SIGIO, POLL_IN);
+ wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM);
+}
+
+static bool n_tty_receive_char_canon(struct tty_struct *tty, u8 c)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+
+ if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) ||
+ (c == WERASE_CHAR(tty) && L_IEXTEN(tty))) {
+ eraser(c, tty);
+ commit_echoes(tty);
+
+ return true;
+ }
+
+ if (c == LNEXT_CHAR(tty) && L_IEXTEN(tty)) {
+ ldata->lnext = 1;
+ if (L_ECHO(tty)) {
+ finish_erasing(ldata);
+ if (L_ECHOCTL(tty)) {
+ echo_char_raw('^', ldata);
+ echo_char_raw('\b', ldata);
+ commit_echoes(tty);
+ }
+ }
+
+ return true;
+ }
+
+ if (c == REPRINT_CHAR(tty) && L_ECHO(tty) && L_IEXTEN(tty)) {
+ size_t tail = ldata->canon_head;
+
+ finish_erasing(ldata);
+ echo_char(c, tty);
+ echo_char_raw('\n', ldata);
+ while (MASK(tail) != MASK(ldata->read_head)) {
+ echo_char(read_buf(ldata, tail), tty);
+ tail++;
+ }
+ commit_echoes(tty);
+
+ return true;
+ }
+
+ if (c == '\n') {
+ if (L_ECHO(tty) || L_ECHONL(tty)) {
+ echo_char_raw('\n', ldata);
+ commit_echoes(tty);
+ }
+ n_tty_receive_handle_newline(tty, c);
+
+ return true;
+ }
+
+ if (c == EOF_CHAR(tty)) {
+ c = __DISABLED_CHAR;
+ n_tty_receive_handle_newline(tty, c);
+
+ return true;
+ }
+
+ if ((c == EOL_CHAR(tty)) ||
+ (c == EOL2_CHAR(tty) && L_IEXTEN(tty))) {
+ /*
+ * XXX are EOL_CHAR and EOL2_CHAR echoed?!?
+ */
+ if (L_ECHO(tty)) {
+ /* Record the column of first canon char. */
+ if (ldata->canon_head == ldata->read_head)
+ echo_set_canon_col(ldata);
+ echo_char(c, tty);
+ commit_echoes(tty);
+ }
+ /*
+ * XXX does PARMRK doubling happen for
+ * EOL_CHAR and EOL2_CHAR?
+ */
+ if (c == '\377' && I_PARMRK(tty))
+ put_tty_queue(c, ldata);
+
+ n_tty_receive_handle_newline(tty, c);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void n_tty_receive_char_special(struct tty_struct *tty, u8 c,
bool lookahead_done)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1298,77 +1390,8 @@ static void n_tty_receive_char_special(struct tty_struct *tty, unsigned char c,
} else if (c == '\n' && I_INLCR(tty))
c = '\r';
- if (ldata->icanon) {
- if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) ||
- (c == WERASE_CHAR(tty) && L_IEXTEN(tty))) {
- eraser(c, tty);
- commit_echoes(tty);
- return;
- }
- if (c == LNEXT_CHAR(tty) && L_IEXTEN(tty)) {
- ldata->lnext = 1;
- if (L_ECHO(tty)) {
- finish_erasing(ldata);
- if (L_ECHOCTL(tty)) {
- echo_char_raw('^', ldata);
- echo_char_raw('\b', ldata);
- commit_echoes(tty);
- }
- }
- return;
- }
- if (c == REPRINT_CHAR(tty) && L_ECHO(tty) && L_IEXTEN(tty)) {
- size_t tail = ldata->canon_head;
-
- finish_erasing(ldata);
- echo_char(c, tty);
- echo_char_raw('\n', ldata);
- while (MASK(tail) != MASK(ldata->read_head)) {
- echo_char(read_buf(ldata, tail), tty);
- tail++;
- }
- commit_echoes(tty);
- return;
- }
- if (c == '\n') {
- if (L_ECHO(tty) || L_ECHONL(tty)) {
- echo_char_raw('\n', ldata);
- commit_echoes(tty);
- }
- goto handle_newline;
- }
- if (c == EOF_CHAR(tty)) {
- c = __DISABLED_CHAR;
- goto handle_newline;
- }
- if ((c == EOL_CHAR(tty)) ||
- (c == EOL2_CHAR(tty) && L_IEXTEN(tty))) {
- /*
- * XXX are EOL_CHAR and EOL2_CHAR echoed?!?
- */
- if (L_ECHO(tty)) {
- /* Record the column of first canon char. */
- if (ldata->canon_head == ldata->read_head)
- echo_set_canon_col(ldata);
- echo_char(c, tty);
- commit_echoes(tty);
- }
- /*
- * XXX does PARMRK doubling happen for
- * EOL_CHAR and EOL2_CHAR?
- */
- if (c == (unsigned char) '\377' && I_PARMRK(tty))
- put_tty_queue(c, ldata);
-
-handle_newline:
- set_bit(ldata->read_head & (N_TTY_BUF_SIZE - 1), ldata->read_flags);
- put_tty_queue(c, ldata);
- smp_store_release(&ldata->canon_head, ldata->read_head);
- kill_fasync(&tty->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM);
- return;
- }
- }
+ if (ldata->icanon && n_tty_receive_char_canon(tty, c))
+ return;
if (L_ECHO(tty)) {
finish_erasing(ldata);
@@ -1384,7 +1407,7 @@ handle_newline:
}
/* PARMRK doubling check */
- if (c == (unsigned char) '\377' && I_PARMRK(tty))
+ if (c == '\377' && I_PARMRK(tty))
put_tty_queue(c, ldata);
put_tty_queue(c, ldata);
@@ -1402,7 +1425,7 @@ handle_newline:
* caller holds non-exclusive %termios_rwsem
* publishes canon_head if canonical mode is active
*/
-static void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
+static void n_tty_receive_char(struct tty_struct *tty, u8 c)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1419,12 +1442,12 @@ static void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
commit_echoes(tty);
}
/* PARMRK doubling check */
- if (c == (unsigned char) '\377' && I_PARMRK(tty))
+ if (c == '\377' && I_PARMRK(tty))
put_tty_queue(c, ldata);
put_tty_queue(c, ldata);
}
-static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c,
+static void n_tty_receive_char_closing(struct tty_struct *tty, u8 c,
bool lookahead_done)
{
if (I_ISTRIP(tty))
@@ -1444,7 +1467,7 @@ static void n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c,
}
static void
-n_tty_receive_char_flagged(struct tty_struct *tty, unsigned char c, char flag)
+n_tty_receive_char_flagged(struct tty_struct *tty, u8 c, u8 flag)
{
switch (flag) {
case TTY_BREAK:
@@ -1458,13 +1481,13 @@ n_tty_receive_char_flagged(struct tty_struct *tty, unsigned char c, char flag)
n_tty_receive_overrun(tty);
break;
default:
- tty_err(tty, "unknown flag %d\n", flag);
+ tty_err(tty, "unknown flag %u\n", flag);
break;
}
}
static void
-n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag)
+n_tty_receive_char_lnext(struct tty_struct *tty, u8 c, u8 flag)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1480,11 +1503,11 @@ n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag)
}
/* Caller must ensure count > 0 */
-static void n_tty_lookahead_flow_ctrl(struct tty_struct *tty, const unsigned char *cp,
- const unsigned char *fp, unsigned int count)
+static void n_tty_lookahead_flow_ctrl(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count)
{
struct n_tty_data *ldata = tty->disc_data;
- unsigned char flag = TTY_NORMAL;
+ u8 flag = TTY_NORMAL;
ldata->lookahead_count += count;
@@ -1501,31 +1524,30 @@ static void n_tty_lookahead_flow_ctrl(struct tty_struct *tty, const unsigned cha
}
static void
-n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count)
+n_tty_receive_buf_real_raw(const struct tty_struct *tty, const u8 *cp,
+ size_t count)
{
struct n_tty_data *ldata = tty->disc_data;
- size_t n, head;
- head = ldata->read_head & (N_TTY_BUF_SIZE - 1);
- n = min_t(size_t, count, N_TTY_BUF_SIZE - head);
- memcpy(read_buf_addr(ldata, head), cp, n);
- ldata->read_head += n;
- cp += n;
- count -= n;
+ /* handle buffer wrap-around by a loop */
+ for (unsigned int i = 0; i < 2; i++) {
+ size_t head = MASK(ldata->read_head);
+ size_t n = min(count, N_TTY_BUF_SIZE - head);
+
+ memcpy(read_buf_addr(ldata, head), cp, n);
- head = ldata->read_head & (N_TTY_BUF_SIZE - 1);
- n = min_t(size_t, count, N_TTY_BUF_SIZE - head);
- memcpy(read_buf_addr(ldata, head), cp, n);
- ldata->read_head += n;
+ ldata->read_head += n;
+ cp += n;
+ count -= n;
+ }
}
static void
-n_tty_receive_buf_raw(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count)
+n_tty_receive_buf_raw(struct tty_struct *tty, const u8 *cp, const u8 *fp,
+ size_t count)
{
struct n_tty_data *ldata = tty->disc_data;
- char flag = TTY_NORMAL;
+ u8 flag = TTY_NORMAL;
while (count--) {
if (fp)
@@ -1538,10 +1560,10 @@ n_tty_receive_buf_raw(struct tty_struct *tty, const unsigned char *cp,
}
static void
-n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count, bool lookahead_done)
+n_tty_receive_buf_closing(struct tty_struct *tty, const u8 *cp, const u8 *fp,
+ size_t count, bool lookahead_done)
{
- char flag = TTY_NORMAL;
+ u8 flag = TTY_NORMAL;
while (count--) {
if (fp)
@@ -1551,14 +1573,15 @@ n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
}
}
-static void n_tty_receive_buf_standard(struct tty_struct *tty,
- const unsigned char *cp, const char *fp, int count, bool lookahead_done)
+static void n_tty_receive_buf_standard(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count,
+ bool lookahead_done)
{
struct n_tty_data *ldata = tty->disc_data;
- char flag = TTY_NORMAL;
+ u8 flag = TTY_NORMAL;
while (count--) {
- unsigned char c = *cp++;
+ u8 c = *cp++;
if (fp)
flag = *fp++;
@@ -1589,15 +1612,15 @@ static void n_tty_receive_buf_standard(struct tty_struct *tty,
}
}
-static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count)
+static void __receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp,
+ size_t count)
{
struct n_tty_data *ldata = tty->disc_data;
bool preops = I_ISTRIP(tty) || (I_IUCLC(tty) && L_IEXTEN(tty));
- size_t la_count = min_t(size_t, ldata->lookahead_count, count);
+ size_t la_count = min(ldata->lookahead_count, count);
if (ldata->real_raw)
- n_tty_receive_buf_real_raw(tty, cp, fp, count);
+ n_tty_receive_buf_real_raw(tty, cp, count);
else if (ldata->raw || (L_EXTPROC(tty) && !preops))
n_tty_receive_buf_raw(tty, cp, fp, count);
else if (tty->closing && !L_EXTPROC(tty)) {
@@ -1663,12 +1686,13 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
* claims non-exclusive %termios_rwsem
* publishes commit_head or canon_head
*/
-static int
-n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count, int flow)
+static size_t
+n_tty_receive_buf_common(struct tty_struct *tty, const u8 *cp, const u8 *fp,
+ size_t count, bool flow)
{
struct n_tty_data *ldata = tty->disc_data;
- int room, n, rcvd = 0, overflow;
+ size_t n, rcvd = 0;
+ int room, overflow;
down_read(&tty->termios_rwsem);
@@ -1701,7 +1725,7 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
} else
overflow = 0;
- n = min(count, room);
+ n = min_t(size_t, count, room);
if (!n)
break;
@@ -1744,16 +1768,16 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
return rcvd;
}
-static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count)
+static void n_tty_receive_buf(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count)
{
- n_tty_receive_buf_common(tty, cp, fp, count, 0);
+ n_tty_receive_buf_common(tty, cp, fp, count, false);
}
-static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count)
+static size_t n_tty_receive_buf2(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count)
{
- return n_tty_receive_buf_common(tty, cp, fp, count, 1);
+ return n_tty_receive_buf_common(tty, cp, fp, count, true);
}
/**
@@ -1779,8 +1803,7 @@ static void n_tty_set_termios(struct tty_struct *tty, const struct ktermios *old
ldata->canon_head = ldata->read_tail;
ldata->push = 0;
} else {
- set_bit((ldata->read_head - 1) & (N_TTY_BUF_SIZE - 1),
- ldata->read_flags);
+ set_bit(MASK(ldata->read_head - 1), ldata->read_flags);
ldata->canon_head = ldata->read_head;
ldata->push = 1;
}
@@ -1903,9 +1926,9 @@ static int n_tty_open(struct tty_struct *tty)
return 0;
}
-static inline int input_available_p(struct tty_struct *tty, int poll)
+static inline int input_available_p(const struct tty_struct *tty, int poll)
{
- struct n_tty_data *ldata = tty->disc_data;
+ const struct n_tty_data *ldata = tty->disc_data;
int amt = poll && !TIME_CHAR(tty) && MIN_CHAR(tty) ? MIN_CHAR(tty) : 1;
if (ldata->icanon && !L_EXTPROC(tty))
@@ -1932,21 +1955,20 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
* caller holds non-exclusive %termios_rwsem;
* read_tail published
*/
-static bool copy_from_read_buf(struct tty_struct *tty,
- unsigned char **kbp,
- size_t *nr)
+static bool copy_from_read_buf(const struct tty_struct *tty, u8 **kbp,
+ size_t *nr)
{
struct n_tty_data *ldata = tty->disc_data;
size_t n;
bool is_eof;
size_t head = smp_load_acquire(&ldata->commit_head);
- size_t tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
+ size_t tail = MASK(ldata->read_tail);
n = min(head - ldata->read_tail, N_TTY_BUF_SIZE - tail);
n = min(*nr, n);
if (n) {
- unsigned char *from = read_buf_addr(ldata, tail);
+ u8 *from = read_buf_addr(ldata, tail);
memcpy(*kbp, from, n);
is_eof = n == 1 && *from == EOF_CHAR(tty);
tty_audit_add_data(tty, from, n);
@@ -1987,8 +2009,7 @@ static bool copy_from_read_buf(struct tty_struct *tty,
* caller holds non-exclusive %termios_rwsem;
* read_tail published
*/
-static bool canon_copy_from_read_buf(struct tty_struct *tty,
- unsigned char **kbp,
+static bool canon_copy_from_read_buf(const struct tty_struct *tty, u8 **kbp,
size_t *nr)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -2004,7 +2025,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
canon_head = smp_load_acquire(&ldata->canon_head);
n = min(*nr, canon_head - ldata->read_tail);
- tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
+ tail = MASK(ldata->read_tail);
size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
n_tty_trace("%s: nr:%zu tail:%zu n:%zu size:%zu\n",
@@ -2056,9 +2077,8 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
* EOF (special EOL character that's a __DISABLED_CHAR)
* in the stream, silently eat the EOF.
*/
-static void canon_skip_eof(struct tty_struct *tty)
+static void canon_skip_eof(struct n_tty_data *ldata)
{
- struct n_tty_data *ldata = tty->disc_data;
size_t tail, canon_head;
canon_head = smp_load_acquire(&ldata->canon_head);
@@ -2128,12 +2148,11 @@ static int job_control(struct tty_struct *tty, struct file *file)
* claims non-exclusive termios_rwsem;
* publishes read_tail
*/
-static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
- unsigned char *kbuf, size_t nr,
- void **cookie, unsigned long offset)
+static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, u8 *kbuf,
+ size_t nr, void **cookie, unsigned long offset)
{
struct n_tty_data *ldata = tty->disc_data;
- unsigned char *kb = kbuf;
+ u8 *kb = kbuf;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int c;
int minimum, time;
@@ -2156,7 +2175,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
* releasing the lock and returning done.
*/
if (!nr)
- canon_skip_eof(tty);
+ canon_skip_eof(ldata);
else if (canon_copy_from_read_buf(tty, &kb, &nr))
return kb - kbuf;
} else {
@@ -2209,7 +2228,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
while (nr) {
/* First test for status change. */
if (packet && tty->link->ctrl.pktstatus) {
- unsigned char cs;
+ u8 cs;
if (kb != kbuf)
break;
spin_lock_irq(&tty->link->ctrl.lock);
@@ -2332,12 +2351,11 @@ more_to_be_read:
*/
static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
- const unsigned char *buf, size_t nr)
+ const u8 *buf, size_t nr)
{
- const unsigned char *b = buf;
+ const u8 *b = buf;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- int c;
- ssize_t retval = 0;
+ ssize_t num, retval = 0;
/* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
if (L_TOSTOP(tty) && file->f_op->write_iter != redirected_tty_write) {
@@ -2363,7 +2381,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
}
if (O_OPOST(tty)) {
while (nr > 0) {
- ssize_t num = process_output_block(tty, b, nr);
+ num = process_output_block(tty, b, nr);
if (num < 0) {
if (num == -EAGAIN)
break;
@@ -2374,8 +2392,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
nr -= num;
if (nr == 0)
break;
- c = *b;
- if (process_output(c, tty) < 0)
+ if (process_output(*b, tty) < 0)
break;
b++; nr--;
}
@@ -2386,16 +2403,16 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
while (nr > 0) {
mutex_lock(&ldata->output_lock);
- c = tty->ops->write(tty, b, nr);
+ num = tty->ops->write(tty, b, nr);
mutex_unlock(&ldata->output_lock);
- if (c < 0) {
- retval = c;
+ if (num < 0) {
+ retval = num;
goto break_out;
}
- if (!c)
+ if (!num)
break;
- b += c;
- nr -= c;
+ b += num;
+ nr -= num;
}
}
if (!nr)
@@ -2470,7 +2487,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
nr = head - tail;
/* Skip EOF-chars.. */
while (MASK(head) != MASK(tail)) {
- if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
+ if (test_bit(MASK(tail), ldata->read_flags) &&
read_buf(ldata, tail) == __DISABLED_CHAR)
nr--;
tail++;
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index 0454c78deee6..02cd40147b3a 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -1599,8 +1599,8 @@ static void ntty_hangup(struct tty_struct *tty)
* called when the userspace process writes to the tty (/dev/noz*).
* Data is inserted into a fifo, which is then read and transferred to the modem.
*/
-static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
- int count)
+static ssize_t ntty_write(struct tty_struct *tty, const u8 *buffer,
+ size_t count)
{
int rval = -EINVAL;
struct nozomi *dc = get_dc_by_tty(tty);
@@ -1610,7 +1610,7 @@ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
if (!dc || !port)
return -ENODEV;
- rval = kfifo_in(&port->fifo_ul, (unsigned char *)buffer, count);
+ rval = kfifo_in(&port->fifo_ul, buffer, count);
spin_lock_irqsave(&dc->spin_mutex, flags);
/* CTS is only valid on the modem channel */
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 2b1c8ab99dba..df08f13052ff 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -108,7 +108,7 @@ static void pty_unthrottle(struct tty_struct *tty)
* the other side of the pty/tty pair.
*/
-static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
+static ssize_t pty_write(struct tty_struct *tty, const u8 *buf, size_t c)
{
struct tty_struct *to = tty->link;
diff --git a/drivers/tty/rpmsg_tty.c b/drivers/tty/rpmsg_tty.c
index 29db413bbc03..60a2915f5cfe 100644
--- a/drivers/tty/rpmsg_tty.c
+++ b/drivers/tty/rpmsg_tty.c
@@ -73,7 +73,8 @@ static void rpmsg_tty_close(struct tty_struct *tty, struct file *filp)
return tty_port_close(tty->port, tty, filp);
}
-static int rpmsg_tty_write(struct tty_struct *tty, const u8 *buf, int len)
+static ssize_t rpmsg_tty_write(struct tty_struct *tty, const u8 *buf,
+ size_t len)
{
struct rpmsg_tty_port *cport = tty->driver_data;
struct rpmsg_device *rpdev;
@@ -86,7 +87,7 @@ static int rpmsg_tty_write(struct tty_struct *tty, const u8 *buf, int len)
if (msg_max_size < 0)
return msg_max_size;
- msg_size = min(len, msg_max_size);
+ msg_size = min_t(unsigned int, len, msg_max_size);
/*
* Use rpmsg_trysend instead of rpmsg_send to send the message so the caller is not
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index 8033ef19669c..e3856814ce77 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -22,8 +22,8 @@ struct serport {
* Callback functions from the tty port.
*/
-static int ttyport_receive_buf(struct tty_port *port, const unsigned char *cp,
- const unsigned char *fp, size_t count)
+static size_t ttyport_receive_buf(struct tty_port *port, const u8 *cp,
+ const u8 *fp, size_t count)
{
struct serdev_controller *ctrl = port->client_data;
struct serport *serport = serdev_controller_get_drvdata(ctrl);
diff --git a/drivers/tty/serial/21285.c b/drivers/tty/serial/21285.c
index 185462fd959c..d756fcc884cb 100644
--- a/drivers/tty/serial/21285.c
+++ b/drivers/tty/serial/21285.c
@@ -117,7 +117,8 @@ static void serial21285_stop_rx(struct uart_port *port)
static irqreturn_t serial21285_rx_chars(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- unsigned int status, ch, flag, rxs, max_count = 256;
+ unsigned int status, rxs, max_count = 256;
+ u8 ch, flag;
status = *CSR_UARTFLG;
while (!(status & 0x10) && max_count--) {
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index d4b05d7ad9e8..aa5aff046756 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -1042,7 +1042,7 @@ static int brcmuart_probe(struct platform_device *pdev)
dev_dbg(dev, "DMA is %senabled\n", priv->dma_enabled ? "" : "not ");
memset(&up, 0, sizeof(up));
- up.port.type = PORT_16550A;
+ up.port.type = PORT_BCM7271;
up.port.uartclk = clk_rate;
up.port.dev = dev;
up.port.mapbase = mapbase;
@@ -1056,8 +1056,6 @@ static int brcmuart_probe(struct platform_device *pdev)
| UPF_FIXED_PORT | UPF_FIXED_TYPE;
up.port.dev = dev;
up.port.private_data = priv;
- up.capabilities = UART_CAP_FIFO | UART_CAP_AFE;
- up.port.fifosize = 32;
/* Check for a fixed line number */
ret = of_alias_get_id(np, "serial");
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 7db51781289e..f4cafca1a7da 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -523,7 +523,10 @@ static int dw8250_probe(struct platform_device *pdev)
if (!regs)
return dev_err_probe(dev, -EINVAL, "no registers defined\n");
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
+ /* no interrupt -> fall back to polling */
+ if (irq == -ENXIO)
+ irq = 0;
if (irq < 0)
return irq;
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 4299a8bd83d9..9837a27739fd 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -27,7 +27,6 @@
#include <linux/init.h>
#include <linux/console.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/serial_reg.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index 617b8ce60d6b..4c4c4da73ad0 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/8250/8250_men_mcb.c b/drivers/tty/serial/8250/8250_men_mcb.c
index f46ca13ff4aa..dc9e093b1cb3 100644
--- a/drivers/tty/serial/8250/8250_men_mcb.c
+++ b/drivers/tty/serial/8250/8250_men_mcb.c
@@ -12,11 +12,42 @@
#define MEN_UART_ID_Z057 0x39
#define MEN_UART_ID_Z125 0x7d
-#define MEN_UART_MEM_SIZE 0x10
+/*
+ * IP Cores Z025 and Z057 can have up to 4 UARTs.
+ * The UARTs available are stored in a global
+ * register at physical address + 0x40,
+ * laid out as follows:
+ *
+ * 7                                                               0
+ * +------+-------+-------+-------+-------+-------+-------+-------+
+ * |UART4 | UART3 | UART2 | UART1 | U4irq | U3irq | U2irq | U1irq |
+ * +------+-------+-------+-------+-------+-------+-------+-------+
+ */
+#define MEN_UART1_MASK 0x01
+#define MEN_UART2_MASK 0x02
+#define MEN_UART3_MASK 0x04
+#define MEN_UART4_MASK 0x08
+
+#define MEN_Z125_UARTS_AVAILABLE 0x01
+
+#define MEN_Z025_MAX_UARTS 4
+#define MEN_UART_MEM_SIZE 0x10
+#define MEM_UART_REGISTER_SIZE 0x01
+#define MEN_Z025_REGISTER_OFFSET 0x40
+
+#define MEN_UART1_OFFSET 0
+#define MEN_UART2_OFFSET (MEN_UART1_OFFSET + MEN_UART_MEM_SIZE)
+#define MEN_UART3_OFFSET (MEN_UART2_OFFSET + MEN_UART_MEM_SIZE)
+#define MEN_UART4_OFFSET (MEN_UART3_OFFSET + MEN_UART_MEM_SIZE)
+
+#define MEN_READ_REGISTER(addr) readb(addr)
+
+#define MAX_PORTS 4
struct serial_8250_men_mcb_data {
- struct uart_8250_port uart;
- int line;
+ int num_ports;
+ int line[MAX_PORTS];
+ unsigned int offset[MAX_PORTS];
};
/*
@@ -37,10 +68,10 @@ static u32 men_lookup_uartclk(struct mcb_device *mdev)
clkval = 1041666;
else if (strncmp(mdev->bus->name, "F216", 4) == 0)
clkval = 1843200;
- else if (strncmp(mdev->bus->name, "G215", 4) == 0)
- clkval = 1843200;
else if (strncmp(mdev->bus->name, "F210", 4) == 0)
clkval = 115200;
+ else if (strstr(mdev->bus->name, "215"))
+ clkval = 1843200;
else
dev_info(&mdev->dev,
"board not detected, using default uartclk\n");
@@ -50,16 +81,98 @@ static u32 men_lookup_uartclk(struct mcb_device *mdev)
return clkval;
}
-static int get_num_ports(struct mcb_device *mdev,
- void __iomem *membase)
+static int read_uarts_available_from_register(struct resource *mem_res,
+ u8 *uarts_available)
+{
+ void __iomem *mem;
+ int reg_value;
+
+ if (!request_mem_region(mem_res->start + MEN_Z025_REGISTER_OFFSET,
+ MEM_UART_REGISTER_SIZE, KBUILD_MODNAME)) {
+ return -EBUSY;
+ }
+
+ mem = ioremap(mem_res->start + MEN_Z025_REGISTER_OFFSET,
+ MEM_UART_REGISTER_SIZE);
+ if (!mem) {
+ release_mem_region(mem_res->start + MEN_Z025_REGISTER_OFFSET,
+ MEM_UART_REGISTER_SIZE);
+ return -ENOMEM;
+ }
+
+ reg_value = MEN_READ_REGISTER(mem);
+
+ iounmap(mem);
+
+ release_mem_region(mem_res->start + MEN_Z025_REGISTER_OFFSET,
+ MEM_UART_REGISTER_SIZE);
+
+ *uarts_available = reg_value >> 4;
+
+ return 0;
+}
+
+static int read_serial_data(struct mcb_device *mdev,
+ struct resource *mem_res,
+ struct serial_8250_men_mcb_data *serial_data)
+{
+ u8 uarts_available;
+ int count = 0;
+ int mask;
+ int res;
+ int i;
+
+ res = read_uarts_available_from_register(mem_res, &uarts_available);
+ if (res < 0)
+ return res;
+
+ for (i = 0; i < MAX_PORTS; i++) {
+ mask = 0x1 << i;
+ switch (uarts_available & mask) {
+ case MEN_UART1_MASK:
+ serial_data->offset[count] = MEN_UART1_OFFSET;
+ count++;
+ break;
+ case MEN_UART2_MASK:
+ serial_data->offset[count] = MEN_UART2_OFFSET;
+ count++;
+ break;
+ case MEN_UART3_MASK:
+ serial_data->offset[count] = MEN_UART3_OFFSET;
+ count++;
+ break;
+ case MEN_UART4_MASK:
+ serial_data->offset[count] = MEN_UART4_OFFSET;
+ count++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (count <= 0 || count > MAX_PORTS) {
+ dev_err(&mdev->dev, "unexpected number of ports: %u\n",
+ count);
+ return -ENODEV;
+ }
+
+ serial_data->num_ports = count;
+
+ return 0;
+}
+
+static int init_serial_data(struct mcb_device *mdev,
+ struct resource *mem_res,
+ struct serial_8250_men_mcb_data *serial_data)
{
switch (mdev->id) {
case MEN_UART_ID_Z125:
- return 1U;
+ serial_data->num_ports = 1;
+ serial_data->offset[0] = 0;
+ return 0;
case MEN_UART_ID_Z025:
- return readb(membase) >> 4;
case MEN_UART_ID_Z057:
- return 4U;
+ return read_serial_data(mdev, mem_res, serial_data);
default:
dev_err(&mdev->dev, "no supported device!\n");
return -ENODEV;
@@ -69,62 +182,54 @@ static int get_num_ports(struct mcb_device *mdev,
static int serial_8250_men_mcb_probe(struct mcb_device *mdev,
const struct mcb_device_id *id)
{
+ struct uart_8250_port uart;
struct serial_8250_men_mcb_data *data;
struct resource *mem;
- int num_ports;
int i;
- void __iomem *membase;
+ int res;
mem = mcb_get_resource(mdev, IORESOURCE_MEM);
if (mem == NULL)
return -ENXIO;
- membase = devm_ioremap_resource(&mdev->dev, mem);
- if (IS_ERR(membase))
- return PTR_ERR_OR_ZERO(membase);
-
- num_ports = get_num_ports(mdev, membase);
- dev_dbg(&mdev->dev, "found a 16z%03u with %u ports\n",
- mdev->id, num_ports);
-
- if (num_ports <= 0 || num_ports > 4) {
- dev_err(&mdev->dev, "unexpected number of ports: %u\n",
- num_ports);
- return -ENODEV;
- }
-
- data = devm_kcalloc(&mdev->dev, num_ports,
+ data = devm_kzalloc(&mdev->dev,
sizeof(struct serial_8250_men_mcb_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
+ res = init_serial_data(mdev, mem, data);
+ if (res < 0)
+ return res;
+
+ dev_dbg(&mdev->dev, "found a 16z%03u with %u ports\n",
+ mdev->id, data->num_ports);
+
mcb_set_drvdata(mdev, data);
- for (i = 0; i < num_ports; i++) {
- data[i].uart.port.dev = mdev->dma_dev;
- spin_lock_init(&data[i].uart.port.lock);
-
- data[i].uart.port.type = PORT_16550;
- data[i].uart.port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ
- | UPF_FIXED_TYPE;
- data[i].uart.port.iotype = UPIO_MEM;
- data[i].uart.port.uartclk = men_lookup_uartclk(mdev);
- data[i].uart.port.regshift = 0;
- data[i].uart.port.irq = mcb_get_irq(mdev);
- data[i].uart.port.membase = membase;
- data[i].uart.port.fifosize = 60;
- data[i].uart.port.mapbase = (unsigned long) mem->start
- + i * MEN_UART_MEM_SIZE;
- data[i].uart.port.iobase = data[i].uart.port.mapbase;
+ for (i = 0; i < data->num_ports; i++) {
+ memset(&uart, 0, sizeof(struct uart_8250_port));
+ spin_lock_init(&uart.port.lock);
+
+ uart.port.flags = UPF_SKIP_TEST |
+ UPF_SHARE_IRQ |
+ UPF_BOOT_AUTOCONF |
+ UPF_IOREMAP;
+ uart.port.iotype = UPIO_MEM;
+ uart.port.uartclk = men_lookup_uartclk(mdev);
+ uart.port.irq = mcb_get_irq(mdev);
+ uart.port.mapbase = (unsigned long) mem->start
+ + data->offset[i];
/* ok, register the port */
- data[i].line = serial8250_register_8250_port(&data[i].uart);
- if (data[i].line < 0) {
+ res = serial8250_register_8250_port(&uart);
+ if (res < 0) {
dev_err(&mdev->dev, "unable to register UART port\n");
- return data[i].line;
+ return res;
}
- dev_info(&mdev->dev, "found MCB UART: ttyS%d\n", data[i].line);
+
+ data->line[i] = res;
+ dev_info(&mdev->dev, "found MCB UART: ttyS%d\n", data->line[i]);
}
return 0;
@@ -132,20 +237,14 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev,
static void serial_8250_men_mcb_remove(struct mcb_device *mdev)
{
- int num_ports, i;
+ int i;
struct serial_8250_men_mcb_data *data = mcb_get_drvdata(mdev);
if (!data)
return;
- num_ports = get_num_ports(mdev, data[0].uart.port.membase);
- if (num_ports <= 0 || num_ports > 4) {
- dev_err(&mdev->dev, "error retrieving number of ports!\n");
- return;
- }
-
- for (i = 0; i < num_ports; i++)
- serial8250_unregister_port(data[i].line);
+ for (i = 0; i < data->num_ports; i++)
+ serial8250_unregister_port(data->line[i]);
}
static const struct mcb_device_id serial_8250_men_mcb_ids[] = {
@@ -159,7 +258,6 @@ MODULE_DEVICE_TABLE(mcb, serial_8250_men_mcb_ids);
static struct mcb_driver mcb_driver = {
.driver = {
.name = "8250_men_mcb",
- .owner = THIS_MODULE,
},
.probe = serial_8250_men_mcb_probe,
.remove = serial_8250_men_mcb_remove,
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index d48a82f1634e..26dd089d8e82 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -18,7 +18,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index d2d547b5da95..62a9bd30b4db 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -67,6 +67,8 @@ static const struct pci_device_id pci_use_msi[] = {
0xA000, 0x1000) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9922,
0xA000, 0x1000) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_AX99100,
+ 0xA000, 0x1000) },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL,
PCI_ANY_ID, PCI_ANY_ID) },
{ }
@@ -5557,6 +5559,14 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
0xA000, 0x3004,
0, 0, pbn_b0_bt_4_115200 },
+
+ /*
+ * ASIX AX99100 PCIe to Multi I/O Controller
+ */
+ { PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_AX99100,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+
/* Intel CE4100 */
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CE4100_UART,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 483bb552cdc4..fb891b67968f 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -322,6 +322,14 @@ static const struct serial8250_config uart_config[] = {
.rxtrig_bytes = {2, 66, 130, 194},
.flags = UART_CAP_FIFO,
},
+ [PORT_BCM7271] = {
+ .name = "Broadcom BCM7271 UART",
+ .fifo_size = 32,
+ .tx_loadsz = 32,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01,
+ .rxtrig_bytes = {1, 8, 16, 30},
+ .flags = UART_CAP_FIFO | UART_CAP_AFE,
+ },
};
/* Uart divisor latch read */
@@ -1703,8 +1711,7 @@ static void serial8250_enable_ms(struct uart_port *port)
void serial8250_read_char(struct uart_8250_port *up, u16 lsr)
{
struct uart_port *port = &up->port;
- unsigned char ch;
- char flag = TTY_NORMAL;
+ u8 ch, flag = TTY_NORMAL;
if (likely(lsr & UART_LSR_DR))
ch = serial_in(up, UART_RX);
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index 28b341f602c6..a5b3ea27fc90 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -183,6 +183,7 @@ static int __init early_serial_pxa_setup(struct earlycon_device *device,
return early_serial8250_setup(device, NULL);
}
OF_EARLYCON_DECLARE(early_pxa, "mrvl,pxa-uart", early_serial_pxa_setup);
+OF_EARLYCON_DECLARE(mmp, "mrvl,mmp-uart", early_serial_pxa_setup);
#endif
MODULE_AUTHOR("Sergei Ianovich");
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index d4123469583d..138abbc89738 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -41,7 +41,7 @@ obj-$(CONFIG_SERIAL_HS_LPC32XX) += lpc32xx_hs.o
obj-$(CONFIG_SERIAL_DZ) += dz.o
obj-$(CONFIG_SERIAL_ZS) += zs.o
obj-$(CONFIG_SERIAL_SH_SCI) += sh-sci.o
-obj-$(CONFIG_SERIAL_CPM) += cpm_uart/
+obj-$(CONFIG_SERIAL_CPM) += cpm_uart.o
obj-$(CONFIG_SERIAL_IMX) += imx.o
obj-$(CONFIG_SERIAL_IMX_EARLYCON) += imx_earlycon.o
obj-$(CONFIG_SERIAL_MPC52xx) += mpc52xx_uart.o
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 9f843d1cee40..5fab4c978891 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -110,8 +110,8 @@ static void altera_jtaguart_set_termios(struct uart_port *port,
static void altera_jtaguart_rx_chars(struct uart_port *port)
{
- unsigned char ch;
- unsigned long status;
+ u32 status;
+ u8 ch;
while ((status = readl(port->membase + ALTERA_JTAGUART_DATA_REG)) &
ALTERA_JTAGUART_DATA_RVALID_MSK) {
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 9ce3d24af536..a9c41942190c 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -201,8 +201,8 @@ static void altera_uart_set_termios(struct uart_port *port,
static void altera_uart_rx_chars(struct uart_port *port)
{
- unsigned char ch, flag;
unsigned short status;
+ u8 ch, flag;
while ((status = altera_uart_readl(port, ALTERA_UART_STATUS_REG)) &
ALTERA_UART_STATUS_RRDY_MSK) {
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index a98fae2ca422..b5a7404cbacb 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -112,7 +112,8 @@ static void pl010_enable_ms(struct uart_port *port)
static void pl010_rx_chars(struct uart_port *port)
{
- unsigned int status, ch, flag, rsr, max_count = 256;
+ unsigned int status, rsr, max_count = 256;
+ u8 ch, flag;
status = readb(port->membase + UART01x_FR);
while (UART_RX_DATA(status) && max_count--) {
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index c5c3f4674459..3dc9b0fcab1c 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -20,6 +20,7 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/console.h>
+#include <linux/platform_device.h>
#include <linux/sysrq.h>
#include <linux/device.h>
#include <linux/tty.h>
@@ -36,7 +37,6 @@
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sizes.h>
#include <linux/io.h>
@@ -307,9 +307,10 @@ static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
*/
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
- unsigned int ch, flag, fifotaken;
+ unsigned int ch, fifotaken;
int sysrq;
u16 status;
+ u8 flag;
for (fifotaken = 0; fifotaken != 256; fifotaken++) {
status = pl011_read(uap, REG_FR);
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 915ee4b0d594..d7658f380838 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -22,9 +22,6 @@
#include <linux/kthread.h>
#include <linux/device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/serial_core.h>
@@ -70,8 +67,9 @@ static void apbuart_stop_rx(struct uart_port *port)
static void apbuart_rx_chars(struct uart_port *port)
{
- unsigned int status, ch, rsr, flag;
+ unsigned int status, rsr;
unsigned int max_chars = port->fifosize;
+ u8 ch, flag;
status = UART_GET_STATUS(port);
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 4c3d04c6826a..924c1a89347c 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -749,8 +749,7 @@ static int ar933x_uart_probe(struct platform_device *pdev)
port = &up->port;
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- port->membase = devm_ioremap_resource(&pdev->dev, mem_res);
+ port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
if (IS_ERR(port->membase))
return PTR_ERR(port->membase);
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 4b2512eef577..ad4ae19b6ce3 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -195,8 +195,6 @@ static void arc_serial_start_tx(struct uart_port *port)
static void arc_serial_rx_chars(struct uart_port *port, unsigned int status)
{
- unsigned int ch, flg = 0;
-
/*
* UART has 4 deep RX-FIFO. Driver's recognition of this fact
* is very subtle. Here's how ...
@@ -207,24 +205,23 @@ static void arc_serial_rx_chars(struct uart_port *port, unsigned int status)
* controller, which is indeed the Rx-FIFO.
*/
do {
+ u8 ch, flg = TTY_NORMAL;
+
/*
* This could be an Rx Intr for err (no data),
* so check err and clear that Intr first
*/
- if (unlikely(status & (RXOERR | RXFERR))) {
- if (status & RXOERR) {
- port->icount.overrun++;
- flg = TTY_OVERRUN;
- UART_CLR_STATUS(port, RXOERR);
- }
-
- if (status & RXFERR) {
- port->icount.frame++;
- flg = TTY_FRAME;
- UART_CLR_STATUS(port, RXFERR);
- }
- } else
- flg = TTY_NORMAL;
+ if (status & RXOERR) {
+ port->icount.overrun++;
+ flg = TTY_OVERRUN;
+ UART_CLR_STATUS(port, RXOERR);
+ }
+
+ if (status & RXFERR) {
+ port->icount.frame++;
+ flg = TTY_FRAME;
+ UART_CLR_STATUS(port, RXFERR);
+ }
if (status & RXEMPTY)
continue;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 3467a875641a..88cdafa5ac54 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -21,7 +21,6 @@
#include <linux/tty_flip.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/atmel_pdc.h>
@@ -1516,8 +1515,8 @@ static void atmel_rx_from_ring(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct circ_buf *ring = &atmel_port->rx_ring;
- unsigned int flg;
unsigned int status;
+ u8 flg;
while (ring->head != ring->tail) {
struct atmel_uart_char c;
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 55e82d0bf92d..0dd8cceb837c 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -832,14 +832,10 @@ static int bcm_uart_probe(struct platform_device *pdev)
return -EBUSY;
memset(port, 0, sizeof(*port));
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res_mem)
- return -ENODEV;
-
- port->mapbase = res_mem->start;
- port->membase = devm_ioremap_resource(&pdev->dev, res_mem);
+ port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res_mem);
if (IS_ERR(port->membase))
return PTR_ERR(port->membase);
+ port->mapbase = res_mem->start;
ret = platform_get_irq(pdev, 0);
if (ret < 0)
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index e49bc4019b50..55d19937efbd 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -92,8 +92,9 @@ static irqreturn_t uart_clps711x_int_rx(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
struct clps711x_port *s = dev_get_drvdata(port->dev);
- unsigned int status, flg;
+ unsigned int status;
u16 ch;
+ u8 flg;
for (;;) {
u32 sysflg = 0;
@@ -450,8 +451,7 @@ static int uart_clps711x_probe(struct platform_device *pdev)
if (IS_ERR(uart_clk))
return PTR_ERR(uart_clk);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- s->port.membase = devm_ioremap_resource(&pdev->dev, res);
+ s->port.membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(s->port.membase))
return PTR_ERR(s->port.membase);
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart.c
index 66afa9bea6bf..626423022d62 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart.c
@@ -26,17 +26,17 @@
#include <linux/device.h>
#include <linux/memblock.h>
#include <linux/dma-mapping.h>
-#include <linux/fs_uart_pd.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/gpio/consumer.h>
#include <linux/clk.h>
+#include <sysdev/fsl_soc.h>
+
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>
-#include <asm/fs_pd.h>
#include <asm/udbg.h>
#include <linux/serial_core.h>
@@ -48,14 +48,17 @@
/**************************************************************/
static int cpm_uart_tx_pump(struct uart_port *port);
-static void cpm_uart_init_smc(struct uart_cpm_port *pinfo);
-static void cpm_uart_init_scc(struct uart_cpm_port *pinfo);
static void cpm_uart_initbd(struct uart_cpm_port *pinfo);
/**************************************************************/
#define HW_BUF_SPD_THRESHOLD 2400
+static void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd)
+{
+ cpm_command(port->command, cmd);
+}
+
/*
* Check if transmit buffers are processed
*/
@@ -605,7 +608,7 @@ static void cpm_uart_set_termios(struct uart_port *port,
if (pinfo->clk)
clk_set_rate(pinfo->clk, baud);
else
- cpm_set_brg(pinfo->brg - 1, baud);
+ cpm_setbrg(pinfo->brg - 1, baud);
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -771,7 +774,8 @@ static void cpm_uart_init_scc(struct uart_cpm_port *pinfo)
* parameter ram.
*/
- cpm_set_scc_fcr(sup);
+ out_8(&sup->scc_genscc.scc_rfcr, CPMFCR_GBL | CPMFCR_EB);
+ out_8(&sup->scc_genscc.scc_tfcr, CPMFCR_GBL | CPMFCR_EB);
out_be16(&sup->scc_genscc.scc_mrblr, pinfo->rx_fifosize);
out_be16(&sup->scc_maxidl, 0x10);
@@ -842,7 +846,8 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
/* Set up the uart parameters in the
* parameter ram.
*/
- cpm_set_smc_fcr(up);
+ out_8(&up->smc_rfcr, CPMFCR_GBL | CPMFCR_EB);
+ out_8(&up->smc_tfcr, CPMFCR_GBL | CPMFCR_EB);
/* Using idle character time requires some additional tuning. */
out_be16(&up->smc_mrblr, pinfo->rx_fifosize);
@@ -864,6 +869,78 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
}
/*
+ * Allocate DP-Ram and memory buffers. We need to allocate transmit and
+ * receive buffer descriptors from dual port ram, and a character
+ * buffer area from host mem. If we are allocating for the console we need
+ * to do it from bootmem.
+ */
+static int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con)
+{
+ int dpmemsz, memsz;
+ u8 __iomem *dp_mem;
+ unsigned long dp_offset;
+ u8 *mem_addr;
+ dma_addr_t dma_addr = 0;
+
+ pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line);
+
+ dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos);
+ dp_offset = cpm_muram_alloc(dpmemsz, 8);
+ if (IS_ERR_VALUE(dp_offset)) {
+ pr_err("%s: could not allocate buffer descriptors\n", __func__);
+ return -ENOMEM;
+ }
+
+ dp_mem = cpm_muram_addr(dp_offset);
+
+ memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
+ L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
+ if (IS_ENABLED(CONFIG_CPM1) && is_con) {
+ /* was hostalloc but changed because it blows away the */
+ /* large tlb mapping when pinning the kernel area */
+ mem_addr = (u8 __force *)cpm_muram_addr(cpm_muram_alloc(memsz, 8));
+ dma_addr = cpm_muram_dma((void __iomem *)mem_addr);
+ } else if (is_con) {
+ mem_addr = kzalloc(memsz, GFP_NOWAIT);
+ dma_addr = virt_to_bus(mem_addr);
+ } else {
+ mem_addr = dma_alloc_coherent(pinfo->port.dev, memsz, &dma_addr,
+ GFP_KERNEL);
+ }
+
+ if (!mem_addr) {
+ cpm_muram_free(dp_offset);
+ pr_err("%s: could not allocate coherent memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ pinfo->dp_addr = dp_offset;
+ pinfo->mem_addr = mem_addr;
+ pinfo->dma_addr = dma_addr;
+ pinfo->mem_size = memsz;
+
+ pinfo->rx_buf = mem_addr;
+ pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos
+ * pinfo->rx_fifosize);
+
+ pinfo->rx_bd_base = (cbd_t __iomem *)dp_mem;
+ pinfo->tx_bd_base = pinfo->rx_bd_base + pinfo->rx_nrfifos;
+
+ return 0;
+}
+
+static void cpm_uart_freebuf(struct uart_cpm_port *pinfo)
+{
+ dma_free_coherent(pinfo->port.dev, L1_CACHE_ALIGN(pinfo->rx_nrfifos *
+ pinfo->rx_fifosize) +
+ L1_CACHE_ALIGN(pinfo->tx_nrfifos *
+ pinfo->tx_fifosize), (void __force *)pinfo->mem_addr,
+ pinfo->dma_addr);
+
+ cpm_muram_free(pinfo->dp_addr);
+}
+
+/*
* Initialize port. This is called from early_console stuff
* so we have to be careful here !
*/
@@ -1128,7 +1205,55 @@ static const struct uart_ops cpm_uart_pops = {
#endif
};
-struct uart_cpm_port cpm_uart_ports[UART_NR];
+static struct uart_cpm_port cpm_uart_ports[UART_NR];
+
+static void __iomem *cpm_uart_map_pram(struct uart_cpm_port *port,
+ struct device_node *np)
+{
+ void __iomem *pram;
+ unsigned long offset;
+ struct resource res;
+ resource_size_t len;
+
+ /* Don't remap parameter RAM if it has already been initialized
+ * during console setup.
+ */
+ if (IS_SMC(port) && port->smcup)
+ return port->smcup;
+ else if (!IS_SMC(port) && port->sccup)
+ return port->sccup;
+
+ if (of_address_to_resource(np, 1, &res))
+ return NULL;
+
+ len = resource_size(&res);
+ pram = ioremap(res.start, len);
+ if (!pram)
+ return NULL;
+
+ if (!IS_ENABLED(CONFIG_CPM2) || !IS_SMC(port))
+ return pram;
+
+ if (len != 2) {
+ pr_warn("cpm_uart[%d]: device tree references "
+ "SMC pram, using boot loader/wrapper pram mapping. "
+ "Please fix your device tree to reference the pram "
+ "base register instead.\n",
+ port->port.line);
+ return pram;
+ }
+
+ offset = cpm_muram_alloc(64, 64);
+ out_be16(pram, offset);
+ iounmap(pram);
+ return cpm_muram_addr(offset);
+}
+
+static void cpm_uart_unmap_pram(struct uart_cpm_port *port, void __iomem *pram)
+{
+ if (!IS_ENABLED(CONFIG_CPM2) || !IS_SMC(port))
+ iounmap(pram);
+}
static int cpm_uart_init_port(struct device_node *np,
struct uart_cpm_port *pinfo)
@@ -1255,19 +1380,14 @@ static void cpm_uart_console_write(struct console *co, const char *s,
{
struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index];
unsigned long flags;
- int nolock = oops_in_progress;
- if (unlikely(nolock)) {
+ if (unlikely(oops_in_progress)) {
local_irq_save(flags);
- } else {
- spin_lock_irqsave(&pinfo->port.lock, flags);
- }
-
- cpm_uart_early_write(pinfo, s, count, true);
-
- if (unlikely(nolock)) {
+ cpm_uart_early_write(pinfo, s, count, true);
local_irq_restore(flags);
} else {
+ spin_lock_irqsave(&pinfo->port.lock, flags);
+ cpm_uart_early_write(pinfo, s, count, true);
spin_unlock_irqrestore(&pinfo->port.lock, flags);
}
}
@@ -1319,7 +1439,8 @@ static int __init cpm_uart_console_setup(struct console *co, char *options)
if (options) {
uart_parse_options(options, &baud, &parity, &bits, &flow);
} else {
- if ((baud = uart_baudrate()) == -1)
+ baud = get_baudrate();
+ if (baud == -1)
baud = 9600;
}
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart.h b/drivers/tty/serial/cpm_uart.h
index 46c03ed71c31..37bb6e976e03 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart.h
+++ b/drivers/tty/serial/cpm_uart.h
@@ -11,41 +11,31 @@
#define CPM_UART_H
#include <linux/platform_device.h>
-#include <linux/fs_uart_pd.h>
struct gpio_desc;
#if defined(CONFIG_CPM2)
-#include "cpm_uart_cpm2.h"
+#include "asm/cpm2.h"
#elif defined(CONFIG_CPM1)
-#include "cpm_uart_cpm1.h"
+#include "asm/cpm1.h"
#endif
+#define DPRAM_BASE ((u8 __iomem *)cpm_muram_addr(0))
+
#define SERIAL_CPM_MAJOR 204
#define SERIAL_CPM_MINOR 46
#define IS_SMC(pinfo) (pinfo->flags & FLAG_SMC)
-#define IS_DISCARDING(pinfo) (pinfo->flags & FLAG_DISCARDING)
-#define FLAG_DISCARDING 0x00000004 /* when set, don't discard */
#define FLAG_SMC 0x00000002
#define FLAG_CONSOLE 0x00000001
-#define UART_SMC1 fsid_smc1_uart
-#define UART_SMC2 fsid_smc2_uart
-#define UART_SCC1 fsid_scc1_uart
-#define UART_SCC2 fsid_scc2_uart
-#define UART_SCC3 fsid_scc3_uart
-#define UART_SCC4 fsid_scc4_uart
-
-#define UART_NR fs_uart_nr
+#define UART_NR 6
#define RX_NUM_FIFO 4
#define RX_BUF_SIZE 32
#define TX_NUM_FIFO 4
#define TX_BUF_SIZE 32
-#define SCC_WAIT_CLOSING 100
-
#define GPIO_CTS 0
#define GPIO_RTS 1
#define GPIO_DCD 2
@@ -85,24 +75,6 @@ struct uart_cpm_port {
struct gpio_desc *gpios[NUM_GPIOS];
};
-extern struct uart_cpm_port cpm_uart_ports[UART_NR];
-
-/* these are located in their respective files */
-void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd);
-void __iomem *cpm_uart_map_pram(struct uart_cpm_port *port,
- struct device_node *np);
-void cpm_uart_unmap_pram(struct uart_cpm_port *port, void __iomem *pram);
-int cpm_uart_init_portdesc(void);
-int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con);
-void cpm_uart_freebuf(struct uart_cpm_port *pinfo);
-
-void smc1_lineif(struct uart_cpm_port *pinfo);
-void smc2_lineif(struct uart_cpm_port *pinfo);
-void scc1_lineif(struct uart_cpm_port *pinfo);
-void scc2_lineif(struct uart_cpm_port *pinfo);
-void scc3_lineif(struct uart_cpm_port *pinfo);
-void scc4_lineif(struct uart_cpm_port *pinfo);
-
/*
virtual to phys translation
*/
diff --git a/drivers/tty/serial/cpm_uart/Makefile b/drivers/tty/serial/cpm_uart/Makefile
deleted file mode 100644
index 3f3a6ed02ed4..000000000000
--- a/drivers/tty/serial/cpm_uart/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for the Motorola 8xx FEC ethernet controller
-#
-
-obj-$(CONFIG_SERIAL_CPM) += cpm_uart.o
-
-# Select the correct platform objects.
-cpm_uart-objs-$(CONFIG_CPM2) += cpm_uart_cpm2.o
-cpm_uart-objs-$(CONFIG_CPM1) += cpm_uart_cpm1.o
-
-cpm_uart-objs := cpm_uart_core.o $(cpm_uart-objs-y)
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
deleted file mode 100644
index 56fc527015cb..000000000000
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Driver for CPM (SCC/SMC) serial ports; CPM1 definitions
- *
- * Maintainer: Kumar Gala (galak@kernel.crashing.org) (CPM2)
- * Pantelis Antoniou (panto@intracom.gr) (CPM1)
- *
- * Copyright (C) 2004 Freescale Semiconductor, Inc.
- * (C) 2004 Intracom, S.A.
- * (C) 2006 MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- */
-
-#include <linux/module.h>
-#include <linux/tty.h>
-#include <linux/gfp.h>
-#include <linux/ioport.h>
-#include <linux/serial.h>
-#include <linux/console.h>
-#include <linux/sysrq.h>
-#include <linux/device.h>
-#include <linux/memblock.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/fs_pd.h>
-
-#include <linux/serial_core.h>
-#include <linux/kernel.h>
-
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include "cpm_uart.h"
-
-/**************************************************************/
-
-void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd)
-{
- cpm_command(port->command, cmd);
-}
-
-void __iomem *cpm_uart_map_pram(struct uart_cpm_port *port,
- struct device_node *np)
-{
- return of_iomap(np, 1);
-}
-
-void cpm_uart_unmap_pram(struct uart_cpm_port *port, void __iomem *pram)
-{
- iounmap(pram);
-}
-
-/*
- * Allocate DP-Ram and memory buffers. We need to allocate a transmit and
- * receive buffer descriptors from dual port ram, and a character
- * buffer area from host mem. If we are allocating for the console we need
- * to do it from bootmem
- */
-int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con)
-{
- int dpmemsz, memsz;
- u8 *dp_mem;
- unsigned long dp_offset;
- u8 *mem_addr;
- dma_addr_t dma_addr = 0;
-
- pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line);
-
- dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos);
- dp_offset = cpm_dpalloc(dpmemsz, 8);
- if (IS_ERR_VALUE(dp_offset)) {
- printk(KERN_ERR
- "cpm_uart_cpm1.c: could not allocate buffer descriptors\n");
- return -ENOMEM;
- }
- dp_mem = cpm_dpram_addr(dp_offset);
-
- memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
- L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
- if (is_con) {
- /* was hostalloc but changed cause it blows away the */
- /* large tlb mapping when pinning the kernel area */
- mem_addr = (u8 *) cpm_dpram_addr(cpm_dpalloc(memsz, 8));
- dma_addr = (u32)cpm_dpram_phys(mem_addr);
- } else
- mem_addr = dma_alloc_coherent(pinfo->port.dev, memsz, &dma_addr,
- GFP_KERNEL);
-
- if (mem_addr == NULL) {
- cpm_dpfree(dp_offset);
- printk(KERN_ERR
- "cpm_uart_cpm1.c: could not allocate coherent memory\n");
- return -ENOMEM;
- }
-
- pinfo->dp_addr = dp_offset;
- pinfo->mem_addr = mem_addr; /* virtual address*/
- pinfo->dma_addr = dma_addr; /* physical address*/
- pinfo->mem_size = memsz;
-
- pinfo->rx_buf = mem_addr;
- pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos
- * pinfo->rx_fifosize);
-
- pinfo->rx_bd_base = (cbd_t __iomem __force *)dp_mem;
- pinfo->tx_bd_base = pinfo->rx_bd_base + pinfo->rx_nrfifos;
-
- return 0;
-}
-
-void cpm_uart_freebuf(struct uart_cpm_port *pinfo)
-{
- dma_free_coherent(pinfo->port.dev, L1_CACHE_ALIGN(pinfo->rx_nrfifos *
- pinfo->rx_fifosize) +
- L1_CACHE_ALIGN(pinfo->tx_nrfifos *
- pinfo->tx_fifosize), pinfo->mem_addr,
- pinfo->dma_addr);
-
- cpm_dpfree(pinfo->dp_addr);
-}
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
deleted file mode 100644
index 18ec0849918a..000000000000
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm1.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Driver for CPM (SCC/SMC) serial ports
- *
- * definitions for cpm1
- *
- */
-
-#ifndef CPM_UART_CPM1_H
-#define CPM_UART_CPM1_H
-
-#include <asm/cpm1.h>
-
-static inline void cpm_set_brg(int brg, int baud)
-{
- cpm_setbrg(brg, baud);
-}
-
-static inline void cpm_set_scc_fcr(scc_uart_t __iomem * sup)
-{
- out_8(&sup->scc_genscc.scc_rfcr, SMC_EB);
- out_8(&sup->scc_genscc.scc_tfcr, SMC_EB);
-}
-
-static inline void cpm_set_smc_fcr(smc_uart_t __iomem * up)
-{
- out_8(&up->smc_rfcr, SMC_EB);
- out_8(&up->smc_tfcr, SMC_EB);
-}
-
-#define DPRAM_BASE ((u8 __iomem __force *)cpm_dpram_addr(0))
-
-#endif
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
deleted file mode 100644
index 108af254e8f3..000000000000
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.c
+++ /dev/null
@@ -1,156 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Driver for CPM (SCC/SMC) serial ports; CPM2 definitions
- *
- * Maintainer: Kumar Gala (galak@kernel.crashing.org) (CPM2)
- * Pantelis Antoniou (panto@intracom.gr) (CPM1)
- *
- * Copyright (C) 2004 Freescale Semiconductor, Inc.
- * (C) 2004 Intracom, S.A.
- * (C) 2006 MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- */
-
-#include <linux/module.h>
-#include <linux/tty.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/serial.h>
-#include <linux/console.h>
-#include <linux/sysrq.h>
-#include <linux/device.h>
-#include <linux/memblock.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/fs_pd.h>
-
-#include <linux/serial_core.h>
-#include <linux/kernel.h>
-
-#include "cpm_uart.h"
-
-/**************************************************************/
-
-void cpm_line_cr_cmd(struct uart_cpm_port *port, int cmd)
-{
- cpm_command(port->command, cmd);
-}
-
-void __iomem *cpm_uart_map_pram(struct uart_cpm_port *port,
- struct device_node *np)
-{
- void __iomem *pram;
- unsigned long offset;
- struct resource res;
- resource_size_t len;
-
- /* Don't remap parameter RAM if it has already been initialized
- * during console setup.
- */
- if (IS_SMC(port) && port->smcup)
- return port->smcup;
- else if (!IS_SMC(port) && port->sccup)
- return port->sccup;
-
- if (of_address_to_resource(np, 1, &res))
- return NULL;
-
- len = resource_size(&res);
- pram = ioremap(res.start, len);
- if (!pram)
- return NULL;
-
- if (!IS_SMC(port))
- return pram;
-
- if (len != 2) {
- printk(KERN_WARNING "cpm_uart[%d]: device tree references "
- "SMC pram, using boot loader/wrapper pram mapping. "
- "Please fix your device tree to reference the pram "
- "base register instead.\n",
- port->port.line);
- return pram;
- }
-
- offset = cpm_dpalloc(PROFF_SMC_SIZE, 64);
- out_be16(pram, offset);
- iounmap(pram);
- return cpm_muram_addr(offset);
-}
-
-void cpm_uart_unmap_pram(struct uart_cpm_port *port, void __iomem *pram)
-{
- if (!IS_SMC(port))
- iounmap(pram);
-}
-
-/*
- * Allocate DP-Ram and memory buffers. We need to allocate a transmit and
- * receive buffer descriptors from dual port ram, and a character
- * buffer area from host mem. If we are allocating for the console we need
- * to do it from bootmem
- */
-int cpm_uart_allocbuf(struct uart_cpm_port *pinfo, unsigned int is_con)
-{
- int dpmemsz, memsz;
- u8 __iomem *dp_mem;
- unsigned long dp_offset;
- u8 *mem_addr;
- dma_addr_t dma_addr = 0;
-
- pr_debug("CPM uart[%d]:allocbuf\n", pinfo->port.line);
-
- dpmemsz = sizeof(cbd_t) * (pinfo->rx_nrfifos + pinfo->tx_nrfifos);
- dp_offset = cpm_dpalloc(dpmemsz, 8);
- if (IS_ERR_VALUE(dp_offset)) {
- printk(KERN_ERR
- "cpm_uart_cpm.c: could not allocate buffer descriptors\n");
- return -ENOMEM;
- }
-
- dp_mem = cpm_dpram_addr(dp_offset);
-
- memsz = L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize) +
- L1_CACHE_ALIGN(pinfo->tx_nrfifos * pinfo->tx_fifosize);
- if (is_con) {
- mem_addr = kzalloc(memsz, GFP_NOWAIT);
- dma_addr = virt_to_bus(mem_addr);
- }
- else
- mem_addr = dma_alloc_coherent(pinfo->port.dev, memsz, &dma_addr,
- GFP_KERNEL);
-
- if (mem_addr == NULL) {
- cpm_dpfree(dp_offset);
- printk(KERN_ERR
- "cpm_uart_cpm.c: could not allocate coherent memory\n");
- return -ENOMEM;
- }
-
- pinfo->dp_addr = dp_offset;
- pinfo->mem_addr = mem_addr;
- pinfo->dma_addr = dma_addr;
- pinfo->mem_size = memsz;
-
- pinfo->rx_buf = mem_addr;
- pinfo->tx_buf = pinfo->rx_buf + L1_CACHE_ALIGN(pinfo->rx_nrfifos
- * pinfo->rx_fifosize);
-
- pinfo->rx_bd_base = (cbd_t __iomem *)dp_mem;
- pinfo->tx_bd_base = pinfo->rx_bd_base + pinfo->rx_nrfifos;
-
- return 0;
-}
-
-void cpm_uart_freebuf(struct uart_cpm_port *pinfo)
-{
- dma_free_coherent(pinfo->port.dev, L1_CACHE_ALIGN(pinfo->rx_nrfifos *
- pinfo->rx_fifosize) +
- L1_CACHE_ALIGN(pinfo->tx_nrfifos *
- pinfo->tx_fifosize), (void __force *)pinfo->mem_addr,
- pinfo->dma_addr);
-
- cpm_dpfree(pinfo->dp_addr);
-}
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h b/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
deleted file mode 100644
index 051a8509c3e5..000000000000
--- a/drivers/tty/serial/cpm_uart/cpm_uart_cpm2.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Driver for CPM (SCC/SMC) serial ports
- *
- * definitions for cpm2
- *
- */
-
-#ifndef CPM_UART_CPM2_H
-#define CPM_UART_CPM2_H
-
-#include <asm/cpm2.h>
-
-static inline void cpm_set_brg(int brg, int baud)
-{
- cpm_setbrg(brg, baud);
-}
-
-static inline void cpm_set_scc_fcr(scc_uart_t __iomem *sup)
-{
- out_8(&sup->scc_genscc.scc_rfcr, CPMFCR_GBL | CPMFCR_EB);
- out_8(&sup->scc_genscc.scc_tfcr, CPMFCR_GBL | CPMFCR_EB);
-}
-
-static inline void cpm_set_smc_fcr(smc_uart_t __iomem *up)
-{
- out_8(&up->smc_rfcr, CPMFCR_GBL | CPMFCR_EB);
- out_8(&up->smc_tfcr, CPMFCR_GBL | CPMFCR_EB);
-}
-
-#define DPRAM_BASE ((u8 __iomem __force *)cpm_dpram_addr(0))
-
-#endif
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index ed197705f7ee..128b5479e813 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -136,8 +136,7 @@ static void digicolor_uart_rx(struct uart_port *port)
spin_lock_irqsave(&port->lock, flags);
while (1) {
- u8 status, ch;
- unsigned int ch_flag;
+ u8 status, ch, ch_flag;
if (digicolor_uart_rx_empty(port))
break;
diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c
index 6b7ed7f2f3ca..667f52e83277 100644
--- a/drivers/tty/serial/dz.c
+++ b/drivers/tty/serial/dz.c
@@ -181,8 +181,8 @@ static inline void dz_receive_chars(struct dz_mux *mux)
struct dz_port *dport = &mux->dport[0];
struct uart_icount *icount;
int lines_rx[DZ_NB_PORT] = { [0 ... DZ_NB_PORT - 1] = 0 };
- unsigned char ch, flag;
u16 status;
+ u8 ch, flag;
int i;
while ((status = dz_in(dport, DZ_RBUF)) & DZ_DVAL) {
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
index 6fc21b6684e6..249cb380c3c6 100644
--- a/drivers/tty/serial/fsl_linflexuart.c
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -11,7 +11,7 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty_flip.h>
@@ -827,14 +827,10 @@ static int linflex_probe(struct platform_device *pdev)
sport->line = ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
- sport->mapbase = res->start;
- sport->membase = devm_ioremap_resource(&pdev->dev, res);
+ sport->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(sport->membase))
return PTR_ERR(sport->membase);
+ sport->mapbase = res->start;
sport->dev = &pdev->dev;
sport->type = PORT_LINFLEXUART;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index c569a08b5b19..f72e1340b47d 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -18,9 +18,9 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
@@ -288,6 +288,7 @@ struct lpuart_port {
wait_queue_head_t dma_wait;
bool is_cs7; /* Set to true when character size is 7 */
/* and the parity is enabled */
+ bool dma_idle_int;
};
struct lpuart_soc_data {
@@ -1064,26 +1065,6 @@ static irqreturn_t lpuart_int(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static irqreturn_t lpuart32_int(int irq, void *dev_id)
-{
- struct lpuart_port *sport = dev_id;
- unsigned long sts, rxcount;
-
- sts = lpuart32_read(&sport->port, UARTSTAT);
- rxcount = lpuart32_read(&sport->port, UARTWATER);
- rxcount = rxcount >> UARTWATER_RXCNT_OFF;
-
- if ((sts & UARTSTAT_RDRF || rxcount > 0) && !sport->lpuart_dma_rx_use)
- lpuart32_rxint(sport);
-
- if ((sts & UARTSTAT_TDRE) && !sport->lpuart_dma_tx_use)
- lpuart32_txint(sport);
-
- lpuart32_write(&sport->port, sts, UARTSTAT);
- return IRQ_HANDLED;
-}
-
-
static inline void lpuart_handle_sysrq_chars(struct uart_port *port,
unsigned char *p, int count)
{
@@ -1266,7 +1247,8 @@ exit:
spin_unlock_irqrestore(&sport->port.lock, flags);
tty_flip_buffer_push(port);
- mod_timer(&sport->lpuart_timer, jiffies + sport->dma_rx_timeout);
+ if (!sport->dma_idle_int)
+ mod_timer(&sport->lpuart_timer, jiffies + sport->dma_rx_timeout);
}
static void lpuart_dma_rx_complete(void *arg)
@@ -1276,6 +1258,50 @@ static void lpuart_dma_rx_complete(void *arg)
lpuart_copy_rx_to_tty(sport);
}
+static void lpuart32_dma_idleint(struct lpuart_port *sport)
+{
+ enum dma_status dmastat;
+ struct dma_chan *chan = sport->dma_rx_chan;
+ struct circ_buf *ring = &sport->rx_ring;
+ struct dma_tx_state state;
+ int count = 0;
+
+ dmastat = dmaengine_tx_status(chan, sport->dma_rx_cookie, &state);
+ if (dmastat == DMA_ERROR) {
+ dev_err(sport->port.dev, "Rx DMA transfer failed!\n");
+ return;
+ }
+
+ ring->head = sport->rx_sgl.length - state.residue;
+ count = CIRC_CNT(ring->head, ring->tail, sport->rx_sgl.length);
+
+ /* Check if new data received before copying */
+ if (count)
+ lpuart_copy_rx_to_tty(sport);
+}
+
+static irqreturn_t lpuart32_int(int irq, void *dev_id)
+{
+ struct lpuart_port *sport = dev_id;
+ unsigned long sts, rxcount;
+
+ sts = lpuart32_read(&sport->port, UARTSTAT);
+ rxcount = lpuart32_read(&sport->port, UARTWATER);
+ rxcount = rxcount >> UARTWATER_RXCNT_OFF;
+
+ if ((sts & UARTSTAT_RDRF || rxcount > 0) && !sport->lpuart_dma_rx_use)
+ lpuart32_rxint(sport);
+
+ if ((sts & UARTSTAT_TDRE) && !sport->lpuart_dma_tx_use)
+ lpuart32_txint(sport);
+
+ if ((sts & UARTSTAT_IDLE) && sport->lpuart_dma_rx_use && sport->dma_idle_int)
+ lpuart32_dma_idleint(sport);
+
+ lpuart32_write(&sport->port, sts, UARTSTAT);
+ return IRQ_HANDLED;
+}
+
/*
* Timer function to simulate the hardware EOP (End Of Package) event.
* The timer callback is to check for new RX data and copy to TTY buffer.
@@ -1392,6 +1418,12 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
unsigned long temp = lpuart32_read(&sport->port, UARTBAUD);
lpuart32_write(&sport->port, temp | UARTBAUD_RDMAE, UARTBAUD);
+
+ if (sport->dma_idle_int) {
+ unsigned long ctrl = lpuart32_read(&sport->port, UARTCTRL);
+
+ lpuart32_write(&sport->port, ctrl | UARTCTRL_ILIE, UARTCTRL);
+ }
} else {
writeb(readb(sport->port.membase + UARTCR5) | UARTCR5_RDMAS,
sport->port.membase + UARTCR5);
@@ -1407,7 +1439,9 @@ static void lpuart_dma_rx_free(struct uart_port *port)
struct dma_chan *chan = sport->dma_rx_chan;
dmaengine_terminate_sync(chan);
- del_timer_sync(&sport->lpuart_timer);
+ if (!sport->dma_idle_int)
+ del_timer_sync(&sport->lpuart_timer);
+
dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
kfree(sport->rx_ring.buf);
sport->rx_ring.tail = 0;
@@ -1669,6 +1703,9 @@ static void lpuart32_setup_watermark_enable(struct lpuart_port *sport)
static void rx_dma_timer_init(struct lpuart_port *sport)
{
+ if (sport->dma_idle_int)
+ return;
+
timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0);
sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
add_timer(&sport->lpuart_timer);
@@ -2811,8 +2848,7 @@ static int lpuart_probe(struct platform_device *pdev)
if (!sport)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sport->port.membase = devm_ioremap_resource(&pdev->dev, res);
+ sport->port.membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(sport->port.membase))
return PTR_ERR(sport->port.membase);
@@ -2822,6 +2858,8 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.type = PORT_LPUART;
sport->devtype = sdata->devtype;
sport->rx_watermark = sdata->rx_watermark;
+ sport->dma_idle_int = is_imx7ulp_lpuart(sport) || is_imx8ulp_lpuart(sport) ||
+ is_imx8qxp_lpuart(sport);
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 7341d060f85c..13cb78340709 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -25,7 +25,6 @@
#include <linux/rational.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
@@ -2276,8 +2275,7 @@ static int imx_uart_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index b1f27e168135..845ff706bc59 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -248,8 +248,8 @@ static void ip22zilog_maybe_update_regs(struct uart_ip22zilog_port *up,
static bool ip22zilog_receive_chars(struct uart_ip22zilog_port *up,
struct zilog_channel *channel)
{
- unsigned char ch, flag;
unsigned int r1;
+ u8 ch, flag;
bool push = up->port.state != NULL;
for (;;) {
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
index 55c3c9db7462..e93850f6447a 100644
--- a/drivers/tty/serial/kgdb_nmi.c
+++ b/drivers/tty/serial/kgdb_nmi.c
@@ -304,7 +304,8 @@ static unsigned int kgdb_nmi_tty_write_room(struct tty_struct *tty)
return 2048;
}
-static int kgdb_nmi_tty_write(struct tty_struct *tty, const unchar *buf, int c)
+static ssize_t kgdb_nmi_tty_write(struct tty_struct *tty, const u8 *buf,
+ size_t c)
{
int i;
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index bcaa479608d8..3adb60c683f7 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -17,7 +17,8 @@
#include <linux/ioport.h>
#include <linux/lantiq.h>
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
index 80de3a42b67b..d881cdd2a58f 100644
--- a/drivers/tty/serial/liteuart.c
+++ b/drivers/tty/serial/liteuart.c
@@ -11,8 +11,7 @@
#include <linux/litex.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
diff --git a/drivers/tty/serial/ma35d1_serial.c b/drivers/tty/serial/ma35d1_serial.c
index 2604b4d9fb78..465b1def9e11 100644
--- a/drivers/tty/serial/ma35d1_serial.c
+++ b/drivers/tty/serial/ma35d1_serial.c
@@ -8,7 +8,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/iopoll.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
@@ -788,7 +788,6 @@ static struct platform_driver ma35d1serial_driver = {
.resume = ma35d1serial_resume,
.driver = {
.name = "ma35d1-uart",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(ma35d1_serial_of_match),
},
};
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 86dcbff8faa3..5efb2b593be3 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -215,8 +215,9 @@ static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx)
static int max3100_handlerx(struct max3100_port *s, u16 rx)
{
- unsigned int ch, flg, status = 0;
+ unsigned int status = 0;
int ret = 0, cts;
+ u8 ch, flg;
if (rx & MAX3100_R && s->rx_enabled) {
dev_dbg(&s->spi->dev, "%s\n", __func__);
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 997e39449766..db3204d2a305 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -669,7 +669,8 @@ static void max310x_batch_read(struct uart_port *port, u8 *rxbuf, unsigned int l
static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
{
struct max310x_one *one = to_max310x_port(port);
- unsigned int sts, ch, flag, i;
+ unsigned int sts, i;
+ u8 ch, flag;
if (port->read_status_mask == MAX310X_LSR_RXOVR_BIT) {
/* We are just reading, happily ignoring any error conditions.
@@ -1368,6 +1369,11 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY;
s->p[i].port.iotype = UPIO_PORT;
s->p[i].port.iobase = i;
+ /*
+ * Use all ones as membase to make sure uart_configure_port() in
+ * serial_core.c does not abort for SPI/I2C devices where the
+ * membase address is not applicable.
+ */
s->p[i].port.membase = (void __iomem *)~0;
s->p[i].port.uartclk = uartclk;
s->p[i].port.rs485_config = max310x_rs485_config;
@@ -1399,7 +1405,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
}
#ifdef CONFIG_GPIOLIB
- /* Setup GPIO cotroller */
+ /* Setup GPIO controller */
s->gpio.owner = THIS_MODULE;
s->gpio.parent = dev;
s->gpio.label = devtype->name;
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 3239babe12a4..1666ce012e5e 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -281,7 +281,7 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
static void mcf_rx_chars(struct mcf_uart *pp)
{
struct uart_port *port = &pp->port;
- unsigned char status, ch, flag;
+ u8 status, ch, flag;
while ((status = readb(port->membase + MCFUART_USR)) & MCFUART_USR_RXREADY) {
ch = readb(port->membase + MCFUART_URB);
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 2501db5a7aaf..790d910dafa5 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -72,16 +72,17 @@
#define AML_UART_PORT_NUM 12
#define AML_UART_PORT_OFFSET 6
-#define AML_UART_DEV_NAME "ttyAML"
#define AML_UART_POLL_USEC 5
#define AML_UART_TIMEOUT_USEC 10000
-static struct uart_driver meson_uart_driver;
+static struct uart_driver meson_uart_driver_ttyAML;
+static struct uart_driver meson_uart_driver_ttyS;
static struct uart_port *meson_ports[AML_UART_PORT_NUM];
struct meson_uart_data {
+ struct uart_driver *uart_driver;
bool has_xtal_div2;
};
@@ -611,21 +612,19 @@ static int meson_serial_console_setup(struct console *co, char *options)
return uart_set_options(port, co, baud, parity, bits, flow);
}
-static struct console meson_serial_console = {
- .name = AML_UART_DEV_NAME,
- .write = meson_serial_console_write,
- .device = uart_console_device,
- .setup = meson_serial_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &meson_uart_driver,
-};
+#define MESON_SERIAL_CONSOLE(_devname) \
+ static struct console meson_serial_console_##_devname = { \
+ .name = __stringify(_devname), \
+ .write = meson_serial_console_write, \
+ .device = uart_console_device, \
+ .setup = meson_serial_console_setup, \
+ .flags = CON_PRINTBUFFER, \
+ .index = -1, \
+ .data = &meson_uart_driver_##_devname, \
+ }
-static int __init meson_serial_console_init(void)
-{
- register_console(&meson_serial_console);
- return 0;
-}
+MESON_SERIAL_CONSOLE(ttyAML);
+MESON_SERIAL_CONSOLE(ttyS);
static void meson_serial_early_console_write(struct console *co,
const char *s,
@@ -650,21 +649,22 @@ meson_serial_early_console_setup(struct earlycon_device *device, const char *opt
OF_EARLYCON_DECLARE(meson, "amlogic,meson-ao-uart",
meson_serial_early_console_setup);
-#define MESON_SERIAL_CONSOLE (&meson_serial_console)
+#define MESON_SERIAL_CONSOLE_PTR(_devname) (&meson_serial_console_##_devname)
#else
-static int __init meson_serial_console_init(void) {
- return 0;
-}
-#define MESON_SERIAL_CONSOLE NULL
+#define MESON_SERIAL_CONSOLE_PTR(_devname) (NULL)
#endif
-static struct uart_driver meson_uart_driver = {
- .owner = THIS_MODULE,
- .driver_name = "meson_uart",
- .dev_name = AML_UART_DEV_NAME,
- .nr = AML_UART_PORT_NUM,
- .cons = MESON_SERIAL_CONSOLE,
-};
+#define MESON_UART_DRIVER(_devname) \
+ static struct uart_driver meson_uart_driver_##_devname = { \
+ .owner = THIS_MODULE, \
+ .driver_name = "meson_uart", \
+ .dev_name = __stringify(_devname), \
+ .nr = AML_UART_PORT_NUM, \
+ .cons = MESON_SERIAL_CONSOLE_PTR(_devname), \
+ }
+
+MESON_UART_DRIVER(ttyAML);
+MESON_UART_DRIVER(ttyS);
static int meson_uart_probe_clocks(struct platform_device *pdev,
struct uart_port *port)
@@ -690,8 +690,16 @@ static int meson_uart_probe_clocks(struct platform_device *pdev,
return 0;
}
+static struct uart_driver *meson_uart_current(const struct meson_uart_data *pd)
+{
+ return (pd && pd->uart_driver) ?
+ pd->uart_driver : &meson_uart_driver_ttyAML;
+}
+
static int meson_uart_probe(struct platform_device *pdev)
{
+ const struct meson_uart_data *priv_data;
+ struct uart_driver *uart_driver;
struct resource *res_mem;
struct uart_port *port;
u32 fifosize = 64; /* Default is 64, 128 for EE UART_0 */
@@ -726,8 +734,8 @@ static int meson_uart_probe(struct platform_device *pdev)
of_property_read_u32(pdev->dev.of_node, "fifo-size", &fifosize);
if (meson_ports[pdev->id]) {
- dev_err(&pdev->dev, "port %d already allocated\n", pdev->id);
- return -EBUSY;
+ return dev_err_probe(&pdev->dev, -EBUSY,
+ "port %d already allocated\n", pdev->id);
}
port = devm_kzalloc(&pdev->dev, sizeof(struct uart_port), GFP_KERNEL);
@@ -738,6 +746,17 @@ static int meson_uart_probe(struct platform_device *pdev)
if (ret)
return ret;
+ priv_data = device_get_match_data(&pdev->dev);
+
+ uart_driver = meson_uart_current(priv_data);
+
+ if (!uart_driver->state) {
+ ret = uart_register_driver(uart_driver);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "can't register uart driver\n");
+ }
+
port->iotype = UPIO_MEM;
port->mapbase = res_mem->start;
port->mapsize = resource_size(res_mem);
@@ -750,7 +769,7 @@ static int meson_uart_probe(struct platform_device *pdev)
port->x_char = 0;
port->ops = &meson_uart_ops;
port->fifosize = fifosize;
- port->private_data = (void *)device_get_match_data(&pdev->dev);
+ port->private_data = (void *)priv_data;
meson_ports[pdev->id] = port;
platform_set_drvdata(pdev, port);
@@ -761,7 +780,7 @@ static int meson_uart_probe(struct platform_device *pdev)
meson_uart_release_port(port);
}
- ret = uart_add_one_port(&meson_uart_driver, port);
+ ret = uart_add_one_port(uart_driver, port);
if (ret)
meson_ports[pdev->id] = NULL;
@@ -770,12 +789,21 @@ static int meson_uart_probe(struct platform_device *pdev)
static int meson_uart_remove(struct platform_device *pdev)
{
+ struct uart_driver *uart_driver;
struct uart_port *port;
port = platform_get_drvdata(pdev);
- uart_remove_one_port(&meson_uart_driver, port);
+ uart_driver = meson_uart_current(port->private_data);
+ uart_remove_one_port(uart_driver, port);
meson_ports[pdev->id] = NULL;
+ for (int id = 0; id < AML_UART_PORT_NUM; id++)
+ if (meson_ports[id])
+ return 0;
+
+ /* No more available uart ports, unregister uart driver */
+ uart_unregister_driver(uart_driver);
+
return 0;
}
@@ -783,6 +811,16 @@ static struct meson_uart_data meson_g12a_uart_data = {
.has_xtal_div2 = true,
};
+static struct meson_uart_data meson_a1_uart_data = {
+ .uart_driver = &meson_uart_driver_ttyS,
+ .has_xtal_div2 = false,
+};
+
+static struct meson_uart_data meson_s4_uart_data = {
+ .uart_driver = &meson_uart_driver_ttyS,
+ .has_xtal_div2 = true,
+};
+
static const struct of_device_id meson_uart_dt_match[] = {
{ .compatible = "amlogic,meson6-uart" },
{ .compatible = "amlogic,meson8-uart" },
@@ -794,7 +832,11 @@ static const struct of_device_id meson_uart_dt_match[] = {
},
{
.compatible = "amlogic,meson-s4-uart",
- .data = (void *)&meson_g12a_uart_data,
+ .data = (void *)&meson_s4_uart_data,
+ },
+ {
+ .compatible = "amlogic,meson-a1-uart",
+ .data = (void *)&meson_a1_uart_data,
},
{ /* sentinel */ },
};
@@ -809,33 +851,7 @@ static struct platform_driver meson_uart_platform_driver = {
},
};
-static int __init meson_uart_init(void)
-{
- int ret;
-
- ret = meson_serial_console_init();
- if (ret)
- return ret;
-
- ret = uart_register_driver(&meson_uart_driver);
- if (ret)
- return ret;
-
- ret = platform_driver_register(&meson_uart_platform_driver);
- if (ret)
- uart_unregister_driver(&meson_uart_driver);
-
- return ret;
-}
-
-static void __exit meson_uart_exit(void)
-{
- platform_driver_unregister(&meson_uart_platform_driver);
- uart_unregister_driver(&meson_uart_driver);
-}
-
-module_init(meson_uart_init);
-module_exit(meson_uart_exit);
+module_platform_driver(meson_uart_platform_driver);
MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
MODULE_DESCRIPTION("Amlogic Meson serial port driver");
diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c
index 44988a2941b8..70a910085e93 100644
--- a/drivers/tty/serial/milbeaut_usio.c
+++ b/drivers/tty/serial/milbeaut_usio.c
@@ -148,8 +148,7 @@ static void mlb_usio_enable_ms(struct uart_port *port)
static void mlb_usio_rx_chars(struct uart_port *port)
{
struct tty_port *ttyport = &port->state->port;
- unsigned long flag = 0;
- char ch = 0;
+ u8 flag = 0, ch = 0;
u8 status;
int max_count = 2;
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 384ca195e3d5..916507b8f31d 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -40,7 +40,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/clk.h>
#include <asm/mpc52xx.h>
diff --git a/drivers/tty/serial/mps2-uart.c b/drivers/tty/serial/mps2-uart.c
index 860d161fa594..ea5a7911cb15 100644
--- a/drivers/tty/serial/mps2-uart.c
+++ b/drivers/tty/serial/mps2-uart.c
@@ -16,7 +16,6 @@
#include <linux/console.h>
#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
@@ -539,8 +538,7 @@ static int mps2_init_port(struct platform_device *pdev,
struct resource *res;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mps_port->port.membase = devm_ioremap_resource(&pdev->dev, res);
+ mps_port->port.membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(mps_port->port.membase))
return PTR_ERR(mps_port->port.membase);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 31f739c7a08b..ea924e9b913b 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -876,18 +876,13 @@ static int uart_num_counter;
static int mvebu_uart_probe(struct platform_device *pdev)
{
- struct resource *reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
const struct of_device_id *match = of_match_device(mvebu_uart_of_match,
&pdev->dev);
struct uart_port *port;
struct mvebu_uart *mvuart;
+ struct resource *reg;
int id, irq;
- if (!reg) {
- dev_err(&pdev->dev, "no registers defined\n");
- return -EINVAL;
- }
-
/* Assume that all UART ports have a DT alias or none has */
id = of_alias_get_id(pdev->dev.of_node, "serial");
if (!pdev->dev.of_node || id < 0)
@@ -922,11 +917,11 @@ static int mvebu_uart_probe(struct platform_device *pdev)
*/
port->irq = 0;
port->irqflags = 0;
- port->mapbase = reg->start;
- port->membase = devm_ioremap_resource(&pdev->dev, reg);
+ port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &reg);
if (IS_ERR(port->membase))
return PTR_ERR(port->membase);
+ port->mapbase = reg->start;
mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart),
GFP_KERNEL);
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index a368f4293967..8eeecf8ad359 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -30,7 +30,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
@@ -616,9 +616,8 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s)
static void mxs_auart_rx_char(struct mxs_auart_port *s)
{
- int flag;
u32 stat;
- u8 c;
+ u8 c, flag;
c = mxs_read(s, REG_DATA);
stat = mxs_read(s, REG_STAT);
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 9be63a1f1f0c..0ead88c5a19a 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -437,7 +437,7 @@ static unsigned int check_modem_status(struct uart_omap_port *up)
static void serial_omap_rlsi(struct uart_omap_port *up, unsigned int lsr)
{
- unsigned int flag;
+ u8 flag;
/*
* Read one data character out to avoid stalling the receiver according
@@ -493,8 +493,7 @@ static void serial_omap_rlsi(struct uart_omap_port *up, unsigned int lsr)
static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr)
{
- unsigned char ch = 0;
- unsigned int flag;
+ u8 ch;
if (!(lsr & UART_LSR_DR))
return;
@@ -507,13 +506,12 @@ static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr)
return;
}
- flag = TTY_NORMAL;
up->port.icount.rx++;
if (uart_handle_sysrq_char(&up->port, ch))
return;
- uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, flag);
+ uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, TTY_NORMAL);
}
/**
@@ -1568,8 +1566,7 @@ static int serial_omap_probe(struct platform_device *pdev)
if (!up)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, mem);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index 196a4e678451..e308d5022b3f 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/init.h>
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 444fa4b654ac..73c60f5ea027 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -90,7 +90,7 @@ static void serial_pxa_stop_rx(struct uart_port *port)
static inline void receive_chars(struct uart_pxa_port *up, int *status)
{
- unsigned int ch, flag;
+ u8 ch, flag;
int max_count = 256;
do {
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index daaf2a64e7f1..b8aa4c1293ba 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -11,7 +11,6 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -126,6 +125,7 @@ struct qcom_geni_serial_port {
dma_addr_t rx_dma_addr;
bool setup;
unsigned int baud;
+ unsigned long clk_rate;
void *rx_buf;
u32 loopback;
bool brk;
@@ -591,7 +591,6 @@ static void qcom_geni_serial_stop_tx_dma(struct uart_port *uport)
{
struct qcom_geni_serial_port *port = to_dev_port(uport);
bool done;
- u32 m_irq_en;
if (!qcom_geni_serial_main_active(uport))
return;
@@ -603,12 +602,10 @@ static void qcom_geni_serial_stop_tx_dma(struct uart_port *uport)
port->tx_remaining = 0;
}
- m_irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
- writel(m_irq_en, uport->membase + SE_GENI_M_IRQ_EN);
geni_se_cancel_m_cmd(&port->se);
- done = qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
- S_CMD_CANCEL_EN, true);
+ done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_CANCEL_EN, true);
if (!done) {
geni_se_abort_m_cmd(&port->se);
done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
@@ -1245,10 +1242,11 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
goto out_restart_rx;
}
- dev_dbg(port->se.dev, "desired_rate-%u, clk_rate-%lu, clk_div-%u\n",
+ dev_dbg(port->se.dev, "desired_rate = %u, clk_rate = %lu, clk_div = %u\n",
baud * sampling_rate, clk_rate, clk_div);
uport->uartclk = clk_rate;
+ port->clk_rate = clk_rate;
dev_pm_opp_set_rate(uport->dev, clk_rate);
ser_clk_cfg = SER_CLK_EN;
ser_clk_cfg |= clk_div << CLK_DIV_SHFT;
@@ -1513,10 +1511,13 @@ static void qcom_geni_serial_pm(struct uart_port *uport,
if (new_state == UART_PM_STATE_ON && old_state == UART_PM_STATE_OFF) {
geni_icc_enable(&port->se);
+ if (port->clk_rate)
+ dev_pm_opp_set_rate(uport->dev, port->clk_rate);
geni_se_resources_on(&port->se);
} else if (new_state == UART_PM_STATE_OFF &&
old_state == UART_PM_STATE_ON) {
geni_se_resources_off(&port->se);
+ dev_pm_opp_set_rate(uport->dev, 0);
geni_icc_disable(&port->se);
}
}
@@ -1750,7 +1751,7 @@ static int qcom_geni_serial_sys_hib_resume(struct device *dev)
private_data = uport->private_data;
if (uart_console(uport)) {
- geni_icc_set_tag(&port->se, 0x7);
+ geni_icc_set_tag(&port->se, QCOM_ICC_TAG_ALWAYS);
geni_icc_set_bw(&port->se);
ret = uart_resume_port(private_data->drv, uport);
/*
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
index 749b873a5d99..de220ac8ca54 100644
--- a/drivers/tty/serial/rp2.c
+++ b/drivers/tty/serial/rp2.c
@@ -401,14 +401,14 @@ static void rp2_rx_chars(struct rp2_uart_port *up)
for (; bytes != 0; bytes--) {
u32 byte = readw(up->base + RP2_DATA_BYTE) | RP2_DUMMY_READ;
- char ch = byte & 0xff;
+ u8 ch = byte & 0xff;
if (likely(!(byte & RP2_DATA_BYTE_EXCEPTION_MASK))) {
if (!uart_handle_sysrq_char(&up->port, ch))
uart_insert_char(&up->port, byte, 0, ch,
TTY_NORMAL);
} else {
- char flag = TTY_NORMAL;
+ u8 flag = TTY_NORMAL;
if (byte & RP2_DATA_BYTE_BREAK_m)
flag = TTY_BREAK;
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index 55107bbc00ce..ad011f1e2f4d 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -180,7 +180,8 @@ static void sa1100_enable_ms(struct uart_port *port)
static void
sa1100_rx_chars(struct sa1100_port *sport)
{
- unsigned int status, ch, flg;
+ unsigned int status;
+ u8 ch, flg;
status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) |
UTSR0_TO_SM(UART_GET_UTSR0(sport));
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index b29e9dfd81a6..07fb8a9dac63 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -759,9 +759,10 @@ finish:
static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
{
struct uart_port *port = &ourport->port;
- unsigned int ufcon, ch, flag, ufstat, uerstat;
+ unsigned int ufcon, ufstat, uerstat;
unsigned int fifocnt = 0;
int max_count = port->fifosize;
+ u8 ch, flag;
while (max_count-- > 0) {
/*
@@ -2273,9 +2274,8 @@ static int s3c24xx_serial_resume_noirq(struct device *dev)
}
static const struct dev_pm_ops s3c24xx_serial_pm_ops = {
- .suspend = s3c24xx_serial_suspend,
- .resume = s3c24xx_serial_resume,
- .resume_noirq = s3c24xx_serial_resume_noirq,
+ SET_SYSTEM_SLEEP_PM_OPS(s3c24xx_serial_suspend, s3c24xx_serial_resume)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, s3c24xx_serial_resume_noirq)
};
#define SERIAL_SAMSUNG_PM_OPS (&s3c24xx_serial_pm_ops)
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index b6de0dc51f29..f3cd69346482 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -331,8 +331,9 @@ static void sbd_receive_chars(struct sbd_port *sport)
{
struct uart_port *uport = &sport->port;
struct uart_icount *icount;
- unsigned int status, ch, flag;
+ unsigned int status;
int count;
+ u8 ch, flag;
for (count = 16; count; count--) {
status = read_sbdchn(sport, R_DUART_STATUS);
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 2e7e7c409cf2..f61d98e09dc3 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -236,7 +236,8 @@
/* IOControl register bits (Only 750/760) */
#define SC16IS7XX_IOCONTROL_LATCH_BIT (1 << 0) /* Enable input latching */
-#define SC16IS7XX_IOCONTROL_MODEM_BIT (1 << 1) /* Enable GPIO[7:4] as modem pins */
+#define SC16IS7XX_IOCONTROL_MODEM_A_BIT (1 << 1) /* Enable GPIO[7:4] as modem A pins */
+#define SC16IS7XX_IOCONTROL_MODEM_B_BIT (1 << 2) /* Enable GPIO[3:0] as modem B pins */
#define SC16IS7XX_IOCONTROL_SRESET_BIT (1 << 3) /* Software Reset */
/* EFCR register bits */
@@ -301,12 +302,12 @@
/* Misc definitions */
#define SC16IS7XX_FIFO_SIZE (64)
#define SC16IS7XX_REG_SHIFT 2
+#define SC16IS7XX_GPIOS_PER_BANK 4
struct sc16is7xx_devtype {
char name[10];
int nr_gpio;
int nr_uart;
- int has_mctrl;
};
#define SC16IS7XX_RECONF_MD (1 << 0)
@@ -336,7 +337,9 @@ struct sc16is7xx_port {
struct clk *clk;
#ifdef CONFIG_GPIOLIB
struct gpio_chip gpio;
+ unsigned long gpio_valid_mask;
#endif
+ u8 mctrl_mask;
unsigned char buf[SC16IS7XX_FIFO_SIZE];
struct kthread_worker kworker;
struct task_struct *kworker_task;
@@ -447,35 +450,30 @@ static const struct sc16is7xx_devtype sc16is74x_devtype = {
.name = "SC16IS74X",
.nr_gpio = 0,
.nr_uart = 1,
- .has_mctrl = 0,
};
static const struct sc16is7xx_devtype sc16is750_devtype = {
.name = "SC16IS750",
- .nr_gpio = 4,
+ .nr_gpio = 8,
.nr_uart = 1,
- .has_mctrl = 1,
};
static const struct sc16is7xx_devtype sc16is752_devtype = {
.name = "SC16IS752",
- .nr_gpio = 0,
+ .nr_gpio = 8,
.nr_uart = 2,
- .has_mctrl = 1,
};
static const struct sc16is7xx_devtype sc16is760_devtype = {
.name = "SC16IS760",
- .nr_gpio = 4,
+ .nr_gpio = 8,
.nr_uart = 1,
- .has_mctrl = 1,
};
static const struct sc16is7xx_devtype sc16is762_devtype = {
.name = "SC16IS762",
- .nr_gpio = 0,
+ .nr_gpio = 8,
.nr_uart = 2,
- .has_mctrl = 1,
};
static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
@@ -488,6 +486,7 @@ static bool sc16is7xx_regmap_volatile(struct device *dev, unsigned int reg)
case SC16IS7XX_TXLVL_REG:
case SC16IS7XX_RXLVL_REG:
case SC16IS7XX_IOSTATE_REG:
+ case SC16IS7XX_IOCONTROL_REG:
return true;
default:
break;
@@ -578,8 +577,9 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
unsigned int iir)
{
struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
- unsigned int lsr = 0, ch, flag, bytes_read, i;
+ unsigned int lsr = 0, bytes_read, i;
bool read_lsr = (iir == SC16IS7XX_IIR_RLSE_SRC) ? true : false;
+ u8 ch, flag;
if (unlikely(rxlen >= sizeof(s->buf))) {
dev_warn_ratelimited(port->dev,
@@ -1342,14 +1342,113 @@ static int sc16is7xx_gpio_direction_output(struct gpio_chip *chip,
state |= BIT(offset);
else
state &= ~BIT(offset);
- sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state);
+
+ /*
+ * If we write IOSTATE first, and then IODIR, the output value is not
+ * transferred to the corresponding I/O pin.
+ * The datasheet states that each register bit will be transferred to
+ * the corresponding I/O pin programmed as output when writing to
+ * IOSTATE. Therefore, configure direction first with IODIR, and then
+ * set value after with IOSTATE.
+ */
sc16is7xx_port_update(port, SC16IS7XX_IODIR_REG, BIT(offset),
BIT(offset));
+ sc16is7xx_port_write(port, SC16IS7XX_IOSTATE_REG, state);
return 0;
}
+
+static int sc16is7xx_gpio_init_valid_mask(struct gpio_chip *chip,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct sc16is7xx_port *s = gpiochip_get_data(chip);
+
+ *valid_mask = s->gpio_valid_mask;
+
+ return 0;
+}
+
+static int sc16is7xx_setup_gpio_chip(struct sc16is7xx_port *s)
+{
+ struct device *dev = s->p[0].port.dev;
+
+ if (!s->devtype->nr_gpio)
+ return 0;
+
+ switch (s->mctrl_mask) {
+ case 0:
+ s->gpio_valid_mask = GENMASK(7, 0);
+ break;
+ case SC16IS7XX_IOCONTROL_MODEM_A_BIT:
+ s->gpio_valid_mask = GENMASK(3, 0);
+ break;
+ case SC16IS7XX_IOCONTROL_MODEM_B_BIT:
+ s->gpio_valid_mask = GENMASK(7, 4);
+ break;
+ default:
+ break;
+ }
+
+ if (s->gpio_valid_mask == 0)
+ return 0;
+
+ s->gpio.owner = THIS_MODULE;
+ s->gpio.parent = dev;
+ s->gpio.label = dev_name(dev);
+ s->gpio.init_valid_mask = sc16is7xx_gpio_init_valid_mask;
+ s->gpio.direction_input = sc16is7xx_gpio_direction_input;
+ s->gpio.get = sc16is7xx_gpio_get;
+ s->gpio.direction_output = sc16is7xx_gpio_direction_output;
+ s->gpio.set = sc16is7xx_gpio_set;
+ s->gpio.base = -1;
+ s->gpio.ngpio = s->devtype->nr_gpio;
+ s->gpio.can_sleep = 1;
+
+ return gpiochip_add_data(&s->gpio, s);
+}
#endif
+/*
+ * Configure ports designated to operate as modem control lines.
+ */
+static int sc16is7xx_setup_mctrl_ports(struct sc16is7xx_port *s)
+{
+ int i;
+ int ret;
+ int count;
+ u32 mctrl_port[2];
+ struct device *dev = s->p[0].port.dev;
+
+ count = device_property_count_u32(dev, "nxp,modem-control-line-ports");
+ if (count < 0 || count > ARRAY_SIZE(mctrl_port))
+ return 0;
+
+ ret = device_property_read_u32_array(dev, "nxp,modem-control-line-ports",
+ mctrl_port, count);
+ if (ret)
+ return ret;
+
+ s->mctrl_mask = 0;
+
+ for (i = 0; i < count; i++) {
+ /* Use GPIO lines as modem control lines */
+ if (mctrl_port[i] == 0)
+ s->mctrl_mask |= SC16IS7XX_IOCONTROL_MODEM_A_BIT;
+ else if (mctrl_port[i] == 1)
+ s->mctrl_mask |= SC16IS7XX_IOCONTROL_MODEM_B_BIT;
+ }
+
+ if (s->mctrl_mask)
+ regmap_update_bits(
+ s->regmap,
+ SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
+ SC16IS7XX_IOCONTROL_MODEM_A_BIT |
+ SC16IS7XX_IOCONTROL_MODEM_B_BIT, s->mctrl_mask);
+
+ return 0;
+}
+
static const struct serial_rs485 sc16is7xx_rs485_supported = {
.flags = SER_RS485_ENABLED | SER_RS485_RTS_AFTER_SEND,
.delay_rts_before_send = 1,
@@ -1436,6 +1535,12 @@ static int sc16is7xx_probe(struct device *dev,
s->p[i].port.fifosize = SC16IS7XX_FIFO_SIZE;
s->p[i].port.flags = UPF_FIXED_TYPE | UPF_LOW_LATENCY;
s->p[i].port.iobase = i;
+ /*
+ * Use all ones as membase to make sure uart_configure_port() in
+ * serial_core.c does not abort for SPI/I2C devices where the
+ * membase address is not applicable.
+ */
+ s->p[i].port.membase = (void __iomem *)~0;
s->p[i].port.iotype = UPIO_PORT;
s->p[i].port.uartclk = freq;
s->p[i].port.rs485_config = sc16is7xx_config_rs485;
@@ -1449,6 +1554,10 @@ static int sc16is7xx_probe(struct device *dev,
goto out_ports;
}
+ ret = uart_get_rs485_mode(&s->p[i].port);
+ if (ret)
+ goto out_ports;
+
/* Disable all interrupts */
sc16is7xx_port_write(&s->p[i].port, SC16IS7XX_IER_REG, 0);
/* Disable TX/RX */
@@ -1456,12 +1565,6 @@ static int sc16is7xx_probe(struct device *dev,
SC16IS7XX_EFCR_RXDISABLE_BIT |
SC16IS7XX_EFCR_TXDISABLE_BIT);
- /* Use GPIO lines as modem status registers */
- if (devtype->has_mctrl)
- sc16is7xx_port_write(&s->p[i].port,
- SC16IS7XX_IOCONTROL_REG,
- SC16IS7XX_IOCONTROL_MODEM_BIT);
-
/* Initialize kthread work structs */
kthread_init_work(&s->p[i].tx_work, sc16is7xx_tx_proc);
kthread_init_work(&s->p[i].reg_work, sc16is7xx_reg_proc);
@@ -1499,23 +1602,14 @@ static int sc16is7xx_probe(struct device *dev,
s->p[u].irda_mode = true;
}
+ ret = sc16is7xx_setup_mctrl_ports(s);
+ if (ret)
+ goto out_ports;
+
#ifdef CONFIG_GPIOLIB
- if (devtype->nr_gpio) {
- /* Setup GPIO cotroller */
- s->gpio.owner = THIS_MODULE;
- s->gpio.parent = dev;
- s->gpio.label = dev_name(dev);
- s->gpio.direction_input = sc16is7xx_gpio_direction_input;
- s->gpio.get = sc16is7xx_gpio_get;
- s->gpio.direction_output = sc16is7xx_gpio_direction_output;
- s->gpio.set = sc16is7xx_gpio_set;
- s->gpio.base = -1;
- s->gpio.ngpio = devtype->nr_gpio;
- s->gpio.can_sleep = 1;
- ret = gpiochip_add_data(&s->gpio, s);
- if (ret)
- goto out_thread;
- }
+ ret = sc16is7xx_setup_gpio_chip(s);
+ if (ret)
+ goto out_ports;
#endif
/*
@@ -1538,10 +1632,8 @@ static int sc16is7xx_probe(struct device *dev,
return 0;
#ifdef CONFIG_GPIOLIB
- if (devtype->nr_gpio)
+ if (s->gpio_valid_mask)
gpiochip_remove(&s->gpio);
-
-out_thread:
#endif
out_ports:
@@ -1564,7 +1656,7 @@ static void sc16is7xx_remove(struct device *dev)
int i;
#ifdef CONFIG_GPIOLIB
- if (s->devtype->nr_gpio)
+ if (s->gpio_valid_mask)
gpiochip_remove(&s->gpio);
#endif
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index 4f2fc5f7bb19..2be2c1098025 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -383,8 +383,7 @@ static void sccnxp_set_bit(struct uart_port *port, int sig, int state)
static void sccnxp_handle_rx(struct uart_port *port)
{
- u8 sr;
- unsigned int ch, flag;
+ u8 sr, ch, flag;
for (;;) {
sr = sccnxp_port_read(port, SCCNXP_SR_REG);
@@ -880,14 +879,14 @@ MODULE_DEVICE_TABLE(platform, sccnxp_id_table);
static int sccnxp_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct sccnxp_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct resource *res;
int i, ret, uartclk;
struct sccnxp_port *s;
void __iomem *membase;
struct clk *clk;
- membase = devm_ioremap_resource(&pdev->dev, res);
+ membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(membase))
return PTR_ERR(membase);
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 1cf08b33456c..d4ec943cb8e9 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -20,7 +20,6 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -434,10 +433,10 @@ static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
return 0;
}
-static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
+static u8 tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
unsigned long lsr)
{
- char flag = TTY_NORMAL;
+ u8 flag = TTY_NORMAL;
if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
if (lsr & UART_LSR_OE) {
@@ -642,9 +641,8 @@ static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
struct tty_port *port)
{
do {
- char flag = TTY_NORMAL;
unsigned long lsr = 0;
- unsigned char ch;
+ u8 ch, flag = TTY_NORMAL;
lsr = tegra_uart_read(tup, UART_LSR);
if (!(lsr & UART_LSR_DR))
@@ -998,7 +996,11 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
tup->ier_shadow = 0;
tup->current_baud = 0;
- clk_prepare_enable(tup->uart_clk);
+ ret = clk_prepare_enable(tup->uart_clk);
+ if (ret) {
+ dev_err(tup->uport.dev, "could not enable clk\n");
+ return ret;
+ }
/* Reset the UART controller to clear all previous status.*/
reset_control_assert(tup->rst);
@@ -1579,22 +1581,15 @@ static int tegra_uart_probe(struct platform_device *pdev)
tup->cdata = cdata;
platform_set_drvdata(pdev, tup);
- resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!resource) {
- dev_err(&pdev->dev, "No IO memory resource\n");
- return -ENODEV;
- }
- u->mapbase = resource->start;
- u->membase = devm_ioremap_resource(&pdev->dev, resource);
+ u->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &resource);
if (IS_ERR(u->membase))
return PTR_ERR(u->membase);
+ u->mapbase = resource->start;
tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(tup->uart_clk)) {
- dev_err(&pdev->dev, "Couldn't get the clock\n");
- return PTR_ERR(tup->uart_clk);
- }
+ if (IS_ERR(tup->uart_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(tup->uart_clk), "Couldn't get the clock");
tup->rst = devm_reset_control_get_exclusive(&pdev->dev, "serial");
if (IS_ERR(tup->rst)) {
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 831d033611e6..7bdc21d5e13b 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -133,9 +133,8 @@ static void uart_stop(struct tty_struct *tty)
uart_port_unlock(port, flags);
}
-static void __uart_start(struct tty_struct *tty)
+static void __uart_start(struct uart_state *state)
{
- struct uart_state *state = tty->driver_data;
struct uart_port *port = state->uart_port;
struct serial_port_device *port_dev;
int err;
@@ -170,7 +169,7 @@ static void uart_start(struct tty_struct *tty)
unsigned long flags;
port = uart_port_lock(state, flags);
- __uart_start(tty);
+ __uart_start(state);
uart_port_unlock(port, flags);
}
@@ -239,7 +238,7 @@ static void uart_change_line_settings(struct tty_struct *tty, struct uart_state
if (!old_hw_stopped)
uport->ops->stop_tx(uport);
else
- __uart_start(tty);
+ __uart_start(state);
}
spin_unlock_irq(&uport->lock);
}
@@ -552,7 +551,7 @@ uart_get_divisor(struct uart_port *port, unsigned int baud)
}
EXPORT_SYMBOL(uart_get_divisor);
-static int uart_put_char(struct tty_struct *tty, unsigned char c)
+static int uart_put_char(struct tty_struct *tty, u8 c)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
@@ -581,8 +580,7 @@ static void uart_flush_chars(struct tty_struct *tty)
uart_start(tty);
}
-static int uart_write(struct tty_struct *tty,
- const unsigned char *buf, int count)
+static ssize_t uart_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct uart_state *state = tty->driver_data;
struct uart_port *port;
@@ -594,10 +592,8 @@ static int uart_write(struct tty_struct *tty,
* This means you called this function _after_ the port was
* closed. No cookie for you.
*/
- if (!state) {
- WARN_ON(1);
+ if (WARN_ON(!state))
return -EL3HLT;
- }
port = uart_port_lock(state, flags);
circ = &state->xmit;
@@ -619,7 +615,7 @@ static int uart_write(struct tty_struct *tty,
ret += c;
}
- __uart_start(tty);
+ __uart_start(state);
uart_port_unlock(port, flags);
return ret;
}
@@ -660,10 +656,8 @@ static void uart_flush_buffer(struct tty_struct *tty)
* This means you called this function _after_ the port was
* closed. No cookie for you.
*/
- if (!state) {
- WARN_ON(1);
+ if (WARN_ON(!state))
return;
- }
pr_debug("uart_flush_buffer(%d) called\n", tty->index);
@@ -3486,7 +3480,7 @@ EXPORT_SYMBOL_GPL(uart_handle_cts_change);
* @flag: flag for the character (see TTY_NORMAL and friends)
*/
void uart_insert_char(struct uart_port *port, unsigned int status,
- unsigned int overrun, unsigned int ch, unsigned int flag)
+ unsigned int overrun, u8 ch, u8 flag)
{
struct tty_port *tport = &port->state->port;
@@ -3505,7 +3499,7 @@ void uart_insert_char(struct uart_port *port, unsigned int status,
EXPORT_SYMBOL_GPL(uart_insert_char);
#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
-static const char sysrq_toggle_seq[] = CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE;
+static const u8 sysrq_toggle_seq[] = CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE;
static void uart_sysrq_on(struct work_struct *w)
{
@@ -3528,7 +3522,7 @@ static DECLARE_WORK(sysrq_enable_work, uart_sysrq_on);
* Returns: %false if @ch is out of enabling sequence and should be
* handled some other way, %true if @ch was consumed.
*/
-bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
+bool uart_try_toggle_sysrq(struct uart_port *port, u8 ch)
{
int sysrq_toggle_seq_len = strlen(sysrq_toggle_seq);
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index eab387b01e36..be08fb6f749c 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -246,11 +246,10 @@ static void serial_txx9_initialize(struct uart_port *up)
static inline void
receive_chars(struct uart_port *up, unsigned int *status)
{
- unsigned char ch;
unsigned int disr = *status;
int max_count = 256;
- char flag;
unsigned int next_ignore_status_mask;
+ u8 ch, flag;
do {
ch = sio_in(up, TXX9_SIRFIFO);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 8b7a42e05d6d..a560b729fa3b 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -35,7 +35,6 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index a19db49327e2..d195c5de52e7 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -402,9 +402,9 @@ static char __ssp_receive_char(struct sifive_serial_port *ssp, char *is_empty)
*/
static void __ssp_receive_chars(struct sifive_serial_port *ssp)
{
- unsigned char ch;
char is_empty;
int c;
+ u8 ch;
for (c = SIFIVE_RX_FIFO_DEPTH; c > 0; --c) {
ch = __ssp_receive_char(ssp, &is_empty);
@@ -917,12 +917,9 @@ static int sifive_serial_probe(struct platform_device *pdev)
if (irq < 0)
return -EPROBE_DEFER;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(base)) {
- dev_err(&pdev->dev, "could not acquire device memory\n");
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
+ if (IS_ERR(base))
return PTR_ERR(base);
- }
clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
@@ -1022,6 +1019,23 @@ static int sifive_serial_remove(struct platform_device *dev)
return 0;
}
+static int sifive_serial_suspend(struct device *dev)
+{
+ struct sifive_serial_port *ssp = dev_get_drvdata(dev);
+
+ return uart_suspend_port(&sifive_serial_uart_driver, &ssp->port);
+}
+
+static int sifive_serial_resume(struct device *dev)
+{
+ struct sifive_serial_port *ssp = dev_get_drvdata(dev);
+
+ return uart_resume_port(&sifive_serial_uart_driver, &ssp->port);
+}
+
+DEFINE_SIMPLE_DEV_PM_OPS(sifive_uart_pm_ops, sifive_serial_suspend,
+ sifive_serial_resume);
+
static const struct of_device_id sifive_serial_of_match[] = {
{ .compatible = "sifive,fu540-c000-uart0" },
{ .compatible = "sifive,uart0" },
@@ -1034,7 +1048,8 @@ static struct platform_driver sifive_serial_platform_driver = {
.remove = sifive_serial_remove,
.driver = {
.name = SIFIVE_SERIAL_NAME,
- .of_match_table = of_match_ptr(sifive_serial_of_match),
+ .pm = pm_sleep_ptr(&sifive_uart_pm_ops),
+ .of_match_table = sifive_serial_of_match,
},
};
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index b58f51296ace..f328fa57231f 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -364,7 +364,7 @@ static void sprd_rx_free_buf(struct sprd_uart_port *sp)
if (sp->rx_dma.virt)
dma_free_coherent(sp->port.dev, SPRD_UART_RX_SIZE,
sp->rx_dma.virt, sp->rx_dma.phys_addr);
-
+ sp->rx_dma.virt = NULL;
}
static int sprd_rx_dma_config(struct uart_port *port, u32 burst)
@@ -558,7 +558,7 @@ static void sprd_break_ctl(struct uart_port *port, int break_state)
}
static int handle_lsr_errors(struct uart_port *port,
- unsigned int *flag,
+ u8 *flag,
unsigned int *lsr)
{
int ret = 0;
@@ -594,7 +594,8 @@ static inline void sprd_rx(struct uart_port *port)
struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port,
port);
struct tty_port *tty = &port->state->port;
- unsigned int ch, flag, lsr, max_count = SPRD_TIMEOUT;
+ unsigned int lsr, max_count = SPRD_TIMEOUT;
+ u8 ch, flag;
if (sp->rx_dma.enable) {
sprd_uart_dma_irq(port);
@@ -1106,7 +1107,7 @@ static bool sprd_uart_is_console(struct uart_port *uport)
static int sprd_clk_init(struct uart_port *uport)
{
struct clk *clk_uart, *clk_parent;
- struct sprd_uart_port *u = sprd_port[uport->line];
+ struct sprd_uart_port *u = container_of(uport, struct sprd_uart_port, port);
clk_uart = devm_clk_get(uport->dev, "uart");
if (IS_ERR(clk_uart)) {
@@ -1149,22 +1150,22 @@ static int sprd_probe(struct platform_device *pdev)
{
struct resource *res;
struct uart_port *up;
+ struct sprd_uart_port *sport;
int irq;
int index;
int ret;
index = of_alias_get_id(pdev->dev.of_node, "serial");
- if (index < 0 || index >= ARRAY_SIZE(sprd_port)) {
+ if (index < 0 || index >= UART_NR_MAX) {
dev_err(&pdev->dev, "got a wrong serial alias id %d\n", index);
return -EINVAL;
}
- sprd_port[index] = devm_kzalloc(&pdev->dev, sizeof(*sprd_port[index]),
- GFP_KERNEL);
- if (!sprd_port[index])
+ sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
+ if (!sport)
return -ENOMEM;
- up = &sprd_port[index]->port;
+ up = &sport->port;
up->dev = &pdev->dev;
up->line = index;
up->type = PORT_SPRD;
@@ -1179,8 +1180,7 @@ static int sprd_probe(struct platform_device *pdev)
if (ret)
return ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- up->membase = devm_ioremap_resource(&pdev->dev, res);
+ up->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(up->membase))
return PTR_ERR(up->membase);
@@ -1195,7 +1195,7 @@ static int sprd_probe(struct platform_device *pdev)
* Allocate one dma buffer to prepare for receive transfer, in case
* memory allocation failure at runtime.
*/
- ret = sprd_rx_alloc_buf(sprd_port[index]);
+ ret = sprd_rx_alloc_buf(sport);
if (ret)
return ret;
@@ -1203,17 +1203,27 @@ static int sprd_probe(struct platform_device *pdev)
ret = uart_register_driver(&sprd_uart_driver);
if (ret < 0) {
pr_err("Failed to register SPRD-UART driver\n");
- return ret;
+ goto free_rx_buf;
}
}
+
sprd_ports_num++;
+ sprd_port[index] = sport;
ret = uart_add_one_port(&sprd_uart_driver, up);
if (ret)
- sprd_remove(pdev);
+ goto clean_port;
platform_set_drvdata(pdev, up);
+ return 0;
+
+clean_port:
+ sprd_port[index] = NULL;
+ if (--sprd_ports_num == 0)
+ uart_unregister_driver(&sprd_uart_driver);
+free_rx_buf:
+ sprd_rx_free_buf(sport);
return ret;
}
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index aa471c9c24d9..92b9f6894006 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -250,7 +250,7 @@ static void asc_receive_chars(struct uart_port *port)
struct tty_port *tport = &port->state->port;
unsigned long status, mode;
unsigned long c = 0;
- char flag;
+ u8 flag;
bool ignore_pe = false;
/*
@@ -691,8 +691,7 @@ static int asc_init_port(struct asc_port *ascport,
port->irq = platform_get_irq(pdev, 0);
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_ST_ASC_CONSOLE);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- port->membase = devm_ioremap_resource(&pdev->dev, res);
+ port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(port->membase))
return PTR_ERR(port->membase);
port->mapbase = res->start;
@@ -704,7 +703,9 @@ static int asc_init_port(struct asc_port *ascport,
if (WARN_ON(IS_ERR(ascport->clk)))
return -EINVAL;
/* ensure that clk rate is correct by enabling the clk */
- clk_prepare_enable(ascport->clk);
+ ret = clk_prepare_enable(ascport->clk);
+ if (ret)
+ return ret;
ascport->port.uartclk = clk_get_rate(ascport->clk);
WARN_ON(ascport->port.uartclk == 0);
clk_disable_unprepare(ascport->clk);
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index e9e11a259621..5e9cf0c48813 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -289,15 +289,57 @@ static int stm32_usart_init_rs485(struct uart_port *port,
return uart_get_rs485_mode(port);
}
-static bool stm32_usart_rx_dma_enabled(struct uart_port *port)
+static bool stm32_usart_rx_dma_started(struct stm32_port *stm32_port)
{
- struct stm32_port *stm32_port = to_stm32_port(port);
- const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ return stm32_port->rx_ch ? stm32_port->rx_dma_busy : false;
+}
+
+static void stm32_usart_rx_dma_terminate(struct stm32_port *stm32_port)
+{
+ dmaengine_terminate_async(stm32_port->rx_ch);
+ stm32_port->rx_dma_busy = false;
+}
+
+static int stm32_usart_dma_pause_resume(struct stm32_port *stm32_port,
+ struct dma_chan *chan,
+ enum dma_status expected_status,
+ int dmaengine_pause_or_resume(struct dma_chan *),
+ bool stm32_usart_xx_dma_started(struct stm32_port *),
+ void stm32_usart_xx_dma_terminate(struct stm32_port *))
+{
+ struct uart_port *port = &stm32_port->port;
+ enum dma_status dma_status;
+ int ret;
+
+ if (!stm32_usart_xx_dma_started(stm32_port))
+ return -EPERM;
+
+ dma_status = dmaengine_tx_status(chan, chan->cookie, NULL);
+ if (dma_status != expected_status)
+ return -EAGAIN;
- if (!stm32_port->rx_ch)
- return false;
+ ret = dmaengine_pause_or_resume(chan);
+ if (ret) {
+ dev_err(port->dev, "DMA failed with error code: %d\n", ret);
+ stm32_usart_xx_dma_terminate(stm32_port);
+ }
+ return ret;
+}
- return !!(readl_relaxed(port->membase + ofs->cr3) & USART_CR3_DMAR);
+static int stm32_usart_rx_dma_pause(struct stm32_port *stm32_port)
+{
+ return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
+ DMA_IN_PROGRESS, dmaengine_pause,
+ stm32_usart_rx_dma_started,
+ stm32_usart_rx_dma_terminate);
+}
+
+static int stm32_usart_rx_dma_resume(struct stm32_port *stm32_port)
+{
+ return stm32_usart_dma_pause_resume(stm32_port, stm32_port->rx_ch,
+ DMA_PAUSED, dmaengine_resume,
+ stm32_usart_rx_dma_started,
+ stm32_usart_rx_dma_terminate);
}
/* Return true when data is pending (in pio mode), and false when no data is pending. */
@@ -310,7 +352,7 @@ static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
/* Get pending characters in RDR or FIFO */
if (*sr & USART_SR_RXNE) {
/* Get all pending characters from the RDR or the FIFO when using interrupts */
- if (!stm32_usart_rx_dma_enabled(port))
+ if (!stm32_usart_rx_dma_started(stm32_port))
return true;
/* Handle only RX data errors when using DMA */
@@ -321,7 +363,7 @@ static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
return false;
}
-static unsigned long stm32_usart_get_char_pio(struct uart_port *port)
+static u8 stm32_usart_get_char_pio(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
@@ -338,10 +380,9 @@ static unsigned int stm32_usart_receive_chars_pio(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
- unsigned long c;
unsigned int size = 0;
u32 sr;
- char flag;
+ u8 c, flag;
while (stm32_usart_pending_rx_pio(port, &sr)) {
sr |= USART_SR_DUMMY_RX;
@@ -456,11 +497,12 @@ static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force
u32 sr;
unsigned int size = 0;
- if (stm32_usart_rx_dma_enabled(port) || force_dma_flush) {
+ if (stm32_usart_rx_dma_started(stm32_port) || force_dma_flush) {
rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
stm32_port->rx_ch->cookie,
&stm32_port->rx_dma_state);
- if (rx_dma_status == DMA_IN_PROGRESS) {
+ if (rx_dma_status == DMA_IN_PROGRESS ||
+ rx_dma_status == DMA_PAUSED) {
/* Empty DMA buffer */
size = stm32_usart_receive_chars_dma(port);
sr = readl_relaxed(port->membase + ofs->isr);
@@ -476,8 +518,7 @@ static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force
}
} else {
/* Disable RX DMA */
- dmaengine_terminate_async(stm32_port->rx_ch);
- stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
+ stm32_usart_rx_dma_terminate(stm32_port);
/* Fall back to interrupt mode */
dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
size = stm32_usart_receive_chars_pio(port);
@@ -489,6 +530,76 @@ static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force
return size;
}
+static void stm32_usart_rx_dma_complete(void *arg)
+{
+ struct uart_port *port = arg;
+ struct tty_port *tport = &port->state->port;
+ unsigned int size;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+ size = stm32_usart_receive_chars(port, false);
+ uart_unlock_and_check_sysrq_irqrestore(port, flags);
+ if (size)
+ tty_flip_buffer_push(tport);
+}
+
+static int stm32_usart_rx_dma_start_or_resume(struct uart_port *port)
+{
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct dma_async_tx_descriptor *desc;
+ enum dma_status rx_dma_status;
+ int ret;
+
+ if (stm32_port->throttled)
+ return 0;
+
+ if (stm32_port->rx_dma_busy) {
+ rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
+ stm32_port->rx_ch->cookie,
+ NULL);
+ if (rx_dma_status == DMA_IN_PROGRESS)
+ return 0;
+
+ if (rx_dma_status == DMA_PAUSED && !stm32_usart_rx_dma_resume(stm32_port))
+ return 0;
+
+ dev_err(port->dev, "DMA failed : status error.\n");
+ stm32_usart_rx_dma_terminate(stm32_port);
+ }
+
+ stm32_port->rx_dma_busy = true;
+
+ stm32_port->last_res = RX_BUF_L;
+ /* Prepare a DMA cyclic transaction */
+ desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
+ stm32_port->rx_dma_buf,
+ RX_BUF_L, RX_BUF_P,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(port->dev, "rx dma prep cyclic failed\n");
+ stm32_port->rx_dma_busy = false;
+ return -ENODEV;
+ }
+
+ desc->callback = stm32_usart_rx_dma_complete;
+ desc->callback_param = port;
+
+ /* Push current DMA transaction in the pending queue */
+ ret = dma_submit_error(dmaengine_submit(desc));
+ if (ret) {
+ dmaengine_terminate_sync(stm32_port->rx_ch);
+ stm32_port->rx_dma_busy = false;
+ return ret;
+ }
+
+ /* Issue pending DMA requests */
+ dma_async_issue_pending(stm32_port->rx_ch);
+
+ return 0;
+}
+
static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
{
dmaengine_terminate_async(stm32_port->tx_ch);
@@ -507,21 +618,28 @@ static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
return stm32_port->tx_dma_busy;
}
-static bool stm32_usart_tx_dma_enabled(struct stm32_port *stm32_port)
+static int stm32_usart_tx_dma_pause(struct stm32_port *stm32_port)
{
- const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
+ DMA_IN_PROGRESS, dmaengine_pause,
+ stm32_usart_tx_dma_started,
+ stm32_usart_tx_dma_terminate);
+}
- return !!(readl_relaxed(stm32_port->port.membase + ofs->cr3) & USART_CR3_DMAT);
+static int stm32_usart_tx_dma_resume(struct stm32_port *stm32_port)
+{
+ return stm32_usart_dma_pause_resume(stm32_port, stm32_port->tx_ch,
+ DMA_PAUSED, dmaengine_resume,
+ stm32_usart_tx_dma_started,
+ stm32_usart_tx_dma_terminate);
}
static void stm32_usart_tx_dma_complete(void *arg)
{
struct uart_port *port = arg;
struct stm32_port *stm32port = to_stm32_port(port);
- const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
unsigned long flags;
- stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
stm32_usart_tx_dma_terminate(stm32port);
/* Let's see if we have pending data to send */
@@ -553,20 +671,6 @@ static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
}
-static void stm32_usart_rx_dma_complete(void *arg)
-{
- struct uart_port *port = arg;
- struct tty_port *tport = &port->state->port;
- unsigned int size;
- unsigned long flags;
-
- spin_lock_irqsave(&port->lock, flags);
- size = stm32_usart_receive_chars(port, false);
- uart_unlock_and_check_sysrq_irqrestore(port, flags);
- if (size)
- tty_flip_buffer_push(tport);
-}
-
static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -592,9 +696,6 @@ static void stm32_usart_transmit_chars_pio(struct uart_port *port)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
struct circ_buf *xmit = &port->state->xmit;
- if (stm32_usart_tx_dma_enabled(stm32_port))
- stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
-
while (!uart_circ_empty(xmit)) {
/* Check that TDR is empty before filling FIFO */
if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
@@ -613,14 +714,15 @@ static void stm32_usart_transmit_chars_pio(struct uart_port *port)
static void stm32_usart_transmit_chars_dma(struct uart_port *port)
{
struct stm32_port *stm32port = to_stm32_port(port);
- const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
struct circ_buf *xmit = &port->state->xmit;
struct dma_async_tx_descriptor *desc = NULL;
unsigned int count;
+ int ret;
if (stm32_usart_tx_dma_started(stm32port)) {
- if (!stm32_usart_tx_dma_enabled(stm32port))
- stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
+ ret = stm32_usart_tx_dma_resume(stm32port);
+ if (ret < 0 && ret != -EAGAIN)
+ goto fallback_err;
return;
}
@@ -665,8 +767,10 @@ static void stm32_usart_transmit_chars_dma(struct uart_port *port)
desc->callback_param = port;
/* Push current DMA TX transaction in the pending queue */
- if (dma_submit_error(dmaengine_submit(desc))) {
- /* dma no yet started, safe to free resources */
+ /* DMA not yet started, safe to free resources */
+ ret = dma_submit_error(dmaengine_submit(desc));
+ if (ret) {
+ dev_err(port->dev, "DMA failed with error code: %d\n", ret);
stm32_usart_tx_dma_terminate(stm32port);
goto fallback_err;
}
@@ -674,8 +778,6 @@ static void stm32_usart_transmit_chars_dma(struct uart_port *port)
/* Issue pending DMA TX requests */
dma_async_issue_pending(stm32port->tx_ch);
- stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
-
uart_xmit_advance(port, count);
return;
@@ -701,9 +803,8 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
}
if (port->x_char) {
- if (stm32_usart_tx_dma_started(stm32_port) &&
- stm32_usart_tx_dma_enabled(stm32_port))
- stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+ /* dma terminate may have been called in case of dma pause failure */
+ stm32_usart_tx_dma_pause(stm32_port);
/* Check that TDR is empty before filling FIFO */
ret =
@@ -717,8 +818,9 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
writel_relaxed(port->x_char, port->membase + ofs->tdr);
port->x_char = 0;
port->icount.tx++;
- if (stm32_usart_tx_dma_started(stm32_port))
- stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
+
+ /* dma terminate may have been called in case of dma resume failure */
+ stm32_usart_tx_dma_resume(stm32_port);
return;
}
@@ -785,8 +887,8 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
* line has been masked by HW and rx data are stacking in FIFO.
*/
if (!stm32_port->throttled) {
- if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_enabled(port)) ||
- ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_enabled(port))) {
+ if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
+ ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
spin_lock(&port->lock);
size = stm32_usart_receive_chars(port, false);
uart_unlock_and_check_sysrq(port);
@@ -802,7 +904,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
}
/* Receiver timeout irq for DMA RX */
- if (stm32_usart_rx_dma_enabled(port) && !stm32_port->throttled) {
+ if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
spin_lock(&port->lock);
size = stm32_usart_receive_chars(port, false);
uart_unlock_and_check_sysrq(port);
@@ -851,11 +953,11 @@ static void stm32_usart_disable_ms(struct uart_port *port)
static void stm32_usart_stop_tx(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
stm32_usart_tx_interrupt_disable(port);
- if (stm32_usart_tx_dma_started(stm32_port) && stm32_usart_tx_dma_enabled(stm32_port))
- stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+
+ /* dma terminate may have been called in case of dma pause failure */
+ stm32_usart_tx_dma_pause(stm32_port);
stm32_usart_rs485_rts_disable(port);
}
@@ -879,12 +981,9 @@ static void stm32_usart_start_tx(struct uart_port *port)
static void stm32_usart_flush_buffer(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
- const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
- if (stm32_port->tx_ch) {
+ if (stm32_port->tx_ch)
stm32_usart_tx_dma_terminate(stm32_port);
- stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
- }
}
/* Throttle the remote when input buffer is about to overflow. */
@@ -897,11 +996,10 @@ static void stm32_usart_throttle(struct uart_port *port)
spin_lock_irqsave(&port->lock, flags);
/*
- * Disable DMA request line if enabled, so the RX data gets queued into the FIFO.
+ * Pause DMA transfer, so the RX data gets queued into the FIFO.
* Hardware flow control is triggered when RX FIFO is full.
*/
- if (stm32_usart_rx_dma_enabled(port))
- stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
+ stm32_usart_rx_dma_pause(stm32_port);
stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
if (stm32_port->cr3_irq)
@@ -923,14 +1021,15 @@ static void stm32_usart_unthrottle(struct uart_port *port)
if (stm32_port->cr3_irq)
stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
+ stm32_port->throttled = false;
+
/*
- * Switch back to DMA mode (re-enable DMA request line).
+ * Switch back to DMA mode (resume DMA).
* Hardware flow control is stopped when FIFO is not full any more.
*/
if (stm32_port->rx_ch)
- stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
+ stm32_usart_rx_dma_start_or_resume(port);
- stm32_port->throttled = false;
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -941,8 +1040,7 @@ static void stm32_usart_stop_rx(struct uart_port *port)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
/* Disable DMA request line. */
- if (stm32_port->rx_ch)
- stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
+ stm32_usart_rx_dma_pause(stm32_port);
stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
if (stm32_port->cr3_irq)
@@ -954,48 +1052,6 @@ static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
{
}
-static int stm32_usart_start_rx_dma_cyclic(struct uart_port *port)
-{
- struct stm32_port *stm32_port = to_stm32_port(port);
- const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
- struct dma_async_tx_descriptor *desc;
- int ret;
-
- stm32_port->last_res = RX_BUF_L;
- /* Prepare a DMA cyclic transaction */
- desc = dmaengine_prep_dma_cyclic(stm32_port->rx_ch,
- stm32_port->rx_dma_buf,
- RX_BUF_L, RX_BUF_P,
- DMA_DEV_TO_MEM,
- DMA_PREP_INTERRUPT);
- if (!desc) {
- dev_err(port->dev, "rx dma prep cyclic failed\n");
- return -ENODEV;
- }
-
- desc->callback = stm32_usart_rx_dma_complete;
- desc->callback_param = port;
-
- /* Push current DMA transaction in the pending queue */
- ret = dma_submit_error(dmaengine_submit(desc));
- if (ret) {
- dmaengine_terminate_sync(stm32_port->rx_ch);
- return ret;
- }
-
- /* Issue pending DMA requests */
- dma_async_issue_pending(stm32_port->rx_ch);
-
- /*
- * DMA request line not re-enabled at resume when port is throttled.
- * It will be re-enabled by unthrottle ops.
- */
- if (!stm32_port->throttled)
- stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
-
- return 0;
-}
-
static int stm32_usart_startup(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
@@ -1021,7 +1077,7 @@ static int stm32_usart_startup(struct uart_port *port)
writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
if (stm32_port->rx_ch) {
- ret = stm32_usart_start_rx_dma_cyclic(port);
+ ret = stm32_usart_rx_dma_start_or_resume(port);
if (ret) {
free_irq(port->irq, port);
return ret;
@@ -1043,12 +1099,12 @@ static void stm32_usart_shutdown(struct uart_port *port)
u32 val, isr;
int ret;
- if (stm32_usart_tx_dma_enabled(stm32_port))
- stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
-
if (stm32_usart_tx_dma_started(stm32_port))
stm32_usart_tx_dma_terminate(stm32_port);
+ if (stm32_port->tx_ch)
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+
/* Disable modem control interrupts */
stm32_usart_disable_ms(port);
@@ -1067,8 +1123,10 @@ static void stm32_usart_shutdown(struct uart_port *port)
dev_err(port->dev, "Transmission is not complete\n");
/* Disable RX DMA. */
- if (stm32_port->rx_ch)
- dmaengine_terminate_async(stm32_port->rx_ch);
+ if (stm32_port->rx_ch) {
+ stm32_usart_rx_dma_terminate(stm32_port);
+ dmaengine_synchronize(stm32_port->rx_ch);
+ }
/* flush RX & TX FIFO */
if (ofs->rqr != UNDEF_REG)
@@ -1259,6 +1317,9 @@ static void stm32_usart_set_termios(struct uart_port *port,
cr3 |= USART_CR3_DDRE;
}
+ if (stm32_port->tx_ch)
+ cr3 |= USART_CR3_DMAT;
+
if (rs485conf->flags & SER_RS485_ENABLED) {
stm32_usart_config_reg_rs485(&cr1, &cr3,
rs485conf->delay_rts_before_send,
@@ -1765,11 +1826,6 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);
- cr3 = readl_relaxed(port->membase + ofs->cr3);
- cr3 &= ~USART_CR3_EIE;
- cr3 &= ~USART_CR3_DMAR;
- cr3 &= ~USART_CR3_DDRE;
- writel_relaxed(cr3, port->membase + ofs->cr3);
if (stm32_port->tx_ch) {
stm32_usart_of_dma_tx_remove(stm32_port, pdev);
@@ -1781,7 +1837,12 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
dma_release_channel(stm32_port->rx_ch);
}
- stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+ cr3 = readl_relaxed(port->membase + ofs->cr3);
+ cr3 &= ~USART_CR3_EIE;
+ cr3 &= ~USART_CR3_DMAR;
+ cr3 &= ~USART_CR3_DMAT;
+ cr3 &= ~USART_CR3_DDRE;
+ writel_relaxed(cr3, port->membase + ofs->cr3);
if (stm32_port->wakeup_src) {
dev_pm_clear_wake_irq(&pdev->dev);
@@ -1953,7 +2014,7 @@ static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
struct tty_port *tport = &port->state->port;
int ret;
- unsigned int size;
+ unsigned int size = 0;
unsigned long flags;
if (!stm32_port->wakeup_src || !tty_port_initialized(tport))
@@ -1975,11 +2036,10 @@ static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
*/
if (stm32_port->rx_ch) {
spin_lock_irqsave(&port->lock, flags);
- /* Avoid race with RX IRQ when DMAR is cleared */
- stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
/* Poll data from DMA RX buffer if any */
- size = stm32_usart_receive_chars(port, true);
- dmaengine_terminate_async(stm32_port->rx_ch);
+ if (!stm32_usart_rx_dma_pause(stm32_port))
+ size += stm32_usart_receive_chars(port, true);
+ stm32_usart_rx_dma_terminate(stm32_port);
uart_unlock_and_check_sysrq_irqrestore(port, flags);
if (size)
tty_flip_buffer_push(tport);
@@ -1989,7 +2049,7 @@ static int __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
stm32_usart_receive_chars(port, false);
} else {
if (stm32_port->rx_ch) {
- ret = stm32_usart_start_rx_dma_cyclic(port);
+ ret = stm32_usart_rx_dma_start_or_resume(port);
if (ret)
return ret;
}
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index 903285b5aea7..f59f831b2a10 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -199,6 +199,7 @@ struct stm32_port {
u32 cr3_irq; /* USART_CR3_RXFTIE */
int last_res;
bool tx_dma_busy; /* dma tx transaction in progress */
+ bool rx_dma_busy; /* dma rx transaction in progress */
bool throttled; /* port throttled */
bool hw_flow_control;
bool swap; /* swap RX & TX pins */
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 7d38c33ef506..c671d674bce4 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -17,11 +17,11 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <asm/hypervisor.h>
#include <asm/spitfire.h>
-#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/setup.h>
diff --git a/drivers/tty/serial/sunplus-uart.c b/drivers/tty/serial/sunplus-uart.c
index 727942c43c45..3aacd5eb414c 100644
--- a/drivers/tty/serial/sunplus-uart.c
+++ b/drivers/tty/serial/sunplus-uart.c
@@ -231,7 +231,7 @@ static void transmit_chars(struct uart_port *port)
static void receive_chars(struct uart_port *port)
{
unsigned int lsr = readl(port->membase + SUP_UART_LSR);
- unsigned int ch, flag;
+ u8 ch, flag;
do {
ch = readl(port->membase + SUP_UART_DATA);
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 48b39fdb0397..40eeaf835bba 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -33,7 +33,8 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <asm/irq.h>
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index fed052a0b931..58a4342ad0f9 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -37,11 +37,11 @@
#include <linux/serial_reg.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <asm/setup.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index 0fbeb3dbd843..c8c71c56264c 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -33,11 +33,11 @@
#include <linux/serio.h>
#endif
#include <linux/init.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <asm/setup.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c
index 23500b342da7..65069daf36ec 100644
--- a/drivers/tty/serial/tegra-tcu.c
+++ b/drivers/tty/serial/tegra-tcu.c
@@ -7,7 +7,6 @@
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 679574893ebe..b225a78f6175 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -20,9 +20,6 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index 0a370b9ea70b..b06661b80f41 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -17,18 +17,18 @@
*/
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
-#include <linux/fs_uart_pd.h>
#include <soc/fsl/qe/ucc_slow.h>
#include <linux/firmware.h>
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index cc9157df732f..c5d5c2765119 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -14,6 +14,7 @@
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/console.h>
+#include <linux/platform_device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
@@ -21,7 +22,6 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/err.h>
/*
@@ -611,10 +611,6 @@ static int vt8500_serial_probe(struct platform_device *pdev)
if (!flags)
return -EINVAL;
- mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mmres)
- return -ENODEV;
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -647,7 +643,7 @@ static int vt8500_serial_probe(struct platform_device *pdev)
if (!vt8500_port)
return -ENOMEM;
- vt8500_port->uart.membase = devm_ioremap_resource(&pdev->dev, mmres);
+ vt8500_port->uart.membase = devm_platform_get_and_ioremap_resource(pdev, 0, &mmres);
if (IS_ERR(vt8500_port->uart.membase))
return PTR_ERR(vt8500_port->uart.membase);
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 20a751663ef9..2e5e86a00a77 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1562,8 +1562,8 @@ static int cdns_uart_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- rc = -ENXIO;
+ if (irq < 0) {
+ rc = irq;
goto err_out_clk_disable;
}
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 730c648e32ff..65ca4da6e368 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -539,8 +539,9 @@ static void zs_receive_chars(struct zs_port *zport)
struct uart_port *uport = &zport->port;
struct zs_scc *scc = zport->scc;
struct uart_icount *icount;
- unsigned int avail, status, ch, flag;
+ unsigned int avail, status;
int count;
+ u8 ch, flag;
for (count = 16; count; count--) {
spin_lock(&scc->zlock);
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 16e469e581ec..8112d9d5a0d8 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -87,18 +87,17 @@
/*
* module identification
*/
-static char *driver_name = "SyncLink GT";
-static char *slgt_driver_name = "synclink_gt";
-static char *tty_dev_prefix = "ttySLG";
+static const char driver_name[] = "SyncLink GT";
+static const char tty_dev_prefix[] = "ttySLG";
MODULE_LICENSE("GPL");
#define MAX_DEVICES 32
static const struct pci_device_id pci_table[] = {
- {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT2_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PCI_VENDOR_ID_MICROGATE, SYNCLINK_GT4_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {PCI_VENDOR_ID_MICROGATE, SYNCLINK_AC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
- {0,}, /* terminate list */
+ { PCI_VDEVICE(MICROGATE, SYNCLINK_GT_DEVICE_ID) },
+ { PCI_VDEVICE(MICROGATE, SYNCLINK_GT2_DEVICE_ID) },
+ { PCI_VDEVICE(MICROGATE, SYNCLINK_GT4_DEVICE_ID) },
+ { PCI_VDEVICE(MICROGATE, SYNCLINK_AC_DEVICE_ID) },
+ { 0 }, /* terminate list */
};
MODULE_DEVICE_TABLE(pci, pci_table);
@@ -323,7 +322,7 @@ struct slgt_info {
};
-static MGSL_PARAMS default_params = {
+static const MGSL_PARAMS default_params = {
.mode = MGSL_MODE_HDLC,
.loopback = 0,
.flags = HDLC_FLAG_UNDERRUN_ABORT15,
@@ -432,7 +431,7 @@ static void tx_set_idle(struct slgt_info *info);
static unsigned int tbuf_bytes(struct slgt_info *info);
static void reset_tbufs(struct slgt_info *info);
static void tdma_reset(struct slgt_info *info);
-static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
+static bool tx_load(struct slgt_info *info, const u8 *buf, unsigned int count);
static void get_gtsignals(struct slgt_info *info);
static void set_gtsignals(struct slgt_info *info);
@@ -746,8 +745,7 @@ static void update_tx_timer(struct slgt_info *info)
}
}
-static int write(struct tty_struct *tty,
- const unsigned char *buf, int count)
+static ssize_t write(struct tty_struct *tty, const u8 *buf, size_t count)
{
int ret = 0;
struct slgt_info *info = tty->driver_data;
@@ -756,7 +754,7 @@ static int write(struct tty_struct *tty,
if (sanity_check(info, tty->name, "write"))
return -EIO;
- DBGINFO(("%s write count=%d\n", info->device_name, count));
+ DBGINFO(("%s write count=%zu\n", info->device_name, count));
if (!info->tx_buf || (count > info->max_frame_size))
return -EIO;
@@ -782,7 +780,7 @@ cleanup:
return ret;
}
-static int put_char(struct tty_struct *tty, unsigned char ch)
+static int put_char(struct tty_struct *tty, u8 ch)
{
struct slgt_info *info = tty->driver_data;
unsigned long flags;
@@ -790,7 +788,7 @@ static int put_char(struct tty_struct *tty, unsigned char ch)
if (sanity_check(info, tty->name, "put_char"))
return 0;
- DBGINFO(("%s put_char(%d)\n", info->device_name, ch));
+ DBGINFO(("%s put_char(%u)\n", info->device_name, ch));
if (!info->tx_buf)
return 0;
spin_lock_irqsave(&info->lock,flags);
@@ -1088,12 +1086,13 @@ static long get_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *us
static long set_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *new_params)
{
struct MGSL_PARAMS32 tmp_params;
+ unsigned long flags;
DBGINFO(("%s set_params32\n", info->device_name));
if (copy_from_user(&tmp_params, new_params, sizeof(struct MGSL_PARAMS32)))
return -EFAULT;
- spin_lock(&info->lock);
+ spin_lock_irqsave(&info->lock, flags);
if (tmp_params.mode == MGSL_MODE_BASE_CLOCK) {
info->base_clock = tmp_params.clock_speed;
} else {
@@ -1111,7 +1110,7 @@ static long set_params32(struct slgt_info *info, struct MGSL_PARAMS32 __user *ne
info->params.stop_bits = tmp_params.stop_bits;
info->params.parity = tmp_params.parity;
}
- spin_unlock(&info->lock);
+ spin_unlock_irqrestore(&info->lock, flags);
program_hw(info);
@@ -3629,8 +3628,6 @@ static void slgt_cleanup(void)
struct slgt_info *info;
struct slgt_info *tmp;
- printk(KERN_INFO "unload %s\n", driver_name);
-
if (serial_driver) {
for (info=slgt_device_list ; info != NULL ; info=info->next_device)
tty_unregister_device(serial_driver, info->line);
@@ -3672,8 +3669,6 @@ static int __init slgt_init(void)
{
int rc;
- printk(KERN_INFO "%s\n", driver_name);
-
serial_driver = tty_alloc_driver(MAX_DEVICES, TTY_DRIVER_REAL_RAW |
TTY_DRIVER_DYNAMIC_DEV);
if (IS_ERR(serial_driver)) {
@@ -3683,7 +3678,7 @@ static int __init slgt_init(void)
/* Initialize the tty_driver structure */
- serial_driver->driver_name = slgt_driver_name;
+ serial_driver->driver_name = "synclink_gt";
serial_driver->name = tty_dev_prefix;
serial_driver->major = ttymajor;
serial_driver->minor_start = 64;
@@ -3702,9 +3697,6 @@ static int __init slgt_init(void)
goto error;
}
- printk(KERN_INFO "%s, tty major#%d\n",
- driver_name, serial_driver->major);
-
slgt_device_count = 0;
if ((rc = pci_register_driver(&pci_driver)) < 0) {
printk("%s pci_register_driver error=%d\n", driver_name, rc);
@@ -3712,9 +3704,6 @@ static int __init slgt_init(void)
}
pci_registered = true;
- if (!slgt_device_list)
- printk("%s no devices found\n",driver_name);
-
return 0;
error:
@@ -3734,47 +3723,47 @@ module_exit(slgt_exit);
* register access routines
*/
-#define CALC_REGADDR() \
- unsigned long reg_addr = ((unsigned long)info->reg_addr) + addr; \
- if (addr >= 0x80) \
- reg_addr += (info->port_num) * 32; \
- else if (addr >= 0x40) \
- reg_addr += (info->port_num) * 16;
+static inline void __iomem *calc_regaddr(struct slgt_info *info,
+ unsigned int addr)
+{
+ void __iomem *reg_addr = info->reg_addr + addr;
+
+ if (addr >= 0x80)
+ reg_addr += info->port_num * 32;
+ else if (addr >= 0x40)
+ reg_addr += info->port_num * 16;
+
+ return reg_addr;
+}
static __u8 rd_reg8(struct slgt_info *info, unsigned int addr)
{
- CALC_REGADDR();
- return readb((void __iomem *)reg_addr);
+ return readb(calc_regaddr(info, addr));
}
static void wr_reg8(struct slgt_info *info, unsigned int addr, __u8 value)
{
- CALC_REGADDR();
- writeb(value, (void __iomem *)reg_addr);
+ writeb(value, calc_regaddr(info, addr));
}
static __u16 rd_reg16(struct slgt_info *info, unsigned int addr)
{
- CALC_REGADDR();
- return readw((void __iomem *)reg_addr);
+ return readw(calc_regaddr(info, addr));
}
static void wr_reg16(struct slgt_info *info, unsigned int addr, __u16 value)
{
- CALC_REGADDR();
- writew(value, (void __iomem *)reg_addr);
+ writew(value, calc_regaddr(info, addr));
}
static __u32 rd_reg32(struct slgt_info *info, unsigned int addr)
{
- CALC_REGADDR();
- return readl((void __iomem *)reg_addr);
+ return readl(calc_regaddr(info, addr));
}
static void wr_reg32(struct slgt_info *info, unsigned int addr, __u32 value)
{
- CALC_REGADDR();
- writel(value, (void __iomem *)reg_addr);
+ writel(value, calc_regaddr(info, addr));
}
static void rdma_reset(struct slgt_info *info)
@@ -4777,7 +4766,7 @@ static unsigned int tbuf_bytes(struct slgt_info *info)
* load data into transmit DMA buffer ring and start transmitter if needed
* return true if data accepted, otherwise false (buffers full)
*/
-static bool tx_load(struct slgt_info *info, const char *buf, unsigned int size)
+static bool tx_load(struct slgt_info *info, const u8 *buf, unsigned int size)
{
unsigned short count;
unsigned int i;
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index e1df63a88aac..23198e3f1461 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -98,14 +98,13 @@ static int __init sysrq_always_enabled_setup(char *str)
__setup("sysrq_always_enabled", sysrq_always_enabled_setup);
-static void sysrq_handle_loglevel(int key)
+static void sysrq_handle_loglevel(u8 key)
{
- int i;
+ u8 loglevel = key - '0';
- i = key - '0';
console_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
- pr_info("Loglevel set to %d\n", i);
- console_loglevel = i;
+ pr_info("Loglevel set to %u\n", loglevel);
+ console_loglevel = loglevel;
}
static const struct sysrq_key_op sysrq_loglevel_op = {
.handler = sysrq_handle_loglevel,
@@ -115,7 +114,7 @@ static const struct sysrq_key_op sysrq_loglevel_op = {
};
#ifdef CONFIG_VT
-static void sysrq_handle_SAK(int key)
+static void sysrq_handle_SAK(u8 key)
{
struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work;
@@ -132,7 +131,7 @@ static const struct sysrq_key_op sysrq_SAK_op = {
#endif
#ifdef CONFIG_VT
-static void sysrq_handle_unraw(int key)
+static void sysrq_handle_unraw(u8 key)
{
vt_reset_unicode(fg_console);
}
@@ -147,7 +146,7 @@ static const struct sysrq_key_op sysrq_unraw_op = {
#define sysrq_unraw_op (*(const struct sysrq_key_op *)NULL)
#endif /* CONFIG_VT */
-static void sysrq_handle_crash(int key)
+static void sysrq_handle_crash(u8 key)
{
/* release the RCU read lock before crashing */
rcu_read_unlock();
@@ -161,7 +160,7 @@ static const struct sysrq_key_op sysrq_crash_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};
-static void sysrq_handle_reboot(int key)
+static void sysrq_handle_reboot(u8 key)
{
lockdep_off();
local_irq_enable();
@@ -176,7 +175,7 @@ static const struct sysrq_key_op sysrq_reboot_op = {
const struct sysrq_key_op *__sysrq_reboot_op = &sysrq_reboot_op;
-static void sysrq_handle_sync(int key)
+static void sysrq_handle_sync(u8 key)
{
emergency_sync();
}
@@ -187,7 +186,7 @@ static const struct sysrq_key_op sysrq_sync_op = {
.enable_mask = SYSRQ_ENABLE_SYNC,
};
-static void sysrq_handle_show_timers(int key)
+static void sysrq_handle_show_timers(u8 key)
{
sysrq_timer_list_show();
}
@@ -198,7 +197,7 @@ static const struct sysrq_key_op sysrq_show_timers_op = {
.action_msg = "Show clockevent devices & pending hrtimers (no others)",
};
-static void sysrq_handle_mountro(int key)
+static void sysrq_handle_mountro(u8 key)
{
emergency_remount();
}
@@ -210,7 +209,7 @@ static const struct sysrq_key_op sysrq_mountro_op = {
};
#ifdef CONFIG_LOCKDEP
-static void sysrq_handle_showlocks(int key)
+static void sysrq_handle_showlocks(u8 key)
{
debug_show_all_locks();
}
@@ -250,7 +249,7 @@ static void sysrq_showregs_othercpus(struct work_struct *dummy)
static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
-static void sysrq_handle_showallcpus(int key)
+static void sysrq_handle_showallcpus(u8 key)
{
/*
* Fall back to the workqueue based printing if the
@@ -283,7 +282,7 @@ static const struct sysrq_key_op sysrq_showallcpus_op = {
#define sysrq_showallcpus_op (*(const struct sysrq_key_op *)NULL)
#endif
-static void sysrq_handle_showregs(int key)
+static void sysrq_handle_showregs(u8 key)
{
struct pt_regs *regs = NULL;
@@ -300,7 +299,7 @@ static const struct sysrq_key_op sysrq_showregs_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};
-static void sysrq_handle_showstate(int key)
+static void sysrq_handle_showstate(u8 key)
{
show_state();
show_all_workqueues();
@@ -312,7 +311,7 @@ static const struct sysrq_key_op sysrq_showstate_op = {
.enable_mask = SYSRQ_ENABLE_DUMP,
};
-static void sysrq_handle_showstate_blocked(int key)
+static void sysrq_handle_showstate_blocked(u8 key)
{
show_state_filter(TASK_UNINTERRUPTIBLE);
}
@@ -326,7 +325,7 @@ static const struct sysrq_key_op sysrq_showstate_blocked_op = {
#ifdef CONFIG_TRACING
#include <linux/ftrace.h>
-static void sysrq_ftrace_dump(int key)
+static void sysrq_ftrace_dump(u8 key)
{
ftrace_dump(DUMP_ALL);
}
@@ -340,7 +339,7 @@ static const struct sysrq_key_op sysrq_ftrace_dump_op = {
#define sysrq_ftrace_dump_op (*(const struct sysrq_key_op *)NULL)
#endif
-static void sysrq_handle_showmem(int key)
+static void sysrq_handle_showmem(u8 key)
{
show_mem();
}
@@ -370,7 +369,7 @@ static void send_sig_all(int sig)
read_unlock(&tasklist_lock);
}
-static void sysrq_handle_term(int key)
+static void sysrq_handle_term(u8 key)
{
send_sig_all(SIGTERM);
console_loglevel = CONSOLE_LOGLEVEL_DEBUG;
@@ -401,7 +400,7 @@ static void moom_callback(struct work_struct *ignored)
static DECLARE_WORK(moom_work, moom_callback);
-static void sysrq_handle_moom(int key)
+static void sysrq_handle_moom(u8 key)
{
schedule_work(&moom_work);
}
@@ -413,7 +412,7 @@ static const struct sysrq_key_op sysrq_moom_op = {
};
#ifdef CONFIG_BLOCK
-static void sysrq_handle_thaw(int key)
+static void sysrq_handle_thaw(u8 key)
{
emergency_thaw_all();
}
@@ -427,7 +426,7 @@ static const struct sysrq_key_op sysrq_thaw_op = {
#define sysrq_thaw_op (*(const struct sysrq_key_op *)NULL)
#endif
-static void sysrq_handle_kill(int key)
+static void sysrq_handle_kill(u8 key)
{
send_sig_all(SIGKILL);
console_loglevel = CONSOLE_LOGLEVEL_DEBUG;
@@ -439,7 +438,7 @@ static const struct sysrq_key_op sysrq_kill_op = {
.enable_mask = SYSRQ_ENABLE_SIGNAL,
};
-static void sysrq_handle_unrt(int key)
+static void sysrq_handle_unrt(u8 key)
{
normalize_rt_tasks();
}
@@ -531,25 +530,24 @@ static const struct sysrq_key_op *sysrq_key_table[62] = {
};
/* key2index calculation, -1 on invalid index */
-static int sysrq_key_table_key2index(int key)
+static int sysrq_key_table_key2index(u8 key)
{
- int retval;
-
- if ((key >= '0') && (key <= '9'))
- retval = key - '0';
- else if ((key >= 'a') && (key <= 'z'))
- retval = key + 10 - 'a';
- else if ((key >= 'A') && (key <= 'Z'))
- retval = key + 36 - 'A';
- else
- retval = -1;
- return retval;
+ switch (key) {
+ case '0' ... '9':
+ return key - '0';
+ case 'a' ... 'z':
+ return key - 'a' + 10;
+ case 'A' ... 'Z':
+ return key - 'A' + 10 + 26;
+ default:
+ return -1;
+ }
}
/*
* get and put functions for the table, exposed to modules.
*/
-static const struct sysrq_key_op *__sysrq_get_key_op(int key)
+static const struct sysrq_key_op *__sysrq_get_key_op(u8 key)
{
const struct sysrq_key_op *op_p = NULL;
int i;
@@ -561,7 +559,7 @@ static const struct sysrq_key_op *__sysrq_get_key_op(int key)
return op_p;
}
-static void __sysrq_put_key_op(int key, const struct sysrq_key_op *op_p)
+static void __sysrq_put_key_op(u8 key, const struct sysrq_key_op *op_p)
{
int i = sysrq_key_table_key2index(key);
@@ -569,7 +567,7 @@ static void __sysrq_put_key_op(int key, const struct sysrq_key_op *op_p)
sysrq_key_table[i] = op_p;
}
-void __handle_sysrq(int key, bool check_mask)
+void __handle_sysrq(u8 key, bool check_mask)
{
const struct sysrq_key_op *op_p;
int orig_log_level;
@@ -628,7 +626,7 @@ void __handle_sysrq(int key, bool check_mask)
suppress_printk = orig_suppress_printk;
}
-void handle_sysrq(int key)
+void handle_sysrq(u8 key)
{
if (sysrq_on())
__handle_sysrq(key, true);
@@ -1112,7 +1110,7 @@ int sysrq_toggle_support(int enable_mask)
}
EXPORT_SYMBOL_GPL(sysrq_toggle_support);
-static int __sysrq_swap_key_ops(int key, const struct sysrq_key_op *insert_op_p,
+static int __sysrq_swap_key_ops(u8 key, const struct sysrq_key_op *insert_op_p,
const struct sysrq_key_op *remove_op_p)
{
int retval;
@@ -1136,13 +1134,13 @@ static int __sysrq_swap_key_ops(int key, const struct sysrq_key_op *insert_op_p,
return retval;
}
-int register_sysrq_key(int key, const struct sysrq_key_op *op_p)
+int register_sysrq_key(u8 key, const struct sysrq_key_op *op_p)
{
return __sysrq_swap_key_ops(key, op_p, NULL);
}
EXPORT_SYMBOL(register_sysrq_key);
-int unregister_sysrq_key(int key, const struct sysrq_key_op *op_p)
+int unregister_sysrq_key(u8 key, const struct sysrq_key_op *op_p)
{
return __sysrq_swap_key_ops(key, NULL, op_p);
}
diff --git a/drivers/tty/tty.h b/drivers/tty/tty.h
index 89769a1f1f97..50862f98273e 100644
--- a/drivers/tty/tty.h
+++ b/drivers/tty/tty.h
@@ -63,7 +63,7 @@ int tty_check_change(struct tty_struct *tty);
void __stop_tty(struct tty_struct *tty);
void __start_tty(struct tty_struct *tty);
void tty_write_unlock(struct tty_struct *tty);
-int tty_write_lock(struct tty_struct *tty, int ndelay);
+int tty_write_lock(struct tty_struct *tty, bool ndelay);
void tty_vhangup_session(struct tty_struct *tty);
void tty_open_proc_set_tty(struct file *filp, struct tty_struct *tty);
int tty_signal_session_leader(struct tty_struct *tty, int exit_session);
@@ -101,13 +101,13 @@ extern int tty_ldisc_autoload;
#ifdef CONFIG_AUDIT
void tty_audit_add_data(const struct tty_struct *tty, const void *data,
size_t size);
-void tty_audit_tiocsti(const struct tty_struct *tty, char ch);
+void tty_audit_tiocsti(const struct tty_struct *tty, u8 ch);
#else
static inline void tty_audit_add_data(const struct tty_struct *tty,
const void *data, size_t size)
{
}
-static inline void tty_audit_tiocsti(const struct tty_struct *tty, char ch)
+static inline void tty_audit_tiocsti(const struct tty_struct *tty, u8 ch)
{
}
#endif
@@ -115,6 +115,6 @@ static inline void tty_audit_tiocsti(const struct tty_struct *tty, char ch)
ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *);
int tty_insert_flip_string_and_push_buffer(struct tty_port *port,
- const unsigned char *chars, size_t cnt);
+ const u8 *chars, size_t cnt);
#endif
diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
index 24d010589379..1d81eeefb068 100644
--- a/drivers/tty/tty_audit.c
+++ b/drivers/tty/tty_audit.c
@@ -17,7 +17,7 @@ struct tty_audit_buf {
dev_t dev; /* The TTY which the data is from */
bool icanon;
size_t valid;
- unsigned char *data; /* Allocated size N_TTY_BUF_SIZE */
+ u8 *data; /* Allocated size N_TTY_BUF_SIZE */
};
static struct tty_audit_buf *tty_audit_buf_ref(void)
@@ -59,7 +59,7 @@ static void tty_audit_buf_free(struct tty_audit_buf *buf)
}
static void tty_audit_log(const char *description, dev_t dev,
- const unsigned char *data, size_t size)
+ const u8 *data, size_t size)
{
struct audit_buffer *ab;
pid_t pid = task_pid_nr(current);
@@ -134,7 +134,7 @@ void tty_audit_fork(struct signal_struct *sig)
/*
* tty_audit_tiocsti - Log TIOCSTI
*/
-void tty_audit_tiocsti(const struct tty_struct *tty, char ch)
+void tty_audit_tiocsti(const struct tty_struct *tty, u8 ch)
{
dev_t dev;
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 2df86ed90574..5f6d0cf67571 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -177,8 +177,7 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
*/
if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
return NULL;
- p = kmalloc(sizeof(struct tty_buffer) + 2 * size,
- GFP_ATOMIC | __GFP_NOWARN);
+ p = kmalloc(struct_size(p, data, 2 * size), GFP_ATOMIC | __GFP_NOWARN);
if (p == NULL)
return NULL;
@@ -263,38 +262,32 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size,
bool flags)
{
struct tty_bufhead *buf = &port->buf;
- struct tty_buffer *b, *n;
- int left, change;
-
- b = buf->tail;
- if (!b->flags)
- left = 2 * b->size - b->used;
- else
- left = b->size - b->used;
-
- change = !b->flags && flags;
- if (change || left < size) {
- /* This is the slow path - looking for new buffers to use */
- n = tty_buffer_alloc(port, size);
- if (n != NULL) {
- n->flags = flags;
- buf->tail = n;
- /*
- * Paired w/ acquire in flush_to_ldisc() and lookahead_bufs()
- * ensures they see all buffer data.
- */
- smp_store_release(&b->commit, b->used);
- /*
- * Paired w/ acquire in flush_to_ldisc() and lookahead_bufs()
- * ensures the latest commit value can be read before the head
- * is advanced to the next buffer.
- */
- smp_store_release(&b->next, n);
- } else if (change)
- size = 0;
- else
- size = left;
- }
+ struct tty_buffer *n, *b = buf->tail;
+ size_t left = (b->flags ? 1 : 2) * b->size - b->used;
+ bool change = !b->flags && flags;
+
+ if (!change && left >= size)
+ return size;
+
+ /* This is the slow path - looking for new buffers to use */
+ n = tty_buffer_alloc(port, size);
+ if (n == NULL)
+ return change ? 0 : left;
+
+ n->flags = flags;
+ buf->tail = n;
+ /*
+ * Paired w/ acquire in flush_to_ldisc() and lookahead_bufs()
+ * ensures they see all buffer data.
+ */
+ smp_store_release(&b->commit, b->used);
+ /*
+ * Paired w/ acquire in flush_to_ldisc() and lookahead_bufs()
+ * ensures the latest commit value can be read before the head
+ * is advanced to the next buffer.
+ */
+ smp_store_release(&b->next, n);
+
return size;
}
@@ -304,108 +297,45 @@ int tty_buffer_request_room(struct tty_port *port, size_t size)
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
-/**
- * tty_insert_flip_string_fixed_flag - add characters to the tty buffer
- * @port: tty port
- * @chars: characters
- * @flag: flag value for each character
- * @size: size
- *
- * Queue a series of bytes to the tty buffering. All the characters passed are
- * marked with the supplied flag.
- *
- * Returns: the number added.
- */
-int tty_insert_flip_string_fixed_flag(struct tty_port *port,
- const unsigned char *chars, char flag, size_t size)
+size_t __tty_insert_flip_string_flags(struct tty_port *port, const u8 *chars,
+ const u8 *flags, bool mutable_flags,
+ size_t size)
{
- int copied = 0;
- bool flags = flag != TTY_NORMAL;
+ bool need_flags = mutable_flags || flags[0] != TTY_NORMAL;
+ size_t copied = 0;
do {
- int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
- int space = __tty_buffer_request_room(port, goal, flags);
+ size_t goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
+ size_t space = __tty_buffer_request_room(port, goal, need_flags);
struct tty_buffer *tb = port->buf.tail;
if (unlikely(space == 0))
break;
- memcpy(char_buf_ptr(tb, tb->used), chars, space);
- if (tb->flags)
- memset(flag_buf_ptr(tb, tb->used), flag, space);
- tb->used += space;
- copied += space;
- chars += space;
- /* There is a small chance that we need to split the data over
- * several buffers. If this is the case we must loop.
- */
- } while (unlikely(size > copied));
- return copied;
-}
-EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
-/**
- * tty_insert_flip_string_flags - add characters to the tty buffer
- * @port: tty port
- * @chars: characters
- * @flags: flag bytes
- * @size: size
- *
- * Queue a series of bytes to the tty buffering. For each character the flags
- * array indicates the status of the character.
- *
- * Returns: the number added.
- */
-int tty_insert_flip_string_flags(struct tty_port *port,
- const unsigned char *chars, const char *flags, size_t size)
-{
- int copied = 0;
+ memcpy(char_buf_ptr(tb, tb->used), chars, space);
- do {
- int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
- int space = tty_buffer_request_room(port, goal);
- struct tty_buffer *tb = port->buf.tail;
+ if (mutable_flags) {
+ memcpy(flag_buf_ptr(tb, tb->used), flags, space);
+ flags += space;
+ } else if (tb->flags) {
+ memset(flag_buf_ptr(tb, tb->used), flags[0], space);
+ } else {
+ /* tb->flags should be available once requested */
+ WARN_ON_ONCE(need_flags);
+ }
- if (unlikely(space == 0))
- break;
- memcpy(char_buf_ptr(tb, tb->used), chars, space);
- memcpy(flag_buf_ptr(tb, tb->used), flags, space);
tb->used += space;
copied += space;
chars += space;
- flags += space;
+
/* There is a small chance that we need to split the data over
* several buffers. If this is the case we must loop.
*/
} while (unlikely(size > copied));
- return copied;
-}
-EXPORT_SYMBOL(tty_insert_flip_string_flags);
-
-/**
- * __tty_insert_flip_char - add one character to the tty buffer
- * @port: tty port
- * @ch: character
- * @flag: flag byte
- *
- * Queue a single byte @ch to the tty buffering, with an optional flag. This is
- * the slow path of tty_insert_flip_char().
- */
-int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
-{
- struct tty_buffer *tb;
- bool flags = flag != TTY_NORMAL;
-
- if (!__tty_buffer_request_room(port, 1, flags))
- return 0;
- tb = port->buf.tail;
- if (tb->flags)
- *flag_buf_ptr(tb, tb->used) = flag;
- *char_buf_ptr(tb, tb->used++) = ch;
-
- return 1;
+ return copied;
}
-EXPORT_SYMBOL(__tty_insert_flip_char);
+EXPORT_SYMBOL(__tty_insert_flip_string_flags);
/**
* tty_prepare_flip_string - make room for characters
@@ -421,10 +351,9 @@ EXPORT_SYMBOL(__tty_insert_flip_char);
* Returns: the length available and buffer pointer (@chars) to the space which
* is now allocated and accounted for as ready for normal characters.
*/
-int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
- size_t size)
+size_t tty_prepare_flip_string(struct tty_port *port, u8 **chars, size_t size)
{
- int space = __tty_buffer_request_room(port, size, false);
+ size_t space = __tty_buffer_request_room(port, size, false);
if (likely(space)) {
struct tty_buffer *tb = port->buf.tail;
@@ -434,6 +363,7 @@ int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space);
tb->used += space;
}
+
return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
@@ -450,13 +380,13 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
*
* Returns: the number of bytes processed.
*/
-int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
- const char *f, int count)
+size_t tty_ldisc_receive_buf(struct tty_ldisc *ld, const u8 *p, const u8 *f,
+ size_t count)
{
if (ld->ops->receive_buf2)
count = ld->ops->receive_buf2(ld->tty, p, f, count);
else {
- count = min_t(int, count, ld->tty->receive_room);
+ count = min_t(size_t, count, ld->tty->receive_room);
if (count && ld->ops->receive_buf)
ld->ops->receive_buf(ld->tty, p, f, count);
}
@@ -489,7 +419,7 @@ static void lookahead_bufs(struct tty_port *port, struct tty_buffer *head)
}
if (port->client_ops->lookahead_buf) {
- unsigned char *p, *f = NULL;
+ u8 *p, *f = NULL;
p = char_buf_ptr(head, head->lookahead);
if (head->flags)
@@ -502,12 +432,12 @@ static void lookahead_bufs(struct tty_port *port, struct tty_buffer *head)
}
}
-static int
-receive_buf(struct tty_port *port, struct tty_buffer *head, int count)
+static size_t
+receive_buf(struct tty_port *port, struct tty_buffer *head, size_t count)
{
- unsigned char *p = char_buf_ptr(head, head->read);
- const char *f = NULL;
- int n;
+ u8 *p = char_buf_ptr(head, head->read);
+ const u8 *f = NULL;
+ size_t n;
if (head->flags)
f = flag_buf_ptr(head, head->read);
@@ -539,7 +469,7 @@ static void flush_to_ldisc(struct work_struct *work)
while (1) {
struct tty_buffer *head = buf->head;
struct tty_buffer *next;
- int count, rcvd;
+ size_t count, rcvd;
/* Ldisc or user is trying to gain exclusive access */
if (atomic_read(&buf->priority))
@@ -620,7 +550,7 @@ EXPORT_SYMBOL(tty_flip_buffer_push);
* Returns: the number added.
*/
int tty_insert_flip_string_and_push_buffer(struct tty_port *port,
- const unsigned char *chars, size_t size)
+ const u8 *chars, size_t size)
{
struct tty_bufhead *buf = &port->buf;
unsigned long flags;
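For comparison, a hedged sketch of the open-coded pattern that tty_insert_flip_string_and_push_buffer() wraps (hypothetical helper; the two flip-buffer calls are the real API):

static void example_rx(struct tty_port *port, const u8 *data, size_t len)
{
	/* may queue fewer than @len bytes if the flip buffers are full */
	size_t queued = tty_insert_flip_string(port, data, len);

	if (queued)
		tty_flip_buffer_push(port);
}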
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 27d8e3a1aace..8a94e5a43c6d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -270,7 +270,7 @@ static int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
}
/* Caller must hold tty_lock */
-static int check_tty_count(struct tty_struct *tty, const char *routine)
+static void check_tty_count(struct tty_struct *tty, const char *routine)
{
#ifdef CHECK_TTY_COUNT
struct list_head *p;
@@ -290,10 +290,8 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
if (tty->count != (count + kopen_count)) {
tty_warn(tty, "%s: tty->count(%d) != (#fd's(%d) + #kopen's(%d))\n",
routine, tty->count, count, kopen_count);
- return (count + kopen_count);
}
#endif
- return 0;
}
/**
@@ -845,19 +843,18 @@ static void tty_update_time(struct tty_struct *tty, bool mtime)
* data or clears the cookie. The cookie may be something that the
* ldisc maintains state for and needs to free.
*/
-static int iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
- struct file *file, struct iov_iter *to)
+static ssize_t iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
+ struct file *file, struct iov_iter *to)
{
- int retval = 0;
void *cookie = NULL;
unsigned long offset = 0;
char kernel_buf[64];
- size_t count = iov_iter_count(to);
+ ssize_t retval = 0;
+ size_t copied, count = iov_iter_count(to);
do {
- int size, copied;
+ ssize_t size = min(count, sizeof(kernel_buf));
- size = count > sizeof(kernel_buf) ? sizeof(kernel_buf) : count;
size = ld->ops->read(tty, file, kernel_buf, size, &cookie, offset);
if (!size)
break;
@@ -914,11 +911,11 @@ static int iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
*/
static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
{
- int i;
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct tty_struct *tty = file_tty(file);
struct tty_ldisc *ld;
+ ssize_t ret;
if (tty_paranoia_check(tty, inode, "tty_read"))
return -EIO;
@@ -931,15 +928,15 @@ static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
ld = tty_ldisc_ref_wait(tty);
if (!ld)
return hung_up_tty_read(iocb, to);
- i = -EIO;
+ ret = -EIO;
if (ld->ops->read)
- i = iterate_tty_read(ld, tty, file, to);
+ ret = iterate_tty_read(ld, tty, file, to);
tty_ldisc_deref(ld);
- if (i > 0)
+ if (ret > 0)
tty_update_time(tty, false);
- return i;
+ return ret;
}
void tty_write_unlock(struct tty_struct *tty)
@@ -948,7 +945,7 @@ void tty_write_unlock(struct tty_struct *tty)
wake_up_interruptible_poll(&tty->write_wait, EPOLLOUT);
}
-int tty_write_lock(struct tty_struct *tty, int ndelay)
+int tty_write_lock(struct tty_struct *tty, bool ndelay)
{
if (!mutex_trylock(&tty->atomic_write_lock)) {
if (ndelay)
@@ -963,15 +960,11 @@ int tty_write_lock(struct tty_struct *tty, int ndelay)
* Split writes up in sane blocksizes to avoid
* denial-of-service type attacks
*/
-static inline ssize_t do_tty_write(
- ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
- struct tty_struct *tty,
- struct file *file,
- struct iov_iter *from)
+static ssize_t iterate_tty_write(struct tty_ldisc *ld, struct tty_struct *tty,
+ struct file *file, struct iov_iter *from)
{
- size_t count = iov_iter_count(from);
+ size_t chunk, count = iov_iter_count(from);
ssize_t ret, written = 0;
- unsigned int chunk;
ret = tty_write_lock(tty, file->f_flags & O_NDELAY);
if (ret < 0)
@@ -1015,16 +1008,13 @@ static inline ssize_t do_tty_write(
/* Do the write .. */
for (;;) {
- size_t size = count;
-
- if (size > chunk)
- size = chunk;
+ size_t size = min(chunk, count);
ret = -EFAULT;
if (copy_from_iter(tty->write_buf, size, from) != size)
break;
- ret = write(tty, file, tty->write_buf, size);
+ ret = ld->ops->write(tty, file, tty->write_buf, size);
if (ret <= 0)
break;
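The bounded-chunk copy used by iterate_tty_write() above, pulled out as a standalone sketch (hypothetical helper; copy_from_iter() and iov_iter_count() are the real iov_iter APIs, the emit callback stands in for ld->ops->write):

#include <linux/uio.h>
#include <linux/minmax.h>

static ssize_t copy_in_chunks(struct iov_iter *from, u8 *buf, size_t chunk,
			      ssize_t (*emit)(const u8 *buf, size_t count))
{
	size_t count = iov_iter_count(from);
	ssize_t ret, written = 0;

	while (count) {
		size_t size = min(chunk, count);

		if (copy_from_iter(buf, size, from) != size)
			return written ?: -EFAULT;

		ret = emit(buf, size);
		if (ret <= 0)
			return written ?: ret;

		written += ret;
		count -= ret;
	}

	return written;
}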
@@ -1095,7 +1085,7 @@ static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_
if (!ld->ops->write)
ret = -EIO;
else
- ret = do_tty_write(ld->ops->write, tty, file, from);
+ ret = iterate_tty_write(ld, tty, file, from);
tty_ldisc_deref(ld);
return ret;
}
@@ -1162,7 +1152,7 @@ int tty_send_xchar(struct tty_struct *tty, char ch)
return 0;
}
- if (tty_write_lock(tty, 0) < 0)
+ if (tty_write_lock(tty, false) < 0)
return -ERESTARTSYS;
down_read(&tty->termios_rwsem);
@@ -2488,7 +2478,7 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
retval = tty->ops->break_ctl(tty, duration);
else {
/* Do the work ourselves */
- if (tty_write_lock(tty, 0) < 0)
+ if (tty_write_lock(tty, false) < 0)
return -EINTR;
retval = tty->ops->break_ctl(tty, -1);
if (retval)
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 2e88b414cf95..7958bf6d27c4 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -28,14 +28,6 @@
#include <asm/io.h>
#include <linux/uaccess.h>
-#undef TTY_DEBUG_WAIT_UNTIL_SENT
-
-#ifdef TTY_DEBUG_WAIT_UNTIL_SENT
-# define tty_debug_wait_until_sent(tty, f, args...) tty_debug(tty, f, ##args)
-#else
-# define tty_debug_wait_until_sent(tty, f, args...) do {} while (0)
-#endif
-
#undef DEBUG
/*
@@ -198,8 +190,6 @@ int tty_unthrottle_safe(struct tty_struct *tty)
void tty_wait_until_sent(struct tty_struct *tty, long timeout)
{
- tty_debug_wait_until_sent(tty, "wait until sent, timeout=%ld\n", timeout);
-
if (!timeout)
timeout = MAX_SCHEDULE_TIMEOUT;
@@ -507,7 +497,7 @@ retry_write_wait:
if (retval < 0)
return retval;
- if (tty_write_lock(tty, 0) < 0)
+ if (tty_write_lock(tty, false) < 0)
goto retry_write_wait;
/* Racing writer? */
@@ -747,17 +737,17 @@ static int set_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars)
/**
* tty_change_softcar - carrier change ioctl helper
* @tty: tty to update
- * @arg: enable/disable CLOCAL
+ * @enable: enable/disable CLOCAL
*
* Perform a change to the CLOCAL state and call into the driver
* layer to make it visible. All done with the termios rwsem
*/
-static int tty_change_softcar(struct tty_struct *tty, int arg)
+static int tty_change_softcar(struct tty_struct *tty, bool enable)
{
int ret = 0;
- int bit = arg ? CLOCAL : 0;
struct ktermios old;
+ tcflag_t bit = enable ? CLOCAL : 0;
down_write(&tty->termios_rwsem);
old = tty->termios;
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index a788a6bf487d..624d104bd145 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -20,47 +20,45 @@
#include <linux/serdev.h>
#include "tty.h"
-static int tty_port_default_receive_buf(struct tty_port *port,
- const unsigned char *p,
- const unsigned char *f, size_t count)
+static size_t tty_port_default_receive_buf(struct tty_port *port, const u8 *p,
+ const u8 *f, size_t count)
{
- int ret;
struct tty_struct *tty;
- struct tty_ldisc *disc;
+ struct tty_ldisc *ld;
tty = READ_ONCE(port->itty);
if (!tty)
return 0;
- disc = tty_ldisc_ref(tty);
- if (!disc)
+ ld = tty_ldisc_ref(tty);
+ if (!ld)
return 0;
- ret = tty_ldisc_receive_buf(disc, p, (char *)f, count);
+ count = tty_ldisc_receive_buf(ld, p, f, count);
- tty_ldisc_deref(disc);
+ tty_ldisc_deref(ld);
- return ret;
+ return count;
}
-static void tty_port_default_lookahead_buf(struct tty_port *port, const unsigned char *p,
- const unsigned char *f, unsigned int count)
+static void tty_port_default_lookahead_buf(struct tty_port *port, const u8 *p,
+ const u8 *f, size_t count)
{
struct tty_struct *tty;
- struct tty_ldisc *disc;
+ struct tty_ldisc *ld;
tty = READ_ONCE(port->itty);
if (!tty)
return;
- disc = tty_ldisc_ref(tty);
- if (!disc)
+ ld = tty_ldisc_ref(tty);
+ if (!ld)
return;
- if (disc->ops->lookahead_buf)
- disc->ops->lookahead_buf(disc->tty, p, f, count);
+ if (ld->ops->lookahead_buf)
+ ld->ops->lookahead_buf(ld->tty, p, f, count);
- tty_ldisc_deref(disc);
+ tty_ldisc_deref(ld);
}
static void tty_port_default_wakeup(struct tty_port *port)
diff --git a/drivers/tty/ttynull.c b/drivers/tty/ttynull.c
index 1d4438472442..e4c4273993bc 100644
--- a/drivers/tty/ttynull.c
+++ b/drivers/tty/ttynull.c
@@ -29,8 +29,8 @@ static void ttynull_hangup(struct tty_struct *tty)
tty_port_hangup(&ttynull_port);
}
-static int ttynull_write(struct tty_struct *tty, const unsigned char *buf,
- int count)
+static ssize_t ttynull_write(struct tty_struct *tty, const u8 *buf,
+ size_t count)
{
return count;
}
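The ttynull conversion above shows the new tty_operations::write prototype in its simplest form; a hypothetical driver wiring it up would look roughly like this (names are illustrative):

static ssize_t example_write(struct tty_struct *tty, const u8 *buf,
			     size_t count)
{
	/* pretend everything was transmitted */
	return count;
}

static const struct tty_operations example_ops = {
	.write = example_write,
};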
diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
index 34ba6e54789a..a39ed981bfd3 100644
--- a/drivers/tty/vcc.c
+++ b/drivers/tty/vcc.c
@@ -36,7 +36,7 @@ struct vcc_port {
* and guarantee that any characters that the driver accepts will
* be eventually sent, either immediately or later.
*/
- int chars_in_buffer;
+ size_t chars_in_buffer;
struct vio_vcc buffer;
struct timer_list rx_timer;
@@ -385,7 +385,7 @@ static void vcc_tx_timer(struct timer_list *t)
struct vcc_port *port = from_timer(port, t, tx_timer);
struct vio_vcc *pkt;
unsigned long flags;
- int tosend = 0;
+ size_t tosend = 0;
int rv;
spin_lock_irqsave(&port->lock, flags);
@@ -804,14 +804,13 @@ static void vcc_hangup(struct tty_struct *tty)
tty_port_hangup(tty->port);
}
-static int vcc_write(struct tty_struct *tty, const unsigned char *buf,
- int count)
+static ssize_t vcc_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct vcc_port *port;
struct vio_vcc *pkt;
unsigned long flags;
- int total_sent = 0;
- int tosend = 0;
+ size_t total_sent = 0;
+ size_t tosend = 0;
int rv = -EINVAL;
port = vcc_get_ne(tty->index);
@@ -827,7 +826,8 @@ static int vcc_write(struct tty_struct *tty, const unsigned char *buf,
while (count > 0) {
/* Minimum of data to write and space available */
- tosend = min(count, (VCC_BUFF_LEN - port->chars_in_buffer));
+ tosend = min_t(size_t, count,
+ (VCC_BUFF_LEN - port->chars_in_buffer));
if (!tosend)
break;
@@ -847,7 +847,7 @@ static int vcc_write(struct tty_struct *tty, const unsigned char *buf,
* hypervisor actually took it because we have it buffered.
*/
rv = ldc_write(port->vio.lp, pkt, (VIO_TAG_SIZE + tosend));
- vccdbg("VCC: write: ldc_write(%d)=%d\n",
+ vccdbg("VCC: write: ldc_write(%zu)=%d\n",
(VIO_TAG_SIZE + tosend), rv);
total_sent += tosend;
@@ -864,7 +864,7 @@ static int vcc_write(struct tty_struct *tty, const unsigned char *buf,
vcc_put(port, false);
- vccdbg("VCC: write: total=%d rv=%d", total_sent, rv);
+ vccdbg("VCC: write: total=%zu rv=%d", total_sent, rv);
return total_sent ? total_sent : rv;
}
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 6ef22f01cc51..8967c3a0d916 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -376,7 +376,7 @@ int paste_selection(struct tty_struct *tty)
{
struct vc_data *vc = tty->driver_data;
int pasted = 0;
- unsigned int count;
+ size_t count;
struct tty_ldisc *ld;
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 1e8e57b45688..5c47f77804f0 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -140,8 +140,7 @@ EXPORT_SYMBOL(vc_cons);
static const struct consw *con_driver_map[MAX_NR_CONSOLES];
static int con_open(struct tty_struct *, struct file *);
-static void vc_init(struct vc_data *vc, unsigned int rows,
- unsigned int cols, int do_clear);
+static void vc_init(struct vc_data *vc, int do_clear);
static void gotoxy(struct vc_data *vc, int new_x, int new_y);
static void save_cur(struct vc_data *vc);
static void reset_terminal(struct vc_data *vc, int do_clear);
@@ -1103,7 +1102,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
if (global_cursor_default == -1)
global_cursor_default = 1;
- vc_init(vc, vc->vc_rows, vc->vc_cols, 1);
+ vc_init(vc, 1);
vcs_make_sysfs(currcons);
atomic_notifier_call_chain(&vt_notifier_list, VT_ALLOCATE, &param);
@@ -2846,7 +2845,7 @@ static int vc_con_write_normal(struct vc_data *vc, int tc, int c,
}
/* acquires console_lock */
-static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count)
+static int do_con_write(struct tty_struct *tty, const u8 *buf, int count)
{
struct vc_draw_region draw = {
.x = -1,
@@ -3239,7 +3238,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
* /dev/ttyN handling
*/
-static int con_write(struct tty_struct *tty, const unsigned char *buf, int count)
+static ssize_t con_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
int retval;
@@ -3249,7 +3248,7 @@ static int con_write(struct tty_struct *tty, const unsigned char *buf, int count
return retval;
}
-static int con_put_char(struct tty_struct *tty, unsigned char ch)
+static int con_put_char(struct tty_struct *tty, u8 ch)
{
return do_con_write(tty, &ch, 1);
}
@@ -3398,16 +3397,10 @@ module_param_named(color, default_color, int, S_IRUGO | S_IWUSR);
module_param_named(italic, default_italic_color, int, S_IRUGO | S_IWUSR);
module_param_named(underline, default_underline_color, int, S_IRUGO | S_IWUSR);
-static void vc_init(struct vc_data *vc, unsigned int rows,
- unsigned int cols, int do_clear)
+static void vc_init(struct vc_data *vc, int do_clear)
{
int j, k ;
- vc->vc_cols = cols;
- vc->vc_rows = rows;
- vc->vc_size_row = cols << 1;
- vc->vc_screenbuf_size = vc->vc_rows * vc->vc_size_row;
-
set_origin(vc);
vc->vc_pos = vc->vc_origin;
reset_vc(vc);
@@ -3475,8 +3468,7 @@ static int __init con_init(void)
visual_init(vc, currcons, 1);
/* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. */
vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
- vc_init(vc, vc->vc_rows, vc->vc_cols,
- currcons || !vc->vc_sw->con_save_screen);
+ vc_init(vc, currcons || !vc->vc_sw->con_save_screen);
}
currcons = fg_console = 0;
master_display_fg = vc = vc_cons[currcons].d;
diff --git a/drivers/ufs/core/Kconfig b/drivers/ufs/core/Kconfig
index e11978171403..817208ee64ec 100644
--- a/drivers/ufs/core/Kconfig
+++ b/drivers/ufs/core/Kconfig
@@ -35,14 +35,6 @@ config SCSI_UFS_CRYPTO
capabilities of the UFS device (if present) to perform crypto
operations on data being transferred to/from the device.
-config SCSI_UFS_HPB
- bool "Support UFS Host Performance Booster"
- help
- The UFS HPB feature improves random read performance. It caches
- L2P (logical to physical) map of UFS to host DRAM. The driver uses HPB
- read command by piggybacking physical page number for bypassing FTL (flash
- translation layer)'s L2P address translation.
-
config SCSI_UFS_FAULT_INJECTION
bool "UFS Fault Injection Support"
depends on FAULT_INJECTION
diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile
index 4d02e0f2de10..cf820fa09a04 100644
--- a/drivers/ufs/core/Makefile
+++ b/drivers/ufs/core/Makefile
@@ -5,6 +5,5 @@ ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o
ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
-ufshcd-core-$(CONFIG_SCSI_UFS_HPB) += ufshpb.o
ufshcd-core-$(CONFIG_SCSI_UFS_FAULT_INJECTION) += ufs-fault-injection.o
ufshcd-core-$(CONFIG_SCSI_UFS_HWMON) += ufs-hwmon.o
diff --git a/drivers/ufs/core/ufs-hwmon.c b/drivers/ufs/core/ufs-hwmon.c
index 101d7082446f..34194064367f 100644
--- a/drivers/ufs/core/ufs-hwmon.c
+++ b/drivers/ufs/core/ufs-hwmon.c
@@ -127,7 +127,8 @@ static int ufs_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32
return err;
}
-static umode_t ufs_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type, u32 attr,
+static umode_t ufs_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type, u32 attr,
int channel)
{
if (type != hwmon_temp)
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index 386674ead7f0..2ba8ec254dce 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -97,6 +97,7 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
/**
* ufshcd_mcq_req_to_hwq - find the hardware queue on which the
@@ -104,7 +105,7 @@ void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
* @hba: per adapter instance
* @req: pointer to the request to be issued
*
- * Returns the hardware queue instance on which the request would
+ * Return: the hardware queue instance on which the request would
* be queued.
*/
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
@@ -120,7 +121,7 @@ struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
* ufshcd_mcq_decide_queue_depth - decide the queue depth
* @hba: per adapter instance
*
- * Returns queue-depth on success, non-zero on error
+ * Return: queue-depth on success, non-zero on error
*
* MAC - Max. Active Command of the Host Controller (HC)
* HC wouldn't send more commands than this to the device.
@@ -245,6 +246,7 @@ u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
{
return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_read_cqis);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
{
@@ -388,6 +390,7 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
MCQ_CFG_n(REG_SQATTR, i));
}
}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);
void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
{
@@ -487,10 +490,10 @@ static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
/**
* ufshcd_mcq_sq_cleanup - Clean up submission queue resources
* associated with the pending command.
- * @hba - per adapter instance.
- * @task_tag - The command's task tag.
+ * @hba: per adapter instance.
+ * @task_tag: The command's task tag.
*
- * Returns 0 for success; error code otherwise.
+ * Return: 0 for success; error code otherwise.
*/
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
{
@@ -551,16 +554,11 @@ unlock:
* Write the sqe's Command Type to 0xF. The host controller will not
* fetch any sqe with Command Type = 0xF.
*
- * @utrd - UTP Transfer Request Descriptor to be nullified.
+ * @utrd: UTP Transfer Request Descriptor to be nullified.
*/
static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
{
- u32 dword_0;
-
- dword_0 = le32_to_cpu(utrd->header.dword_0);
- dword_0 &= ~UPIU_COMMAND_TYPE_MASK;
- dword_0 |= FIELD_PREP(UPIU_COMMAND_TYPE_MASK, 0xF);
- utrd->header.dword_0 = cpu_to_le32(dword_0);
+ utrd->header.command_type = 0xf;
}
/**
@@ -568,11 +566,11 @@ static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
* If the command is in the submission queue and not issued to the device yet,
* nullify the sqe so the host controller will skip fetching the sqe.
*
- * @hba - per adapter instance.
- * @hwq - Hardware Queue to be searched.
- * @task_tag - The command's task tag.
+ * @hba: per adapter instance.
+ * @hwq: Hardware Queue to be searched.
+ * @task_tag: The command's task tag.
*
- * Returns true if the SQE containing the command is present in the SQ
+ * Return: true if the SQE containing the command is present in the SQ
* (not fetched by the controller); returns false if the SQE is not in the SQ.
*/
static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
@@ -621,9 +619,9 @@ out:
/**
* ufshcd_mcq_abort - Abort the command in MCQ.
- * @cmd - The command to be aborted.
+ * @cmd: The command to be aborted.
*
- * Returns SUCCESS or FAILED error codes
+ * Return: SUCCESS or FAILED error codes
*/
int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
{
diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c
index 6c72075750dd..c95906443d5f 100644
--- a/drivers/ufs/core/ufs-sysfs.c
+++ b/drivers/ufs/core/ufs-sysfs.c
@@ -718,8 +718,6 @@ UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
-UFS_DEVICE_DESC_PARAM(hpb_version, _HPB_VER, 2);
-UFS_DEVICE_DESC_PARAM(hpb_control, _HPB_CONTROL, 1);
UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4);
UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1);
UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1);
@@ -752,8 +750,6 @@ static struct attribute *ufs_sysfs_device_descriptor[] = {
&dev_attr_number_of_secure_wpa.attr,
&dev_attr_psa_max_data_size.attr,
&dev_attr_psa_state_timeout.attr,
- &dev_attr_hpb_version.attr,
- &dev_attr_hpb_control.attr,
&dev_attr_ext_feature_sup.attr,
&dev_attr_wb_presv_us_en.attr,
&dev_attr_wb_type.attr,
@@ -827,10 +823,6 @@ UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
_ENM4_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
_ENM4_CAP_ADJ_FCTR, 2);
-UFS_GEOMETRY_DESC_PARAM(hpb_region_size, _HPB_REGION_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(hpb_number_lu, _HPB_NUMBER_LU, 1);
-UFS_GEOMETRY_DESC_PARAM(hpb_subregion_size, _HPB_SUBREGION_SIZE, 1);
-UFS_GEOMETRY_DESC_PARAM(hpb_max_active_regions, _HPB_MAX_ACTIVE_REGS, 2);
UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1);
UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1);
@@ -868,10 +860,6 @@ static struct attribute *ufs_sysfs_geometry_descriptor[] = {
&dev_attr_enh3_memory_capacity_adjustment_factor.attr,
&dev_attr_enh4_memory_max_alloc_units.attr,
&dev_attr_enh4_memory_capacity_adjustment_factor.attr,
- &dev_attr_hpb_region_size.attr,
- &dev_attr_hpb_number_lu.attr,
- &dev_attr_hpb_subregion_size.attr,
- &dev_attr_hpb_max_active_regions.attr,
&dev_attr_wb_max_alloc_units.attr,
&dev_attr_wb_max_wb_luns.attr,
&dev_attr_wb_buff_cap_adj.attr,
@@ -1132,7 +1120,6 @@ UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
UFS_FLAG(wb_enable, _WB_EN);
UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);
-UFS_FLAG(hpb_enable, _HPB_EN);
static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_device_init.attr,
@@ -1146,7 +1133,6 @@ static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_wb_enable.attr,
&dev_attr_wb_flush_en.attr,
&dev_attr_wb_flush_during_h8.attr,
- &dev_attr_hpb_enable.attr,
NULL,
};
@@ -1193,7 +1179,6 @@ out: \
static DEVICE_ATTR_RO(_name)
UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN);
-UFS_ATTRIBUTE(max_data_size_hpb_single_cmd, _MAX_HPB_SINGLE_CMD);
UFS_ATTRIBUTE(current_power_mode, _POWER_MODE);
UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL);
UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN);
@@ -1217,7 +1202,6 @@ UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);
static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_boot_lun_enabled.attr,
- &dev_attr_max_data_size_hpb_single_cmd.attr,
&dev_attr_current_power_mode.attr,
&dev_attr_active_icc_level.attr,
&dev_attr_ooo_data_enabled.attr,
@@ -1291,9 +1275,6 @@ UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
-UFS_UNIT_DESC_PARAM(hpb_lu_max_active_regions, _HPB_LU_MAX_ACTIVE_RGNS, 2);
-UFS_UNIT_DESC_PARAM(hpb_pinned_region_start_offset, _HPB_PIN_RGN_START_OFF, 2);
-UFS_UNIT_DESC_PARAM(hpb_number_pinned_regions, _HPB_NUM_PIN_RGNS, 2);
UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);
static struct attribute *ufs_sysfs_unit_descriptor[] = {
@@ -1311,9 +1292,6 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = {
&dev_attr_physical_memory_resourse_count.attr,
&dev_attr_context_capabilities.attr,
&dev_attr_large_unit_granularity.attr,
- &dev_attr_hpb_lu_max_active_regions.attr,
- &dev_attr_hpb_pinned_region_start_offset.attr,
- &dev_attr_hpb_number_pinned_regions.attr,
&dev_attr_wb_buf_alloc_units.attr,
NULL,
};
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 0d38e7fa34cc..34e423924e06 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -232,6 +232,8 @@ static inline void ufs_bsg_node_release(struct device *dev)
* @hba: per adapter object
*
* Called during initial loading of the driver, and before scsi_scan_host.
+ *
+ * Returns: 0 (success).
*/
int ufs_bsg_probe(struct ufs_hba *hba)
{
diff --git a/drivers/ufs/core/ufshcd-crypto.h b/drivers/ufs/core/ufshcd-crypto.h
index 504cc841540b..be8596f20ba2 100644
--- a/drivers/ufs/core/ufshcd-crypto.h
+++ b/drivers/ufs/core/ufshcd-crypto.h
@@ -26,15 +26,15 @@ static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,
}
static inline void
-ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, u32 *dword_0,
- u32 *dword_1, u32 *dword_3)
+ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
+ struct request_desc_header *h)
{
- if (lrbp->crypto_key_slot >= 0) {
- *dword_0 |= UTP_REQ_DESC_CRYPTO_ENABLE_CMD;
- *dword_0 |= lrbp->crypto_key_slot;
- *dword_1 = lower_32_bits(lrbp->data_unit_num);
- *dword_3 = upper_32_bits(lrbp->data_unit_num);
- }
+ if (lrbp->crypto_key_slot < 0)
+ return;
+ h->enable_crypto = 1;
+ h->cci = lrbp->crypto_key_slot;
+ h->dunl = cpu_to_le32(lower_32_bits(lrbp->data_unit_num));
+ h->dunu = cpu_to_le32(upper_32_bits(lrbp->data_unit_num));
}
bool ufshcd_crypto_enable(struct ufs_hba *hba);
@@ -51,8 +51,8 @@ static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,
struct ufshcd_lrb *lrbp) { }
static inline void
-ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, u32 *dword_0,
- u32 *dword_1, u32 *dword_3) { }
+ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp,
+ struct request_desc_header *h) { }
static inline bool ufshcd_crypto_enable(struct ufs_hba *hba)
{
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 0f3bd943b58b..f42d99ce5bf1 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -93,7 +93,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu,
- int msgcode,
+ enum upiu_request_transaction msgcode,
u8 *desc_buff, int *buff_len,
enum query_opcode desc_op);
@@ -294,7 +294,7 @@ extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
* ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
* @scsi_lun: scsi LUN id
*
- * Returns UPIU LUN id
+ * Return: UPIU LUN id
*/
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 129446775796..e4318171381b 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -34,7 +34,6 @@
#include "ufs-fault-injection.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
-#include "ufshpb.h"
#include <asm/unaligned.h>
#define CREATE_TRACE_POINTS
@@ -238,8 +237,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
/* UFS cards deviations table */
{ .wmanufacturerid = UFS_VENDOR_MICRON,
.model = UFS_ANY_MODEL,
- .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
- UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ },
+ .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
@@ -703,8 +701,7 @@ EXPORT_SYMBOL_GPL(ufshcd_delay_us);
* @interval_us: polling interval in microseconds
* @timeout_ms: timeout in milliseconds
*
- * Return:
- * -ETIMEDOUT on error, zero on success.
+ * Return: -ETIMEDOUT on error, zero on success.
*/
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
@@ -732,7 +729,7 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
* ufshcd_get_intr_mask - Get the interrupt bit mask
* @hba: Pointer to adapter instance
*
- * Returns interrupt bit mask per version
+ * Return: interrupt bit mask per version
*/
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
@@ -748,7 +745,7 @@ static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
* ufshcd_get_ufs_version - Get the UFS version supported by the HBA
* @hba: Pointer to adapter instance
*
- * Returns UFSHCI version supported by the controller
+ * Return: UFSHCI version supported by the controller
*/
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
@@ -775,7 +772,7 @@ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
* the host controller
* @hba: pointer to adapter instance
*
- * Returns true if device present, false if no device detected
+ * Return: true if device present, false if no device detected
*/
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
@@ -788,7 +785,8 @@ static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
* @cqe: pointer to the completion queue entry
*
* This function is used to get the OCS field from UTRD
- * Returns the OCS field in the UTRD
+ *
+ * Return: the OCS field in the UTRD.
*/
static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
struct cq_entry *cqe)
@@ -796,7 +794,7 @@ static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
if (cqe)
return le32_to_cpu(cqe->status) & MASK_OCS;
- return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
+ return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS;
}
/**
@@ -841,7 +839,7 @@ static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
* ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
* @reg: Register value of host controller status
*
- * Returns integer, 0 on Success and positive value if failed
+ * Return: 0 on success; a positive value if failed.
*/
static inline int ufshcd_get_lists_status(u32 reg)
{
@@ -853,7 +851,8 @@ static inline int ufshcd_get_lists_status(u32 reg)
* @hba: Pointer to adapter instance
*
* This function gets the result of UIC command completion
- * Returns 0 on success, non zero value on error
+ *
+ * Return: 0 on success; non-zero value on error.
*/
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
@@ -866,7 +865,8 @@ static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
* @hba: Pointer to adapter instance
*
* This function gets UIC command argument3
- * Returns 0 on success, non zero value on error
+ *
+ * Return: 0 on success; non-zero value on error.
*/
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
@@ -876,38 +876,13 @@ static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
/**
* ufshcd_get_req_rsp - returns the TR response transaction type
* @ucd_rsp_ptr: pointer to response UPIU
- */
-static inline int
-ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
-{
- return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
-}
-
-/**
- * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
- * @ucd_rsp_ptr: pointer to response UPIU
- *
- * This function gets the response status and scsi_status from response UPIU
- * Returns the response result code.
- */
-static inline int
-ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
-{
- return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
-}
-
-/*
- * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
- * from response UPIU
- * @ucd_rsp_ptr: pointer to response UPIU
*
- * Return the data segment length.
+ * Return: UPIU type.
*/
-static inline unsigned int
-ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
+static inline enum upiu_response_transaction
+ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
- return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
- MASK_RSP_UPIU_DATA_SEG_LEN;
+ return ucd_rsp_ptr->header.transaction_code;
}
/**
@@ -917,12 +892,11 @@ ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
* The function checks if the device raised an exception event indicated in
* the Device Information field of response UPIU.
*
- * Returns true if exception is raised, false otherwise.
+ * Return: true if exception is raised, false otherwise.
*/
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
- return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
- MASK_RSP_EXCEPTION_EVENT;
+ return ucd_rsp_ptr->header.device_information & 1;
}
/**
@@ -993,12 +967,13 @@ static inline void ufshcd_hba_start(struct ufs_hba *hba)
* ufshcd_is_hba_active - Get controller state
* @hba: per adapter instance
*
- * Returns true if and only if the controller is active.
+ * Return: true if and only if the controller is active.
*/
-static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
+bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
}
+EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
@@ -1029,8 +1004,7 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
* @hba: per adapter instance
* @scale_up: If True, set max possible frequency otherwise set low frequency
*
- * Returns 0 if successful
- * Returns < 0 for any other errors
+ * Return: 0 if successful; < 0 upon failure.
*/
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
@@ -1092,8 +1066,7 @@ out:
* @hba: per adapter instance
* @scale_up: True if scaling up and false if scaling down
*
- * Returns 0 if successful
- * Returns < 0 for any other errors
+ * Return: 0 if successful; < 0 upon failure.
*/
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
@@ -1124,7 +1097,7 @@ out:
* @hba: per adapter instance
* @scale_up: True if scaling up and false if scaling down
*
- * Returns true if scaling is required, false otherwise.
+ * Return: true if scaling is required, false otherwise.
*/
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
bool scale_up)
@@ -1241,9 +1214,8 @@ out:
* @hba: per adapter instance
* @scale_up: True for scaling up gear and false for scaling down
*
- * Returns 0 for success,
- * Returns -EBUSY if scaling can't happen at this time
- * Returns non-zero for any other errors
+ * Return: 0 for success; -EBUSY if scaling can't happen at this time;
+ * non-zero for any other errors.
*/
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
@@ -1333,9 +1305,8 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
* @hba: per adapter instance
* @scale_up: True for scaling up and false for scaling down
*
- * Returns 0 for success,
- * Returns -EBUSY if scaling can't happen at this time
- * Returns non-zero for any other errors
+ * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
+ * for any other errors.
*/
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
@@ -2225,10 +2196,11 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
u8 *const sense_buffer = lrbp->cmd->sense_buffer;
+ u16 resp_len;
int len;
- if (sense_buffer &&
- ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+ resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header.data_segment_length);
+ if (sense_buffer && resp_len) {
int len_to_copy;
len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
@@ -2244,6 +2216,8 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
* descriptor
* @hba: per adapter instance
* @lrbp: pointer to local reference block
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
@@ -2261,8 +2235,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
u16 buf_len;
/* data segment length */
- resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
- MASK_QUERY_DATA_SEG_LEN;
+ resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
+ .data_segment_length);
buf_len = be16_to_cpu(
hba->dev_cmd.query.request.upiu_req.length);
if (likely(buf_len >= resp_len)) {
@@ -2320,7 +2294,8 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
* ufshcd_ready_for_uic_cmd - Check if controller is ready
* to accept UIC commands
* @hba: per adapter instance
- * Return true on success, else false
+ *
+ * Return: true on success, else false.
*/
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
@@ -2332,7 +2307,8 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
* @hba: Pointer to adapter instance
*
* This function gets the UPMCRS field of HCS register
- * Returns value of UPMCRS field
+ *
+ * Return: value of UPMCRS field.
*/
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
@@ -2370,7 +2346,7 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* @hba: per adapter instance
* @uic_cmd: UIC command
*
- * Returns 0 only if success.
+ * Return: 0 only if success.
*/
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
@@ -2409,7 +2385,7 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* @uic_cmd: UIC command
* @completion: initialize the completion only if this is set to true
*
- * Returns 0 only if success.
+ * Return: 0 only if success.
*/
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
@@ -2438,7 +2414,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
* @hba: per adapter instance
* @uic_cmd: UIC command
*
- * Returns 0 only if success.
+ * Return: 0 only if success.
*/
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
@@ -2515,7 +2491,7 @@ static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int
* @hba: per adapter instance
* @lrbp: pointer to local reference block
*
- * Returns 0 in case of success, non-zero value in case of failure
+ * Return: 0 in case of success, non-zero value in case of failure.
*/
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
@@ -2584,10 +2560,10 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
enum dma_data_direction cmd_dir, int ehs_length)
{
struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
- u32 data_direction;
- u32 dword_0;
- u32 dword_1 = 0;
- u32 dword_3 = 0;
+ struct request_desc_header *h = &req_desc->header;
+ enum utp_data_direction data_direction;
+
+ *h = (typeof(*h)){ };
if (cmd_dir == DMA_FROM_DEVICE) {
data_direction = UTP_DEVICE_TO_HOST;
@@ -2600,25 +2576,22 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
*upiu_flags = UPIU_CMD_FLAGS_NONE;
}
- dword_0 = data_direction | (lrbp->command_type << UPIU_COMMAND_TYPE_OFFSET) |
- ehs_length << 8;
+ h->command_type = lrbp->command_type;
+ h->data_direction = data_direction;
+ h->ehs_length = ehs_length;
+
if (lrbp->intr_cmd)
- dword_0 |= UTP_REQ_DESC_INT_CMD;
+ h->interrupt = 1;
/* Prepare crypto related dwords */
- ufshcd_prepare_req_desc_hdr_crypto(lrbp, &dword_0, &dword_1, &dword_3);
+ ufshcd_prepare_req_desc_hdr_crypto(lrbp, h);
- /* Transfer request descriptor header fields */
- req_desc->header.dword_0 = cpu_to_le32(dword_0);
- req_desc->header.dword_1 = cpu_to_le32(dword_1);
/*
* assigning invalid value for command status. Controller
* updates OCS on command completion, with the command
* status
*/
- req_desc->header.dword_2 =
- cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
- req_desc->header.dword_3 = cpu_to_le32(dword_3);
+ h->ocs = OCS_INVALID_COMMAND_STATUS;
req_desc->prd_table_length = 0;
}
@@ -2636,15 +2609,13 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
unsigned short cdb_len;
- /* command descriptor fields */
- ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
- UPIU_TRANSACTION_COMMAND, upiu_flags,
- lrbp->lun, lrbp->task_tag);
- ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
- UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
-
- /* Total EHS length and Data segment length will be zero */
- ucd_req_ptr->header.dword_2 = 0;
+ ucd_req_ptr->header = (struct utp_upiu_header){
+ .transaction_code = UPIU_TRANSACTION_COMMAND,
+ .flags = upiu_flags,
+ .lun = lrbp->lun,
+ .task_tag = lrbp->task_tag,
+ .command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
+ };
ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
@@ -2669,18 +2640,19 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
u16 len = be16_to_cpu(query->request.upiu_req.length);
/* Query request header */
- ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
- UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
- lrbp->lun, lrbp->task_tag);
- ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
- 0, query->request.query_func, 0, 0);
-
- /* Data segment length only need for WRITE_DESC */
- if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
- ucd_req_ptr->header.dword_2 =
- UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
- else
- ucd_req_ptr->header.dword_2 = 0;
+ ucd_req_ptr->header = (struct utp_upiu_header){
+ .transaction_code = UPIU_TRANSACTION_QUERY_REQ,
+ .flags = upiu_flags,
+ .lun = lrbp->lun,
+ .task_tag = lrbp->task_tag,
+ .query_function = query->request.query_func,
+ /* Data segment length is only needed for WRITE_DESC */
+ .data_segment_length =
+ query->request.upiu_req.opcode ==
+ UPIU_QUERY_OPCODE_WRITE_DESC ?
+ cpu_to_be16(len) :
+ 0,
+ };
/* Copy the Query Request buffer as is */
memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
@@ -2699,13 +2671,10 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
- /* command descriptor fields */
- ucd_req_ptr->header.dword_0 =
- UPIU_HEADER_DWORD(
- UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
- /* clear rest of the fields of basic header */
- ucd_req_ptr->header.dword_1 = 0;
- ucd_req_ptr->header.dword_2 = 0;
+ ucd_req_ptr->header = (struct utp_upiu_header){
+ .transaction_code = UPIU_TRANSACTION_NOP_OUT,
+ .task_tag = lrbp->task_tag,
+ };
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
@@ -2715,6 +2684,8 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
* for Device Management Purposes
* @hba: per adapter instance
* @lrbp: pointer to local reference block
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp)
@@ -2743,6 +2714,8 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
* for SCSI Purposes
* @hba: per adapter instance
* @lrbp: pointer to local reference block
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
@@ -2768,7 +2741,7 @@ static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
* @upiu_wlun_id: UPIU W-LUN id
*
- * Returns SCSI W-LUN id
+ * Return: SCSI W-LUN id.
*/
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
@@ -2839,7 +2812,7 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
* @host: SCSI host pointer
* @cmd: command from SCSI Midlayer
*
- * Returns 0 for success, non-zero in case of failure
+ * Return: 0 for success, non-zero in case of failure.
*/
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
@@ -2908,8 +2881,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->req_abort_skip = false;
- ufshpb_prep(hba, lrbp);
-
ufshcd_comp_scsi_upiu(hba, lrbp);
err = ufshcd_map_sg(hba, lrbp);
@@ -2952,7 +2923,7 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
* Check with the block layer if the command is inflight
* @cmd: command to check.
*
- * Returns true if command is inflight; false if not.
+ * Return: true if command is inflight; false if not.
*/
bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
{
@@ -3007,26 +2978,17 @@ static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
mask, ~mask, 1000, 1000);
}
-static int
-ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
-
- /* Get the UPIU response */
- query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
- UPIU_RSP_CODE_OFFSET;
- return query_res->response;
-}
-
/**
* ufshcd_dev_cmd_completion() - handles device management command responses
* @hba: per adapter instance
* @lrbp: pointer to local reference block
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
- int resp;
+ enum upiu_response_transaction resp;
int err = 0;
hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
@@ -3040,11 +3002,13 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
__func__, resp);
}
break;
- case UPIU_TRANSACTION_QUERY_RSP:
- err = ufshcd_check_query_response(hba, lrbp);
- if (!err)
+ case UPIU_TRANSACTION_QUERY_RSP: {
+ u8 response = lrbp->ucd_rsp_ptr->header.response;
+
+ if (response == 0)
err = ufshcd_copy_query_response(hba, lrbp);
break;
+ }
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
err = -EPERM;
@@ -3159,6 +3123,8 @@ retry:
* @cmd_type: specifies the type (NOP, Query...)
* @timeout: timeout in milliseconds
*
+ * Return: 0 upon success; < 0 upon failure.
+ *
* NOTE: Since there is only one available tag for device management commands,
* it is expected you hold the hba->dev_cmd.lock mutex.
*/
@@ -3250,7 +3216,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
* @index: flag index to access
* @flag_res: the flag value after the query request completes
*
- * Returns 0 for success, non-zero in case of failure
+ * Return: 0 for success, non-zero in case of failure.
*/
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res)
@@ -3319,7 +3285,7 @@ out_unlock:
* @selector: selector field
* @attr_val: the attribute value after the query request completes
*
- * Returns 0 for success, non-zero in case of failure
+ * Return: 0 for success, non-zero in case of failure.
*/
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
@@ -3384,7 +3350,7 @@ out_unlock:
* @attr_val: the attribute value after the query request
* completes
*
- * Returns 0 for success, non-zero in case of failure
+ * Return: 0 for success, non-zero in case of failure.
*/
int ufshcd_query_attr_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
@@ -3482,9 +3448,10 @@ out_unlock:
* @desc_buf: the buffer that contains the descriptor
* @buf_len: length parameter passed to the device
*
- * Returns 0 for success, non-zero in case of failure.
* The buf_len parameter will contain, on return, the length parameter
* received on the response.
+ *
+ * Return: 0 for success, non-zero in case of failure.
*/
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
enum query_opcode opcode,
@@ -3514,7 +3481,7 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
* @param_read_buf: pointer to buffer where parameter would be read
* @param_size: sizeof(param_read_buf)
*
- * Return 0 in case of success, non-zero otherwise
+ * Return: 0 in case of success, non-zero otherwise.
*/
int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
@@ -3694,7 +3661,7 @@ out:
* @param_read_buf: pointer to buffer where parameter would be read
* @param_size: sizeof(param_read_buf)
*
- * Return 0 in case of success, non-zero otherwise
+ * Return: 0 in case of success, non-zero otherwise.
*/
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
int lun,
@@ -3749,7 +3716,7 @@ static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
* (UTMRDL)
* 4. Allocate memory for local reference block(lrb).
*
- * Returns 0 for success, non-zero in case of failure
+ * Return: 0 for success, non-zero in case of failure.
*/
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
@@ -3896,7 +3863,7 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
* Once the Unipro links are up, the device connected to the controller
* is detected.
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
@@ -3918,7 +3885,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
* DME_RESET command is issued in order to reset UniPro stack.
* This function now deals with cold reset.
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_dme_reset(struct ufs_hba *hba)
{
@@ -3957,7 +3924,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
*
* DME_ENABLE command is issued in order to enable UniPro stack.
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_dme_enable(struct ufs_hba *hba)
{
@@ -4013,7 +3980,7 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
* @mib_val: setting value as uic command argument3
* @peer: indicate whether peer or local
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
u8 attr_set, u32 mib_val, u8 peer)
@@ -4057,7 +4024,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
* @mib_val: the value of the attribute as returned by the UIC command
* @peer: indicate whether peer or local
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
u32 *mib_val, u8 peer)
@@ -4138,7 +4105,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
* addition to normal UIC command completion Status (UCCS). This function only
* returns after the relevant status bits indicate the completion.
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
@@ -4228,7 +4195,7 @@ out_unlock:
* @hba: per adapter instance
* @mode: power mode value
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
@@ -4380,8 +4347,8 @@ static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
hba->pwr_info.gear_rx = UFS_PWM_G1;
hba->pwr_info.gear_tx = UFS_PWM_G1;
- hba->pwr_info.lane_rx = 1;
- hba->pwr_info.lane_tx = 1;
+ hba->pwr_info.lane_rx = UFS_LANE_1;
+ hba->pwr_info.lane_tx = UFS_LANE_1;
hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
hba->pwr_info.hs_rate = 0;
@@ -4390,6 +4357,8 @@ static void ufshcd_init_pwr_info(struct ufs_hba *hba)
/**
* ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
* @hba: per-adapter instance
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
@@ -4547,6 +4516,8 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
* ufshcd_config_pwr_mode - configure a new power mode
* @hba: per-adapter instance
* @desired_pwr_mode: desired power configuration
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
int ufshcd_config_pwr_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *desired_pwr_mode)
@@ -4571,6 +4542,8 @@ EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
* @hba: per-adapter instance
*
* Set fDeviceInit flag and poll until device toggles it.
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
@@ -4621,7 +4594,7 @@ out:
* 3. Program UTRL and UTMRL base address
* 4. Configure run-stop-registers
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
@@ -4702,7 +4675,7 @@ EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
* sequence kicks off. When controller is ready it will set
* the Host Controller Enable bit to 1.
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
@@ -4847,7 +4820,7 @@ EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
* ufshcd_link_startup - Initialize unipro link startup
* @hba: per adapter instance
*
- * Returns 0 for success, non-zero in case of failure
+ * Return: 0 for success, non-zero in case of failure.
*/
static int ufshcd_link_startup(struct ufs_hba *hba)
{
@@ -4942,6 +4915,8 @@ out:
* If the UTP layer at the device side is not initialized, it may
* not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
* and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
@@ -5066,7 +5041,7 @@ set_qdepth:
* ufshcd_slave_alloc - handle initial SCSI device configurations
* @sdev: pointer to SCSI device
*
- * Returns success
+ * Return: success.
*/
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
@@ -5102,43 +5077,25 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
* @depth: required depth to set
*
* Change queue depth and make sure the max. limits are not crossed.
+ *
+ * Return: new queue depth.
*/
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
}
-static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
-{
- /* skip well-known LU */
- if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
- !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
- return;
-
- ufshpb_destroy_lu(hba, sdev);
-}
-
-static void ufshcd_hpb_configure(struct ufs_hba *hba, struct scsi_device *sdev)
-{
- /* skip well-known LU */
- if ((sdev->lun >= UFS_UPIU_MAX_UNIT_NUM_ID) ||
- !(hba->dev_info.hpb_enabled) || !ufshpb_is_allowed(hba))
- return;
-
- ufshpb_init_hpb_lu(hba, sdev);
-}
-
/**
* ufshcd_slave_configure - adjust SCSI device configurations
* @sdev: pointer to SCSI device
+ *
+ * Return: 0 (success).
*/
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
- ufshcd_hpb_configure(hba, sdev);
-
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
blk_queue_update_dma_alignment(q, SZ_4K - 1);
@@ -5173,8 +5130,6 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
hba = shost_priv(sdev->host);
- ufshcd_hpb_destroy(hba, sdev);
-
/* Drop the reference as it won't be needed anymore */
if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -5208,7 +5163,7 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
* @lrbp: pointer to local reference block of completed command
* @scsi_status: SCSI command status
*
- * Returns value base on SCSI command status
+ * Return: value based on SCSI command status.
*/
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
@@ -5242,7 +5197,7 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
* @lrbp: pointer to local reference block of completed command
* @cqe: pointer to the completion queue entry
*
- * Returns result of the command to notify SCSI midlayer
+ * Return: result of the command to notify SCSI midlayer.
*/
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
@@ -5251,36 +5206,37 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
int result = 0;
int scsi_status;
enum utp_ocs ocs;
+ u8 upiu_flags;
+ u32 resid;
- scsi_set_resid(lrbp->cmd,
- be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count));
+ upiu_flags = lrbp->ucd_rsp_ptr->header.flags;
+ resid = be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count);
+ /*
+ * Test !overflow instead of underflow to support UFS devices that do
+ * not set either flag.
+ */
+ if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
+ scsi_set_resid(lrbp->cmd, resid);
/* overall command status of utrd */
ocs = ufshcd_get_tr_ocs(lrbp, cqe);
if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
- if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
- MASK_RSP_UPIU_RESULT)
+ if (lrbp->ucd_rsp_ptr->header.response ||
+ lrbp->ucd_rsp_ptr->header.status)
ocs = OCS_SUCCESS;
}
switch (ocs) {
case OCS_SUCCESS:
- result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
- switch (result) {
+ switch (ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr)) {
case UPIU_TRANSACTION_RESPONSE:
/*
- * get the response UPIU result to extract
- * the SCSI command status
- */
- result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
-
- /*
* get the result based on SCSI status response
* to notify the SCSI midlayer of the command status
*/
- scsi_status = result & MASK_SCSI_STATUS;
+ scsi_status = lrbp->ucd_rsp_ptr->header.status;
result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
/*
@@ -5300,9 +5256,6 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
/* Flushed in suspend */
schedule_work(&hba->eeh_work);
-
- if (scsi_status == SAM_STAT_GOOD)
- ufshpb_rsp_upiu(hba, lrbp);
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
@@ -5372,7 +5325,7 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
* @hba: per adapter instance
* @intr_status: interrupt status generated by the controller
*
- * Returns
+ * Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
@@ -5448,8 +5401,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
if (hba->dev_cmd.complete) {
if (cqe) {
ocs = le32_to_cpu(cqe->status) & MASK_OCS;
- lrbp->utr_descriptor_ptr->header.dword_2 =
- cpu_to_le32(ocs);
+ lrbp->utr_descriptor_ptr->header.ocs = ocs;
}
complete(hba->dev_cmd.complete);
ufshcd_clk_scaling_update_busy(hba);
@@ -5492,7 +5444,7 @@ static void ufshcd_clear_polled(struct ufs_hba *hba,
}
/*
- * Returns > 0 if one or more commands have been completed or 0 if no
+ * Return: > 0 if one or more commands have been completed or 0 if no
* requests have been completed.
*/
static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
@@ -5582,7 +5534,7 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
*
- * Returns
+ * Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
@@ -5659,7 +5611,7 @@ int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
* Disables exception event in the device so that the EVENT_ALERT
* bit is not set.
*
- * Returns zero on success, non-zero error value on failure.
+ * Return: zero on success, non-zero error value on failure.
*/
static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
@@ -5674,7 +5626,7 @@ static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
* Enable corresponding exception event in the device to allow
* device to alert host in critical scenarios.
*
- * Returns zero on success, non-zero error value on failure.
+ * Return: zero on success, non-zero error value on failure.
*/
static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
@@ -5690,7 +5642,7 @@ static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
* as the device is allowed to manage its own way of handling background
* operations.
*
- * Returns zero on success, non-zero on failure.
+ * Return: zero on success, non-zero on failure.
*/
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
@@ -5729,7 +5681,7 @@ out:
* host is idle so that BKOPS are managed effectively without any negative
* impacts.
*
- * Returns zero on success, non-zero on failure.
+ * Return: zero on success, non-zero on failure.
*/
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
@@ -5805,7 +5757,7 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
* bkops_status is greater than or equal to "status" argument passed to
* this function, disable otherwise.
*
- * Returns 0 for success, non-zero in case of failure.
+ * Return: 0 for success, non-zero in case of failure.
*
* NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
* to know whether auto bkops is enabled or disabled after this function
@@ -5846,6 +5798,8 @@ out:
*
 * If BKOPs is enabled, this function returns 0, 1 if the bkops is not enabled
* and negative error value for any other failure.
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
@@ -6157,7 +6111,7 @@ static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
* to recover from the DL NAC errors or not.
* @hba: per-adapter instance
*
- * Returns true if error handling is required, false otherwise
+ * Return: true if error handling is required, false otherwise.
*/
static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
{
@@ -6384,54 +6338,48 @@ static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
return false;
}
+static bool ufshcd_abort_one(struct request *rq, void *priv)
+{
+ int *ret = priv;
+ u32 tag = rq->tag;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct scsi_device *sdev = cmd->device;
+ struct Scsi_Host *shost = sdev->host;
+ struct ufs_hba *hba = shost_priv(shost);
+
+ *ret = ufshcd_try_to_abort_task(hba, tag);
+ dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
+ hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
+ *ret ? "failed" : "succeeded");
+ return *ret == 0;
+}
+
+/**
+ * ufshcd_abort_all - Abort all pending commands.
+ * @hba: Host bus adapter pointer.
+ *
+ * Return: true if and only if the host controller needs to be reset.
+ */
static bool ufshcd_abort_all(struct ufs_hba *hba)
{
- bool needs_reset = false;
- int tag, ret;
+ int tag, ret = 0;
- if (is_mcq_enabled(hba)) {
- struct ufshcd_lrb *lrbp;
- int tag;
+ blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret);
+ if (ret)
+ goto out;
- for (tag = 0; tag < hba->nutrs; tag++) {
- lrbp = &hba->lrb[tag];
- if (!ufshcd_cmd_inflight(lrbp->cmd))
- continue;
- ret = ufshcd_try_to_abort_task(hba, tag);
- dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
- hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
- ret ? "failed" : "succeeded");
- if (ret) {
- needs_reset = true;
- goto out;
- }
- }
- } else {
- /* Clear pending transfer requests */
- for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
- ret = ufshcd_try_to_abort_task(hba, tag);
- dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
- hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
- ret ? "failed" : "succeeded");
- if (ret) {
- needs_reset = true;
- goto out;
- }
- }
- }
/* Clear pending task management requests */
for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
- if (ufshcd_clear_tm_cmd(hba, tag)) {
- needs_reset = true;
+ ret = ufshcd_clear_tm_cmd(hba, tag);
+ if (ret)
goto out;
- }
}
out:
/* Complete the requests that are cleared by s/w */
ufshcd_complete_requests(hba, false);
- return needs_reset;
+ return ret != 0;
}
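ufshcd_abort_all() now delegates the walk over in-flight commands to blk_mq_tagset_busy_iter(), which invokes the supplied callback for every busy request in the tag set and stops as soon as the callback returns false (which is why ufshcd_abort_one() returns *ret == 0). A small sketch of that iterator pattern, using a hypothetical counting callback rather than this driver's abort logic:

#include <linux/blk-mq.h>

/* Hypothetical callback: count busy requests; returning true keeps iterating. */
static bool demo_count_one(struct request *rq, void *priv)
{
	unsigned int *count = priv;

	(*count)++;
	return true;		/* return false to stop the walk early */
}

/* Sketch only: count every request currently in flight on a tag set. */
static unsigned int demo_count_busy(struct blk_mq_tag_set *tag_set)
{
	unsigned int count = 0;

	blk_mq_tagset_busy_iter(tag_set, demo_count_one, &count);
	return count;
}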
/**
@@ -6618,7 +6566,7 @@ skip_err_handling:
* ufshcd_update_uic_error - check and set fatal UIC error flags.
* @hba: per-adapter instance
*
- * Returns
+ * Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
@@ -6711,7 +6659,7 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
* @hba: per-adapter instance
* @intr_status: interrupt status generated by the controller
*
- * Returns
+ * Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
@@ -6787,7 +6735,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
* ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance
*
- * Returns
+ * Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
@@ -6816,7 +6764,7 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
* ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
* @hba: per adapter instance
*
- * Returns IRQ_HANDLED if interrupt is handled
+ * Return: IRQ_HANDLED if interrupt is handled.
*/
static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
{
@@ -6851,7 +6799,7 @@ static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
*
- * Returns
+ * Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
@@ -6882,7 +6830,7 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
* @irq: irq number
* @__hba: pointer to adapter instance
*
- * Returns
+ * Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
@@ -6978,7 +6926,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
task_tag);
hba->tmf_rqs[req->tag] = req;
- treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
+ treq->upiu_req.req_header.task_tag = task_tag;
memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
@@ -7031,23 +6979,23 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
* @tm_function: task management function opcode
* @tm_response: task management service response return value
*
- * Returns non-zero value on error, zero on success.
+ * Return: non-zero value on error, zero on success.
*/
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
u8 tm_function, u8 *tm_response)
{
- struct utp_task_req_desc treq = { { 0 }, };
+ struct utp_task_req_desc treq = { };
enum utp_ocs ocs_value;
int err;
/* Configure task request descriptor */
- treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
- treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+ treq.header.interrupt = 1;
+ treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
/* Configure task request UPIU */
- treq.upiu_req.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
- cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
- treq.upiu_req.req_header.dword_1 = cpu_to_be32(tm_function << 16);
+ treq.upiu_req.req_header.transaction_code = UPIU_TRANSACTION_TASK_REQ;
+ treq.upiu_req.req_header.lun = lun_id;
+ treq.upiu_req.req_header.tm_function = tm_function;
/*
* The host shall provide the same value for LUN field in the basic
@@ -7060,7 +7008,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
if (err == -ETIMEDOUT)
return err;
- ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
+ ocs_value = treq.header.ocs & MASK_OCS;
if (ocs_value != OCS_SUCCESS)
dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
__func__, ocs_value);
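The hunks above replace manual shifting into big-endian dwords (dword_0, dword_1, dword_2) with assignments to named members such as transaction_code, lun, tm_function and task_tag, relying on the header structs to describe the on-the-wire layout directly. A self-contained sketch of that refactoring idea; the field names and byte positions below are illustrative, not the actual ufs.h definitions:

#include <stdint.h>
#include <string.h>

/* Illustrative request header whose members map one byte each onto the wire. */
struct demo_req_header {
	uint8_t transaction_code;	/* byte 0 */
	uint8_t flags;			/* byte 1 */
	uint8_t lun;			/* byte 2 */
	uint8_t task_tag;		/* byte 3 */
};

/* Old style: pack the same fields into a 32-bit word by hand. */
static uint32_t demo_pack_dword0(uint8_t code, uint8_t flags, uint8_t lun, uint8_t tag)
{
	return ((uint32_t)code << 24) | ((uint32_t)flags << 16) |
	       ((uint32_t)lun << 8) | tag;
}

/* New style: assign named members; no shifting or endian fixups at call sites. */
static void demo_fill_header(struct demo_req_header *h, uint8_t code,
			     uint8_t lun, uint8_t tag)
{
	memset(h, 0, sizeof(*h));
	h->transaction_code = code;
	h->lun = lun;
	h->task_tag = tag;
}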
@@ -7086,6 +7034,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
*
* Since there is only one available tag for device management commands,
* the caller is expected to hold the hba->dev_cmd.lock mutex.
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
struct utp_upiu_req *req_upiu,
@@ -7119,7 +7069,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
/* update the task tag in the request upiu */
- req_upiu->header.dword_0 |= cpu_to_be32(tag);
+ req_upiu->header.task_tag = tag;
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
@@ -7152,8 +7102,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
- u16 resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
- MASK_QUERY_DATA_SEG_LEN;
+ u16 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
+ .data_segment_length);
if (*buff_len >= resp_len) {
memcpy(desc_buff, descp, resp_len);
@@ -7187,19 +7137,21 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
* Management requests.
 * It is up to the caller to fill the upiu content properly, as it will
* be copied without any further input validations.
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu,
- int msgcode,
+ enum upiu_request_transaction msgcode,
u8 *desc_buff, int *buff_len,
enum query_opcode desc_op)
{
int err;
enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
- struct utp_task_req_desc treq = { { 0 }, };
+ struct utp_task_req_desc treq = { };
enum utp_ocs ocs_value;
- u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
+ u8 tm_f = req_upiu->header.tm_function;
switch (msgcode) {
case UPIU_TRANSACTION_NOP_OUT:
@@ -7216,8 +7168,8 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
break;
case UPIU_TRANSACTION_TASK_REQ:
- treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
- treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+ treq.header.interrupt = 1;
+ treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
@@ -7225,7 +7177,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
if (err == -ETIMEDOUT)
break;
- ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
+ ocs_value = treq.header.ocs & MASK_OCS;
if (ocs_value != OCS_SUCCESS) {
dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
ocs_value);
@@ -7255,7 +7207,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
* @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
* @dir: DMA direction
*
- * Returns zero on success, non-zero on failure
+ * Return: zero on success, non-zero on failure.
*/
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
@@ -7291,7 +7243,9 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
/* update the task tag and LUN in the request upiu */
- req_upiu->header.dword_0 |= cpu_to_be32(upiu_flags << 16 | UFS_UPIU_RPMB_WLUN << 8 | tag);
+ req_upiu->header.flags = upiu_flags;
+ req_upiu->header.lun = UFS_UPIU_RPMB_WLUN;
+ req_upiu->header.task_tag = tag;
/* copy the UPIU(contains CDB) request as it is */
memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
@@ -7313,9 +7267,10 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
/* Just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
/* Get the response UPIU result */
- result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+ result = (lrbp->ucd_rsp_ptr->header.response << 8) |
+ lrbp->ucd_rsp_ptr->header.status;
- ehs_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) >> 24;
+ ehs_len = lrbp->ucd_rsp_ptr->header.ehs_length;
/*
* Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
* in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
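As the comment above notes, the EHS bLength field is expressed in 32-byte units, so turning it into a byte count is a single multiplication. A trivial, hedged helper illustrating the conversion (the name is made up for this sketch):

/* Illustrative only: an ehs_length of 2 units corresponds to 2 * 32 = 64 bytes. */
static inline unsigned int demo_ehs_length_to_bytes(unsigned int ehs_length_units)
{
	return ehs_length_units * 32;
}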
@@ -7341,7 +7296,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
* ufshcd_eh_device_reset_handler() - Reset a single logical unit.
* @cmd: SCSI command pointer
*
- * Returns SUCCESS/FAILED
+ * Return: SUCCESS or FAILED.
*/
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
@@ -7436,7 +7391,7 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
* issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
* really issued and then try to abort it.
*
- * Returns zero on success, non-zero on failure
+ * Return: zero on success, non-zero on failure.
*/
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
{
@@ -7524,7 +7479,7 @@ out:
* ufshcd_abort - scsi host template eh_abort_handler callback
* @cmd: SCSI command pointer
*
- * Returns SUCCESS/FAILED
+ * Return: SUCCESS or FAILED.
*/
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
@@ -7649,7 +7604,7 @@ release:
* local and remote (device) Uni-Pro stack and the attributes
* are reset to default state.
*
- * Returns zero on success, non-zero on failure
+ * Return: zero on success, non-zero on failure.
*/
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
@@ -7659,7 +7614,6 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
* Stop the host controller and complete the requests
* cleared by h/w
*/
- ufshpb_toggle_state(hba, HPB_PRESENT, HPB_RESET);
ufshcd_hba_stop(hba);
hba->silence_err_logs = true;
ufshcd_complete_requests(hba, true);
@@ -7687,7 +7641,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
* Reset and recover device, host and re-establish link. This
* is helpful to recover the communication in fatal error conditions.
*
- * Returns zero on success, non-zero on failure
+ * Return: zero on success, non-zero on failure.
*/
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
@@ -7745,7 +7699,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
* ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
* @cmd: SCSI command pointer
*
- * Returns SUCCESS/FAILED
+ * Return: SUCCESS or FAILED.
*/
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
@@ -7777,7 +7731,7 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
* @start_scan: row at the desc table to start scan from
* @buff: power descriptor buffer
*
- * Returns calculated max ICC level for specific regulator
+ * Return: calculated max ICC level for specific regulator.
*/
static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
const char *buff)
@@ -7823,7 +7777,7 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
* @hba: per-adapter instance
* @desc_buf: power descriptor buffer to extract ICC levels from.
*
- * Returns calculated ICC level
+ * Return: calculated ICC level.
*/
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
const u8 *desc_buf)
@@ -7932,7 +7886,7 @@ static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
 * This function adds scsi device instances for all the well known LUs
* (except "REPORT LUNS" LU).
*
- * Returns zero on success (all required W-LUs are added successfully),
+ * Return: zero on success (all required W-LUs are added successfully),
* non-zero error value on failure (if failed to add any of the required W-LU).
*/
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
@@ -8122,7 +8076,6 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
{
int err;
u8 model_index;
- u8 b_ufs_feature_sup;
u8 *desc_buf;
struct ufs_dev_info *dev_info = &hba->dev_info;
@@ -8151,26 +8104,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
- b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
- if (dev_info->wspecversion >= UFS_DEV_HPB_SUPPORT_VERSION &&
- (b_ufs_feature_sup & UFS_DEV_HPB_SUPPORT)) {
- bool hpb_en = false;
-
- ufshpb_get_dev_info(hba, desc_buf);
-
- if (!ufshpb_is_legacy(hba))
- err = ufshcd_query_flag_retry(hba,
- UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_HPB_EN, 0,
- &hpb_en);
-
- if (ufshpb_is_legacy(hba) || (!err && hpb_en))
- dev_info->hpb_enabled = true;
- }
-
err = ufshcd_read_string_desc(hba, model_index,
&dev_info->model, SD_ASCII_STD);
if (err < 0) {
@@ -8219,7 +8155,7 @@ static void ufs_put_device_desc(struct ufs_hba *hba)
* RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
* the hibern8 exit latency.
*
- * Returns zero on success, non-zero error value on failure.
+ * Return: zero on success, non-zero error value on failure.
*/
static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
{
@@ -8254,7 +8190,7 @@ out:
* TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
* This optimal value can help reduce the hibern8 exit latency.
*
- * Returns zero on success, non-zero error value on failure.
+ * Return: zero on success, non-zero error value on failure.
*/
static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
{
@@ -8296,7 +8232,7 @@ out:
* PA_TACTIVATE, we need to enable UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
* for such devices.
*
- * Returns zero on success, non-zero error value on failure.
+ * Return: zero on success, non-zero error value on failure.
*/
static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
{
@@ -8405,10 +8341,6 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
hba->dev_info.max_lu_supported = 8;
- if (desc_buf[QUERY_DESC_LENGTH_OFFSET] >=
- GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
- ufshpb_get_geo_info(hba, desc_buf);
-
out:
kfree(desc_buf);
return err;
@@ -8558,6 +8490,8 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
/**
* ufshcd_add_lus - probe and add UFS logical units
* @hba: per-adapter instance
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_add_lus(struct ufs_hba *hba)
{
@@ -8584,7 +8518,6 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
}
ufs_bsg_probe(hba);
- ufshpb_init(hba);
scsi_scan_host(hba->host);
pm_runtime_put_sync(hba->dev);
@@ -8770,6 +8703,8 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
* @init_dev_params: whether or not to call ufshcd_device_params_init().
*
* Execute link-startup and verify device initialization
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
{
@@ -8818,7 +8753,6 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
- ufshpb_toggle_state(hba, HPB_RESET, HPB_PRESENT);
out:
spin_lock_irqsave(hba->host->host_lock, flags);
if (ret)
@@ -8888,10 +8822,6 @@ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
static const struct attribute_group *ufshcd_driver_groups[] = {
&ufs_sysfs_unit_descriptor_group,
&ufs_sysfs_lun_attributes_group,
-#ifdef CONFIG_SCSI_UFS_HPB
- &ufs_sysfs_hpb_stat_group,
- &ufs_sysfs_hpb_param_group,
-#endif
NULL,
};
@@ -9235,8 +9165,9 @@ static int ufshcd_variant_hba_init(struct ufs_hba *hba)
err = ufshcd_vops_init(hba);
if (err)
- dev_err(hba->dev, "%s: variant %s init failed err %d\n",
- __func__, ufshcd_get_var_name(hba), err);
+ dev_err_probe(hba->dev, err,
+ "%s: variant %s init failed with err %d\n",
+ __func__, ufshcd_get_var_name(hba), err);
out:
return err;
}
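Switching to dev_err_probe() routes probe-time failures through one helper: for -EPROBE_DEFER it records the deferral reason instead of spamming the log, for other errors it prints via dev_err(), and in both cases it returns the error so the call can be folded into the return path. A minimal sketch of the usual calling pattern; the probe helper below is hypothetical, not part of this driver:

#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical probe step: report failure to obtain some resource. */
static int demo_probe_step(struct device *dev, void *res)
{
	if (IS_ERR(res))
		return dev_err_probe(dev, PTR_ERR(res),
				     "failed to acquire resource\n");
	return 0;
}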
@@ -9345,8 +9276,8 @@ static int ufshcd_execute_start_stop(struct scsi_device *sdev,
* @hba: per adapter instance
* @pwr_mode: device power mode to set
*
- * Returns 0 if requested power mode is set successfully
- * Returns < 0 if failed to set the requested power mode
+ * Return: 0 if requested power mode is set successfully;
+ * < 0 if failed to set the requested power mode.
*/
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
enum ufs_dev_pwr_mode pwr_mode)
@@ -9576,8 +9507,6 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
req_link_state = UIC_LINK_OFF_STATE;
}
- ufshpb_suspend(hba);
-
/*
* If we can't transition into any of the low power modes
* just gate the clocks.
@@ -9731,7 +9660,6 @@ out:
ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
hba->clk_gating.is_suspended = false;
ufshcd_release(hba);
- ufshpb_resume(hba);
}
hba->pm_op_in_progress = false;
return ret;
@@ -9812,7 +9740,6 @@ static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
- ufshpb_resume(hba);
goto out;
set_old_link_state:
@@ -9934,6 +9861,8 @@ out:
*
 * This function will disable irqs, turn off clocks
* and set vreg and hba-vreg in lpm mode.
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_suspend(struct ufs_hba *hba)
{
@@ -9971,7 +9900,7 @@ static int ufshcd_suspend(struct ufs_hba *hba)
* This function basically turns on the regulators, clocks and
* irqs of the hba.
*
- * Returns 0 for success and non-zero for failure
+ * Return: 0 for success and non-zero for failure.
*/
static int ufshcd_resume(struct ufs_hba *hba)
{
@@ -10012,7 +9941,7 @@ out:
* Executed before putting the system into a sleep state in which the contents
* of main memory are preserved.
*
- * Returns 0 for success and non-zero for failure
+ * Return: 0 for success and non-zero for failure.
*/
int ufshcd_system_suspend(struct device *dev)
{
@@ -10039,7 +9968,7 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
* Executed after waking the system up from a sleep state in which the contents
* of main memory were preserved.
*
- * Returns 0 for success and non-zero for failure
+ * Return: 0 for success and non-zero for failure.
*/
int ufshcd_system_resume(struct device *dev)
{
@@ -10069,7 +9998,7 @@ EXPORT_SYMBOL(ufshcd_system_resume);
*
* Check the description of ufshcd_suspend() function for more details.
*
- * Returns 0 for success and non-zero for failure
+ * Return: 0 for success and non-zero for failure.
*/
int ufshcd_runtime_suspend(struct device *dev)
{
@@ -10095,6 +10024,8 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
*
* 1. Turn on all the controller related clocks
* 2. Turn ON VCC rail
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
int ufshcd_runtime_resume(struct device *dev)
{
@@ -10152,7 +10083,6 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_rpm_get_sync(hba);
ufs_hwmon_remove(hba);
ufs_bsg_remove(hba);
- ufshpb_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
blk_mq_destroy_queue(hba->tmf_queue);
blk_put_queue(hba->tmf_queue);
@@ -10230,7 +10160,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
* addressing capability
* @hba: per adapter instance
*
- * Returns 0 for success, non-zero for failure
+ * Return: 0 for success, non-zero for failure.
*/
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
@@ -10245,7 +10175,8 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
* ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
* @dev: pointer to device handle
* @hba_handle: driver private handle
- * Returns 0 on success, non-zero value on failure
+ *
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
@@ -10301,7 +10232,8 @@ static const struct blk_mq_ops ufshcd_tmf_ops = {
* @hba: per-adapter instance
* @mmio_base: base register address
* @irq: Interrupt line of device
- * Returns 0 on success, non-zero value on failure
+ *
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
@@ -10632,6 +10564,53 @@ static const struct dev_pm_ops ufshcd_wl_pm_ops = {
SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
};
+static void ufshcd_check_header_layout(void)
+{
+ /*
+ * gcc compilers before version 10 cannot do constant-folding for
+ * sub-byte bitfields. Hence skip the layout checks for gcc 9 and
+ * before.
+ */
+ if (IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 100000)
+ return;
+
+ BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
+ .cci = 3})[0] != 3);
+
+ BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
+ .ehs_length = 2})[1] != 2);
+
+ BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
+ .enable_crypto = 1})[2]
+ != 0x80);
+
+ BUILD_BUG_ON((((u8 *)&(struct request_desc_header){
+ .command_type = 5,
+ .data_direction = 3,
+ .interrupt = 1,
+ })[3]) != ((5 << 4) | (3 << 1) | 1));
+
+ BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
+ .dunl = cpu_to_le32(0xdeadbeef)})[1] !=
+ cpu_to_le32(0xdeadbeef));
+
+ BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
+ .ocs = 4})[8] != 4);
+
+ BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
+ .cds = 5})[9] != 5);
+
+ BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
+ .dunu = cpu_to_le32(0xbadcafe)})[3] !=
+ cpu_to_le32(0xbadcafe));
+
+ BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
+ .iid = 0xf })[4] != 0xf0);
+
+ BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
+ .command_set_type = 0xf })[4] != 0xf);
+}
+
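ufshcd_check_header_layout() verifies at compile time that each named bitfield lands on the expected byte and bit position: it builds a compound literal with a single field set and inspects the raw bytes with BUILD_BUG_ON(). The same idea can be demonstrated as an ordinary userspace program with runtime asserts; the two-field header below is illustrative (it merely mimics the iid/command_set_type check above) and assumes a little-endian GCC/Clang bitfield layout:

#include <assert.h>
#include <stdint.h>

/* Illustrative layout: low nibble = command_set_type, high nibble = iid (byte 0). */
struct demo_upiu_header {
	uint8_t command_set_type : 4;
	uint8_t iid : 4;
	uint8_t task_tag;		/* byte 1 */
};

int main(void)
{
	/* Set one field at a time and check which raw byte/bits it occupies. */
	assert(((uint8_t *)&(struct demo_upiu_header){ .iid = 0xf })[0] == 0xf0);
	assert(((uint8_t *)&(struct demo_upiu_header){ .command_set_type = 0xf })[0] == 0x0f);
	assert(((uint8_t *)&(struct demo_upiu_header){ .task_tag = 5 })[1] == 5);
	return 0;
}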
/*
* ufs_dev_wlun_template - describes ufs device wlun
* ufs-device wlun - used to send pm commands
@@ -10657,6 +10636,8 @@ static int __init ufshcd_core_init(void)
{
int ret;
+ ufshcd_check_header_layout();
+
ufs_debugfs_init();
ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c
deleted file mode 100644
index 255f8b38d0c2..000000000000
--- a/drivers/ufs/core/ufshpb.c
+++ /dev/null
@@ -1,2668 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Universal Flash Storage Host Performance Booster
- *
- * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
- *
- * Authors:
- * Yongmyung Lee <ymhungry.lee@samsung.com>
- * Jinyoung Choi <j-young.choi@samsung.com>
- */
-
-#include <asm/unaligned.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <scsi/scsi_cmnd.h>
-
-#include "ufshcd-priv.h"
-#include "ufshpb.h"
-#include "../../scsi/sd.h"
-
-#define ACTIVATION_THRESHOLD 8 /* 8 IOs */
-#define READ_TO_MS 1000
-#define READ_TO_EXPIRIES 100
-#define POLLING_INTERVAL_MS 200
-#define THROTTLE_MAP_REQ_DEFAULT 1
-
-/* memory management */
-static struct kmem_cache *ufshpb_mctx_cache;
-static mempool_t *ufshpb_mctx_pool;
-static mempool_t *ufshpb_page_pool;
-/* A cache size of 2MB can cache ppn in the 1GB range. */
-static unsigned int ufshpb_host_map_kbytes = SZ_2K;
-static int tot_active_srgn_pages;
-
-static struct workqueue_struct *ufshpb_wq;
-
-static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
- int srgn_idx);
-
-bool ufshpb_is_allowed(struct ufs_hba *hba)
-{
- return !(hba->ufshpb_dev.hpb_disabled);
-}
-
-/* HPB version 1.0 is called as legacy version. */
-bool ufshpb_is_legacy(struct ufs_hba *hba)
-{
- return hba->ufshpb_dev.is_legacy;
-}
-
-static struct ufshpb_lu *ufshpb_get_hpb_data(struct scsi_device *sdev)
-{
- return sdev->hostdata;
-}
-
-static int ufshpb_get_state(struct ufshpb_lu *hpb)
-{
- return atomic_read(&hpb->hpb_state);
-}
-
-static void ufshpb_set_state(struct ufshpb_lu *hpb, int state)
-{
- atomic_set(&hpb->hpb_state, state);
-}
-
-static int ufshpb_is_valid_srgn(struct ufshpb_region *rgn,
- struct ufshpb_subregion *srgn)
-{
- return rgn->rgn_state != HPB_RGN_INACTIVE &&
- srgn->srgn_state == HPB_SRGN_VALID;
-}
-
-static bool ufshpb_is_read_cmd(struct scsi_cmnd *cmd)
-{
- return req_op(scsi_cmd_to_rq(cmd)) == REQ_OP_READ;
-}
-
-static bool ufshpb_is_write_or_discard(struct scsi_cmnd *cmd)
-{
- return op_is_write(req_op(scsi_cmd_to_rq(cmd))) ||
- op_is_discard(req_op(scsi_cmd_to_rq(cmd)));
-}
-
-static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
-{
- return transfer_len <= hpb->pre_req_max_tr_len;
-}
-
-static bool ufshpb_is_general_lun(int lun)
-{
- return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
-}
-
-static bool ufshpb_is_pinned_region(struct ufshpb_lu *hpb, int rgn_idx)
-{
- return hpb->lu_pinned_end != PINNED_NOT_SET &&
- rgn_idx >= hpb->lu_pinned_start && rgn_idx <= hpb->lu_pinned_end;
-}
-
-static void ufshpb_kick_map_work(struct ufshpb_lu *hpb)
-{
- bool ret = false;
- unsigned long flags;
-
- if (ufshpb_get_state(hpb) != HPB_PRESENT)
- return;
-
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- if (!list_empty(&hpb->lh_inact_rgn) || !list_empty(&hpb->lh_act_srgn))
- ret = true;
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-
- if (ret)
- queue_work(ufshpb_wq, &hpb->map_work);
-}
-
-static bool ufshpb_is_hpb_rsp_valid(struct ufs_hba *hba,
- struct ufshcd_lrb *lrbp,
- struct utp_hpb_rsp *rsp_field)
-{
- /* Check HPB_UPDATE_ALERT */
- if (!(lrbp->ucd_rsp_ptr->header.dword_2 &
- UPIU_HEADER_DWORD(0, 2, 0, 0)))
- return false;
-
- if (be16_to_cpu(rsp_field->sense_data_len) != DEV_SENSE_SEG_LEN ||
- rsp_field->desc_type != DEV_DES_TYPE ||
- rsp_field->additional_len != DEV_ADDITIONAL_LEN ||
- rsp_field->active_rgn_cnt > MAX_ACTIVE_NUM ||
- rsp_field->inactive_rgn_cnt > MAX_INACTIVE_NUM ||
- rsp_field->hpb_op == HPB_RSP_NONE ||
- (rsp_field->hpb_op == HPB_RSP_REQ_REGION_UPDATE &&
- !rsp_field->active_rgn_cnt && !rsp_field->inactive_rgn_cnt))
- return false;
-
- if (!ufshpb_is_general_lun(rsp_field->lun)) {
- dev_warn(hba->dev, "ufshpb: lun(%d) not supported\n",
- lrbp->lun);
- return false;
- }
-
- return true;
-}
-
-static void ufshpb_iterate_rgn(struct ufshpb_lu *hpb, int rgn_idx, int srgn_idx,
- int srgn_offset, int cnt, bool set_dirty)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn, *prev_srgn = NULL;
- int set_bit_len;
- int bitmap_len;
- unsigned long flags;
-
-next_srgn:
- rgn = hpb->rgn_tbl + rgn_idx;
- srgn = rgn->srgn_tbl + srgn_idx;
-
- if (likely(!srgn->is_last))
- bitmap_len = hpb->entries_per_srgn;
- else
- bitmap_len = hpb->last_srgn_entries;
-
- if ((srgn_offset + cnt) > bitmap_len)
- set_bit_len = bitmap_len - srgn_offset;
- else
- set_bit_len = cnt;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- if (rgn->rgn_state != HPB_RGN_INACTIVE) {
- if (set_dirty) {
- if (srgn->srgn_state == HPB_SRGN_VALID)
- bitmap_set(srgn->mctx->ppn_dirty, srgn_offset,
- set_bit_len);
- } else if (hpb->is_hcm) {
- /* rewind the read timer for lru regions */
- rgn->read_timeout = ktime_add_ms(ktime_get(),
- rgn->hpb->params.read_timeout_ms);
- rgn->read_timeout_expiries =
- rgn->hpb->params.read_timeout_expiries;
- }
- }
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
- if (hpb->is_hcm && prev_srgn != srgn) {
- bool activate = false;
-
- spin_lock(&rgn->rgn_lock);
- if (set_dirty) {
- rgn->reads -= srgn->reads;
- srgn->reads = 0;
- set_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
- } else {
- srgn->reads++;
- rgn->reads++;
- if (srgn->reads == hpb->params.activation_thld)
- activate = true;
- }
- spin_unlock(&rgn->rgn_lock);
-
- if (activate ||
- test_and_clear_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags)) {
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- ufshpb_update_active_info(hpb, rgn_idx, srgn_idx);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
- "activate region %d-%d\n", rgn_idx, srgn_idx);
- }
-
- prev_srgn = srgn;
- }
-
- srgn_offset = 0;
- if (++srgn_idx == hpb->srgns_per_rgn) {
- srgn_idx = 0;
- rgn_idx++;
- }
-
- cnt -= set_bit_len;
- if (cnt > 0)
- goto next_srgn;
-}
-
-static bool ufshpb_test_ppn_dirty(struct ufshpb_lu *hpb, int rgn_idx,
- int srgn_idx, int srgn_offset, int cnt)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- int bitmap_len;
- int bit_len;
-
-next_srgn:
- rgn = hpb->rgn_tbl + rgn_idx;
- srgn = rgn->srgn_tbl + srgn_idx;
-
- if (!ufshpb_is_valid_srgn(rgn, srgn))
- return true;
-
- /*
- * If the region state is active, mctx must be allocated.
- * In this case, check whether the region is evicted or
- * mctx allocation fail.
- */
- if (unlikely(!srgn->mctx)) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "no mctx in region %d subregion %d.\n",
- srgn->rgn_idx, srgn->srgn_idx);
- return true;
- }
-
- if (likely(!srgn->is_last))
- bitmap_len = hpb->entries_per_srgn;
- else
- bitmap_len = hpb->last_srgn_entries;
-
- if ((srgn_offset + cnt) > bitmap_len)
- bit_len = bitmap_len - srgn_offset;
- else
- bit_len = cnt;
-
- if (find_next_bit(srgn->mctx->ppn_dirty, bit_len + srgn_offset,
- srgn_offset) < bit_len + srgn_offset)
- return true;
-
- srgn_offset = 0;
- if (++srgn_idx == hpb->srgns_per_rgn) {
- srgn_idx = 0;
- rgn_idx++;
- }
-
- cnt -= bit_len;
- if (cnt > 0)
- goto next_srgn;
-
- return false;
-}
-
-static inline bool is_rgn_dirty(struct ufshpb_region *rgn)
-{
- return test_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
-}
-
-static int ufshpb_fill_ppn_from_page(struct ufshpb_lu *hpb,
- struct ufshpb_map_ctx *mctx, int pos,
- int len, __be64 *ppn_buf)
-{
- struct page *page;
- int index, offset;
- int copied;
-
- index = pos / (PAGE_SIZE / HPB_ENTRY_SIZE);
- offset = pos % (PAGE_SIZE / HPB_ENTRY_SIZE);
-
- if ((offset + len) <= (PAGE_SIZE / HPB_ENTRY_SIZE))
- copied = len;
- else
- copied = (PAGE_SIZE / HPB_ENTRY_SIZE) - offset;
-
- page = mctx->m_page[index];
- if (unlikely(!page)) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "error. cannot find page in mctx\n");
- return -ENOMEM;
- }
-
- memcpy(ppn_buf, page_address(page) + (offset * HPB_ENTRY_SIZE),
- copied * HPB_ENTRY_SIZE);
-
- return copied;
-}
-
-static void
-ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
- int *srgn_idx, int *offset)
-{
- int rgn_offset;
-
- *rgn_idx = lpn >> hpb->entries_per_rgn_shift;
- rgn_offset = lpn & hpb->entries_per_rgn_mask;
- *srgn_idx = rgn_offset >> hpb->entries_per_srgn_shift;
- *offset = rgn_offset & hpb->entries_per_srgn_mask;
-}
-
-static void
-ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
- __be64 ppn, u8 transfer_len)
-{
- unsigned char *cdb = lrbp->cmd->cmnd;
- __be64 ppn_tmp = ppn;
- cdb[0] = UFSHPB_READ;
-
- if (hba->dev_quirks & UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ)
- ppn_tmp = (__force __be64)swab64((__force u64)ppn);
-
- /* ppn value is stored as big-endian in the host memory */
- memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
- cdb[14] = transfer_len;
- cdb[15] = 0;
-
- lrbp->cmd->cmd_len = UFS_CDB_SIZE;
-}
-
-/*
- * This function will set up HPB read command using host-side L2P map data.
- */
-int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- struct ufshpb_lu *hpb;
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- struct scsi_cmnd *cmd = lrbp->cmd;
- u32 lpn;
- __be64 ppn;
- unsigned long flags;
- int transfer_len, rgn_idx, srgn_idx, srgn_offset;
- int err = 0;
-
- hpb = ufshpb_get_hpb_data(cmd->device);
- if (!hpb)
- return -ENODEV;
-
- if (ufshpb_get_state(hpb) == HPB_INIT)
- return -ENODEV;
-
- if (ufshpb_get_state(hpb) != HPB_PRESENT) {
- dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: ufshpb state is not PRESENT", __func__);
- return -ENODEV;
- }
-
- if (blk_rq_is_passthrough(scsi_cmd_to_rq(cmd)) ||
- (!ufshpb_is_write_or_discard(cmd) &&
- !ufshpb_is_read_cmd(cmd)))
- return 0;
-
- transfer_len = sectors_to_logical(cmd->device,
- blk_rq_sectors(scsi_cmd_to_rq(cmd)));
- if (unlikely(!transfer_len))
- return 0;
-
- lpn = sectors_to_logical(cmd->device, blk_rq_pos(scsi_cmd_to_rq(cmd)));
- ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
- rgn = hpb->rgn_tbl + rgn_idx;
- srgn = rgn->srgn_tbl + srgn_idx;
-
- /* If command type is WRITE or DISCARD, set bitmap as dirty */
- if (ufshpb_is_write_or_discard(cmd)) {
- ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
- transfer_len, true);
- return 0;
- }
-
- if (!ufshpb_is_supported_chunk(hpb, transfer_len))
- return 0;
-
- if (hpb->is_hcm) {
- /*
- * in host control mode, reads are the main source for
- * activation trials.
- */
- ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset,
- transfer_len, false);
-
- /* keep those counters normalized */
- if (rgn->reads > hpb->entries_per_srgn)
- schedule_work(&hpb->ufshpb_normalization_work);
- }
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- if (ufshpb_test_ppn_dirty(hpb, rgn_idx, srgn_idx, srgn_offset,
- transfer_len)) {
- hpb->stats.miss_cnt++;
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- return 0;
- }
-
- err = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset, 1, &ppn);
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- if (unlikely(err < 0)) {
- /*
- * In this case, the region state is active,
- * but the ppn table is not allocated.
- * Make sure that ppn table must be allocated on
- * active state.
- */
- dev_err(hba->dev, "get ppn failed. err %d\n", err);
- return err;
- }
-
- ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
-
- hpb->stats.hit_cnt++;
- return 0;
-}
-
-static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb, int rgn_idx,
- enum req_op op, bool atomic)
-{
- struct ufshpb_req *rq;
- struct request *req;
- int retries = HPB_MAP_REQ_RETRIES;
-
- rq = kmem_cache_alloc(hpb->map_req_cache, GFP_KERNEL);
- if (!rq)
- return NULL;
-
-retry:
- req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, op,
- BLK_MQ_REQ_NOWAIT);
-
- if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
- usleep_range(3000, 3100);
- goto retry;
- }
-
- if (IS_ERR(req))
- goto free_rq;
-
- rq->hpb = hpb;
- rq->req = req;
- rq->rb.rgn_idx = rgn_idx;
-
- return rq;
-
-free_rq:
- kmem_cache_free(hpb->map_req_cache, rq);
- return NULL;
-}
-
-static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
-{
- blk_mq_free_request(rq->req);
- kmem_cache_free(hpb->map_req_cache, rq);
-}
-
-static struct ufshpb_req *ufshpb_get_map_req(struct ufshpb_lu *hpb,
- struct ufshpb_subregion *srgn)
-{
- struct ufshpb_req *map_req;
- struct bio *bio;
- unsigned long flags;
-
- if (hpb->is_hcm &&
- hpb->num_inflight_map_req >= hpb->params.inflight_map_req) {
- dev_info(&hpb->sdev_ufs_lu->sdev_dev,
- "map_req throttle. inflight %d throttle %d",
- hpb->num_inflight_map_req,
- hpb->params.inflight_map_req);
- return NULL;
- }
-
- map_req = ufshpb_get_req(hpb, srgn->rgn_idx, REQ_OP_DRV_IN, false);
- if (!map_req)
- return NULL;
-
- bio = bio_alloc(NULL, hpb->pages_per_srgn, 0, GFP_KERNEL);
- if (!bio) {
- ufshpb_put_req(hpb, map_req);
- return NULL;
- }
-
- map_req->bio = bio;
-
- map_req->rb.srgn_idx = srgn->srgn_idx;
- map_req->rb.mctx = srgn->mctx;
-
- spin_lock_irqsave(&hpb->param_lock, flags);
- hpb->num_inflight_map_req++;
- spin_unlock_irqrestore(&hpb->param_lock, flags);
-
- return map_req;
-}
-
-static void ufshpb_put_map_req(struct ufshpb_lu *hpb,
- struct ufshpb_req *map_req)
-{
- unsigned long flags;
-
- bio_put(map_req->bio);
- ufshpb_put_req(hpb, map_req);
-
- spin_lock_irqsave(&hpb->param_lock, flags);
- hpb->num_inflight_map_req--;
- spin_unlock_irqrestore(&hpb->param_lock, flags);
-}
-
-static int ufshpb_clear_dirty_bitmap(struct ufshpb_lu *hpb,
- struct ufshpb_subregion *srgn)
-{
- struct ufshpb_region *rgn;
- u32 num_entries = hpb->entries_per_srgn;
-
- if (!srgn->mctx) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "no mctx in region %d subregion %d.\n",
- srgn->rgn_idx, srgn->srgn_idx);
- return -1;
- }
-
- if (unlikely(srgn->is_last))
- num_entries = hpb->last_srgn_entries;
-
- bitmap_zero(srgn->mctx->ppn_dirty, num_entries);
-
- rgn = hpb->rgn_tbl + srgn->rgn_idx;
- clear_bit(RGN_FLAG_DIRTY, &rgn->rgn_flags);
-
- return 0;
-}
-
-static void ufshpb_update_active_info(struct ufshpb_lu *hpb, int rgn_idx,
- int srgn_idx)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
-
- rgn = hpb->rgn_tbl + rgn_idx;
- srgn = rgn->srgn_tbl + srgn_idx;
-
- list_del_init(&rgn->list_inact_rgn);
-
- if (list_empty(&srgn->list_act_srgn))
- list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
-
- hpb->stats.rcmd_active_cnt++;
-}
-
-static void ufshpb_update_inactive_info(struct ufshpb_lu *hpb, int rgn_idx)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- int srgn_idx;
-
- rgn = hpb->rgn_tbl + rgn_idx;
-
- for_each_sub_region(rgn, srgn_idx, srgn)
- list_del_init(&srgn->list_act_srgn);
-
- if (list_empty(&rgn->list_inact_rgn))
- list_add_tail(&rgn->list_inact_rgn, &hpb->lh_inact_rgn);
-
- hpb->stats.rcmd_inactive_cnt++;
-}
-
-static void ufshpb_activate_subregion(struct ufshpb_lu *hpb,
- struct ufshpb_subregion *srgn)
-{
- struct ufshpb_region *rgn;
-
- /*
- * If there is no mctx in subregion
- * after I/O progress for HPB_READ_BUFFER, the region to which the
- * subregion belongs was evicted.
- * Make sure the region must not evict in I/O progress
- */
- if (!srgn->mctx) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "no mctx in region %d subregion %d.\n",
- srgn->rgn_idx, srgn->srgn_idx);
- srgn->srgn_state = HPB_SRGN_INVALID;
- return;
- }
-
- rgn = hpb->rgn_tbl + srgn->rgn_idx;
-
- if (unlikely(rgn->rgn_state == HPB_RGN_INACTIVE)) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "region %d subregion %d evicted\n",
- srgn->rgn_idx, srgn->srgn_idx);
- srgn->srgn_state = HPB_SRGN_INVALID;
- return;
- }
- srgn->srgn_state = HPB_SRGN_VALID;
-}
-
-static enum rq_end_io_ret ufshpb_umap_req_compl_fn(struct request *req,
- blk_status_t error)
-{
- struct ufshpb_req *umap_req = req->end_io_data;
-
- ufshpb_put_req(umap_req->hpb, umap_req);
- return RQ_END_IO_NONE;
-}
-
-static enum rq_end_io_ret ufshpb_map_req_compl_fn(struct request *req,
- blk_status_t error)
-{
- struct ufshpb_req *map_req = req->end_io_data;
- struct ufshpb_lu *hpb = map_req->hpb;
- struct ufshpb_subregion *srgn;
- unsigned long flags;
-
- srgn = hpb->rgn_tbl[map_req->rb.rgn_idx].srgn_tbl +
- map_req->rb.srgn_idx;
-
- ufshpb_clear_dirty_bitmap(hpb, srgn);
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- ufshpb_activate_subregion(hpb, srgn);
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
- ufshpb_put_map_req(map_req->hpb, map_req);
- return RQ_END_IO_NONE;
-}
-
-static void ufshpb_set_unmap_cmd(unsigned char *cdb, struct ufshpb_region *rgn)
-{
- cdb[0] = UFSHPB_WRITE_BUFFER;
- cdb[1] = rgn ? UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID :
- UFSHPB_WRITE_BUFFER_INACT_ALL_ID;
- if (rgn)
- put_unaligned_be16(rgn->rgn_idx, &cdb[2]);
- cdb[9] = 0x00;
-}
-
-static void ufshpb_set_read_buf_cmd(unsigned char *cdb, int rgn_idx,
- int srgn_idx, int srgn_mem_size)
-{
- cdb[0] = UFSHPB_READ_BUFFER;
- cdb[1] = UFSHPB_READ_BUFFER_ID;
-
- put_unaligned_be16(rgn_idx, &cdb[2]);
- put_unaligned_be16(srgn_idx, &cdb[4]);
- put_unaligned_be24(srgn_mem_size, &cdb[6]);
-
- cdb[9] = 0x00;
-}
-
-static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
- struct ufshpb_req *umap_req,
- struct ufshpb_region *rgn)
-{
- struct request *req = umap_req->req;
- struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
-
- req->timeout = 0;
- req->end_io_data = umap_req;
- req->end_io = ufshpb_umap_req_compl_fn;
-
- ufshpb_set_unmap_cmd(scmd->cmnd, rgn);
- scmd->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
-
- blk_execute_rq_nowait(req, true);
-
- hpb->stats.umap_req_cnt++;
-}
-
-static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
- struct ufshpb_req *map_req, bool last)
-{
- struct request_queue *q;
- struct request *req;
- struct scsi_cmnd *scmd;
- int mem_size = hpb->srgn_mem_size;
- int ret = 0;
- int i;
-
- q = hpb->sdev_ufs_lu->request_queue;
- for (i = 0; i < hpb->pages_per_srgn; i++) {
- ret = bio_add_pc_page(q, map_req->bio, map_req->rb.mctx->m_page[i],
- PAGE_SIZE, 0);
- if (ret != PAGE_SIZE) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "bio_add_pc_page fail %d - %d\n",
- map_req->rb.rgn_idx, map_req->rb.srgn_idx);
- return ret;
- }
- }
-
- req = map_req->req;
-
- blk_rq_append_bio(req, map_req->bio);
-
- req->end_io_data = map_req;
- req->end_io = ufshpb_map_req_compl_fn;
-
- if (unlikely(last))
- mem_size = hpb->last_srgn_entries * HPB_ENTRY_SIZE;
-
- scmd = blk_mq_rq_to_pdu(req);
- ufshpb_set_read_buf_cmd(scmd->cmnd, map_req->rb.rgn_idx,
- map_req->rb.srgn_idx, mem_size);
- scmd->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
-
- blk_execute_rq_nowait(req, true);
-
- hpb->stats.map_req_cnt++;
- return 0;
-}
-
-static struct ufshpb_map_ctx *ufshpb_get_map_ctx(struct ufshpb_lu *hpb,
- bool last)
-{
- struct ufshpb_map_ctx *mctx;
- u32 num_entries = hpb->entries_per_srgn;
- int i, j;
-
- mctx = mempool_alloc(ufshpb_mctx_pool, GFP_KERNEL);
- if (!mctx)
- return NULL;
-
- mctx->m_page = kmem_cache_alloc(hpb->m_page_cache, GFP_KERNEL);
- if (!mctx->m_page)
- goto release_mctx;
-
- if (unlikely(last))
- num_entries = hpb->last_srgn_entries;
-
- mctx->ppn_dirty = bitmap_zalloc(num_entries, GFP_KERNEL);
- if (!mctx->ppn_dirty)
- goto release_m_page;
-
- for (i = 0; i < hpb->pages_per_srgn; i++) {
- mctx->m_page[i] = mempool_alloc(ufshpb_page_pool, GFP_KERNEL);
- if (!mctx->m_page[i]) {
- for (j = 0; j < i; j++)
- mempool_free(mctx->m_page[j], ufshpb_page_pool);
- goto release_ppn_dirty;
- }
- clear_page(page_address(mctx->m_page[i]));
- }
-
- return mctx;
-
-release_ppn_dirty:
- bitmap_free(mctx->ppn_dirty);
-release_m_page:
- kmem_cache_free(hpb->m_page_cache, mctx->m_page);
-release_mctx:
- mempool_free(mctx, ufshpb_mctx_pool);
- return NULL;
-}
-
-static void ufshpb_put_map_ctx(struct ufshpb_lu *hpb,
- struct ufshpb_map_ctx *mctx)
-{
- int i;
-
- for (i = 0; i < hpb->pages_per_srgn; i++)
- mempool_free(mctx->m_page[i], ufshpb_page_pool);
-
- bitmap_free(mctx->ppn_dirty);
- kmem_cache_free(hpb->m_page_cache, mctx->m_page);
- mempool_free(mctx, ufshpb_mctx_pool);
-}
-
-static int ufshpb_check_srgns_issue_state(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn)
-{
- struct ufshpb_subregion *srgn;
- int srgn_idx;
-
- for_each_sub_region(rgn, srgn_idx, srgn)
- if (srgn->srgn_state == HPB_SRGN_ISSUED)
- return -EPERM;
-
- return 0;
-}
-
-static void ufshpb_read_to_handler(struct work_struct *work)
-{
- struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
- ufshpb_read_to_work.work);
- struct victim_select_info *lru_info = &hpb->lru_info;
- struct ufshpb_region *rgn, *next_rgn;
- unsigned long flags;
- unsigned int poll;
- LIST_HEAD(expired_list);
-
- if (test_and_set_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits))
- return;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-
- list_for_each_entry_safe(rgn, next_rgn, &lru_info->lh_lru_rgn,
- list_lru_rgn) {
- bool timedout = ktime_after(ktime_get(), rgn->read_timeout);
-
- if (timedout) {
- rgn->read_timeout_expiries--;
- if (is_rgn_dirty(rgn) ||
- rgn->read_timeout_expiries == 0)
- list_add(&rgn->list_expired_rgn, &expired_list);
- else
- rgn->read_timeout = ktime_add_ms(ktime_get(),
- hpb->params.read_timeout_ms);
- }
- }
-
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
- list_for_each_entry_safe(rgn, next_rgn, &expired_list,
- list_expired_rgn) {
- list_del_init(&rgn->list_expired_rgn);
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
- }
-
- ufshpb_kick_map_work(hpb);
-
- clear_bit(TIMEOUT_WORK_RUNNING, &hpb->work_data_bits);
-
- poll = hpb->params.timeout_polling_interval_ms;
- schedule_delayed_work(&hpb->ufshpb_read_to_work,
- msecs_to_jiffies(poll));
-}
-
-static void ufshpb_add_lru_info(struct victim_select_info *lru_info,
- struct ufshpb_region *rgn)
-{
- rgn->rgn_state = HPB_RGN_ACTIVE;
- list_add_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
- atomic_inc(&lru_info->active_cnt);
- if (rgn->hpb->is_hcm) {
- rgn->read_timeout =
- ktime_add_ms(ktime_get(),
- rgn->hpb->params.read_timeout_ms);
- rgn->read_timeout_expiries =
- rgn->hpb->params.read_timeout_expiries;
- }
-}
-
-static void ufshpb_hit_lru_info(struct victim_select_info *lru_info,
- struct ufshpb_region *rgn)
-{
- list_move_tail(&rgn->list_lru_rgn, &lru_info->lh_lru_rgn);
-}
-
-static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
-{
- struct victim_select_info *lru_info = &hpb->lru_info;
- struct ufshpb_region *rgn, *victim_rgn = NULL;
-
- list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
- if (ufshpb_check_srgns_issue_state(hpb, rgn))
- continue;
-
- /*
- * in host control mode, verify that the exiting region
- * has fewer reads
- */
- if (hpb->is_hcm &&
- rgn->reads > hpb->params.eviction_thld_exit)
- continue;
-
- victim_rgn = rgn;
- break;
- }
-
- if (!victim_rgn)
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: no region allocated\n",
- __func__);
-
- return victim_rgn;
-}
-
-static void ufshpb_cleanup_lru_info(struct victim_select_info *lru_info,
- struct ufshpb_region *rgn)
-{
- list_del_init(&rgn->list_lru_rgn);
- rgn->rgn_state = HPB_RGN_INACTIVE;
- atomic_dec(&lru_info->active_cnt);
-}
-
-static void ufshpb_purge_active_subregion(struct ufshpb_lu *hpb,
- struct ufshpb_subregion *srgn)
-{
- if (srgn->srgn_state != HPB_SRGN_UNUSED) {
- ufshpb_put_map_ctx(hpb, srgn->mctx);
- srgn->srgn_state = HPB_SRGN_UNUSED;
- srgn->mctx = NULL;
- }
-}
-
-static int ufshpb_issue_umap_req(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn,
- bool atomic)
-{
- struct ufshpb_req *umap_req;
- int rgn_idx = rgn ? rgn->rgn_idx : 0;
-
- umap_req = ufshpb_get_req(hpb, rgn_idx, REQ_OP_DRV_OUT, atomic);
- if (!umap_req)
- return -ENOMEM;
-
- ufshpb_execute_umap_req(hpb, umap_req, rgn);
-
- return 0;
-}
-
-static int ufshpb_issue_umap_single_req(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn)
-{
- return ufshpb_issue_umap_req(hpb, rgn, true);
-}
-
-static void __ufshpb_evict_region(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn)
-{
- struct victim_select_info *lru_info;
- struct ufshpb_subregion *srgn;
- int srgn_idx;
-
- lru_info = &hpb->lru_info;
-
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "evict region %d\n", rgn->rgn_idx);
-
- ufshpb_cleanup_lru_info(lru_info, rgn);
-
- for_each_sub_region(rgn, srgn_idx, srgn)
- ufshpb_purge_active_subregion(hpb, srgn);
-}
-
-static int ufshpb_evict_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
-{
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- if (rgn->rgn_state == HPB_RGN_PINNED) {
- dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
- "pinned region cannot drop-out. region %d\n",
- rgn->rgn_idx);
- goto out;
- }
-
- if (!list_empty(&rgn->list_lru_rgn)) {
- if (ufshpb_check_srgns_issue_state(hpb, rgn)) {
- ret = -EBUSY;
- goto out;
- }
-
- if (hpb->is_hcm) {
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- ret = ufshpb_issue_umap_single_req(hpb, rgn);
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- if (ret)
- goto out;
- }
-
- __ufshpb_evict_region(hpb, rgn);
- }
-out:
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- return ret;
-}
-
-static int ufshpb_issue_map_req(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn,
- struct ufshpb_subregion *srgn)
-{
- struct ufshpb_req *map_req;
- unsigned long flags;
- int ret;
- int err = -EAGAIN;
- bool alloc_required = false;
- enum HPB_SRGN_STATE state = HPB_SRGN_INVALID;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-
- if (ufshpb_get_state(hpb) != HPB_PRESENT) {
- dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: ufshpb state is not PRESENT\n", __func__);
- goto unlock_out;
- }
-
- if ((rgn->rgn_state == HPB_RGN_INACTIVE) &&
- (srgn->srgn_state == HPB_SRGN_INVALID)) {
- err = 0;
- goto unlock_out;
- }
-
- if (srgn->srgn_state == HPB_SRGN_UNUSED)
- alloc_required = true;
-
- /*
- * If the subregion is already ISSUED state,
- * a specific event (e.g., GC or wear-leveling, etc.) occurs in
- * the device and HPB response for map loading is received.
- * In this case, after finishing the HPB_READ_BUFFER,
- * the next HPB_READ_BUFFER is performed again to obtain the latest
- * map data.
- */
- if (srgn->srgn_state == HPB_SRGN_ISSUED)
- goto unlock_out;
-
- srgn->srgn_state = HPB_SRGN_ISSUED;
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
- if (alloc_required) {
- srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
- if (!srgn->mctx) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "get map_ctx failed. region %d - %d\n",
- rgn->rgn_idx, srgn->srgn_idx);
- state = HPB_SRGN_UNUSED;
- goto change_srgn_state;
- }
- }
-
- map_req = ufshpb_get_map_req(hpb, srgn);
- if (!map_req)
- goto change_srgn_state;
-
-
- ret = ufshpb_execute_map_req(hpb, map_req, srgn->is_last);
- if (ret) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: issue map_req failed: %d, region %d - %d\n",
- __func__, ret, srgn->rgn_idx, srgn->srgn_idx);
- goto free_map_req;
- }
- return 0;
-
-free_map_req:
- ufshpb_put_map_req(hpb, map_req);
-change_srgn_state:
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- srgn->srgn_state = state;
-unlock_out:
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- return err;
-}
-
-static int ufshpb_add_region(struct ufshpb_lu *hpb, struct ufshpb_region *rgn)
-{
- struct ufshpb_region *victim_rgn = NULL;
- struct victim_select_info *lru_info = &hpb->lru_info;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- /*
- * If region belongs to lru_list, just move the region
- * to the front of lru list because the state of the region
- * is already active-state.
- */
- if (!list_empty(&rgn->list_lru_rgn)) {
- ufshpb_hit_lru_info(lru_info, rgn);
- goto out;
- }
-
- if (rgn->rgn_state == HPB_RGN_INACTIVE) {
- if (atomic_read(&lru_info->active_cnt) ==
- lru_info->max_lru_active_cnt) {
- /*
- * If the maximum number of active regions
- * is exceeded, evict the least recently used region.
- * This case may occur when the device responds
- * to the eviction information late.
- * It is okay to evict the least recently used region,
- * because the device could detect this region
- * by not issuing HPB_READ
- *
- * in host control mode, verify that the entering
- * region has enough reads
- */
- if (hpb->is_hcm &&
- rgn->reads < hpb->params.eviction_thld_enter) {
- ret = -EACCES;
- goto out;
- }
-
- victim_rgn = ufshpb_victim_lru_info(hpb);
- if (!victim_rgn) {
- dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
- "cannot get victim region %s\n",
- hpb->is_hcm ? "" : "error");
- ret = -ENOMEM;
- goto out;
- }
-
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
- "LRU full (%d), choose victim %d\n",
- atomic_read(&lru_info->active_cnt),
- victim_rgn->rgn_idx);
-
- if (hpb->is_hcm) {
- spin_unlock_irqrestore(&hpb->rgn_state_lock,
- flags);
- ret = ufshpb_issue_umap_single_req(hpb,
- victim_rgn);
- spin_lock_irqsave(&hpb->rgn_state_lock,
- flags);
- if (ret)
- goto out;
- }
-
- __ufshpb_evict_region(hpb, victim_rgn);
- }
-
-		/*
-		 * When a region is added to the lru_info list_head, it is
-		 * guaranteed that all of its subregions have been assigned an
-		 * mctx. If that fails, try to receive the mctx again without
-		 * adding the region to the lru_info list_head.
-		 */
- ufshpb_add_lru_info(lru_info, rgn);
- }
-out:
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- return ret;
-}
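For context, here is a minimal user-space sketch of the activation/eviction policy that ufshpb_add_region() above implemented: a region already on the LRU list is only refreshed; otherwise a victim is evicted once the active count reaches its limit, and in host control mode the entering region must first have at least eviction_thld_enter reads. All helper names and numbers below are illustrative, not driver code.

/* Illustrative sketch of the ufshpb_add_region() policy; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

enum rgn_state { RGN_INACTIVE, RGN_ACTIVE, RGN_PINNED };

struct rgn {
    int idx;
    int reads;
    enum rgn_state state;
    bool on_lru;    /* stands in for !list_empty(&rgn->list_lru_rgn) */
};

struct lru {
    int active_cnt;
    int max_active_cnt;
};

/* Hypothetical helpers standing in for ufshpb_hit_lru_info() and friends. */
static void lru_hit(struct rgn *r) { printf("move region %d to LRU front\n", r->idx); }
static void lru_add(struct lru *l, struct rgn *r)
{
    r->state = RGN_ACTIVE;
    r->on_lru = true;
    l->active_cnt++;
}
static void lru_evict_victim(struct lru *l)
{
    printf("evict LRU victim\n");
    l->active_cnt--;
}

static int add_region(struct lru *l, struct rgn *r, bool is_hcm, int thld_enter)
{
    if (r->on_lru) {        /* already active: refresh recency only */
        lru_hit(r);
        return 0;
    }
    if (r->state == RGN_INACTIVE) {
        if (l->active_cnt == l->max_active_cnt) {
            /* in host control mode the entering region must be hot enough */
            if (is_hcm && r->reads < thld_enter)
                return -1;    /* -EACCES in the driver */
            lru_evict_victim(l);
        }
        lru_add(l, r);
    }
    return 0;
}

int main(void)
{
    struct lru l = { .active_cnt = 2, .max_active_cnt = 2 };
    struct rgn cold = { .idx = 7, .reads = 3 };
    struct rgn hot  = { .idx = 9, .reads = 4096 };

    printf("cold: %d\n", add_region(&l, &cold, true, 128));    /* rejected */
    printf("hot : %d\n", add_region(&l, &hot, true, 128));     /* evicts, then adds */
    return 0;
}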
-/**
- * ufshpb_submit_region_inactive() - submit a region to be inactivated later
- * @hpb: per-LU HPB instance
- * @region_index: the index associated with the region that will be inactivated later
- */
-static void ufshpb_submit_region_inactive(struct ufshpb_lu *hpb, int region_index)
-{
- int subregion_index;
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
-
- /*
- * Remove this region from active region list and add it to inactive list
- */
- spin_lock(&hpb->rsp_list_lock);
- ufshpb_update_inactive_info(hpb, region_index);
- spin_unlock(&hpb->rsp_list_lock);
-
- rgn = hpb->rgn_tbl + region_index;
-
-	/*
-	 * Set the subregion state to HPB_SRGN_INVALID; there will be no HPB reads on this subregion
-	 */
- spin_lock(&hpb->rgn_state_lock);
- if (rgn->rgn_state != HPB_RGN_INACTIVE) {
- for (subregion_index = 0; subregion_index < rgn->srgn_cnt; subregion_index++) {
- srgn = rgn->srgn_tbl + subregion_index;
- if (srgn->srgn_state == HPB_SRGN_VALID)
- srgn->srgn_state = HPB_SRGN_INVALID;
- }
- }
- spin_unlock(&hpb->rgn_state_lock);
-}
-
-static void ufshpb_rsp_req_region_update(struct ufshpb_lu *hpb,
- struct utp_hpb_rsp *rsp_field)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- int i, rgn_i, srgn_i;
-
- BUILD_BUG_ON(sizeof(struct ufshpb_active_field) != HPB_ACT_FIELD_SIZE);
-	/*
-	 * If the active region and the inactive region are the same,
-	 * we will inactivate this region.
-	 * The device can check this (region inactivated) and
-	 * will respond with the proper active region information.
-	 */
- for (i = 0; i < rsp_field->active_rgn_cnt; i++) {
- rgn_i =
- be16_to_cpu(rsp_field->hpb_active_field[i].active_rgn);
- srgn_i =
- be16_to_cpu(rsp_field->hpb_active_field[i].active_srgn);
-
- rgn = hpb->rgn_tbl + rgn_i;
- if (hpb->is_hcm &&
- (rgn->rgn_state != HPB_RGN_ACTIVE || is_rgn_dirty(rgn))) {
-			/*
-			 * In host control mode, subregion activation
-			 * recommendations are only honored for active regions.
-			 * Also, ignore recommendations for dirty regions - the
-			 * host makes decisions concerning those on its own.
-			 */
- continue;
- }
-
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev,
- "activate(%d) region %d - %d\n", i, rgn_i, srgn_i);
-
- spin_lock(&hpb->rsp_list_lock);
- ufshpb_update_active_info(hpb, rgn_i, srgn_i);
- spin_unlock(&hpb->rsp_list_lock);
-
- srgn = rgn->srgn_tbl + srgn_i;
-
- /* blocking HPB_READ */
- spin_lock(&hpb->rgn_state_lock);
- if (srgn->srgn_state == HPB_SRGN_VALID)
- srgn->srgn_state = HPB_SRGN_INVALID;
- spin_unlock(&hpb->rgn_state_lock);
- }
-
- if (hpb->is_hcm) {
- /*
- * in host control mode the device is not allowed to inactivate
- * regions
- */
- goto out;
- }
-
- for (i = 0; i < rsp_field->inactive_rgn_cnt; i++) {
- rgn_i = be16_to_cpu(rsp_field->hpb_inactive_field[i]);
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "inactivate(%d) region %d\n", i, rgn_i);
- ufshpb_submit_region_inactive(hpb, rgn_i);
- }
-
-out:
- dev_dbg(&hpb->sdev_ufs_lu->sdev_dev, "Noti: #ACT %u #INACT %u\n",
- rsp_field->active_rgn_cnt, rsp_field->inactive_rgn_cnt);
-
- if (ufshpb_get_state(hpb) == HPB_PRESENT)
- queue_work(ufshpb_wq, &hpb->map_work);
-}
-
-/*
- * Set the flags of all active regions to RGN_FLAG_UPDATE to let the host side reload L2P entries later.
- */
-static void ufshpb_set_regions_update(struct ufshpb_lu *hpb)
-{
- struct victim_select_info *lru_info = &hpb->lru_info;
- struct ufshpb_region *rgn;
- unsigned long flags;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-
- list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
- set_bit(RGN_FLAG_UPDATE, &rgn->rgn_flags);
-
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-}
-
-static void ufshpb_dev_reset_handler(struct ufs_hba *hba)
-{
- struct scsi_device *sdev;
- struct ufshpb_lu *hpb;
-
- __shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
- if (!hpb)
- continue;
-
- if (hpb->is_hcm) {
-		/*
-		 * For HPB host control mode, if the device powered up and lost its HPB
-		 * information, set the region flag to RGN_FLAG_UPDATE so that the host
-		 * reloads its L2P entries (reactivates the region in the UFS device).
-		 */
- ufshpb_set_regions_update(hpb);
- } else {
-		/*
-		 * For HPB device control mode, receiving 02h:HPB Operation in the UPIU
-		 * response means the device recommends that the host inactivate all
-		 * active regions. Add all active regions to the inactive list here;
-		 * they will be inactivated later in ufshpb_map_work_handler().
-		 */
- struct victim_select_info *lru_info = &hpb->lru_info;
- struct ufshpb_region *rgn;
-
- list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn)
- ufshpb_submit_region_inactive(hpb, rgn->rgn_idx);
-
- if (ufshpb_get_state(hpb) == HPB_PRESENT)
- queue_work(ufshpb_wq, &hpb->map_work);
- }
- }
-}
-
-/*
- * This function will parse recommended active subregion information in sense
- * data field of response UPIU with SAM_STAT_GOOD state.
- */
-void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(lrbp->cmd->device);
- struct utp_hpb_rsp *rsp_field = &lrbp->ucd_rsp_ptr->hr;
- int data_seg_len;
-
- data_seg_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2)
- & MASK_RSP_UPIU_DATA_SEG_LEN;
-
- /* If data segment length is zero, rsp_field is not valid */
- if (!data_seg_len)
- return;
-
- if (unlikely(lrbp->lun != rsp_field->lun)) {
- struct scsi_device *sdev;
- bool found = false;
-
- __shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
-
- if (!hpb)
- continue;
-
- if (rsp_field->lun == hpb->lun) {
- found = true;
- break;
- }
- }
-
- if (!found)
- return;
- }
-
- if (!hpb)
- return;
-
- if (ufshpb_get_state(hpb) == HPB_INIT)
- return;
-
- if ((ufshpb_get_state(hpb) != HPB_PRESENT) &&
- (ufshpb_get_state(hpb) != HPB_SUSPEND)) {
- dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: ufshpb state is not PRESENT/SUSPEND\n",
- __func__);
- return;
- }
-
- BUILD_BUG_ON(sizeof(struct utp_hpb_rsp) != UTP_HPB_RSP_SIZE);
-
- if (!ufshpb_is_hpb_rsp_valid(hba, lrbp, rsp_field))
- return;
-
- hpb->stats.rcmd_noti_cnt++;
-
- switch (rsp_field->hpb_op) {
- case HPB_RSP_REQ_REGION_UPDATE:
- if (data_seg_len != DEV_DATA_SEG_LEN)
- dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: data seg length is not same.\n",
- __func__);
- ufshpb_rsp_req_region_update(hpb, rsp_field);
- break;
- case HPB_RSP_DEV_RESET:
- dev_warn(&hpb->sdev_ufs_lu->sdev_dev,
- "UFS device lost HPB information during PM.\n");
- ufshpb_dev_reset_handler(hba);
-
- break;
- default:
- dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
- "hpb_op is not available: %d\n",
- rsp_field->hpb_op);
- break;
- }
-}
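A simplified, user-space sketch of how the recommendation fields handled by ufshpb_rsp_upiu() and ufshpb_rsp_req_region_update() could be decoded. The structure below is a reduced stand-in for struct utp_hpb_rsp (only big-endian region/subregion fields plus the two opcode values from ufshpb.h), so treat it as illustrative only.

/* Illustrative decode of HPB activation/inactivation recommendations. */
#include <arpa/inet.h>    /* ntohs() stands in for be16_to_cpu() */
#include <stdint.h>
#include <stdio.h>

#define HPB_RSP_REQ_REGION_UPDATE    0x1
#define HPB_RSP_DEV_RESET            0x2

struct act_field { uint16_t active_rgn; uint16_t active_srgn; };

int main(void)
{
    uint8_t hpb_op = HPB_RSP_REQ_REGION_UPDATE;
    /* Pretend the device recommended two activations and one inactivation. */
    struct act_field act[2] = { { htons(12), htons(3) }, { htons(12), htons(4) } };
    uint16_t inact[1] = { htons(27) };
    int i;

    switch (hpb_op) {
    case HPB_RSP_REQ_REGION_UPDATE:
        for (i = 0; i < 2; i++)
            printf("activate region %u subregion %u\n",
                   ntohs(act[i].active_rgn), ntohs(act[i].active_srgn));
        for (i = 0; i < 1; i++)
            printf("inactivate region %u\n", ntohs(inact[i]));
        break;
    case HPB_RSP_DEV_RESET:
        printf("device lost HPB information; reset handling\n");
        break;
    }
    return 0;
}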
-
-static void ufshpb_add_active_list(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn,
- struct ufshpb_subregion *srgn)
-{
- if (!list_empty(&rgn->list_inact_rgn))
- return;
-
- if (!list_empty(&srgn->list_act_srgn)) {
- list_move(&srgn->list_act_srgn, &hpb->lh_act_srgn);
- return;
- }
-
- list_add(&srgn->list_act_srgn, &hpb->lh_act_srgn);
-}
-
-static void ufshpb_add_pending_evict_list(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn,
- struct list_head *pending_list)
-{
- struct ufshpb_subregion *srgn;
- int srgn_idx;
-
- if (!list_empty(&rgn->list_inact_rgn))
- return;
-
- for_each_sub_region(rgn, srgn_idx, srgn)
- if (!list_empty(&srgn->list_act_srgn))
- return;
-
- list_add_tail(&rgn->list_inact_rgn, pending_list);
-}
-
-static void ufshpb_run_active_subregion_list(struct ufshpb_lu *hpb)
-{
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- while ((srgn = list_first_entry_or_null(&hpb->lh_act_srgn,
- struct ufshpb_subregion,
- list_act_srgn))) {
- if (ufshpb_get_state(hpb) == HPB_SUSPEND)
- break;
-
- list_del_init(&srgn->list_act_srgn);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-
- rgn = hpb->rgn_tbl + srgn->rgn_idx;
- ret = ufshpb_add_region(hpb, rgn);
- if (ret)
- goto active_failed;
-
- ret = ufshpb_issue_map_req(hpb, rgn, srgn);
- if (ret) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "issue map_req failed. ret %d, region %d - %d\n",
- ret, rgn->rgn_idx, srgn->srgn_idx);
- goto active_failed;
- }
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- }
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
- return;
-
-active_failed:
- dev_err(&hpb->sdev_ufs_lu->sdev_dev, "failed to activate region %d - %d, will retry\n",
- rgn->rgn_idx, srgn->srgn_idx);
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- ufshpb_add_active_list(hpb, rgn, srgn);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-}
-
-static void ufshpb_run_inactive_region_list(struct ufshpb_lu *hpb)
-{
- struct ufshpb_region *rgn;
- unsigned long flags;
- int ret;
- LIST_HEAD(pending_list);
-
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- while ((rgn = list_first_entry_or_null(&hpb->lh_inact_rgn,
- struct ufshpb_region,
- list_inact_rgn))) {
- if (ufshpb_get_state(hpb) == HPB_SUSPEND)
- break;
-
- list_del_init(&rgn->list_inact_rgn);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-
- ret = ufshpb_evict_region(hpb, rgn);
- if (ret) {
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- ufshpb_add_pending_evict_list(hpb, rgn, &pending_list);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
- }
-
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- }
-
- list_splice(&pending_list, &hpb->lh_inact_rgn);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-}
-
-static void ufshpb_normalization_work_handler(struct work_struct *work)
-{
- struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu,
- ufshpb_normalization_work);
- int rgn_idx;
- u8 factor = hpb->params.normalization_factor;
-
- for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
- struct ufshpb_region *rgn = hpb->rgn_tbl + rgn_idx;
- int srgn_idx;
-
- spin_lock(&rgn->rgn_lock);
- rgn->reads = 0;
- for (srgn_idx = 0; srgn_idx < hpb->srgns_per_rgn; srgn_idx++) {
- struct ufshpb_subregion *srgn = rgn->srgn_tbl + srgn_idx;
-
- srgn->reads >>= factor;
- rgn->reads += srgn->reads;
- }
- spin_unlock(&rgn->rgn_lock);
-
- if (rgn->rgn_state != HPB_RGN_ACTIVE || rgn->reads)
- continue;
-
- /* if region is active but has no reads - inactivate it */
- spin_lock(&hpb->rsp_list_lock);
- ufshpb_update_inactive_info(hpb, rgn->rgn_idx);
- spin_unlock(&hpb->rsp_list_lock);
- }
-}
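A stand-alone sketch of the read-counter decay performed by the normalization worker above: every subregion counter is shifted right by the normalization factor, the region total is recomputed, and an active region whose total drops to zero becomes an inactivation candidate. Structure and names are illustrative.

/* Illustrative decay of per-subregion read counters; not driver code. */
#include <stdio.h>

#define SRGNS_PER_RGN 4

struct region {
    unsigned int srgn_reads[SRGNS_PER_RGN];
    unsigned int reads;
    int active;
};

static int normalize(struct region *rgn, unsigned int factor)
{
    int i;

    rgn->reads = 0;
    for (i = 0; i < SRGNS_PER_RGN; i++) {
        rgn->srgn_reads[i] >>= factor;
        rgn->reads += rgn->srgn_reads[i];
    }
    /* active but no longer read: tell the caller to inactivate it */
    return rgn->active && !rgn->reads;
}

int main(void)
{
    struct region r = { .srgn_reads = { 1, 0, 1, 0 }, .active = 1 };

    /* one decay pass with factor 1 leaves zero reads, so it prints 1 */
    printf("inactivate after pass 1: %d\n", normalize(&r, 1));
    return 0;
}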
-
-static void ufshpb_map_work_handler(struct work_struct *work)
-{
- struct ufshpb_lu *hpb = container_of(work, struct ufshpb_lu, map_work);
-
- if (ufshpb_get_state(hpb) != HPB_PRESENT) {
- dev_notice(&hpb->sdev_ufs_lu->sdev_dev,
- "%s: ufshpb state is not PRESENT\n", __func__);
- return;
- }
-
- ufshpb_run_inactive_region_list(hpb);
- ufshpb_run_active_subregion_list(hpb);
-}
-
-/*
- * This function does not need to hold any locks (rgn_state_lock,
- * rsp_list_lock, etc.) because it is only called during initialization.
- */
-static int ufshpb_init_pinned_active_region(struct ufs_hba *hba,
- struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn)
-{
- struct ufshpb_subregion *srgn;
- int srgn_idx, i;
- int err = 0;
-
- for_each_sub_region(rgn, srgn_idx, srgn) {
- srgn->mctx = ufshpb_get_map_ctx(hpb, srgn->is_last);
- srgn->srgn_state = HPB_SRGN_INVALID;
- if (!srgn->mctx) {
- err = -ENOMEM;
- dev_err(hba->dev,
- "alloc mctx for pinned region failed\n");
- goto release;
- }
-
- list_add_tail(&srgn->list_act_srgn, &hpb->lh_act_srgn);
- }
-
- rgn->rgn_state = HPB_RGN_PINNED;
- return 0;
-
-release:
- for (i = 0; i < srgn_idx; i++) {
- srgn = rgn->srgn_tbl + i;
- ufshpb_put_map_ctx(hpb, srgn->mctx);
- }
- return err;
-}
-
-static void ufshpb_init_subregion_tbl(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn, bool last)
-{
- int srgn_idx;
- struct ufshpb_subregion *srgn;
-
- for_each_sub_region(rgn, srgn_idx, srgn) {
- INIT_LIST_HEAD(&srgn->list_act_srgn);
-
- srgn->rgn_idx = rgn->rgn_idx;
- srgn->srgn_idx = srgn_idx;
- srgn->srgn_state = HPB_SRGN_UNUSED;
- }
-
- if (unlikely(last && hpb->last_srgn_entries))
- srgn->is_last = true;
-}
-
-static int ufshpb_alloc_subregion_tbl(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn, int srgn_cnt)
-{
- rgn->srgn_tbl = kvcalloc(srgn_cnt, sizeof(struct ufshpb_subregion),
- GFP_KERNEL);
- if (!rgn->srgn_tbl)
- return -ENOMEM;
-
- rgn->srgn_cnt = srgn_cnt;
- return 0;
-}
-
-static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
- struct ufshpb_lu *hpb,
- struct ufshpb_dev_info *hpb_dev_info,
- struct ufshpb_lu_info *hpb_lu_info)
-{
- u32 entries_per_rgn;
- u64 rgn_mem_size, tmp;
-
- if (ufshpb_is_legacy(hba))
- hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
- else
- hpb->pre_req_max_tr_len = hpb_dev_info->max_hpb_single_cmd;
-
- hpb->lu_pinned_start = hpb_lu_info->pinned_start;
- hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
- (hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
- : PINNED_NOT_SET;
- hpb->lru_info.max_lru_active_cnt =
- hpb_lu_info->max_active_rgns - hpb_lu_info->num_pinned;
-
- rgn_mem_size = (1ULL << hpb_dev_info->rgn_size) * HPB_RGN_SIZE_UNIT
- * HPB_ENTRY_SIZE;
- do_div(rgn_mem_size, HPB_ENTRY_BLOCK_SIZE);
- hpb->srgn_mem_size = (1ULL << hpb_dev_info->srgn_size)
- * HPB_RGN_SIZE_UNIT / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;
-
- tmp = rgn_mem_size;
- do_div(tmp, HPB_ENTRY_SIZE);
- entries_per_rgn = (u32)tmp;
- hpb->entries_per_rgn_shift = ilog2(entries_per_rgn);
- hpb->entries_per_rgn_mask = entries_per_rgn - 1;
-
- hpb->entries_per_srgn = hpb->srgn_mem_size / HPB_ENTRY_SIZE;
- hpb->entries_per_srgn_shift = ilog2(hpb->entries_per_srgn);
- hpb->entries_per_srgn_mask = hpb->entries_per_srgn - 1;
-
- tmp = rgn_mem_size;
- do_div(tmp, hpb->srgn_mem_size);
- hpb->srgns_per_rgn = (int)tmp;
-
- hpb->rgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
- entries_per_rgn);
- hpb->srgns_per_lu = DIV_ROUND_UP(hpb_lu_info->num_blocks,
- (hpb->srgn_mem_size / HPB_ENTRY_SIZE));
- hpb->last_srgn_entries = hpb_lu_info->num_blocks
- % (hpb->srgn_mem_size / HPB_ENTRY_SIZE);
-
- hpb->pages_per_srgn = DIV_ROUND_UP(hpb->srgn_mem_size, PAGE_SIZE);
-
- if (hpb_dev_info->control_mode == HPB_HOST_CONTROL)
- hpb->is_hcm = true;
-}
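The geometry math in ufshpb_lu_parameter_init() can be hard to follow inline; the small stand-alone program below reproduces it using the unit constants from the removed ufshpb.h (HPB_RGN_SIZE_UNIT, HPB_ENTRY_BLOCK_SIZE, HPB_ENTRY_SIZE). The descriptor exponents and block count are made-up example values.

/* Stand-alone sketch of the ufshpb_lu_parameter_init() geometry math. */
#include <stdint.h>
#include <stdio.h>

#define HPB_RGN_SIZE_UNIT       512
#define HPB_ENTRY_BLOCK_SIZE    4096    /* SZ_4K */
#define HPB_ENTRY_SIZE          8

int main(void)
{
    unsigned int rgn_size = 15, srgn_size = 13;   /* example descriptor exponents */
    uint64_t num_blocks = 1ULL << 22;             /* example: 16 GiB LU at 4 KiB blocks */

    /* bytes of L2P map needed to cover one region / one subregion */
    uint64_t rgn_mem_size = (1ULL << rgn_size) * HPB_RGN_SIZE_UNIT
                            * HPB_ENTRY_SIZE / HPB_ENTRY_BLOCK_SIZE;
    uint64_t srgn_mem_size = (1ULL << srgn_size) * HPB_RGN_SIZE_UNIT
                             / HPB_ENTRY_BLOCK_SIZE * HPB_ENTRY_SIZE;

    uint64_t entries_per_rgn = rgn_mem_size / HPB_ENTRY_SIZE;
    uint64_t entries_per_srgn = srgn_mem_size / HPB_ENTRY_SIZE;
    uint64_t srgns_per_rgn = rgn_mem_size / srgn_mem_size;
    uint64_t rgns_per_lu = (num_blocks + entries_per_rgn - 1) / entries_per_rgn;

    printf("map bytes per region: %llu, per subregion: %llu\n",
           (unsigned long long)rgn_mem_size, (unsigned long long)srgn_mem_size);
    printf("entries/region: %llu, entries/subregion: %llu, subregions/region: %llu\n",
           (unsigned long long)entries_per_rgn, (unsigned long long)entries_per_srgn,
           (unsigned long long)srgns_per_rgn);
    printf("regions per LU: %llu\n", (unsigned long long)rgns_per_lu);
    return 0;
}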
-
-static int ufshpb_alloc_region_tbl(struct ufs_hba *hba, struct ufshpb_lu *hpb)
-{
- struct ufshpb_region *rgn_table, *rgn;
- int rgn_idx, i;
- int ret = 0;
-
- rgn_table = kvcalloc(hpb->rgns_per_lu, sizeof(struct ufshpb_region),
- GFP_KERNEL);
- if (!rgn_table)
- return -ENOMEM;
-
- for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
- int srgn_cnt = hpb->srgns_per_rgn;
- bool last_srgn = false;
-
- rgn = rgn_table + rgn_idx;
- rgn->rgn_idx = rgn_idx;
-
- spin_lock_init(&rgn->rgn_lock);
-
- INIT_LIST_HEAD(&rgn->list_inact_rgn);
- INIT_LIST_HEAD(&rgn->list_lru_rgn);
- INIT_LIST_HEAD(&rgn->list_expired_rgn);
-
- if (rgn_idx == hpb->rgns_per_lu - 1) {
- srgn_cnt = ((hpb->srgns_per_lu - 1) %
- hpb->srgns_per_rgn) + 1;
- last_srgn = true;
- }
-
- ret = ufshpb_alloc_subregion_tbl(hpb, rgn, srgn_cnt);
- if (ret)
- goto release_srgn_table;
- ufshpb_init_subregion_tbl(hpb, rgn, last_srgn);
-
- if (ufshpb_is_pinned_region(hpb, rgn_idx)) {
- ret = ufshpb_init_pinned_active_region(hba, hpb, rgn);
- if (ret)
- goto release_srgn_table;
- } else {
- rgn->rgn_state = HPB_RGN_INACTIVE;
- }
-
- rgn->rgn_flags = 0;
- rgn->hpb = hpb;
- }
-
- hpb->rgn_tbl = rgn_table;
-
- return 0;
-
-release_srgn_table:
- for (i = 0; i <= rgn_idx; i++)
- kvfree(rgn_table[i].srgn_tbl);
-
- kvfree(rgn_table);
- return ret;
-}
-
-static void ufshpb_destroy_subregion_tbl(struct ufshpb_lu *hpb,
- struct ufshpb_region *rgn)
-{
- int srgn_idx;
- struct ufshpb_subregion *srgn;
-
- for_each_sub_region(rgn, srgn_idx, srgn)
- if (srgn->srgn_state != HPB_SRGN_UNUSED) {
- srgn->srgn_state = HPB_SRGN_UNUSED;
- ufshpb_put_map_ctx(hpb, srgn->mctx);
- }
-}
-
-static void ufshpb_destroy_region_tbl(struct ufshpb_lu *hpb)
-{
- int rgn_idx;
-
- for (rgn_idx = 0; rgn_idx < hpb->rgns_per_lu; rgn_idx++) {
- struct ufshpb_region *rgn;
-
- rgn = hpb->rgn_tbl + rgn_idx;
- if (rgn->rgn_state != HPB_RGN_INACTIVE) {
- rgn->rgn_state = HPB_RGN_INACTIVE;
-
- ufshpb_destroy_subregion_tbl(hpb, rgn);
- }
-
- kvfree(rgn->srgn_tbl);
- }
-
- kvfree(hpb->rgn_tbl);
-}
-
-/* SYSFS functions */
-#define ufshpb_sysfs_attr_show_func(__name) \
-static ssize_t __name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct scsi_device *sdev = to_scsi_device(dev); \
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
- \
- if (!hpb) \
- return -ENODEV; \
- \
- return sysfs_emit(buf, "%llu\n", hpb->stats.__name); \
-} \
-\
-static DEVICE_ATTR_RO(__name)
-
-ufshpb_sysfs_attr_show_func(hit_cnt);
-ufshpb_sysfs_attr_show_func(miss_cnt);
-ufshpb_sysfs_attr_show_func(rcmd_noti_cnt);
-ufshpb_sysfs_attr_show_func(rcmd_active_cnt);
-ufshpb_sysfs_attr_show_func(rcmd_inactive_cnt);
-ufshpb_sysfs_attr_show_func(map_req_cnt);
-ufshpb_sysfs_attr_show_func(umap_req_cnt);
-
-static struct attribute *hpb_dev_stat_attrs[] = {
- &dev_attr_hit_cnt.attr,
- &dev_attr_miss_cnt.attr,
- &dev_attr_rcmd_noti_cnt.attr,
- &dev_attr_rcmd_active_cnt.attr,
- &dev_attr_rcmd_inactive_cnt.attr,
- &dev_attr_map_req_cnt.attr,
- &dev_attr_umap_req_cnt.attr,
- NULL,
-};
-
-struct attribute_group ufs_sysfs_hpb_stat_group = {
- .name = "hpb_stats",
- .attrs = hpb_dev_stat_attrs,
-};
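The hpb_stats attributes are all generated by the ufshpb_sysfs_attr_show_func() macro above. As a rough illustration of that pattern outside the kernel's sysfs machinery, the sketch below stamps out one getter per counter with a similar macro; it is not the driver code.

/* Illustrative macro-generated getters, mimicking the show-func pattern. */
#include <stddef.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

struct stats { uint64_t hit_cnt, miss_cnt; };

#define stat_show_func(__name)                                          \
static int __name##_show(const struct stats *s, char *buf, size_t len) \
{                                                                       \
    return snprintf(buf, len, "%" PRIu64 "\n", s->__name);             \
}

stat_show_func(hit_cnt)
stat_show_func(miss_cnt)

int main(void)
{
    struct stats s = { .hit_cnt = 42, .miss_cnt = 7 };
    char buf[32];

    hit_cnt_show(&s, buf, sizeof(buf));
    printf("hit_cnt: %s", buf);
    miss_cnt_show(&s, buf, sizeof(buf));
    printf("miss_cnt: %s", buf);
    return 0;
}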
-
-/* SYSFS functions */
-#define ufshpb_sysfs_param_show_func(__name) \
-static ssize_t __name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct scsi_device *sdev = to_scsi_device(dev); \
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev); \
- \
- if (!hpb) \
- return -ENODEV; \
- \
- return sysfs_emit(buf, "%d\n", hpb->params.__name); \
-}
-
-ufshpb_sysfs_param_show_func(requeue_timeout_ms);
-static ssize_t
-requeue_timeout_ms_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val < 0)
- return -EINVAL;
-
- hpb->params.requeue_timeout_ms = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(requeue_timeout_ms);
-
-ufshpb_sysfs_param_show_func(activation_thld);
-static ssize_t
-activation_thld_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= 0)
- return -EINVAL;
-
- hpb->params.activation_thld = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(activation_thld);
-
-ufshpb_sysfs_param_show_func(normalization_factor);
-static ssize_t
-normalization_factor_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= 0 || val > ilog2(hpb->entries_per_srgn))
- return -EINVAL;
-
- hpb->params.normalization_factor = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(normalization_factor);
-
-ufshpb_sysfs_param_show_func(eviction_thld_enter);
-static ssize_t
-eviction_thld_enter_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= hpb->params.eviction_thld_exit)
- return -EINVAL;
-
- hpb->params.eviction_thld_enter = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(eviction_thld_enter);
-
-ufshpb_sysfs_param_show_func(eviction_thld_exit);
-static ssize_t
-eviction_thld_exit_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= hpb->params.activation_thld)
- return -EINVAL;
-
- hpb->params.eviction_thld_exit = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(eviction_thld_exit);
-
-ufshpb_sysfs_param_show_func(read_timeout_ms);
-static ssize_t
-read_timeout_ms_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- /* read_timeout >> timeout_polling_interval */
- if (val < hpb->params.timeout_polling_interval_ms * 2)
- return -EINVAL;
-
- hpb->params.read_timeout_ms = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(read_timeout_ms);
-
-ufshpb_sysfs_param_show_func(read_timeout_expiries);
-static ssize_t
-read_timeout_expiries_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= 0)
- return -EINVAL;
-
- hpb->params.read_timeout_expiries = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(read_timeout_expiries);
-
-ufshpb_sysfs_param_show_func(timeout_polling_interval_ms);
-static ssize_t
-timeout_polling_interval_ms_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- /* timeout_polling_interval << read_timeout */
- if (val <= 0 || val > hpb->params.read_timeout_ms / 2)
- return -EINVAL;
-
- hpb->params.timeout_polling_interval_ms = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(timeout_polling_interval_ms);
-
-ufshpb_sysfs_param_show_func(inflight_map_req);
-static ssize_t inflight_map_req_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct scsi_device *sdev = to_scsi_device(dev);
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
- int val;
-
- if (!hpb)
- return -ENODEV;
-
- if (!hpb->is_hcm)
- return -EOPNOTSUPP;
-
- if (kstrtouint(buf, 0, &val))
- return -EINVAL;
-
- if (val <= 0 || val > hpb->sdev_ufs_lu->queue_depth - 1)
- return -EINVAL;
-
- hpb->params.inflight_map_req = val;
-
- return count;
-}
-static DEVICE_ATTR_RW(inflight_map_req);
-
-static void ufshpb_hcm_param_init(struct ufshpb_lu *hpb)
-{
- hpb->params.activation_thld = ACTIVATION_THRESHOLD;
- hpb->params.normalization_factor = 1;
- hpb->params.eviction_thld_enter = (ACTIVATION_THRESHOLD << 5);
- hpb->params.eviction_thld_exit = (ACTIVATION_THRESHOLD << 4);
- hpb->params.read_timeout_ms = READ_TO_MS;
- hpb->params.read_timeout_expiries = READ_TO_EXPIRIES;
- hpb->params.timeout_polling_interval_ms = POLLING_INTERVAL_MS;
- hpb->params.inflight_map_req = THROTTLE_MAP_REQ_DEFAULT;
-}
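The defaults above keep the host-control-mode thresholds ordered as activation_thld < eviction_thld_exit < eviction_thld_enter, and the sysfs store handlers earlier in the file reject values that would break that ordering. A small sketch of that invariant, with an example value standing in for ACTIVATION_THRESHOLD (which is defined elsewhere):

/* Illustrative validation of the HCM threshold ordering; not driver code. */
#include <stdio.h>

struct hcm_params {
    unsigned int activation_thld;
    unsigned int eviction_thld_enter;
    unsigned int eviction_thld_exit;
};

static int set_eviction_thld_enter(struct hcm_params *p, unsigned int val)
{
    if (val <= p->eviction_thld_exit)
        return -1;    /* -EINVAL in the driver */
    p->eviction_thld_enter = val;
    return 0;
}

static int set_eviction_thld_exit(struct hcm_params *p, unsigned int val)
{
    if (val <= p->activation_thld)
        return -1;
    p->eviction_thld_exit = val;
    return 0;
}

int main(void)
{
    unsigned int thld = 4;    /* example stand-in for ACTIVATION_THRESHOLD */
    struct hcm_params p = {
        .activation_thld = thld,
        .eviction_thld_enter = thld << 5,
        .eviction_thld_exit = thld << 4,
    };

    printf("%d\n", set_eviction_thld_exit(&p, 2));     /* rejected: <= activation_thld */
    printf("%d\n", set_eviction_thld_enter(&p, 256));  /* accepted */
    return 0;
}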
-
-static struct attribute *hpb_dev_param_attrs[] = {
- &dev_attr_requeue_timeout_ms.attr,
- &dev_attr_activation_thld.attr,
- &dev_attr_normalization_factor.attr,
- &dev_attr_eviction_thld_enter.attr,
- &dev_attr_eviction_thld_exit.attr,
- &dev_attr_read_timeout_ms.attr,
- &dev_attr_read_timeout_expiries.attr,
- &dev_attr_timeout_polling_interval_ms.attr,
- &dev_attr_inflight_map_req.attr,
- NULL,
-};
-
-struct attribute_group ufs_sysfs_hpb_param_group = {
- .name = "hpb_params",
- .attrs = hpb_dev_param_attrs,
-};
-
-static int ufshpb_pre_req_mempool_init(struct ufshpb_lu *hpb)
-{
- struct ufshpb_req *pre_req = NULL, *t;
- int qd = hpb->sdev_ufs_lu->queue_depth / 2;
- int i;
-
- INIT_LIST_HEAD(&hpb->lh_pre_req_free);
-
- hpb->pre_req = kcalloc(qd, sizeof(struct ufshpb_req), GFP_KERNEL);
- hpb->throttle_pre_req = qd;
- hpb->num_inflight_pre_req = 0;
-
- if (!hpb->pre_req)
- goto release_mem;
-
- for (i = 0; i < qd; i++) {
- pre_req = hpb->pre_req + i;
- INIT_LIST_HEAD(&pre_req->list_req);
- pre_req->req = NULL;
-
- pre_req->bio = bio_alloc(NULL, 1, 0, GFP_KERNEL);
- if (!pre_req->bio)
- goto release_mem;
-
- pre_req->wb.m_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!pre_req->wb.m_page) {
- bio_put(pre_req->bio);
- goto release_mem;
- }
-
- list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
- }
-
- return 0;
-release_mem:
- list_for_each_entry_safe(pre_req, t, &hpb->lh_pre_req_free, list_req) {
- list_del_init(&pre_req->list_req);
- bio_put(pre_req->bio);
- __free_page(pre_req->wb.m_page);
- }
-
- kfree(hpb->pre_req);
- return -ENOMEM;
-}
-
-static void ufshpb_pre_req_mempool_destroy(struct ufshpb_lu *hpb)
-{
- struct ufshpb_req *pre_req = NULL;
- int i;
-
- for (i = 0; i < hpb->throttle_pre_req; i++) {
- pre_req = hpb->pre_req + i;
- bio_put(hpb->pre_req[i].bio);
- if (!pre_req->wb.m_page)
- __free_page(hpb->pre_req[i].wb.m_page);
- list_del_init(&pre_req->list_req);
- }
-
- kfree(hpb->pre_req);
-}
-
-static void ufshpb_stat_init(struct ufshpb_lu *hpb)
-{
- hpb->stats.hit_cnt = 0;
- hpb->stats.miss_cnt = 0;
- hpb->stats.rcmd_noti_cnt = 0;
- hpb->stats.rcmd_active_cnt = 0;
- hpb->stats.rcmd_inactive_cnt = 0;
- hpb->stats.map_req_cnt = 0;
- hpb->stats.umap_req_cnt = 0;
-}
-
-static void ufshpb_param_init(struct ufshpb_lu *hpb)
-{
- hpb->params.requeue_timeout_ms = HPB_REQUEUE_TIME_MS;
- if (hpb->is_hcm)
- ufshpb_hcm_param_init(hpb);
-}
-
-static int ufshpb_lu_hpb_init(struct ufs_hba *hba, struct ufshpb_lu *hpb)
-{
- int ret;
-
- spin_lock_init(&hpb->rgn_state_lock);
- spin_lock_init(&hpb->rsp_list_lock);
- spin_lock_init(&hpb->param_lock);
-
- INIT_LIST_HEAD(&hpb->lru_info.lh_lru_rgn);
- INIT_LIST_HEAD(&hpb->lh_act_srgn);
- INIT_LIST_HEAD(&hpb->lh_inact_rgn);
- INIT_LIST_HEAD(&hpb->list_hpb_lu);
-
- INIT_WORK(&hpb->map_work, ufshpb_map_work_handler);
- if (hpb->is_hcm) {
- INIT_WORK(&hpb->ufshpb_normalization_work,
- ufshpb_normalization_work_handler);
- INIT_DELAYED_WORK(&hpb->ufshpb_read_to_work,
- ufshpb_read_to_handler);
- }
-
- hpb->map_req_cache = kmem_cache_create("ufshpb_req_cache",
- sizeof(struct ufshpb_req), 0, 0, NULL);
- if (!hpb->map_req_cache) {
- dev_err(hba->dev, "ufshpb(%d) ufshpb_req_cache create fail",
- hpb->lun);
- return -ENOMEM;
- }
-
- hpb->m_page_cache = kmem_cache_create("ufshpb_m_page_cache",
- sizeof(struct page *) * hpb->pages_per_srgn,
- 0, 0, NULL);
- if (!hpb->m_page_cache) {
- dev_err(hba->dev, "ufshpb(%d) ufshpb_m_page_cache create fail",
- hpb->lun);
- ret = -ENOMEM;
- goto release_req_cache;
- }
-
- ret = ufshpb_pre_req_mempool_init(hpb);
- if (ret) {
- dev_err(hba->dev, "ufshpb(%d) pre_req_mempool init fail",
- hpb->lun);
- goto release_m_page_cache;
- }
-
- ret = ufshpb_alloc_region_tbl(hba, hpb);
- if (ret)
- goto release_pre_req_mempool;
-
- ufshpb_stat_init(hpb);
- ufshpb_param_init(hpb);
-
- if (hpb->is_hcm) {
- unsigned int poll;
-
- poll = hpb->params.timeout_polling_interval_ms;
- schedule_delayed_work(&hpb->ufshpb_read_to_work,
- msecs_to_jiffies(poll));
- }
-
- return 0;
-
-release_pre_req_mempool:
- ufshpb_pre_req_mempool_destroy(hpb);
-release_m_page_cache:
- kmem_cache_destroy(hpb->m_page_cache);
-release_req_cache:
- kmem_cache_destroy(hpb->map_req_cache);
- return ret;
-}
-
-static struct ufshpb_lu *
-ufshpb_alloc_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev,
- struct ufshpb_dev_info *hpb_dev_info,
- struct ufshpb_lu_info *hpb_lu_info)
-{
- struct ufshpb_lu *hpb;
- int ret;
-
- hpb = kzalloc(sizeof(struct ufshpb_lu), GFP_KERNEL);
- if (!hpb)
- return NULL;
-
- hpb->lun = sdev->lun;
- hpb->sdev_ufs_lu = sdev;
-
- ufshpb_lu_parameter_init(hba, hpb, hpb_dev_info, hpb_lu_info);
-
- ret = ufshpb_lu_hpb_init(hba, hpb);
- if (ret) {
- dev_err(hba->dev, "hpb lu init failed. ret %d", ret);
- goto release_hpb;
- }
-
- sdev->hostdata = hpb;
- return hpb;
-
-release_hpb:
- kfree(hpb);
- return NULL;
-}
-
-static void ufshpb_discard_rsp_lists(struct ufshpb_lu *hpb)
-{
- struct ufshpb_region *rgn, *next_rgn;
- struct ufshpb_subregion *srgn, *next_srgn;
- unsigned long flags;
-
-	/*
-	 * If a device reset occurred, the remaining HPB region information
-	 * may be stale. Discarding the HPB response lists that remain after
-	 * the reset prevents unnecessary work.
-	 */
- spin_lock_irqsave(&hpb->rsp_list_lock, flags);
- list_for_each_entry_safe(rgn, next_rgn, &hpb->lh_inact_rgn,
- list_inact_rgn)
- list_del_init(&rgn->list_inact_rgn);
-
- list_for_each_entry_safe(srgn, next_srgn, &hpb->lh_act_srgn,
- list_act_srgn)
- list_del_init(&srgn->list_act_srgn);
- spin_unlock_irqrestore(&hpb->rsp_list_lock, flags);
-}
-
-static void ufshpb_cancel_jobs(struct ufshpb_lu *hpb)
-{
- if (hpb->is_hcm) {
- cancel_delayed_work_sync(&hpb->ufshpb_read_to_work);
- cancel_work_sync(&hpb->ufshpb_normalization_work);
- }
- cancel_work_sync(&hpb->map_work);
-}
-
-static bool ufshpb_check_hpb_reset_query(struct ufs_hba *hba)
-{
- int err = 0;
- bool flag_res = true;
- int try;
-
- /* wait for the device to complete HPB reset query */
- for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
- dev_dbg(hba->dev,
- "%s: start flag reset polling %d times\n",
- __func__, try);
-
- /* Poll fHpbReset flag to be cleared */
- err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_HPB_RESET, 0, &flag_res);
-
- if (err) {
- dev_err(hba->dev,
- "%s: reading fHpbReset flag failed with error %d\n",
- __func__, err);
- return flag_res;
- }
-
- if (!flag_res)
- goto out;
-
- usleep_range(1000, 1100);
- }
- if (flag_res) {
- dev_err(hba->dev,
- "%s: fHpbReset was not cleared by the device\n",
- __func__);
- }
-out:
- return flag_res;
-}
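ufshpb_check_hpb_reset_query() is a bounded polling loop: it re-reads the fHpbReset flag up to HPB_RESET_REQ_RETRIES times, sleeping about a millisecond between attempts, until the device clears it. A user-space sketch of the same pattern, with read_flag() as a hypothetical stand-in for the UFS query:

/* Illustrative bounded flag-polling loop; not driver code. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define HPB_RESET_REQ_RETRIES 10

static bool read_flag(void)
{
    static int calls;
    return ++calls < 3;    /* pretend the device clears the flag on call 3 */
}

int main(void)
{
    bool flag_set = true;
    int try;

    for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
        flag_set = read_flag();
        if (!flag_set)
            break;
        usleep(1000);    /* mirrors usleep_range(1000, 1100) */
    }

    if (flag_set)
        printf("fHpbReset never cleared\n");
    else
        printf("reset completed after %d polls\n", try + 1);
    return 0;
}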
-
-/**
- * ufshpb_toggle_state - switch HPB state of all LUs
- * @hba: per-adapter instance
- * @src: expected current HPB state
- * @dest: target HPB state to switch to
- */
-void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest)
-{
- struct ufshpb_lu *hpb;
- struct scsi_device *sdev;
-
- shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
-
- if (!hpb || ufshpb_get_state(hpb) != src)
- continue;
- ufshpb_set_state(hpb, dest);
-
- if (dest == HPB_RESET) {
- ufshpb_cancel_jobs(hpb);
- ufshpb_discard_rsp_lists(hpb);
- }
- }
-}
-
-void ufshpb_suspend(struct ufs_hba *hba)
-{
- struct ufshpb_lu *hpb;
- struct scsi_device *sdev;
-
- shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
- if (!hpb || ufshpb_get_state(hpb) != HPB_PRESENT)
- continue;
-
- ufshpb_set_state(hpb, HPB_SUSPEND);
- ufshpb_cancel_jobs(hpb);
- }
-}
-
-void ufshpb_resume(struct ufs_hba *hba)
-{
- struct ufshpb_lu *hpb;
- struct scsi_device *sdev;
-
- shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
- if (!hpb || ufshpb_get_state(hpb) != HPB_SUSPEND)
- continue;
-
- ufshpb_set_state(hpb, HPB_PRESENT);
- ufshpb_kick_map_work(hpb);
- if (hpb->is_hcm) {
- unsigned int poll = hpb->params.timeout_polling_interval_ms;
-
- schedule_delayed_work(&hpb->ufshpb_read_to_work, msecs_to_jiffies(poll));
- }
- }
-}
-
-static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
- struct ufshpb_lu_info *hpb_lu_info)
-{
- u16 max_active_rgns;
- u8 lu_enable;
- int size = QUERY_DESC_MAX_SIZE;
- int ret;
- char desc_buf[QUERY_DESC_MAX_SIZE];
-
- ufshcd_rpm_get_sync(hba);
- ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
- QUERY_DESC_IDN_UNIT, lun, 0,
- desc_buf, &size);
- ufshcd_rpm_put_sync(hba);
-
- if (ret) {
- dev_err(hba->dev,
- "%s: idn: %d lun: %d query request failed",
- __func__, QUERY_DESC_IDN_UNIT, lun);
- return ret;
- }
-
- lu_enable = desc_buf[UNIT_DESC_PARAM_LU_ENABLE];
- if (lu_enable != LU_ENABLED_HPB_FUNC)
- return -ENODEV;
-
- max_active_rgns = get_unaligned_be16(
- desc_buf + UNIT_DESC_PARAM_HPB_LU_MAX_ACTIVE_RGNS);
- if (!max_active_rgns) {
- dev_err(hba->dev,
- "lun %d wrong number of max active regions\n", lun);
- return -ENODEV;
- }
-
- hpb_lu_info->num_blocks = get_unaligned_be64(
- desc_buf + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT);
- hpb_lu_info->pinned_start = get_unaligned_be16(
- desc_buf + UNIT_DESC_PARAM_HPB_PIN_RGN_START_OFF);
- hpb_lu_info->num_pinned = get_unaligned_be16(
- desc_buf + UNIT_DESC_PARAM_HPB_NUM_PIN_RGNS);
- hpb_lu_info->max_active_rgns = max_active_rgns;
-
- return 0;
-}
-
-void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev)
-{
- struct ufshpb_lu *hpb = ufshpb_get_hpb_data(sdev);
-
- if (!hpb)
- return;
-
- ufshpb_set_state(hpb, HPB_FAILED);
-
- sdev = hpb->sdev_ufs_lu;
- sdev->hostdata = NULL;
-
- ufshpb_cancel_jobs(hpb);
-
- ufshpb_pre_req_mempool_destroy(hpb);
- ufshpb_destroy_region_tbl(hpb);
-
- kmem_cache_destroy(hpb->map_req_cache);
- kmem_cache_destroy(hpb->m_page_cache);
-
- list_del_init(&hpb->list_hpb_lu);
-
- kfree(hpb);
-}
-
-static void ufshpb_hpb_lu_prepared(struct ufs_hba *hba)
-{
- int pool_size;
- struct ufshpb_lu *hpb;
- struct scsi_device *sdev;
- bool init_success;
-
- if (tot_active_srgn_pages == 0) {
- ufshpb_remove(hba);
- return;
- }
-
- init_success = !ufshpb_check_hpb_reset_query(hba);
-
- pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * SZ_1K) / PAGE_SIZE;
- if (pool_size > tot_active_srgn_pages) {
- mempool_resize(ufshpb_mctx_pool, tot_active_srgn_pages);
- mempool_resize(ufshpb_page_pool, tot_active_srgn_pages);
- }
-
- shost_for_each_device(sdev, hba->host) {
- hpb = ufshpb_get_hpb_data(sdev);
- if (!hpb)
- continue;
-
- if (init_success) {
- ufshpb_set_state(hpb, HPB_PRESENT);
- if ((hpb->lu_pinned_end - hpb->lu_pinned_start) > 0)
- queue_work(ufshpb_wq, &hpb->map_work);
- } else {
- dev_err(hba->dev, "destroy HPB lu %d\n", hpb->lun);
- ufshpb_destroy_lu(hba, sdev);
- }
- }
-
- if (!init_success)
- ufshpb_remove(hba);
-}
-
-void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev)
-{
- struct ufshpb_lu *hpb;
- int ret;
- struct ufshpb_lu_info hpb_lu_info = { 0 };
- int lun = sdev->lun;
-
- if (lun >= hba->dev_info.max_lu_supported)
- goto out;
-
- ret = ufshpb_get_lu_info(hba, lun, &hpb_lu_info);
- if (ret)
- goto out;
-
- hpb = ufshpb_alloc_hpb_lu(hba, sdev, &hba->ufshpb_dev,
- &hpb_lu_info);
- if (!hpb)
- goto out;
-
- tot_active_srgn_pages += hpb_lu_info.max_active_rgns *
- hpb->srgns_per_rgn * hpb->pages_per_srgn;
-
-out:
- /* All LUs are initialized */
- if (atomic_dec_and_test(&hba->ufshpb_dev.slave_conf_cnt))
- ufshpb_hpb_lu_prepared(hba);
-}
-
-static int ufshpb_init_mem_wq(struct ufs_hba *hba)
-{
- int ret;
- unsigned int pool_size;
-
- ufshpb_mctx_cache = kmem_cache_create("ufshpb_mctx_cache",
- sizeof(struct ufshpb_map_ctx),
- 0, 0, NULL);
- if (!ufshpb_mctx_cache) {
- dev_err(hba->dev, "ufshpb: cannot init mctx cache\n");
- return -ENOMEM;
- }
-
- pool_size = PAGE_ALIGN(ufshpb_host_map_kbytes * SZ_1K) / PAGE_SIZE;
- dev_info(hba->dev, "%s:%d ufshpb_host_map_kbytes %u pool_size %u\n",
- __func__, __LINE__, ufshpb_host_map_kbytes, pool_size);
-
- ufshpb_mctx_pool = mempool_create_slab_pool(pool_size,
- ufshpb_mctx_cache);
- if (!ufshpb_mctx_pool) {
- dev_err(hba->dev, "ufshpb: cannot init mctx pool\n");
- ret = -ENOMEM;
- goto release_mctx_cache;
- }
-
- ufshpb_page_pool = mempool_create_page_pool(pool_size, 0);
- if (!ufshpb_page_pool) {
- dev_err(hba->dev, "ufshpb: cannot init page pool\n");
- ret = -ENOMEM;
- goto release_mctx_pool;
- }
-
- ufshpb_wq = alloc_workqueue("ufshpb-wq",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
- if (!ufshpb_wq) {
- dev_err(hba->dev, "ufshpb: alloc workqueue failed\n");
- ret = -ENOMEM;
- goto release_page_pool;
- }
-
- return 0;
-
-release_page_pool:
- mempool_destroy(ufshpb_page_pool);
-release_mctx_pool:
- mempool_destroy(ufshpb_mctx_pool);
-release_mctx_cache:
- kmem_cache_destroy(ufshpb_mctx_cache);
- return ret;
-}
-
-void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf)
-{
- struct ufshpb_dev_info *hpb_info = &hba->ufshpb_dev;
- int max_active_rgns = 0;
- int hpb_num_lu;
-
- hpb_num_lu = geo_buf[GEOMETRY_DESC_PARAM_HPB_NUMBER_LU];
- if (hpb_num_lu == 0) {
- dev_err(hba->dev, "No HPB LU supported\n");
- hpb_info->hpb_disabled = true;
- return;
- }
-
- hpb_info->rgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_REGION_SIZE];
- hpb_info->srgn_size = geo_buf[GEOMETRY_DESC_PARAM_HPB_SUBREGION_SIZE];
- max_active_rgns = get_unaligned_be16(geo_buf +
- GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS);
-
- if (hpb_info->rgn_size == 0 || hpb_info->srgn_size == 0 ||
- max_active_rgns == 0) {
- dev_err(hba->dev, "No HPB supported device\n");
- hpb_info->hpb_disabled = true;
- return;
- }
-}
-
-void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf)
-{
- struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
- int version, ret;
- int max_single_cmd;
-
- hpb_dev_info->control_mode = desc_buf[DEVICE_DESC_PARAM_HPB_CONTROL];
-
- version = get_unaligned_be16(desc_buf + DEVICE_DESC_PARAM_HPB_VER);
- if ((version != HPB_SUPPORT_VERSION) &&
- (version != HPB_SUPPORT_LEGACY_VERSION)) {
- dev_err(hba->dev, "%s: HPB %x version is not supported.\n",
- __func__, version);
- hpb_dev_info->hpb_disabled = true;
- return;
- }
-
- if (version == HPB_SUPPORT_LEGACY_VERSION)
- hpb_dev_info->is_legacy = true;
-
-	/*
-	 * Get the number of user logical units to check whether all
-	 * scsi_devices have finished initialization.
-	 */
- hpb_dev_info->num_lu = desc_buf[DEVICE_DESC_PARAM_NUM_LU];
-
- if (hpb_dev_info->is_legacy)
- return;
-
- ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_MAX_HPB_SINGLE_CMD, 0, 0, &max_single_cmd);
-
- if (ret)
- hpb_dev_info->max_hpb_single_cmd = HPB_LEGACY_CHUNK_HIGH;
- else
- hpb_dev_info->max_hpb_single_cmd = min(max_single_cmd + 1, HPB_MULTI_CHUNK_HIGH);
-}
-
-void ufshpb_init(struct ufs_hba *hba)
-{
- struct ufshpb_dev_info *hpb_dev_info = &hba->ufshpb_dev;
- int try;
- int ret;
-
- if (!ufshpb_is_allowed(hba) || !hba->dev_info.hpb_enabled)
- return;
-
- if (ufshpb_init_mem_wq(hba)) {
- hpb_dev_info->hpb_disabled = true;
- return;
- }
-
- atomic_set(&hpb_dev_info->slave_conf_cnt, hpb_dev_info->num_lu);
- tot_active_srgn_pages = 0;
- /* issue HPB reset query */
- for (try = 0; try < HPB_RESET_REQ_RETRIES; try++) {
- ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_HPB_RESET, 0, NULL);
- if (!ret)
- break;
- }
-}
-
-void ufshpb_remove(struct ufs_hba *hba)
-{
- mempool_destroy(ufshpb_page_pool);
- mempool_destroy(ufshpb_mctx_pool);
- kmem_cache_destroy(ufshpb_mctx_cache);
-
- destroy_workqueue(ufshpb_wq);
-}
-
-module_param(ufshpb_host_map_kbytes, uint, 0644);
-MODULE_PARM_DESC(ufshpb_host_map_kbytes,
- "ufshpb host mapping memory kilo-bytes for ufshpb memory-pool");
diff --git a/drivers/ufs/core/ufshpb.h b/drivers/ufs/core/ufshpb.h
deleted file mode 100644
index b428bbdd2799..000000000000
--- a/drivers/ufs/core/ufshpb.h
+++ /dev/null
@@ -1,318 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Universal Flash Storage Host Performance Booster
- *
- * Copyright (C) 2017-2021 Samsung Electronics Co., Ltd.
- *
- * Authors:
- * Yongmyung Lee <ymhungry.lee@samsung.com>
- * Jinyoung Choi <j-young.choi@samsung.com>
- */
-
-#ifndef _UFSHPB_H_
-#define _UFSHPB_H_
-
-/* hpb response UPIU macro */
-#define HPB_RSP_NONE 0x0
-#define HPB_RSP_REQ_REGION_UPDATE 0x1
-#define HPB_RSP_DEV_RESET 0x2
-#define MAX_ACTIVE_NUM 2
-#define MAX_INACTIVE_NUM 2
-#define DEV_DATA_SEG_LEN 0x14
-#define DEV_SENSE_SEG_LEN 0x12
-#define DEV_DES_TYPE 0x80
-#define DEV_ADDITIONAL_LEN 0x10
-
-/* hpb map & entries macro */
-#define HPB_RGN_SIZE_UNIT 512
-#define HPB_ENTRY_BLOCK_SIZE SZ_4K
-#define HPB_ENTRY_SIZE 0x8
-#define PINNED_NOT_SET U32_MAX
-
-/* hpb support chunk size */
-#define HPB_LEGACY_CHUNK_HIGH 1
-#define HPB_MULTI_CHUNK_HIGH 255
-
-/* hpb vendor-defined opcodes */
-#define UFSHPB_READ 0xF8
-#define UFSHPB_READ_BUFFER 0xF9
-#define UFSHPB_READ_BUFFER_ID 0x01
-#define UFSHPB_WRITE_BUFFER 0xFA
-#define UFSHPB_WRITE_BUFFER_INACT_SINGLE_ID 0x01
-#define UFSHPB_WRITE_BUFFER_PREFETCH_ID 0x02
-#define UFSHPB_WRITE_BUFFER_INACT_ALL_ID 0x03
-#define HPB_WRITE_BUFFER_CMD_LENGTH 10
-#define MAX_HPB_READ_ID 0x7F
-#define HPB_READ_BUFFER_CMD_LENGTH 10
-#define LU_ENABLED_HPB_FUNC 0x02
-
-#define HPB_RESET_REQ_RETRIES 10
-#define HPB_MAP_REQ_RETRIES 5
-#define HPB_REQUEUE_TIME_MS 0
-
-#define HPB_SUPPORT_VERSION 0x200
-#define HPB_SUPPORT_LEGACY_VERSION 0x100
-
-enum UFSHPB_MODE {
- HPB_HOST_CONTROL,
- HPB_DEVICE_CONTROL,
-};
-
-enum UFSHPB_STATE {
- HPB_INIT,
- HPB_PRESENT,
- HPB_SUSPEND,
- HPB_FAILED,
- HPB_RESET,
-};
-
-enum HPB_RGN_STATE {
- HPB_RGN_INACTIVE,
- HPB_RGN_ACTIVE,
- /* pinned regions are always active */
- HPB_RGN_PINNED,
-};
-
-enum HPB_SRGN_STATE {
- HPB_SRGN_UNUSED,
- HPB_SRGN_INVALID,
- HPB_SRGN_VALID,
- HPB_SRGN_ISSUED,
-};
-
-/**
- * struct ufshpb_lu_info - UFSHPB logical unit related info
- * @num_blocks: the number of logical blocks
- * @pinned_start: the start region number of the pinned regions
- * @num_pinned: the number of pinned regions
- * @max_active_rgns: maximum number of active regions
- */
-struct ufshpb_lu_info {
- int num_blocks;
- int pinned_start;
- int num_pinned;
- int max_active_rgns;
-};
-
-struct ufshpb_map_ctx {
- struct page **m_page;
- unsigned long *ppn_dirty;
-};
-
-struct ufshpb_subregion {
- struct ufshpb_map_ctx *mctx;
- enum HPB_SRGN_STATE srgn_state;
- int rgn_idx;
- int srgn_idx;
- bool is_last;
-
- /* subregion reads - for host mode */
- unsigned int reads;
-
- /* below information is used by rsp_list */
- struct list_head list_act_srgn;
-};
-
-struct ufshpb_region {
- struct ufshpb_lu *hpb;
- struct ufshpb_subregion *srgn_tbl;
- enum HPB_RGN_STATE rgn_state;
- int rgn_idx;
- int srgn_cnt;
-
- /* below information is used by rsp_list */
- struct list_head list_inact_rgn;
-
- /* below information is used by lru */
- struct list_head list_lru_rgn;
- unsigned long rgn_flags;
-#define RGN_FLAG_DIRTY 0
-#define RGN_FLAG_UPDATE 1
-
- /* region reads - for host mode */
- spinlock_t rgn_lock;
- unsigned int reads;
- /* region "cold" timer - for host mode */
- ktime_t read_timeout;
- unsigned int read_timeout_expiries;
- struct list_head list_expired_rgn;
-};
-
-#define for_each_sub_region(rgn, i, srgn) \
- for ((i) = 0; \
- ((i) < (rgn)->srgn_cnt) && ((srgn) = &(rgn)->srgn_tbl[i]); \
- (i)++)
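A minimal stand-alone illustration of how the for_each_sub_region() iterator above is used; the structures are trimmed down to the two fields the macro touches.

/* Illustrative use of the for_each_sub_region() iterator pattern. */
#include <stdio.h>

struct srgn { int srgn_idx; };
struct rgn  { struct srgn *srgn_tbl; int srgn_cnt; };

#define for_each_sub_region(rgn, i, srgn)                               \
    for ((i) = 0;                                                       \
         ((i) < (rgn)->srgn_cnt) && ((srgn) = &(rgn)->srgn_tbl[i]);     \
         (i)++)

int main(void)
{
    struct srgn tbl[3] = { { 0 }, { 1 }, { 2 } };
    struct rgn rgn = { .srgn_tbl = tbl, .srgn_cnt = 3 };
    struct srgn *srgn;
    int i;

    for_each_sub_region(&rgn, i, srgn)
        printf("subregion %d\n", srgn->srgn_idx);
    return 0;
}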
-
-/**
- * struct ufshpb_req - HPB related request structure (write/read buffer)
- * @req: block layer request structure
- * @bio: bio for this request
- * @hpb: ufshpb_lu structure that this request belongs to
- * @list_req: ufshpb_req mempool list
- * @sense: store its sense data
- * @mctx: L2P map information
- * @rgn_idx: target region index
- * @srgn_idx: target sub-region index
- * @lun: target logical unit number
- * @m_page: L2P map information data for pre-request
- * @len: length of host-side cached L2P map in m_page
- * @lpn: start LPN of L2P map in m_page
- */
-struct ufshpb_req {
- struct request *req;
- struct bio *bio;
- struct ufshpb_lu *hpb;
- struct list_head list_req;
- union {
- struct {
- struct ufshpb_map_ctx *mctx;
- unsigned int rgn_idx;
- unsigned int srgn_idx;
- unsigned int lun;
- } rb;
- struct {
- struct page *m_page;
- unsigned int len;
- unsigned long lpn;
- } wb;
- };
-};
-
-struct victim_select_info {
- struct list_head lh_lru_rgn; /* LRU list of regions */
- int max_lru_active_cnt; /* supported hpb #region - pinned #region */
- atomic_t active_cnt;
-};
-
-/**
- * struct ufshpb_params - UFS HPB parameters
- * @requeue_timeout_ms: requeue threshold of wb command (0x2)
- * @activation_thld: min reads [IOs] to activate/update a region
- * @normalization_factor: shift right the region's reads
- * @eviction_thld_enter: min reads [IOs] for the entering region in eviction
- * @eviction_thld_exit: max reads [IOs] for the exiting region in eviction
- * @read_timeout_ms: timeout [ms] from the last read IO to the region
- * @read_timeout_expiries: number of allowed timeout expiries
- * @timeout_polling_interval_ms: interval at which timeouts are checked
- * @inflight_map_req: number of inflight map requests
- */
-struct ufshpb_params {
- unsigned int requeue_timeout_ms;
- unsigned int activation_thld;
- unsigned int normalization_factor;
- unsigned int eviction_thld_enter;
- unsigned int eviction_thld_exit;
- unsigned int read_timeout_ms;
- unsigned int read_timeout_expiries;
- unsigned int timeout_polling_interval_ms;
- unsigned int inflight_map_req;
-};
-
-struct ufshpb_stats {
- u64 hit_cnt;
- u64 miss_cnt;
- u64 rcmd_noti_cnt;
- u64 rcmd_active_cnt;
- u64 rcmd_inactive_cnt;
- u64 map_req_cnt;
- u64 pre_req_cnt;
- u64 umap_req_cnt;
-};
-
-struct ufshpb_lu {
- int lun;
- struct scsi_device *sdev_ufs_lu;
-
- spinlock_t rgn_state_lock; /* for protect rgn/srgn state */
- struct ufshpb_region *rgn_tbl;
-
- atomic_t hpb_state;
-
- spinlock_t rsp_list_lock;
- struct list_head lh_act_srgn; /* hold rsp_list_lock */
- struct list_head lh_inact_rgn; /* hold rsp_list_lock */
-
- /* pre request information */
- struct ufshpb_req *pre_req;
- int num_inflight_pre_req;
- int throttle_pre_req;
- int num_inflight_map_req; /* hold param_lock */
- spinlock_t param_lock;
-
- struct list_head lh_pre_req_free;
- int pre_req_max_tr_len;
-
- /* cached L2P map management worker */
- struct work_struct map_work;
-
- /* for selecting victim */
- struct victim_select_info lru_info;
- struct work_struct ufshpb_normalization_work;
- struct delayed_work ufshpb_read_to_work;
- unsigned long work_data_bits;
-#define TIMEOUT_WORK_RUNNING 0
-
- /* pinned region information */
- u32 lu_pinned_start;
- u32 lu_pinned_end;
-
- /* HPB related configuration */
- u32 rgns_per_lu;
- u32 srgns_per_lu;
- u32 last_srgn_entries;
- int srgns_per_rgn;
- u32 srgn_mem_size;
- u32 entries_per_rgn_mask;
- u32 entries_per_rgn_shift;
- u32 entries_per_srgn;
- u32 entries_per_srgn_mask;
- u32 entries_per_srgn_shift;
- u32 pages_per_srgn;
-
- bool is_hcm;
-
- struct ufshpb_stats stats;
- struct ufshpb_params params;
-
- struct kmem_cache *map_req_cache;
- struct kmem_cache *m_page_cache;
-
- struct list_head list_hpb_lu;
-};
-
-struct ufs_hba;
-struct ufshcd_lrb;
-
-#ifndef CONFIG_SCSI_UFS_HPB
-static int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { return 0; }
-static void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) {}
-static void ufshpb_resume(struct ufs_hba *hba) {}
-static void ufshpb_suspend(struct ufs_hba *hba) {}
-static void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest) {}
-static void ufshpb_init(struct ufs_hba *hba) {}
-static void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
-static void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev) {}
-static void ufshpb_remove(struct ufs_hba *hba) {}
-static bool ufshpb_is_allowed(struct ufs_hba *hba) { return false; }
-static void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf) {}
-static void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf) {}
-static bool ufshpb_is_legacy(struct ufs_hba *hba) { return false; }
-#else
-int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
-void ufshpb_rsp_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
-void ufshpb_resume(struct ufs_hba *hba);
-void ufshpb_suspend(struct ufs_hba *hba);
-void ufshpb_toggle_state(struct ufs_hba *hba, enum UFSHPB_STATE src, enum UFSHPB_STATE dest);
-void ufshpb_init(struct ufs_hba *hba);
-void ufshpb_init_hpb_lu(struct ufs_hba *hba, struct scsi_device *sdev);
-void ufshpb_destroy_lu(struct ufs_hba *hba, struct scsi_device *sdev);
-void ufshpb_remove(struct ufs_hba *hba);
-bool ufshpb_is_allowed(struct ufs_hba *hba);
-void ufshpb_get_geo_info(struct ufs_hba *hba, u8 *geo_buf);
-void ufshpb_get_dev_info(struct ufs_hba *hba, u8 *desc_buf);
-bool ufshpb_is_legacy(struct ufs_hba *hba);
-extern struct attribute_group ufs_sysfs_hpb_stat_group;
-extern struct attribute_group ufs_sysfs_hpb_param_group;
-#endif
-
-#endif /* End of Header */
diff --git a/drivers/ufs/host/cdns-pltfrm.c b/drivers/ufs/host/cdns-pltfrm.c
index 26761425a76c..2491e7e87028 100644
--- a/drivers/ufs/host/cdns-pltfrm.c
+++ b/drivers/ufs/host/cdns-pltfrm.c
@@ -101,11 +101,10 @@ static void cdns_ufs_set_l4_attr(struct ufs_hba *hba)
}
/**
- * cdns_ufs_set_hclkdiv()
- * Sets HCLKDIV register value based on the core_clk
+ * cdns_ufs_set_hclkdiv() - set HCLKDIV register value based on the core_clk.
* @hba: host controller instance
*
- * Return zero for success and non-zero for failure
+ * Return: zero for success and non-zero for failure.
*/
static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
{
@@ -143,12 +142,11 @@ static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
}
/**
- * cdns_ufs_hce_enable_notify()
- * Called before and after HCE enable bit is set.
+ * cdns_ufs_hce_enable_notify() - set HCLKDIV register
* @hba: host controller instance
* @status: notify stage (pre, post change)
*
- * Return zero for success and non-zero for failure
+ * Return: zero for success and non-zero for failure.
*/
static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
@@ -160,12 +158,10 @@ static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
}
/**
- * cdns_ufs_hibern8_notify()
- * Called around hibern8 enter/exit.
+ * cdns_ufs_hibern8_notify() - save and restore L4 attributes.
* @hba: host controller instance
* @cmd: UIC Command
* @status: notify stage (pre, post change)
- *
*/
static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
enum ufs_notify_change_status status)
@@ -177,12 +173,11 @@ static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
}
/**
- * cdns_ufs_link_startup_notify()
- * Called before and after Link startup is carried out.
+ * cdns_ufs_link_startup_notify() - handle link startup.
* @hba: host controller instance
* @status: notify stage (pre, post change)
*
- * Return zero for success and non-zero for failure
+ * Return: zero for success and non-zero for failure.
*/
static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
@@ -212,7 +207,7 @@ static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
* cdns_ufs_init - performs additional ufs initialization
* @hba: host controller instance
*
- * Returns status of initialization
+ * Return: status of initialization.
*/
static int cdns_ufs_init(struct ufs_hba *hba)
{
@@ -235,7 +230,7 @@ static int cdns_ufs_init(struct ufs_hba *hba)
* cdns_ufs_m31_16nm_phy_initialization - performs m31 phy initialization
* @hba: host controller instance
*
- * Always returns 0
+ * Return: 0 (success).
*/
static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba)
{
@@ -284,7 +279,7 @@ MODULE_DEVICE_TABLE(of, cdns_ufs_of_match);
* cdns_ufs_pltfrm_probe - probe routine of the driver
* @pdev: pointer to platform device handle
*
- * Return zero for success and non-zero for failure
+ * Return: zero for success and non-zero for failure.
*/
static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
{
@@ -308,7 +303,7 @@ static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
* cdns_ufs_pltfrm_remove - removes the ufs driver
* @pdev: pointer to platform device handle
*
- * Always returns 0
+ * Return: 0 (success).
*/
static int cdns_ufs_pltfrm_remove(struct platform_device *pdev)
{
diff --git a/drivers/ufs/host/tc-dwc-g210-pci.c b/drivers/ufs/host/tc-dwc-g210-pci.c
index f96fe5855841..876781fd6861 100644
--- a/drivers/ufs/host/tc-dwc-g210-pci.c
+++ b/drivers/ufs/host/tc-dwc-g210-pci.c
@@ -51,7 +51,7 @@ static void tc_dwc_g210_pci_remove(struct pci_dev *pdev)
* @pdev: pointer to PCI device handle
* @id: PCI device id
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int
tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/ufs/host/tc-dwc-g210.c b/drivers/ufs/host/tc-dwc-g210.c
index deb93dbd83a4..0ac53cc8465e 100644
--- a/drivers/ufs/host/tc-dwc-g210.c
+++ b/drivers/ufs/host/tc-dwc-g210.c
@@ -17,11 +17,10 @@
#include "tc-dwc-g210.h"
/**
- * tc_dwc_g210_setup_40bit_rmmi()
- * This function configures Synopsys TC specific atributes (40-bit RMMI)
+ * tc_dwc_g210_setup_40bit_rmmi() - configure 40-bit RMMI.
* @hba: Pointer to drivers structure
*
- * Returns 0 on success or non-zero value on failure
+ * Return: 0 on success or non-zero value on failure.
*/
static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
{
@@ -81,11 +80,10 @@ static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
}
/**
- * tc_dwc_g210_setup_20bit_rmmi_lane0()
- * This function configures Synopsys TC 20-bit RMMI Lane 0
+ * tc_dwc_g210_setup_20bit_rmmi_lane0() - configure 20-bit RMMI Lane 0.
* @hba: Pointer to drivers structure
*
- * Returns 0 on success or non-zero value on failure
+ * Return: 0 on success or non-zero value on failure.
*/
static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba)
{
@@ -134,11 +132,10 @@ static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba)
}
/**
- * tc_dwc_g210_setup_20bit_rmmi_lane1()
- * This function configures Synopsys TC 20-bit RMMI Lane 1
+ * tc_dwc_g210_setup_20bit_rmmi_lane1() - configure 20-bit RMMI Lane 1.
* @hba: Pointer to drivers structure
*
- * Returns 0 on success or non-zero value on failure
+ * Return: 0 on success or non-zero value on failure.
*/
static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba)
{
@@ -211,11 +208,10 @@ out:
}
/**
- * tc_dwc_g210_setup_20bit_rmmi()
- * This function configures Synopsys TC specific atributes (20-bit RMMI)
+ * tc_dwc_g210_setup_20bit_rmmi() - configure 20-bit RMMI.
* @hba: Pointer to drivers structure
*
- * Returns 0 on success or non-zero value on failure
+ * Return: 0 on success or non-zero value on failure.
*/
static int tc_dwc_g210_setup_20bit_rmmi(struct ufs_hba *hba)
{
@@ -251,12 +247,10 @@ out:
}
/**
- * tc_dwc_g210_config_40_bit()
- * This function configures Local (host) Synopsys 40-bit TC specific attributes
- *
+ * tc_dwc_g210_config_40_bit() - configure 40-bit TC specific attributes.
* @hba: Pointer to drivers structure
*
- * Returns 0 on success non-zero value on failure
+ * Return: 0 on success non-zero value on failure.
*/
int tc_dwc_g210_config_40_bit(struct ufs_hba *hba)
{
@@ -283,12 +277,10 @@ out:
EXPORT_SYMBOL(tc_dwc_g210_config_40_bit);
/**
- * tc_dwc_g210_config_20_bit()
- * This function configures Local (host) Synopsys 20-bit TC specific attributes
- *
+ * tc_dwc_g210_config_20_bit() - configure 20-bit TC specific attributes.
* @hba: Pointer to drivers structure
*
- * Returns 0 on success non-zero value on failure
+ * Return: 0 on success non-zero value on failure.
*/
int tc_dwc_g210_config_20_bit(struct ufs_hba *hba)
{
diff --git a/drivers/ufs/host/ti-j721e-ufs.c b/drivers/ufs/host/ti-j721e-ufs.c
index 122d650d0810..117eb7da92ac 100644
--- a/drivers/ufs/host/ti-j721e-ufs.c
+++ b/drivers/ufs/host/ti-j721e-ufs.c
@@ -81,6 +81,8 @@ static const struct of_device_id ti_j721e_ufs_of_match[] = {
{ },
};
+MODULE_DEVICE_TABLE(of, ti_j721e_ufs_of_match);
+
static struct platform_driver ti_j721e_ufs_driver = {
.probe = ti_j721e_ufs_probe,
.remove = ti_j721e_ufs_remove,
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index e68b05976f9e..2383ecd88f1c 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -14,6 +14,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
@@ -27,8 +28,14 @@
#include <ufs/unipro.h>
#include "ufs-mediatek.h"
+static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
+
#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
+#undef CREATE_TRACE_POINTS
+
+#define MAX_SUPP_MAC 64
+#define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)
static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
{ .wmanufacturerid = UFS_ANY_VENDOR,
@@ -659,7 +666,7 @@ static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
* @on: If true, enable clocks else disable them.
* @status: PRE_CHANGE or POST_CHANGE notify
*
- * Returns 0 on success, non-zero on failure.
+ * Return: 0 on success, non-zero on failure.
*/
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
@@ -840,6 +847,37 @@ static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
}
}
+static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct platform_device *pdev;
+ int i;
+ int irq;
+
+ host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
+ pdev = container_of(hba->dev, struct platform_device, dev);
+
+ for (i = 0; i < host->mcq_nr_intr; i++) {
+ /* irq index 0 is legacy irq, sq/cq irq start from index 1 */
+ irq = platform_get_irq(pdev, i + 1);
+ if (irq < 0) {
+ host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
+ goto failed;
+ }
+ host->mcq_intr_info[i].hba = hba;
+ host->mcq_intr_info[i].irq = irq;
+ dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
+ }
+
+ return;
+failed:
+ /* invalidate irq info */
+ for (i = 0; i < host->mcq_nr_intr; i++)
+ host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
+
+ host->mcq_nr_intr = 0;
+}
+
/**
* ufs_mtk_init - find other essential mmio bases
* @hba: host controller instance
@@ -847,7 +885,7 @@ static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
* Binds PHY with controller and powers up PHY enabling clocks
* and regulators.
*
- * Returns -EPROBE_DEFER if binding fails, returns negative error
+ * Return: -EPROBE_DEFER if binding fails, returns negative error
* on phy power up failure and returns zero on success.
*/
static int ufs_mtk_init(struct ufs_hba *hba)
@@ -876,6 +914,8 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Initialize host capability */
ufs_mtk_init_host_caps(hba);
+ ufs_mtk_init_mcq_irq(hba);
+
err = ufs_mtk_bind_mphy(hba);
if (err)
goto out_variant_clear;
@@ -1173,7 +1213,17 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
else
return err;
- err = ufshcd_make_hba_operational(hba);
+ if (!hba->mcq_enabled) {
+ err = ufshcd_make_hba_operational(hba);
+ } else {
+ ufs_mtk_config_mcq(hba, false);
+ ufshcd_mcq_make_queues_operational(hba);
+ ufshcd_mcq_config_mac(hba, hba->nutrs);
+ /* Enable MCQ mode */
+ ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
+ REG_UFS_MEM_CFG);
+ }
+
if (err)
return err;
@@ -1497,6 +1547,121 @@ static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
return 0;
}
+static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
+{
+ return MAX_SUPP_MAC;
+}
+
+static int ufs_mtk_op_runtime_config(struct ufs_hba *hba)
+{
+ struct ufshcd_mcq_opr_info_t *opr;
+ int i;
+
+ hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD;
+ hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS;
+ hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD;
+ hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS;
+
+ for (i = 0; i < OPR_MAX; i++) {
+ opr = &hba->mcq_opr[i];
+ opr->stride = REG_UFS_MCQ_STRIDE;
+ opr->base = hba->mmio_base + opr->offset;
+ }
+
+ return 0;
+}
+
+static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ /* fail mcq initialization if interrupt is not filled properly */
+ if (!host->mcq_nr_intr) {
+ dev_info(hba->dev, "IRQs not ready. MCQ disabled.\n");
+ return -EINVAL;
+ }
+
+ hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities);
+ return 0;
+}
+
+static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
+{
+ struct ufs_mtk_mcq_intr_info *mcq_intr_info = __intr_info;
+ struct ufs_hba *hba = mcq_intr_info->hba;
+ struct ufs_hw_queue *hwq;
+ u32 events;
+ int qid = mcq_intr_info->qid;
+
+ hwq = &hba->uhq[qid];
+
+ events = ufshcd_mcq_read_cqis(hba, qid);
+ if (events)
+ ufshcd_mcq_write_cqis(hba, events, qid);
+
+ if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
+ ufshcd_mcq_poll_cqe_lock(hba, hwq);
+
+ return IRQ_HANDLED;
+}
+
+static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ u32 irq, i;
+ int ret;
+
+ for (i = 0; i < host->mcq_nr_intr; i++) {
+ irq = host->mcq_intr_info[i].irq;
+ if (irq == MTK_MCQ_INVALID_IRQ) {
+ dev_err(hba->dev, "invalid irq. %d\n", i);
+ return -ENOPARAM;
+ }
+
+ host->mcq_intr_info[i].qid = i;
+ ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD,
+ &host->mcq_intr_info[i]);
+
+ dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? "failed" : "");
+
+ if (ret) {
+ dev_err(hba->dev, "Cannot request irq %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ int ret = 0;
+
+ if (!host->mcq_set_intr) {
+ /* Disable irq option register */
+ ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);
+
+ if (irq) {
+ ret = ufs_mtk_config_mcq_irq(hba);
+ if (ret)
+ return ret;
+ }
+
+ host->mcq_set_intr = true;
+ }
+
+ ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
+ ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0);
+
+ return 0;
+}
+
+static int ufs_mtk_config_esi(struct ufs_hba *hba)
+{
+ return ufs_mtk_config_mcq(hba, true);
+}
+
/*
* struct ufs_hba_mtk_vops - UFS MTK specific variant operations
*
@@ -1520,13 +1685,18 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.event_notify = ufs_mtk_event_notify,
.config_scaling_param = ufs_mtk_config_scaling_param,
.clk_scale_notify = ufs_mtk_clk_scale_notify,
+ /* mcq vops */
+ .get_hba_mac = ufs_mtk_get_hba_mac,
+ .op_runtime_config = ufs_mtk_op_runtime_config,
+ .mcq_config_resource = ufs_mtk_mcq_config_resource,
+ .config_esi = ufs_mtk_config_esi,
};
/**
* ufs_mtk_probe - probe routine of the driver
* @pdev: pointer to Platform device handle
*
- * Return zero for success and non-zero for failure
+ * Return: zero for success and non-zero for failure.
*/
static int ufs_mtk_probe(struct platform_device *pdev)
{
@@ -1566,7 +1736,7 @@ skip_reset:
out:
if (err)
- dev_info(dev, "probe failed %d\n", err);
+ dev_err(dev, "probe failed %d\n", err);
of_node_put(reset_node);
return err;
diff --git a/drivers/ufs/host/ufs-mediatek.h b/drivers/ufs/host/ufs-mediatek.h
index 2fc6d7b87694..f76e80d91729 100644
--- a/drivers/ufs/host/ufs-mediatek.h
+++ b/drivers/ufs/host/ufs-mediatek.h
@@ -11,10 +11,26 @@
#include <linux/soc/mediatek/mtk_sip_svc.h>
/*
+ * MCQ define and struct
+ */
+#define UFSHCD_MAX_Q_NR 8
+#define MTK_MCQ_INVALID_IRQ 0xFFFF
+
+/* REG_UFS_MMIO_OPT_CTRL_0 160h */
+#define EHS_EN BIT(0)
+#define PFM_IMPV BIT(1)
+#define MCQ_MULTI_INTR_EN BIT(2)
+#define MCQ_CMB_INTR_EN BIT(3)
+#define MCQ_AH8 BIT(4)
+
+#define MCQ_INTR_EN_MSK (MCQ_MULTI_INTR_EN | MCQ_CMB_INTR_EN)
+
+/*
* Vendor specific UFSHCI Registers
*/
#define REG_UFS_XOUFS_CTRL 0x140
#define REG_UFS_REFCLK_CTRL 0x144
+#define REG_UFS_MMIO_OPT_CTRL_0 0x160
#define REG_UFS_EXTREG 0x2100
#define REG_UFS_MPHYCTRL 0x2200
#define REG_UFS_MTK_IP_VER 0x2240
@@ -26,6 +42,13 @@
#define REG_UFS_DEBUG_SEL_B2 0x22D8
#define REG_UFS_DEBUG_SEL_B3 0x22DC
+#define REG_UFS_MTK_SQD 0x2800
+#define REG_UFS_MTK_SQIS 0x2814
+#define REG_UFS_MTK_CQD 0x281C
+#define REG_UFS_MTK_CQIS 0x2824
+
+#define REG_UFS_MCQ_STRIDE 0x30
+
/*
* Ref-clk control
*
@@ -136,6 +159,12 @@ struct ufs_mtk_hw_ver {
u8 major;
};
+struct ufs_mtk_mcq_intr_info {
+ struct ufs_hba *hba;
+ u32 irq;
+ u8 qid;
+};
+
struct ufs_mtk_host {
struct phy *mphy;
struct pm_qos_request pm_qos_req;
@@ -155,6 +184,10 @@ struct ufs_mtk_host {
u16 ref_clk_ungating_wait_us;
u16 ref_clk_gating_wait_us;
u32 ip_ver;
+
+ bool mcq_set_intr;
+ int mcq_nr_intr;
+ struct ufs_mtk_mcq_intr_info mcq_intr_info[UFSHCD_MAX_Q_NR];
};
/*
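The MCQ_QUEUE_OFFSET() macro added above derives the MCQ register offset from bits [23:16] of the controller's MCQ capability word, in 0x200-byte steps; hba->mcq_base is then mmio_base plus that offset. A minimal user-space sketch of the arithmetic follows; the macro body is copied from the patch, while the sample capability value is made up purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Copied from the patch: bits [23:16] of the MCQ capability word,
 * scaled by 0x200 bytes per step. */
#define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)

int main(void)
{
	uint32_t caps = 0x00020000;	/* hypothetical: offset field == 2 */

	/* 2 * 0x200 == 0x400, so the MCQ queue registers would start
	 * 0x400 bytes past mmio_base. */
	printf("offset = %#x\n", MCQ_QUEUE_OFFSET(caps));
	return 0;
}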
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index c1557d21b027..d1149b1c3ed5 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -7,6 +7,7 @@
#include <linux/time.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -46,6 +47,49 @@ enum {
TSTBUS_MAX,
};
+#define QCOM_UFS_MAX_GEAR 4
+#define QCOM_UFS_MAX_LANE 2
+
+enum {
+ MODE_MIN,
+ MODE_PWM,
+ MODE_HS_RA,
+ MODE_HS_RB,
+ MODE_MAX,
+};
+
+static const struct __ufs_qcom_bw_table {
+ u32 mem_bw;
+ u32 cfg_bw;
+} ufs_qcom_bw_table[MODE_MAX + 1][QCOM_UFS_MAX_GEAR + 1][QCOM_UFS_MAX_LANE + 1] = {
+ [MODE_MIN][0][0] = { 0, 0 }, /* Bandwidth values in KB/s */
+ [MODE_PWM][UFS_PWM_G1][UFS_LANE_1] = { 922, 1000 },
+ [MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844, 1000 },
+ [MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688, 1000 },
+ [MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376, 1000 },
+ [MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844, 1000 },
+ [MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688, 1000 },
+ [MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376, 1000 },
+ [MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752, 1000 },
+ [MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796, 1000 },
+ [MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591, 1000 },
+ [MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
+ [MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
+ [MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591, 1000 },
+ [MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181, 1000 },
+ [MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
+ [MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
+ [MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422, 1000 },
+ [MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189, 1000 },
+ [MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
+ [MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
+ [MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189, 1000 },
+ [MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378, 1000 },
+ [MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
+ [MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
+ [MODE_MAX][0][0] = { 7643136, 307200 },
+};
+
static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
@@ -485,7 +529,7 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
}
/*
- * Returns zero for success and non-zero in case of a failure
+ * Return: zero for success and non-zero in case of a failure.
*/
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
u32 hs, u32 rate, bool update_link_startup_timer)
@@ -789,6 +833,51 @@ static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
}
}
+static int ufs_qcom_icc_set_bw(struct ufs_qcom_host *host, u32 mem_bw, u32 cfg_bw)
+{
+ struct device *dev = host->hba->dev;
+ int ret;
+
+ ret = icc_set_bw(host->icc_ddr, 0, mem_bw);
+ if (ret < 0) {
+ dev_err(dev, "failed to set bandwidth request: %d\n", ret);
+ return ret;
+ }
+
+ ret = icc_set_bw(host->icc_cpu, 0, cfg_bw);
+ if (ret < 0) {
+ dev_err(dev, "failed to set bandwidth request: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct __ufs_qcom_bw_table ufs_qcom_get_bw_table(struct ufs_qcom_host *host)
+{
+ struct ufs_pa_layer_attr *p = &host->dev_req_params;
+ int gear = max_t(u32, p->gear_rx, p->gear_tx);
+ int lane = max_t(u32, p->lane_rx, p->lane_tx);
+
+ if (ufshcd_is_hs_mode(p)) {
+ if (p->hs_rate == PA_HS_MODE_B)
+ return ufs_qcom_bw_table[MODE_HS_RB][gear][lane];
+ else
+ return ufs_qcom_bw_table[MODE_HS_RA][gear][lane];
+ } else {
+ return ufs_qcom_bw_table[MODE_PWM][gear][lane];
+ }
+}
+
+static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host)
+{
+ struct __ufs_qcom_bw_table bw_table;
+
+ bw_table = ufs_qcom_get_bw_table(host);
+
+ return ufs_qcom_icc_set_bw(host, bw_table.mem_bw, bw_table.cfg_bw);
+}
+
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *dev_max_params,
@@ -852,6 +941,8 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
memcpy(&host->dev_req_params,
dev_req_params, sizeof(*dev_req_params));
+ ufs_qcom_icc_update_bw(host);
+
/* disable the device ref clock if entered PWM mode */
if (ufshcd_is_hs_mode(&hba->pwr_info) &&
!ufshcd_is_hs_mode(dev_req_params))
@@ -964,7 +1055,7 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
* @on: If true, enable clocks else disable them.
* @status: PRE_CHANGE or POST_CHANGE notify
*
- * Returns 0 on success, non-zero on failure.
+ * Return: 0 on success, non-zero on failure.
*/
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
@@ -981,7 +1072,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
switch (status) {
case PRE_CHANGE:
- if (!on) {
+ if (on) {
+ ufs_qcom_icc_update_bw(host);
+ } else {
if (!ufs_qcom_is_link_active(hba)) {
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
@@ -993,6 +1086,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
/* enable the device ref clock for HS mode*/
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
+ } else {
+ ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MIN][0][0].mem_bw,
+ ufs_qcom_bw_table[MODE_MIN][0][0].cfg_bw);
}
break;
}
@@ -1031,6 +1127,34 @@ static const struct reset_control_ops ufs_qcom_reset_ops = {
.deassert = ufs_qcom_reset_deassert,
};
+static int ufs_qcom_icc_init(struct ufs_qcom_host *host)
+{
+ struct device *dev = host->hba->dev;
+ int ret;
+
+ host->icc_ddr = devm_of_icc_get(dev, "ufs-ddr");
+ if (IS_ERR(host->icc_ddr))
+ return dev_err_probe(dev, PTR_ERR(host->icc_ddr),
+ "failed to acquire interconnect path\n");
+
+ host->icc_cpu = devm_of_icc_get(dev, "cpu-ufs");
+ if (IS_ERR(host->icc_cpu))
+ return dev_err_probe(dev, PTR_ERR(host->icc_cpu),
+ "failed to acquire interconnect path\n");
+
+ /*
+ * Set Maximum bandwidth vote before initializing the UFS controller and
+ * device. Ideally, a minimal interconnect vote would suffice for the
+ * initialization, but a max vote would allow faster initialization.
+ */
+ ret = ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MAX][0][0].mem_bw,
+ ufs_qcom_bw_table[MODE_MAX][0][0].cfg_bw);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to set bandwidth request\n");
+
+ return 0;
+}
+
/**
* ufs_qcom_init - bind phy with controller
* @hba: host controller instance
@@ -1038,7 +1162,7 @@ static const struct reset_control_ops ufs_qcom_reset_ops = {
* Binds PHY with controller and powers up PHY enabling clocks
* and regulators.
*
- * Returns -EPROBE_DEFER if binding fails, returns negative error
+ * Return: -EPROBE_DEFER if binding fails, returns negative error
* on phy power up failure and returns zero on success.
*/
static int ufs_qcom_init(struct ufs_hba *hba)
@@ -1085,6 +1209,10 @@ static int ufs_qcom_init(struct ufs_hba *hba)
}
}
+ err = ufs_qcom_icc_init(host);
+ if (err)
+ goto out_variant_clear;
+
host->device_reset = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(host->device_reset)) {
@@ -1254,6 +1382,10 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
int err = 0;
+ /* check the host controller state before sending hibern8 cmd */
+ if (!ufshcd_is_hba_active(hba))
+ return 0;
+
if (status == PRE_CHANGE) {
err = ufshcd_uic_hibern8_enter(hba);
if (err)
@@ -1282,6 +1414,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
dev_req_params->pwr_rx,
dev_req_params->hs_rate,
false);
+ ufs_qcom_icc_update_bw(host);
ufshcd_uic_hibern8_exit(hba);
}
@@ -1483,6 +1616,7 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
struct devfreq_simple_ondemand_data *d)
{
p->polling_ms = 60;
+ p->timer = DEVFREQ_TIMER_DELAYED;
d->upthreshold = 70;
d->downdifferential = 5;
}
@@ -1643,11 +1777,12 @@ static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
ufshcd_mcq_config_esi(hba, msg);
}
-static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *__hba)
+static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
{
- struct ufs_hba *hba = __hba;
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- u32 id = irq - host->esi_base;
+ struct msi_desc *desc = data;
+ struct device *dev = msi_desc_to_dev(desc);
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 id = desc->msi_index;
struct ufs_hw_queue *hwq = &hba->uhq[id];
ufshcd_mcq_write_cqis(hba, 0x1, id);
@@ -1665,8 +1800,6 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
if (host->esi_enabled)
return 0;
- else if (host->esi_base < 0)
- return -EINVAL;
/*
* 1. We only handle CQs as of now.
@@ -1675,16 +1808,16 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs,
ufs_qcom_write_msi_msg);
- if (ret)
+ if (ret) {
+ dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
goto out;
+ }
+ msi_lock_descs(hba->dev);
msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
- if (!desc->msi_index)
- host->esi_base = desc->irq;
-
ret = devm_request_irq(hba->dev, desc->irq,
ufs_qcom_mcq_esi_handler,
- IRQF_SHARED, "qcom-mcq-esi", hba);
+ IRQF_SHARED, "qcom-mcq-esi", desc);
if (ret) {
dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
__func__, desc->irq, ret);
@@ -1692,14 +1825,17 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
break;
}
}
+ msi_unlock_descs(hba->dev);
if (ret) {
/* Rewind */
+ msi_lock_descs(hba->dev);
msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
if (desc == failed_desc)
break;
devm_free_irq(hba->dev, desc->irq, hba);
}
+ msi_unlock_descs(hba->dev);
platform_msi_domain_free_irqs(hba->dev);
} else {
if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
@@ -1712,12 +1848,8 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
}
out:
- if (ret) {
- host->esi_base = -1;
- dev_warn(hba->dev, "Failed to request Platform MSI %d\n", ret);
- } else {
+ if (!ret)
host->esi_enabled = true;
- }
return ret;
}
@@ -1757,7 +1889,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
* ufs_qcom_probe - probe routine of the driver
* @pdev: pointer to Platform device handle
*
- * Return zero for success and non-zero for failure
+ * Return: zero for success and non-zero for failure.
*/
static int ufs_qcom_probe(struct platform_device *pdev)
{
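The ufs_qcom_bw_table[][][] lookup added above is indexed by rate mode, by the larger of the RX/TX gears, and by the larger of the RX/TX lane counts; each entry carries the memory-path and config-path interconnect votes in KB/s, which ufs_qcom_icc_set_bw() then applies to the "ufs-ddr" and "cpu-ufs" paths. A stand-alone sketch of that selection, using only the HS Rate-B gear-4 rows copied from the table; the surrounding scaffolding is illustrative, not driver code.

#include <stdint.h>
#include <stdio.h>

struct bw { uint32_t mem_bw, cfg_bw; };	/* KB/s */

/* Values copied from the MODE_HS_RB / UFS_HS_G4 rows of the table. */
static struct bw hs_rb_g4[3] = {
	[1] = { 2915200, 204800 },	/* one lane  */
	[2] = { 2915200, 409600 },	/* two lanes */
};

int main(void)
{
	/* Hypothetical negotiated parameters. */
	int gear_rx = 4, gear_tx = 4, lane_rx = 2, lane_tx = 1;
	int gear = gear_rx > gear_tx ? gear_rx : gear_tx;
	int lane = lane_rx > lane_tx ? lane_rx : lane_tx;
	struct bw vote = hs_rb_g4[lane];

	printf("HS-B gear %d, %d lane(s): mem %u KB/s, cfg %u KB/s\n",
	       gear, lane, (unsigned)vote.mem_bw, (unsigned)vote.cfg_bw);
	return 0;
}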
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 6289ad5a42d0..d6f8e74bd538 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -206,6 +206,9 @@ struct ufs_qcom_host {
struct clk *tx_l1_sync_clk;
bool is_lane_clks_enabled;
+ struct icc_path *icc_ddr;
+ struct icc_path *icc_cpu;
+
#ifdef CONFIG_SCSI_UFS_CRYPTO
struct qcom_ice *ice;
#endif
@@ -226,7 +229,6 @@ struct ufs_qcom_host {
u32 hs_gear;
- int esi_base;
bool esi_enabled;
};
diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c
index ab0652d8705a..cc94970b86c9 100644
--- a/drivers/ufs/host/ufs-renesas.c
+++ b/drivers/ufs/host/ufs-renesas.c
@@ -12,7 +12,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <ufs/ufshcd.h>
diff --git a/drivers/ufs/host/ufshcd-dwc.c b/drivers/ufs/host/ufshcd-dwc.c
index e28a67e1e314..21b1cf912dcc 100644
--- a/drivers/ufs/host/ufshcd-dwc.c
+++ b/drivers/ufs/host/ufshcd-dwc.c
@@ -34,9 +34,7 @@ int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba,
EXPORT_SYMBOL(ufshcd_dwc_dme_set_attrs);
/**
- * ufshcd_dwc_program_clk_div()
- * This function programs the clk divider value. This value is needed to
- * provide 1 microsecond tick to unipro layer.
+ * ufshcd_dwc_program_clk_div() - program clock divider.
* @hba: Private Structure pointer
* @divider_val: clock divider value to be programmed
*
@@ -47,11 +45,10 @@ static void ufshcd_dwc_program_clk_div(struct ufs_hba *hba, u32 divider_val)
}
/**
- * ufshcd_dwc_link_is_up()
- * Check if link is up
+ * ufshcd_dwc_link_is_up() - check if link is up.
* @hba: private structure pointer
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
{
@@ -68,7 +65,9 @@ static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
}
/**
- * ufshcd_dwc_connection_setup()
+ * ufshcd_dwc_connection_setup() - configure unipro attributes.
+ * @hba: pointer to drivers private data
+ *
* This function configures both the local side (host) and the peer side
* (device) unipro attributes to establish the connection to application/
* cport.
@@ -76,9 +75,7 @@ static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
* have this connection setup on reset. But invoking this function does no
* harm and should be fine even working with any ufs device.
*
- * @hba: pointer to drivers private data
- *
- * Returns 0 on success non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_dwc_connection_setup(struct ufs_hba *hba)
{
@@ -107,12 +104,11 @@ static int ufshcd_dwc_connection_setup(struct ufs_hba *hba)
}
/**
- * ufshcd_dwc_link_startup_notify()
- * UFS Host DWC specific link startup sequence
+ * ufshcd_dwc_link_startup_notify() - UFS Host DWC specific link startup sequence.
* @hba: private structure pointer
* @status: Callback notify status
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index cf3987773051..248a49e5e7f3 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -524,7 +524,7 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
* @pdev: pointer to PCI device handle
* @id: PCI device id
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -590,6 +590,7 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = {
};
static const struct pci_device_id ufshcd_pci_tbl[] = {
+ { PCI_VENDOR_ID_REDHAT, 0x0013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index 0b7430033047..797a4dfe45d9 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -166,6 +166,8 @@ EXPORT_SYMBOL_GPL(ufshcd_populate_vreg);
* If any of the supplies are not defined it is assumed that they are always-on
* and hence return zero. If the property is defined but parsing is failed
* then return corresponding error.
+ *
+ * Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
{
@@ -212,7 +214,7 @@ static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
* @dev_max: pointer to device attributes
* @agreed_pwr: returned agreed attributes
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param,
const struct ufs_pa_layer_attr *dev_max,
@@ -305,8 +307,8 @@ EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param)
{
*dev_param = (struct ufs_dev_params){
- .tx_lanes = 2,
- .rx_lanes = 2,
+ .tx_lanes = UFS_LANE_2,
+ .rx_lanes = UFS_LANE_2,
.hs_rx_gear = UFS_HS_G3,
.hs_tx_gear = UFS_HS_G3,
.pwm_rx_gear = UFS_PWM_G4,
@@ -326,7 +328,7 @@ EXPORT_SYMBOL_GPL(ufshcd_init_pwr_dev_param);
* @pdev: pointer to Platform device handle
* @vops: pointer to variant ops
*
- * Returns 0 on success, non-zero value on failure
+ * Return: 0 on success, non-zero value on failure.
*/
int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops)
@@ -373,7 +375,8 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
err = ufshcd_init(hba, mmio_base, irq);
if (err) {
- dev_err(dev, "Initialization failed\n");
+ dev_err_probe(dev, err, "Initialization failed with error %d\n",
+ err);
goto dealloc_host;
}
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index 83966dbd3bbf..77e2dc404885 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -175,8 +175,12 @@ static int pruss_probe(struct platform_device *pdev)
goto err_free_ddr_vaddr;
}
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_unmap;
+
+ gdev->hostirq_start = ret;
gdev->pintc_base = pdata->pintc_base;
- gdev->hostirq_start = platform_get_irq(pdev, 0);
for (cnt = 0, p = gdev->info; cnt < MAX_PRUSS_EVT; cnt++, p++) {
p->mem[0].addr = regs_prussio->start;
@@ -211,6 +215,7 @@ err_unloop:
for (i = 0, p = gdev->info; i < cnt; i++, p++) {
uio_unregister_device(p);
}
+err_unmap:
iounmap(gdev->prussio_vaddr);
err_free_ddr_vaddr:
dma_free_coherent(dev, extram_pool_sz, gdev->ddr_vaddr,
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index aa0111b365bb..11a5b3437c32 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -61,6 +61,7 @@
#include <linux/module.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>
+#include <linux/property.h>
#include "core.h"
#include "gadget-export.h"
diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c
index 884e2301237f..2c1aca84f226 100644
--- a/drivers/usb/cdns3/cdns3-plat.c
+++ b/drivers/usb/cdns3/cdns3-plat.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -255,9 +256,10 @@ static int cdns3_controller_resume(struct device *dev, pm_message_t msg)
cdns3_set_platform_suspend(cdns->dev, false, false);
spin_lock_irqsave(&cdns->lock, flags);
- cdns_resume(cdns, !PMSG_IS_AUTO(msg));
+ cdns_resume(cdns);
cdns->in_lpm = false;
spin_unlock_irqrestore(&cdns->lock, flags);
+ cdns_set_active(cdns, !PMSG_IS_AUTO(msg));
if (cdns->wakeup_pending) {
cdns->wakeup_pending = false;
enable_irq(cdns->wakeup_irq);
diff --git a/drivers/usb/cdns3/cdns3-starfive.c b/drivers/usb/cdns3/cdns3-starfive.c
index fc1f003b145d..a7265b86e427 100644
--- a/drivers/usb/cdns3/cdns3-starfive.c
+++ b/drivers/usb/cdns3/cdns3-starfive.c
@@ -166,7 +166,7 @@ static int cdns_starfive_remove_core(struct device *dev, void *c)
return 0;
}
-static int cdns_starfive_remove(struct platform_device *pdev)
+static void cdns_starfive_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct cdns_starfive *data = dev_get_drvdata(dev);
@@ -178,8 +178,6 @@ static int cdns_starfive_remove(struct platform_device *pdev)
pm_runtime_put_noidle(dev);
cdns_clk_rst_deinit(data);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -232,7 +230,7 @@ MODULE_DEVICE_TABLE(of, cdns_starfive_of_match);
static struct platform_driver cdns_starfive_driver = {
.probe = cdns_starfive_probe,
- .remove = cdns_starfive_remove,
+ .remove_new = cdns_starfive_remove,
.driver = {
.name = "cdns3-starfive",
.of_match_table = cdns_starfive_of_match,
diff --git a/drivers/usb/cdns3/cdns3-ti.c b/drivers/usb/cdns3/cdns3-ti.c
index 81b9132e3aaa..5945c4b1e11f 100644
--- a/drivers/usb/cdns3/cdns3-ti.c
+++ b/drivers/usb/cdns3/cdns3-ti.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
/* USB Wrapper register offsets */
#define USBSS_PID 0x0
diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
index 7b151f5af3cc..0725668ffea4 100644
--- a/drivers/usb/cdns3/cdnsp-pci.c
+++ b/drivers/usb/cdns3/cdnsp-pci.c
@@ -208,8 +208,9 @@ static int __maybe_unused cdnsp_pci_resume(struct device *dev)
int ret;
spin_lock_irqsave(&cdns->lock, flags);
- ret = cdns_resume(cdns, 1);
+ ret = cdns_resume(cdns);
spin_unlock_irqrestore(&cdns->lock, flags);
+ cdns_set_active(cdns, 1);
return ret;
}
diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
index dbcdf3b24b47..33548771a0d3 100644
--- a/drivers/usb/cdns3/core.c
+++ b/drivers/usb/cdns3/core.c
@@ -14,6 +14,7 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -522,9 +523,8 @@ int cdns_suspend(struct cdns *cdns)
}
EXPORT_SYMBOL_GPL(cdns_suspend);
-int cdns_resume(struct cdns *cdns, u8 set_active)
+int cdns_resume(struct cdns *cdns)
{
- struct device *dev = cdns->dev;
enum usb_role real_role;
bool role_changed = false;
int ret = 0;
@@ -556,15 +556,23 @@ int cdns_resume(struct cdns *cdns, u8 set_active)
if (cdns->roles[cdns->role]->resume)
cdns->roles[cdns->role]->resume(cdns, cdns_power_is_lost(cdns));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cdns_resume);
+
+void cdns_set_active(struct cdns *cdns, u8 set_active)
+{
+ struct device *dev = cdns->dev;
+
if (set_active) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
- return 0;
+ return;
}
-EXPORT_SYMBOL_GPL(cdns_resume);
+EXPORT_SYMBOL_GPL(cdns_set_active);
#endif /* CONFIG_PM_SLEEP */
MODULE_AUTHOR("Peter Chen <peter.chen@nxp.com>");
diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
index 2d332a788871..4a4dbc2c1561 100644
--- a/drivers/usb/cdns3/core.h
+++ b/drivers/usb/cdns3/core.h
@@ -125,10 +125,13 @@ int cdns_init(struct cdns *cdns);
int cdns_remove(struct cdns *cdns);
#ifdef CONFIG_PM_SLEEP
-int cdns_resume(struct cdns *cdns, u8 set_active);
+int cdns_resume(struct cdns *cdns);
int cdns_suspend(struct cdns *cdns);
+void cdns_set_active(struct cdns *cdns, u8 set_active);
#else /* CONFIG_PM_SLEEP */
-static inline int cdns_resume(struct cdns *cdns, u8 set_active)
+static inline int cdns_resume(struct cdns *cdns)
+{ return 0; }
+static inline int cdns_set_active(struct cdns *cdns, u8 set_active)
{ return 0; }
static inline int cdns_suspend(struct cdns *cdns)
{ return 0; }
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index d00ff98dffab..04b6d12f2b9a 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -196,6 +196,7 @@ int cdns_drd_host_on(struct cdns *cdns)
if (ret)
dev_err(cdns->dev, "timeout waiting for xhci_ready\n");
+ phy_set_mode(cdns->usb2_phy, PHY_MODE_USB_HOST);
phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_HOST);
return ret;
}
@@ -216,6 +217,7 @@ void cdns_drd_host_off(struct cdns *cdns)
readl_poll_timeout_atomic(&cdns->otg_regs->state, val,
!(val & OTGSTATE_HOST_STATE_MASK),
1, 2000000);
+ phy_set_mode(cdns->usb2_phy, PHY_MODE_INVALID);
phy_set_mode(cdns->usb3_phy, PHY_MODE_INVALID);
}
@@ -248,6 +250,7 @@ int cdns_drd_gadget_on(struct cdns *cdns)
return ret;
}
+ phy_set_mode(cdns->usb2_phy, PHY_MODE_USB_DEVICE);
phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_DEVICE);
return 0;
}
@@ -273,6 +276,7 @@ void cdns_drd_gadget_off(struct cdns *cdns)
readl_poll_timeout_atomic(&cdns->otg_regs->state, val,
!(val & OTGSTATE_DEV_STATE_MASK),
1, 2000000);
+ phy_set_mode(cdns->usb2_phy, PHY_MODE_INVALID);
phy_set_mode(cdns->usb3_phy, PHY_MODE_INVALID);
}
EXPORT_SYMBOL_GPL(cdns_drd_gadget_off);
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index f210b7489fd5..d9bb3d3f026e 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -257,6 +257,7 @@ struct ci_hdrc {
bool id_event;
bool b_sess_valid_event;
bool imx28_write_fix;
+ bool has_portsc_pec_bug;
bool supports_runtime_pm;
bool in_lpm;
bool wakeup_int;
@@ -281,8 +282,19 @@ static inline int ci_role_start(struct ci_hdrc *ci, enum ci_role role)
return -ENXIO;
ret = ci->roles[role]->start(ci);
- if (!ret)
- ci->role = role;
+ if (ret)
+ return ret;
+
+ ci->role = role;
+
+ if (ci->usb_phy) {
+ if (role == CI_ROLE_HOST)
+ usb_phy_set_event(ci->usb_phy, USB_EVENT_ID);
+ else
+ /* in device mode but vbus is invalid */
+ usb_phy_set_event(ci->usb_phy, USB_EVENT_NONE);
+ }
+
return ret;
}
@@ -296,6 +308,9 @@ static inline void ci_role_stop(struct ci_hdrc *ci)
ci->role = CI_ROLE_END;
ci->roles[role]->stop(ci);
+
+ if (ci->usb_phy)
+ usb_phy_set_event(ci->usb_phy, USB_EVENT_NONE);
}
static inline enum usb_role ci_role_to_usb_role(struct ci_hdrc *ci)
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 336ef6dd8e7d..e28bb2f2612d 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -67,11 +68,13 @@ static const struct ci_hdrc_imx_platform_flag imx7d_usb_data = {
static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = {
.flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
+ CI_HDRC_HAS_PORTSC_PEC_MISSED |
CI_HDRC_PMQOS,
};
static const struct ci_hdrc_imx_platform_flag imx8ulp_usb_data = {
- .flags = CI_HDRC_SUPPORTS_RUNTIME_PM,
+ .flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
+ CI_HDRC_HAS_PORTSC_PEC_MISSED,
};
static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
@@ -175,10 +178,15 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
if (of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI)
data->ulpi = 1;
- of_property_read_u32(np, "samsung,picophy-pre-emp-curr-control",
- &data->emp_curr_control);
- of_property_read_u32(np, "samsung,picophy-dc-vol-level-adjust",
- &data->dc_vol_level_adjust);
+ if (of_property_read_u32(np, "samsung,picophy-pre-emp-curr-control",
+ &data->emp_curr_control))
+ data->emp_curr_control = -1;
+ if (of_property_read_u32(np, "samsung,picophy-dc-vol-level-adjust",
+ &data->dc_vol_level_adjust))
+ data->dc_vol_level_adjust = -1;
+ if (of_property_read_u32(np, "fsl,picophy-rise-fall-time-adjust",
+ &data->rise_fall_time_adjust))
+ data->rise_fall_time_adjust = -1;
return data;
}
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index 7135b9a5d913..88b8da79d518 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -28,6 +28,7 @@ struct imx_usbmisc_data {
enum usb_dr_mode available_role; /* runtime usb dr mode */
int emp_curr_control;
int dc_vol_level_adjust;
+ int rise_fall_time_adjust;
};
int imx_usbmisc_init(struct imx_usbmisc_data *data);
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index ca36d11a69ea..8e78bf643e25 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -6,7 +6,8 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 51994d655b82..7ac39a281b8c 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1028,8 +1028,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
return -ENODEV;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -1045,6 +1044,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
CI_HDRC_IMX28_WRITE_FIX);
ci->supports_runtime_pm = !!(ci->platdata->flags &
CI_HDRC_SUPPORTS_RUNTIME_PM);
+ ci->has_portsc_pec_bug = !!(ci->platdata->flags &
+ CI_HDRC_HAS_PORTSC_PEC_MISSED);
platform_set_drvdata(pdev, ci);
ret = hw_device_init(ci, base);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index ebe7400243b1..08af26b762a2 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -151,6 +151,7 @@ static int host_start(struct ci_hdrc *ci)
ehci->has_hostpc = ci->hw_bank.lpm;
ehci->has_tdi_phy_lpm = ci->hw_bank.lpm;
ehci->imx28_write_fix = ci->imx28_write_fix;
+ ehci->has_ci_pec_bug = ci->has_portsc_pec_bug;
priv = (struct ehci_ci_priv *)ehci->priv;
priv->reg_vbus = NULL;
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 54c09245ad05..0b7bd3c643c3 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1463,7 +1463,7 @@ static int ep_disable(struct usb_ep *ep)
*/
static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
- struct ci_hw_req *hwreq = NULL;
+ struct ci_hw_req *hwreq;
if (ep == NULL)
return NULL;
@@ -1718,6 +1718,13 @@ static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
ret = ci->platdata->notify_event(ci,
CI_HDRC_CONTROLLER_VBUS_EVENT);
+ if (ci->usb_phy) {
+ if (is_active)
+ usb_phy_set_event(ci->usb_phy, USB_EVENT_VBUS);
+ else
+ usb_phy_set_event(ci->usb_phy, USB_EVENT_NONE);
+ }
+
if (ci->driver)
ci_hdrc_gadget_connect(_gadget, is_active);
@@ -2034,6 +2041,9 @@ static irqreturn_t udc_irq(struct ci_hdrc *ci)
if (USBi_PCI & intr) {
ci->gadget.speed = hw_port_is_high_speed(ci) ?
USB_SPEED_HIGH : USB_SPEED_FULL;
+ if (ci->usb_phy)
+ usb_phy_set_event(ci->usb_phy,
+ USB_EVENT_ENUMERATED);
if (ci->suspended) {
if (ci->driver->resume) {
spin_unlock(&ci->lock);
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 9ee9621e2ccc..173c78afd502 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -4,10 +4,11 @@
*/
#include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/platform_device.h>
#include <linux/usb/otg.h>
#include "ci_hdrc_imx.h"
@@ -130,6 +131,8 @@
#define MX7D_USB_OTG_PHY_CFG1 0x30
#define TXPREEMPAMPTUNE0_BIT 28
#define TXPREEMPAMPTUNE0_MASK (3 << 28)
+#define TXRISETUNE0_BIT 24
+#define TXRISETUNE0_MASK (3 << 24)
#define TXVREFTUNE0_BIT 20
#define TXVREFTUNE0_MASK (0xf << 20)
@@ -659,18 +662,27 @@ static int usbmisc_imx7d_init(struct imx_usbmisc_data *data)
usbmisc->base + MX7D_USBNC_USB_CTRL2);
/* PHY tuning for signal quality */
reg = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG1);
- if (data->emp_curr_control && data->emp_curr_control <=
+ if (data->emp_curr_control >= 0 &&
+ data->emp_curr_control <=
(TXPREEMPAMPTUNE0_MASK >> TXPREEMPAMPTUNE0_BIT)) {
reg &= ~TXPREEMPAMPTUNE0_MASK;
reg |= (data->emp_curr_control << TXPREEMPAMPTUNE0_BIT);
}
- if (data->dc_vol_level_adjust && data->dc_vol_level_adjust <=
+ if (data->dc_vol_level_adjust >= 0 &&
+ data->dc_vol_level_adjust <=
(TXVREFTUNE0_MASK >> TXVREFTUNE0_BIT)) {
reg &= ~TXVREFTUNE0_MASK;
reg |= (data->dc_vol_level_adjust << TXVREFTUNE0_BIT);
}
+ if (data->rise_fall_time_adjust >= 0 &&
+ data->rise_fall_time_adjust <=
+ (TXRISETUNE0_MASK >> TXRISETUNE0_BIT)) {
+ reg &= ~TXRISETUNE0_MASK;
+ reg |= (data->rise_fall_time_adjust << TXRISETUNE0_BIT);
+ }
+
writel(reg, usbmisc->base + MX7D_USB_OTG_PHY_CFG1);
}
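The new fsl,picophy-rise-fall-time-adjust handling follows the same pattern as the existing tuning properties: a missing DT property is recorded as -1, and only values that fit in the TXRISETUNE0 field of MX7D_USB_OTG_PHY_CFG1 are masked into the register. A stand-alone sketch of that field update; the field layout is copied from the patch, while the sample register contents and property value are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Field layout copied from the patch. */
#define TXRISETUNE0_BIT		24
#define TXRISETUNE0_MASK	(3u << 24)

int main(void)
{
	uint32_t reg = 0x03000000;	/* pretend current PHY_CFG1 contents */
	int rise_fall_time_adjust = 1;	/* would come from the DT property */

	/* Only apply in-range values; -1 (property absent) is skipped. */
	if (rise_fall_time_adjust >= 0 &&
	    rise_fall_time_adjust <= (TXRISETUNE0_MASK >> TXRISETUNE0_BIT)) {
		reg &= ~TXRISETUNE0_MASK;
		reg |= (uint32_t)rise_fall_time_adjust << TXRISETUNE0_BIT;
	}
	printf("PHY_CFG1 = %#010x\n", reg);	/* -> 0x01000000 */
	return 0;
}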
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 11da5fb284d0..a1f4e1ead97f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -28,6 +28,7 @@
#include <linux/serial.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
+#include <linux/tty_ldisc.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
@@ -318,6 +319,16 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
}
difference = acm->ctrlin ^ newctrl;
+
+ if ((difference & USB_CDC_SERIAL_STATE_DCD) && acm->port.tty) {
+ struct tty_ldisc *ld = tty_ldisc_ref(acm->port.tty);
+ if (ld) {
+ if (ld->ops->dcd_change)
+ ld->ops->dcd_change(acm->port.tty, newctrl & USB_CDC_SERIAL_STATE_DCD);
+ tty_ldisc_deref(ld);
+ }
+ }
+
spin_lock_irqsave(&acm->read_lock, flags);
acm->ctrlin = newctrl;
acm->oldcount = acm->iocount;
@@ -789,8 +800,8 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
tty_port_close(&acm->port, tty, filp);
}
-static int acm_tty_write(struct tty_struct *tty,
- const unsigned char *buf, int count)
+static ssize_t acm_tty_write(struct tty_struct *tty, const u8 *buf,
+ size_t count)
{
struct acm *acm = tty->driver_data;
int stat;
@@ -801,7 +812,7 @@ static int acm_tty_write(struct tty_struct *tty,
if (!count)
return 0;
- dev_vdbg(&acm->data->dev, "%d bytes from tty layer\n", count);
+ dev_vdbg(&acm->data->dev, "%zu bytes from tty layer\n", count);
spin_lock_irqsave(&acm->write_lock, flags);
wbn = acm_wb_alloc(acm);
@@ -818,7 +829,7 @@ static int acm_tty_write(struct tty_struct *tty,
}
count = (count > acm->writesize) ? acm->writesize : count;
- dev_vdbg(&acm->data->dev, "writing %d bytes\n", count);
+ dev_vdbg(&acm->data->dev, "writing %zu bytes\n", count);
memcpy(wb->buf, buf, count);
wb->len = count;
@@ -853,6 +864,19 @@ static unsigned int acm_tty_write_room(struct tty_struct *tty)
return acm_wb_is_avail(acm) ? acm->writesize : 0;
}
+static void acm_tty_flush_buffer(struct tty_struct *tty)
+{
+ struct acm *acm = tty->driver_data;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&acm->write_lock, flags);
+ for (i = 0; i < ACM_NW; i++)
+ if (acm->wb[i].use)
+ usb_unlink_urb(acm->wb[i].urb);
+ spin_unlock_irqrestore(&acm->write_lock, flags);
+}
+
static unsigned int acm_tty_chars_in_buffer(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
@@ -2016,6 +2040,7 @@ static const struct tty_operations acm_ops = {
.hangup = acm_tty_hangup,
.write = acm_tty_write,
.write_room = acm_tty_write_room,
+ .flush_buffer = acm_tty_flush_buffer,
.ioctl = acm_tty_ioctl,
.throttle = acm_tty_throttle,
.unthrottle = acm_tty_unthrottle,
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index c9bdeb4ddcb5..b84efae26e15 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/of.h>
#include <linux/usb/otg.h>
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 725b8dbcfe5f..b19e38d5fd10 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -1051,9 +1051,6 @@ int usb_get_bos_descriptor(struct usb_device *dev)
}
switch (cap_type) {
- case USB_CAP_TYPE_WIRELESS_USB:
- /* Wireless USB cap descriptor is handled by wusb */
- break;
case USB_CAP_TYPE_EXT:
dev->bos->ext_cap =
(struct usb_ext_cap_descriptor *)buffer;
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 2c14a9636056..a247da73f34d 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -424,7 +424,6 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
case USB_SPEED_UNKNOWN: /* usb 1.1 root hub code */
case USB_SPEED_FULL:
speed = "12"; break;
- case USB_SPEED_WIRELESS: /* Wireless has no real fixed speed */
case USB_SPEED_HIGH:
speed = "480"; break;
case USB_SPEED_SUPER:
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index c4ed3310e069..a88ced93b5e7 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -29,7 +29,6 @@
#define MAX_USB_MINORS 256
static const struct file_operations *usb_minors[MAX_USB_MINORS];
static DECLARE_RWSEM(minor_rwsem);
-static DEFINE_MUTEX(init_usb_class_mutex);
static int usb_open(struct inode *inode, struct file *file)
{
@@ -57,11 +56,6 @@ static const struct file_operations usb_fops = {
.llseek = noop_llseek,
};
-static struct usb_class {
- struct kref kref;
- struct class *class;
-} *usb_class;
-
static char *usb_devnode(const struct device *dev, umode_t *mode)
{
struct usb_class_driver *drv;
@@ -72,50 +66,10 @@ static char *usb_devnode(const struct device *dev, umode_t *mode)
return drv->devnode(dev, mode);
}
-static int init_usb_class(void)
-{
- int result = 0;
-
- if (usb_class != NULL) {
- kref_get(&usb_class->kref);
- goto exit;
- }
-
- usb_class = kmalloc(sizeof(*usb_class), GFP_KERNEL);
- if (!usb_class) {
- result = -ENOMEM;
- goto exit;
- }
-
- kref_init(&usb_class->kref);
- usb_class->class = class_create("usbmisc");
- if (IS_ERR(usb_class->class)) {
- result = PTR_ERR(usb_class->class);
- printk(KERN_ERR "class_create failed for usb devices\n");
- kfree(usb_class);
- usb_class = NULL;
- goto exit;
- }
- usb_class->class->devnode = usb_devnode;
-
-exit:
- return result;
-}
-
-static void release_usb_class(struct kref *kref)
-{
- /* Ok, we cheat as we know we only have one usb_class */
- class_destroy(usb_class->class);
- kfree(usb_class);
- usb_class = NULL;
-}
-
-static void destroy_usb_class(void)
-{
- mutex_lock(&init_usb_class_mutex);
- kref_put(&usb_class->kref, release_usb_class);
- mutex_unlock(&init_usb_class_mutex);
-}
+const struct class usbmisc_class = {
+ .name = "usbmisc",
+ .devnode = usb_devnode,
+};
int usb_major_init(void)
{
@@ -156,7 +110,7 @@ void usb_major_cleanup(void)
int usb_register_dev(struct usb_interface *intf,
struct usb_class_driver *class_driver)
{
- int retval;
+ int retval = 0;
int minor_base = class_driver->minor_base;
int minor;
char name[20];
@@ -175,13 +129,6 @@ int usb_register_dev(struct usb_interface *intf,
if (intf->minor >= 0)
return -EADDRINUSE;
- mutex_lock(&init_usb_class_mutex);
- retval = init_usb_class();
- mutex_unlock(&init_usb_class_mutex);
-
- if (retval)
- return retval;
-
dev_dbg(&intf->dev, "looking for a minor, starting at %d\n", minor_base);
down_write(&minor_rwsem);
@@ -200,7 +147,7 @@ int usb_register_dev(struct usb_interface *intf,
/* create a usb class device for this usb interface */
snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
- intf->usb_dev = device_create(usb_class->class, &intf->dev,
+ intf->usb_dev = device_create(&usbmisc_class, &intf->dev,
MKDEV(USB_MAJOR, minor), class_driver,
"%s", kbasename(name));
if (IS_ERR(intf->usb_dev)) {
@@ -234,7 +181,7 @@ void usb_deregister_dev(struct usb_interface *intf,
return;
dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
- device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
+ device_destroy(&usbmisc_class, MKDEV(USB_MAJOR, intf->minor));
down_write(&minor_rwsem);
usb_minors[intf->minor] = NULL;
@@ -242,6 +189,5 @@ void usb_deregister_dev(struct usb_interface *intf,
intf->usb_dev = NULL;
intf->minor = -1;
- destroy_usb_class();
}
EXPORT_SYMBOL_GPL(usb_deregister_dev);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 8300baedafd2..12b6dfeaf658 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -156,27 +156,6 @@ static const u8 usb3_rh_dev_descriptor[18] = {
0x01 /* __u8 bNumConfigurations; */
};
-/* usb 2.5 (wireless USB 1.0) root hub device descriptor */
-static const u8 usb25_rh_dev_descriptor[18] = {
- 0x12, /* __u8 bLength; */
- USB_DT_DEVICE, /* __u8 bDescriptorType; Device */
- 0x50, 0x02, /* __le16 bcdUSB; v2.5 */
-
- 0x09, /* __u8 bDeviceClass; HUB_CLASSCODE */
- 0x00, /* __u8 bDeviceSubClass; */
- 0x00, /* __u8 bDeviceProtocol; [ usb 2.0 no TT ] */
- 0xFF, /* __u8 bMaxPacketSize0; always 0xFF (WUSB Spec 7.4.1). */
-
- 0x6b, 0x1d, /* __le16 idVendor; Linux Foundation 0x1d6b */
- 0x02, 0x00, /* __le16 idProduct; device 0x0002 */
- KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */
-
- 0x03, /* __u8 iManufacturer; */
- 0x02, /* __u8 iProduct; */
- 0x01, /* __u8 iSerialNumber; */
- 0x01 /* __u8 bNumConfigurations; */
-};
-
/* usb 2.0 root hub device descriptor */
static const u8 usb2_rh_dev_descriptor[18] = {
0x12, /* __u8 bLength; */
@@ -368,7 +347,7 @@ static const u8 ss_rh_config_descriptor[] = {
};
/* authorized_default behaviour:
- * -1 is authorized for all devices except wireless (old behaviour)
+ * -1 is authorized for all devices (leftover from wireless USB)
* 0 is unauthorized for all devices
* 1 is authorized for all devices
* 2 is authorized for internal devices
@@ -383,7 +362,7 @@ module_param(authorized_default, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(authorized_default,
"Default USB device authorization: 0 is not authorized, 1 is "
"authorized, 2 is authorized for internal devices, -1 is "
- "authorized except for wireless USB (default, old behaviour)");
+ "authorized (default, same as 1)");
/*-------------------------------------------------------------------------*/
/**
@@ -578,9 +557,6 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
case HCD_USB3:
bufp = usb3_rh_dev_descriptor;
break;
- case HCD_USB25:
- bufp = usb25_rh_dev_descriptor;
- break;
case HCD_USB2:
bufp = usb2_rh_dev_descriptor;
break;
@@ -602,7 +578,6 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
bufp = ss_rh_config_descriptor;
len = sizeof ss_rh_config_descriptor;
break;
- case HCD_USB25:
case HCD_USB2:
bufp = hs_rh_config_descriptor;
len = sizeof hs_rh_config_descriptor;
@@ -983,6 +958,7 @@ static int register_root_hub(struct usb_hcd *hcd)
{
struct device *parent_dev = hcd->self.controller;
struct usb_device *usb_dev = hcd->self.root_hub;
+ struct usb_device_descriptor *descr;
const int devnum = 1;
int retval;
@@ -994,13 +970,16 @@ static int register_root_hub(struct usb_hcd *hcd)
mutex_lock(&usb_bus_idr_lock);
usb_dev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
- retval = usb_get_device_descriptor(usb_dev, USB_DT_DEVICE_SIZE);
- if (retval != sizeof usb_dev->descriptor) {
+ descr = usb_get_device_descriptor(usb_dev);
+ if (IS_ERR(descr)) {
+ retval = PTR_ERR(descr);
mutex_unlock(&usb_bus_idr_lock);
dev_dbg (parent_dev, "can't read %s device descriptor %d\n",
dev_name(&usb_dev->dev), retval);
- return (retval < 0) ? retval : -EMSGSIZE;
+ return retval;
}
+ usb_dev->descriptor = *descr;
+ kfree(descr);
if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
retval = usb_get_bos_descriptor(usb_dev);
@@ -2844,18 +2823,14 @@ int usb_add_hcd(struct usb_hcd *hcd,
hcd->dev_policy = USB_DEVICE_AUTHORIZE_NONE;
break;
- case USB_AUTHORIZE_ALL:
- hcd->dev_policy = USB_DEVICE_AUTHORIZE_ALL;
- break;
-
case USB_AUTHORIZE_INTERNAL:
hcd->dev_policy = USB_DEVICE_AUTHORIZE_INTERNAL;
break;
+ case USB_AUTHORIZE_ALL:
case USB_AUTHORIZE_WIRED:
default:
- hcd->dev_policy = hcd->wireless ?
- USB_DEVICE_AUTHORIZE_NONE : USB_DEVICE_AUTHORIZE_ALL;
+ hcd->dev_policy = USB_DEVICE_AUTHORIZE_ALL;
break;
}
@@ -2899,9 +2874,6 @@ int usb_add_hcd(struct usb_hcd *hcd,
case HCD_USB2:
rhdev->speed = USB_SPEED_HIGH;
break;
- case HCD_USB25:
- rhdev->speed = USB_SPEED_WIRELESS;
- break;
case HCD_USB3:
rhdev->speed = USB_SPEED_SUPER;
break;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a739403a9e45..3c54b218301c 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -614,6 +614,29 @@ static int hub_ext_port_status(struct usb_hub *hub, int port1, int type,
ret = 0;
}
mutex_unlock(&hub->status_mutex);
+
+ /*
+ * There is no need to lock status_mutex here, because status_mutex
+ * protects hub->status, and the phy driver only checks the port
+ * status without changing the status.
+ */
+ if (!ret) {
+ struct usb_device *hdev = hub->hdev;
+
+ /*
+ * Only roothub will be notified of port state changes,
+ * since the USB PHY only cares about changes at the next
+ * level.
+ */
+ if (is_root_hub(hdev)) {
+ struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
+
+ if (hcd->usb_phy)
+ usb_phy_notify_port_status(hcd->usb_phy,
+ port1 - 1, *status, *change);
+ }
+ }
+
return ret;
}
@@ -2117,22 +2140,6 @@ EXPORT_SYMBOL_GPL(usb_set_device_state);
* USB-3.0 buses the address is assigned by the controller hardware
* and it usually is not the same as the device number.
*
- * WUSB devices are simple: they have no hubs behind, so the mapping
- * device <-> virtual port number becomes 1:1. Why? to simplify the
- * life of the device connection logic in
- * drivers/usb/wusbcore/devconnect.c. When we do the initial secret
- * handshake we need to assign a temporary address in the unauthorized
- * space. For simplicity we use the first virtual port number found to
- * be free [drivers/usb/wusbcore/devconnect.c:wusbhc_devconnect_ack()]
- * and that becomes it's address [X < 128] or its unauthorized address
- * [X | 0x80].
- *
- * We add 1 as an offset to the one-based USB-stack port number
- * (zero-based wusb virtual port index) for two reasons: (a) dev addr
- * 0 is reserved by USB for default address; (b) Linux's USB stack
- * uses always #1 for the root hub of the controller. So USB stack's
- * port #1, which is wusb virtual-port #0 has address #2.
- *
* Devices connected under xHCI are not as simple. The host controller
* supports virtualization, so the hardware assigns device addresses and
* the HCD must setup data structures before issuing a set address
@@ -2145,19 +2152,13 @@ static void choose_devnum(struct usb_device *udev)
/* be safe when more hub events are proceed in parallel */
mutex_lock(&bus->devnum_next_mutex);
- if (udev->wusb) {
- devnum = udev->portnum + 1;
- BUG_ON(test_bit(devnum, bus->devmap.devicemap));
- } else {
- /* Try to allocate the next devnum beginning at
- * bus->devnum_next. */
- devnum = find_next_zero_bit(bus->devmap.devicemap, 128,
- bus->devnum_next);
- if (devnum >= 128)
- devnum = find_next_zero_bit(bus->devmap.devicemap,
- 128, 1);
- bus->devnum_next = (devnum >= 127 ? 1 : devnum + 1);
- }
+
+ /* Try to allocate the next devnum beginning at bus->devnum_next. */
+ devnum = find_next_zero_bit(bus->devmap.devicemap, 128,
+ bus->devnum_next);
+ if (devnum >= 128)
+ devnum = find_next_zero_bit(bus->devmap.devicemap, 128, 1);
+ bus->devnum_next = (devnum >= 127 ? 1 : devnum + 1);
if (devnum < 128) {
set_bit(devnum, bus->devmap.devicemap);
udev->devnum = devnum;
@@ -2175,9 +2176,7 @@ static void release_devnum(struct usb_device *udev)
static void update_devnum(struct usb_device *udev, int devnum)
{
- /* The address for a WUSB device is managed by wusbcore. */
- if (!udev->wusb)
- udev->devnum = devnum;
+ udev->devnum = devnum;
if (!udev->devaddr)
udev->devaddr = (u8)devnum;
}
@@ -2670,15 +2669,6 @@ int usb_authorize_device(struct usb_device *usb_dev)
goto error_autoresume;
}
- if (usb_dev->wusb) {
- result = usb_get_device_descriptor(usb_dev, sizeof(usb_dev->descriptor));
- if (result < 0) {
- dev_err(&usb_dev->dev, "can't re-read device descriptor for "
- "authorization: %d\n", result);
- goto error_device_descriptor;
- }
- }
-
usb_dev->authorized = 1;
/* Choose and set the configuration. This registers the interfaces
* with the driver core and lets interface drivers bind to them.
@@ -2695,7 +2685,6 @@ int usb_authorize_device(struct usb_device *usb_dev)
}
dev_info(&usb_dev->dev, "authorized to connect\n");
-error_device_descriptor:
usb_autosuspend_device(usb_dev);
error_autoresume:
out_authorized:
@@ -2778,17 +2767,6 @@ out:
return USB_SSP_GEN_UNKNOWN;
}
-/* Returns 1 if @hub is a WUSB root hub, 0 otherwise */
-static unsigned hub_is_wusb(struct usb_hub *hub)
-{
- struct usb_hcd *hcd;
- if (hub->hdev->parent != NULL) /* not a root hub? */
- return 0;
- hcd = bus_to_hcd(hub->hdev->bus);
- return hcd->wireless;
-}
-
-
#ifdef CONFIG_USB_FEW_INIT_RETRIES
#define PORT_RESET_TRIES 2
#define SET_ADDRESS_TRIES 1
@@ -2941,9 +2919,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
udev->tx_lanes = 1;
udev->ssp_rate = USB_SSP_GEN_UNKNOWN;
}
- if (hub_is_wusb(hub))
- udev->speed = USB_SPEED_WIRELESS;
- else if (udev->ssp_rate != USB_SSP_GEN_UNKNOWN)
+ if (udev->ssp_rate != USB_SSP_GEN_UNKNOWN)
udev->speed = USB_SPEED_SUPER_PLUS;
else if (hub_is_superspeed(hub->hdev))
udev->speed = USB_SPEED_SUPER;
@@ -4718,6 +4694,67 @@ static int hub_enable_device(struct usb_device *udev)
return hcd->driver->enable_device(hcd, udev);
}
+/*
+ * Get the bMaxPacketSize0 value during initialization by reading the
+ * device's device descriptor. Since we don't already know this value,
+ * the transfer is unsafe and it ignores I/O errors, only testing for
+ * reasonable received values.
+ *
+ * For "old scheme" initialization, size will be 8 so we read just the
+ * start of the device descriptor, which should work okay regardless of
+ * the actual bMaxPacketSize0 value. For "new scheme" initialization,
+ * size will be 64 (and buf will point to a sufficiently large buffer),
+ * which might not be kosher according to the USB spec but it's what
+ * Windows does and what many devices expect.
+ *
+ * Returns: bMaxPacketSize0 or a negative error code.
+ */
+static int get_bMaxPacketSize0(struct usb_device *udev,
+ struct usb_device_descriptor *buf, int size, bool first_time)
+{
+ int i, rc;
+
+ /*
+ * Retry on all errors; some devices are flakey.
+ * 255 is for WUSB devices, we actually need to use
+ * 512 (WUSB1.0[4.8.1]).
+ */
+ for (i = 0; i < GET_MAXPACKET0_TRIES; ++i) {
+ /* Start with invalid values in case the transfer fails */
+ buf->bDescriptorType = buf->bMaxPacketSize0 = 0;
+ rc = usb_control_msg(udev, usb_rcvaddr0pipe(),
+ USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
+ USB_DT_DEVICE << 8, 0,
+ buf, size,
+ initial_descriptor_timeout);
+ switch (buf->bMaxPacketSize0) {
+ case 8: case 16: case 32: case 64: case 9:
+ if (buf->bDescriptorType == USB_DT_DEVICE) {
+ rc = buf->bMaxPacketSize0;
+ break;
+ }
+ fallthrough;
+ default:
+ if (rc >= 0)
+ rc = -EPROTO;
+ break;
+ }
+
+ /*
+ * Some devices time out if they are powered on
+ * when already connected. They need a second
+ * reset, so return early. But only on the first
+ * attempt, lest we get into a time-out/reset loop.
+ */
+ if (rc > 0 || (rc == -ETIMEDOUT && first_time &&
+ udev->speed > USB_SPEED_FULL))
+ break;
+ }
+ return rc;
+}
+
+#define GET_DESCRIPTOR_BUFSIZE 64
+
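
A note on the return value, inferred from the surrounding code rather than stated in the patch: for full- and high-speed devices bMaxPacketSize0 is the ep0 packet size in bytes (8, 16, 32 or 64), while SuperSpeed and faster devices report an exponent, so the descriptor carries 9 and the real size is 2^9 = 512. That is why the switch above accepts case 9 and why hub_port_init() later converts the value with i = 1 << maxp0. A tiny stand-alone decoder for illustration:

#include <stdio.h>

/*
 * Decode bMaxPacketSize0 the way hub_port_init() does below: a plain
 * byte count for speeds below SuperSpeed, a power-of-two exponent for
 * SuperSpeed and above (0 marks an invalid value).
 */
static int ep0_maxpacket(int bMaxPacketSize0, int superspeed_or_faster)
{
	if (!superspeed_or_faster)
		return bMaxPacketSize0;
	return bMaxPacketSize0 <= 16 ? 1 << bMaxPacketSize0 : 0;
}

int main(void)
{
	printf("%d\n", ep0_maxpacket(9, 1));	/* prints 512 */
	printf("%d\n", ep0_maxpacket(64, 0));	/* prints 64 */
	return 0;
}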
/* Reset device, (re)assign address, get device descriptor.
* Device connection must be stable, no more debouncing needed.
* Returns device in USB_STATE_ADDRESS, except on error.
@@ -4727,10 +4764,17 @@ static int hub_enable_device(struct usb_device *udev)
* the port lock. For a newly detected device that is not accessible
* through any global pointers, it's not necessary to lock the device,
* but it is still necessary to lock the port.
+ *
+ * For a newly detected device, @dev_descr must be NULL. The device
+ * descriptor retrieved from the device will then be stored in
+ * @udev->descriptor. For an already existing device, @dev_descr
+ * must be non-NULL. The device descriptor will be stored there,
+ * not in @udev->descriptor, because descriptors for registered
+ * devices are meant to be immutable.
*/
static int
hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
- int retry_counter)
+ int retry_counter, struct usb_device_descriptor *dev_descr)
{
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
@@ -4742,6 +4786,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
int devnum = udev->devnum;
const char *driver_name;
bool do_new_scheme;
+ const bool initial = !dev_descr;
+ int maxp0;
+ struct usb_device_descriptor *buf, *descr;
+
+ buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
/* root hub ports have a slightly longer reset period
* (from USB 2.0 spec, section 7.1.7.5)
@@ -4774,38 +4825,34 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
}
oldspeed = udev->speed;
- /* USB 2.0 section 5.5.3 talks about ep0 maxpacket ...
- * it's fixed size except for full speed devices.
- * For Wireless USB devices, ep0 max packet is always 512 (tho
- * reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
- */
- switch (udev->speed) {
- case USB_SPEED_SUPER_PLUS:
- case USB_SPEED_SUPER:
- case USB_SPEED_WIRELESS: /* fixed at 512 */
- udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
- break;
- case USB_SPEED_HIGH: /* fixed at 64 */
- udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
- break;
- case USB_SPEED_FULL: /* 8, 16, 32, or 64 */
- /* to determine the ep0 maxpacket size, try to read
- * the device descriptor to get bMaxPacketSize0 and
- * then correct our initial guess.
+ if (initial) {
+ /* USB 2.0 section 5.5.3 talks about ep0 maxpacket ...
+ * it's fixed size except for full speed devices.
*/
- udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
- break;
- case USB_SPEED_LOW: /* fixed at 8 */
- udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8);
- break;
- default:
- goto fail;
+ switch (udev->speed) {
+ case USB_SPEED_SUPER_PLUS:
+ case USB_SPEED_SUPER:
+ udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
+ break;
+ case USB_SPEED_HIGH: /* fixed at 64 */
+ udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
+ break;
+ case USB_SPEED_FULL: /* 8, 16, 32, or 64 */
+ /* to determine the ep0 maxpacket size, try to read
+ * the device descriptor to get bMaxPacketSize0 and
+ * then correct our initial guess.
+ */
+ udev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
+ break;
+ case USB_SPEED_LOW: /* fixed at 8 */
+ udev->ep0.desc.wMaxPacketSize = cpu_to_le16(8);
+ break;
+ default:
+ goto fail;
+ }
}
- if (udev->speed == USB_SPEED_WIRELESS)
- speed = "variable speed Wireless";
- else
- speed = usb_speed_string(udev->speed);
+ speed = usb_speed_string(udev->speed);
/*
* The controller driver may be NULL if the controller device
@@ -4822,22 +4869,24 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
if (udev->speed < USB_SPEED_SUPER)
dev_info(&udev->dev,
"%s %s USB device number %d using %s\n",
- (udev->config) ? "reset" : "new", speed,
+ (initial ? "new" : "reset"), speed,
devnum, driver_name);
- /* Set up TT records, if needed */
- if (hdev->tt) {
- udev->tt = hdev->tt;
- udev->ttport = hdev->ttport;
- } else if (udev->speed != USB_SPEED_HIGH
- && hdev->speed == USB_SPEED_HIGH) {
- if (!hub->tt.hub) {
- dev_err(&udev->dev, "parent hub has no TT\n");
- retval = -EINVAL;
- goto fail;
+ if (initial) {
+ /* Set up TT records, if needed */
+ if (hdev->tt) {
+ udev->tt = hdev->tt;
+ udev->ttport = hdev->ttport;
+ } else if (udev->speed != USB_SPEED_HIGH
+ && hdev->speed == USB_SPEED_HIGH) {
+ if (!hub->tt.hub) {
+ dev_err(&udev->dev, "parent hub has no TT\n");
+ retval = -EINVAL;
+ goto fail;
+ }
+ udev->tt = &hub->tt;
+ udev->ttport = port1;
}
- udev->tt = &hub->tt;
- udev->ttport = port1;
}
/* Why interleave GET_DESCRIPTOR and SET_ADDRESS this way?
@@ -4861,9 +4910,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
}
if (do_new_scheme) {
- struct usb_device_descriptor *buf;
- int r = 0;
-
retval = hub_enable_device(udev);
if (retval < 0) {
dev_err(&udev->dev,
@@ -4872,52 +4918,14 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
goto fail;
}
-#define GET_DESCRIPTOR_BUFSIZE 64
- buf = kmalloc(GET_DESCRIPTOR_BUFSIZE, GFP_NOIO);
- if (!buf) {
- retval = -ENOMEM;
- continue;
- }
-
- /* Retry on all errors; some devices are flakey.
- * 255 is for WUSB devices, we actually need to use
- * 512 (WUSB1.0[4.8.1]).
- */
- for (operations = 0; operations < GET_MAXPACKET0_TRIES;
- ++operations) {
- buf->bMaxPacketSize0 = 0;
- r = usb_control_msg(udev, usb_rcvaddr0pipe(),
- USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
- USB_DT_DEVICE << 8, 0,
- buf, GET_DESCRIPTOR_BUFSIZE,
- initial_descriptor_timeout);
- switch (buf->bMaxPacketSize0) {
- case 8: case 16: case 32: case 64: case 255:
- if (buf->bDescriptorType ==
- USB_DT_DEVICE) {
- r = 0;
- break;
- }
- fallthrough;
- default:
- if (r == 0)
- r = -EPROTO;
- break;
- }
- /*
- * Some devices time out if they are powered on
- * when already connected. They need a second
- * reset. But only on the first attempt,
- * lest we get into a time out/reset loop
- */
- if (r == 0 || (r == -ETIMEDOUT &&
- retries == 0 &&
- udev->speed > USB_SPEED_FULL))
- break;
+ maxp0 = get_bMaxPacketSize0(udev, buf,
+ GET_DESCRIPTOR_BUFSIZE, retries == 0);
+ if (maxp0 > 0 && !initial &&
+ maxp0 != udev->descriptor.bMaxPacketSize0) {
+ dev_err(&udev->dev, "device reset changed ep0 maxpacket size!\n");
+ retval = -ENODEV;
+ goto fail;
}
- udev->descriptor.bMaxPacketSize0 =
- buf->bMaxPacketSize0;
- kfree(buf);
retval = hub_port_reset(hub, port1, udev, delay, false);
if (retval < 0) /* error or disconnect */
@@ -4928,71 +4936,68 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
retval = -ENODEV;
goto fail;
}
- if (r) {
- if (r != -ENODEV)
+ if (maxp0 < 0) {
+ if (maxp0 != -ENODEV)
dev_err(&udev->dev, "device descriptor read/64, error %d\n",
- r);
- retval = -EMSGSIZE;
+ maxp0);
+ retval = maxp0;
continue;
}
-#undef GET_DESCRIPTOR_BUFSIZE
+ }
+
+ for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) {
+ retval = hub_set_address(udev, devnum);
+ if (retval >= 0)
+ break;
+ msleep(200);
+ }
+ if (retval < 0) {
+ if (retval != -ENODEV)
+ dev_err(&udev->dev, "device not accepting address %d, error %d\n",
+ devnum, retval);
+ goto fail;
+ }
+ if (udev->speed >= USB_SPEED_SUPER) {
+ devnum = udev->devnum;
+ dev_info(&udev->dev,
+ "%s SuperSpeed%s%s USB device number %d using %s\n",
+ (udev->config) ? "reset" : "new",
+ (udev->speed == USB_SPEED_SUPER_PLUS) ?
+ " Plus" : "",
+ (udev->ssp_rate == USB_SSP_GEN_2x2) ?
+ " Gen 2x2" :
+ (udev->ssp_rate == USB_SSP_GEN_2x1) ?
+ " Gen 2x1" :
+ (udev->ssp_rate == USB_SSP_GEN_1x2) ?
+ " Gen 1x2" : "",
+ devnum, driver_name);
}
/*
- * If device is WUSB, we already assigned an
- * unauthorized address in the Connect Ack sequence;
- * authorization will assign the final address.
+ * cope with hardware quirkiness:
+ * - let SET_ADDRESS settle, some device hardware wants it
+ * - read ep0 maxpacket even for high and low speed,
*/
- if (udev->wusb == 0) {
- for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) {
- retval = hub_set_address(udev, devnum);
- if (retval >= 0)
- break;
- msleep(200);
- }
- if (retval < 0) {
- if (retval != -ENODEV)
- dev_err(&udev->dev, "device not accepting address %d, error %d\n",
- devnum, retval);
- goto fail;
- }
- if (udev->speed >= USB_SPEED_SUPER) {
- devnum = udev->devnum;
- dev_info(&udev->dev,
- "%s SuperSpeed%s%s USB device number %d using %s\n",
- (udev->config) ? "reset" : "new",
- (udev->speed == USB_SPEED_SUPER_PLUS) ?
- " Plus" : "",
- (udev->ssp_rate == USB_SSP_GEN_2x2) ?
- " Gen 2x2" :
- (udev->ssp_rate == USB_SSP_GEN_2x1) ?
- " Gen 2x1" :
- (udev->ssp_rate == USB_SSP_GEN_1x2) ?
- " Gen 1x2" : "",
- devnum, driver_name);
- }
+ msleep(10);
- /* cope with hardware quirkiness:
- * - let SET_ADDRESS settle, some device hardware wants it
- * - read ep0 maxpacket even for high and low speed,
- */
- msleep(10);
- if (do_new_scheme)
- break;
- }
+ if (do_new_scheme)
+ break;
- retval = usb_get_device_descriptor(udev, 8);
- if (retval < 8) {
+ maxp0 = get_bMaxPacketSize0(udev, buf, 8, retries == 0);
+ if (maxp0 < 0) {
+ retval = maxp0;
if (retval != -ENODEV)
dev_err(&udev->dev,
"device descriptor read/8, error %d\n",
retval);
- if (retval >= 0)
- retval = -EMSGSIZE;
} else {
u32 delay;
- retval = 0;
+ if (!initial && maxp0 != udev->descriptor.bMaxPacketSize0) {
+ dev_err(&udev->dev, "device reset changed ep0 maxpacket size!\n");
+ retval = -ENODEV;
+ goto fail;
+ }
delay = udev->parent->hub_delay;
udev->hub_delay = min_t(u32, delay,
@@ -5011,54 +5016,67 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
goto fail;
/*
- * Some superspeed devices have finished the link training process
- * and attached to a superspeed hub port, but the device descriptor
- * got from those devices show they aren't superspeed devices. Warm
- * reset the port attached by the devices can fix them.
+ * Check the ep0 maxpacket guess and correct it if necessary.
+ * maxp0 is the value stored in the device descriptor;
+ * i is the value it encodes (logarithmic for SuperSpeed or greater).
*/
- if ((udev->speed >= USB_SPEED_SUPER) &&
- (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
- dev_err(&udev->dev, "got a wrong device descriptor, "
- "warm reset device\n");
- hub_port_reset(hub, port1, udev,
- HUB_BH_RESET_TIME, true);
- retval = -EINVAL;
- goto fail;
- }
-
- if (udev->descriptor.bMaxPacketSize0 == 0xff ||
- udev->speed >= USB_SPEED_SUPER)
- i = 512;
- else
- i = udev->descriptor.bMaxPacketSize0;
- if (usb_endpoint_maxp(&udev->ep0.desc) != i) {
- if (udev->speed == USB_SPEED_LOW ||
- !(i == 8 || i == 16 || i == 32 || i == 64)) {
- dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", i);
- retval = -EMSGSIZE;
- goto fail;
- }
+ i = maxp0;
+ if (udev->speed >= USB_SPEED_SUPER) {
+ if (maxp0 <= 16)
+ i = 1 << maxp0;
+ else
+ i = 0; /* Invalid */
+ }
+ if (usb_endpoint_maxp(&udev->ep0.desc) == i) {
+ ; /* Initial ep0 maxpacket guess is right */
+ } else if ((udev->speed == USB_SPEED_FULL ||
+ udev->speed == USB_SPEED_HIGH) &&
+ (i == 8 || i == 16 || i == 32 || i == 64)) {
+ /* Initial guess is wrong; use the descriptor's value */
if (udev->speed == USB_SPEED_FULL)
dev_dbg(&udev->dev, "ep0 maxpacket = %d\n", i);
else
dev_warn(&udev->dev, "Using ep0 maxpacket: %d\n", i);
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(i);
usb_ep0_reinit(udev);
+ } else {
+ /* Initial guess is wrong and descriptor's value is invalid */
+ dev_err(&udev->dev, "Invalid ep0 maxpacket: %d\n", maxp0);
+ retval = -EMSGSIZE;
+ goto fail;
}
- retval = usb_get_device_descriptor(udev, USB_DT_DEVICE_SIZE);
- if (retval < (signed)sizeof(udev->descriptor)) {
+ descr = usb_get_device_descriptor(udev);
+ if (IS_ERR(descr)) {
+ retval = PTR_ERR(descr);
if (retval != -ENODEV)
dev_err(&udev->dev, "device descriptor read/all, error %d\n",
retval);
- if (retval >= 0)
- retval = -ENOMSG;
+ goto fail;
+ }
+ if (initial)
+ udev->descriptor = *descr;
+ else
+ *dev_descr = *descr;
+ kfree(descr);
+
+ /*
+ * Some superspeed devices have finished the link training process
+ * and attached to a superspeed hub port, but the device descriptor
+ * got from those devices show they aren't superspeed devices. Warm
+ * reset the port attached by the devices can fix them.
+ */
+ if ((udev->speed >= USB_SPEED_SUPER) &&
+ (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
+ dev_err(&udev->dev, "got a wrong device descriptor, warm reset device\n");
+ hub_port_reset(hub, port1, udev, HUB_BH_RESET_TIME, true);
+ retval = -EINVAL;
goto fail;
}
usb_detect_quirks(udev);
- if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) {
+ if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) {
retval = usb_get_bos_descriptor(udev);
if (!retval) {
udev->lpm_capable = usb_device_supports_lpm(udev);
@@ -5078,6 +5096,7 @@ fail:
hub_port_disable(hub, port1, 0);
update_devnum(udev, devnum); /* for disconnect processing */
}
+ kfree(buf);
return retval;
}
@@ -5158,7 +5177,7 @@ hub_power_remaining(struct usb_hub *hub)
static int descriptors_changed(struct usb_device *udev,
- struct usb_device_descriptor *old_device_descriptor,
+ struct usb_device_descriptor *new_device_descriptor,
struct usb_host_bos *old_bos)
{
int changed = 0;
@@ -5169,8 +5188,8 @@ static int descriptors_changed(struct usb_device *udev,
int length;
char *buf;
- if (memcmp(&udev->descriptor, old_device_descriptor,
- sizeof(*old_device_descriptor)) != 0)
+ if (memcmp(&udev->descriptor, new_device_descriptor,
+ sizeof(*new_device_descriptor)) != 0)
return 1;
if ((old_bos && !udev->bos) || (!old_bos && udev->bos))
@@ -5333,7 +5352,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
usb_set_device_state(udev, USB_STATE_POWERED);
udev->bus_mA = hub->mA_per_port;
udev->level = hdev->level + 1;
- udev->wusb = hub_is_wusb(hub);
/* Devices connected to SuperSpeed hubs are USB 3.0 or later */
if (hub_is_superspeed(hub->hdev))
@@ -5348,7 +5366,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
}
/* reset (non-USB 3.0 devices) and get descriptor */
- status = hub_port_init(hub, udev, port1, i);
+ status = hub_port_init(hub, udev, port1, i, NULL);
if (status < 0)
goto loop;
@@ -5495,9 +5513,8 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
{
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
- struct usb_device_descriptor descriptor;
+ struct usb_device_descriptor *descr;
int status = -ENODEV;
- int retval;
dev_dbg(&port_dev->dev, "status %04x, change %04x, %s\n", portstatus,
portchange, portspeed(hub, portstatus));
@@ -5524,23 +5541,20 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
* changed device descriptors before resuscitating the
* device.
*/
- descriptor = udev->descriptor;
- retval = usb_get_device_descriptor(udev,
- sizeof(udev->descriptor));
- if (retval < 0) {
+ descr = usb_get_device_descriptor(udev);
+ if (IS_ERR(descr)) {
dev_dbg(&udev->dev,
- "can't read device descriptor %d\n",
- retval);
+ "can't read device descriptor %ld\n",
+ PTR_ERR(descr));
} else {
- if (descriptors_changed(udev, &descriptor,
+ if (descriptors_changed(udev, descr,
udev->bos)) {
dev_dbg(&udev->dev,
"device descriptor has changed\n");
- /* for disconnect() calls */
- udev->descriptor = descriptor;
} else {
status = 0; /* Nothing to do */
}
+ kfree(descr);
}
#ifdef CONFIG_PM
} else if (udev->state == USB_STATE_SUSPENDED &&
@@ -5982,7 +5996,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
struct usb_device *parent_hdev = udev->parent;
struct usb_hub *parent_hub;
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
- struct usb_device_descriptor descriptor = udev->descriptor;
+ struct usb_device_descriptor descriptor;
struct usb_host_bos *bos;
int i, j, ret = 0;
int port1 = udev->portnum;
@@ -6018,7 +6032,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
/* ep0 maxpacket size may change; let the HCD know about it.
* Other endpoints will be handled by re-enumeration. */
usb_ep0_reinit(udev);
- ret = hub_port_init(parent_hub, udev, port1, i);
+ ret = hub_port_init(parent_hub, udev, port1, i, &descriptor);
if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
break;
}
@@ -6030,7 +6044,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
/* Device might have changed firmware (DFU or similar) */
if (descriptors_changed(udev, &descriptor, bos)) {
dev_info(&udev->dev, "device firmware changed\n");
- udev->descriptor = descriptor; /* for disconnect() calls */
goto re_enumerate;
}
diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c
index ba371a24ff78..85c999f71ad7 100644
--- a/drivers/usb/core/ledtrig-usbport.c
+++ b/drivers/usb/core/ledtrig-usbport.c
@@ -350,18 +350,7 @@ static struct led_trigger usbport_led_trigger = {
.deactivate = usbport_trig_deactivate,
};
-static int __init usbport_trig_init(void)
-{
- return led_trigger_register(&usbport_led_trigger);
-}
-
-static void __exit usbport_trig_exit(void)
-{
- led_trigger_unregister(&usbport_led_trigger);
-}
-
-module_init(usbport_trig_init);
-module_exit(usbport_trig_exit);
+module_led_trigger(usbport_led_trigger);
MODULE_AUTHOR("Rafał Miłecki <rafal@milecki.pl>");
MODULE_DESCRIPTION("USB port trigger");
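
For readers unfamiliar with the helper used above: module_led_trigger() is a convenience macro that generates exactly the init/exit boilerplate the patch removes. The expansion shown below is an assumption based on include/linux/leds.h, not part of this patch; it is included only to illustrate that the trigger is registered at module load and unregistered at unload, one-for-one with the removed usbport_trig_init()/usbport_trig_exit():

/* Assumed reproduction of the helper, for illustration only. */
#define module_led_trigger(__led_trigger) \
	module_driver(__led_trigger, led_trigger_register, \
		      led_trigger_unregister)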
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index b5811620f1de..077dfe48d01c 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -9,6 +9,7 @@
#include <linux/pci.h> /* for scatterlist macros */
#include <linux/usb.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/timer.h>
@@ -1040,40 +1041,35 @@ char *usb_cache_string(struct usb_device *udev, int index)
EXPORT_SYMBOL_GPL(usb_cache_string);
/*
- * usb_get_device_descriptor - (re)reads the device descriptor (usbcore)
- * @dev: the device whose device descriptor is being updated
- * @size: how much of the descriptor to read
+ * usb_get_device_descriptor - read the device descriptor
+ * @udev: the device whose device descriptor should be read
*
* Context: task context, might sleep.
*
- * Updates the copy of the device descriptor stored in the device structure,
- * which dedicates space for this purpose.
- *
* Not exported, only for use by the core. If drivers really want to read
* the device descriptor directly, they can call usb_get_descriptor() with
* type = USB_DT_DEVICE and index = 0.
*
- * This call is synchronous, and may not be used in an interrupt context.
- *
- * Return: The number of bytes received on success, or else the status code
- * returned by the underlying usb_control_msg() call.
+ * Returns: a pointer to a dynamically allocated usb_device_descriptor
+ * structure (which the caller must deallocate), or an ERR_PTR value.
*/
-int usb_get_device_descriptor(struct usb_device *dev, unsigned int size)
+struct usb_device_descriptor *usb_get_device_descriptor(struct usb_device *udev)
{
struct usb_device_descriptor *desc;
int ret;
- if (size > sizeof(*desc))
- return -EINVAL;
desc = kmalloc(sizeof(*desc), GFP_NOIO);
if (!desc)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
+
+ ret = usb_get_descriptor(udev, USB_DT_DEVICE, 0, desc, sizeof(*desc));
+ if (ret == sizeof(*desc))
+ return desc;
- ret = usb_get_descriptor(dev, USB_DT_DEVICE, 0, desc, size);
if (ret >= 0)
- memcpy(&dev->descriptor, desc, size);
+ ret = -EMSGSIZE;
kfree(desc);
- return ret;
+ return ERR_PTR(ret);
}
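
A minimal sketch (not part of the patch) of the new calling convention, matching how the hub.c hunks above use it: usb_get_device_descriptor() now allocates the descriptor itself and signals failure through ERR_PTR(), so the caller owns the result and must free it. The helper remains core-internal, so this fragment only makes sense inside drivers/usb/core:

/* Assumed example caller; error codes come straight from the helper
 * (-ENOMEM on allocation failure, a transfer error or -EMSGSIZE on a
 * short read).
 */
static int example_read_descriptor(struct usb_device *udev,
				   struct usb_device_descriptor *out)
{
	struct usb_device_descriptor *descr;

	descr = usb_get_device_descriptor(udev);
	if (IS_ERR(descr))
		return PTR_ERR(descr);

	*out = *descr;		/* caller decides where the copy lives */
	kfree(descr);		/* caller owns the allocation */
	return 0;
}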
/*
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index 617e92569b2c..db4ccf9ce3d9 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -8,7 +8,6 @@
*/
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/usb/of.h>
/**
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 323dc02becbe..5d21718afb05 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -161,9 +161,6 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
case USB_SPEED_HIGH:
speed = "480";
break;
- case USB_SPEED_WIRELESS:
- speed = "480";
- break;
case USB_SPEED_SUPER:
speed = "5000";
break;
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 9f3c54032556..7576920e2d5a 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -480,8 +480,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
urb->iso_frame_desc[n].status = -EXDEV;
urb->iso_frame_desc[n].actual_length = 0;
}
- } else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint &&
- dev->speed != USB_SPEED_WIRELESS) {
+ } else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint) {
struct scatterlist *sg;
int i;
@@ -540,17 +539,9 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
case USB_ENDPOINT_XFER_ISOC:
case USB_ENDPOINT_XFER_INT:
/* too small? */
- switch (dev->speed) {
- case USB_SPEED_WIRELESS:
- if ((urb->interval < 6)
- && (xfertype == USB_ENDPOINT_XFER_INT))
- return -EINVAL;
- fallthrough;
- default:
- if (urb->interval <= 0)
- return -EINVAL;
- break;
- }
+ if (urb->interval <= 0)
+ return -EINVAL;
+
/* too big? */
switch (dev->speed) {
case USB_SPEED_SUPER_PLUS:
@@ -560,10 +551,6 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
return -EINVAL;
max = 1 << 15;
break;
- case USB_SPEED_WIRELESS:
- if (urb->interval > 16)
- return -EINVAL;
- break;
case USB_SPEED_HIGH: /* units are microframes */
/* NOTE usb handles 2^15 */
if (urb->interval > (1024 * 8))
@@ -587,10 +574,8 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
default:
return -EINVAL;
}
- if (dev->speed != USB_SPEED_WIRELESS) {
- /* Round down to a power of 2, no more than max */
- urb->interval = min(max, 1 << ilog2(urb->interval));
- }
+ /* Round down to a power of 2, no more than max */
+ urb->interval = min(max, 1 << ilog2(urb->interval));
}
return usb_hcd_submit_urb(urb, mem_flags);
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 901ec732321c..2a938cf47ccd 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
@@ -601,14 +602,6 @@ struct device_type usb_device_type = {
#endif
};
-
-/* Returns 1 if @usb_bus is WUSB, 0 otherwise */
-static unsigned usb_bus_is_wusb(struct usb_bus *bus)
-{
- struct usb_hcd *hcd = bus_to_hcd(bus);
- return hcd->wireless;
-}
-
static bool usb_dev_authorized(struct usb_device *dev, struct usb_hcd *hcd)
{
struct usb_hub *hub;
@@ -652,7 +645,6 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
{
struct usb_device *dev;
struct usb_hcd *usb_hcd = bus_to_hcd(bus);
- unsigned root_hub = 0;
unsigned raw_port = port1;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -702,7 +694,6 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
dev->dev.parent = bus->controller;
device_set_of_node_from_dev(&dev->dev, bus->sysdev);
dev_set_name(&dev->dev, "usb%d", bus->busnum);
- root_hub = 1;
} else {
/* match any labeling on the hubs; it's one-based */
if (parent->devpath[0] == '0') {
@@ -748,9 +739,6 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
#endif
dev->authorized = usb_dev_authorized(dev, usb_hcd);
- if (!root_hub)
- dev->wusb = usb_bus_is_wusb(bus) ? 1 : 0;
-
return dev;
}
EXPORT_SYMBOL_GPL(usb_alloc_dev);
@@ -1101,6 +1089,9 @@ static int __init usb_init(void)
retval = usb_major_init();
if (retval)
goto major_init_failed;
+ retval = class_register(&usbmisc_class);
+ if (retval)
+ goto class_register_failed;
retval = usb_register(&usbfs_driver);
if (retval)
goto driver_register_failed;
@@ -1120,6 +1111,8 @@ hub_init_failed:
usb_devio_init_failed:
usb_deregister(&usbfs_driver);
driver_register_failed:
+ class_unregister(&usbmisc_class);
+class_register_failed:
usb_major_cleanup();
major_init_failed:
bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
@@ -1147,6 +1140,7 @@ static void __exit usb_exit(void)
usb_deregister(&usbfs_driver);
usb_devio_cleanup();
usb_hub_cleanup();
+ class_unregister(&usbmisc_class);
bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
bus_unregister(&usb_bus_type);
usb_acpi_unregister();
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index ffe3f6818e9c..60363153fc3f 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -43,8 +43,8 @@ extern bool usb_endpoint_is_ignored(struct usb_device *udev,
struct usb_endpoint_descriptor *epd);
extern int usb_remove_device(struct usb_device *udev);
-extern int usb_get_device_descriptor(struct usb_device *dev,
- unsigned int size);
+extern struct usb_device_descriptor *usb_get_device_descriptor(
+ struct usb_device *udev);
extern int usb_set_isoch_delay(struct usb_device *dev);
extern int usb_get_bos_descriptor(struct usb_device *dev);
extern void usb_release_bos_descriptor(struct usb_device *dev);
@@ -141,6 +141,7 @@ static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
#endif
+extern const struct class usbmisc_class;
extern const struct bus_type usb_bus_type;
extern struct mutex usb_port_peer_mutex;
extern struct device_type usb_device_type;
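
The extern declaration added above pairs with the class_register()/class_unregister() calls in usb_init()/usb_exit() earlier in this patch. The definition itself is not shown in this section; presumably it is a const struct class along the following lines (the name and its placement are assumptions, not confirmed by the hunks here):

/* Assumed shape of the object behind the new extern declaration. */
const struct class usbmisc_class = {
	.name = "usbmisc",
};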
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 0bb4c0c845bf..c92a1da46a01 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -1330,6 +1330,7 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev);
/* The device ID match table */
extern const struct of_device_id dwc2_of_match_table[];
extern const struct acpi_device_id dwc2_acpi_match[];
+extern const struct pci_device_id dwc2_pci_ids[];
int dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg);
int dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg);
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 8b15742d9e8a..b517a7216de2 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -22,7 +22,6 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/of_platform.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index c9740caa5974..0144ca8350c3 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -2203,11 +2203,13 @@ static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
{
u32 gintsts, dbg_gintsts;
- irqreturn_t retval = IRQ_NONE;
+ irqreturn_t retval = IRQ_HANDLED;
if (!dwc2_is_controller_alive(hsotg)) {
dev_warn(hsotg->dev, "Controller is dead\n");
return retval;
+ } else {
+ retval = IRQ_NONE;
}
spin_lock(&hsotg->lock);
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 4c7c3dd15f9b..93f52e371cdd 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -7,9 +7,14 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/usb/of.h>
+#include <linux/pci_ids.h>
+#include <linux/pci.h>
#include "core.h"
+#define PCI_PRODUCT_ID_HAPS_HSOTG 0xabc0
+#define PCI_DEVICE_ID_LOONGSON_DWC2 0x7a04
+
static void dwc2_set_bcm_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
@@ -55,6 +60,14 @@ static void dwc2_set_jz4775_params(struct dwc2_hsotg *hsotg)
!device_property_read_bool(hsotg->dev, "disable-over-current");
}
+static void dwc2_set_loongson_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->phy_utmi_width = 8;
+ p->power_down = DWC2_POWER_DOWN_PARAM_PARTIAL;
+}
+
static void dwc2_set_x1600_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
@@ -302,6 +315,23 @@ const struct acpi_device_id dwc2_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, dwc2_acpi_match);
+const struct pci_device_id dwc2_pci_ids[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, PCI_PRODUCT_ID_HAPS_HSOTG),
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_STMICRO,
+ PCI_DEVICE_ID_STMICRO_USB_OTG),
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_LOONGSON, PCI_DEVICE_ID_LOONGSON_DWC2),
+ .driver_data = (unsigned long)dwc2_set_loongson_params,
+ },
+ { /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, dwc2_pci_ids);
+EXPORT_SYMBOL_GPL(dwc2_pci_ids);
+
static void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg)
{
switch (hsotg->hw_params.op_mode) {
@@ -948,13 +978,20 @@ int dwc2_init_params(struct dwc2_hsotg *hsotg)
if (match && match->data) {
set_params = match->data;
set_params(hsotg);
- } else {
+ } else if (!match) {
const struct acpi_device_id *amatch;
+ const struct pci_device_id *pmatch = NULL;
amatch = acpi_match_device(dwc2_acpi_match, hsotg->dev);
if (amatch && amatch->driver_data) {
set_params = (set_params_cb)amatch->driver_data;
set_params(hsotg);
+ } else if (!amatch)
+ pmatch = pci_match_id(dwc2_pci_ids, to_pci_dev(hsotg->dev->parent));
+
+ if (pmatch && pmatch->driver_data) {
+ set_params = (set_params_cb)pmatch->driver_data;
+ set_params(hsotg);
}
}
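
The nested conditionals above encode a strict precedence that is easier to see written out linearly: device-tree data wins, ACPI is consulted only when there is no OF match at all, and the new PCI table is consulted only when there is no ACPI match either. The sketch below is an equivalent reformulation for illustration only; the helper name and parameter passing are invented, and the patch keeps the nested form:

static void dwc2_apply_platform_params(struct dwc2_hsotg *hsotg,
				       const struct of_device_id *match,
				       const struct acpi_device_id *amatch,
				       const struct pci_device_id *pmatch)
{
	set_params_cb set_params = NULL;

	if (match)
		set_params = match->data;		/* OF wins outright */
	else if (amatch)
		set_params = (set_params_cb)amatch->driver_data;
	else if (pmatch)
		set_params = (set_params_cb)pmatch->driver_data;

	if (set_params)
		set_params(hsotg);
}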
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index b7306ed8be4c..f3a1e4232a31 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -24,7 +24,7 @@
#include <linux/platform_device.h>
#include <linux/usb/usb_phy_generic.h>
-#define PCI_PRODUCT_ID_HAPS_HSOTG 0xabc0
+#include "core.h"
static const char dwc2_driver_name[] = "dwc2-pci";
@@ -122,18 +122,6 @@ err:
return ret;
}
-static const struct pci_device_id dwc2_pci_ids[] = {
- {
- PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, PCI_PRODUCT_ID_HAPS_HSOTG),
- },
- {
- PCI_DEVICE(PCI_VENDOR_ID_STMICRO,
- PCI_DEVICE_ID_STMICRO_USB_OTG),
- },
- { /* end: all zeroes */ }
-};
-MODULE_DEVICE_TABLE(pci, dwc2_pci_ids);
-
static struct pci_driver dwc2_pci_driver = {
.name = dwc2_driver_name,
.id_table = dwc2_pci_ids,
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 0a806f80217e..b1d48019e944 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -11,7 +11,7 @@
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index be954a9abbe0..98efcbb76c88 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -168,4 +168,14 @@ config USB_DWC3_AM62
The Designware Core USB3 IP is programmed to operate in
USB 2.0 mode only.
Say 'Y' or 'M' here if you have one such device
+
+config USB_DWC3_OCTEON
+ tristate "Cavium Octeon Platforms"
+ depends on CAVIUM_OCTEON_SOC || COMPILE_TEST
+ default USB_DWC3
+ help
+ Support Cavium Octeon platforms with DesignWare Core USB3 IP.
+ Only the host mode is currently supported.
+ Say 'Y' or 'M' here if you have one such device.
+
endif
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 9f66bd82b639..fe1493d4bbe5 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -54,3 +54,4 @@ obj-$(CONFIG_USB_DWC3_ST) += dwc3-st.o
obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom.o
obj-$(CONFIG_USB_DWC3_IMX8MP) += dwc3-imx8mp.o
obj-$(CONFIG_USB_DWC3_XILINX) += dwc3-xilinx.o
+obj-$(CONFIG_USB_DWC3_OCTEON) += dwc3-octeon.o
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
index 1755f2f848c5..90a587bc29b7 100644
--- a/drivers/usb/dwc3/dwc3-am62.c
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -102,7 +102,7 @@
#define DWC3_AM62_AUTOSUSPEND_DELAY 100
-struct dwc3_data {
+struct dwc3_am62 {
struct device *dev;
void __iomem *usbss;
struct clk *usb2_refclk;
@@ -129,19 +129,19 @@ static const int dwc3_ti_rate_table[] = { /* in KHZ */
52000,
};
-static inline u32 dwc3_ti_readl(struct dwc3_data *data, u32 offset)
+static inline u32 dwc3_ti_readl(struct dwc3_am62 *am62, u32 offset)
{
- return readl((data->usbss) + offset);
+ return readl((am62->usbss) + offset);
}
-static inline void dwc3_ti_writel(struct dwc3_data *data, u32 offset, u32 value)
+static inline void dwc3_ti_writel(struct dwc3_am62 *am62, u32 offset, u32 value)
{
- writel(value, (data->usbss) + offset);
+ writel(value, (am62->usbss) + offset);
}
-static int phy_syscon_pll_refclk(struct dwc3_data *data)
+static int phy_syscon_pll_refclk(struct dwc3_am62 *am62)
{
- struct device *dev = data->dev;
+ struct device *dev = am62->dev;
struct device_node *node = dev->of_node;
struct of_phandle_args args;
struct regmap *syscon;
@@ -153,16 +153,16 @@ static int phy_syscon_pll_refclk(struct dwc3_data *data)
return PTR_ERR(syscon);
}
- data->syscon = syscon;
+ am62->syscon = syscon;
ret = of_parse_phandle_with_fixed_args(node, "ti,syscon-phy-pll-refclk", 1,
0, &args);
if (ret)
return ret;
- data->offset = args.args[0];
+ am62->offset = args.args[0];
- ret = regmap_update_bits(data->syscon, data->offset, PHY_PLL_REFCLK_MASK, data->rate_code);
+ ret = regmap_update_bits(am62->syscon, am62->offset, PHY_PLL_REFCLK_MASK, am62->rate_code);
if (ret) {
dev_err(dev, "failed to set phy pll reference clock rate\n");
return ret;
@@ -175,32 +175,32 @@ static int dwc3_ti_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
- struct dwc3_data *data;
+ struct dwc3_am62 *am62;
int i, ret;
unsigned long rate;
u32 reg;
- data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
- if (!data)
+ am62 = devm_kzalloc(dev, sizeof(*am62), GFP_KERNEL);
+ if (!am62)
return -ENOMEM;
- data->dev = dev;
- platform_set_drvdata(pdev, data);
+ am62->dev = dev;
+ platform_set_drvdata(pdev, am62);
- data->usbss = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(data->usbss)) {
+ am62->usbss = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(am62->usbss)) {
dev_err(dev, "can't map IOMEM resource\n");
- return PTR_ERR(data->usbss);
+ return PTR_ERR(am62->usbss);
}
- data->usb2_refclk = devm_clk_get(dev, "ref");
- if (IS_ERR(data->usb2_refclk)) {
+ am62->usb2_refclk = devm_clk_get(dev, "ref");
+ if (IS_ERR(am62->usb2_refclk)) {
dev_err(dev, "can't get usb2_refclk\n");
- return PTR_ERR(data->usb2_refclk);
+ return PTR_ERR(am62->usb2_refclk);
}
/* Calculate the rate code */
- rate = clk_get_rate(data->usb2_refclk);
+ rate = clk_get_rate(am62->usb2_refclk);
rate /= 1000; // To KHz
for (i = 0; i < ARRAY_SIZE(dwc3_ti_rate_table); i++) {
if (dwc3_ti_rate_table[i] == rate)
@@ -212,20 +212,20 @@ static int dwc3_ti_probe(struct platform_device *pdev)
return -EINVAL;
}
- data->rate_code = i;
+ am62->rate_code = i;
/* Read the syscon property and set the rate code */
- ret = phy_syscon_pll_refclk(data);
+ ret = phy_syscon_pll_refclk(am62);
if (ret)
return ret;
/* VBUS divider select */
- data->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
- reg = dwc3_ti_readl(data, USBSS_PHY_CONFIG);
- if (data->vbus_divider)
+ am62->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
+ reg = dwc3_ti_readl(am62, USBSS_PHY_CONFIG);
+ if (am62->vbus_divider)
reg |= 1 << USBSS_PHY_VBUS_SEL_SHIFT;
- dwc3_ti_writel(data, USBSS_PHY_CONFIG, reg);
+ dwc3_ti_writel(am62, USBSS_PHY_CONFIG, reg);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -233,7 +233,7 @@ static int dwc3_ti_probe(struct platform_device *pdev)
* Don't ignore its dependencies with its children
*/
pm_suspend_ignore_children(dev, false);
- clk_prepare_enable(data->usb2_refclk);
+ clk_prepare_enable(am62->usb2_refclk);
pm_runtime_get_noresume(dev);
ret = of_platform_populate(node, NULL, NULL, dev);
@@ -243,9 +243,9 @@ static int dwc3_ti_probe(struct platform_device *pdev)
}
/* Set mode valid bit to indicate role is valid */
- reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL);
+ reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL);
reg |= USBSS_MODE_VALID;
- dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg);
+ dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg);
/* Device has capability to wakeup system from sleep */
device_set_wakeup_capable(dev, true);
@@ -261,7 +261,7 @@ static int dwc3_ti_probe(struct platform_device *pdev)
return 0;
err_pm_disable:
- clk_disable_unprepare(data->usb2_refclk);
+ clk_disable_unprepare(am62->usb2_refclk);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
return ret;
@@ -278,36 +278,34 @@ static int dwc3_ti_remove_core(struct device *dev, void *c)
static void dwc3_ti_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct dwc3_data *data = platform_get_drvdata(pdev);
+ struct dwc3_am62 *am62 = platform_get_drvdata(pdev);
u32 reg;
device_for_each_child(dev, NULL, dwc3_ti_remove_core);
/* Clear mode valid bit */
- reg = dwc3_ti_readl(data, USBSS_MODE_CONTROL);
+ reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL);
reg &= ~USBSS_MODE_VALID;
- dwc3_ti_writel(data, USBSS_MODE_CONTROL, reg);
+ dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg);
pm_runtime_put_sync(dev);
- clk_disable_unprepare(data->usb2_refclk);
+ clk_disable_unprepare(am62->usb2_refclk);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
-
- platform_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM
static int dwc3_ti_suspend_common(struct device *dev)
{
- struct dwc3_data *data = dev_get_drvdata(dev);
+ struct dwc3_am62 *am62 = dev_get_drvdata(dev);
u32 reg, current_prtcap_dir;
if (device_may_wakeup(dev)) {
- reg = dwc3_ti_readl(data, USBSS_CORE_STAT);
+ reg = dwc3_ti_readl(am62, USBSS_CORE_STAT);
current_prtcap_dir = (reg & USBSS_CORE_OPERATIONAL_MODE_MASK)
>> USBSS_CORE_OPERATIONAL_MODE_SHIFT;
/* Set wakeup config enable bits */
- reg = dwc3_ti_readl(data, USBSS_WAKEUP_CONFIG);
+ reg = dwc3_ti_readl(am62, USBSS_WAKEUP_CONFIG);
if (current_prtcap_dir == DWC3_GCTL_PRTCAP_HOST) {
reg = USBSS_WAKEUP_CFG_LINESTATE_EN | USBSS_WAKEUP_CFG_OVERCURRENT_EN;
} else {
@@ -317,30 +315,30 @@ static int dwc3_ti_suspend_common(struct device *dev)
* and in U2/L3 state else it causes spurious wake-up.
*/
}
- dwc3_ti_writel(data, USBSS_WAKEUP_CONFIG, reg);
+ dwc3_ti_writel(am62, USBSS_WAKEUP_CONFIG, reg);
/* clear wakeup status so we know what caused the wake up */
- dwc3_ti_writel(data, USBSS_WAKEUP_STAT, USBSS_WAKEUP_STAT_CLR);
+ dwc3_ti_writel(am62, USBSS_WAKEUP_STAT, USBSS_WAKEUP_STAT_CLR);
}
- clk_disable_unprepare(data->usb2_refclk);
+ clk_disable_unprepare(am62->usb2_refclk);
return 0;
}
static int dwc3_ti_resume_common(struct device *dev)
{
- struct dwc3_data *data = dev_get_drvdata(dev);
+ struct dwc3_am62 *am62 = dev_get_drvdata(dev);
u32 reg;
- clk_prepare_enable(data->usb2_refclk);
+ clk_prepare_enable(am62->usb2_refclk);
if (device_may_wakeup(dev)) {
/* Clear wakeup config enable bits */
- dwc3_ti_writel(data, USBSS_WAKEUP_CONFIG, USBSS_WAKEUP_CFG_NONE);
+ dwc3_ti_writel(am62, USBSS_WAKEUP_CONFIG, USBSS_WAKEUP_CFG_NONE);
}
- reg = dwc3_ti_readl(data, USBSS_WAKEUP_STAT);
- data->wakeup_stat = reg;
+ reg = dwc3_ti_readl(am62, USBSS_WAKEUP_STAT);
+ am62->wakeup_stat = reg;
return 0;
}
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index f882dd647340..5d365ca51771 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -163,6 +163,12 @@ static const struct dwc3_exynos_driverdata exynos7_drvdata = {
.suspend_clk_idx = 1,
};
+static const struct dwc3_exynos_driverdata exynos850_drvdata = {
+ .clk_names = { "bus_early", "ref" },
+ .num_clks = 2,
+ .suspend_clk_idx = -1,
+};
+
static const struct of_device_id exynos_dwc3_match[] = {
{
.compatible = "samsung,exynos5250-dwusb3",
@@ -174,6 +180,9 @@ static const struct of_device_id exynos_dwc3_match[] = {
.compatible = "samsung,exynos7-dwusb3",
.data = &exynos7_drvdata,
}, {
+ .compatible = "samsung,exynos850-dwusb3",
+ .data = &exynos850_drvdata,
+ }, {
}
};
MODULE_DEVICE_TABLE(of, exynos_dwc3_match);
diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
index 8b9a3bb587bf..a1e15f2fffdb 100644
--- a/drivers/usb/dwc3/dwc3-imx8mp.c
+++ b/drivers/usb/dwc3/dwc3-imx8mp.c
@@ -10,6 +10,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -279,7 +280,6 @@ static void dwc3_imx8mp_remove(struct platform_device *pdev)
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
- platform_set_drvdata(pdev, NULL);
}
static int __maybe_unused dwc3_imx8mp_suspend(struct dwc3_imx8mp *dwc3_imx,
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 0a09aedc2573..8899348b6276 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
@@ -196,8 +197,6 @@ static void kdwc3_remove(struct platform_device *pdev)
phy_power_off(kdwc->usb3_phy);
phy_exit(kdwc->usb3_phy);
phy_pm_runtime_put_sync(kdwc->usb3_phy);
-
- platform_set_drvdata(pdev, NULL);
}
static const struct of_device_id kdwc3_of_match[] = {
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index e99c7489dba0..2c07c038b584 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -926,6 +926,12 @@ static int __maybe_unused dwc3_meson_g12a_resume(struct device *dev)
return ret;
}
+ if (priv->drvdata->usb_post_init) {
+ ret = priv->drvdata->usb_post_init(priv);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/drivers/usb/dwc3/dwc3-octeon.c
index 2add435ad038..6010135e1acc 100644
--- a/arch/mips/cavium-octeon/octeon-usb.c
+++ b/drivers/usb/dwc3/dwc3-octeon.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * XHCI HCD glue for Cavium Octeon III SOCs.
+ * DWC3 glue for Cavium Octeon III SOCs.
*
* Copyright (C) 2010-2017 Cavium Networks
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
+ * Copyright (C) 2023 RACOM s.r.o.
*/
#include <linux/bitfield.h>
@@ -15,7 +13,9 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/platform_device.h>
/*
* USB Control Register
@@ -24,9 +24,9 @@
/* BIST fast-clear mode select. A BIST run with this bit set
* clears all entries in USBH RAMs to 0x0.
*/
-# define USBDRD_UCTL_CTL_CLEAR_BIST BIT(63)
+# define USBDRD_UCTL_CTL_CLEAR_BIST BIT_ULL(63)
/* 1 = Start BIST and cleared by hardware */
-# define USBDRD_UCTL_CTL_START_BIST BIT(62)
+# define USBDRD_UCTL_CTL_START_BIST BIT_ULL(62)
/* Reference clock select for SuperSpeed and HighSpeed PLLs:
* 0x0 = Both PLLs use DLMC_REF_CLK0 for reference clock
* 0x1 = Both PLLs use DLMC_REF_CLK1 for reference clock
@@ -35,32 +35,32 @@
* 0x3 = SuperSpeed PLL uses DLMC_REF_CLK1 for reference clock &
 * HighSpeed PLL uses PLL_REF_CLK for reference clock
*/
-# define USBDRD_UCTL_CTL_REF_CLK_SEL GENMASK(61, 60)
+# define USBDRD_UCTL_CTL_REF_CLK_SEL GENMASK_ULL(61, 60)
/* 1 = Spread-spectrum clock enable, 0 = SS clock disable */
-# define USBDRD_UCTL_CTL_SSC_EN BIT(59)
+# define USBDRD_UCTL_CTL_SSC_EN BIT_ULL(59)
/* Spread-spectrum clock modulation range:
* 0x0 = -4980 ppm downspread
* 0x1 = -4492 ppm downspread
* 0x2 = -4003 ppm downspread
* 0x3 - 0x7 = Reserved
*/
-# define USBDRD_UCTL_CTL_SSC_RANGE GENMASK(58, 56)
+# define USBDRD_UCTL_CTL_SSC_RANGE GENMASK_ULL(58, 56)
/* Enable non-standard oscillator frequencies:
* [55:53] = modules -1
* [52:47] = 2's complement push amount, 0 = Feature disabled
*/
-# define USBDRD_UCTL_CTL_SSC_REF_CLK_SEL GENMASK(55, 47)
+# define USBDRD_UCTL_CTL_SSC_REF_CLK_SEL GENMASK_ULL(55, 47)
/* Reference clock multiplier for non-standard frequencies:
* 0x19 = 100MHz on DLMC_REF_CLK* if REF_CLK_SEL = 0x0 or 0x1
* 0x28 = 125MHz on DLMC_REF_CLK* if REF_CLK_SEL = 0x0 or 0x1
* 0x32 = 50MHz on DLMC_REF_CLK* if REF_CLK_SEL = 0x0 or 0x1
* Other Values = Reserved
*/
-# define USBDRD_UCTL_CTL_MPLL_MULTIPLIER GENMASK(46, 40)
+# define USBDRD_UCTL_CTL_MPLL_MULTIPLIER GENMASK_ULL(46, 40)
/* Enable reference clock to prescaler for SuperSpeed functionality.
* Should always be set to "1"
*/
-# define USBDRD_UCTL_CTL_REF_SSP_EN BIT(39)
+# define USBDRD_UCTL_CTL_REF_SSP_EN BIT_ULL(39)
/* Divide the reference clock by 2 before entering the
* REF_CLK_FSEL divider:
* If REF_CLK_SEL = 0x0 or 0x1, then only 0x0 is legal
@@ -68,21 +68,21 @@
* 0x1 = DLMC_REF_CLK* is 125MHz
* 0x0 = DLMC_REF_CLK* is another supported frequency
*/
-# define USBDRD_UCTL_CTL_REF_CLK_DIV2 BIT(38)
+# define USBDRD_UCTL_CTL_REF_CLK_DIV2 BIT_ULL(38)
/* Select reference clock frequency for both PLL blocks:
* 0x27 = REF_CLK_SEL is 0x0 or 0x1
* 0x07 = REF_CLK_SEL is 0x2 or 0x3
*/
-# define USBDRD_UCTL_CTL_REF_CLK_FSEL GENMASK(37, 32)
+# define USBDRD_UCTL_CTL_REF_CLK_FSEL GENMASK_ULL(37, 32)
/* Controller clock enable. */
-# define USBDRD_UCTL_CTL_H_CLK_EN BIT(30)
+# define USBDRD_UCTL_CTL_H_CLK_EN BIT_ULL(30)
/* Select bypass input to controller clock divider:
* 0x0 = Use divided coprocessor clock from H_CLKDIV
* 0x1 = Use clock from GPIO pins
*/
-# define USBDRD_UCTL_CTL_H_CLK_BYP_SEL BIT(29)
+# define USBDRD_UCTL_CTL_H_CLK_BYP_SEL BIT_ULL(29)
/* Reset controller clock divider. */
-# define USBDRD_UCTL_CTL_H_CLKDIV_RST BIT(28)
+# define USBDRD_UCTL_CTL_H_CLKDIV_RST BIT_ULL(28)
/* Clock divider select:
* 0x0 = divide by 1
* 0x1 = divide by 2
@@ -93,29 +93,29 @@
* 0x6 = divide by 24
* 0x7 = divide by 32
*/
-# define USBDRD_UCTL_CTL_H_CLKDIV_SEL GENMASK(26, 24)
+# define USBDRD_UCTL_CTL_H_CLKDIV_SEL GENMASK_ULL(26, 24)
/* USB3 port permanently attached: 0x0 = No, 0x1 = Yes */
-# define USBDRD_UCTL_CTL_USB3_PORT_PERM_ATTACH BIT(21)
+# define USBDRD_UCTL_CTL_USB3_PORT_PERM_ATTACH BIT_ULL(21)
/* USB2 port permanently attached: 0x0 = No, 0x1 = Yes */
-# define USBDRD_UCTL_CTL_USB2_PORT_PERM_ATTACH BIT(20)
+# define USBDRD_UCTL_CTL_USB2_PORT_PERM_ATTACH BIT_ULL(20)
/* Disable SuperSpeed PHY: 0x0 = No, 0x1 = Yes */
-# define USBDRD_UCTL_CTL_USB3_PORT_DISABLE BIT(18)
+# define USBDRD_UCTL_CTL_USB3_PORT_DISABLE BIT_ULL(18)
/* Disable HighSpeed PHY: 0x0 = No, 0x1 = Yes */
-# define USBDRD_UCTL_CTL_USB2_PORT_DISABLE BIT(16)
+# define USBDRD_UCTL_CTL_USB2_PORT_DISABLE BIT_ULL(16)
/* Enable PHY SuperSpeed block power: 0x0 = No, 0x1 = Yes */
-# define USBDRD_UCTL_CTL_SS_POWER_EN BIT(14)
+# define USBDRD_UCTL_CTL_SS_POWER_EN BIT_ULL(14)
/* Enable PHY HighSpeed block power: 0x0 = No, 0x1 = Yes */
-# define USBDRD_UCTL_CTL_HS_POWER_EN BIT(12)
+# define USBDRD_UCTL_CTL_HS_POWER_EN BIT_ULL(12)
/* Enable USB UCTL interface clock: 0x0 = No, 0x1 = Yes */
-# define USBDRD_UCTL_CTL_CSCLK_EN BIT(4)
+# define USBDRD_UCTL_CTL_CSCLK_EN BIT_ULL(4)
/* Controller mode: 0x0 = Host, 0x1 = Device */
-# define USBDRD_UCTL_CTL_DRD_MODE BIT(3)
+# define USBDRD_UCTL_CTL_DRD_MODE BIT_ULL(3)
/* PHY reset */
-# define USBDRD_UCTL_CTL_UPHY_RST BIT(2)
+# define USBDRD_UCTL_CTL_UPHY_RST BIT_ULL(2)
/* Software reset UAHC */
-# define USBDRD_UCTL_CTL_UAHC_RST BIT(1)
+# define USBDRD_UCTL_CTL_UAHC_RST BIT_ULL(1)
/* Software resets UCTL */
-# define USBDRD_UCTL_CTL_UCTL_RST BIT(0)
+# define USBDRD_UCTL_CTL_UCTL_RST BIT_ULL(0)
#define USBDRD_UCTL_BIST_STATUS 0x08
#define USBDRD_UCTL_SPARE0 0x10
@@ -130,64 +130,69 @@
*/
#define USBDRD_UCTL_HOST_CFG 0xe0
/* Indicates minimum value of all received BELT values */
-# define USBDRD_UCTL_HOST_CFG_HOST_CURRENT_BELT GENMASK(59, 48)
+# define USBDRD_UCTL_HOST_CFG_HOST_CURRENT_BELT GENMASK_ULL(59, 48)
/* HS jitter adjustment */
-# define USBDRD_UCTL_HOST_CFG_FLA GENMASK(37, 32)
+# define USBDRD_UCTL_HOST_CFG_FLA GENMASK_ULL(37, 32)
/* Bus-master enable: 0x0 = Disabled (stall DMAs), 0x1 = enabled */
-# define USBDRD_UCTL_HOST_CFG_BME BIT(28)
+# define USBDRD_UCTL_HOST_CFG_BME BIT_ULL(28)
/* Overcurrent protection enable: 0x0 = unavailable, 0x1 = available */
-# define USBDRD_UCTL_HOST_OCI_EN BIT(27)
+# define USBDRD_UCTL_HOST_OCI_EN BIT_ULL(27)
/* Overcurrent sense selection:
* 0x0 = Overcurrent indication from off-chip is active-low
* 0x1 = Overcurrent indication from off-chip is active-high
*/
-# define USBDRD_UCTL_HOST_OCI_ACTIVE_HIGH_EN BIT(26)
+# define USBDRD_UCTL_HOST_OCI_ACTIVE_HIGH_EN BIT_ULL(26)
/* Port power control enable: 0x0 = unavailable, 0x1 = available */
-# define USBDRD_UCTL_HOST_PPC_EN BIT(25)
+# define USBDRD_UCTL_HOST_PPC_EN BIT_ULL(25)
/* Port power control sense selection:
* 0x0 = Port power to off-chip is active-low
* 0x1 = Port power to off-chip is active-high
*/
-# define USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN BIT(24)
+# define USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN BIT_ULL(24)
/*
* UCTL Shim Features Register
*/
#define USBDRD_UCTL_SHIM_CFG 0xe8
/* Out-of-bound UAHC register access: 0 = read, 1 = write */
-# define USBDRD_UCTL_SHIM_CFG_XS_NCB_OOB_WRN BIT(63)
+# define USBDRD_UCTL_SHIM_CFG_XS_NCB_OOB_WRN BIT_ULL(63)
/* SRCID error log for out-of-bound UAHC register access:
* [59:58] = chipID
* [57] = Request source: 0 = core, 1 = NCB-device
* [56:51] = Core/NCB-device number, [56] always 0 for NCB devices
* [50:48] = SubID
*/
-# define USBDRD_UCTL_SHIM_CFG_XS_NCB_OOB_OSRC GENMASK(59, 48)
+# define USBDRD_UCTL_SHIM_CFG_XS_NCB_OOB_OSRC GENMASK_ULL(59, 48)
/* Error log for bad UAHC DMA access: 0 = Read log, 1 = Write log */
-# define USBDRD_UCTL_SHIM_CFG_XM_BAD_DMA_WRN BIT(47)
+# define USBDRD_UCTL_SHIM_CFG_XM_BAD_DMA_WRN BIT_ULL(47)
/* Encoded error type for bad UAHC DMA */
-# define USBDRD_UCTL_SHIM_CFG_XM_BAD_DMA_TYPE GENMASK(43, 40)
+# define USBDRD_UCTL_SHIM_CFG_XM_BAD_DMA_TYPE GENMASK_ULL(43, 40)
/* Select the IOI read command used by DMA accesses */
-# define USBDRD_UCTL_SHIM_CFG_DMA_READ_CMD BIT(12)
+# define USBDRD_UCTL_SHIM_CFG_DMA_READ_CMD BIT_ULL(12)
/* Select endian format for DMA accesses to the L2C:
* 0x0 = Little endian
* 0x1 = Big endian
* 0x2 = Reserved
* 0x3 = Reserved
*/
-# define USBDRD_UCTL_SHIM_CFG_DMA_ENDIAN_MODE GENMASK(9, 8)
+# define USBDRD_UCTL_SHIM_CFG_DMA_ENDIAN_MODE GENMASK_ULL(9, 8)
/* Select endian format for IOI CSR access to UAHC:
* 0x0 = Little endian
* 0x1 = Big endian
* 0x2 = Reserved
* 0x3 = Reserved
*/
-# define USBDRD_UCTL_SHIM_CFG_CSR_ENDIAN_MODE GENMASK(1, 0)
+# define USBDRD_UCTL_SHIM_CFG_CSR_ENDIAN_MODE GENMASK_ULL(1, 0)
#define USBDRD_UCTL_ECC 0xf0
#define USBDRD_UCTL_SPARE1 0xf8
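
The blanket BIT() to BIT_ULL() and GENMASK() to GENMASK_ULL() conversion above is not cosmetic: the UCTL registers are 64 bits wide, and BIT()/GENMASK() expand to unsigned long arithmetic, which is only 32 bits on 32-bit builds. Those builds become reachable once the glue driver moves under drivers/usb/dwc3 with a COMPILE_TEST dependency, so bit positions above 31 must use the unsigned long long variants. A small sketch of the intended usage, reusing the register fields defined above (the helper name is illustrative, not from the patch):

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/types.h>

/* Build a 64-bit UCTL_CTL value with the clock-divider field and the
 * controller-clock enable bit set; GENMASK_ULL()/BIT_ULL() keep the
 * arithmetic 64-bit even when unsigned long is 32 bits wide.
 */
static inline u64 uctl_ctl_with_clkdiv(u64 reg, unsigned int div)
{
	reg &= ~USBDRD_UCTL_CTL_H_CLKDIV_SEL;	/* GENMASK_ULL(26, 24) */
	reg |= FIELD_PREP(USBDRD_UCTL_CTL_H_CLKDIV_SEL, div);
	return reg | USBDRD_UCTL_CTL_H_CLK_EN;	/* BIT_ULL(30) */
}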
-static DEFINE_MUTEX(dwc3_octeon_clocks_mutex);
+struct dwc3_octeon {
+ struct device *dev;
+ void __iomem *base;
+};
+
+#define DWC3_GPIO_POWER_NONE (-1)
#ifdef CONFIG_CAVIUM_OCTEON_SOC
#include <asm/octeon/octeon.h>
@@ -233,6 +238,11 @@ static inline uint64_t dwc3_octeon_readq(void __iomem *addr)
static inline void dwc3_octeon_writeq(void __iomem *base, uint64_t val) { }
static inline void dwc3_octeon_config_gpio(int index, int gpio) { }
+
+static uint64_t octeon_get_io_clock_rate(void)
+{
+ return 150000000;
+}
#endif
static int dwc3_octeon_get_divider(void)
@@ -243,115 +253,22 @@ static int dwc3_octeon_get_divider(void)
while (div < ARRAY_SIZE(clk_div)) {
uint64_t rate = octeon_get_io_clock_rate() / clk_div[div];
if (rate <= 300000000 && rate >= 150000000)
- break;
+ return div;
div++;
}
- return div;
+ return -EINVAL;
}
-static int dwc3_octeon_config_power(struct device *dev, void __iomem *base)
+static int dwc3_octeon_setup(struct dwc3_octeon *octeon,
+ int ref_clk_sel, int ref_clk_fsel, int mpll_mul,
+ int power_gpio, int power_active_low)
{
- uint32_t gpio_pwr[3];
- int gpio, len, power_active_low;
- struct device_node *node = dev->of_node;
u64 val;
- void __iomem *uctl_host_cfg_reg = base + USBDRD_UCTL_HOST_CFG;
-
- if (of_find_property(node, "power", &len) != NULL) {
- if (len == 12) {
- of_property_read_u32_array(node, "power", gpio_pwr, 3);
- power_active_low = gpio_pwr[2] & 0x01;
- gpio = gpio_pwr[1];
- } else if (len == 8) {
- of_property_read_u32_array(node, "power", gpio_pwr, 2);
- power_active_low = 0;
- gpio = gpio_pwr[1];
- } else {
- dev_err(dev, "invalid power configuration\n");
- return -EINVAL;
- }
- dwc3_octeon_config_gpio(((u64)base >> 24) & 1, gpio);
-
- /* Enable XHCI power control and set if active high or low. */
- val = dwc3_octeon_readq(uctl_host_cfg_reg);
- val |= USBDRD_UCTL_HOST_PPC_EN;
- if (power_active_low)
- val &= ~USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN;
- else
- val |= USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN;
- dwc3_octeon_writeq(uctl_host_cfg_reg, val);
- } else {
- /* Disable XHCI power control and set if active high. */
- val = dwc3_octeon_readq(uctl_host_cfg_reg);
- val &= ~USBDRD_UCTL_HOST_PPC_EN;
- val &= ~USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN;
- dwc3_octeon_writeq(uctl_host_cfg_reg, val);
- dev_info(dev, "power control disabled\n");
- }
- return 0;
-}
-
-static int dwc3_octeon_clocks_start(struct device *dev, void __iomem *base)
-{
- int i, div, mpll_mul, ref_clk_fsel, ref_clk_sel = 2;
- u32 clock_rate;
- u64 val;
- void __iomem *uctl_ctl_reg = base + USBDRD_UCTL_CTL;
-
- if (dev->of_node) {
- const char *ss_clock_type;
- const char *hs_clock_type;
-
- i = of_property_read_u32(dev->of_node,
- "refclk-frequency", &clock_rate);
- if (i) {
- dev_err(dev, "No UCTL \"refclk-frequency\"\n");
- return -EINVAL;
- }
- i = of_property_read_string(dev->of_node,
- "refclk-type-ss", &ss_clock_type);
- if (i) {
- dev_err(dev, "No UCTL \"refclk-type-ss\"\n");
- return -EINVAL;
- }
- i = of_property_read_string(dev->of_node,
- "refclk-type-hs", &hs_clock_type);
- if (i) {
- dev_err(dev, "No UCTL \"refclk-type-hs\"\n");
- return -EINVAL;
- }
- if (strcmp("dlmc_ref_clk0", ss_clock_type) == 0) {
- if (strcmp(hs_clock_type, "dlmc_ref_clk0") == 0)
- ref_clk_sel = 0;
- else if (strcmp(hs_clock_type, "pll_ref_clk") == 0)
- ref_clk_sel = 2;
- else
- dev_warn(dev, "Invalid HS clock type %s, using pll_ref_clk instead\n",
- hs_clock_type);
- } else if (strcmp(ss_clock_type, "dlmc_ref_clk1") == 0) {
- if (strcmp(hs_clock_type, "dlmc_ref_clk1") == 0)
- ref_clk_sel = 1;
- else if (strcmp(hs_clock_type, "pll_ref_clk") == 0)
- ref_clk_sel = 3;
- else {
- dev_warn(dev, "Invalid HS clock type %s, using pll_ref_clk instead\n",
- hs_clock_type);
- ref_clk_sel = 3;
- }
- } else
- dev_warn(dev, "Invalid SS clock type %s, using dlmc_ref_clk0 instead\n",
- ss_clock_type);
-
- if ((ref_clk_sel == 0 || ref_clk_sel == 1) &&
- (clock_rate != 100000000))
- dev_warn(dev, "Invalid UCTL clock rate of %u, using 100000000 instead\n",
- clock_rate);
-
- } else {
- dev_err(dev, "No USB UCTL device node\n");
- return -EINVAL;
- }
+ int div;
+ struct device *dev = octeon->dev;
+ void __iomem *uctl_ctl_reg = octeon->base + USBDRD_UCTL_CTL;
+ void __iomem *uctl_host_cfg_reg = octeon->base + USBDRD_UCTL_HOST_CFG;
/*
* Step 1: Wait for all voltages to be stable...that surely
@@ -374,6 +291,10 @@ static int dwc3_octeon_clocks_start(struct device *dev, void __iomem *base)
/* Step 4b: Select controller clock frequency. */
div = dwc3_octeon_get_divider();
+ if (div < 0) {
+ dev_err(dev, "clock divider invalid\n");
+ return div;
+ }
val = dwc3_octeon_readq(uctl_ctl_reg);
val &= ~USBDRD_UCTL_CTL_H_CLKDIV_SEL;
val |= FIELD_PREP(USBDRD_UCTL_CTL_H_CLKDIV_SEL, div);
@@ -382,8 +303,8 @@ static int dwc3_octeon_clocks_start(struct device *dev, void __iomem *base)
val = dwc3_octeon_readq(uctl_ctl_reg);
if ((div != FIELD_GET(USBDRD_UCTL_CTL_H_CLKDIV_SEL, val)) ||
(!(FIELD_GET(USBDRD_UCTL_CTL_H_CLK_EN, val)))) {
- dev_err(dev, "dwc3 controller clock init failure.\n");
- return -EINVAL;
+ dev_err(dev, "clock init failure (UCTL_CTL=%016llx)\n", val);
+ return -EINVAL;
}
/* Step 4c: Deassert the controller clock divider reset. */
@@ -396,24 +317,6 @@ static int dwc3_octeon_clocks_start(struct device *dev, void __iomem *base)
val &= ~USBDRD_UCTL_CTL_REF_CLK_SEL;
val |= FIELD_PREP(USBDRD_UCTL_CTL_REF_CLK_SEL, ref_clk_sel);
- ref_clk_fsel = 0x07;
- switch (clock_rate) {
- default:
- dev_warn(dev, "Invalid ref_clk %u, using 100000000 instead\n",
- clock_rate);
- fallthrough;
- case 100000000:
- mpll_mul = 0x19;
- if (ref_clk_sel < 2)
- ref_clk_fsel = 0x27;
- break;
- case 50000000:
- mpll_mul = 0x32;
- break;
- case 125000000:
- mpll_mul = 0x28;
- break;
- }
val &= ~USBDRD_UCTL_CTL_REF_CLK_FSEL;
val |= FIELD_PREP(USBDRD_UCTL_CTL_REF_CLK_FSEL, ref_clk_fsel);
@@ -444,9 +347,22 @@ static int dwc3_octeon_clocks_start(struct device *dev, void __iomem *base)
/* Step 8b: Wait 10 controller-clock cycles. */
udelay(10);
- /* Steo 8c: Setup power-power control. */
- if (dwc3_octeon_config_power(dev, base))
- return -EINVAL;
+ /* Step 8c: Setup power control. */
+ val = dwc3_octeon_readq(uctl_host_cfg_reg);
+ val |= USBDRD_UCTL_HOST_PPC_EN;
+ if (power_gpio == DWC3_GPIO_POWER_NONE) {
+ val &= ~USBDRD_UCTL_HOST_PPC_EN;
+ } else {
+ val |= USBDRD_UCTL_HOST_PPC_EN;
+ dwc3_octeon_config_gpio(((__force uintptr_t)octeon->base >> 24) & 1,
+ power_gpio);
+ dev_dbg(dev, "power control is using gpio%d\n", power_gpio);
+ }
+ if (power_active_low)
+ val &= ~USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN;
+ else
+ val |= USBDRD_UCTL_HOST_PPC_ACTIVE_HIGH_EN;
+ dwc3_octeon_writeq(uctl_host_cfg_reg, val);
/* Step 8d: Deassert UAHC reset signal. */
val = dwc3_octeon_readq(uctl_ctl_reg);
@@ -469,10 +385,10 @@ static int dwc3_octeon_clocks_start(struct device *dev, void __iomem *base)
return 0;
}
-static void __init dwc3_octeon_set_endian_mode(void __iomem *base)
+static void dwc3_octeon_set_endian_mode(struct dwc3_octeon *octeon)
{
u64 val;
- void __iomem *uctl_shim_cfg_reg = base + USBDRD_UCTL_SHIM_CFG;
+ void __iomem *uctl_shim_cfg_reg = octeon->base + USBDRD_UCTL_SHIM_CFG;
val = dwc3_octeon_readq(uctl_shim_cfg_reg);
val &= ~USBDRD_UCTL_SHIM_CFG_DMA_ENDIAN_MODE;
@@ -484,68 +400,146 @@ static void __init dwc3_octeon_set_endian_mode(void __iomem *base)
dwc3_octeon_writeq(uctl_shim_cfg_reg, val);
}
-static void __init dwc3_octeon_phy_reset(void __iomem *base)
+static void dwc3_octeon_phy_reset(struct dwc3_octeon *octeon)
{
u64 val;
- void __iomem *uctl_ctl_reg = base + USBDRD_UCTL_CTL;
+ void __iomem *uctl_ctl_reg = octeon->base + USBDRD_UCTL_CTL;
val = dwc3_octeon_readq(uctl_ctl_reg);
val &= ~USBDRD_UCTL_CTL_UPHY_RST;
dwc3_octeon_writeq(uctl_ctl_reg, val);
}
-static int __init dwc3_octeon_device_init(void)
+static int dwc3_octeon_probe(struct platform_device *pdev)
{
- const char compat_node_name[] = "cavium,octeon-7130-usb-uctl";
- struct platform_device *pdev;
- struct device_node *node;
- struct resource *res;
- void __iomem *base;
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct dwc3_octeon *octeon;
+ const char *hs_clock_type, *ss_clock_type;
+ int ref_clk_sel, ref_clk_fsel, mpll_mul;
+ int power_active_low, power_gpio;
+ int err, len;
+ u32 clock_rate;
- /*
- * There should only be three universal controllers, "uctl"
- * in the device tree. Two USB and a SATA, which we ignore.
- */
- node = NULL;
- do {
- node = of_find_node_by_name(node, "uctl");
- if (!node)
- return -ENODEV;
-
- if (of_device_is_compatible(node, compat_node_name)) {
- pdev = of_find_device_by_node(node);
- if (!pdev)
- return -ENODEV;
-
- /*
- * The code below maps in the registers necessary for
- * setting up the clocks and reseting PHYs. We must
- * release the resources so the dwc3 subsystem doesn't
- * know the difference.
- */
- base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
- if (IS_ERR(base)) {
- put_device(&pdev->dev);
- return PTR_ERR(base);
- }
-
- mutex_lock(&dwc3_octeon_clocks_mutex);
- if (dwc3_octeon_clocks_start(&pdev->dev, base) == 0)
- dev_info(&pdev->dev, "clocks initialized.\n");
- dwc3_octeon_set_endian_mode(base);
- dwc3_octeon_phy_reset(base);
- mutex_unlock(&dwc3_octeon_clocks_mutex);
- devm_iounmap(&pdev->dev, base);
- devm_release_mem_region(&pdev->dev, res->start,
- resource_size(res));
- put_device(&pdev->dev);
+ if (of_property_read_u32(node, "refclk-frequency", &clock_rate)) {
+ dev_err(dev, "No UCTL \"refclk-frequency\"\n");
+ return -EINVAL;
+ }
+ if (of_property_read_string(node, "refclk-type-ss", &ss_clock_type)) {
+ dev_err(dev, "No UCTL \"refclk-type-ss\"\n");
+ return -EINVAL;
+ }
+ if (of_property_read_string(node, "refclk-type-hs", &hs_clock_type)) {
+ dev_err(dev, "No UCTL \"refclk-type-hs\"\n");
+ return -EINVAL;
+ }
+
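+ /* Map the SS/HS reference clock pair onto REF_CLK_SEL:
+  * 0 = both on dlmc_ref_clk0, 1 = both on dlmc_ref_clk1,
+  * 2 = SS dlmc_ref_clk0 + HS pll_ref_clk, 3 = SS dlmc_ref_clk1 + HS pll_ref_clk.
+  */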
+ ref_clk_sel = 2;
+ if (strcmp("dlmc_ref_clk0", ss_clock_type) == 0) {
+ if (strcmp(hs_clock_type, "dlmc_ref_clk0") == 0)
+ ref_clk_sel = 0;
+ else if (strcmp(hs_clock_type, "pll_ref_clk"))
+ dev_warn(dev, "Invalid HS clock type %s, using pll_ref_clk instead\n",
+ hs_clock_type);
+ } else if (strcmp(ss_clock_type, "dlmc_ref_clk1") == 0) {
+ if (strcmp(hs_clock_type, "dlmc_ref_clk1") == 0) {
+ ref_clk_sel = 1;
+ } else {
+ ref_clk_sel = 3;
+ if (strcmp(hs_clock_type, "pll_ref_clk"))
+ dev_warn(dev, "Invalid HS clock type %s, using pll_ref_clk instead\n",
+ hs_clock_type);
}
- } while (node != NULL);
+ } else {
+ dev_warn(dev, "Invalid SS clock type %s, using dlmc_ref_clk0 instead\n",
+ ss_clock_type);
+ }
- return 0;
+ ref_clk_fsel = 0x07;
+ switch (clock_rate) {
+ default:
+ dev_warn(dev, "Invalid ref_clk %u, using 100000000 instead\n",
+ clock_rate);
+ fallthrough;
+ case 100000000:
+ mpll_mul = 0x19;
+ if (ref_clk_sel < 2)
+ ref_clk_fsel = 0x27;
+ break;
+ case 50000000:
+ mpll_mul = 0x32;
+ break;
+ case 125000000:
+ mpll_mul = 0x28;
+ break;
+ }
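+ /* (the mpll_mul / ref_clk_fsel values above reuse the table from the
+  *  original OCTEON UCTL setup code for 50/100/125 MHz reference clocks)
+  */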
+
+ power_gpio = DWC3_GPIO_POWER_NONE;
+ power_active_low = 0;
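+ /* The optional "power" DT property has two or three cells: the second
+  * cell selects the power GPIO and, when a third cell is present, its
+  * bit 0 marks the GPIO as active-low.
+  */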
+ if (of_find_property(node, "power", &len)) {
+ u32 gpio_pwr[3];
+
+ switch (len) {
+ case 8:
+ of_property_read_u32_array(node, "power", gpio_pwr, 2);
+ break;
+ case 12:
+ of_property_read_u32_array(node, "power", gpio_pwr, 3);
+ power_active_low = gpio_pwr[2] & 0x01;
+ break;
+ default:
+ dev_err(dev, "invalid power configuration\n");
+ return -EINVAL;
+ }
+ power_gpio = gpio_pwr[1];
+ }
+
+ octeon = devm_kzalloc(dev, sizeof(*octeon), GFP_KERNEL);
+ if (!octeon)
+ return -ENOMEM;
+
+ octeon->dev = dev;
+ octeon->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(octeon->base))
+ return PTR_ERR(octeon->base);
+
+ err = dwc3_octeon_setup(octeon, ref_clk_sel, ref_clk_fsel, mpll_mul,
+ power_gpio, power_active_low);
+ if (err)
+ return err;
+
+ dwc3_octeon_set_endian_mode(octeon);
+ dwc3_octeon_phy_reset(octeon);
+
+ platform_set_drvdata(pdev, octeon);
+
+ return of_platform_populate(node, NULL, NULL, dev);
+}
+
+static void dwc3_octeon_remove(struct platform_device *pdev)
+{
+ struct dwc3_octeon *octeon = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(octeon->dev);
}
-device_initcall(dwc3_octeon_device_init);
-MODULE_AUTHOR("David Daney <david.daney@cavium.com>");
+static const struct of_device_id dwc3_octeon_of_match[] = {
+ { .compatible = "cavium,octeon-7130-usb-uctl" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, dwc3_octeon_of_match);
+
+static struct platform_driver dwc3_octeon_driver = {
+ .probe = dwc3_octeon_probe,
+ .remove_new = dwc3_octeon_remove,
+ .driver = {
+ .name = "dwc3-octeon",
+ .of_match_table = dwc3_octeon_of_match,
+ },
+};
+module_platform_driver(dwc3_octeon_driver);
+
+MODULE_ALIAS("platform:dwc3-octeon");
+MODULE_AUTHOR("Ladislav Michl <ladis@linux-mips.org>");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("USB driver for OCTEON III SoC");
+MODULE_DESCRIPTION("DesignWare USB3 OCTEON III Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 7e6ad8fe61a5..d1539fc9eabd 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -170,7 +170,6 @@ static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
static const struct of_device_id of_dwc3_simple_match[] = {
{ .compatible = "rockchip,rk3399-dwc3" },
- { .compatible = "cavium,octeon-7130-usb-uctl" },
{ .compatible = "sprd,sc9860-dwc3" },
{ .compatible = "allwinner,sun50i-h6-dwc3" },
{ .compatible = "hisilicon,hi3670-dwc3" },
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 336db8f92afa..b3592bcb0f96 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -208,6 +208,9 @@ config USB_F_UVC
config USB_F_MIDI
tristate
+config USB_F_MIDI2
+ tristate
+
config USB_F_HID
tristate
@@ -436,6 +439,21 @@ config USB_CONFIGFS_F_MIDI
connections can then be made on the gadget system, using
ALSA's aconnect utility etc.
+config USB_CONFIGFS_F_MIDI2
+ bool "MIDI 2.0 function"
+ depends on USB_CONFIGFS
+ depends on SND
+ select USB_LIBCOMPOSITE
+ select SND_UMP
+ select SND_UMP_LEGACY_RAWMIDI
+ select USB_F_MIDI2
+ help
+ The MIDI 2.0 function driver provides a generic emulated
+ USB MIDI 2.0 interface, looped back to an ALSA UMP rawmidi
+ device on the gadget host. It supports the UMP 1.1 spec and
+ responds to UMP Stream messages for UMP Endpoint and Function
+ Block information / configuration.
+
config USB_CONFIGFS_F_HID
bool "HID function"
depends on USB_CONFIGFS
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index dd9b90481b4c..0ace45b66a31 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -170,33 +170,27 @@ int config_ep_by_speed_and_alt(struct usb_gadget *g,
/* select desired speed */
switch (g->speed) {
case USB_SPEED_SUPER_PLUS:
- if (gadget_is_superspeed_plus(g)) {
- if (f->ssp_descriptors) {
- speed_desc = f->ssp_descriptors;
- want_comp_desc = 1;
- break;
- }
- incomplete_desc = true;
+ if (f->ssp_descriptors) {
+ speed_desc = f->ssp_descriptors;
+ want_comp_desc = 1;
+ break;
}
+ incomplete_desc = true;
fallthrough;
case USB_SPEED_SUPER:
- if (gadget_is_superspeed(g)) {
- if (f->ss_descriptors) {
- speed_desc = f->ss_descriptors;
- want_comp_desc = 1;
- break;
- }
- incomplete_desc = true;
+ if (f->ss_descriptors) {
+ speed_desc = f->ss_descriptors;
+ want_comp_desc = 1;
+ break;
}
+ incomplete_desc = true;
fallthrough;
case USB_SPEED_HIGH:
- if (gadget_is_dualspeed(g)) {
- if (f->hs_descriptors) {
- speed_desc = f->hs_descriptors;
- break;
- }
- incomplete_desc = true;
+ if (f->hs_descriptors) {
+ speed_desc = f->hs_descriptors;
+ break;
}
+ incomplete_desc = true;
fallthrough;
default:
speed_desc = f->fs_descriptors;
diff --git a/drivers/usb/gadget/config.c b/drivers/usb/gadget/config.c
index 05507606b2b4..b1f625245713 100644
--- a/drivers/usb/gadget/config.c
+++ b/drivers/usb/gadget/config.c
@@ -162,8 +162,6 @@ int usb_assign_descriptors(struct usb_function *f,
struct usb_descriptor_header **ss,
struct usb_descriptor_header **ssp)
{
- struct usb_gadget *g = f->config->cdev->gadget;
-
/* super-speed-plus descriptor falls back to super-speed one,
* if such a descriptor was provided, thus avoiding a NULL
* pointer dereference if a 5gbps capable gadget is used with
@@ -177,17 +175,17 @@ int usb_assign_descriptors(struct usb_function *f,
if (!f->fs_descriptors)
goto err;
}
- if (hs && gadget_is_dualspeed(g)) {
+ if (hs) {
f->hs_descriptors = usb_copy_descriptors(hs);
if (!f->hs_descriptors)
goto err;
}
- if (ss && gadget_is_superspeed(g)) {
+ if (ss) {
f->ss_descriptors = usb_copy_descriptors(ss);
if (!f->ss_descriptors)
goto err;
}
- if (ssp && gadget_is_superspeed_plus(g)) {
+ if (ssp) {
f->ssp_descriptors = usb_copy_descriptors(ssp);
if (!f->ssp_descriptors)
goto err;
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index 5d3a6cf02218..87917a7d4a9b 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -44,6 +44,8 @@ usb_f_uvc-y := f_uvc.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_configfs.o
obj-$(CONFIG_USB_F_UVC) += usb_f_uvc.o
usb_f_midi-y := f_midi.o
obj-$(CONFIG_USB_F_MIDI) += usb_f_midi.o
+usb_f_midi2-y := f_midi2.o
+obj-$(CONFIG_USB_F_MIDI2) += usb_f_midi2.o
usb_f_hid-y := f_hid.o
obj-$(CONFIG_USB_F_HID) += usb_f_hid.o
usb_f_printer-y := f_printer.o
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index cb523f118f04..f616059c5e1e 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -691,10 +691,8 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
goto fail;
dev_dbg(&cdev->gadget->dev,
- "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+ "acm ttyGS%d: IN/%s OUT/%s NOTIFY/%s\n",
acm->port_num,
- gadget_is_superspeed(c->cdev->gadget) ? "super" :
- gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
acm->port.in->name, acm->port.out->name,
acm->notify->name);
return 0;
diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c
index c6e63ad77a40..f55f60639e42 100644
--- a/drivers/usb/gadget/function/f_ecm.c
+++ b/drivers/usb/gadget/function/f_ecm.c
@@ -65,17 +65,6 @@ static inline struct f_ecm *func_to_ecm(struct usb_function *f)
return container_of(f, struct f_ecm, port.func);
}
-/* peak (theoretical) bulk transfer rate in bits-per-second */
-static inline unsigned ecm_bitrate(struct usb_gadget *g)
-{
- if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
- return 13 * 1024 * 8 * 1000 * 8;
- else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
- return 13 * 512 * 8 * 1000 * 8;
- else
- return 19 * 64 * 1 * 1000 * 8;
-}
-
/*-------------------------------------------------------------------------*/
/*
@@ -411,10 +400,10 @@ static void ecm_do_notify(struct f_ecm *ecm)
/* SPEED_CHANGE data is up/down speeds in bits/sec */
data = req->buf + sizeof *event;
- data[0] = cpu_to_le32(ecm_bitrate(cdev->gadget));
+ data[0] = cpu_to_le32(gether_bitrate(cdev->gadget));
data[1] = data[0];
- DBG(cdev, "notify speed %d\n", ecm_bitrate(cdev->gadget));
+ DBG(cdev, "notify speed %d\n", gether_bitrate(cdev->gadget));
ecm->notify_state = ECM_NOTIFY_NONE;
break;
}
@@ -799,9 +788,7 @@ ecm_bind(struct usb_configuration *c, struct usb_function *f)
ecm->port.open = ecm_open;
ecm->port.close = ecm_close;
- DBG(cdev, "CDC Ethernet: %s speed IN/%s OUT/%s NOTIFY/%s\n",
- gadget_is_superspeed(c->cdev->gadget) ? "super" :
- gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ DBG(cdev, "CDC Ethernet: IN/%s OUT/%s NOTIFY/%s\n",
ecm->port.in_ep->name, ecm->port.out_ep->name,
ecm->notify->name);
return 0;
diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
index 5d38f29bda72..3b445bd88498 100644
--- a/drivers/usb/gadget/function/f_eem.c
+++ b/drivers/usb/gadget/function/f_eem.c
@@ -311,9 +311,7 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f)
if (status)
goto fail;
- DBG(cdev, "CDC Ethernet (EEM): %s speed IN/%s OUT/%s\n",
- gadget_is_superspeed(c->cdev->gadget) ? "super" :
- gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ DBG(cdev, "CDC Ethernet (EEM): IN/%s OUT/%s\n",
eem->port.in_ep->name, eem->port.out_ep->name);
return 0;
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index ae41f556eb75..17ac6ace0cff 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -211,9 +211,7 @@ autoconf_fail:
if (ret)
return ret;
- DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
- (gadget_is_superspeed(c->cdev->gadget) ? "super" :
- (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")),
+ DBG(cdev, "%s: IN/%s, OUT/%s\n",
f->name, loop->in_ep->name, loop->out_ep->name);
return 0;
}
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index da07e45ae6df..722a3ab2b337 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -927,7 +927,7 @@ static void invalidate_sub(struct fsg_lun *curlun)
{
struct file *filp = curlun->filp;
struct inode *inode = file_inode(filp);
- unsigned long rc;
+ unsigned long __maybe_unused rc;
rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index fddf539008a9..2d02f25f9597 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1023,40 +1023,30 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
if (!f->fs_descriptors)
goto fail_f_midi;
- if (gadget_is_dualspeed(c->cdev->gadget)) {
- bulk_in_desc.wMaxPacketSize = cpu_to_le16(512);
- bulk_out_desc.wMaxPacketSize = cpu_to_le16(512);
- f->hs_descriptors = usb_copy_descriptors(midi_function);
- if (!f->hs_descriptors)
- goto fail_f_midi;
- }
+ bulk_in_desc.wMaxPacketSize = cpu_to_le16(512);
+ bulk_out_desc.wMaxPacketSize = cpu_to_le16(512);
+ f->hs_descriptors = usb_copy_descriptors(midi_function);
+ if (!f->hs_descriptors)
+ goto fail_f_midi;
- if (gadget_is_superspeed(c->cdev->gadget)) {
- bulk_in_desc.wMaxPacketSize = cpu_to_le16(1024);
- bulk_out_desc.wMaxPacketSize = cpu_to_le16(1024);
- i = endpoint_descriptor_index;
- midi_function[i++] = (struct usb_descriptor_header *)
- &bulk_out_desc;
- midi_function[i++] = (struct usb_descriptor_header *)
- &bulk_out_ss_comp_desc;
- midi_function[i++] = (struct usb_descriptor_header *)
- &ms_out_desc;
- midi_function[i++] = (struct usb_descriptor_header *)
- &bulk_in_desc;
- midi_function[i++] = (struct usb_descriptor_header *)
- &bulk_in_ss_comp_desc;
- midi_function[i++] = (struct usb_descriptor_header *)
- &ms_in_desc;
- f->ss_descriptors = usb_copy_descriptors(midi_function);
- if (!f->ss_descriptors)
- goto fail_f_midi;
-
- if (gadget_is_superspeed_plus(c->cdev->gadget)) {
- f->ssp_descriptors = usb_copy_descriptors(midi_function);
- if (!f->ssp_descriptors)
- goto fail_f_midi;
- }
- }
+ bulk_in_desc.wMaxPacketSize = cpu_to_le16(1024);
+ bulk_out_desc.wMaxPacketSize = cpu_to_le16(1024);
+ i = endpoint_descriptor_index;
+ midi_function[i++] = (struct usb_descriptor_header *)
+ &bulk_out_desc;
+ midi_function[i++] = (struct usb_descriptor_header *)
+ &bulk_out_ss_comp_desc;
+ midi_function[i++] = (struct usb_descriptor_header *)
+ &ms_out_desc;
+ midi_function[i++] = (struct usb_descriptor_header *)
+ &bulk_in_desc;
+ midi_function[i++] = (struct usb_descriptor_header *)
+ &bulk_in_ss_comp_desc;
+ midi_function[i++] = (struct usb_descriptor_header *)
+ &ms_in_desc;
+ f->ss_descriptors = usb_copy_descriptors(midi_function);
+ if (!f->ss_descriptors)
+ goto fail_f_midi;
kfree(midi_function);
diff --git a/drivers/usb/gadget/function/f_midi2.c b/drivers/usb/gadget/function/f_midi2.c
new file mode 100644
index 000000000000..ec8cd7c7bbfc
--- /dev/null
+++ b/drivers/usb/gadget/function/f_midi2.c
@@ -0,0 +1,2871 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * f_midi2.c -- USB MIDI 2.0 class function driver
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <sound/core.h>
+#include <sound/control.h>
+#include <sound/ump.h>
+#include <sound/ump_msg.h>
+#include <sound/ump_convert.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/audio.h>
+#include <linux/usb/midi-v2.h>
+
+#include "u_f.h"
+#include "u_midi2.h"
+
+struct f_midi2;
+struct f_midi2_ep;
+struct f_midi2_usb_ep;
+
+/* Context for each USB request */
+struct f_midi2_req_ctx {
+ struct f_midi2_usb_ep *usb_ep; /* belonging USB EP */
+ unsigned int index; /* array index: 0-31 */
+ struct usb_request *req; /* assigned request */
+};
+
+/* Resources for a USB Endpoint */
+struct f_midi2_usb_ep {
+ struct f_midi2 *card; /* belonging card */
+ struct f_midi2_ep *ep; /* belonging UMP EP (optional) */
+ struct usb_ep *usb_ep; /* assigned USB EP */
+ void (*complete)(struct usb_ep *usb_ep, struct usb_request *req);
+ unsigned long free_reqs; /* bitmap for unused requests */
+ unsigned int num_reqs; /* number of allocated requests */
+ struct f_midi2_req_ctx *reqs; /* request context array */
+};
+
+/* Resources for UMP Function Block (and USB Group Terminal Block) */
+struct f_midi2_block {
+ struct f_midi2_block_info info; /* FB info, copied from configfs */
+ struct snd_ump_block *fb; /* assigned FB */
+ unsigned int gtb_id; /* assigned GTB id */
+ unsigned int string_id; /* assigned string id */
+};
+
+/* Temporary buffer for altset 0 MIDI 1.0 handling */
+struct f_midi2_midi1_port {
+ unsigned int pending; /* pending bytes on the input buffer */
+ u8 buf[32]; /* raw MIDI 1.0 byte input */
+ u8 state; /* running status */
+ u8 data[2]; /* rendered USB MIDI 1.0 packet data */
+};
+
+/* MIDI 1.0 message states */
+enum {
+ STATE_INITIAL = 0, /* pseudo state */
+ STATE_1PARAM,
+ STATE_2PARAM_1,
+ STATE_2PARAM_2,
+ STATE_SYSEX_0,
+ STATE_SYSEX_1,
+ STATE_SYSEX_2,
+ STATE_REAL_TIME,
+ STATE_FINISHED, /* pseudo state */
+};
+
+/* Resources for UMP Endpoint */
+struct f_midi2_ep {
+ struct snd_ump_endpoint *ump; /* assigned UMP EP */
+ struct f_midi2 *card; /* belonging MIDI 2.0 device */
+
+ struct f_midi2_ep_info info; /* UMP EP info, copied from configfs */
+ unsigned int num_blks; /* number of FBs */
+ struct f_midi2_block blks[SNDRV_UMP_MAX_BLOCKS]; /* UMP FBs */
+
+ struct f_midi2_usb_ep ep_in; /* USB MIDI EP-in */
+ struct f_midi2_usb_ep ep_out; /* USB MIDI EP-out */
+
+ u8 in_group_to_cable[SNDRV_UMP_MAX_GROUPS]; /* map to cable; 1-based! */
+};
+
+/* indices for USB strings */
+enum {
+ STR_IFACE = 0,
+ STR_GTB1 = 1,
+};
+
+/* 1-based GTB id to string id */
+#define gtb_to_str_id(id) (STR_GTB1 + (id) - 1)
+
+/* mapping from MIDI 1.0 cable to UMP group */
+struct midi1_cable_mapping {
+ struct f_midi2_ep *ep;
+ unsigned char block;
+ unsigned char group;
+};
+
+/* operation mode */
+enum {
+ MIDI_OP_MODE_UNSET, /* no altset set yet */
+ MIDI_OP_MODE_MIDI1, /* MIDI 1.0 (altset 0) is used */
+ MIDI_OP_MODE_MIDI2, /* MIDI 2.0 (altset 1) is used */
+};
+
+/* Resources for MIDI 2.0 Device */
+struct f_midi2 {
+ struct usb_function func;
+ struct usb_gadget *gadget;
+ struct snd_card *card;
+
+ /* MIDI 1.0 in/out USB EPs */
+ struct f_midi2_usb_ep midi1_ep_in;
+ struct f_midi2_usb_ep midi1_ep_out;
+
+ /* number of MIDI 1.0 I/O cables */
+ unsigned int num_midi1_in;
+ unsigned int num_midi1_out;
+
+ /* conversion for MIDI 1.0 EP-in */
+ struct f_midi2_midi1_port midi1_port[MAX_CABLES];
+ /* conversion for MIDI 1.0 EP-out */
+ struct ump_cvt_to_ump midi1_ump_cvt;
+ /* mapping between cables and UMP groups */
+ struct midi1_cable_mapping in_cable_mapping[MAX_CABLES];
+ struct midi1_cable_mapping out_cable_mapping[MAX_CABLES];
+
+ int midi_if; /* USB MIDI interface number */
+ int operation_mode; /* current operation mode */
+
+ spinlock_t queue_lock;
+
+ struct f_midi2_card_info info; /* card info, copied from configfs */
+
+ unsigned int num_eps;
+ struct f_midi2_ep midi2_eps[MAX_UMP_EPS];
+
+ unsigned int total_blocks; /* total number of blocks of all EPs */
+ struct usb_string *string_defs;
+ struct usb_string *strings;
+};
+
+#define func_to_midi2(f) container_of(f, struct f_midi2, func)
+
+/* get EP name string */
+static const char *ump_ep_name(const struct f_midi2_ep *ep)
+{
+ return ep->info.ep_name ? ep->info.ep_name : "MIDI 2.0 Gadget";
+}
+
+/* get EP product ID string */
+static const char *ump_product_id(const struct f_midi2_ep *ep)
+{
+ return ep->info.product_id ? ep->info.product_id : "Unique Product ID";
+}
+
+/* get FB name string */
+static const char *ump_fb_name(const struct f_midi2_block_info *info)
+{
+ return info->name ? info->name : "MIDI 2.0 Gadget I/O";
+}
+
+/*
+ * USB Descriptor Definitions
+ */
+/* GTB header descriptor */
+static struct usb_ms20_gr_trm_block_header_descriptor gtb_header_desc = {
+ .bLength = sizeof(gtb_header_desc),
+ .bDescriptorType = USB_DT_CS_GR_TRM_BLOCK,
+ .bDescriptorSubtype = USB_MS_GR_TRM_BLOCK_HEADER,
+ .wTotalLength = __cpu_to_le16(0x12), // to be filled
+};
+
+/* GTB descriptor template: most items are replaced dynamically */
+static struct usb_ms20_gr_trm_block_descriptor gtb_desc = {
+ .bLength = sizeof(gtb_desc),
+ .bDescriptorType = USB_DT_CS_GR_TRM_BLOCK,
+ .bDescriptorSubtype = USB_MS_GR_TRM_BLOCK,
+ .bGrpTrmBlkID = 0x01,
+ .bGrpTrmBlkType = USB_MS_GR_TRM_BLOCK_TYPE_BIDIRECTIONAL,
+ .nGroupTrm = 0x00,
+ .nNumGroupTrm = 1,
+ .iBlockItem = 0,
+ .bMIDIProtocol = USB_MS_MIDI_PROTO_1_0_64,
+ .wMaxInputBandwidth = 0,
+ .wMaxOutputBandwidth = 0,
+};
+
+DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
+DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16);
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
+DECLARE_USB_MS20_ENDPOINT_DESCRIPTOR(32);
+
+#define EP_MAX_PACKET_INT 8
+
+/* Audio Control Interface */
+static struct usb_interface_descriptor midi2_audio_if_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0, // to be filled
+ .bNumEndpoints = 0,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+ .bInterfaceProtocol = 0,
+ .iInterface = 0,
+};
+
+static struct uac1_ac_header_descriptor_1 midi2_audio_class_desc = {
+ .bLength = 0x09,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = 0x01,
+ .bcdADC = __cpu_to_le16(0x0100),
+ .wTotalLength = __cpu_to_le16(0x0009),
+ .bInCollection = 0x01,
+ .baInterfaceNr = { 0x01 }, // to be filled
+};
+
+/* MIDI 1.0 Streaming Interface (altset 0) */
+static struct usb_interface_descriptor midi2_midi1_if_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0, // to be filled
+ .bAlternateSetting = 0,
+ .bNumEndpoints = 2, // to be filled
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_MIDISTREAMING,
+ .bInterfaceProtocol = 0,
+ .iInterface = 0, // to be filled
+};
+
+static struct usb_ms_header_descriptor midi2_midi1_class_desc = {
+ .bLength = 0x07,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = USB_MS_HEADER,
+ .bcdMSC = __cpu_to_le16(0x0100),
+ .wTotalLength = __cpu_to_le16(0x41), // to be calculated
+};
+
+/* MIDI 1.0 EP OUT */
+static struct usb_endpoint_descriptor midi2_midi1_ep_out_desc = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT | 0, // set up dynamically
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_ss_ep_comp_descriptor midi2_midi1_ep_out_ss_comp_desc = {
+ .bLength = sizeof(midi2_midi1_ep_out_ss_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_ms_endpoint_descriptor_16 midi2_midi1_ep_out_class_desc = {
+ .bLength = 0x05, // to be filled
+ .bDescriptorType = USB_DT_CS_ENDPOINT,
+ .bDescriptorSubtype = USB_MS_GENERAL,
+ .bNumEmbMIDIJack = 1,
+ .baAssocJackID = { 0x01 },
+};
+
+/* MIDI 1.0 EP IN */
+static struct usb_endpoint_descriptor midi2_midi1_ep_in_desc = {
+ .bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN | 0, // set up dynamically
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_ss_ep_comp_descriptor midi2_midi1_ep_in_ss_comp_desc = {
+ .bLength = sizeof(midi2_midi1_ep_in_ss_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_ms_endpoint_descriptor_16 midi2_midi1_ep_in_class_desc = {
+ .bLength = 0x05, // to be filled
+ .bDescriptorType = USB_DT_CS_ENDPOINT,
+ .bDescriptorSubtype = USB_MS_GENERAL,
+ .bNumEmbMIDIJack = 1,
+ .baAssocJackID = { 0x03 },
+};
+
+/* MIDI 2.0 Streaming Interface (altset 1) */
+static struct usb_interface_descriptor midi2_midi2_if_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0, // to be filled
+ .bAlternateSetting = 1,
+ .bNumEndpoints = 2, // to be filled
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_MIDISTREAMING,
+ .bInterfaceProtocol = 0,
+ .iInterface = 0, // to be filled
+};
+
+static struct usb_ms_header_descriptor midi2_midi2_class_desc = {
+ .bLength = 0x07,
+ .bDescriptorType = USB_DT_CS_INTERFACE,
+ .bDescriptorSubtype = USB_MS_HEADER,
+ .bcdMSC = __cpu_to_le16(0x0200),
+ .wTotalLength = __cpu_to_le16(0x07),
+};
+
+/* MIDI 2.0 EP OUT */
+static struct usb_endpoint_descriptor midi2_midi2_ep_out_desc[MAX_UMP_EPS];
+
+static struct usb_ss_ep_comp_descriptor midi2_midi2_ep_out_ss_comp_desc = {
+ .bLength = sizeof(midi2_midi2_ep_out_ss_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_ms20_endpoint_descriptor_32 midi2_midi2_ep_out_class_desc[MAX_UMP_EPS];
+
+/* MIDI 2.0 EP IN */
+static struct usb_endpoint_descriptor midi2_midi2_ep_in_desc[MAX_UMP_EPS];
+
+static struct usb_ss_ep_comp_descriptor midi2_midi2_ep_in_ss_comp_desc = {
+ .bLength = sizeof(midi2_midi2_ep_in_ss_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_ms20_endpoint_descriptor_32 midi2_midi2_ep_in_class_desc[MAX_UMP_EPS];
+
+/* Arrays of descriptors to be created */
+static void *midi2_audio_descs[] = {
+ &midi2_audio_if_desc,
+ &midi2_audio_class_desc,
+ NULL
+};
+
+static void *midi2_midi1_descs[] = {
+ &midi2_midi1_if_desc,
+ &midi2_midi1_class_desc,
+ NULL
+};
+
+static void *midi2_midi1_ep_out_descs[] = {
+ &midi2_midi1_ep_out_desc,
+ &midi2_midi1_ep_out_class_desc,
+ NULL
+};
+
+static void *midi2_midi1_ep_in_descs[] = {
+ &midi2_midi1_ep_in_desc,
+ &midi2_midi1_ep_in_class_desc,
+ NULL
+};
+
+static void *midi2_midi1_ep_out_ss_descs[] = {
+ &midi2_midi1_ep_out_desc,
+ &midi2_midi1_ep_out_ss_comp_desc,
+ &midi2_midi1_ep_out_class_desc,
+ NULL
+};
+
+static void *midi2_midi1_ep_in_ss_descs[] = {
+ &midi2_midi1_ep_in_desc,
+ &midi2_midi1_ep_in_ss_comp_desc,
+ &midi2_midi1_ep_in_class_desc,
+ NULL
+};
+
+static void *midi2_midi2_descs[] = {
+ &midi2_midi2_if_desc,
+ &midi2_midi2_class_desc,
+ NULL
+};
+
+/*
+ * USB request handling
+ */
+
+/* get an empty request for the given EP */
+static struct usb_request *get_empty_request(struct f_midi2_usb_ep *usb_ep)
+{
+ struct usb_request *req = NULL;
+ unsigned long flags;
+ int index;
+
+ spin_lock_irqsave(&usb_ep->card->queue_lock, flags);
+ if (!usb_ep->free_reqs)
+ goto unlock;
+ index = find_first_bit(&usb_ep->free_reqs, usb_ep->num_reqs);
+ if (index >= usb_ep->num_reqs)
+ goto unlock;
+ req = usb_ep->reqs[index].req;
+ if (!req)
+ goto unlock;
+ clear_bit(index, &usb_ep->free_reqs);
+ req->length = 0;
+ unlock:
+ spin_unlock_irqrestore(&usb_ep->card->queue_lock, flags);
+ return req;
+}
+
+/* put the empty request back */
+static void put_empty_request(struct usb_request *req)
+{
+ struct f_midi2_req_ctx *ctx = req->context;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->usb_ep->card->queue_lock, flags);
+ set_bit(ctx->index, &ctx->usb_ep->free_reqs);
+ spin_unlock_irqrestore(&ctx->usb_ep->card->queue_lock, flags);
+}
+
+/*
+ * UMP v1.1 Stream message handling
+ */
+
+/* queue a request to UMP EP; request is either queued or freed after this */
+static int queue_request_ep_raw(struct usb_request *req)
+{
+ struct f_midi2_req_ctx *ctx = req->context;
+ int err;
+
+ req->complete = ctx->usb_ep->complete;
+ err = usb_ep_queue(ctx->usb_ep->usb_ep, req, GFP_ATOMIC);
+ if (err) {
+ put_empty_request(req);
+ return err;
+ }
+ return 0;
+}
+
+/* queue a request with endianness conversion */
+static int queue_request_ep_in(struct usb_request *req)
+{
+ /* UMP packets have to be converted to little-endian */
+ cpu_to_le32_array((u32 *)req->buf, req->length >> 2);
+ return queue_request_ep_raw(req);
+}
+
+/* reply a UMP packet via EP-in */
+static int reply_ep_in(struct f_midi2_ep *ep, const void *buf, int len)
+{
+ struct f_midi2_usb_ep *usb_ep = &ep->ep_in;
+ struct usb_request *req;
+
+ req = get_empty_request(usb_ep);
+ if (!req)
+ return -ENOSPC;
+
+ req->length = len;
+ memcpy(req->buf, buf, len);
+ return queue_request_ep_in(req);
+}
+
+/* reply a UMP stream EP info */
+static void reply_ump_stream_ep_info(struct f_midi2_ep *ep)
+{
+ struct snd_ump_stream_msg_ep_info rep = {
+ .type = UMP_MSG_TYPE_STREAM,
+ .status = UMP_STREAM_MSG_STATUS_EP_INFO,
+ .ump_version_major = 0x01,
+ .ump_version_minor = 0x01,
+ .num_function_blocks = ep->num_blks,
+ .static_function_block = !!ep->card->info.static_block,
+ .protocol = (UMP_STREAM_MSG_EP_INFO_CAP_MIDI1 |
+ UMP_STREAM_MSG_EP_INFO_CAP_MIDI2) >> 8,
+ };
+
+ reply_ep_in(ep, &rep, sizeof(rep));
+}
+
+/* reply a UMP EP device info */
+static void reply_ump_stream_ep_device(struct f_midi2_ep *ep)
+{
+ struct snd_ump_stream_msg_devince_info rep = {
+ .type = UMP_MSG_TYPE_STREAM,
+ .status = UMP_STREAM_MSG_STATUS_DEVICE_INFO,
+ .manufacture_id = ep->info.manufacturer,
+ .family_lsb = ep->info.family & 0xff,
+ .family_msb = (ep->info.family >> 8) & 0xff,
+ .model_lsb = ep->info.model & 0xff,
+ .model_msb = (ep->info.model >> 8) & 0xff,
+ .sw_revision = ep->info.sw_revision,
+ };
+
+ reply_ep_in(ep, &rep, sizeof(rep));
+}
+
+#define UMP_STREAM_PKT_BYTES 16 /* UMP stream packet size = 16 bytes */
+#define UMP_STREAM_EP_STR_OFF 2 /* offset of name string for EP info */
+#define UMP_STREAM_FB_STR_OFF 3 /* offset of name string for FB info */
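+/* Name bytes are packed MSB-first into each 32-bit word after the offset,
+ * so a single 16-byte packet carries up to 14 characters of an EP name or
+ * 13 characters of an FB name; longer names span multiple packets marked
+ * with the START/CONTINUE/END formats.
+ */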
+
+/* Helper to reply a string */
+static void reply_ump_stream_string(struct f_midi2_ep *ep, const u8 *name,
+ unsigned int type, unsigned int extra,
+ unsigned int start_ofs)
+{
+ struct f_midi2_usb_ep *usb_ep = &ep->ep_in;
+ struct f_midi2 *midi2 = ep->card;
+ struct usb_request *req;
+ unsigned int pos;
+ u32 *buf;
+
+ if (!*name)
+ return;
+ req = get_empty_request(usb_ep);
+ if (!req)
+ return;
+
+ buf = (u32 *)req->buf;
+ pos = start_ofs;
+ for (;;) {
+ if (pos == start_ofs) {
+ memset(buf, 0, UMP_STREAM_PKT_BYTES);
+ buf[0] = ump_stream_compose(type, 0) | extra;
+ }
+ buf[pos / 4] |= *name++ << ((3 - (pos % 4)) * 8);
+ if (!*name) {
+ if (req->length)
+ buf[0] |= UMP_STREAM_MSG_FORMAT_END << 26;
+ req->length += UMP_STREAM_PKT_BYTES;
+ break;
+ }
+ if (++pos == UMP_STREAM_PKT_BYTES) {
+ if (!req->length)
+ buf[0] |= UMP_STREAM_MSG_FORMAT_START << 26;
+ else
+ buf[0] |= UMP_STREAM_MSG_FORMAT_CONTINUE << 26;
+ req->length += UMP_STREAM_PKT_BYTES;
+ if (midi2->info.req_buf_size - req->length < UMP_STREAM_PKT_BYTES)
+ break;
+ buf += 4;
+ pos = start_ofs;
+ }
+ }
+
+ if (req->length)
+ queue_request_ep_in(req);
+ else
+ put_empty_request(req);
+}
+
+/* Reply a UMP EP name string */
+static void reply_ump_stream_ep_name(struct f_midi2_ep *ep)
+{
+ reply_ump_stream_string(ep, ump_ep_name(ep),
+ UMP_STREAM_MSG_STATUS_EP_NAME, 0,
+ UMP_STREAM_EP_STR_OFF);
+}
+
+/* Reply a UMP EP product ID string */
+static void reply_ump_stream_ep_pid(struct f_midi2_ep *ep)
+{
+ reply_ump_stream_string(ep, ump_product_id(ep),
+ UMP_STREAM_MSG_STATUS_PRODUCT_ID, 0,
+ UMP_STREAM_EP_STR_OFF);
+}
+
+/* Reply a UMP EP stream config */
+static void reply_ump_stream_ep_config(struct f_midi2_ep *ep)
+{
+ struct snd_ump_stream_msg_stream_cfg rep = {
+ .type = UMP_MSG_TYPE_STREAM,
+ .status = UMP_STREAM_MSG_STATUS_STREAM_CFG,
+ };
+
+ if ((ep->info.protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI_MASK) ==
+ SNDRV_UMP_EP_INFO_PROTO_MIDI2)
+ rep.protocol = UMP_STREAM_MSG_EP_INFO_CAP_MIDI2 >> 8;
+ else
+ rep.protocol = UMP_STREAM_MSG_EP_INFO_CAP_MIDI1 >> 8;
+
+ reply_ep_in(ep, &rep, sizeof(rep));
+}
+
+/* Reply a UMP FB info */
+static void reply_ump_stream_fb_info(struct f_midi2_ep *ep, int blk)
+{
+ struct f_midi2_block_info *b = &ep->blks[blk].info;
+ struct snd_ump_stream_msg_fb_info rep = {
+ .type = UMP_MSG_TYPE_STREAM,
+ .status = UMP_STREAM_MSG_STATUS_FB_INFO,
+ .active = !!b->active,
+ .function_block_id = blk,
+ .ui_hint = b->ui_hint,
+ .midi_10 = b->is_midi1,
+ .direction = b->direction,
+ .first_group = b->first_group,
+ .num_groups = b->num_groups,
+ .midi_ci_version = b->midi_ci_version,
+ .sysex8_streams = b->sysex8_streams,
+ };
+
+ reply_ep_in(ep, &rep, sizeof(rep));
+}
+
+/* Reply a FB name string */
+static void reply_ump_stream_fb_name(struct f_midi2_ep *ep, unsigned int blk)
+{
+ reply_ump_stream_string(ep, ump_fb_name(&ep->blks[blk].info),
+ UMP_STREAM_MSG_STATUS_FB_NAME, blk << 8,
+ UMP_STREAM_FB_STR_OFF);
+}
+
+/* Process a UMP Stream message */
+static void process_ump_stream_msg(struct f_midi2_ep *ep, const u32 *data)
+{
+ struct f_midi2 *midi2 = ep->card;
+ unsigned int format, status, blk;
+
+ format = ump_stream_message_format(*data);
+ status = ump_stream_message_status(*data);
+ switch (status) {
+ case UMP_STREAM_MSG_STATUS_EP_DISCOVERY:
+ if (format)
+ return; // invalid
+ if (data[1] & UMP_STREAM_MSG_REQUEST_EP_INFO)
+ reply_ump_stream_ep_info(ep);
+ if (data[1] & UMP_STREAM_MSG_REQUEST_DEVICE_INFO)
+ reply_ump_stream_ep_device(ep);
+ if (data[1] & UMP_STREAM_MSG_REQUEST_EP_NAME)
+ reply_ump_stream_ep_name(ep);
+ if (data[1] & UMP_STREAM_MSG_REQUEST_PRODUCT_ID)
+ reply_ump_stream_ep_pid(ep);
+ if (data[1] & UMP_STREAM_MSG_REQUEST_STREAM_CFG)
+ reply_ump_stream_ep_config(ep);
+ return;
+ case UMP_STREAM_MSG_STATUS_STREAM_CFG_REQUEST:
+ if (*data & UMP_STREAM_MSG_EP_INFO_CAP_MIDI2) {
+ ep->info.protocol = SNDRV_UMP_EP_INFO_PROTO_MIDI2;
+ DBG(midi2, "Switching Protocol to MIDI2\n");
+ } else {
+ ep->info.protocol = SNDRV_UMP_EP_INFO_PROTO_MIDI1;
+ DBG(midi2, "Switching Protocol to MIDI1\n");
+ }
+ snd_ump_switch_protocol(ep->ump, ep->info.protocol);
+ reply_ump_stream_ep_config(ep);
+ return;
+ case UMP_STREAM_MSG_STATUS_FB_DISCOVERY:
+ if (format)
+ return; // invalid
+ blk = (*data >> 8) & 0xff;
+ if (blk >= ep->num_blks)
+ return;
+ if (*data & UMP_STREAM_MSG_REQUEST_FB_INFO)
+ reply_ump_stream_fb_info(ep, blk);
+ if (*data & UMP_STREAM_MSG_REQUEST_FB_NAME)
+ reply_ump_stream_fb_name(ep, blk);
+ return;
+ }
+}
+
+/* Process UMP messages included in a USB request */
+static void process_ump(struct f_midi2_ep *ep, const struct usb_request *req)
+{
+ const u32 *data = (u32 *)req->buf;
+ int len = req->actual >> 2;
+ const u32 *in_buf = ep->ump->input_buf;
+
+ for (; len > 0; len--, data++) {
+ if (snd_ump_receive_ump_val(ep->ump, *data) <= 0)
+ continue;
+ if (ump_message_type(*in_buf) == UMP_MSG_TYPE_STREAM)
+ process_ump_stream_msg(ep, in_buf);
+ }
+}
+
+/*
+ * MIDI 2.0 UMP USB request handling
+ */
+
+/* complete handler for UMP EP-out requests */
+static void f_midi2_ep_out_complete(struct usb_ep *usb_ep,
+ struct usb_request *req)
+{
+ struct f_midi2_req_ctx *ctx = req->context;
+ struct f_midi2_ep *ep = ctx->usb_ep->ep;
+ struct f_midi2 *midi2 = ep->card;
+ int status = req->status;
+
+ if (status) {
+ DBG(midi2, "%s complete error %d: %d/%d\n",
+ usb_ep->name, status, req->actual, req->length);
+ goto error;
+ }
+
+ /* convert to UMP packet in native endianness */
+ le32_to_cpu_array((u32 *)req->buf, req->actual >> 2);
+
+ if (midi2->info.process_ump)
+ process_ump(ep, req);
+
+ snd_ump_receive(ep->ump, req->buf, req->actual & ~3);
+
+ if (midi2->operation_mode != MIDI_OP_MODE_MIDI2)
+ goto error;
+
+ if (queue_request_ep_raw(req))
+ goto error;
+ return;
+
+ error:
+ put_empty_request(req);
+}
+
+/* Transmit UMP packets received from user-space to the connected USB host */
+static void process_ump_transmit(struct f_midi2_ep *ep)
+{
+ struct f_midi2_usb_ep *usb_ep = &ep->ep_in;
+ struct f_midi2 *midi2 = ep->card;
+ struct usb_request *req;
+ int len;
+
+ if (!usb_ep->usb_ep->enabled)
+ return;
+
+ for (;;) {
+ req = get_empty_request(usb_ep);
+ if (!req)
+ break;
+ len = snd_ump_transmit(ep->ump, (u32 *)req->buf,
+ midi2->info.req_buf_size);
+ if (len <= 0) {
+ put_empty_request(req);
+ break;
+ }
+
+ req->length = len;
+ if (queue_request_ep_in(req) < 0)
+ break;
+ }
+}
+
+/* Complete handler for UMP EP-in requests */
+static void f_midi2_ep_in_complete(struct usb_ep *usb_ep,
+ struct usb_request *req)
+{
+ struct f_midi2_req_ctx *ctx = req->context;
+ struct f_midi2_ep *ep = ctx->usb_ep->ep;
+ struct f_midi2 *midi2 = ep->card;
+ int status = req->status;
+
+ put_empty_request(req);
+
+ if (status) {
+ DBG(midi2, "%s complete error %d: %d/%d\n",
+ usb_ep->name, status, req->actual, req->length);
+ return;
+ }
+
+ process_ump_transmit(ep);
+}
+
+/*
+ * MIDI1 (altset 0) USB request handling
+ */
+
+/* process one MIDI byte -- copied from f_midi.c
+ *
+ * fill the packet or request if needed
+ * returns true if the request became empty (queued)
+ */
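+/* (e.g. the raw bytes 0x90 0x3c 0x40 -- a Note On -- arriving on cable 0 are
+ *  rendered as the 4-byte USB-MIDI event packet 09 90 3c 40 once the third
+ *  byte is seen)
+ */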
+static bool process_midi1_byte(struct f_midi2 *midi2, u8 cable, u8 b,
+ struct usb_request **req_p)
+{
+ struct f_midi2_midi1_port *port = &midi2->midi1_port[cable];
+ u8 p[4] = { cable << 4, 0, 0, 0 };
+ int next_state = STATE_INITIAL;
+ struct usb_request *req = *req_p;
+
+ switch (b) {
+ case 0xf8 ... 0xff:
+ /* System Real-Time Messages */
+ p[0] |= 0x0f;
+ p[1] = b;
+ next_state = port->state;
+ port->state = STATE_REAL_TIME;
+ break;
+
+ case 0xf7:
+ /* End of SysEx */
+ switch (port->state) {
+ case STATE_SYSEX_0:
+ p[0] |= 0x05;
+ p[1] = 0xf7;
+ next_state = STATE_FINISHED;
+ break;
+ case STATE_SYSEX_1:
+ p[0] |= 0x06;
+ p[1] = port->data[0];
+ p[2] = 0xf7;
+ next_state = STATE_FINISHED;
+ break;
+ case STATE_SYSEX_2:
+ p[0] |= 0x07;
+ p[1] = port->data[0];
+ p[2] = port->data[1];
+ p[3] = 0xf7;
+ next_state = STATE_FINISHED;
+ break;
+ default:
+ /* Ignore byte */
+ next_state = port->state;
+ port->state = STATE_INITIAL;
+ }
+ break;
+
+ case 0xf0 ... 0xf6:
+ /* System Common Messages */
+ port->data[0] = port->data[1] = 0;
+ port->state = STATE_INITIAL;
+ switch (b) {
+ case 0xf0:
+ port->data[0] = b;
+ port->data[1] = 0;
+ next_state = STATE_SYSEX_1;
+ break;
+ case 0xf1:
+ case 0xf3:
+ port->data[0] = b;
+ next_state = STATE_1PARAM;
+ break;
+ case 0xf2:
+ port->data[0] = b;
+ next_state = STATE_2PARAM_1;
+ break;
+ case 0xf4:
+ case 0xf5:
+ next_state = STATE_INITIAL;
+ break;
+ case 0xf6:
+ p[0] |= 0x05;
+ p[1] = 0xf6;
+ next_state = STATE_FINISHED;
+ break;
+ }
+ break;
+
+ case 0x80 ... 0xef:
+ /*
+ * Channel Voice Messages, Channel Mode Messages
+ * and Control Change Messages.
+ */
+ port->data[0] = b;
+ port->data[1] = 0;
+ port->state = STATE_INITIAL;
+ if (b >= 0xc0 && b <= 0xdf)
+ next_state = STATE_1PARAM;
+ else
+ next_state = STATE_2PARAM_1;
+ break;
+
+ case 0x00 ... 0x7f:
+ /* Message parameters */
+ switch (port->state) {
+ case STATE_1PARAM:
+ if (port->data[0] < 0xf0)
+ p[0] |= port->data[0] >> 4;
+ else
+ p[0] |= 0x02;
+
+ p[1] = port->data[0];
+ p[2] = b;
+ /* This is to allow Running Status messages */
+ next_state = STATE_1PARAM;
+ break;
+ case STATE_2PARAM_1:
+ port->data[1] = b;
+ next_state = STATE_2PARAM_2;
+ break;
+ case STATE_2PARAM_2:
+ if (port->data[0] < 0xf0)
+ p[0] |= port->data[0] >> 4;
+ else
+ p[0] |= 0x03;
+
+ p[1] = port->data[0];
+ p[2] = port->data[1];
+ p[3] = b;
+ /* This is to allow Running Status messages */
+ next_state = STATE_2PARAM_1;
+ break;
+ case STATE_SYSEX_0:
+ port->data[0] = b;
+ next_state = STATE_SYSEX_1;
+ break;
+ case STATE_SYSEX_1:
+ port->data[1] = b;
+ next_state = STATE_SYSEX_2;
+ break;
+ case STATE_SYSEX_2:
+ p[0] |= 0x04;
+ p[1] = port->data[0];
+ p[2] = port->data[1];
+ p[3] = b;
+ next_state = STATE_SYSEX_0;
+ break;
+ }
+ break;
+ }
+
+ /* States where we have to write into the USB request */
+ if (next_state == STATE_FINISHED ||
+ port->state == STATE_SYSEX_2 ||
+ port->state == STATE_1PARAM ||
+ port->state == STATE_2PARAM_2 ||
+ port->state == STATE_REAL_TIME) {
+ memcpy(req->buf + req->length, p, sizeof(p));
+ req->length += sizeof(p);
+
+ if (next_state == STATE_FINISHED) {
+ next_state = STATE_INITIAL;
+ port->data[0] = port->data[1] = 0;
+ }
+
+ if (midi2->info.req_buf_size - req->length <= 4) {
+ queue_request_ep_raw(req);
+ *req_p = NULL;
+ return true;
+ }
+ }
+
+ port->state = next_state;
+ return false;
+}
+
+/* process all pending MIDI bytes in the internal buffer;
+ * returns true if the request became empty (i.e. it was queued),
+ * false once all pending bytes have been processed
+ */
+static bool process_midi1_pending_buf(struct f_midi2 *midi2,
+ struct usb_request **req_p)
+{
+ unsigned int cable, c;
+
+ for (cable = 0; cable < midi2->num_midi1_in; cable++) {
+ struct f_midi2_midi1_port *port = &midi2->midi1_port[cable];
+
+ if (!port->pending)
+ continue;
+ for (c = 0; c < port->pending; c++) {
+ if (process_midi1_byte(midi2, cable, port->buf[c],
+ req_p)) {
+ port->pending -= c;
+ if (port->pending)
+ memmove(port->buf, port->buf + c,
+ port->pending);
+ return true;
+ }
+ }
+ port->pending = 0;
+ }
+
+ return false;
+}
+
+/* copy the MIDI bytes into the per-cable temporary buffer */
+static void fill_midi1_pending_buf(struct f_midi2 *midi2, u8 cable, u8 *buf,
+ unsigned int size)
+{
+ struct f_midi2_midi1_port *port = &midi2->midi1_port[cable];
+
+ if (port->pending + size > sizeof(port->buf))
+ return;
+ memcpy(port->buf + port->pending, buf, size);
+ port->pending += size;
+}
+
+/* try to process data given from the associated UMP stream */
+static void process_midi1_transmit(struct f_midi2 *midi2)
+{
+ struct f_midi2_usb_ep *usb_ep = &midi2->midi1_ep_in;
+ struct f_midi2_ep *ep = &midi2->midi2_eps[0];
+ struct usb_request *req = NULL;
+ /* 12 is the largest outcome (4 MIDI1 cmds) for a single UMP packet */
+ unsigned char outbuf[12];
+ unsigned char group, cable;
+ int len, size;
+ u32 ump;
+
+ if (!usb_ep->usb_ep || !usb_ep->usb_ep->enabled)
+ return;
+
+ for (;;) {
+ if (!req) {
+ req = get_empty_request(usb_ep);
+ if (!req)
+ break;
+ }
+
+ if (process_midi1_pending_buf(midi2, &req))
+ continue;
+
+ len = snd_ump_transmit(ep->ump, &ump, 4);
+ if (len <= 0)
+ break;
+ if (snd_ump_receive_ump_val(ep->ump, ump) <= 0)
+ continue;
+ size = snd_ump_convert_from_ump(ep->ump->input_buf, outbuf,
+ &group);
+ if (size <= 0)
+ continue;
+ cable = ep->in_group_to_cable[group];
+ if (!cable)
+ continue;
+ cable--; /* to 0-base */
+ fill_midi1_pending_buf(midi2, cable, outbuf, size);
+ }
+
+ if (req) {
+ if (req->length)
+ queue_request_ep_raw(req);
+ else
+ put_empty_request(req);
+ }
+}
+
+/* complete handler for MIDI1 EP-in requests */
+static void f_midi2_midi1_ep_in_complete(struct usb_ep *usb_ep,
+ struct usb_request *req)
+{
+ struct f_midi2_req_ctx *ctx = req->context;
+ struct f_midi2 *midi2 = ctx->usb_ep->card;
+ int status = req->status;
+
+ put_empty_request(req);
+
+ if (status) {
+ DBG(midi2, "%s complete error %d: %d/%d\n",
+ usb_ep->name, status, req->actual, req->length);
+ return;
+ }
+
+ process_midi1_transmit(midi2);
+}
+
+/* complete handler for MIDI1 EP-out requests */
+static void f_midi2_midi1_ep_out_complete(struct usb_ep *usb_ep,
+ struct usb_request *req)
+{
+ struct f_midi2_req_ctx *ctx = req->context;
+ struct f_midi2 *midi2 = ctx->usb_ep->card;
+ struct f_midi2_ep *ep;
+ struct ump_cvt_to_ump *cvt = &midi2->midi1_ump_cvt;
+ static const u8 midi1_packet_bytes[16] = {
+ 0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1
+ };
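+ /* e.g. CIN 0x8/0x9 (note off/on) and 0xb (control change) carry three
+  * MIDI bytes, 0xc/0xd (program change, channel pressure) carry two and
+  * 0xf (single byte) carries one */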
+ unsigned int group, cable, bytes, c, len;
+ int status = req->status;
+ const u8 *buf = req->buf;
+
+ if (status) {
+ DBG(midi2, "%s complete error %d: %d/%d\n",
+ usb_ep->name, status, req->actual, req->length);
+ goto error;
+ }
+
+ len = req->actual >> 2;
+ for (; len; len--, buf += 4) {
+ cable = *buf >> 4;
+ ep = midi2->out_cable_mapping[cable].ep;
+ if (!ep)
+ continue;
+ group = midi2->out_cable_mapping[cable].group;
+ bytes = midi1_packet_bytes[*buf & 0x0f];
+ for (c = 0; c < bytes; c++) {
+ snd_ump_convert_to_ump(cvt, group, ep->info.protocol,
+ buf[c + 1]);
+ if (cvt->ump_bytes) {
+ snd_ump_receive(ep->ump, cvt->ump,
+ cvt->ump_bytes);
+ cvt->ump_bytes = 0;
+ }
+ }
+ }
+
+ if (midi2->operation_mode != MIDI_OP_MODE_MIDI1)
+ goto error;
+
+ if (queue_request_ep_raw(req))
+ goto error;
+ return;
+
+ error:
+ put_empty_request(req);
+}
+
+/*
+ * Common EP handling helpers
+ */
+
+/* Start MIDI EP */
+static int f_midi2_start_ep(struct f_midi2_usb_ep *usb_ep,
+ struct usb_function *fn)
+{
+ int err;
+
+ if (!usb_ep->usb_ep)
+ return 0;
+
+ usb_ep_disable(usb_ep->usb_ep);
+ err = config_ep_by_speed(usb_ep->card->gadget, fn, usb_ep->usb_ep);
+ if (err)
+ return err;
+ return usb_ep_enable(usb_ep->usb_ep);
+}
+
+/* Drop pending requests */
+static void f_midi2_drop_reqs(struct f_midi2_usb_ep *usb_ep)
+{
+ int i;
+
+ if (!usb_ep->usb_ep || !usb_ep->num_reqs)
+ return;
+
+ for (i = 0; i < usb_ep->num_reqs; i++) {
+ if (!test_bit(i, &usb_ep->free_reqs) && usb_ep->reqs[i].req) {
+ usb_ep_dequeue(usb_ep->usb_ep, usb_ep->reqs[i].req);
+ set_bit(i, &usb_ep->free_reqs);
+ }
+ }
+}
+
+/* Allocate requests for the given EP */
+static int f_midi2_alloc_ep_reqs(struct f_midi2_usb_ep *usb_ep)
+{
+ struct f_midi2 *midi2 = usb_ep->card;
+ int i;
+
+ if (!usb_ep->usb_ep)
+ return 0;
+ if (!usb_ep->reqs)
+ return -EINVAL;
+
+ for (i = 0; i < midi2->info.num_reqs; i++) {
+ if (usb_ep->reqs[i].req)
+ continue;
+ usb_ep->reqs[i].req = alloc_ep_req(usb_ep->usb_ep,
+ midi2->info.req_buf_size);
+ if (!usb_ep->reqs[i].req)
+ return -ENOMEM;
+ usb_ep->reqs[i].req->context = &usb_ep->reqs[i];
+ }
+ return 0;
+}
+
+/* Free allocated requests */
+static void f_midi2_free_ep_reqs(struct f_midi2_usb_ep *usb_ep)
+{
+ struct f_midi2 *midi2 = usb_ep->card;
+ int i;
+
+ for (i = 0; i < midi2->info.num_reqs; i++) {
+ if (!usb_ep->reqs[i].req)
+ continue;
+ free_ep_req(usb_ep->usb_ep, usb_ep->reqs[i].req);
+ usb_ep->reqs[i].req = NULL;
+ }
+}
+
+/* Initialize EP */
+static int f_midi2_init_ep(struct f_midi2 *midi2, struct f_midi2_ep *ep,
+ struct f_midi2_usb_ep *usb_ep,
+ void *desc,
+ void (*complete)(struct usb_ep *usb_ep,
+ struct usb_request *req))
+{
+ int i;
+
+ usb_ep->card = midi2;
+ usb_ep->ep = ep;
+ usb_ep->usb_ep = usb_ep_autoconfig(midi2->gadget, desc);
+ if (!usb_ep->usb_ep)
+ return -ENODEV;
+ usb_ep->complete = complete;
+
+ usb_ep->reqs = kcalloc(midi2->info.num_reqs, sizeof(*usb_ep->reqs),
+ GFP_KERNEL);
+ if (!usb_ep->reqs)
+ return -ENOMEM;
+ for (i = 0; i < midi2->info.num_reqs; i++) {
+ usb_ep->reqs[i].index = i;
+ usb_ep->reqs[i].usb_ep = usb_ep;
+ set_bit(i, &usb_ep->free_reqs);
+ usb_ep->num_reqs++;
+ }
+
+ return 0;
+}
+
+/* Free EP */
+static void f_midi2_free_ep(struct f_midi2_usb_ep *usb_ep)
+{
+ f_midi2_drop_reqs(usb_ep);
+
+ f_midi2_free_ep_reqs(usb_ep);
+
+ kfree(usb_ep->reqs);
+ usb_ep->num_reqs = 0;
+ usb_ep->free_reqs = 0;
+ usb_ep->reqs = NULL;
+}
+
+/* Queue requests for EP-out at start */
+static void f_midi2_queue_out_reqs(struct f_midi2_usb_ep *usb_ep)
+{
+ int i, err;
+
+ if (!usb_ep->usb_ep)
+ return;
+
+ for (i = 0; i < usb_ep->num_reqs; i++) {
+ if (!test_bit(i, &usb_ep->free_reqs) || !usb_ep->reqs[i].req)
+ continue;
+ usb_ep->reqs[i].req->complete = usb_ep->complete;
+ err = usb_ep_queue(usb_ep->usb_ep, usb_ep->reqs[i].req,
+ GFP_ATOMIC);
+ if (!err)
+ clear_bit(i, &usb_ep->free_reqs);
+ }
+}
+
+/*
+ * Gadget Function callbacks
+ */
+
+/* stop both IN and OUT EPs */
+static void f_midi2_stop_eps(struct f_midi2_usb_ep *ep_in,
+ struct f_midi2_usb_ep *ep_out)
+{
+ f_midi2_drop_reqs(ep_in);
+ f_midi2_drop_reqs(ep_out);
+ f_midi2_free_ep_reqs(ep_in);
+ f_midi2_free_ep_reqs(ep_out);
+}
+
+/* start/queue both IN and OUT EPs */
+static int f_midi2_start_eps(struct f_midi2_usb_ep *ep_in,
+ struct f_midi2_usb_ep *ep_out,
+ struct usb_function *fn)
+{
+ int err;
+
+ err = f_midi2_start_ep(ep_in, fn);
+ if (err)
+ return err;
+ err = f_midi2_start_ep(ep_out, fn);
+ if (err)
+ return err;
+
+ err = f_midi2_alloc_ep_reqs(ep_in);
+ if (err)
+ return err;
+ err = f_midi2_alloc_ep_reqs(ep_out);
+ if (err)
+ return err;
+
+ f_midi2_queue_out_reqs(ep_out);
+ return 0;
+}
+
+/* gadget function set_alt callback */
+static int f_midi2_set_alt(struct usb_function *fn, unsigned int intf,
+ unsigned int alt)
+{
+ struct f_midi2 *midi2 = func_to_midi2(fn);
+ struct f_midi2_ep *ep;
+ int i, op_mode, err;
+
+ if (intf != midi2->midi_if || alt > 1)
+ return 0;
+
+ if (alt == 0)
+ op_mode = MIDI_OP_MODE_MIDI1;
+ else if (alt == 1)
+ op_mode = MIDI_OP_MODE_MIDI2;
+ else
+ op_mode = MIDI_OP_MODE_UNSET;
+
+ if (midi2->operation_mode == op_mode)
+ return 0;
+
+ midi2->operation_mode = op_mode;
+
+ if (op_mode != MIDI_OP_MODE_MIDI1)
+ f_midi2_stop_eps(&midi2->midi1_ep_in, &midi2->midi1_ep_out);
+
+ if (op_mode != MIDI_OP_MODE_MIDI2) {
+ for (i = 0; i < midi2->num_eps; i++) {
+ ep = &midi2->midi2_eps[i];
+ f_midi2_stop_eps(&ep->ep_in, &ep->ep_out);
+ }
+ }
+
+ if (op_mode == MIDI_OP_MODE_MIDI1)
+ return f_midi2_start_eps(&midi2->midi1_ep_in,
+ &midi2->midi1_ep_out, fn);
+
+ if (op_mode == MIDI_OP_MODE_MIDI2) {
+ for (i = 0; i < midi2->num_eps; i++) {
+ ep = &midi2->midi2_eps[i];
+
+ err = f_midi2_start_eps(&ep->ep_in, &ep->ep_out, fn);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/* gadget function get_alt callback */
+static int f_midi2_get_alt(struct usb_function *fn, unsigned int intf)
+{
+ struct f_midi2 *midi2 = func_to_midi2(fn);
+
+ if (intf == midi2->midi_if &&
+ midi2->operation_mode == MIDI_OP_MODE_MIDI2)
+ return 1;
+ return 0;
+}
+
+/* convert UMP direction to USB MIDI 2.0 direction */
+static unsigned int ump_to_usb_dir(unsigned int ump_dir)
+{
+ switch (ump_dir) {
+ case SNDRV_UMP_DIR_INPUT:
+ return USB_MS_GR_TRM_BLOCK_TYPE_INPUT_ONLY;
+ case SNDRV_UMP_DIR_OUTPUT:
+ return USB_MS_GR_TRM_BLOCK_TYPE_OUTPUT_ONLY;
+ default:
+ return USB_MS_GR_TRM_BLOCK_TYPE_BIDIRECTIONAL;
+ }
+}
+
+/* assign GTB descriptors (for the given request) */
+static void assign_block_descriptors(struct f_midi2 *midi2,
+ struct usb_request *req,
+ int max_len)
+{
+ struct usb_ms20_gr_trm_block_header_descriptor header;
+ struct usb_ms20_gr_trm_block_descriptor *desc;
+ struct f_midi2_block_info *b;
+ struct f_midi2_ep *ep;
+ int i, blk, len;
+ char *data;
+
+ len = sizeof(gtb_header_desc) + sizeof(gtb_desc) * midi2->total_blocks;
+ if (WARN_ON(len > midi2->info.req_buf_size))
+ return;
+
+ header = gtb_header_desc;
+ header.wTotalLength = cpu_to_le16(len);
+ if (max_len < len) {
+ len = min_t(int, len, sizeof(header));
+ memcpy(req->buf, &header, len);
+ req->length = len;
+ req->zero = len < max_len;
+ return;
+ }
+
+ memcpy(req->buf, &header, sizeof(header));
+ data = req->buf + sizeof(header);
+ for (i = 0; i < midi2->num_eps; i++) {
+ ep = &midi2->midi2_eps[i];
+ for (blk = 0; blk < ep->num_blks; blk++) {
+ b = &ep->blks[blk].info;
+ desc = (struct usb_ms20_gr_trm_block_descriptor *)data;
+
+ *desc = gtb_desc;
+ desc->bGrpTrmBlkID = ep->blks[blk].gtb_id;
+ desc->bGrpTrmBlkType = ump_to_usb_dir(b->direction);
+ desc->nGroupTrm = b->first_group;
+ desc->nNumGroupTrm = b->num_groups;
+ desc->iBlockItem = ep->blks[blk].string_id;
+
+ if (ep->info.protocol & SNDRV_UMP_EP_INFO_PROTO_MIDI2)
+ desc->bMIDIProtocol = USB_MS_MIDI_PROTO_2_0;
+ else
+ desc->bMIDIProtocol = USB_MS_MIDI_PROTO_1_0_128;
+
+ if (b->is_midi1 == 2) {
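+ /* is_midi1 == 2 restricts this block to classic
+  * 31.25 kbps MIDI 1.0 bandwidth (spec value 1) */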
+ desc->wMaxInputBandwidth = cpu_to_le16(1);
+ desc->wMaxOutputBandwidth = cpu_to_le16(1);
+ }
+
+ data += sizeof(*desc);
+ }
+ }
+
+ req->length = len;
+ req->zero = len < max_len;
+}
+
+/* gadget function setup callback: handle GTB requests */
+static int f_midi2_setup(struct usb_function *fn,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct f_midi2 *midi2 = func_to_midi2(fn);
+ struct usb_composite_dev *cdev = fn->config->cdev;
+ struct usb_request *req = cdev->req;
+ u16 value, length;
+
+ if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD ||
+ ctrl->bRequest != USB_REQ_GET_DESCRIPTOR)
+ return -EOPNOTSUPP;
+
+ value = le16_to_cpu(ctrl->wValue);
+ length = le16_to_cpu(ctrl->wLength);
+
+ if ((value >> 8) != USB_DT_CS_GR_TRM_BLOCK)
+ return -EOPNOTSUPP;
+
+ /* handle only altset 1 */
+ if ((value & 0xff) != 1)
+ return -EOPNOTSUPP;
+
+ assign_block_descriptors(midi2, req, length);
+ return usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+}
+
+/* gadget function disable callback */
+static void f_midi2_disable(struct usb_function *fn)
+{
+ struct f_midi2 *midi2 = func_to_midi2(fn);
+
+ midi2->operation_mode = MIDI_OP_MODE_UNSET;
+}
+
+/*
+ * ALSA UMP ops: most of them are NOPs, only trigger for write is needed
+ */
+static int f_midi2_ump_open(struct snd_ump_endpoint *ump, int dir)
+{
+ return 0;
+}
+
+static void f_midi2_ump_close(struct snd_ump_endpoint *ump, int dir)
+{
+}
+
+static void f_midi2_ump_trigger(struct snd_ump_endpoint *ump, int dir, int up)
+{
+ struct f_midi2_ep *ep = ump->private_data;
+ struct f_midi2 *midi2 = ep->card;
+
+ if (up && dir == SNDRV_RAWMIDI_STREAM_OUTPUT) {
+ switch (midi2->operation_mode) {
+ case MIDI_OP_MODE_MIDI1:
+ process_midi1_transmit(midi2);
+ break;
+ case MIDI_OP_MODE_MIDI2:
+ process_ump_transmit(ep);
+ break;
+ }
+ }
+}
+
+static void f_midi2_ump_drain(struct snd_ump_endpoint *ump, int dir)
+{
+}
+
+static const struct snd_ump_ops f_midi2_ump_ops = {
+ .open = f_midi2_ump_open,
+ .close = f_midi2_ump_close,
+ .trigger = f_midi2_ump_trigger,
+ .drain = f_midi2_ump_drain,
+};
+
+/*
+ * "Operation Mode" control element
+ */
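+/* (read-only; 0 = no altsetting selected yet, 1 = MIDI 1.0 altsetting in use,
+ * 2 = MIDI 2.0 altsetting in use, matching the MIDI_OP_MODE_* values)
+ */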
+static int f_midi2_operation_mode_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = MIDI_OP_MODE_UNSET;
+ uinfo->value.integer.max = MIDI_OP_MODE_MIDI2;
+ return 0;
+}
+
+static int f_midi2_operation_mode_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct f_midi2 *midi2 = snd_kcontrol_chip(kcontrol);
+
+ ucontrol->value.integer.value[0] = midi2->operation_mode;
+ return 0;
+}
+
+static const struct snd_kcontrol_new operation_mode_ctl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI,
+ .name = "Operation Mode",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = f_midi2_operation_mode_info,
+ .get = f_midi2_operation_mode_get,
+};
+
+/*
+ * ALSA UMP instance creation / deletion
+ */
+static void f_midi2_free_card(struct f_midi2 *midi2)
+{
+ if (midi2->card) {
+ snd_card_free_when_closed(midi2->card);
+ midi2->card = NULL;
+ }
+}
+
+/* use a reverse direction for the gadget host */
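+/* (a Function Block that is an output from the gadget's point of view is an
+ * input from the connected host's point of view, and vice versa)
+ */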
+static int reverse_dir(int dir)
+{
+ if (!dir || dir == SNDRV_UMP_DIR_BIDIRECTION)
+ return dir;
+ return (dir == SNDRV_UMP_DIR_OUTPUT) ?
+ SNDRV_UMP_DIR_INPUT : SNDRV_UMP_DIR_OUTPUT;
+}
+
+static int f_midi2_create_card(struct f_midi2 *midi2)
+{
+ struct snd_card *card;
+ struct snd_ump_endpoint *ump;
+ struct f_midi2_ep *ep;
+ int i, id, blk, err;
+ __be32 sw;
+
+ err = snd_card_new(&midi2->gadget->dev, -1, NULL, THIS_MODULE, 0,
+ &card);
+ if (err < 0)
+ return err;
+ midi2->card = card;
+
+ strcpy(card->driver, "f_midi2");
+ strcpy(card->shortname, "MIDI 2.0 Gadget");
+ strcpy(card->longname, "MIDI 2.0 Gadget");
+
+ id = 0;
+ for (i = 0; i < midi2->num_eps; i++) {
+ ep = &midi2->midi2_eps[i];
+ err = snd_ump_endpoint_new(card, "MIDI 2.0 Gadget", id,
+ 1, 1, &ump);
+ if (err < 0)
+ goto error;
+ id++;
+
+ ep->ump = ump;
+ ump->no_process_stream = true;
+ ump->private_data = ep;
+ ump->ops = &f_midi2_ump_ops;
+ if (midi2->info.static_block)
+ ump->info.flags |= SNDRV_UMP_EP_INFO_STATIC_BLOCKS;
+ ump->info.protocol_caps = (ep->info.protocol_caps & 3) << 8;
+ ump->info.protocol = (ep->info.protocol & 3) << 8;
+ ump->info.version = 0x0101;
+ ump->info.family_id = ep->info.family;
+ ump->info.model_id = ep->info.model;
+ ump->info.manufacturer_id = ep->info.manufacturer & 0xffffff;
+ sw = cpu_to_be32(ep->info.sw_revision);
+ memcpy(ump->info.sw_revision, &sw, 4);
+
+ strscpy(ump->info.name, ump_ep_name(ep),
+ sizeof(ump->info.name));
+ strscpy(ump->info.product_id, ump_product_id(ep),
+ sizeof(ump->info.product_id));
+ strscpy(ump->core.name, ump->info.name, sizeof(ump->core.name));
+
+ for (blk = 0; blk < ep->num_blks; blk++) {
+ const struct f_midi2_block_info *b = &ep->blks[blk].info;
+ struct snd_ump_block *fb;
+
+ err = snd_ump_block_new(ump, blk,
+ reverse_dir(b->direction),
+ b->first_group, b->num_groups,
+ &ep->blks[blk].fb);
+ if (err < 0)
+ goto error;
+ fb = ep->blks[blk].fb;
+ fb->info.active = !!b->active;
+ fb->info.midi_ci_version = b->midi_ci_version;
+ fb->info.ui_hint = reverse_dir(b->ui_hint);
+ fb->info.sysex8_streams = b->sysex8_streams;
+ fb->info.flags |= b->is_midi1;
+ strscpy(fb->info.name, ump_fb_name(b),
+ sizeof(fb->info.name));
+ }
+ }
+
+ for (i = 0; i < midi2->num_eps; i++) {
+ err = snd_ump_attach_legacy_rawmidi(midi2->midi2_eps[i].ump,
+ "Legacy MIDI", id);
+ if (err < 0)
+ goto error;
+ id++;
+ }
+
+ err = snd_ctl_add(card, snd_ctl_new1(&operation_mode_ctl, midi2));
+ if (err < 0)
+ goto error;
+
+ err = snd_card_register(card);
+ if (err < 0)
+ goto error;
+
+ return 0;
+
+ error:
+ f_midi2_free_card(midi2);
+ return err;
+}
+
+/*
+ * Creation of USB descriptors
+ */
+struct f_midi2_usb_config {
+ struct usb_descriptor_header **list;
+ unsigned int size;
+ unsigned int alloc;
+
+ /* MIDI 1.0 jacks */
+ unsigned char jack_in, jack_out, jack_id;
+ struct usb_midi_in_jack_descriptor jack_ins[MAX_CABLES];
+ struct usb_midi_out_jack_descriptor_1 jack_outs[MAX_CABLES];
+};
+
+static int append_config(struct f_midi2_usb_config *config, void *d)
+{
+ unsigned int size;
+ void *buf;
+
+ if (config->size + 2 >= config->alloc) {
+ size = config->size + 16;
+ buf = krealloc(config->list, size * sizeof(void *), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ config->list = buf;
+ config->alloc = size;
+ }
+
+ config->list[config->size] = d;
+ config->size++;
+ config->list[config->size] = NULL;
+ return 0;
+}
+
+static int append_configs(struct f_midi2_usb_config *config, void **d)
+{
+ int err;
+
+ for (; *d; d++) {
+ err = append_config(config, *d);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int append_midi1_in_jack(struct f_midi2 *midi2,
+ struct f_midi2_usb_config *config,
+ struct midi1_cable_mapping *map,
+ unsigned int type)
+{
+ struct usb_midi_in_jack_descriptor *jack =
+ &config->jack_ins[config->jack_in++];
+ int id = ++config->jack_id;
+ int err;
+
+ jack->bLength = 0x06;
+ jack->bDescriptorType = USB_DT_CS_INTERFACE;
+ jack->bDescriptorSubtype = USB_MS_MIDI_IN_JACK;
+ jack->bJackType = type;
+ jack->bJackID = id;
+ /* use the corresponding block name as jack name */
+ if (map->ep)
+ jack->iJack = map->ep->blks[map->block].string_id;
+
+ err = append_config(config, jack);
+ if (err < 0)
+ return err;
+ return id;
+}
+
+static int append_midi1_out_jack(struct f_midi2 *midi2,
+ struct f_midi2_usb_config *config,
+ struct midi1_cable_mapping *map,
+ unsigned int type, unsigned int source)
+{
+ struct usb_midi_out_jack_descriptor_1 *jack =
+ &config->jack_outs[config->jack_out++];
+ int id = ++config->jack_id;
+ int err;
+
+ jack->bLength = 0x09;
+ jack->bDescriptorType = USB_DT_CS_INTERFACE;
+ jack->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK;
+ jack->bJackType = type;
+ jack->bJackID = id;
+ jack->bNrInputPins = 1;
+ jack->pins[0].baSourceID = source;
+ jack->pins[0].baSourcePin = 0x01;
+ /* use the corresponding block name as jack name */
+ if (map->ep)
+ jack->iJack = map->ep->blks[map->block].string_id;
+
+ err = append_config(config, jack);
+ if (err < 0)
+ return err;
+ return id;
+}
+
+static int f_midi2_create_usb_configs(struct f_midi2 *midi2,
+ struct f_midi2_usb_config *config,
+ int speed)
+{
+ void **midi1_in_eps, **midi1_out_eps;
+ int i, jack, total;
+ int err;
+
+ switch (speed) {
+ default:
+ case USB_SPEED_HIGH:
+ midi2_midi1_ep_out_desc.wMaxPacketSize = cpu_to_le16(512);
+ midi2_midi1_ep_in_desc.wMaxPacketSize = cpu_to_le16(512);
+ for (i = 0; i < midi2->num_eps; i++)
+ midi2_midi2_ep_out_desc[i].wMaxPacketSize =
+ cpu_to_le16(512);
+ fallthrough;
+ case USB_SPEED_FULL:
+ midi1_in_eps = midi2_midi1_ep_in_descs;
+ midi1_out_eps = midi2_midi1_ep_out_descs;
+ break;
+ case USB_SPEED_SUPER:
+ midi2_midi1_ep_out_desc.wMaxPacketSize = cpu_to_le16(1024);
+ midi2_midi1_ep_in_desc.wMaxPacketSize = cpu_to_le16(1024);
+ for (i = 0; i < midi2->num_eps; i++)
+ midi2_midi2_ep_out_desc[i].wMaxPacketSize =
+ cpu_to_le16(1024);
+ midi1_in_eps = midi2_midi1_ep_in_ss_descs;
+ midi1_out_eps = midi2_midi1_ep_out_ss_descs;
+ break;
+ }
+
+ err = append_configs(config, midi2_audio_descs);
+ if (err < 0)
+ return err;
+
+ if (midi2->num_midi1_in && midi2->num_midi1_out)
+ midi2_midi1_if_desc.bNumEndpoints = 2;
+ else
+ midi2_midi1_if_desc.bNumEndpoints = 1;
+
+ err = append_configs(config, midi2_midi1_descs);
+ if (err < 0)
+ return err;
+
+ total = USB_DT_MS_HEADER_SIZE;
+ if (midi2->num_midi1_out) {
+ midi2_midi1_ep_out_class_desc.bLength =
+ USB_DT_MS_ENDPOINT_SIZE(midi2->num_midi1_out);
+ total += midi2_midi1_ep_out_class_desc.bLength;
+ midi2_midi1_ep_out_class_desc.bNumEmbMIDIJack =
+ midi2->num_midi1_out;
+ total += midi2->num_midi1_out *
+ (USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1));
+ for (i = 0; i < midi2->num_midi1_out; i++) {
+ jack = append_midi1_in_jack(midi2, config,
+ &midi2->in_cable_mapping[i],
+ USB_MS_EMBEDDED);
+ if (jack < 0)
+ return jack;
+ midi2_midi1_ep_out_class_desc.baAssocJackID[i] = jack;
+ jack = append_midi1_out_jack(midi2, config,
+ &midi2->in_cable_mapping[i],
+ USB_MS_EXTERNAL, jack);
+ if (jack < 0)
+ return jack;
+ }
+ }
+
+ if (midi2->num_midi1_in) {
+ midi2_midi1_ep_in_class_desc.bLength =
+ USB_DT_MS_ENDPOINT_SIZE(midi2->num_midi1_in);
+ total += midi2_midi1_ep_in_class_desc.bLength;
+ midi2_midi1_ep_in_class_desc.bNumEmbMIDIJack =
+ midi2->num_midi1_in;
+ total += midi2->num_midi1_in *
+ (USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1));
+ for (i = 0; i < midi2->num_midi1_in; i++) {
+ jack = append_midi1_in_jack(midi2, config,
+ &midi2->out_cable_mapping[i],
+ USB_MS_EXTERNAL);
+ if (jack < 0)
+ return jack;
+ jack = append_midi1_out_jack(midi2, config,
+ &midi2->out_cable_mapping[i],
+ USB_MS_EMBEDDED, jack);
+ if (jack < 0)
+ return jack;
+ midi2_midi1_ep_in_class_desc.baAssocJackID[i] = jack;
+ }
+ }
+
+ midi2_midi1_class_desc.wTotalLength = cpu_to_le16(total);
+
+ if (midi2->num_midi1_out) {
+ err = append_configs(config, midi1_out_eps);
+ if (err < 0)
+ return err;
+ }
+ if (midi2->num_midi1_in) {
+ err = append_configs(config, midi1_in_eps);
+ if (err < 0)
+ return err;
+ }
+
+ err = append_configs(config, midi2_midi2_descs);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < midi2->num_eps; i++) {
+ err = append_config(config, &midi2_midi2_ep_out_desc[i]);
+ if (err < 0)
+ return err;
+ if (speed == USB_SPEED_SUPER || speed == USB_SPEED_SUPER_PLUS) {
+ err = append_config(config, &midi2_midi2_ep_out_ss_comp_desc);
+ if (err < 0)
+ return err;
+ }
+ err = append_config(config, &midi2_midi2_ep_out_class_desc[i]);
+ if (err < 0)
+ return err;
+ err = append_config(config, &midi2_midi2_ep_in_desc[i]);
+ if (err < 0)
+ return err;
+ if (speed == USB_SPEED_SUPER || speed == USB_SPEED_SUPER_PLUS) {
+ err = append_config(config, &midi2_midi2_ep_in_ss_comp_desc);
+ if (err < 0)
+ return err;
+ }
+ err = append_config(config, &midi2_midi2_ep_in_class_desc[i]);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static void f_midi2_free_usb_configs(struct f_midi2_usb_config *config)
+{
+ kfree(config->list);
+ memset(config, 0, sizeof(*config));
+}
+
+/* as we use static descriptors for simplicity, serialize the bind call */
+static DEFINE_MUTEX(f_midi2_desc_mutex);
+
+/* fill MIDI2 EP class-specific descriptor */
+static void fill_midi2_class_desc(struct f_midi2_ep *ep,
+ struct usb_ms20_endpoint_descriptor_32 *cdesc)
+{
+ int blk;
+
+ cdesc->bLength = USB_DT_MS20_ENDPOINT_SIZE(ep->num_blks);
+ cdesc->bDescriptorType = USB_DT_CS_ENDPOINT;
+ cdesc->bDescriptorSubtype = USB_MS_GENERAL_2_0;
+ cdesc->bNumGrpTrmBlock = ep->num_blks;
+ for (blk = 0; blk < ep->num_blks; blk++)
+ cdesc->baAssoGrpTrmBlkID[blk] = ep->blks[blk].gtb_id;
+}
+
+/* initialize MIDI2 EP-in */
+static int f_midi2_init_midi2_ep_in(struct f_midi2 *midi2, int index)
+{
+ struct f_midi2_ep *ep = &midi2->midi2_eps[index];
+ struct usb_endpoint_descriptor *desc = &midi2_midi2_ep_in_desc[index];
+
+ desc->bLength = USB_DT_ENDPOINT_SIZE;
+ desc->bDescriptorType = USB_DT_ENDPOINT;
+ desc->bEndpointAddress = USB_DIR_IN;
+ desc->bmAttributes = USB_ENDPOINT_XFER_INT;
+ desc->wMaxPacketSize = cpu_to_le16(EP_MAX_PACKET_INT);
+ desc->bInterval = 1;
+
+ fill_midi2_class_desc(ep, &midi2_midi2_ep_in_class_desc[index]);
+
+ return f_midi2_init_ep(midi2, ep, &ep->ep_in, desc,
+ f_midi2_ep_in_complete);
+}
+
+/* initialize MIDI2 EP-out */
+static int f_midi2_init_midi2_ep_out(struct f_midi2 *midi2, int index)
+{
+ struct f_midi2_ep *ep = &midi2->midi2_eps[index];
+ struct usb_endpoint_descriptor *desc = &midi2_midi2_ep_out_desc[index];
+
+ desc->bLength = USB_DT_ENDPOINT_SIZE;
+ desc->bDescriptorType = USB_DT_ENDPOINT;
+ desc->bEndpointAddress = USB_DIR_OUT;
+ desc->bmAttributes = USB_ENDPOINT_XFER_BULK;
+
+ fill_midi2_class_desc(ep, &midi2_midi2_ep_out_class_desc[index]);
+
+ return f_midi2_init_ep(midi2, ep, &ep->ep_out, desc,
+ f_midi2_ep_out_complete);
+}
+
+/* gadget function bind callback */
+static int f_midi2_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct f_midi2 *midi2 = func_to_midi2(f);
+ struct f_midi2_ep *ep;
+ struct f_midi2_usb_config config = {};
+ struct usb_gadget_strings string_fn = {
+ .language = 0x0409, /* en-us */
+ .strings = midi2->string_defs,
+ };
+ struct usb_gadget_strings *strings[] = {
+ &string_fn,
+ NULL,
+ };
+ int i, blk, status;
+
+ midi2->gadget = cdev->gadget;
+ midi2->operation_mode = MIDI_OP_MODE_UNSET;
+
+ status = f_midi2_create_card(midi2);
+ if (status < 0)
+ goto fail_register;
+
+ /* maybe allocate device-global string ID */
+ midi2->strings = usb_gstrings_attach(c->cdev, strings,
+ midi2->total_blocks + 1);
+ if (IS_ERR(midi2->strings)) {
+ status = PTR_ERR(midi2->strings);
+ goto fail_string;
+ }
+
+ mutex_lock(&f_midi2_desc_mutex);
+ midi2_midi1_if_desc.iInterface = midi2->strings[STR_IFACE].id;
+ midi2_midi2_if_desc.iInterface = midi2->strings[STR_IFACE].id;
+ for (i = 0; i < midi2->num_eps; i++) {
+ ep = &midi2->midi2_eps[i];
+ for (blk = 0; blk < ep->num_blks; blk++)
+ ep->blks[blk].string_id =
+ midi2->strings[gtb_to_str_id(ep->blks[blk].gtb_id)].id;
+ }
+
+ midi2_midi2_if_desc.bNumEndpoints = midi2->num_eps * 2;
+
+ /* audio interface */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ midi2_audio_if_desc.bInterfaceNumber = status;
+
+ /* MIDI streaming */
+ status = usb_interface_id(c, f);
+ if (status < 0)
+ goto fail;
+ midi2->midi_if = status;
+ midi2_midi1_if_desc.bInterfaceNumber = status;
+ midi2_midi2_if_desc.bInterfaceNumber = status;
+ midi2_audio_class_desc.baInterfaceNr[0] = status;
+
+ /* allocate instance-specific endpoints */
+ if (midi2->midi2_eps[0].blks[0].info.direction != SNDRV_UMP_DIR_OUTPUT) {
+ status = f_midi2_init_ep(midi2, NULL, &midi2->midi1_ep_in,
+ &midi2_midi1_ep_in_desc,
+ f_midi2_midi1_ep_in_complete);
+ if (status)
+ goto fail;
+ }
+
+ if (midi2->midi2_eps[0].blks[0].info.direction != SNDRV_UMP_DIR_INPUT) {
+ status = f_midi2_init_ep(midi2, NULL, &midi2->midi1_ep_out,
+ &midi2_midi1_ep_out_desc,
+ f_midi2_midi1_ep_out_complete);
+ if (status)
+ goto fail;
+ }
+
+ for (i = 0; i < midi2->num_eps; i++) {
+ status = f_midi2_init_midi2_ep_in(midi2, i);
+ if (status)
+ goto fail;
+ status = f_midi2_init_midi2_ep_out(midi2, i);
+ if (status)
+ goto fail;
+ }
+
+ status = f_midi2_create_usb_configs(midi2, &config, USB_SPEED_FULL);
+ if (status < 0)
+ goto fail;
+ f->fs_descriptors = usb_copy_descriptors(config.list);
+ if (!f->fs_descriptors) {
+ status = -ENOMEM;
+ goto fail;
+ }
+ f_midi2_free_usb_configs(&config);
+
+ status = f_midi2_create_usb_configs(midi2, &config, USB_SPEED_HIGH);
+ if (status < 0)
+ goto fail;
+ f->hs_descriptors = usb_copy_descriptors(config.list);
+ if (!f->hs_descriptors) {
+ status = -ENOMEM;
+ goto fail;
+ }
+ f_midi2_free_usb_configs(&config);
+
+ status = f_midi2_create_usb_configs(midi2, &config, USB_SPEED_SUPER);
+ if (status < 0)
+ goto fail;
+ f->ss_descriptors = usb_copy_descriptors(config.list);
+ if (!f->ss_descriptors) {
+ status = -ENOMEM;
+ goto fail;
+ }
+ f_midi2_free_usb_configs(&config);
+
+ mutex_unlock(&f_midi2_desc_mutex);
+ return 0;
+
+fail:
+ f_midi2_free_usb_configs(&config);
+ mutex_unlock(&f_midi2_desc_mutex);
+ usb_free_all_descriptors(f);
+fail_string:
+ f_midi2_free_card(midi2);
+fail_register:
+ ERROR(midi2, "%s: can't bind, err %d\n", f->name, status);
+ return status;
+}
+
+/* gadget function unbind callback */
+static void f_midi2_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct f_midi2 *midi2 = func_to_midi2(f);
+ int i;
+
+ f_midi2_free_card(midi2);
+
+ f_midi2_free_ep(&midi2->midi1_ep_in);
+ f_midi2_free_ep(&midi2->midi1_ep_out);
+ for (i = 0; i < midi2->num_eps; i++) {
+ f_midi2_free_ep(&midi2->midi2_eps[i].ep_in);
+ f_midi2_free_ep(&midi2->midi2_eps[i].ep_out);
+ }
+
+ usb_free_all_descriptors(f);
+}
+
+/*
+ * ConfigFS interface
+ */
+
+/* type conversion helpers */
+static inline struct f_midi2_opts *to_f_midi2_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_midi2_opts,
+ func_inst.group);
+}
+
+static inline struct f_midi2_ep_opts *
+to_f_midi2_ep_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_midi2_ep_opts,
+ group);
+}
+
+static inline struct f_midi2_block_opts *
+to_f_midi2_block_opts(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct f_midi2_block_opts,
+ group);
+}
+
+/* trim the string to be usable for EP and FB name strings */
+static void make_name_string(char *s)
+{
+ char *p;
+
+ p = strchr(s, '\n');
+ if (p)
+ *p = 0;
+
+ p = s + strlen(s);
+ while (p > s && isspace(*(p - 1)))
+ *--p = 0;
+}
+
+/* configfs helpers: generic show/store for unsigned int */
+static ssize_t f_midi2_opts_uint_show(struct f_midi2_opts *opts,
+ u32 val, const char *format, char *page)
+{
+ int result;
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, format, val);
+ mutex_unlock(&opts->lock);
+ return result;
+}
+
+static ssize_t f_midi2_opts_uint_store(struct f_midi2_opts *opts,
+ u32 *valp, u32 minval, u32 maxval,
+ const char *page, size_t len)
+{
+ int ret;
+ u32 val;
+
+ mutex_lock(&opts->lock);
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret)
+ goto end;
+ if (val < minval || val > maxval) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ *valp = val;
+ ret = len;
+
+end:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+/* generic store for bool */
+static ssize_t f_midi2_opts_bool_store(struct f_midi2_opts *opts,
+ bool *valp, const char *page, size_t len)
+{
+ int ret;
+ bool val;
+
+ mutex_lock(&opts->lock);
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ ret = kstrtobool(page, &val);
+ if (ret)
+ goto end;
+ *valp = val;
+ ret = len;
+
+end:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+/* generic show/store for string */
+static ssize_t f_midi2_opts_str_show(struct f_midi2_opts *opts,
+ const char *str, char *page)
+{
+ int result = 0;
+
+ mutex_lock(&opts->lock);
+ if (str)
+ result = scnprintf(page, PAGE_SIZE, "%s\n", str);
+ mutex_unlock(&opts->lock);
+ return result;
+}
+
+static ssize_t f_midi2_opts_str_store(struct f_midi2_opts *opts,
+ const char **strp, size_t maxlen,
+ const char *page, size_t len)
+{
+ char *c;
+ int ret;
+
+ mutex_lock(&opts->lock);
+ if (opts->refcnt) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ c = kstrndup(page, min(len, maxlen), GFP_KERNEL);
+ if (!c) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ kfree(*strp);
+ make_name_string(c);
+ *strp = c;
+ ret = len;
+
+end:
+ mutex_unlock(&opts->lock);
+ return ret;
+}
+
+/*
+ * Definitions for UMP Block config
+ */
+
+/* define a uint option for block */
+#define F_MIDI2_BLOCK_OPT(name, format, minval, maxval) \
+static ssize_t f_midi2_block_opts_##name##_show(struct config_item *item,\
+ char *page) \
+{ \
+ struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item); \
+ return f_midi2_opts_uint_show(opts->ep->opts, opts->info.name, \
+ format "\n", page); \
+} \
+ \
+static ssize_t f_midi2_block_opts_##name##_store(struct config_item *item,\
+ const char *page, size_t len) \
+{ \
+ struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item); \
+ return f_midi2_opts_uint_store(opts->ep->opts, &opts->info.name,\
+ minval, maxval, page, len); \
+} \
+ \
+CONFIGFS_ATTR(f_midi2_block_opts_, name)
+
+/* define a boolean option for block */
+#define F_MIDI2_BLOCK_BOOL_OPT(name) \
+static ssize_t f_midi2_block_opts_##name##_show(struct config_item *item,\
+ char *page) \
+{ \
+ struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item); \
+ return f_midi2_opts_uint_show(opts->ep->opts, opts->info.name, \
+ "%u\n", page); \
+} \
+ \
+static ssize_t f_midi2_block_opts_##name##_store(struct config_item *item,\
+ const char *page, size_t len) \
+{ \
+ struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item); \
+ return f_midi2_opts_bool_store(opts->ep->opts, &opts->info.name,\
+ page, len); \
+} \
+ \
+CONFIGFS_ATTR(f_midi2_block_opts_, name)
+
+F_MIDI2_BLOCK_OPT(direction, "0x%x", 1, 3);
+F_MIDI2_BLOCK_OPT(first_group, "0x%x", 0, 15);
+F_MIDI2_BLOCK_OPT(num_groups, "0x%x", 1, 16);
+F_MIDI2_BLOCK_OPT(midi1_first_group, "0x%x", 0, 15);
+F_MIDI2_BLOCK_OPT(midi1_num_groups, "0x%x", 0, 16);
+F_MIDI2_BLOCK_OPT(ui_hint, "0x%x", 0, 3);
+F_MIDI2_BLOCK_OPT(midi_ci_version, "%u", 0, 1);
+F_MIDI2_BLOCK_OPT(sysex8_streams, "%u", 0, 255);
+F_MIDI2_BLOCK_OPT(is_midi1, "%u", 0, 2);
+F_MIDI2_BLOCK_BOOL_OPT(active);
+
+static ssize_t f_midi2_block_opts_name_show(struct config_item *item,
+ char *page)
+{
+ struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item);
+
+ return f_midi2_opts_str_show(opts->ep->opts, opts->info.name, page);
+}
+
+static ssize_t f_midi2_block_opts_name_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item);
+
+ return f_midi2_opts_str_store(opts->ep->opts, &opts->info.name, 128,
+ page, len);
+}
+
+CONFIGFS_ATTR(f_midi2_block_opts_, name);
+
+static struct configfs_attribute *f_midi2_block_attrs[] = {
+ &f_midi2_block_opts_attr_direction,
+ &f_midi2_block_opts_attr_first_group,
+ &f_midi2_block_opts_attr_num_groups,
+ &f_midi2_block_opts_attr_midi1_first_group,
+ &f_midi2_block_opts_attr_midi1_num_groups,
+ &f_midi2_block_opts_attr_ui_hint,
+ &f_midi2_block_opts_attr_midi_ci_version,
+ &f_midi2_block_opts_attr_sysex8_streams,
+ &f_midi2_block_opts_attr_is_midi1,
+ &f_midi2_block_opts_attr_active,
+ &f_midi2_block_opts_attr_name,
+ NULL,
+};
+
+static void f_midi2_block_opts_release(struct config_item *item)
+{
+ struct f_midi2_block_opts *opts = to_f_midi2_block_opts(item);
+
+ kfree(opts->info.name);
+ kfree(opts);
+}
+
+static struct configfs_item_operations f_midi2_block_item_ops = {
+ .release = f_midi2_block_opts_release,
+};
+
+static const struct config_item_type f_midi2_block_type = {
+ .ct_item_ops = &f_midi2_block_item_ops,
+ .ct_attrs = f_midi2_block_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* create a f_midi2_block_opts instance for the given block number */
+static int f_midi2_block_opts_create(struct f_midi2_ep_opts *ep_opts,
+ unsigned int blk,
+ struct f_midi2_block_opts **block_p)
+{
+ struct f_midi2_block_opts *block_opts;
+ int ret = 0;
+
+ mutex_lock(&ep_opts->opts->lock);
+ if (ep_opts->opts->refcnt || ep_opts->blks[blk]) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ block_opts = kzalloc(sizeof(*block_opts), GFP_KERNEL);
+ if (!block_opts) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ block_opts->ep = ep_opts;
+ block_opts->id = blk;
+
+ /* set up the default values */
+ block_opts->info.direction = SNDRV_UMP_DIR_BIDIRECTION;
+ block_opts->info.first_group = 0;
+ block_opts->info.num_groups = 1;
+ block_opts->info.ui_hint = SNDRV_UMP_BLOCK_UI_HINT_BOTH;
+ block_opts->info.active = 1;
+
+ ep_opts->blks[blk] = block_opts;
+ *block_p = block_opts;
+
+ out:
+ mutex_unlock(&ep_opts->opts->lock);
+ return ret;
+}
+
+/* make_group callback for a block */
+static struct config_group *
+f_midi2_opts_block_make(struct config_group *group, const char *name)
+{
+ struct f_midi2_ep_opts *ep_opts;
+ struct f_midi2_block_opts *block_opts;
+ unsigned int blk;
+ int ret;
+
+ if (strncmp(name, "block.", 6))
+ return ERR_PTR(-EINVAL);
+ ret = kstrtouint(name + 6, 10, &blk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ep_opts = to_f_midi2_ep_opts(&group->cg_item);
+
+ if (blk >= SNDRV_UMP_MAX_BLOCKS)
+ return ERR_PTR(-EINVAL);
+ if (ep_opts->blks[blk])
+ return ERR_PTR(-EBUSY);
+ ret = f_midi2_block_opts_create(ep_opts, blk, &block_opts);
+ if (ret)
+ return ERR_PTR(ret);
+
+ config_group_init_type_name(&block_opts->group, name,
+ &f_midi2_block_type);
+ return &block_opts->group;
+}
+
+/* drop_item callback for a block */
+static void
+f_midi2_opts_block_drop(struct config_group *group, struct config_item *item)
+{
+ struct f_midi2_block_opts *block_opts = to_f_midi2_block_opts(item);
+
+ mutex_lock(&block_opts->ep->opts->lock);
+ block_opts->ep->blks[block_opts->id] = NULL;
+ mutex_unlock(&block_opts->ep->opts->lock);
+ config_item_put(item);
+}
+
+/*
+ * Definitions for UMP Endpoint config
+ */
+
+/* define a uint option for EP */
+#define F_MIDI2_EP_OPT(name, format, minval, maxval) \
+static ssize_t f_midi2_ep_opts_##name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct f_midi2_ep_opts *opts = to_f_midi2_ep_opts(item); \
+ return f_midi2_opts_uint_show(opts->opts, opts->info.name, \
+ format "\n", page); \
+} \
+ \
+static ssize_t f_midi2_ep_opts_##name##_store(struct config_item *item, \
+ const char *page, size_t len)\
+{ \
+ struct f_midi2_ep_opts *opts = to_f_midi2_ep_opts(item); \
+ return f_midi2_opts_uint_store(opts->opts, &opts->info.name, \
+ minval, maxval, page, len); \
+} \
+ \
+CONFIGFS_ATTR(f_midi2_ep_opts_, name)
+
+/* define a string option for EP */
+#define F_MIDI2_EP_STR_OPT(name, maxlen) \
+static ssize_t f_midi2_ep_opts_##name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct f_midi2_ep_opts *opts = to_f_midi2_ep_opts(item); \
+ return f_midi2_opts_str_show(opts->opts, opts->info.name, page);\
+} \
+ \
+static ssize_t f_midi2_ep_opts_##name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct f_midi2_ep_opts *opts = to_f_midi2_ep_opts(item); \
+ return f_midi2_opts_str_store(opts->opts, &opts->info.name, maxlen,\
+ page, len); \
+} \
+ \
+CONFIGFS_ATTR(f_midi2_ep_opts_, name)
+
+F_MIDI2_EP_OPT(protocol, "0x%x", 1, 2);
+F_MIDI2_EP_OPT(protocol_caps, "0x%x", 1, 3);
+F_MIDI2_EP_OPT(manufacturer, "0x%x", 0, 0xffffff);
+F_MIDI2_EP_OPT(family, "0x%x", 0, 0xffff);
+F_MIDI2_EP_OPT(model, "0x%x", 0, 0xffff);
+F_MIDI2_EP_OPT(sw_revision, "0x%x", 0, 0xffffffff);
+F_MIDI2_EP_STR_OPT(ep_name, 128);
+F_MIDI2_EP_STR_OPT(product_id, 128);
+
+static struct configfs_attribute *f_midi2_ep_attrs[] = {
+ &f_midi2_ep_opts_attr_protocol,
+ &f_midi2_ep_opts_attr_protocol_caps,
+ &f_midi2_ep_opts_attr_ep_name,
+ &f_midi2_ep_opts_attr_product_id,
+ &f_midi2_ep_opts_attr_manufacturer,
+ &f_midi2_ep_opts_attr_family,
+ &f_midi2_ep_opts_attr_model,
+ &f_midi2_ep_opts_attr_sw_revision,
+ NULL,
+};
+
+static void f_midi2_ep_opts_release(struct config_item *item)
+{
+ struct f_midi2_ep_opts *opts = to_f_midi2_ep_opts(item);
+
+ kfree(opts->info.ep_name);
+ kfree(opts->info.product_id);
+ kfree(opts);
+}
+
+static struct configfs_item_operations f_midi2_ep_item_ops = {
+ .release = f_midi2_ep_opts_release,
+};
+
+static struct configfs_group_operations f_midi2_ep_group_ops = {
+ .make_group = f_midi2_opts_block_make,
+ .drop_item = f_midi2_opts_block_drop,
+};
+
+static const struct config_item_type f_midi2_ep_type = {
+ .ct_item_ops = &f_midi2_ep_item_ops,
+ .ct_group_ops = &f_midi2_ep_group_ops,
+ .ct_attrs = f_midi2_ep_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+/* create a f_midi2_ep_opts instance */
+static int f_midi2_ep_opts_create(struct f_midi2_opts *opts,
+ unsigned int index,
+ struct f_midi2_ep_opts **ep_p)
+{
+ struct f_midi2_ep_opts *ep_opts;
+
+ ep_opts = kzalloc(sizeof(*ep_opts), GFP_KERNEL);
+ if (!ep_opts)
+ return -ENOMEM;
+
+ ep_opts->opts = opts;
+ ep_opts->index = index;
+
+ /* set up the default values */
+ ep_opts->info.protocol = 2;
+ ep_opts->info.protocol_caps = 3;
+
+ opts->eps[index] = ep_opts;
+ *ep_p = ep_opts;
+ return 0;
+}
+
+/* make_group callback for an EP */
+static struct config_group *
+f_midi2_opts_ep_make(struct config_group *group, const char *name)
+{
+ struct f_midi2_opts *opts;
+ struct f_midi2_ep_opts *ep_opts;
+ unsigned int index;
+ int ret;
+
+ if (strncmp(name, "ep.", 3))
+ return ERR_PTR(-EINVAL);
+ ret = kstrtouint(name + 3, 10, &index);
+ if (ret)
+ return ERR_PTR(ret);
+
+ opts = to_f_midi2_opts(&group->cg_item);
+ if (index >= MAX_UMP_EPS)
+ return ERR_PTR(-EINVAL);
+ if (opts->eps[index])
+ return ERR_PTR(-EBUSY);
+ ret = f_midi2_ep_opts_create(opts, index, &ep_opts);
+ if (ret)
+ return ERR_PTR(ret);
+
+ config_group_init_type_name(&ep_opts->group, name, &f_midi2_ep_type);
+ return &ep_opts->group;
+}
+
+/* drop_item callback for an EP */
+static void
+f_midi2_opts_ep_drop(struct config_group *group, struct config_item *item)
+{
+ struct f_midi2_ep_opts *ep_opts = to_f_midi2_ep_opts(item);
+
+ mutex_lock(&ep_opts->opts->lock);
+ ep_opts->opts->eps[ep_opts->index] = NULL;
+ mutex_unlock(&ep_opts->opts->lock);
+ config_item_put(item);
+}
+
+/*
+ * Definitions for card config
+ */
+
+/* define a bool option for card */
+#define F_MIDI2_BOOL_OPT(name) \
+static ssize_t f_midi2_opts_##name##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct f_midi2_opts *opts = to_f_midi2_opts(item); \
+ return f_midi2_opts_uint_show(opts, opts->info.name, \
+ "%u\n", page); \
+} \
+ \
+static ssize_t f_midi2_opts_##name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct f_midi2_opts *opts = to_f_midi2_opts(item); \
+ return f_midi2_opts_bool_store(opts, &opts->info.name, \
+ page, len); \
+} \
+ \
+CONFIGFS_ATTR(f_midi2_opts_, name)
+
+F_MIDI2_BOOL_OPT(process_ump);
+F_MIDI2_BOOL_OPT(static_block);
+
+static ssize_t f_midi2_opts_iface_name_show(struct config_item *item,
+ char *page)
+{
+ struct f_midi2_opts *opts = to_f_midi2_opts(item);
+
+ return f_midi2_opts_str_show(opts, opts->info.iface_name, page);
+}
+
+static ssize_t f_midi2_opts_iface_name_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct f_midi2_opts *opts = to_f_midi2_opts(item);
+
+ return f_midi2_opts_str_store(opts, &opts->info.iface_name, 128,
+ page, len);
+}
+
+CONFIGFS_ATTR(f_midi2_opts_, iface_name);
+
+static struct configfs_attribute *f_midi2_attrs[] = {
+ &f_midi2_opts_attr_process_ump,
+ &f_midi2_opts_attr_static_block,
+ &f_midi2_opts_attr_iface_name,
+ NULL
+};
+
+static void f_midi2_opts_release(struct config_item *item)
+{
+ struct f_midi2_opts *opts = to_f_midi2_opts(item);
+
+ usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations f_midi2_item_ops = {
+ .release = f_midi2_opts_release,
+};
+
+static struct configfs_group_operations f_midi2_group_ops = {
+ .make_group = f_midi2_opts_ep_make,
+ .drop_item = f_midi2_opts_ep_drop,
+};
+
+static const struct config_item_type f_midi2_func_type = {
+ .ct_item_ops = &f_midi2_item_ops,
+ .ct_group_ops = &f_midi2_group_ops,
+ .ct_attrs = f_midi2_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static void f_midi2_free_inst(struct usb_function_instance *f)
+{
+ struct f_midi2_opts *opts;
+
+ opts = container_of(f, struct f_midi2_opts, func_inst);
+
+ kfree(opts->info.iface_name);
+ kfree(opts);
+}
+
+/* gadget alloc_inst */
+static struct usb_function_instance *f_midi2_alloc_inst(void)
+{
+ struct f_midi2_opts *opts;
+ struct f_midi2_ep_opts *ep_opts;
+ struct f_midi2_block_opts *block_opts;
+ int ret;
+
+ opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+ if (!opts)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&opts->lock);
+ opts->func_inst.free_func_inst = f_midi2_free_inst;
+ opts->info.process_ump = true;
+ opts->info.static_block = true;
+ opts->info.num_reqs = 32;
+ opts->info.req_buf_size = 512;
+
+ /* create the default ep */
+ ret = f_midi2_ep_opts_create(opts, 0, &ep_opts);
+ if (ret) {
+ kfree(opts);
+ return ERR_PTR(ret);
+ }
+
+ /* create the default block */
+ ret = f_midi2_block_opts_create(ep_opts, 0, &block_opts);
+ if (ret) {
+ kfree(ep_opts);
+ kfree(opts);
+ return ERR_PTR(ret);
+ }
+
+ /* set up the default MIDI 1.0 group mapping (which is mandatory) */
+ block_opts->info.midi1_num_groups = 1;
+
+ config_group_init_type_name(&opts->func_inst.group, "",
+ &f_midi2_func_type);
+
+ config_group_init_type_name(&ep_opts->group, "ep.0",
+ &f_midi2_ep_type);
+ configfs_add_default_group(&ep_opts->group, &opts->func_inst.group);
+
+ config_group_init_type_name(&block_opts->group, "block.0",
+ &f_midi2_block_type);
+ configfs_add_default_group(&block_opts->group, &ep_opts->group);
+
+ return &opts->func_inst;
+}
+
+static void do_f_midi2_free(struct f_midi2 *midi2, struct f_midi2_opts *opts)
+{
+ mutex_lock(&opts->lock);
+ --opts->refcnt;
+ mutex_unlock(&opts->lock);
+ kfree(midi2->string_defs);
+ kfree(midi2);
+}
+
+static void f_midi2_free(struct usb_function *f)
+{
+ do_f_midi2_free(func_to_midi2(f),
+ container_of(f->fi, struct f_midi2_opts, func_inst));
+}
+
+/* verify the parameters set up via configfs;
+ * return the number of EPs or a negative error
+ */
+static int verify_parameters(struct f_midi2_opts *opts)
+{
+ int i, j, num_eps, num_blks;
+ struct f_midi2_ep_info *ep;
+ struct f_midi2_block_info *bp;
+
+ for (num_eps = 0; num_eps < MAX_UMP_EPS && opts->eps[num_eps];
+ num_eps++)
+ ;
+ if (!num_eps) {
+ pr_err("f_midi2: No EP is defined\n");
+ return -EINVAL;
+ }
+
+ num_blks = 0;
+ for (i = 0; i < num_eps; i++) {
+ ep = &opts->eps[i]->info;
+ if (!(ep->protocol_caps & ep->protocol)) {
+ pr_err("f_midi2: Invalid protocol 0x%x (caps 0x%x) for EP %d\n",
+ ep->protocol, ep->protocol_caps, i);
+ return -EINVAL;
+ }
+
+ for (j = 0; j < SNDRV_UMP_MAX_BLOCKS && opts->eps[i]->blks[j];
+ j++, num_blks++) {
+ bp = &opts->eps[i]->blks[j]->info;
+ if (bp->first_group + bp->num_groups > SNDRV_UMP_MAX_GROUPS) {
+ pr_err("f_midi2: Invalid group definitions for block %d:%d\n",
+ i, j);
+ return -EINVAL;
+ }
+
+ if (bp->midi1_num_groups) {
+ if (bp->midi1_first_group < bp->first_group ||
+ bp->midi1_first_group + bp->midi1_num_groups >
+ bp->first_group + bp->num_groups) {
+ pr_err("f_midi2: Invalid MIDI1 group definitions for block %d:%d\n",
+ i, j);
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ if (!num_blks) {
+ pr_err("f_midi2: No block is defined\n");
+ return -EINVAL;
+ }
+
+ return num_eps;
+}
+
+/* fill mapping between MIDI 1.0 cable and UMP EP/group */
+static void fill_midi1_cable_mapping(struct f_midi2 *midi2,
+ struct f_midi2_ep *ep,
+ int blk)
+{
+ const struct f_midi2_block_info *binfo = &ep->blks[blk].info;
+ struct midi1_cable_mapping *map;
+ int i, group;
+
+ if (!binfo->midi1_num_groups)
+ return;
+ if (binfo->direction != SNDRV_UMP_DIR_OUTPUT) {
+ group = binfo->midi1_first_group;
+ map = midi2->in_cable_mapping + midi2->num_midi1_in;
+ for (i = 0; i < binfo->midi1_num_groups; i++, group++, map++) {
+ if (midi2->num_midi1_in >= MAX_CABLES)
+ break;
+ map->ep = ep;
+ map->block = blk;
+ map->group = group;
+ midi2->num_midi1_in++;
+ /* store 1-based cable number */
+ ep->in_group_to_cable[group] = midi2->num_midi1_in;
+ }
+ }
+
+ if (binfo->direction != SNDRV_UMP_DIR_INPUT) {
+ group = binfo->midi1_first_group;
+ map = midi2->out_cable_mapping + midi2->num_midi1_out;
+ for (i = 0; i < binfo->midi1_num_groups; i++, group++, map++) {
+ if (midi2->num_midi1_out >= MAX_CABLES)
+ break;
+ map->ep = ep;
+ map->block = blk;
+ map->group = group;
+ midi2->num_midi1_out++;
+ }
+ }
+}
+
+/* gadget alloc callback */
+static struct usb_function *f_midi2_alloc(struct usb_function_instance *fi)
+{
+ struct f_midi2 *midi2;
+ struct f_midi2_opts *opts;
+ struct f_midi2_ep *ep;
+ struct f_midi2_block *bp;
+ int i, num_eps, blk;
+
+ midi2 = kzalloc(sizeof(*midi2), GFP_KERNEL);
+ if (!midi2)
+ return ERR_PTR(-ENOMEM);
+
+ opts = container_of(fi, struct f_midi2_opts, func_inst);
+ mutex_lock(&opts->lock);
+ num_eps = verify_parameters(opts);
+ if (num_eps < 0) {
+ mutex_unlock(&opts->lock);
+ kfree(midi2);
+ return ERR_PTR(num_eps);
+ }
+ ++opts->refcnt;
+ mutex_unlock(&opts->lock);
+
+ spin_lock_init(&midi2->queue_lock);
+
+ midi2->func.name = "midi2_func";
+ midi2->func.bind = f_midi2_bind;
+ midi2->func.unbind = f_midi2_unbind;
+ midi2->func.get_alt = f_midi2_get_alt;
+ midi2->func.set_alt = f_midi2_set_alt;
+ midi2->func.setup = f_midi2_setup;
+ midi2->func.disable = f_midi2_disable;
+ midi2->func.free_func = f_midi2_free;
+
+ midi2->info = opts->info;
+ midi2->num_eps = num_eps;
+
+ for (i = 0; i < num_eps; i++) {
+ ep = &midi2->midi2_eps[i];
+ ep->info = opts->eps[i]->info;
+ ep->card = midi2;
+ for (blk = 0; blk < SNDRV_UMP_MAX_BLOCKS &&
+ opts->eps[i]->blks[blk]; blk++) {
+ bp = &ep->blks[blk];
+ ep->num_blks++;
+ bp->info = opts->eps[i]->blks[blk]->info;
+ bp->gtb_id = ++midi2->total_blocks;
+ }
+ }
+
+ midi2->string_defs = kcalloc(midi2->total_blocks + 1,
+ sizeof(*midi2->string_defs), GFP_KERNEL);
+ if (!midi2->string_defs) {
+ do_f_midi2_free(midi2, opts);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (opts->info.iface_name && *opts->info.iface_name)
+ midi2->string_defs[STR_IFACE].s = opts->info.iface_name;
+ else
+ midi2->string_defs[STR_IFACE].s = ump_ep_name(&midi2->midi2_eps[0]);
+
+ for (i = 0; i < midi2->num_eps; i++) {
+ ep = &midi2->midi2_eps[i];
+ for (blk = 0; blk < ep->num_blks; blk++) {
+ bp = &ep->blks[blk];
+ midi2->string_defs[gtb_to_str_id(bp->gtb_id)].s =
+ ump_fb_name(&bp->info);
+
+ fill_midi1_cable_mapping(midi2, ep, blk);
+ }
+ }
+
+ if (!midi2->num_midi1_in && !midi2->num_midi1_out) {
+ pr_err("f_midi2: MIDI1 definition is missing\n");
+ do_f_midi2_free(midi2, opts);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return &midi2->func;
+}
+
+DECLARE_USB_FUNCTION_INIT(midi2, f_midi2_alloc_inst, f_midi2_alloc);
+
+MODULE_LICENSE("GPL");
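
The function above registers itself as "midi2" (see DECLARE_USB_FUNCTION_INIT), so an instance can be created through the usual gadget configfs tree; the default ep.0 and block.0 groups are created automatically by f_midi2_alloc_inst(). A minimal usage sketch, where the gadget name "g1", instance name "usb0", UDC name "my_udc" and the attribute values are illustrative only:

    cd /sys/kernel/config/usb_gadget/g1
    mkdir functions/midi2.usb0
    echo "MIDI 2.0 Gadget" > functions/midi2.usb0/iface_name
    echo 2 > functions/midi2.usb0/ep.0/protocol
    echo 3 > functions/midi2.usb0/ep.0/block.0/direction
    echo 1 > functions/midi2.usb0/ep.0/block.0/num_groups
    ln -s functions/midi2.usb0 configs/c.1/
    echo my_udc > UDC

On the device side the bound function then appears as an ALSA UMP card ("MIDI 2.0 Gadget") whose read-only "Operation Mode" control, defined above, reports which altsetting (MIDI 1.0 or MIDI 2.0) the host is currently using.
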
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 424bb3b666db..feccf4c8cc4f 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -80,21 +80,6 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
return container_of(f, struct f_ncm, port.func);
}
-/* peak (theoretical) bulk transfer rate in bits-per-second */
-static inline unsigned ncm_bitrate(struct usb_gadget *g)
-{
- if (!g)
- return 0;
- else if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
- return 4250000000U;
- else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
- return 3750000000U;
- else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
- return 13 * 512 * 8 * 1000 * 8;
- else
- return 19 * 64 * 1 * 1000 * 8;
-}
-
/*-------------------------------------------------------------------------*/
/*
@@ -576,10 +561,10 @@ static void ncm_do_notify(struct f_ncm *ncm)
/* SPEED_CHANGE data is up/down speeds in bits/sec */
data = req->buf + sizeof *event;
- data[0] = cpu_to_le32(ncm_bitrate(cdev->gadget));
+ data[0] = cpu_to_le32(gether_bitrate(cdev->gadget));
data[1] = data[0];
- DBG(cdev, "notify speed %u\n", ncm_bitrate(cdev->gadget));
+ DBG(cdev, "notify speed %u\n", gether_bitrate(cdev->gadget));
ncm->notify_state = NCM_NOTIFY_CONNECT;
break;
}
@@ -1544,9 +1529,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
ncm->task_timer.function = ncm_tx_timeout;
- DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",
- gadget_is_superspeed(c->cdev->gadget) ? "super" :
- gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n",
ncm->port.in_ep->name, ncm->port.out_ep->name,
ncm->notify->name);
return 0;
diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c
index ab26d84ed95e..dcb093210305 100644
--- a/drivers/usb/gadget/function/f_obex.c
+++ b/drivers/usb/gadget/function/f_obex.c
@@ -365,9 +365,8 @@ static int obex_bind(struct usb_configuration *c, struct usb_function *f)
if (status)
goto fail;
- dev_dbg(&cdev->gadget->dev, "obex ttyGS%d: %s speed IN/%s OUT/%s\n",
+ dev_dbg(&cdev->gadget->dev, "obex ttyGS%d: IN/%s OUT/%s\n",
obex->port_num,
- gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
obex->port.in->name, obex->port.out->name);
return 0;
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index ee95e8f5f9d4..b47f99d17ee9 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -84,19 +84,6 @@ static inline struct f_rndis *func_to_rndis(struct usb_function *f)
return container_of(f, struct f_rndis, port.func);
}
-/* peak (theoretical) bulk transfer rate in bits-per-second */
-static unsigned int bitrate(struct usb_gadget *g)
-{
- if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
- return 4250000000U;
- if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
- return 3750000000U;
- else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
- return 13 * 512 * 8 * 1000 * 8;
- else
- return 19 * 64 * 1 * 1000 * 8;
-}
-
/*-------------------------------------------------------------------------*/
/*
@@ -640,7 +627,7 @@ static void rndis_open(struct gether *geth)
DBG(cdev, "%s\n", __func__);
rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3,
- bitrate(cdev->gadget) / 100);
+ gether_bitrate(cdev->gadget) / 100);
rndis_signal_connect(rndis->params);
}
@@ -811,9 +798,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
* until we're activated via set_alt().
*/
- DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
- gadget_is_superspeed(c->cdev->gadget) ? "super" :
- gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ DBG(cdev, "RNDIS: IN/%s OUT/%s NOTIFY/%s\n",
rndis->port.in_ep->name, rndis->port.out_ep->name,
rndis->notify->name);
return 0;
diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c
index a9480b9e312e..65c50092aea2 100644
--- a/drivers/usb/gadget/function/f_serial.c
+++ b/drivers/usb/gadget/function/f_serial.c
@@ -236,10 +236,8 @@ static int gser_bind(struct usb_configuration *c, struct usb_function *f)
gser_ss_function, gser_ss_function);
if (status)
goto fail;
- dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n",
+ dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: IN/%s OUT/%s\n",
gser->port_num,
- gadget_is_superspeed(c->cdev->gadget) ? "super" :
- gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
gser->port.in->name, gser->port.out->name);
return 0;
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 6803cd60cc6d..2edbd9b510d6 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -436,9 +436,7 @@ no_iso:
if (ret)
return ret;
- DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s\n",
- (gadget_is_superspeed(c->cdev->gadget) ? "super" :
- (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")),
+ DBG(cdev, "%s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s\n",
f->name, ss->in_ep->name, ss->out_ep->name,
ss->iso_in_ep ? ss->iso_in_ep->name : "<none>",
ss->iso_out_ep ? ss->iso_out_ep->name : "<none>");
diff --git a/drivers/usb/gadget/function/f_subset.c b/drivers/usb/gadget/function/f_subset.c
index 51c1cae162d9..8ae9689ef2a0 100644
--- a/drivers/usb/gadget/function/f_subset.c
+++ b/drivers/usb/gadget/function/f_subset.c
@@ -367,9 +367,7 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
* until we're activated via set_alt().
*/
- DBG(cdev, "CDC Subset: %s speed IN/%s OUT/%s\n",
- gadget_is_superspeed(c->cdev->gadget) ? "super" :
- gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+ DBG(cdev, "CDC Subset: IN/%s OUT/%s\n",
geth->port.in_ep->name, geth->port.out_ep->name);
return 0;
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 5e919fb65833..faa398109431 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -719,21 +719,13 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
}
uvc->enable_interrupt_ep = opts->enable_interrupt_ep;
- if (gadget_is_superspeed(c->cdev->gadget))
- ep = usb_ep_autoconfig_ss(cdev->gadget, &uvc_ss_streaming_ep,
- &uvc_ss_streaming_comp);
- else if (gadget_is_dualspeed(cdev->gadget))
- ep = usb_ep_autoconfig(cdev->gadget, &uvc_hs_streaming_ep);
- else
- ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep);
-
+ ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep);
if (!ep) {
uvcg_info(f, "Unable to allocate streaming EP\n");
goto error;
}
uvc->video.ep = ep;
- uvc_fs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
uvc_hs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
uvc_ss_streaming_ep.bEndpointAddress = uvc->video.ep->address;
@@ -788,21 +780,19 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
f->fs_descriptors = NULL;
goto error;
}
- if (gadget_is_dualspeed(cdev->gadget)) {
- f->hs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_HIGH);
- if (IS_ERR(f->hs_descriptors)) {
- ret = PTR_ERR(f->hs_descriptors);
- f->hs_descriptors = NULL;
- goto error;
- }
+
+ f->hs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_HIGH);
+ if (IS_ERR(f->hs_descriptors)) {
+ ret = PTR_ERR(f->hs_descriptors);
+ f->hs_descriptors = NULL;
+ goto error;
}
- if (gadget_is_superspeed(c->cdev->gadget)) {
- f->ss_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_SUPER);
- if (IS_ERR(f->ss_descriptors)) {
- ret = PTR_ERR(f->ss_descriptors);
- f->ss_descriptors = NULL;
- goto error;
- }
+
+ f->ss_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_SUPER);
+ if (IS_ERR(f->ss_descriptors)) {
+ ret = PTR_ERR(f->ss_descriptors);
+ f->ss_descriptors = NULL;
+ goto error;
}
/* Preallocate control endpoint request. */
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index a366abb45623..4bb0553da658 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -93,11 +93,10 @@ struct eth_dev {
#define DEFAULT_QLEN 2 /* double buffering by default */
-/* for dual-speed hardware, use deeper queues at high/super speed */
+/* use deeper queues at high/super speed */
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
{
- if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
- gadget->speed >= USB_SPEED_SUPER))
+ if (gadget->speed == USB_SPEED_HIGH || gadget->speed >= USB_SPEED_SUPER)
return qmult * DEFAULT_QLEN;
else
return DEFAULT_QLEN;
diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
index 851ee10d6e63..34be220cef77 100644
--- a/drivers/usb/gadget/function/u_ether.h
+++ b/drivers/usb/gadget/function/u_ether.h
@@ -279,4 +279,17 @@ static inline bool can_support_ecm(struct usb_gadget *gadget)
return true;
}
+/* peak (theoretical) bulk transfer rate in bits-per-second */
+static inline unsigned int gether_bitrate(struct usb_gadget *g)
+{
+ if (g->speed >= USB_SPEED_SUPER_PLUS)
+ return 4250000000U;
+ if (g->speed == USB_SPEED_SUPER)
+ return 3750000000U;
+ else if (g->speed == USB_SPEED_HIGH)
+ return 13 * 512 * 8 * 1000 * 8;
+ else
+ return 19 * 64 * 1 * 1000 * 8;
+}
+
#endif /* __U_ETHER_H */
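
For reference, the constants in gether_bitrate() work out as follows: high speed allows up to 13 bulk packets of 512 bytes per 125 us microframe (8 microframes per 1 ms frame, 1000 frames per second), i.e. 13 * 512 * 8 * 1000 * 8 = 425,984,000 bits/s, while full speed allows 19 bulk packets of 64 bytes per 1 ms frame, i.e. 19 * 64 * 1000 * 8 = 9,728,000 bits/s. The SuperSpeed and SuperSpeedPlus values are the fixed 3.75 and 4.25 Gbit/s figures carried over unchanged from the per-function copies removed above.
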
diff --git a/drivers/usb/gadget/function/u_midi2.h b/drivers/usb/gadget/function/u_midi2.h
new file mode 100644
index 000000000000..4e7adb41dfb7
--- /dev/null
+++ b/drivers/usb/gadget/function/u_midi2.h
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Utility definitions for MIDI 2.0 function
+ */
+
+#ifndef U_MIDI2_H
+#define U_MIDI2_H
+
+#include <linux/usb/composite.h>
+#include <sound/asound.h>
+
+struct f_midi2_opts;
+struct f_midi2_ep_opts;
+struct f_midi2_block_opts;
+
+/* UMP Function Block info */
+struct f_midi2_block_info {
+ unsigned int direction; /* FB direction: 1-3 */
+ unsigned int first_group; /* first UMP group: 0-15 */
+ unsigned int num_groups; /* number of UMP groups: 1-16 */
+ unsigned int midi1_first_group; /* first UMP group for MIDI 1.0 */
+ unsigned int midi1_num_groups; /* number of UMP groups for MIDI 1.0 */
+ unsigned int ui_hint; /* UI-hint: 0-3 */
+ unsigned int midi_ci_version; /* MIDI-CI version: 0-255 */
+ unsigned int sysex8_streams; /* number of sysex8 streams: 0-255 */
+ unsigned int is_midi1; /* MIDI 1.0 port: 0-2 */
+ bool active; /* FB active flag: bool */
+ const char *name; /* FB name */
+};
+
+/* UMP Endpoint info */
+struct f_midi2_ep_info {
+ unsigned int protocol_caps; /* protocol capabilities: 1-3 */
+ unsigned int protocol; /* default protocol: 1-2 */
+ unsigned int manufacturer; /* manufacturer id: 0-0xffffff */
+ unsigned int family; /* device family id: 0-0xffff */
+ unsigned int model; /* device model id: 0-0xffff */
+ unsigned int sw_revision; /* software revision: 32bit */
+
+ const char *ep_name; /* Endpoint name */
+ const char *product_id; /* Product ID */
+};
+
+struct f_midi2_card_info {
+ bool process_ump; /* process UMP stream: bool */
+ bool static_block; /* static FBs: bool */
+ unsigned int req_buf_size; /* request buffer size */
+ unsigned int num_reqs; /* number of requests */
+ const char *iface_name; /* interface name */
+};
+
+struct f_midi2_block_opts {
+ struct config_group group;
+ unsigned int id;
+ struct f_midi2_block_info info;
+ struct f_midi2_ep_opts *ep;
+};
+
+struct f_midi2_ep_opts {
+ struct config_group group;
+ unsigned int index;
+ struct f_midi2_ep_info info;
+ struct f_midi2_block_opts *blks[SNDRV_UMP_MAX_BLOCKS];
+ struct f_midi2_opts *opts;
+};
+
+#define MAX_UMP_EPS 4
+#define MAX_CABLES 16
+
+struct f_midi2_opts {
+ struct usb_function_instance func_inst;
+ struct mutex lock;
+ int refcnt;
+
+ struct f_midi2_card_info info;
+
+ unsigned int num_eps;
+ struct f_midi2_ep_opts *eps[MAX_UMP_EPS];
+};
+
+#endif /* U_MIDI2_H */
diff --git a/drivers/usb/gadget/function/u_phonet.h b/drivers/usb/gadget/function/u_phonet.h
index c53233b37192..ff62ca22c40d 100644
--- a/drivers/usb/gadget/function/u_phonet.h
+++ b/drivers/usb/gadget/function/u_phonet.h
@@ -20,7 +20,6 @@ struct f_phonet_opts {
struct net_device *gphonet_setup_default(void);
void gphonet_set_gadget(struct net_device *net, struct usb_gadget *g);
int gphonet_register_netdev(struct net_device *net);
-int phonet_bind_config(struct usb_configuration *c, struct net_device *dev);
void gphonet_cleanup(struct net_device *dev);
#endif /* __U_PHONET_H */
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 1115396b46a0..a92eb6d90976 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -734,12 +734,12 @@ exit:
spin_unlock_irq(&port->port_lock);
}
-static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
+static ssize_t gs_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
- pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
+ pr_vdebug("gs_write: ttyGS%d (%p) writing %zu bytes\n",
port->port_num, tty, count);
spin_lock_irqsave(&port->port_lock, flags);
@@ -753,7 +753,7 @@ static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
return count;
}
-static int gs_put_char(struct tty_struct *tty, unsigned char ch)
+static int gs_put_char(struct tty_struct *tty, u8 ch)
{
struct gs_port *port = tty->driver_data;
unsigned long flags;
diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h
index 102a7323a1fd..901d99310bc4 100644
--- a/drivers/usb/gadget/function/u_serial.h
+++ b/drivers/usb/gadget/function/u_serial.h
@@ -71,8 +71,4 @@ void gserial_disconnect(struct gserial *);
void gserial_suspend(struct gserial *p);
void gserial_resume(struct gserial *p);
-/* functions are bound to configurations by a config or gadget driver */
-int gser_bind_config(struct usb_configuration *c, u8 port_num);
-int obex_bind_config(struct usb_configuration *c, u8 port_num);
-
#endif /* __U_SERIAL_H */
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 100475b1363e..6751de8b63ad 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -178,8 +178,6 @@ struct uvc_file_handle {
*/
extern void uvc_function_setup_continue(struct uvc_device *uvc);
-extern void uvc_endpoint_stream(struct uvc_device *dev);
-
extern void uvc_function_connect(struct uvc_device *uvc);
extern void uvc_function_disconnect(struct uvc_device *uvc);
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
index 16f2db8c4a2b..f60a019bb173 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/core.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
@@ -328,8 +328,7 @@ static int ast_vhub_probe(struct platform_device *pdev)
vhub->port_irq_mask = GENMASK(VHUB_IRQ_DEV1_BIT + vhub->max_ports - 1,
VHUB_IRQ_DEV1_BIT);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vhub->regs = devm_ioremap_resource(&pdev->dev, res);
+ vhub->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(vhub->regs)) {
dev_err(&pdev->dev, "Failed to map resources\n");
return PTR_ERR(vhub->regs);
diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c
index 01968e2167f9..2ef89a442f50 100644
--- a/drivers/usb/gadget/udc/aspeed_udc.c
+++ b/drivers/usb/gadget/udc/aspeed_udc.c
@@ -1468,7 +1468,6 @@ static int ast_udc_probe(struct platform_device *pdev)
enum usb_device_speed max_speed;
struct device *dev = &pdev->dev;
struct ast_udc_dev *udc;
- struct resource *res;
int rc;
udc = devm_kzalloc(&pdev->dev, sizeof(struct ast_udc_dev), GFP_KERNEL);
@@ -1484,8 +1483,7 @@ static int ast_udc_probe(struct platform_device *pdev)
udc->gadget.name = "aspeed-udc";
udc->gadget.dev.init_name = "gadget";
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- udc->reg = devm_ioremap_resource(&pdev->dev, res);
+ udc->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(udc->reg)) {
dev_err(&pdev->dev, "Failed to map resources\n");
return PTR_ERR(udc->reg);
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 6c0ed3fa5eb1..02b1bef5e22e 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -2285,15 +2285,13 @@ static int usba_udc_probe(struct platform_device *pdev)
udc->gadget = usba_gadget_template;
INIT_LIST_HEAD(&udc->gadget.ep_list);
- res = platform_get_resource(pdev, IORESOURCE_MEM, CTRL_IOMEM_ID);
- udc->regs = devm_ioremap_resource(&pdev->dev, res);
+ udc->regs = devm_platform_get_and_ioremap_resource(pdev, CTRL_IOMEM_ID, &res);
if (IS_ERR(udc->regs))
return PTR_ERR(udc->regs);
dev_info(&pdev->dev, "MMIO registers at %pR mapped at %p\n",
res, udc->regs);
- res = platform_get_resource(pdev, IORESOURCE_MEM, FIFO_IOMEM_ID);
- udc->fifo = devm_ioremap_resource(&pdev->dev, res);
+ udc->fifo = devm_platform_get_and_ioremap_resource(pdev, FIFO_IOMEM_ID, &res);
if (IS_ERR(udc->fifo))
return PTR_ERR(udc->fifo);
dev_info(&pdev->dev, "FIFO at %pR mapped at %p\n", res, udc->fifo);
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 7d49d8a0b00c..7166d1117742 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -40,6 +40,7 @@ static const struct bus_type gadget_bus_type;
* @allow_connect: Indicates whether UDC is allowed to be pulled up.
* Set/cleared by gadget_(un)bind_driver() after gadget driver is bound or
* unbound.
+ * @vbus_work: work routine to handle VBUS status change notifications.
* @connect_lock: protects udc->started, gadget->connect,
* gadget->allow_connect and gadget->deactivate. The routines
* usb_gadget_connect_locked(), usb_gadget_disconnect_locked(),
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 9c5dc1c1a68e..4aae86b47edf 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -1959,6 +1959,8 @@ static void ch9getstatus(struct qe_udc *udc, u8 request_type, u16 value,
} else if ((request_type & USB_RECIP_MASK) == USB_RECIP_ENDPOINT) {
/* Get endpoint status */
int pipe = index & USB_ENDPOINT_NUMBER_MASK;
+ if (pipe >= USB_MAX_ENDPOINTS)
+ goto stall;
struct qe_ep *target_ep = &udc->eps[pipe];
u16 usep;
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index a67873a074b7..ee5705d336e3 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -36,7 +36,6 @@
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include <linux/dmapool.h>
-#include <linux/of_device.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -672,7 +671,7 @@ static int fsl_ep_disable(struct usb_ep *_ep)
static struct usb_request *
fsl_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
- struct fsl_req *req = NULL;
+ struct fsl_req *req;
req = kzalloc(sizeof *req, gfp_flags);
if (!req)
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 09762559912d..c6dfa7cccc11 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
@@ -36,9 +37,7 @@
#include <linux/dmapool.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
-#include <linux/of_address.h>
+#include <linux/of.h>
#include <asm/byteorder.h>
@@ -2137,15 +2136,15 @@ static int gr_probe(struct platform_device *pdev)
return PTR_ERR(regs);
dev->irq = platform_get_irq(pdev, 0);
- if (dev->irq <= 0)
- return -ENODEV;
+ if (dev->irq < 0)
+ return dev->irq;
/* Some core configurations has separate irqs for IN and OUT events */
dev->irqi = platform_get_irq(pdev, 1);
if (dev->irqi > 0) {
dev->irqo = platform_get_irq(pdev, 2);
- if (dev->irqo <= 0)
- return -ENODEV;
+ if (dev->irqo < 0)
+ return dev->irqo;
} else {
dev->irqi = 0;
}
diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
index 12c519f32bf7..2d57786d3db7 100644
--- a/drivers/usb/gadget/udc/max3420_udc.c
+++ b/drivers/usb/gadget/udc/max3420_udc.c
@@ -19,9 +19,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/bitfield.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/prefetch.h>
#include <linux/usb/ch9.h>
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index 3473048a85f5..2a421f0ff931 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -665,7 +665,7 @@ static int mv_u3d_ep_disable(struct usb_ep *_ep)
static struct usb_request *
mv_u3d_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
- struct mv_u3d_req *req = NULL;
+ struct mv_u3d_req *req;
req = kzalloc(sizeof *req, gfp_flags);
if (!req)
@@ -1779,7 +1779,7 @@ static void mv_u3d_remove(struct platform_device *dev)
static int mv_u3d_probe(struct platform_device *dev)
{
- struct mv_u3d *u3d = NULL;
+ struct mv_u3d *u3d;
struct mv_usb_platform_data *pdata = dev_get_platdata(&dev->dev);
int retval = 0;
struct resource *r;
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 79db74e2040b..d888dcda2bc8 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -595,7 +595,7 @@ static int mv_ep_disable(struct usb_ep *_ep)
static struct usb_request *
mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
- struct mv_req *req = NULL;
+ struct mv_req *req;
req = kzalloc(sizeof *req, gfp_flags);
if (!req)
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index c4e1d957f913..61424cfd2e1c 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -23,7 +23,7 @@
#include <linux/prefetch.h>
#include <linux/byteorder/generic.h>
#include <linux/platform_data/pxa2xx_udc.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/usb.h>
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 59bb25de2015..3b01734ce1b7 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -14,7 +14,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/usb/gadget/udc/renesas_usbf.c b/drivers/usb/gadget/udc/renesas_usbf.c
index 6cd0af83e91e..657f265ac7cc 100644
--- a/drivers/usb/gadget/udc/renesas_usbf.c
+++ b/drivers/usb/gadget/udc/renesas_usbf.c
@@ -12,10 +12,9 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/usb/composite.h>
@@ -3379,7 +3378,6 @@ MODULE_DEVICE_TABLE(of, usbf_match);
static struct platform_driver udc_driver = {
.driver = {
.name = "usbf_renesas",
- .owner = THIS_MODULE,
.of_match_table = usbf_match,
},
.probe = usbf_probe,
diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
index 0ed685db149d..547af2ed9e5e 100644
--- a/drivers/usb/gadget/udc/snps_udc_plat.c
+++ b/drivers/usb/gadget/udc/snps_udc_plat.c
@@ -112,8 +112,7 @@ static int udc_plat_probe(struct platform_device *pdev)
spin_lock_init(&udc->lock);
udc->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- udc->virt_addr = devm_ioremap_resource(dev, res);
+ udc->virt_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(udc->virt_addr))
return PTR_ERR(udc->virt_addr);
@@ -301,7 +300,6 @@ static const struct dev_pm_ops udc_plat_pm_ops = {
};
#endif
-#if defined(CONFIG_OF)
static const struct of_device_id of_udc_match[] = {
{ .compatible = "brcm,ns2-udc", },
{ .compatible = "brcm,cygnus-udc", },
@@ -309,14 +307,13 @@ static const struct of_device_id of_udc_match[] = {
{ }
};
MODULE_DEVICE_TABLE(of, of_udc_match);
-#endif
static struct platform_driver udc_plat_driver = {
.probe = udc_plat_probe,
.remove_new = udc_plat_remove,
.driver = {
.name = "snps-udc-plat",
- .of_match_table = of_match_ptr(of_udc_match),
+ .of_match_table = of_udc_match,
#ifdef CONFIG_PM_SLEEP
.pm = &udc_plat_pm_ops,
#endif
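
For context (illustrative sketch, not part of the patch): once the #if defined(CONFIG_OF) guard and of_match_ptr() are gone, the OF match table is defined and referenced unconditionally, so it cannot trigger an unused-variable warning on !CONFIG_OF builds. A made-up driver showing the resulting shape:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-udc" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct platform_driver foo_driver = {
	.driver = {
		.name		= "foo-udc",
		.of_match_table	= foo_of_match,
	},
};
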
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index df6028f7b273..cb85168fd00c 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/tegra/xusb.h>
#include <linux/pm_domain.h>
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index a4a7b90a97e7..56b8286a8009 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -18,10 +18,8 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -1617,13 +1615,13 @@ static void xudc_getstatus(struct xusb_udc *udc)
case USB_RECIP_INTERFACE:
break;
case USB_RECIP_ENDPOINT:
- epnum = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
+ epnum = le16_to_cpu(udc->setup.wIndex) & USB_ENDPOINT_NUMBER_MASK;
if (epnum >= XUSB_MAX_ENDPOINTS)
goto stall;
target_ep = &udc->ep[epnum];
epcfgreg = udc->read_fn(udc->addr + target_ep->offset);
halt = epcfgreg & XUSB_EP_CFG_STALL_MASK;
- if (udc->setup.wIndex & USB_DIR_IN) {
+ if (le16_to_cpu(udc->setup.wIndex) & USB_DIR_IN) {
if (!target_ep->is_in)
goto stall;
} else {
@@ -1638,7 +1636,7 @@ static void xudc_getstatus(struct xusb_udc *udc)
}
req->usb_req.length = 2;
- *(u16 *)req->usb_req.buf = cpu_to_le16(status);
+ *(__le16 *)req->usb_req.buf = cpu_to_le16(status);
ret = __xudc_ep0_queue(ep0, req);
if (ret == 0)
return;
@@ -1666,7 +1664,7 @@ static void xudc_set_clear_feature(struct xusb_udc *udc)
switch (udc->setup.bRequestType) {
case USB_RECIP_DEVICE:
- switch (udc->setup.wValue) {
+ switch (le16_to_cpu(udc->setup.wValue)) {
case USB_DEVICE_TEST_MODE:
/*
* The Test Mode will be executed
@@ -1686,13 +1684,15 @@ static void xudc_set_clear_feature(struct xusb_udc *udc)
break;
case USB_RECIP_ENDPOINT:
if (!udc->setup.wValue) {
- endpoint = udc->setup.wIndex & USB_ENDPOINT_NUMBER_MASK;
+ endpoint = le16_to_cpu(udc->setup.wIndex) &
+ USB_ENDPOINT_NUMBER_MASK;
if (endpoint >= XUSB_MAX_ENDPOINTS) {
xudc_ep0_stall(udc);
return;
}
target_ep = &udc->ep[endpoint];
- outinbit = udc->setup.wIndex & USB_ENDPOINT_DIR_MASK;
+ outinbit = le16_to_cpu(udc->setup.wIndex) &
+ USB_ENDPOINT_DIR_MASK;
outinbit = outinbit >> 7;
/* Make sure direction matches.*/
@@ -1755,9 +1755,9 @@ static void xudc_handle_setup(struct xusb_udc *udc)
memcpy(&setup, ep0rambase, 8);
udc->setup = setup;
- udc->setup.wValue = cpu_to_le16(setup.wValue);
- udc->setup.wIndex = cpu_to_le16(setup.wIndex);
- udc->setup.wLength = cpu_to_le16(setup.wLength);
+ udc->setup.wValue = cpu_to_le16((u16 __force)setup.wValue);
+ udc->setup.wIndex = cpu_to_le16((u16 __force)setup.wIndex);
+ udc->setup.wLength = cpu_to_le16((u16 __force)setup.wLength);
/* Clear previous requests */
xudc_nuke(ep0, -ECONNRESET);
@@ -1869,7 +1869,7 @@ static void xudc_ep0_in(struct xusb_udc *udc)
u16 count = 0;
u16 length;
u8 *ep0rambase;
- u8 test_mode = udc->setup.wIndex >> 8;
+ u8 test_mode = le16_to_cpu(udc->setup.wIndex) >> 8;
req = list_first_entry(&ep0->queue, struct xusb_req, queue);
bytes_to_tx = req->usb_req.length - req->usb_req.actual;
@@ -1880,12 +1880,12 @@ static void xudc_ep0_in(struct xusb_udc *udc)
case USB_REQ_SET_ADDRESS:
/* Set the address of the device.*/
udc->write_fn(udc->addr, XUSB_ADDRESS_OFFSET,
- udc->setup.wValue);
+ le16_to_cpu(udc->setup.wValue));
break;
case USB_REQ_SET_FEATURE:
if (udc->setup.bRequestType ==
USB_RECIP_DEVICE) {
- if (udc->setup.wValue ==
+ if (le16_to_cpu(udc->setup.wValue) ==
USB_DEVICE_TEST_MODE)
udc->write_fn(udc->addr,
XUSB_TESTMODE_OFFSET,
@@ -2080,8 +2080,7 @@ static int xudc_probe(struct platform_device *pdev)
udc->req->usb_req.buf = buff;
/* Map the registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- udc->addr = devm_ioremap_resource(&pdev->dev, res);
+ udc->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(udc->addr))
return PTR_ERR(udc->addr);
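
The udc-xilinx hunks are endianness fixes: the fields of struct usb_ctrlrequest are __le16 on the wire, so they must pass through le16_to_cpu() before masking or comparing, and data written back to the host belongs behind a __le16 pointer so sparse can check it. A hedged, stand-alone sketch of the idiom (helper name and endpoint limit invented):

#include <linux/usb/ch9.h>
#include <asm/byteorder.h>

static void example_get_status(const struct usb_ctrlrequest *setup, void *buf)
{
	u16 epnum = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
	u16 status = 0;		/* computed from hardware state */

	if (epnum >= 16)	/* out-of-range index: caller stalls ep0 */
		return;

	*(__le16 *)buf = cpu_to_le16(status);
}
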
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 61808c51e702..6a6e1c510b28 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -102,8 +102,8 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
pr_debug("Initializing Atmel-SoC USB Host Controller\n");
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- retval = -ENODEV;
+ if (irq < 0) {
+ retval = irq;
goto fail_create_hcd;
}
@@ -122,8 +122,7 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
}
atmel_ehci = hcd_to_atmel_ehci_priv(hcd);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto fail_request_resource;
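
Many of the host-controller hunks in this series repeat one conversion: devm_platform_get_and_ioremap_resource() folds platform_get_resource(IORESOURCE_MEM, index) and devm_ioremap_resource() into a single call and, when the last argument is non-NULL, still hands back the struct resource for callers that need its start or size. A small sketch of the resulting probe fragment (names illustrative, not part of the patch):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int foo_map_regs(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dev_dbg(&pdev->dev, "mapped %pR\n", res);
	return 0;
}
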
diff --git a/drivers/usb/host/ehci-brcm.c b/drivers/usb/host/ehci-brcm.c
index 0362a082abb4..77e42c739c58 100644
--- a/drivers/usb/host/ehci-brcm.c
+++ b/drivers/usb/host/ehci-brcm.c
@@ -140,8 +140,8 @@ static int ehci_brcm_probe(struct platform_device *pdev)
return err;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return irq ? irq : -EINVAL;
+ if (irq < 0)
+ return irq;
/* Hook the hub control routine to work around a bug */
ehci_brcm_hc_driver.hub_control = ehci_brcm_hub_control;
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index 20f8c0ec6810..f644b131cc0b 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -173,8 +173,7 @@ static int exynos_ehci_probe(struct platform_device *pdev)
if (err)
goto fail_clk;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto fail_io;
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 81d60a695510..5b1ce394a417 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -22,7 +22,7 @@
#include <linux/usb/otg.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/io.h>
#include "ehci.h"
@@ -87,8 +87,7 @@ static int fsl_ehci_drv_probe(struct platform_device *pdev)
goto err1;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err2;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index a1930db0da1c..802bfafb1012 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -755,10 +755,14 @@ restart:
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
- if (likely ((status & STS_ERR) == 0))
+ if (likely ((status & STS_ERR) == 0)) {
INCR(ehci->stats.normal);
- else
+ } else {
+ /* Force a port status check */
+ if (ehci->has_ci_pec_bug)
+ status |= STS_PCD;
INCR(ehci->stats.error);
+ }
bh = 1;
}
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index efe30e3be22f..1aee392e8492 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -674,7 +674,8 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
if ((temp & mask) != 0 || test_bit(i, &ehci->port_c_suspend)
|| (ehci->reset_done[i] && time_after_eq(
- jiffies, ehci->reset_done[i]))) {
+ jiffies, ehci->reset_done[i]))
+ || ehci_has_ci_pec_bug(ehci, temp)) {
if (i < 7)
buf [0] |= 1 << (i + 1);
else
@@ -875,6 +876,13 @@ int ehci_hub_control(
if (temp & PORT_PEC)
status |= USB_PORT_STAT_C_ENABLE << 16;
+ if (ehci_has_ci_pec_bug(ehci, temp)) {
+ status |= USB_PORT_STAT_C_ENABLE << 16;
+ ehci_info(ehci,
+ "PE is cleared by HW port:%d PORTSC:%08x\n",
+ wIndex + 1, temp);
+ }
+
if ((temp & PORT_OCC) && (!ignore_oc && !ehci->spurious_oc)){
status |= USB_PORT_STAT_C_OVERCURRENT << 16;
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 9320cf0e5bc7..2f1fc7eb8b72 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -142,8 +142,7 @@ static int mv_ehci_probe(struct platform_device *pdev)
goto err_put_hcd;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ehci_mv->base = devm_ioremap_resource(&pdev->dev, r);
+ ehci_mv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(ehci_mv->base)) {
retval = PTR_ERR(ehci_mv->base);
goto err_put_hcd;
diff --git a/drivers/usb/host/ehci-npcm7xx.c b/drivers/usb/host/ehci-npcm7xx.c
index ad79ce63afcf..3d3317a1a0b3 100644
--- a/drivers/usb/host/ehci-npcm7xx.c
+++ b/drivers/usb/host/ehci-npcm7xx.c
@@ -53,7 +53,7 @@ static int npcm7xx_ehci_hcd_drv_probe(struct platform_device *pdev)
int irq;
int retval;
- dev_dbg(&pdev->dev, "initializing npcm7xx ehci USB Controller\n");
+ dev_dbg(&pdev->dev, "initializing npcm7xx ehci USB Controller\n");
if (usb_disabled())
return -ENODEV;
@@ -79,8 +79,7 @@ static int npcm7xx_ehci_hcd_drv_probe(struct platform_device *pdev)
goto fail;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err_put_hcd;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index cb6509a735ac..b24f371a46f3 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -113,8 +113,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 2cfb27dc943a..6c47ab0a491d 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -13,8 +13,6 @@
#include <linux/platform_data/usb-ehci-orion.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/io.h>
@@ -220,8 +218,8 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
pr_debug("Initializing Orion-SoC USB Host Controller\n");
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- err = -ENODEV;
+ if (irq < 0) {
+ err = irq;
goto err;
}
@@ -234,8 +232,7 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
if (err)
goto err;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(regs)) {
err = PTR_ERR(regs);
goto err;
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 83bf56c9424f..98b073185e1c 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -359,8 +359,7 @@ static int ehci_platform_probe(struct platform_device *dev)
goto err_reset;
}
- res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
+ hcd->regs = devm_platform_get_and_ioremap_resource(dev, 0, &res_mem);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto err_power;
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index bd542b6fc46b..7e834587e7de 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -490,13 +490,14 @@ static int tt_no_collision(
static void enable_periodic(struct ehci_hcd *ehci)
{
if (ehci->periodic_count++)
- return;
+ goto out;
/* Stop waiting to turn off the periodic schedule */
ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);
/* Don't start the schedule until PSS is 0 */
ehci_poll_PSS(ehci);
+out:
turn_on_io_watchdog(ehci);
}
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index 0520e762801d..d31d9506e41a 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -82,8 +82,8 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
return -ENODEV;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- ret = -ENODEV;
+ if (irq < 0) {
+ ret = irq;
goto fail_create_hcd;
}
@@ -95,8 +95,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
goto fail_create_hcd;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto fail_request_resource;
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index 1407703649be..d0e94e4c9fe2 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -91,8 +91,7 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
goto fail;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err_put_hcd;
diff --git a/drivers/usb/host/ehci-st.c b/drivers/usb/host/ehci-st.c
index ee0976b815b4..2dbb0d86daaa 100644
--- a/drivers/usb/host/ehci-st.c
+++ b/drivers/usb/host/ehci-st.c
@@ -158,11 +158,6 @@ static int st_ehci_platform_probe(struct platform_device *dev)
irq = platform_get_irq(dev, 0);
if (irq < 0)
return irq;
- res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res_mem) {
- dev_err(&dev->dev, "no memory resource provided");
- return -ENXIO;
- }
hcd = usb_create_hcd(&ehci_platform_hc_driver, &dev->dev,
dev_name(&dev->dev));
@@ -222,14 +217,13 @@ static int st_ehci_platform_probe(struct platform_device *dev)
goto err_put_clks;
}
- hcd->rsrc_start = res_mem->start;
- hcd->rsrc_len = resource_size(res_mem);
-
- hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
+ hcd->regs = devm_platform_get_and_ioremap_resource(dev, 0, &res_mem);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto err_put_clks;
}
+ hcd->rsrc_start = res_mem->start;
+ hcd->rsrc_len = resource_size(res_mem);
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index c5c7f8782549..1441e3400796 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -207,6 +207,7 @@ struct ehci_hcd { /* one per controller */
unsigned has_fsl_port_bug:1; /* FreeScale */
unsigned has_fsl_hs_errata:1; /* Freescale HS quirk */
unsigned has_fsl_susp_errata:1; /* NXP SUSP quirk */
+ unsigned has_ci_pec_bug:1; /* ChipIdea PEC bug */
unsigned big_endian_mmio:1;
unsigned big_endian_desc:1;
unsigned big_endian_capbase:1;
@@ -708,6 +709,15 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
#define ehci_has_fsl_susp_errata(e) ((e)->has_fsl_susp_errata)
/*
+ * Some Freescale/NXP processors using ChipIdea IP have a bug in which
+ * disabling the port (PE is cleared) does not cause PEC to be asserted
+ * when frame babble is detected.
+ */
+#define ehci_has_ci_pec_bug(e, portsc) \
+ ((e)->has_ci_pec_bug && ((e)->command & CMD_PSE) \
+ && !(portsc & PORT_PEC) && !(portsc & PORT_PE))
+
+/*
* While most USB host controllers implement their registers in
* little-endian format, a minority (celleb companion chip) implement
* them in big endian format.
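
Reading the new quirk together: on affected ChipIdea controllers a frame-babble event clears PORT_PE without raising PORT_PEC, so the interrupt path forces STS_PCD and the hub code reports the enable change itself. The macro above can be read as the following equivalent helper (a restatement for clarity, assuming the ehci.h definitions shown in this hunk, not an addition to the patch):

static inline bool ci_pec_bug_hit(struct ehci_hcd *ehci, u32 portsc)
{
	return ehci->has_ci_pec_bug &&		/* quirk enabled for this HC */
	       (ehci->command & CMD_PSE) &&	/* periodic schedule running */
	       !(portsc & PORT_PEC) &&		/* no change event reported... */
	       !(portsc & PORT_PE);		/* ...yet the port got disabled */
}
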
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 66a045e01dad..9a1b5224f239 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -22,9 +22,10 @@
#include <linux/io.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
#include <soc/fsl/qe/qe.h>
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index a9877f2569f4..8508d37a2aff 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -10,7 +10,8 @@
#include <linux/fsl_devices.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 606f0a64f3b7..a52c3d858f3e 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2651,8 +2651,7 @@ static int isp1362_probe(struct platform_device *pdev)
if (IS_ERR(addr_reg))
return PTR_ERR(addr_reg);
- data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data_reg = devm_ioremap_resource(&pdev->dev, data);
+ data_reg = devm_platform_get_and_ioremap_resource(pdev, 0, &data);
if (IS_ERR(data_reg))
return PTR_ERR(data_reg);
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index b805c4b52ac3..f691cd98a574 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -17,13 +17,13 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/platform_data/atmel.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
@@ -190,18 +190,15 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
int irq;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_dbg(dev, "hcd probe: missing irq resource\n");
+ if (irq < 0)
return irq;
- }
hcd = usb_create_hcd(driver, dev, "at91");
if (!hcd)
return -ENOMEM;
ohci_at91 = hcd_to_ohci_at91_priv(hcd);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(dev, res);
+ hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err;
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index e4191a868944..d9adae53466b 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -15,6 +15,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_data/usb-davinci.h>
@@ -435,8 +436,7 @@ static int ohci_da8xx_probe(struct platform_device *pdev)
goto err;
}
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(dev, mem);
+ hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
if (IS_ERR(hcd->regs)) {
error = PTR_ERR(hcd->regs);
goto err;
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index ab31c459b32d..20e26a474591 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -149,8 +149,7 @@ static int exynos_ohci_probe(struct platform_device *pdev)
if (err)
goto fail_clk;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto fail_io;
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index c04b2af5c766..8264c454f6bd 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -202,8 +202,7 @@ static int ohci_hcd_nxp_probe(struct platform_device *pdev)
goto fail_hcd;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto fail_resource;
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index 45a2c981319e..4a75507325dd 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -200,8 +200,7 @@ static int ohci_platform_probe(struct platform_device *dev)
goto err_reset;
}
- res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
+ hcd->regs = devm_platform_get_and_ioremap_resource(dev, 0, &res_mem);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto err_power;
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 35a7ad7e2569..f64bfe5f4d4d 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -15,9 +15,10 @@
*/
#include <linux/signal.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
static int
ohci_ppc_of_start(struct usb_hcd *hcd)
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 7410442f720f..357d9aee38a3 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -435,8 +435,7 @@ static int ohci_hcd_pxa27x_probe(struct platform_device *pdev)
if (!hcd)
return -ENOMEM;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, r);
+ hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err;
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index 0468eeb4fcfd..4b39e9d6f33a 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -195,8 +195,7 @@ static void ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (mem)
- release_mem_region(mem->start, resource_size(mem));
+ release_mem_region(mem->start, resource_size(mem));
/* mask interrupts and disable power */
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index f4b2656407dd..993f347c5c28 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -68,8 +68,7 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
goto fail;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
goto err_put_hcd;
diff --git a/drivers/usb/host/ohci-st.c b/drivers/usb/host/ohci-st.c
index 884e447a8098..214342013f7e 100644
--- a/drivers/usb/host/ohci-st.c
+++ b/drivers/usb/host/ohci-st.c
@@ -139,12 +139,6 @@ static int st_ohci_platform_probe(struct platform_device *dev)
if (irq < 0)
return irq;
- res_mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res_mem) {
- dev_err(&dev->dev, "no memory resource provided");
- return -ENXIO;
- }
-
hcd = usb_create_hcd(&ohci_platform_hc_driver, &dev->dev,
dev_name(&dev->dev));
if (!hcd)
@@ -199,14 +193,14 @@ static int st_ohci_platform_probe(struct platform_device *dev)
goto err_power;
}
- hcd->rsrc_start = res_mem->start;
- hcd->rsrc_len = resource_size(res_mem);
-
- hcd->regs = devm_ioremap_resource(&dev->dev, res_mem);
+ hcd->regs = devm_platform_get_and_ioremap_resource(dev, 0, &res_mem);
if (IS_ERR(hcd->regs)) {
err = PTR_ERR(hcd->regs);
goto err_power;
}
+ hcd->rsrc_start = res_mem->start;
+ hcd->rsrc_len = resource_size(res_mem);
+
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err)
goto err_power;
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 50c1ccabb0f5..d467472f9d3c 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -4230,8 +4230,7 @@ static int oxu_drv_probe(struct platform_device *pdev)
return irq;
dev_dbg(&pdev->dev, "IRQ resource %d\n", irq);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto error;
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index 71ca532fc086..3dec5dd3a0d5 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -91,8 +91,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
uhci = hcd_to_uhci(hcd);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ hcd->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(hcd->regs)) {
ret = PTR_ERR(hcd->regs);
goto err_rmr;
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index d3acc0829ee5..b74e98e94393 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -208,9 +208,8 @@ static void dbc_tty_close(struct tty_struct *tty, struct file *file)
tty_port_close(&port->port, tty, file);
}
-static int dbc_tty_write(struct tty_struct *tty,
- const unsigned char *buf,
- int count)
+static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
+ size_t count)
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
@@ -224,7 +223,7 @@ static int dbc_tty_write(struct tty_struct *tty,
return count;
}
-static int dbc_tty_put_char(struct tty_struct *tty, unsigned char ch)
+static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
{
struct dbc_port *port = tty->driver_data;
unsigned long flags;
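
The dbgtty change tracks a tree-wide tty API update: tty_operations::write() now takes a const u8 buffer and a size_t count and returns ssize_t, and put_char() takes a u8. A minimal sketch of a conforming write callback (driver name invented, not part of the patch):

#include <linux/tty.h>
#include <linux/tty_driver.h>

static ssize_t foo_tty_write(struct tty_struct *tty, const u8 *buf,
			     size_t count)
{
	/* a real driver would queue 'count' bytes from 'buf' here */
	return count;
}

static const struct tty_operations foo_tty_ops = {
	.write = foo_tty_write,
};
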
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 19a402123de0..8714ab5bf04d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1108,9 +1108,6 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_LS);
max_packets = MAX_PACKET(8);
break;
- case USB_SPEED_WIRELESS:
- xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
- return -EINVAL;
default:
/* Speed was set earlier, this shouldn't happen. */
return -EINVAL;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index b26ea7cb4357..28218c8f1837 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/usb/phy.h>
#include <linux/slab.h>
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index bf5261fed32c..ab9c5969e462 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/usb/phy.h>
#include "xhci.h"
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 4693d83351c6..6246d5ad1468 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -14,7 +14,7 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/phy/phy.h>
#include <linux/phy/tegra/xusb.h>
@@ -1912,6 +1912,15 @@ put_padctl:
return err;
}
+static void tegra_xusb_disable(struct tegra_xusb *tegra)
+{
+ tegra_xusb_powergate_partitions(tegra);
+ tegra_xusb_powerdomain_remove(tegra->dev, tegra);
+ tegra_xusb_phy_disable(tegra);
+ tegra_xusb_clk_disable(tegra);
+ regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
+}
+
static void tegra_xusb_remove(struct platform_device *pdev)
{
struct tegra_xusb *tegra = platform_get_drvdata(pdev);
@@ -1934,14 +1943,18 @@ static void tegra_xusb_remove(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
- tegra_xusb_powergate_partitions(tegra);
+ tegra_xusb_disable(tegra);
+ tegra_xusb_padctl_put(tegra->padctl);
+}
- tegra_xusb_powerdomain_remove(&pdev->dev, tegra);
+static void tegra_xusb_shutdown(struct platform_device *pdev)
+{
+ struct tegra_xusb *tegra = platform_get_drvdata(pdev);
- tegra_xusb_phy_disable(tegra);
- tegra_xusb_clk_disable(tegra);
- regulator_bulk_disable(tegra->soc->num_supplies, tegra->supplies);
- tegra_xusb_padctl_put(tegra->padctl);
+ pm_runtime_get_sync(&pdev->dev);
+ disable_irq(tegra->xhci_irq);
+ xhci_shutdown(tegra->hcd);
+ tegra_xusb_disable(tegra);
}
static bool xhci_hub_ports_suspended(struct xhci_hub *hub)
@@ -2652,6 +2665,7 @@ MODULE_DEVICE_TABLE(of, tegra_xusb_of_match);
static struct platform_driver tegra_xusb_driver = {
.probe = tegra_xusb_probe,
.remove_new = tegra_xusb_remove,
+ .shutdown = tegra_xusb_shutdown,
.driver = {
.name = "tegra-xusb",
.pm = &tegra_xusb_pm_ops,
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index fae994f679d4..e1b1b64a0723 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2194,7 +2194,6 @@ static unsigned int xhci_get_block_size(struct usb_device *udev)
case USB_SPEED_SUPER_PLUS:
return SS_BLOCK;
case USB_SPEED_UNKNOWN:
- case USB_SPEED_WIRELESS:
default:
/* Should never happen */
return 1;
@@ -2555,10 +2554,7 @@ static void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
case USB_SPEED_HIGH:
interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
break;
- case USB_SPEED_SUPER:
- case USB_SPEED_SUPER_PLUS:
- case USB_SPEED_UNKNOWN:
- case USB_SPEED_WIRELESS:
+ default:
/* Should never happen because only LS/FS/HS endpoints will get
* added to the endpoint list.
*/
@@ -2615,10 +2611,7 @@ static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
case USB_SPEED_HIGH:
interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
break;
- case USB_SPEED_SUPER:
- case USB_SPEED_SUPER_PLUS:
- case USB_SPEED_UNKNOWN:
- case USB_SPEED_WIRELESS:
+ default:
/* Should never happen because only LS/FS/HS endpoints will get
* added to the endpoint list.
*/
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index 14faec51d7a5..cecd7693b741 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -203,7 +203,7 @@ ATTRIBUTE_GROUPS(cypress);
static int cypress_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
- struct cypress *dev = NULL;
+ struct cypress *dev;
int retval = -ENOMEM;
/* allocate memory for our device state and initialize it */
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c
index 3e3802aaefa3..875016dd073c 100644
--- a/drivers/usb/misc/cytherm.c
+++ b/drivers/usb/misc/cytherm.c
@@ -304,20 +304,20 @@ static int cytherm_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
- struct usb_cytherm *dev = NULL;
+ struct usb_cytherm *dev;
int retval = -ENOMEM;
- dev = kzalloc (sizeof(struct usb_cytherm), GFP_KERNEL);
+ dev = kzalloc(sizeof(struct usb_cytherm), GFP_KERNEL);
if (!dev)
goto error_mem;
dev->udev = usb_get_dev(udev);
- usb_set_intfdata (interface, dev);
+ usb_set_intfdata(interface, dev);
dev->brightness = 0xFF;
- dev_info (&interface->dev,
+ dev_info(&interface->dev,
"Cypress thermometer device now attached\n");
return 0;
@@ -329,10 +329,10 @@ static void cytherm_disconnect(struct usb_interface *interface)
{
struct usb_cytherm *dev;
- dev = usb_get_intfdata (interface);
+ dev = usb_get_intfdata(interface);
/* first remove the files, then NULL the pointer */
- usb_set_intfdata (interface, NULL);
+ usb_set_intfdata(interface, NULL);
usb_put_dev(dev->udev);
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index 83f14ca1d38e..3da1a4659c5f 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -27,6 +27,17 @@
#include "onboard_usb_hub.h"
+/*
+ * Use generic names, as the actual names might differ between hubs. If a new
+ * hub requires more than the currently supported supplies, add a new one here.
+ */
+static const char * const supply_names[] = {
+ "vdd",
+ "vdd2",
+};
+
+#define MAX_SUPPLIES ARRAY_SIZE(supply_names)
+
static void onboard_hub_attach_usb_driver(struct work_struct *work);
static struct usb_device_driver onboard_hub_usbdev_driver;
@@ -40,7 +51,7 @@ struct usbdev_node {
};
struct onboard_hub {
- struct regulator *vdd;
+ struct regulator_bulk_data supplies[MAX_SUPPLIES];
struct device *dev;
const struct onboard_hub_pdata *pdata;
struct gpio_desc *reset_gpio;
@@ -55,9 +66,9 @@ static int onboard_hub_power_on(struct onboard_hub *hub)
{
int err;
- err = regulator_enable(hub->vdd);
+ err = regulator_bulk_enable(hub->pdata->num_supplies, hub->supplies);
if (err) {
- dev_err(hub->dev, "failed to enable regulator: %d\n", err);
+ dev_err(hub->dev, "failed to enable supplies: %d\n", err);
return err;
}
@@ -75,9 +86,9 @@ static int onboard_hub_power_off(struct onboard_hub *hub)
gpiod_set_value_cansleep(hub->reset_gpio, 1);
- err = regulator_disable(hub->vdd);
+ err = regulator_bulk_disable(hub->pdata->num_supplies, hub->supplies);
if (err) {
- dev_err(hub->dev, "failed to disable regulator: %d\n", err);
+ dev_err(hub->dev, "failed to disable supplies: %d\n", err);
return err;
}
@@ -232,6 +243,7 @@ static int onboard_hub_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
struct onboard_hub *hub;
+ unsigned int i;
int err;
hub = devm_kzalloc(dev, sizeof(*hub), GFP_KERNEL);
@@ -246,9 +258,18 @@ static int onboard_hub_probe(struct platform_device *pdev)
if (!hub->pdata)
return -EINVAL;
- hub->vdd = devm_regulator_get(dev, "vdd");
- if (IS_ERR(hub->vdd))
- return PTR_ERR(hub->vdd);
+ if (hub->pdata->num_supplies > MAX_SUPPLIES)
+ return dev_err_probe(dev, -EINVAL, "max %zu supplies supported!\n",
+ MAX_SUPPLIES);
+
+ for (i = 0; i < hub->pdata->num_supplies; i++)
+ hub->supplies[i].supply = supply_names[i];
+
+ err = devm_regulator_bulk_get(dev, hub->pdata->num_supplies, hub->supplies);
+ if (err) {
+ dev_err(dev, "Failed to get regulator supplies: %d\n", err);
+ return err;
+ }
hub->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
@@ -329,6 +350,7 @@ static struct platform_driver onboard_hub_driver = {
/************************** USB driver **************************/
+#define VENDOR_ID_CYPRESS 0x04b4
#define VENDOR_ID_GENESYS 0x05e3
#define VENDOR_ID_MICROCHIP 0x0424
#define VENDOR_ID_REALTEK 0x0bda
@@ -407,8 +429,11 @@ static void onboard_hub_usbdev_disconnect(struct usb_device *udev)
}
static const struct usb_device_id onboard_hub_id_table[] = {
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB33{0,1,2}x/CYUSB230x 3.0 */
+ { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB33{0,1,2}x/CYUSB230x 2.0 */
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_GENESYS, 0x0620) }, /* Genesys Logic GL3523 USB 3.1 */
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
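
The onboard hub driver now models its power inputs with the regulator bulk API so hubs such as the Cypress HX3 parts added here, which take two rails, share one code path with single-supply hubs. A rough sketch of the pattern under the same assumptions (two supplies named "vdd" and "vdd2"; helper name invented):

#include <linux/device.h>
#include <linux/regulator/consumer.h>

#define NUM_SUPPLIES 2

static const char * const supply_names[NUM_SUPPLIES] = { "vdd", "vdd2" };

static int foo_enable_supplies(struct device *dev,
			       struct regulator_bulk_data *supplies)
{
	int i, err;

	for (i = 0; i < NUM_SUPPLIES; i++)
		supplies[i].supply = supply_names[i];

	err = devm_regulator_bulk_get(dev, NUM_SUPPLIES, supplies);
	if (err)
		return err;

	return regulator_bulk_enable(NUM_SUPPLIES, supplies);
}
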
diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
index aca5f50eb0da..4026ba64c592 100644
--- a/drivers/usb/misc/onboard_usb_hub.h
+++ b/drivers/usb/misc/onboard_usb_hub.h
@@ -8,30 +8,42 @@
struct onboard_hub_pdata {
unsigned long reset_us; /* reset pulse width in us */
+ unsigned int num_supplies; /* number of supplies */
};
static const struct onboard_hub_pdata microchip_usb424_data = {
.reset_us = 1,
+ .num_supplies = 1,
};
static const struct onboard_hub_pdata realtek_rts5411_data = {
.reset_us = 0,
+ .num_supplies = 1,
};
static const struct onboard_hub_pdata ti_tusb8041_data = {
.reset_us = 3000,
+ .num_supplies = 1,
+};
+
+static const struct onboard_hub_pdata cypress_hx3_data = {
+ .reset_us = 10000,
+ .num_supplies = 2,
};
static const struct onboard_hub_pdata genesys_gl850g_data = {
.reset_us = 3,
+ .num_supplies = 1,
};
static const struct onboard_hub_pdata genesys_gl852g_data = {
.reset_us = 50,
+ .num_supplies = 1,
};
static const struct onboard_hub_pdata vialab_vl817_data = {
.reset_us = 10,
+ .num_supplies = 1,
};
static const struct of_device_id onboard_hub_match[] = {
@@ -39,8 +51,11 @@ static const struct of_device_id onboard_hub_match[] = {
{ .compatible = "usb424,2517", .data = &microchip_usb424_data, },
{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
+ { .compatible = "usb4b4,6504", .data = &cypress_hx3_data, },
+ { .compatible = "usb4b4,6506", .data = &cypress_hx3_data, },
{ .compatible = "usb5e3,608", .data = &genesys_gl850g_data, },
{ .compatible = "usb5e3,610", .data = &genesys_gl852g_data, },
+ { .compatible = "usb5e3,620", .data = &genesys_gl852g_data, },
{ .compatible = "usbbda,411", .data = &realtek_rts5411_data, },
{ .compatible = "usbbda,5411", .data = &realtek_rts5411_data, },
{ .compatible = "usbbda,414", .data = &realtek_rts5411_data, },
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index e4edb486b69e..7da404f55a6d 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -16,7 +16,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/nls.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
diff --git a/drivers/usb/misc/usb_u132.h b/drivers/usb/misc/usb_u132.h
deleted file mode 100644
index 1584efbbd704..000000000000
--- a/drivers/usb/misc/usb_u132.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
-* Common Header File for the Elan Digital Systems U132 adapter
-* this file should be included by both the "ftdi-u132" and
-* the "u132-hcd" modules.
-*
-* Copyright(C) 2006 Elan Digital Systems Limited
-*(http://www.elandigitalsystems.com)
-*
-* Author and Maintainer - Tony Olech - Elan Digital Systems
-*(tony.olech@elandigitalsystems.com)
-*
-* The driver was written by Tony Olech(tony.olech@elandigitalsystems.com)
-* based on various USB client drivers in the 2.6.15 linux kernel
-* with constant reference to the 3rd Edition of Linux Device Drivers
-* published by O'Reilly
-*
-* The U132 adapter is a USB to CardBus adapter specifically designed
-* for PC cards that contain an OHCI host controller. Typical PC cards
-* are the Orange Mobile 3G Option GlobeTrotter Fusion card.
-*
-* The U132 adapter will *NOT *work with PC cards that do not contain
-* an OHCI controller. A simple way to test whether a PC card has an
-* OHCI controller as an interface is to insert the PC card directly
-* into a laptop(or desktop) with a CardBus slot and if "lspci" shows
-* a new USB controller and "lsusb -v" shows a new OHCI Host Controller
-* then there is a good chance that the U132 adapter will support the
-* PC card.(you also need the specific client driver for the PC card)
-*
-* Please inform the Author and Maintainer about any PC cards that
-* contain OHCI Host Controller and work when directly connected to
-* an embedded CardBus slot but do not work when they are connected
-* via an ELAN U132 adapter.
-*
-* The driver consists of two modules, the "ftdi-u132" module is
-* a USB client driver that interfaces to the FTDI chip within
-* the U132 adapter manufactured by Elan Digital Systems, and the
-* "u132-hcd" module is a USB host controller driver that talks
-* to the OHCI controller within CardBus card that are inserted
-* in the U132 adapter.
-*
-* The "ftdi-u132" module should be loaded automatically by the
-* hot plug system when the U132 adapter is plugged in. The module
-* initialises the adapter which mostly consists of synchronising
-* the FTDI chip, before continuously polling the adapter to detect
-* PC card insertions. As soon as a PC card containing a recognised
-* OHCI controller is seen the "ftdi-u132" module explicitly requests
-* the kernel to load the "u132-hcd" module.
-*
-* The "ftdi-u132" module provides the interface to the inserted
-* PC card and the "u132-hcd" module uses the API to send and receive
-* data. The API features call-backs, so that part of the "u132-hcd"
-* module code will run in the context of one of the kernel threads
-* of the "ftdi-u132" module.
-*
-*/
-int ftdi_elan_switch_on_diagnostics(int number);
-void ftdi_elan_gone_away(struct platform_device *pdev);
-void start_usb_lock_device_tracing(void);
-struct u132_platform_data {
- u16 vendor;
- u16 device;
- u8 potpg;
- void (*port_power) (struct device *dev, int is_on);
- void (*reset) (struct device *dev);
-};
-int usb_ftdi_elan_edset_single(struct platform_device *pdev, u8 ed_number,
- void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits,
- void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
- int toggle_bits, int error_count, int condition_code, int repeat_number,
- int halted, int skipped, int actual, int non_null));
-int usb_ftdi_elan_edset_output(struct platform_device *pdev, u8 ed_number,
- void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits,
- void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
- int toggle_bits, int error_count, int condition_code, int repeat_number,
- int halted, int skipped, int actual, int non_null));
-int usb_ftdi_elan_edset_empty(struct platform_device *pdev, u8 ed_number,
- void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits,
- void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
- int toggle_bits, int error_count, int condition_code, int repeat_number,
- int halted, int skipped, int actual, int non_null));
-int usb_ftdi_elan_edset_input(struct platform_device *pdev, u8 ed_number,
- void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits,
- void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
- int toggle_bits, int error_count, int condition_code, int repeat_number,
- int halted, int skipped, int actual, int non_null));
-int usb_ftdi_elan_edset_setup(struct platform_device *pdev, u8 ed_number,
- void *endp, struct urb *urb, u8 address, u8 ep_number, u8 toggle_bits,
- void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
- int toggle_bits, int error_count, int condition_code, int repeat_number,
- int halted, int skipped, int actual, int non_null));
-int usb_ftdi_elan_edset_flush(struct platform_device *pdev, u8 ed_number,
- void *endp);
-int usb_ftdi_elan_read_pcimem(struct platform_device *pdev, int mem_offset,
- u8 width, u32 *data);
-int usb_ftdi_elan_write_pcimem(struct platform_device *pdev, int mem_offset,
- u8 width, u32 data);
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index c3114d9bd128..546deff754ba 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -305,7 +305,7 @@ static int sevseg_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(interface);
- struct usb_sevsegdev *mydev = NULL;
+ struct usb_sevsegdev *mydev;
int rc = -ENOMEM;
mydev = kzalloc(sizeof(struct usb_sevsegdev), GFP_KERNEL);
diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h
index b4a7662dded5..c11840b9a6f1 100644
--- a/drivers/usb/mtu3/mtu3.h
+++ b/drivers/usb/mtu3/mtu3.h
@@ -16,6 +16,7 @@
#include <linux/extcon.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/usb.h>
diff --git a/drivers/usb/mtu3/mtu3_host.c b/drivers/usb/mtu3/mtu3_host.c
index 177d2caf887c..9f2be22af844 100644
--- a/drivers/usb/mtu3/mtu3_host.c
+++ b/drivers/usb/mtu3/mtu3_host.c
@@ -11,6 +11,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
diff --git a/drivers/usb/musb/cppi_dma.h b/drivers/usb/musb/cppi_dma.h
index 16dd1ed44bb5..3606be897cb2 100644
--- a/drivers/usb/musb/cppi_dma.h
+++ b/drivers/usb/musb/cppi_dma.h
@@ -121,9 +121,6 @@ struct cppi {
struct list_head tx_complete;
};
-/* CPPI IRQ handler */
-extern irqreturn_t cppi_interrupt(int, void *);
-
struct cppi41_dma_channel {
struct dma_channel channel;
struct cppi41_dma_controller *controller;
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index 5aabdd7e2511..b38df9226278 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -10,7 +10,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/usb/role.h>
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index 598ee5c0bf34..0a35aab3ab81 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/usb/role.h>
diff --git a/drivers/usb/musb/mpfs.c b/drivers/usb/musb/mpfs.c
index 24b98716f7fc..f0f56df38835 100644
--- a/drivers/usb/musb/mpfs.c
+++ b/drivers/usb/musb/mpfs.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/usb/usb_phy_generic.h>
#include "musb_core.h"
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index ecbd3784bec3..b24adb5b399f 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2610,8 +2610,8 @@ static int musb_probe(struct platform_device *pdev)
int irq = platform_get_irq_byname(pdev, "mc");
void __iomem *base;
- if (irq <= 0)
- return -ENODEV;
+ if (irq < 0)
+ return irq;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index e2445ca3356d..0cd7fc468de8 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -199,10 +199,6 @@ tusb_dma_controller_create(struct musb *musb, void __iomem *base);
extern void tusb_dma_controller_destroy(struct dma_controller *c);
extern struct dma_controller *
-cppi_dma_controller_create(struct musb *musb, void __iomem *base);
-extern void cppi_dma_controller_destroy(struct dma_controller *c);
-
-extern struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base);
extern void cppi41_dma_controller_destroy(struct dma_controller *c);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 9119b1d51370..98b42dc04dee 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -26,9 +26,7 @@
#include <linux/sizes.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/usb/of.h>
#include <linux/debugfs.h>
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 31c44325e828..051c6da7cf6d 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1130,7 +1130,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
struct musb_ep *musb_ep = to_musb_ep(ep);
- struct musb_request *request = NULL;
+ struct musb_request *request;
request = kzalloc(sizeof *request, gfp_flags);
if (!request)
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index c5c6c4e09300..d54283fd026b 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/phy/phy-sun4i-usb.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index cbc707fe570f..461587629bf2 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -21,6 +21,7 @@
#include <linux/usb.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
@@ -1029,7 +1030,7 @@ static int tusb_musb_start(struct musb *musb)
void __iomem *tbase = musb->ctrl_base;
unsigned long flags;
u32 reg;
- int i;
+ int ret;
/*
* Enable or disable power to TUSB6010. When enabling, turn on 3.3 V and
@@ -1037,17 +1038,13 @@ static int tusb_musb_start(struct musb *musb)
* provide then PGOOD signal to TUSB6010 which will release it from reset.
*/
gpiod_set_value(glue->enable, 1);
- msleep(1);
/* Wait for 100ms until TUSB6010 pulls INT pin down */
- i = 100;
- while (i && gpiod_get_value(glue->intpin)) {
- msleep(1);
- i--;
- }
- if (!i) {
- pr_err("tusb: Powerup respones failed\n");
- return -ENODEV;
+ ret = read_poll_timeout(gpiod_get_value, reg, !reg, 5000, 100000, true,
+ glue->intpin);
+ if (ret) {
+ pr_err("tusb: Powerup response failed\n");
+ return ret;
}
spin_lock_irqsave(&musb->lock, flags);
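
The tusb6010 change swaps an open-coded msleep() loop for read_poll_timeout(): poll gpiod_get_value(glue->intpin) every 5 ms for at most 100 ms, sleep once before the first read (replacing the old unconditional msleep(1)), and get -ETIMEDOUT back if the pin never drops. A self-contained sketch of the same idiom (helper name invented, not part of the patch):

#include <linux/iopoll.h>
#include <linux/gpio/consumer.h>

static int wait_for_int_low(struct gpio_desc *intpin)
{
	int val;

	/* 0 on success, -ETIMEDOUT if the GPIO stays high for 100 ms */
	return read_poll_timeout(gpiod_get_value, val, !val,
				 5000, 100000, true, intpin);
}
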
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index e1a2b2ea098b..acd46b72899e 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -14,7 +14,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/iopoll.h>
@@ -388,19 +388,14 @@ static void __mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool disconnect)
static bool mxs_phy_is_otg_host(struct mxs_phy *mxs_phy)
{
- void __iomem *base = mxs_phy->phy.io_priv;
- u32 phyctrl = readl(base + HW_USBPHY_CTRL);
-
- if (IS_ENABLED(CONFIG_USB_OTG) &&
- !(phyctrl & BM_USBPHY_CTRL_OTG_ID_VALUE))
- return true;
-
- return false;
+ return IS_ENABLED(CONFIG_USB_OTG) &&
+ mxs_phy->phy.last_event == USB_EVENT_ID;
}
static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
{
bool vbus_is_on = false;
+ enum usb_phy_events last_event = mxs_phy->phy.last_event;
/* If the SoCs don't need to disconnect line without vbus, quit */
if (!(mxs_phy->data->flags & MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS))
@@ -412,7 +407,8 @@ static void mxs_phy_disconnect_line(struct mxs_phy *mxs_phy, bool on)
vbus_is_on = mxs_phy_get_vbus_status(mxs_phy);
- if (on && !vbus_is_on && !mxs_phy_is_otg_host(mxs_phy))
+ if (on && ((!vbus_is_on && !mxs_phy_is_otg_host(mxs_phy))
+ || (last_event == USB_EVENT_VBUS)))
__mxs_phy_disconnect_line(mxs_phy, true);
else
__mxs_phy_disconnect_line(mxs_phy, false);
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 8b2ff3a8882d..4ea47e6f835b 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -16,7 +16,7 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/slab.h>
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 111b7ee152c4..dd1c17542439 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -11,7 +11,7 @@
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
diff --git a/drivers/usb/renesas_usbhs/rza.c b/drivers/usb/renesas_usbhs/rza.c
index 2d77edefb4b3..97b5217c5a90 100644
--- a/drivers/usb/renesas_usbhs/rza.c
+++ b/drivers/usb/renesas_usbhs/rza.c
@@ -8,7 +8,7 @@
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include "common.h"
#include "rza.h"
diff --git a/drivers/usb/renesas_usbhs/rza2.c b/drivers/usb/renesas_usbhs/rza2.c
index 3eed3334a17f..f079817250bb 100644
--- a/drivers/usb/renesas_usbhs/rza2.c
+++ b/drivers/usb/renesas_usbhs/rza2.c
@@ -8,7 +8,6 @@
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include "common.h"
#include "rza.h"
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 8ac98e60fff5..7994a4549a6c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -259,6 +259,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EM05G 0x030a
#define QUECTEL_PRODUCT_EM060K 0x030b
#define QUECTEL_PRODUCT_EM05G_CS 0x030c
+#define QUECTEL_PRODUCT_EM05GV2 0x030e
#define QUECTEL_PRODUCT_EM05CN_SG 0x0310
#define QUECTEL_PRODUCT_EM05G_SG 0x0311
#define QUECTEL_PRODUCT_EM05CN 0x0312
@@ -1188,6 +1189,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(6) | ZLP },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
.driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05GV2, 0xff),
+ .driver_info = RSVD(4) | ZLP },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_CS, 0xff),
.driver_info = RSVD(6) | ZLP },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_GR, 0xff),
@@ -2232,6 +2235,10 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */
.driver_info = RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0ee, 0xff), /* Foxconn T99W368 MBIM */
+ .driver_info = RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0f0, 0xff), /* Foxconn T99W373 MBIM */
+ .driver_info = RSVD(3) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 7b4805c1004d..17b09f03ef84 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -361,8 +361,7 @@ static void serial_cleanup(struct tty_struct *tty)
module_put(owner);
}
-static int serial_write(struct tty_struct *tty, const unsigned char *buf,
- int count)
+static ssize_t serial_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
struct usb_serial_port *port = tty->driver_data;
int retval = -ENODEV;
@@ -370,7 +369,7 @@ static int serial_write(struct tty_struct *tty, const unsigned char *buf,
if (port->serial->dev->state == USB_STATE_NOTATTACHED)
goto exit;
- dev_dbg(&port->dev, "%s - %d byte(s)\n", __func__, count);
+ dev_dbg(&port->dev, "%s - %zu byte(s)\n", __func__, count);
retval = port->serial->type->write(tty, port, buf, count);
if (retval < 0)
diff --git a/drivers/usb/serial/xr_serial.c b/drivers/usb/serial/xr_serial.c
index 4ec7c5892b84..1d9a12628f81 100644
--- a/drivers/usb/serial/xr_serial.c
+++ b/drivers/usb/serial/xr_serial.c
@@ -93,6 +93,7 @@ struct xr_txrx_clk_mask {
#define XR_GPIO_MODE_SEL_DTR_DSR 0x2
#define XR_GPIO_MODE_SEL_RS485 0x3
#define XR_GPIO_MODE_SEL_RS485_ADDR 0x4
+#define XR_GPIO_MODE_RS485_TX_H 0x8
#define XR_GPIO_MODE_TX_TOGGLE 0x100
#define XR_GPIO_MODE_RX_TOGGLE 0x200
@@ -237,6 +238,7 @@ static const struct xr_type xr_types[] = {
struct xr_data {
const struct xr_type *type;
u8 channel; /* zero-based index or interface number */
+ struct serial_rs485 rs485;
};
static int xr_set_reg(struct usb_serial_port *port, u8 channel, u16 reg, u16 val)
@@ -629,6 +631,7 @@ static void xr_set_flow_mode(struct tty_struct *tty,
struct xr_data *data = usb_get_serial_port_data(port);
const struct xr_type *type = data->type;
u16 flow, gpio_mode;
+ bool rs485_enabled;
int ret;
ret = xr_get_reg_uart(port, type->gpio_mode, &gpio_mode);
@@ -645,7 +648,17 @@ static void xr_set_flow_mode(struct tty_struct *tty,
/* Set GPIO mode for controlling the pins manually by default. */
gpio_mode &= ~XR_GPIO_MODE_SEL_MASK;
- if (C_CRTSCTS(tty) && C_BAUD(tty) != B0) {
+ rs485_enabled = !!(data->rs485.flags & SER_RS485_ENABLED);
+ if (rs485_enabled) {
+ dev_dbg(&port->dev, "Enabling RS-485\n");
+ gpio_mode |= XR_GPIO_MODE_SEL_RS485;
+ if (data->rs485.flags & SER_RS485_RTS_ON_SEND)
+ gpio_mode &= ~XR_GPIO_MODE_RS485_TX_H;
+ else
+ gpio_mode |= XR_GPIO_MODE_RS485_TX_H;
+ }
+
+ if (C_CRTSCTS(tty) && C_BAUD(tty) != B0 && !rs485_enabled) {
dev_dbg(&port->dev, "Enabling hardware flow ctrl\n");
gpio_mode |= XR_GPIO_MODE_SEL_RTS_CTS;
flow = XR_UART_FLOW_MODE_HW;
@@ -809,6 +822,79 @@ static void xr_cdc_set_line_coding(struct tty_struct *tty,
kfree(lc);
}
+static void xr_sanitize_serial_rs485(struct serial_rs485 *rs485)
+{
+ if (!(rs485->flags & SER_RS485_ENABLED)) {
+ memset(rs485, 0, sizeof(*rs485));
+ return;
+ }
+
+ /* RTS always toggles after TX */
+ if (rs485->flags & SER_RS485_RTS_ON_SEND)
+ rs485->flags &= ~SER_RS485_RTS_AFTER_SEND;
+ else
+ rs485->flags |= SER_RS485_RTS_AFTER_SEND;
+
+ /* Only the flags are implemented at the moment */
+ rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
+ SER_RS485_RTS_AFTER_SEND;
+ rs485->delay_rts_before_send = 0;
+ rs485->delay_rts_after_send = 0;
+ memset(rs485->padding, 0, sizeof(rs485->padding));
+}
+
+static int xr_get_rs485_config(struct tty_struct *tty,
+ struct serial_rs485 __user *argp)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct xr_data *data = usb_get_serial_port_data(port);
+
+ down_read(&tty->termios_rwsem);
+ if (copy_to_user(argp, &data->rs485, sizeof(data->rs485))) {
+ up_read(&tty->termios_rwsem);
+ return -EFAULT;
+ }
+ up_read(&tty->termios_rwsem);
+
+ return 0;
+}
+
+static int xr_set_rs485_config(struct tty_struct *tty,
+ struct serial_rs485 __user *argp)
+{
+ struct usb_serial_port *port = tty->driver_data;
+ struct xr_data *data = usb_get_serial_port_data(port);
+ struct serial_rs485 rs485;
+
+ if (copy_from_user(&rs485, argp, sizeof(rs485)))
+ return -EFAULT;
+ xr_sanitize_serial_rs485(&rs485);
+
+ down_write(&tty->termios_rwsem);
+ data->rs485 = rs485;
+ xr_set_flow_mode(tty, port, NULL);
+ up_write(&tty->termios_rwsem);
+
+ if (copy_to_user(argp, &rs485, sizeof(rs485)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int xr_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+
+ switch (cmd) {
+ case TIOCGRS485:
+ return xr_get_rs485_config(tty, argp);
+ case TIOCSRS485:
+ return xr_set_rs485_config(tty, argp);
+ }
+
+ return -ENOIOCTLCMD;
+}
+
static void xr_set_termios(struct tty_struct *tty,
struct usb_serial_port *port,
const struct ktermios *old_termios)
@@ -1010,6 +1096,7 @@ static struct usb_serial_driver xr_device = {
.set_termios = xr_set_termios,
.tiocmget = xr_tiocmget,
.tiocmset = xr_tiocmset,
+ .ioctl = xr_ioctl,
.dtr_rts = xr_dtr_rts
};
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index cdf8261e22db..426c88a516e5 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -594,7 +594,10 @@ int dp_altmode_probe(struct typec_altmode *alt)
alt->ops = &dp_altmode_ops;
fwnode = dev_fwnode(alt->dev.parent->parent); /* typec_port fwnode */
- dp->connector_fwnode = fwnode_find_reference(fwnode, "displayport", 0);
+ if (fwnode_property_present(fwnode, "displayport"))
+ dp->connector_fwnode = fwnode_find_reference(fwnode, "displayport", 0);
+ else
+ dp->connector_fwnode = fwnode_handle_get(fwnode); /* embedded DP */
if (IS_ERR(dp->connector_fwnode))
dp->connector_fwnode = NULL;
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index fe5b9a2e61f5..e95ec7e382bb 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -183,12 +183,20 @@ EXPORT_SYMBOL_GPL(typec_altmode_exit);
*
* Notifies the partner of @adev about Attention command.
*/
-void typec_altmode_attention(struct typec_altmode *adev, u32 vdo)
+int typec_altmode_attention(struct typec_altmode *adev, u32 vdo)
{
- struct typec_altmode *pdev = &to_altmode(adev)->partner->adev;
+ struct altmode *partner = to_altmode(adev)->partner;
+ struct typec_altmode *pdev;
+
+ if (!partner)
+ return -ENODEV;
+
+ pdev = &partner->adev;
if (pdev->ops && pdev->ops->attention)
pdev->ops->attention(pdev, vdo);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(typec_altmode_attention);
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 5e8edf3881c0..60ed1f809130 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -59,7 +59,7 @@ enum {
};
/* Common Mode Data bits */
-#define PMC_USB_ALTMODE_ACTIVE_CABLE BIT(2)
+#define PMC_USB_ALTMODE_RETIMER_CABLE BIT(2)
#define PMC_USB_ALTMODE_ORI_SHIFT 1
#define PMC_USB_ALTMODE_UFP_SHIFT 3
@@ -71,6 +71,7 @@ enum {
#define PMC_USB_ALTMODE_TBT_TYPE BIT(17)
#define PMC_USB_ALTMODE_CABLE_TYPE BIT(18)
#define PMC_USB_ALTMODE_ACTIVE_LINK BIT(20)
+#define PMC_USB_ALTMODE_ACTIVE_CABLE BIT(22)
#define PMC_USB_ALTMODE_FORCE_LSR BIT(23)
#define PMC_USB_ALTMODE_CABLE_SPD(_s_) (((_s_) & GENMASK(2, 0)) << 25)
#define PMC_USB_ALTMODE_CABLE_USB31 1
@@ -117,6 +118,16 @@ enum {
IOM_PORT_STATUS_DHPD_HPD_STATUS_SHIFT) & \
IOM_PORT_STATUS_DHPD_HPD_STATUS_ASSERT)
+/* IOM port status register */
+#define IOM_PORT_STATUS_REGS(_offset_, _size_) ((_offset_) | (_size_))
+#define IOM_PORT_STATUS_REGS_SZ_MASK BIT(0)
+#define IOM_PORT_STATUS_REGS_SZ_4 0
+#define IOM_PORT_STATUS_REGS_SZ_8 1
+#define IOM_PORT_STATUS_REGS_OFFSET(_d_) \
+ ((_d_) & ~IOM_PORT_STATUS_REGS_SZ_MASK)
+#define IOM_PORT_STATUS_REGS_SIZE(_d_) \
+ (4 << ((_d_) & IOM_PORT_STATUS_REGS_SZ_MASK))
+
struct pmc_usb;
struct pmc_usb_port {
@@ -145,6 +156,7 @@ struct pmc_usb {
struct acpi_device *iom_adev;
void __iomem *iom_base;
u32 iom_port_status_offset;
+ u8 iom_port_status_size;
struct dentry *dentry;
};
@@ -160,7 +172,7 @@ static void update_port_status(struct pmc_usb_port *port)
port->iom_status = readl(port->pmc->iom_base +
port->pmc->iom_port_status_offset +
- port_num * sizeof(u32));
+ port_num * port->pmc->iom_port_status_size);
}
static int sbu_orientation(struct pmc_usb_port *port)
@@ -319,8 +331,18 @@ pmc_usb_mux_tbt(struct pmc_usb_port *port, struct typec_mux_state *state)
if (data->cable_mode & TBT_CABLE_LINK_TRAINING)
req.mode_data |= PMC_USB_ALTMODE_ACTIVE_LINK;
- if (data->enter_vdo & TBT_ENTER_MODE_ACTIVE_CABLE)
- req.mode_data |= PMC_USB_ALTMODE_ACTIVE_CABLE;
+ if (acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1072", NULL) ||
+ acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1079", NULL)) {
+ if ((data->enter_vdo & TBT_ENTER_MODE_ACTIVE_CABLE) ||
+ (data->cable_mode & TBT_CABLE_RETIMER))
+ req.mode_data |= PMC_USB_ALTMODE_RETIMER_CABLE;
+ } else {
+ if (data->enter_vdo & TBT_ENTER_MODE_ACTIVE_CABLE)
+ req.mode_data |= PMC_USB_ALTMODE_ACTIVE_CABLE;
+
+ if (data->cable_mode & TBT_CABLE_RETIMER)
+ req.mode_data |= PMC_USB_ALTMODE_RETIMER_CABLE;
+ }
req.mode_data |= PMC_USB_ALTMODE_CABLE_SPD(cable_speed);
@@ -359,8 +381,17 @@ pmc_usb_mux_usb4(struct pmc_usb_port *port, struct typec_mux_state *state)
case EUDO_CABLE_TYPE_OPTICAL:
req.mode_data |= PMC_USB_ALTMODE_CABLE_TYPE;
fallthrough;
+ case EUDO_CABLE_TYPE_RE_TIMER:
+ if (!acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1072", NULL) ||
+ !acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1079", NULL))
+ req.mode_data |= PMC_USB_ALTMODE_RETIMER_CABLE;
+ fallthrough;
default:
- req.mode_data |= PMC_USB_ALTMODE_ACTIVE_CABLE;
+ if (acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1072", NULL) ||
+ acpi_dev_hid_uid_match(port->pmc->iom_adev, "INTC1079", NULL))
+ req.mode_data |= PMC_USB_ALTMODE_RETIMER_CABLE;
+ else
+ req.mode_data |= PMC_USB_ALTMODE_ACTIVE_CABLE;
/* Configure data rate to rounded in the case of Active TBT3
* and USB4 cables.
@@ -589,13 +620,16 @@ err_unregister_switch:
/* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */
static const struct acpi_device_id iom_acpi_ids[] = {
/* TigerLake */
- { "INTC1072", 0x560, },
+ { "INTC1072", IOM_PORT_STATUS_REGS(0x560, IOM_PORT_STATUS_REGS_SZ_4) },
/* AlderLake */
- { "INTC1079", 0x160, },
+ { "INTC1079", IOM_PORT_STATUS_REGS(0x160, IOM_PORT_STATUS_REGS_SZ_4) },
/* Meteor Lake */
- { "INTC107A", 0x160, },
+ { "INTC107A", IOM_PORT_STATUS_REGS(0x160, IOM_PORT_STATUS_REGS_SZ_4) },
+
+ /* Lunar Lake */
+ { "INTC10EA", IOM_PORT_STATUS_REGS(0x150, IOM_PORT_STATUS_REGS_SZ_8) },
{}
};
@@ -615,7 +649,8 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc)
if (!adev)
return -ENODEV;
- pmc->iom_port_status_offset = (u32)dev_id->driver_data;
+ pmc->iom_port_status_offset = IOM_PORT_STATUS_REGS_OFFSET(dev_id->driver_data);
+ pmc->iom_port_status_size = IOM_PORT_STATUS_REGS_SIZE(dev_id->driver_data);
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_memory_resources(adev, &resource_list);
diff --git a/drivers/usb/typec/mux/nb7vpq904m.c b/drivers/usb/typec/mux/nb7vpq904m.c
index 4d1122d95013..cda206cf0c38 100644
--- a/drivers/usb/typec/mux/nb7vpq904m.c
+++ b/drivers/usb/typec/mux/nb7vpq904m.c
@@ -528,7 +528,7 @@ static struct i2c_driver nb7vpq904m_driver = {
.name = "nb7vpq904m",
.of_match_table = nb7vpq904m_of_table,
},
- .probe_new = nb7vpq904m_probe,
+ .probe = nb7vpq904m_probe,
.remove = nb7vpq904m_remove,
.id_table = nb7vpq904m_table,
};
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
index 5d393f520fc2..0b2993fef564 100644
--- a/drivers/usb/typec/tcpm/Kconfig
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -79,6 +79,7 @@ config TYPEC_WCOVE
config TYPEC_QCOM_PMIC
tristate "Qualcomm PMIC USB Type-C Port Controller Manager driver"
depends on ARCH_QCOM || COMPILE_TEST
+ depends on DRM || DRM=n
help
A Type-C port and Power Delivery driver which aggregates two
discrete pieces of silicon in the PM8150b PMIC block: the
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index 7fc1ffa14f76..bc21006e979c 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/proc_fs.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
index 9b467a346114..581199d37b49 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
@@ -8,7 +8,7 @@
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -17,6 +17,9 @@
#include <linux/usb/role.h>
#include <linux/usb/tcpm.h>
#include <linux/usb/typec_mux.h>
+
+#include <drm/drm_bridge.h>
+
#include "qcom_pmic_typec_pdphy.h"
#include "qcom_pmic_typec_port.h"
@@ -33,6 +36,7 @@ struct pmic_typec {
struct pmic_typec_port *pmic_typec_port;
bool vbus_enabled;
struct mutex lock; /* VBUS state serialization */
+ struct drm_bridge bridge;
};
#define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc)
@@ -146,6 +150,35 @@ static int qcom_pmic_typec_init(struct tcpc_dev *tcpc)
return 0;
}
+#if IS_ENABLED(CONFIG_DRM)
+static int qcom_pmic_typec_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
+}
+
+static const struct drm_bridge_funcs qcom_pmic_typec_bridge_funcs = {
+ .attach = qcom_pmic_typec_attach,
+};
+
+static int qcom_pmic_typec_init_drm(struct pmic_typec *tcpm)
+{
+ tcpm->bridge.funcs = &qcom_pmic_typec_bridge_funcs;
+#ifdef CONFIG_OF
+ tcpm->bridge.of_node = of_get_child_by_name(tcpm->dev->of_node, "connector");
+#endif
+ tcpm->bridge.ops = DRM_BRIDGE_OP_HPD;
+ tcpm->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
+
+ return devm_drm_bridge_add(tcpm->dev, &tcpm->bridge);
+}
+#else
+static int qcom_pmic_typec_init_drm(struct pmic_typec *tcpm)
+{
+ return 0;
+}
+#endif
+
static int qcom_pmic_typec_probe(struct platform_device *pdev)
{
struct pmic_typec *tcpm;
@@ -208,6 +241,10 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
mutex_init(&tcpm->lock);
platform_set_drvdata(pdev, tcpm);
+ ret = qcom_pmic_typec_init_drm(tcpm);
+ if (ret)
+ return ret;
+
tcpm->tcpc.fwnode = device_get_named_child_node(tcpm->dev, "connector");
if (!tcpm->tcpc.fwnode)
return -EINVAL;
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
index 4e1b846627d2..bb0b8479d80f 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
@@ -8,8 +8,6 @@
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
index 94285f64b67d..a8f3f4d3a450 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -214,6 +213,11 @@ int qcom_pmic_typec_port_get_cc(struct pmic_typec_port *pmic_typec_port,
if (ret)
goto done;
switch (val & DETECTED_SRC_TYPE_MASK) {
+ case AUDIO_ACCESS_RA_RA:
+ val = TYPEC_CC_RA;
+ *cc1 = TYPEC_CC_RA;
+ *cc2 = TYPEC_CC_RA;
+ break;
case SRC_RD_OPEN:
val = TYPEC_CC_RD;
break;
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index fc708c289a73..0ee3e6e29bb1 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -602,6 +602,10 @@ static int tcpci_init(struct tcpc_dev *tcpc)
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
+ ret = tcpci_write16(tcpci, TCPC_FAULT_STATUS, TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT);
+ if (ret < 0)
+ return ret;
+
/* Handle vendor init */
if (tcpci->data->init) {
ret = tcpci->data->init(tcpci, tcpci->data);
diff --git a/drivers/usb/typec/tcpm/tcpci_mt6370.c b/drivers/usb/typec/tcpm/tcpci_mt6370.c
index 2a079464b398..9cda1005ef01 100644
--- a/drivers/usb/typec/tcpm/tcpci_mt6370.c
+++ b/drivers/usb/typec/tcpm/tcpci_mt6370.c
@@ -147,7 +147,7 @@ static int mt6370_tcpc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return dev_err_probe(dev, irq, "Failed to get irq\n");
+ return irq;
/* Assign TCPCI feature and ops */
priv->tcpci_data.auto_discharge_disconnect = 1;
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index cc1d83926497..d962f67c95ae 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -1877,7 +1877,8 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
}
break;
case ADEV_ATTENTION:
- typec_altmode_attention(adev, p[1]);
+ if (typec_altmode_attention(adev, p[1]))
+ tcpm_log(port, "typec_altmode_attention no port partner altmode");
break;
}
}
@@ -2753,6 +2754,13 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
port->sink_cap_done = true;
tcpm_set_state(port, ready_state(port), 0);
break;
+ /*
+ * Some port partners do not support GET_STATUS; avoid soft resetting the
+ * link to prevent redundant power re-negotiation.
+ */
+ case GET_STATUS_SEND:
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
case SRC_READY:
case SNK_READY:
if (port->vdm_state > VDM_STATE_READY) {
@@ -3253,23 +3261,12 @@ static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
return ret;
}
-#define min_pps_apdo_current(x, y) \
- min(pdo_pps_apdo_max_current(x), pdo_pps_apdo_max_current(y))
-
static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
{
- unsigned int i, j, max_mw = 0, max_mv = 0;
- unsigned int min_src_mv, max_src_mv, src_ma, src_mw;
- unsigned int min_snk_mv, max_snk_mv;
- unsigned int max_op_mv;
- u32 pdo, src, snk;
- unsigned int src_pdo = 0, snk_pdo = 0;
+ unsigned int i, src_ma, max_temp_mw = 0, max_op_ma, op_mw;
+ unsigned int src_pdo = 0;
+ u32 pdo, src;
- /*
- * Select the source PPS APDO providing the most power while staying
- * within the board's limits. We skip the first PDO as this is always
- * 5V 3A.
- */
for (i = 1; i < port->nr_source_caps; ++i) {
pdo = port->source_caps[i];
@@ -3280,54 +3277,17 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
continue;
}
- min_src_mv = pdo_pps_apdo_min_voltage(pdo);
- max_src_mv = pdo_pps_apdo_max_voltage(pdo);
- src_ma = pdo_pps_apdo_max_current(pdo);
- src_mw = (src_ma * max_src_mv) / 1000;
-
- /*
- * Now search through the sink PDOs to find a matching
- * PPS APDO. Again skip the first sink PDO as this will
- * always be 5V 3A.
- */
- for (j = 1; j < port->nr_snk_pdo; j++) {
- pdo = port->snk_pdo[j];
-
- switch (pdo_type(pdo)) {
- case PDO_TYPE_APDO:
- if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
- tcpm_log(port,
- "Not PPS APDO (sink), ignoring");
- continue;
- }
-
- min_snk_mv =
- pdo_pps_apdo_min_voltage(pdo);
- max_snk_mv =
- pdo_pps_apdo_max_voltage(pdo);
- break;
- default:
- tcpm_log(port,
- "Not APDO type (sink), ignoring");
- continue;
- }
+ if (port->pps_data.req_out_volt > pdo_pps_apdo_max_voltage(pdo) ||
+ port->pps_data.req_out_volt < pdo_pps_apdo_min_voltage(pdo))
+ continue;
- if (min_src_mv <= max_snk_mv &&
- max_src_mv >= min_snk_mv) {
- max_op_mv = min(max_src_mv, max_snk_mv);
- src_mw = (max_op_mv * src_ma) / 1000;
- /* Prefer higher voltages if available */
- if ((src_mw == max_mw &&
- max_op_mv > max_mv) ||
- src_mw > max_mw) {
- src_pdo = i;
- snk_pdo = j;
- max_mw = src_mw;
- max_mv = max_op_mv;
- }
- }
+ src_ma = pdo_pps_apdo_max_current(pdo);
+ max_op_ma = min(src_ma, port->pps_data.req_op_curr);
+ op_mw = max_op_ma * port->pps_data.req_out_volt / 1000;
+ if (op_mw > max_temp_mw) {
+ src_pdo = i;
+ max_temp_mw = op_mw;
}
-
break;
default:
tcpm_log(port, "Not APDO type (source), ignoring");
@@ -3337,16 +3297,10 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
if (src_pdo) {
src = port->source_caps[src_pdo];
- snk = port->snk_pdo[snk_pdo];
-
- port->pps_data.req_min_volt = max(pdo_pps_apdo_min_voltage(src),
- pdo_pps_apdo_min_voltage(snk));
- port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src),
- pdo_pps_apdo_max_voltage(snk));
- port->pps_data.req_max_curr = min_pps_apdo_current(src, snk);
- port->pps_data.req_out_volt = min(port->pps_data.req_max_volt,
- max(port->pps_data.req_min_volt,
- port->pps_data.req_out_volt));
+
+ port->pps_data.req_min_volt = pdo_pps_apdo_min_voltage(src);
+ port->pps_data.req_max_volt = pdo_pps_apdo_max_voltage(src);
+ port->pps_data.req_max_curr = pdo_pps_apdo_max_current(src);
port->pps_data.req_op_curr = min(port->pps_data.req_max_curr,
port->pps_data.req_op_curr);
}
@@ -3464,32 +3418,16 @@ static int tcpm_pd_send_request(struct tcpm_port *port)
static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
{
unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
- enum pd_pdo_type type;
unsigned int src_pdo_index;
- u32 pdo;
src_pdo_index = tcpm_pd_select_pps_apdo(port);
if (!src_pdo_index)
return -EOPNOTSUPP;
- pdo = port->source_caps[src_pdo_index];
- type = pdo_type(pdo);
-
- switch (type) {
- case PDO_TYPE_APDO:
- if (pdo_apdo_type(pdo) != APDO_TYPE_PPS) {
- tcpm_log(port, "Invalid APDO selected!");
- return -EINVAL;
- }
- max_mv = port->pps_data.req_max_volt;
- max_ma = port->pps_data.req_max_curr;
- out_mv = port->pps_data.req_out_volt;
- op_ma = port->pps_data.req_op_curr;
- break;
- default:
- tcpm_log(port, "Invalid PDO selected!");
- return -EINVAL;
- }
+ max_mv = port->pps_data.req_max_volt;
+ max_ma = port->pps_data.req_max_curr;
+ out_mv = port->pps_data.req_out_volt;
+ op_ma = port->pps_data.req_op_curr;
flags = RDO_USB_COMM | RDO_NO_SUSPEND;
@@ -3789,6 +3727,9 @@ static void tcpm_detach(struct tcpm_port *port)
if (tcpm_port_is_disconnected(port))
port->hard_reset_count = 0;
+ port->try_src_count = 0;
+ port->try_snk_count = 0;
+
if (!port->attached)
return;
@@ -3928,6 +3869,29 @@ static enum typec_cc_status tcpm_pwr_opmode_to_rp(enum typec_pwr_opmode opmode)
}
}
+static void tcpm_set_initial_svdm_version(struct tcpm_port *port)
+{
+ switch (port->negotiated_rev) {
+ case PD_REV30:
+ break;
+ /*
+ * 6.4.4.2.3 Structured VDM Version
+ * 2.0 states "At this time, there is only one version (1.0) defined.
+ * This field Shall be set to zero to indicate Version 1.0."
+ * 3.0 states "This field Shall be set to 01b to indicate Version 2.0."
+ * To ensure that we follow the Power Delivery revision we are currently
+ * operating on, downgrade the SVDM version to the highest one supported
+ * by the Power Delivery revision.
+ */
+ case PD_REV20:
+ typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
+ break;
+ default:
+ typec_partner_set_svdm_version(port->partner, SVDM_VER_1_0);
+ break;
+ }
+}
+
static void run_state_machine(struct tcpm_port *port)
{
int ret;
@@ -4165,10 +4129,12 @@ static void run_state_machine(struct tcpm_port *port)
* For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
* port->explicit_contract to decide whether to send the command.
*/
- if (port->explicit_contract)
+ if (port->explicit_contract) {
+ tcpm_set_initial_svdm_version(port);
mod_send_discover_delayed_work(port, 0);
- else
+ } else {
port->send_discover = false;
+ }
/*
* 6.3.5
@@ -4301,7 +4267,9 @@ static void run_state_machine(struct tcpm_port *port)
if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5))
current_lim = PD_P_SNK_STDBY_MW / 5;
tcpm_set_current_limit(port, current_lim, 5000);
- tcpm_set_charge(port, true);
+ /* Do not sink vbus if the operational current is 0mA */
+ tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
+
if (!port->pd_supported)
tcpm_set_state(port, SNK_READY, 0);
else
@@ -4455,10 +4423,12 @@ static void run_state_machine(struct tcpm_port *port)
* For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
* port->explicit_contract.
*/
- if (port->explicit_contract)
+ if (port->explicit_contract) {
+ tcpm_set_initial_svdm_version(port);
mod_send_discover_delayed_work(port, 0);
- else
+ } else {
port->send_discover = false;
+ }
power_supply_changed(port->psy);
break;
@@ -4582,7 +4552,8 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_current_limit(port,
tcpm_get_current_limit(port),
5000);
- tcpm_set_charge(port, true);
+ /* Do not sink vbus if the operational current is 0mA */
+ tcpm_set_charge(port, !!pdo_max_current(port->snk_pdo[0]));
}
if (port->ams == HARD_RESET)
tcpm_ams_finish(port);
@@ -5889,12 +5860,6 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
goto port_unlock;
}
- if (req_out_volt < port->pps_data.min_volt ||
- req_out_volt > port->pps_data.max_volt) {
- ret = -EINVAL;
- goto port_unlock;
- }
-
target_mw = (port->current_limit * req_out_volt) / 1000;
if (target_mw < port->operating_snk_mw) {
ret = -EINVAL;
@@ -6447,11 +6412,7 @@ static int tcpm_psy_set_prop(struct power_supply *psy,
ret = tcpm_psy_set_online(port, val);
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- if (val->intval < port->pps_data.min_volt * 1000 ||
- val->intval > port->pps_data.max_volt * 1000)
- ret = -EINVAL;
- else
- ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
+ ret = tcpm_pps_set_out_volt(port, val->intval / 1000);
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
if (val->intval > port->pps_data.max_curr * 1000)
diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
index b3bb0191987e..bdcb1764cfae 100644
--- a/drivers/usb/typec/ucsi/Kconfig
+++ b/drivers/usb/typec/ucsi/Kconfig
@@ -4,6 +4,7 @@ config TYPEC_UCSI
tristate "USB Type-C Connector System Software Interface driver"
depends on !CPU_BIG_ENDIAN
depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
+ select USB_COMMON if DEBUG_FS
help
USB Type-C Connector System Software Interface (UCSI) is a
specification for an interface that allows the operating system to
diff --git a/drivers/usb/typec/ucsi/Makefile b/drivers/usb/typec/ucsi/Makefile
index 77f09e136956..b4679f94696b 100644
--- a/drivers/usb/typec/ucsi/Makefile
+++ b/drivers/usb/typec/ucsi/Makefile
@@ -5,6 +5,8 @@ obj-$(CONFIG_TYPEC_UCSI) += typec_ucsi.o
typec_ucsi-y := ucsi.o
+typec_ucsi-$(CONFIG_DEBUG_FS) += debugfs.o
+
typec_ucsi-$(CONFIG_TRACING) += trace.o
ifneq ($(CONFIG_POWER_SUPPLY),)
diff --git a/drivers/usb/typec/ucsi/debugfs.c b/drivers/usb/typec/ucsi/debugfs.c
new file mode 100644
index 000000000000..0c7bf88d4a7f
--- /dev/null
+++ b/drivers/usb/typec/ucsi/debugfs.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UCSI debugfs interface
+ *
+ * Copyright (C) 2023 Intel Corporation
+ *
+ * Authors: Rajaram Regupathy <rajaram.regupathy@intel.com>
+ * Gopal Saranya <saranya.gopal@intel.com>
+ */
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/usb.h>
+
+#include <asm/errno.h>
+
+#include "ucsi.h"
+
+static struct dentry *ucsi_debugfs_root;
+
+static int ucsi_cmd(void *data, u64 val)
+{
+ struct ucsi *ucsi = data;
+ int ret;
+
+ memset(&ucsi->debugfs->response, 0, sizeof(ucsi->debugfs->response));
+ ucsi->debugfs->status = 0;
+
+ switch (UCSI_COMMAND(val)) {
+ case UCSI_SET_UOM:
+ case UCSI_SET_UOR:
+ case UCSI_SET_PDR:
+ case UCSI_CONNECTOR_RESET:
+ ret = ucsi_send_command(ucsi, val, NULL, 0);
+ break;
+ case UCSI_GET_CAPABILITY:
+ case UCSI_GET_CONNECTOR_CAPABILITY:
+ case UCSI_GET_ALTERNATE_MODES:
+ case UCSI_GET_CURRENT_CAM:
+ case UCSI_GET_PDOS:
+ case UCSI_GET_CABLE_PROPERTY:
+ case UCSI_GET_CONNECTOR_STATUS:
+ ret = ucsi_send_command(ucsi, val,
+ &ucsi->debugfs->response,
+ sizeof(ucsi->debugfs->response));
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ if (ret < 0) {
+ ucsi->debugfs->status = ret;
+ return ret;
+ }
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(ucsi_cmd_fops, NULL, ucsi_cmd, "0x%llx\n");
+
+static int ucsi_resp_show(struct seq_file *s, void *not_used)
+{
+ struct ucsi *ucsi = s->private;
+
+ if (ucsi->debugfs->status)
+ return ucsi->debugfs->status;
+
+ seq_printf(s, "0x%016llx%016llx\n", ucsi->debugfs->response.high,
+ ucsi->debugfs->response.low);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ucsi_resp);
+
+void ucsi_debugfs_register(struct ucsi *ucsi)
+{
+ ucsi->debugfs = kzalloc(sizeof(*ucsi->debugfs), GFP_KERNEL);
+ if (!ucsi->debugfs)
+ return;
+
+ ucsi->debugfs->dentry = debugfs_create_dir(dev_name(ucsi->dev), ucsi_debugfs_root);
+ debugfs_create_file("command", 0200, ucsi->debugfs->dentry, ucsi, &ucsi_cmd_fops);
+ debugfs_create_file("response", 0400, ucsi->debugfs->dentry, ucsi, &ucsi_resp_fops);
+}
+
+void ucsi_debugfs_unregister(struct ucsi *ucsi)
+{
+ debugfs_remove_recursive(ucsi->debugfs->dentry);
+ kfree(ucsi->debugfs);
+}
+
+void ucsi_debugfs_init(void)
+{
+ ucsi_debugfs_root = debugfs_create_dir("ucsi", usb_debug_root);
+}
+
+void ucsi_debugfs_exit(void)
+{
+ debugfs_remove(ucsi_debugfs_root);
+}
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index f6901319639d..c6dfe3dff346 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1530,6 +1530,7 @@ EXPORT_SYMBOL_GPL(ucsi_create);
*/
void ucsi_destroy(struct ucsi *ucsi)
{
+ ucsi_debugfs_unregister(ucsi);
kfree(ucsi);
}
EXPORT_SYMBOL_GPL(ucsi_destroy);
@@ -1552,6 +1553,7 @@ int ucsi_register(struct ucsi *ucsi)
queue_delayed_work(system_long_wq, &ucsi->work, 0);
+ ucsi_debugfs_register(ucsi);
return 0;
}
EXPORT_SYMBOL_GPL(ucsi_register);
@@ -1611,6 +1613,19 @@ void ucsi_unregister(struct ucsi *ucsi)
}
EXPORT_SYMBOL_GPL(ucsi_unregister);
+static int __init ucsi_module_init(void)
+{
+ ucsi_debugfs_init();
+ return 0;
+}
+module_init(ucsi_module_init);
+
+static void __exit ucsi_module_exit(void)
+{
+ ucsi_debugfs_exit();
+}
+module_exit(ucsi_module_exit);
+
MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@linux.intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("USB Type-C Connector System Software Interface driver");
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index c09af859f573..474315a72c77 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -15,6 +15,7 @@
struct ucsi;
struct ucsi_altmode;
+struct dentry;
/* UCSI offsets (Bytes) */
#define UCSI_VERSION 0
@@ -277,6 +278,16 @@ struct ucsi_connector_status {
/* -------------------------------------------------------------------------- */
+struct ucsi_debugfs_entry {
+ u64 command;
+ struct ucsi_data {
+ u64 low;
+ u64 high;
+ } response;
+ u32 status;
+ struct dentry *dentry;
+};
+
struct ucsi {
u16 version;
struct device *dev;
@@ -286,6 +297,7 @@ struct ucsi {
struct ucsi_capability cap;
struct ucsi_connector *connector;
+ struct ucsi_debugfs_entry *debugfs;
struct work_struct resume_work;
struct delayed_work work;
@@ -388,6 +400,18 @@ static inline void
ucsi_displayport_remove_partner(struct typec_altmode *adev) { }
#endif /* CONFIG_TYPEC_DP_ALTMODE */
+#ifdef CONFIG_DEBUG_FS
+void ucsi_debugfs_init(void);
+void ucsi_debugfs_exit(void);
+void ucsi_debugfs_register(struct ucsi *ucsi);
+void ucsi_debugfs_unregister(struct ucsi *ucsi);
+#else
+static inline void ucsi_debugfs_init(void) { }
+static inline void ucsi_debugfs_exit(void) { }
+static inline void ucsi_debugfs_register(struct ucsi *ucsi) { }
+static inline void ucsi_debugfs_unregister(struct ucsi *ucsi) { }
+#endif /* CONFIG_DEBUG_FS */
+
/*
* NVIDIA VirtualLink (svid 0x955) has two altmode. VirtualLink
* DP mode with vdo=0x1 and NVIDIA test mode with vdo=0x3
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index 1fe9cb5b6bd9..bb1854b3311d 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -5,7 +5,6 @@
*/
#include <linux/auxiliary_bus.h>
#include <linux/module.h>
-#include <linux/of_device.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/soc/qcom/pdr.h>
diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
index 2bc428f2e261..44b04c54c086 100644
--- a/drivers/usb/usbip/vudc_dev.c
+++ b/drivers/usb/usbip/vudc_dev.c
@@ -489,11 +489,11 @@ static void vudc_device_unusable(struct usbip_device *ud)
struct vudc_device *alloc_vudc_device(int devid)
{
- struct vudc_device *udc_dev = NULL;
+ struct vudc_device *udc_dev;
udc_dev = kzalloc(sizeof(*udc_dev), GFP_KERNEL);
if (!udc_dev)
- goto out;
+ return NULL;
INIT_LIST_HEAD(&udc_dev->dev_entry);
@@ -503,7 +503,6 @@ struct vudc_device *alloc_vudc_device(int devid)
udc_dev = NULL;
}
-out:
return udc_dev;
}
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index b53420e874ac..ca56242972b3 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -100,9 +100,6 @@ struct mlx5_vdpa_dev {
bool suspended;
};
-int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid);
-int mlx5_vdpa_dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid);
-int mlx5_vdpa_get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey);
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn);
void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn);
int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index d343af4fa60e..76d41058add9 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -18,6 +18,7 @@
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/vdpa.h>
+#include <uapi/linux/vhost_types.h>
#include "vdpa_sim.h"
@@ -410,6 +411,11 @@ static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
return vdpasim->dev_attr.supported_features;
}
+static u64 vdpasim_get_backend_features(const struct vdpa_device *vdpa)
+{
+ return BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK);
+}
+
static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -733,6 +739,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.get_vq_align = vdpasim_get_vq_align,
.get_vq_group = vdpasim_get_vq_group,
.get_device_features = vdpasim_get_device_features,
+ .get_backend_features = vdpasim_get_backend_features,
.set_driver_features = vdpasim_set_driver_features,
.get_driver_features = vdpasim_get_driver_features,
.set_config_cb = vdpasim_set_config_cb,
@@ -770,6 +777,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.get_vq_align = vdpasim_get_vq_align,
.get_vq_group = vdpasim_get_vq_group,
.get_device_features = vdpasim_get_device_features,
+ .get_backend_features = vdpasim_get_backend_features,
.set_driver_features = vdpasim_set_driver_features,
.get_driver_features = vdpasim_get_driver_features,
.set_config_cb = vdpasim_set_config_cb,
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index b43e8680eee8..78379ffd2336 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -403,6 +403,17 @@ static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
return 0;
}
+static u64 vhost_vdpa_get_backend_features(const struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (!ops->get_backend_features)
+ return 0;
+ else
+ return ops->get_backend_features(vdpa);
+}
+
static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
struct vdpa_device *vdpa = v->vdpa;
@@ -680,7 +691,8 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
return -EFAULT;
if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
- BIT_ULL(VHOST_BACKEND_F_RESUME)))
+ BIT_ULL(VHOST_BACKEND_F_RESUME) |
+ BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK)))
return -EOPNOTSUPP;
if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
!vhost_vdpa_can_suspend(v))
@@ -741,6 +753,7 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
if (vhost_vdpa_can_resume(v))
features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
+ features |= vhost_vdpa_get_backend_features(v);
if (copy_to_user(featurep, &features, sizeof(features)))
r = -EFAULT;
break;
diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c
index d3bea42407f1..d28c30b2a35d 100644
--- a/drivers/video/backlight/gpio_backlight.c
+++ b/drivers/video/backlight/gpio_backlight.c
@@ -87,8 +87,7 @@ static int gpio_backlight_probe(struct platform_device *pdev)
/* Not booted with device tree or no phandle link to the node */
bl->props.power = def_value ? FB_BLANK_UNBLANK
: FB_BLANK_POWERDOWN;
- else if (gpiod_get_direction(gbl->gpiod) == 0 &&
- gpiod_get_value_cansleep(gbl->gpiod) == 0)
+ else if (gpiod_get_value_cansleep(gbl->gpiod) == 0)
bl->props.power = FB_BLANK_POWERDOWN;
else
bl->props.power = FB_BLANK_UNBLANK;
diff --git a/drivers/video/backlight/led_bl.c b/drivers/video/backlight/led_bl.c
index 3259292fda76..032f8bddf872 100644
--- a/drivers/video/backlight/led_bl.c
+++ b/drivers/video/backlight/led_bl.c
@@ -243,7 +243,7 @@ MODULE_DEVICE_TABLE(of, led_bl_of_match);
static struct platform_driver led_bl_driver = {
.driver = {
.name = "led-backlight",
- .of_match_table = of_match_ptr(led_bl_of_match),
+ .of_match_table = led_bl_of_match,
},
.probe = led_bl_probe,
.remove_new = led_bl_remove,
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index 1c9e921bca14..da1f124db69c 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -71,6 +71,7 @@ struct lp855x {
struct device *dev;
struct lp855x_platform_data *pdata;
struct pwm_device *pwm;
+ bool needs_pwm_init;
struct regulator *supply; /* regulator for VDD input */
struct regulator *enable; /* regulator for EN/VDDIO input */
};
@@ -216,16 +217,24 @@ err:
return ret;
}
-static void lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
+static int lp855x_pwm_ctrl(struct lp855x *lp, int br, int max_br)
{
struct pwm_state state;
- pwm_get_state(lp->pwm, &state);
+ if (lp->needs_pwm_init) {
+ pwm_init_state(lp->pwm, &state);
+ /* Legacy platform data compatibility */
+ if (lp->pdata->period_ns > 0)
+ state.period = lp->pdata->period_ns;
+ lp->needs_pwm_init = false;
+ } else {
+ pwm_get_state(lp->pwm, &state);
+ }
state.duty_cycle = div_u64(br * state.period, max_br);
state.enabled = state.duty_cycle;
- pwm_apply_state(lp->pwm, &state);
+ return pwm_apply_state(lp->pwm, &state);
}
static int lp855x_bl_update_status(struct backlight_device *bl)
@@ -237,11 +246,12 @@ static int lp855x_bl_update_status(struct backlight_device *bl)
brightness = 0;
if (lp->mode == PWM_BASED)
- lp855x_pwm_ctrl(lp, brightness, bl->props.max_brightness);
+ return lp855x_pwm_ctrl(lp, brightness,
+ bl->props.max_brightness);
else if (lp->mode == REGISTER_BASED)
- lp855x_write_byte(lp, lp->cfg->reg_brightness, (u8)brightness);
-
- return 0;
+ return lp855x_write_byte(lp, lp->cfg->reg_brightness,
+ (u8)brightness);
+ return -EINVAL;
}
static const struct backlight_ops lp855x_bl_ops = {
@@ -387,7 +397,6 @@ static int lp855x_probe(struct i2c_client *cl)
const struct i2c_device_id *id = i2c_client_get_device_id(cl);
const struct acpi_device_id *acpi_id = NULL;
struct device *dev = &cl->dev;
- struct pwm_state pwmstate;
struct lp855x *lp;
int ret;
@@ -470,15 +479,11 @@ static int lp855x_probe(struct i2c_client *cl)
else
return dev_err_probe(dev, ret, "getting PWM\n");
+ lp->needs_pwm_init = false;
lp->mode = REGISTER_BASED;
dev_dbg(dev, "mode: register based\n");
} else {
- pwm_init_state(lp->pwm, &pwmstate);
- /* Legacy platform data compatibility */
- if (lp->pdata->period_ns > 0)
- pwmstate.period = lp->pdata->period_ns;
- pwm_apply_state(lp->pwm, &pwmstate);
-
+ lp->needs_pwm_init = true;
lp->mode = PWM_BASED;
dev_dbg(dev, "mode: PWM based\n");
}
diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c
index c6996aa288e6..10129095a4c1 100644
--- a/drivers/video/backlight/qcom-wled.c
+++ b/drivers/video/backlight/qcom-wled.c
@@ -9,8 +9,8 @@
#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
/* From DT binding */
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 5d93ecc01f6a..eac0ba39581e 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1887,17 +1887,6 @@ config FB_PRE_INIT_FB
Select this option if display contents should be inherited as set by
the bootloader.
-config FB_MX3
- tristate "MX3 Framebuffer support"
- depends on FB && MX3_IPU
- select BACKLIGHT_CLASS_DEVICE
- select FB_IOMEM_HELPERS
- default y
- help
- This is a framebuffer device for the i.MX31 LCD Controller. So
- far only synchronous displays are supported. If you plan to use
- an LCD display with your i.MX31 system, say Y here.
-
config FB_BROADSHEET
tristate "E-Ink Broadsheet/Epson S1D13521 controller support"
depends on FB && (ARCH_PXA || COMPILE_TEST)
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index e6b0ae094b8b..70569f7027ed 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -122,7 +122,6 @@ obj-$(CONFIG_FB_VESA) += vesafb.o
obj-$(CONFIG_FB_EFI) += efifb.o
obj-$(CONFIG_FB_VGA16) += vga16fb.o
obj-$(CONFIG_FB_OF) += offb.o
-obj-$(CONFIG_FB_MX3) += mx3fb.o
obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
obj-$(CONFIG_FB_SSD1307) += ssd1307fb.o
obj-$(CONFIG_FB_SIMPLE) += simplefb.o
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index fafe574398b0..1b3c9958ef5c 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -61,14 +61,12 @@ static ssize_t store_mode(struct device *device, struct device_attribute *attr,
struct fb_var_screeninfo var;
struct fb_modelist *modelist;
struct fb_videomode *mode;
- struct list_head *pos;
size_t i;
int err;
memset(&var, 0, sizeof(var));
- list_for_each(pos, &fb_info->modelist) {
- modelist = list_entry(pos, struct fb_modelist, list);
+ list_for_each_entry(modelist, &fb_info->modelist, list) {
mode = &modelist->mode;
i = mode_string(mstr, 0, mode);
if (strncmp(mstr, buf, max(count, i)) == 0) {
@@ -129,13 +127,11 @@ static ssize_t show_modes(struct device *device, struct device_attribute *attr,
{
struct fb_info *fb_info = dev_get_drvdata(device);
unsigned int i;
- struct list_head *pos;
struct fb_modelist *modelist;
const struct fb_videomode *mode;
i = 0;
- list_for_each(pos, &fb_info->modelist) {
- modelist = list_entry(pos, struct fb_modelist, list);
+ list_for_each_entry(modelist, &fb_info->modelist, list) {
mode = &modelist->mode;
i += mode_string(buf, i, mode);
}
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index f7e019dded0f..7196b055f2bd 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -963,15 +963,12 @@ int fb_mode_is_equal(const struct fb_videomode *mode1,
const struct fb_videomode *fb_find_best_mode(const struct fb_var_screeninfo *var,
struct list_head *head)
{
- struct list_head *pos;
struct fb_modelist *modelist;
struct fb_videomode *mode, *best = NULL;
u32 diff = -1;
- list_for_each(pos, head) {
+ list_for_each_entry(modelist, head, list) {
u32 d;
-
- modelist = list_entry(pos, struct fb_modelist, list);
mode = &modelist->mode;
if (mode->xres >= var->xres && mode->yres >= var->yres) {
@@ -1001,15 +998,12 @@ const struct fb_videomode *fb_find_best_mode(const struct fb_var_screeninfo *var
const struct fb_videomode *fb_find_nearest_mode(const struct fb_videomode *mode,
struct list_head *head)
{
- struct list_head *pos;
struct fb_modelist *modelist;
struct fb_videomode *cmode, *best = NULL;
u32 diff = -1, diff_refresh = -1;
- list_for_each(pos, head) {
+ list_for_each_entry(modelist, head, list) {
u32 d;
-
- modelist = list_entry(pos, struct fb_modelist, list);
cmode = &modelist->mode;
d = abs(cmode->xres - mode->xres) +
@@ -1041,13 +1035,11 @@ const struct fb_videomode *fb_find_nearest_mode(const struct fb_videomode *mode,
const struct fb_videomode *fb_match_mode(const struct fb_var_screeninfo *var,
struct list_head *head)
{
- struct list_head *pos;
struct fb_modelist *modelist;
struct fb_videomode *m, mode;
fb_var_to_videomode(&mode, var);
- list_for_each(pos, head) {
- modelist = list_entry(pos, struct fb_modelist, list);
+ list_for_each_entry(modelist, head, list) {
m = &modelist->mode;
if (fb_mode_is_equal(m, &mode))
return m;
@@ -1065,13 +1057,11 @@ const struct fb_videomode *fb_match_mode(const struct fb_var_screeninfo *var,
*/
int fb_add_videomode(const struct fb_videomode *mode, struct list_head *head)
{
- struct list_head *pos;
struct fb_modelist *modelist;
struct fb_videomode *m;
int found = 0;
- list_for_each(pos, head) {
- modelist = list_entry(pos, struct fb_modelist, list);
+ list_for_each_entry(modelist, head, list) {
m = &modelist->mode;
if (fb_mode_is_equal(m, mode)) {
found = 1;
@@ -1152,7 +1142,6 @@ void fb_videomode_to_modelist(const struct fb_videomode *modedb, int num,
const struct fb_videomode *fb_find_best_display(const struct fb_monspecs *specs,
struct list_head *head)
{
- struct list_head *pos;
struct fb_modelist *modelist;
const struct fb_videomode *m, *m1 = NULL, *md = NULL, *best = NULL;
int first = 0;
@@ -1161,8 +1150,7 @@ const struct fb_videomode *fb_find_best_display(const struct fb_monspecs *specs,
goto finished;
/* get the first detailed mode and the very first mode */
- list_for_each(pos, head) {
- modelist = list_entry(pos, struct fb_modelist, list);
+ list_for_each_entry(modelist, head, list) {
m = &modelist->mode;
if (!first) {
diff --git a/drivers/video/fbdev/g364fb.c b/drivers/video/fbdev/g364fb.c
index 7a1013b22fa7..ee6fe51e0a6b 100644
--- a/drivers/video/fbdev/g364fb.c
+++ b/drivers/video/fbdev/g364fb.c
@@ -112,7 +112,7 @@ static int g364fb_blank(int blank, struct fb_info *info);
static const struct fb_ops g364fb_ops = {
.owner = THIS_MODULE,
- FB_DEFAULT_IOMEM_HELPERS,
+ FB_DEFAULT_IOMEM_OPS,
.fb_setcolreg = g364fb_setcolreg,
.fb_pan_display = g364fb_pan_display,
.fb_blank = g364fb_blank,
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
deleted file mode 100644
index e6bafaba3460..000000000000
--- a/drivers/video/fbdev/mx3fb.c
+++ /dev/null
@@ -1,1695 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/fb.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
-#include <linux/console.h>
-#include <linux/clk.h>
-#include <linux/mutex.h>
-#include <linux/dma/ipu-dma.h>
-#include <linux/backlight.h>
-
-#include <linux/dma/imx-dma.h>
-#include <linux/platform_data/video-mx3fb.h>
-
-#include <asm/io.h>
-#include <linux/uaccess.h>
-
-#define MX3FB_NAME "mx3_sdc_fb"
-
-#define MX3FB_REG_OFFSET 0xB4
-
-/* SDC Registers */
-#define SDC_COM_CONF (0xB4 - MX3FB_REG_OFFSET)
-#define SDC_GW_CTRL (0xB8 - MX3FB_REG_OFFSET)
-#define SDC_FG_POS (0xBC - MX3FB_REG_OFFSET)
-#define SDC_BG_POS (0xC0 - MX3FB_REG_OFFSET)
-#define SDC_CUR_POS (0xC4 - MX3FB_REG_OFFSET)
-#define SDC_PWM_CTRL (0xC8 - MX3FB_REG_OFFSET)
-#define SDC_CUR_MAP (0xCC - MX3FB_REG_OFFSET)
-#define SDC_HOR_CONF (0xD0 - MX3FB_REG_OFFSET)
-#define SDC_VER_CONF (0xD4 - MX3FB_REG_OFFSET)
-#define SDC_SHARP_CONF_1 (0xD8 - MX3FB_REG_OFFSET)
-#define SDC_SHARP_CONF_2 (0xDC - MX3FB_REG_OFFSET)
-
-/* Register bits */
-#define SDC_COM_TFT_COLOR 0x00000001UL
-#define SDC_COM_FG_EN 0x00000010UL
-#define SDC_COM_GWSEL 0x00000020UL
-#define SDC_COM_GLB_A 0x00000040UL
-#define SDC_COM_KEY_COLOR_G 0x00000080UL
-#define SDC_COM_BG_EN 0x00000200UL
-#define SDC_COM_SHARP 0x00001000UL
-
-#define SDC_V_SYNC_WIDTH_L 0x00000001UL
-
-/* Display Interface registers */
-#define DI_DISP_IF_CONF (0x0124 - MX3FB_REG_OFFSET)
-#define DI_DISP_SIG_POL (0x0128 - MX3FB_REG_OFFSET)
-#define DI_SER_DISP1_CONF (0x012C - MX3FB_REG_OFFSET)
-#define DI_SER_DISP2_CONF (0x0130 - MX3FB_REG_OFFSET)
-#define DI_HSP_CLK_PER (0x0134 - MX3FB_REG_OFFSET)
-#define DI_DISP0_TIME_CONF_1 (0x0138 - MX3FB_REG_OFFSET)
-#define DI_DISP0_TIME_CONF_2 (0x013C - MX3FB_REG_OFFSET)
-#define DI_DISP0_TIME_CONF_3 (0x0140 - MX3FB_REG_OFFSET)
-#define DI_DISP1_TIME_CONF_1 (0x0144 - MX3FB_REG_OFFSET)
-#define DI_DISP1_TIME_CONF_2 (0x0148 - MX3FB_REG_OFFSET)
-#define DI_DISP1_TIME_CONF_3 (0x014C - MX3FB_REG_OFFSET)
-#define DI_DISP2_TIME_CONF_1 (0x0150 - MX3FB_REG_OFFSET)
-#define DI_DISP2_TIME_CONF_2 (0x0154 - MX3FB_REG_OFFSET)
-#define DI_DISP2_TIME_CONF_3 (0x0158 - MX3FB_REG_OFFSET)
-#define DI_DISP3_TIME_CONF (0x015C - MX3FB_REG_OFFSET)
-#define DI_DISP0_DB0_MAP (0x0160 - MX3FB_REG_OFFSET)
-#define DI_DISP0_DB1_MAP (0x0164 - MX3FB_REG_OFFSET)
-#define DI_DISP0_DB2_MAP (0x0168 - MX3FB_REG_OFFSET)
-#define DI_DISP0_CB0_MAP (0x016C - MX3FB_REG_OFFSET)
-#define DI_DISP0_CB1_MAP (0x0170 - MX3FB_REG_OFFSET)
-#define DI_DISP0_CB2_MAP (0x0174 - MX3FB_REG_OFFSET)
-#define DI_DISP1_DB0_MAP (0x0178 - MX3FB_REG_OFFSET)
-#define DI_DISP1_DB1_MAP (0x017C - MX3FB_REG_OFFSET)
-#define DI_DISP1_DB2_MAP (0x0180 - MX3FB_REG_OFFSET)
-#define DI_DISP1_CB0_MAP (0x0184 - MX3FB_REG_OFFSET)
-#define DI_DISP1_CB1_MAP (0x0188 - MX3FB_REG_OFFSET)
-#define DI_DISP1_CB2_MAP (0x018C - MX3FB_REG_OFFSET)
-#define DI_DISP2_DB0_MAP (0x0190 - MX3FB_REG_OFFSET)
-#define DI_DISP2_DB1_MAP (0x0194 - MX3FB_REG_OFFSET)
-#define DI_DISP2_DB2_MAP (0x0198 - MX3FB_REG_OFFSET)
-#define DI_DISP2_CB0_MAP (0x019C - MX3FB_REG_OFFSET)
-#define DI_DISP2_CB1_MAP (0x01A0 - MX3FB_REG_OFFSET)
-#define DI_DISP2_CB2_MAP (0x01A4 - MX3FB_REG_OFFSET)
-#define DI_DISP3_B0_MAP (0x01A8 - MX3FB_REG_OFFSET)
-#define DI_DISP3_B1_MAP (0x01AC - MX3FB_REG_OFFSET)
-#define DI_DISP3_B2_MAP (0x01B0 - MX3FB_REG_OFFSET)
-#define DI_DISP_ACC_CC (0x01B4 - MX3FB_REG_OFFSET)
-#define DI_DISP_LLA_CONF (0x01B8 - MX3FB_REG_OFFSET)
-#define DI_DISP_LLA_DATA (0x01BC - MX3FB_REG_OFFSET)
-
-/* DI_DISP_SIG_POL bits */
-#define DI_D3_VSYNC_POL_SHIFT 28
-#define DI_D3_HSYNC_POL_SHIFT 27
-#define DI_D3_DRDY_SHARP_POL_SHIFT 26
-#define DI_D3_CLK_POL_SHIFT 25
-#define DI_D3_DATA_POL_SHIFT 24
-
-/* DI_DISP_IF_CONF bits */
-#define DI_D3_CLK_IDLE_SHIFT 26
-#define DI_D3_CLK_SEL_SHIFT 25
-#define DI_D3_DATAMSK_SHIFT 24
-
-enum ipu_panel {
- IPU_PANEL_SHARP_TFT,
- IPU_PANEL_TFT,
-};
-
-struct ipu_di_signal_cfg {
- unsigned datamask_en:1;
- unsigned clksel_en:1;
- unsigned clkidle_en:1;
- unsigned data_pol:1; /* true = inverted */
- unsigned clk_pol:1; /* true = rising edge */
- unsigned enable_pol:1;
- unsigned Hsync_pol:1; /* true = active high */
- unsigned Vsync_pol:1;
-};
-
-static const struct fb_videomode mx3fb_modedb[] = {
- {
- /* 240x320 @ 60 Hz */
- .name = "Sharp-QVGA",
- .refresh = 60,
- .xres = 240,
- .yres = 320,
- .pixclock = 185925,
- .left_margin = 9,
- .right_margin = 16,
- .upper_margin = 7,
- .lower_margin = 9,
- .hsync_len = 1,
- .vsync_len = 1,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_SHARP_MODE |
- FB_SYNC_CLK_INVERT | FB_SYNC_DATA_INVERT |
- FB_SYNC_CLK_IDLE_EN,
- .vmode = FB_VMODE_NONINTERLACED,
- .flag = 0,
- }, {
- /* 240x33 @ 60 Hz */
- .name = "Sharp-CLI",
- .refresh = 60,
- .xres = 240,
- .yres = 33,
- .pixclock = 185925,
- .left_margin = 9,
- .right_margin = 16,
- .upper_margin = 7,
- .lower_margin = 9 + 287,
- .hsync_len = 1,
- .vsync_len = 1,
- .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_SHARP_MODE |
- FB_SYNC_CLK_INVERT | FB_SYNC_DATA_INVERT |
- FB_SYNC_CLK_IDLE_EN,
- .vmode = FB_VMODE_NONINTERLACED,
- .flag = 0,
- }, {
- /* 640x480 @ 60 Hz */
- .name = "NEC-VGA",
- .refresh = 60,
- .xres = 640,
- .yres = 480,
- .pixclock = 38255,
- .left_margin = 144,
- .right_margin = 0,
- .upper_margin = 34,
- .lower_margin = 40,
- .hsync_len = 1,
- .vsync_len = 1,
- .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_OE_ACT_HIGH,
- .vmode = FB_VMODE_NONINTERLACED,
- .flag = 0,
- }, {
- /* NTSC TV output */
- .name = "TV-NTSC",
- .refresh = 60,
- .xres = 640,
- .yres = 480,
- .pixclock = 37538,
- .left_margin = 38,
- .right_margin = 858 - 640 - 38 - 3,
- .upper_margin = 36,
- .lower_margin = 518 - 480 - 36 - 1,
- .hsync_len = 3,
- .vsync_len = 1,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- .flag = 0,
- }, {
- /* PAL TV output */
- .name = "TV-PAL",
- .refresh = 50,
- .xres = 640,
- .yres = 480,
- .pixclock = 37538,
- .left_margin = 38,
- .right_margin = 960 - 640 - 38 - 32,
- .upper_margin = 32,
- .lower_margin = 555 - 480 - 32 - 3,
- .hsync_len = 32,
- .vsync_len = 3,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- .flag = 0,
- }, {
- /* TV output VGA mode, 640x480 @ 65 Hz */
- .name = "TV-VGA",
- .refresh = 60,
- .xres = 640,
- .yres = 480,
- .pixclock = 40574,
- .left_margin = 35,
- .right_margin = 45,
- .upper_margin = 9,
- .lower_margin = 1,
- .hsync_len = 46,
- .vsync_len = 5,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- .flag = 0,
- },
-};
-
-struct mx3fb_data {
- struct fb_info *fbi;
- int backlight_level;
- void __iomem *reg_base;
- spinlock_t lock;
- struct device *dev;
- struct backlight_device *bl;
-
- uint32_t h_start_width;
- uint32_t v_start_width;
- enum disp_data_mapping disp_data_fmt;
-};
-
-struct dma_chan_request {
- struct mx3fb_data *mx3fb;
- enum ipu_channel id;
-};
-
-/* MX3 specific framebuffer information. */
-struct mx3fb_info {
- int blank;
- enum ipu_channel ipu_ch;
- uint32_t cur_ipu_buf;
-
- u32 pseudo_palette[16];
-
- struct completion flip_cmpl;
- struct mutex mutex; /* Protects fb-ops */
- struct mx3fb_data *mx3fb;
- struct idmac_channel *idmac_channel;
- struct dma_async_tx_descriptor *txd;
- dma_cookie_t cookie;
- struct scatterlist sg[2];
-
- struct fb_var_screeninfo cur_var; /* current var info */
-};
-
-static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value);
-static u32 sdc_get_brightness(struct mx3fb_data *mx3fb);
-
-static int mx3fb_bl_get_brightness(struct backlight_device *bl)
-{
- struct mx3fb_data *fbd = bl_get_data(bl);
-
- return sdc_get_brightness(fbd);
-}
-
-static int mx3fb_bl_update_status(struct backlight_device *bl)
-{
- struct mx3fb_data *fbd = bl_get_data(bl);
- int brightness = backlight_get_brightness(bl);
-
- fbd->backlight_level = (fbd->backlight_level & ~0xFF) | brightness;
-
- sdc_set_brightness(fbd, fbd->backlight_level);
-
- return 0;
-}
-
-static const struct backlight_ops mx3fb_lcdc_bl_ops = {
- .update_status = mx3fb_bl_update_status,
- .get_brightness = mx3fb_bl_get_brightness,
-};
-
-static void mx3fb_init_backlight(struct mx3fb_data *fbd)
-{
- struct backlight_properties props;
- struct backlight_device *bl;
-
- if (fbd->bl)
- return;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = 0xff;
- props.type = BACKLIGHT_RAW;
- sdc_set_brightness(fbd, fbd->backlight_level);
-
- bl = backlight_device_register("mx3fb-bl", fbd->dev, fbd,
- &mx3fb_lcdc_bl_ops, &props);
- if (IS_ERR(bl)) {
- dev_err(fbd->dev, "error %ld on backlight register\n",
- PTR_ERR(bl));
- return;
- }
-
- fbd->bl = bl;
- bl->props.power = FB_BLANK_UNBLANK;
- bl->props.fb_blank = FB_BLANK_UNBLANK;
- bl->props.brightness = mx3fb_bl_get_brightness(bl);
-}
-
-static void mx3fb_exit_backlight(struct mx3fb_data *fbd)
-{
- backlight_device_unregister(fbd->bl);
-}
-
-static void mx3fb_dma_done(void *);
-
- /* fb mode and bpp in use. Can be set on the kernel command line, therefore file-static. */
-static const char *fb_mode;
-static unsigned long default_bpp = 16;
-
-static u32 mx3fb_read_reg(struct mx3fb_data *mx3fb, unsigned long reg)
-{
- return __raw_readl(mx3fb->reg_base + reg);
-}
-
-static void mx3fb_write_reg(struct mx3fb_data *mx3fb, u32 value, unsigned long reg)
-{
- __raw_writel(value, mx3fb->reg_base + reg);
-}
-
-struct di_mapping {
- uint32_t b0, b1, b2;
-};
-
-static const struct di_mapping di_mappings[] = {
- [IPU_DISP_DATA_MAPPING_RGB666] = { 0x0005000f, 0x000b000f, 0x0011000f },
- [IPU_DISP_DATA_MAPPING_RGB565] = { 0x0004003f, 0x000a000f, 0x000f003f },
- [IPU_DISP_DATA_MAPPING_RGB888] = { 0x00070000, 0x000f0000, 0x00170000 },
-};
-
-static void sdc_fb_init(struct mx3fb_info *fbi)
-{
- struct mx3fb_data *mx3fb = fbi->mx3fb;
- uint32_t reg;
-
- reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
-
- mx3fb_write_reg(mx3fb, reg | SDC_COM_BG_EN, SDC_COM_CONF);
-}
-
-/* Returns enabled flag before uninit */
-static uint32_t sdc_fb_uninit(struct mx3fb_info *fbi)
-{
- struct mx3fb_data *mx3fb = fbi->mx3fb;
- uint32_t reg;
-
- reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
-
- mx3fb_write_reg(mx3fb, reg & ~SDC_COM_BG_EN, SDC_COM_CONF);
-
- return reg & SDC_COM_BG_EN;
-}
-
-static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
-{
- struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
- struct idmac_channel *ichan = mx3_fbi->idmac_channel;
- struct dma_chan *dma_chan = &ichan->dma_chan;
- unsigned long flags;
- dma_cookie_t cookie;
-
- if (mx3_fbi->txd)
- dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
- to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
- else
- dev_dbg(mx3fb->dev, "mx3fbi %p, txd = NULL\n", mx3_fbi);
-
- /* This enables the channel */
- if (mx3_fbi->cookie < 0) {
- mx3_fbi->txd = dmaengine_prep_slave_sg(dma_chan,
- &mx3_fbi->sg[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
- if (!mx3_fbi->txd) {
- dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n",
- dma_chan->chan_id);
- return;
- }
-
- mx3_fbi->txd->callback_param = mx3_fbi->txd;
- mx3_fbi->txd->callback = mx3fb_dma_done;
-
- cookie = mx3_fbi->txd->tx_submit(mx3_fbi->txd);
- dev_dbg(mx3fb->dev, "%d: Submit %p #%d [%c]\n", __LINE__,
- mx3_fbi->txd, cookie, list_empty(&ichan->queue) ? '-' : '+');
- } else {
- if (!mx3_fbi->txd || !mx3_fbi->txd->tx_submit) {
- dev_err(mx3fb->dev, "Cannot enable channel %d\n",
- dma_chan->chan_id);
- return;
- }
-
- /* Just re-activate the same buffer */
- dma_async_issue_pending(dma_chan);
- cookie = mx3_fbi->cookie;
- dev_dbg(mx3fb->dev, "%d: Re-submit %p #%d [%c]\n", __LINE__,
- mx3_fbi->txd, cookie, list_empty(&ichan->queue) ? '-' : '+');
- }
-
- if (cookie >= 0) {
- spin_lock_irqsave(&mx3fb->lock, flags);
- sdc_fb_init(mx3_fbi);
- mx3_fbi->cookie = cookie;
- spin_unlock_irqrestore(&mx3fb->lock, flags);
- }
-
- /*
- * Attention! Without this msleep the channel keeps generating
- * interrupts. Next sdc_set_brightness() is going to be called
- * from mx3fb_blank().
- */
- msleep(2);
-}
-
-static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
-{
- struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
- unsigned long flags;
-
- if (mx3_fbi->txd == NULL)
- return;
-
- spin_lock_irqsave(&mx3fb->lock, flags);
-
- sdc_fb_uninit(mx3_fbi);
-
- spin_unlock_irqrestore(&mx3fb->lock, flags);
-
- dmaengine_terminate_all(mx3_fbi->txd->chan);
- mx3_fbi->txd = NULL;
- mx3_fbi->cookie = -EINVAL;
-}
-
-/**
- * sdc_set_window_pos() - set window position of the respective plane.
- * @mx3fb: mx3fb context.
- * @channel: IPU DMAC channel ID.
- * @x_pos: X coordinate relative to the top left corner to place window at.
- * @y_pos: Y coordinate relative to the top left corner to place window at.
- * @return: 0 on success or negative error code on failure.
- */
-static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel,
- int16_t x_pos, int16_t y_pos)
-{
- if (channel != IDMAC_SDC_0)
- return -EINVAL;
-
- x_pos += mx3fb->h_start_width;
- y_pos += mx3fb->v_start_width;
-
- mx3fb_write_reg(mx3fb, (x_pos << 16) | y_pos, SDC_BG_POS);
- return 0;
-}
-
-/**
- * sdc_init_panel() - initialize a synchronous LCD panel.
- * @mx3fb: mx3fb context.
- * @panel: panel type.
- * @pixel_clk: desired pixel clock frequency in Hz.
- * @width: width of panel in pixels.
- * @height: height of panel in pixels.
- * @h_start_width: number of pixel clocks between the HSYNC signal pulse
- * and the start of valid data.
- * @h_sync_width: width of the HSYNC signal in units of pixel clocks.
- * @h_end_width: number of pixel clocks between the end of valid data
- * and the HSYNC signal for next line.
- * @v_start_width: number of lines between the VSYNC signal pulse and the
- * start of valid data.
- * @v_sync_width: width of the VSYNC signal in units of lines.
- * @v_end_width: number of lines between the end of valid data and the
- * VSYNC signal for next frame.
- * @sig: bitfield of signal polarities for LCD interface.
- * @return: 0 on success or negative error code on failure.
- */
-static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
- uint32_t pixel_clk,
- uint16_t width, uint16_t height,
- uint16_t h_start_width, uint16_t h_sync_width,
- uint16_t h_end_width, uint16_t v_start_width,
- uint16_t v_sync_width, uint16_t v_end_width,
- const struct ipu_di_signal_cfg *sig)
-{
- unsigned long lock_flags;
- uint32_t reg;
- uint32_t old_conf;
- uint32_t div;
- struct clk *ipu_clk;
- const struct di_mapping *map;
-
- dev_dbg(mx3fb->dev, "panel size = %d x %d", width, height);
-
- if (v_sync_width == 0 || h_sync_width == 0)
- return -EINVAL;
-
- /* Init panel size and blanking periods */
- reg = ((uint32_t) (h_sync_width - 1) << 26) |
- ((uint32_t) (width + h_start_width + h_end_width - 1) << 16);
- mx3fb_write_reg(mx3fb, reg, SDC_HOR_CONF);
-
-#ifdef DEBUG
- printk(KERN_CONT " hor_conf %x,", reg);
-#endif
-
- reg = ((uint32_t) (v_sync_width - 1) << 26) | SDC_V_SYNC_WIDTH_L |
- ((uint32_t) (height + v_start_width + v_end_width - 1) << 16);
- mx3fb_write_reg(mx3fb, reg, SDC_VER_CONF);
-
-#ifdef DEBUG
- printk(KERN_CONT " ver_conf %x\n", reg);
-#endif
-
- mx3fb->h_start_width = h_start_width;
- mx3fb->v_start_width = v_start_width;
-
- switch (panel) {
- case IPU_PANEL_SHARP_TFT:
- mx3fb_write_reg(mx3fb, 0x00FD0102L, SDC_SHARP_CONF_1);
- mx3fb_write_reg(mx3fb, 0x00F500F4L, SDC_SHARP_CONF_2);
- mx3fb_write_reg(mx3fb, SDC_COM_SHARP | SDC_COM_TFT_COLOR, SDC_COM_CONF);
- break;
- case IPU_PANEL_TFT:
- mx3fb_write_reg(mx3fb, SDC_COM_TFT_COLOR, SDC_COM_CONF);
- break;
- default:
- return -EINVAL;
- }
-
- /* Init clocking */
-
- /*
- * Calculate divider: the fractional part is 4 bits, so simply multiply by
- * 2^4 to get the fractional part, as long as we stay under ~250MHz; on
- * i.MX31 it (HSP_CLK) is <= 178MHz, currently 128.267MHz.
- */
- ipu_clk = clk_get(mx3fb->dev, NULL);
- if (!IS_ERR(ipu_clk)) {
- div = clk_get_rate(ipu_clk) * 16 / pixel_clk;
- clk_put(ipu_clk);
- } else {
- div = 0;
- }
-
- if (div < 0x40) { /* Divider less than 4 */
- dev_dbg(mx3fb->dev,
- "InitPanel() - Pixel clock divider less than 4\n");
- div = 0x40;
- }
-
- dev_dbg(mx3fb->dev, "pixel clk = %u, divider %u.%u\n",
- pixel_clk, div >> 4, (div & 7) * 125);
-
- spin_lock_irqsave(&mx3fb->lock, lock_flags);
-
- /*
- * DISP3_IF_CLK_DOWN_WR is half the divider value and 2 fraction bits
- * fewer. Subtract 1 extra from DISP3_IF_CLK_DOWN_WR based on timing
- * debug. DISP3_IF_CLK_UP_WR is 0
- */
- mx3fb_write_reg(mx3fb, (((div / 8) - 1) << 22) | div, DI_DISP3_TIME_CONF);
-
- /* DI settings */
- old_conf = mx3fb_read_reg(mx3fb, DI_DISP_IF_CONF) & 0x78FFFFFF;
- old_conf |= sig->datamask_en << DI_D3_DATAMSK_SHIFT |
- sig->clksel_en << DI_D3_CLK_SEL_SHIFT |
- sig->clkidle_en << DI_D3_CLK_IDLE_SHIFT;
- mx3fb_write_reg(mx3fb, old_conf, DI_DISP_IF_CONF);
-
- old_conf = mx3fb_read_reg(mx3fb, DI_DISP_SIG_POL) & 0xE0FFFFFF;
- old_conf |= sig->data_pol << DI_D3_DATA_POL_SHIFT |
- sig->clk_pol << DI_D3_CLK_POL_SHIFT |
- sig->enable_pol << DI_D3_DRDY_SHARP_POL_SHIFT |
- sig->Hsync_pol << DI_D3_HSYNC_POL_SHIFT |
- sig->Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
- mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL);
-
- map = &di_mappings[mx3fb->disp_data_fmt];
- mx3fb_write_reg(mx3fb, map->b0, DI_DISP3_B0_MAP);
- mx3fb_write_reg(mx3fb, map->b1, DI_DISP3_B1_MAP);
- mx3fb_write_reg(mx3fb, map->b2, DI_DISP3_B2_MAP);
-
- spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
-
- dev_dbg(mx3fb->dev, "DI_DISP_IF_CONF = 0x%08X\n",
- mx3fb_read_reg(mx3fb, DI_DISP_IF_CONF));
- dev_dbg(mx3fb->dev, "DI_DISP_SIG_POL = 0x%08X\n",
- mx3fb_read_reg(mx3fb, DI_DISP_SIG_POL));
- dev_dbg(mx3fb->dev, "DI_DISP3_TIME_CONF = 0x%08X\n",
- mx3fb_read_reg(mx3fb, DI_DISP3_TIME_CONF));
-
- return 0;
-}
-
-/**
- * sdc_set_color_key() - set the transparent color key for SDC graphic plane.
- * @mx3fb: mx3fb context.
- * @channel: IPU DMAC channel ID.
- * @enable: boolean to enable or disable the color key.
- * @color_key: 24-bit RGB color to use as transparent color key.
- * @return: 0 on success or negative error code on failure.
- */
-static int sdc_set_color_key(struct mx3fb_data *mx3fb, enum ipu_channel channel,
- bool enable, uint32_t color_key)
-{
- uint32_t reg, sdc_conf;
- unsigned long lock_flags;
-
- spin_lock_irqsave(&mx3fb->lock, lock_flags);
-
- sdc_conf = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
- if (channel == IDMAC_SDC_0)
- sdc_conf &= ~SDC_COM_GWSEL;
- else
- sdc_conf |= SDC_COM_GWSEL;
-
- if (enable) {
- reg = mx3fb_read_reg(mx3fb, SDC_GW_CTRL) & 0xFF000000L;
- mx3fb_write_reg(mx3fb, reg | (color_key & 0x00FFFFFFL),
- SDC_GW_CTRL);
-
- sdc_conf |= SDC_COM_KEY_COLOR_G;
- } else {
- sdc_conf &= ~SDC_COM_KEY_COLOR_G;
- }
- mx3fb_write_reg(mx3fb, sdc_conf, SDC_COM_CONF);
-
- spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
-
- return 0;
-}
-
-/**
- * sdc_set_global_alpha() - set global alpha blending modes.
- * @mx3fb: mx3fb context.
- * @enable: boolean to enable or disable global alpha blending. If disabled,
- * per pixel blending is used.
- * @alpha: global alpha value.
- * @return: 0 on success or negative error code on failure.
- */
-static int sdc_set_global_alpha(struct mx3fb_data *mx3fb, bool enable, uint8_t alpha)
-{
- uint32_t reg;
- unsigned long lock_flags;
-
- spin_lock_irqsave(&mx3fb->lock, lock_flags);
-
- if (enable) {
- reg = mx3fb_read_reg(mx3fb, SDC_GW_CTRL) & 0x00FFFFFFL;
- mx3fb_write_reg(mx3fb, reg | ((uint32_t) alpha << 24), SDC_GW_CTRL);
-
- reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
- mx3fb_write_reg(mx3fb, reg | SDC_COM_GLB_A, SDC_COM_CONF);
- } else {
- reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF);
- mx3fb_write_reg(mx3fb, reg & ~SDC_COM_GLB_A, SDC_COM_CONF);
- }
-
- spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
-
- return 0;
-}
-
-static u32 sdc_get_brightness(struct mx3fb_data *mx3fb)
-{
- u32 brightness;
-
- brightness = mx3fb_read_reg(mx3fb, SDC_PWM_CTRL);
- brightness = (brightness >> 16) & 0xFF;
-
- return brightness;
-}
-
-static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value)
-{
- dev_dbg(mx3fb->dev, "%s: value = %d\n", __func__, value);
- /* This might be board-specific */
- mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL);
- return;
-}
-
-static uint32_t bpp_to_pixfmt(int bpp)
-{
- uint32_t pixfmt = 0;
- switch (bpp) {
- case 24:
- pixfmt = IPU_PIX_FMT_BGR24;
- break;
- case 32:
- pixfmt = IPU_PIX_FMT_BGR32;
- break;
- case 16:
- pixfmt = IPU_PIX_FMT_RGB565;
- break;
- }
- return pixfmt;
-}
-
-static int mx3fb_blank(int blank, struct fb_info *fbi);
-static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len,
- bool lock);
-static int mx3fb_unmap_video_memory(struct fb_info *fbi);
-
-/**
- * mx3fb_set_fix() - set fixed framebuffer parameters from variable settings.
- * @fbi: framebuffer information pointer
- * @return: 0 on success or negative error code on failure.
- */
-static int mx3fb_set_fix(struct fb_info *fbi)
-{
- struct fb_fix_screeninfo *fix = &fbi->fix;
- struct fb_var_screeninfo *var = &fbi->var;
-
- memcpy(fix->id, "DISP3 BG", 8);
-
- fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
-
- fix->type = FB_TYPE_PACKED_PIXELS;
- fix->accel = FB_ACCEL_NONE;
- fix->visual = FB_VISUAL_TRUECOLOR;
- fix->xpanstep = 1;
- fix->ypanstep = 1;
-
- return 0;
-}
-
-static void mx3fb_dma_done(void *arg)
-{
- struct idmac_tx_desc *tx_desc = to_tx_desc(arg);
- struct dma_chan *chan = tx_desc->txd.chan;
- struct idmac_channel *ichannel = to_idmac_chan(chan);
- struct mx3fb_data *mx3fb = ichannel->client;
- struct mx3fb_info *mx3_fbi = mx3fb->fbi->par;
-
- dev_dbg(mx3fb->dev, "irq %d callback\n", ichannel->eof_irq);
-
- /* We only need one interrupt, it will be re-enabled as needed */
- disable_irq_nosync(ichannel->eof_irq);
-
- complete(&mx3_fbi->flip_cmpl);
-}
-
-static bool mx3fb_must_set_par(struct fb_info *fbi)
-{
- struct mx3fb_info *mx3_fbi = fbi->par;
- struct fb_var_screeninfo old_var = mx3_fbi->cur_var;
- struct fb_var_screeninfo new_var = fbi->var;
-
- if ((fbi->var.activate & FB_ACTIVATE_FORCE) &&
- (fbi->var.activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW)
- return true;
-
- /*
- * Ignore xoffset and yoffset update,
- * because pan display handles this case.
- */
- old_var.xoffset = new_var.xoffset;
- old_var.yoffset = new_var.yoffset;
-
- return !!memcmp(&old_var, &new_var, sizeof(struct fb_var_screeninfo));
-}
-
-static int __set_par(struct fb_info *fbi, bool lock)
-{
- u32 mem_len, cur_xoffset, cur_yoffset;
- struct ipu_di_signal_cfg sig_cfg;
- enum ipu_panel mode = IPU_PANEL_TFT;
- struct mx3fb_info *mx3_fbi = fbi->par;
- struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
- struct idmac_channel *ichan = mx3_fbi->idmac_channel;
- struct idmac_video_param *video = &ichan->params.video;
- struct scatterlist *sg = mx3_fbi->sg;
-
- /* Total cleanup */
- if (mx3_fbi->txd)
- sdc_disable_channel(mx3_fbi);
-
- mx3fb_set_fix(fbi);
-
- mem_len = fbi->var.yres_virtual * fbi->fix.line_length;
- if (mem_len > fbi->fix.smem_len) {
- if (fbi->fix.smem_start)
- mx3fb_unmap_video_memory(fbi);
-
- if (mx3fb_map_video_memory(fbi, mem_len, lock) < 0)
- return -ENOMEM;
- }
-
- sg_init_table(&sg[0], 1);
- sg_init_table(&sg[1], 1);
-
- sg_dma_address(&sg[0]) = fbi->fix.smem_start;
- sg_set_page(&sg[0], virt_to_page(fbi->screen_base),
- fbi->fix.smem_len,
- offset_in_page(fbi->screen_base));
-
- if (mx3_fbi->ipu_ch == IDMAC_SDC_0) {
- memset(&sig_cfg, 0, sizeof(sig_cfg));
- if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT)
- sig_cfg.Hsync_pol = true;
- if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT)
- sig_cfg.Vsync_pol = true;
- if (fbi->var.sync & FB_SYNC_CLK_INVERT)
- sig_cfg.clk_pol = true;
- if (fbi->var.sync & FB_SYNC_DATA_INVERT)
- sig_cfg.data_pol = true;
- if (fbi->var.sync & FB_SYNC_OE_ACT_HIGH)
- sig_cfg.enable_pol = true;
- if (fbi->var.sync & FB_SYNC_CLK_IDLE_EN)
- sig_cfg.clkidle_en = true;
- if (fbi->var.sync & FB_SYNC_CLK_SEL_EN)
- sig_cfg.clksel_en = true;
- if (fbi->var.sync & FB_SYNC_SHARP_MODE)
- mode = IPU_PANEL_SHARP_TFT;
-
- dev_dbg(fbi->device, "pixclock = %u Hz\n",
- (u32) (PICOS2KHZ(fbi->var.pixclock) * 1000UL));
-
- if (sdc_init_panel(mx3fb, mode,
- (PICOS2KHZ(fbi->var.pixclock)) * 1000UL,
- fbi->var.xres, fbi->var.yres,
- fbi->var.left_margin,
- fbi->var.hsync_len,
- fbi->var.right_margin +
- fbi->var.hsync_len,
- fbi->var.upper_margin,
- fbi->var.vsync_len,
- fbi->var.lower_margin +
- fbi->var.vsync_len, &sig_cfg) != 0) {
- dev_err(fbi->device,
- "mx3fb: Error initializing panel.\n");
- return -EINVAL;
- }
- }
-
- sdc_set_window_pos(mx3fb, mx3_fbi->ipu_ch, 0, 0);
-
- mx3_fbi->cur_ipu_buf = 0;
-
- video->out_pixel_fmt = bpp_to_pixfmt(fbi->var.bits_per_pixel);
- video->out_width = fbi->var.xres;
- video->out_height = fbi->var.yres;
- video->out_stride = fbi->var.xres_virtual;
-
- if (mx3_fbi->blank == FB_BLANK_UNBLANK) {
- sdc_enable_channel(mx3_fbi);
- /*
- * sg[0] points to fb smem_start address
- * and is actually active in controller.
- */
- mx3_fbi->cur_var.xoffset = 0;
- mx3_fbi->cur_var.yoffset = 0;
- }
-
- /*
- * Preserve xoffset and yoffset in case they are
- * inactive in the controller while the fb is blanked.
- */
- cur_xoffset = mx3_fbi->cur_var.xoffset;
- cur_yoffset = mx3_fbi->cur_var.yoffset;
- mx3_fbi->cur_var = fbi->var;
- mx3_fbi->cur_var.xoffset = cur_xoffset;
- mx3_fbi->cur_var.yoffset = cur_yoffset;
-
- return 0;
-}
-
-/**
- * mx3fb_set_par() - set framebuffer parameters and change the operating mode.
- * @fbi: framebuffer information pointer.
- * @return: 0 on success or negative error code on failure.
- */
-static int mx3fb_set_par(struct fb_info *fbi)
-{
- struct mx3fb_info *mx3_fbi = fbi->par;
- struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
- struct idmac_channel *ichan = mx3_fbi->idmac_channel;
- int ret;
-
- dev_dbg(mx3fb->dev, "%s [%c]\n", __func__, list_empty(&ichan->queue) ? '-' : '+');
-
- mutex_lock(&mx3_fbi->mutex);
-
- ret = mx3fb_must_set_par(fbi) ? __set_par(fbi, true) : 0;
-
- mutex_unlock(&mx3_fbi->mutex);
-
- return ret;
-}
-
-/**
- * mx3fb_check_var() - check and adjust framebuffer variable parameters.
- * @var: framebuffer variable parameters
- * @fbi: framebuffer information pointer
- */
-static int mx3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
-{
- struct mx3fb_info *mx3_fbi = fbi->par;
- u32 vtotal;
- u32 htotal;
-
- dev_dbg(fbi->device, "%s\n", __func__);
-
- if (var->xres_virtual < var->xres)
- var->xres_virtual = var->xres;
- if (var->yres_virtual < var->yres)
- var->yres_virtual = var->yres;
-
- if ((var->bits_per_pixel != 32) && (var->bits_per_pixel != 24) &&
- (var->bits_per_pixel != 16))
- var->bits_per_pixel = default_bpp;
-
- switch (var->bits_per_pixel) {
- case 16:
- var->red.length = 5;
- var->red.offset = 11;
- var->red.msb_right = 0;
-
- var->green.length = 6;
- var->green.offset = 5;
- var->green.msb_right = 0;
-
- var->blue.length = 5;
- var->blue.offset = 0;
- var->blue.msb_right = 0;
-
- var->transp.length = 0;
- var->transp.offset = 0;
- var->transp.msb_right = 0;
- break;
- case 24:
- var->red.length = 8;
- var->red.offset = 16;
- var->red.msb_right = 0;
-
- var->green.length = 8;
- var->green.offset = 8;
- var->green.msb_right = 0;
-
- var->blue.length = 8;
- var->blue.offset = 0;
- var->blue.msb_right = 0;
-
- var->transp.length = 0;
- var->transp.offset = 0;
- var->transp.msb_right = 0;
- break;
- case 32:
- var->red.length = 8;
- var->red.offset = 16;
- var->red.msb_right = 0;
-
- var->green.length = 8;
- var->green.offset = 8;
- var->green.msb_right = 0;
-
- var->blue.length = 8;
- var->blue.offset = 0;
- var->blue.msb_right = 0;
-
- var->transp.length = 8;
- var->transp.offset = 24;
- var->transp.msb_right = 0;
- break;
- }
-
- if (var->pixclock < 1000) {
- htotal = var->xres + var->right_margin + var->hsync_len +
- var->left_margin;
- vtotal = var->yres + var->lower_margin + var->vsync_len +
- var->upper_margin;
- var->pixclock = (vtotal * htotal * 6UL) / 100UL;
- var->pixclock = KHZ2PICOS(var->pixclock);
- dev_dbg(fbi->device, "pixclock set for 60Hz refresh = %u ps\n",
- var->pixclock);
- }
-
- var->height = -1;
- var->width = -1;
- var->grayscale = 0;
-
- /* Preserve sync flags */
- var->sync |= mx3_fbi->cur_var.sync;
- mx3_fbi->cur_var.sync |= var->sync;
-
- return 0;
-}
-
-static u32 chan_to_field(unsigned int chan, struct fb_bitfield *bf)
-{
- chan &= 0xffff;
- chan >>= 16 - bf->length;
- return chan << bf->offset;
-}
-
-static int mx3fb_setcolreg(unsigned int regno, unsigned int red,
- unsigned int green, unsigned int blue,
- unsigned int trans, struct fb_info *fbi)
-{
- struct mx3fb_info *mx3_fbi = fbi->par;
- u32 val;
- int ret = 1;
-
- dev_dbg(fbi->device, "%s, regno = %u\n", __func__, regno);
-
- mutex_lock(&mx3_fbi->mutex);
- /*
- * If greyscale is true, then we convert the RGB value
- * to greyscale no matter what visual we are using.
- */
- if (fbi->var.grayscale)
- red = green = blue = (19595 * red + 38470 * green +
- 7471 * blue) >> 16;
- switch (fbi->fix.visual) {
- case FB_VISUAL_TRUECOLOR:
- /*
- * 16-bit True Colour. We encode the RGB value
- * according to the RGB bitfield information.
- */
- if (regno < 16) {
- u32 *pal = fbi->pseudo_palette;
-
- val = chan_to_field(red, &fbi->var.red);
- val |= chan_to_field(green, &fbi->var.green);
- val |= chan_to_field(blue, &fbi->var.blue);
-
- pal[regno] = val;
-
- ret = 0;
- }
- break;
-
- case FB_VISUAL_STATIC_PSEUDOCOLOR:
- case FB_VISUAL_PSEUDOCOLOR:
- break;
- }
- mutex_unlock(&mx3_fbi->mutex);
-
- return ret;
-}
-
-static void __blank(int blank, struct fb_info *fbi)
-{
- struct mx3fb_info *mx3_fbi = fbi->par;
- struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
- int was_blank = mx3_fbi->blank;
-
- mx3_fbi->blank = blank;
-
- /* Attention!
- * Do not call sdc_disable_channel() for a channel that is disabled
- * already! This will result in a kernel NULL pointer dereference
- * (mx3_fbi->txd is NULL). Hide the fact that all blank modes are
- * handled equally by this driver.
- */
- if (blank > FB_BLANK_UNBLANK && was_blank > FB_BLANK_UNBLANK)
- return;
-
- switch (blank) {
- case FB_BLANK_POWERDOWN:
- case FB_BLANK_VSYNC_SUSPEND:
- case FB_BLANK_HSYNC_SUSPEND:
- case FB_BLANK_NORMAL:
- sdc_set_brightness(mx3fb, 0);
- memset((char *)fbi->screen_base, 0, fbi->fix.smem_len);
- /* Give LCD time to update - enough for 50 and 60 Hz */
- msleep(25);
- sdc_disable_channel(mx3_fbi);
- break;
- case FB_BLANK_UNBLANK:
- sdc_enable_channel(mx3_fbi);
- sdc_set_brightness(mx3fb, mx3fb->backlight_level);
- break;
- }
-}
-
-/**
- * mx3fb_blank() - blank the display.
- * @blank: blank value for the panel
- * @fbi: framebuffer information pointer
- */
-static int mx3fb_blank(int blank, struct fb_info *fbi)
-{
- struct mx3fb_info *mx3_fbi = fbi->par;
-
- dev_dbg(fbi->device, "%s, blank = %d, base %p, len %u\n", __func__,
- blank, fbi->screen_base, fbi->fix.smem_len);
-
- if (mx3_fbi->blank == blank)
- return 0;
-
- mutex_lock(&mx3_fbi->mutex);
- __blank(blank, fbi);
- mutex_unlock(&mx3_fbi->mutex);
-
- return 0;
-}
-
-/**
- * mx3fb_pan_display() - pan or wrap the display
- * @var: variable screen buffer information.
- * @fbi: framebuffer information pointer.
- *
- * We look only at xoffset, yoffset and the FB_VMODE_YWRAP flag
- */
-static int mx3fb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *fbi)
-{
- struct mx3fb_info *mx3_fbi = fbi->par;
- u32 y_bottom;
- unsigned long base;
- off_t offset;
- dma_cookie_t cookie;
- struct scatterlist *sg = mx3_fbi->sg;
- struct dma_chan *dma_chan = &mx3_fbi->idmac_channel->dma_chan;
- struct dma_async_tx_descriptor *txd;
- int ret;
-
- dev_dbg(fbi->device, "%s [%c]\n", __func__,
- list_empty(&mx3_fbi->idmac_channel->queue) ? '-' : '+');
-
- if (var->xoffset > 0) {
- dev_dbg(fbi->device, "x panning not supported\n");
- return -EINVAL;
- }
-
- if (mx3_fbi->cur_var.xoffset == var->xoffset &&
- mx3_fbi->cur_var.yoffset == var->yoffset)
- return 0; /* No change, do nothing */
-
- y_bottom = var->yoffset;
-
- if (!(var->vmode & FB_VMODE_YWRAP))
- y_bottom += fbi->var.yres;
-
- if (y_bottom > fbi->var.yres_virtual)
- return -EINVAL;
-
- mutex_lock(&mx3_fbi->mutex);
-
- offset = var->yoffset * fbi->fix.line_length
- + var->xoffset * (fbi->var.bits_per_pixel / 8);
- base = fbi->fix.smem_start + offset;
-
- dev_dbg(fbi->device, "Updating SDC BG buf %d address=0x%08lX\n",
- mx3_fbi->cur_ipu_buf, base);
-
- /*
- * We enable the End of Frame interrupt, which will free a tx-descriptor,
- * which we will need for the next dmaengine_prep_slave_sg(). The
- * IRQ-handler will disable the IRQ again.
- */
- init_completion(&mx3_fbi->flip_cmpl);
- enable_irq(mx3_fbi->idmac_channel->eof_irq);
-
- ret = wait_for_completion_timeout(&mx3_fbi->flip_cmpl, HZ / 10);
- if (ret <= 0) {
- mutex_unlock(&mx3_fbi->mutex);
- dev_info(fbi->device, "Panning failed due to %s\n", ret < 0 ?
- "user interrupt" : "timeout");
- disable_irq(mx3_fbi->idmac_channel->eof_irq);
- return ret ? : -ETIMEDOUT;
- }
-
- mx3_fbi->cur_ipu_buf = !mx3_fbi->cur_ipu_buf;
-
- sg_dma_address(&sg[mx3_fbi->cur_ipu_buf]) = base;
- sg_set_page(&sg[mx3_fbi->cur_ipu_buf],
- virt_to_page(fbi->screen_base + offset), fbi->fix.smem_len,
- offset_in_page(fbi->screen_base + offset));
-
- if (mx3_fbi->txd)
- async_tx_ack(mx3_fbi->txd);
-
- txd = dmaengine_prep_slave_sg(dma_chan, sg +
- mx3_fbi->cur_ipu_buf, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
- if (!txd) {
- dev_err(fbi->device,
- "Error preparing a DMA transaction descriptor.\n");
- mutex_unlock(&mx3_fbi->mutex);
- return -EIO;
- }
-
- txd->callback_param = txd;
- txd->callback = mx3fb_dma_done;
-
- /*
- * Emulate original mx3fb behaviour: each new call to idmac_tx_submit()
- * should switch to another buffer
- */
- cookie = txd->tx_submit(txd);
- dev_dbg(fbi->device, "%d: Submit %p #%d\n", __LINE__, txd, cookie);
- if (cookie < 0) {
- dev_err(fbi->device,
- "Error updating SDC buf %d to address=0x%08lX\n",
- mx3_fbi->cur_ipu_buf, base);
- mutex_unlock(&mx3_fbi->mutex);
- return -EIO;
- }
-
- mx3_fbi->txd = txd;
-
- fbi->var.xoffset = var->xoffset;
- fbi->var.yoffset = var->yoffset;
-
- if (var->vmode & FB_VMODE_YWRAP)
- fbi->var.vmode |= FB_VMODE_YWRAP;
- else
- fbi->var.vmode &= ~FB_VMODE_YWRAP;
-
- mx3_fbi->cur_var = fbi->var;
-
- mutex_unlock(&mx3_fbi->mutex);
-
- dev_dbg(fbi->device, "Update complete\n");
-
- return 0;
-}
-
-/*
- * This structure contains the pointers to the control functions that are
- * invoked by the core framebuffer driver to perform operations like
- * blitting, rectangle filling, copy regions and cursor definition.
- */
-static const struct fb_ops mx3fb_ops = {
- .owner = THIS_MODULE,
- FB_DEFAULT_IOMEM_OPS,
- .fb_set_par = mx3fb_set_par,
- .fb_check_var = mx3fb_check_var,
- .fb_setcolreg = mx3fb_setcolreg,
- .fb_pan_display = mx3fb_pan_display,
- .fb_blank = mx3fb_blank,
-};
-
-#ifdef CONFIG_PM
-/*
- * Power management hooks. Note that we won't be called from IRQ context,
- * unlike the blank functions above, so we may sleep.
- */
-
-/*
- * Suspends the framebuffer and blanks the screen. Power management support
- */
-static int mx3fb_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct mx3fb_data *mx3fb = platform_get_drvdata(pdev);
- struct mx3fb_info *mx3_fbi = mx3fb->fbi->par;
-
- console_lock();
- fb_set_suspend(mx3fb->fbi, 1);
- console_unlock();
-
- if (mx3_fbi->blank == FB_BLANK_UNBLANK) {
- sdc_disable_channel(mx3_fbi);
- sdc_set_brightness(mx3fb, 0);
-
- }
- return 0;
-}
-
-/*
- * Resumes the framebuffer and unblanks the screen. Power management support
- */
-static int mx3fb_resume(struct platform_device *pdev)
-{
- struct mx3fb_data *mx3fb = platform_get_drvdata(pdev);
- struct mx3fb_info *mx3_fbi = mx3fb->fbi->par;
-
- if (mx3_fbi->blank == FB_BLANK_UNBLANK) {
- sdc_enable_channel(mx3_fbi);
- sdc_set_brightness(mx3fb, mx3fb->backlight_level);
- }
-
- console_lock();
- fb_set_suspend(mx3fb->fbi, 0);
- console_unlock();
-
- return 0;
-}
-#else
-#define mx3fb_suspend NULL
-#define mx3fb_resume NULL
-#endif
-
-/*
- * Main framebuffer functions
- */
-
-/**
- * mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer.
- * @fbi: framebuffer information pointer
- * @mem_len: length of mapped memory
- * @lock: whether to take the framebuffer mm_lock (false during initialisation)
- * @return: Error code indicating success or failure
- *
- * This buffer is remapped into a non-cached, non-buffered memory region to
- * allow palette and pixel writes to occur without flushing the cache. Once this
- * area is remapped, all virtual memory access to the video memory should occur
- * at the new region.
- */
-static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len,
- bool lock)
-{
- int retval = 0;
- dma_addr_t addr;
-
- fbi->screen_base = dma_alloc_wc(fbi->device, mem_len, &addr,
- GFP_DMA | GFP_KERNEL);
-
- if (!fbi->screen_base) {
- dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n",
- mem_len);
- retval = -EBUSY;
- goto err0;
- }
-
- if (lock)
- mutex_lock(&fbi->mm_lock);
- fbi->fix.smem_start = addr;
- fbi->fix.smem_len = mem_len;
- if (lock)
- mutex_unlock(&fbi->mm_lock);
-
- dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n",
- (uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len);
-
- fbi->screen_size = fbi->fix.smem_len;
-
- /* Clear the screen */
- memset((char *)fbi->screen_base, 0, fbi->fix.smem_len);
-
- return 0;
-
-err0:
- fbi->fix.smem_len = 0;
- fbi->fix.smem_start = 0;
- fbi->screen_base = NULL;
- return retval;
-}
-
-/**
- * mx3fb_unmap_video_memory() - de-allocate frame buffer memory.
- * @fbi: framebuffer information pointer
- * @return: error code indicating success or failure
- */
-static int mx3fb_unmap_video_memory(struct fb_info *fbi)
-{
- dma_free_wc(fbi->device, fbi->fix.smem_len, fbi->screen_base,
- fbi->fix.smem_start);
-
- fbi->screen_base = NULL;
- mutex_lock(&fbi->mm_lock);
- fbi->fix.smem_start = 0;
- fbi->fix.smem_len = 0;
- mutex_unlock(&fbi->mm_lock);
- return 0;
-}
-
-/**
- * mx3fb_init_fbinfo() - initialize framebuffer information object.
- * @dev: the device
- * @ops: framebuffer device operations
- * @return: initialized framebuffer structure.
- */
-static struct fb_info *mx3fb_init_fbinfo(struct device *dev,
- const struct fb_ops *ops)
-{
- struct fb_info *fbi;
- struct mx3fb_info *mx3fbi;
- int ret;
-
- /* Allocate sufficient memory for the fb structure */
- fbi = framebuffer_alloc(sizeof(struct mx3fb_info), dev);
- if (!fbi)
- return NULL;
-
- mx3fbi = fbi->par;
- mx3fbi->cookie = -EINVAL;
- mx3fbi->cur_ipu_buf = 0;
-
- fbi->var.activate = FB_ACTIVATE_NOW;
-
- fbi->fbops = ops;
- fbi->pseudo_palette = mx3fbi->pseudo_palette;
-
- mutex_init(&mx3fbi->mutex);
-
- /* Allocate colormap */
- ret = fb_alloc_cmap(&fbi->cmap, 16, 0);
- if (ret < 0) {
- framebuffer_release(fbi);
- return NULL;
- }
-
- return fbi;
-}
-
-static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
-{
- struct device *dev = mx3fb->dev;
- struct mx3fb_platform_data *mx3fb_pdata = dev_get_platdata(dev);
- const char *name = mx3fb_pdata->name;
- struct fb_info *fbi;
- struct mx3fb_info *mx3fbi;
- const struct fb_videomode *mode;
- int ret, num_modes;
-
- if (mx3fb_pdata->disp_data_fmt >= ARRAY_SIZE(di_mappings)) {
- dev_err(dev, "Illegal display data format %d\n",
- mx3fb_pdata->disp_data_fmt);
- return -EINVAL;
- }
-
- ichan->client = mx3fb;
-
- if (ichan->dma_chan.chan_id != IDMAC_SDC_0)
- return -EINVAL;
-
- fbi = mx3fb_init_fbinfo(dev, &mx3fb_ops);
- if (!fbi)
- return -ENOMEM;
-
- if (!fb_mode)
- fb_mode = name;
-
- if (!fb_mode) {
- ret = -EINVAL;
- goto emode;
- }
-
- if (mx3fb_pdata->mode && mx3fb_pdata->num_modes) {
- mode = mx3fb_pdata->mode;
- num_modes = mx3fb_pdata->num_modes;
- } else {
- mode = mx3fb_modedb;
- num_modes = ARRAY_SIZE(mx3fb_modedb);
- }
-
- if (!fb_find_mode(&fbi->var, fbi, fb_mode, mode,
- num_modes, NULL, default_bpp)) {
- ret = -EBUSY;
- goto emode;
- }
-
- fb_videomode_to_modelist(mode, num_modes, &fbi->modelist);
-
- /* Default Y virtual size is 2x panel size */
- fbi->var.yres_virtual = fbi->var.yres * 2;
-
- mx3fb->fbi = fbi;
-
- /* set Display Interface clock period */
- mx3fb_write_reg(mx3fb, 0x00100010L, DI_HSP_CLK_PER);
- /* Might need to trigger HSP clock change - see 44.3.3.8.5 */
-
- sdc_set_brightness(mx3fb, 255);
- sdc_set_global_alpha(mx3fb, true, 0xFF);
- sdc_set_color_key(mx3fb, IDMAC_SDC_0, false, 0);
-
- mx3fbi = fbi->par;
- mx3fbi->idmac_channel = ichan;
- mx3fbi->ipu_ch = ichan->dma_chan.chan_id;
- mx3fbi->mx3fb = mx3fb;
- mx3fbi->blank = FB_BLANK_NORMAL;
-
- mx3fb->disp_data_fmt = mx3fb_pdata->disp_data_fmt;
-
- init_completion(&mx3fbi->flip_cmpl);
- disable_irq(ichan->eof_irq);
- dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq);
- ret = __set_par(fbi, false);
- if (ret < 0)
- goto esetpar;
-
- __blank(FB_BLANK_UNBLANK, fbi);
-
- dev_info(dev, "registered, using mode %s\n", fb_mode);
-
- ret = register_framebuffer(fbi);
- if (ret < 0)
- goto erfb;
-
- return 0;
-
-erfb:
-esetpar:
-emode:
- fb_dealloc_cmap(&fbi->cmap);
- framebuffer_release(fbi);
-
- return ret;
-}
-
-static bool chan_filter(struct dma_chan *chan, void *arg)
-{
- struct dma_chan_request *rq = arg;
- struct device *dev;
- struct mx3fb_platform_data *mx3fb_pdata;
-
- if (!imx_dma_is_ipu(chan))
- return false;
-
- if (!rq)
- return false;
-
- dev = rq->mx3fb->dev;
- mx3fb_pdata = dev_get_platdata(dev);
-
- return rq->id == chan->chan_id &&
- mx3fb_pdata->dma_dev == chan->device->dev;
-}
-
-static void release_fbi(struct fb_info *fbi)
-{
- mx3fb_unmap_video_memory(fbi);
-
- fb_dealloc_cmap(&fbi->cmap);
-
- unregister_framebuffer(fbi);
- framebuffer_release(fbi);
-}
-
-static int mx3fb_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- int ret;
- struct resource *sdc_reg;
- struct mx3fb_data *mx3fb;
- dma_cap_mask_t mask;
- struct dma_chan *chan;
- struct dma_chan_request rq;
-
- /*
- * Display Interface (DI) and Synchronous Display Controller (SDC)
- * registers
- */
- sdc_reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!sdc_reg)
- return -EINVAL;
-
- mx3fb = devm_kzalloc(&pdev->dev, sizeof(*mx3fb), GFP_KERNEL);
- if (!mx3fb)
- return -ENOMEM;
-
- spin_lock_init(&mx3fb->lock);
-
- mx3fb->reg_base = ioremap(sdc_reg->start, resource_size(sdc_reg));
- if (!mx3fb->reg_base) {
- ret = -ENOMEM;
- goto eremap;
- }
-
- pr_debug("Remapped %pR at %p\n", sdc_reg, mx3fb->reg_base);
-
- /* IDMAC interface */
- dmaengine_get();
-
- mx3fb->dev = dev;
- platform_set_drvdata(pdev, mx3fb);
-
- rq.mx3fb = mx3fb;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
- dma_cap_set(DMA_PRIVATE, mask);
- rq.id = IDMAC_SDC_0;
- chan = dma_request_channel(mask, chan_filter, &rq);
- if (!chan) {
- ret = -EBUSY;
- goto ersdc0;
- }
-
- mx3fb->backlight_level = 255;
-
- ret = init_fb_chan(mx3fb, to_idmac_chan(chan));
- if (ret < 0)
- goto eisdc0;
-
- mx3fb_init_backlight(mx3fb);
-
- return 0;
-
-eisdc0:
- dma_release_channel(chan);
-ersdc0:
- dmaengine_put();
- iounmap(mx3fb->reg_base);
-eremap:
- dev_err(dev, "mx3fb: failed to register fb\n");
- return ret;
-}
-
-static void mx3fb_remove(struct platform_device *dev)
-{
- struct mx3fb_data *mx3fb = platform_get_drvdata(dev);
- struct fb_info *fbi = mx3fb->fbi;
- struct mx3fb_info *mx3_fbi = fbi->par;
- struct dma_chan *chan;
-
- chan = &mx3_fbi->idmac_channel->dma_chan;
- release_fbi(fbi);
-
- mx3fb_exit_backlight(mx3fb);
-
- dma_release_channel(chan);
- dmaengine_put();
-
- iounmap(mx3fb->reg_base);
-}
-
-static struct platform_driver mx3fb_driver = {
- .driver = {
- .name = MX3FB_NAME,
- },
- .probe = mx3fb_probe,
- .remove_new = mx3fb_remove,
- .suspend = mx3fb_suspend,
- .resume = mx3fb_resume,
-};
-
-/*
- * Parse user specified options (`video=mx3fb:')
- * example:
- * video=mx3fb:bpp=16
- */
-static int __init mx3fb_setup(void)
-{
-#ifndef MODULE
- char *opt, *options = NULL;
-
- if (fb_get_options("mx3fb", &options))
- return -ENODEV;
-
- if (!options || !*options)
- return 0;
-
- while ((opt = strsep(&options, ",")) != NULL) {
- if (!*opt)
- continue;
- if (!strncmp(opt, "bpp=", 4))
- default_bpp = simple_strtoul(opt + 4, NULL, 0);
- else
- fb_mode = opt;
- }
-#endif
-
- return 0;
-}
-
-static int __init mx3fb_init(void)
-{
- int ret = mx3fb_setup();
-
- if (ret < 0)
- return ret;
-
- ret = platform_driver_register(&mx3fb_driver);
- return ret;
-}
-
-static void __exit mx3fb_exit(void)
-{
- platform_driver_unregister(&mx3fb_driver);
-}
-
-module_init(mx3fb_init);
-module_exit(mx3fb_exit);
-
-MODULE_AUTHOR("Freescale Semiconductor, Inc.");
-MODULE_DESCRIPTION("MX3 framebuffer driver");
-MODULE_ALIAS("platform:" MX3FB_NAME);
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/fbdev/neofb.c b/drivers/video/fbdev/neofb.c
index d2f622b4c372..b58b11015c0c 100644
--- a/drivers/video/fbdev/neofb.c
+++ b/drivers/video/fbdev/neofb.c
@@ -1948,49 +1948,40 @@ static struct fb_info *neo_alloc_fb_info(struct pci_dev *dev,
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2070:
- snprintf(info->fix.id, sizeof(info->fix.id),
- "MagicGraph 128");
+ strscpy(info->fix.id, "MagicGraph128", sizeof(info->fix.id));
break;
case FB_ACCEL_NEOMAGIC_NM2090:
- snprintf(info->fix.id, sizeof(info->fix.id),
- "MagicGraph 128V");
+ strscpy(info->fix.id, "MagicGraph128V", sizeof(info->fix.id));
break;
case FB_ACCEL_NEOMAGIC_NM2093:
- snprintf(info->fix.id, sizeof(info->fix.id),
- "MagicGraph 128ZV");
+ strscpy(info->fix.id, "MagicGraph128ZV", sizeof(info->fix.id));
break;
case FB_ACCEL_NEOMAGIC_NM2097:
- snprintf(info->fix.id, sizeof(info->fix.id),
- "MagicGraph 128ZV+");
+ strscpy(info->fix.id, "Mag.Graph128ZV+", sizeof(info->fix.id));
break;
case FB_ACCEL_NEOMAGIC_NM2160:
- snprintf(info->fix.id, sizeof(info->fix.id),
- "MagicGraph 128XD");
+ strscpy(info->fix.id, "MagicGraph128XD", sizeof(info->fix.id));
break;
case FB_ACCEL_NEOMAGIC_NM2200:
- snprintf(info->fix.id, sizeof(info->fix.id),
- "MagicGraph 256AV");
+ strscpy(info->fix.id, "MagicGraph256AV", sizeof(info->fix.id));
info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT;
break;
case FB_ACCEL_NEOMAGIC_NM2230:
- snprintf(info->fix.id, sizeof(info->fix.id),
- "MagicGraph 256AV+");
+ strscpy(info->fix.id, "Mag.Graph256AV+", sizeof(info->fix.id));
info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT;
break;
case FB_ACCEL_NEOMAGIC_NM2360:
- snprintf(info->fix.id, sizeof(info->fix.id),
- "MagicGraph 256ZX");
+ strscpy(info->fix.id, "MagicGraph256ZX", sizeof(info->fix.id));
info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT;
break;
case FB_ACCEL_NEOMAGIC_NM2380:
- snprintf(info->fix.id, sizeof(info->fix.id),
- "MagicGraph 256XL+");
+ strscpy(info->fix.id, "Mag.Graph256XL+", sizeof(info->fix.id));
info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT;
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 86dd24022871..5ae48e36fccb 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -52,8 +52,8 @@ struct ssd1307fb_deviceinfo {
u32 default_vcomh;
u32 default_dclk_div;
u32 default_dclk_frq;
- int need_pwm;
- int need_chargepump;
+ bool need_pwm;
+ bool need_chargepump;
};
struct ssd1307fb_par {
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index c5310eaf8b46..51d8f3299c10 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -172,6 +172,14 @@ struct vring_virtqueue {
/* Host publishes avail event idx */
bool event;
+ /* DMA mapping is done by the driver (premapped mode) */
+ bool premapped;
+
+ /* Whether to unmap descriptors. True only when premapped is false
+ * and use_dma_api is true.
+ */
+ bool do_unmap;
+
/* Head of free buffer list. */
unsigned int free_head;
/* Number we've added since last sync. */
@@ -355,10 +363,14 @@ static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
}
/* Map one sg entry. */
-static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
- struct scatterlist *sg,
- enum dma_data_direction direction)
+static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist *sg,
+ enum dma_data_direction direction, dma_addr_t *addr)
{
+ if (vq->premapped) {
+ *addr = sg_dma_address(sg);
+ return 0;
+ }
+
if (!vq->use_dma_api) {
/*
* If DMA is not used, KMSAN doesn't know that the scatterlist
@@ -366,7 +378,8 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
* depending on the direction.
*/
kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
- return (dma_addr_t)sg_phys(sg);
+ *addr = (dma_addr_t)sg_phys(sg);
+ return 0;
}
/*
@@ -374,9 +387,14 @@ static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
* the way it expects (we don't guarantee that the scatterlist
* will exist for the lifetime of the mapping).
*/
- return dma_map_page(vring_dma_dev(vq),
+ *addr = dma_map_page(vring_dma_dev(vq),
sg_page(sg), sg->offset, sg->length,
direction);
+
+ if (dma_mapping_error(vring_dma_dev(vq), *addr))
+ return -ENOMEM;
+
+ return 0;
}
static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
@@ -427,7 +445,7 @@ static void vring_unmap_one_split_indirect(const struct vring_virtqueue *vq,
{
u16 flags;
- if (!vq->use_dma_api)
+ if (!vq->do_unmap)
return;
flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
@@ -445,18 +463,21 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
struct vring_desc_extra *extra = vq->split.desc_extra;
u16 flags;
- if (!vq->use_dma_api)
- goto out;
-
flags = extra[i].flags;
if (flags & VRING_DESC_F_INDIRECT) {
+ if (!vq->use_dma_api)
+ goto out;
+
dma_unmap_single(vring_dma_dev(vq),
extra[i].addr,
extra[i].len,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
} else {
+ if (!vq->do_unmap)
+ goto out;
+
dma_unmap_page(vring_dma_dev(vq),
extra[i].addr,
extra[i].len,
@@ -588,8 +609,9 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
for (n = 0; n < out_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
- if (vring_mapping_error(vq, addr))
+ dma_addr_t addr;
+
+ if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr))
goto unmap_release;
prev = i;
@@ -603,8 +625,9 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
}
for (; n < (out_sgs + in_sgs); n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
- if (vring_mapping_error(vq, addr))
+ dma_addr_t addr;
+
+ if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr))
goto unmap_release;
prev = i;
@@ -620,7 +643,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
}
/* Last one doesn't continue. */
desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
- if (!indirect && vq->use_dma_api)
+ if (!indirect && vq->do_unmap)
vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
~VRING_DESC_F_NEXT;
@@ -629,8 +652,12 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
dma_addr_t addr = vring_map_single(
vq, desc, total_sg * sizeof(struct vring_desc),
DMA_TO_DEVICE);
- if (vring_mapping_error(vq, addr))
+ if (vring_mapping_error(vq, addr)) {
+ if (vq->premapped)
+ goto free_indirect;
+
goto unmap_release;
+ }
virtqueue_add_desc_split(_vq, vq->split.vring.desc,
head, addr,
@@ -696,6 +723,7 @@ unmap_release:
i = vring_unmap_one_split(vq, i);
}
+free_indirect:
if (indirect)
kfree(desc);
@@ -774,8 +802,10 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
VRING_DESC_F_INDIRECT));
BUG_ON(len == 0 || len % sizeof(struct vring_desc));
- for (j = 0; j < len / sizeof(struct vring_desc); j++)
- vring_unmap_one_split_indirect(vq, &indir_desc[j]);
+ if (vq->do_unmap) {
+ for (j = 0; j < len / sizeof(struct vring_desc); j++)
+ vring_unmap_one_split_indirect(vq, &indir_desc[j]);
+ }
kfree(indir_desc);
vq->split.desc_state[head].indir_desc = NULL;
@@ -1195,17 +1225,20 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
{
u16 flags;
- if (!vq->use_dma_api)
- return;
-
flags = extra->flags;
if (flags & VRING_DESC_F_INDIRECT) {
+ if (!vq->use_dma_api)
+ return;
+
dma_unmap_single(vring_dma_dev(vq),
extra->addr, extra->len,
(flags & VRING_DESC_F_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
} else {
+ if (!vq->do_unmap)
+ return;
+
dma_unmap_page(vring_dma_dev(vq),
extra->addr, extra->len,
(flags & VRING_DESC_F_WRITE) ?
@@ -1218,7 +1251,7 @@ static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
{
u16 flags;
- if (!vq->use_dma_api)
+ if (!vq->do_unmap)
return;
flags = le16_to_cpu(desc->flags);
@@ -1279,9 +1312,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
for (n = 0; n < out_sgs + in_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- addr = vring_map_one_sg(vq, sg, n < out_sgs ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (vring_mapping_error(vq, addr))
+ if (vring_map_one_sg(vq, sg, n < out_sgs ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
goto unmap_release;
desc[i].flags = cpu_to_le16(n < out_sgs ?
@@ -1296,15 +1328,19 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
addr = vring_map_single(vq, desc,
total_sg * sizeof(struct vring_packed_desc),
DMA_TO_DEVICE);
- if (vring_mapping_error(vq, addr))
+ if (vring_mapping_error(vq, addr)) {
+ if (vq->premapped)
+ goto free_desc;
+
goto unmap_release;
+ }
vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
sizeof(struct vring_packed_desc));
vq->packed.vring.desc[head].id = cpu_to_le16(id);
- if (vq->use_dma_api) {
+ if (vq->do_unmap) {
vq->packed.desc_extra[id].addr = addr;
vq->packed.desc_extra[id].len = total_sg *
sizeof(struct vring_packed_desc);
@@ -1355,6 +1391,7 @@ unmap_release:
for (i = 0; i < err_idx; i++)
vring_unmap_desc_packed(vq, &desc[i]);
+free_desc:
kfree(desc);
END_USE(vq);
@@ -1426,9 +1463,10 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
c = 0;
for (n = 0; n < out_sgs + in_sgs; n++) {
for (sg = sgs[n]; sg; sg = sg_next(sg)) {
- dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- if (vring_mapping_error(vq, addr))
+ dma_addr_t addr;
+
+ if (vring_map_one_sg(vq, sg, n < out_sgs ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE, &addr))
goto unmap_release;
flags = cpu_to_le16(vq->packed.avail_used_flags |
@@ -1443,7 +1481,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
desc[i].len = cpu_to_le32(sg->length);
desc[i].id = cpu_to_le16(id);
- if (unlikely(vq->use_dma_api)) {
+ if (unlikely(vq->do_unmap)) {
vq->packed.desc_extra[curr].addr = addr;
vq->packed.desc_extra[curr].len = sg->length;
vq->packed.desc_extra[curr].flags =
@@ -1461,7 +1499,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
}
}
- if (i < head)
+ if (i <= head)
vq->packed.avail_wrap_counter ^= 1;
/* We're using some buffers from the free list. */
@@ -1577,7 +1615,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
vq->free_head = id;
vq->vq.num_free += state->num;
- if (unlikely(vq->use_dma_api)) {
+ if (unlikely(vq->do_unmap)) {
curr = id;
for (i = 0; i < state->num; i++) {
vring_unmap_extra_packed(vq,
@@ -1594,7 +1632,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
if (!desc)
return;
- if (vq->use_dma_api) {
+ if (vq->do_unmap) {
len = vq->packed.desc_extra[id].len;
for (i = 0; i < len / sizeof(struct vring_packed_desc);
i++)
@@ -2052,6 +2090,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->packed_ring = true;
vq->dma_dev = dma_dev;
vq->use_dma_api = vring_use_dma_api(vdev);
+ vq->premapped = false;
+ vq->do_unmap = vq->use_dma_api;
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2112,6 +2152,43 @@ err_ring:
return -ENOMEM;
}
+static int virtqueue_disable_and_recycle(struct virtqueue *_vq,
+ void (*recycle)(struct virtqueue *vq, void *buf))
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = vq->vq.vdev;
+ void *buf;
+ int err;
+
+ if (!vq->we_own_ring)
+ return -EPERM;
+
+ if (!vdev->config->disable_vq_and_reset)
+ return -ENOENT;
+
+ if (!vdev->config->enable_vq_after_reset)
+ return -ENOENT;
+
+ err = vdev->config->disable_vq_and_reset(_vq);
+ if (err)
+ return err;
+
+ while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
+ recycle(_vq, buf);
+
+ return 0;
+}
+
+static int virtqueue_enable_after_reset(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = vq->vq.vdev;
+
+ if (vdev->config->enable_vq_after_reset(_vq))
+ return -EBUSY;
+
+ return 0;
+}
/*
* Generic functions and exported symbols.
@@ -2238,6 +2315,23 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
/**
+ * virtqueue_dma_dev - get the dma dev
+ * @_vq: the struct virtqueue we're talking about.
+ *
+ * Returns the DMA device, which can be used with the DMA API.
+ */
+struct device *virtqueue_dma_dev(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (vq->use_dma_api)
+ return vring_dma_dev(vq);
+ else
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_dev);
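Not part of the patch; a minimal sketch of how a driver might use virtqueue_dma_dev(). The helper name my_map_page() is an assumption.

#include <linux/dma-mapping.h>
#include <linux/virtio.h>

/* Hypothetical helper: map a page against whatever device the ring uses for
 * DMA; give up when the ring bypasses the DMA API entirely. */
static dma_addr_t my_map_page(struct virtqueue *vq, struct page *page, size_t len)
{
        struct device *dev = virtqueue_dma_dev(vq);

        if (!dev)                       /* ring does not use the DMA API */
                return DMA_MAPPING_ERROR;

        return dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
}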
+
+/**
* virtqueue_kick_prepare - first half of split virtqueue_kick call.
* @_vq: the struct virtqueue
*
@@ -2541,6 +2635,8 @@ static struct virtqueue *__vring_new_virtqueue(unsigned int index,
#endif
vq->dma_dev = dma_dev;
vq->use_dma_api = vring_use_dma_api(vdev);
+ vq->premapped = false;
+ vq->do_unmap = vq->use_dma_api;
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2619,7 +2715,7 @@ EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
* virtqueue_resize - resize the vring of vq
* @_vq: the struct virtqueue we're talking about.
* @num: new ring num
- * @recycle: callback for recycle the useless buffer
+ * @recycle: callback to recycle unused buffers
*
* When it is really necessary to create a new vring, it will set the current vq
* into the reset state. Then call the passed callback to recycle the buffer
@@ -2643,13 +2739,8 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
void (*recycle)(struct virtqueue *vq, void *buf))
{
struct vring_virtqueue *vq = to_vvq(_vq);
- struct virtio_device *vdev = vq->vq.vdev;
- void *buf;
int err;
- if (!vq->we_own_ring)
- return -EPERM;
-
if (num > vq->vq.num_max)
return -E2BIG;
@@ -2659,31 +2750,101 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
if ((vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num) == num)
return 0;
- if (!vdev->config->disable_vq_and_reset)
- return -ENOENT;
-
- if (!vdev->config->enable_vq_after_reset)
- return -ENOENT;
-
- err = vdev->config->disable_vq_and_reset(_vq);
+ err = virtqueue_disable_and_recycle(_vq, recycle);
if (err)
return err;
- while ((buf = virtqueue_detach_unused_buf(_vq)) != NULL)
- recycle(_vq, buf);
-
if (vq->packed_ring)
err = virtqueue_resize_packed(_vq, num);
else
err = virtqueue_resize_split(_vq, num);
- if (vdev->config->enable_vq_after_reset(_vq))
- return -EBUSY;
-
- return err;
+ return virtqueue_enable_after_reset(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_resize);
+/**
+ * virtqueue_set_dma_premapped - set the vring premapped mode
+ * @_vq: the struct virtqueue we're talking about.
+ *
+ * Enable the premapped mode of the vq.
+ *
+ * A vring in premapped mode does no DMA mapping internally, so the driver must
+ * map buffers in advance and pass the DMA address via the dma_address field of
+ * the scatterlist. When the driver gets a used buffer back from the vring, it
+ * has to unmap that DMA address itself.
+ *
+ * This function must be called immediately after creating the vq, or after vq
+ * reset, and before adding any buffers to it.
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error.
+ * 0: success.
+ * -EINVAL: buffers have already been added, or the vring does not use the
+ *          DMA API, so premapped mode cannot be enabled.
+ */
+int virtqueue_set_dma_premapped(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ u32 num;
+
+ START_USE(vq);
+
+ num = vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
+
+ if (num != vq->vq.num_free) {
+ END_USE(vq);
+ return -EINVAL;
+ }
+
+ if (!vq->use_dma_api) {
+ END_USE(vq);
+ return -EINVAL;
+ }
+
+ vq->premapped = true;
+ vq->do_unmap = false;
+
+ END_USE(vq);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(virtqueue_set_dma_premapped);
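Not part of the patch; a sketch of the submit side of premapped mode, assuming virtqueue_set_dma_premapped() was called once right after queue creation. The helper name my_add_premapped_inbuf() and the GFP flag are assumptions; the matching unmap is sketched after virtqueue_dma_unmap_single_attrs() below.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Hypothetical submit path: the driver maps the buffer itself and hands the
 * ring a scatterlist whose dma_address is already filled in. */
static int my_add_premapped_inbuf(struct virtqueue *vq, void *buf, size_t len)
{
        struct scatterlist sg;
        dma_addr_t addr;

        addr = virtqueue_dma_map_single_attrs(vq, buf, len, DMA_FROM_DEVICE, 0);
        if (virtqueue_dma_mapping_error(vq, addr))
                return -ENOMEM;

        sg_init_table(&sg, 1);
        sg_dma_address(&sg) = addr;     /* consumed directly in premapped mode */
        sg.length = len;

        return virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
}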
+
+/**
+ * virtqueue_reset - detach and recycle all unused buffers
+ * @_vq: the struct virtqueue we're talking about.
+ * @recycle: callback to recycle unused buffers
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns zero or a negative error.
+ * 0: success.
+ * -EBUSY: Failed to sync with device, vq may not work properly
+ * -ENOENT: Transport or device not supported
+ * -EPERM: Operation not permitted
+ */
+int virtqueue_reset(struct virtqueue *_vq,
+ void (*recycle)(struct virtqueue *vq, void *buf))
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ int err;
+
+ err = virtqueue_disable_and_recycle(_vq, recycle);
+ if (err)
+ return err;
+
+ if (vq->packed_ring)
+ virtqueue_reinit_packed(vq);
+ else
+ virtqueue_reinit_split(vq);
+
+ return virtqueue_enable_after_reset(_vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_reset);
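Not part of the patch; a minimal sketch of the recycle callback a driver might pass here. my_recycle()/my_flush_vq() are placeholder names, and kfree() stands in for however the driver frees its queued buffers.

#include <linux/slab.h>
#include <linux/virtio.h>

/* Hypothetical: drop every buffer still queued when the vq is reset. */
static void my_recycle(struct virtqueue *vq, void *buf)
{
        kfree(buf);
}

static int my_flush_vq(struct virtqueue *vq)
{
        /* Detaches and recycles all unused buffers, then re-enables the vq. */
        return virtqueue_reset(vq, my_recycle);
}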
+
/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int num,
@@ -2945,4 +3106,149 @@ const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);
+/**
+ * virtqueue_dma_map_single_attrs - map DMA for _vq
+ * @_vq: the struct virtqueue we're talking about.
+ * @ptr: the pointer of the buffer to do dma
+ * @size: the size of the buffer to do dma
+ * @dir: DMA direction
+ * @attrs: DMA Attrs
+ *
+ * The caller calls this to do the DMA mapping in advance. The resulting DMA
+ * address can then be passed to this _vq when it is in premapped mode.
+ *
+ * Return: the DMA address, which the caller should check with
+ * virtqueue_dma_mapping_error().
+ */
+dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
+ size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (!vq->use_dma_api)
+ return (dma_addr_t)virt_to_phys(ptr);
+
+ return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_map_single_attrs);
+
+/**
+ * virtqueue_dma_unmap_single_attrs - unmap DMA for _vq
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: the dma address to unmap
+ * @size: the size of the buffer
+ * @dir: DMA direction
+ * @attrs: DMA Attrs
+ *
+ * Unmap the address that is mapped by the virtqueue_dma_map_* APIs.
+ *
+ */
+void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (!vq->use_dma_api)
+ return;
+
+ dma_unmap_single_attrs(vring_dma_dev(vq), addr, size, dir, attrs);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_unmap_single_attrs);
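Not part of the patch; the completion-side counterpart to the premapped submit sketch above. my_dma_addr_of() and my_consume() stand in for driver-specific bookkeeping.

#include <linux/dma-mapping.h>
#include <linux/virtio.h>

/* Hypothetical completion path: the driver owns the mapping, so it unmaps the
 * buffer itself once the device has consumed it. */
static void my_handle_used(struct virtqueue *vq, size_t buf_len)
{
        unsigned int len;
        void *buf;

        while ((buf = virtqueue_get_buf(vq, &len)) != NULL) {
                dma_addr_t addr = my_dma_addr_of(buf);  /* driver bookkeeping */

                virtqueue_dma_unmap_single_attrs(vq, addr, buf_len,
                                                 DMA_FROM_DEVICE, 0);
                my_consume(buf, len);
        }
}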
+
+/**
+ * virtqueue_dma_mapping_error - check dma address
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ *
+ * Returns 0 if the DMA address is valid; any other value means the DMA
+ * address is invalid.
+ */
+int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (!vq->use_dma_api)
+ return 0;
+
+ return dma_mapping_error(vring_dma_dev(vq), addr);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);
+
+/**
+ * virtqueue_dma_need_sync - check a dma address needs sync
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ *
+ * Check whether the DMA address mapped by the virtqueue_dma_map_* APIs needs
+ * to be synchronized.
+ *
+ * Return: true if the address needs to be synced, false otherwise.
+ */
+bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ if (!vq->use_dma_api)
+ return false;
+
+ return dma_need_sync(vring_dma_dev(vq), addr);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_need_sync);
+
+/**
+ * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ * @offset: DMA address offset
+ * @size: buf size for sync
+ * @dir: DMA direction
+ *
+ * Before calling this function, use virtqueue_dma_need_sync() to confirm that
+ * the DMA address really needs to be synchronized.
+ *
+ */
+void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq,
+ dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct device *dev = vring_dma_dev(vq);
+
+ if (!vq->use_dma_api)
+ return;
+
+ dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu);
+
+/**
+ * virtqueue_dma_sync_single_range_for_device - dma sync for device
+ * @_vq: the struct virtqueue we're talking about.
+ * @addr: DMA address
+ * @offset: DMA address offset
+ * @size: buf size for sync
+ * @dir: DMA direction
+ *
+ * Before calling this function, use virtqueue_dma_need_sync() to confirm that
+ * the DMA address really needs to be synchronized.
+ */
+void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
+ dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+ struct device *dev = vring_dma_dev(vq);
+
+ if (!vq->use_dma_api)
+ return;
+
+ dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+}
+EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);
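Not part of the patch; a sketch of how the sync helpers are meant to be combined on the CPU-read side. The function name is an assumption.

#include <linux/dma-mapping.h>
#include <linux/virtio.h>

/* Hypothetical: sync a premapped receive buffer for the CPU before reading it,
 * but only when the platform actually requires it. */
static void my_sync_for_cpu(struct virtqueue *vq, dma_addr_t addr, size_t len)
{
        if (!virtqueue_dma_need_sync(vq, addr))
                return;

        virtqueue_dma_sync_single_range_for_cpu(vq, addr, 0, len,
                                                DMA_FROM_DEVICE);
}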
+
MODULE_LICENSE("GPL");
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 961161da5900..06ce6d8c2e00 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -366,11 +366,14 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
struct irq_affinity default_affd = { 0 };
struct cpumask *masks;
struct vdpa_callback cb;
+ bool has_affinity = desc && ops->set_vq_affinity;
int i, err, queue_idx = 0;
- masks = create_affinity_masks(nvqs, desc ? desc : &default_affd);
- if (!masks)
- return -ENOMEM;
+ if (has_affinity) {
+ masks = create_affinity_masks(nvqs, desc ? desc : &default_affd);
+ if (!masks)
+ return -ENOMEM;
+ }
for (i = 0; i < nvqs; ++i) {
if (!names[i]) {
@@ -386,20 +389,22 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
goto err_setup_vq;
}
- if (ops->set_vq_affinity)
+ if (has_affinity)
ops->set_vq_affinity(vdpa, i, &masks[i]);
}
cb.callback = virtio_vdpa_config_cb;
cb.private = vd_dev;
ops->set_config_cb(vdpa, &cb);
- kfree(masks);
+ if (has_affinity)
+ kfree(masks);
return 0;
err_setup_vq:
virtio_vdpa_del_vqs(vdev);
- kfree(masks);
+ if (has_affinity)
+ kfree(masks);
return err;
}
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ee97d89dfc11..751458959411 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -307,7 +307,7 @@ config XILINX_WATCHDOG
config XILINX_WINDOW_WATCHDOG
tristate "Xilinx window watchdog timer"
depends on HAS_IOMEM
- depends on ARM64
+ depends on ARM64 || COMPILE_TEST
select WATCHDOG_CORE
help
Window watchdog driver for the versal_wwdt IP core.
@@ -343,7 +343,7 @@ config RAVE_SP_WATCHDOG
config MLX_WDT
tristate "Mellanox Watchdog"
- depends on MELLANOX_PLATFORM
+ depends on MELLANOX_PLATFORM || COMPILE_TEST
select WATCHDOG_CORE
select REGMAP
help
@@ -493,7 +493,7 @@ config FTWDT010_WATCHDOG
config IXP4XX_WATCHDOG
tristate "IXP4xx Watchdog"
- depends on ARCH_IXP4XX
+ depends on ARCH_IXP4XX || (ARM && COMPILE_TEST)
select WATCHDOG_CORE
help
Say Y here if to include support for the watchdog timer
@@ -529,7 +529,7 @@ config S3C2410_WATCHDOG
config SA1100_WATCHDOG
tristate "SA1100/PXA2xx watchdog"
- depends on ARCH_SA1100 || ARCH_PXA
+ depends on ARCH_SA1100 || ARCH_PXA || COMPILE_TEST
help
Watchdog timer embedded into SA11x0 and PXA2xx chips. This will
reboot your system when timeout is reached.
@@ -720,7 +720,7 @@ config IMX2_WDT
config IMX_SC_WDT
tristate "IMX SC Watchdog"
depends on HAVE_ARM_SMCCC
- depends on IMX_SCU
+ depends on IMX_SCU || COMPILE_TEST
select WATCHDOG_CORE
help
This is the driver for the system controller watchdog
@@ -931,7 +931,7 @@ config ASPEED_WATCHDOG
config STM32_WATCHDOG
tristate "STM32 Independent WatchDoG (IWDG) support"
- depends on ARCH_STM32
+ depends on ARCH_STM32 || COMPILE_TEST
select WATCHDOG_CORE
default y
help
@@ -1065,7 +1065,7 @@ config ACQUIRE_WDT
config ADVANTECH_WDT
tristate "Advantech SBC Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
If you are configuring a Linux kernel for the Advantech single-board
computer, say `Y' here to support its built-in watchdog timer
@@ -1074,14 +1074,16 @@ config ADVANTECH_WDT
config ADVANTECH_EC_WDT
tristate "Advantech Embedded Controller Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
+ select ISA_BUS_API
+ select WATCHDOG_CORE
help
This driver supports Advantech products with ITE based Embedded Controller.
It does not support Advantech products with other ECs or without EC.
config ALIM1535_WDT
tristate "ALi M1535 PMU Watchdog Timer"
- depends on X86 && PCI
+ depends on (X86 || COMPILE_TEST) && PCI
help
This is the driver for the hardware watchdog on the ALi M1535 PMU.
@@ -1105,7 +1107,7 @@ config ALIM7101_WDT
config EBC_C384_WDT
tristate "WinSystems EBC-C384 Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
select ISA_BUS_API
select WATCHDOG_CORE
help
@@ -1115,7 +1117,7 @@ config EBC_C384_WDT
config EXAR_WDT
tristate "Exar Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
select WATCHDOG_CORE
help
Enables watchdog timer support for the watchdog timer present
@@ -1126,7 +1128,7 @@ config EXAR_WDT
config F71808E_WDT
tristate "Fintek F718xx, F818xx Super I/O Watchdog"
- depends on X86
+ depends on X86 || COMPILE_TEST
select WATCHDOG_CORE
help
This is the driver for the hardware watchdog on the Fintek F71808E,
@@ -1138,7 +1140,7 @@ config F71808E_WDT
config SP5100_TCO
tristate "AMD/ATI SP5100 TCO Timer/Watchdog"
- depends on X86 && PCI
+ depends on (X86 || COMPILE_TEST) && PCI
select WATCHDOG_CORE
help
Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO
@@ -1177,7 +1179,7 @@ config SC520_WDT
config SBC_FITPC2_WATCHDOG
tristate "Compulab SBC-FITPC2 watchdog"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the built-in watchdog timer on the fit-PC2,
fit-PC2i, CM-iAM single-board computers made by Compulab.
@@ -1202,7 +1204,7 @@ config SBC_FITPC2_WATCHDOG
config EUROTECH_WDT
tristate "Eurotech CPU-1220/1410 Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
Enable support for the watchdog timer on the Eurotech CPU-1220 and
CPU-1410 cards. These are PC/104 SBCs. Spec sheets and product
@@ -1210,7 +1212,7 @@ config EUROTECH_WDT
config IB700_WDT
tristate "IB700 SBC Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the hardware watchdog on the IB700 Single
Board Computer produced by TMC Technology (www.tmc-uk.com). This
@@ -1227,7 +1229,7 @@ config IB700_WDT
config IBMASR
tristate "IBM Automatic Server Restart"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the IBM Automatic Server Restart watchdog
timer built-in into some eServer xSeries machines.
@@ -1237,7 +1239,7 @@ config IBMASR
config WAFER_WDT
tristate "ICP Single Board Computer Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is a driver for the hardware watchdog on the ICP Single
Board Computer. This driver is working on (at least) the following
@@ -1259,7 +1261,7 @@ config I6300ESB_WDT
config IE6XX_WDT
tristate "Intel Atom E6xx Watchdog"
- depends on X86 && PCI
+ depends on (X86 || COMPILE_TEST) && PCI
select WATCHDOG_CORE
select MFD_CORE
select LPC_SCH
@@ -1319,7 +1321,7 @@ config ITCO_VENDOR_SUPPORT
config IT8712F_WDT
tristate "IT8712F (Smart Guardian) Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the built-in watchdog timer on the IT8712F
Super I/0 chipset used on many motherboards.
@@ -1332,7 +1334,7 @@ config IT8712F_WDT
config IT87_WDT
tristate "IT87 Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
select WATCHDOG_CORE
help
This is the driver for the hardware watchdog on the ITE IT8607,
@@ -1350,7 +1352,7 @@ config IT87_WDT
config HP_WATCHDOG
tristate "HP ProLiant iLO2+ Hardware Watchdog Timer"
select WATCHDOG_CORE
- depends on (ARM64 || X86) && PCI
+ depends on (ARM64 || X86 || COMPILE_TEST) && PCI
help
A software monitoring watchdog and NMI handling driver. This driver
will detect lockups and provide a stack trace. This is a driver that
@@ -1380,7 +1382,7 @@ config KEMPLD_WDT
config SC1200_WDT
tristate "National Semiconductor PC87307/PC97307 (ala SC1200) Watchdog"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is a driver for National Semiconductor PC87307/PC97307 hardware
watchdog cards as found on the SC1200. This watchdog is mainly used
@@ -1403,7 +1405,7 @@ config SCx200_WDT
config PC87413_WDT
tristate "NS PC87413 watchdog"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the hardware watchdog on the PC87413 chipset
This watchdog simply watches your kernel to make sure it doesn't
@@ -1417,7 +1419,7 @@ config PC87413_WDT
config NV_TCO
tristate "nVidia TCO Timer/Watchdog"
- depends on X86 && PCI
+ depends on (X86 || COMPILE_TEST) && PCI
help
Hardware driver for the TCO timer built into the nVidia Hub family
(such as the MCP51). The TCO (Total Cost of Ownership) timer is a
@@ -1446,7 +1448,7 @@ config RDC321X_WDT
config 60XX_WDT
tristate "SBC-60XX Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This driver can be used with the watchdog timer found on some
single board computers, namely the 6010 PII based computer.
@@ -1486,7 +1488,7 @@ config SBC7240_WDT
config CPU5_WDT
tristate "SMA CPU5 Watchdog"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
TBD.
To compile this driver as a module, choose M here: the
@@ -1494,7 +1496,7 @@ config CPU5_WDT
config SMSC_SCH311X_WDT
tristate "SMSC SCH311X Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the hardware watchdog timer on the
SMSC SCH3112, SCH3114 and SCH3116 Super IO chipset
@@ -1506,7 +1508,7 @@ config SMSC_SCH311X_WDT
config SMSC37B787_WDT
tristate "Winbond SMsC37B787 Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the hardware watchdog component on the
Winbond SMsC37B787 chipset as used on the NetRunner Mainboard
@@ -1526,7 +1528,7 @@ config SMSC37B787_WDT
config TQMX86_WDT
tristate "TQ-Systems TQMX86 Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
select WATCHDOG_CORE
help
This is the driver for the hardware watchdog timer in the TQMX86 IO
@@ -1539,7 +1541,7 @@ config TQMX86_WDT
config VIA_WDT
tristate "VIA Watchdog Timer"
- depends on X86 && PCI
+ depends on (X86 || COMPILE_TEST) && PCI
select WATCHDOG_CORE
help
This is the driver for the hardware watchdog timer on VIA
@@ -1552,7 +1554,7 @@ config VIA_WDT
config W83627HF_WDT
tristate "Watchdog timer for W83627HF/W83627DHG and compatibles"
- depends on X86
+ depends on X86 || COMPILE_TEST
select WATCHDOG_CORE
help
This is the driver for the hardware watchdog on the following
@@ -1582,7 +1584,7 @@ config W83627HF_WDT
config W83877F_WDT
tristate "W83877F (EMACS) Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the hardware watchdog on the W83877F chipset
as used in EMACS PC-104 motherboards (and likely others). This
@@ -1597,7 +1599,7 @@ config W83877F_WDT
config W83977F_WDT
tristate "W83977F (PCM-5335) Watchdog Timer"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the hardware watchdog on the W83977F I/O chip
as used in AAEON's PCM-5335 SBC (and likely others). This
@@ -1610,7 +1612,7 @@ config W83977F_WDT
config MACHZ_WDT
tristate "ZF MachZ Watchdog"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
If you are using a ZF Micro MachZ processor, say Y here, otherwise
N. This is the driver for the watchdog timer built-in on that
@@ -1623,7 +1625,7 @@ config MACHZ_WDT
config SBC_EPX_C3_WATCHDOG
tristate "Winsystems SBC EPX-C3 watchdog"
- depends on X86
+ depends on X86 || COMPILE_TEST
help
This is the driver for the built-in watchdog timer on the EPX-C3
Single-board computer made by Winsystems, Inc.
@@ -1680,9 +1682,10 @@ config NIC7018_WDT
config SIEMENS_SIMATIC_IPC_WDT
tristate "Siemens Simatic IPC Watchdog"
- depends on SIEMENS_SIMATIC_IPC
+ depends on SIEMENS_SIMATIC_IPC && PCI
+ default y
select WATCHDOG_CORE
- select P2SB
+ select P2SB if X86
help
This driver adds support for several watchdogs found in Industrial
PCs from Siemens.
@@ -1738,7 +1741,7 @@ config INDYDOG
config JZ4740_WDT
tristate "Ingenic jz4740 SoC hardware watchdog"
- depends on MIPS
+ depends on MIPS || COMPILE_TEST
depends on COMMON_CLK
select WATCHDOG_CORE
select MFD_SYSCON
@@ -1797,6 +1800,19 @@ config OCTEON_WDT
from the first interrupt, it is then only poked when the
device is written.
+config MARVELL_GTI_WDT
+ tristate "Marvell GTI Watchdog driver"
+ depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT)
+ default y
+ select WATCHDOG_CORE
+ help
+ The Marvell GTI hardware provides a watchdog timer. The first
+ timeout acts as a watchdog pretimeout, and the installed interrupt
+ handler is called when it expires. The hardware can raise an
+ interrupt to the SCP on the second timeout, but that interrupt is
+ not enabled, so the second timeout is effectively ignored. If the
+ device is not poked in time, the system reboots on the third
+ timeout.
+
config BCM2835_WDT
tristate "Broadcom BCM2835 hardware watchdog"
depends on ARCH_BCM2835 || (OF && COMPILE_TEST)
@@ -1822,7 +1838,7 @@ config BCM_KONA_WDT
config BCM_KONA_WDT_DEBUG
bool "DEBUGFS support for BCM Kona Watchdog"
- depends on BCM_KONA_WDT
+ depends on BCM_KONA_WDT || COMPILE_TEST
help
If enabled, adds /sys/kernel/debug/bcm_kona_wdt/info which provides
access to the driver's internal data structures as well as watchdog
@@ -1863,7 +1879,7 @@ config LANTIQ_WDT
config LOONGSON1_WDT
tristate "Loongson1 SoC hardware watchdog"
- depends on MACH_LOONGSON32
+ depends on MACH_LOONGSON32 || COMPILE_TEST
select WATCHDOG_CORE
help
Hardware driver for the Loongson1 SoC Watchdog Timer.
@@ -1877,7 +1893,7 @@ config RALINK_WDT
config GXP_WATCHDOG
tristate "HPE GXP watchdog support"
- depends on ARCH_HPE_GXP
+ depends on ARCH_HPE_GXP || COMPILE_TEST
select WATCHDOG_CORE
help
Say Y here to include support for the watchdog timer
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 3633f5b98236..7eab9de311cb 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -98,6 +98,7 @@ obj-$(CONFIG_VISCONTI_WATCHDOG) += visconti_wdt.o
obj-$(CONFIG_MSC313E_WATCHDOG) += msc313e_wdt.o
obj-$(CONFIG_APPLE_WATCHDOG) += apple_wdt.o
obj-$(CONFIG_SUNPLUS_WATCHDOG) += sunplus_wdt.o
+obj-$(CONFIG_MARVELL_GTI_WDT) += marvell_gti_wdt.o
# X86 (i386 + ia64 + x86_64) Architecture
obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
diff --git a/drivers/watchdog/armada_37xx_wdt.c b/drivers/watchdog/armada_37xx_wdt.c
index e58652939f8a..8133a5d05647 100644
--- a/drivers/watchdog/armada_37xx_wdt.c
+++ b/drivers/watchdog/armada_37xx_wdt.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/types.h>
diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c
index d20ec27ba354..558015f08c7a 100644
--- a/drivers/watchdog/at91rm9200_wdt.c
+++ b/drivers/watchdog/at91rm9200_wdt.c
@@ -18,6 +18,7 @@
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/atmel-st.h>
#include <linux/miscdevice.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
@@ -26,8 +27,6 @@
#include <linux/types.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#define WDT_DEFAULT_TIME 5 /* seconds */
#define WDT_MAX_TIME 256 /* seconds */
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 47250f9b68c7..901b94d456db 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -31,7 +31,7 @@
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
diff --git a/drivers/watchdog/ftwdt010_wdt.c b/drivers/watchdog/ftwdt010_wdt.c
index 442c5bf63ff4..28f5af752c10 100644
--- a/drivers/watchdog/ftwdt010_wdt.c
+++ b/drivers/watchdog/ftwdt010_wdt.c
@@ -14,7 +14,7 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/watchdog.h>
@@ -221,20 +221,18 @@ static const struct dev_pm_ops ftwdt010_wdt_dev_pm_ops = {
ftwdt010_wdt_resume)
};
-#ifdef CONFIG_OF
static const struct of_device_id ftwdt010_wdt_match[] = {
{ .compatible = "faraday,ftwdt010" },
{ .compatible = "cortina,gemini-watchdog" },
{},
};
MODULE_DEVICE_TABLE(of, ftwdt010_wdt_match);
-#endif
static struct platform_driver ftwdt010_wdt_driver = {
.probe = ftwdt010_wdt_probe,
.driver = {
.name = "ftwdt010-wdt",
- .of_match_table = of_match_ptr(ftwdt010_wdt_match),
+ .of_match_table = ftwdt010_wdt_match,
.pm = &ftwdt010_wdt_dev_pm_ops,
},
};
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index 97afc907f659..6a1db1c783fa 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -31,7 +31,7 @@
#include <linux/fs.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 6fcc3596103c..42e8ffae18dd 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -26,8 +26,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/watchdog.h>
@@ -375,7 +374,7 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
*/
imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
imx2_wdt_ping(wdog);
- dev_crit(&pdev->dev, "Device shutdown: Expect reboot!\n");
+ dev_crit(&pdev->dev, "Device shutdown.\n");
}
}
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index 7ca486794ba7..c703586c6e5f 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c
index 9b2173f765c8..fb7fae750181 100644
--- a/drivers/watchdog/intel-mid_wdt.c
+++ b/drivers/watchdog/intel-mid_wdt.c
@@ -203,3 +203,4 @@ module_platform_driver(mid_wdt_driver);
MODULE_AUTHOR("David Cohen <david.a.cohen@linux.intel.com>");
MODULE_DESCRIPTION("Watchdog Driver for Intel MID platform");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:intel_mid_wdt");
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index 6fab504af88b..a273b97ebcb4 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -9,7 +9,8 @@
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/watchdog.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/io.h>
diff --git a/drivers/watchdog/loongson1_wdt.c b/drivers/watchdog/loongson1_wdt.c
index 4ac7810a314d..0587ff44d3a1 100644
--- a/drivers/watchdog/loongson1_wdt.c
+++ b/drivers/watchdog/loongson1_wdt.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/watchdog/marvell_gti_wdt.c b/drivers/watchdog/marvell_gti_wdt.c
new file mode 100644
index 000000000000..d7eb8286e11e
--- /dev/null
+++ b/drivers/watchdog/marvell_gti_wdt.c
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell GTI Watchdog driver
+ *
+ * Copyright (C) 2023 Marvell.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+/*
+ * The hardware supports the following modes of operation:
+ * 1) Interrupt only:
+ *    An interrupt is raised to the ARM core whenever a timeout occurs.
+ *
+ * 2) Interrupt + del3t (interrupt to the firmware (SCP) processor):
+ *    An interrupt is raised to the ARM core on the first timeout and to
+ *    the SCP processor on the second timeout.
+ *
+ * 3) Interrupt + interrupt to the SCP processor (del3t) + reboot:
+ *    An interrupt is raised to the ARM core on the first timeout and, if
+ *    configured, to the SCP processor on the second timeout. The system
+ *    reboots on the third timeout.
+ *
+ * The driver uses mode 3 so that the system reboots in case of a hardware
+ * hang. The hardware is configured not to raise the SCP interrupt, so the
+ * second timeout is effectively ignored.
+ *
+ * The first timeout effectively acts as the watchdog pretimeout.
+ */
+
+/* GTI CWD Watchdog (GTI_CWD_WDOG) Register */
+#define GTI_CWD_WDOG(reg_offset) (0x8 * (reg_offset))
+#define GTI_CWD_WDOG_MODE_INT_DEL3T_RST 0x3
+#define GTI_CWD_WDOG_MODE_MASK GENMASK_ULL(1, 0)
+#define GTI_CWD_WDOG_LEN_SHIFT 4
+#define GTI_CWD_WDOG_LEN_MASK GENMASK_ULL(19, 4)
+#define GTI_CWD_WDOG_CNT_SHIFT 20
+#define GTI_CWD_WDOG_CNT_MASK GENMASK_ULL(43, 20)
+
+/* GTI CWD Watchdog Interrupt (GTI_CWD_INT) Register */
+#define GTI_CWD_INT 0x200
+#define GTI_CWD_INT_PENDING_STATUS(bit) BIT_ULL(bit)
+
+/* GTI CWD Watchdog Interrupt Enable Clear (GTI_CWD_INT_ENA_CLR) Register */
+#define GTI_CWD_INT_ENA_CLR 0x210
+#define GTI_CWD_INT_ENA_CLR_VAL(bit) BIT_ULL(bit)
+
+/* GTI CWD Watchdog Interrupt Enable Set (GTI_CWD_INT_ENA_SET) Register */
+#define GTI_CWD_INT_ENA_SET 0x218
+#define GTI_CWD_INT_ENA_SET_VAL(bit) BIT_ULL(bit)
+
+/* GTI CWD Watchdog Poke (GTI_CWD_POKE) Registers */
+#define GTI_CWD_POKE(reg_offset) (0x10000 + 0x8 * (reg_offset))
+#define GTI_CWD_POKE_VAL 1
+
+struct gti_match_data {
+ u32 gti_num_timers;
+};
+
+static const struct gti_match_data match_data_octeontx2 = {
+ .gti_num_timers = 54,
+};
+
+static const struct gti_match_data match_data_cn10k = {
+ .gti_num_timers = 64,
+};
+
+struct gti_wdt_priv {
+ struct watchdog_device wdev;
+ void __iomem *base;
+ u32 clock_freq;
+ struct clk *sclk;
+ /* index of the GTI timer used as the system watchdog */
+ u32 wdt_timer_idx;
+ const struct gti_match_data *data;
+};
+
+static irqreturn_t gti_wdt_interrupt(int irq, void *data)
+{
+ struct watchdog_device *wdev = data;
+ struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ /* Clear Interrupt Pending Status */
+ writeq(GTI_CWD_INT_PENDING_STATUS(priv->wdt_timer_idx),
+ priv->base + GTI_CWD_INT);
+
+ watchdog_notify_pretimeout(wdev);
+
+ return IRQ_HANDLED;
+}
+
+static int gti_wdt_ping(struct watchdog_device *wdev)
+{
+ struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ writeq(GTI_CWD_POKE_VAL,
+ priv->base + GTI_CWD_POKE(priv->wdt_timer_idx));
+
+ return 0;
+}
+
+static int gti_wdt_start(struct watchdog_device *wdev)
+{
+ struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ u64 regval;
+
+ if (!wdev->pretimeout)
+ return -EINVAL;
+
+ set_bit(WDOG_HW_RUNNING, &wdev->status);
+
+ /* Clear any pending interrupt */
+ writeq(GTI_CWD_INT_PENDING_STATUS(priv->wdt_timer_idx),
+ priv->base + GTI_CWD_INT);
+
+ /* Enable Interrupt */
+ writeq(GTI_CWD_INT_ENA_SET_VAL(priv->wdt_timer_idx),
+ priv->base + GTI_CWD_INT_ENA_SET);
+
+ /* Set (Interrupt + SCP interrupt (DEL3T) + core domain reset) Mode */
+ regval = readq(priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
+ regval |= GTI_CWD_WDOG_MODE_INT_DEL3T_RST;
+ writeq(regval, priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
+
+ return 0;
+}
+
+static int gti_wdt_stop(struct watchdog_device *wdev)
+{
+ struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ u64 regval;
+
+ /* Disable Interrupt */
+ writeq(GTI_CWD_INT_ENA_CLR_VAL(priv->wdt_timer_idx),
+ priv->base + GTI_CWD_INT_ENA_CLR);
+
+ /* Set GTI_CWD_WDOG.Mode = 0 to stop the timer */
+ regval = readq(priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
+ regval &= ~GTI_CWD_WDOG_MODE_MASK;
+ writeq(regval, priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
+
+ return 0;
+}
+
+static int gti_wdt_settimeout(struct watchdog_device *wdev,
+ unsigned int timeout)
+{
+ struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ u64 timeout_wdog, regval;
+
+ /* Update new timeout */
+ wdev->timeout = timeout;
+
+ /* Pretimeout is 1/3 of timeout */
+ wdev->pretimeout = timeout / 3;
+
+ /* Get clock cycles from pretimeout */
+ timeout_wdog = (u64)priv->clock_freq * wdev->pretimeout;
+
+ /* Watchdog counts in 1024 cycle steps */
+ timeout_wdog = timeout_wdog >> 10;
+
+ /* GTI_CWD_WDOG.CNT: reload counter is 16-bit */
+ timeout_wdog = (timeout_wdog + 0xff) >> 8;
+ if (timeout_wdog >= 0x10000)
+ timeout_wdog = 0xffff;
+
+ /*
+ * GTI_CWD_WDOG.LEN is 24 bits wide; the lower 8 bits must be zero and
+ * the upper 16 bits are the same as GTI_CWD_WDOG.CNT.
+ */
+ regval = readq(priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
+ regval &= GTI_CWD_WDOG_MODE_MASK;
+ regval |= (timeout_wdog << (GTI_CWD_WDOG_CNT_SHIFT + 8)) |
+ (timeout_wdog << GTI_CWD_WDOG_LEN_SHIFT);
+ writeq(regval, priv->base + GTI_CWD_WDOG(priv->wdt_timer_idx));
+
+ return 0;
+}
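+
+/*
+ * Worked example (illustrative only, not part of the original change):
+ * assuming a hypothetical 100 MHz coprocessor clock and a 10 s pretimeout,
+ * 100000000 * 10 = 1000000000 cycles; >> 10 gives ~976562 steps of 1024
+ * cycles; (976562 + 0xff) >> 8 yields a 16-bit reload value of ~3815
+ * (0xee7), which gti_wdt_settimeout() programs into GTI_CWD_WDOG.CNT and
+ * into the upper bits of GTI_CWD_WDOG.LEN.
+ */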
+
+static int gti_wdt_set_pretimeout(struct watchdog_device *wdev,
+ unsigned int timeout)
+{
+ struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ struct watchdog_device *wdog_dev = &priv->wdev;
+
+ /* The pretimeout must not exceed 1/3 of max_timeout */
+ if (timeout * 3 <= wdog_dev->max_timeout)
+ return gti_wdt_settimeout(wdev, timeout * 3);
+
+ return -EINVAL;
+}
+
+static void gti_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int gti_wdt_get_cntfrq(struct platform_device *pdev,
+ struct gti_wdt_priv *priv)
+{
+ int err;
+
+ priv->sclk = devm_clk_get_enabled(&pdev->dev, NULL);
+ if (IS_ERR(priv->sclk))
+ return PTR_ERR(priv->sclk);
+
+ err = devm_add_action_or_reset(&pdev->dev,
+ gti_clk_disable_unprepare, priv->sclk);
+ if (err)
+ return err;
+
+ priv->clock_freq = clk_get_rate(priv->sclk);
+ if (!priv->clock_freq)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct watchdog_info gti_wdt_ident = {
+ .identity = "Marvell GTI watchdog",
+ .options = WDIOF_SETTIMEOUT | WDIOF_PRETIMEOUT | WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE | WDIOF_CARDRESET,
+};
+
+static const struct watchdog_ops gti_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = gti_wdt_start,
+ .stop = gti_wdt_stop,
+ .ping = gti_wdt_ping,
+ .set_timeout = gti_wdt_settimeout,
+ .set_pretimeout = gti_wdt_set_pretimeout,
+};
+
+static int gti_wdt_probe(struct platform_device *pdev)
+{
+ struct gti_wdt_priv *priv;
+ struct device *dev = &pdev->dev;
+ struct watchdog_device *wdog_dev;
+ u64 max_pretimeout;
+ u32 wdt_idx;
+ int irq;
+ int err;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->base),
+ "reg property not valid/found\n");
+
+ err = gti_wdt_get_cntfrq(pdev, priv);
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "GTI clock frequency not valid/found");
+
+ priv->data = of_device_get_match_data(dev);
+
+ /* By default, use the last timer for the watchdog */
+ priv->wdt_timer_idx = priv->data->gti_num_timers - 1;
+
+ err = of_property_read_u32(dev->of_node, "marvell,wdt-timer-index",
+ &wdt_idx);
+ if (!err) {
+ if (wdt_idx >= priv->data->gti_num_timers)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "GTI wdog timer index not valid");
+
+ priv->wdt_timer_idx = wdt_idx;
+ }
+
+ wdog_dev = &priv->wdev;
+ wdog_dev->info = &gti_wdt_ident;
+ wdog_dev->ops = &gti_wdt_ops;
+ wdog_dev->parent = dev;
+ /*
+ * The watchdog counter is 24 bits wide, with the lower 8 bits fixed at
+ * zero. It decrements once every 1024 clock cycles.
+ */
+ max_pretimeout = (GTI_CWD_WDOG_CNT_MASK >> GTI_CWD_WDOG_CNT_SHIFT);
+ max_pretimeout &= ~0xFFUL;
+ max_pretimeout = (max_pretimeout * 1024) / priv->clock_freq;
+ wdog_dev->pretimeout = max_pretimeout;
+
+ /* Maximum timeout is 3 times the pretimeout */
+ wdog_dev->max_timeout = max_pretimeout * 3;
+ /* The minimum pretimeout is 1 second, so the minimum timeout is 3 seconds */
+ wdog_dev->min_timeout = 3;
+ wdog_dev->timeout = wdog_dev->pretimeout;
+
+ watchdog_set_drvdata(wdog_dev, priv);
+ platform_set_drvdata(pdev, priv);
+ gti_wdt_settimeout(wdog_dev, wdog_dev->timeout);
+ watchdog_stop_on_reboot(wdog_dev);
+ watchdog_stop_on_unregister(wdog_dev);
+
+ err = devm_watchdog_register_device(dev, wdog_dev);
+ if (err)
+ return err;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return dev_err_probe(&pdev->dev, irq, "IRQ resource not found\n");
+
+ err = devm_request_irq(dev, irq, gti_wdt_interrupt, 0,
+ pdev->name, &priv->wdev);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to register interrupt handler\n");
+
+ dev_info(dev, "Watchdog enabled (timeout=%d sec)\n", wdog_dev->timeout);
+ return 0;
+}
+
+static const struct of_device_id gti_wdt_of_match[] = {
+ { .compatible = "marvell,cn9670-wdt", .data = &match_data_octeontx2},
+ { .compatible = "marvell,cn10624-wdt", .data = &match_data_cn10k},
+ { },
+};
+MODULE_DEVICE_TABLE(of, gti_wdt_of_match);
+
+static struct platform_driver gti_wdt_driver = {
+ .driver = {
+ .name = "gti-wdt",
+ .of_match_table = gti_wdt_of_match,
+ },
+ .probe = gti_wdt_probe,
+};
+module_platform_driver(gti_wdt_driver);
+
+MODULE_AUTHOR("Bharat Bhushan <bbhushan2@marvell.com>");
+MODULE_DESCRIPTION("Marvell GTI watchdog driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/menz69_wdt.c b/drivers/watchdog/menz69_wdt.c
index 3c98030b9fcd..c7de30270043 100644
--- a/drivers/watchdog/menz69_wdt.c
+++ b/drivers/watchdog/menz69_wdt.c
@@ -153,7 +153,6 @@ MODULE_DEVICE_TABLE(mcb, men_z069_ids);
static struct mcb_driver men_z069_driver = {
.driver = {
.name = "z069-wdt",
- .owner = THIS_MODULE,
},
.probe = men_z069_probe,
.remove = men_z069_remove,
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
index 35d80cb39856..a48622d11ad7 100644
--- a/drivers/watchdog/meson_gxbb_wdt.c
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -22,7 +22,6 @@
#define GXBB_WDT_CTRL_CLKDIV_EN BIT(25)
#define GXBB_WDT_CTRL_CLK_EN BIT(24)
-#define GXBB_WDT_CTRL_EE_RESET BIT(21)
#define GXBB_WDT_CTRL_EN BIT(18)
#define GXBB_WDT_CTRL_DIV_MASK (BIT(18) - 1)
@@ -45,6 +44,10 @@ struct meson_gxbb_wdt {
struct clk *clk;
};
+struct wdt_params {
+ u32 rst;
+};
+
static int meson_gxbb_wdt_start(struct watchdog_device *wdt_dev)
{
struct meson_gxbb_wdt *data = watchdog_get_drvdata(wdt_dev);
@@ -140,8 +143,17 @@ static const struct dev_pm_ops meson_gxbb_wdt_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(meson_gxbb_wdt_suspend, meson_gxbb_wdt_resume)
};
+static const struct wdt_params gxbb_params = {
+ .rst = BIT(21),
+};
+
+static const struct wdt_params t7_params = {
+ .rst = BIT(22),
+};
+
static const struct of_device_id meson_gxbb_wdt_dt_ids[] = {
- { .compatible = "amlogic,meson-gxbb-wdt", },
+ { .compatible = "amlogic,meson-gxbb-wdt", .data = &gxbb_params, },
+ { .compatible = "amlogic,t7-wdt", .data = &t7_params, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, meson_gxbb_wdt_dt_ids);
@@ -150,6 +162,7 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct meson_gxbb_wdt *data;
+ struct wdt_params *params;
u32 ctrl_reg;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
@@ -164,6 +177,8 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev)
if (IS_ERR(data->clk))
return PTR_ERR(data->clk);
+ params = (struct wdt_params *)of_device_get_match_data(dev);
+
platform_set_drvdata(pdev, data);
data->wdt_dev.parent = dev;
@@ -191,7 +206,7 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev)
/* Setup with 1ms timebase */
ctrl_reg |= ((clk_get_rate(data->clk) / 1000) &
GXBB_WDT_CTRL_DIV_MASK) |
- GXBB_WDT_CTRL_EE_RESET |
+ params->rst |
GXBB_WDT_CTRL_CLK_EN |
GXBB_WDT_CTRL_CLKDIV_EN;
diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c
index 539feaa1f904..497496f64f55 100644
--- a/drivers/watchdog/meson_wdt.c
+++ b/drivers/watchdog/meson_wdt.c
@@ -11,11 +11,11 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/types.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 1c569be72ea2..867f9f311379 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -16,8 +16,8 @@
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/watchdog.h>
#include <linux/io.h>
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index a9c437598e7e..b2330b16b497 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -25,7 +25,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/types.h>
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 2a079ca04aa3..05657dc1d36a 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -10,14 +10,13 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/watchdog.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
/* Register offsets for the Wdt device */
#define XWT_TWCSR0_OFFSET 0x0 /* Control/Status Register0 */
diff --git a/drivers/watchdog/pic32-dmt.c b/drivers/watchdog/pic32-dmt.c
index bc4ccddc75a3..ab0682492c85 100644
--- a/drivers/watchdog/pic32-dmt.c
+++ b/drivers/watchdog/pic32-dmt.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/pic32-wdt.c b/drivers/watchdog/pic32-wdt.c
index 6d1a00222991..1d282de312ef 100644
--- a/drivers/watchdog/pic32-wdt.c
+++ b/drivers/watchdog/pic32-wdt.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c
index a98abd0d3146..782b8c23d99c 100644
--- a/drivers/watchdog/pika_wdt.c
+++ b/drivers/watchdog/pika_wdt.c
@@ -23,8 +23,8 @@
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#define DRV_NAME "PIKA-WDT"
diff --git a/drivers/watchdog/pm8916_wdt.c b/drivers/watchdog/pm8916_wdt.c
index f4bfbffaf49c..f3fcbeb0852c 100644
--- a/drivers/watchdog/pm8916_wdt.c
+++ b/drivers/watchdog/pm8916_wdt.c
@@ -266,7 +266,7 @@ static struct platform_driver pm8916_wdt_driver = {
.probe = pm8916_wdt_probe,
.driver = {
.name = "pm8916-wdt",
- .of_match_table = of_match_ptr(pm8916_wdt_id_table),
+ .of_match_table = pm8916_wdt_id_table,
.pm = &pm8916_wdt_pm_ops,
},
};
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index d776474dcdf3..9e790f0c2096 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -11,7 +11,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
-#include <linux/of_device.h>
enum wdt_reg {
WDT_RST,
diff --git a/drivers/watchdog/rave-sp-wdt.c b/drivers/watchdog/rave-sp-wdt.c
index 2c95615b6354..5d1c2176d445 100644
--- a/drivers/watchdog/rave-sp-wdt.c
+++ b/drivers/watchdog/rave-sp-wdt.c
@@ -13,7 +13,7 @@
#include <linux/mfd/rave-sp.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/slab.h>
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index c04b383e1712..b293792a292a 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -14,7 +14,7 @@
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index ce8f18e93aa9..8e1be7ba0103 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -14,6 +14,8 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
@@ -52,6 +54,11 @@
#define DWDST BIT(1)
+#define PON_REASON_SOF_NUM 0xBBBBCCCC
+#define PON_REASON_MAGIC_NUM 0xDDDDDDDD
+#define PON_REASON_EOF_NUM 0xCCCCBBBB
+#define RESERVED_MEM_MIN_SIZE 12
+
static int heartbeat = DEFAULT_HEARTBEAT;
/*
@@ -198,6 +205,11 @@ static int rti_wdt_probe(struct platform_device *pdev)
struct rti_wdt_device *wdt;
struct clk *clk;
u32 last_ping = 0;
+ struct device_node *node;
+ u32 reserved_mem_size;
+ struct resource res;
+ u32 *vaddr;
+ u64 paddr;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
if (!wdt)
@@ -284,6 +296,42 @@ static int rti_wdt_probe(struct platform_device *pdev)
}
}
+ node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+ if (node) {
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ dev_err(dev, "No memory address assigned to the region.\n");
+ goto err_iomap;
+ }
+
+ /*
+ * If reserved memory is defined for the watchdog reset cause,
+ * read out the power-on (PON) reason and report it via bootstatus.
+ */
+ paddr = res.start;
+ reserved_mem_size = resource_size(&res);
+ if (reserved_mem_size < RESERVED_MEM_MIN_SIZE) {
+ dev_err(dev, "The size of reserved memory is too small.\n");
+ ret = -EINVAL;
+ goto err_iomap;
+ }
+
+ vaddr = memremap(paddr, reserved_mem_size, MEMREMAP_WB);
+ if (!vaddr) {
+ dev_err(dev, "Failed to map memory-region.\n");
+ ret = -ENOMEM;
+ goto err_iomap;
+ }
+
+ if (vaddr[0] == PON_REASON_SOF_NUM &&
+ vaddr[1] == PON_REASON_MAGIC_NUM &&
+ vaddr[2] == PON_REASON_EOF_NUM) {
+ wdd->bootstatus |= WDIOF_CARDRESET;
+ }
+ memset(vaddr, 0, reserved_mem_size);
+ memunmap(vaddr);
+ }
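+
+ /*
+ * Illustrative note (not part of this change): the entity that records a
+ * watchdog-initiated reset (e.g. boot firmware) is assumed to leave three
+ * 32-bit markers at the start of the reserved region, i.e.
+ * { PON_REASON_SOF_NUM, PON_REASON_MAGIC_NUM, PON_REASON_EOF_NUM },
+ * which the check above translates into WDIOF_CARDRESET.
+ */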
+
watchdog_init_timeout(wdd, heartbeat, dev);
ret = watchdog_register_device(wdd);
diff --git a/drivers/watchdog/rza_wdt.c b/drivers/watchdog/rza_wdt.c
index fe6c2ed35e04..cb4901b3f777 100644
--- a/drivers/watchdog/rza_wdt.c
+++ b/drivers/watchdog/rza_wdt.c
@@ -9,9 +9,9 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c
index d404953d0e0f..1741f98ca67c 100644
--- a/drivers/watchdog/rzg2l_wdt.c
+++ b/drivers/watchdog/rzg2l_wdt.c
@@ -11,7 +11,7 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 95416a9bdd4b..0b4bd883ff28 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -23,7 +23,6 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/delay.h>
@@ -379,10 +378,11 @@ static int s3c2410wdt_enable(struct s3c2410_wdt *wdt, bool en)
static int s3c2410wdt_keepalive(struct watchdog_device *wdd)
{
struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
+ unsigned long flags;
- spin_lock(&wdt->lock);
+ spin_lock_irqsave(&wdt->lock, flags);
writel(wdt->count, wdt->reg_base + S3C2410_WTCNT);
- spin_unlock(&wdt->lock);
+ spin_unlock_irqrestore(&wdt->lock, flags);
return 0;
}
@@ -399,10 +399,11 @@ static void __s3c2410wdt_stop(struct s3c2410_wdt *wdt)
static int s3c2410wdt_stop(struct watchdog_device *wdd)
{
struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
+ unsigned long flags;
- spin_lock(&wdt->lock);
+ spin_lock_irqsave(&wdt->lock, flags);
__s3c2410wdt_stop(wdt);
- spin_unlock(&wdt->lock);
+ spin_unlock_irqrestore(&wdt->lock, flags);
return 0;
}
@@ -411,8 +412,9 @@ static int s3c2410wdt_start(struct watchdog_device *wdd)
{
unsigned long wtcon;
struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
+ unsigned long flags;
- spin_lock(&wdt->lock);
+ spin_lock_irqsave(&wdt->lock, flags);
__s3c2410wdt_stop(wdt);
@@ -433,7 +435,7 @@ static int s3c2410wdt_start(struct watchdog_device *wdd)
writel(wdt->count, wdt->reg_base + S3C2410_WTDAT);
writel(wdt->count, wdt->reg_base + S3C2410_WTCNT);
writel(wtcon, wdt->reg_base + S3C2410_WTCON);
- spin_unlock(&wdt->lock);
+ spin_unlock_irqrestore(&wdt->lock, flags);
return 0;
}
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
index aeee934ca51b..13e72918338a 100644
--- a/drivers/watchdog/sama5d4_wdt.c
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
@@ -255,6 +254,7 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
struct sama5d4_wdt *wdt;
void __iomem *regs;
u32 irq = 0;
+ u32 reg;
int ret;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
@@ -305,6 +305,12 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
watchdog_init_timeout(wdd, wdt_timeout, dev);
+ reg = wdt_read(wdt, AT91_WDT_MR);
+ if (!(reg & AT91_WDT_WDDIS)) {
+ wdt->mr &= ~AT91_WDT_WDDIS;
+ set_bit(WDOG_HW_RUNNING, &wdd->status);
+ }
+
ret = sama5d4_wdt_init(wdt);
if (ret)
return ret;
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index fd3cfdda4949..421ebcda62e6 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -43,10 +43,9 @@
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/simatic-ipc-wdt.c b/drivers/watchdog/simatic-ipc-wdt.c
index 6599695dc672..cdc1a2e15180 100644
--- a/drivers/watchdog/simatic-ipc-wdt.c
+++ b/drivers/watchdog/simatic-ipc-wdt.c
@@ -155,9 +155,8 @@ static int simatic_ipc_wdt_probe(struct platform_device *pdev)
switch (plat->devmode) {
case SIMATIC_IPC_DEVICE_227E:
- if (!devm_request_region(dev, gp_status_reg_227e_res.start,
- resource_size(&gp_status_reg_227e_res),
- KBUILD_MODNAME)) {
+ res = &gp_status_reg_227e_res;
+ if (!request_muxed_region(res->start, resource_size(res), res->name)) {
dev_err(dev,
"Unable to register IO resource at %pR\n",
&gp_status_reg_227e_res);
@@ -210,6 +209,10 @@ static int simatic_ipc_wdt_probe(struct platform_device *pdev)
if (wdd_data.bootstatus)
dev_warn(dev, "last reboot caused by watchdog reset\n");
+ if (plat->devmode == SIMATIC_IPC_DEVICE_227E)
+ release_region(gp_status_reg_227e_res.start,
+ resource_size(&gp_status_reg_227e_res));
+
watchdog_set_nowayout(&wdd_data, nowayout);
watchdog_stop_on_reboot(&wdd_data);
return devm_watchdog_register_device(dev, &wdd_data);
diff --git a/drivers/watchdog/starfive-wdt.c b/drivers/watchdog/starfive-wdt.c
index 8058fca4d05d..5f501b41faf9 100644
--- a/drivers/watchdog/starfive-wdt.c
+++ b/drivers/watchdog/starfive-wdt.c
@@ -8,7 +8,8 @@
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/watchdog.h>
@@ -526,7 +527,6 @@ static void starfive_wdt_shutdown(struct platform_device *pdev)
starfive_wdt_pm_stop(&wdt->wdd);
}
-#ifdef CONFIG_PM_SLEEP
static int starfive_wdt_suspend(struct device *dev)
{
struct starfive_wdt *wdt = dev_get_drvdata(dev);
@@ -556,9 +556,7 @@ static int starfive_wdt_resume(struct device *dev)
return starfive_wdt_start(wdt);
}
-#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM
static int starfive_wdt_runtime_suspend(struct device *dev)
{
struct starfive_wdt *wdt = dev_get_drvdata(dev);
@@ -574,11 +572,10 @@ static int starfive_wdt_runtime_resume(struct device *dev)
return starfive_wdt_enable_clock(wdt);
}
-#endif /* CONFIG_PM */
static const struct dev_pm_ops starfive_wdt_pm_ops = {
- SET_RUNTIME_PM_OPS(starfive_wdt_runtime_suspend, starfive_wdt_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(starfive_wdt_suspend, starfive_wdt_resume)
+ RUNTIME_PM_OPS(starfive_wdt_runtime_suspend, starfive_wdt_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(starfive_wdt_suspend, starfive_wdt_resume)
};
static const struct of_device_id starfive_wdt_match[] = {
@@ -594,7 +591,7 @@ static struct platform_driver starfive_wdt_driver = {
.shutdown = starfive_wdt_shutdown,
.driver = {
.name = "starfive-wdt",
- .pm = &starfive_wdt_pm_ops,
+ .pm = pm_ptr(&starfive_wdt_pm_ops),
.of_match_table = starfive_wdt_match,
},
};
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index 570a71509d2a..d9fd50df9802 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
@@ -288,7 +287,7 @@ static struct platform_driver stm32_iwdg_driver = {
.probe = stm32_iwdg_probe,
.driver = {
.name = "iwdg",
- .of_match_table = of_match_ptr(stm32_iwdg_of_match),
+ .of_match_table = stm32_iwdg_of_match,
},
};
module_platform_driver(stm32_iwdg_driver);
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index 6cf82922d3fb..b85354a99582 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -18,7 +18,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/watchdog.h>
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index d4c5a736fdcb..5b55ccae06d4 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -161,7 +161,7 @@ static int watchdog_reboot_notifier(struct notifier_block *nb,
struct watchdog_device *wdd;
wdd = container_of(nb, struct watchdog_device, reboot_nb);
- if (code == SYS_DOWN || code == SYS_HALT) {
+ if (code == SYS_DOWN || code == SYS_HALT || code == SYS_POWER_OFF) {
if (watchdog_hw_running(wdd)) {
int ret;
diff --git a/drivers/watchdog/xilinx_wwdt.c b/drivers/watchdog/xilinx_wwdt.c
index 2585038d5575..d271e2e8d6e2 100644
--- a/drivers/watchdog/xilinx_wwdt.c
+++ b/drivers/watchdog/xilinx_wwdt.c
@@ -9,9 +9,10 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/watchdog.h>
/* Max timeout is calculated at 100MHz source clock */
@@ -71,7 +72,7 @@ static int xilinx_wwdt_start(struct watchdog_device *wdd)
/* Calculate timeout count */
time_out = xdev->freq * wdd->timeout;
- closed_timeout = (time_out * xdev->close_percent) / 100;
+ closed_timeout = div_u64(time_out * xdev->close_percent, 100);
open_timeout = time_out - closed_timeout;
wdd->min_hw_heartbeat_ms = xdev->close_percent * 10 * wdd->timeout;
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 93539aac0e5b..f5693164ca9a 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -58,7 +58,7 @@ config ARCH_USE_GNU_PROPERTY
config BINFMT_ELF_FDPIC
bool "Kernel support for FDPIC ELF binaries"
default y if !BINFMT_ELF
- depends on ARM || ((M68K || SUPERH || XTENSA) && !MMU)
+ depends on ARM || ((M68K || RISCV || SUPERH || XTENSA) && !MMU)
select ELFCORE
help
ELF FDPIC binaries are based on ELF, but allow the individual load
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 1c6c5832af86..43b2a2851ba3 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -138,7 +138,7 @@ static int is_constdisp(struct elfhdr *hdr)
static int elf_fdpic_fetch_phdrs(struct elf_fdpic_params *params,
struct file *file)
{
- struct elf32_phdr *phdr;
+ struct elf_phdr *phdr;
unsigned long size;
int retval, loop;
loff_t pos = params->hdr.e_phoff;
@@ -560,8 +560,8 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
sp &= ~7UL;
/* stack the load map(s) */
- len = sizeof(struct elf32_fdpic_loadmap);
- len += sizeof(struct elf32_fdpic_loadseg) * exec_params->loadmap->nsegs;
+ len = sizeof(struct elf_fdpic_loadmap);
+ len += sizeof(struct elf_fdpic_loadseg) * exec_params->loadmap->nsegs;
sp = (sp - len) & ~7UL;
exec_params->map_addr = sp;
@@ -571,8 +571,8 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
current->mm->context.exec_fdpic_loadmap = (unsigned long) sp;
if (interp_params->loadmap) {
- len = sizeof(struct elf32_fdpic_loadmap);
- len += sizeof(struct elf32_fdpic_loadseg) *
+ len = sizeof(struct elf_fdpic_loadmap);
+ len += sizeof(struct elf_fdpic_loadseg) *
interp_params->loadmap->nsegs;
sp = (sp - len) & ~7UL;
interp_params->map_addr = sp;
@@ -740,13 +740,13 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
struct mm_struct *mm,
const char *what)
{
- struct elf32_fdpic_loadmap *loadmap;
+ struct elf_fdpic_loadmap *loadmap;
#ifdef CONFIG_MMU
- struct elf32_fdpic_loadseg *mseg;
+ struct elf_fdpic_loadseg *mseg;
unsigned long load_addr;
#endif
- struct elf32_fdpic_loadseg *seg;
- struct elf32_phdr *phdr;
+ struct elf_fdpic_loadseg *seg;
+ struct elf_phdr *phdr;
unsigned nloads, tmp;
unsigned long stop;
int loop, ret;
@@ -766,7 +766,7 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
params->loadmap = loadmap;
- loadmap->version = ELF32_FDPIC_LOADMAP_VERSION;
+ loadmap->version = ELF_FDPIC_LOADMAP_VERSION;
loadmap->nsegs = nloads;
/* map the requested LOADs into the memory space */
@@ -839,8 +839,8 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
if (phdr->p_vaddr >= seg->p_vaddr &&
phdr->p_vaddr + phdr->p_memsz <=
seg->p_vaddr + seg->p_memsz) {
- Elf32_Dyn __user *dyn;
- Elf32_Sword d_tag;
+ Elf_Dyn __user *dyn;
+ Elf_Sword d_tag;
params->dynamic_addr =
(phdr->p_vaddr - seg->p_vaddr) +
@@ -850,11 +850,11 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
* one item, and that the last item is a NULL
* entry */
if (phdr->p_memsz == 0 ||
- phdr->p_memsz % sizeof(Elf32_Dyn) != 0)
+ phdr->p_memsz % sizeof(Elf_Dyn) != 0)
goto dynamic_error;
- tmp = phdr->p_memsz / sizeof(Elf32_Dyn);
- dyn = (Elf32_Dyn __user *)params->dynamic_addr;
+ tmp = phdr->p_memsz / sizeof(Elf_Dyn);
+ dyn = (Elf_Dyn __user *)params->dynamic_addr;
if (get_user(d_tag, &dyn[tmp - 1].d_tag) ||
d_tag != 0)
goto dynamic_error;
@@ -923,8 +923,8 @@ static int elf_fdpic_map_file_constdisp_on_uclinux(
struct file *file,
struct mm_struct *mm)
{
- struct elf32_fdpic_loadseg *seg;
- struct elf32_phdr *phdr;
+ struct elf_fdpic_loadseg *seg;
+ struct elf_phdr *phdr;
unsigned long load_addr, base = ULONG_MAX, top = 0, maddr = 0;
int loop, ret;
@@ -1007,8 +1007,8 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
struct file *file,
struct mm_struct *mm)
{
- struct elf32_fdpic_loadseg *seg;
- struct elf32_phdr *phdr;
+ struct elf_fdpic_loadseg *seg;
+ struct elf_phdr *phdr;
unsigned long load_addr, delta_vaddr;
int loop, dvset;
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
index 50c635dc7f71..1f77ca04c426 100644
--- a/fs/ceph/Makefile
+++ b/fs/ceph/Makefile
@@ -12,3 +12,4 @@ ceph-y := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
ceph-$(CONFIG_CEPH_FSCACHE) += cache.o
ceph-$(CONFIG_CEPH_FS_POSIX_ACL) += acl.o
+ceph-$(CONFIG_FS_ENCRYPTION) += crypto.o
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index c91b293267d7..c53a1d220622 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -140,7 +140,7 @@ int ceph_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
newattrs.ia_ctime = current_time(inode);
newattrs.ia_mode = new_mode;
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
- ret = __ceph_setattr(inode, &newattrs);
+ ret = __ceph_setattr(inode, &newattrs, NULL);
if (ret)
goto out_free;
}
@@ -151,7 +151,7 @@ int ceph_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
newattrs.ia_ctime = old_ctime;
newattrs.ia_mode = old_mode;
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
- __ceph_setattr(inode, &newattrs);
+ __ceph_setattr(inode, &newattrs, NULL);
}
goto out_free;
}
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 59cbfb80edbd..f4863078f7fe 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -18,6 +18,7 @@
#include "mds_client.h"
#include "cache.h"
#include "metric.h"
+#include "crypto.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>
@@ -242,11 +243,13 @@ static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
static void finish_netfs_read(struct ceph_osd_request *req)
{
- struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode);
+ struct inode *inode = req->r_inode;
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
struct netfs_io_subrequest *subreq = req->r_priv;
- int num_pages;
+ struct ceph_osd_req_op *op = &req->r_ops[0];
int err = req->r_result;
+ bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);
ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
req->r_end_latency, osd_data->length, err);
@@ -260,14 +263,29 @@ static void finish_netfs_read(struct ceph_osd_request *req)
else if (err == -EBLOCKLISTED)
fsc->blocklisted = true;
- if (err >= 0 && err < subreq->len)
- __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ if (err >= 0) {
+ if (sparse && err > 0)
+ err = ceph_sparse_ext_map_end(op);
+ if (err < subreq->len)
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ if (IS_ENCRYPTED(inode) && err > 0) {
+ err = ceph_fscrypt_decrypt_extents(inode,
+ osd_data->pages, subreq->start,
+ op->extent.sparse_ext,
+ op->extent.sparse_ext_cnt);
+ if (err > subreq->len)
+ err = subreq->len;
+ }
+ }
+ if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
+ ceph_put_page_vector(osd_data->pages,
+ calc_pages_for(osd_data->alignment,
+ osd_data->length), false);
+ }
netfs_subreq_terminated(subreq, err, false);
-
- num_pages = calc_pages_for(osd_data->alignment, osd_data->length);
- ceph_put_page_vector(osd_data->pages, num_pages, false);
iput(req->r_inode);
+ ceph_dec_osd_stopping_blocker(fsc->mdsc);
}
static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
@@ -334,10 +352,10 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
struct ceph_osd_request *req = NULL;
struct ceph_vino vino = ceph_vino(inode);
struct iov_iter iter;
- struct page **pages;
- size_t page_off;
int err = 0;
u64 len = subreq->len;
+ bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
+ u64 off = subreq->start;
if (ceph_inode_is_shutdown(inode)) {
err = -EIO;
@@ -347,8 +365,10 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
return;
- req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, subreq->start, &len,
- 0, 1, CEPH_OSD_OP_READ,
+ ceph_fscrypt_adjust_off_and_len(inode, &off, &len);
+
+ req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino,
+ off, &len, 0, 1, sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ,
CEPH_OSD_FLAG_READ | fsc->client->osdc.client->options->read_from_replica,
NULL, ci->i_truncate_seq, ci->i_truncate_size, false);
if (IS_ERR(req)) {
@@ -357,20 +377,48 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
goto out;
}
+ if (sparse) {
+ err = ceph_alloc_sparse_ext_map(&req->r_ops[0]);
+ if (err)
+ goto out;
+ }
+
dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
+
iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
- err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
- if (err < 0) {
- dout("%s: iov_ter_get_pages_alloc returned %d\n", __func__, err);
- goto out;
- }
- /* should always give us a page-aligned read */
- WARN_ON_ONCE(page_off);
- len = err;
- err = 0;
+ /*
+ * FIXME: For now, use CEPH_OSD_DATA_TYPE_PAGES instead of _ITER for
+ * encrypted inodes. We'd need infrastructure that handles an iov_iter
+ * instead of page arrays, and we don't have that as of yet. Once the
+ * dust settles on the write helpers and encrypt/decrypt routines for
+ * netfs, we should be able to rework this.
+ */
+ if (IS_ENCRYPTED(inode)) {
+ struct page **pages;
+ size_t page_off;
+
+ err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
+ if (err < 0) {
+ dout("%s: iov_ter_get_pages_alloc returned %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /* should always give us a page-aligned read */
+ WARN_ON_ONCE(page_off);
+ len = err;
+ err = 0;
- osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
+ osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false,
+ false);
+ } else {
+ osd_req_op_extent_osd_iter(req, 0, &iter);
+ }
+ if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
+ err = -EIO;
+ goto out;
+ }
req->r_callback = finish_netfs_read;
req->r_priv = subreq;
req->r_inode = inode;
@@ -571,10 +619,12 @@ static u64 get_writepages_data_length(struct inode *inode,
struct page *page, u64 start)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_snap_context *snapc = page_snap_context(page);
+ struct ceph_snap_context *snapc;
struct ceph_cap_snap *capsnap = NULL;
u64 end = i_size_read(inode);
+ u64 ret;
+ snapc = page_snap_context(ceph_fscrypt_pagecache_page(page));
if (snapc != ci->i_head_snapc) {
bool found = false;
spin_lock(&ci->i_ceph_lock);
@@ -589,9 +639,12 @@ static u64 get_writepages_data_length(struct inode *inode,
spin_unlock(&ci->i_ceph_lock);
WARN_ON(!found);
}
- if (end > page_offset(page) + thp_size(page))
- end = page_offset(page) + thp_size(page);
- return end > start ? end - start : 0;
+ if (end > ceph_fscrypt_page_offset(page) + thp_size(page))
+ end = ceph_fscrypt_page_offset(page) + thp_size(page);
+ ret = end > start ? end - start : 0;
+ if (ret && fscrypt_is_bounce_page(page))
+ ret = round_up(ret, CEPH_FSCRYPT_BLOCK_SIZE);
+ return ret;
}
/*
@@ -610,10 +663,12 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
loff_t page_off = page_offset(page);
int err;
loff_t len = thp_size(page);
+ loff_t wlen;
struct ceph_writeback_ctl ceph_wbc;
struct ceph_osd_client *osdc = &fsc->client->osdc;
struct ceph_osd_request *req;
bool caching = ceph_is_cache_enabled(inode);
+ struct page *bounce_page = NULL;
dout("writepage %p idx %lu\n", page, page->index);
@@ -649,31 +704,51 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
if (ceph_wbc.i_size < page_off + len)
len = ceph_wbc.i_size - page_off;
+ wlen = IS_ENCRYPTED(inode) ? round_up(len, CEPH_FSCRYPT_BLOCK_SIZE) : len;
dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
- inode, page, page->index, page_off, len, snapc, snapc->seq);
+ inode, page, page->index, page_off, wlen, snapc, snapc->seq);
if (atomic_long_inc_return(&fsc->writeback_count) >
CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
fsc->write_congested = true;
- req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), page_off, &len, 0, 1,
- CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc,
- ceph_wbc.truncate_seq, ceph_wbc.truncate_size,
- true);
+ req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode),
+ page_off, &wlen, 0, 1, CEPH_OSD_OP_WRITE,
+ CEPH_OSD_FLAG_WRITE, snapc,
+ ceph_wbc.truncate_seq,
+ ceph_wbc.truncate_size, true);
if (IS_ERR(req)) {
redirty_page_for_writepage(wbc, page);
return PTR_ERR(req);
}
+ if (wlen < len)
+ len = wlen;
+
set_page_writeback(page);
if (caching)
ceph_set_page_fscache(page);
ceph_fscache_write_to_cache(inode, page_off, len, caching);
+ if (IS_ENCRYPTED(inode)) {
+ bounce_page = fscrypt_encrypt_pagecache_blocks(page,
+ CEPH_FSCRYPT_BLOCK_SIZE, 0,
+ GFP_NOFS);
+ if (IS_ERR(bounce_page)) {
+ redirty_page_for_writepage(wbc, page);
+ end_page_writeback(page);
+ ceph_osdc_put_request(req);
+ return PTR_ERR(bounce_page);
+ }
+ }
+
/* it may be a short write due to an object boundary */
WARN_ON_ONCE(len > thp_size(page));
- osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false);
- dout("writepage %llu~%llu (%llu bytes)\n", page_off, len, len);
+ osd_req_op_extent_osd_data_pages(req, 0,
+ bounce_page ? &bounce_page : &page, wlen, 0,
+ false, false);
+ dout("writepage %llu~%llu (%llu bytes, %sencrypted)\n",
+ page_off, len, wlen, IS_ENCRYPTED(inode) ? "" : "not ");
req->r_mtime = inode->i_mtime;
ceph_osdc_start_request(osdc, req);
@@ -681,7 +756,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
req->r_end_latency, len, err);
-
+ fscrypt_free_bounce_page(bounce_page);
ceph_osdc_put_request(req);
if (err == 0)
err = len;
@@ -800,6 +875,11 @@ static void writepages_finish(struct ceph_osd_request *req)
total_pages += num_pages;
for (j = 0; j < num_pages; j++) {
page = osd_data->pages[j];
+ if (fscrypt_is_bounce_page(page)) {
+ page = fscrypt_pagecache_page(page);
+ fscrypt_free_bounce_page(osd_data->pages[j]);
+ osd_data->pages[j] = page;
+ }
BUG_ON(!page);
WARN_ON(!PageUptodate(page));
@@ -835,6 +915,7 @@ static void writepages_finish(struct ceph_osd_request *req)
else
kfree(osd_data->pages);
ceph_osdc_put_request(req);
+ ceph_dec_osd_stopping_blocker(fsc->mdsc);
}
/*
@@ -1070,9 +1151,28 @@ get_more_pages:
fsc->mount_options->congestion_kb))
fsc->write_congested = true;
- pages[locked_pages++] = page;
- fbatch.folios[i] = NULL;
+ if (IS_ENCRYPTED(inode)) {
+ pages[locked_pages] =
+ fscrypt_encrypt_pagecache_blocks(page,
+ PAGE_SIZE, 0,
+ locked_pages ? GFP_NOWAIT : GFP_NOFS);
+ if (IS_ERR(pages[locked_pages])) {
+ if (PTR_ERR(pages[locked_pages]) == -EINVAL)
+ pr_err("%s: inode->i_blkbits=%hhu\n",
+ __func__, inode->i_blkbits);
+ /* better not fail on first page! */
+ BUG_ON(locked_pages == 0);
+ pages[locked_pages] = NULL;
+ redirty_page_for_writepage(wbc, page);
+ unlock_page(page);
+ break;
+ }
+ ++locked_pages;
+ } else {
+ pages[locked_pages++] = page;
+ }
+ fbatch.folios[i] = NULL;
len += thp_size(page);
}
@@ -1100,7 +1200,7 @@ get_more_pages:
}
new_request:
- offset = page_offset(pages[0]);
+ offset = ceph_fscrypt_page_offset(pages[0]);
len = wsize;
req = ceph_osdc_new_request(&fsc->client->osdc,
@@ -1121,9 +1221,13 @@ new_request:
ceph_wbc.truncate_size, true);
BUG_ON(IS_ERR(req));
}
- BUG_ON(len < page_offset(pages[locked_pages - 1]) +
- thp_size(page) - offset);
+ BUG_ON(len < ceph_fscrypt_page_offset(pages[locked_pages - 1]) +
+ thp_size(pages[locked_pages - 1]) - offset);
+ if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
+ rc = -EIO;
+ goto release_folios;
+ }
req->r_callback = writepages_finish;
req->r_inode = inode;
@@ -1132,7 +1236,9 @@ new_request:
data_pages = pages;
op_idx = 0;
for (i = 0; i < locked_pages; i++) {
- u64 cur_offset = page_offset(pages[i]);
+ struct page *page = ceph_fscrypt_pagecache_page(pages[i]);
+
+ u64 cur_offset = page_offset(page);
/*
* Discontinuity in page range? Ceph can handle that by just passing
* multiple extents in the write op.
@@ -1161,9 +1267,9 @@ new_request:
op_idx++;
}
- set_page_writeback(pages[i]);
+ set_page_writeback(page);
if (caching)
- ceph_set_page_fscache(pages[i]);
+ ceph_set_page_fscache(page);
len += thp_size(page);
}
ceph_fscache_write_to_cache(inode, offset, len, caching);
@@ -1179,8 +1285,16 @@ new_request:
offset);
len = max(len, min_len);
}
+ if (IS_ENCRYPTED(inode))
+ len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE);
+
dout("writepages got pages at %llu~%llu\n", offset, len);
+ if (IS_ENCRYPTED(inode) &&
+ ((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK))
+ pr_warn("%s: bad encrypted write offset=%lld len=%llu\n",
+ __func__, offset, len);
+
osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
0, from_pool, false);
osd_req_op_extent_update(req, op_idx, len);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 09cd6d334604..14215ec646f7 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -14,6 +14,7 @@
#include "super.h"
#include "mds_client.h"
#include "cache.h"
+#include "crypto.h"
#include <linux/ceph/decode.h>
#include <linux/ceph/messenger.h>
@@ -1216,15 +1217,11 @@ struct cap_msg_args {
umode_t mode;
bool inline_data;
bool wake;
+ bool encrypted;
+ u32 fscrypt_auth_len;
+ u8 fscrypt_auth[sizeof(struct ceph_fscrypt_auth)]; // for context
};
-/*
- * cap struct size + flock buffer size + inline version + inline data size +
- * osd_epoch_barrier + oldest_flush_tid
- */
-#define CAP_MSG_SIZE (sizeof(struct ceph_mds_caps) + \
- 4 + 8 + 4 + 4 + 8 + 4 + 4 + 4 + 8 + 8 + 4)
-
/* Marshal up the cap msg to the MDS */
static void encode_cap_msg(struct ceph_msg *msg, struct cap_msg_args *arg)
{
@@ -1240,7 +1237,7 @@ static void encode_cap_msg(struct ceph_msg *msg, struct cap_msg_args *arg)
arg->size, arg->max_size, arg->xattr_version,
arg->xattr_buf ? (int)arg->xattr_buf->vec.iov_len : 0);
- msg->hdr.version = cpu_to_le16(10);
+ msg->hdr.version = cpu_to_le16(12);
msg->hdr.tid = cpu_to_le64(arg->flush_tid);
fc = msg->front.iov_base;
@@ -1257,7 +1254,13 @@ static void encode_cap_msg(struct ceph_msg *msg, struct cap_msg_args *arg)
fc->ino = cpu_to_le64(arg->ino);
fc->snap_follows = cpu_to_le64(arg->follows);
- fc->size = cpu_to_le64(arg->size);
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ if (arg->encrypted)
+ fc->size = cpu_to_le64(round_up(arg->size,
+ CEPH_FSCRYPT_BLOCK_SIZE));
+ else
+#endif
+ fc->size = cpu_to_le64(arg->size);
fc->max_size = cpu_to_le64(arg->max_size);
ceph_encode_timespec64(&fc->mtime, &arg->mtime);
ceph_encode_timespec64(&fc->atime, &arg->atime);
@@ -1311,6 +1314,27 @@ static void encode_cap_msg(struct ceph_msg *msg, struct cap_msg_args *arg)
/* Advisory flags (version 10) */
ceph_encode_32(&p, arg->flags);
+
+ /* dirstats (version 11) - these are r/o on the client */
+ ceph_encode_64(&p, 0);
+ ceph_encode_64(&p, 0);
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ /*
+ * fscrypt_auth and fscrypt_file (version 12)
+ *
+ * fscrypt_auth holds the crypto context (if any). fscrypt_file
+ * tracks the real i_size as an __le64 field (and we use a rounded-up
+ * i_size in the traditional size field).
+ */
+ ceph_encode_32(&p, arg->fscrypt_auth_len);
+ ceph_encode_copy(&p, arg->fscrypt_auth, arg->fscrypt_auth_len);
+ ceph_encode_32(&p, sizeof(__le64));
+ ceph_encode_64(&p, arg->size);
+#else /* CONFIG_FS_ENCRYPTION */
+ ceph_encode_32(&p, 0);
+ ceph_encode_32(&p, 0);
+#endif /* CONFIG_FS_ENCRYPTION */
}
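
For reference, the v11/v12 tail encoded above lays out roughly as follows; this is a sketch inferred from the ceph_encode_* calls in this patch, not authoritative protocol documentation:

/*
 * Cap message tail (client -> MDS), appended after the v10 fields:
 *
 *   __le64  nfiles;             dirstat, always 0 from the client (v11)
 *   __le64  nsubdirs;           dirstat, always 0 from the client (v11)
 *   __le32  fscrypt_auth_len;   0 when !CONFIG_FS_ENCRYPTION (v12)
 *   u8      fscrypt_auth[fscrypt_auth_len];  crypto context blob
 *   __le32  fscrypt_file_len;   sizeof(__le64) when encryption is enabled
 *   __le64  fscrypt_file;       real (unrounded) i_size, only if above != 0
 */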
/*
@@ -1378,7 +1402,6 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
arg->follows = flushing ? ci->i_head_snapc->seq : 0;
arg->flush_tid = flush_tid;
arg->oldest_flush_tid = oldest_flush_tid;
-
arg->size = i_size_read(inode);
ci->i_reported_size = arg->size;
arg->max_size = ci->i_wanted_max_size;
@@ -1432,8 +1455,39 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
}
}
arg->flags = flags;
+ arg->encrypted = IS_ENCRYPTED(inode);
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ if (ci->fscrypt_auth_len &&
+ WARN_ON_ONCE(ci->fscrypt_auth_len > sizeof(struct ceph_fscrypt_auth))) {
+ /* Don't set this if it's too big */
+ arg->fscrypt_auth_len = 0;
+ } else {
+ arg->fscrypt_auth_len = ci->fscrypt_auth_len;
+ memcpy(arg->fscrypt_auth, ci->fscrypt_auth,
+ min_t(size_t, ci->fscrypt_auth_len,
+ sizeof(arg->fscrypt_auth)));
+ }
+#endif /* CONFIG_FS_ENCRYPTION */
}
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+#define CAP_MSG_FIXED_FIELDS (sizeof(struct ceph_mds_caps) + \
+ 4 + 8 + 4 + 4 + 8 + 4 + 4 + 4 + 8 + 8 + 4 + 8 + 8 + 4 + 4 + 8)
+
+static inline int cap_msg_size(struct cap_msg_args *arg)
+{
+ return CAP_MSG_FIXED_FIELDS + arg->fscrypt_auth_len;
+}
+#else
+#define CAP_MSG_FIXED_FIELDS (sizeof(struct ceph_mds_caps) + \
+ 4 + 8 + 4 + 4 + 8 + 4 + 4 + 4 + 8 + 8 + 4 + 8 + 8 + 4 + 4)
+
+static inline int cap_msg_size(struct cap_msg_args *arg)
+{
+ return CAP_MSG_FIXED_FIELDS;
+}
+#endif /* CONFIG_FS_ENCRYPTION */
+
/*
* Send a cap msg on the given inode.
*
@@ -1444,7 +1498,8 @@ static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci)
struct ceph_msg *msg;
struct inode *inode = &ci->netfs.inode;
- msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, CAP_MSG_SIZE, GFP_NOFS, false);
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, cap_msg_size(arg), GFP_NOFS,
+ false);
if (!msg) {
pr_err("error allocating cap msg: ino (%llx.%llx) flushing %s tid %llu, requeuing cap.\n",
ceph_vinop(inode), ceph_cap_string(arg->dirty),
@@ -1470,10 +1525,6 @@ static inline int __send_flush_snap(struct inode *inode,
struct cap_msg_args arg;
struct ceph_msg *msg;
- msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, CAP_MSG_SIZE, GFP_NOFS, false);
- if (!msg)
- return -ENOMEM;
-
arg.session = session;
arg.ino = ceph_vino(inode).ino;
arg.cid = 0;
@@ -1510,6 +1561,15 @@ static inline int __send_flush_snap(struct inode *inode,
arg.inline_data = capsnap->inline_data;
arg.flags = 0;
arg.wake = false;
+ arg.encrypted = IS_ENCRYPTED(inode);
+
+ /* No fscrypt_auth changes from a capsnap. */
+ arg.fscrypt_auth_len = 0;
+
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, cap_msg_size(&arg),
+ GFP_NOFS, false);
+ if (!msg)
+ return -ENOMEM;
encode_cap_msg(msg, &arg);
ceph_con_send(&arg.session->s_con, msg);
@@ -2900,10 +2960,9 @@ int ceph_try_get_caps(struct inode *inode, int need, int want,
* due to a small max_size, make sure we check_max_size (and possibly
* ask the mds) so we don't get hung up indefinitely.
*/
-int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got)
+int __ceph_get_caps(struct inode *inode, struct ceph_file_info *fi, int need,
+ int want, loff_t endoff, int *got)
{
- struct ceph_file_info *fi = filp->private_data;
- struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
int ret, _got, flags;
@@ -2912,7 +2971,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
if (ret < 0)
return ret;
- if ((fi->fmode & CEPH_FILE_MODE_WR) &&
+ if (fi && (fi->fmode & CEPH_FILE_MODE_WR) &&
fi->filp_gen != READ_ONCE(fsc->filp_gen))
return -EBADF;
@@ -2965,7 +3024,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
continue;
}
- if ((fi->fmode & CEPH_FILE_MODE_WR) &&
+ if (fi && (fi->fmode & CEPH_FILE_MODE_WR) &&
fi->filp_gen != READ_ONCE(fsc->filp_gen)) {
if (ret >= 0 && _got)
ceph_put_cap_refs(ci, _got);
@@ -3028,6 +3087,15 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
return 0;
}
+int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff,
+ int *got)
+{
+ struct ceph_file_info *fi = filp->private_data;
+ struct inode *inode = file_inode(filp);
+
+ return __ceph_get_caps(inode, fi, need, want, endoff, got);
+}
+
/*
* Take cap refs. Caller must already know we hold at least one ref
* on the caps in question or we don't know this is safe.
@@ -3323,6 +3391,9 @@ struct cap_extra_info {
/* currently issued */
int issued;
struct timespec64 btime;
+ u8 *fscrypt_auth;
+ u32 fscrypt_auth_len;
+ u64 fscrypt_file_size;
};
/*
@@ -3355,6 +3426,14 @@ static void handle_cap_grant(struct inode *inode,
bool deleted_inode = false;
bool fill_inline = false;
+ /*
+ * If there is at least one crypto block then we'll trust
+ * fscrypt_file_size. If the real length of the file is 0, then
+ * ignore it (it has probably been truncated down to 0 by the MDS).
+ */
+ if (IS_ENCRYPTED(inode) && size)
+ size = extra_info->fscrypt_file_size;
+
dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
inode, cap, session->s_mds, seq, ceph_cap_string(newcaps));
dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
@@ -3421,6 +3500,14 @@ static void handle_cap_grant(struct inode *inode,
dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
from_kuid(&init_user_ns, inode->i_uid),
from_kgid(&init_user_ns, inode->i_gid));
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ if (ci->fscrypt_auth_len != extra_info->fscrypt_auth_len ||
+ memcmp(ci->fscrypt_auth, extra_info->fscrypt_auth,
+ ci->fscrypt_auth_len))
+ pr_warn_ratelimited("%s: cap grant attempt to change fscrypt_auth on non-I_NEW inode (old len %d new len %d)\n",
+ __func__, ci->fscrypt_auth_len,
+ extra_info->fscrypt_auth_len);
+#endif
}
if ((newcaps & CEPH_CAP_LINK_SHARED) &&
@@ -3837,7 +3924,8 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
*/
static bool handle_cap_trunc(struct inode *inode,
struct ceph_mds_caps *trunc,
- struct ceph_mds_session *session)
+ struct ceph_mds_session *session,
+ struct cap_extra_info *extra_info)
{
struct ceph_inode_info *ci = ceph_inode(inode);
int mds = session->s_mds;
@@ -3854,8 +3942,16 @@ static bool handle_cap_trunc(struct inode *inode,
issued |= implemented | dirty;
- dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
- inode, mds, seq, truncate_size, truncate_seq);
+ /*
+ * If there is at least one crypto block then we'll trust
+ * fscrypt_file_size. If the real length of the file is 0, then
+ * ignore it (it has probably been truncated down to 0 by the MDS).
+ */
+ if (IS_ENCRYPTED(inode) && size)
+ size = extra_info->fscrypt_file_size;
+
+ dout("%s inode %p mds%d seq %d to %lld truncate seq %d\n",
+ __func__, inode, mds, seq, truncate_size, truncate_seq);
queue_trunc = ceph_fill_file_size(inode, issued,
truncate_seq, truncate_size, size);
return queue_trunc;
@@ -4075,6 +4171,52 @@ retry:
*target_cap = cap;
}
+#ifdef CONFIG_FS_ENCRYPTION
+static int parse_fscrypt_fields(void **p, void *end,
+ struct cap_extra_info *extra)
+{
+ u32 len;
+
+ ceph_decode_32_safe(p, end, extra->fscrypt_auth_len, bad);
+ if (extra->fscrypt_auth_len) {
+ ceph_decode_need(p, end, extra->fscrypt_auth_len, bad);
+ extra->fscrypt_auth = kmalloc(extra->fscrypt_auth_len,
+ GFP_KERNEL);
+ if (!extra->fscrypt_auth)
+ return -ENOMEM;
+ ceph_decode_copy_safe(p, end, extra->fscrypt_auth,
+ extra->fscrypt_auth_len, bad);
+ }
+
+ ceph_decode_32_safe(p, end, len, bad);
+ if (len >= sizeof(u64)) {
+ ceph_decode_64_safe(p, end, extra->fscrypt_file_size, bad);
+ len -= sizeof(u64);
+ }
+ ceph_decode_skip_n(p, end, len, bad);
+ return 0;
+bad:
+ return -EIO;
+}
+#else
+static int parse_fscrypt_fields(void **p, void *end,
+ struct cap_extra_info *extra)
+{
+ u32 len;
+
+ /* Don't care about these fields unless we're encryption-capable */
+ ceph_decode_32_safe(p, end, len, bad);
+ if (len)
+ ceph_decode_skip_n(p, end, len, bad);
+ ceph_decode_32_safe(p, end, len, bad);
+ if (len)
+ ceph_decode_skip_n(p, end, len, bad);
+ return 0;
+bad:
+ return -EIO;
+}
+#endif
+
/*
* Handle a caps message from the MDS.
*
@@ -4105,6 +4247,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
dout("handle_caps from mds%d\n", session->s_mds);
+ if (!ceph_inc_mds_stopping_blocker(mdsc, session))
+ return;
+
/* decode */
end = msg->front.iov_base + msg->front.iov_len;
if (msg->front.iov_len < sizeof(*h))
@@ -4195,13 +4340,17 @@ void ceph_handle_caps(struct ceph_mds_session *session,
ceph_decode_64_safe(&p, end, extra_info.nsubdirs, bad);
}
+ if (msg_version >= 12) {
+ if (parse_fscrypt_fields(&p, end, &extra_info))
+ goto bad;
+ }
+
/* lookup ino */
inode = ceph_find_inode(mdsc->fsc->sb, vino);
dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
vino.snap, inode);
mutex_lock(&session->s_mutex);
- inc_session_sequence(session);
dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
(unsigned)seq);
@@ -4292,7 +4441,8 @@ void ceph_handle_caps(struct ceph_mds_session *session,
break;
case CEPH_CAP_OP_TRUNC:
- queue_trunc = handle_cap_trunc(inode, h, session);
+ queue_trunc = handle_cap_trunc(inode, h, session,
+ &extra_info);
spin_unlock(&ci->i_ceph_lock);
if (queue_trunc)
ceph_queue_vmtruncate(inode);
@@ -4309,12 +4459,15 @@ done:
done_unlocked:
iput(inode);
out:
+ ceph_dec_mds_stopping_blocker(mdsc);
+
ceph_put_string(extra_info.pool_ns);
/* Defer closing the sessions after s_mutex lock being released */
if (close_sessions)
ceph_mdsc_close_sessions(mdsc);
+ kfree(extra_info.fscrypt_auth);
return;
flush_cap_releases:
@@ -4611,6 +4764,18 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
return ret;
}
+/**
+ * ceph_encode_dentry_release - encode a dentry release into an outgoing request
+ * @p: outgoing request buffer
+ * @dentry: dentry to release
+ * @dir: dir to release it from
+ * @mds: mds that we're speaking to
+ * @drop: caps being dropped
+ * @unless: unless we have these caps
+ *
+ * Encode a dentry release into an outgoing request buffer. Returns 1 if the
+ * release was encoded, or a negative error code otherwise.
+ */
int ceph_encode_dentry_release(void **p, struct dentry *dentry,
struct inode *dir,
int mds, int drop, int unless)
@@ -4643,13 +4808,25 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
if (ret && di->lease_session && di->lease_session->s_mds == mds) {
dout("encode_dentry_release %p mds%d seq %d\n",
dentry, mds, (int)di->lease_seq);
- rel->dname_len = cpu_to_le32(dentry->d_name.len);
- memcpy(*p, dentry->d_name.name, dentry->d_name.len);
- *p += dentry->d_name.len;
rel->dname_seq = cpu_to_le32(di->lease_seq);
__ceph_mdsc_drop_dentry_lease(dentry);
+ spin_unlock(&dentry->d_lock);
+ if (IS_ENCRYPTED(dir) && fscrypt_has_encryption_key(dir)) {
+ int ret2 = ceph_encode_encrypted_fname(dir, dentry, *p);
+
+ if (ret2 < 0)
+ return ret2;
+
+ rel->dname_len = cpu_to_le32(ret2);
+ *p += ret2;
+ } else {
+ rel->dname_len = cpu_to_le32(dentry->d_name.len);
+ memcpy(*p, dentry->d_name.name, dentry->d_name.len);
+ *p += dentry->d_name.len;
+ }
+ } else {
+ spin_unlock(&dentry->d_lock);
}
- spin_unlock(&dentry->d_lock);
return ret;
}
diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c
new file mode 100644
index 000000000000..e4d5cd56a80b
--- /dev/null
+++ b/fs/ceph/crypto.c
@@ -0,0 +1,673 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The base64 encode/decode code was copied from fscrypt:
+ * Copyright (C) 2015, Google, Inc.
+ * Copyright (C) 2015, Motorola Mobility
+ * Written by Uday Savagaonkar, 2014.
+ * Modified by Jaegeuk Kim, 2015.
+ */
+#include <linux/ceph/ceph_debug.h>
+#include <linux/xattr.h>
+#include <linux/fscrypt.h>
+#include <linux/ceph/striper.h>
+
+#include "super.h"
+#include "mds_client.h"
+#include "crypto.h"
+
+/*
+ * The base64url encoding used by fscrypt includes the '_' character, which may
+ * cause problems in snapshot names (which cannot start with '_'). Thus, we
+ * use the base64 encoding defined for IMAP mailbox names (RFC 3501) instead,
+ * which replaces '-' and '_' with '+' and ','.
+ */
+static const char base64_table[65] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
+
+int ceph_base64_encode(const u8 *src, int srclen, char *dst)
+{
+ u32 ac = 0;
+ int bits = 0;
+ int i;
+ char *cp = dst;
+
+ for (i = 0; i < srclen; i++) {
+ ac = (ac << 8) | src[i];
+ bits += 8;
+ do {
+ bits -= 6;
+ *cp++ = base64_table[(ac >> bits) & 0x3f];
+ } while (bits >= 6);
+ }
+ if (bits)
+ *cp++ = base64_table[(ac << (6 - bits)) & 0x3f];
+ return cp - dst;
+}
+
+int ceph_base64_decode(const char *src, int srclen, u8 *dst)
+{
+ u32 ac = 0;
+ int bits = 0;
+ int i;
+ u8 *bp = dst;
+
+ for (i = 0; i < srclen; i++) {
+ const char *p = strchr(base64_table, src[i]);
+
+ if (p == NULL || src[i] == 0)
+ return -1;
+ ac = (ac << 6) | (p - base64_table);
+ bits += 6;
+ if (bits >= 8) {
+ bits -= 8;
+ *bp++ = (u8)(ac >> bits);
+ }
+ }
+ if (ac & ((1 << bits) - 1))
+ return -1;
+ return bp - dst;
+}
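
A minimal usage sketch for the two helpers above, assuming a caller inside fs/ceph where CEPH_BASE64_CHARS() from crypto.h is visible; the function name is hypothetical and the snippet is illustrative only, not part of the patch:

/* Round-trip a short name through the IMAP-safe base64 helpers. */
static int ceph_base64_selftest(void)
{
	static const u8 src[] = "snapname";
	char enc[CEPH_BASE64_CHARS(sizeof(src) - 1)];
	u8 dec[sizeof(src) - 1];
	int elen, dlen;

	elen = ceph_base64_encode(src, sizeof(src) - 1, enc);
	dlen = ceph_base64_decode(enc, elen, dec);
	if (dlen != sizeof(src) - 1 || memcmp(dec, src, dlen))
		return -EINVAL;	/* round trip did not reproduce the input */
	return 0;
}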
+
+static int ceph_crypt_get_context(struct inode *inode, void *ctx, size_t len)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fscrypt_auth *cfa = (struct ceph_fscrypt_auth *)ci->fscrypt_auth;
+ u32 ctxlen;
+
+ /* Nonexistent or too short? */
+ if (!cfa || (ci->fscrypt_auth_len < (offsetof(struct ceph_fscrypt_auth, cfa_blob) + 1)))
+ return -ENOBUFS;
+
+ /* Some format we don't recognize? */
+ if (le32_to_cpu(cfa->cfa_version) != CEPH_FSCRYPT_AUTH_VERSION)
+ return -ENOBUFS;
+
+ ctxlen = le32_to_cpu(cfa->cfa_blob_len);
+ if (len < ctxlen)
+ return -ERANGE;
+
+ memcpy(ctx, cfa->cfa_blob, ctxlen);
+ return ctxlen;
+}
+
+static int ceph_crypt_set_context(struct inode *inode, const void *ctx,
+ size_t len, void *fs_data)
+{
+ int ret;
+ struct iattr attr = { };
+ struct ceph_iattr cia = { };
+ struct ceph_fscrypt_auth *cfa;
+
+ WARN_ON_ONCE(fs_data);
+
+ if (len > FSCRYPT_SET_CONTEXT_MAX_SIZE)
+ return -EINVAL;
+
+ cfa = kzalloc(sizeof(*cfa), GFP_KERNEL);
+ if (!cfa)
+ return -ENOMEM;
+
+ cfa->cfa_version = cpu_to_le32(CEPH_FSCRYPT_AUTH_VERSION);
+ cfa->cfa_blob_len = cpu_to_le32(len);
+ memcpy(cfa->cfa_blob, ctx, len);
+
+ cia.fscrypt_auth = cfa;
+
+ ret = __ceph_setattr(inode, &attr, &cia);
+ if (ret == 0)
+ inode_set_flags(inode, S_ENCRYPTED, S_ENCRYPTED);
+ kfree(cia.fscrypt_auth);
+ return ret;
+}
+
+static bool ceph_crypt_empty_dir(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ return ci->i_rsubdirs + ci->i_rfiles == 1;
+}
+
+static const union fscrypt_policy *ceph_get_dummy_policy(struct super_block *sb)
+{
+ return ceph_sb_to_client(sb)->fsc_dummy_enc_policy.policy;
+}
+
+static struct fscrypt_operations ceph_fscrypt_ops = {
+ .get_context = ceph_crypt_get_context,
+ .set_context = ceph_crypt_set_context,
+ .get_dummy_policy = ceph_get_dummy_policy,
+ .empty_dir = ceph_crypt_empty_dir,
+};
+
+void ceph_fscrypt_set_ops(struct super_block *sb)
+{
+ fscrypt_set_ops(sb, &ceph_fscrypt_ops);
+}
+
+void ceph_fscrypt_free_dummy_policy(struct ceph_fs_client *fsc)
+{
+ fscrypt_free_dummy_policy(&fsc->fsc_dummy_enc_policy);
+}
+
+int ceph_fscrypt_prepare_context(struct inode *dir, struct inode *inode,
+ struct ceph_acl_sec_ctx *as)
+{
+ int ret, ctxsize;
+ bool encrypted = false;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ ret = fscrypt_prepare_new_inode(dir, inode, &encrypted);
+ if (ret)
+ return ret;
+ if (!encrypted)
+ return 0;
+
+ as->fscrypt_auth = kzalloc(sizeof(*as->fscrypt_auth), GFP_KERNEL);
+ if (!as->fscrypt_auth)
+ return -ENOMEM;
+
+ ctxsize = fscrypt_context_for_new_inode(as->fscrypt_auth->cfa_blob,
+ inode);
+ if (ctxsize < 0)
+ return ctxsize;
+
+ as->fscrypt_auth->cfa_version = cpu_to_le32(CEPH_FSCRYPT_AUTH_VERSION);
+ as->fscrypt_auth->cfa_blob_len = cpu_to_le32(ctxsize);
+
+ WARN_ON_ONCE(ci->fscrypt_auth);
+ kfree(ci->fscrypt_auth);
+ ci->fscrypt_auth_len = ceph_fscrypt_auth_len(as->fscrypt_auth);
+ ci->fscrypt_auth = kmemdup(as->fscrypt_auth, ci->fscrypt_auth_len,
+ GFP_KERNEL);
+ if (!ci->fscrypt_auth)
+ return -ENOMEM;
+
+ inode->i_flags |= S_ENCRYPTED;
+
+ return 0;
+}
+
+void ceph_fscrypt_as_ctx_to_req(struct ceph_mds_request *req,
+ struct ceph_acl_sec_ctx *as)
+{
+ swap(req->r_fscrypt_auth, as->fscrypt_auth);
+}
+
+/*
+ * User-created snapshots can't start with '_'. Snapshots that start with this
+ * character are special (hint: they aren't real snapshots) and use the
+ * following format:
+ *
+ * _<SNAPSHOT-NAME>_<INODE-NUMBER>
+ *
+ * where:
+ * - <SNAPSHOT-NAME> - the real snapshot name that may need to be decrypted,
+ * - <INODE-NUMBER> - the inode number (in decimal) for the actual snapshot
+ *
+ * This function parses these snapshot names and returns the inode for
+ * <INODE-NUMBER>. 'name_len' will also be set to the <SNAPSHOT-NAME>
+ * length.
+ */
+static struct inode *parse_longname(const struct inode *parent,
+ const char *name, int *name_len)
+{
+ struct inode *dir = NULL;
+ struct ceph_vino vino = { .snap = CEPH_NOSNAP };
+ char *inode_number;
+ char *name_end;
+ int orig_len = *name_len;
+ int ret = -EIO;
+
+ /* Skip initial '_' */
+ name++;
+ name_end = strrchr(name, '_');
+ if (!name_end) {
+ dout("Failed to parse long snapshot name: %s\n", name);
+ return ERR_PTR(-EIO);
+ }
+ *name_len = (name_end - name);
+ if (*name_len <= 0) {
+ pr_err("Failed to parse long snapshot name\n");
+ return ERR_PTR(-EIO);
+ }
+
+ /* Get the inode number */
+ inode_number = kmemdup_nul(name_end + 1,
+ orig_len - *name_len - 2,
+ GFP_KERNEL);
+ if (!inode_number)
+ return ERR_PTR(-ENOMEM);
+ ret = kstrtou64(inode_number, 10, &vino.ino);
+ if (ret) {
+ dout("Failed to parse inode number: %s\n", name);
+ dir = ERR_PTR(ret);
+ goto out;
+ }
+
+ /* And finally the inode */
+ dir = ceph_find_inode(parent->i_sb, vino);
+ if (!dir) {
+ /* This can happen if we're not mounting cephfs on the root */
+ dir = ceph_get_inode(parent->i_sb, vino, NULL);
+ if (!dir)
+ dir = ERR_PTR(-ENOENT);
+ }
+ if (IS_ERR(dir))
+ dout("Can't find inode %s (%s)\n", inode_number, name);
+
+out:
+ kfree(inode_number);
+ return dir;
+}
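
A hypothetical example of the long snapshot name format handled above; the name and inode number are made up for illustration:

/*
 * For "_mysnap_1099511627776":
 *   - the leading '_' is skipped,
 *   - *name_len is set to 6 ("mysnap"),
 *   - "1099511627776" is parsed as the decimal inode number, and
 *   - that inode is looked up (or instantiated via ceph_get_inode())
 *     so the snapshot name can be decrypted against the right directory.
 */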
+
+int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name,
+ char *buf)
+{
+ struct inode *dir = parent;
+ struct qstr iname;
+ u32 len;
+ int name_len;
+ int elen;
+ int ret;
+ u8 *cryptbuf = NULL;
+
+ iname.name = d_name->name;
+ name_len = d_name->len;
+
+ /* Handle the special case of snapshot names that start with '_' */
+ if ((ceph_snap(dir) == CEPH_SNAPDIR) && (name_len > 0) &&
+ (iname.name[0] == '_')) {
+ dir = parse_longname(parent, iname.name, &name_len);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+ iname.name++; /* skip initial '_' */
+ }
+ iname.len = name_len;
+
+ if (!fscrypt_has_encryption_key(dir)) {
+ memcpy(buf, d_name->name, d_name->len);
+ elen = d_name->len;
+ goto out;
+ }
+
+ /*
+ * Convert cleartext d_name to ciphertext. If result is longer than
+ * CEPH_NOHASH_NAME_MAX, sha256 the remaining bytes
+ *
+ * See: fscrypt_setup_filename
+ */
+ if (!fscrypt_fname_encrypted_size(dir, iname.len, NAME_MAX, &len)) {
+ elen = -ENAMETOOLONG;
+ goto out;
+ }
+
+ /* Allocate a buffer appropriate to hold the result */
+ cryptbuf = kmalloc(len > CEPH_NOHASH_NAME_MAX ? NAME_MAX : len,
+ GFP_KERNEL);
+ if (!cryptbuf) {
+ elen = -ENOMEM;
+ goto out;
+ }
+
+ ret = fscrypt_fname_encrypt(dir, &iname, cryptbuf, len);
+ if (ret) {
+ elen = ret;
+ goto out;
+ }
+
+ /* hash the end if the name is long enough */
+ if (len > CEPH_NOHASH_NAME_MAX) {
+ u8 hash[SHA256_DIGEST_SIZE];
+ u8 *extra = cryptbuf + CEPH_NOHASH_NAME_MAX;
+
+ /*
+ * hash the extra bytes and overwrite crypttext beyond that
+ * point with it
+ */
+ sha256(extra, len - CEPH_NOHASH_NAME_MAX, hash);
+ memcpy(extra, hash, SHA256_DIGEST_SIZE);
+ len = CEPH_NOHASH_NAME_MAX + SHA256_DIGEST_SIZE;
+ }
+
+ /* base64 encode the encrypted name */
+ elen = ceph_base64_encode(cryptbuf, len, buf);
+ dout("base64-encoded ciphertext name = %.*s\n", elen, buf);
+
+ /* To understand the 240 limit, see CEPH_NOHASH_NAME_MAX comments */
+ WARN_ON(elen > 240);
+ if ((elen > 0) && (dir != parent)) {
+ char tmp_buf[NAME_MAX];
+
+ elen = snprintf(tmp_buf, sizeof(tmp_buf), "_%.*s_%ld",
+ elen, buf, dir->i_ino);
+ memcpy(buf, tmp_buf, elen);
+ }
+
+out:
+ kfree(cryptbuf);
+ if (dir != parent) {
+ if ((dir->i_state & I_NEW))
+ discard_new_inode(dir);
+ else
+ iput(dir);
+ }
+ return elen;
+}
+
+int ceph_encode_encrypted_fname(struct inode *parent, struct dentry *dentry,
+ char *buf)
+{
+ WARN_ON_ONCE(!fscrypt_has_encryption_key(parent));
+
+ return ceph_encode_encrypted_dname(parent, &dentry->d_name, buf);
+}
+
+/**
+ * ceph_fname_to_usr - convert a filename for userland presentation
+ * @fname: ceph_fname to be converted
+ * @tname: temporary name buffer to use for conversion (may be NULL)
+ * @oname: where converted name should be placed
+ * @is_nokey: set to true if key wasn't available during conversion (may be NULL)
+ *
+ * Given a filename (usually from the MDS), format it for presentation to
+ * userland. If the parent directory is not encrypted, just pass it back
+ * as-is.
+ *
+ * Otherwise, base64 decode the string, and then ask fscrypt to format it
+ * for userland presentation.
+ *
+ * Returns 0 on success or negative error code on error.
+ */
+int ceph_fname_to_usr(const struct ceph_fname *fname, struct fscrypt_str *tname,
+ struct fscrypt_str *oname, bool *is_nokey)
+{
+ struct inode *dir = fname->dir;
+ struct fscrypt_str _tname = FSTR_INIT(NULL, 0);
+ struct fscrypt_str iname;
+ char *name = fname->name;
+ int name_len = fname->name_len;
+ int ret;
+
+ /* Sanity check that the resulting name will fit in the buffer */
+ if (fname->name_len > NAME_MAX || fname->ctext_len > NAME_MAX)
+ return -EIO;
+
+ /* Handle the special case of snapshot names that start with '_' */
+ if ((ceph_snap(dir) == CEPH_SNAPDIR) && (name_len > 0) &&
+ (name[0] == '_')) {
+ dir = parse_longname(dir, name, &name_len);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+ name++; /* skip initial '_' */
+ }
+
+ if (!IS_ENCRYPTED(dir)) {
+ oname->name = fname->name;
+ oname->len = fname->name_len;
+ ret = 0;
+ goto out_inode;
+ }
+
+ ret = ceph_fscrypt_prepare_readdir(dir);
+ if (ret)
+ goto out_inode;
+
+ /*
+ * Use the raw dentry name as sent by the MDS instead of
+ * generating a nokey name via fscrypt.
+ */
+ if (!fscrypt_has_encryption_key(dir)) {
+ if (fname->no_copy)
+ oname->name = fname->name;
+ else
+ memcpy(oname->name, fname->name, fname->name_len);
+ oname->len = fname->name_len;
+ if (is_nokey)
+ *is_nokey = true;
+ ret = 0;
+ goto out_inode;
+ }
+
+ if (fname->ctext_len == 0) {
+ int declen;
+
+ if (!tname) {
+ ret = fscrypt_fname_alloc_buffer(NAME_MAX, &_tname);
+ if (ret)
+ goto out_inode;
+ tname = &_tname;
+ }
+
+ declen = ceph_base64_decode(name, name_len, tname->name);
+ if (declen <= 0) {
+ ret = -EIO;
+ goto out;
+ }
+ iname.name = tname->name;
+ iname.len = declen;
+ } else {
+ iname.name = fname->ctext;
+ iname.len = fname->ctext_len;
+ }
+
+ ret = fscrypt_fname_disk_to_usr(dir, 0, 0, &iname, oname);
+ if (!ret && (dir != fname->dir)) {
+ char tmp_buf[CEPH_BASE64_CHARS(NAME_MAX)];
+
+ name_len = snprintf(tmp_buf, sizeof(tmp_buf), "_%.*s_%ld",
+ oname->len, oname->name, dir->i_ino);
+ memcpy(oname->name, tmp_buf, name_len);
+ oname->len = name_len;
+ }
+
+out:
+ fscrypt_fname_free_buffer(&_tname);
+out_inode:
+ if ((dir != fname->dir) && !IS_ERR(dir)) {
+ if ((dir->i_state & I_NEW))
+ discard_new_inode(dir);
+ else
+ iput(dir);
+ }
+ return ret;
+}
+
+/**
+ * ceph_fscrypt_prepare_readdir - simple __fscrypt_prepare_readdir() wrapper
+ * @dir: directory inode for readdir prep
+ *
+ * Simple wrapper around __fscrypt_prepare_readdir() that will mark the
+ * directory as non-complete if this call results in the directory being
+ * unlocked.
+ *
+ * Returns:
+ * 1 - if directory was locked and key is now loaded (i.e. dir is unlocked)
+ * 0 - if directory is still locked
+ * < 0 - if __fscrypt_prepare_readdir() fails
+ */
+int ceph_fscrypt_prepare_readdir(struct inode *dir)
+{
+ bool had_key = fscrypt_has_encryption_key(dir);
+ int err;
+
+ if (!IS_ENCRYPTED(dir))
+ return 0;
+
+ err = __fscrypt_prepare_readdir(dir);
+ if (err)
+ return err;
+ if (!had_key && fscrypt_has_encryption_key(dir)) {
+ /* directory just got unlocked, mark it as not complete */
+ ceph_dir_clear_complete(dir);
+ return 1;
+ }
+ return 0;
+}
+
+int ceph_fscrypt_decrypt_block_inplace(const struct inode *inode,
+ struct page *page, unsigned int len,
+ unsigned int offs, u64 lblk_num)
+{
+ dout("%s: len %u offs %u blk %llu\n", __func__, len, offs, lblk_num);
+ return fscrypt_decrypt_block_inplace(inode, page, len, offs, lblk_num);
+}
+
+int ceph_fscrypt_encrypt_block_inplace(const struct inode *inode,
+ struct page *page, unsigned int len,
+ unsigned int offs, u64 lblk_num,
+ gfp_t gfp_flags)
+{
+ dout("%s: len %u offs %u blk %llu\n", __func__, len, offs, lblk_num);
+ return fscrypt_encrypt_block_inplace(inode, page, len, offs, lblk_num,
+ gfp_flags);
+}
+
+/**
+ * ceph_fscrypt_decrypt_pages - decrypt an array of pages
+ * @inode: pointer to inode associated with these pages
+ * @page: pointer to page array
+ * @off: offset into the file that the read data starts
+ * @len: max length to decrypt
+ *
+ * Decrypt an array of fscrypt'ed pages and return the amount of
+ * data decrypted. Any data in the page prior to the start of the
+ * first complete block in the read is ignored. Any incomplete
+ * crypto blocks at the end of the array are ignored (and should
+ * probably be zeroed by the caller).
+ *
+ * Returns the length of the decrypted data or a negative errno.
+ */
+int ceph_fscrypt_decrypt_pages(struct inode *inode, struct page **page,
+ u64 off, int len)
+{
+ int i, num_blocks;
+ u64 baseblk = off >> CEPH_FSCRYPT_BLOCK_SHIFT;
+ int ret = 0;
+
+ /*
+ * We can't deal with partial blocks on an encrypted file, so mask off
+ * the last bit.
+ */
+ num_blocks = ceph_fscrypt_blocks(off, len & CEPH_FSCRYPT_BLOCK_MASK);
+
+ /* Decrypt each block */
+ for (i = 0; i < num_blocks; ++i) {
+ int blkoff = i << CEPH_FSCRYPT_BLOCK_SHIFT;
+ int pgidx = blkoff >> PAGE_SHIFT;
+ unsigned int pgoffs = offset_in_page(blkoff);
+ int fret;
+
+ fret = ceph_fscrypt_decrypt_block_inplace(inode, page[pgidx],
+ CEPH_FSCRYPT_BLOCK_SIZE, pgoffs,
+ baseblk + i);
+ if (fret < 0) {
+ if (ret == 0)
+ ret = fret;
+ break;
+ }
+ ret += CEPH_FSCRYPT_BLOCK_SIZE;
+ }
+ return ret;
+}
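
A worked example of the loop above, under the assumption that CEPH_FSCRYPT_BLOCK_SHIFT equals PAGE_SHIFT (both 4K), which is the common case the BUILD_BUG_ON in crypto.h allows:

/*
 * Example: off = 8192, len = 12288 (three full blocks).
 *   baseblk    = 8192 >> 12 = 2
 *   num_blocks = ceph_fscrypt_blocks(8192, 12288) = 3
 *   i = 0..2 decrypts page[0], page[1], page[2] in place, one block per
 *   page (pgoffs is 0 for every block), with lblk_num = 2, 3, 4.
 * On success the function returns 3 * CEPH_FSCRYPT_BLOCK_SIZE = 12288.
 */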
+
+/**
+ * ceph_fscrypt_decrypt_extents: decrypt received extents in given buffer
+ * @inode: inode associated with pages being decrypted
+ * @page: pointer to page array
+ * @off: offset into the file that the data in page[0] starts
+ * @map: pointer to extent array
+ * @ext_cnt: length of extent array
+ *
+ * Given an extent map and a page array, decrypt the received data in-place,
+ * skipping holes. Returns the offset into buffer of end of last decrypted
+ * block.
+ */
+int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
+ u64 off, struct ceph_sparse_extent *map,
+ u32 ext_cnt)
+{
+ int i, ret = 0;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ u64 objno, objoff;
+ u32 xlen;
+
+ /* Nothing to do for empty array */
+ if (ext_cnt == 0) {
+ dout("%s: empty array, ret 0\n", __func__);
+ return 0;
+ }
+
+ ceph_calc_file_object_mapping(&ci->i_layout, off, map[0].len,
+ &objno, &objoff, &xlen);
+
+ for (i = 0; i < ext_cnt; ++i) {
+ struct ceph_sparse_extent *ext = &map[i];
+ int pgsoff = ext->off - objoff;
+ int pgidx = pgsoff >> PAGE_SHIFT;
+ int fret;
+
+ if ((ext->off | ext->len) & ~CEPH_FSCRYPT_BLOCK_MASK) {
+ pr_warn("%s: bad encrypted sparse extent idx %d off %llx len %llx\n",
+ __func__, i, ext->off, ext->len);
+ return -EIO;
+ }
+ fret = ceph_fscrypt_decrypt_pages(inode, &page[pgidx],
+ off + pgsoff, ext->len);
+ dout("%s: [%d] 0x%llx~0x%llx fret %d\n", __func__, i,
+ ext->off, ext->len, fret);
+ if (fret < 0) {
+ if (ret == 0)
+ ret = fret;
+ break;
+ }
+ ret = pgsoff + fret;
+ }
+ dout("%s: ret %d\n", __func__, ret);
+ return ret;
+}
+
+/**
+ * ceph_fscrypt_encrypt_pages - encrypt an array of pages
+ * @inode: pointer to inode associated with these pages
+ * @page: pointer to page array
+ * @off: offset into the file that the data starts
+ * @len: max length to encrypt
+ * @gfp: gfp flags to use for allocation
+ *
+ * Encrypt an array of cleartext pages and return the amount of
+ * data encrypted. Any data in the page prior to the start of the
+ * first complete block in the read is ignored. Any incomplete
+ * crypto blocks at the end of the array are ignored.
+ *
+ * Returns the length of the encrypted data or a negative errno.
+ */
+int ceph_fscrypt_encrypt_pages(struct inode *inode, struct page **page, u64 off,
+ int len, gfp_t gfp)
+{
+ int i, num_blocks;
+ u64 baseblk = off >> CEPH_FSCRYPT_BLOCK_SHIFT;
+ int ret = 0;
+
+ /*
+ * We can't deal with partial blocks on an encrypted file, so mask off
+ * the last bit.
+ */
+ num_blocks = ceph_fscrypt_blocks(off, len & CEPH_FSCRYPT_BLOCK_MASK);
+
+ /* Encrypt each block */
+ for (i = 0; i < num_blocks; ++i) {
+ int blkoff = i << CEPH_FSCRYPT_BLOCK_SHIFT;
+ int pgidx = blkoff >> PAGE_SHIFT;
+ unsigned int pgoffs = offset_in_page(blkoff);
+ int fret;
+
+ fret = ceph_fscrypt_encrypt_block_inplace(inode, page[pgidx],
+ CEPH_FSCRYPT_BLOCK_SIZE, pgoffs,
+ baseblk + i, gfp);
+ if (fret < 0) {
+ if (ret == 0)
+ ret = fret;
+ break;
+ }
+ ret += CEPH_FSCRYPT_BLOCK_SIZE;
+ }
+ return ret;
+}
diff --git a/fs/ceph/crypto.h b/fs/ceph/crypto.h
new file mode 100644
index 000000000000..47e0c319fc68
--- /dev/null
+++ b/fs/ceph/crypto.h
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Ceph fscrypt functionality
+ */
+
+#ifndef _CEPH_CRYPTO_H
+#define _CEPH_CRYPTO_H
+
+#include <crypto/sha2.h>
+#include <linux/fscrypt.h>
+
+#define CEPH_FSCRYPT_BLOCK_SHIFT 12
+#define CEPH_FSCRYPT_BLOCK_SIZE (_AC(1, UL) << CEPH_FSCRYPT_BLOCK_SHIFT)
+#define CEPH_FSCRYPT_BLOCK_MASK (~(CEPH_FSCRYPT_BLOCK_SIZE-1))
+
+struct ceph_fs_client;
+struct ceph_acl_sec_ctx;
+struct ceph_mds_request;
+
+struct ceph_fname {
+ struct inode *dir;
+ char *name; // b64 encoded, possibly hashed
+ unsigned char *ctext; // binary crypttext (if any)
+ u32 name_len; // length of name buffer
+ u32 ctext_len; // length of crypttext
+ bool no_copy;
+};
+
+/*
+ * Header for the crypted file when truncating the size, this
+ * will be sent to MDS, and the MDS will update the encrypted
+ * last block and then truncate the size.
+ */
+struct ceph_fscrypt_truncate_size_header {
+ __u8 ver;
+ __u8 compat;
+
+ /*
+ * data_len will be sizeof(assert_ver + file_offset + block_size)
+ * if the last block is empty, i.e. it is located in a file
+ * hole. Otherwise, CEPH_FSCRYPT_BLOCK_SIZE is added to it.
+ */
+ __le32 data_len;
+
+ __le64 change_attr;
+ __le64 file_offset;
+ __le32 block_size;
+} __packed;
+
+struct ceph_fscrypt_auth {
+ __le32 cfa_version;
+ __le32 cfa_blob_len;
+ u8 cfa_blob[FSCRYPT_SET_CONTEXT_MAX_SIZE];
+} __packed;
+
+#define CEPH_FSCRYPT_AUTH_VERSION 1
+static inline u32 ceph_fscrypt_auth_len(struct ceph_fscrypt_auth *fa)
+{
+ u32 ctxsize = le32_to_cpu(fa->cfa_blob_len);
+
+ return offsetof(struct ceph_fscrypt_auth, cfa_blob) + ctxsize;
+}
+
+#ifdef CONFIG_FS_ENCRYPTION
+/*
+ * We want to encrypt filenames when creating them, but the encrypted
+ * versions of those names may have illegal characters in them. To mitigate
+ * that, we base64 encode them, but that gives us a result that can exceed
+ * NAME_MAX.
+ *
+ * Follow a similar scheme to fscrypt itself, and cap the filename to a
+ * smaller size. If the ciphertext name is longer than the value below, then
+ * sha256 hash the remaining bytes.
+ *
+ * For the fscrypt_nokey_name struct, the dirhash[2] member is useless in
+ * ceph, so the corresponding struct will be:
+ *
+ * struct fscrypt_ceph_nokey_name {
+ * u8 bytes[157];
+ * u8 sha256[SHA256_DIGEST_SIZE];
+ * }; // 180 bytes => 240 bytes base64-encoded, which is <= NAME_MAX (255)
+ *
+ * (240 bytes is the maximum size allowed for snapshot names to take into
+ * account the format: '_<SNAPSHOT-NAME>_<INODE-NUMBER>'.)
+ *
+ * Note that for long names that end up having their tail portion hashed, we
+ * must also store the full encrypted name (in the dentry's alternate_name
+ * field).
+ */
+#define CEPH_NOHASH_NAME_MAX (180 - SHA256_DIGEST_SIZE)
+
+#define CEPH_BASE64_CHARS(nbytes) DIV_ROUND_UP((nbytes) * 4, 3)
+
+int ceph_base64_encode(const u8 *src, int srclen, char *dst);
+int ceph_base64_decode(const char *src, int srclen, u8 *dst);
+
+void ceph_fscrypt_set_ops(struct super_block *sb);
+
+void ceph_fscrypt_free_dummy_policy(struct ceph_fs_client *fsc);
+
+int ceph_fscrypt_prepare_context(struct inode *dir, struct inode *inode,
+ struct ceph_acl_sec_ctx *as);
+void ceph_fscrypt_as_ctx_to_req(struct ceph_mds_request *req,
+ struct ceph_acl_sec_ctx *as);
+int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name,
+ char *buf);
+int ceph_encode_encrypted_fname(struct inode *parent, struct dentry *dentry,
+ char *buf);
+
+static inline int ceph_fname_alloc_buffer(struct inode *parent,
+ struct fscrypt_str *fname)
+{
+ if (!IS_ENCRYPTED(parent))
+ return 0;
+ return fscrypt_fname_alloc_buffer(NAME_MAX, fname);
+}
+
+static inline void ceph_fname_free_buffer(struct inode *parent,
+ struct fscrypt_str *fname)
+{
+ if (IS_ENCRYPTED(parent))
+ fscrypt_fname_free_buffer(fname);
+}
+
+int ceph_fname_to_usr(const struct ceph_fname *fname, struct fscrypt_str *tname,
+ struct fscrypt_str *oname, bool *is_nokey);
+int ceph_fscrypt_prepare_readdir(struct inode *dir);
+
+static inline unsigned int ceph_fscrypt_blocks(u64 off, u64 len)
+{
+ /* crypto blocks cannot span more than one page */
+ BUILD_BUG_ON(CEPH_FSCRYPT_BLOCK_SHIFT > PAGE_SHIFT);
+
+ return ((off+len+CEPH_FSCRYPT_BLOCK_SIZE-1) >> CEPH_FSCRYPT_BLOCK_SHIFT) -
+ (off >> CEPH_FSCRYPT_BLOCK_SHIFT);
+}
+
+/*
+ * If we have an encrypted inode then we must adjust the offset and
+ * range of the on-the-wire read to cover an entire encryption block.
+ * The copy will be done using the original offset and length, after
+ * we've decrypted the result.
+ */
+static inline void ceph_fscrypt_adjust_off_and_len(struct inode *inode,
+ u64 *off, u64 *len)
+{
+ if (IS_ENCRYPTED(inode)) {
+ *len = ceph_fscrypt_blocks(*off, *len) * CEPH_FSCRYPT_BLOCK_SIZE;
+ *off &= CEPH_FSCRYPT_BLOCK_MASK;
+ }
+}
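
A worked example of the adjustment above (illustrative numbers only):

/*
 * Encrypted inode, caller asks for off = 5000, len = 100:
 *   ceph_fscrypt_blocks(5000, 100) = 1
 *   *len = 1 * CEPH_FSCRYPT_BLOCK_SIZE = 4096
 *   *off &= CEPH_FSCRYPT_BLOCK_MASK  -> 4096
 * The OSD read now covers the single 4K crypto block [4096, 8192); the
 * original 100 bytes at offset 5000 are copied out after decryption.
 */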
+
+int ceph_fscrypt_decrypt_block_inplace(const struct inode *inode,
+ struct page *page, unsigned int len,
+ unsigned int offs, u64 lblk_num);
+int ceph_fscrypt_encrypt_block_inplace(const struct inode *inode,
+ struct page *page, unsigned int len,
+ unsigned int offs, u64 lblk_num,
+ gfp_t gfp_flags);
+int ceph_fscrypt_decrypt_pages(struct inode *inode, struct page **page,
+ u64 off, int len);
+int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
+ u64 off, struct ceph_sparse_extent *map,
+ u32 ext_cnt);
+int ceph_fscrypt_encrypt_pages(struct inode *inode, struct page **page, u64 off,
+ int len, gfp_t gfp);
+
+static inline struct page *ceph_fscrypt_pagecache_page(struct page *page)
+{
+ return fscrypt_is_bounce_page(page) ? fscrypt_pagecache_page(page) : page;
+}
+
+#else /* CONFIG_FS_ENCRYPTION */
+
+static inline void ceph_fscrypt_set_ops(struct super_block *sb)
+{
+}
+
+static inline void ceph_fscrypt_free_dummy_policy(struct ceph_fs_client *fsc)
+{
+}
+
+static inline int ceph_fscrypt_prepare_context(struct inode *dir,
+ struct inode *inode,
+ struct ceph_acl_sec_ctx *as)
+{
+ if (IS_ENCRYPTED(dir))
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static inline void ceph_fscrypt_as_ctx_to_req(struct ceph_mds_request *req,
+ struct ceph_acl_sec_ctx *as_ctx)
+{
+}
+
+static inline int ceph_encode_encrypted_dname(struct inode *parent,
+ struct qstr *d_name, char *buf)
+{
+ memcpy(buf, d_name->name, d_name->len);
+ return d_name->len;
+}
+
+static inline int ceph_encode_encrypted_fname(struct inode *parent,
+ struct dentry *dentry, char *buf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ceph_fname_alloc_buffer(struct inode *parent,
+ struct fscrypt_str *fname)
+{
+ return 0;
+}
+
+static inline void ceph_fname_free_buffer(struct inode *parent,
+ struct fscrypt_str *fname)
+{
+}
+
+static inline int ceph_fname_to_usr(const struct ceph_fname *fname,
+ struct fscrypt_str *tname,
+ struct fscrypt_str *oname, bool *is_nokey)
+{
+ oname->name = fname->name;
+ oname->len = fname->name_len;
+ return 0;
+}
+
+static inline int ceph_fscrypt_prepare_readdir(struct inode *dir)
+{
+ return 0;
+}
+
+static inline void ceph_fscrypt_adjust_off_and_len(struct inode *inode,
+ u64 *off, u64 *len)
+{
+}
+
+static inline int ceph_fscrypt_decrypt_block_inplace(const struct inode *inode,
+ struct page *page, unsigned int len,
+ unsigned int offs, u64 lblk_num)
+{
+ return 0;
+}
+
+static inline int ceph_fscrypt_encrypt_block_inplace(const struct inode *inode,
+ struct page *page, unsigned int len,
+ unsigned int offs, u64 lblk_num,
+ gfp_t gfp_flags)
+{
+ return 0;
+}
+
+static inline int ceph_fscrypt_decrypt_pages(struct inode *inode,
+ struct page **page, u64 off,
+ int len)
+{
+ return 0;
+}
+
+static inline int ceph_fscrypt_decrypt_extents(struct inode *inode,
+ struct page **page, u64 off,
+ struct ceph_sparse_extent *map,
+ u32 ext_cnt)
+{
+ return 0;
+}
+
+static inline int ceph_fscrypt_encrypt_pages(struct inode *inode,
+ struct page **page, u64 off,
+ int len, gfp_t gfp)
+{
+ return 0;
+}
+
+static inline struct page *ceph_fscrypt_pagecache_page(struct page *page)
+{
+ return page;
+}
+#endif /* CONFIG_FS_ENCRYPTION */
+
+static inline loff_t ceph_fscrypt_page_offset(struct page *page)
+{
+ return page_offset(ceph_fscrypt_pagecache_page(page));
+}
+
+#endif /* _CEPH_CRYPTO_H */
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index bdcffb04513f..854cbdd66661 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -9,6 +9,7 @@
#include "super.h"
#include "mds_client.h"
+#include "crypto.h"
/*
* Directory operations: readdir, lookup, create, link, unlink,
@@ -241,7 +242,9 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
di = ceph_dentry(dentry);
if (d_unhashed(dentry) ||
d_really_is_negative(dentry) ||
- di->lease_shared_gen != shared_gen) {
+ di->lease_shared_gen != shared_gen ||
+ ((dentry->d_flags & DCACHE_NOKEY_NAME) &&
+ fscrypt_has_encryption_key(dir))) {
spin_unlock(&dentry->d_lock);
dput(dentry);
err = -EAGAIN;
@@ -340,6 +343,10 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
ctx->pos = 2;
}
+ err = ceph_fscrypt_prepare_readdir(inode);
+ if (err < 0)
+ return err;
+
spin_lock(&ci->i_ceph_lock);
/* request Fx cap. if have Fx, we don't need to release Fs cap
* for later create/unlink. */
@@ -389,6 +396,7 @@ more:
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
+
err = ceph_alloc_readdir_reply_buffer(req, inode);
if (err) {
ceph_mdsc_put_request(req);
@@ -402,11 +410,21 @@ more:
req->r_inode_drop = CEPH_CAP_FILE_EXCL;
}
if (dfi->last_name) {
- req->r_path2 = kstrdup(dfi->last_name, GFP_KERNEL);
+ struct qstr d_name = { .name = dfi->last_name,
+ .len = strlen(dfi->last_name) };
+
+ req->r_path2 = kzalloc(NAME_MAX + 1, GFP_KERNEL);
if (!req->r_path2) {
ceph_mdsc_put_request(req);
return -ENOMEM;
}
+
+ err = ceph_encode_encrypted_dname(inode, &d_name,
+ req->r_path2);
+ if (err < 0) {
+ ceph_mdsc_put_request(req);
+ return err;
+ }
} else if (is_hash_order(ctx->pos)) {
req->r_args.readdir.offset_hash =
cpu_to_le32(fpos_hash(ctx->pos));
@@ -511,15 +529,20 @@ more:
for (; i < rinfo->dir_nr; i++) {
struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
- BUG_ON(rde->offset < ctx->pos);
+ if (rde->offset < ctx->pos) {
+ pr_warn("%s: rde->offset 0x%llx ctx->pos 0x%llx\n",
+ __func__, rde->offset, ctx->pos);
+ return -EIO;
+ }
+
+ if (WARN_ON_ONCE(!rde->inode.in))
+ return -EIO;
ctx->pos = rde->offset;
dout("readdir (%d/%d) -> %llx '%.*s' %p\n",
i, rinfo->dir_nr, ctx->pos,
rde->name_len, rde->name, &rde->inode.in);
- BUG_ON(!rde->inode.in);
-
if (!dir_emit(ctx, rde->name, rde->name_len,
ceph_present_ino(inode->i_sb, le64_to_cpu(rde->inode.in->ino)),
le32_to_cpu(rde->inode.in->mode) >> 12)) {
@@ -532,6 +555,8 @@ more:
dout("filldir stopping us...\n");
return 0;
}
+
+ /* Reset the lengths to their original allocated vals */
ctx->pos++;
}
@@ -586,7 +611,6 @@ more:
dfi->dir_ordered_count);
spin_unlock(&ci->i_ceph_lock);
}
-
dout("readdir %p file %p done.\n", inode, file);
return 0;
}
@@ -760,6 +784,18 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
if (dentry->d_name.len > NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
+ if (IS_ENCRYPTED(dir)) {
+ bool had_key = fscrypt_has_encryption_key(dir);
+
+ err = fscrypt_prepare_lookup_partial(dir, dentry);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ /* mark directory as incomplete if it has been unlocked */
+ if (!had_key && fscrypt_has_encryption_key(dir))
+ ceph_dir_clear_complete(dir);
+ }
+
/* can we conclude ENOENT locally? */
if (d_really_is_negative(dentry)) {
struct ceph_inode_info *ci = ceph_inode(dir);
@@ -865,13 +901,6 @@ static int ceph_mknod(struct mnt_idmap *idmap, struct inode *dir,
goto out;
}
- err = ceph_pre_init_acls(dir, &mode, &as_ctx);
- if (err < 0)
- goto out;
- err = ceph_security_init_secctx(dentry, mode, &as_ctx);
- if (err < 0)
- goto out;
-
dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
dir, dentry, mode, rdev);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
@@ -879,6 +908,17 @@ static int ceph_mknod(struct mnt_idmap *idmap, struct inode *dir,
err = PTR_ERR(req);
goto out;
}
+
+ req->r_new_inode = ceph_new_inode(dir, dentry, &mode, &as_ctx);
+ if (IS_ERR(req->r_new_inode)) {
+ err = PTR_ERR(req->r_new_inode);
+ req->r_new_inode = NULL;
+ goto out_req;
+ }
+
+ if (S_ISREG(mode) && IS_ENCRYPTED(dir))
+ set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
+
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_parent = dir;
@@ -889,13 +929,13 @@ static int ceph_mknod(struct mnt_idmap *idmap, struct inode *dir,
req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
- if (as_ctx.pagelist) {
- req->r_pagelist = as_ctx.pagelist;
- as_ctx.pagelist = NULL;
- }
+
+ ceph_as_ctx_to_req(req, &as_ctx);
+
err = ceph_mdsc_do_request(mdsc, dir, req);
if (!err && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
+out_req:
ceph_mdsc_put_request(req);
out:
if (!err)
@@ -912,12 +952,50 @@ static int ceph_create(struct mnt_idmap *idmap, struct inode *dir,
return ceph_mknod(idmap, dir, dentry, mode, 0);
}
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+static int prep_encrypted_symlink_target(struct ceph_mds_request *req,
+ const char *dest)
+{
+ int err;
+ int len = strlen(dest);
+ struct fscrypt_str osd_link = FSTR_INIT(NULL, 0);
+
+ err = fscrypt_prepare_symlink(req->r_parent, dest, len, PATH_MAX,
+ &osd_link);
+ if (err)
+ goto out;
+
+ err = fscrypt_encrypt_symlink(req->r_new_inode, dest, len, &osd_link);
+ if (err)
+ goto out;
+
+ req->r_path2 = kmalloc(CEPH_BASE64_CHARS(osd_link.len) + 1, GFP_KERNEL);
+ if (!req->r_path2) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ len = ceph_base64_encode(osd_link.name, osd_link.len, req->r_path2);
+ req->r_path2[len] = '\0';
+out:
+ fscrypt_fname_free_buffer(&osd_link);
+ return err;
+}
+#else
+static int prep_encrypted_symlink_target(struct ceph_mds_request *req,
+ const char *dest)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
static int ceph_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *dest)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_mds_request *req;
struct ceph_acl_sec_ctx as_ctx = {};
+ umode_t mode = S_IFLNK | 0777;
int err;
if (ceph_snap(dir) != CEPH_NOSNAP)
@@ -932,38 +1010,48 @@ static int ceph_symlink(struct mnt_idmap *idmap, struct inode *dir,
goto out;
}
- err = ceph_security_init_secctx(dentry, S_IFLNK | 0777, &as_ctx);
- if (err < 0)
- goto out;
-
dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out;
}
- req->r_path2 = kstrdup(dest, GFP_KERNEL);
- if (!req->r_path2) {
- err = -ENOMEM;
- ceph_mdsc_put_request(req);
- goto out;
+
+ req->r_new_inode = ceph_new_inode(dir, dentry, &mode, &as_ctx);
+ if (IS_ERR(req->r_new_inode)) {
+ err = PTR_ERR(req->r_new_inode);
+ req->r_new_inode = NULL;
+ goto out_req;
}
+
req->r_parent = dir;
ihold(dir);
+ if (IS_ENCRYPTED(req->r_new_inode)) {
+ err = prep_encrypted_symlink_target(req, dest);
+ if (err)
+ goto out_req;
+ } else {
+ req->r_path2 = kstrdup(dest, GFP_KERNEL);
+ if (!req->r_path2) {
+ err = -ENOMEM;
+ goto out_req;
+ }
+ }
+
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
- if (as_ctx.pagelist) {
- req->r_pagelist = as_ctx.pagelist;
- as_ctx.pagelist = NULL;
- }
+
+ ceph_as_ctx_to_req(req, &as_ctx);
+
err = ceph_mdsc_do_request(mdsc, dir, req);
if (!err && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
+out_req:
ceph_mdsc_put_request(req);
out:
if (err)
@@ -1003,14 +1091,12 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
err = -EDQUOT;
goto out;
}
-
- mode |= S_IFDIR;
- err = ceph_pre_init_acls(dir, &mode, &as_ctx);
- if (err < 0)
- goto out;
- err = ceph_security_init_secctx(dentry, mode, &as_ctx);
- if (err < 0)
+ if ((op == CEPH_MDS_OP_MKSNAP) && IS_ENCRYPTED(dir) &&
+ !fscrypt_has_encryption_key(dir)) {
+ err = -ENOKEY;
goto out;
+ }
+
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req)) {
@@ -1018,6 +1104,14 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
goto out;
}
+ mode |= S_IFDIR;
+ req->r_new_inode = ceph_new_inode(dir, dentry, &mode, &as_ctx);
+ if (IS_ERR(req->r_new_inode)) {
+ err = PTR_ERR(req->r_new_inode);
+ req->r_new_inode = NULL;
+ goto out_req;
+ }
+
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_parent = dir;
@@ -1027,15 +1121,15 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
- if (as_ctx.pagelist) {
- req->r_pagelist = as_ctx.pagelist;
- as_ctx.pagelist = NULL;
- }
+
+ ceph_as_ctx_to_req(req, &as_ctx);
+
err = ceph_mdsc_do_request(mdsc, dir, req);
if (!err &&
!req->r_reply_info.head->is_target &&
!req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
+out_req:
ceph_mdsc_put_request(req);
out:
if (!err)
@@ -1063,6 +1157,10 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
if (ceph_snap(dir) != CEPH_NOSNAP)
return -EROFS;
+ err = fscrypt_prepare_link(old_dentry, dir, dentry);
+ if (err)
+ return err;
+
dout("link in dir %p %llx.%llx old_dentry %p:'%pd' dentry %p:'%pd'\n",
dir, ceph_vinop(dir), old_dentry, old_dentry, dentry, dentry);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
@@ -1310,6 +1408,11 @@ static int ceph_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (err)
return err;
+ err = fscrypt_prepare_rename(old_dir, old_dentry, new_dir, new_dentry,
+ flags);
+ if (err)
+ return err;
+
dout("rename dir %p dentry %p to dir %p dentry %p\n",
old_dir, old_dentry, new_dir, new_dentry);
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
@@ -1765,6 +1868,10 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
struct inode *dir, *inode;
struct ceph_mds_client *mdsc;
+ valid = fscrypt_d_revalidate(dentry, flags);
+ if (valid <= 0)
+ return valid;
+
if (flags & LOOKUP_RCU) {
parent = READ_ONCE(dentry->d_parent);
dir = d_inode_rcu(parent);
@@ -1777,8 +1884,9 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
inode = d_inode(dentry);
}
- dout("d_revalidate %p '%pd' inode %p offset 0x%llx\n", dentry,
- dentry, inode, ceph_dentry(dentry)->offset);
+ dout("d_revalidate %p '%pd' inode %p offset 0x%llx nokey %d\n", dentry,
+ dentry, inode, ceph_dentry(dentry)->offset,
+ !!(dentry->d_flags & DCACHE_NOKEY_NAME));
mdsc = ceph_sb_to_client(dir->i_sb)->mdsc;
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index f780e4e0d062..8559990a59a5 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -7,6 +7,7 @@
#include "super.h"
#include "mds_client.h"
+#include "crypto.h"
/*
* Basic fh
@@ -535,7 +536,9 @@ static int ceph_get_name(struct dentry *parent, char *name,
{
struct ceph_mds_client *mdsc;
struct ceph_mds_request *req;
+ struct inode *dir = d_inode(parent);
struct inode *inode = d_inode(child);
+ struct ceph_mds_reply_info_parsed *rinfo;
int err;
if (ceph_snap(inode) != CEPH_NOSNAP)
@@ -547,30 +550,47 @@ static int ceph_get_name(struct dentry *parent, char *name,
if (IS_ERR(req))
return PTR_ERR(req);
- inode_lock(d_inode(parent));
-
+ inode_lock(dir);
req->r_inode = inode;
ihold(inode);
req->r_ino2 = ceph_vino(d_inode(parent));
- req->r_parent = d_inode(parent);
- ihold(req->r_parent);
+ req->r_parent = dir;
+ ihold(dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_num_caps = 2;
err = ceph_mdsc_do_request(mdsc, NULL, req);
+ inode_unlock(dir);
- inode_unlock(d_inode(parent));
+ if (err)
+ goto out;
- if (!err) {
- struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
+ rinfo = &req->r_reply_info;
+ if (!IS_ENCRYPTED(dir)) {
memcpy(name, rinfo->dname, rinfo->dname_len);
name[rinfo->dname_len] = 0;
- dout("get_name %p ino %llx.%llx name %s\n",
- child, ceph_vinop(inode), name);
} else {
- dout("get_name %p ino %llx.%llx err %d\n",
- child, ceph_vinop(inode), err);
- }
+ struct fscrypt_str oname = FSTR_INIT(NULL, 0);
+ struct ceph_fname fname = { .dir = dir,
+ .name = rinfo->dname,
+ .ctext = rinfo->altname,
+ .name_len = rinfo->dname_len,
+ .ctext_len = rinfo->altname_len };
+
+ err = ceph_fname_alloc_buffer(dir, &oname);
+ if (err < 0)
+ goto out;
+ err = ceph_fname_to_usr(&fname, NULL, &oname, NULL);
+ if (!err) {
+ memcpy(name, oname.name, oname.len);
+ name[oname.len] = 0;
+ }
+ ceph_fname_free_buffer(dir, &oname);
+ }
+out:
+ dout("get_name %p ino %llx.%llx err %d %s%s\n",
+ child, ceph_vinop(inode), err,
+ err ? "" : "name ", err ? "" : name);
ceph_mdsc_put_request(req);
return err;
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 63efe5389783..b1da02f5dbe3 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -366,8 +366,13 @@ int ceph_open(struct inode *inode, struct file *file)
/* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */
flags = file->f_flags & ~(O_CREAT|O_EXCL);
- if (S_ISDIR(inode->i_mode))
+ if (S_ISDIR(inode->i_mode)) {
flags = O_DIRECTORY; /* mds likes to know */
+ } else if (S_ISREG(inode->i_mode)) {
+ err = fscrypt_file_open(inode, file);
+ if (err)
+ return err;
+ }
dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
ceph_vinop(inode), file, flags, file->f_flags);
@@ -604,7 +609,8 @@ out:
ceph_mdsc_release_dir_caps(req);
}
-static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
+static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
+ struct dentry *dentry,
struct file *file, umode_t mode,
struct ceph_mds_request *req,
struct ceph_acl_sec_ctx *as_ctx,
@@ -616,7 +622,6 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
struct ceph_mds_reply_info_in iinfo = { .in = &in };
struct ceph_inode_info *ci = ceph_inode(dir);
struct ceph_dentry_info *di = ceph_dentry(dentry);
- struct inode *inode;
struct timespec64 now;
struct ceph_string *pool_ns;
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
@@ -625,10 +630,6 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
ktime_get_real_ts64(&now);
- inode = ceph_get_inode(dentry->d_sb, vino);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
-
iinfo.inline_version = CEPH_INLINE_NONE;
iinfo.change_attr = 1;
ceph_encode_timespec64(&iinfo.btime, &now);
@@ -686,8 +687,7 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
ceph_dir_clear_complete(dir);
if (!d_unhashed(dentry))
d_drop(dentry);
- if (inode->i_state & I_NEW)
- discard_new_inode(inode);
+ discard_new_inode(inode);
} else {
struct dentry *dn;
@@ -733,6 +733,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
+ struct inode *new_inode = NULL;
struct dentry *dn;
struct ceph_acl_sec_ctx as_ctx = {};
bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
@@ -755,15 +756,16 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
*/
flags &= ~O_TRUNC;
+retry:
if (flags & O_CREAT) {
if (ceph_quota_is_max_files_exceeded(dir))
return -EDQUOT;
- err = ceph_pre_init_acls(dir, &mode, &as_ctx);
- if (err < 0)
- return err;
- err = ceph_security_init_secctx(dentry, mode, &as_ctx);
- if (err < 0)
+
+ new_inode = ceph_new_inode(dir, dentry, &mode, &as_ctx);
+ if (IS_ERR(new_inode)) {
+ err = PTR_ERR(new_inode);
goto out_ctx;
+ }
/* Async create can't handle more than a page of xattrs */
if (as_ctx.pagelist &&
!list_is_singular(&as_ctx.pagelist->head))
@@ -772,7 +774,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
/* If it's not being looked up, it's negative */
return -ENOENT;
}
-retry:
+
/* do the open */
req = prepare_open_request(dir->i_sb, flags, mode);
if (IS_ERR(req)) {
@@ -787,6 +789,12 @@ retry:
req->r_args.open.mask = cpu_to_le32(mask);
req->r_parent = dir;
ihold(dir);
+ if (IS_ENCRYPTED(dir)) {
+ set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
+ err = fscrypt_prepare_lookup_partial(dir, dentry);
+ if (err < 0)
+ goto out_req;
+ }
if (flags & O_CREAT) {
struct ceph_file_layout lo;
@@ -794,32 +802,47 @@ retry:
req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
CEPH_CAP_XATTR_EXCL;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
- if (as_ctx.pagelist) {
- req->r_pagelist = as_ctx.pagelist;
- as_ctx.pagelist = NULL;
- }
- if (try_async &&
- (req->r_dir_caps =
- try_prep_async_create(dir, dentry, &lo,
- &req->r_deleg_ino))) {
+
+ ceph_as_ctx_to_req(req, &as_ctx);
+
+ if (try_async && (req->r_dir_caps =
+ try_prep_async_create(dir, dentry, &lo,
+ &req->r_deleg_ino))) {
+ struct ceph_vino vino = { .ino = req->r_deleg_ino,
+ .snap = CEPH_NOSNAP };
struct ceph_dentry_info *di = ceph_dentry(dentry);
set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
req->r_callback = ceph_async_create_cb;
+ /* Hash inode before RPC */
+ new_inode = ceph_get_inode(dir->i_sb, vino, new_inode);
+ if (IS_ERR(new_inode)) {
+ err = PTR_ERR(new_inode);
+ new_inode = NULL;
+ goto out_req;
+ }
+ WARN_ON_ONCE(!(new_inode->i_state & I_NEW));
+
spin_lock(&dentry->d_lock);
di->flags |= CEPH_DENTRY_ASYNC_CREATE;
spin_unlock(&dentry->d_lock);
err = ceph_mdsc_submit_request(mdsc, dir, req);
if (!err) {
- err = ceph_finish_async_create(dir, dentry,
- file, mode, req,
- &as_ctx, &lo);
+ err = ceph_finish_async_create(dir, new_inode,
+ dentry, file,
+ mode, req,
+ &as_ctx, &lo);
+ new_inode = NULL;
} else if (err == -EJUKEBOX) {
restore_deleg_ino(dir, req->r_deleg_ino);
ceph_mdsc_put_request(req);
+ discard_new_inode(new_inode);
+ ceph_release_acl_sec_ctx(&as_ctx);
+ memset(&as_ctx, 0, sizeof(as_ctx));
+ new_inode = NULL;
try_async = false;
ceph_put_string(rcu_dereference_raw(lo.pool_ns));
goto retry;
@@ -830,6 +853,8 @@ retry:
}
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
+ req->r_new_inode = new_inode;
+ new_inode = NULL;
err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? dir : NULL, req);
if (err == -ENOENT) {
dentry = ceph_handle_snapdir(req, dentry);
@@ -858,6 +883,13 @@ retry:
dout("atomic_open finish_no_open on dn %p\n", dn);
err = finish_no_open(file, dn);
} else {
+ if (IS_ENCRYPTED(dir) &&
+ !fscrypt_has_permitted_context(dir, d_inode(dentry))) {
+ pr_warn("Inconsistent encryption context (parent %llx:%llx child %llx:%llx)\n",
+ ceph_vinop(dir), ceph_vinop(d_inode(dentry)));
+ goto out_req;
+ }
+
dout("atomic_open finish_open on dn %p\n", dn);
if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
struct inode *newino = d_inode(dentry);
@@ -870,6 +902,7 @@ retry:
}
out_req:
ceph_mdsc_put_request(req);
+ iput(new_inode);
out_ctx:
ceph_release_acl_sec_ctx(&as_ctx);
dout("atomic_open result=%d\n", err);
@@ -924,21 +957,24 @@ enum {
* If we get a short result from the OSD, check against i_size; we need to
* only return a short read to the caller if we hit EOF.
*/
-static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
- int *retry_op)
+ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
+ struct iov_iter *to, int *retry_op,
+ u64 *last_objver)
{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_osd_client *osdc = &fsc->client->osdc;
ssize_t ret;
- u64 off = iocb->ki_pos;
+ u64 off = *ki_pos;
u64 len = iov_iter_count(to);
u64 i_size = i_size_read(inode);
+ bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
+ u64 objver = 0;
- dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
- (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
+ dout("sync_read on inode %p %llx~%llx\n", inode, *ki_pos, len);
+
+ if (ceph_inode_is_shutdown(inode))
+ return -EIO;
if (!len)
return 0;
@@ -962,10 +998,21 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
bool more;
int idx;
size_t left;
+ struct ceph_osd_req_op *op;
+ u64 read_off = off;
+ u64 read_len = len;
+
+ /* determine new offset/length if encrypted */
+ ceph_fscrypt_adjust_off_and_len(inode, &read_off, &read_len);
+
+ dout("sync_read orig %llu~%llu reading %llu~%llu",
+ off, len, read_off, read_len);
req = ceph_osdc_new_request(osdc, &ci->i_layout,
- ci->i_vino, off, &len, 0, 1,
- CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
+ ci->i_vino, read_off, &read_len, 0, 1,
+ sparse ? CEPH_OSD_OP_SPARSE_READ :
+ CEPH_OSD_OP_READ,
+ CEPH_OSD_FLAG_READ,
NULL, ci->i_truncate_seq,
ci->i_truncate_size, false);
if (IS_ERR(req)) {
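Editor's note: the hunk above widens the read with ceph_fscrypt_adjust_off_and_len() before issuing the OSD request. A minimal sketch of what that adjustment is expected to do (aligning both ends to fscrypt block boundaries) is shown below; the block size and the rounding are assumptions for illustration, not the kernel implementation.

#include <stdio.h>
#include <stdint.h>

#define FSCRYPT_BLOCK_SIZE 4096ULL	/* assumed block size for illustration */

/* Widen [off, off+len) so both ends fall on fscrypt block boundaries. */
static void adjust_off_and_len(uint64_t *off, uint64_t *len)
{
	uint64_t start = *off - (*off % FSCRYPT_BLOCK_SIZE);
	uint64_t end = *off + *len;

	end = ((end + FSCRYPT_BLOCK_SIZE - 1) / FSCRYPT_BLOCK_SIZE) *
	      FSCRYPT_BLOCK_SIZE;
	*off = start;
	*len = end - start;
}

int main(void)
{
	uint64_t off = 5000, len = 100;

	adjust_off_and_len(&off, &len);
	printf("read %llu~%llu\n", (unsigned long long)off,
	       (unsigned long long)len);	/* 4096~4096 */
	return 0;
}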
@@ -973,10 +1020,13 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
break;
}
+ /* adjust len downward if the request truncated the len */
+ if (off + len > read_off + read_len)
+ len = read_off + read_len - off;
more = len < iov_iter_count(to);
- num_pages = calc_pages_for(off, len);
- page_off = off & ~PAGE_MASK;
+ num_pages = calc_pages_for(read_off, read_len);
+ page_off = offset_in_page(off);
pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
if (IS_ERR(pages)) {
ceph_osdc_put_request(req);
@@ -984,29 +1034,75 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
break;
}
- osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
+ osd_req_op_extent_osd_data_pages(req, 0, pages, read_len,
+ offset_in_page(read_off),
false, false);
+
+ op = &req->r_ops[0];
+ if (sparse) {
+ ret = ceph_alloc_sparse_ext_map(op);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ break;
+ }
+ }
+
ceph_osdc_start_request(osdc, req);
ret = ceph_osdc_wait_request(osdc, req);
ceph_update_read_metrics(&fsc->mdsc->metric,
req->r_start_latency,
req->r_end_latency,
- len, ret);
+ read_len, ret);
- ceph_osdc_put_request(req);
+ if (ret > 0)
+ objver = req->r_version;
i_size = i_size_read(inode);
dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
off, len, ret, i_size, (more ? " MORE" : ""));
- if (ret == -ENOENT)
+ /* Fix it to go to end of extent map */
+ if (sparse && ret >= 0)
+ ret = ceph_sparse_ext_map_end(op);
+ else if (ret == -ENOENT)
ret = 0;
+
+ if (ret > 0 && IS_ENCRYPTED(inode)) {
+ int fret;
+
+ fret = ceph_fscrypt_decrypt_extents(inode, pages,
+ read_off, op->extent.sparse_ext,
+ op->extent.sparse_ext_cnt);
+ if (fret < 0) {
+ ret = fret;
+ ceph_osdc_put_request(req);
+ break;
+ }
+
+ /* account for any partial block at the beginning */
+ fret -= (off - read_off);
+
+ /*
+ * Short read after the offset adjustment?
+ * Nothing usable remains; treat it as a
+ * zero-length read.
+ */
+ fret = max(fret, 0);
+
+ /* account for partial block at the end */
+ ret = min_t(ssize_t, fret, len);
+ }
+
+ ceph_osdc_put_request(req);
+
+ /* Short read but not EOF? Zero out the remainder. */
if (ret >= 0 && ret < len && (off + ret < i_size)) {
int zlen = min(len - ret, i_size - off - ret);
int zoff = page_off + ret;
+
dout("sync_read zero gap %llu~%llu\n",
- off + ret, off + ret + zlen);
+ off + ret, off + ret + zlen);
ceph_zero_page_vector_range(zoff, zlen, pages);
ret += zlen;
}
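Editor's note: after decrypting, the code above discards the bytes that only exist because the read was widened to block boundaries. A minimal sketch of that length fixup, with illustrative values only:

#include <stdio.h>
#include <stdint.h>

/*
 * The OSD read was widened to block boundaries, so bytes decrypted
 * before the caller's offset and past the caller's length must be
 * discarded. Not kernel code; values are illustrative.
 */
int main(void)
{
	int64_t off = 5000, len = 100;		/* what the caller asked for */
	int64_t read_off = 4096;		/* block-aligned read start */
	int64_t fret = 2000;			/* bytes actually decrypted */

	fret -= off - read_off;			/* drop the leading partial block */
	if (fret < 0)
		fret = 0;			/* nothing usable: zero-length read */
	if (fret > len)
		fret = len;			/* drop the trailing partial block */

	printf("usable bytes: %lld\n", (long long)fret);	/* 100 */
	return 0;
}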
@@ -1014,15 +1110,16 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
idx = 0;
left = ret > 0 ? ret : 0;
while (left > 0) {
- size_t len, copied;
- page_off = off & ~PAGE_MASK;
- len = min_t(size_t, left, PAGE_SIZE - page_off);
+ size_t plen, copied;
+
+ plen = min_t(size_t, left, PAGE_SIZE - page_off);
SetPageUptodate(pages[idx]);
copied = copy_page_to_iter(pages[idx++],
- page_off, len, to);
+ page_off, plen, to);
off += copied;
left -= copied;
- if (copied < len) {
+ page_off = 0;
+ if (copied < plen) {
ret = -EFAULT;
break;
}
@@ -1039,21 +1136,37 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
break;
}
- if (off > iocb->ki_pos) {
- if (off >= i_size) {
- *retry_op = CHECK_EOF;
- ret = i_size - iocb->ki_pos;
- iocb->ki_pos = i_size;
- } else {
- ret = off - iocb->ki_pos;
- iocb->ki_pos = off;
+ if (ret > 0) {
+ if (off > *ki_pos) {
+ if (off >= i_size) {
+ *retry_op = CHECK_EOF;
+ ret = i_size - *ki_pos;
+ *ki_pos = i_size;
+ } else {
+ ret = off - *ki_pos;
+ *ki_pos = off;
+ }
}
- }
+ if (last_objver)
+ *last_objver = objver;
+ }
dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
return ret;
}
+static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
+ int *retry_op)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+
+ dout("sync_read on file %p %llx~%zx %s\n", file, iocb->ki_pos,
+ iov_iter_count(to), (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
+
+ return __ceph_sync_read(inode, &iocb->ki_pos, to, retry_op, NULL);
+}
+
struct ceph_aio_request {
struct kiocb *iocb;
size_t total_len;
@@ -1125,8 +1238,10 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
struct inode *inode = req->r_inode;
struct ceph_aio_request *aio_req = req->r_priv;
struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
+ struct ceph_osd_req_op *op = &req->r_ops[0];
struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
unsigned int len = osd_data->bvec_pos.iter.bi_size;
+ bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);
BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
BUG_ON(!osd_data->num_bvecs);
@@ -1147,6 +1262,8 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
}
rc = -ENOMEM;
} else if (!aio_req->write) {
+ if (sparse && rc >= 0)
+ rc = ceph_sparse_ext_map_end(op);
if (rc == -ENOENT)
rc = 0;
if (rc >= 0 && len > rc) {
@@ -1283,6 +1400,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
loff_t pos = iocb->ki_pos;
bool write = iov_iter_rw(iter) == WRITE;
bool should_dirty = !write && user_backed_iter(iter);
+ bool sparse = ceph_test_mount_opt(fsc, SPARSEREAD);
if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
return -EROFS;
@@ -1310,6 +1428,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
while (iov_iter_count(iter) > 0) {
u64 size = iov_iter_count(iter);
ssize_t len;
+ struct ceph_osd_req_op *op;
+ int readop = sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ;
if (write)
size = min_t(u64, size, fsc->mount_options->wsize);
@@ -1320,8 +1440,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
vino, pos, &size, 0,
1,
- write ? CEPH_OSD_OP_WRITE :
- CEPH_OSD_OP_READ,
+ write ? CEPH_OSD_OP_WRITE : readop,
flags, snapc,
ci->i_truncate_seq,
ci->i_truncate_size,
@@ -1372,6 +1491,14 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
}
osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
+ op = &req->r_ops[0];
+ if (sparse) {
+ ret = ceph_alloc_sparse_ext_map(op);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ break;
+ }
+ }
if (aio_req) {
aio_req->total_len += len;
@@ -1399,8 +1526,11 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
size = i_size_read(inode);
if (!write) {
- if (ret == -ENOENT)
+ if (sparse && ret >= 0)
+ ret = ceph_sparse_ext_map_end(op);
+ else if (ret == -ENOENT)
ret = 0;
+
if (ret >= 0 && ret < len && pos + ret < size) {
struct iov_iter i;
int zlen = min_t(size_t, len - ret,
@@ -1481,13 +1611,12 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- struct ceph_vino vino;
+ struct ceph_osd_client *osdc = &fsc->client->osdc;
struct ceph_osd_request *req;
struct page **pages;
u64 len;
int num_pages;
int written = 0;
- int flags;
int ret;
bool check_caps = false;
struct timespec64 mtime = current_time(inode);
@@ -1505,79 +1634,350 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
return ret;
ceph_fscache_invalidate(inode, false);
- ret = invalidate_inode_pages2_range(inode->i_mapping,
- pos >> PAGE_SHIFT,
- (pos + count - 1) >> PAGE_SHIFT);
- if (ret < 0)
- dout("invalidate_inode_pages2_range returned %d\n", ret);
-
- flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
while ((len = iov_iter_count(from)) > 0) {
size_t left;
int n;
-
- vino = ceph_vino(inode);
- req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
- vino, pos, &len, 0, 1,
- CEPH_OSD_OP_WRITE, flags, snapc,
- ci->i_truncate_seq,
- ci->i_truncate_size,
- false);
- if (IS_ERR(req)) {
- ret = PTR_ERR(req);
- break;
- }
+ u64 write_pos = pos;
+ u64 write_len = len;
+ u64 objnum, objoff;
+ u32 xlen;
+ u64 assert_ver = 0;
+ bool rmw;
+ bool first, last;
+ struct iov_iter saved_iter = *from;
+ size_t off;
+
+ ceph_fscrypt_adjust_off_and_len(inode, &write_pos, &write_len);
+
+ /* clamp the length to the end of first object */
+ ceph_calc_file_object_mapping(&ci->i_layout, write_pos,
+ write_len, &objnum, &objoff,
+ &xlen);
+ write_len = xlen;
+
+ /* adjust len downward if it goes beyond current object */
+ if (pos + len > write_pos + write_len)
+ len = write_pos + write_len - pos;
/*
- * write from beginning of first page,
- * regardless of io alignment
+ * If we had to adjust the length or position to align with a
+ * crypto block, then we must do a read/modify/write cycle. We
+ * use a version assertion to redrive the thing if something
+ * changes in between.
*/
- num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ first = pos != write_pos;
+ last = (pos + len) != (write_pos + write_len);
+ rmw = first || last;
+
+ dout("sync_write ino %llx %lld~%llu adjusted %lld~%llu -- %srmw\n",
+ ci->i_vino.ino, pos, len, write_pos, write_len,
+ rmw ? "" : "no ");
+ /*
+ * The data is laid out in the pages as it would be in an
+ * array of pagecache pages.
+ */
+ num_pages = calc_pages_for(write_pos, write_len);
pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
- goto out;
+ break;
+ }
+
+ /* Do we need to preload the pages? */
+ if (rmw) {
+ u64 first_pos = write_pos;
+ u64 last_pos = (write_pos + write_len) - CEPH_FSCRYPT_BLOCK_SIZE;
+ u64 read_len = CEPH_FSCRYPT_BLOCK_SIZE;
+ struct ceph_osd_req_op *op;
+
+ /* We should only need to do this for encrypted inodes */
+ WARN_ON_ONCE(!IS_ENCRYPTED(inode));
+
+ /* No need to do two reads if first and last blocks are same */
+ if (first && last_pos == first_pos)
+ last = false;
+
+ /*
+ * Allocate a read request for one or two extents,
+ * depending on how the request was aligned.
+ */
+ req = ceph_osdc_new_request(osdc, &ci->i_layout,
+ ci->i_vino, first ? first_pos : last_pos,
+ &read_len, 0, (first && last) ? 2 : 1,
+ CEPH_OSD_OP_SPARSE_READ, CEPH_OSD_FLAG_READ,
+ NULL, ci->i_truncate_seq,
+ ci->i_truncate_size, false);
+ if (IS_ERR(req)) {
+ ceph_release_page_vector(pages, num_pages);
+ ret = PTR_ERR(req);
+ break;
+ }
+
+ /* Something is misaligned! */
+ if (read_len != CEPH_FSCRYPT_BLOCK_SIZE) {
+ ceph_osdc_put_request(req);
+ ceph_release_page_vector(pages, num_pages);
+ ret = -EIO;
+ break;
+ }
+
+ /* Add extent for first block? */
+ op = &req->r_ops[0];
+
+ if (first) {
+ osd_req_op_extent_osd_data_pages(req, 0, pages,
+ CEPH_FSCRYPT_BLOCK_SIZE,
+ offset_in_page(first_pos),
+ false, false);
+ /* We only expect a single extent here */
+ ret = __ceph_alloc_sparse_ext_map(op, 1);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+ }
+
+ /* Add extent for last block */
+ if (last) {
+ /* Init the other extent if first extent has been used */
+ if (first) {
+ op = &req->r_ops[1];
+ osd_req_op_extent_init(req, 1,
+ CEPH_OSD_OP_SPARSE_READ,
+ last_pos, CEPH_FSCRYPT_BLOCK_SIZE,
+ ci->i_truncate_size,
+ ci->i_truncate_seq);
+ }
+
+ ret = __ceph_alloc_sparse_ext_map(op, 1);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+
+ osd_req_op_extent_osd_data_pages(req, first ? 1 : 0,
+ &pages[num_pages - 1],
+ CEPH_FSCRYPT_BLOCK_SIZE,
+ offset_in_page(last_pos),
+ false, false);
+ }
+
+ ceph_osdc_start_request(osdc, req);
+ ret = ceph_osdc_wait_request(osdc, req);
+
+ /* FIXME: length field is wrong if there are 2 extents */
+ ceph_update_read_metrics(&fsc->mdsc->metric,
+ req->r_start_latency,
+ req->r_end_latency,
+ read_len, ret);
+
+ /* Ok if object is not already present */
+ if (ret == -ENOENT) {
+ /*
+ * If there is no object, then we can't assert
+ * on its version. Leave assert_ver at 0 and use
+ * an exclusive create instead.
+ */
+ ceph_osdc_put_request(req);
+ ret = 0;
+
+ /*
+ * zero out the soon-to-be uncopied parts of the
+ * first and last pages.
+ */
+ if (first)
+ zero_user_segment(pages[0], 0,
+ offset_in_page(first_pos));
+ if (last)
+ zero_user_segment(pages[num_pages - 1],
+ offset_in_page(last_pos),
+ PAGE_SIZE);
+ } else {
+ if (ret < 0) {
+ ceph_osdc_put_request(req);
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+
+ op = &req->r_ops[0];
+ if (op->extent.sparse_ext_cnt == 0) {
+ if (first)
+ zero_user_segment(pages[0], 0,
+ offset_in_page(first_pos));
+ else
+ zero_user_segment(pages[num_pages - 1],
+ offset_in_page(last_pos),
+ PAGE_SIZE);
+ } else if (op->extent.sparse_ext_cnt != 1 ||
+ ceph_sparse_ext_map_end(op) !=
+ CEPH_FSCRYPT_BLOCK_SIZE) {
+ ret = -EIO;
+ ceph_osdc_put_request(req);
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+
+ if (first && last) {
+ op = &req->r_ops[1];
+ if (op->extent.sparse_ext_cnt == 0) {
+ zero_user_segment(pages[num_pages - 1],
+ offset_in_page(last_pos),
+ PAGE_SIZE);
+ } else if (op->extent.sparse_ext_cnt != 1 ||
+ ceph_sparse_ext_map_end(op) !=
+ CEPH_FSCRYPT_BLOCK_SIZE) {
+ ret = -EIO;
+ ceph_osdc_put_request(req);
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+ }
+
+ /* Grab assert version. It must be non-zero. */
+ assert_ver = req->r_version;
+ WARN_ON_ONCE(ret > 0 && assert_ver == 0);
+
+ ceph_osdc_put_request(req);
+ if (first) {
+ ret = ceph_fscrypt_decrypt_block_inplace(inode,
+ pages[0], CEPH_FSCRYPT_BLOCK_SIZE,
+ offset_in_page(first_pos),
+ first_pos >> CEPH_FSCRYPT_BLOCK_SHIFT);
+ if (ret < 0) {
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+ }
+ if (last) {
+ ret = ceph_fscrypt_decrypt_block_inplace(inode,
+ pages[num_pages - 1],
+ CEPH_FSCRYPT_BLOCK_SIZE,
+ offset_in_page(last_pos),
+ last_pos >> CEPH_FSCRYPT_BLOCK_SHIFT);
+ if (ret < 0) {
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+ }
+ }
}
left = len;
+ off = offset_in_page(pos);
for (n = 0; n < num_pages; n++) {
- size_t plen = min_t(size_t, left, PAGE_SIZE);
- ret = copy_page_from_iter(pages[n], 0, plen, from);
+ size_t plen = min_t(size_t, left, PAGE_SIZE - off);
+
+ /* copy the data */
+ ret = copy_page_from_iter(pages[n], off, plen, from);
if (ret != plen) {
ret = -EFAULT;
break;
}
+ off = 0;
left -= ret;
}
-
if (ret < 0) {
+ dout("sync_write write failed with %d\n", ret);
ceph_release_page_vector(pages, num_pages);
- goto out;
+ break;
}
- req->r_inode = inode;
+ if (IS_ENCRYPTED(inode)) {
+ ret = ceph_fscrypt_encrypt_pages(inode, pages,
+ write_pos, write_len,
+ GFP_KERNEL);
+ if (ret < 0) {
+ dout("encryption failed with %d\n", ret);
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+ }
- osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
- false, true);
+ req = ceph_osdc_new_request(osdc, &ci->i_layout,
+ ci->i_vino, write_pos, &write_len,
+ rmw ? 1 : 0, rmw ? 2 : 1,
+ CEPH_OSD_OP_WRITE,
+ CEPH_OSD_FLAG_WRITE,
+ snapc, ci->i_truncate_seq,
+ ci->i_truncate_size, false);
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ ceph_release_page_vector(pages, num_pages);
+ break;
+ }
+ dout("sync_write write op %lld~%llu\n", write_pos, write_len);
+ osd_req_op_extent_osd_data_pages(req, rmw ? 1 : 0, pages, write_len,
+ offset_in_page(write_pos), false,
+ true);
+ req->r_inode = inode;
req->r_mtime = mtime;
- ceph_osdc_start_request(&fsc->client->osdc, req);
- ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+
+ /* Set up the assertion */
+ if (rmw) {
+ /*
+ * Set up the assertion. If we don't have a version
+ * number, then the object doesn't exist yet. Use an
+ * exclusive create instead of a version assertion in
+ * that case.
+ */
+ if (assert_ver) {
+ osd_req_op_init(req, 0, CEPH_OSD_OP_ASSERT_VER, 0);
+ req->r_ops[0].assert_ver.ver = assert_ver;
+ } else {
+ osd_req_op_init(req, 0, CEPH_OSD_OP_CREATE,
+ CEPH_OSD_OP_FLAG_EXCL);
+ }
+ }
+
+ ceph_osdc_start_request(osdc, req);
+ ret = ceph_osdc_wait_request(osdc, req);
ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
req->r_end_latency, len, ret);
-out:
ceph_osdc_put_request(req);
if (ret != 0) {
+ dout("sync_write osd write returned %d\n", ret);
+ /* Version changed! Must re-do the rmw cycle */
+ if ((assert_ver && (ret == -ERANGE || ret == -EOVERFLOW)) ||
+ (!assert_ver && ret == -EEXIST)) {
+ /* We should only ever see this on a rmw */
+ WARN_ON_ONCE(!rmw);
+
+ /* The version should never go backward */
+ WARN_ON_ONCE(ret == -EOVERFLOW);
+
+ *from = saved_iter;
+
+ /* FIXME: limit number of times we loop? */
+ continue;
+ }
ceph_set_error_write(ci);
break;
}
ceph_clear_error_write(ci);
+
+ /*
+ * We successfully wrote to a range of the file. Declare
+ * that region of the pagecache invalid.
+ */
+ ret = invalidate_inode_pages2_range(
+ inode->i_mapping,
+ pos >> PAGE_SHIFT,
+ (pos + len - 1) >> PAGE_SHIFT);
+ if (ret < 0) {
+ dout("invalidate_inode_pages2_range returned %d\n",
+ ret);
+ ret = 0;
+ }
pos += len;
written += len;
+ dout("sync_write written %d\n", written);
if (pos > i_size_read(inode)) {
check_caps = ceph_inode_set_size(inode, pos);
if (check_caps)
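Editor's note: the sync-write hunk above decides whether a read/modify/write cycle is required by comparing the caller's range with the block-aligned range. A minimal sketch of that first/last/rmw decision, mirroring the logic above but not the kernel code:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define FSCRYPT_BLOCK_SIZE 4096ULL	/* assumed for illustration */

int main(void)
{
	uint64_t pos = 5000, len = 100;
	uint64_t write_pos = pos - pos % FSCRYPT_BLOCK_SIZE;
	uint64_t write_end = pos + len;

	write_end += FSCRYPT_BLOCK_SIZE - 1;
	write_end -= write_end % FSCRYPT_BLOCK_SIZE;

	/* a partial leading or trailing block forces an RMW cycle */
	bool first = pos != write_pos;
	bool last = (pos + len) != write_end;
	bool rmw = first || last;

	printf("first=%d last=%d rmw=%d\n", first, last, rmw);
	return 0;
}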
@@ -1591,6 +1991,7 @@ out:
ret = written;
iocb->ki_pos = pos;
}
+ dout("sync_write returning %d\n", ret);
return ret;
}
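Editor's note: the write path above redrives the RMW cycle when the version assertion (or the exclusive create) fails. A small sketch of that retry predicate, using the same error codes; illustrative only, not the kernel code:

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

/*
 * When a version assertion was armed, -ERANGE/-EOVERFLOW means the
 * object changed under us; without one, -EEXIST means the exclusive
 * create lost a race. Either way the RMW cycle is redone from the
 * saved iterator.
 */
static bool must_redo_rmw(bool assert_ver, int ret)
{
	if (assert_ver)
		return ret == -ERANGE || ret == -EOVERFLOW;
	return ret == -EEXIST;
}

int main(void)
{
	printf("%d %d\n", must_redo_rmw(true, -ERANGE),
	       must_redo_rmw(false, -EEXIST));	/* 1 1 */
	return 0;
}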
@@ -1648,7 +2049,9 @@ again:
ceph_cap_string(got));
if (!ceph_has_inline_data(ci)) {
- if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
+ if (!retry_op &&
+ (iocb->ki_flags & IOCB_DIRECT) &&
+ !IS_ENCRYPTED(inode)) {
ret = ceph_direct_read_write(iocb, to,
NULL, NULL);
if (ret >= 0 && ret < len)
@@ -1934,7 +2337,7 @@ retry_snap:
/* we might need to revert back to that point */
data = *from;
- if (iocb->ki_flags & IOCB_DIRECT)
+ if ((iocb->ki_flags & IOCB_DIRECT) && !IS_ENCRYPTED(inode))
written = ceph_direct_read_write(iocb, &data, snapc,
&prealloc_cf);
else
@@ -2165,6 +2568,9 @@ static long ceph_fallocate(struct file *file, int mode,
if (!S_ISREG(inode->i_mode))
return -EOPNOTSUPP;
+ if (IS_ENCRYPTED(inode))
+ return -EOPNOTSUPP;
+
prealloc_cf = ceph_alloc_cap_flush();
if (!prealloc_cf)
return -ENOMEM;
@@ -2486,6 +2892,10 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
return -EOPNOTSUPP;
}
+ /* Every encrypted inode gets its own key, so we can't offload them */
+ if (IS_ENCRYPTED(src_inode) || IS_ENCRYPTED(dst_inode))
+ return -EOPNOTSUPP;
+
if (len < src_ci->i_layout.object_size)
return -EOPNOTSUPP; /* no remote copy will be done */
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index fd05d68e2990..800ab7920513 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -14,10 +14,12 @@
#include <linux/random.h>
#include <linux/sort.h>
#include <linux/iversion.h>
+#include <linux/fscrypt.h>
#include "super.h"
#include "mds_client.h"
#include "cache.h"
+#include "crypto.h"
#include <linux/ceph/decode.h>
/*
@@ -33,6 +35,7 @@
*/
static const struct inode_operations ceph_symlink_iops;
+static const struct inode_operations ceph_encrypted_symlink_iops;
static void ceph_inode_work(struct work_struct *work);
@@ -52,17 +55,99 @@ static int ceph_set_ino_cb(struct inode *inode, void *data)
return 0;
}
-struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
+/**
+ * ceph_new_inode - allocate a new inode in advance of an expected create
+ * @dir: parent directory for new inode
+ * @dentry: dentry that may eventually point to new inode
+ * @mode: mode of new inode
+ * @as_ctx: pointer to inherited security context
+ *
+ * Allocate a new inode in advance of an operation to create a new inode.
+ * This allocates the inode and sets up the acl_sec_ctx with appropriate
+ * info for the new inode.
+ *
+ * Returns a pointer to the new inode or an ERR_PTR.
+ */
+struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
+ umode_t *mode, struct ceph_acl_sec_ctx *as_ctx)
+{
+ int err;
+ struct inode *inode;
+
+ inode = new_inode(dir->i_sb);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ if (!S_ISLNK(*mode)) {
+ err = ceph_pre_init_acls(dir, mode, as_ctx);
+ if (err < 0)
+ goto out_err;
+ }
+
+ inode->i_state = 0;
+ inode->i_mode = *mode;
+
+ err = ceph_security_init_secctx(dentry, *mode, as_ctx);
+ if (err < 0)
+ goto out_err;
+
+ /*
+ * We'll skip setting fscrypt context for snapshots, leaving that for
+ * the handle_reply().
+ */
+ if (ceph_snap(dir) != CEPH_SNAPDIR) {
+ err = ceph_fscrypt_prepare_context(dir, inode, as_ctx);
+ if (err)
+ goto out_err;
+ }
+
+ return inode;
+out_err:
+ iput(inode);
+ return ERR_PTR(err);
+}
+
+void ceph_as_ctx_to_req(struct ceph_mds_request *req,
+ struct ceph_acl_sec_ctx *as_ctx)
+{
+ if (as_ctx->pagelist) {
+ req->r_pagelist = as_ctx->pagelist;
+ as_ctx->pagelist = NULL;
+ }
+ ceph_fscrypt_as_ctx_to_req(req, as_ctx);
+}
+
+/**
+ * ceph_get_inode - find or create/hash a new inode
+ * @sb: superblock to search and allocate in
+ * @vino: vino to search for
+ * @newino: optional new inode to insert if one isn't found (may be NULL)
+ *
+ * Search for or insert a new inode into the hash for the given vino, and
+ * return a reference to it. If @newino is non-NULL, its reference is consumed.
+ */
+struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
+ struct inode *newino)
{
struct inode *inode;
if (ceph_vino_is_reserved(vino))
return ERR_PTR(-EREMOTEIO);
- inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare,
- ceph_set_ino_cb, &vino);
- if (!inode)
+ if (newino) {
+ inode = inode_insert5(newino, (unsigned long)vino.ino,
+ ceph_ino_compare, ceph_set_ino_cb, &vino);
+ if (inode != newino)
+ iput(newino);
+ } else {
+ inode = iget5_locked(sb, (unsigned long)vino.ino,
+ ceph_ino_compare, ceph_set_ino_cb, &vino);
+ }
+
+ if (!inode) {
+ dout("No inode found for %llx.%llx\n", vino.ino, vino.snap);
return ERR_PTR(-ENOMEM);
+ }
dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode),
ceph_vinop(inode), inode, !!(inode->i_state & I_NEW));
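Editor's note: the kernel-doc above describes insert-or-reuse semantics where the preallocated inode's reference is consumed on a collision. A toy userspace model of that pattern (a one-level cache standing in for the inode hash; purely illustrative, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct toy_inode { unsigned long ino; };

static struct toy_inode *cache[16];

/* Hand over the candidate; reuse and free it if the ino is already hashed. */
static struct toy_inode *get_inode(unsigned long ino, struct toy_inode *newino)
{
	unsigned slot = ino % 16;

	if (!newino)
		return NULL;
	if (cache[slot] && cache[slot]->ino == ino) {
		free(newino);			/* candidate reference is consumed */
		return cache[slot];
	}
	newino->ino = ino;
	cache[slot] = newino;
	return newino;
}

int main(void)
{
	struct toy_inode *a = get_inode(42, malloc(sizeof(struct toy_inode)));
	struct toy_inode *b = get_inode(42, malloc(sizeof(struct toy_inode)));

	printf("same object: %d\n", a == b);	/* 1 */
	return 0;
}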
@@ -78,8 +163,9 @@ struct inode *ceph_get_snapdir(struct inode *parent)
.ino = ceph_ino(parent),
.snap = CEPH_SNAPDIR,
};
- struct inode *inode = ceph_get_inode(parent->i_sb, vino);
+ struct inode *inode = ceph_get_inode(parent->i_sb, vino, NULL);
struct ceph_inode_info *ci = ceph_inode(inode);
+ int ret = -ENOTDIR;
if (IS_ERR(inode))
return inode;
@@ -105,6 +191,24 @@ struct inode *ceph_get_snapdir(struct inode *parent)
ci->i_rbytes = 0;
ci->i_btime = ceph_inode(parent)->i_btime;
+#ifdef CONFIG_FS_ENCRYPTION
+ /* if encrypted, just borrow fscrypt_auth from parent */
+ if (IS_ENCRYPTED(parent)) {
+ struct ceph_inode_info *pci = ceph_inode(parent);
+
+ ci->fscrypt_auth = kmemdup(pci->fscrypt_auth,
+ pci->fscrypt_auth_len,
+ GFP_KERNEL);
+ if (ci->fscrypt_auth) {
+ inode->i_flags |= S_ENCRYPTED;
+ ci->fscrypt_auth_len = pci->fscrypt_auth_len;
+ } else {
+ dout("Failed to alloc snapdir fscrypt_auth\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+#endif
if (inode->i_state & I_NEW) {
inode->i_op = &ceph_snapdir_iops;
inode->i_fop = &ceph_snapdir_fops;
@@ -118,7 +222,7 @@ err:
discard_new_inode(inode);
else
iput(inode);
- return ERR_PTR(-ENOTDIR);
+ return ERR_PTR(ret);
}
const struct inode_operations ceph_file_iops = {
@@ -517,6 +621,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ci->i_truncate_seq = 0;
ci->i_truncate_size = 0;
ci->i_truncate_pending = 0;
+ ci->i_truncate_pagecache_size = 0;
ci->i_max_size = 0;
ci->i_reported_size = 0;
@@ -547,6 +652,10 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
INIT_WORK(&ci->i_work, ceph_inode_work);
ci->i_work_mask = 0;
memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
+#ifdef CONFIG_FS_ENCRYPTION
+ ci->fscrypt_auth = NULL;
+ ci->fscrypt_auth_len = 0;
+#endif
return &ci->netfs.inode;
}
@@ -555,6 +664,10 @@ void ceph_free_inode(struct inode *inode)
struct ceph_inode_info *ci = ceph_inode(inode);
kfree(ci->i_symlink);
+#ifdef CONFIG_FS_ENCRYPTION
+ kfree(ci->fscrypt_auth);
+#endif
+ fscrypt_free_inode(inode);
kmem_cache_free(ceph_inode_cachep, ci);
}
@@ -575,6 +688,7 @@ void ceph_evict_inode(struct inode *inode)
clear_inode(inode);
ceph_fscache_unregister_inode_cookie(ci);
+ fscrypt_put_encryption_info(inode);
__ceph_remove_caps(ci);
@@ -650,7 +764,7 @@ int ceph_fill_file_size(struct inode *inode, int issued,
ceph_fscache_update(inode);
ci->i_reported_size = size;
if (truncate_seq != ci->i_truncate_seq) {
- dout("truncate_seq %u -> %u\n",
+ dout("%s truncate_seq %u -> %u\n", __func__,
ci->i_truncate_seq, truncate_seq);
ci->i_truncate_seq = truncate_seq;
@@ -674,11 +788,26 @@ int ceph_fill_file_size(struct inode *inode, int issued,
}
}
}
- if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
- ci->i_truncate_size != truncate_size) {
- dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
- truncate_size);
+
+ /*
+ * It's possible that two consecutive truncations will land in the
+ * same fscrypt last block; even then, the corresponding page cache
+ * still needs to be truncated.
+ */
+ if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0) {
+ dout("%s truncate_size %lld -> %llu, encrypted %d\n", __func__,
+ ci->i_truncate_size, truncate_size, !!IS_ENCRYPTED(inode));
+
ci->i_truncate_size = truncate_size;
+
+ if (IS_ENCRYPTED(inode)) {
+ dout("%s truncate_pagecache_size %lld -> %llu\n",
+ __func__, ci->i_truncate_pagecache_size, size);
+ ci->i_truncate_pagecache_size = size;
+ } else {
+ ci->i_truncate_pagecache_size = truncate_size;
+ }
}
return queue_trunc;
}
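Editor's note: the hunk above keeps a separate i_truncate_pagecache_size for encrypted inodes. A minimal sketch of the distinction, assuming the MDS-side truncate_size is rounded up to a full fscrypt block while the page cache is cut at the real size (illustrative values only):

#include <stdio.h>
#include <stdint.h>

#define FSCRYPT_BLOCK_SIZE 4096ULL	/* assumed for illustration */

int main(void)
{
	uint64_t size = 5000;		/* real file size after truncate */
	uint64_t truncate_size =	/* block-aligned size, as tracked remotely */
		(size + FSCRYPT_BLOCK_SIZE - 1) / FSCRYPT_BLOCK_SIZE *
		FSCRYPT_BLOCK_SIZE;

	printf("truncate_size=%llu pagecache cut at %llu\n",
	       (unsigned long long)truncate_size, (unsigned long long)size);
	return 0;
}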
@@ -752,6 +881,34 @@ void ceph_fill_file_time(struct inode *inode, int issued,
inode, time_warp_seq, ci->i_time_warp_seq);
}
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+static int decode_encrypted_symlink(const char *encsym, int enclen, u8 **decsym)
+{
+ int declen;
+ u8 *sym;
+
+ sym = kmalloc(enclen + 1, GFP_NOFS);
+ if (!sym)
+ return -ENOMEM;
+
+ declen = ceph_base64_decode(encsym, enclen, sym);
+ if (declen < 0) {
+ pr_err("%s: can't decode symlink (%d). Content: %.*s\n",
+ __func__, declen, enclen, encsym);
+ kfree(sym);
+ return -EIO;
+ }
+ sym[declen] = '\0';
+ *decsym = sym;
+ return declen;
+}
+#else
+static int decode_encrypted_symlink(const char *encsym, int symlen, u8 **decsym)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
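Editor's note: decode_encrypted_symlink() above allocates enclen + 1 bytes and NUL-terminates the decoded target. A small sketch of why that buffer always suffices; the divisor arithmetic is an assumption for illustration, not the kernel base64 helper.

#include <stdio.h>

/*
 * Unpadded base64 yields at most 3 bytes per 4 input chars, so
 * enclen + 1 bytes always hold the decoded target plus its
 * terminating NUL at decsym[declen].
 */
static size_t max_decoded_len(size_t enclen)
{
	return enclen / 4 * 3 + (enclen % 4 ? (enclen % 4) - 1 : 0);
}

int main(void)
{
	size_t enclen = 15;	/* encoded length of an 11-byte target */

	printf("decoded length <= %zu (buffer %zu bytes)\n",
	       max_decoded_len(enclen), enclen + 1);
	return 0;
}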
/*
* Populate an inode based on info from mds. May be called on new or
* existing inodes.
@@ -857,15 +1014,20 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
issued |= __ceph_caps_dirty(ci);
new_issued = ~issued & info_caps;
- /* directories have fl_stripe_unit set to zero */
- if (le32_to_cpu(info->layout.fl_stripe_unit))
- inode->i_blkbits =
- fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
- else
- inode->i_blkbits = CEPH_BLOCK_SHIFT;
-
__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
+#ifdef CONFIG_FS_ENCRYPTION
+ if (iinfo->fscrypt_auth_len &&
+ ((inode->i_state & I_NEW) || (ci->fscrypt_auth_len == 0))) {
+ kfree(ci->fscrypt_auth);
+ ci->fscrypt_auth_len = iinfo->fscrypt_auth_len;
+ ci->fscrypt_auth = iinfo->fscrypt_auth;
+ iinfo->fscrypt_auth = NULL;
+ iinfo->fscrypt_auth_len = 0;
+ inode_set_flags(inode, S_ENCRYPTED, S_ENCRYPTED);
+ }
+#endif
+
if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
(issued & CEPH_CAP_AUTH_EXCL) == 0) {
inode->i_mode = mode;
@@ -878,6 +1040,15 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
}
+ /* directories have fl_stripe_unit set to zero */
+ if (IS_ENCRYPTED(inode))
+ inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT;
+ else if (le32_to_cpu(info->layout.fl_stripe_unit))
+ inode->i_blkbits =
+ fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+ else
+ inode->i_blkbits = CEPH_BLOCK_SHIFT;
+
if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
(issued & CEPH_CAP_LINK_EXCL) == 0)
set_nlink(inode, le32_to_cpu(info->nlink));
@@ -899,6 +1070,7 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
if (new_version ||
(new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
+ u64 size = le64_to_cpu(info->size);
s64 old_pool = ci->i_layout.pool_id;
struct ceph_string *old_ns;
@@ -912,10 +1084,22 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
pool_ns = old_ns;
+ if (IS_ENCRYPTED(inode) && size &&
+ iinfo->fscrypt_file_len == sizeof(__le64)) {
+ u64 fsize = __le64_to_cpu(*(__le64 *)iinfo->fscrypt_file);
+
+ if (size == round_up(fsize, CEPH_FSCRYPT_BLOCK_SIZE)) {
+ size = fsize;
+ } else {
+ pr_warn("fscrypt size mismatch: size=%llu fscrypt_file=%llu, discarding fscrypt_file size.\n",
+ size, fsize);
+ }
+ }
+
queue_trunc = ceph_fill_file_size(inode, issued,
le32_to_cpu(info->truncate_seq),
le64_to_cpu(info->truncate_size),
- le64_to_cpu(info->size));
+ size);
/* only update max_size on auth cap */
if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
ci->i_max_size != le64_to_cpu(info->max_size)) {
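Editor's note: the hunk above only trusts the byte count carried in fscrypt_file when rounding it up to a full fscrypt block reproduces the size the MDS reported. A minimal sketch of that consistency check (illustrative values, assumed block size):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define FSCRYPT_BLOCK_SIZE 4096ULL	/* assumed for illustration */

static uint64_t round_up_block(uint64_t v)
{
	return (v + FSCRYPT_BLOCK_SIZE - 1) / FSCRYPT_BLOCK_SIZE *
	       FSCRYPT_BLOCK_SIZE;
}

int main(void)
{
	uint64_t reported = 8192;	/* block-aligned size from the MDS */
	uint64_t fsize = 5000;		/* real byte count from fscrypt_file */
	bool ok = reported == round_up_block(fsize);

	printf("use %llu\n",
	       (unsigned long long)(ok ? fsize : reported));	/* 5000 */
	return 0;
}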
@@ -975,26 +1159,42 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
inode->i_fop = &ceph_file_fops;
break;
case S_IFLNK:
- inode->i_op = &ceph_symlink_iops;
if (!ci->i_symlink) {
u32 symlen = iinfo->symlink_len;
char *sym;
spin_unlock(&ci->i_ceph_lock);
- if (symlen != i_size_read(inode)) {
- pr_err("%s %llx.%llx BAD symlink "
- "size %lld\n", __func__,
- ceph_vinop(inode),
- i_size_read(inode));
+ if (IS_ENCRYPTED(inode)) {
+ if (symlen != i_size_read(inode))
+ pr_err("%s %llx.%llx BAD symlink size %lld\n",
+ __func__, ceph_vinop(inode),
+ i_size_read(inode));
+
+ err = decode_encrypted_symlink(iinfo->symlink,
+ symlen, (u8 **)&sym);
+ if (err < 0) {
+ pr_err("%s decoding encrypted symlink failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+ symlen = err;
i_size_write(inode, symlen);
inode->i_blocks = calc_inode_blocks(symlen);
- }
+ } else {
+ if (symlen != i_size_read(inode)) {
+ pr_err("%s %llx.%llx BAD symlink size %lld\n",
+ __func__, ceph_vinop(inode),
+ i_size_read(inode));
+ i_size_write(inode, symlen);
+ inode->i_blocks = calc_inode_blocks(symlen);
+ }
- err = -ENOMEM;
- sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
- if (!sym)
- goto out;
+ err = -ENOMEM;
+ sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
+ if (!sym)
+ goto out;
+ }
spin_lock(&ci->i_ceph_lock);
if (!ci->i_symlink)
@@ -1002,7 +1202,17 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
else
kfree(sym); /* lost a race */
}
- inode->i_link = ci->i_symlink;
+
+ if (IS_ENCRYPTED(inode)) {
+ /*
+ * Encrypted symlinks need to be decrypted before we can
+ * cache their targets in i_link. Don't touch it here.
+ */
+ inode->i_op = &ceph_encrypted_symlink_iops;
+ } else {
+ inode->i_link = ci->i_symlink;
+ inode->i_op = &ceph_symlink_iops;
+ }
break;
case S_IFDIR:
inode->i_op = &ceph_dir_iops;
@@ -1310,8 +1520,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
+ bool is_nokey = false;
struct qstr dname;
struct dentry *dn, *parent;
+ struct fscrypt_str oname = FSTR_INIT(NULL, 0);
+ struct ceph_fname fname = { .dir = dir,
+ .name = rinfo->dname,
+ .ctext = rinfo->altname,
+ .name_len = rinfo->dname_len,
+ .ctext_len = rinfo->altname_len };
BUG_ON(!rinfo->head->is_target);
BUG_ON(req->r_dentry);
@@ -1319,8 +1536,20 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
parent = d_find_any_alias(dir);
BUG_ON(!parent);
- dname.name = rinfo->dname;
- dname.len = rinfo->dname_len;
+ err = ceph_fname_alloc_buffer(dir, &oname);
+ if (err < 0) {
+ dput(parent);
+ goto done;
+ }
+
+ err = ceph_fname_to_usr(&fname, NULL, &oname, &is_nokey);
+ if (err < 0) {
+ dput(parent);
+ ceph_fname_free_buffer(dir, &oname);
+ goto done;
+ }
+ dname.name = oname.name;
+ dname.len = oname.len;
dname.hash = full_name_hash(parent, dname.name, dname.len);
tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
@@ -1335,9 +1564,15 @@ retry_lookup:
dname.len, dname.name, dn);
if (!dn) {
dput(parent);
+ ceph_fname_free_buffer(dir, &oname);
err = -ENOMEM;
goto done;
}
+ if (is_nokey) {
+ spin_lock(&dn->d_lock);
+ dn->d_flags |= DCACHE_NOKEY_NAME;
+ spin_unlock(&dn->d_lock);
+ }
err = 0;
} else if (d_really_is_positive(dn) &&
(ceph_ino(d_inode(dn)) != tvino.ino ||
@@ -1349,6 +1584,7 @@ retry_lookup:
dput(dn);
goto retry_lookup;
}
+ ceph_fname_free_buffer(dir, &oname);
req->r_dentry = dn;
dput(parent);
@@ -1552,7 +1788,7 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
vino.ino = le64_to_cpu(rde->inode.in->ino);
vino.snap = le64_to_cpu(rde->inode.in->snapid);
- in = ceph_get_inode(req->r_dentry->d_sb, vino);
+ in = ceph_get_inode(req->r_dentry->d_sb, vino, NULL);
if (IS_ERR(in)) {
err = PTR_ERR(in);
dout("new_inode badness got %d\n", err);
@@ -1630,7 +1866,8 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
struct ceph_mds_session *session)
{
struct dentry *parent = req->r_dentry;
- struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
+ struct inode *inode = d_inode(parent);
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
struct qstr dname;
struct dentry *dn;
@@ -1704,9 +1941,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
tvino.snap = le64_to_cpu(rde->inode.in->snapid);
if (rinfo->hash_order) {
- u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
- rde->name, rde->name_len);
- hash = ceph_frag_value(hash);
+ u32 hash = ceph_frag_value(rde->raw_hash);
if (hash != last_hash)
fpos_offset = 2;
last_hash = hash;
@@ -1729,6 +1964,11 @@ retry_lookup:
err = -ENOMEM;
goto out;
}
+ if (rde->is_nokey) {
+ spin_lock(&dn->d_lock);
+ dn->d_flags |= DCACHE_NOKEY_NAME;
+ spin_unlock(&dn->d_lock);
+ }
} else if (d_really_is_positive(dn) &&
(ceph_ino(d_inode(dn)) != tvino.ino ||
ceph_snap(d_inode(dn)) != tvino.snap)) {
@@ -1754,7 +1994,7 @@ retry_lookup:
if (d_really_is_positive(dn)) {
in = d_inode(dn);
} else {
- in = ceph_get_inode(parent->d_sb, tvino);
+ in = ceph_get_inode(parent->d_sb, tvino, NULL);
if (IS_ERR(in)) {
dout("new_inode badness\n");
d_drop(dn);
@@ -1927,7 +2167,7 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
retry:
spin_lock(&ci->i_ceph_lock);
if (ci->i_truncate_pending == 0) {
- dout("__do_pending_vmtruncate %p none pending\n", inode);
+ dout("%s %p none pending\n", __func__, inode);
spin_unlock(&ci->i_ceph_lock);
mutex_unlock(&ci->i_truncate_mutex);
return;
@@ -1939,8 +2179,7 @@ retry:
*/
if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
spin_unlock(&ci->i_ceph_lock);
- dout("__do_pending_vmtruncate %p flushing snaps first\n",
- inode);
+ dout("%s %p flushing snaps first\n", __func__, inode);
filemap_write_and_wait_range(&inode->i_data, 0,
inode->i_sb->s_maxbytes);
goto retry;
@@ -1949,9 +2188,9 @@ retry:
/* there should be no reader or writer */
WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
- to = ci->i_truncate_size;
+ to = ci->i_truncate_pagecache_size;
wrbuffer_refs = ci->i_wrbuffer_ref;
- dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
+ dout("%s %p (%d) to %lld\n", __func__, inode,
ci->i_truncate_pending, to);
spin_unlock(&ci->i_ceph_lock);
@@ -1959,7 +2198,7 @@ retry:
truncate_pagecache(inode, to);
spin_lock(&ci->i_ceph_lock);
- if (to == ci->i_truncate_size) {
+ if (to == ci->i_truncate_pagecache_size) {
ci->i_truncate_pending = 0;
finish = 1;
}
@@ -2000,6 +2239,32 @@ static void ceph_inode_work(struct work_struct *work)
iput(inode);
}
+static const char *ceph_encrypted_get_link(struct dentry *dentry,
+ struct inode *inode,
+ struct delayed_call *done)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ if (!dentry)
+ return ERR_PTR(-ECHILD);
+
+ return fscrypt_get_symlink(inode, ci->i_symlink, i_size_read(inode),
+ done);
+}
+
+static int ceph_encrypted_symlink_getattr(struct mnt_idmap *idmap,
+ const struct path *path,
+ struct kstat *stat, u32 request_mask,
+ unsigned int query_flags)
+{
+ int ret;
+
+ ret = ceph_getattr(idmap, path, stat, request_mask, query_flags);
+ if (ret)
+ return ret;
+ return fscrypt_symlink_getattr(path, stat);
+}
+
/*
* symlinks
*/
@@ -2010,20 +2275,173 @@ static const struct inode_operations ceph_symlink_iops = {
.listxattr = ceph_listxattr,
};
-int __ceph_setattr(struct inode *inode, struct iattr *attr)
+static const struct inode_operations ceph_encrypted_symlink_iops = {
+ .get_link = ceph_encrypted_get_link,
+ .setattr = ceph_setattr,
+ .getattr = ceph_encrypted_symlink_getattr,
+ .listxattr = ceph_listxattr,
+};
+
+/*
+ * Transfer the encrypted last block to the MDS, and the MDS
+ * will update it when truncating to a smaller size.
+ *
+ * We don't support a PAGE_SIZE that is smaller than
+ * CEPH_FSCRYPT_BLOCK_SIZE.
+ */
+static int fill_fscrypt_truncate(struct inode *inode,
+ struct ceph_mds_request *req,
+ struct iattr *attr)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int boff = attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE;
+ loff_t pos, orig_pos = round_down(attr->ia_size,
+ CEPH_FSCRYPT_BLOCK_SIZE);
+ u64 block = orig_pos >> CEPH_FSCRYPT_BLOCK_SHIFT;
+ struct ceph_pagelist *pagelist = NULL;
+ struct kvec iov = {0};
+ struct iov_iter iter;
+ struct page *page = NULL;
+ struct ceph_fscrypt_truncate_size_header header;
+ int retry_op = 0;
+ int len = CEPH_FSCRYPT_BLOCK_SIZE;
+ loff_t i_size = i_size_read(inode);
+ int got, ret, issued;
+ u64 objver;
+
+ ret = __ceph_get_caps(inode, NULL, CEPH_CAP_FILE_RD, 0, -1, &got);
+ if (ret < 0)
+ return ret;
+
+ issued = __ceph_caps_issued(ci, NULL);
+
+ dout("%s size %lld -> %lld got cap refs on %s, issued %s\n", __func__,
+ i_size, attr->ia_size, ceph_cap_string(got),
+ ceph_cap_string(issued));
+
+ /* Try to writeback the dirty pagecaches */
+ if (issued & (CEPH_CAP_FILE_BUFFER)) {
+ loff_t lend = orig_pos + CEPH_FSCRYPT_BLOCK_SIZE - 1;
+
+ ret = filemap_write_and_wait_range(inode->i_mapping,
+ orig_pos, lend);
+ if (ret < 0)
+ goto out;
+ }
+
+ page = __page_cache_alloc(GFP_KERNEL);
+ if (page == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pagelist = ceph_pagelist_alloc(GFP_KERNEL);
+ if (!pagelist) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ iov.iov_base = kmap_local_page(page);
+ iov.iov_len = len;
+ iov_iter_kvec(&iter, READ, &iov, 1, len);
+
+ pos = orig_pos;
+ ret = __ceph_sync_read(inode, &pos, &iter, &retry_op, &objver);
+ if (ret < 0)
+ goto out;
+
+ /* Insert the header first */
+ header.ver = 1;
+ header.compat = 1;
+ header.change_attr = cpu_to_le64(inode_peek_iversion_raw(inode));
+
+ /*
+ * Always set the block_size to CEPH_FSCRYPT_BLOCK_SIZE,
+ * because the MDS may need it to perform the truncate.
+ */
+ header.block_size = cpu_to_le32(CEPH_FSCRYPT_BLOCK_SIZE);
+
+ /*
+ * If we hit a hole here, just skip filling the fscrypt payload
+ * for the request: once fscrypt is enabled the file is split into
+ * CEPH_FSCRYPT_BLOCK_SIZE blocks, so any hole must be a multiple
+ * of the block size.
+ *
+ * If the RADOS object doesn't exist, objver will be 0.
+ */
+ if (!objver) {
+ dout("%s hit hole, ppos %lld < size %lld\n", __func__,
+ pos, i_size);
+
+ header.data_len = cpu_to_le32(8 + 8 + 4);
+ header.file_offset = 0;
+ ret = 0;
+ } else {
+ header.data_len = cpu_to_le32(8 + 8 + 4 + CEPH_FSCRYPT_BLOCK_SIZE);
+ header.file_offset = cpu_to_le64(orig_pos);
+
+ dout("%s encrypt block boff/bsize %d/%lu\n", __func__,
+ boff, CEPH_FSCRYPT_BLOCK_SIZE);
+
+ /* truncate and zero out the extra contents for the last block */
+ memset(iov.iov_base + boff, 0, PAGE_SIZE - boff);
+
+ /* encrypt the last block */
+ ret = ceph_fscrypt_encrypt_block_inplace(inode, page,
+ CEPH_FSCRYPT_BLOCK_SIZE,
+ 0, block,
+ GFP_KERNEL);
+ if (ret)
+ goto out;
+ }
+
+ /* Insert the header */
+ ret = ceph_pagelist_append(pagelist, &header, sizeof(header));
+ if (ret)
+ goto out;
+
+ if (header.block_size) {
+ /* Append the last block contents to pagelist */
+ ret = ceph_pagelist_append(pagelist, iov.iov_base,
+ CEPH_FSCRYPT_BLOCK_SIZE);
+ if (ret)
+ goto out;
+ }
+ req->r_pagelist = pagelist;
+out:
+ dout("%s %p size dropping cap refs on %s\n", __func__,
+ inode, ceph_cap_string(got));
+ ceph_put_cap_refs(ci, got);
+ if (iov.iov_base)
+ kunmap_local(iov.iov_base);
+ if (page)
+ __free_pages(page, 0);
+ if (ret && pagelist)
+ ceph_pagelist_release(pagelist);
+ return ret;
+}
+
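Editor's note: fill_fscrypt_truncate() above sends a small header, optionally followed by the re-encrypted last block, along with the truncate request. A rough sketch of such a payload follows; the struct layout and field order are illustrative and are not the on-wire kernel definition.

#include <stdio.h>
#include <stdint.h>

#define FSCRYPT_BLOCK_SIZE 4096u	/* assumed for illustration */

struct trunc_header {
	uint8_t  ver;
	uint8_t  compat;
	uint32_t data_len;
	uint64_t change_attr;
	uint64_t file_offset;
	uint32_t block_size;
} __attribute__((packed));

int main(void)
{
	struct trunc_header h = {
		.ver = 1,
		.compat = 1,
		.change_attr = 12345,
		.file_offset = 8192,		/* start of the last block */
		.block_size = FSCRYPT_BLOCK_SIZE,
		.data_len = 8 + 8 + 4 + FSCRYPT_BLOCK_SIZE,
	};

	printf("header %zu bytes, payload %u bytes\n",
	       sizeof(h), (unsigned)(sizeof(h) + FSCRYPT_BLOCK_SIZE));
	return 0;
}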
+int __ceph_setattr(struct inode *inode, struct iattr *attr,
+ struct ceph_iattr *cia)
{
struct ceph_inode_info *ci = ceph_inode(inode);
unsigned int ia_valid = attr->ia_valid;
struct ceph_mds_request *req;
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_cap_flush *prealloc_cf;
+ loff_t isize = i_size_read(inode);
int issued;
int release = 0, dirtied = 0;
int mask = 0;
int err = 0;
int inode_dirty_flags = 0;
bool lock_snap_rwsem = false;
+ bool fill_fscrypt;
+ int truncate_retry = 20; /* The RMW will take around 50ms */
+retry:
prealloc_cf = ceph_alloc_cap_flush();
if (!prealloc_cf)
return -ENOMEM;
@@ -2035,6 +2453,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
return PTR_ERR(req);
}
+ fill_fscrypt = false;
spin_lock(&ci->i_ceph_lock);
issued = __ceph_caps_issued(ci, NULL);
@@ -2050,6 +2469,43 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
}
dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ if (cia && cia->fscrypt_auth) {
+ u32 len = ceph_fscrypt_auth_len(cia->fscrypt_auth);
+
+ if (len > sizeof(*cia->fscrypt_auth)) {
+ err = -EINVAL;
+ spin_unlock(&ci->i_ceph_lock);
+ goto out;
+ }
+
+ dout("setattr %llx:%llx fscrypt_auth len %u to %u)\n",
+ ceph_vinop(inode), ci->fscrypt_auth_len, len);
+
+ /* It should never be re-set once set */
+ WARN_ON_ONCE(ci->fscrypt_auth);
+
+ if (issued & CEPH_CAP_AUTH_EXCL) {
+ dirtied |= CEPH_CAP_AUTH_EXCL;
+ kfree(ci->fscrypt_auth);
+ ci->fscrypt_auth = (u8 *)cia->fscrypt_auth;
+ ci->fscrypt_auth_len = len;
+ } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
+ ci->fscrypt_auth_len != len ||
+ memcmp(ci->fscrypt_auth, cia->fscrypt_auth, len)) {
+ req->r_fscrypt_auth = cia->fscrypt_auth;
+ mask |= CEPH_SETATTR_FSCRYPT_AUTH;
+ release |= CEPH_CAP_AUTH_SHARED;
+ }
+ cia->fscrypt_auth = NULL;
+ }
+#else
+ if (cia && cia->fscrypt_auth) {
+ err = -EINVAL;
+ spin_unlock(&ci->i_ceph_lock);
+ goto out;
+ }
+#endif /* CONFIG_FS_ENCRYPTION */
if (ia_valid & ATTR_UID) {
dout("setattr %p uid %d -> %d\n", inode,
@@ -2119,10 +2575,27 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
}
}
if (ia_valid & ATTR_SIZE) {
- loff_t isize = i_size_read(inode);
-
dout("setattr %p size %lld -> %lld\n", inode, isize, attr->ia_size);
- if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
+ /*
+ * Only when the new size is smaller and not aligned to
+ * CEPH_FSCRYPT_BLOCK_SIZE is an RMW needed.
+ */
+ if (IS_ENCRYPTED(inode) && attr->ia_size < isize &&
+ (attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE)) {
+ mask |= CEPH_SETATTR_SIZE;
+ release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
+ CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
+ set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
+ mask |= CEPH_SETATTR_FSCRYPT_FILE;
+ req->r_args.setattr.size =
+ cpu_to_le64(round_up(attr->ia_size,
+ CEPH_FSCRYPT_BLOCK_SIZE));
+ req->r_args.setattr.old_size =
+ cpu_to_le64(round_up(isize,
+ CEPH_FSCRYPT_BLOCK_SIZE));
+ req->r_fscrypt_file = attr->ia_size;
+ fill_fscrypt = true;
+ } else if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
if (attr->ia_size > isize) {
i_size_write(inode, attr->ia_size);
inode->i_blocks = calc_inode_blocks(attr->ia_size);
@@ -2132,11 +2605,24 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
}
} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
attr->ia_size != isize) {
- req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
- req->r_args.setattr.old_size = cpu_to_le64(isize);
mask |= CEPH_SETATTR_SIZE;
release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
+ if (IS_ENCRYPTED(inode) && attr->ia_size) {
+ set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
+ mask |= CEPH_SETATTR_FSCRYPT_FILE;
+ req->r_args.setattr.size =
+ cpu_to_le64(round_up(attr->ia_size,
+ CEPH_FSCRYPT_BLOCK_SIZE));
+ req->r_args.setattr.old_size =
+ cpu_to_le64(round_up(isize,
+ CEPH_FSCRYPT_BLOCK_SIZE));
+ req->r_fscrypt_file = attr->ia_size;
+ } else {
+ req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
+ req->r_args.setattr.old_size = cpu_to_le64(isize);
+ req->r_fscrypt_file = 0;
+ }
}
}
if (ia_valid & ATTR_MTIME) {
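Editor's note: the ATTR_SIZE branch above only triggers the last-block RMW for a shrinking, block-unaligned truncate. A minimal sketch of that predicate (assumed block size, illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define FSCRYPT_BLOCK_SIZE 4096ULL	/* assumed for illustration */

/* Only a shrinking truncate ending inside an fscrypt block needs the RMW. */
static bool truncate_needs_rmw(unsigned long long new_size,
			       unsigned long long old_size)
{
	return new_size < old_size && (new_size % FSCRYPT_BLOCK_SIZE) != 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       truncate_needs_rmw(5000, 10000),	/* 1: shrink, unaligned */
	       truncate_needs_rmw(8192, 10000),	/* 0: shrink, aligned */
	       truncate_needs_rmw(20000, 10000));	/* 0: grow */
	return 0;
}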
@@ -2199,8 +2685,10 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
release &= issued;
spin_unlock(&ci->i_ceph_lock);
- if (lock_snap_rwsem)
+ if (lock_snap_rwsem) {
up_read(&mdsc->snap_rwsem);
+ lock_snap_rwsem = false;
+ }
if (inode_dirty_flags)
__mark_inode_dirty(inode, inode_dirty_flags);
@@ -2212,8 +2700,29 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
req->r_args.setattr.mask = cpu_to_le32(mask);
req->r_num_caps = 1;
req->r_stamp = attr->ia_ctime;
+ if (fill_fscrypt) {
+ err = fill_fscrypt_truncate(inode, req, attr);
+ if (err)
+ goto out;
+ }
+
+ /*
+ * The truncate request will return -EAGAIN when the
+ * last block has been updated just before the MDS
+ * successfully gets the xlock for the FILE lock. To
+ * avoid corrupting the file contents we need to retry
+ * it.
+ */
err = ceph_mdsc_do_request(mdsc, NULL, req);
+ if (err == -EAGAIN && truncate_retry--) {
+ dout("setattr %p result=%d (%s locally, %d remote), retry it!\n",
+ inode, err, ceph_cap_string(dirtied), mask);
+ ceph_mdsc_put_request(req);
+ ceph_free_cap_flush(prealloc_cf);
+ goto retry;
+ }
}
+out:
dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
ceph_cap_string(dirtied), mask);
@@ -2242,6 +2751,10 @@ int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (ceph_inode_is_shutdown(inode))
return -ESTALE;
+ err = fscrypt_prepare_setattr(dentry, attr);
+ if (err)
+ return err;
+
err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
if (err != 0)
return err;
@@ -2254,7 +2767,7 @@ int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
return -EDQUOT;
- err = __ceph_setattr(inode, attr);
+ err = __ceph_setattr(inode, attr, NULL);
if (err >= 0 && (attr->ia_valid & ATTR_MODE))
err = posix_acl_chmod(&nop_mnt_idmap, dentry, attr->ia_mode);
@@ -2525,8 +3038,12 @@ int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
stat->nlink = 1 + 1 + ci->i_subdirs;
}
- stat->attributes_mask |= STATX_ATTR_CHANGE_MONOTONIC;
stat->attributes |= STATX_ATTR_CHANGE_MONOTONIC;
+ if (IS_ENCRYPTED(inode))
+ stat->attributes |= STATX_ATTR_ENCRYPTED;
+ stat->attributes_mask |= (STATX_ATTR_CHANGE_MONOTONIC |
+ STATX_ATTR_ENCRYPTED);
+
stat->result_mask = request_mask & valid_mask;
return err;
}
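With the ceph_getattr() change above, encrypted ceph inodes now report STATX_ATTR_ENCRYPTED. For reference, a userspace check could look roughly like this; it is plain statx(2) usage, nothing ceph-specific, and assumes a glibc new enough (2.28+) to provide the wrapper:

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/stat.h>

/* Returns 1 if encrypted, 0 if not, -1 on error or if the attribute
 * is not reported by the filesystem (attributes_mask bit clear). */
static int path_is_encrypted(const char *path)
{
	struct statx stx;

	if (statx(AT_FDCWD, path, 0, STATX_BASIC_STATS, &stx) != 0)
		return -1;
	if (!(stx.stx_attributes_mask & STATX_ATTR_ENCRYPTED))
		return -1;
	return !!(stx.stx_attributes & STATX_ATTR_ENCRYPTED);
}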
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index deac817647eb..91a84917d203 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -6,6 +6,7 @@
#include "mds_client.h"
#include "ioctl.h"
#include <linux/ceph/striper.h>
+#include <linux/fscrypt.h>
/*
* ioctls
@@ -268,9 +269,96 @@ static long ceph_ioctl_syncio(struct file *file)
return 0;
}
+static int vet_mds_for_fscrypt(struct file *file)
+{
+ int i, ret = -EOPNOTSUPP;
+ struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(file_inode(file)->i_sb);
+
+ mutex_lock(&mdsc->mutex);
+ for (i = 0; i < mdsc->max_sessions; i++) {
+ struct ceph_mds_session *s = mdsc->sessions[i];
+
+ if (!s)
+ continue;
+ if (test_bit(CEPHFS_FEATURE_ALTERNATE_NAME, &s->s_features))
+ ret = 0;
+ break;
+ }
+ mutex_unlock(&mdsc->mutex);
+ return ret;
+}
+
+static long ceph_set_encryption_policy(struct file *file, unsigned long arg)
+{
+ int ret, got = 0;
+ struct inode *inode = file_inode(file);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ /* encrypted directories can't have striped layout */
+ if (ci->i_layout.stripe_count > 1)
+ return -EINVAL;
+
+ ret = vet_mds_for_fscrypt(file);
+ if (ret)
+ return ret;
+
+ /*
+ * Ensure we hold these caps so that we _know_ that the rstats check
+ * in the empty_dir check is reliable.
+ */
+ ret = ceph_get_caps(file, CEPH_CAP_FILE_SHARED, 0, -1, &got);
+ if (ret)
+ return ret;
+
+ ret = fscrypt_ioctl_set_policy(file, (const void __user *)arg);
+ if (got)
+ ceph_put_cap_refs(ci, got);
+
+ return ret;
+}
+
+static const char *ceph_ioctl_cmd_name(const unsigned int cmd)
+{
+ switch (cmd) {
+ case CEPH_IOC_GET_LAYOUT:
+ return "get_layout";
+ case CEPH_IOC_SET_LAYOUT:
+ return "set_layout";
+ case CEPH_IOC_SET_LAYOUT_POLICY:
+ return "set_layout_policy";
+ case CEPH_IOC_GET_DATALOC:
+ return "get_dataloc";
+ case CEPH_IOC_LAZYIO:
+ return "lazyio";
+ case CEPH_IOC_SYNCIO:
+ return "syncio";
+ case FS_IOC_SET_ENCRYPTION_POLICY:
+ return "set_encryption_policy";
+ case FS_IOC_GET_ENCRYPTION_POLICY:
+ return "get_encryption_policy";
+ case FS_IOC_GET_ENCRYPTION_POLICY_EX:
+ return "get_encryption_policy_ex";
+ case FS_IOC_ADD_ENCRYPTION_KEY:
+ return "add_encryption_key";
+ case FS_IOC_REMOVE_ENCRYPTION_KEY:
+ return "remove_encryption_key";
+ case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
+ return "remove_encryption_key_all_users";
+ case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
+ return "get_encryption_key_status";
+ case FS_IOC_GET_ENCRYPTION_NONCE:
+ return "get_encryption_nonce";
+ default:
+ return "unknown";
+ }
+}
+
long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- dout("ioctl file %p cmd %u arg %lu\n", file, cmd, arg);
+ int ret;
+
+ dout("ioctl file %p cmd %s arg %lu\n", file,
+ ceph_ioctl_cmd_name(cmd), arg);
switch (cmd) {
case CEPH_IOC_GET_LAYOUT:
return ceph_ioctl_get_layout(file, (void __user *)arg);
@@ -289,6 +377,43 @@ long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case CEPH_IOC_SYNCIO:
return ceph_ioctl_syncio(file);
+
+ case FS_IOC_SET_ENCRYPTION_POLICY:
+ return ceph_set_encryption_policy(file, arg);
+
+ case FS_IOC_GET_ENCRYPTION_POLICY:
+ ret = vet_mds_for_fscrypt(file);
+ if (ret)
+ return ret;
+ return fscrypt_ioctl_get_policy(file, (void __user *)arg);
+
+ case FS_IOC_GET_ENCRYPTION_POLICY_EX:
+ ret = vet_mds_for_fscrypt(file);
+ if (ret)
+ return ret;
+ return fscrypt_ioctl_get_policy_ex(file, (void __user *)arg);
+
+ case FS_IOC_ADD_ENCRYPTION_KEY:
+ ret = vet_mds_for_fscrypt(file);
+ if (ret)
+ return ret;
+ return fscrypt_ioctl_add_key(file, (void __user *)arg);
+
+ case FS_IOC_REMOVE_ENCRYPTION_KEY:
+ return fscrypt_ioctl_remove_key(file, (void __user *)arg);
+
+ case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
+ return fscrypt_ioctl_remove_key_all_users(file,
+ (void __user *)arg);
+
+ case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
+ return fscrypt_ioctl_get_key_status(file, (void __user *)arg);
+
+ case FS_IOC_GET_ENCRYPTION_NONCE:
+ ret = vet_mds_for_fscrypt(file);
+ if (ret)
+ return ret;
+ return fscrypt_ioctl_get_nonce(file, (void __user *)arg);
}
return -ENOTTY;
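For orientation, the FS_IOC_SET_ENCRYPTION_POLICY path added above is driven from userspace through the generic fscrypt UAPI. A hedged sketch follows; it assumes the master key was already installed with FS_IOC_ADD_ENCRYPTION_KEY, and per ceph_set_encryption_policy() the target directory must be empty and must not have a striped layout:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/fscrypt.h>

/* key_id: the 16-byte identifier returned by FS_IOC_ADD_ENCRYPTION_KEY */
static int set_dir_policy(int dirfd,
			  const __u8 key_id[FSCRYPT_KEY_IDENTIFIER_SIZE])
{
	struct fscrypt_policy_v2 pol;

	memset(&pol, 0, sizeof(pol));
	pol.version = FSCRYPT_POLICY_V2;
	pol.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS;
	pol.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS;
	pol.flags = FSCRYPT_POLICY_FLAGS_PAD_16;
	memcpy(pol.master_key_identifier, key_id, FSCRYPT_KEY_IDENTIFIER_SIZE);

	return ioctl(dirfd, FS_IOC_SET_ENCRYPTION_POLICY, &pol);
}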
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 5fb367b1d4b0..615db141b6c4 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -15,6 +15,7 @@
#include "super.h"
#include "mds_client.h"
+#include "crypto.h"
#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
@@ -184,8 +185,54 @@ static int parse_reply_info_in(void **p, void *end,
info->rsnaps = 0;
}
+ if (struct_v >= 5) {
+ u32 alen;
+
+ ceph_decode_32_safe(p, end, alen, bad);
+
+ while (alen--) {
+ u32 len;
+
+ /* key */
+ ceph_decode_32_safe(p, end, len, bad);
+ ceph_decode_skip_n(p, end, len, bad);
+ /* value */
+ ceph_decode_32_safe(p, end, len, bad);
+ ceph_decode_skip_n(p, end, len, bad);
+ }
+ }
+
+ /* fscrypt flag -- ignore */
+ if (struct_v >= 6)
+ ceph_decode_skip_8(p, end, bad);
+
+ info->fscrypt_auth = NULL;
+ info->fscrypt_auth_len = 0;
+ info->fscrypt_file = NULL;
+ info->fscrypt_file_len = 0;
+ if (struct_v >= 7) {
+ ceph_decode_32_safe(p, end, info->fscrypt_auth_len, bad);
+ if (info->fscrypt_auth_len) {
+ info->fscrypt_auth = kmalloc(info->fscrypt_auth_len,
+ GFP_KERNEL);
+ if (!info->fscrypt_auth)
+ return -ENOMEM;
+ ceph_decode_copy_safe(p, end, info->fscrypt_auth,
+ info->fscrypt_auth_len, bad);
+ }
+ ceph_decode_32_safe(p, end, info->fscrypt_file_len, bad);
+ if (info->fscrypt_file_len) {
+ info->fscrypt_file = kmalloc(info->fscrypt_file_len,
+ GFP_KERNEL);
+ if (!info->fscrypt_file)
+ return -ENOMEM;
+ ceph_decode_copy_safe(p, end, info->fscrypt_file,
+ info->fscrypt_file_len, bad);
+ }
+ }
*p = end;
} else {
+ /* legacy (unversioned) struct */
if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
ceph_decode_64_safe(p, end, info->inline_version, bad);
ceph_decode_32_safe(p, end, info->inline_len, bad);
@@ -263,27 +310,47 @@ bad:
static int parse_reply_info_lease(void **p, void *end,
struct ceph_mds_reply_lease **lease,
- u64 features)
+ u64 features, u32 *altname_len, u8 **altname)
{
+ u8 struct_v;
+ u32 struct_len;
+ void *lend;
+
if (features == (u64)-1) {
- u8 struct_v, struct_compat;
- u32 struct_len;
+ u8 struct_compat;
+
ceph_decode_8_safe(p, end, struct_v, bad);
ceph_decode_8_safe(p, end, struct_compat, bad);
+
/* struct_v is expected to be >= 1. we only understand
* encoding whose struct_compat == 1. */
if (!struct_v || struct_compat != 1)
goto bad;
+
ceph_decode_32_safe(p, end, struct_len, bad);
- ceph_decode_need(p, end, struct_len, bad);
- end = *p + struct_len;
+ } else {
+ struct_len = sizeof(**lease);
+ *altname_len = 0;
+ *altname = NULL;
}
- ceph_decode_need(p, end, sizeof(**lease), bad);
+ lend = *p + struct_len;
+ ceph_decode_need(p, end, struct_len, bad);
*lease = *p;
*p += sizeof(**lease);
- if (features == (u64)-1)
- *p = end;
+
+ if (features == (u64)-1) {
+ if (struct_v >= 2) {
+ ceph_decode_32_safe(p, end, *altname_len, bad);
+ ceph_decode_need(p, end, *altname_len, bad);
+ *altname = *p;
+ *p += *altname_len;
+ } else {
+ *altname = NULL;
+ *altname_len = 0;
+ }
+ }
+ *p = lend;
return 0;
bad:
return -EIO;
@@ -313,7 +380,8 @@ static int parse_reply_info_trace(void **p, void *end,
info->dname = *p;
*p += info->dname_len;
- err = parse_reply_info_lease(p, end, &info->dlease, features);
+ err = parse_reply_info_lease(p, end, &info->dlease, features,
+ &info->altname_len, &info->altname);
if (err < 0)
goto out_bad;
}
@@ -339,9 +407,10 @@ out_bad:
* parse readdir results
*/
static int parse_reply_info_readdir(void **p, void *end,
- struct ceph_mds_reply_info_parsed *info,
- u64 features)
+ struct ceph_mds_request *req,
+ u64 features)
{
+ struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
u32 num, i = 0;
int err;
@@ -371,18 +440,87 @@ static int parse_reply_info_readdir(void **p, void *end,
info->dir_nr = num;
while (num) {
+ struct inode *inode = d_inode(req->r_dentry);
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
+ struct fscrypt_str tname = FSTR_INIT(NULL, 0);
+ struct fscrypt_str oname = FSTR_INIT(NULL, 0);
+ struct ceph_fname fname;
+ u32 altname_len, _name_len;
+ u8 *altname, *_name;
+
/* dentry */
- ceph_decode_32_safe(p, end, rde->name_len, bad);
- ceph_decode_need(p, end, rde->name_len, bad);
- rde->name = *p;
- *p += rde->name_len;
- dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
+ ceph_decode_32_safe(p, end, _name_len, bad);
+ ceph_decode_need(p, end, _name_len, bad);
+ _name = *p;
+ *p += _name_len;
+ dout("parsed dir dname '%.*s'\n", _name_len, _name);
+
+ if (info->hash_order)
+ rde->raw_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
+ _name, _name_len);
/* dentry lease */
- err = parse_reply_info_lease(p, end, &rde->lease, features);
+ err = parse_reply_info_lease(p, end, &rde->lease, features,
+ &altname_len, &altname);
if (err)
goto out_bad;
+
+ /*
+		 * Try to decrypt the dentry names and update them
+ * in the ceph_mds_reply_dir_entry struct.
+ */
+ fname.dir = inode;
+ fname.name = _name;
+ fname.name_len = _name_len;
+ fname.ctext = altname;
+ fname.ctext_len = altname_len;
+ /*
+		 * _name_len may be larger than altname_len, e.g. when the
+		 * human-readable name length falls in the range
+		 * (CEPH_NOHASH_NAME_MAX, CEPH_NOHASH_NAME_MAX + SHA256_DIGEST_SIZE);
+		 * in that case the copy in ceph_fname_to_usr would corrupt
+		 * the data if there is no encryption key.
+		 *
+		 * Just set the no_copy flag: if there is no encryption key,
+		 * oname.name is then always assigned to _name.
+ */
+ fname.no_copy = true;
+ if (altname_len == 0) {
+ /*
+			 * Set tname to _name; it will be used to do the
+			 * base64_decode in-place. This is safe because the
+			 * decoded string is always shorter, roughly 3/4 the
+			 * length of the original.
+ */
+ tname.name = _name;
+
+ /*
+			 * Set oname to _name too; it will be used to do
+			 * the decryption in-place.
+ */
+ oname.name = _name;
+ oname.len = _name_len;
+ } else {
+ /*
+			 * Decrypt in-place, directly from the altname
+			 * ciphertext.
+ */
+ oname.name = altname;
+ oname.len = altname_len;
+ }
+ rde->is_nokey = false;
+ err = ceph_fname_to_usr(&fname, &tname, &oname, &rde->is_nokey);
+ if (err) {
+ pr_err("%s unable to decode %.*s, got %d\n", __func__,
+ _name_len, _name, err);
+ goto out_bad;
+ }
+ rde->name = oname.name;
+ rde->name_len = oname.len;
+
/* inode */
err = parse_reply_info_in(p, end, &rde->inode, features);
if (err < 0)
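The in-place decode/decrypt in the loop above relies on base64-encoded names never growing when decoded. Roughly, as an illustrative stand-alone helper (not a kernel function):

/* For unpadded base64, every 4 input characters decode to at most
 * 3 bytes, so the decoded length never exceeds the encoded length --
 * which is why _name can double as the in-place decode buffer. */
static inline unsigned int b64_max_decoded_len(unsigned int encoded_len)
{
	return encoded_len * 3 / 4;
}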
@@ -581,15 +719,16 @@ bad:
* parse extra results
*/
static int parse_reply_info_extra(void **p, void *end,
- struct ceph_mds_reply_info_parsed *info,
+ struct ceph_mds_request *req,
u64 features, struct ceph_mds_session *s)
{
+ struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
u32 op = le32_to_cpu(info->head->op);
if (op == CEPH_MDS_OP_GETFILELOCK)
return parse_reply_info_filelock(p, end, info, features);
else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
- return parse_reply_info_readdir(p, end, info, features);
+ return parse_reply_info_readdir(p, end, req, features);
else if (op == CEPH_MDS_OP_CREATE)
return parse_reply_info_create(p, end, info, features, s);
else if (op == CEPH_MDS_OP_GETVXATTR)
@@ -602,9 +741,9 @@ static int parse_reply_info_extra(void **p, void *end,
* parse entire mds reply
*/
static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
- struct ceph_mds_reply_info_parsed *info,
- u64 features)
+ struct ceph_mds_request *req, u64 features)
{
+ struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
void *p, *end;
u32 len;
int err;
@@ -626,7 +765,7 @@ static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
ceph_decode_32_safe(&p, end, len, bad);
if (len > 0) {
ceph_decode_need(&p, end, len, bad);
- err = parse_reply_info_extra(&p, p+len, info, features, s);
+ err = parse_reply_info_extra(&p, p+len, req, features, s);
if (err < 0)
goto out_bad;
}
@@ -651,8 +790,21 @@ out_bad:
static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
+ int i;
+
+ kfree(info->diri.fscrypt_auth);
+ kfree(info->diri.fscrypt_file);
+ kfree(info->targeti.fscrypt_auth);
+ kfree(info->targeti.fscrypt_file);
if (!info->dir_entries)
return;
+
+ for (i = 0; i < info->dir_nr; i++) {
+ struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
+
+ kfree(rde->inode.fscrypt_auth);
+ kfree(rde->inode.fscrypt_file);
+ }
free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
}
@@ -945,6 +1097,7 @@ void ceph_mdsc_release_request(struct kref *kref)
iput(req->r_parent);
}
iput(req->r_target_inode);
+ iput(req->r_new_inode);
if (req->r_dentry)
dput(req->r_dentry);
if (req->r_old_dentry)
@@ -965,6 +1118,8 @@ void ceph_mdsc_release_request(struct kref *kref)
put_cred(req->r_cred);
if (req->r_pagelist)
ceph_pagelist_release(req->r_pagelist);
+ kfree(req->r_fscrypt_auth);
+ kfree(req->r_altname);
put_request_session(req);
ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
WARN_ON_ONCE(!list_empty(&req->r_wait));
@@ -2373,20 +2528,90 @@ static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
return mdsc->oldest_tid;
}
-/*
- * Build a dentry's path. Allocate on heap; caller must kfree. Based
- * on build_path_from_dentry in fs/cifs/dir.c.
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
+{
+ struct inode *dir = req->r_parent;
+ struct dentry *dentry = req->r_dentry;
+ u8 *cryptbuf = NULL;
+ u32 len = 0;
+ int ret = 0;
+
+ /* only encode if we have parent and dentry */
+ if (!dir || !dentry)
+ goto success;
+
+ /* No-op unless this is encrypted */
+ if (!IS_ENCRYPTED(dir))
+ goto success;
+
+ ret = ceph_fscrypt_prepare_readdir(dir);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* No key? Just ignore it. */
+ if (!fscrypt_has_encryption_key(dir))
+ goto success;
+
+ if (!fscrypt_fname_encrypted_size(dir, dentry->d_name.len, NAME_MAX,
+ &len)) {
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-ENAMETOOLONG);
+ }
+
+ /* No need to append altname if name is short enough */
+ if (len <= CEPH_NOHASH_NAME_MAX) {
+ len = 0;
+ goto success;
+ }
+
+ cryptbuf = kmalloc(len, GFP_KERNEL);
+ if (!cryptbuf)
+ return ERR_PTR(-ENOMEM);
+
+ ret = fscrypt_fname_encrypt(dir, &dentry->d_name, cryptbuf, len);
+ if (ret) {
+ kfree(cryptbuf);
+ return ERR_PTR(ret);
+ }
+success:
+ *plen = len;
+ return cryptbuf;
+}
+#else
+static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
+{
+ *plen = 0;
+ return NULL;
+}
+#endif
+
+/**
+ * ceph_mdsc_build_path - build a path string to a given dentry
+ * @dentry: dentry to which path should be built
+ * @plen: returned length of string
+ * @pbase: returned base inode number
+ * @for_wire: is this path going to be sent to the MDS?
+ *
+ * Build a string that represents the path to the dentry. This is mostly called
+ * for two different purposes:
+ *
+ * 1) we need to build a path string to send to the MDS (for_wire == true)
+ * 2) we need a path string for local presentation (e.g. debugfs)
+ * (for_wire == false)
*
- * If @stop_on_nosnap, generate path relative to the first non-snapped
- * inode.
+ * The path is built in reverse, starting with the dentry. Walk back up toward
+ * the root, building the path until the first non-snapped inode is reached
+ * (for_wire) or the root inode is reached (!for_wire).
*
* Encode hidden .snap dirs as a double /, i.e.
* foo/.snap/bar -> foo//bar
*/
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
- int stop_on_nosnap)
+ int for_wire)
{
- struct dentry *temp;
+ struct dentry *cur;
+ struct inode *inode;
char *path;
int pos;
unsigned seq;
@@ -2403,34 +2628,72 @@ retry:
path[pos] = '\0';
seq = read_seqbegin(&rename_lock);
- rcu_read_lock();
- temp = dentry;
+ cur = dget(dentry);
for (;;) {
- struct inode *inode;
+ struct dentry *parent;
- spin_lock(&temp->d_lock);
- inode = d_inode(temp);
+ spin_lock(&cur->d_lock);
+ inode = d_inode(cur);
if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
dout("build_path path+%d: %p SNAPDIR\n",
- pos, temp);
- } else if (stop_on_nosnap && inode && dentry != temp &&
+ pos, cur);
+ spin_unlock(&cur->d_lock);
+ parent = dget_parent(cur);
+ } else if (for_wire && inode && dentry != cur &&
ceph_snap(inode) == CEPH_NOSNAP) {
- spin_unlock(&temp->d_lock);
+ spin_unlock(&cur->d_lock);
pos++; /* get rid of any prepended '/' */
break;
+ } else if (!for_wire || !IS_ENCRYPTED(d_inode(cur->d_parent))) {
+ pos -= cur->d_name.len;
+ if (pos < 0) {
+ spin_unlock(&cur->d_lock);
+ break;
+ }
+ memcpy(path + pos, cur->d_name.name, cur->d_name.len);
+ spin_unlock(&cur->d_lock);
+ parent = dget_parent(cur);
} else {
- pos -= temp->d_name.len;
+ int len, ret;
+ char buf[NAME_MAX];
+
+ /*
+ * Proactively copy name into buf, in case we need to
+ * present it as-is.
+ */
+ memcpy(buf, cur->d_name.name, cur->d_name.len);
+ len = cur->d_name.len;
+ spin_unlock(&cur->d_lock);
+ parent = dget_parent(cur);
+
+ ret = ceph_fscrypt_prepare_readdir(d_inode(parent));
+ if (ret < 0) {
+ dput(parent);
+ dput(cur);
+ return ERR_PTR(ret);
+ }
+
+ if (fscrypt_has_encryption_key(d_inode(parent))) {
+ len = ceph_encode_encrypted_fname(d_inode(parent),
+ cur, buf);
+ if (len < 0) {
+ dput(parent);
+ dput(cur);
+ return ERR_PTR(len);
+ }
+ }
+ pos -= len;
if (pos < 0) {
- spin_unlock(&temp->d_lock);
+ dput(parent);
break;
}
- memcpy(path + pos, temp->d_name.name, temp->d_name.len);
+ memcpy(path + pos, buf, len);
}
- spin_unlock(&temp->d_lock);
- temp = READ_ONCE(temp->d_parent);
+ dput(cur);
+ cur = parent;
/* Are we at the root? */
- if (IS_ROOT(temp))
+ if (IS_ROOT(cur))
break;
/* Are we out of buffer? */
@@ -2439,8 +2702,9 @@ retry:
path[pos] = '/';
}
- base = ceph_ino(d_inode(temp));
- rcu_read_unlock();
+ inode = d_inode(cur);
+ base = inode ? ceph_ino(inode) : 0;
+ dput(cur);
if (read_seqretry(&rename_lock, seq))
goto retry;
@@ -2450,8 +2714,8 @@ retry:
* A rename didn't occur, but somehow we didn't end up where
* we thought we would. Throw a warning and try again.
*/
- pr_warn("build_path did not end path lookup where "
- "expected, pos is %d\n", pos);
+ pr_warn("build_path did not end path lookup where expected (pos = %d)\n",
+ pos);
goto retry;
}
@@ -2471,7 +2735,8 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
rcu_read_lock();
if (!dir)
dir = d_inode_rcu(dentry->d_parent);
- if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP) {
+ if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP &&
+ !IS_ENCRYPTED(dir)) {
*pino = ceph_ino(dir);
rcu_read_unlock();
*ppath = dentry->d_name.name;
@@ -2539,8 +2804,8 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
return r;
}
-static void encode_timestamp_and_gids(void **p,
- const struct ceph_mds_request *req)
+static void encode_mclientrequest_tail(void **p,
+ const struct ceph_mds_request *req)
{
struct ceph_timespec ts;
int i;
@@ -2548,11 +2813,43 @@ static void encode_timestamp_and_gids(void **p,
ceph_encode_timespec64(&ts, &req->r_stamp);
ceph_encode_copy(p, &ts, sizeof(ts));
- /* gid_list */
+ /* v4: gid_list */
ceph_encode_32(p, req->r_cred->group_info->ngroups);
for (i = 0; i < req->r_cred->group_info->ngroups; i++)
ceph_encode_64(p, from_kgid(&init_user_ns,
req->r_cred->group_info->gid[i]));
+
+ /* v5: altname */
+ ceph_encode_32(p, req->r_altname_len);
+ ceph_encode_copy(p, req->r_altname, req->r_altname_len);
+
+ /* v6: fscrypt_auth and fscrypt_file */
+ if (req->r_fscrypt_auth) {
+ u32 authlen = ceph_fscrypt_auth_len(req->r_fscrypt_auth);
+
+ ceph_encode_32(p, authlen);
+ ceph_encode_copy(p, req->r_fscrypt_auth, authlen);
+ } else {
+ ceph_encode_32(p, 0);
+ }
+ if (test_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags)) {
+ ceph_encode_32(p, sizeof(__le64));
+ ceph_encode_64(p, req->r_fscrypt_file);
+ } else {
+ ceph_encode_32(p, 0);
+ }
+}
+
+static struct ceph_mds_request_head_legacy *
+find_legacy_request_head(void *p, u64 features)
+{
+ bool legacy = !(features & CEPH_FEATURE_FS_BTIME);
+ struct ceph_mds_request_head_old *ohead;
+
+ if (legacy)
+ return (struct ceph_mds_request_head_legacy *)p;
+ ohead = (struct ceph_mds_request_head_old *)p;
+ return (struct ceph_mds_request_head_legacy *)&ohead->oldest_client_tid;
}
/*
@@ -2565,7 +2862,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
int mds = session->s_mds;
struct ceph_mds_client *mdsc = session->s_mdsc;
struct ceph_msg *msg;
- struct ceph_mds_request_head_old *head;
+ struct ceph_mds_request_head_legacy *lhead;
const char *path1 = NULL;
const char *path2 = NULL;
u64 ino1 = 0, ino2 = 0;
@@ -2577,6 +2874,8 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
void *p, *end;
int ret;
bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME);
+ bool old_version = !test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD,
+ &session->s_features);
ret = set_request_path_attr(req->r_inode, req->r_dentry,
req->r_parent, req->r_path1, req->r_ino1.ino,
@@ -2601,12 +2900,32 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
goto out_free1;
}
- len = legacy ? sizeof(*head) : sizeof(struct ceph_mds_request_head);
- len += pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
- sizeof(struct ceph_timespec);
- len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups);
+ req->r_altname = get_fscrypt_altname(req, &req->r_altname_len);
+ if (IS_ERR(req->r_altname)) {
+ msg = ERR_CAST(req->r_altname);
+ req->r_altname = NULL;
+ goto out_free2;
+ }
+
+ /*
+	 * Old MDSes that don't support the 32-bit retry/fwd feature
+	 * copy the raw memory directly when decoding requests, while
+	 * new MDSes decode the head based on its version member, so
+	 * the encoding must stay compatible with both.
+ */
+ if (legacy)
+ len = sizeof(struct ceph_mds_request_head_legacy);
+ else if (old_version)
+ len = sizeof(struct ceph_mds_request_head_old);
+ else
+ len = sizeof(struct ceph_mds_request_head);
- /* calculate (max) length for cap releases */
+ /* filepaths */
+ len += 2 * (1 + sizeof(u32) + sizeof(u64));
+ len += pathlen1 + pathlen2;
+
+ /* cap releases */
len += sizeof(struct ceph_mds_request_release) *
(!!req->r_inode_drop + !!req->r_dentry_drop +
!!req->r_old_inode_drop + !!req->r_old_dentry_drop);
@@ -2616,6 +2935,27 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
if (req->r_old_dentry_drop)
len += pathlen2;
+ /* MClientRequest tail */
+
+ /* req->r_stamp */
+ len += sizeof(struct ceph_timespec);
+
+ /* gid list */
+ len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups);
+
+ /* alternate name */
+ len += sizeof(u32) + req->r_altname_len;
+
+ /* fscrypt_auth */
+ len += sizeof(u32); // fscrypt_auth
+ if (req->r_fscrypt_auth)
+ len += ceph_fscrypt_auth_len(req->r_fscrypt_auth);
+
+ /* fscrypt_file */
+ len += sizeof(u32);
+ if (test_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags))
+ len += sizeof(__le64);
+
msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
if (!msg) {
msg = ERR_PTR(-ENOMEM);
@@ -2624,33 +2964,40 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
msg->hdr.tid = cpu_to_le64(req->r_tid);
+ lhead = find_legacy_request_head(msg->front.iov_base,
+ session->s_con.peer_features);
+
/*
- * The old ceph_mds_request_head didn't contain a version field, and
+ * The ceph_mds_request_head_legacy didn't contain a version field, and
* one was added when we moved the message version from 3->4.
*/
if (legacy) {
msg->hdr.version = cpu_to_le16(3);
- head = msg->front.iov_base;
- p = msg->front.iov_base + sizeof(*head);
- } else {
- struct ceph_mds_request_head *new_head = msg->front.iov_base;
+ p = msg->front.iov_base + sizeof(*lhead);
+ } else if (old_version) {
+ struct ceph_mds_request_head_old *ohead = msg->front.iov_base;
msg->hdr.version = cpu_to_le16(4);
- new_head->version = cpu_to_le16(CEPH_MDS_REQUEST_HEAD_VERSION);
- head = (struct ceph_mds_request_head_old *)&new_head->oldest_client_tid;
- p = msg->front.iov_base + sizeof(*new_head);
+ ohead->version = cpu_to_le16(1);
+ p = msg->front.iov_base + sizeof(*ohead);
+ } else {
+ struct ceph_mds_request_head *nhead = msg->front.iov_base;
+
+ msg->hdr.version = cpu_to_le16(6);
+ nhead->version = cpu_to_le16(CEPH_MDS_REQUEST_HEAD_VERSION);
+ p = msg->front.iov_base + sizeof(*nhead);
}
end = msg->front.iov_base + msg->front.iov_len;
- head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
- head->op = cpu_to_le32(req->r_op);
- head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns,
- req->r_cred->fsuid));
- head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
- req->r_cred->fsgid));
- head->ino = cpu_to_le64(req->r_deleg_ino);
- head->args = req->r_args;
+ lhead->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
+ lhead->op = cpu_to_le32(req->r_op);
+ lhead->caller_uid = cpu_to_le32(from_kuid(&init_user_ns,
+ req->r_cred->fsuid));
+ lhead->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
+ req->r_cred->fsgid));
+ lhead->ino = cpu_to_le64(req->r_deleg_ino);
+ lhead->args = req->r_args;
ceph_encode_filepath(&p, end, ino1, path1);
ceph_encode_filepath(&p, end, ino2, path2);
@@ -2665,15 +3012,23 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
req->r_inode ? req->r_inode : d_inode(req->r_dentry),
mds, req->r_inode_drop, req->r_inode_unless,
req->r_op == CEPH_MDS_OP_READDIR);
- if (req->r_dentry_drop)
- releases += ceph_encode_dentry_release(&p, req->r_dentry,
+ if (req->r_dentry_drop) {
+ ret = ceph_encode_dentry_release(&p, req->r_dentry,
req->r_parent, mds, req->r_dentry_drop,
req->r_dentry_unless);
- if (req->r_old_dentry_drop)
- releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
+ if (ret < 0)
+ goto out_err;
+ releases += ret;
+ }
+ if (req->r_old_dentry_drop) {
+ ret = ceph_encode_dentry_release(&p, req->r_old_dentry,
req->r_old_dentry_dir, mds,
req->r_old_dentry_drop,
req->r_old_dentry_unless);
+ if (ret < 0)
+ goto out_err;
+ releases += ret;
+ }
if (req->r_old_inode_drop)
releases += ceph_encode_inode_release(&p,
d_inode(req->r_old_dentry),
@@ -2684,9 +3039,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
p = msg->front.iov_base + req->r_request_release_offset;
}
- head->num_releases = cpu_to_le16(releases);
+ lhead->num_releases = cpu_to_le16(releases);
- encode_timestamp_and_gids(&p, req);
+ encode_mclientrequest_tail(&p, req);
if (WARN_ON_ONCE(p > end)) {
ceph_msg_put(msg);
@@ -2715,6 +3070,10 @@ out_free1:
ceph_mdsc_free_path((char *)path1, pathlen1);
out:
return msg;
+out_err:
+ ceph_msg_put(msg);
+ msg = ERR_PTR(ret);
+ goto out_free2;
}
/*
@@ -2731,18 +3090,6 @@ static void complete_request(struct ceph_mds_client *mdsc,
complete_all(&req->r_completion);
}
-static struct ceph_mds_request_head_old *
-find_old_request_head(void *p, u64 features)
-{
- bool legacy = !(features & CEPH_FEATURE_FS_BTIME);
- struct ceph_mds_request_head *new_head;
-
- if (legacy)
- return (struct ceph_mds_request_head_old *)p;
- new_head = (struct ceph_mds_request_head *)p;
- return (struct ceph_mds_request_head_old *)&new_head->oldest_client_tid;
-}
-
/*
* called under mdsc->mutex
*/
@@ -2752,29 +3099,28 @@ static int __prepare_send_request(struct ceph_mds_session *session,
{
int mds = session->s_mds;
struct ceph_mds_client *mdsc = session->s_mdsc;
- struct ceph_mds_request_head_old *rhead;
+ struct ceph_mds_request_head_legacy *lhead;
+ struct ceph_mds_request_head *nhead;
struct ceph_msg *msg;
- int flags = 0, max_retry;
+ int flags = 0, old_max_retry;
+ bool old_version = !test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD,
+ &session->s_features);
/*
- * The type of 'r_attempts' in kernel 'ceph_mds_request'
- * is 'int', while in 'ceph_mds_request_head' the type of
- * 'num_retry' is '__u8'. So in case the request retries
- * exceeding 256 times, the MDS will receive a incorrect
- * retry seq.
- *
- * In this case it's ususally a bug in MDS and continue
- * retrying the request makes no sense.
- *
- * In future this could be fixed in ceph code, so avoid
- * using the hardcode here.
+	 * Avoid infinite retrying after overflow. The client keeps
+	 * increasing the retry count; if the MDS is an old version,
+	 * limit the retries to at most 256.
*/
- max_retry = sizeof_field(struct ceph_mds_request_head, num_retry);
- max_retry = 1 << (max_retry * BITS_PER_BYTE);
- if (req->r_attempts >= max_retry) {
- pr_warn_ratelimited("%s request tid %llu seq overflow\n",
- __func__, req->r_tid);
- return -EMULTIHOP;
+ if (req->r_attempts) {
+ old_max_retry = sizeof_field(struct ceph_mds_request_head_old,
+ num_retry);
+ old_max_retry = 1 << (old_max_retry * BITS_PER_BYTE);
+ if ((old_version && req->r_attempts >= old_max_retry) ||
+ ((uint32_t)req->r_attempts >= U32_MAX)) {
+ pr_warn_ratelimited("%s request tid %llu seq overflow\n",
+ __func__, req->r_tid);
+ return -EMULTIHOP;
+ }
}
req->r_attempts++;
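The cap above exists because the legacy wire fields for num_retry/num_fwd are a single byte, so once the in-kernel counter passes 255 the value sent to an old MDS silently wraps. A trivial stand-alone demonstration of the wrap being guarded against (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned char num_retry = 255;	/* width of the legacy __u8 wire field */

	num_retry++;			/* wraps to 0 */
	printf("%u\n", num_retry);	/* prints 0: an old MDS would now see a
					 * smaller retry count than before */
	return 0;
}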
@@ -2800,23 +3146,27 @@ static int __prepare_send_request(struct ceph_mds_session *session,
* d_move mangles the src name.
*/
msg = req->r_request;
- rhead = find_old_request_head(msg->front.iov_base,
- session->s_con.peer_features);
+ lhead = find_legacy_request_head(msg->front.iov_base,
+ session->s_con.peer_features);
- flags = le32_to_cpu(rhead->flags);
+ flags = le32_to_cpu(lhead->flags);
flags |= CEPH_MDS_FLAG_REPLAY;
- rhead->flags = cpu_to_le32(flags);
+ lhead->flags = cpu_to_le32(flags);
if (req->r_target_inode)
- rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
+ lhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
- rhead->num_retry = req->r_attempts - 1;
+ lhead->num_retry = req->r_attempts - 1;
+ if (!old_version) {
+ nhead = (struct ceph_mds_request_head*)msg->front.iov_base;
+ nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1);
+ }
/* remove cap/dentry releases from message */
- rhead->num_releases = 0;
+ lhead->num_releases = 0;
p = msg->front.iov_base + req->r_request_release_offset;
- encode_timestamp_and_gids(&p, req);
+ encode_mclientrequest_tail(&p, req);
msg->front.iov_len = p - msg->front.iov_base;
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
@@ -2834,18 +3184,23 @@ static int __prepare_send_request(struct ceph_mds_session *session,
}
req->r_request = msg;
- rhead = find_old_request_head(msg->front.iov_base,
- session->s_con.peer_features);
- rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
+ lhead = find_legacy_request_head(msg->front.iov_base,
+ session->s_con.peer_features);
+ lhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
flags |= CEPH_MDS_FLAG_REPLAY;
if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags))
flags |= CEPH_MDS_FLAG_ASYNC;
if (req->r_parent)
flags |= CEPH_MDS_FLAG_WANT_DENTRY;
- rhead->flags = cpu_to_le32(flags);
- rhead->num_fwd = req->r_num_fwd;
- rhead->num_retry = req->r_attempts - 1;
+ lhead->flags = cpu_to_le32(flags);
+ lhead->num_fwd = req->r_num_fwd;
+ lhead->num_retry = req->r_attempts - 1;
+ if (!old_version) {
+ nhead = (struct ceph_mds_request_head*)msg->front.iov_base;
+ nhead->ext_num_fwd = cpu_to_le32(req->r_num_fwd);
+ nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1);
+ }
dout(" r_parent = %p\n", req->r_parent);
return 0;
@@ -3348,22 +3703,35 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
}
dout("handle_reply tid %lld result %d\n", tid, result);
- rinfo = &req->r_reply_info;
if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
- err = parse_reply_info(session, msg, rinfo, (u64)-1);
+ err = parse_reply_info(session, msg, req, (u64)-1);
else
- err = parse_reply_info(session, msg, rinfo, session->s_con.peer_features);
+ err = parse_reply_info(session, msg, req,
+ session->s_con.peer_features);
mutex_unlock(&mdsc->mutex);
/* Must find target inode outside of mutexes to avoid deadlocks */
+ rinfo = &req->r_reply_info;
if ((err >= 0) && rinfo->head->is_target) {
- struct inode *in;
+ struct inode *in = xchg(&req->r_new_inode, NULL);
struct ceph_vino tvino = {
.ino = le64_to_cpu(rinfo->targeti.in->ino),
.snap = le64_to_cpu(rinfo->targeti.in->snapid)
};
- in = ceph_get_inode(mdsc->fsc->sb, tvino);
+ /*
+ * If we ended up opening an existing inode, discard
+ * r_new_inode
+ */
+ if (req->r_op == CEPH_MDS_OP_CREATE &&
+ !req->r_reply_info.has_create_ino) {
+ /* This should never happen on an async create */
+ WARN_ON_ONCE(req->r_deleg_ino);
+ iput(in);
+ in = NULL;
+ }
+
+ in = ceph_get_inode(mdsc->fsc->sb, tvino, in);
if (IS_ERR(in)) {
err = PTR_ERR(in);
mutex_lock(&session->s_mutex);
@@ -3406,7 +3774,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
if (err == 0) {
if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
req->r_op == CEPH_MDS_OP_LSSNAP))
- ceph_readdir_prepopulate(req, req->r_session);
+ err = ceph_readdir_prepopulate(req, req->r_session);
}
current->journal_info = NULL;
mutex_unlock(&req->r_fill_mutex);
@@ -3491,33 +3859,21 @@ static void handle_forward(struct ceph_mds_client *mdsc,
if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
dout("forward tid %llu aborted, unregistering\n", tid);
__unregister_request(mdsc, req);
- } else if (fwd_seq <= req->r_num_fwd) {
+ } else if (fwd_seq <= req->r_num_fwd || (uint32_t)fwd_seq >= U32_MAX) {
/*
- * The type of 'num_fwd' in ceph 'MClientRequestForward'
- * is 'int32_t', while in 'ceph_mds_request_head' the
- * type is '__u8'. So in case the request bounces between
- * MDSes exceeding 256 times, the client will get stuck.
- *
- * In this case it's ususally a bug in MDS and continue
- * bouncing the request makes no sense.
+		 * Avoid infinite retrying after overflow.
*
- * In future this could be fixed in ceph code, so avoid
- * using the hardcode here.
+		 * The MDS keeps increasing the fwd count; on the client
+		 * side, a received num_fwd lower than the one saved in the
+		 * request means the MDS is an old version whose 8-bit
+		 * counter has overflowed.
*/
- int max = sizeof_field(struct ceph_mds_request_head, num_fwd);
- max = 1 << (max * BITS_PER_BYTE);
- if (req->r_num_fwd >= max) {
- mutex_lock(&req->r_fill_mutex);
- req->r_err = -EMULTIHOP;
- set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
- mutex_unlock(&req->r_fill_mutex);
- aborted = true;
- pr_warn_ratelimited("forward tid %llu seq overflow\n",
- tid);
- } else {
- dout("forward tid %llu to mds%d - old seq %d <= %d\n",
- tid, next_mds, req->r_num_fwd, fwd_seq);
- }
+ mutex_lock(&req->r_fill_mutex);
+ req->r_err = -EMULTIHOP;
+ set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
+ mutex_unlock(&req->r_fill_mutex);
+ aborted = true;
+ pr_warn_ratelimited("forward tid %llu seq overflow\n", tid);
} else {
/* resend. forward race not possible; mds would drop */
dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
@@ -4550,6 +4906,9 @@ static void handle_lease(struct ceph_mds_client *mdsc,
dout("handle_lease from mds%d\n", mds);
+ if (!ceph_inc_mds_stopping_blocker(mdsc, session))
+ return;
+
/* decode */
if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
goto bad;
@@ -4568,8 +4927,6 @@ static void handle_lease(struct ceph_mds_client *mdsc,
dname.len, dname.name);
mutex_lock(&session->s_mutex);
- inc_session_sequence(session);
-
if (!inode) {
dout("handle_lease no inode %llx\n", vino.ino);
goto release;
@@ -4631,9 +4988,13 @@ release:
out:
mutex_unlock(&session->s_mutex);
iput(inode);
+
+ ceph_dec_mds_stopping_blocker(mdsc);
return;
bad:
+ ceph_dec_mds_stopping_blocker(mdsc);
+
pr_err("corrupt lease message\n");
ceph_msg_dump(msg);
}
@@ -4829,6 +5190,9 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
}
init_completion(&mdsc->safe_umount_waiters);
+ spin_lock_init(&mdsc->stopping_lock);
+ atomic_set(&mdsc->stopping_blockers, 0);
+ init_completion(&mdsc->stopping_waiter);
init_waitqueue_head(&mdsc->session_close_wq);
INIT_LIST_HEAD(&mdsc->waiting_for_map);
mdsc->quotarealms_inodes = RB_ROOT;
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 86d2965e68a1..5a3714bdd64a 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -32,8 +32,9 @@ enum ceph_feature_type {
CEPHFS_FEATURE_ALTERNATE_NAME,
CEPHFS_FEATURE_NOTIFY_SESSION_STATE,
CEPHFS_FEATURE_OP_GETVXATTR,
+ CEPHFS_FEATURE_32BITS_RETRY_FWD,
- CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_OP_GETVXATTR,
+ CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_32BITS_RETRY_FWD,
};
#define CEPHFS_FEATURES_CLIENT_SUPPORTED { \
@@ -44,8 +45,10 @@ enum ceph_feature_type {
CEPHFS_FEATURE_MULTI_RECONNECT, \
CEPHFS_FEATURE_DELEG_INO, \
CEPHFS_FEATURE_METRIC_COLLECT, \
+ CEPHFS_FEATURE_ALTERNATE_NAME, \
CEPHFS_FEATURE_NOTIFY_SESSION_STATE, \
CEPHFS_FEATURE_OP_GETVXATTR, \
+ CEPHFS_FEATURE_32BITS_RETRY_FWD, \
}
/*
@@ -86,13 +89,19 @@ struct ceph_mds_reply_info_in {
s32 dir_pin;
struct ceph_timespec btime;
struct ceph_timespec snap_btime;
+ u8 *fscrypt_auth;
+ u8 *fscrypt_file;
+ u32 fscrypt_auth_len;
+ u32 fscrypt_file_len;
u64 rsnaps;
u64 change_attr;
};
struct ceph_mds_reply_dir_entry {
+ bool is_nokey;
char *name;
u32 name_len;
+ u32 raw_hash;
struct ceph_mds_reply_lease *lease;
struct ceph_mds_reply_info_in inode;
loff_t offset;
@@ -116,7 +125,9 @@ struct ceph_mds_reply_info_parsed {
struct ceph_mds_reply_info_in diri, targeti;
struct ceph_mds_reply_dirfrag *dirfrag;
char *dname;
+ u8 *altname;
u32 dname_len;
+ u32 altname_len;
struct ceph_mds_reply_lease *dlease;
struct ceph_mds_reply_xattr xattr_info;
@@ -263,6 +274,7 @@ struct ceph_mds_request {
struct inode *r_parent; /* parent dir inode */
struct inode *r_target_inode; /* resulting inode */
+ struct inode *r_new_inode; /* new inode (for creates) */
#define CEPH_MDS_R_DIRECT_IS_HASH (1) /* r_direct_hash is valid */
#define CEPH_MDS_R_ABORTED (2) /* call was aborted */
@@ -272,11 +284,19 @@ struct ceph_mds_request {
#define CEPH_MDS_R_DID_PREPOPULATE (6) /* prepopulated readdir */
#define CEPH_MDS_R_PARENT_LOCKED (7) /* is r_parent->i_rwsem wlocked? */
#define CEPH_MDS_R_ASYNC (8) /* async request */
+#define CEPH_MDS_R_FSCRYPT_FILE (9) /* must marshal fscrypt_file field */
unsigned long r_req_flags;
struct mutex r_fill_mutex;
union ceph_mds_request_args r_args;
+
+ struct ceph_fscrypt_auth *r_fscrypt_auth;
+ u64 r_fscrypt_file;
+
+ u8 *r_altname; /* fscrypt binary crypttext for long filenames */
+ u32 r_altname_len; /* length of r_altname */
+
int r_fmode; /* file mode, if expecting cap */
int r_request_release_offset;
const struct cred *r_cred;
@@ -381,8 +401,9 @@ struct cap_wait {
};
enum {
- CEPH_MDSC_STOPPING_BEGIN = 1,
- CEPH_MDSC_STOPPING_FLUSHED = 2,
+ CEPH_MDSC_STOPPING_BEGIN = 1,
+ CEPH_MDSC_STOPPING_FLUSHING = 2,
+ CEPH_MDSC_STOPPING_FLUSHED = 3,
};
/*
@@ -401,7 +422,11 @@ struct ceph_mds_client {
struct ceph_mds_session **sessions; /* NULL for mds if no session */
atomic_t num_sessions;
int max_sessions; /* len of sessions array */
- int stopping; /* true if shutting down */
+
+	spinlock_t stopping_lock;  /* protect the stopping state */
+ int stopping; /* the stage of shutting down */
+ atomic_t stopping_blockers;
+ struct completion stopping_waiter;
atomic64_t quotarealms_count; /* # realms with quota */
/*
@@ -557,7 +582,7 @@ static inline void ceph_mdsc_free_path(char *path, int len)
}
extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
- int stop_on_nosnap);
+ int for_wire);
extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 64592adfe48f..f7fcf7f08ec6 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -47,25 +47,23 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
struct inode *inode;
struct ceph_inode_info *ci;
+ if (!ceph_inc_mds_stopping_blocker(mdsc, session))
+ return;
+
if (msg->front.iov_len < sizeof(*h)) {
pr_err("%s corrupt message mds%d len %d\n", __func__,
session->s_mds, (int)msg->front.iov_len);
ceph_msg_dump(msg);
- return;
+ goto out;
}
- /* increment msg sequence number */
- mutex_lock(&session->s_mutex);
- inc_session_sequence(session);
- mutex_unlock(&session->s_mutex);
-
/* lookup inode */
vino.ino = le64_to_cpu(h->ino);
vino.snap = CEPH_NOSNAP;
inode = ceph_find_inode(sb, vino);
if (!inode) {
pr_warn("Failed to find inode %llu\n", vino.ino);
- return;
+ goto out;
}
ci = ceph_inode(inode);
@@ -78,6 +76,8 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
spin_unlock(&ci->i_ceph_lock);
iput(inode);
+out:
+ ceph_dec_mds_stopping_blocker(mdsc);
}
static struct ceph_quotarealm_inode *
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index c9920ade15f5..813f21add992 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -1015,6 +1015,9 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
int locked_rwsem = 0;
bool close_sessions = false;
+ if (!ceph_inc_mds_stopping_blocker(mdsc, session))
+ return;
+
/* decode */
if (msg->front.iov_len < sizeof(*h))
goto bad;
@@ -1030,10 +1033,6 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
dout("%s from mds%d op %s split %llx tracelen %d\n", __func__,
mds, ceph_snap_op_name(op), split, trace_len);
- mutex_lock(&session->s_mutex);
- inc_session_sequence(session);
- mutex_unlock(&session->s_mutex);
-
down_write(&mdsc->snap_rwsem);
locked_rwsem = 1;
@@ -1151,6 +1150,7 @@ skip_inode:
up_write(&mdsc->snap_rwsem);
flush_snaps(mdsc);
+ ceph_dec_mds_stopping_blocker(mdsc);
return;
bad:
@@ -1160,6 +1160,8 @@ out:
if (locked_rwsem)
up_write(&mdsc->snap_rwsem);
+ ceph_dec_mds_stopping_blocker(mdsc);
+
if (close_sessions)
ceph_mdsc_close_sessions(mdsc);
return;
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index a5f52013314d..2d7f5a8d4a92 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -20,6 +20,7 @@
#include "super.h"
#include "mds_client.h"
#include "cache.h"
+#include "crypto.h"
#include <linux/ceph/ceph_features.h>
#include <linux/ceph/decode.h>
@@ -46,6 +47,7 @@ static void ceph_put_super(struct super_block *s)
struct ceph_fs_client *fsc = ceph_sb_to_client(s);
dout("put_super\n");
+ ceph_fscrypt_free_dummy_policy(fsc);
ceph_mdsc_close_sessions(fsc->mdsc);
}
@@ -151,6 +153,7 @@ enum {
Opt_recover_session,
Opt_source,
Opt_mon_addr,
+ Opt_test_dummy_encryption,
/* string args above */
Opt_dirstat,
Opt_rbytes,
@@ -165,6 +168,7 @@ enum {
Opt_copyfrom,
Opt_wsync,
Opt_pagecache,
+ Opt_sparseread,
};
enum ceph_recover_session_mode {
@@ -192,6 +196,7 @@ static const struct fs_parameter_spec ceph_mount_parameters[] = {
fsparam_string ("fsc", Opt_fscache), // fsc=...
fsparam_flag_no ("ino32", Opt_ino32),
fsparam_string ("mds_namespace", Opt_mds_namespace),
+ fsparam_string ("mon_addr", Opt_mon_addr),
fsparam_flag_no ("poolperm", Opt_poolperm),
fsparam_flag_no ("quotadf", Opt_quotadf),
fsparam_u32 ("rasize", Opt_rasize),
@@ -203,10 +208,12 @@ static const struct fs_parameter_spec ceph_mount_parameters[] = {
fsparam_u32 ("rsize", Opt_rsize),
fsparam_string ("snapdirname", Opt_snapdirname),
fsparam_string ("source", Opt_source),
- fsparam_string ("mon_addr", Opt_mon_addr),
+ fsparam_flag ("test_dummy_encryption", Opt_test_dummy_encryption),
+ fsparam_string ("test_dummy_encryption", Opt_test_dummy_encryption),
fsparam_u32 ("wsize", Opt_wsize),
fsparam_flag_no ("wsync", Opt_wsync),
fsparam_flag_no ("pagecache", Opt_pagecache),
+ fsparam_flag_no ("sparseread", Opt_sparseread),
{}
};
@@ -576,6 +583,29 @@ static int ceph_parse_mount_param(struct fs_context *fc,
else
fsopt->flags &= ~CEPH_MOUNT_OPT_NOPAGECACHE;
break;
+ case Opt_sparseread:
+ if (result.negated)
+ fsopt->flags &= ~CEPH_MOUNT_OPT_SPARSEREAD;
+ else
+ fsopt->flags |= CEPH_MOUNT_OPT_SPARSEREAD;
+ break;
+ case Opt_test_dummy_encryption:
+#ifdef CONFIG_FS_ENCRYPTION
+ fscrypt_free_dummy_policy(&fsopt->dummy_enc_policy);
+ ret = fscrypt_parse_test_dummy_encryption(param,
+ &fsopt->dummy_enc_policy);
+ if (ret == -EINVAL) {
+ warnfc(fc, "Value of option \"%s\" is unrecognized",
+ param->key);
+ } else if (ret == -EEXIST) {
+ warnfc(fc, "Conflicting test_dummy_encryption options");
+ ret = -EINVAL;
+ }
+#else
+ warnfc(fc,
+ "FS encryption not supported: test_dummy_encryption mount option ignored");
+#endif
+ break;
default:
BUG();
}
@@ -596,6 +626,7 @@ static void destroy_mount_options(struct ceph_mount_options *args)
kfree(args->server_path);
kfree(args->fscache_uniq);
kfree(args->mon_addr);
+ fscrypt_free_dummy_policy(&args->dummy_enc_policy);
kfree(args);
}
@@ -710,9 +741,12 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
if (!(fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS))
seq_puts(m, ",wsync");
-
if (fsopt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
seq_puts(m, ",nopagecache");
+ if (fsopt->flags & CEPH_MOUNT_OPT_SPARSEREAD)
+ seq_puts(m, ",sparseread");
+
+ fscrypt_show_test_dummy_encryption(m, ',', root->d_sb);
if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
seq_printf(m, ",wsize=%u", fsopt->wsize);
@@ -1052,6 +1086,50 @@ out:
return root;
}
+#ifdef CONFIG_FS_ENCRYPTION
+static int ceph_apply_test_dummy_encryption(struct super_block *sb,
+ struct fs_context *fc,
+ struct ceph_mount_options *fsopt)
+{
+ struct ceph_fs_client *fsc = sb->s_fs_info;
+
+ if (!fscrypt_is_dummy_policy_set(&fsopt->dummy_enc_policy))
+ return 0;
+
+ /* No changing encryption context on remount. */
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE &&
+ !fscrypt_is_dummy_policy_set(&fsc->fsc_dummy_enc_policy)) {
+ if (fscrypt_dummy_policies_equal(&fsopt->dummy_enc_policy,
+ &fsc->fsc_dummy_enc_policy))
+ return 0;
+ errorfc(fc, "Can't set test_dummy_encryption on remount");
+ return -EINVAL;
+ }
+
+ /* Also make sure fsopt doesn't contain a conflicting value. */
+ if (fscrypt_is_dummy_policy_set(&fsc->fsc_dummy_enc_policy)) {
+ if (fscrypt_dummy_policies_equal(&fsopt->dummy_enc_policy,
+ &fsc->fsc_dummy_enc_policy))
+ return 0;
+ errorfc(fc, "Conflicting test_dummy_encryption options");
+ return -EINVAL;
+ }
+
+ fsc->fsc_dummy_enc_policy = fsopt->dummy_enc_policy;
+ memset(&fsopt->dummy_enc_policy, 0, sizeof(fsopt->dummy_enc_policy));
+
+ warnfc(fc, "test_dummy_encryption mode enabled");
+ return 0;
+}
+#else
+static int ceph_apply_test_dummy_encryption(struct super_block *sb,
+ struct fs_context *fc,
+ struct ceph_mount_options *fsopt)
+{
+ return 0;
+}
+#endif
+
/*
* mount: join the ceph cluster, and open root directory.
*/
@@ -1080,6 +1158,11 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
goto out;
}
+ err = ceph_apply_test_dummy_encryption(fsc->sb, fc,
+ fsc->mount_options);
+ if (err)
+ goto out;
+
dout("mount opening path '%s'\n", path);
ceph_fs_debugfs_init(fsc);
@@ -1101,6 +1184,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
out:
mutex_unlock(&fsc->client->mount_mutex);
+ ceph_fscrypt_free_dummy_policy(fsc);
return ERR_PTR(err);
}
@@ -1126,6 +1210,8 @@ static int ceph_set_super(struct super_block *s, struct fs_context *fc)
s->s_time_max = U32_MAX;
s->s_flags |= SB_NODIRATIME | SB_NOATIME;
+ ceph_fscrypt_set_ops(s);
+
ret = set_anon_super_fc(s, fc);
if (ret != 0)
fsc->sb = NULL;
@@ -1287,15 +1373,26 @@ static void ceph_free_fc(struct fs_context *fc)
static int ceph_reconfigure_fc(struct fs_context *fc)
{
+ int err;
struct ceph_parse_opts_ctx *pctx = fc->fs_private;
struct ceph_mount_options *fsopt = pctx->opts;
- struct ceph_fs_client *fsc = ceph_sb_to_client(fc->root->d_sb);
+ struct super_block *sb = fc->root->d_sb;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+
+ err = ceph_apply_test_dummy_encryption(sb, fc, fsopt);
+ if (err)
+ return err;
if (fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
ceph_set_mount_opt(fsc, ASYNC_DIROPS);
else
ceph_clear_mount_opt(fsc, ASYNC_DIROPS);
+ if (fsopt->flags & CEPH_MOUNT_OPT_SPARSEREAD)
+ ceph_set_mount_opt(fsc, SPARSEREAD);
+ else
+ ceph_clear_mount_opt(fsc, SPARSEREAD);
+
if (strcmp_null(fsc->mount_options->mon_addr, fsopt->mon_addr)) {
kfree(fsc->mount_options->mon_addr);
fsc->mount_options->mon_addr = fsopt->mon_addr;
@@ -1303,7 +1400,7 @@ static int ceph_reconfigure_fc(struct fs_context *fc)
pr_notice("ceph: monitor addresses recorded, but not used for reconnection");
}
- sync_filesystem(fc->root->d_sb);
+ sync_filesystem(sb);
return 0;
}
@@ -1365,25 +1462,101 @@ nomem:
return -ENOMEM;
}
+/*
+ * Return true if the blocker counter was successfully increased, or
+ * false if the mdsc has already entered the flushing stage of shutdown.
+ */
+static bool __inc_stopping_blocker(struct ceph_mds_client *mdsc)
+{
+ spin_lock(&mdsc->stopping_lock);
+ if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHING) {
+ spin_unlock(&mdsc->stopping_lock);
+ return false;
+ }
+ atomic_inc(&mdsc->stopping_blockers);
+ spin_unlock(&mdsc->stopping_lock);
+ return true;
+}
+
+static void __dec_stopping_blocker(struct ceph_mds_client *mdsc)
+{
+ spin_lock(&mdsc->stopping_lock);
+ if (!atomic_dec_return(&mdsc->stopping_blockers) &&
+ mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHING)
+ complete_all(&mdsc->stopping_waiter);
+ spin_unlock(&mdsc->stopping_lock);
+}
+
+/* For metadata IO requests */
+bool ceph_inc_mds_stopping_blocker(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
+{
+ mutex_lock(&session->s_mutex);
+ inc_session_sequence(session);
+ mutex_unlock(&session->s_mutex);
+
+ return __inc_stopping_blocker(mdsc);
+}
+
+void ceph_dec_mds_stopping_blocker(struct ceph_mds_client *mdsc)
+{
+ __dec_stopping_blocker(mdsc);
+}
+
+/* For data IO requests */
+bool ceph_inc_osd_stopping_blocker(struct ceph_mds_client *mdsc)
+{
+ return __inc_stopping_blocker(mdsc);
+}
+
+void ceph_dec_osd_stopping_blocker(struct ceph_mds_client *mdsc)
+{
+ __dec_stopping_blocker(mdsc);
+}
+
static void ceph_kill_sb(struct super_block *s)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(s);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ bool wait;
dout("kill_sb %p\n", s);
- ceph_mdsc_pre_umount(fsc->mdsc);
+ ceph_mdsc_pre_umount(mdsc);
flush_fs_workqueues(fsc);
/*
* Though the kill_anon_super() will finally trigger the
- * sync_filesystem() anyway, we still need to do it here
- * and then bump the stage of shutdown to stop the work
- * queue as earlier as possible.
+	 * sync_filesystem() anyway, we still need to do it here and
+	 * then bump the stage of shutdown. This lets us drop any
+	 * further messages from the MDSs, which would only bump the
+	 * inodes' i_count reference counters without serving any
+	 * useful purpose any more.
+	 *
+	 * Without this, evicting the inodes may fail in
+	 * kill_anon_super(), which triggers a warning when destroying
+	 * the fscrypt keyring and can then crash the ceph module when
+	 * a later iput() tries to evict the inodes.
*/
sync_filesystem(s);
- fsc->mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;
+ spin_lock(&mdsc->stopping_lock);
+ mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHING;
+ wait = !!atomic_read(&mdsc->stopping_blockers);
+ spin_unlock(&mdsc->stopping_lock);
+
+ if (wait && atomic_read(&mdsc->stopping_blockers)) {
+ long timeleft = wait_for_completion_killable_timeout(
+ &mdsc->stopping_waiter,
+ fsc->client->options->mount_timeout);
+ if (!timeleft) /* timed out */
+ pr_warn("umount timed out, %ld\n", timeleft);
+ else if (timeleft < 0) /* killed */
+ pr_warn("umount was killed, %ld\n", timeleft);
+ }
+ mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;
kill_anon_super(s);
fsc->client->extra_mon_dispatch = NULL;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 3bfddf34d488..51c7f2b14f6f 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -22,6 +22,7 @@
#include <linux/hashtable.h>
#include <linux/ceph/libceph.h>
+#include "crypto.h"
/* large granularity for statfs utilization stats to facilitate
* large volume sizes on 32-bit machines. */
@@ -42,6 +43,7 @@
#define CEPH_MOUNT_OPT_NOCOPYFROM (1<<14) /* don't use RADOS 'copy-from' op */
#define CEPH_MOUNT_OPT_ASYNC_DIROPS (1<<15) /* allow async directory ops */
#define CEPH_MOUNT_OPT_NOPAGECACHE (1<<16) /* bypass pagecache altogether */
+#define CEPH_MOUNT_OPT_SPARSEREAD (1<<17) /* always do sparse reads */
#define CEPH_MOUNT_OPT_DEFAULT \
(CEPH_MOUNT_OPT_DCACHE | \
@@ -98,6 +100,7 @@ struct ceph_mount_options {
char *server_path; /* default NULL (means "/") */
char *fscache_uniq; /* default NULL */
char *mon_addr;
+ struct fscrypt_dummy_policy dummy_enc_policy;
};
/* mount state */
@@ -154,9 +157,11 @@ struct ceph_fs_client {
#ifdef CONFIG_CEPH_FSCACHE
struct fscache_volume *fscache;
#endif
+#ifdef CONFIG_FS_ENCRYPTION
+ struct fscrypt_dummy_policy fsc_dummy_enc_policy;
+#endif
};
-
/*
* File i/o capability. This tracks shared state with the metadata
* server that allows us to cache or writeback attributes or to read
@@ -419,6 +424,11 @@ struct ceph_inode_info {
u32 i_truncate_seq; /* last truncate to smaller size */
u64 i_truncate_size; /* and the size we last truncated down to */
int i_truncate_pending; /* still need to call vmtruncate */
+ /*
+	 * In the non-fscrypt case this equals i_truncate_size; otherwise
+	 * it equals fscrypt_file_size.
+ */
+ u64 i_truncate_pagecache_size;
u64 i_max_size; /* max file size authorized by mds */
u64 i_reported_size; /* (max_)size reported to or requested of mds */
@@ -449,6 +459,13 @@ struct ceph_inode_info {
struct work_struct i_work;
unsigned long i_work_mask;
+
+#ifdef CONFIG_FS_ENCRYPTION
+ u32 fscrypt_auth_len;
+ u32 fscrypt_file_len;
+ u8 *fscrypt_auth;
+ u8 *fscrypt_file;
+#endif
};
struct ceph_netfs_request_data {
@@ -998,6 +1015,7 @@ static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci)
/* inode.c */
struct ceph_mds_reply_info_in;
struct ceph_mds_reply_dirfrag;
+struct ceph_acl_sec_ctx;
extern const struct inode_operations ceph_file_iops;
@@ -1005,8 +1023,14 @@ extern struct inode *ceph_alloc_inode(struct super_block *sb);
extern void ceph_evict_inode(struct inode *inode);
extern void ceph_free_inode(struct inode *inode);
+struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
+ umode_t *mode, struct ceph_acl_sec_ctx *as_ctx);
+void ceph_as_ctx_to_req(struct ceph_mds_request *req,
+ struct ceph_acl_sec_ctx *as_ctx);
+
extern struct inode *ceph_get_inode(struct super_block *sb,
- struct ceph_vino vino);
+ struct ceph_vino vino,
+ struct inode *newino);
extern struct inode *ceph_get_snapdir(struct inode *parent);
extern int ceph_fill_file_size(struct inode *inode, int issued,
u32 truncate_seq, u64 truncate_size, u64 size);
@@ -1065,7 +1089,13 @@ static inline int ceph_do_getattr(struct inode *inode, int mask, bool force)
}
extern int ceph_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask);
-extern int __ceph_setattr(struct inode *inode, struct iattr *attr);
+
+struct ceph_iattr {
+ struct ceph_fscrypt_auth *fscrypt_auth;
+};
+
+extern int __ceph_setattr(struct inode *inode, struct iattr *attr,
+ struct ceph_iattr *cia);
extern int ceph_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr);
extern int ceph_getattr(struct mnt_idmap *idmap,
@@ -1100,6 +1130,9 @@ struct ceph_acl_sec_ctx {
void *sec_ctx;
u32 sec_ctxlen;
#endif
+#ifdef CONFIG_FS_ENCRYPTION
+ struct ceph_fscrypt_auth *fscrypt_auth;
+#endif
struct ceph_pagelist *pagelist;
};
@@ -1237,6 +1270,8 @@ extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
struct inode *dir,
int mds, int drop, int unless);
+extern int __ceph_get_caps(struct inode *inode, struct ceph_file_info *fi,
+ int need, int want, loff_t endoff, int *got);
extern int ceph_get_caps(struct file *filp, int need, int want,
loff_t endoff, int *got);
extern int ceph_try_get_caps(struct inode *inode,
@@ -1272,6 +1307,9 @@ extern int ceph_renew_caps(struct inode *inode, int fmode);
extern int ceph_open(struct inode *inode, struct file *file);
extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags, umode_t mode);
+extern ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
+ struct iov_iter *to, int *retry_op,
+ u64 *last_objver);
extern int ceph_release(struct inode *inode, struct file *filp);
extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
char *data, size_t len);
@@ -1375,4 +1413,9 @@ extern bool ceph_quota_update_statfs(struct ceph_fs_client *fsc,
struct kstatfs *buf);
extern void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc);
+bool ceph_inc_mds_stopping_blocker(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session);
+void ceph_dec_mds_stopping_blocker(struct ceph_mds_client *mdsc);
+bool ceph_inc_osd_stopping_blocker(struct ceph_mds_client *mdsc);
+void ceph_dec_osd_stopping_blocker(struct ceph_mds_client *mdsc);
#endif /* _FS_CEPH_SUPER_H */
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 1cbd84cc82a8..0deae4a0f5f1 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -352,6 +352,24 @@ static ssize_t ceph_vxattrcb_auth_mds(struct ceph_inode_info *ci,
return ret;
}
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+static bool ceph_vxattrcb_fscrypt_auth_exists(struct ceph_inode_info *ci)
+{
+ return ci->fscrypt_auth_len;
+}
+
+static ssize_t ceph_vxattrcb_fscrypt_auth(struct ceph_inode_info *ci,
+ char *val, size_t size)
+{
+ if (size) {
+ if (size < ci->fscrypt_auth_len)
+ return -ERANGE;
+ memcpy(val, ci->fscrypt_auth, ci->fscrypt_auth_len);
+ }
+ return ci->fscrypt_auth_len;
+}
+#endif /* CONFIG_FS_ENCRYPTION */
+
#define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name
#define CEPH_XATTR_NAME2(_type, _name, _name2) \
XATTR_CEPH_PREFIX #_type "." #_name "." #_name2
@@ -500,6 +518,15 @@ static struct ceph_vxattr ceph_common_vxattrs[] = {
.exists_cb = NULL,
.flags = VXATTR_FLAG_READONLY,
},
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
+ {
+ .name = "ceph.fscrypt.auth",
+ .name_size = sizeof("ceph.fscrypt.auth"),
+ .getxattr_cb = ceph_vxattrcb_fscrypt_auth,
+ .exists_cb = ceph_vxattrcb_fscrypt_auth_exists,
+ .flags = VXATTR_FLAG_READONLY,
+ },
+#endif /* CONFIG_FS_ENCRYPTION */
{ .name = NULL, 0 } /* Required table terminator */
};
@@ -1408,6 +1435,9 @@ void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx)
#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
security_release_secctx(as_ctx->sec_ctx, as_ctx->sec_ctxlen);
#endif
+#ifdef CONFIG_FS_ENCRYPTION
+ kfree(as_ctx->fscrypt_auth);
+#endif
if (as_ctx->pagelist)
ceph_pagelist_release(as_ctx->pagelist);
}
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index b7711888dd17..87b3753aa4b1 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -904,8 +904,52 @@ EXPORT_SYMBOL_GPL(debugfs_create_str);
static ssize_t debugfs_write_file_str(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
- /* This is really only for read-only strings */
- return -EINVAL;
+ struct dentry *dentry = F_DENTRY(file);
+ char *old, *new = NULL;
+ int pos = *ppos;
+ int r;
+
+ r = debugfs_file_get(dentry);
+ if (unlikely(r))
+ return r;
+
+ old = *(char **)file->private_data;
+
+ /* only allow strict concatenation */
+ r = -EINVAL;
+ if (pos && pos != strlen(old))
+ goto error;
+
+ r = -E2BIG;
+ if (pos + count + 1 > PAGE_SIZE)
+ goto error;
+
+ r = -ENOMEM;
+ new = kmalloc(pos + count + 1, GFP_KERNEL);
+ if (!new)
+ goto error;
+
+ if (pos)
+ memcpy(new, old, pos);
+
+ r = -EFAULT;
+ if (copy_from_user(new + pos, user_buf, count))
+ goto error;
+
+ new[pos + count] = '\0';
+ strim(new);
+
+ rcu_assign_pointer(*(char **)file->private_data, new);
+ synchronize_rcu();
+ kfree(old);
+
+ debugfs_file_put(dentry);
+ return count;
+
+error:
+ kfree(new);
+ debugfs_file_put(dentry);
+ return r;
}
static const struct file_operations fops_str = {
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index d1dbe47c7975..c20704aa21b3 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -386,6 +386,7 @@ static int export_encode_fh(struct inode *inode, struct fid *fid,
* @inode: the object to encode
* @fid: where to store the file handle fragment
* @max_len: maximum length to store there
+ * @parent: parent directory inode, if wanted
* @flags: properties of the requested file handle
*
* Returns an enum fid_type or a negative errno.
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 8fd3b7f9fb88..b0597a539fc5 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -1701,9 +1701,9 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
}
f2fs_restore_inmem_curseg(sbi);
+ stat_inc_cp_count(sbi);
stop:
unblock_operations(sbi);
- stat_inc_cp_count(sbi->stat_info);
if (cpc->reason & CP_RECOVERY)
f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 0f7df9c11af3..d820801f473e 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -649,13 +649,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
goto destroy_compress_ctx;
}
- for (i = 0; i < cc->nr_cpages; i++) {
+ for (i = 0; i < cc->nr_cpages; i++)
cc->cpages[i] = f2fs_compress_alloc_page();
- if (!cc->cpages[i]) {
- ret = -ENOMEM;
- goto out_free_cpages;
- }
- }
cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
if (!cc->rbuf) {
@@ -1574,8 +1569,6 @@ static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
}
dic->tpages[i] = f2fs_compress_alloc_page();
- if (!dic->tpages[i])
- return -ENOMEM;
}
dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
@@ -1656,11 +1649,6 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
struct page *page;
page = f2fs_compress_alloc_page();
- if (!page) {
- ret = -ENOMEM;
- goto out_free;
- }
-
f2fs_set_compressed_page(page, cc->inode,
start_idx + i + 1, dic);
dic->cpages[i] = page;
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 5882afe71d82..916e317ac925 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1167,6 +1167,9 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
f2fs_wait_on_block_writeback(inode, blkaddr);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+ iostat_update_and_unbind_ctx(bio);
+ if (bio->bi_private)
+ mempool_free(bio->bi_private, bio_post_read_ctx_pool);
bio_put(bio);
return -EFAULT;
}
@@ -1389,18 +1392,14 @@ struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
-repeat:
+
page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
if (IS_ERR(page))
return page;
/* wait for read completion */
lock_page(page);
- if (unlikely(page->mapping != mapping)) {
- f2fs_put_page(page, 1);
- goto repeat;
- }
- if (unlikely(!PageUptodate(page))) {
+ if (unlikely(page->mapping != mapping || !PageUptodate(page))) {
f2fs_put_page(page, 1);
return ERR_PTR(-EIO);
}
@@ -3236,8 +3235,7 @@ result:
}
goto next;
}
- done_index = folio->index +
- folio_nr_pages(folio);
+ done_index = folio_next_index(folio);
done = 1;
break;
}
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 61c35b59126e..fdbf994f1271 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -215,6 +215,9 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->valid_blks[type] += blks;
}
+ for (i = 0; i < MAX_CALL_TYPE; i++)
+ si->cp_call_count[i] = atomic_read(&sbi->cp_call_count[i]);
+
for (i = 0; i < 2; i++) {
si->segment_count[i] = sbi->segment_count[i];
si->block_count[i] = sbi->block_count[i];
@@ -497,7 +500,9 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, " - Prefree: %d\n - Free: %d (%d)\n\n",
si->prefree_count, si->free_segs, si->free_secs);
seq_printf(s, "CP calls: %d (BG: %d)\n",
- si->cp_count, si->bg_cp_count);
+ si->cp_call_count[TOTAL_CALL],
+ si->cp_call_count[BACKGROUND]);
+ seq_printf(s, "CP count: %d\n", si->cp_count);
seq_printf(s, " - cp blocks : %u\n", si->meta_count[META_CP]);
seq_printf(s, " - sit blocks : %u\n",
si->meta_count[META_SIT]);
@@ -511,12 +516,24 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, " - Total : %4d\n", si->nr_total_ckpt);
seq_printf(s, " - Cur time : %4d(ms)\n", si->cur_ckpt_time);
seq_printf(s, " - Peak time : %4d(ms)\n", si->peak_ckpt_time);
- seq_printf(s, "GC calls: %d (BG: %d)\n",
- si->call_count, si->bg_gc);
- seq_printf(s, " - data segments : %d (%d)\n",
- si->data_segs, si->bg_data_segs);
- seq_printf(s, " - node segments : %d (%d)\n",
- si->node_segs, si->bg_node_segs);
+ seq_printf(s, "GC calls: %d (gc_thread: %d)\n",
+ si->gc_call_count[BACKGROUND] +
+ si->gc_call_count[FOREGROUND],
+ si->gc_call_count[BACKGROUND]);
+ if (__is_large_section(sbi)) {
+ seq_printf(s, " - data sections : %d (BG: %d)\n",
+ si->gc_secs[DATA][BG_GC] + si->gc_secs[DATA][FG_GC],
+ si->gc_secs[DATA][BG_GC]);
+ seq_printf(s, " - node sections : %d (BG: %d)\n",
+ si->gc_secs[NODE][BG_GC] + si->gc_secs[NODE][FG_GC],
+ si->gc_secs[NODE][BG_GC]);
+ }
+ seq_printf(s, " - data segments : %d (BG: %d)\n",
+ si->gc_segs[DATA][BG_GC] + si->gc_segs[DATA][FG_GC],
+ si->gc_segs[DATA][BG_GC]);
+ seq_printf(s, " - node segments : %d (BG: %d)\n",
+ si->gc_segs[NODE][BG_GC] + si->gc_segs[NODE][FG_GC],
+ si->gc_segs[NODE][BG_GC]);
seq_puts(s, " - Reclaimed segs :\n");
seq_printf(s, " - Normal : %d\n", sbi->gc_reclaimed_segs[GC_NORMAL]);
seq_printf(s, " - Idle CB : %d\n", sbi->gc_reclaimed_segs[GC_IDLE_CB]);
@@ -687,6 +704,8 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
atomic_set(&sbi->inplace_count, 0);
for (i = META_CP; i < META_MAX; i++)
atomic_set(&sbi->meta_count[i], 0);
+ for (i = 0; i < MAX_CALL_TYPE; i++)
+ atomic_set(&sbi->cp_call_count[i], 0);
atomic_set(&sbi->max_aw_cnt, 0);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 613132339d72..6d688e42d89c 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1383,6 +1383,13 @@ enum errors_option {
MOUNT_ERRORS_PANIC, /* panic on errors */
};
+enum {
+ BACKGROUND,
+ FOREGROUND,
+ MAX_CALL_TYPE,
+ TOTAL_CALL = FOREGROUND,
+};
+
static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);
@@ -1695,6 +1702,7 @@ struct f2fs_sb_info {
unsigned int io_skip_bggc; /* skip background gc for in-flight IO */
unsigned int other_skip_bggc; /* skip background gc for other reasons */
unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
+ atomic_t cp_call_count[MAX_CALL_TYPE]; /* # of cp call */
#endif
spinlock_t stat_lock; /* lock for stat operations */
@@ -2114,15 +2122,6 @@ static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
return down_read_trylock(&sem->internal_rwsem);
}
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
-{
- down_read_nested(&sem->internal_rwsem, subclass);
-}
-#else
-#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
-#endif
-
static inline void f2fs_up_read(struct f2fs_rwsem *sem)
{
up_read(&sem->internal_rwsem);
@@ -2133,6 +2132,21 @@ static inline void f2fs_down_write(struct f2fs_rwsem *sem)
down_write(&sem->internal_rwsem);
}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
+{
+ down_read_nested(&sem->internal_rwsem, subclass);
+}
+
+static inline void f2fs_down_write_nested(struct f2fs_rwsem *sem, int subclass)
+{
+ down_write_nested(&sem->internal_rwsem, subclass);
+}
+#else
+#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
+#define f2fs_down_write_nested(sem, subclass) f2fs_down_write(sem)
+#endif
+
static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
{
return down_write_trylock(&sem->internal_rwsem);
@@ -3887,7 +3901,7 @@ struct f2fs_stat_info {
int nats, dirty_nats, sits, dirty_sits;
int free_nids, avail_nids, alloc_nids;
int total_count, utilization;
- int bg_gc, nr_wb_cp_data, nr_wb_data;
+ int nr_wb_cp_data, nr_wb_data;
int nr_rd_data, nr_rd_node, nr_rd_meta;
int nr_dio_read, nr_dio_write;
unsigned int io_skip_bggc, other_skip_bggc;
@@ -3907,9 +3921,11 @@ struct f2fs_stat_info {
int rsvd_segs, overp_segs;
int dirty_count, node_pages, meta_pages, compress_pages;
int compress_page_hit;
- int prefree_count, call_count, cp_count, bg_cp_count;
- int tot_segs, node_segs, data_segs, free_segs, free_secs;
- int bg_node_segs, bg_data_segs;
+ int prefree_count, free_segs, free_secs;
+ int cp_call_count[MAX_CALL_TYPE], cp_count;
+ int gc_call_count[MAX_CALL_TYPE];
+ int gc_segs[2][2];
+ int gc_secs[2][2];
int tot_blks, data_blks, node_blks;
int bg_data_blks, bg_node_blks;
int curseg[NR_CURSEG_TYPE];
@@ -3931,10 +3947,9 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
return (struct f2fs_stat_info *)sbi->stat_info;
}
-#define stat_inc_cp_count(si) ((si)->cp_count++)
-#define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++)
-#define stat_inc_call_count(si) ((si)->call_count++)
-#define stat_inc_bggc_count(si) ((si)->bg_gc++)
+#define stat_inc_cp_call_count(sbi, foreground) \
+ atomic_inc(&sbi->cp_call_count[(foreground)])
+#define stat_inc_cp_count(sbi) (F2FS_STAT(sbi)->cp_count++)
#define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
@@ -4019,18 +4034,12 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
if (cur > max) \
atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
} while (0)
-#define stat_inc_seg_count(sbi, type, gc_type) \
- do { \
- struct f2fs_stat_info *si = F2FS_STAT(sbi); \
- si->tot_segs++; \
- if ((type) == SUM_TYPE_DATA) { \
- si->data_segs++; \
- si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \
- } else { \
- si->node_segs++; \
- si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0; \
- } \
- } while (0)
+#define stat_inc_gc_call_count(sbi, foreground) \
+ (F2FS_STAT(sbi)->gc_call_count[(foreground)]++)
+#define stat_inc_gc_sec_count(sbi, type, gc_type) \
+ (F2FS_STAT(sbi)->gc_secs[(type)][(gc_type)]++)
+#define stat_inc_gc_seg_count(sbi, type, gc_type) \
+ (F2FS_STAT(sbi)->gc_segs[(type)][(gc_type)]++)
#define stat_inc_tot_blk_count(si, blks) \
((si)->tot_blks += (blks))
@@ -4057,10 +4066,8 @@ void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
-#define stat_inc_cp_count(si) do { } while (0)
-#define stat_inc_bg_cp_count(si) do { } while (0)
-#define stat_inc_call_count(si) do { } while (0)
-#define stat_inc_bggc_count(si) do { } while (0)
+#define stat_inc_cp_call_count(sbi, foreground) do { } while (0)
+#define stat_inc_cp_count(sbi) do { } while (0)
#define stat_io_skip_bggc_count(sbi) do { } while (0)
#define stat_other_skip_bggc_count(sbi) do { } while (0)
#define stat_inc_dirty_inode(sbi, type) do { } while (0)
@@ -4088,7 +4095,9 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#define stat_inc_seg_type(sbi, curseg) do { } while (0)
#define stat_inc_block_count(sbi, curseg) do { } while (0)
#define stat_inc_inplace_blocks(sbi) do { } while (0)
-#define stat_inc_seg_count(sbi, type, gc_type) do { } while (0)
+#define stat_inc_gc_call_count(sbi, foreground) do { } while (0)
+#define stat_inc_gc_sec_count(sbi, type, gc_type) do { } while (0)
+#define stat_inc_gc_seg_count(sbi, type, gc_type) do { } while (0)
#define stat_inc_tot_blk_count(si, blks) do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type) do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type) do { } while (0)
@@ -4425,6 +4434,22 @@ static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
}
#endif
+static inline int f2fs_bdev_index(struct f2fs_sb_info *sbi,
+ struct block_device *bdev)
+{
+ int i;
+
+ if (!f2fs_is_multi_device(sbi))
+ return 0;
+
+ for (i = 0; i < sbi->s_ndevs; i++)
+ if (FDEV(i).bdev == bdev)
+ return i;
+
+ WARN_ON(1);
+ return -1;
+}
+
static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
return f2fs_sb_has_blkzoned(sbi);
@@ -4485,7 +4510,8 @@ static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
static inline bool f2fs_may_compress(struct inode *inode)
{
if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
- f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode))
+ f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode) ||
+ f2fs_is_mmap_file(inode))
return false;
return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index ce9d567cd5fe..ca5904129b16 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -526,7 +526,11 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
file_accessed(file);
vma->vm_ops = &f2fs_file_vm_ops;
+
+ f2fs_down_read(&F2FS_I(inode)->i_sem);
set_inode_flag(inode, FI_MMAP_FILE);
+ f2fs_up_read(&F2FS_I(inode)->i_sem);
+
return 0;
}
@@ -1724,6 +1728,7 @@ next_alloc:
if (has_not_enough_free_secs(sbi, 0,
GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
f2fs_down_write(&sbi->gc_lock);
+ stat_inc_gc_call_count(sbi, FOREGROUND);
err = f2fs_gc(sbi, &gc_control);
if (err && err != -ENODATA)
goto out_err;
@@ -1919,12 +1924,19 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
int err = f2fs_convert_inline_inode(inode);
if (err)
return err;
- if (!f2fs_may_compress(inode))
- return -EINVAL;
- if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
+
+ f2fs_down_write(&F2FS_I(inode)->i_sem);
+ if (!f2fs_may_compress(inode) ||
+ (S_ISREG(inode->i_mode) &&
+ F2FS_HAS_BLOCKS(inode))) {
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
return -EINVAL;
- if (set_compress_context(inode))
- return -EOPNOTSUPP;
+ }
+ err = set_compress_context(inode);
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
+
+ if (err)
+ return err;
}
}
@@ -2465,6 +2477,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
gc_control.init_gc_type = sync ? FG_GC : BG_GC;
gc_control.err_gc_skipped = sync;
+ stat_inc_gc_call_count(sbi, FOREGROUND);
ret = f2fs_gc(sbi, &gc_control);
out:
mnt_drop_write_file(filp);
@@ -2508,6 +2521,7 @@ do_more:
}
gc_control.victim_segno = GET_SEGNO(sbi, range->start);
+ stat_inc_gc_call_count(sbi, FOREGROUND);
ret = f2fs_gc(sbi, &gc_control);
if (ret) {
if (ret == -EBUSY)
@@ -2990,6 +3004,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
sm->last_victim[ALLOC_NEXT] = end_segno + 1;
gc_control.victim_segno = start_segno;
+ stat_inc_gc_call_count(sbi, FOREGROUND);
ret = f2fs_gc(sbi, &gc_control);
if (ret == -EAGAIN)
ret = 0;
@@ -3976,6 +3991,7 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
file_start_write(filp);
inode_lock(inode);
+ f2fs_down_write(&F2FS_I(inode)->i_sem);
if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
ret = -EBUSY;
goto out;
@@ -3995,6 +4011,7 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
f2fs_warn(sbi, "compression algorithm is successfully set, "
"but current kernel doesn't support this algorithm.");
out:
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
inode_unlock(inode);
file_end_write(filp);
@@ -4079,10 +4096,8 @@ static int f2fs_ioc_decompress_file(struct file *filp)
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
count = last_idx - page_idx;
- while (count) {
- int len = min(cluster_size, count);
-
- ret = redirty_blocks(inode, page_idx, len);
+ while (count && count >= cluster_size) {
+ ret = redirty_blocks(inode, page_idx, cluster_size);
if (ret < 0)
break;
@@ -4092,8 +4107,14 @@ static int f2fs_ioc_decompress_file(struct file *filp)
break;
}
- count -= len;
- page_idx += len;
+ count -= cluster_size;
+ page_idx += cluster_size;
+
+ cond_resched();
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
}
if (!ret)
@@ -4153,10 +4174,8 @@ static int f2fs_ioc_compress_file(struct file *filp)
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
count = last_idx - page_idx;
- while (count) {
- int len = min(cluster_size, count);
-
- ret = redirty_blocks(inode, page_idx, len);
+ while (count && count >= cluster_size) {
+ ret = redirty_blocks(inode, page_idx, cluster_size);
if (ret < 0)
break;
@@ -4166,8 +4185,14 @@ static int f2fs_ioc_compress_file(struct file *filp)
break;
}
- count -= len;
- page_idx += len;
+ count -= cluster_size;
+ page_idx += cluster_size;
+
+ cond_resched();
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
}
if (!ret)
@@ -4579,6 +4604,7 @@ static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
dec_page_count(sbi, F2FS_DIO_WRITE);
if (error)
return error;
+ f2fs_update_time(sbi, REQ_TIME);
f2fs_update_iostat(sbi, NULL, APP_DIRECT_IO, size);
return 0;
}
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index a1ca394bc327..f550cdeaa663 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -121,8 +121,8 @@ static int gc_thread_func(void *data)
else
increase_sleep_time(gc_th, &wait_ms);
do_gc:
- if (!foreground)
- stat_inc_bggc_count(sbi->stat_info);
+ stat_inc_gc_call_count(sbi, foreground ?
+ FOREGROUND : BACKGROUND);
sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
@@ -1685,6 +1685,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
int seg_freed = 0, migrated = 0;
unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
SUM_TYPE_DATA : SUM_TYPE_NODE;
+ unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
int submitted = 0;
if (__is_large_section(sbi))
@@ -1766,7 +1767,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
segno, gc_type,
force_migrate);
- stat_inc_seg_count(sbi, type, gc_type);
+ stat_inc_gc_seg_count(sbi, data_type, gc_type);
sbi->gc_reclaimed_segs[sbi->gc_mode]++;
migrated++;
@@ -1783,12 +1784,12 @@ skip:
}
if (submitted)
- f2fs_submit_merged_write(sbi,
- (type == SUM_TYPE_NODE) ? NODE : DATA);
+ f2fs_submit_merged_write(sbi, data_type);
blk_finish_plug(&plug);
- stat_inc_call_count(sbi->stat_info);
+ if (migrated)
+ stat_inc_gc_sec_count(sbi, data_type, gc_type);
return seg_freed;
}
@@ -1839,6 +1840,7 @@ gc_more:
* secure free segments which doesn't need fggc any more.
*/
if (prefree_segments(sbi)) {
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
ret = f2fs_write_checkpoint(sbi, &cpc);
if (ret)
goto stop;
@@ -1887,6 +1889,7 @@ retry:
round++;
if (skipped_round > MAX_SKIP_GC_COUNT &&
skipped_round * 2 >= round) {
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
ret = f2fs_write_checkpoint(sbi, &cpc);
goto stop;
}
@@ -1902,6 +1905,7 @@ retry:
*/
if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
prefree_segments(sbi)) {
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
ret = f2fs_write_checkpoint(sbi, &cpc);
if (ret)
goto stop;
@@ -2029,6 +2033,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
if (gc_only)
goto out;
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
if (err)
goto out;
@@ -2223,6 +2228,7 @@ out_drop_write:
clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
set_sbi_flag(sbi, SBI_IS_DIRTY);
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
if (err) {
update_fs_metadata(sbi, secs);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 88fc9208ffa7..2fe25619ccb5 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -641,7 +641,8 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
}
if (inode) {
- f2fs_down_write(&F2FS_I(inode)->i_sem);
+ f2fs_down_write_nested(&F2FS_I(inode)->i_sem,
+ SINGLE_DEPTH_NESTING);
page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
if (IS_ERR(page)) {
err = PTR_ERR(page);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index c1c2ba9f28e5..cde243840abd 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -214,7 +214,7 @@ static bool sanity_check_compress_inode(struct inode *inode,
f2fs_warn(sbi,
"%s: inode (ino=%lx) has unsupported compress algorithm: %u, run fsck to fix",
__func__, inode->i_ino, ri->i_compress_algorithm);
- goto err;
+ return false;
}
if (le64_to_cpu(ri->i_compr_blocks) >
SECTOR_TO_BLOCK(inode->i_blocks)) {
@@ -222,14 +222,14 @@ static bool sanity_check_compress_inode(struct inode *inode,
"%s: inode (ino=%lx) has inconsistent i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
__func__, inode->i_ino, le64_to_cpu(ri->i_compr_blocks),
SECTOR_TO_BLOCK(inode->i_blocks));
- goto err;
+ return false;
}
if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
f2fs_warn(sbi,
"%s: inode (ino=%lx) has unsupported log cluster size: %u, run fsck to fix",
__func__, inode->i_ino, ri->i_log_cluster_size);
- goto err;
+ return false;
}
clevel = le16_to_cpu(ri->i_compress_flag) >>
@@ -273,8 +273,6 @@ static bool sanity_check_compress_inode(struct inode *inode,
err_level:
f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported compress level: %u, run fsck to fix",
__func__, inode->i_ino, clevel);
-err:
- set_sbi_flag(sbi, SBI_NEED_FSCK);
return false;
}
@@ -287,14 +285,12 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
if (!iblocks) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
__func__, inode->i_ino, iblocks);
return false;
}
if (ino_of_node(node_page) != nid_of_node(node_page)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
__func__, inode->i_ino,
ino_of_node(node_page), nid_of_node(node_page));
@@ -303,7 +299,6 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
if (f2fs_has_extra_attr(inode)) {
if (!f2fs_sb_has_extra_attr(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
__func__, inode->i_ino);
return false;
@@ -311,7 +306,6 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
fi->i_extra_isize < F2FS_MIN_EXTRA_ATTR_SIZE ||
fi->i_extra_isize % sizeof(__le32)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
__func__, inode->i_ino, fi->i_extra_isize,
F2FS_TOTAL_EXTRA_ATTR_SIZE);
@@ -321,7 +315,6 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
f2fs_has_inline_xattr(inode) &&
(!fi->i_inline_xattr_size ||
fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
__func__, inode->i_ino, fi->i_inline_xattr_size,
MAX_INLINE_XATTR_SIZE);
@@ -335,7 +328,6 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
return false;
}
} else if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
__func__, inode->i_ino);
return false;
@@ -343,31 +335,26 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
if (!f2fs_sb_has_extra_attr(sbi)) {
if (f2fs_sb_has_project_quota(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
__func__, inode->i_ino, F2FS_FEATURE_PRJQUOTA);
return false;
}
if (f2fs_sb_has_inode_chksum(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
__func__, inode->i_ino, F2FS_FEATURE_INODE_CHKSUM);
return false;
}
if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
__func__, inode->i_ino, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR);
return false;
}
if (f2fs_sb_has_inode_crtime(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
__func__, inode->i_ino, F2FS_FEATURE_INODE_CRTIME);
return false;
}
if (f2fs_sb_has_compression(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
__func__, inode->i_ino, F2FS_FEATURE_COMPRESSION);
return false;
@@ -375,21 +362,18 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
}
if (f2fs_sanity_check_inline_data(inode)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
__func__, inode->i_ino, inode->i_mode);
return false;
}
if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
__func__, inode->i_ino, inode->i_mode);
return false;
}
if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
__func__, inode->i_ino);
return false;
@@ -475,6 +459,13 @@ static int do_read_inode(struct inode *inode)
fi->i_inline_xattr_size = 0;
}
+ if (!sanity_check_inode(inode, node_page)) {
+ f2fs_put_page(node_page, 1);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
+ return -EFSCORRUPTED;
+ }
+
/* check data exist */
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
__recover_inline_status(inode, node_page);
@@ -544,12 +535,6 @@ static int do_read_inode(struct inode *inode)
f2fs_init_read_extent_tree(inode, node_page);
f2fs_init_age_extent_tree(inode);
- if (!sanity_check_inode(inode, node_page)) {
- f2fs_put_page(node_page, 1);
- f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
- return -EFSCORRUPTED;
- }
-
if (!sanity_check_extent_cache(inode)) {
f2fs_put_page(node_page, 1);
f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index b8637e88d94f..7be60df277a5 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -924,6 +924,7 @@ skip:
struct cp_control cpc = {
.reason = CP_RECOVERY,
};
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
}
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 0457d620011f..d05b41608fc0 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -205,6 +205,8 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
f2fs_i_size_write(inode, fi->original_i_size);
fi->original_i_size = 0;
}
+ /* avoid stale dirty inode during eviction */
+ sync_inode_metadata(inode, 0);
}
static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
@@ -433,6 +435,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
.err_gc_skipped = false,
.nr_free_secs = 1 };
f2fs_down_write(&sbi->gc_lock);
+ stat_inc_gc_call_count(sbi, FOREGROUND);
f2fs_gc(sbi, &gc_control);
}
}
@@ -510,8 +513,8 @@ do_sync:
mutex_unlock(&sbi->flush_lock);
}
+ stat_inc_cp_call_count(sbi, BACKGROUND);
f2fs_sync_fs(sbi->sb, 1);
- stat_inc_bg_cp_count(sbi->stat_info);
}
static int __submit_flush_wait(struct f2fs_sb_info *sbi,
@@ -1258,8 +1261,16 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
#ifdef CONFIG_BLK_DEV_ZONED
if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) {
- __submit_zone_reset_cmd(sbi, dc, flag, wait_list, issued);
- return 0;
+ int devi = f2fs_bdev_index(sbi, bdev);
+
+ if (devi < 0)
+ return -EINVAL;
+
+ if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
+ __submit_zone_reset_cmd(sbi, dc, flag,
+ wait_list, issued);
+ return 0;
+ }
}
#endif
@@ -1785,15 +1796,24 @@ static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
dc = __lookup_discard_cmd(sbi, blkaddr);
#ifdef CONFIG_BLK_DEV_ZONED
if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) {
- /* force submit zone reset */
- if (dc->state == D_PREP)
- __submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
- &dcc->wait_list, NULL);
- dc->ref++;
- mutex_unlock(&dcc->cmd_lock);
- /* wait zone reset */
- __wait_one_discard_bio(sbi, dc);
- return;
+ int devi = f2fs_bdev_index(sbi, dc->bdev);
+
+ if (devi < 0) {
+ mutex_unlock(&dcc->cmd_lock);
+ return;
+ }
+
+ if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
+ /* force submit zone reset */
+ if (dc->state == D_PREP)
+ __submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
+ &dcc->wait_list, NULL);
+ dc->ref++;
+ mutex_unlock(&dcc->cmd_lock);
+ /* wait zone reset */
+ __wait_one_discard_bio(sbi, dc);
+ return;
+ }
}
#endif
if (dc) {
@@ -2193,7 +2213,7 @@ find_next:
len = next_pos - cur_pos;
if (f2fs_sb_has_blkzoned(sbi) ||
- !force || len < cpc->trim_minlen)
+ (force && len < cpc->trim_minlen))
goto skip;
f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
@@ -3228,6 +3248,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
goto out;
f2fs_down_write(&sbi->gc_lock);
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
f2fs_up_write(&sbi->gc_lock);
if (err)
@@ -4846,17 +4867,17 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
{
unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
block_t zone_block, wp_block, last_valid_block;
+ unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
int i, s, b, ret;
struct seg_entry *se;
if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
return 0;
- wp_block = fdev->start_blk + (zone->wp >> sbi->log_sectors_per_block);
+ wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
wp_segno = GET_SEGNO(sbi, wp_block);
wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
- zone_block = fdev->start_blk + (zone->start >>
- sbi->log_sectors_per_block);
+ zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
zone_segno = GET_SEGNO(sbi, zone_block);
zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
@@ -4906,7 +4927,7 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
"pointer. Reset the write pointer: wp[0x%x,0x%x]",
wp_segno, wp_blkoff);
ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
- zone->len >> sbi->log_sectors_per_block);
+ zone->len >> log_sectors_per_block);
if (ret)
f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
fdev->path, ret);
@@ -4927,12 +4948,19 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
wp_segno, wp_blkoff);
- ret = blkdev_issue_zeroout(fdev->bdev, zone->wp,
- zone->len - (zone->wp - zone->start),
- GFP_NOFS, 0);
- if (ret)
- f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)",
- fdev->path, ret);
+ ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH,
+ zone->start, zone->len, GFP_NOFS);
+ if (ret == -EOPNOTSUPP) {
+ ret = blkdev_issue_zeroout(fdev->bdev, zone->wp,
+ zone->len - (zone->wp - zone->start),
+ GFP_NOFS, 0);
+ if (ret)
+ f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)",
+ fdev->path, ret);
+ } else if (ret) {
+ f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)",
+ fdev->path, ret);
+ }
return ret;
}
@@ -4967,6 +4995,7 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
struct blk_zone zone;
unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
block_t cs_zone_block, wp_block;
+ unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
sector_t zone_sector;
int err;
@@ -4978,8 +5007,8 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
return 0;
/* report zone for the sector the curseg points to */
- zone_sector = (sector_t)(cs_zone_block - zbd->start_blk) <<
- sbi->log_sectors_per_block;
+ zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
+ << log_sectors_per_block;
err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
report_one_zone_cb, &zone);
if (err != 1) {
@@ -4991,10 +5020,10 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
return 0;
- wp_block = zbd->start_blk + (zone.wp >> sbi->log_sectors_per_block);
+ wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
wp_segno = GET_SEGNO(sbi, wp_block);
wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
- wp_sector_off = zone.wp & GENMASK(sbi->log_sectors_per_block - 1, 0);
+ wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
wp_sector_off == 0)
@@ -5021,8 +5050,8 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
if (!zbd)
return 0;
- zone_sector = (sector_t)(cs_zone_block - zbd->start_blk) <<
- sbi->log_sectors_per_block;
+ zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
+ << log_sectors_per_block;
err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
report_one_zone_cb, &zone);
if (err != 1) {
@@ -5040,7 +5069,7 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
"Reset the zone: curseg[0x%x,0x%x]",
type, cs->segno, cs->next_blkoff);
err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block,
- zone.len >> sbi->log_sectors_per_block);
+ zone.len >> log_sectors_per_block);
if (err) {
f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
zbd->path, err);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index aa1f9a3a8037..a8c8232852bb 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -591,7 +591,7 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
unsigned int level;
if (strlen(str) == 3) {
- F2FS_OPTION(sbi).compress_level = LZ4HC_DEFAULT_CLEVEL;
+ F2FS_OPTION(sbi).compress_level = 0;
return 0;
}
@@ -862,11 +862,6 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
if (!name)
return -ENOMEM;
if (!strcmp(name, "adaptive")) {
- if (f2fs_sb_has_blkzoned(sbi)) {
- f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
- kfree(name);
- return -EINVAL;
- }
F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
} else if (!strcmp(name, "lfs")) {
F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
@@ -1331,6 +1326,11 @@ default_check:
F2FS_OPTION(sbi).discard_unit =
DISCARD_UNIT_SECTION;
}
+
+ if (F2FS_OPTION(sbi).fs_mode != FS_MODE_LFS) {
+ f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature");
+ return -EINVAL;
+ }
#else
f2fs_err(sbi, "Zoned block device support is not enabled");
return -EINVAL;
@@ -1561,7 +1561,8 @@ static void destroy_device_list(struct f2fs_sb_info *sbi)
int i;
for (i = 0; i < sbi->s_ndevs; i++) {
- blkdev_put(FDEV(i).bdev, sbi->sb);
+ if (i > 0)
+ blkdev_put(FDEV(i).bdev, sbi->sb);
#ifdef CONFIG_BLK_DEV_ZONED
kvfree(FDEV(i).blkz_seq);
#endif
@@ -1600,6 +1601,7 @@ static void f2fs_put_super(struct super_block *sb)
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
}
@@ -1609,6 +1611,7 @@ static void f2fs_put_super(struct super_block *sb)
struct cp_control cpc = {
.reason = CP_UMOUNT | CP_TRIMMED,
};
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
}
@@ -1705,8 +1708,10 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return -EAGAIN;
- if (sync)
+ if (sync) {
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_issue_checkpoint(sbi);
+ }
return err;
}
@@ -2205,6 +2210,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
.nr_free_secs = 1 };
f2fs_down_write(&sbi->gc_lock);
+ stat_inc_gc_call_count(sbi, FOREGROUND);
err = f2fs_gc(sbi, &gc_control);
if (err == -ENODATA) {
err = 0;
@@ -2230,6 +2236,7 @@ skip_gc:
f2fs_down_write(&sbi->gc_lock);
cpc.reason = CP_PAUSE;
set_sbi_flag(sbi, SBI_CP_DISABLED);
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
if (err)
goto out_unlock;
@@ -4190,16 +4197,12 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
sbi->aligned_blksize = true;
for (i = 0; i < max_devices; i++) {
-
- if (i > 0 && !RDEV(i).path[0])
+ if (i == 0)
+ FDEV(0).bdev = sbi->sb->s_bdev;
+ else if (!RDEV(i).path[0])
break;
- if (max_devices == 1) {
- /* Single zoned block device mount */
- FDEV(0).bdev =
- blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev, mode,
- sbi->sb, NULL);
- } else {
+ if (max_devices > 1) {
/* Multi-device mount */
memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
FDEV(i).total_segments =
@@ -4215,9 +4218,9 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
FDEV(i).end_blk = FDEV(i).start_blk +
(FDEV(i).total_segments <<
sbi->log_blocks_per_seg) - 1;
+ FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
+ mode, sbi->sb, NULL);
}
- FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path, mode,
- sbi->sb, NULL);
}
if (IS_ERR(FDEV(i).bdev))
return PTR_ERR(FDEV(i).bdev);
@@ -4870,6 +4873,7 @@ static void kill_f2fs_super(struct super_block *sb)
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
f2fs_write_checkpoint(sbi, &cpc);
}
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 48b7e0073884..417fae96890f 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -356,6 +356,16 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
if (!strcmp(a->attr.name, "revoked_atomic_block"))
return sysfs_emit(buf, "%llu\n", sbi->revoked_atomic_block);
+#ifdef CONFIG_F2FS_STAT_FS
+ if (!strcmp(a->attr.name, "cp_foreground_calls"))
+ return sysfs_emit(buf, "%d\n",
+ atomic_read(&sbi->cp_call_count[TOTAL_CALL]) -
+ atomic_read(&sbi->cp_call_count[BACKGROUND]));
+ if (!strcmp(a->attr.name, "cp_background_calls"))
+ return sysfs_emit(buf, "%d\n",
+ atomic_read(&sbi->cp_call_count[BACKGROUND]));
+#endif
+
ui = (unsigned int *)(ptr + a->offset);
return sysfs_emit(buf, "%u\n", *ui);
@@ -972,10 +982,10 @@ F2FS_SBI_GENERAL_RO_ATTR(unusable_blocks_per_sec);
/* STAT_INFO ATTR */
#ifdef CONFIG_F2FS_STAT_FS
-STAT_INFO_RO_ATTR(cp_foreground_calls, cp_count);
-STAT_INFO_RO_ATTR(cp_background_calls, bg_cp_count);
-STAT_INFO_RO_ATTR(gc_foreground_calls, call_count);
-STAT_INFO_RO_ATTR(gc_background_calls, bg_gc);
+STAT_INFO_RO_ATTR(cp_foreground_calls, cp_call_count[FOREGROUND]);
+STAT_INFO_RO_ATTR(cp_background_calls, cp_call_count[BACKGROUND]);
+STAT_INFO_RO_ATTR(gc_foreground_calls, gc_call_count[FOREGROUND]);
+STAT_INFO_RO_ATTR(gc_background_calls, gc_call_count[BACKGROUND]);
#endif
/* FAULT_INFO ATTR */
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 4ae93e1df421..a657284faee3 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -757,17 +757,17 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
f2fs_set_encrypted_inode(inode);
- f2fs_mark_inode_dirty_sync(inode, true);
- if (!error && S_ISDIR(inode->i_mode))
+ if (S_ISDIR(inode->i_mode))
set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
same:
if (is_inode_flag_set(inode, FI_ACL_MODE)) {
inode->i_mode = F2FS_I(inode)->i_acl_mode;
- inode_set_ctime_current(inode);
clear_inode_flag(inode, FI_ACL_MODE);
}
+ inode_set_ctime_current(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
exit:
kfree(base_addr);
return error;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 881524b9a55a..d707e6987da9 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -92,7 +92,7 @@ static void fuse_dentry_settime(struct dentry *dentry, u64 time)
/*
* Calculate the time in jiffies until a dentry/attributes are valid
*/
-static u64 time_to_jiffies(u64 sec, u32 nsec)
+u64 fuse_time_to_jiffies(u64 sec, u32 nsec)
{
if (sec || nsec) {
struct timespec64 ts = {
@@ -112,17 +112,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec)
void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o)
{
fuse_dentry_settime(entry,
- time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
-}
-
-static u64 attr_timeout(struct fuse_attr_out *o)
-{
- return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
-}
-
-u64 entry_attr_timeout(struct fuse_entry_out *o)
-{
- return time_to_jiffies(o->attr_valid, o->attr_valid_nsec);
+ fuse_time_to_jiffies(o->entry_valid, o->entry_valid_nsec));
}
void fuse_invalidate_attr_mask(struct inode *inode, u32 mask)
@@ -265,8 +255,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
goto invalid;
forget_all_cached_acls(inode);
- fuse_change_attributes(inode, &outarg.attr,
- entry_attr_timeout(&outarg),
+ fuse_change_attributes(inode, &outarg.attr, NULL,
+ ATTR_TIMEOUT(&outarg),
attr_version);
fuse_change_entry_timeout(entry, &outarg);
} else if (inode) {
@@ -360,10 +350,14 @@ int fuse_valid_type(int m)
S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
}
+static bool fuse_valid_size(u64 size)
+{
+ return size <= LLONG_MAX;
+}
+
bool fuse_invalid_attr(struct fuse_attr *attr)
{
- return !fuse_valid_type(attr->mode) ||
- attr->size > LLONG_MAX;
+ return !fuse_valid_type(attr->mode) || !fuse_valid_size(attr->size);
}
int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
@@ -399,7 +393,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
goto out_put_forget;
*inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
- &outarg->attr, entry_attr_timeout(outarg),
+ &outarg->attr, ATTR_TIMEOUT(outarg),
attr_version);
err = -ENOMEM;
if (!*inode) {
@@ -686,7 +680,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
ff->nodeid = outentry.nodeid;
ff->open_flags = outopen.open_flags;
inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
- &outentry.attr, entry_attr_timeout(&outentry), 0);
+ &outentry.attr, ATTR_TIMEOUT(&outentry), 0);
if (!inode) {
flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
fuse_sync_release(NULL, ff, flags);
@@ -755,7 +749,8 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
if (err == -ENOSYS) {
fc->no_create = 1;
goto mknod;
- }
+ } else if (err == -EEXIST)
+ fuse_invalidate_entry(entry);
out_dput:
dput(res);
return err;
@@ -813,7 +808,7 @@ static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
goto out_put_forget_req;
inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation,
- &outarg.attr, entry_attr_timeout(&outarg), 0);
+ &outarg.attr, ATTR_TIMEOUT(&outarg), 0);
if (!inode) {
fuse_queue_forget(fm->fc, forget, outarg.nodeid, 1);
return -ENOMEM;
@@ -835,6 +830,8 @@ static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
return 0;
out_put_forget_req:
+ if (err == -EEXIST)
+ fuse_invalidate_entry(entry);
kfree(forget);
return err;
}
@@ -986,7 +983,7 @@ static int fuse_unlink(struct inode *dir, struct dentry *entry)
if (!err) {
fuse_dir_changed(dir);
fuse_entry_unlinked(entry);
- } else if (err == -EINTR)
+ } else if (err == -EINTR || err == -ENOENT)
fuse_invalidate_entry(entry);
return err;
}
@@ -1009,7 +1006,7 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
if (!err) {
fuse_dir_changed(dir);
fuse_entry_unlinked(entry);
- } else if (err == -EINTR)
+ } else if (err == -EINTR || err == -ENOENT)
fuse_invalidate_entry(entry);
return err;
}
@@ -1050,7 +1047,7 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
/* newent will end up negative */
if (!(flags & RENAME_EXCHANGE) && d_really_is_positive(newent))
fuse_entry_unlinked(newent);
- } else if (err == -EINTR) {
+ } else if (err == -EINTR || err == -ENOENT) {
/* If request was interrupted, DEITY only knows if the
rename actually took place. If the invalidation
fails (e.g. some process has CWD under the renamed
@@ -1153,6 +1150,87 @@ static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
stat->blksize = 1 << blkbits;
}
+static void fuse_statx_to_attr(struct fuse_statx *sx, struct fuse_attr *attr)
+{
+ memset(attr, 0, sizeof(*attr));
+ attr->ino = sx->ino;
+ attr->size = sx->size;
+ attr->blocks = sx->blocks;
+ attr->atime = sx->atime.tv_sec;
+ attr->mtime = sx->mtime.tv_sec;
+ attr->ctime = sx->ctime.tv_sec;
+ attr->atimensec = sx->atime.tv_nsec;
+ attr->mtimensec = sx->mtime.tv_nsec;
+ attr->ctimensec = sx->ctime.tv_nsec;
+ attr->mode = sx->mode;
+ attr->nlink = sx->nlink;
+ attr->uid = sx->uid;
+ attr->gid = sx->gid;
+ attr->rdev = new_encode_dev(MKDEV(sx->rdev_major, sx->rdev_minor));
+ attr->blksize = sx->blksize;
+}
+
+static int fuse_do_statx(struct inode *inode, struct file *file,
+ struct kstat *stat)
+{
+ int err;
+ struct fuse_attr attr;
+ struct fuse_statx *sx;
+ struct fuse_statx_in inarg;
+ struct fuse_statx_out outarg;
+ struct fuse_mount *fm = get_fuse_mount(inode);
+ u64 attr_version = fuse_get_attr_version(fm->fc);
+ FUSE_ARGS(args);
+
+ memset(&inarg, 0, sizeof(inarg));
+ memset(&outarg, 0, sizeof(outarg));
+ /* Directories have separate file-handle space */
+ if (file && S_ISREG(inode->i_mode)) {
+ struct fuse_file *ff = file->private_data;
+
+ inarg.getattr_flags |= FUSE_GETATTR_FH;
+ inarg.fh = ff->fh;
+ }
+ /* For now leave sync hints as the default, request all stats. */
+ inarg.sx_flags = 0;
+ inarg.sx_mask = STATX_BASIC_STATS | STATX_BTIME;
+ args.opcode = FUSE_STATX;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.out_numargs = 1;
+ args.out_args[0].size = sizeof(outarg);
+ args.out_args[0].value = &outarg;
+ err = fuse_simple_request(fm, &args);
+ if (err)
+ return err;
+
+ sx = &outarg.stat;
+ if (((sx->mask & STATX_SIZE) && !fuse_valid_size(sx->size)) ||
+ ((sx->mask & STATX_TYPE) && (!fuse_valid_type(sx->mode) ||
+ inode_wrong_type(inode, sx->mode)))) {
+ make_bad_inode(inode);
+ return -EIO;
+ }
+
+ fuse_statx_to_attr(&outarg.stat, &attr);
+ if ((sx->mask & STATX_BASIC_STATS) == STATX_BASIC_STATS) {
+ fuse_change_attributes(inode, &attr, &outarg.stat,
+ ATTR_TIMEOUT(&outarg), attr_version);
+ }
+
+ if (stat) {
+ stat->result_mask = sx->mask & (STATX_BASIC_STATS | STATX_BTIME);
+ stat->btime.tv_sec = sx->btime.tv_sec;
+ stat->btime.tv_nsec = min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);
+ fuse_fillattr(inode, &attr, stat);
+ stat->result_mask |= STATX_TYPE;
+ }
+
+ return 0;
+}
+
static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
struct file *file)
{
@@ -1189,8 +1267,8 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
fuse_make_bad(inode);
err = -EIO;
} else {
- fuse_change_attributes(inode, &outarg.attr,
- attr_timeout(&outarg),
+ fuse_change_attributes(inode, &outarg.attr, NULL,
+ ATTR_TIMEOUT(&outarg),
attr_version);
if (stat)
fuse_fillattr(inode, &outarg.attr, stat);
@@ -1204,12 +1282,22 @@ static int fuse_update_get_attr(struct inode *inode, struct file *file,
unsigned int flags)
{
struct fuse_inode *fi = get_fuse_inode(inode);
+ struct fuse_conn *fc = get_fuse_conn(inode);
int err = 0;
bool sync;
u32 inval_mask = READ_ONCE(fi->inval_mask);
u32 cache_mask = fuse_get_cache_mask(inode);
- if (flags & AT_STATX_FORCE_SYNC)
+
+ /* FUSE only supports basic stats and possibly btime */
+ request_mask &= STATX_BASIC_STATS | STATX_BTIME;
+retry:
+ if (fc->no_statx)
+ request_mask &= STATX_BASIC_STATS;
+
+ if (!request_mask)
+ sync = false;
+ else if (flags & AT_STATX_FORCE_SYNC)
sync = true;
else if (flags & AT_STATX_DONT_SYNC)
sync = false;
@@ -1220,11 +1308,24 @@ static int fuse_update_get_attr(struct inode *inode, struct file *file,
if (sync) {
forget_all_cached_acls(inode);
- err = fuse_do_getattr(inode, stat, file);
+ /* Try statx if BTIME is requested */
+ if (!fc->no_statx && (request_mask & ~STATX_BASIC_STATS)) {
+ err = fuse_do_statx(inode, file, stat);
+ if (err == -ENOSYS) {
+ fc->no_statx = 1;
+ goto retry;
+ }
+ } else {
+ err = fuse_do_getattr(inode, stat, file);
+ }
} else if (stat) {
generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
stat->mode = fi->orig_i_mode;
stat->ino = fi->orig_ino;
+ if (test_bit(FUSE_I_BTIME, &fi->state)) {
+ stat->btime = fi->i_btime;
+ stat->result_mask |= STATX_BTIME;
+ }
}
return err;
@@ -1861,8 +1962,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
/* FIXME: clear I_DIRTY_SYNC? */
}
- fuse_change_attributes_common(inode, &outarg.attr,
- attr_timeout(&outarg),
+ fuse_change_attributes_common(inode, &outarg.attr, NULL,
+ ATTR_TIMEOUT(&outarg),
fuse_get_cache_mask(inode));
oldsize = inode->i_size;
/* see the comment in fuse_change_attributes() */
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index bc4115288eec..1cdb6327511e 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -19,7 +19,6 @@
#include <linux/uio.h>
#include <linux/fs.h>
#include <linux/filelock.h>
-#include <linux/file.h>
static int fuse_send_open(struct fuse_mount *fm, u64 nodeid,
unsigned int open_flags, int opcode,
@@ -479,36 +478,48 @@ static void fuse_sync_writes(struct inode *inode)
fuse_release_nowrite(inode);
}
-struct fuse_flush_args {
- struct fuse_args args;
- struct fuse_flush_in inarg;
- struct work_struct work;
- struct file *file;
-};
-
-static int fuse_do_flush(struct fuse_flush_args *fa)
+static int fuse_flush(struct file *file, fl_owner_t id)
{
- int err;
- struct inode *inode = file_inode(fa->file);
+ struct inode *inode = file_inode(file);
struct fuse_mount *fm = get_fuse_mount(inode);
+ struct fuse_file *ff = file->private_data;
+ struct fuse_flush_in inarg;
+ FUSE_ARGS(args);
+ int err;
+
+ if (fuse_is_bad(inode))
+ return -EIO;
+
+ if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache)
+ return 0;
err = write_inode_now(inode, 1);
if (err)
- goto out;
+ return err;
inode_lock(inode);
fuse_sync_writes(inode);
inode_unlock(inode);
- err = filemap_check_errors(fa->file->f_mapping);
+ err = filemap_check_errors(file->f_mapping);
if (err)
- goto out;
+ return err;
err = 0;
if (fm->fc->no_flush)
goto inval_attr_out;
- err = fuse_simple_request(fm, &fa->args);
+ memset(&inarg, 0, sizeof(inarg));
+ inarg.fh = ff->fh;
+ inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
+ args.opcode = FUSE_FLUSH;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 1;
+ args.in_args[0].size = sizeof(inarg);
+ args.in_args[0].value = &inarg;
+ args.force = true;
+
+ err = fuse_simple_request(fm, &args);
if (err == -ENOSYS) {
fm->fc->no_flush = 1;
err = 0;
@@ -521,57 +532,9 @@ inval_attr_out:
*/
if (!err && fm->fc->writeback_cache)
fuse_invalidate_attr_mask(inode, STATX_BLOCKS);
-
-out:
- fput(fa->file);
- kfree(fa);
return err;
}
-static void fuse_flush_async(struct work_struct *work)
-{
- struct fuse_flush_args *fa = container_of(work, typeof(*fa), work);
-
- fuse_do_flush(fa);
-}
-
-static int fuse_flush(struct file *file, fl_owner_t id)
-{
- struct fuse_flush_args *fa;
- struct inode *inode = file_inode(file);
- struct fuse_mount *fm = get_fuse_mount(inode);
- struct fuse_file *ff = file->private_data;
-
- if (fuse_is_bad(inode))
- return -EIO;
-
- if (ff->open_flags & FOPEN_NOFLUSH && !fm->fc->writeback_cache)
- return 0;
-
- fa = kzalloc(sizeof(*fa), GFP_KERNEL);
- if (!fa)
- return -ENOMEM;
-
- fa->inarg.fh = ff->fh;
- fa->inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
- fa->args.opcode = FUSE_FLUSH;
- fa->args.nodeid = get_node_id(inode);
- fa->args.in_numargs = 1;
- fa->args.in_args[0].size = sizeof(fa->inarg);
- fa->args.in_args[0].value = &fa->inarg;
- fa->args.force = true;
- fa->file = get_file(file);
-
- /* Don't wait if the task is exiting */
- if (current->flags & PF_EXITING) {
- INIT_WORK(&fa->work, fuse_flush_async);
- schedule_work(&fa->work);
- return 0;
- }
-
- return fuse_do_flush(fa);
-}
-
int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
int datasync, int opcode)
{
@@ -1465,7 +1428,8 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
int write = flags & FUSE_DIO_WRITE;
int cuse = flags & FUSE_DIO_CUSE;
struct file *file = io->iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
+ struct address_space *mapping = file->f_mapping;
+ struct inode *inode = mapping->host;
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fm->fc;
size_t nmax = write ? fc->max_write : fc->max_read;
@@ -1477,12 +1441,20 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
int err = 0;
struct fuse_io_args *ia;
unsigned int max_pages;
+ bool fopen_direct_io = ff->open_flags & FOPEN_DIRECT_IO;
max_pages = iov_iter_npages(iter, fc->max_pages);
ia = fuse_io_alloc(io, max_pages);
if (!ia)
return -ENOMEM;
+ if (fopen_direct_io && fc->direct_io_relax) {
+ res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
+ if (res) {
+ fuse_io_free(ia);
+ return res;
+ }
+ }
if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
if (!write)
inode_lock(inode);
@@ -1491,6 +1463,14 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
inode_unlock(inode);
}
+ if (fopen_direct_io && write) {
+ res = invalidate_inode_pages2_range(mapping, idx_from, idx_to);
+ if (res) {
+ fuse_io_free(ia);
+ return res;
+ }
+ }
+
io->should_dirty = !write && user_backed_iter(iter);
while (count) {
ssize_t nres;
@@ -2478,14 +2458,17 @@ static const struct vm_operations_struct fuse_file_vm_ops = {
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct fuse_file *ff = file->private_data;
+ struct fuse_conn *fc = ff->fm->fc;
/* DAX mmap is superior to direct_io mmap */
if (FUSE_IS_DAX(file_inode(file)))
return fuse_dax_mmap(file, vma);
if (ff->open_flags & FOPEN_DIRECT_IO) {
- /* Can't provide the coherency needed for MAP_SHARED */
- if (vma->vm_flags & VM_MAYSHARE)
+ /* Can't provide the coherency needed for MAP_SHARED
+ * if FUSE_DIRECT_IO_RELAX isn't set.
+ */
+ if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_relax)
return -ENODEV;
invalidate_inode_pages2(file->f_mapping);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 9b7fc7d3c7f1..bf0b85d0b95c 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -88,6 +88,9 @@ struct fuse_inode {
preserve the original mode */
umode_t orig_i_mode;
+ /* Cache birthtime */
+ struct timespec64 i_btime;
+
/** 64 bit inode number */
u64 orig_ino;
@@ -167,6 +170,8 @@ enum {
FUSE_I_SIZE_UNSTABLE,
/* Bad inode */
FUSE_I_BAD,
+ /* Has btime */
+ FUSE_I_BTIME,
};
struct fuse_conn;
@@ -792,6 +797,12 @@ struct fuse_conn {
/* Is tmpfile not implemented by fs? */
unsigned int no_tmpfile:1;
+ /* relax restrictions in FOPEN_DIRECT_IO mode */
+ unsigned int direct_io_relax:1;
+
+ /* Is statx not implemented by fs? */
+ unsigned int no_statx:1;
+
/** The number of requests waiting for completion */
atomic_t num_waiting;
@@ -1058,9 +1069,11 @@ void fuse_init_symlink(struct inode *inode);
* Change attributes of an inode
*/
void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
+ struct fuse_statx *sx,
u64 attr_valid, u64 attr_version);
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
+ struct fuse_statx *sx,
u64 attr_valid, u32 cache_mask);
u32 fuse_get_cache_mask(struct inode *inode);
@@ -1111,7 +1124,10 @@ void fuse_invalidate_entry_cache(struct dentry *entry);
void fuse_invalidate_atime(struct inode *inode);
-u64 entry_attr_timeout(struct fuse_entry_out *o);
+u64 fuse_time_to_jiffies(u64 sec, u32 nsec);
+#define ATTR_TIMEOUT(o) \
+ fuse_time_to_jiffies((o)->attr_valid, (o)->attr_valid_nsec)
+
void fuse_change_entry_timeout(struct dentry *entry, struct fuse_entry_out *o);
/**
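ATTR_TIMEOUT() folds the old entry_attr_timeout() helper into a macro over the new fuse_time_to_jiffies(), whose body lies outside this hunk. A hedged sketch of how a sec/nsec validity pair typically becomes an absolute jiffies deadline (illustrative only, not the actual helper):

	static u64 example_time_to_jiffies(u64 sec, u32 nsec)
	{
		struct timespec64 ts = { .tv_sec = sec, .tv_nsec = nsec };

		return get_jiffies_64() + timespec64_to_jiffies(&ts);
	}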
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 549358ffea8b..2e4eb7cf26fb 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -77,7 +77,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
return NULL;
fi->i_time = 0;
- fi->inval_mask = 0;
+ fi->inval_mask = ~0;
fi->nodeid = 0;
fi->nlookup = 0;
fi->attr_version = 0;
@@ -163,6 +163,7 @@ static ino_t fuse_squash_ino(u64 ino64)
}
void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
+ struct fuse_statx *sx,
u64 attr_valid, u32 cache_mask)
{
struct fuse_conn *fc = get_fuse_conn(inode);
@@ -172,7 +173,8 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
fi->attr_version = atomic64_inc_return(&fc->attr_version);
fi->i_time = attr_valid;
- WRITE_ONCE(fi->inval_mask, 0);
+ /* Clear basic stats from invalid mask */
+ set_mask_bits(&fi->inval_mask, STATX_BASIC_STATS, 0);
inode->i_ino = fuse_squash_ino(attr->ino);
inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
@@ -196,6 +198,25 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
if (!(cache_mask & STATX_CTIME)) {
inode_set_ctime(inode, attr->ctime, attr->ctimensec);
}
+ if (sx) {
+ /* Sanitize nsecs */
+ sx->btime.tv_nsec =
+ min_t(u32, sx->btime.tv_nsec, NSEC_PER_SEC - 1);
+
+ /*
+ * Btime has been queried and the cache is valid (whether or not
+ * btime is available), so clear STATX_BTIME from inval_mask.

+ *
+ * Availability of the btime attribute is indicated in
+ * FUSE_I_BTIME
+ */
+ set_mask_bits(&fi->inval_mask, STATX_BTIME, 0);
+ if (sx->mask & STATX_BTIME) {
+ set_bit(FUSE_I_BTIME, &fi->state);
+ fi->i_btime.tv_sec = sx->btime.tv_sec;
+ fi->i_btime.tv_nsec = sx->btime.tv_nsec;
+ }
+ }
if (attr->blksize != 0)
inode->i_blkbits = ilog2(attr->blksize);
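The cached birthtime is only meaningful while FUSE_I_BTIME is set in fi->state. A hedged sketch of how a later stat path could consume the cache (the actual getattr changes are not part of this hunk; field names follow struct kstat):

	if (test_bit(FUSE_I_BTIME, &fi->state)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime = fi->i_btime;
	}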
@@ -235,6 +256,7 @@ u32 fuse_get_cache_mask(struct inode *inode)
}
void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
+ struct fuse_statx *sx,
u64 attr_valid, u64 attr_version)
{
struct fuse_conn *fc = get_fuse_conn(inode);
@@ -269,7 +291,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
}
old_mtime = inode->i_mtime;
- fuse_change_attributes_common(inode, attr, attr_valid, cache_mask);
+ fuse_change_attributes_common(inode, attr, sx, attr_valid, cache_mask);
oldsize = inode->i_size;
/*
@@ -406,7 +428,7 @@ done:
spin_lock(&fi->lock);
fi->nlookup++;
spin_unlock(&fi->lock);
- fuse_change_attributes(inode, attr, attr_valid, attr_version);
+ fuse_change_attributes(inode, attr, NULL, attr_valid, attr_version);
return inode;
}
@@ -1210,6 +1232,8 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
fc->init_security = 1;
if (flags & FUSE_CREATE_SUPP_GROUP)
fc->create_supp_group = 1;
+ if (flags & FUSE_DIRECT_IO_RELAX)
+ fc->direct_io_relax = 1;
} else {
ra_pages = fc->max_read / PAGE_SIZE;
fc->no_lock = 1;
@@ -1256,7 +1280,7 @@ void fuse_send_init(struct fuse_mount *fm)
FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
- FUSE_HAS_EXPIRE_ONLY;
+ FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_RELAX;
#ifdef CONFIG_FUSE_DAX
if (fm->fc->dax)
flags |= FUSE_MAP_ALIGNMENT;
diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
index dc603479b30e..9e6d587b3e67 100644
--- a/fs/fuse/readdir.c
+++ b/fs/fuse/readdir.c
@@ -223,8 +223,8 @@ retry:
spin_unlock(&fi->lock);
forget_all_cached_acls(inode);
- fuse_change_attributes(inode, &o->attr,
- entry_attr_timeout(o),
+ fuse_change_attributes(inode, &o->attr, NULL,
+ ATTR_TIMEOUT(o),
attr_version);
/*
* The other branch comes via fuse_iget()
@@ -232,7 +232,7 @@ retry:
*/
} else {
inode = fuse_iget(dir->i_sb, o->nodeid, o->generation,
- &o->attr, entry_attr_timeout(o),
+ &o->attr, ATTR_TIMEOUT(o),
attr_version);
if (!inode)
inode = ERR_PTR(-ENOMEM);
@@ -243,8 +243,16 @@ retry:
dput(dentry);
dentry = alias;
}
- if (IS_ERR(dentry))
+ if (IS_ERR(dentry)) {
+ if (!IS_ERR(inode)) {
+ struct fuse_inode *fi = get_fuse_inode(inode);
+
+ spin_lock(&fi->lock);
+ fi->nlookup--;
+ spin_unlock(&fi->lock);
+ }
return PTR_ERR(dentry);
+ }
}
if (fc->readdirplus_auto)
set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state);
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 9c4b26aec580..c26d48355cc2 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -183,13 +183,13 @@ static int gfs2_writepages(struct address_space *mapping,
int ret;
/*
- * Even if we didn't write any pages here, we might still be holding
+ * Even if we didn't write enough pages here, we might still be holding
* dirty pages in the ail. We forcibly flush the ail because we don't
* want balance_dirty_pages() to loop indefinitely trying to write out
* pages held in the ail that it can't find.
*/
ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
- if (ret == 0)
+ if (ret == 0 && wbc->nr_to_write > 0)
set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
return ret;
}
@@ -272,8 +272,7 @@ continue_unlock:
* not be suitable for data integrity
* writeout).
*/
- *done_index = folio->index +
- folio_nr_pages(folio);
+ *done_index = folio_next_index(folio);
ret = 1;
break;
}
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index f62366be7587..ef7017fb6951 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -161,7 +161,7 @@ int gfs2_unstuff_dinode(struct gfs2_inode *ip)
int error;
down_write(&ip->i_rw_mutex);
- page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
+ page = grab_cache_page(inode->i_mapping, 0);
error = -ENOMEM;
if (!page)
goto out;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 1438e7465e30..9cbf8d98489a 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -176,7 +176,7 @@ void gfs2_glock_free(struct gfs2_glock *gl)
wake_up_glock(gl);
call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
- wake_up(&sdp->sd_glock_wait);
+ wake_up(&sdp->sd_kill_wait);
}
/**
@@ -468,10 +468,10 @@ done:
* do_promote - promote as many requests as possible on the current queue
* @gl: The glock
*
- * Returns: 1 if there is a blocked holder at the head of the list
+ * Returns true on success (i.e., progress was made or there are no waiters).
*/
-static int do_promote(struct gfs2_glock *gl)
+static bool do_promote(struct gfs2_glock *gl)
{
struct gfs2_holder *gh, *current_gh;
@@ -484,10 +484,10 @@ static int do_promote(struct gfs2_glock *gl)
* If we get here, it means we may not grant this
* holder for some reason. If this holder is at the
* head of the list, it means we have a blocked holder
- * at the head, so return 1.
+ * at the head, so return false.
*/
if (list_is_first(&gh->gh_list, &gl->gl_holders))
- return 1;
+ return false;
do_error(gl, 0);
break;
}
@@ -497,7 +497,7 @@ static int do_promote(struct gfs2_glock *gl)
if (!current_gh)
current_gh = gh;
}
- return 0;
+ return true;
}
/**
@@ -591,10 +591,11 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
/* move to back of queue and try next entry */
if (ret & LM_OUT_CANCELED) {
- if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
- list_move_tail(&gh->gh_list, &gl->gl_holders);
+ list_move_tail(&gh->gh_list, &gl->gl_holders);
gh = find_first_waiter(gl);
gl->gl_target = gh->gh_state;
+ if (do_promote(gl))
+ goto out;
goto retry;
}
/* Some error or failed "try lock" - report it */
@@ -679,8 +680,7 @@ __acquires(&gl->gl_lockref.lock)
gh && !(gh->gh_flags & LM_FLAG_NOEXP))
goto skip_inval;
- lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
- LM_FLAG_PRIORITY);
+ lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP);
GLOCK_BUG_ON(gl, gl->gl_state == target);
GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
@@ -834,7 +834,7 @@ __acquires(&gl->gl_lockref.lock)
} else {
if (test_bit(GLF_DEMOTE, &gl->gl_flags))
gfs2_demote_wake(gl);
- if (do_promote(gl) == 0)
+ if (do_promote(gl))
goto out_unlock;
gh = find_first_waiter(gl);
gl->gl_target = gh->gh_state;
@@ -1022,7 +1022,7 @@ static void delete_work_func(struct work_struct *work)
* step entirely.
*/
if (gfs2_try_evict(gl)) {
- if (test_bit(SDF_DEACTIVATING, &sdp->sd_flags))
+ if (test_bit(SDF_KILL, &sdp->sd_flags))
goto out;
if (gfs2_queue_verify_evict(gl))
return;
@@ -1035,7 +1035,7 @@ static void delete_work_func(struct work_struct *work)
GFS2_BLKST_UNLINKED);
if (IS_ERR(inode)) {
if (PTR_ERR(inode) == -EAGAIN &&
- !test_bit(SDF_DEACTIVATING, &sdp->sd_flags) &&
+ !test_bit(SDF_KILL, &sdp->sd_flags) &&
gfs2_queue_verify_evict(gl))
return;
} else {
@@ -1231,7 +1231,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
out_free:
gfs2_glock_dealloc(&gl->gl_rcu);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
- wake_up(&sdp->sd_glock_wait);
+ wake_up(&sdp->sd_kill_wait);
out:
return ret;
@@ -1515,27 +1515,20 @@ fail:
}
if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
continue;
- if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
- insert_pt = &gh2->gh_list;
}
trace_gfs2_glock_queue(gh, 1);
gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
if (likely(insert_pt == NULL)) {
list_add_tail(&gh->gh_list, &gl->gl_holders);
- if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
- goto do_cancel;
return;
}
list_add_tail(&gh->gh_list, insert_pt);
-do_cancel:
gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
- if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
- spin_unlock(&gl->gl_lockref.lock);
- if (sdp->sd_lockstruct.ls_ops->lm_cancel)
- sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
- spin_lock(&gl->gl_lockref.lock);
- }
+ spin_unlock(&gl->gl_lockref.lock);
+ if (sdp->sd_lockstruct.ls_ops->lm_cancel)
+ sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
+ spin_lock(&gl->gl_lockref.lock);
return;
trap_recursive:
@@ -2195,7 +2188,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
flush_workqueue(glock_workqueue);
glock_hash_walk(clear_glock, sdp);
flush_workqueue(glock_workqueue);
- wait_event_timeout(sdp->sd_glock_wait,
+ wait_event_timeout(sdp->sd_kill_wait,
atomic_read(&sdp->sd_glock_disposal) == 0,
HZ * 600);
glock_hash_walk(dump_glock_func, sdp);
@@ -2227,8 +2220,6 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
*p++ = 'e';
if (flags & LM_FLAG_ANY)
*p++ = 'A';
- if (flags & LM_FLAG_PRIORITY)
- *p++ = 'p';
if (flags & LM_FLAG_NODE_SCOPE)
*p++ = 'n';
if (flags & GL_ASYNC)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 1f1ba92c15a8..c8685ca7d2a2 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -68,14 +68,6 @@ enum {
* also be granted in SHARED. The preferred state is whichever is compatible
* with other granted locks, or the specified state if no other locks exist.
*
- * LM_FLAG_PRIORITY
- * Override fairness considerations. Suppose a lock is held in a shared state
- * and there is a pending request for the deferred state. A shared lock
- * request with the priority flag would be allowed to bypass the deferred
- * request and directly join the other shared lock. A shared lock request
- * without the priority flag might be forced to wait until the deferred
- * request had acquired and released the lock.
- *
* LM_FLAG_NODE_SCOPE
* This holder agrees to share the lock within this node. In other words,
* the glock is held in EX mode according to DLM, but local holders on the
@@ -86,7 +78,6 @@ enum {
#define LM_FLAG_TRY_1CB 0x0002
#define LM_FLAG_NOEXP 0x0004
#define LM_FLAG_ANY 0x0008
-#define LM_FLAG_PRIORITY 0x0010
#define LM_FLAG_NODE_SCOPE 0x0020
#define GL_ASYNC 0x0040
#define GL_EXACT 0x0080
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index aecdac3cfbe1..d26759a98b10 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -637,7 +637,7 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
if (!remote || sb_rdonly(sdp->sd_vfs) ||
- test_bit(SDF_DEACTIVATING, &sdp->sd_flags))
+ test_bit(SDF_KILL, &sdp->sd_flags))
return;
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 04f2d78e8658..a8c95c5293c6 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -452,7 +452,7 @@ struct gfs2_quota_data {
s64 qd_change_sync;
unsigned int qd_slot;
- unsigned int qd_slot_count;
+ unsigned int qd_slot_ref;
struct buffer_head *qd_bh;
struct gfs2_quota_change *qd_bh_qc;
@@ -537,6 +537,7 @@ struct gfs2_statfs_change_host {
#define GFS2_QUOTA_OFF 0
#define GFS2_QUOTA_ACCOUNT 1
#define GFS2_QUOTA_ON 2
+#define GFS2_QUOTA_QUIET 3 /* on but not complaining */
#define GFS2_DATA_DEFAULT GFS2_DATA_ORDERED
#define GFS2_DATA_WRITEBACK 1
@@ -606,7 +607,7 @@ enum {
SDF_REMOTE_WITHDRAW = 13, /* Performing remote recovery */
SDF_WITHDRAW_RECOVERY = 14, /* Wait for journal recovery when we are
withdrawing */
- SDF_DEACTIVATING = 15,
+ SDF_KILL = 15,
SDF_EVICTING = 16,
SDF_FROZEN = 17,
};
@@ -716,7 +717,7 @@ struct gfs2_sbd {
struct gfs2_glock *sd_rename_gl;
struct gfs2_glock *sd_freeze_gl;
struct work_struct sd_freeze_work;
- wait_queue_head_t sd_glock_wait;
+ wait_queue_head_t sd_kill_wait;
wait_queue_head_t sd_async_glock_wait;
atomic_t sd_glock_disposal;
struct completion sd_locking_init;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index a21ac41d6669..0eac04507904 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -276,10 +276,16 @@ struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
* gfs2_lookup_simple callers expect ENOENT
* and do not check for NULL.
*/
- if (inode == NULL)
- return ERR_PTR(-ENOENT);
- else
- return inode;
+ if (IS_ERR_OR_NULL(inode))
+ return inode ? inode : ERR_PTR(-ENOENT);
+
+ /*
+ * Must not call back into the filesystem when allocating
+ * pages in the metadata inode's address space.
+ */
+ mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+
+ return inode;
}
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 54911294687c..59ab18c79889 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -222,11 +222,6 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
lkf |= DLM_LKF_NOQUEUEBAST;
}
- if (gfs_flags & LM_FLAG_PRIORITY) {
- lkf |= DLM_LKF_NOORDER;
- lkf |= DLM_LKF_HEADQUE;
- }
-
if (gfs_flags & LM_FLAG_ANY) {
if (req == DLM_LOCK_PR)
lkf |= DLM_LKF_ALTCW;
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index aa568796207c..e5271ae87d1c 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -1227,6 +1227,21 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
gfs2_log_unlock(sdp);
}
+static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
+{
+ return atomic_read(&sdp->sd_log_pinned) +
+ atomic_read(&sdp->sd_log_blks_needed) >=
+ atomic_read(&sdp->sd_log_thresh1);
+}
+
+static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
+{
+ return sdp->sd_jdesc->jd_blocks -
+ atomic_read(&sdp->sd_log_blks_free) +
+ atomic_read(&sdp->sd_log_blks_needed) >=
+ atomic_read(&sdp->sd_log_thresh2);
+}
+
/**
* gfs2_log_commit - Commit a transaction to the log
* @sdp: the filesystem
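Moving the two helpers above gfs2_log_commit() lets the commit path reuse the same thresholds as logd. A worked example with hypothetical numbers shows when an AIL flush is requested:

	/* Hypothetical values, illustrative only:
	 *   jd_blocks = 8192, sd_log_blks_free = 1000,
	 *   sd_log_blks_needed = 100, sd_log_thresh2 = 4096
	 *   used + needed = (8192 - 1000) + 100 = 7292 >= 4096
	 *   => gfs2_ail_flush_reqd() is true and logd is woken.
	 */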
@@ -1246,9 +1261,7 @@ void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
log_refund(sdp, tr);
- if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) ||
- ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) >
- atomic_read(&sdp->sd_log_thresh2)))
+ if (gfs2_ail_flush_reqd(sdp) || gfs2_jrnl_flush_reqd(sdp))
wake_up(&sdp->sd_logd_waitq);
}
@@ -1271,24 +1284,6 @@ static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
}
-static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
-{
- return (atomic_read(&sdp->sd_log_pinned) +
- atomic_read(&sdp->sd_log_blks_needed) >=
- atomic_read(&sdp->sd_log_thresh1));
-}
-
-static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
-{
- unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free);
-
- if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags))
- return 1;
-
- return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >=
- atomic_read(&sdp->sd_log_thresh2);
-}
-
/**
* gfs2_logd - Update log tail as Active Items get flushed to in-place blocks
* @data: Pointer to GFS2 superblock
@@ -1301,14 +1296,11 @@ int gfs2_logd(void *data)
{
struct gfs2_sbd *sdp = data;
unsigned long t = 1;
- DEFINE_WAIT(wait);
while (!kthread_should_stop()) {
+ if (gfs2_withdrawn(sdp))
+ break;
- if (gfs2_withdrawn(sdp)) {
- msleep_interruptible(HZ);
- continue;
- }
/* Check for errors writing to the journal */
if (sdp->sd_log_error) {
gfs2_lm(sdp,
@@ -1317,7 +1309,7 @@ int gfs2_logd(void *data)
"prevent further damage.\n",
sdp->sd_fsname, sdp->sd_log_error);
gfs2_withdraw(sdp);
- continue;
+ break;
}
if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
@@ -1326,7 +1318,9 @@ int gfs2_logd(void *data)
GFS2_LFC_LOGD_JFLUSH_REQD);
}
- if (gfs2_ail_flush_reqd(sdp)) {
+ if (test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
+ gfs2_ail_flush_reqd(sdp)) {
+ clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
gfs2_ail1_start(sdp);
gfs2_ail1_wait(sdp);
gfs2_ail1_empty(sdp, 0);
@@ -1338,17 +1332,14 @@ int gfs2_logd(void *data)
try_to_freeze();
- do {
- prepare_to_wait(&sdp->sd_logd_waitq, &wait,
- TASK_INTERRUPTIBLE);
- if (!gfs2_ail_flush_reqd(sdp) &&
- !gfs2_jrnl_flush_reqd(sdp) &&
- !kthread_should_stop())
- t = schedule_timeout(t);
- } while(t && !gfs2_ail_flush_reqd(sdp) &&
- !gfs2_jrnl_flush_reqd(sdp) &&
- !kthread_should_stop());
- finish_wait(&sdp->sd_logd_waitq, &wait);
+ t = wait_event_interruptible_timeout(sdp->sd_logd_waitq,
+ test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
+ gfs2_ail_flush_reqd(sdp) ||
+ gfs2_jrnl_flush_reqd(sdp) ||
+ sdp->sd_log_error ||
+ gfs2_withdrawn(sdp) ||
+ kthread_should_stop(),
+ t);
}
return 0;
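The open-coded prepare_to_wait()/schedule_timeout() loop is replaced by wait_event_interruptible_timeout(), which re-checks its condition after every wakeup and handles the waitqueue bookkeeping itself. A generic sketch of the resulting kthread loop, with hypothetical helper names:

	while (!kthread_should_stop()) {
		do_periodic_work();		/* hypothetical */
		t = wait_event_interruptible_timeout(waitq,
				flush_needed() ||	/* hypothetical */
				kthread_should_stop(),
				t);
	}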
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 251322b01631..483f69807062 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -456,7 +456,7 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
* Find the folio with 'index' in the journal's mapping. Search the folio for
* the journal head if requested (cleanup == false). Release refs on the
* folio so the page cache can reclaim it. We grabbed a
- * reference on this folio twice, first when we did a find_or_create_page()
+ * reference on this folio twice, first when we did a grab_cache_page()
* to obtain the folio to add it to the bio and second when we do a
* filemap_get_folio() here to get the folio to wait on while I/O on it is being
* completed.
@@ -481,7 +481,7 @@ static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
if (!*done)
*done = gfs2_jhead_pg_srch(jd, head, &folio->page);
- /* filemap_get_folio() and the earlier find_or_create_page() */
+ /* filemap_get_folio() and the earlier grab_cache_page() */
folio_put_refs(folio, 2);
}
@@ -535,8 +535,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
for (; block < je->lblock + je->blocks; block++, dblock++) {
if (!page) {
- page = find_or_create_page(mapping,
- block >> shift, GFP_NOFS);
+ page = grab_cache_page(mapping, block >> shift);
if (!page) {
ret = -ENOMEM;
done = true;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index afcb32854f14..66eb98b690a2 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -152,9 +152,9 @@ static int __init init_gfs2_fs(void)
goto fail_shrinker;
error = -ENOMEM;
- gfs_recovery_wq = alloc_workqueue("gfs_recovery",
+ gfs2_recovery_wq = alloc_workqueue("gfs2_recovery",
WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
- if (!gfs_recovery_wq)
+ if (!gfs2_recovery_wq)
goto fail_wq1;
gfs2_control_wq = alloc_workqueue("gfs2_control",
@@ -162,7 +162,7 @@ static int __init init_gfs2_fs(void)
if (!gfs2_control_wq)
goto fail_wq2;
- gfs2_freeze_wq = alloc_workqueue("freeze_workqueue", 0, 0);
+ gfs2_freeze_wq = alloc_workqueue("gfs2_freeze", 0, 0);
if (!gfs2_freeze_wq)
goto fail_wq3;
@@ -194,7 +194,7 @@ fail_mempool:
fail_wq3:
destroy_workqueue(gfs2_control_wq);
fail_wq2:
- destroy_workqueue(gfs_recovery_wq);
+ destroy_workqueue(gfs2_recovery_wq);
fail_wq1:
unregister_shrinker(&gfs2_qd_shrinker);
fail_shrinker:
@@ -234,7 +234,7 @@ static void __exit exit_gfs2_fs(void)
gfs2_unregister_debugfs();
unregister_filesystem(&gfs2_fs_type);
unregister_filesystem(&gfs2meta_fs_type);
- destroy_workqueue(gfs_recovery_wq);
+ destroy_workqueue(gfs2_recovery_wq);
destroy_workqueue(gfs2_control_wq);
destroy_workqueue(gfs2_freeze_wq);
list_lru_destroy(&gfs2_qd_lru);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 8a27957dbfee..33ca04733e93 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -87,7 +87,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
gfs2_tune_init(&sdp->sd_tune);
- init_waitqueue_head(&sdp->sd_glock_wait);
+ init_waitqueue_head(&sdp->sd_kill_wait);
init_waitqueue_head(&sdp->sd_async_glock_wait);
atomic_set(&sdp->sd_glock_disposal, 0);
init_completion(&sdp->sd_locking_init);
@@ -1103,29 +1103,49 @@ static int init_threads(struct gfs2_sbd *sdp)
struct task_struct *p;
int error = 0;
- p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
+ p = kthread_create(gfs2_logd, sdp, "gfs2_logd/%s", sdp->sd_fsname);
if (IS_ERR(p)) {
error = PTR_ERR(p);
- fs_err(sdp, "can't start logd thread: %d\n", error);
+ fs_err(sdp, "can't create logd thread: %d\n", error);
return error;
}
+ get_task_struct(p);
sdp->sd_logd_process = p;
- p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
+ p = kthread_create(gfs2_quotad, sdp, "gfs2_quotad/%s", sdp->sd_fsname);
if (IS_ERR(p)) {
error = PTR_ERR(p);
- fs_err(sdp, "can't start quotad thread: %d\n", error);
+ fs_err(sdp, "can't create quotad thread: %d\n", error);
goto fail;
}
+ get_task_struct(p);
sdp->sd_quotad_process = p;
+
+ wake_up_process(sdp->sd_logd_process);
+ wake_up_process(sdp->sd_quotad_process);
return 0;
fail:
kthread_stop(sdp->sd_logd_process);
+ put_task_struct(sdp->sd_logd_process);
sdp->sd_logd_process = NULL;
return error;
}
+void gfs2_destroy_threads(struct gfs2_sbd *sdp)
+{
+ if (sdp->sd_logd_process) {
+ kthread_stop(sdp->sd_logd_process);
+ put_task_struct(sdp->sd_logd_process);
+ sdp->sd_logd_process = NULL;
+ }
+ if (sdp->sd_quotad_process) {
+ kthread_stop(sdp->sd_quotad_process);
+ put_task_struct(sdp->sd_quotad_process);
+ sdp->sd_quotad_process = NULL;
+ }
+}
+
/**
* gfs2_fill_super - Read in superblock
* @sb: The VFS superblock
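kthread_run() is split into kthread_create() plus an explicit get_task_struct() and wake_up_process(), so the superblock holds a task_struct reference that stays valid until gfs2_destroy_threads() pairs kthread_stop() with put_task_struct(), even if the thread exits on its own first. A generic sketch of the pattern (hypothetical names):

	struct task_struct *p = kthread_create(worker_fn, data, "worker/%s", name);

	if (IS_ERR(p))
		return PTR_ERR(p);
	get_task_struct(p);	/* pin task_struct for the later kthread_stop() */
	wake_up_process(p);
	/* ... at teardown: */
	kthread_stop(p);
	put_task_struct(p);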
@@ -1276,12 +1296,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
if (error) {
gfs2_freeze_unlock(&sdp->sd_freeze_gh);
- if (sdp->sd_quotad_process)
- kthread_stop(sdp->sd_quotad_process);
- sdp->sd_quotad_process = NULL;
- if (sdp->sd_logd_process)
- kthread_stop(sdp->sd_logd_process);
- sdp->sd_logd_process = NULL;
+ gfs2_destroy_threads(sdp);
fs_err(sdp, "can't make FS RW: %d\n", error);
goto fail_per_node;
}
@@ -1381,6 +1396,7 @@ static const struct constant_table gfs2_param_quota[] = {
{"off", GFS2_QUOTA_OFF},
{"account", GFS2_QUOTA_ACCOUNT},
{"on", GFS2_QUOTA_ON},
+ {"quiet", GFS2_QUOTA_QUIET},
{}
};
@@ -1786,9 +1802,9 @@ static void gfs2_kill_sb(struct super_block *sb)
/*
* Flush and then drain the delete workqueue here (via
* destroy_workqueue()) to ensure that any delete work that
- * may be running will also see the SDF_DEACTIVATING flag.
+ * may be running will also see the SDF_KILL flag.
*/
- set_bit(SDF_DEACTIVATING, &sdp->sd_flags);
+ set_bit(SDF_KILL, &sdp->sd_flags);
gfs2_flush_delete_work(sdp);
destroy_workqueue(sdp->sd_delete_wq);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index aa5fd06d47bc..171b2713d2e5 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -109,38 +109,44 @@ static inline void spin_unlock_bucket(unsigned int hash)
static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
+ struct gfs2_sbd *sdp = qd->qd_sbd;
+
kmem_cache_free(gfs2_quotad_cachep, qd);
+ if (atomic_dec_and_test(&sdp->sd_quota_count))
+ wake_up(&sdp->sd_kill_wait);
}
-static void gfs2_qd_dispose(struct list_head *list)
+static void gfs2_qd_dispose(struct gfs2_quota_data *qd)
{
- struct gfs2_quota_data *qd;
- struct gfs2_sbd *sdp;
-
- while (!list_empty(list)) {
- qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
- sdp = qd->qd_gl->gl_name.ln_sbd;
-
- list_del(&qd->qd_lru);
+ struct gfs2_sbd *sdp = qd->qd_sbd;
- /* Free from the filesystem-specific list */
- spin_lock(&qd_lock);
- list_del(&qd->qd_list);
- spin_unlock(&qd_lock);
+ spin_lock(&qd_lock);
+ list_del(&qd->qd_list);
+ spin_unlock(&qd_lock);
- spin_lock_bucket(qd->qd_hash);
- hlist_bl_del_rcu(&qd->qd_hlist);
- spin_unlock_bucket(qd->qd_hash);
+ spin_lock_bucket(qd->qd_hash);
+ hlist_bl_del_rcu(&qd->qd_hlist);
+ spin_unlock_bucket(qd->qd_hash);
+ if (!gfs2_withdrawn(sdp)) {
gfs2_assert_warn(sdp, !qd->qd_change);
- gfs2_assert_warn(sdp, !qd->qd_slot_count);
+ gfs2_assert_warn(sdp, !qd->qd_slot_ref);
gfs2_assert_warn(sdp, !qd->qd_bh_count);
+ }
- gfs2_glock_put(qd->qd_gl);
- atomic_dec(&sdp->sd_quota_count);
+ gfs2_glock_put(qd->qd_gl);
+ call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
+}
- /* Delete it from the common reclaim list */
- call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
+static void gfs2_qd_list_dispose(struct list_head *list)
+{
+ struct gfs2_quota_data *qd;
+
+ while (!list_empty(list)) {
+ qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
+ list_del(&qd->qd_lru);
+
+ gfs2_qd_dispose(qd);
}
}
@@ -149,18 +155,22 @@ static enum lru_status gfs2_qd_isolate(struct list_head *item,
struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
struct list_head *dispose = arg;
- struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
+ struct gfs2_quota_data *qd =
+ list_entry(item, struct gfs2_quota_data, qd_lru);
+ enum lru_status status;
if (!spin_trylock(&qd->qd_lockref.lock))
return LRU_SKIP;
+ status = LRU_SKIP;
if (qd->qd_lockref.count == 0) {
lockref_mark_dead(&qd->qd_lockref);
list_lru_isolate_move(lru, &qd->qd_lru, dispose);
+ status = LRU_REMOVED;
}
spin_unlock(&qd->qd_lockref.lock);
- return LRU_REMOVED;
+ return status;
}
static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
@@ -175,7 +185,7 @@ static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
freed = list_lru_shrink_walk(&gfs2_qd_lru, sc,
gfs2_qd_isolate, &dispose);
- gfs2_qd_dispose(&dispose);
+ gfs2_qd_list_dispose(&dispose);
return freed;
}
@@ -203,12 +213,7 @@ static u64 qd2index(struct gfs2_quota_data *qd)
static u64 qd2offset(struct gfs2_quota_data *qd)
{
- u64 offset;
-
- offset = qd2index(qd);
- offset *= sizeof(struct gfs2_quota);
-
- return offset;
+ return qd2index(qd) * sizeof(struct gfs2_quota);
}
static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
@@ -221,7 +226,7 @@ static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, str
return NULL;
qd->qd_sbd = sdp;
- qd->qd_lockref.count = 1;
+ qd->qd_lockref.count = 0;
spin_lock_init(&qd->qd_lockref.lock);
qd->qd_id = qid;
qd->qd_slot = -1;
@@ -283,6 +288,7 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
spin_lock_bucket(hash);
*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
if (qd == NULL) {
+ new_qd->qd_lockref.count++;
*qdp = new_qd;
list_add(&new_qd->qd_list, &sdp->sd_quota_list);
hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
@@ -302,20 +308,31 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
static void qd_hold(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
lockref_get(&qd->qd_lockref);
}
static void qd_put(struct gfs2_quota_data *qd)
{
+ struct gfs2_sbd *sdp;
+
if (lockref_put_or_lock(&qd->qd_lockref))
return;
+ BUG_ON(__lockref_is_dead(&qd->qd_lockref));
+ sdp = qd->qd_sbd;
+ if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
+ lockref_mark_dead(&qd->qd_lockref);
+ spin_unlock(&qd->qd_lockref.lock);
+
+ gfs2_qd_dispose(qd);
+ return;
+ }
+
qd->qd_lockref.count = 0;
list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
spin_unlock(&qd->qd_lockref.lock);
-
}
static int slot_get(struct gfs2_quota_data *qd)
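qd_put() now leans on the lockref contract: lockref_put_or_lock() drops a non-final reference without touching the spinlock and returns nonzero; on the final put it returns zero with qd_lockref.lock held, so the slow path above runs under the lock and can choose between immediate disposal (journal no longer live) and parking the qd on the LRU. Restated for reference:

	/* lockref_put_or_lock(&lr):
	 *   nonzero -> count was > 1, it has been decremented, lock not taken
	 *   zero    -> count was <= 1, count unchanged, lr->lock is now held
	 */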
@@ -325,20 +342,19 @@ static int slot_get(struct gfs2_quota_data *qd)
int error = 0;
spin_lock(&sdp->sd_bitmap_lock);
- if (qd->qd_slot_count != 0)
- goto out;
-
- error = -ENOSPC;
- bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
- if (bit < sdp->sd_quota_slots) {
+ if (qd->qd_slot_ref == 0) {
+ bit = find_first_zero_bit(sdp->sd_quota_bitmap,
+ sdp->sd_quota_slots);
+ if (bit >= sdp->sd_quota_slots) {
+ error = -ENOSPC;
+ goto out;
+ }
set_bit(bit, sdp->sd_quota_bitmap);
qd->qd_slot = bit;
- error = 0;
-out:
- qd->qd_slot_count++;
}
+ qd->qd_slot_ref++;
+out:
spin_unlock(&sdp->sd_bitmap_lock);
-
return error;
}
@@ -347,8 +363,8 @@ static void slot_hold(struct gfs2_quota_data *qd)
struct gfs2_sbd *sdp = qd->qd_sbd;
spin_lock(&sdp->sd_bitmap_lock);
- gfs2_assert(sdp, qd->qd_slot_count);
- qd->qd_slot_count++;
+ gfs2_assert(sdp, qd->qd_slot_ref);
+ qd->qd_slot_ref++;
spin_unlock(&sdp->sd_bitmap_lock);
}
@@ -357,8 +373,8 @@ static void slot_put(struct gfs2_quota_data *qd)
struct gfs2_sbd *sdp = qd->qd_sbd;
spin_lock(&sdp->sd_bitmap_lock);
- gfs2_assert(sdp, qd->qd_slot_count);
- if (!--qd->qd_slot_count) {
+ gfs2_assert(sdp, qd->qd_slot_ref);
+ if (!--qd->qd_slot_ref) {
BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
qd->qd_slot = -1;
}
@@ -367,7 +383,7 @@ static void slot_put(struct gfs2_quota_data *qd)
static int bh_get(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
struct inode *inode = sdp->sd_qc_inode;
struct gfs2_inode *ip = GFS2_I(inode);
unsigned int block, offset;
@@ -421,7 +437,7 @@ fail:
static void bh_put(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
mutex_lock(&sdp->sd_quota_mutex);
gfs2_assert(sdp, qd->qd_bh_count);
@@ -451,6 +467,20 @@ static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
return 1;
}
+static int qd_bh_get_or_undo(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
+{
+ int error;
+
+ error = bh_get(qd);
+ if (!error)
+ return 0;
+
+ clear_bit(QDF_LOCKED, &qd->qd_flags);
+ slot_put(qd);
+ qd_put(qd);
+ return error;
+}
+
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
struct gfs2_quota_data *qd = NULL, *iter;
@@ -473,30 +503,29 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
spin_unlock(&qd_lock);
if (qd) {
- error = bh_get(qd);
- if (error) {
- clear_bit(QDF_LOCKED, &qd->qd_flags);
- slot_put(qd);
- qd_put(qd);
+ error = qd_bh_get_or_undo(sdp, qd);
+ if (error)
return error;
- }
+ *qdp = qd;
}
- *qdp = qd;
-
return 0;
}
-static void qd_unlock(struct gfs2_quota_data *qd)
+static void qdsb_put(struct gfs2_quota_data *qd)
{
- gfs2_assert_warn(qd->qd_gl->gl_name.ln_sbd,
- test_bit(QDF_LOCKED, &qd->qd_flags));
- clear_bit(QDF_LOCKED, &qd->qd_flags);
bh_put(qd);
slot_put(qd);
qd_put(qd);
}
+static void qd_unlock(struct gfs2_quota_data *qd)
+{
+ gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags));
+ clear_bit(QDF_LOCKED, &qd->qd_flags);
+ qdsb_put(qd);
+}
+
static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
struct gfs2_quota_data **qdp)
{
@@ -523,13 +552,6 @@ fail:
return error;
}
-static void qdsb_put(struct gfs2_quota_data *qd)
-{
- bh_put(qd);
- slot_put(qd);
- qd_put(qd);
-}
-
/**
* gfs2_qa_get - make sure we have a quota allocations data structure,
* if necessary
@@ -666,7 +688,7 @@ static int sort_qd(const void *a, const void *b)
static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
struct gfs2_quota_change *qc = qd->qd_bh_qc;
s64 x;
@@ -708,30 +730,29 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change, int qc_type)
mutex_unlock(&sdp->sd_quota_mutex);
}
-static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
+static int gfs2_write_buf_to_page(struct gfs2_sbd *sdp, unsigned long index,
unsigned off, void *buf, unsigned bytes)
{
+ struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
struct inode *inode = &ip->i_inode;
- struct gfs2_sbd *sdp = GFS2_SB(inode);
struct address_space *mapping = inode->i_mapping;
struct page *page;
struct buffer_head *bh;
u64 blk;
unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0;
unsigned to_write = bytes, pg_off = off;
- int done = 0;
blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift);
boff = off % bsize;
- page = find_or_create_page(mapping, index, GFP_NOFS);
+ page = grab_cache_page(mapping, index);
if (!page)
return -ENOMEM;
if (!page_has_buffers(page))
create_empty_buffers(page, bsize, 0);
bh = page_buffers(page);
- while (!done) {
+ for (;;) {
/* Find the beginning block within the page */
if (pg_off >= ((bnum * bsize) + bsize)) {
bh = bh->b_this_page;
@@ -751,10 +772,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
set_buffer_uptodate(bh);
if (bh_read(bh, REQ_META | REQ_PRIO) < 0)
goto unlock_out;
- if (gfs2_is_jdata(ip))
- gfs2_trans_add_data(ip->i_gl, bh);
- else
- gfs2_ordered_add_inode(ip);
+ gfs2_trans_add_data(ip->i_gl, bh);
/* If we need to write to the next block as well */
if (to_write > (bsize - boff)) {
@@ -763,7 +781,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
boff = pg_off % bsize;
continue;
}
- done = 1;
+ break;
}
/* Write to the page, now that we have setup the buffer(s) */
@@ -780,12 +798,12 @@ unlock_out:
return -EIO;
}
-static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
+static int gfs2_write_disk_quota(struct gfs2_sbd *sdp, struct gfs2_quota *qp,
loff_t loc)
{
unsigned long pg_beg;
unsigned pg_off, nbytes, overflow = 0;
- int pg_oflow = 0, error;
+ int error;
void *ptr;
nbytes = sizeof(struct gfs2_quota);
@@ -794,17 +812,15 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
pg_off = offset_in_page(loc);
/* If the quota straddles a page boundary, split the write in two */
- if ((pg_off + nbytes) > PAGE_SIZE) {
- pg_oflow = 1;
+ if ((pg_off + nbytes) > PAGE_SIZE)
overflow = (pg_off + nbytes) - PAGE_SIZE;
- }
ptr = qp;
- error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr,
+ error = gfs2_write_buf_to_page(sdp, pg_beg, pg_off, ptr,
nbytes - overflow);
/* If there's an overflow, write the remaining bytes to the next page */
- if (!error && pg_oflow)
- error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0,
+ if (!error && overflow)
+ error = gfs2_write_buf_to_page(sdp, pg_beg + 1, 0,
ptr + nbytes - overflow,
overflow);
return error;
@@ -812,7 +828,7 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
/**
* gfs2_adjust_quota - adjust record of current block usage
- * @ip: The quota inode
+ * @sdp: The superblock
* @loc: Offset of the entry in the quota file
* @change: The amount of usage change to record
* @qd: The quota data
@@ -824,12 +840,12 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp,
* Returns: 0 or -ve on error
*/
-static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
+static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
s64 change, struct gfs2_quota_data *qd,
struct qc_dqblk *fdq)
{
+ struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
struct inode *inode = &ip->i_inode;
- struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_quota q;
int err;
u64 size;
@@ -846,7 +862,6 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
return err;
loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
- err = -EIO;
be64_add_cpu(&q.qu_value, change);
if (((s64)be64_to_cpu(q.qu_value)) < 0)
q.qu_value = 0; /* Never go negative on quota usage */
@@ -866,7 +881,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
}
}
- err = gfs2_write_disk_quota(ip, &q, loc);
+ err = gfs2_write_disk_quota(sdp, &q, loc);
if (!err) {
size = loc + sizeof(struct gfs2_quota);
if (size > inode->i_size)
@@ -881,7 +896,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
- struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = (*qda)->qd_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
struct gfs2_alloc_parms ap = { .aflags = 0, };
unsigned int data_blocks, ind_blocks;
@@ -893,18 +908,12 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
unsigned int nalloc = 0, blocks;
int error;
- error = gfs2_qa_get(ip);
- if (error)
- return error;
-
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
&data_blocks, &ind_blocks);
ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
- if (!ghs) {
- error = -ENOMEM;
- goto out;
- }
+ if (!ghs)
+ return -ENOMEM;
sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
inode_lock(&ip->i_inode);
@@ -953,7 +962,8 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
for (x = 0; x < num_qd; x++) {
qd = qda[x];
offset = qd2offset(qd);
- error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
+ error = gfs2_adjust_quota(sdp, offset, qd->qd_change_sync, qd,
+ NULL);
if (error)
goto out_end_trans;
@@ -961,8 +971,6 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
set_bit(QDF_REFRESH, &qd->qd_flags);
}
- error = 0;
-
out_end_trans:
gfs2_trans_end(sdp);
out_ipres:
@@ -976,8 +984,10 @@ out_dq:
kfree(ghs);
gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
-out:
- gfs2_qa_put(ip);
+ if (!error) {
+ for (x = 0; x < num_qd; x++)
+ qda[x]->qd_sync_gen = sdp->sd_quota_sync_gen;
+ }
return error;
}
@@ -1009,11 +1019,12 @@ static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
struct gfs2_holder *q_gh)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
struct gfs2_holder i_gh;
int error;
+ gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd);
restart:
error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
if (error)
@@ -1059,9 +1070,10 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_quota_data *qd;
u32 x;
- int error = 0;
+ int error;
- if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
+ if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
+ sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
return 0;
error = gfs2_quota_hold(ip, uid, gid);
@@ -1089,16 +1101,15 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
return error;
}
-static int need_sync(struct gfs2_quota_data *qd)
+static bool need_sync(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
struct gfs2_tune *gt = &sdp->sd_tune;
s64 value;
unsigned int num, den;
- int do_sync = 1;
if (!qd->qd_qb.qb_limit)
- return 0;
+ return false;
spin_lock(&qd_lock);
value = qd->qd_change;
@@ -1109,26 +1120,26 @@ static int need_sync(struct gfs2_quota_data *qd)
den = gt->gt_quota_scale_den;
spin_unlock(&gt->gt_spin);
- if (value < 0)
- do_sync = 0;
+ if (value <= 0)
+ return false;
else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
(s64)be64_to_cpu(qd->qd_qb.qb_limit))
- do_sync = 0;
+ return false;
else {
value *= gfs2_jindex_size(sdp) * num;
value = div_s64(value, den);
value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
- do_sync = 0;
+ return false;
}
- return do_sync;
+ return true;
}
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_quota_data *qda[4];
+ struct gfs2_quota_data *qda[2 * GFS2_MAXQUOTAS];
unsigned int count = 0;
u32 x;
int found;
@@ -1138,7 +1149,7 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
struct gfs2_quota_data *qd;
- int sync;
+ bool sync;
qd = ip->i_qadata->qa_qd[x];
sync = need_sync(qd);
@@ -1154,15 +1165,8 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
if (!found)
continue;
- gfs2_assert_warn(sdp, qd->qd_change_sync);
- if (bh_get(qd)) {
- clear_bit(QDF_LOCKED, &qd->qd_flags);
- slot_put(qd);
- qd_put(qd);
- continue;
- }
-
- qda[count++] = qd;
+ if (!qd_bh_get_or_undo(sdp, qd))
+ qda[count++] = qd;
}
if (count) {
@@ -1178,12 +1182,13 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
static int print_message(struct gfs2_quota_data *qd, char *type)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = qd->qd_sbd;
- fs_info(sdp, "quota %s for %s %u\n",
- type,
- (qd->qd_id.type == USRQUOTA) ? "user" : "group",
- from_kqid(&init_user_ns, qd->qd_id));
+ if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
+ fs_info(sdp, "quota %s for %s %u\n",
+ type,
+ (qd->qd_id.type == USRQUOTA) ? "user" : "group",
+ from_kqid(&init_user_ns, qd->qd_id));
return 0;
}
@@ -1269,7 +1274,8 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
u32 x;
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON ||
+ if ((sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
+ sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) ||
gfs2_assert_warn(sdp, change))
return;
if (ip->i_diskflags & GFS2_DIF_SYSTEM)
@@ -1288,6 +1294,24 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
}
}
+static bool qd_changed(struct gfs2_sbd *sdp)
+{
+ struct gfs2_quota_data *qd;
+ bool changed = false;
+
+ spin_lock(&qd_lock);
+ list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
+ if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
+ !test_bit(QDF_CHANGE, &qd->qd_flags))
+ continue;
+
+ changed = true;
+ break;
+ }
+ spin_unlock(&qd_lock);
+ return changed;
+}
+
int gfs2_quota_sync(struct super_block *sb, int type)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
@@ -1297,6 +1321,9 @@ int gfs2_quota_sync(struct super_block *sb, int type)
unsigned int x;
int error = 0;
+ if (!qd_changed(sdp))
+ return 0;
+
qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
if (!qda)
return -ENOMEM;
@@ -1318,10 +1345,6 @@ int gfs2_quota_sync(struct super_block *sb, int type)
if (num_qd) {
if (!error)
error = do_sync(num_qd, qda);
- if (!error)
- for (x = 0; x < num_qd; x++)
- qda[x]->qd_sync_gen =
- sdp->sd_quota_sync_gen;
for (x = 0; x < num_qd; x++)
qd_unlock(qda[x]);
@@ -1423,7 +1446,7 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
set_bit(QDF_CHANGE, &qd->qd_flags);
qd->qd_change = qc_change;
qd->qd_slot = slot;
- qd->qd_slot_count = 1;
+ qd->qd_slot_ref = 1;
spin_lock(&qd_lock);
BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
@@ -1455,36 +1478,35 @@ fail:
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
- struct list_head *head = &sdp->sd_quota_list;
struct gfs2_quota_data *qd;
+ LIST_HEAD(dispose);
+ int count;
- spin_lock(&qd_lock);
- while (!list_empty(head)) {
- qd = list_last_entry(head, struct gfs2_quota_data, qd_list);
+ BUG_ON(test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));
- list_del(&qd->qd_list);
+ spin_lock(&qd_lock);
+ list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
+ spin_lock(&qd->qd_lockref.lock);
+ if (qd->qd_lockref.count != 0) {
+ spin_unlock(&qd->qd_lockref.lock);
+ continue;
+ }
+ lockref_mark_dead(&qd->qd_lockref);
+ spin_unlock(&qd->qd_lockref.lock);
- /* Also remove if this qd exists in the reclaim list */
list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
- atomic_dec(&sdp->sd_quota_count);
- spin_unlock(&qd_lock);
-
- spin_lock_bucket(qd->qd_hash);
- hlist_bl_del_rcu(&qd->qd_hlist);
- spin_unlock_bucket(qd->qd_hash);
-
- gfs2_assert_warn(sdp, !qd->qd_change);
- gfs2_assert_warn(sdp, !qd->qd_slot_count);
- gfs2_assert_warn(sdp, !qd->qd_bh_count);
-
- gfs2_glock_put(qd->qd_gl);
- call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
-
- spin_lock(&qd_lock);
+ list_add(&qd->qd_lru, &dispose);
}
spin_unlock(&qd_lock);
- gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
+ gfs2_qd_list_dispose(&dispose);
+
+ wait_event_timeout(sdp->sd_kill_wait,
+ (count = atomic_read(&sdp->sd_quota_count)) == 0,
+ HZ * 60);
+
+ if (count != 0)
+ fs_err(sdp, "%d left-over quota data objects\n", count);
kvfree(sdp->sd_quota_bitmap);
sdp->sd_quota_bitmap = NULL;
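Quota data objects are now freed through call_rcu(); the RCU callback drops sd_quota_count and wakes sd_kill_wait, which is what the wait_event_timeout() above waits on. A generic sketch of that deferred-free accounting pattern (hypothetical object and names):

	static void example_dealloc(struct rcu_head *rcu)
	{
		struct example_obj *o = container_of(rcu, struct example_obj, rcu);

		kfree(o);
		if (atomic_dec_and_test(&example_count))
			wake_up(&example_wait);
	}

	/* teardown side: */
	wait_event_timeout(example_wait, atomic_read(&example_count) == 0, HZ * 60);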
@@ -1536,12 +1558,11 @@ int gfs2_quotad(void *data)
unsigned long statfs_timeo = 0;
unsigned long quotad_timeo = 0;
unsigned long t = 0;
- DEFINE_WAIT(wait);
while (!kthread_should_stop()) {
-
if (gfs2_withdrawn(sdp))
- goto bypass;
+ break;
+
/* Update the master statfs file */
if (sdp->sd_statfs_force_sync) {
int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
@@ -1559,15 +1580,16 @@ int gfs2_quotad(void *data)
try_to_freeze();
-bypass:
t = min(quotad_timeo, statfs_timeo);
- prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
- if (!sdp->sd_statfs_force_sync)
- t -= schedule_timeout(t);
- else
+ t = wait_event_interruptible_timeout(sdp->sd_quota_wait,
+ sdp->sd_statfs_force_sync ||
+ gfs2_withdrawn(sdp) ||
+ kthread_should_stop(),
+ t);
+
+ if (sdp->sd_statfs_force_sync)
t = 0;
- finish_wait(&sdp->sd_quota_wait, &wait);
}
return 0;
@@ -1580,6 +1602,8 @@ static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state)
memset(state, 0, sizeof(*state));
switch (sdp->sd_args.ar_quota) {
+ case GFS2_QUOTA_QUIET:
+ fallthrough;
case GFS2_QUOTA_ON:
state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;
state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;
@@ -1726,7 +1750,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
goto out_release;
/* Apply changes */
- error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
+ error = gfs2_adjust_quota(sdp, offset, 0, qd, fdq);
if (!error)
clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 9c7a9f640bad..5aae02669a40 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -27,7 +27,7 @@
#include "util.h"
#include "dir.h"
-struct workqueue_struct *gfs_recovery_wq;
+struct workqueue_struct *gfs2_recovery_wq;
int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
struct buffer_head **bh)
@@ -570,7 +570,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait)
return -EBUSY;
/* we have JDF_RECOVERY, queue should always succeed */
- rv = queue_work(gfs_recovery_wq, &jd->jd_work);
+ rv = queue_work(gfs2_recovery_wq, &jd->jd_work);
BUG_ON(!rv);
if (wait)
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h
index 0d30f8e804f4..7a0c9d0b7503 100644
--- a/fs/gfs2/recovery.h
+++ b/fs/gfs2/recovery.h
@@ -9,7 +9,7 @@
#include "incore.h"
-extern struct workqueue_struct *gfs_recovery_wq;
+extern struct workqueue_struct *gfs2_recovery_wq;
static inline void gfs2_replay_incr_blk(struct gfs2_jdesc *jd, u32 *blk)
{
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 2f701335e8ee..02d93da21b2b 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -546,20 +546,10 @@ void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
- if (!test_bit(SDF_DEACTIVATING, &sdp->sd_flags))
+ if (!test_bit(SDF_KILL, &sdp->sd_flags))
gfs2_flush_delete_work(sdp);
- if (!log_write_allowed && current == sdp->sd_quotad_process)
- fs_warn(sdp, "The quotad daemon is withdrawing.\n");
- else if (sdp->sd_quotad_process)
- kthread_stop(sdp->sd_quotad_process);
- sdp->sd_quotad_process = NULL;
-
- if (!log_write_allowed && current == sdp->sd_logd_process)
- fs_warn(sdp, "The logd daemon is withdrawing.\n");
- else if (sdp->sd_logd_process)
- kthread_stop(sdp->sd_logd_process);
- sdp->sd_logd_process = NULL;
+ gfs2_destroy_threads(sdp);
if (log_write_allowed) {
gfs2_quota_sync(sdp->sd_vfs, 0);
@@ -580,15 +570,8 @@ void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
gfs2_log_is_empty(sdp),
HZ * 5);
gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
- } else {
- wait_event_timeout(sdp->sd_log_waitq,
- gfs2_log_is_empty(sdp),
- HZ * 5);
}
gfs2_quota_cleanup(sdp);
-
- if (!log_write_allowed)
- sdp->sd_vfs->s_flags |= SB_RDONLY;
}
/**
@@ -622,6 +605,10 @@ restart:
if (!sb_rdonly(sb)) {
gfs2_make_fs_ro(sdp);
}
+ if (gfs2_withdrawn(sdp)) {
+ gfs2_destroy_threads(sdp);
+ gfs2_quota_cleanup(sdp);
+ }
WARN_ON(gfs2_withdrawing(sdp));
/* At this point, we're through modifying the disk */
@@ -1134,6 +1121,9 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
case GFS2_QUOTA_ON:
state = "on";
break;
+ case GFS2_QUOTA_QUIET:
+ state = "quiet";
+ break;
default:
state = "unknown";
break;
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index bba58629bc45..ab9c83106932 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -36,6 +36,7 @@ extern int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
extern void gfs2_make_fs_ro(struct gfs2_sbd *sdp);
extern void gfs2_online_uevent(struct gfs2_sbd *sdp);
+extern void gfs2_destroy_threads(struct gfs2_sbd *sdp);
extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
s64 dinodes);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index c60bc7f628e1..60a0206890c5 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -98,7 +98,10 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
"sd_log_flush_head: %d\n"
"sd_log_flush_tail: %d\n"
"sd_log_blks_reserved: %d\n"
- "sd_log_revokes_available: %d\n",
+ "sd_log_revokes_available: %d\n"
+ "sd_log_pinned: %d\n"
+ "sd_log_thresh1: %d\n"
+ "sd_log_thresh2: %d\n",
test_bit(SDF_JOURNAL_CHECKED, &f),
test_bit(SDF_JOURNAL_LIVE, &f),
(sdp->sd_jdesc ? sdp->sd_jdesc->jd_jid : 0),
@@ -118,7 +121,7 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
test_bit(SDF_WITHDRAW_IN_PROG, &f),
test_bit(SDF_REMOTE_WITHDRAW, &f),
test_bit(SDF_WITHDRAW_RECOVERY, &f),
- test_bit(SDF_DEACTIVATING, &f),
+ test_bit(SDF_KILL, &f),
sdp->sd_log_error,
rwsem_is_locked(&sdp->sd_log_flush_lock),
sdp->sd_log_num_revoke,
@@ -128,7 +131,10 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
sdp->sd_log_flush_head,
sdp->sd_log_flush_tail,
sdp->sd_log_blks_reserved,
- atomic_read(&sdp->sd_log_revokes_available));
+ atomic_read(&sdp->sd_log_revokes_available),
+ atomic_read(&sdp->sd_log_pinned),
+ atomic_read(&sdp->sd_log_thresh1),
+ atomic_read(&sdp->sd_log_thresh2));
return s;
}
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index dac22b1c1a2e..da29fafb6272 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -9,6 +9,7 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
+#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/gfs2_ondisk.h>
#include <linux/delay.h>
@@ -150,7 +151,14 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
if (!sb_rdonly(sdp->sd_vfs)) {
bool locked = mutex_trylock(&sdp->sd_freeze_mutex);
- gfs2_make_fs_ro(sdp);
+ wake_up(&sdp->sd_logd_waitq);
+ wake_up(&sdp->sd_quota_wait);
+
+ wait_event_timeout(sdp->sd_log_waitq,
+ gfs2_log_is_empty(sdp),
+ HZ * 5);
+
+ sdp->sd_vfs->s_flags |= SB_RDONLY;
if (locked)
mutex_unlock(&sdp->sd_freeze_mutex);
@@ -315,19 +323,19 @@ int gfs2_withdraw(struct gfs2_sbd *sdp)
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
const struct lm_lockops *lm = ls->ls_ops;
- if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW &&
- test_and_set_bit(SDF_WITHDRAWN, &sdp->sd_flags)) {
- if (!test_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags))
- return -1;
-
- wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_IN_PROG,
- TASK_UNINTERRUPTIBLE);
- return -1;
- }
-
- set_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
-
if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) {
+ unsigned long old = READ_ONCE(sdp->sd_flags), new;
+
+ do {
+ if (old & BIT(SDF_WITHDRAWN)) {
+ wait_on_bit(&sdp->sd_flags,
+ SDF_WITHDRAW_IN_PROG,
+ TASK_UNINTERRUPTIBLE);
+ return -1;
+ }
+ new = old | BIT(SDF_WITHDRAWN) | BIT(SDF_WITHDRAW_IN_PROG);
+ } while (unlikely(!try_cmpxchg(&sdp->sd_flags, &old, new)));
+
fs_err(sdp, "about to withdraw this file system\n");
BUG_ON(sdp->sd_args.ar_debug);
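The two separate test_and_set_bit() calls are replaced by a single atomic read-modify-write on sd_flags, so SDF_WITHDRAWN and SDF_WITHDRAW_IN_PROG are always set together. A generic sketch of the try_cmpxchg() update loop:

	unsigned long old = READ_ONCE(*flags), new;

	do {
		new = old | BIT(FLAG_A) | BIT(FLAG_B);	/* hypothetical bits */
	} while (!try_cmpxchg(flags, &old, new));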
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 660995856a04..8b2bd65d70e7 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -383,9 +383,11 @@ static int kernfs_link_sibling(struct kernfs_node *kn)
rb_insert_color(&kn->rb, &kn->parent->dir.children);
/* successfully added, account subdir number */
+ down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
if (kernfs_type(kn) == KERNFS_DIR)
kn->parent->dir.subdirs++;
kernfs_inc_rev(kn->parent);
+ up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
return 0;
}
@@ -408,9 +410,11 @@ static bool kernfs_unlink_sibling(struct kernfs_node *kn)
if (RB_EMPTY_NODE(&kn->rb))
return false;
+ down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
if (kernfs_type(kn) == KERNFS_DIR)
kn->parent->dir.subdirs--;
kernfs_inc_rev(kn->parent);
+ up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
rb_erase(&kn->rb, &kn->parent->dir.children);
RB_CLEAR_NODE(&kn->rb);
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index d49606accb07..c4bf26142eec 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -16,6 +16,8 @@
#include <linux/namei.h>
#include <linux/seq_file.h>
#include <linux/exportfs.h>
+#include <linux/uuid.h>
+#include <linux/statfs.h>
#include "kernfs-internal.h"
@@ -45,8 +47,15 @@ static int kernfs_sop_show_path(struct seq_file *sf, struct dentry *dentry)
return 0;
}
+static int kernfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ simple_statfs(dentry, buf);
+ buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b);
+ return 0;
+}
+
const struct super_operations kernfs_sops = {
- .statfs = simple_statfs,
+ .statfs = kernfs_statfs,
.drop_inode = generic_delete_inode,
.evict_inode = kernfs_evict_inode,
@@ -351,6 +360,8 @@ int kernfs_get_tree(struct fs_context *fc)
}
sb->s_flags |= SB_ACTIVE;
+ uuid_gen(&sb->s_uuid);
+
down_write(&root->kernfs_supers_rwsem);
list_add(&info->node, &info->root->supers);
up_write(&root->kernfs_supers_rwsem);
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 1d9488cf0534..87a0f207df0b 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -276,6 +276,9 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap,
{
struct nsm_handle *new;
+ if (!hostname)
+ return NULL;
+
new = kzalloc(sizeof(*new) + hostname_len + 1, GFP_KERNEL);
if (unlikely(new == NULL))
return NULL;
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 22d3ff3818f5..6579948070a4 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -45,7 +45,6 @@
#define NLMDBG_FACILITY NLMDBG_SVC
#define LOCKD_BUFSIZE (1024 + NLMSVC_XDRSIZE)
-#define ALLOWED_SIGS (sigmask(SIGKILL))
static struct svc_program nlmsvc_program;
@@ -57,6 +56,12 @@ static unsigned int nlmsvc_users;
static struct svc_serv *nlmsvc_serv;
unsigned long nlmsvc_timeout;
+static void nlmsvc_request_retry(struct timer_list *tl)
+{
+ svc_wake_up(nlmsvc_serv);
+}
+DEFINE_TIMER(nlmsvc_retry, nlmsvc_request_retry);
+
unsigned int lockd_net_id;
/*
@@ -111,26 +116,12 @@ static void set_grace_period(struct net *net)
schedule_delayed_work(&ln->grace_period_end, grace_period);
}
-static void restart_grace(void)
-{
- if (nlmsvc_ops) {
- struct net *net = &init_net;
- struct lockd_net *ln = net_generic(net, lockd_net_id);
-
- cancel_delayed_work_sync(&ln->grace_period_end);
- locks_end_grace(&ln->lockd_manager);
- nlmsvc_invalidate_all();
- set_grace_period(net);
- }
-}
-
/*
* This is the lockd kernel thread
*/
static int
lockd(void *vrqstp)
{
- int err = 0;
struct svc_rqst *rqstp = vrqstp;
struct net *net = &init_net;
struct lockd_net *ln = net_generic(net, lockd_net_id);
@@ -138,9 +129,6 @@ lockd(void *vrqstp)
/* try_to_freeze() is called from svc_recv() */
set_freezable();
- /* Allow SIGKILL to tell lockd to drop all of its locks */
- allow_signal(SIGKILL);
-
dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
/*
@@ -148,33 +136,12 @@ lockd(void *vrqstp)
* NFS mount or NFS daemon has gone away.
*/
while (!kthread_should_stop()) {
- long timeout = MAX_SCHEDULE_TIMEOUT;
- RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);
-
/* update sv_maxconn if it has changed */
rqstp->rq_server->sv_maxconn = nlm_max_connections;
- if (signalled()) {
- flush_signals(current);
- restart_grace();
- continue;
- }
-
- timeout = nlmsvc_retry_blocked();
-
- /*
- * Find a socket with data available and call its
- * recvfrom routine.
- */
- err = svc_recv(rqstp, timeout);
- if (err == -EAGAIN || err == -EINTR)
- continue;
- dprintk("lockd: request from %s\n",
- svc_print_addr(rqstp, buf, sizeof(buf)));
-
- svc_process(rqstp);
+ nlmsvc_retry_blocked();
+ svc_recv(rqstp);
}
- flush_signals(current);
if (nlmsvc_ops)
nlmsvc_invalidate_all();
nlm_shutdown_hosts();
@@ -407,6 +374,7 @@ static void lockd_put(void)
#endif
svc_set_num_threads(nlmsvc_serv, NULL, 0);
+ timer_delete_sync(&nlmsvc_retry);
nlmsvc_serv = NULL;
dprintk("lockd_down: service destroyed\n");
}
@@ -538,7 +506,7 @@ static inline int is_callback(u32 proc)
}
-static int lockd_authenticate(struct svc_rqst *rqstp)
+static enum svc_auth_status lockd_authenticate(struct svc_rqst *rqstp)
{
rqstp->rq_client = NULL;
switch (rqstp->rq_authop->flavour) {
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index c43ccdf28ed9..43aeba9de55c 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -131,12 +131,14 @@ static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when)
static inline void
nlmsvc_remove_block(struct nlm_block *block)
{
+ spin_lock(&nlm_blocked_lock);
if (!list_empty(&block->b_list)) {
- spin_lock(&nlm_blocked_lock);
list_del_init(&block->b_list);
spin_unlock(&nlm_blocked_lock);
nlmsvc_release_block(block);
+ return;
}
+ spin_unlock(&nlm_blocked_lock);
}
/*
@@ -152,6 +154,7 @@ nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
file, lock->fl.fl_pid,
(long long)lock->fl.fl_start,
(long long)lock->fl.fl_end, lock->fl.fl_type);
+ spin_lock(&nlm_blocked_lock);
list_for_each_entry(block, &nlm_blocked, b_list) {
fl = &block->b_call->a_args.lock.fl;
dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n",
@@ -161,9 +164,11 @@ nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock)
nlmdbg_cookie2a(&block->b_call->a_args.cookie));
if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) {
kref_get(&block->b_count);
+ spin_unlock(&nlm_blocked_lock);
return block;
}
}
+ spin_unlock(&nlm_blocked_lock);
return NULL;
}
@@ -185,16 +190,19 @@ nlmsvc_find_block(struct nlm_cookie *cookie)
{
struct nlm_block *block;
+ spin_lock(&nlm_blocked_lock);
list_for_each_entry(block, &nlm_blocked, b_list) {
if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie))
goto found;
}
+ spin_unlock(&nlm_blocked_lock);
return NULL;
found:
dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block);
kref_get(&block->b_count);
+ spin_unlock(&nlm_blocked_lock);
return block;
}
@@ -317,6 +325,7 @@ void nlmsvc_traverse_blocks(struct nlm_host *host,
restart:
mutex_lock(&file->f_mutex);
+ spin_lock(&nlm_blocked_lock);
list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
if (!match(block->b_host, host))
continue;
@@ -325,11 +334,13 @@ restart:
if (list_empty(&block->b_list))
continue;
kref_get(&block->b_count);
+ spin_unlock(&nlm_blocked_lock);
mutex_unlock(&file->f_mutex);
nlmsvc_unlink_block(block);
nlmsvc_release_block(block);
goto restart;
}
+ spin_unlock(&nlm_blocked_lock);
mutex_unlock(&file->f_mutex);
}
@@ -1008,7 +1019,7 @@ retry_deferred_block(struct nlm_block *block)
* picks up locks that can be granted, or grant notifications that must
* be retransmitted.
*/
-unsigned long
+void
nlmsvc_retry_blocked(void)
{
unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
@@ -1038,5 +1049,6 @@ nlmsvc_retry_blocked(void)
}
spin_unlock(&nlm_blocked_lock);
- return timeout;
+ if (timeout < MAX_SCHEDULE_TIMEOUT)
+ mod_timer(&nlmsvc_retry, jiffies + timeout);
}
diff --git a/fs/locks.c b/fs/locks.c
index a45efc16945d..76ad05f8070a 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1744,13 +1744,6 @@ generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **pri
if (is_deleg && !inode_trylock(inode))
return -EAGAIN;
- if (is_deleg && arg == F_WRLCK) {
- /* Write delegations are not currently supported: */
- inode_unlock(inode);
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
-
percpu_down_read(&file_rwsem);
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index b6fc169be1b1..7df2503cef6c 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -209,8 +209,6 @@ config NFS_DISABLE_UDP_SUPPORT
config NFS_V4_2_READ_PLUS
bool "NFS: Enable support for the NFSv4.2 READ_PLUS operation"
depends on NFS_V4_2
- default n
+ default y
help
- This is intended for developers only. The READ_PLUS operation has
- been shown to have issues under specific conditions and should not
- be used in production.
+ Choose Y here to enable use of the NFS v4.2 READ_PLUS operation.
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index 70f5563a8e81..65cbb5607a5f 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -404,7 +404,7 @@ bl_parse_concat(struct nfs_server *server, struct pnfs_block_dev *d,
int ret, i;
d->children = kcalloc(v->concat.volumes_count,
- sizeof(struct pnfs_block_dev), GFP_KERNEL);
+ sizeof(struct pnfs_block_dev), gfp_mask);
if (!d->children)
return -ENOMEM;
@@ -433,7 +433,7 @@ bl_parse_stripe(struct nfs_server *server, struct pnfs_block_dev *d,
int ret, i;
d->children = kcalloc(v->stripe.volumes_count,
- sizeof(struct pnfs_block_dev), GFP_KERNEL);
+ sizeof(struct pnfs_block_dev), gfp_mask);
if (!d->children)
return -ENOMEM;
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 456af7d230cf..466ebf1d41b2 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -74,23 +74,12 @@ out_err:
static int
nfs4_callback_svc(void *vrqstp)
{
- int err;
struct svc_rqst *rqstp = vrqstp;
set_freezable();
- while (!kthread_freezable_should_stop(NULL)) {
-
- if (signal_pending(current))
- flush_signals(current);
- /*
- * Listen for a request on the socket
- */
- err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
- if (err == -EAGAIN || err == -EINTR)
- continue;
- svc_process(rqstp);
- }
+ while (!kthread_freezable_should_stop(NULL))
+ svc_recv(rqstp);
svc_exit_thread(rqstp);
return 0;
@@ -112,11 +101,7 @@ nfs41_callback_svc(void *vrqstp)
set_freezable();
while (!kthread_freezable_should_stop(NULL)) {
-
- if (signal_pending(current))
- flush_signals(current);
-
- prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
+ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_IDLE);
spin_lock_bh(&serv->sv_cb_lock);
if (!list_empty(&serv->sv_cb_list)) {
req = list_first_entry(&serv->sv_cb_list,
@@ -387,7 +372,7 @@ check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp)
* All other checking done after NFS decoding where the nfs_client can be
* found in nfs4_callback_compound
*/
-static int nfs_callback_authenticate(struct svc_rqst *rqstp)
+static enum svc_auth_status nfs_callback_authenticate(struct svc_rqst *rqstp)
{
rqstp->rq_auth_stat = rpc_autherr_badcred;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index e4c5f193ed5e..44eca51b2808 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -517,6 +517,8 @@ int nfs_create_rpc_client(struct nfs_client *clp,
.authflavor = flavor,
.cred = cl_init->cred,
.xprtsec = cl_init->xprtsec,
+ .connect_timeout = cl_init->connect_timeout,
+ .reconnect_timeout = cl_init->reconnect_timeout,
};
if (test_bit(NFS_CS_DISCRTRY, &clp->cl_flags))
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 8f3112e71a6a..e6a51fd94fea 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1089,6 +1089,17 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
for (i = desc->cache_entry_index; i < array->size; i++) {
struct nfs_cache_array_entry *ent;
+ /*
+		 * nfs_readdir_handle_cache_misses() forces a cache clear once
+		 * cache_misses exceeds NFS_READDIR_CACHE_MISS_THRESHOLD, so
+		 * NFS_READDIR_CACHE_MISS_THRESHOLD + 1 entries need to be
+		 * emitted here.
+ */
+ if (first_emit && i > NFS_READDIR_CACHE_MISS_THRESHOLD + 2) {
+ desc->eob = true;
+ break;
+ }
+
ent = &array->array[i];
if (!dir_emit(desc->ctx, ent->name, ent->name_len,
nfs_compat_user_ino64(ent->ino), ent->d_type)) {
@@ -1107,10 +1118,6 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
desc->ctx->pos = desc->dir_cookie;
else
desc->ctx->pos++;
- if (first_emit && i > NFS_READDIR_CACHE_MISS_THRESHOLD + 1) {
- desc->eob = true;
- break;
- }
}
if (array->folio_is_eof)
desc->eof = !desc->eob;
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index aaffaaa336cc..47d892a1d363 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -472,13 +472,31 @@ out:
return result;
}
+static void nfs_direct_add_page_head(struct list_head *list,
+ struct nfs_page *req)
+{
+ struct nfs_page *head = req->wb_head;
+
+ if (!list_empty(&head->wb_list) || !nfs_lock_request(head))
+ return;
+ if (!list_empty(&head->wb_list)) {
+ nfs_unlock_request(head);
+ return;
+ }
+ list_add(&head->wb_list, list);
+ kref_get(&head->wb_kref);
+ kref_get(&head->wb_kref);
+}
+
static void nfs_direct_join_group(struct list_head *list, struct inode *inode)
{
struct nfs_page *req, *subreq;
list_for_each_entry(req, list, wb_list) {
- if (req->wb_head != req)
+ if (req->wb_head != req) {
+ nfs_direct_add_page_head(&req->wb_list, req);
continue;
+ }
subreq = req->wb_this_page;
if (subreq == req)
continue;
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index 6603b5cee029..714975e5c0db 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -7,14 +7,16 @@
* Resolves DNS hostnames into valid ip addresses
*/
-#ifdef CONFIG_NFS_USE_KERNEL_DNS
-
#include <linux/module.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
-#include <linux/dns_resolver.h>
+
#include "dns_resolve.h"
+#ifdef CONFIG_NFS_USE_KERNEL_DNS
+
+#include <linux/dns_resolver.h>
+
ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen,
struct sockaddr_storage *ss, size_t salen)
{
@@ -35,7 +37,6 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen,
#else
-#include <linux/module.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/kmod.h>
@@ -43,15 +44,12 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen,
#include <linux/socket.h>
#include <linux/seq_file.h>
#include <linux/inet.h>
-#include <linux/sunrpc/clnt.h>
-#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
-#include "dns_resolve.h"
#include "cache_lib.h"
#include "netns.h"
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 79b1b3fcd3fc..3f9768810427 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -200,7 +200,7 @@ nfs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe
EXPORT_SYMBOL_GPL(nfs_file_splice_read);
int
-nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
+nfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file_inode(file);
int status;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 913c09806c7f..9c9cf764f600 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -82,6 +82,8 @@ struct nfs_client_initdata {
const struct rpc_timeout *timeparms;
const struct cred *cred;
struct xprtsec_parms xprtsec;
+ unsigned long connect_timeout;
+ unsigned long reconnect_timeout;
};
/*
@@ -493,6 +495,7 @@ extern const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
extern void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
struct inode *inode, bool force_mds,
const struct nfs_pgio_completion_ops *compl_ops);
+extern bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size);
extern int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
struct nfs_open_context *ctx,
struct folio *folio);
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 05c3b4b2b3dd..c19093814296 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -949,7 +949,7 @@ int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
error = decode_filename_inline(xdr, &entry->name, &entry->len);
if (unlikely(error))
- return -EAGAIN;
+ return error == -ENAMETOOLONG ? -ENAMETOOLONG : -EAGAIN;
/*
* The type (size and byte order) of nfscookie isn't defined in
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
index eff3802c5e03..674c012868b1 100644
--- a/fs/nfs/nfs3client.c
+++ b/fs/nfs/nfs3client.c
@@ -86,6 +86,7 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans)
{
struct rpc_timeout ds_timeout;
+ unsigned long connect_timeout = ds_timeo * (ds_retrans + 1) * HZ / 10;
struct nfs_client *mds_clp = mds_srv->nfs_client;
struct nfs_client_initdata cl_init = {
.addr = ds_addr,
@@ -98,6 +99,8 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
.timeparms = &ds_timeout,
.cred = mds_srv->cred,
.xprtsec = mds_clp->cl_xprtsec,
+ .connect_timeout = connect_timeout,
+ .reconnect_timeout = connect_timeout,
};
struct nfs_client *clp;
char buf[INET6_ADDRSTRLEN + 1];
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 3b0b650c9c5a..60f032be805a 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -1991,7 +1991,7 @@ int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
error = decode_inline_filename3(xdr, &entry->name, &entry->len);
if (unlikely(error))
- return -EAGAIN;
+ return error == -ENAMETOOLONG ? -ENAMETOOLONG : -EAGAIN;
error = decode_cookie3(xdr, &new_cookie);
if (unlikely(error))
diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h
index 0fe5aacbcfdf..b59876b01a1e 100644
--- a/fs/nfs/nfs42.h
+++ b/fs/nfs/nfs42.h
@@ -13,6 +13,7 @@
* more? Need to consider not to pre-alloc too much for a compound.
*/
#define PNFS_LAYOUTSTATS_MAXDEV (4)
+#define READ_PLUS_SCRATCH_SIZE (16)
/* nfs4.2proc.c */
#ifdef CONFIG_NFS_V4_2
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 49f78e23b34c..063e00aff87e 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -471,8 +471,9 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
continue;
}
break;
- } else if (err == -NFS4ERR_OFFLOAD_NO_REQS && !args.sync) {
- args.sync = true;
+ } else if (err == -NFS4ERR_OFFLOAD_NO_REQS &&
+ args.sync != res.synchronous) {
+ args.sync = res.synchronous;
dst_exception.retry = 1;
continue;
} else if ((err == -ESTALE ||
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index 95234208dc9e..9e3ae53e2205 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -54,10 +54,16 @@
(1 /* data_content4 */ + \
2 /* data_info4.di_offset */ + \
1 /* data_info4.di_length */)
+#define NFS42_READ_PLUS_HOLE_SEGMENT_SIZE \
+ (1 /* data_content4 */ + \
+ 2 /* data_info4.di_offset */ + \
+ 2 /* data_info4.di_length */)
+#define READ_PLUS_SEGMENT_SIZE_DIFF (NFS42_READ_PLUS_HOLE_SEGMENT_SIZE - \
+ NFS42_READ_PLUS_DATA_SEGMENT_SIZE)
#define decode_read_plus_maxsz (op_decode_hdr_maxsz + \
1 /* rpr_eof */ + \
1 /* rpr_contents count */ + \
- NFS42_READ_PLUS_DATA_SEGMENT_SIZE)
+ NFS42_READ_PLUS_HOLE_SEGMENT_SIZE)
#define encode_seek_maxsz (op_encode_hdr_maxsz + \
encode_stateid_maxsz + \
2 /* offset */ + \
@@ -617,8 +623,8 @@ static void nfs4_xdr_enc_read_plus(struct rpc_rqst *req,
encode_putfh(xdr, args->fh, &hdr);
encode_read_plus(xdr, args, &hdr);
- rpc_prepare_reply_pages(req, args->pages, args->pgbase,
- args->count, hdr.replen);
+ rpc_prepare_reply_pages(req, args->pages, args->pgbase, args->count,
+ hdr.replen - READ_PLUS_SEGMENT_SIZE_DIFF);
encode_nops(&hdr);
}
@@ -1056,13 +1062,12 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
res->eof = be32_to_cpup(p++);
segments = be32_to_cpup(p++);
if (segments == 0)
- return status;
+ return 0;
segs = kmalloc_array(segments, sizeof(*segs), GFP_KERNEL);
if (!segs)
return -ENOMEM;
- status = -EIO;
for (i = 0; i < segments; i++) {
status = decode_read_plus_segment(xdr, &segs[i]);
if (status < 0)
@@ -1428,7 +1433,7 @@ static int nfs4_xdr_dec_read_plus(struct rpc_rqst *rqstp,
struct compound_hdr hdr;
int status;
- xdr_set_scratch_buffer(xdr, res->scratch, sizeof(res->scratch));
+ xdr_set_scratch_buffer(xdr, res->scratch, READ_PLUS_SCRATCH_SIZE);
status = decode_compound_hdr(xdr, &hdr);
if (status)
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index d9114a754db7..27fb25567ce7 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -232,6 +232,8 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
__set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
__set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
+ if (test_bit(NFS_CS_DS, &cl_init->init_flags))
+ __set_bit(NFS_CS_DS, &clp->cl_flags);
/*
	 * Set up the connection to the server before we add it to the
* global list.
@@ -1007,6 +1009,7 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ __set_bit(NFS_CS_DS, &cl_init.init_flags);
/*
	 * Set an authflavor equal to the MDS value. Use the MDS nfs_client
* cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d57aaf0cc577..794343790ea8 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -5438,18 +5438,8 @@ static bool nfs4_read_plus_not_supported(struct rpc_task *task,
return false;
}
-static inline void nfs4_read_plus_scratch_free(struct nfs_pgio_header *hdr)
-{
- if (hdr->res.scratch) {
- kfree(hdr->res.scratch);
- hdr->res.scratch = NULL;
- }
-}
-
static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
- nfs4_read_plus_scratch_free(hdr);
-
if (!nfs4_sequence_done(task, &hdr->res.seq_res))
return -EAGAIN;
if (nfs4_read_stateid_changed(task, &hdr->args))
@@ -5469,8 +5459,7 @@ static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
/* Note: We don't use READ_PLUS with pNFS yet */
if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) {
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS];
- hdr->res.scratch = kmalloc(32, GFP_KERNEL);
- return hdr->res.scratch != NULL;
+ return nfs_read_alloc_scratch(hdr, READ_PLUS_SCRATCH_SIZE);
}
return false;
}
@@ -8798,6 +8787,8 @@ nfs4_run_exchange_id(struct nfs_client *clp, const struct cred *cred,
#ifdef CONFIG_NFS_V4_1_MIGRATION
calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
#endif
+ if (test_bit(NFS_CS_DS, &clp->cl_flags))
+ calldata->args.flags |= EXCHGID4_FLAG_USE_PNFS_DS;
msg.rpc_argp = &calldata->args;
msg.rpc_resp = &calldata->res;
task_setup_data.callback_data = calldata;
@@ -8875,6 +8866,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
/* Save the EXCHANGE_ID verifier session trunk tests */
memcpy(clp->cl_confirm.data, argp->verifier.data,
sizeof(clp->cl_confirm.data));
+ if (resp->flags & EXCHGID4_FLAG_USE_PNFS_DS)
+ set_bit(NFS_CS_DS, &clp->cl_flags);
out:
trace_nfs4_exchange_id(clp, status);
rpc_put_task(task);
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
index ddbbf4fcda86..178001c90156 100644
--- a/fs/nfs/pnfs_dev.c
+++ b/fs/nfs/pnfs_dev.c
@@ -154,7 +154,7 @@ nfs4_get_device_info(struct nfs_server *server,
set_bit(NFS_DEVICEID_NOCACHE, &d->flags);
out_free_pages:
- for (i = 0; i < max_pages; i++)
+ while (--i >= 0)
__free_page(pages[i]);
kfree(pages);
out_free_pdev:
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index a0112ad4937a..afd23910f3bf 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -852,6 +852,7 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
{
struct nfs_client *clp = ERR_PTR(-EIO);
struct nfs4_pnfs_ds_addr *da;
+ unsigned long connect_timeout = timeo * (retrans + 1) * HZ / 10;
int status = 0;
dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);
@@ -870,6 +871,8 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
.dstaddr = (struct sockaddr *)&da->da_addr,
.addrlen = da->da_addrlen,
.servername = clp->cl_hostname,
+ .connect_timeout = connect_timeout,
+ .reconnect_timeout = connect_timeout,
};
if (da->da_transport != clp->cl_proto)
@@ -943,7 +946,7 @@ static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
* Test this address for session trunking and
* add as an alias
*/
- xprtdata.cred = nfs4_get_clid_cred(clp),
+ xprtdata.cred = nfs4_get_clid_cred(clp);
rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
rpc_clnt_setup_test_and_add_xprt,
&rpcdata);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index f71eeee67e20..7dc21a48e3e7 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -47,6 +47,8 @@ static struct nfs_pgio_header *nfs_readhdr_alloc(void)
static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
+ if (rhdr->res.scratch != NULL)
+ kfree(rhdr->res.scratch);
kmem_cache_free(nfs_rdata_cachep, rhdr);
}
@@ -108,6 +110,14 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
+bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size)
+{
+ WARN_ON(hdr->res.scratch != NULL);
+ hdr->res.scratch = kmalloc(size, GFP_KERNEL);
+ return hdr->res.scratch != NULL;
+}
+EXPORT_SYMBOL_GPL(nfs_read_alloc_scratch);
+
static void nfs_readpage_release(struct nfs_page *req, int error)
{
struct folio *folio = nfs_page_to_folio(req);
diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
index 8e9c1a0f8d38..1ed2f691ebb9 100644
--- a/fs/nfsd/blocklayoutxdr.c
+++ b/fs/nfsd/blocklayoutxdr.c
@@ -83,6 +83,15 @@ nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
int len = sizeof(__be32), ret, i;
__be32 *p;
+ /*
+ * See paragraph 5 of RFC 8881 S18.40.3.
+ */
+ if (!gdp->gd_maxcount) {
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
+ return nfserr_resource;
+ return nfs_ok;
+ }
+
p = xdr_reserve_space(xdr, len + sizeof(__be32));
if (!p)
return nfserr_resource;
diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h
index 4c9b87850ab1..929248c6ca84 100644
--- a/fs/nfsd/cache.h
+++ b/fs/nfsd/cache.h
@@ -19,7 +19,7 @@
* typical sockaddr_storage. This is for space reasons, since sockaddr_storage
* is much larger than a sockaddr_in6.
*/
-struct svc_cacherep {
+struct nfsd_cacherep {
struct {
/* Keep often-read xid, csum in the same cache line: */
__be32 k_xid;
@@ -84,8 +84,10 @@ int nfsd_net_reply_cache_init(struct nfsd_net *nn);
void nfsd_net_reply_cache_destroy(struct nfsd_net *nn);
int nfsd_reply_cache_init(struct nfsd_net *);
void nfsd_reply_cache_shutdown(struct nfsd_net *);
-int nfsd_cache_lookup(struct svc_rqst *);
-void nfsd_cache_update(struct svc_rqst *, int, __be32 *);
+int nfsd_cache_lookup(struct svc_rqst *rqstp,
+ struct nfsd_cacherep **cacherep);
+void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
+ int cachetype, __be32 *statp);
int nfsd_reply_cache_stats_show(struct seq_file *m, void *v);
#endif /* NFSCACHE_H */
diff --git a/fs/nfsd/flexfilelayoutxdr.c b/fs/nfsd/flexfilelayoutxdr.c
index e81d2a5cf381..bb205328e043 100644
--- a/fs/nfsd/flexfilelayoutxdr.c
+++ b/fs/nfsd/flexfilelayoutxdr.c
@@ -85,6 +85,15 @@ nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr,
int addr_len;
__be32 *p;
+ /*
+ * See paragraph 5 of RFC 8881 S18.40.3.
+ */
+ if (!gdp->gd_maxcount) {
+ if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT)
+ return nfserr_resource;
+ return nfs_ok;
+ }
+
/* len + padding for two strings */
addr_len = 16 + da->netaddr.netid_len + da->netaddr.addr_len;
ver_len = 20;
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index fc8d5b7db9f8..268ef57751c4 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -307,7 +307,9 @@ nfsd3_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (!IS_POSIXACL(inode))
iap->ia_mode &= ~current_umask();
- fh_fill_pre_attrs(fhp);
+ status = fh_fill_pre_attrs(fhp);
+ if (status != nfs_ok)
+ goto out;
host_err = vfs_create(&nop_mnt_idmap, inode, child, iap->ia_mode, true);
if (host_err < 0) {
status = nfserrno(host_err);
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index 518203821790..96e786b5e544 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -441,7 +441,7 @@ struct posix_ace_state_array {
* calculated so far: */
struct posix_acl_state {
- int empty;
+ unsigned char valid;
struct posix_ace_state owner;
struct posix_ace_state group;
struct posix_ace_state other;
@@ -457,7 +457,6 @@ init_state(struct posix_acl_state *state, int cnt)
int alloc;
memset(state, 0, sizeof(struct posix_acl_state));
- state->empty = 1;
/*
* In the worst case, each individual acl could be for a distinct
* named user or group, but we don't know which, so we allocate
@@ -500,7 +499,7 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
* and effective cases: when there are no inheritable ACEs,
* calls ->set_acl with a NULL ACL structure.
*/
- if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT))
+ if (!state->valid && (flags & NFS4_ACL_TYPE_DEFAULT))
return NULL;
/*
@@ -622,11 +621,12 @@ static void process_one_v4_ace(struct posix_acl_state *state,
struct nfs4_ace *ace)
{
u32 mask = ace->access_mask;
+ short type = ace2type(ace);
int i;
- state->empty = 0;
+ state->valid |= type;
- switch (ace2type(ace)) {
+ switch (type) {
case ACL_USER_OBJ:
if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) {
allow_bits(&state->owner, mask);
@@ -726,6 +726,30 @@ static int nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl,
if (!(ace->flag & NFS4_ACE_INHERIT_ONLY_ACE))
process_one_v4_ace(&effective_acl_state, ace);
}
+
+ /*
+ * At this point, the default ACL may have zeroed-out entries for owner,
+	 * group and other. That usually results in a nonsensical ACL
+ * that denies all access except to any ACE that was explicitly added.
+ *
+ * The setfacl command solves a similar problem with this logic:
+ *
+ * "If a Default ACL entry is created, and the Default ACL contains
+ * no owner, owning group, or others entry, a copy of the ACL
+ * owner, owning group, or others entry is added to the Default ACL."
+ *
+ * Copy any missing ACEs from the effective set, if any ACEs were
+ * explicitly set.
+ */
+ if (default_acl_state.valid) {
+ if (!(default_acl_state.valid & ACL_USER_OBJ))
+ default_acl_state.owner = effective_acl_state.owner;
+ if (!(default_acl_state.valid & ACL_GROUP_OBJ))
+ default_acl_state.group = effective_acl_state.group;
+ if (!(default_acl_state.valid & ACL_OTHER))
+ default_acl_state.other = effective_acl_state.other;
+ }
+
*pacl = posix_state_to_acl(&effective_acl_state, flags);
if (IS_ERR(*pacl)) {
ret = PTR_ERR(*pacl);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 5ae670807449..5ca748309c26 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -297,12 +297,12 @@ nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
}
if (d_really_is_positive(child)) {
- status = nfs_ok;
-
/* NFSv4 protocol requires change attributes even though
* no change happened.
*/
- fh_fill_both_attrs(fhp);
+ status = fh_fill_both_attrs(fhp);
+ if (status != nfs_ok)
+ goto out;
switch (open->op_createmode) {
case NFS4_CREATE_UNCHECKED:
@@ -345,7 +345,9 @@ nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (!IS_POSIXACL(inode))
iap->ia_mode &= ~current_umask();
- fh_fill_pre_attrs(fhp);
+ status = fh_fill_pre_attrs(fhp);
+ if (status != nfs_ok)
+ goto out;
status = nfsd4_vfs_create(fhp, child, open);
if (status != nfs_ok)
goto out;
@@ -380,6 +382,38 @@ out:
return status;
}
+/**
+ * set_change_info - set up the change_info4 for a reply
+ * @cinfo: pointer to nfsd4_change_info to be populated
+ * @fhp: pointer to svc_fh to use as source
+ *
+ * Many operations in NFSv4 require change_info4 in the reply. This function
+ * populates that from the info that we (should!) have already collected. In
+ * the event that we didn't get any pre-attrs, zero out the before field.
+ */
+static void
+set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
+{
+ cinfo->atomic = (u32)(fhp->fh_pre_saved && fhp->fh_post_saved && !fhp->fh_no_atomic_attr);
+ cinfo->before_change = fhp->fh_pre_change;
+ cinfo->after_change = fhp->fh_post_change;
+
+ /*
+ * If fetching the pre-change attributes failed, then we should
+ * have already failed the whole operation. We could have still
+ * failed to fetch post-change attributes however.
+ *
+	 * If the pre_saved field isn't set for some reason, warn and
+	 * zero out the before field since we don't know what it should
+	 * be. If we didn't get post-op attrs, set the after field to
+	 * before + 1 so that it still reads as a change.
+ */
+ if (WARN_ON_ONCE(!fhp->fh_pre_saved))
+ cinfo->before_change = 0;
+ if (!fhp->fh_post_saved)
+ cinfo->after_change = cinfo->before_change + 1;
+}
+
static __be32
do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh **resfh)
{
@@ -424,11 +458,11 @@ do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stru
} else {
status = nfsd_lookup(rqstp, current_fh,
open->op_fname, open->op_fnamelen, *resfh);
- if (!status)
+ if (status == nfs_ok)
/* NFSv4 protocol requires change attributes even though
* no change happened.
*/
- fh_fill_both_attrs(current_fh);
+ status = fh_fill_both_attrs(current_fh);
}
if (status)
goto out;
@@ -1313,12 +1347,11 @@ try_again:
/* found a match */
if (ni->nsui_busy) {
/* wait - and try again */
- prepare_to_wait(&nn->nfsd_ssc_waitq, &wait,
- TASK_INTERRUPTIBLE);
+ prepare_to_wait(&nn->nfsd_ssc_waitq, &wait, TASK_IDLE);
spin_unlock(&nn->nfsd_ssc_lock);
/* allow 20secs for mount/unmount for now - revisit */
- if (signal_pending(current) ||
+ if (kthread_should_stop() ||
(schedule_timeout(20*HZ) == 0)) {
finish_wait(&nn->nfsd_ssc_waitq, &wait);
kfree(work);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index daf305daa751..8534693eb6a4 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -649,6 +649,18 @@ find_readable_file(struct nfs4_file *f)
return ret;
}
+static struct nfsd_file *
+find_rw_file(struct nfs4_file *f)
+{
+ struct nfsd_file *ret;
+
+ spin_lock(&f->fi_lock);
+ ret = nfsd_file_get(f->fi_fds[O_RDWR]);
+ spin_unlock(&f->fi_lock);
+
+ return ret;
+}
+
struct nfsd_file *
find_any_file(struct nfs4_file *f)
{
@@ -1144,7 +1156,7 @@ static void block_delegations(struct knfsd_fh *fh)
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
- struct nfs4_clnt_odstate *odstate)
+ struct nfs4_clnt_odstate *odstate, u32 dl_type)
{
struct nfs4_delegation *dp;
long n;
@@ -1170,7 +1182,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
INIT_LIST_HEAD(&dp->dl_recall_lru);
dp->dl_clnt_odstate = odstate;
get_clnt_odstate(odstate);
- dp->dl_type = NFS4_OPEN_DELEGATE_READ;
+ dp->dl_type = dl_type;
dp->dl_retries = 1;
dp->dl_recalled = false;
nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
@@ -5449,8 +5461,9 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
struct nfs4_file *fp = stp->st_stid.sc_file;
struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
struct nfs4_delegation *dp;
- struct nfsd_file *nf;
+ struct nfsd_file *nf = NULL;
struct file_lock *fl;
+ u32 dl_type;
/*
* The fi_had_conflict and nfs_get_existing_delegation checks
@@ -5460,15 +5473,35 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
if (fp->fi_had_conflict)
return ERR_PTR(-EAGAIN);
- nf = find_readable_file(fp);
- if (!nf) {
- /*
- * We probably could attempt another open and get a read
- * delegation, but for now, don't bother until the
- * client actually sends us one.
- */
- return ERR_PTR(-EAGAIN);
+ /*
+ * Try for a write delegation first. RFC8881 section 10.4 says:
+ *
+ * "An OPEN_DELEGATE_WRITE delegation allows the client to handle,
+ * on its own, all opens."
+ *
+ * Furthermore the client can use a write delegation for most READ
+	 * operations as well, so we require an O_RDWR file here.
+ *
+ * Offer a write delegation in the case of a BOTH open, and ensure
+ * we get the O_RDWR descriptor.
+ */
+ if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == NFS4_SHARE_ACCESS_BOTH) {
+ nf = find_rw_file(fp);
+ dl_type = NFS4_OPEN_DELEGATE_WRITE;
}
+
+ /*
+	 * If the file is being opened O_RDONLY or we couldn't get an O_RDWR
+ * file for some reason, then try for a read delegation instead.
+ */
+ if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) {
+ nf = find_readable_file(fp);
+ dl_type = NFS4_OPEN_DELEGATE_READ;
+ }
+
+ if (!nf)
+ return ERR_PTR(-EAGAIN);
+
spin_lock(&state_lock);
spin_lock(&fp->fi_lock);
if (nfs4_delegation_exists(clp, fp))
@@ -5491,11 +5524,11 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
return ERR_PTR(status);
status = -ENOMEM;
- dp = alloc_init_deleg(clp, fp, odstate);
+ dp = alloc_init_deleg(clp, fp, odstate, dl_type);
if (!dp)
goto out_delegees;
- fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
+ fl = nfs4_alloc_init_lease(dp, dl_type);
if (!fl)
goto out_clnt_odstate;
@@ -5568,10 +5601,28 @@ static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
}
/*
- * Attempt to hand out a delegation.
+ * The Linux NFS server does not offer write delegations to NFSv4.0
+ * clients in order to avoid conflicts between write delegations and
+ * GETATTRs requesting CHANGE or SIZE attributes.
+ *
+ * With NFSv4.1 and later minorversions, the SEQUENCE operation that
+ * begins each COMPOUND contains a client ID. Delegation recall can
+ * be avoided when the server recognizes that the client sending the
+ * GETATTR also holds the write delegation it conflicts with.
+ *
+ * However, the NFSv4.0 protocol does not enable a server to
+ * determine that a GETATTR originated from the client holding the
+ * conflicting delegation versus coming from some other client. Per
+ * RFC 7530 Section 16.7.5, the server must recall or send a
+ * CB_GETATTR even when the GETATTR originates from the client that
+ * holds the conflicting delegation.
*
- * Note we don't support write delegations, and won't until the vfs has
- * proper support for them.
+ * An NFSv4.0 client can trigger a pathological situation if it
+ * always sends a DELEGRETURN preceded by a conflicting GETATTR in
+ * the same COMPOUND. COMPOUND execution will always stop at the
+ * GETATTR and the DELEGRETURN will never get executed. The server
+ * eventually revokes the delegation, which can result in loss of
+ * open or lock state.
*/
static void
nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
@@ -5590,8 +5641,6 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
case NFS4_OPEN_CLAIM_PREVIOUS:
if (!cb_up)
open->op_recall = 1;
- if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
- goto out_no_deleg;
break;
case NFS4_OPEN_CLAIM_NULL:
parent = currentfh;
@@ -5606,6 +5655,9 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
goto out_no_deleg;
if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
goto out_no_deleg;
+ if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE &&
+ !clp->cl_minorversion)
+ goto out_no_deleg;
break;
default:
goto out_no_deleg;
@@ -5616,8 +5668,13 @@ nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
- trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
- open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
+ if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
+ open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
+ trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
+ } else {
+ open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
+ trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
+ }
nfs4_put_stid(&dp->dl_stid);
return;
out_no_deleg:
@@ -8341,3 +8398,68 @@ nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
{
get_stateid(cstate, &u->write.wr_stateid);
}
+
+/**
+ * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
+ * @rqstp: RPC transaction context
+ * @inode: file to be checked for a conflict
+ *
+ * This function is called when there is a conflict between a write
+ * delegation and a change/size GETATTR from another client. The server
+ * must either use the CB_GETATTR to get the current values of the
+ * attributes from the client that holds the delegation or recall the
+ * delegation before replying to the GETATTR. See RFC 8881 section
+ * 18.7.4.
+ *
+ * The current implementation does not support CB_GETATTR yet. However,
+ * using CB_GETATTR to avoid recalling the delegation could be added in
+ * follow-up work.
+ *
+ * Returns 0 if there is no conflict; otherwise an nfs_stat
+ * code is returned.
+ */
+__be32
+nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode)
+{
+ __be32 status;
+ struct file_lock_context *ctx;
+ struct file_lock *fl;
+ struct nfs4_delegation *dp;
+
+ ctx = locks_inode_context(inode);
+ if (!ctx)
+ return 0;
+ spin_lock(&ctx->flc_lock);
+ list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
+ if (fl->fl_flags == FL_LAYOUT)
+ continue;
+ if (fl->fl_lmops != &nfsd_lease_mng_ops) {
+ /*
+			 * non-nfs lease; if it's a lease with F_RDLCK then
+ * we are done; there isn't any write delegation
+ * on this inode
+ */
+ if (fl->fl_type == F_RDLCK)
+ break;
+ goto break_lease;
+ }
+ if (fl->fl_type == F_WRLCK) {
+ dp = fl->fl_owner;
+ if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
+ spin_unlock(&ctx->flc_lock);
+ return 0;
+ }
+break_lease:
+ spin_unlock(&ctx->flc_lock);
+ nfsd_stats_wdeleg_getattr_inc();
+ status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
+ if (status != nfserr_jukebox ||
+ !nfsd_wait_for_delegreturn(rqstp, inode))
+ return status;
+ return 0;
+ }
+ break;
+ }
+ spin_unlock(&ctx->flc_lock);
+ return 0;
+}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b30dca7de8cc..2e40c74d2f72 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2984,6 +2984,11 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
if (status)
goto out;
}
+ if (bmval0 & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
+ status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry));
+ if (status)
+ goto out;
+ }
err = vfs_getattr(&path, &stat,
STATX_BASIC_STATS | STATX_BTIME | STATX_CHANGE_COOKIE,
@@ -3973,17 +3978,20 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr,
nfserr = nfsd4_encode_stateid(xdr, &open->op_delegate_stateid);
if (nfserr)
return nfserr;
- p = xdr_reserve_space(xdr, 32);
+
+ p = xdr_reserve_space(xdr, XDR_UNIT * 8);
if (!p)
return nfserr_resource;
*p++ = cpu_to_be32(open->op_recall);
/*
+ * Always flush on close
+ *
* TODO: space_limit's in delegations
*/
*p++ = cpu_to_be32(NFS4_LIMIT_SIZE);
- *p++ = cpu_to_be32(~(u32)0);
- *p++ = cpu_to_be32(~(u32)0);
+ *p++ = xdr_zero;
+ *p++ = xdr_zero;
/*
* TODO: ACE's in delegations
@@ -4678,20 +4686,17 @@ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
*p++ = cpu_to_be32(gdev->gd_layout_type);
- /* If maxcount is 0 then just update notifications */
- if (gdev->gd_maxcount != 0) {
- ops = nfsd4_layout_ops[gdev->gd_layout_type];
- nfserr = ops->encode_getdeviceinfo(xdr, gdev);
- if (nfserr) {
- /*
- * We don't bother to burden the layout drivers with
- * enforcing gd_maxcount, just tell the client to
- * come back with a bigger buffer if it's not enough.
- */
- if (xdr->buf->len + 4 > gdev->gd_maxcount)
- goto toosmall;
- return nfserr;
- }
+ ops = nfsd4_layout_ops[gdev->gd_layout_type];
+ nfserr = ops->encode_getdeviceinfo(xdr, gdev);
+ if (nfserr) {
+ /*
+ * We don't bother to burden the layout drivers with
+ * enforcing gd_maxcount, just tell the client to
+ * come back with a bigger buffer if it's not enough.
+ */
+ if (xdr->buf->len + 4 > gdev->gd_maxcount)
+ goto toosmall;
+ return nfserr;
}
if (gdev->gd_notify_types) {
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index a8eda1c85829..80621a709510 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -84,11 +84,11 @@ nfsd_hashsize(unsigned int limit)
return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE);
}
-static struct svc_cacherep *
-nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
- struct nfsd_net *nn)
+static struct nfsd_cacherep *
+nfsd_cacherep_alloc(struct svc_rqst *rqstp, __wsum csum,
+ struct nfsd_net *nn)
{
- struct svc_cacherep *rp;
+ struct nfsd_cacherep *rp;
rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
if (rp) {
@@ -110,36 +110,64 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
return rp;
}
-static void
-nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
- struct nfsd_net *nn)
+static void nfsd_cacherep_free(struct nfsd_cacherep *rp)
{
- if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
- nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
+ if (rp->c_type == RC_REPLBUFF)
kfree(rp->c_replvec.iov_base);
+ kmem_cache_free(drc_slab, rp);
+}
+
+static unsigned long
+nfsd_cacherep_dispose(struct list_head *dispose)
+{
+ struct nfsd_cacherep *rp;
+ unsigned long freed = 0;
+
+ while (!list_empty(dispose)) {
+ rp = list_first_entry(dispose, struct nfsd_cacherep, c_lru);
+ list_del(&rp->c_lru);
+ nfsd_cacherep_free(rp);
+ freed++;
}
+ return freed;
+}
+
+static void
+nfsd_cacherep_unlink_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
+ struct nfsd_cacherep *rp)
+{
+ if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base)
+ nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
if (rp->c_state != RC_UNUSED) {
rb_erase(&rp->c_node, &b->rb_head);
list_del(&rp->c_lru);
atomic_dec(&nn->num_drc_entries);
nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
}
- kmem_cache_free(drc_slab, rp);
}
static void
-nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
+nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
+ struct nfsd_net *nn)
+{
+ nfsd_cacherep_unlink_locked(nn, b, rp);
+ nfsd_cacherep_free(rp);
+}
+
+static void
+nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp,
struct nfsd_net *nn)
{
spin_lock(&b->cache_lock);
- nfsd_reply_cache_free_locked(b, rp, nn);
+ nfsd_cacherep_unlink_locked(nn, b, rp);
spin_unlock(&b->cache_lock);
+ nfsd_cacherep_free(rp);
}
int nfsd_drc_slab_create(void)
{
drc_slab = kmem_cache_create("nfsd_drc",
- sizeof(struct svc_cacherep), 0, 0, NULL);
+ sizeof(struct nfsd_cacherep), 0, 0, NULL);
return drc_slab ? 0: -ENOMEM;
}
@@ -208,7 +236,7 @@ out_shrinker:
void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
{
- struct svc_cacherep *rp;
+ struct nfsd_cacherep *rp;
unsigned int i;
unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
@@ -216,7 +244,7 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
for (i = 0; i < nn->drc_hashsize; i++) {
struct list_head *head = &nn->drc_hashtbl[i].lru_head;
while (!list_empty(head)) {
- rp = list_first_entry(head, struct svc_cacherep, c_lru);
+ rp = list_first_entry(head, struct nfsd_cacherep, c_lru);
nfsd_reply_cache_free_locked(&nn->drc_hashtbl[i],
rp, nn);
}
@@ -233,7 +261,7 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
* not already scheduled.
*/
static void
-lru_put_end(struct nfsd_drc_bucket *b, struct svc_cacherep *rp)
+lru_put_end(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp)
{
rp->c_timestamp = jiffies;
list_move_tail(&rp->c_lru, &b->lru_head);
@@ -247,12 +275,21 @@ nfsd_cache_bucket_find(__be32 xid, struct nfsd_net *nn)
return &nn->drc_hashtbl[hash];
}
-static long prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn,
- unsigned int max)
+/*
+ * Move no more than @max expired entries from bucket @b to @dispose.
+ * If @max is zero, do not limit the number of removed entries.
+ */
+static void
+nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
+ unsigned int max, struct list_head *dispose)
{
- struct svc_cacherep *rp, *tmp;
- long freed = 0;
+ unsigned long expiry = jiffies - RC_EXPIRE;
+ struct nfsd_cacherep *rp, *tmp;
+ unsigned int freed = 0;
+
+ lockdep_assert_held(&b->cache_lock);
+ /* The bucket LRU is ordered oldest-first. */
list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
/*
* Don't free entries attached to calls that are still
@@ -260,60 +297,77 @@ static long prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn,
*/
if (rp->c_state == RC_INPROG)
continue;
+
if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
- time_before(jiffies, rp->c_timestamp + RC_EXPIRE))
+ time_before(expiry, rp->c_timestamp))
break;
- nfsd_reply_cache_free_locked(b, rp, nn);
- if (max && freed++ > max)
+
+ nfsd_cacherep_unlink_locked(nn, b, rp);
+ list_add(&rp->c_lru, dispose);
+
+ if (max && ++freed > max)
break;
}
- return freed;
}
-static long nfsd_prune_bucket(struct nfsd_drc_bucket *b, struct nfsd_net *nn)
+/**
+ * nfsd_reply_cache_count - count_objects method for the DRC shrinker
+ * @shrink: our registered shrinker context
+ * @sc: garbage collection parameters
+ *
+ * Returns the total number of entries in the duplicate reply cache. To
+ * keep things simple and quick, this is not the number of expired entries
+ * in the cache (ie, the number that would be removed by a call to
+ * nfsd_reply_cache_scan).
+ */
+static unsigned long
+nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
- return prune_bucket(b, nn, 3);
+ struct nfsd_net *nn = container_of(shrink,
+ struct nfsd_net, nfsd_reply_cache_shrinker);
+
+ return atomic_read(&nn->num_drc_entries);
}
-/*
- * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
- * Also prune the oldest ones when the total exceeds the max number of entries.
+/**
+ * nfsd_reply_cache_scan - scan_objects method for the DRC shrinker
+ * @shrink: our registered shrinker context
+ * @sc: garbage collection parameters
+ *
+ * Free expired entries on each bucket's LRU list until we've released
+ * nr_to_scan freed objects. Nothing will be released if the cache
+ * has not exceeded its max_drc_entries limit.
+ *
+ * Returns the number of entries released by this call.
*/
-static long
-prune_cache_entries(struct nfsd_net *nn)
+static unsigned long
+nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
+ struct nfsd_net *nn = container_of(shrink,
+ struct nfsd_net, nfsd_reply_cache_shrinker);
+ unsigned long freed = 0;
+ LIST_HEAD(dispose);
unsigned int i;
- long freed = 0;
for (i = 0; i < nn->drc_hashsize; i++) {
struct nfsd_drc_bucket *b = &nn->drc_hashtbl[i];
if (list_empty(&b->lru_head))
continue;
+
spin_lock(&b->cache_lock);
- freed += prune_bucket(b, nn, 0);
+ nfsd_prune_bucket_locked(nn, b, 0, &dispose);
spin_unlock(&b->cache_lock);
- }
- return freed;
-}
-static unsigned long
-nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
-{
- struct nfsd_net *nn = container_of(shrink,
- struct nfsd_net, nfsd_reply_cache_shrinker);
+ freed += nfsd_cacherep_dispose(&dispose);
+ if (freed > sc->nr_to_scan)
+ break;
+ }
- return atomic_read(&nn->num_drc_entries);
+ trace_nfsd_drc_gc(nn, freed);
+ return freed;
}
-static unsigned long
-nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
-{
- struct nfsd_net *nn = container_of(shrink,
- struct nfsd_net, nfsd_reply_cache_shrinker);
-
- return prune_cache_entries(nn);
-}
/*
* Walk an xdr_buf and get a CRC for at most the first RC_CSUMLEN bytes
*/
@@ -348,8 +402,8 @@ nfsd_cache_csum(struct svc_rqst *rqstp)
}
static int
-nfsd_cache_key_cmp(const struct svc_cacherep *key,
- const struct svc_cacherep *rp, struct nfsd_net *nn)
+nfsd_cache_key_cmp(const struct nfsd_cacherep *key,
+ const struct nfsd_cacherep *rp, struct nfsd_net *nn)
{
if (key->c_key.k_xid == rp->c_key.k_xid &&
key->c_key.k_csum != rp->c_key.k_csum) {
@@ -365,11 +419,11 @@ nfsd_cache_key_cmp(const struct svc_cacherep *key,
* Must be called with cache_lock held. Returns the found entry or
* inserts an empty key on failure.
*/
-static struct svc_cacherep *
-nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
+static struct nfsd_cacherep *
+nfsd_cache_insert(struct nfsd_drc_bucket *b, struct nfsd_cacherep *key,
struct nfsd_net *nn)
{
- struct svc_cacherep *rp, *ret = key;
+ struct nfsd_cacherep *rp, *ret = key;
struct rb_node **p = &b->rb_head.rb_node,
*parent = NULL;
unsigned int entries = 0;
@@ -378,7 +432,7 @@ nfsd_cache_insert(struct nfsd_drc_bucket *b, struct svc_cacherep *key,
while (*p != NULL) {
++entries;
parent = *p;
- rp = rb_entry(parent, struct svc_cacherep, c_node);
+ rp = rb_entry(parent, struct nfsd_cacherep, c_node);
cmp = nfsd_cache_key_cmp(key, rp, nn);
if (cmp < 0)
@@ -411,6 +465,7 @@ out:
/**
* nfsd_cache_lookup - Find an entry in the duplicate reply cache
* @rqstp: Incoming Call to find
+ * @cacherep: OUT: DRC entry for this request
*
* Try to find an entry matching the current call in the cache. When none
* is found, we try to grab the oldest expired entry off the LRU list. If
@@ -423,16 +478,17 @@ out:
* %RC_REPLY: Reply from cache
* %RC_DROPIT: Do not process the request further
*/
-int nfsd_cache_lookup(struct svc_rqst *rqstp)
+int nfsd_cache_lookup(struct svc_rqst *rqstp, struct nfsd_cacherep **cacherep)
{
struct nfsd_net *nn;
- struct svc_cacherep *rp, *found;
+ struct nfsd_cacherep *rp, *found;
__wsum csum;
struct nfsd_drc_bucket *b;
int type = rqstp->rq_cachetype;
+ unsigned long freed;
+ LIST_HEAD(dispose);
int rtn = RC_DOIT;
- rqstp->rq_cacherep = NULL;
if (type == RC_NOCACHE) {
nfsd_stats_rc_nocache_inc();
goto out;
@@ -445,7 +501,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp)
* preallocate an entry.
*/
nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
+ rp = nfsd_cacherep_alloc(rqstp, csum, nn);
if (!rp)
goto out;
@@ -454,20 +510,18 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp)
found = nfsd_cache_insert(b, rp, nn);
if (found != rp)
goto found_entry;
-
- nfsd_stats_rc_misses_inc();
- rqstp->rq_cacherep = rp;
+ *cacherep = rp;
rp->c_state = RC_INPROG;
+ nfsd_prune_bucket_locked(nn, b, 3, &dispose);
+ spin_unlock(&b->cache_lock);
+ freed = nfsd_cacherep_dispose(&dispose);
+ trace_nfsd_drc_gc(nn, freed);
+
+ nfsd_stats_rc_misses_inc();
atomic_inc(&nn->num_drc_entries);
nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
-
- nfsd_prune_bucket(b, nn);
-
-out_unlock:
- spin_unlock(&b->cache_lock);
-out:
- return rtn;
+ goto out;
found_entry:
/* We found a matching entry which is either in progress or done. */
@@ -505,12 +559,16 @@ found_entry:
out_trace:
trace_nfsd_drc_found(nn, rqstp, rtn);
- goto out_unlock;
+out_unlock:
+ spin_unlock(&b->cache_lock);
+out:
+ return rtn;
}
/**
* nfsd_cache_update - Update an entry in the duplicate reply cache.
* @rqstp: svc_rqst with a finished Reply
+ * @rp: IN: DRC entry for this request
* @cachetype: which cache to update
* @statp: pointer to Reply's NFS status code, or NULL
*
@@ -528,10 +586,10 @@ out_trace:
* nfsd failed to encode a reply that otherwise would have been cached.
* In this case, nfsd_cache_update is called with statp == NULL.
*/
-void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp,
+ int cachetype, __be32 *statp)
{
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- struct svc_cacherep *rp = rqstp->rq_cacherep;
struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
struct nfsd_drc_bucket *b;
int len;
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 3709830f90a6..7ed02fb88a36 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1627,6 +1627,7 @@ static void __exit exit_nfsd(void)
}
MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
+MODULE_DESCRIPTION("In-kernel NFS server");
MODULE_LICENSE("GPL");
module_init(init_nfsd)
module_exit(exit_nfsd)
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index d88498f8b275..11c14faa6c67 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -96,7 +96,12 @@ int nfsd_pool_stats_open(struct inode *, struct file *);
int nfsd_pool_stats_release(struct inode *, struct file *);
void nfsd_shutdown_threads(struct net *net);
-void nfsd_put(struct net *net);
+static inline void nfsd_put(struct net *net)
+{
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ svc_put(nn->nfsd_serv);
+}
bool i_am_nfsd(void);
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index c291389a1d71..355bf0db3235 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -614,7 +614,7 @@ out_negative:
* @fhp: file handle to be updated
*
*/
-void fh_fill_pre_attrs(struct svc_fh *fhp)
+__be32 __must_check fh_fill_pre_attrs(struct svc_fh *fhp)
{
bool v4 = (fhp->fh_maxsize == NFS4_FHSIZE);
struct inode *inode;
@@ -622,12 +622,12 @@ void fh_fill_pre_attrs(struct svc_fh *fhp)
__be32 err;
if (fhp->fh_no_wcc || fhp->fh_pre_saved)
- return;
+ return nfs_ok;
inode = d_inode(fhp->fh_dentry);
err = fh_getattr(fhp, &stat);
if (err)
- return;
+ return err;
if (v4)
fhp->fh_pre_change = nfsd4_change_attribute(&stat, inode);
@@ -636,6 +636,7 @@ void fh_fill_pre_attrs(struct svc_fh *fhp)
fhp->fh_pre_ctime = stat.ctime;
fhp->fh_pre_size = stat.size;
fhp->fh_pre_saved = true;
+ return nfs_ok;
}
/**
@@ -643,26 +644,27 @@ void fh_fill_pre_attrs(struct svc_fh *fhp)
* @fhp: file handle to be updated
*
*/
-void fh_fill_post_attrs(struct svc_fh *fhp)
+__be32 fh_fill_post_attrs(struct svc_fh *fhp)
{
bool v4 = (fhp->fh_maxsize == NFS4_FHSIZE);
struct inode *inode = d_inode(fhp->fh_dentry);
__be32 err;
if (fhp->fh_no_wcc)
- return;
+ return nfs_ok;
if (fhp->fh_post_saved)
printk("nfsd: inode locked twice during operation.\n");
err = fh_getattr(fhp, &fhp->fh_post_attr);
if (err)
- return;
+ return err;
fhp->fh_post_saved = true;
if (v4)
fhp->fh_post_change =
nfsd4_change_attribute(&fhp->fh_post_attr, inode);
+ return nfs_ok;
}
/**
@@ -672,16 +674,20 @@ void fh_fill_post_attrs(struct svc_fh *fhp)
* This is used when the directory wasn't changed, but wcc attributes
* are needed anyway.
*/
-void fh_fill_both_attrs(struct svc_fh *fhp)
+__be32 __must_check fh_fill_both_attrs(struct svc_fh *fhp)
{
- fh_fill_post_attrs(fhp);
- if (!fhp->fh_post_saved)
- return;
+ __be32 err;
+
+ err = fh_fill_post_attrs(fhp);
+ if (err)
+ return err;
+
fhp->fh_pre_change = fhp->fh_post_change;
fhp->fh_pre_mtime = fhp->fh_post_attr.mtime;
fhp->fh_pre_ctime = fhp->fh_post_attr.ctime;
fhp->fh_pre_size = fhp->fh_post_attr.size;
fhp->fh_pre_saved = true;
+ return nfs_ok;
}
/*
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index 4e0ecf0ae2cf..40426f899e76 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -294,7 +294,7 @@ static inline void fh_clear_pre_post_attrs(struct svc_fh *fhp)
}
u64 nfsd4_change_attribute(struct kstat *stat, struct inode *inode);
-extern void fh_fill_pre_attrs(struct svc_fh *fhp);
-extern void fh_fill_post_attrs(struct svc_fh *fhp);
-extern void fh_fill_both_attrs(struct svc_fh *fhp);
+__be32 __must_check fh_fill_pre_attrs(struct svc_fh *fhp);
+__be32 fh_fill_post_attrs(struct svc_fh *fhp);
+__be32 __must_check fh_fill_both_attrs(struct svc_fh *fhp);
#endif /* _LINUX_NFSD_NFSFH_H */
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 2154fa63c5f2..1582af33e204 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -542,9 +542,14 @@ static struct notifier_block nfsd_inet6addr_notifier = {
/* Only used under nfsd_mutex, so this atomic may be overkill: */
static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);
-static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
+static void nfsd_last_thread(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv = nn->nfsd_serv;
+
+ spin_lock(&nfsd_notifier_lock);
+ nn->nfsd_serv = NULL;
+ spin_unlock(&nfsd_notifier_lock);
/* check if the notifier still has clients */
if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
@@ -554,6 +559,8 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
#endif
}
+ svc_xprt_destroy_all(serv, net);
+
/*
* write_ports can create the server without actually starting
* any threads--if we get shut down before any threads are
@@ -644,7 +651,8 @@ void nfsd_shutdown_threads(struct net *net)
svc_get(serv);
/* Kill outstanding nfsd threads */
svc_set_num_threads(serv, NULL, 0);
- nfsd_put(net);
+ nfsd_last_thread(net);
+ svc_put(serv);
mutex_unlock(&nfsd_mutex);
}
@@ -674,9 +682,6 @@ int nfsd_create_serv(struct net *net)
serv->sv_maxconn = nn->max_connections;
error = svc_bind(serv, net);
if (error < 0) {
- /* NOT nfsd_put() as notifiers (see below) haven't
- * been set up yet.
- */
svc_put(serv);
return error;
}
@@ -719,29 +724,6 @@ int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
return 0;
}
-/* This is the callback for kref_put() below.
- * There is no code here as the first thing to be done is
- * call svc_shutdown_net(), but we cannot get the 'net' from
- * the kref. So do all the work when kref_put returns true.
- */
-static void nfsd_noop(struct kref *ref)
-{
-}
-
-void nfsd_put(struct net *net)
-{
- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
-
- if (kref_put(&nn->nfsd_serv->sv_refcnt, nfsd_noop)) {
- svc_xprt_destroy_all(nn->nfsd_serv, net);
- nfsd_last_thread(nn->nfsd_serv, net);
- svc_destroy(&nn->nfsd_serv->sv_refcnt);
- spin_lock(&nfsd_notifier_lock);
- nn->nfsd_serv = NULL;
- spin_unlock(&nfsd_notifier_lock);
- }
-}
-
int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
{
int i = 0;
@@ -792,7 +774,7 @@ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
if (err)
break;
}
- nfsd_put(net);
+ svc_put(nn->nfsd_serv);
return err;
}
@@ -807,6 +789,7 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
int error;
bool nfsd_up_before;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv;
mutex_lock(&nfsd_mutex);
dprintk("nfsd: creating service\n");
@@ -826,22 +809,25 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
goto out;
nfsd_up_before = nn->nfsd_net_up;
+ serv = nn->nfsd_serv;
error = nfsd_startup_net(net, cred);
if (error)
goto out_put;
- error = svc_set_num_threads(nn->nfsd_serv, NULL, nrservs);
+ error = svc_set_num_threads(serv, NULL, nrservs);
if (error)
goto out_shutdown;
- error = nn->nfsd_serv->sv_nrthreads;
+ error = serv->sv_nrthreads;
+ if (error == 0)
+ nfsd_last_thread(net);
out_shutdown:
if (error < 0 && !nfsd_up_before)
nfsd_shutdown_net(net);
out_put:
/* Threads now hold service active */
if (xchg(&nn->keep_active, 0))
- nfsd_put(net);
- nfsd_put(net);
+ svc_put(serv);
+ svc_put(serv);
out:
mutex_unlock(&nfsd_mutex);
return error;
@@ -953,7 +939,6 @@ nfsd(void *vrqstp)
struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next, typeof(struct svc_xprt), xpt_list);
struct net *net = perm_sock->xpt_net;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- int err;
/* At this point, the thread shares current->fs
* with the init process. We need to create files with the
@@ -965,15 +950,6 @@ nfsd(void *vrqstp)
current->fs->umask = 0;
- /*
- * thread is spawned with all signals set to SIG_IGN, re-enable
- * the ones that will bring down the thread
- */
- allow_signal(SIGKILL);
- allow_signal(SIGHUP);
- allow_signal(SIGINT);
- allow_signal(SIGQUIT);
-
atomic_inc(&nfsdstats.th_cnt);
set_freezable();
@@ -981,54 +957,19 @@ nfsd(void *vrqstp)
/*
* The main request loop
*/
- for (;;) {
+ while (!kthread_should_stop()) {
/* Update sv_maxconn if it has changed */
rqstp->rq_server->sv_maxconn = nn->max_connections;
- /*
- * Find a socket with data available and call its
- * recvfrom routine.
- */
- while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN)
- ;
- if (err == -EINTR)
- break;
- validate_process_creds();
- svc_process(rqstp);
+ svc_recv(rqstp);
validate_process_creds();
}
- /* Clear signals before calling svc_exit_thread() */
- flush_signals(current);
-
atomic_dec(&nfsdstats.th_cnt);
out:
- /* Take an extra ref so that the svc_put in svc_exit_thread()
- * doesn't call svc_destroy()
- */
- svc_get(nn->nfsd_serv);
-
/* Release the thread */
svc_exit_thread(rqstp);
-
- /* We need to drop a ref, but may not drop the last reference
- * without holding nfsd_mutex, and we cannot wait for nfsd_mutex as that
- * could deadlock with nfsd_shutdown_threads() waiting for us.
- * So three options are:
- * - drop a non-final reference,
- * - get the mutex without waiting
- * - sleep briefly andd try the above again
- */
- while (!svc_put_not_last(nn->nfsd_serv)) {
- if (mutex_trylock(&nfsd_mutex)) {
- nfsd_put(net);
- mutex_unlock(&nfsd_mutex);
- break;
- }
- msleep(20);
- }
-
return 0;
}
@@ -1046,6 +987,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
{
const struct svc_procedure *proc = rqstp->rq_procinfo;
__be32 *statp = rqstp->rq_accept_statp;
+ struct nfsd_cacherep *rp;
/*
* Give the xdr decoder a chance to change this if it wants
@@ -1056,7 +998,8 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
goto out_decode_err;
- switch (nfsd_cache_lookup(rqstp)) {
+ rp = NULL;
+ switch (nfsd_cache_lookup(rqstp, &rp)) {
case RC_DOIT:
break;
case RC_REPLY:
@@ -1072,7 +1015,7 @@ int nfsd_dispatch(struct svc_rqst *rqstp)
if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream))
goto out_encode_err;
- nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp + 1);
+ nfsd_cache_update(rqstp, rp, rqstp->rq_cachetype, statp + 1);
out_cached_reply:
return 1;
@@ -1082,13 +1025,13 @@ out_decode_err:
return 1;
out_update_drop:
- nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
+ nfsd_cache_update(rqstp, rp, RC_NOCACHE, NULL);
out_dropit:
return 0;
out_encode_err:
trace_nfsd_cant_encode_err(rqstp);
- nfsd_cache_update(rqstp, RC_NOCACHE, NULL);
+ nfsd_cache_update(rqstp, rp, RC_NOCACHE, NULL);
*statp = rpc_system_err;
return 1;
}
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index d49d3060ed4f..cbddcf484dba 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -732,4 +732,7 @@ static inline bool try_to_expire_client(struct nfs4_client *clp)
cmpxchg(&clp->cl_state, NFSD4_COURTESY, NFSD4_EXPIRABLE);
return clp->cl_state == NFSD4_EXPIRABLE;
}
+
+extern __be32 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp,
+ struct inode *inode);
#endif /* NFSD4_STATE_H */
diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
index 777e24e5da33..63797635e1c3 100644
--- a/fs/nfsd/stats.c
+++ b/fs/nfsd/stats.c
@@ -65,6 +65,8 @@ static int nfsd_show(struct seq_file *seq, void *v)
seq_printf(seq, " %lld",
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_NFS4_OP(i)]));
}
+ seq_printf(seq, "\nwdeleg_getattr %lld",
+ percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_WDELEG_GETATTR]));
seq_putc(seq, '\n');
#endif
diff --git a/fs/nfsd/stats.h b/fs/nfsd/stats.h
index 9b43dc3d9991..cf5524e7ca06 100644
--- a/fs/nfsd/stats.h
+++ b/fs/nfsd/stats.h
@@ -22,6 +22,7 @@ enum {
NFSD_STATS_FIRST_NFS4_OP, /* count of individual nfsv4 operations */
NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP,
#define NFSD_STATS_NFS4_OP(op) (NFSD_STATS_FIRST_NFS4_OP + (op))
+ NFSD_STATS_WDELEG_GETATTR, /* count of getattr conflict with wdeleg */
#endif
NFSD_STATS_COUNTERS_NUM
};
@@ -93,4 +94,10 @@ static inline void nfsd_stats_drc_mem_usage_sub(struct nfsd_net *nn, s64 amount)
percpu_counter_sub(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
}
+#ifdef CONFIG_NFSD_V4
+static inline void nfsd_stats_wdeleg_getattr_inc(void)
+{
+ percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_WDELEG_GETATTR]);
+}
+#endif
#endif /* _NFSD_STATS_H */
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 2af74983f146..803904348871 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -607,6 +607,7 @@ DEFINE_STATEID_EVENT(layout_recall_release);
DEFINE_STATEID_EVENT(open);
DEFINE_STATEID_EVENT(deleg_read);
+DEFINE_STATEID_EVENT(deleg_write);
DEFINE_STATEID_EVENT(deleg_return);
DEFINE_STATEID_EVENT(deleg_recall);
@@ -1240,8 +1241,8 @@ TRACE_EVENT(nfsd_drc_found,
TRACE_EVENT(nfsd_drc_mismatch,
TP_PROTO(
const struct nfsd_net *nn,
- const struct svc_cacherep *key,
- const struct svc_cacherep *rp
+ const struct nfsd_cacherep *key,
+ const struct nfsd_cacherep *rp
),
TP_ARGS(nn, key, rp),
TP_STRUCT__entry(
@@ -1261,6 +1262,28 @@ TRACE_EVENT(nfsd_drc_mismatch,
__entry->ingress)
);
+TRACE_EVENT_CONDITION(nfsd_drc_gc,
+ TP_PROTO(
+ const struct nfsd_net *nn,
+ unsigned long freed
+ ),
+ TP_ARGS(nn, freed),
+ TP_CONDITION(freed > 0),
+ TP_STRUCT__entry(
+ __field(unsigned long long, boot_time)
+ __field(unsigned long, freed)
+ __field(int, total)
+ ),
+ TP_fast_assign(
+ __entry->boot_time = nn->boot_time;
+ __entry->freed = freed;
+ __entry->total = atomic_read(&nn->num_drc_entries);
+ ),
+ TP_printk("boot_time=%16llx total=%d freed=%lu",
+ __entry->boot_time, __entry->total, __entry->freed
+ )
+);
+
TRACE_EVENT(nfsd_cb_args,
TP_PROTO(
const struct nfs4_client *clp,
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 9b7acba382fe..48260cf68fde 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1540,7 +1540,9 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
dput(dchild);
if (err)
goto out_unlock;
- fh_fill_pre_attrs(fhp);
+ err = fh_fill_pre_attrs(fhp);
+ if (err != nfs_ok)
+ goto out_unlock;
err = nfsd_create_locked(rqstp, fhp, attrs, type, rdev, resfhp);
fh_fill_post_attrs(fhp);
out_unlock:
@@ -1635,13 +1637,16 @@ nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
inode_unlock(dentry->d_inode);
goto out_drop_write;
}
- fh_fill_pre_attrs(fhp);
+ err = fh_fill_pre_attrs(fhp);
+ if (err != nfs_ok)
+ goto out_unlock;
host_err = vfs_symlink(&nop_mnt_idmap, d_inode(dentry), dnew, path);
err = nfserrno(host_err);
cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
if (!err)
nfsd_create_setattr(rqstp, fhp, resfhp, attrs);
fh_fill_post_attrs(fhp);
+out_unlock:
inode_unlock(dentry->d_inode);
if (!err)
err = nfserrno(commit_metadata(fhp));
@@ -1703,7 +1708,9 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
err = nfserr_noent;
if (d_really_is_negative(dold))
goto out_dput;
- fh_fill_pre_attrs(ffhp);
+ err = fh_fill_pre_attrs(ffhp);
+ if (err != nfs_ok)
+ goto out_dput;
host_err = vfs_link(dold, &nop_mnt_idmap, dirp, dnew, NULL);
fh_fill_post_attrs(ffhp);
inode_unlock(dirp);
@@ -1789,8 +1796,12 @@ retry:
}
trap = lock_rename(tdentry, fdentry);
- fh_fill_pre_attrs(ffhp);
- fh_fill_pre_attrs(tfhp);
+ err = fh_fill_pre_attrs(ffhp);
+ if (err != nfs_ok)
+ goto out_unlock;
+ err = fh_fill_pre_attrs(tfhp);
+ if (err != nfs_ok)
+ goto out_unlock;
odentry = lookup_one_len(fname, fdentry, flen);
host_err = PTR_ERR(odentry);
@@ -1857,6 +1868,7 @@ retry:
fh_fill_post_attrs(ffhp);
fh_fill_post_attrs(tfhp);
}
+out_unlock:
unlock_rename(tdentry, fdentry);
fh_drop_write(ffhp);
@@ -1916,12 +1928,14 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
goto out_unlock;
}
rinode = d_inode(rdentry);
- ihold(rinode);
+ err = fh_fill_pre_attrs(fhp);
+ if (err != nfs_ok)
+ goto out_unlock;
+ ihold(rinode);
if (!type)
type = d_inode(rdentry)->i_mode & S_IFMT;
- fh_fill_pre_attrs(fhp);
if (type != S_IFDIR) {
int retries;
@@ -2341,16 +2355,18 @@ nfsd_removexattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name)
return nfserrno(ret);
inode_lock(fhp->fh_dentry->d_inode);
- fh_fill_pre_attrs(fhp);
-
+ err = fh_fill_pre_attrs(fhp);
+ if (err != nfs_ok)
+ goto out_unlock;
ret = __vfs_removexattr_locked(&nop_mnt_idmap, fhp->fh_dentry,
name, NULL);
-
+ err = nfsd_xattr_errno(ret);
fh_fill_post_attrs(fhp);
+out_unlock:
inode_unlock(fhp->fh_dentry->d_inode);
fh_drop_write(fhp);
- return nfsd_xattr_errno(ret);
+ return err;
}
__be32
@@ -2368,15 +2384,17 @@ nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
if (ret)
return nfserrno(ret);
inode_lock(fhp->fh_dentry->d_inode);
- fh_fill_pre_attrs(fhp);
-
- ret = __vfs_setxattr_locked(&nop_mnt_idmap, fhp->fh_dentry, name, buf,
- len, flags, NULL);
+ err = fh_fill_pre_attrs(fhp);
+ if (err != nfs_ok)
+ goto out_unlock;
+ ret = __vfs_setxattr_locked(&nop_mnt_idmap, fhp->fh_dentry,
+ name, buf, len, flags, NULL);
fh_fill_post_attrs(fhp);
+ err = nfsd_xattr_errno(ret);
+out_unlock:
inode_unlock(fhp->fh_dentry->d_inode);
fh_drop_write(fhp);
-
- return nfsd_xattr_errno(ret);
+ return err;
}
#endif
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 510978e602da..9d918a79dc16 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -774,17 +774,6 @@ void warn_on_nonidempotent_op(struct nfsd4_op *op);
#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs)
-static inline void
-set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
-{
- BUG_ON(!fhp->fh_pre_saved);
- cinfo->atomic = (u32)(fhp->fh_post_saved && !fhp->fh_no_atomic_attr);
-
- cinfo->before_change = fhp->fh_pre_change;
- cinfo->after_change = fhp->fh_post_change;
-}
-
-
bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp);
bool nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, struct xdr_stream *xdr);
bool nfs4svc_encode_compoundres(struct svc_rqst *rqstp, struct xdr_stream *xdr);
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 5fffddea554f..cfec5e0c7f66 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -571,12 +571,8 @@ static void init_once(void *foo)
/*
* Noinline to reduce binary size.
*/
-static noinline void ntfs3_free_sbi(struct ntfs_sb_info *sbi)
+static noinline void ntfs3_put_sbi(struct ntfs_sb_info *sbi)
{
- kfree(sbi->new_rec);
- kvfree(ntfs_put_shared(sbi->upcase));
- kfree(sbi->def_table);
-
wnd_close(&sbi->mft.bitmap);
wnd_close(&sbi->used.bitmap);
@@ -601,6 +597,13 @@ static noinline void ntfs3_free_sbi(struct ntfs_sb_info *sbi)
indx_clear(&sbi->security.index_sdh);
indx_clear(&sbi->reparse.index_r);
indx_clear(&sbi->objid.index_o);
+}
+
+static void ntfs3_free_sbi(struct ntfs_sb_info *sbi)
+{
+ kfree(sbi->new_rec);
+ kvfree(ntfs_put_shared(sbi->upcase));
+ kfree(sbi->def_table);
kfree(sbi->compress.lznt);
#ifdef CONFIG_NTFS3_LZX_XPRESS
xpress_free_decompressor(sbi->compress.xpress);
@@ -625,6 +628,7 @@ static void ntfs_put_super(struct super_block *sb)
/* Mark rw ntfs as clear, if possible. */
ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
+ ntfs3_put_sbi(sbi);
}
static int ntfs_statfs(struct dentry *dentry, struct kstatfs *buf)
@@ -1644,8 +1648,10 @@ static void ntfs_fs_free(struct fs_context *fc)
struct ntfs_mount_options *opts = fc->fs_private;
struct ntfs_sb_info *sbi = fc->s_fs_info;
- if (sbi)
+ if (sbi) {
+ ntfs3_put_sbi(sbi);
ntfs3_free_sbi(sbi);
+ }
if (opts)
put_mount_options(opts);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 0f2aa36a9913..3dd5be96691b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -4,6 +4,7 @@
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
+#include <linux/ksm.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
@@ -396,6 +397,7 @@ struct mem_size_stats {
unsigned long swap;
unsigned long shared_hugetlb;
unsigned long private_hugetlb;
+ unsigned long ksm;
u64 pss;
u64 pss_anon;
u64 pss_file;
@@ -452,6 +454,9 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
mss->lazyfree += size;
}
+ if (PageKsm(page))
+ mss->ksm += size;
+
mss->resident += size;
/* Accumulate the size in pages that have been accessed. */
if (young || page_is_young(page) || PageReferenced(page))
@@ -825,6 +830,7 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
SEQ_PUT_DEC(" kB\nPrivate_Dirty: ", mss->private_dirty);
SEQ_PUT_DEC(" kB\nReferenced: ", mss->referenced);
SEQ_PUT_DEC(" kB\nAnonymous: ", mss->anonymous);
+ SEQ_PUT_DEC(" kB\nKSM: ", mss->ksm);
SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree);
SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp);
SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
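The task_mmu.c hunks above add a "KSM:" line to each /proc/<pid>/smaps entry, counting the resident bytes whose pages PageKsm() reports as KSM-backed. A minimal userspace sketch that totals the new field across all mappings (the field name comes from the hunk; the choice of /proc/self and the parsing details are only illustrative):

/* Sum the new "KSM:" smaps field for a process (defaults to self). */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], line[256];
	unsigned long kb, total_kb = 0;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%s/smaps",
		 argc > 1 ? argv[1] : "self");
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "KSM: %lu kB", &kb) == 1)
			total_kb += kb;
	}
	fclose(f);
	printf("KSM-backed resident memory: %lu kB\n", total_kb);
	return 0;
}
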
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 62356d542ef6..e5bca9a004cc 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -98,7 +98,14 @@ MODULE_PARM_DESC(kmsg_bytes, "amount of kernel log to snapshot (in bytes)");
static void *compress_workspace;
+/*
+ * Compression is only used for dmesg output, which consists of low-entropy
+ * ASCII text, and so we can assume worst-case 60%.
+ */
+#define DMESG_COMP_PERCENT 60
+
static char *big_oops_buf;
+static size_t max_compressed_size;
void pstore_set_kmsg_bytes(int bytes)
{
@@ -196,6 +203,7 @@ static int pstore_compress(const void *in, void *out,
static void allocate_buf_for_compression(void)
{
+ size_t compressed_size;
char *buf;
/* Skip if not built-in or compression disabled. */
@@ -216,7 +224,8 @@ static void allocate_buf_for_compression(void)
* uncompressed record size, since any record that would be expanded by
* compression is just stored uncompressed.
*/
- buf = kvzalloc(psinfo->bufsize, GFP_KERNEL);
+ compressed_size = (psinfo->bufsize * 100) / DMESG_COMP_PERCENT;
+ buf = kvzalloc(compressed_size, GFP_KERNEL);
if (!buf) {
pr_err("Failed %zu byte compression buffer allocation for: %s\n",
psinfo->bufsize, compress);
@@ -233,6 +242,7 @@ static void allocate_buf_for_compression(void)
/* A non-NULL big_oops_buf indicates compression is available. */
big_oops_buf = buf;
+ max_compressed_size = compressed_size;
pr_info("Using crash dump compression: %s\n", compress);
}
@@ -246,6 +256,7 @@ static void free_buf_for_compression(void)
kvfree(big_oops_buf);
big_oops_buf = NULL;
+ max_compressed_size = 0;
}
void pstore_record_init(struct pstore_record *record,
@@ -305,7 +316,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
record.buf = psinfo->buf;
dst = big_oops_buf ?: psinfo->buf;
- dst_size = psinfo->bufsize;
+ dst_size = max_compressed_size ?: psinfo->bufsize;
/* Write dump header. */
header_size = snprintf(dst, dst_size, "%s#%d Part%u\n", why,
@@ -326,8 +337,15 @@ static void pstore_dump(struct kmsg_dumper *dumper,
record.compressed = true;
record.size = zipped_len;
} else {
- record.size = header_size + dump_size;
- memcpy(psinfo->buf, dst, record.size);
+ /*
+ * Compression failed, so the buffer is most
+ * likely filled with binary data that does not
+ * compress as well as ASCII text. Copy as much
+ * of the uncompressed data as possible into
+ * the pstore record, and discard the rest.
+ */
+ record.size = psinfo->bufsize;
+ memcpy(psinfo->buf, dst, psinfo->bufsize);
}
} else {
record.size = header_size + dump_size;
@@ -560,6 +578,7 @@ static void decompress_record(struct pstore_record *record,
int ret;
int unzipped_len;
char *unzipped, *workspace;
+ size_t max_uncompressed_size;
if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed)
return;
@@ -583,7 +602,8 @@ static void decompress_record(struct pstore_record *record,
}
/* Allocate enough space to hold max decompression and ECC. */
- workspace = kvzalloc(psinfo->bufsize + record->ecc_notice_size,
+ max_uncompressed_size = 3 * psinfo->bufsize;
+ workspace = kvzalloc(max_uncompressed_size + record->ecc_notice_size,
GFP_KERNEL);
if (!workspace)
return;
@@ -591,11 +611,11 @@ static void decompress_record(struct pstore_record *record,
zstream->next_in = record->buf;
zstream->avail_in = record->size;
zstream->next_out = workspace;
- zstream->avail_out = psinfo->bufsize;
+ zstream->avail_out = max_uncompressed_size;
ret = zlib_inflate(zstream, Z_FINISH);
if (ret != Z_STREAM_END) {
- pr_err("zlib_inflate() failed, ret = %d!\n", ret);
+ pr_err_ratelimited("zlib_inflate() failed, ret = %d!\n", ret);
kvfree(workspace);
return;
}
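Two sizing rules change in the pstore hunks above: the capture buffer handed to the kmsg dumper grows to bufsize * 100 / 60, on the stated assumption that dmesg text compresses to at most 60% of its original size, and the decompression workspace grows to 3 * bufsize. A stand-alone arithmetic check, assuming a 64 KiB record buffer purely for illustration (real bufsize values depend on the pstore backend):

/* Reproduce the new pstore buffer sizing for an example bufsize. */
#include <stdio.h>
#include <stddef.h>

#define DMESG_COMP_PERCENT 60	/* worst-case ratio assumed by the patch */

int main(void)
{
	size_t bufsize = 64 * 1024;	/* example only; backend-specific */
	size_t capture = (bufsize * 100) / DMESG_COMP_PERCENT;
	size_t workspace = 3 * bufsize;

	printf("big_oops_buf (pre-compression capture): %zu bytes (~%zu KiB)\n",
	       capture, capture / 1024);
	printf("decompression workspace: %zu bytes (%zu KiB)\n",
	       workspace, workspace / 1024);
	return 0;
}
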
diff --git a/fs/stat.c b/fs/stat.c
index 136711ae72fb..6822ac77aec2 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -311,6 +311,23 @@ int vfs_fstatat(int dfd, const char __user *filename,
int statx_flags = flags | AT_NO_AUTOMOUNT;
struct filename *name;
+ /*
+ * Work around glibc turning fstat() into fstatat(AT_EMPTY_PATH)
+ *
+ * If AT_EMPTY_PATH is set, we expect the common case to be that
+ * empty path, and avoid doing all the extra pathname work.
+ */
+ if (dfd >= 0 && flags == AT_EMPTY_PATH) {
+ char c;
+
+ ret = get_user(c, filename);
+ if (unlikely(ret))
+ return ret;
+
+ if (likely(!c))
+ return vfs_fstat(dfd, stat);
+ }
+
name = getname_flags(filename, getname_statx_lookup_flags(statx_flags), NULL);
ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
putname(name);
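The vfs_fstatat() hunk above adds a fast path: with a valid dfd, flags equal to AT_EMPTY_PATH, and a path whose first byte is NUL, the call is routed straight to vfs_fstat() and the pathname walk is skipped. That is the call shape glibc generates when it implements fstat() via fstatat(), as the comment notes. A minimal userspace illustration of the pattern that hits the shortcut (the file name is arbitrary):

/* fstat()-style query expressed as the empty-path fstatat() the kernel now
 * special-cases.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	int fd = open("/etc/hostname", O_RDONLY);

	if (fd < 0)
		return 1;
	if (fstatat(fd, "", &st, AT_EMPTY_PATH) == 0)
		printf("size: %lld bytes\n", (long long)st.st_size);
	close(fd);
	return 0;
}
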
diff --git a/fs/tracefs/Makefile b/fs/tracefs/Makefile
index 7c35a282b484..73c56da8e284 100644
--- a/fs/tracefs/Makefile
+++ b/fs/tracefs/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
tracefs-objs := inode.o
+tracefs-objs += event_inode.o
obj-$(CONFIG_TRACING) += tracefs.o
diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
new file mode 100644
index 000000000000..237c6f370ad9
--- /dev/null
+++ b/fs/tracefs/event_inode.c
@@ -0,0 +1,807 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * event_inode.c - part of tracefs, a pseudo file system for activating tracing
+ *
+ * Copyright (C) 2020-23 VMware Inc, author: Steven Rostedt (VMware) <rostedt@goodmis.org>
+ * Copyright (C) 2020-23 VMware Inc, author: Ajay Kaher <akaher@vmware.com>
+ *
+ * eventfs is used to dynamically create inodes and dentries based on the
+ * meta data provided by the tracing system.
+ *
+ * eventfs stores the meta-data of files/dirs and holds off on creating
+ * inodes/dentries of the files. When accessed, the eventfs will create the
+ * inodes/dentries in a just-in-time (JIT) manner. The eventfs will clean up
+ * and delete the inodes/dentries when they are no longer referenced.
+ */
+#include <linux/fsnotify.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/workqueue.h>
+#include <linux/security.h>
+#include <linux/tracefs.h>
+#include <linux/kref.h>
+#include <linux/delay.h>
+#include "internal.h"
+
+struct eventfs_inode {
+ struct list_head e_top_files;
+};
+
+/*
+ * struct eventfs_file - hold the properties of the eventfs files and
+ * directories.
+ * @name: the name of the file or directory to create
+ * @d_parent: holds parent's dentry
+ * @dentry: once accessed holds dentry
+ * @list: file or directory to be added to parent directory
+ * @ei: list of files and directories within directory
+ * @fop: file_operations for file or directory
+ * @iop: inode_operations for file or directory
+ * @data: something that the caller will want to get to later on
+ * @mode: the permission that the file or directory should have
+ */
+struct eventfs_file {
+ const char *name;
+ struct dentry *d_parent;
+ struct dentry *dentry;
+ struct list_head list;
+ struct eventfs_inode *ei;
+ const struct file_operations *fop;
+ const struct inode_operations *iop;
+ /*
+ * Union - used for deletion
+ * @del_list: list of eventfs_file to delete
+ * @rcu: eventfs_file to delete in RCU
+ * @is_freed: node is freed if one of the above is set
+ */
+ union {
+ struct list_head del_list;
+ struct rcu_head rcu;
+ unsigned long is_freed;
+ };
+ void *data;
+ umode_t mode;
+};
+
+static DEFINE_MUTEX(eventfs_mutex);
+DEFINE_STATIC_SRCU(eventfs_srcu);
+
+static struct dentry *eventfs_root_lookup(struct inode *dir,
+ struct dentry *dentry,
+ unsigned int flags);
+static int dcache_dir_open_wrapper(struct inode *inode, struct file *file);
+static int eventfs_release(struct inode *inode, struct file *file);
+
+static const struct inode_operations eventfs_root_dir_inode_operations = {
+ .lookup = eventfs_root_lookup,
+};
+
+static const struct file_operations eventfs_file_operations = {
+ .open = dcache_dir_open_wrapper,
+ .read = generic_read_dir,
+ .iterate_shared = dcache_readdir,
+ .llseek = generic_file_llseek,
+ .release = eventfs_release,
+};
+
+/**
+ * create_file - create a file in the tracefs filesystem
+ * @name: the name of the file to create.
+ * @mode: the permission that the file should have.
+ * @parent: parent dentry for this file.
+ * @data: something that the caller will want to get to later on.
+ * @fop: struct file_operations that should be used for this file.
+ *
+ * This is the basic "create a file" function for tracefs. It allows for a
+ * wide range of flexibility in creating a file.
+ *
+ * This function will return a pointer to a dentry if it succeeds. This
+ * pointer must be passed to the tracefs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here.) If an error occurs, %NULL will be returned.
+ *
+ * If tracefs is not enabled in the kernel, the value -%ENODEV will be
+ * returned.
+ */
+static struct dentry *create_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fop)
+{
+ struct tracefs_inode *ti;
+ struct dentry *dentry;
+ struct inode *inode;
+
+ if (!(mode & S_IFMT))
+ mode |= S_IFREG;
+
+ if (WARN_ON_ONCE(!S_ISREG(mode)))
+ return NULL;
+
+ dentry = eventfs_start_creating(name, parent);
+
+ if (IS_ERR(dentry))
+ return dentry;
+
+ inode = tracefs_get_inode(dentry->d_sb);
+ if (unlikely(!inode))
+ return eventfs_failed_creating(dentry);
+
+ inode->i_mode = mode;
+ inode->i_fop = fop;
+ inode->i_private = data;
+
+ ti = get_tracefs(inode);
+ ti->flags |= TRACEFS_EVENT_INODE;
+ d_instantiate(dentry, inode);
+ fsnotify_create(dentry->d_parent->d_inode, dentry);
+ return eventfs_end_creating(dentry);
+};
+
+/**
+ * create_dir - create a dir in the tracefs filesystem
+ * @name: the name of the file to create.
+ * @parent: parent dentry for this file.
+ * @data: something that the caller will want to get to later on.
+ *
+ * This is the basic "create a dir" function for eventfs. It allows for a
+ * wide range of flexibility in creating a dir.
+ *
+ * This function will return a pointer to a dentry if it succeeds. This
+ * pointer must be passed to the tracefs_remove() function when the file is
+ * to be removed (no automatic cleanup happens if your module is unloaded,
+ * you are responsible here.) If an error occurs, %NULL will be returned.
+ *
+ * If tracefs is not enabled in the kernel, the value -%ENODEV will be
+ * returned.
+ */
+static struct dentry *create_dir(const char *name, struct dentry *parent, void *data)
+{
+ struct tracefs_inode *ti;
+ struct dentry *dentry;
+ struct inode *inode;
+
+ dentry = eventfs_start_creating(name, parent);
+ if (IS_ERR(dentry))
+ return dentry;
+
+ inode = tracefs_get_inode(dentry->d_sb);
+ if (unlikely(!inode))
+ return eventfs_failed_creating(dentry);
+
+ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+ inode->i_op = &eventfs_root_dir_inode_operations;
+ inode->i_fop = &eventfs_file_operations;
+ inode->i_private = data;
+
+ ti = get_tracefs(inode);
+ ti->flags |= TRACEFS_EVENT_INODE;
+
+ inc_nlink(inode);
+ d_instantiate(dentry, inode);
+ inc_nlink(dentry->d_parent->d_inode);
+ fsnotify_mkdir(dentry->d_parent->d_inode, dentry);
+ return eventfs_end_creating(dentry);
+}
+
+/**
+ * eventfs_set_ef_status_free - set the ef->status to free
+ * @dentry: dentry who's status to be freed
+ *
+ * eventfs_set_ef_status_free will be called if no more
+ * references remain
+ */
+void eventfs_set_ef_status_free(struct dentry *dentry)
+{
+ struct tracefs_inode *ti_parent;
+ struct eventfs_file *ef;
+
+ mutex_lock(&eventfs_mutex);
+ ti_parent = get_tracefs(dentry->d_parent->d_inode);
+ if (!ti_parent || !(ti_parent->flags & TRACEFS_EVENT_INODE))
+ goto out;
+
+ ef = dentry->d_fsdata;
+ if (!ef)
+ goto out;
+
+ /*
+ * If ef was freed, then the LSB bit is set for d_fsdata.
+ * But this should not happen, as it should still have a
+ * ref count that prevents it. Warn in case it does.
+ */
+ if (WARN_ON_ONCE((unsigned long)ef & 1))
+ goto out;
+
+ dentry->d_fsdata = NULL;
+ ef->dentry = NULL;
+out:
+ mutex_unlock(&eventfs_mutex);
+}
+
+/**
+ * eventfs_post_create_dir - post create dir routine
+ * @ef: eventfs_file of recently created dir
+ *
+ * Map the meta-data of files within an eventfs dir to their parent dentry
+ */
+static void eventfs_post_create_dir(struct eventfs_file *ef)
+{
+ struct eventfs_file *ef_child;
+ struct tracefs_inode *ti;
+
+ /* srcu lock already held */
+ /* fill parent-child relation */
+ list_for_each_entry_srcu(ef_child, &ef->ei->e_top_files, list,
+ srcu_read_lock_held(&eventfs_srcu)) {
+ ef_child->d_parent = ef->dentry;
+ }
+
+ ti = get_tracefs(ef->dentry->d_inode);
+ ti->private = ef->ei;
+}
+
+/**
+ * create_dentry - helper function to create dentry
+ * @ef: eventfs_file of file or directory to create
+ * @parent: parent dentry
+ * @lookup: true if called from lookup routine
+ *
+ * Used to create a dentry for file/dir, executes post dentry creation routine
+ */
+static struct dentry *
+create_dentry(struct eventfs_file *ef, struct dentry *parent, bool lookup)
+{
+ bool invalidate = false;
+ struct dentry *dentry;
+
+ mutex_lock(&eventfs_mutex);
+ if (ef->is_freed) {
+ mutex_unlock(&eventfs_mutex);
+ return NULL;
+ }
+ if (ef->dentry) {
+ dentry = ef->dentry;
+ /* On dir open, up the ref count */
+ if (!lookup)
+ dget(dentry);
+ mutex_unlock(&eventfs_mutex);
+ return dentry;
+ }
+ mutex_unlock(&eventfs_mutex);
+
+ if (!lookup)
+ inode_lock(parent->d_inode);
+
+ if (ef->ei)
+ dentry = create_dir(ef->name, parent, ef->data);
+ else
+ dentry = create_file(ef->name, ef->mode, parent,
+ ef->data, ef->fop);
+
+ if (!lookup)
+ inode_unlock(parent->d_inode);
+
+ mutex_lock(&eventfs_mutex);
+ if (IS_ERR_OR_NULL(dentry)) {
+ /* If the ef was already updated get it */
+ dentry = ef->dentry;
+ if (dentry && !lookup)
+ dget(dentry);
+ mutex_unlock(&eventfs_mutex);
+ return dentry;
+ }
+
+ if (!ef->dentry && !ef->is_freed) {
+ ef->dentry = dentry;
+ if (ef->ei)
+ eventfs_post_create_dir(ef);
+ dentry->d_fsdata = ef;
+ } else {
+ /* A race here, should try again (unless freed) */
+ invalidate = true;
+
+ /*
+ * Should never happen unless we get here due to being freed.
+ * Otherwise it means two dentries exist with the same name.
+ */
+ WARN_ON_ONCE(!ef->is_freed);
+ }
+ mutex_unlock(&eventfs_mutex);
+ if (invalidate)
+ d_invalidate(dentry);
+
+ if (lookup || invalidate)
+ dput(dentry);
+
+ return invalidate ? NULL : dentry;
+}
+
+static bool match_event_file(struct eventfs_file *ef, const char *name)
+{
+ bool ret;
+
+ mutex_lock(&eventfs_mutex);
+ ret = !ef->is_freed && strcmp(ef->name, name) == 0;
+ mutex_unlock(&eventfs_mutex);
+
+ return ret;
+}
+
+/**
+ * eventfs_root_lookup - lookup routine to create file/dir
+ * @dir: in which a lookup is being done
+ * @dentry: file/dir dentry
+ * @flags: to pass as flags parameter to simple lookup
+ *
+ * Used to create a dynamic file/dir within @dir. Use the eventfs_inode
+ * list of meta data to find the information needed to create the file/dir.
+ */
+static struct dentry *eventfs_root_lookup(struct inode *dir,
+ struct dentry *dentry,
+ unsigned int flags)
+{
+ struct tracefs_inode *ti;
+ struct eventfs_inode *ei;
+ struct eventfs_file *ef;
+ struct dentry *ret = NULL;
+ int idx;
+
+ ti = get_tracefs(dir);
+ if (!(ti->flags & TRACEFS_EVENT_INODE))
+ return NULL;
+
+ ei = ti->private;
+ idx = srcu_read_lock(&eventfs_srcu);
+ list_for_each_entry_srcu(ef, &ei->e_top_files, list,
+ srcu_read_lock_held(&eventfs_srcu)) {
+ if (!match_event_file(ef, dentry->d_name.name))
+ continue;
+ ret = simple_lookup(dir, dentry, flags);
+ create_dentry(ef, ef->d_parent, true);
+ break;
+ }
+ srcu_read_unlock(&eventfs_srcu, idx);
+ return ret;
+}
+
+/**
+ * eventfs_release - called to release eventfs file/dir
+ * @inode: inode to be released
+ * @file: file to be released (not used)
+ */
+static int eventfs_release(struct inode *inode, struct file *file)
+{
+ struct tracefs_inode *ti;
+ struct eventfs_inode *ei;
+ struct eventfs_file *ef;
+ struct dentry *dentry;
+ int idx;
+
+ ti = get_tracefs(inode);
+ if (!(ti->flags & TRACEFS_EVENT_INODE))
+ return -EINVAL;
+
+ ei = ti->private;
+ idx = srcu_read_lock(&eventfs_srcu);
+ list_for_each_entry_srcu(ef, &ei->e_top_files, list,
+ srcu_read_lock_held(&eventfs_srcu)) {
+ mutex_lock(&eventfs_mutex);
+ dentry = ef->dentry;
+ mutex_unlock(&eventfs_mutex);
+ if (dentry)
+ dput(dentry);
+ }
+ srcu_read_unlock(&eventfs_srcu, idx);
+ return dcache_dir_close(inode, file);
+}
+
+/**
+ * dcache_dir_open_wrapper - eventfs open wrapper
+ * @inode: not used
+ * @file: dir to be opened (to create its child)
+ *
+ * Used to dynamically create the file/dir within @file. @file is really a
+ * directory and all the files/dirs of the children within @file will be
+ * created. If any of the files/dirs have already been created, their
+ * reference count will be incremented.
+ */
+static int dcache_dir_open_wrapper(struct inode *inode, struct file *file)
+{
+ struct tracefs_inode *ti;
+ struct eventfs_inode *ei;
+ struct eventfs_file *ef;
+ struct dentry *dentry = file_dentry(file);
+ struct inode *f_inode = file_inode(file);
+ int idx;
+
+ ti = get_tracefs(f_inode);
+ if (!(ti->flags & TRACEFS_EVENT_INODE))
+ return -EINVAL;
+
+ ei = ti->private;
+ idx = srcu_read_lock(&eventfs_srcu);
+ list_for_each_entry_rcu(ef, &ei->e_top_files, list) {
+ create_dentry(ef, dentry, false);
+ }
+ srcu_read_unlock(&eventfs_srcu, idx);
+ return dcache_dir_open(inode, file);
+}
+
+/**
+ * eventfs_prepare_ef - helper function to prepare eventfs_file
+ * @name: the name of the file/directory to create.
+ * @mode: the permission that the file should have.
+ * @fop: struct file_operations that should be used for this file/directory.
+ * @iop: struct inode_operations that should be used for this file/directory.
+ * @data: something that the caller will want to get to later on. The
+ * inode.i_private pointer will point to this value on the open() call.
+ *
+ * This function allocates and fills the eventfs_file structure.
+ */
+static struct eventfs_file *eventfs_prepare_ef(const char *name, umode_t mode,
+ const struct file_operations *fop,
+ const struct inode_operations *iop,
+ void *data)
+{
+ struct eventfs_file *ef;
+
+ ef = kzalloc(sizeof(*ef), GFP_KERNEL);
+ if (!ef)
+ return ERR_PTR(-ENOMEM);
+
+ ef->name = kstrdup(name, GFP_KERNEL);
+ if (!ef->name) {
+ kfree(ef);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (S_ISDIR(mode)) {
+ ef->ei = kzalloc(sizeof(*ef->ei), GFP_KERNEL);
+ if (!ef->ei) {
+ kfree(ef->name);
+ kfree(ef);
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&ef->ei->e_top_files);
+ } else {
+ ef->ei = NULL;
+ }
+
+ ef->iop = iop;
+ ef->fop = fop;
+ ef->mode = mode;
+ ef->data = data;
+ return ef;
+}
+
+/**
+ * eventfs_create_events_dir - create the trace event structure
+ * @name: the name of the directory to create.
+ * @parent: parent dentry for this file. This should be a directory dentry
+ * if set. If this parameter is NULL, then the directory will be
+ * created in the root of the tracefs filesystem.
+ *
+ * This function creates the top of the trace event directory.
+ */
+struct dentry *eventfs_create_events_dir(const char *name,
+ struct dentry *parent)
+{
+ struct dentry *dentry = tracefs_start_creating(name, parent);
+ struct eventfs_inode *ei;
+ struct tracefs_inode *ti;
+ struct inode *inode;
+
+ if (IS_ERR(dentry))
+ return dentry;
+
+ ei = kzalloc(sizeof(*ei), GFP_KERNEL);
+ if (!ei)
+ return ERR_PTR(-ENOMEM);
+ inode = tracefs_get_inode(dentry->d_sb);
+ if (unlikely(!inode)) {
+ kfree(ei);
+ tracefs_failed_creating(dentry);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ INIT_LIST_HEAD(&ei->e_top_files);
+
+ ti = get_tracefs(inode);
+ ti->flags |= TRACEFS_EVENT_INODE;
+ ti->private = ei;
+
+ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
+ inode->i_op = &eventfs_root_dir_inode_operations;
+ inode->i_fop = &eventfs_file_operations;
+
+ /* directory inodes start off with i_nlink == 2 (for "." entry) */
+ inc_nlink(inode);
+ d_instantiate(dentry, inode);
+ inc_nlink(dentry->d_parent->d_inode);
+ fsnotify_mkdir(dentry->d_parent->d_inode, dentry);
+ return tracefs_end_creating(dentry);
+}
+
+/**
+ * eventfs_add_subsystem_dir - add eventfs subsystem_dir to list to create later
+ * @name: the name of the file to create.
+ * @parent: parent dentry for this dir.
+ *
+ * This function adds eventfs subsystem dir to list.
+ * And all these dirs are created on the fly when they are looked up,
+ * and the dentry and inodes will be removed when they are done.
+ */
+struct eventfs_file *eventfs_add_subsystem_dir(const char *name,
+ struct dentry *parent)
+{
+ struct tracefs_inode *ti_parent;
+ struct eventfs_inode *ei_parent;
+ struct eventfs_file *ef;
+
+ if (!parent)
+ return ERR_PTR(-EINVAL);
+
+ ti_parent = get_tracefs(parent->d_inode);
+ ei_parent = ti_parent->private;
+
+ ef = eventfs_prepare_ef(name, S_IFDIR, NULL, NULL, NULL);
+ if (IS_ERR(ef))
+ return ef;
+
+ mutex_lock(&eventfs_mutex);
+ list_add_tail(&ef->list, &ei_parent->e_top_files);
+ ef->d_parent = parent;
+ mutex_unlock(&eventfs_mutex);
+ return ef;
+}
+
+/**
+ * eventfs_add_dir - add eventfs dir to list to create later
+ * @name: the name of the file to create.
+ * @ef_parent: parent eventfs_file for this dir.
+ *
+ * This function adds eventfs dir to list.
+ * And all these dirs are created on the fly when they are looked up,
+ * and the dentry and inodes will be removed when they are done.
+ */
+struct eventfs_file *eventfs_add_dir(const char *name,
+ struct eventfs_file *ef_parent)
+{
+ struct eventfs_file *ef;
+
+ if (!ef_parent)
+ return ERR_PTR(-EINVAL);
+
+ ef = eventfs_prepare_ef(name, S_IFDIR, NULL, NULL, NULL);
+ if (IS_ERR(ef))
+ return ef;
+
+ mutex_lock(&eventfs_mutex);
+ list_add_tail(&ef->list, &ef_parent->ei->e_top_files);
+ ef->d_parent = ef_parent->dentry;
+ mutex_unlock(&eventfs_mutex);
+ return ef;
+}
+
+/**
+ * eventfs_add_events_file - add the data needed to create a file for later reference
+ * @name: the name of the file to create.
+ * @mode: the permission that the file should have.
+ * @parent: parent dentry for this file.
+ * @data: something that the caller will want to get to later on.
+ * @fop: struct file_operations that should be used for this file.
+ *
+ * This function is used to add the information needed to create a
+ * dentry/inode within the top level events directory. The file created
+ * will have the @mode permissions. The @data will be used to fill the
+ * inode.i_private when the open() call is done. The dentry and inodes are
+ * all created when they are referenced, and removed when they are no
+ * longer referenced.
+ */
+int eventfs_add_events_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fop)
+{
+ struct tracefs_inode *ti;
+ struct eventfs_inode *ei;
+ struct eventfs_file *ef;
+
+ if (!parent)
+ return -EINVAL;
+
+ if (!(mode & S_IFMT))
+ mode |= S_IFREG;
+
+ if (!parent->d_inode)
+ return -EINVAL;
+
+ ti = get_tracefs(parent->d_inode);
+ if (!(ti->flags & TRACEFS_EVENT_INODE))
+ return -EINVAL;
+
+ ei = ti->private;
+ ef = eventfs_prepare_ef(name, mode, fop, NULL, data);
+
+ if (IS_ERR(ef))
+ return -ENOMEM;
+
+ mutex_lock(&eventfs_mutex);
+ list_add_tail(&ef->list, &ei->e_top_files);
+ ef->d_parent = parent;
+ mutex_unlock(&eventfs_mutex);
+ return 0;
+}
+
+/**
+ * eventfs_add_file - add eventfs file to list to create later
+ * @name: the name of the file to create.
+ * @mode: the permission that the file should have.
+ * @ef_parent: parent eventfs_file for this file.
+ * @data: something that the caller will want to get to later on.
+ * @fop: struct file_operations that should be used for this file.
+ *
+ * This function is used to add the information needed to create a
+ * file within a subdirectory of the events directory. The file created
+ * will have the @mode permissions. The @data will be used to fill the
+ * inode.i_private when the open() call is done. The dentry and inodes are
+ * all created when they are referenced, and removed when they are no
+ * longer referenced.
+ */
+int eventfs_add_file(const char *name, umode_t mode,
+ struct eventfs_file *ef_parent,
+ void *data,
+ const struct file_operations *fop)
+{
+ struct eventfs_file *ef;
+
+ if (!ef_parent)
+ return -EINVAL;
+
+ if (!(mode & S_IFMT))
+ mode |= S_IFREG;
+
+ ef = eventfs_prepare_ef(name, mode, fop, NULL, data);
+ if (IS_ERR(ef))
+ return -ENOMEM;
+
+ mutex_lock(&eventfs_mutex);
+ list_add_tail(&ef->list, &ef_parent->ei->e_top_files);
+ ef->d_parent = ef_parent->dentry;
+ mutex_unlock(&eventfs_mutex);
+ return 0;
+}
+
+static void free_ef(struct rcu_head *head)
+{
+ struct eventfs_file *ef = container_of(head, struct eventfs_file, rcu);
+
+ kfree(ef->name);
+ kfree(ef->ei);
+ kfree(ef);
+}
+
+/**
+ * eventfs_remove_rec - remove eventfs dir or file from list
+ * @ef: eventfs_file to be removed.
+ * @head: to create list of eventfs_file to be deleted
+ * @level: to check recursion depth
+ *
+ * The helper function eventfs_remove_rec() is used to clean up and free the
+ * associated data from eventfs for both of the added functions.
+ */
+static void eventfs_remove_rec(struct eventfs_file *ef, struct list_head *head, int level)
+{
+ struct eventfs_file *ef_child;
+
+ if (!ef)
+ return;
+ /*
+ * Check recursion depth. It should never be greater than 3:
+ * 0 - events/
+ * 1 - events/group/
+ * 2 - events/group/event/
+ * 3 - events/group/event/file
+ */
+ if (WARN_ON_ONCE(level > 3))
+ return;
+
+ if (ef->ei) {
+ /* search for nested folders or files */
+ list_for_each_entry_srcu(ef_child, &ef->ei->e_top_files, list,
+ lockdep_is_held(&eventfs_mutex)) {
+ eventfs_remove_rec(ef_child, head, level + 1);
+ }
+ }
+
+ list_del_rcu(&ef->list);
+ list_add_tail(&ef->del_list, head);
+}
+
+/**
+ * eventfs_remove - remove eventfs dir or file from list
+ * @ef: eventfs_file to be removed.
+ *
+ * This function acquires the eventfs_mutex lock and calls eventfs_remove_rec()
+ */
+void eventfs_remove(struct eventfs_file *ef)
+{
+ struct eventfs_file *tmp;
+ LIST_HEAD(ef_del_list);
+ struct dentry *dentry_list = NULL;
+ struct dentry *dentry;
+
+ if (!ef)
+ return;
+
+ mutex_lock(&eventfs_mutex);
+ eventfs_remove_rec(ef, &ef_del_list, 0);
+ list_for_each_entry_safe(ef, tmp, &ef_del_list, del_list) {
+ if (ef->dentry) {
+ unsigned long ptr = (unsigned long)dentry_list;
+
+ /* Keep the dentry from being freed yet */
+ dget(ef->dentry);
+
+ /*
+ * Paranoid: The dget() above should prevent the dentry
+ * from being freed and calling eventfs_set_ef_status_free().
+ * But just in case, set the link list LSB pointer to 1
+ * and have eventfs_set_ef_status_free() check that to
+ * make sure that if it does happen, it will not think
+ * the d_fsdata is an event_file.
+ *
+ * For this to work, no event_file should be allocated
+ * on a odd space, as the ef should always be allocated
+ * to be at least word aligned. Check for that too.
+ */
+ WARN_ON_ONCE(ptr & 1);
+
+ ef->dentry->d_fsdata = (void *)(ptr | 1);
+ dentry_list = ef->dentry;
+ ef->dentry = NULL;
+ }
+ call_srcu(&eventfs_srcu, &ef->rcu, free_ef);
+ }
+ mutex_unlock(&eventfs_mutex);
+
+ while (dentry_list) {
+ unsigned long ptr;
+
+ dentry = dentry_list;
+ ptr = (unsigned long)dentry->d_fsdata & ~1UL;
+ dentry_list = (struct dentry *)ptr;
+ dentry->d_fsdata = NULL;
+ d_invalidate(dentry);
+ mutex_lock(&eventfs_mutex);
+ /* dentry should now have at least a single reference */
+ WARN_ONCE((int)d_count(dentry) < 1,
+ "dentry %p less than one reference (%d) after invalidate\n",
+ dentry, d_count(dentry));
+ mutex_unlock(&eventfs_mutex);
+ dput(dentry);
+ }
+}
+
+/**
+ * eventfs_remove_events_dir - remove eventfs dir or file from list
+ * @dentry: events's dentry to be removed.
+ *
+ * This function removes the events main directory
+ */
+void eventfs_remove_events_dir(struct dentry *dentry)
+{
+ struct tracefs_inode *ti;
+ struct eventfs_inode *ei;
+
+ if (!dentry || !dentry->d_inode)
+ return;
+
+ ti = get_tracefs(dentry->d_inode);
+ if (!ti || !(ti->flags & TRACEFS_EVENT_INODE))
+ return;
+
+ ei = ti->private;
+ d_invalidate(dentry);
+ dput(dentry);
+ kfree(ei);
+}
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 2feb6c58648c..de5b72216b1a 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -21,13 +21,33 @@
#include <linux/parser.h>
#include <linux/magic.h>
#include <linux/slab.h>
+#include "internal.h"
#define TRACEFS_DEFAULT_MODE 0700
+static struct kmem_cache *tracefs_inode_cachep __ro_after_init;
static struct vfsmount *tracefs_mount;
static int tracefs_mount_count;
static bool tracefs_registered;
+static struct inode *tracefs_alloc_inode(struct super_block *sb)
+{
+ struct tracefs_inode *ti;
+
+ ti = kmem_cache_alloc(tracefs_inode_cachep, GFP_KERNEL);
+ if (!ti)
+ return NULL;
+
+ ti->flags = 0;
+
+ return &ti->vfs_inode;
+}
+
+static void tracefs_free_inode(struct inode *inode)
+{
+ kmem_cache_free(tracefs_inode_cachep, get_tracefs(inode));
+}
+
static ssize_t default_read_file(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -127,7 +147,7 @@ static const struct inode_operations tracefs_dir_inode_operations = {
.rmdir = tracefs_syscall_rmdir,
};
-static struct inode *tracefs_get_inode(struct super_block *sb)
+struct inode *tracefs_get_inode(struct super_block *sb)
{
struct inode *inode = new_inode(sb);
if (inode) {
@@ -290,6 +310,7 @@ static int tracefs_apply_options(struct super_block *sb, bool remount)
struct tracefs_fs_info *fsi = sb->s_fs_info;
struct inode *inode = d_inode(sb->s_root);
struct tracefs_mount_opts *opts = &fsi->mount_opts;
+ umode_t tmp_mode;
/*
* On remount, only reset mode/uid/gid if they were provided as mount
@@ -297,8 +318,9 @@ static int tracefs_apply_options(struct super_block *sb, bool remount)
*/
if (!remount || opts->opts & BIT(Opt_mode)) {
- inode->i_mode &= ~S_IALLUGO;
- inode->i_mode |= opts->mode;
+ tmp_mode = READ_ONCE(inode->i_mode) & ~S_IALLUGO;
+ tmp_mode |= opts->mode;
+ WRITE_ONCE(inode->i_mode, tmp_mode);
}
if (!remount || opts->opts & BIT(Opt_uid))
@@ -346,11 +368,31 @@ static int tracefs_show_options(struct seq_file *m, struct dentry *root)
}
static const struct super_operations tracefs_super_operations = {
+ .alloc_inode = tracefs_alloc_inode,
+ .free_inode = tracefs_free_inode,
+ .drop_inode = generic_delete_inode,
.statfs = simple_statfs,
.remount_fs = tracefs_remount,
.show_options = tracefs_show_options,
};
+static void tracefs_dentry_iput(struct dentry *dentry, struct inode *inode)
+{
+ struct tracefs_inode *ti;
+
+ if (!dentry || !inode)
+ return;
+
+ ti = get_tracefs(inode);
+ if (ti && ti->flags & TRACEFS_EVENT_INODE)
+ eventfs_set_ef_status_free(dentry);
+ iput(inode);
+}
+
+static const struct dentry_operations tracefs_dentry_operations = {
+ .d_iput = tracefs_dentry_iput,
+};
+
static int trace_fill_super(struct super_block *sb, void *data, int silent)
{
static const struct tree_descr trace_files[] = {{""}};
@@ -373,6 +415,7 @@ static int trace_fill_super(struct super_block *sb, void *data, int silent)
goto fail;
sb->s_op = &tracefs_super_operations;
+ sb->s_d_op = &tracefs_dentry_operations;
tracefs_apply_options(sb, false);
@@ -399,7 +442,7 @@ static struct file_system_type trace_fs_type = {
};
MODULE_ALIAS_FS("tracefs");
-static struct dentry *start_creating(const char *name, struct dentry *parent)
+struct dentry *tracefs_start_creating(const char *name, struct dentry *parent)
{
struct dentry *dentry;
int error;
@@ -437,7 +480,7 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
return dentry;
}
-static struct dentry *failed_creating(struct dentry *dentry)
+struct dentry *tracefs_failed_creating(struct dentry *dentry)
{
inode_unlock(d_inode(dentry->d_parent));
dput(dentry);
@@ -445,13 +488,87 @@ static struct dentry *failed_creating(struct dentry *dentry)
return NULL;
}
-static struct dentry *end_creating(struct dentry *dentry)
+struct dentry *tracefs_end_creating(struct dentry *dentry)
{
inode_unlock(d_inode(dentry->d_parent));
return dentry;
}
/**
+ * eventfs_start_creating - start the process of creating a dentry
+ * @name: Name of the file created for the dentry
+ * @parent: The parent dentry where this dentry will be created
+ *
+ * This is a simple helper function for the dynamically created eventfs
+ * files. When the directory of the eventfs files is accessed, their
+ * dentries are created on the fly. This function is used to start that
+ * process.
+ */
+struct dentry *eventfs_start_creating(const char *name, struct dentry *parent)
+{
+ struct dentry *dentry;
+ int error;
+
+ error = simple_pin_fs(&trace_fs_type, &tracefs_mount,
+ &tracefs_mount_count);
+ if (error)
+ return ERR_PTR(error);
+
+ /*
+ * If the parent is not specified, we create it in the root.
+ * We need the root dentry to do this, which is in the super
+ * block. A pointer to that is in the struct vfsmount that we
+ * have around.
+ */
+ if (!parent)
+ parent = tracefs_mount->mnt_root;
+
+ if (unlikely(IS_DEADDIR(parent->d_inode)))
+ dentry = ERR_PTR(-ENOENT);
+ else
+ dentry = lookup_one_len(name, parent, strlen(name));
+
+ if (!IS_ERR(dentry) && dentry->d_inode) {
+ dput(dentry);
+ dentry = ERR_PTR(-EEXIST);
+ }
+
+ if (IS_ERR(dentry))
+ simple_release_fs(&tracefs_mount, &tracefs_mount_count);
+
+ return dentry;
+}
+
+/**
+ * eventfs_failed_creating - clean up a failed eventfs dentry creation
+ * @dentry: The dentry to clean up
+ *
+ * If after calling eventfs_start_creating(), a failure is detected, the
+ * resources created by eventfs_start_creating() needs to be cleaned up. In
+ * that case, this function should be called to perform that clean up.
+ */
+struct dentry *eventfs_failed_creating(struct dentry *dentry)
+{
+ dput(dentry);
+ simple_release_fs(&tracefs_mount, &tracefs_mount_count);
+ return NULL;
+}
+
+/**
+ * eventfs_end_creating - Finish the process of creating an eventfs dentry
+ * @dentry: The dentry that has successfully been created.
+ *
+ * This function is currently just a place holder to match
+ * eventfs_start_creating(). In case any synchronization needs to be added,
+ * this function will be used to implement that without having to modify
+ * the callers of eventfs_start_creating().
+ */
+struct dentry *eventfs_end_creating(struct dentry *dentry)
+{
+ return dentry;
+}
+
+/**
* tracefs_create_file - create a file in the tracefs filesystem
* @name: a pointer to a string containing the name of the file to create.
* @mode: the permission that the file should have.
@@ -490,14 +607,14 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
if (!(mode & S_IFMT))
mode |= S_IFREG;
BUG_ON(!S_ISREG(mode));
- dentry = start_creating(name, parent);
+ dentry = tracefs_start_creating(name, parent);
if (IS_ERR(dentry))
return NULL;
inode = tracefs_get_inode(dentry->d_sb);
if (unlikely(!inode))
- return failed_creating(dentry);
+ return tracefs_failed_creating(dentry);
inode->i_mode = mode;
inode->i_fop = fops ? fops : &tracefs_file_operations;
@@ -506,13 +623,13 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
inode->i_gid = d_inode(dentry->d_parent)->i_gid;
d_instantiate(dentry, inode);
fsnotify_create(d_inode(dentry->d_parent), dentry);
- return end_creating(dentry);
+ return tracefs_end_creating(dentry);
}
static struct dentry *__create_dir(const char *name, struct dentry *parent,
const struct inode_operations *ops)
{
- struct dentry *dentry = start_creating(name, parent);
+ struct dentry *dentry = tracefs_start_creating(name, parent);
struct inode *inode;
if (IS_ERR(dentry))
@@ -520,7 +637,7 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
inode = tracefs_get_inode(dentry->d_sb);
if (unlikely(!inode))
- return failed_creating(dentry);
+ return tracefs_failed_creating(dentry);
/* Do not set bits for OTH */
inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP;
@@ -534,7 +651,7 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
d_instantiate(dentry, inode);
inc_nlink(d_inode(dentry->d_parent));
fsnotify_mkdir(d_inode(dentry->d_parent), dentry);
- return end_creating(dentry);
+ return tracefs_end_creating(dentry);
}
/**
@@ -628,10 +745,26 @@ bool tracefs_initialized(void)
return tracefs_registered;
}
+static void init_once(void *foo)
+{
+ struct tracefs_inode *ti = (struct tracefs_inode *) foo;
+
+ inode_init_once(&ti->vfs_inode);
+}
+
static int __init tracefs_init(void)
{
int retval;
+ tracefs_inode_cachep = kmem_cache_create("tracefs_inode_cache",
+ sizeof(struct tracefs_inode),
+ 0, (SLAB_RECLAIM_ACCOUNT|
+ SLAB_MEM_SPREAD|
+ SLAB_ACCOUNT),
+ init_once);
+ if (!tracefs_inode_cachep)
+ return -ENOMEM;
+
retval = sysfs_create_mount_point(kernel_kobj, "tracing");
if (retval)
return -EINVAL;
diff --git a/fs/tracefs/internal.h b/fs/tracefs/internal.h
new file mode 100644
index 000000000000..69c2b1d87c46
--- /dev/null
+++ b/fs/tracefs/internal.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TRACEFS_INTERNAL_H
+#define _TRACEFS_INTERNAL_H
+
+enum {
+ TRACEFS_EVENT_INODE = BIT(1),
+};
+
+struct tracefs_inode {
+ unsigned long flags;
+ void *private;
+ struct inode vfs_inode;
+};
+
+static inline struct tracefs_inode *get_tracefs(const struct inode *inode)
+{
+ return container_of(inode, struct tracefs_inode, vfs_inode);
+}
+
+struct dentry *tracefs_start_creating(const char *name, struct dentry *parent);
+struct dentry *tracefs_end_creating(struct dentry *dentry);
+struct dentry *tracefs_failed_creating(struct dentry *dentry);
+struct inode *tracefs_get_inode(struct super_block *sb);
+struct dentry *eventfs_start_creating(const char *name, struct dentry *parent);
+struct dentry *eventfs_failed_creating(struct dentry *dentry);
+struct dentry *eventfs_end_creating(struct dentry *dentry);
+void eventfs_set_ef_status_free(struct dentry *dentry);
+
+#endif /* _TRACEFS_INTERNAL_H */
diff --git a/include/asm-generic/current.h b/include/asm-generic/current.h
index 3a2e224b9fa0..9c2aeecbd05a 100644
--- a/include/asm-generic/current.h
+++ b/include/asm-generic/current.h
@@ -2,9 +2,11 @@
#ifndef __ASM_GENERIC_CURRENT_H
#define __ASM_GENERIC_CURRENT_H
+#ifndef __ASSEMBLY__
#include <linux/thread_info.h>
#define get_current() (current_thread_info()->task)
#define current get_current()
+#endif
#endif /* __ASM_GENERIC_CURRENT_H */
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index f4e4cc4f965f..fdac4a1714ec 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -223,6 +223,7 @@ enum HV_GENERIC_SET_FORMAT {
#define HV_STATUS_INVALID_PORT_ID 17
#define HV_STATUS_INVALID_CONNECTION_ID 18
#define HV_STATUS_INSUFFICIENT_BUFFERS 19
+#define HV_STATUS_TIME_OUT 120
#define HV_STATUS_VTL_ALREADY_ENABLED 134
/*
diff --git a/include/asm-generic/ide_iops.h b/include/asm-generic/ide_iops.h
deleted file mode 100644
index 81dfa3ee5e06..000000000000
--- a/include/asm-generic/ide_iops.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Generic I/O and MEMIO string operations. */
-
-#define __ide_insw insw
-#define __ide_insl insl
-#define __ide_outsw outsw
-#define __ide_outsl outsl
-
-static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u16 *)addr = readw(port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u32 *)addr = readl(port);
- addr += 4;
- }
-}
-
-static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- writew(*(u16 *)addr, port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
-{
- while (count--) {
- writel(*(u32 *)addr, port);
- addr += 4;
- }
-}
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index a8f4b653ef4e..cecd2b7bd033 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -36,18 +36,25 @@ struct ms_hyperv_info {
u32 nested_features;
u32 max_vp_index;
u32 max_lp_index;
- u32 isolation_config_a;
+ union {
+ u32 isolation_config_a;
+ struct {
+ u32 paravisor_present : 1;
+ u32 reserved_a1 : 31;
+ };
+ };
union {
u32 isolation_config_b;
struct {
u32 cvm_type : 4;
- u32 reserved1 : 1;
+ u32 reserved_b1 : 1;
u32 shared_gpa_boundary_active : 1;
u32 shared_gpa_boundary_bits : 6;
- u32 reserved2 : 20;
+ u32 reserved_b2 : 20;
};
};
u64 shared_gpa_boundary;
+ u8 vtl;
};
extern struct ms_hyperv_info ms_hyperv;
extern bool hv_nested;
@@ -57,7 +64,8 @@ extern void * __percpu *hyperv_pcpu_output_arg;
extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
-extern bool hv_isolation_type_snp(void);
+bool hv_isolation_type_snp(void);
+bool hv_isolation_type_tdx(void);
/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
static inline int hv_result(u64 status)
@@ -274,6 +282,7 @@ enum hv_isolation_type hv_get_isolation_type(void);
bool hv_is_isolation_supported(void);
bool hv_isolation_type_snp(void);
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
+u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
void hv_setup_dma_ops(struct device *dev, bool coherent);
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index b4d43a4af5f7..51f8f3881523 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -80,9 +80,21 @@ static __always_inline bool should_resched(int preempt_offset)
#ifdef CONFIG_PREEMPTION
extern asmlinkage void preempt_schedule(void);
-#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
+
+#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+void dynamic_preempt_schedule(void);
+void dynamic_preempt_schedule_notrace(void);
+#define __preempt_schedule() dynamic_preempt_schedule()
+#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
+
+#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
+
+#define __preempt_schedule() preempt_schedule()
#define __preempt_schedule_notrace() preempt_schedule_notrace()
+
+#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_KEY*/
#endif /* CONFIG_PREEMPTION */
#endif /* __ASM_PREEMPT_H */
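
The preempt.h hunk routes __preempt_schedule() through dynamic_preempt_schedule() when both CONFIG_PREEMPT_DYNAMIC and CONFIG_HAVE_PREEMPT_DYNAMIC_KEY are set, so the preemption model can be switched at boot without rebuilding. The dynamic entry points are gated by static keys; roughly the following shape (a sketch of the idea, not the exact kernel/sched/core.c code):

    /* Sketch: a static key decides at run time whether preemption is "full". */
    DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);

    void dynamic_preempt_schedule(void)
    {
            if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
                    return;                 /* preempt=none/voluntary: early out */
            preempt_schedule();             /* preempt=full: real preemption */
    }

Flipping the key (static_key_enable()/static_key_disable()) patches the branch in place, which is why the header can unconditionally point __preempt_schedule() at the dynamic variant.
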
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
index 09fd169ad18e..5d98f7d48a1e 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
@@ -6,58 +6,60 @@
#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H
#define _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PM8350_ADC for PMIC7 */
-#define PM8350_ADC7_REF_GND(sid) ((sid) << 8 | 0x0)
-#define PM8350_ADC7_1P25VREF(sid) ((sid) << 8 | 0x01)
-#define PM8350_ADC7_VREF_VADC(sid) ((sid) << 8 | 0x02)
-#define PM8350_ADC7_DIE_TEMP(sid) ((sid) << 8 | 0x03)
-
-#define PM8350_ADC7_AMUX_THM1(sid) ((sid) << 8 | 0x04)
-#define PM8350_ADC7_AMUX_THM2(sid) ((sid) << 8 | 0x05)
-#define PM8350_ADC7_AMUX_THM3(sid) ((sid) << 8 | 0x06)
-#define PM8350_ADC7_AMUX_THM4(sid) ((sid) << 8 | 0x07)
-#define PM8350_ADC7_AMUX_THM5(sid) ((sid) << 8 | 0x08)
-#define PM8350_ADC7_GPIO1(sid) ((sid) << 8 | 0x0a)
-#define PM8350_ADC7_GPIO2(sid) ((sid) << 8 | 0x0b)
-#define PM8350_ADC7_GPIO3(sid) ((sid) << 8 | 0x0c)
-#define PM8350_ADC7_GPIO4(sid) ((sid) << 8 | 0x0d)
+#define PM8350_ADC7_REF_GND(sid) ((sid) << 8 | ADC7_REF_GND)
+#define PM8350_ADC7_1P25VREF(sid) ((sid) << 8 | ADC7_1P25VREF)
+#define PM8350_ADC7_VREF_VADC(sid) ((sid) << 8 | ADC7_VREF_VADC)
+#define PM8350_ADC7_DIE_TEMP(sid) ((sid) << 8 | ADC7_DIE_TEMP)
+
+#define PM8350_ADC7_AMUX_THM1(sid) ((sid) << 8 | ADC7_AMUX_THM1)
+#define PM8350_ADC7_AMUX_THM2(sid) ((sid) << 8 | ADC7_AMUX_THM2)
+#define PM8350_ADC7_AMUX_THM3(sid) ((sid) << 8 | ADC7_AMUX_THM3)
+#define PM8350_ADC7_AMUX_THM4(sid) ((sid) << 8 | ADC7_AMUX_THM4)
+#define PM8350_ADC7_AMUX_THM5(sid) ((sid) << 8 | ADC7_AMUX_THM5)
+#define PM8350_ADC7_GPIO1(sid) ((sid) << 8 | ADC7_GPIO1)
+#define PM8350_ADC7_GPIO2(sid) ((sid) << 8 | ADC7_GPIO2)
+#define PM8350_ADC7_GPIO3(sid) ((sid) << 8 | ADC7_GPIO3)
+#define PM8350_ADC7_GPIO4(sid) ((sid) << 8 | ADC7_GPIO4)
/* 30k pull-up1 */
-#define PM8350_ADC7_AMUX_THM1_30K_PU(sid) ((sid) << 8 | 0x24)
-#define PM8350_ADC7_AMUX_THM2_30K_PU(sid) ((sid) << 8 | 0x25)
-#define PM8350_ADC7_AMUX_THM3_30K_PU(sid) ((sid) << 8 | 0x26)
-#define PM8350_ADC7_AMUX_THM4_30K_PU(sid) ((sid) << 8 | 0x27)
-#define PM8350_ADC7_AMUX_THM5_30K_PU(sid) ((sid) << 8 | 0x28)
-#define PM8350_ADC7_GPIO1_30K_PU(sid) ((sid) << 8 | 0x2a)
-#define PM8350_ADC7_GPIO2_30K_PU(sid) ((sid) << 8 | 0x2b)
-#define PM8350_ADC7_GPIO3_30K_PU(sid) ((sid) << 8 | 0x2c)
-#define PM8350_ADC7_GPIO4_30K_PU(sid) ((sid) << 8 | 0x2d)
+#define PM8350_ADC7_AMUX_THM1_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PM8350_ADC7_AMUX_THM2_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PM8350_ADC7_AMUX_THM3_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PM8350_ADC7_AMUX_THM4_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PM8350_ADC7_AMUX_THM5_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM5_30K_PU)
+#define PM8350_ADC7_GPIO1_30K_PU(sid) ((sid) << 8 | ADC7_GPIO1_30K_PU)
+#define PM8350_ADC7_GPIO2_30K_PU(sid) ((sid) << 8 | ADC7_GPIO2_30K_PU)
+#define PM8350_ADC7_GPIO3_30K_PU(sid) ((sid) << 8 | ADC7_GPIO3_30K_PU)
+#define PM8350_ADC7_GPIO4_30K_PU(sid) ((sid) << 8 | ADC7_GPIO4_30K_PU)
/* 100k pull-up2 */
-#define PM8350_ADC7_AMUX_THM1_100K_PU(sid) ((sid) << 8 | 0x44)
-#define PM8350_ADC7_AMUX_THM2_100K_PU(sid) ((sid) << 8 | 0x45)
-#define PM8350_ADC7_AMUX_THM3_100K_PU(sid) ((sid) << 8 | 0x46)
-#define PM8350_ADC7_AMUX_THM4_100K_PU(sid) ((sid) << 8 | 0x47)
-#define PM8350_ADC7_AMUX_THM5_100K_PU(sid) ((sid) << 8 | 0x48)
-#define PM8350_ADC7_GPIO1_100K_PU(sid) ((sid) << 8 | 0x4a)
-#define PM8350_ADC7_GPIO2_100K_PU(sid) ((sid) << 8 | 0x4b)
-#define PM8350_ADC7_GPIO3_100K_PU(sid) ((sid) << 8 | 0x4c)
-#define PM8350_ADC7_GPIO4_100K_PU(sid) ((sid) << 8 | 0x4d)
+#define PM8350_ADC7_AMUX_THM1_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PM8350_ADC7_AMUX_THM2_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PM8350_ADC7_AMUX_THM3_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PM8350_ADC7_AMUX_THM4_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PM8350_ADC7_AMUX_THM5_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM5_100K_PU)
+#define PM8350_ADC7_GPIO1_100K_PU(sid) ((sid) << 8 | ADC7_GPIO1_100K_PU)
+#define PM8350_ADC7_GPIO2_100K_PU(sid) ((sid) << 8 | ADC7_GPIO2_100K_PU)
+#define PM8350_ADC7_GPIO3_100K_PU(sid) ((sid) << 8 | ADC7_GPIO3_100K_PU)
+#define PM8350_ADC7_GPIO4_100K_PU(sid) ((sid) << 8 | ADC7_GPIO4_100K_PU)
/* 400k pull-up3 */
-#define PM8350_ADC7_AMUX_THM1_400K_PU(sid) ((sid) << 8 | 0x64)
-#define PM8350_ADC7_AMUX_THM2_400K_PU(sid) ((sid) << 8 | 0x65)
-#define PM8350_ADC7_AMUX_THM3_400K_PU(sid) ((sid) << 8 | 0x66)
-#define PM8350_ADC7_AMUX_THM4_400K_PU(sid) ((sid) << 8 | 0x67)
-#define PM8350_ADC7_AMUX_THM5_400K_PU(sid) ((sid) << 8 | 0x68)
-#define PM8350_ADC7_GPIO1_400K_PU(sid) ((sid) << 8 | 0x6a)
-#define PM8350_ADC7_GPIO2_400K_PU(sid) ((sid) << 8 | 0x6b)
-#define PM8350_ADC7_GPIO3_400K_PU(sid) ((sid) << 8 | 0x6c)
-#define PM8350_ADC7_GPIO4_400K_PU(sid) ((sid) << 8 | 0x6d)
+#define PM8350_ADC7_AMUX_THM1_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PM8350_ADC7_AMUX_THM2_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PM8350_ADC7_AMUX_THM3_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PM8350_ADC7_AMUX_THM4_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PM8350_ADC7_AMUX_THM5_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM5_400K_PU)
+#define PM8350_ADC7_GPIO1_400K_PU(sid) ((sid) << 8 | ADC7_GPIO1_400K_PU)
+#define PM8350_ADC7_GPIO2_400K_PU(sid) ((sid) << 8 | ADC7_GPIO2_400K_PU)
+#define PM8350_ADC7_GPIO3_400K_PU(sid) ((sid) << 8 | ADC7_GPIO3_400K_PU)
+#define PM8350_ADC7_GPIO4_400K_PU(sid) ((sid) << 8 | ADC7_GPIO4_400K_PU)
/* 1/3 Divider */
-#define PM8350_ADC7_GPIO4_DIV3(sid) ((sid) << 8 | 0x8d)
+#define PM8350_ADC7_GPIO4_DIV3(sid) ((sid) << 8 | ADC7_GPIO4_DIV3)
-#define PM8350_ADC7_VPH_PWR(sid) ((sid) << 8 | 0x8e)
+#define PM8350_ADC7_VPH_PWR(sid) ((sid) << 8 | ADC7_VPH_PWR)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H */
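
The pm8350 hunk above (and the pm8350b/pmk8350/pmr735a/pmr735b hunks that follow) replaces raw hex channel numbers with the symbolic ADC7_* channel IDs from <dt-bindings/iio/qcom,spmi-vadc.h>; the per-PMIC macros still place the PMIC's SID in the upper byte. A short sketch of the composition, where the EXAMPLE_* helpers are illustrative and not part of the binding:

    #include <dt-bindings/iio/qcom,spmi-adc7-pm8350.h>

    /* PM8350_ADC7_AMUX_THM1(1) expands to (1 << 8) | ADC7_AMUX_THM1,
     * i.e. SID 1 in the high byte and channel 0x04 in the low byte. */
    #define EXAMPLE_VADC_SID(chan)      (((chan) >> 8) & 0xff)
    #define EXAMPLE_VADC_CHANNEL(chan)  ((chan) & 0xff)
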
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
index dc2497c27e16..57c7977666d3 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
@@ -10,79 +10,81 @@
#define PM8350B_SID 3
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PM8350B_ADC for PMIC7 */
-#define PM8350B_ADC7_REF_GND (PM8350B_SID << 8 | 0x0)
-#define PM8350B_ADC7_1P25VREF (PM8350B_SID << 8 | 0x01)
-#define PM8350B_ADC7_VREF_VADC (PM8350B_SID << 8 | 0x02)
-#define PM8350B_ADC7_DIE_TEMP (PM8350B_SID << 8 | 0x03)
+#define PM8350B_ADC7_REF_GND (PM8350B_SID << 8 | ADC7_REF_GND)
+#define PM8350B_ADC7_1P25VREF (PM8350B_SID << 8 | ADC7_1P25VREF)
+#define PM8350B_ADC7_VREF_VADC (PM8350B_SID << 8 | ADC7_VREF_VADC)
+#define PM8350B_ADC7_DIE_TEMP (PM8350B_SID << 8 | ADC7_DIE_TEMP)
-#define PM8350B_ADC7_AMUX_THM1 (PM8350B_SID << 8 | 0x04)
-#define PM8350B_ADC7_AMUX_THM2 (PM8350B_SID << 8 | 0x05)
-#define PM8350B_ADC7_AMUX_THM3 (PM8350B_SID << 8 | 0x06)
-#define PM8350B_ADC7_AMUX_THM4 (PM8350B_SID << 8 | 0x07)
-#define PM8350B_ADC7_AMUX_THM5 (PM8350B_SID << 8 | 0x08)
-#define PM8350B_ADC7_AMUX_THM6 (PM8350B_SID << 8 | 0x09)
-#define PM8350B_ADC7_GPIO1 (PM8350B_SID << 8 | 0x0a)
-#define PM8350B_ADC7_GPIO2 (PM8350B_SID << 8 | 0x0b)
-#define PM8350B_ADC7_GPIO3 (PM8350B_SID << 8 | 0x0c)
-#define PM8350B_ADC7_GPIO4 (PM8350B_SID << 8 | 0x0d)
+#define PM8350B_ADC7_AMUX_THM1 (PM8350B_SID << 8 | ADC7_AMUX_THM1)
+#define PM8350B_ADC7_AMUX_THM2 (PM8350B_SID << 8 | ADC7_AMUX_THM2)
+#define PM8350B_ADC7_AMUX_THM3 (PM8350B_SID << 8 | ADC7_AMUX_THM3)
+#define PM8350B_ADC7_AMUX_THM4 (PM8350B_SID << 8 | ADC7_AMUX_THM4)
+#define PM8350B_ADC7_AMUX_THM5 (PM8350B_SID << 8 | ADC7_AMUX_THM5)
+#define PM8350B_ADC7_AMUX_THM6 (PM8350B_SID << 8 | ADC7_AMUX_THM6)
+#define PM8350B_ADC7_GPIO1 (PM8350B_SID << 8 | ADC7_GPIO1)
+#define PM8350B_ADC7_GPIO2 (PM8350B_SID << 8 | ADC7_GPIO2)
+#define PM8350B_ADC7_GPIO3 (PM8350B_SID << 8 | ADC7_GPIO3)
+#define PM8350B_ADC7_GPIO4 (PM8350B_SID << 8 | ADC7_GPIO4)
-#define PM8350B_ADC7_CHG_TEMP (PM8350B_SID << 8 | 0x10)
-#define PM8350B_ADC7_USB_IN_V_16 (PM8350B_SID << 8 | 0x11)
-#define PM8350B_ADC7_VDC_16 (PM8350B_SID << 8 | 0x12)
-#define PM8350B_ADC7_CC1_ID (PM8350B_SID << 8 | 0x13)
-#define PM8350B_ADC7_VREF_BAT_THERM (PM8350B_SID << 8 | 0x15)
-#define PM8350B_ADC7_IIN_FB (PM8350B_SID << 8 | 0x17)
+#define PM8350B_ADC7_CHG_TEMP (PM8350B_SID << 8 | ADC7_CHG_TEMP)
+#define PM8350B_ADC7_USB_IN_V_16 (PM8350B_SID << 8 | ADC7_USB_IN_V_16)
+#define PM8350B_ADC7_VDC_16 (PM8350B_SID << 8 | ADC7_VDC_16)
+#define PM8350B_ADC7_CC1_ID (PM8350B_SID << 8 | ADC7_CC1_ID)
+#define PM8350B_ADC7_VREF_BAT_THERM (PM8350B_SID << 8 | ADC7_VREF_BAT_THERM)
+#define PM8350B_ADC7_IIN_FB (PM8350B_SID << 8 | ADC7_IIN_FB)
/* 30k pull-up1 */
-#define PM8350B_ADC7_AMUX_THM1_30K_PU (PM8350B_SID << 8 | 0x24)
-#define PM8350B_ADC7_AMUX_THM2_30K_PU (PM8350B_SID << 8 | 0x25)
-#define PM8350B_ADC7_AMUX_THM3_30K_PU (PM8350B_SID << 8 | 0x26)
-#define PM8350B_ADC7_AMUX_THM4_30K_PU (PM8350B_SID << 8 | 0x27)
-#define PM8350B_ADC7_AMUX_THM5_30K_PU (PM8350B_SID << 8 | 0x28)
-#define PM8350B_ADC7_AMUX_THM6_30K_PU (PM8350B_SID << 8 | 0x29)
-#define PM8350B_ADC7_GPIO1_30K_PU (PM8350B_SID << 8 | 0x2a)
-#define PM8350B_ADC7_GPIO2_30K_PU (PM8350B_SID << 8 | 0x2b)
-#define PM8350B_ADC7_GPIO3_30K_PU (PM8350B_SID << 8 | 0x2c)
-#define PM8350B_ADC7_GPIO4_30K_PU (PM8350B_SID << 8 | 0x2d)
-#define PM8350B_ADC7_CC1_ID_30K_PU (PM8350B_SID << 8 | 0x33)
+#define PM8350B_ADC7_AMUX_THM1_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PM8350B_ADC7_AMUX_THM2_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PM8350B_ADC7_AMUX_THM3_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PM8350B_ADC7_AMUX_THM4_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PM8350B_ADC7_AMUX_THM5_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM5_30K_PU)
+#define PM8350B_ADC7_AMUX_THM6_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM6_30K_PU)
+#define PM8350B_ADC7_GPIO1_30K_PU (PM8350B_SID << 8 | ADC7_GPIO1_30K_PU)
+#define PM8350B_ADC7_GPIO2_30K_PU (PM8350B_SID << 8 | ADC7_GPIO2_30K_PU)
+#define PM8350B_ADC7_GPIO3_30K_PU (PM8350B_SID << 8 | ADC7_GPIO3_30K_PU)
+#define PM8350B_ADC7_GPIO4_30K_PU (PM8350B_SID << 8 | ADC7_GPIO4_30K_PU)
+#define PM8350B_ADC7_CC1_ID_30K_PU (PM8350B_SID << 8 | ADC7_CC1_ID_30K_PU)
/* 100k pull-up2 */
-#define PM8350B_ADC7_AMUX_THM1_100K_PU (PM8350B_SID << 8 | 0x44)
-#define PM8350B_ADC7_AMUX_THM2_100K_PU (PM8350B_SID << 8 | 0x45)
-#define PM8350B_ADC7_AMUX_THM3_100K_PU (PM8350B_SID << 8 | 0x46)
-#define PM8350B_ADC7_AMUX_THM4_100K_PU (PM8350B_SID << 8 | 0x47)
-#define PM8350B_ADC7_AMUX_THM5_100K_PU (PM8350B_SID << 8 | 0x48)
-#define PM8350B_ADC7_AMUX_THM6_100K_PU (PM8350B_SID << 8 | 0x49)
-#define PM8350B_ADC7_GPIO1_100K_PU (PM8350B_SID << 8 | 0x4a)
-#define PM8350B_ADC7_GPIO2_100K_PU (PM8350B_SID << 8 | 0x4b)
-#define PM8350B_ADC7_GPIO3_100K_PU (PM8350B_SID << 8 | 0x4c)
-#define PM8350B_ADC7_GPIO4_100K_PU (PM8350B_SID << 8 | 0x4d)
-#define PM8350B_ADC7_CC1_ID_100K_PU (PM8350B_SID << 8 | 0x53)
+#define PM8350B_ADC7_AMUX_THM1_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PM8350B_ADC7_AMUX_THM2_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PM8350B_ADC7_AMUX_THM3_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PM8350B_ADC7_AMUX_THM4_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PM8350B_ADC7_AMUX_THM5_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM5_100K_PU)
+#define PM8350B_ADC7_AMUX_THM6_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM6_100K_PU)
+#define PM8350B_ADC7_GPIO1_100K_PU (PM8350B_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PM8350B_ADC7_GPIO2_100K_PU (PM8350B_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PM8350B_ADC7_GPIO3_100K_PU (PM8350B_SID << 8 | ADC7_GPIO3_100K_PU)
+#define PM8350B_ADC7_GPIO4_100K_PU (PM8350B_SID << 8 | ADC7_GPIO4_100K_PU)
+#define PM8350B_ADC7_CC1_ID_100K_PU (PM8350B_SID << 8 | ADC7_CC1_ID_100K_PU)
/* 400k pull-up3 */
-#define PM8350B_ADC7_AMUX_THM1_400K_PU (PM8350B_SID << 8 | 0x64)
-#define PM8350B_ADC7_AMUX_THM2_400K_PU (PM8350B_SID << 8 | 0x65)
-#define PM8350B_ADC7_AMUX_THM3_400K_PU (PM8350B_SID << 8 | 0x66)
-#define PM8350B_ADC7_AMUX_THM4_400K_PU (PM8350B_SID << 8 | 0x67)
-#define PM8350B_ADC7_AMUX_THM5_400K_PU (PM8350B_SID << 8 | 0x68)
-#define PM8350B_ADC7_AMUX_THM6_400K_PU (PM8350B_SID << 8 | 0x69)
-#define PM8350B_ADC7_GPIO1_400K_PU (PM8350B_SID << 8 | 0x6a)
-#define PM8350B_ADC7_GPIO2_400K_PU (PM8350B_SID << 8 | 0x6b)
-#define PM8350B_ADC7_GPIO3_400K_PU (PM8350B_SID << 8 | 0x6c)
-#define PM8350B_ADC7_GPIO4_400K_PU (PM8350B_SID << 8 | 0x6d)
-#define PM8350B_ADC7_CC1_ID_400K_PU (PM8350B_SID << 8 | 0x73)
+#define PM8350B_ADC7_AMUX_THM1_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PM8350B_ADC7_AMUX_THM2_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PM8350B_ADC7_AMUX_THM3_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PM8350B_ADC7_AMUX_THM4_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PM8350B_ADC7_AMUX_THM5_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM5_400K_PU)
+#define PM8350B_ADC7_AMUX_THM6_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM6_400K_PU)
+#define PM8350B_ADC7_GPIO1_400K_PU (PM8350B_SID << 8 | ADC7_GPIO1_400K_PU)
+#define PM8350B_ADC7_GPIO2_400K_PU (PM8350B_SID << 8 | ADC7_GPIO2_400K_PU)
+#define PM8350B_ADC7_GPIO3_400K_PU (PM8350B_SID << 8 | ADC7_GPIO3_400K_PU)
+#define PM8350B_ADC7_GPIO4_400K_PU (PM8350B_SID << 8 | ADC7_GPIO4_400K_PU)
+#define PM8350B_ADC7_CC1_ID_400K_PU (PM8350B_SID << 8 | ADC7_CC1_ID_400K_PU)
/* 1/3 Divider */
-#define PM8350B_ADC7_GPIO1_DIV3 (PM8350B_SID << 8 | 0x8a)
-#define PM8350B_ADC7_GPIO2_DIV3 (PM8350B_SID << 8 | 0x8b)
-#define PM8350B_ADC7_GPIO3_DIV3 (PM8350B_SID << 8 | 0x8c)
-#define PM8350B_ADC7_GPIO4_DIV3 (PM8350B_SID << 8 | 0x8d)
+#define PM8350B_ADC7_GPIO1_DIV3 (PM8350B_SID << 8 | ADC7_GPIO1_DIV3)
+#define PM8350B_ADC7_GPIO2_DIV3 (PM8350B_SID << 8 | ADC7_GPIO2_DIV3)
+#define PM8350B_ADC7_GPIO3_DIV3 (PM8350B_SID << 8 | ADC7_GPIO3_DIV3)
+#define PM8350B_ADC7_GPIO4_DIV3 (PM8350B_SID << 8 | ADC7_GPIO4_DIV3)
-#define PM8350B_ADC7_VPH_PWR (PM8350B_SID << 8 | 0x8e)
-#define PM8350B_ADC7_VBAT_SNS (PM8350B_SID << 8 | 0x8f)
+#define PM8350B_ADC7_VPH_PWR (PM8350B_SID << 8 | ADC7_VPH_PWR)
+#define PM8350B_ADC7_VBAT_SNS (PM8350B_SID << 8 | ADC7_VBAT_SNS)
-#define PM8350B_ADC7_SBUx (PM8350B_SID << 8 | 0x94)
-#define PM8350B_ADC7_VBAT_2S_MID (PM8350B_SID << 8 | 0x96)
+#define PM8350B_ADC7_SBUx (PM8350B_SID << 8 | ADC7_SBU)
+#define PM8350B_ADC7_VBAT_2S_MID (PM8350B_SID << 8 | ADC7_VBAT_2S_MID)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
index 6c296870e95b..3d1a41a22cef 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
@@ -10,37 +10,39 @@
#define PMK8350_SID 0
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PMK8350_ADC for PMIC7 */
-#define PMK8350_ADC7_REF_GND (PMK8350_SID << 8 | 0x0)
-#define PMK8350_ADC7_1P25VREF (PMK8350_SID << 8 | 0x01)
-#define PMK8350_ADC7_VREF_VADC (PMK8350_SID << 8 | 0x02)
-#define PMK8350_ADC7_DIE_TEMP (PMK8350_SID << 8 | 0x03)
+#define PMK8350_ADC7_REF_GND (PMK8350_SID << 8 | ADC7_REF_GND)
+#define PMK8350_ADC7_1P25VREF (PMK8350_SID << 8 | ADC7_1P25VREF)
+#define PMK8350_ADC7_VREF_VADC (PMK8350_SID << 8 | ADC7_VREF_VADC)
+#define PMK8350_ADC7_DIE_TEMP (PMK8350_SID << 8 | ADC7_DIE_TEMP)
-#define PMK8350_ADC7_AMUX_THM1 (PMK8350_SID << 8 | 0x04)
-#define PMK8350_ADC7_AMUX_THM2 (PMK8350_SID << 8 | 0x05)
-#define PMK8350_ADC7_AMUX_THM3 (PMK8350_SID << 8 | 0x06)
-#define PMK8350_ADC7_AMUX_THM4 (PMK8350_SID << 8 | 0x07)
-#define PMK8350_ADC7_AMUX_THM5 (PMK8350_SID << 8 | 0x08)
+#define PMK8350_ADC7_AMUX_THM1 (PMK8350_SID << 8 | ADC7_AMUX_THM1)
+#define PMK8350_ADC7_AMUX_THM2 (PMK8350_SID << 8 | ADC7_AMUX_THM2)
+#define PMK8350_ADC7_AMUX_THM3 (PMK8350_SID << 8 | ADC7_AMUX_THM3)
+#define PMK8350_ADC7_AMUX_THM4 (PMK8350_SID << 8 | ADC7_AMUX_THM4)
+#define PMK8350_ADC7_AMUX_THM5 (PMK8350_SID << 8 | ADC7_AMUX_THM5)
/* 30k pull-up1 */
-#define PMK8350_ADC7_AMUX_THM1_30K_PU (PMK8350_SID << 8 | 0x24)
-#define PMK8350_ADC7_AMUX_THM2_30K_PU (PMK8350_SID << 8 | 0x25)
-#define PMK8350_ADC7_AMUX_THM3_30K_PU (PMK8350_SID << 8 | 0x26)
-#define PMK8350_ADC7_AMUX_THM4_30K_PU (PMK8350_SID << 8 | 0x27)
-#define PMK8350_ADC7_AMUX_THM5_30K_PU (PMK8350_SID << 8 | 0x28)
+#define PMK8350_ADC7_AMUX_THM1_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PMK8350_ADC7_AMUX_THM2_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PMK8350_ADC7_AMUX_THM3_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PMK8350_ADC7_AMUX_THM4_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PMK8350_ADC7_AMUX_THM5_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM5_30K_PU)
/* 100k pull-up2 */
-#define PMK8350_ADC7_AMUX_THM1_100K_PU (PMK8350_SID << 8 | 0x44)
-#define PMK8350_ADC7_AMUX_THM2_100K_PU (PMK8350_SID << 8 | 0x45)
-#define PMK8350_ADC7_AMUX_THM3_100K_PU (PMK8350_SID << 8 | 0x46)
-#define PMK8350_ADC7_AMUX_THM4_100K_PU (PMK8350_SID << 8 | 0x47)
-#define PMK8350_ADC7_AMUX_THM5_100K_PU (PMK8350_SID << 8 | 0x48)
+#define PMK8350_ADC7_AMUX_THM1_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PMK8350_ADC7_AMUX_THM2_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PMK8350_ADC7_AMUX_THM3_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PMK8350_ADC7_AMUX_THM4_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PMK8350_ADC7_AMUX_THM5_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM5_100K_PU)
/* 400k pull-up3 */
-#define PMK8350_ADC7_AMUX_THM1_400K_PU (PMK8350_SID << 8 | 0x64)
-#define PMK8350_ADC7_AMUX_THM2_400K_PU (PMK8350_SID << 8 | 0x65)
-#define PMK8350_ADC7_AMUX_THM3_400K_PU (PMK8350_SID << 8 | 0x66)
-#define PMK8350_ADC7_AMUX_THM4_400K_PU (PMK8350_SID << 8 | 0x67)
-#define PMK8350_ADC7_AMUX_THM5_400K_PU (PMK8350_SID << 8 | 0x68)
+#define PMK8350_ADC7_AMUX_THM1_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PMK8350_ADC7_AMUX_THM2_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PMK8350_ADC7_AMUX_THM3_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PMK8350_ADC7_AMUX_THM4_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PMK8350_ADC7_AMUX_THM5_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM5_400K_PU)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
index d6df1b19e5ff..c5adfa82b20d 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
@@ -10,19 +10,21 @@
#define PMR735A_SID 4
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PMR735A_ADC for PMIC7 */
-#define PMR735A_ADC7_REF_GND (PMR735A_SID << 8 | 0x0)
-#define PMR735A_ADC7_1P25VREF (PMR735A_SID << 8 | 0x01)
-#define PMR735A_ADC7_VREF_VADC (PMR735A_SID << 8 | 0x02)
-#define PMR735A_ADC7_DIE_TEMP (PMR735A_SID << 8 | 0x03)
+#define PMR735A_ADC7_REF_GND (PMR735A_SID << 8 | ADC7_REF_GND)
+#define PMR735A_ADC7_1P25VREF (PMR735A_SID << 8 | ADC7_1P25VREF)
+#define PMR735A_ADC7_VREF_VADC (PMR735A_SID << 8 | ADC7_VREF_VADC)
+#define PMR735A_ADC7_DIE_TEMP (PMR735A_SID << 8 | ADC7_DIE_TEMP)
-#define PMR735A_ADC7_GPIO1 (PMR735A_SID << 8 | 0x0a)
-#define PMR735A_ADC7_GPIO2 (PMR735A_SID << 8 | 0x0b)
-#define PMR735A_ADC7_GPIO3 (PMR735A_SID << 8 | 0x0c)
+#define PMR735A_ADC7_GPIO1 (PMR735A_SID << 8 | ADC7_GPIO1)
+#define PMR735A_ADC7_GPIO2 (PMR735A_SID << 8 | ADC7_GPIO2)
+#define PMR735A_ADC7_GPIO3 (PMR735A_SID << 8 | ADC7_GPIO3)
/* 100k pull-up2 */
-#define PMR735A_ADC7_GPIO1_100K_PU (PMR735A_SID << 8 | 0x4a)
-#define PMR735A_ADC7_GPIO2_100K_PU (PMR735A_SID << 8 | 0x4b)
-#define PMR735A_ADC7_GPIO3_100K_PU (PMR735A_SID << 8 | 0x4c)
+#define PMR735A_ADC7_GPIO1_100K_PU (PMR735A_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PMR735A_ADC7_GPIO2_100K_PU (PMR735A_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PMR735A_ADC7_GPIO3_100K_PU (PMR735A_SID << 8 | ADC7_GPIO3_100K_PU)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
index 8da0e7dab315..fdb8dd9ae541 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
@@ -10,19 +10,21 @@
#define PMR735B_SID 5
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PMR735B_ADC for PMIC7 */
-#define PMR735B_ADC7_REF_GND (PMR735B_SID << 8 | 0x0)
-#define PMR735B_ADC7_1P25VREF (PMR735B_SID << 8 | 0x01)
-#define PMR735B_ADC7_VREF_VADC (PMR735B_SID << 8 | 0x02)
-#define PMR735B_ADC7_DIE_TEMP (PMR735B_SID << 8 | 0x03)
+#define PMR735B_ADC7_REF_GND (PMR735B_SID << 8 | ADC7_REF_GND)
+#define PMR735B_ADC7_1P25VREF (PMR735B_SID << 8 | ADC7_1P25VREF)
+#define PMR735B_ADC7_VREF_VADC (PMR735B_SID << 8 | ADC7_VREF_VADC)
+#define PMR735B_ADC7_DIE_TEMP (PMR735B_SID << 8 | ADC7_DIE_TEMP)
-#define PMR735B_ADC7_GPIO1 (PMR735B_SID << 8 | 0x0a)
-#define PMR735B_ADC7_GPIO2 (PMR735B_SID << 8 | 0x0b)
-#define PMR735B_ADC7_GPIO3 (PMR735B_SID << 8 | 0x0c)
+#define PMR735B_ADC7_GPIO1 (PMR735B_SID << 8 | ADC7_GPIO1)
+#define PMR735B_ADC7_GPIO2 (PMR735B_SID << 8 | ADC7_GPIO2)
+#define PMR735B_ADC7_GPIO3 (PMR735B_SID << 8 | ADC7_GPIO3)
/* 100k pull-up2 */
-#define PMR735B_ADC7_GPIO1_100K_PU (PMR735B_SID << 8 | 0x4a)
-#define PMR735B_ADC7_GPIO2_100K_PU (PMR735B_SID << 8 | 0x4b)
-#define PMR735B_ADC7_GPIO3_100K_PU (PMR735B_SID << 8 | 0x4c)
+#define PMR735B_ADC7_GPIO1_100K_PU (PMR735B_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PMR735B_ADC7_GPIO2_100K_PU (PMR735B_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PMR735B_ADC7_GPIO3_100K_PU (PMR735B_SID << 8 | ADC7_GPIO3_100K_PU)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H */
diff --git a/include/dt-bindings/interconnect/qcom,sm8250.h b/include/dt-bindings/interconnect/qcom,sm8250.h
index a4af5cc19271..2a656c02df4b 100644
--- a/include/dt-bindings/interconnect/qcom,sm8250.h
+++ b/include/dt-bindings/interconnect/qcom,sm8250.h
@@ -166,4 +166,11 @@
#define SLAVE_QDSS_STM 17
#define SLAVE_TCU 18
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
#endif
diff --git a/include/dt-bindings/memory/mediatek,mt8188-memory-port.h b/include/dt-bindings/memory/mediatek,mt8188-memory-port.h
new file mode 100644
index 000000000000..337ab11262af
--- /dev/null
+++ b/include/dt-bindings/memory/mediatek,mt8188-memory-port.h
@@ -0,0 +1,489 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chengci Xu <chengci.xu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MEDIATEK_MT8188_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MEDIATEK_MT8188_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * MM IOMMU larbs:
+ * As seen below, some larbs are split (larb11, for example, has
+ * larb11a/larb11b/larb11c), so the hardware larb indexes are not contiguous.
+ * We therefore reindex the larbs from a software point of view.
+ */
+#define SMI_L0_ID 0
+#define SMI_L1_ID 1
+#define SMI_L2_ID 2
+#define SMI_L3_ID 3
+#define SMI_L4_ID 4
+#define SMI_L5_ID 5
+#define SMI_L6_ID 6
+#define SMI_L7_ID 7
+#define SMI_L9_ID 8
+#define SMI_L10_ID 9
+#define SMI_L11A_ID 10
+#define SMI_L11B_ID 11
+#define SMI_L11C_ID 12
+#define SMI_L12_ID 13
+#define SMI_L13_ID 14
+#define SMI_L14_ID 15
+#define SMI_L15_ID 16
+#define SMI_L16A_ID 17
+#define SMI_L16B_ID 18
+#define SMI_L17A_ID 19
+#define SMI_L17B_ID 20
+#define SMI_L19_ID 21
+#define SMI_L21_ID 22
+#define SMI_L23_ID 23
+#define SMI_L27_ID 24
+#define SMI_L28_ID 25
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space, which we split into four
+ * ranges: 0 ~ 4G, 4G ~ 8G, 8G ~ 12G and 12G ~ 16G. Masters may be placed in
+ * any of these regions, BUT:
+ * a) all the ports inside a larb must stay within one range, and
+ * b) the iova of any master can NOT cross the 4G/8G/12G boundaries.
+ *
+ * This is the suggested mapping in this SoC:
+ *
+ * modules dma-address-region larbs-ports
+ * disp 0 ~ 4G larb0/1/2/3
+ * vcodec 4G ~ 8G larb19(21)[1]/21(22)/23
+ * cam/mdp 8G ~ 12G the other larbs.
+ * N/A 12G ~ 16G
+ * CCU0 0x24000_0000 ~ 0x243ff_ffff larb27(24): port 0/1
+ * CCU1 0x24400_0000 ~ 0x247ff_ffff larb27(24): port 2/3
+ *
+ * This SoC has two MM IOMMU HW instances; they are connected as follows:
+ * iommu-vdo: larb0/2/5/9/10/11A/11C/13/16B/17B/19/21
+ * iommu-vpp: larb1/3/4/6/7/11B/12/14/15/16A/17A/23/27
+ *
+ * [1]: This is larb19, but the index is 21 from the SW view.
+ */
+
+/* MM IOMMU ports */
+/* LARB 0 -- VDO-0 */
+#define M4U_PORT_L0_DISP_RDMA1 MTK_M4U_ID(SMI_L0_ID, 0)
+#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_ID(SMI_L0_ID, 1)
+#define M4U_PORT_L0_DISP_OVL0_RDMA0 MTK_M4U_ID(SMI_L0_ID, 2)
+#define M4U_PORT_L0_DISP_OVL0_RDMA1 MTK_M4U_ID(SMI_L0_ID, 3)
+#define M4U_PORT_L0_DISP_OVL0_HDR MTK_M4U_ID(SMI_L0_ID, 4)
+#define M4U_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(SMI_L0_ID, 5)
+#define M4U_PORT_L0_DISP_FAKE_ENG0 MTK_M4U_ID(SMI_L0_ID, 6)
+
+/* LARB 1 -- VD0-0 */
+#define M4U_PORT_L1_DISP_RDMA0 MTK_M4U_ID(SMI_L1_ID, 0)
+#define M4U_PORT_L1_DISP_WDMA1 MTK_M4U_ID(SMI_L1_ID, 1)
+#define M4U_PORT_L1_DISP_OVL1_RDMA0 MTK_M4U_ID(SMI_L1_ID, 2)
+#define M4U_PORT_L1_DISP_OVL1_RDMA1 MTK_M4U_ID(SMI_L1_ID, 3)
+#define M4U_PORT_L1_DISP_OVL1_HDR MTK_M4U_ID(SMI_L1_ID, 4)
+#define M4U_PORT_L1_DISP_WROT0 MTK_M4U_ID(SMI_L1_ID, 5)
+#define M4U_PORT_L1_DISP_FAKE_ENG1 MTK_M4U_ID(SMI_L1_ID, 6)
+
+/* LARB 2 -- VDO-1 */
+#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_ID(SMI_L2_ID, 0)
+#define M4U_PORT_L2_MDP_RDMA2 MTK_M4U_ID(SMI_L2_ID, 1)
+#define M4U_PORT_L2_MDP_RDMA4 MTK_M4U_ID(SMI_L2_ID, 2)
+#define M4U_PORT_L2_MDP_RDMA6 MTK_M4U_ID(SMI_L2_ID, 3)
+#define M4U_PORT_L2_DISP_FAKE1 MTK_M4U_ID(SMI_L2_ID, 4)
+
+/* LARB 3 -- VDO-1 */
+#define M4U_PORT_L3_MDP_RDMA1 MTK_M4U_ID(SMI_L3_ID, 0)
+#define M4U_PORT_L3_MDP_RDMA3 MTK_M4U_ID(SMI_L3_ID, 1)
+#define M4U_PORT_L3_MDP_RDMA5 MTK_M4U_ID(SMI_L3_ID, 2)
+#define M4U_PORT_L3_MDP_RDMA7 MTK_M4U_ID(SMI_L3_ID, 3)
+#define M4U_PORT_L3_HDR_DS_SMI MTK_M4U_ID(SMI_L3_ID, 4)
+#define M4U_PORT_L3_HDR_ADL_SMI MTK_M4U_ID(SMI_L3_ID, 5)
+#define M4U_PORT_L3_DISP_FAKE1 MTK_M4U_ID(SMI_L3_ID, 6)
+
+/* LARB 4 -- VPP-0 */
+#define M4U_PORT_L4_MDP_RDMA MTK_M4U_ID(SMI_L4_ID, 0)
+#define M4U_PORT_L4_MDP_FG MTK_M4U_ID(SMI_L4_ID, 1)
+#define M4U_PORT_L4_MDP_OVL MTK_M4U_ID(SMI_L4_ID, 2)
+#define M4U_PORT_L4_MDP_WROT MTK_M4U_ID(SMI_L4_ID, 3)
+#define M4U_PORT_L4_FAKE_ENG MTK_M4U_ID(SMI_L4_ID, 4)
+#define M4U_PORT_L4_DISP_RDMA MTK_M4U_ID(SMI_L4_ID, 5)
+#define M4U_PORT_L4_DISP_WDMA MTK_M4U_ID(SMI_L4_ID, 6)
+
+/* LARB 5 -- VPP-1 */
+#define M4U_PORT_L5_SVPP1_MDP_RDMA MTK_M4U_ID(SMI_L5_ID, 0)
+#define M4U_PORT_L5_SVPP1_MDP_FG MTK_M4U_ID(SMI_L5_ID, 1)
+#define M4U_PORT_L5_SVPP1_MDP_OVL MTK_M4U_ID(SMI_L5_ID, 2)
+#define M4U_PORT_L5_SVPP1_MDP_WROT MTK_M4U_ID(SMI_L5_ID, 3)
+#define M4U_PORT_L5_SVPP2_MDP_RDMA MTK_M4U_ID(SMI_L5_ID, 4)
+#define M4U_PORT_L5_SVPP2_MDP_FG MTK_M4U_ID(SMI_L5_ID, 5)
+#define M4U_PORT_L5_SVPP2_MDP_WROT MTK_M4U_ID(SMI_L5_ID, 6)
+#define M4U_PORT_L5_LARB5_FAKE_ENG MTK_M4U_ID(SMI_L5_ID, 7)
+
+/* LARB 6 -- VPP-1 */
+#define M4U_PORT_L6_SVPP3_MDP_RDMA MTK_M4U_ID(SMI_L6_ID, 0)
+#define M4U_PORT_L6_SVPP3_MDP_FG MTK_M4U_ID(SMI_L6_ID, 1)
+#define M4U_PORT_L6_SVPP3_MDP_WROT MTK_M4U_ID(SMI_L6_ID, 2)
+#define M4U_PORT_L6_LARB6_FAKE_ENG MTK_M4U_ID(SMI_L6_ID, 3)
+
+/* LARB 7 -- WPE */
+#define M4U_PORT_L7_WPE_RDMA_0 MTK_M4U_ID(SMI_L7_ID, 0)
+#define M4U_PORT_L7_WPE_RDMA_1 MTK_M4U_ID(SMI_L7_ID, 1)
+#define M4U_PORT_L7_WPE_WDMA_0 MTK_M4U_ID(SMI_L7_ID, 2)
+
+/* LARB 9 -- IMG-M */
+#define M4U_PORT_L9_IMGI_T1_A MTK_M4U_ID(SMI_L9_ID, 0)
+#define M4U_PORT_L9_UFDI_T1_A MTK_M4U_ID(SMI_L9_ID, 1)
+#define M4U_PORT_L9_IMGBI_T1_A MTK_M4U_ID(SMI_L9_ID, 2)
+#define M4U_PORT_L9_IMGCI_T1_A MTK_M4U_ID(SMI_L9_ID, 3)
+#define M4U_PORT_L9_SMTI_T1_A MTK_M4U_ID(SMI_L9_ID, 4)
+#define M4U_PORT_L9_SMTI_T4_A MTK_M4U_ID(SMI_L9_ID, 5)
+#define M4U_PORT_L9_TNCSTI_T1_A MTK_M4U_ID(SMI_L9_ID, 6)
+#define M4U_PORT_L9_TNCSTI_T4_A MTK_M4U_ID(SMI_L9_ID, 7)
+#define M4U_PORT_L9_YUVO_T1_A MTK_M4U_ID(SMI_L9_ID, 8)
+#define M4U_PORT_L9_YUVBO_T1_A MTK_M4U_ID(SMI_L9_ID, 9)
+#define M4U_PORT_L9_YUVCO_T1_A MTK_M4U_ID(SMI_L9_ID, 10)
+#define M4U_PORT_L9_TIMGO_T1_A MTK_M4U_ID(SMI_L9_ID, 11)
+#define M4U_PORT_L9_YUVO_T2_A MTK_M4U_ID(SMI_L9_ID, 12)
+#define M4U_PORT_L9_YUVO_T5_A MTK_M4U_ID(SMI_L9_ID, 13)
+#define M4U_PORT_L9_IMGI_T1_B MTK_M4U_ID(SMI_L9_ID, 14)
+#define M4U_PORT_L9_IMGBI_T1_B MTK_M4U_ID(SMI_L9_ID, 15)
+#define M4U_PORT_L9_IMGCI_T1_B MTK_M4U_ID(SMI_L9_ID, 16)
+#define M4U_PORT_L9_SMTI_T4_B MTK_M4U_ID(SMI_L9_ID, 17)
+#define M4U_PORT_L9_TNCSO_T1_A MTK_M4U_ID(SMI_L9_ID, 18)
+#define M4U_PORT_L9_SMTO_T1_A MTK_M4U_ID(SMI_L9_ID, 19)
+#define M4U_PORT_L9_SMTO_T4_A MTK_M4U_ID(SMI_L9_ID, 20)
+#define M4U_PORT_L9_TNCSTO_T1_A MTK_M4U_ID(SMI_L9_ID, 21)
+#define M4U_PORT_L9_YUVO_T2_B MTK_M4U_ID(SMI_L9_ID, 22)
+#define M4U_PORT_L9_YUVO_T5_B MTK_M4U_ID(SMI_L9_ID, 23)
+#define M4U_PORT_L9_SMTO_T4_B MTK_M4U_ID(SMI_L9_ID, 24)
+
+/* LARB 10 -- IMG-D */
+#define M4U_PORT_L10_IMGI_D1 MTK_M4U_ID(SMI_L10_ID, 0)
+#define M4U_PORT_L10_IMGBI_D1 MTK_M4U_ID(SMI_L10_ID, 1)
+#define M4U_PORT_L10_IMGCI_D1 MTK_M4U_ID(SMI_L10_ID, 2)
+#define M4U_PORT_L10_IMGDI_D1 MTK_M4U_ID(SMI_L10_ID, 3)
+#define M4U_PORT_L10_DEPI_D1 MTK_M4U_ID(SMI_L10_ID, 4)
+#define M4U_PORT_L10_DMGI_D1 MTK_M4U_ID(SMI_L10_ID, 5)
+#define M4U_PORT_L10_SMTI_D1 MTK_M4U_ID(SMI_L10_ID, 6)
+#define M4U_PORT_L10_RECI_D1 MTK_M4U_ID(SMI_L10_ID, 7)
+#define M4U_PORT_L10_RECI_D1_N MTK_M4U_ID(SMI_L10_ID, 8)
+#define M4U_PORT_L10_TNRWI_D1 MTK_M4U_ID(SMI_L10_ID, 9)
+#define M4U_PORT_L10_TNRCI_D1 MTK_M4U_ID(SMI_L10_ID, 10)
+#define M4U_PORT_L10_TNRCI_D1_N MTK_M4U_ID(SMI_L10_ID, 11)
+#define M4U_PORT_L10_IMG4O_D1 MTK_M4U_ID(SMI_L10_ID, 12)
+#define M4U_PORT_L10_IMG4BO_D1 MTK_M4U_ID(SMI_L10_ID, 13)
+#define M4U_PORT_L10_SMTI_D8 MTK_M4U_ID(SMI_L10_ID, 14)
+#define M4U_PORT_L10_SMTO_D1 MTK_M4U_ID(SMI_L10_ID, 15)
+#define M4U_PORT_L10_TNRMO_D1 MTK_M4U_ID(SMI_L10_ID, 16)
+#define M4U_PORT_L10_TNRMO_D1_N MTK_M4U_ID(SMI_L10_ID, 17)
+#define M4U_PORT_L10_SMTO_D8 MTK_M4U_ID(SMI_L10_ID, 18)
+#define M4U_PORT_L10_DBGO_D1 MTK_M4U_ID(SMI_L10_ID, 19)
+
+/* LARB 11A -- IMG-D */
+#define M4U_PORT_L11A_WPE_RDMA_0 MTK_M4U_ID(SMI_L11A_ID, 0)
+#define M4U_PORT_L11A_WPE_RDMA_1 MTK_M4U_ID(SMI_L11A_ID, 1)
+#define M4U_PORT_L11A_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11A_ID, 2)
+#define M4U_PORT_L11A_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11A_ID, 3)
+#define M4U_PORT_L11A_WPE_CQ0 MTK_M4U_ID(SMI_L11A_ID, 4)
+#define M4U_PORT_L11A_WPE_CQ1 MTK_M4U_ID(SMI_L11A_ID, 5)
+#define M4U_PORT_L11A_PIMGI_P1 MTK_M4U_ID(SMI_L11A_ID, 6)
+#define M4U_PORT_L11A_PIMGBI_P1 MTK_M4U_ID(SMI_L11A_ID, 7)
+#define M4U_PORT_L11A_PIMGCI_P1 MTK_M4U_ID(SMI_L11A_ID, 8)
+#define M4U_PORT_L11A_IMGI_T1_C MTK_M4U_ID(SMI_L11A_ID, 9)
+#define M4U_PORT_L11A_IMGBI_T1_C MTK_M4U_ID(SMI_L11A_ID, 10)
+#define M4U_PORT_L11A_IMGCI_T1_C MTK_M4U_ID(SMI_L11A_ID, 11)
+#define M4U_PORT_L11A_SMTI_T1_C MTK_M4U_ID(SMI_L11A_ID, 12)
+#define M4U_PORT_L11A_SMTI_T4_C MTK_M4U_ID(SMI_L11A_ID, 13)
+#define M4U_PORT_L11A_SMTI_T6_C MTK_M4U_ID(SMI_L11A_ID, 14)
+#define M4U_PORT_L11A_YUVO_T1_C MTK_M4U_ID(SMI_L11A_ID, 15)
+#define M4U_PORT_L11A_YUVBO_T1_C MTK_M4U_ID(SMI_L11A_ID, 16)
+#define M4U_PORT_L11A_YUVCO_T1_C MTK_M4U_ID(SMI_L11A_ID, 17)
+#define M4U_PORT_L11A_WPE_WDMA_0 MTK_M4U_ID(SMI_L11A_ID, 18)
+#define M4U_PORT_L11A_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11A_ID, 19)
+#define M4U_PORT_L11A_WROT_P1 MTK_M4U_ID(SMI_L11A_ID, 20)
+#define M4U_PORT_L11A_TCCSO_P1 MTK_M4U_ID(SMI_L11A_ID, 21)
+#define M4U_PORT_L11A_TCCSI_P1 MTK_M4U_ID(SMI_L11A_ID, 22)
+#define M4U_PORT_L11A_TIMGO_T1_C MTK_M4U_ID(SMI_L11A_ID, 23)
+#define M4U_PORT_L11A_YUVO_T2_C MTK_M4U_ID(SMI_L11A_ID, 24)
+#define M4U_PORT_L11A_YUVO_T5_C MTK_M4U_ID(SMI_L11A_ID, 25)
+#define M4U_PORT_L11A_SMTO_T1_C MTK_M4U_ID(SMI_L11A_ID, 26)
+#define M4U_PORT_L11A_SMTO_T4_C MTK_M4U_ID(SMI_L11A_ID, 27)
+#define M4U_PORT_L11A_SMTO_T6_C MTK_M4U_ID(SMI_L11A_ID, 28)
+#define M4U_PORT_L11A_DBGO_T1_C MTK_M4U_ID(SMI_L11A_ID, 29)
+
+/* LARB 11B -- IMG-D */
+#define M4U_PORT_L11B_WPE_RDMA_0 MTK_M4U_ID(SMI_L11B_ID, 0)
+#define M4U_PORT_L11B_WPE_RDMA_1 MTK_M4U_ID(SMI_L11B_ID, 1)
+#define M4U_PORT_L11B_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11B_ID, 2)
+#define M4U_PORT_L11B_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11B_ID, 3)
+#define M4U_PORT_L11B_WPE_CQ0 MTK_M4U_ID(SMI_L11B_ID, 4)
+#define M4U_PORT_L11B_WPE_CQ1 MTK_M4U_ID(SMI_L11B_ID, 5)
+#define M4U_PORT_L11B_PIMGI_P1 MTK_M4U_ID(SMI_L11B_ID, 6)
+#define M4U_PORT_L11B_PIMGBI_P1 MTK_M4U_ID(SMI_L11B_ID, 7)
+#define M4U_PORT_L11B_PIMGCI_P1 MTK_M4U_ID(SMI_L11B_ID, 8)
+#define M4U_PORT_L11B_IMGI_T1_C MTK_M4U_ID(SMI_L11B_ID, 9)
+#define M4U_PORT_L11B_IMGBI_T1_C MTK_M4U_ID(SMI_L11B_ID, 10)
+#define M4U_PORT_L11B_IMGCI_T1_C MTK_M4U_ID(SMI_L11B_ID, 11)
+#define M4U_PORT_L11B_SMTI_T1_C MTK_M4U_ID(SMI_L11B_ID, 12)
+#define M4U_PORT_L11B_SMTI_T4_C MTK_M4U_ID(SMI_L11B_ID, 13)
+#define M4U_PORT_L11B_SMTI_T6_C MTK_M4U_ID(SMI_L11B_ID, 14)
+#define M4U_PORT_L11B_YUVO_T1_C MTK_M4U_ID(SMI_L11B_ID, 15)
+#define M4U_PORT_L11B_YUVBO_T1_C MTK_M4U_ID(SMI_L11B_ID, 16)
+#define M4U_PORT_L11B_YUVCO_T1_C MTK_M4U_ID(SMI_L11B_ID, 17)
+#define M4U_PORT_L11B_WPE_WDMA_0 MTK_M4U_ID(SMI_L11B_ID, 18)
+#define M4U_PORT_L11B_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11B_ID, 19)
+#define M4U_PORT_L11B_WROT_P1 MTK_M4U_ID(SMI_L11B_ID, 20)
+#define M4U_PORT_L11B_TCCSO_P1 MTK_M4U_ID(SMI_L11B_ID, 21)
+#define M4U_PORT_L11B_TCCSI_P1 MTK_M4U_ID(SMI_L11B_ID, 22)
+#define M4U_PORT_L11B_TIMGO_T1_C MTK_M4U_ID(SMI_L11B_ID, 23)
+#define M4U_PORT_L11B_YUVO_T2_C MTK_M4U_ID(SMI_L11B_ID, 24)
+#define M4U_PORT_L11B_YUVO_T5_C MTK_M4U_ID(SMI_L11B_ID, 25)
+#define M4U_PORT_L11B_SMTO_T1_C MTK_M4U_ID(SMI_L11B_ID, 26)
+#define M4U_PORT_L11B_SMTO_T4_C MTK_M4U_ID(SMI_L11B_ID, 27)
+#define M4U_PORT_L11B_SMTO_T6_C MTK_M4U_ID(SMI_L11B_ID, 28)
+#define M4U_PORT_L11B_DBGO_T1_C MTK_M4U_ID(SMI_L11B_ID, 29)
+
+/* LARB 11C -- IMG-D */
+#define M4U_PORT_L11C_WPE_RDMA_0 MTK_M4U_ID(SMI_L11C_ID, 0)
+#define M4U_PORT_L11C_WPE_RDMA_1 MTK_M4U_ID(SMI_L11C_ID, 1)
+#define M4U_PORT_L11C_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11C_ID, 2)
+#define M4U_PORT_L11C_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11C_ID, 3)
+#define M4U_PORT_L11C_WPE_CQ0 MTK_M4U_ID(SMI_L11C_ID, 4)
+#define M4U_PORT_L11C_WPE_CQ1 MTK_M4U_ID(SMI_L11C_ID, 5)
+#define M4U_PORT_L11C_PIMGI_P1 MTK_M4U_ID(SMI_L11C_ID, 6)
+#define M4U_PORT_L11C_PIMGBI_P1 MTK_M4U_ID(SMI_L11C_ID, 7)
+#define M4U_PORT_L11C_PIMGCI_P1 MTK_M4U_ID(SMI_L11C_ID, 8)
+#define M4U_PORT_L11C_IMGI_T1_C MTK_M4U_ID(SMI_L11C_ID, 9)
+#define M4U_PORT_L11C_IMGBI_T1_C MTK_M4U_ID(SMI_L11C_ID, 10)
+#define M4U_PORT_L11C_IMGCI_T1_C MTK_M4U_ID(SMI_L11C_ID, 11)
+#define M4U_PORT_L11C_SMTI_T1_C MTK_M4U_ID(SMI_L11C_ID, 12)
+#define M4U_PORT_L11C_SMTI_T4_C MTK_M4U_ID(SMI_L11C_ID, 13)
+#define M4U_PORT_L11C_SMTI_T6_C MTK_M4U_ID(SMI_L11C_ID, 14)
+#define M4U_PORT_L11C_YUVO_T1_C MTK_M4U_ID(SMI_L11C_ID, 15)
+#define M4U_PORT_L11C_YUVBO_T1_C MTK_M4U_ID(SMI_L11C_ID, 16)
+#define M4U_PORT_L11C_YUVCO_T1_C MTK_M4U_ID(SMI_L11C_ID, 17)
+#define M4U_PORT_L11C_WPE_WDMA_0 MTK_M4U_ID(SMI_L11C_ID, 18)
+#define M4U_PORT_L11C_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11C_ID, 19)
+#define M4U_PORT_L11C_WROT_P1 MTK_M4U_ID(SMI_L11C_ID, 20)
+#define M4U_PORT_L11C_TCCSO_P1 MTK_M4U_ID(SMI_L11C_ID, 21)
+#define M4U_PORT_L11C_TCCSI_P1 MTK_M4U_ID(SMI_L11C_ID, 22)
+#define M4U_PORT_L11C_TIMGO_T1_C MTK_M4U_ID(SMI_L11C_ID, 23)
+#define M4U_PORT_L11C_YUVO_T2_C MTK_M4U_ID(SMI_L11C_ID, 24)
+#define M4U_PORT_L11C_YUVO_T5_C MTK_M4U_ID(SMI_L11C_ID, 25)
+#define M4U_PORT_L11C_SMTO_T1_C MTK_M4U_ID(SMI_L11C_ID, 26)
+#define M4U_PORT_L11C_SMTO_T4_C MTK_M4U_ID(SMI_L11C_ID, 27)
+#define M4U_PORT_L11C_SMTO_T6_C MTK_M4U_ID(SMI_L11C_ID, 28)
+#define M4U_PORT_L11C_DBGO_T1_C MTK_M4U_ID(SMI_L11C_ID, 29)
+
+/* LARB 12 -- IPE */
+#define M4U_PORT_L12_FDVT_RDA_0 MTK_M4U_ID(SMI_L12_ID, 0)
+#define M4U_PORT_L12_FDVT_RDB_0 MTK_M4U_ID(SMI_L12_ID, 1)
+#define M4U_PORT_L12_FDVT_WRA_0 MTK_M4U_ID(SMI_L12_ID, 2)
+#define M4U_PORT_L12_FDVT_WRB_0 MTK_M4U_ID(SMI_L12_ID, 3)
+#define M4U_PORT_L12_ME_RDMA MTK_M4U_ID(SMI_L12_ID, 4)
+#define M4U_PORT_L12_ME_WDMA MTK_M4U_ID(SMI_L12_ID, 5)
+#define M4U_PORT_L12_DVS_RDMA MTK_M4U_ID(SMI_L12_ID, 6)
+#define M4U_PORT_L12_DVS_WDMA MTK_M4U_ID(SMI_L12_ID, 7)
+#define M4U_PORT_L12_DVP_RDMA MTK_M4U_ID(SMI_L12_ID, 8)
+#define M4U_PORT_L12_DVP_WDMA MTK_M4U_ID(SMI_L12_ID, 9)
+#define M4U_PORT_L12_FDVT_2ND_RDA_0 MTK_M4U_ID(SMI_L12_ID, 10)
+#define M4U_PORT_L12_FDVT_2ND_RDB_0 MTK_M4U_ID(SMI_L12_ID, 11)
+#define M4U_PORT_L12_FDVT_2ND_WRA_0 MTK_M4U_ID(SMI_L12_ID, 12)
+#define M4U_PORT_L12_FDVT_2ND_WRB_0 MTK_M4U_ID(SMI_L12_ID, 13)
+#define M4U_PORT_L12_DHZEI_E1 MTK_M4U_ID(SMI_L12_ID, 14)
+#define M4U_PORT_L12_DHZEO_E1 MTK_M4U_ID(SMI_L12_ID, 15)
+
+/* LARB 13 -- CAM-1 */
+#define M4U_PORT_L13_CAMSV_CQI_E1 MTK_M4U_ID(SMI_L13_ID, 0)
+#define M4U_PORT_L13_CAMSV_CQI_E2 MTK_M4U_ID(SMI_L13_ID, 1)
+#define M4U_PORT_L13_GCAMSV_A_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 2)
+#define M4U_PORT_L13_GCAMSV_C_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 3)
+#define M4U_PORT_L13_GCAMSV_A_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 4)
+#define M4U_PORT_L13_GCAMSV_C_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 5)
+#define M4U_PORT_L13_PDAI_A_0 MTK_M4U_ID(SMI_L13_ID, 6)
+#define M4U_PORT_L13_PDAI_A_1 MTK_M4U_ID(SMI_L13_ID, 7)
+#define M4U_PORT_L13_CAMSV_CQI_B_E1 MTK_M4U_ID(SMI_L13_ID, 8)
+#define M4U_PORT_L13_CAMSV_CQI_B_E2 MTK_M4U_ID(SMI_L13_ID, 9)
+#define M4U_PORT_L13_CAMSV_CQI_C_E1 MTK_M4U_ID(SMI_L13_ID, 10)
+#define M4U_PORT_L13_CAMSV_CQI_C_E2 MTK_M4U_ID(SMI_L13_ID, 11)
+#define M4U_PORT_L13_GCAMSV_E_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 12)
+#define M4U_PORT_L13_GCAMSV_E_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 13)
+#define M4U_PORT_L13_GCAMSV_A_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 14)
+#define M4U_PORT_L13_GCAMSV_C_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 15)
+#define M4U_PORT_L13_GCAMSV_A_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 16)
+#define M4U_PORT_L13_GCAMSV_C_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 17)
+#define M4U_PORT_L13_GCAMSV_E_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 18)
+#define M4U_PORT_L13_GCAMSV_E_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 19)
+#define M4U_PORT_L13_GCAMSV_G_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 20)
+#define M4U_PORT_L13_GCAMSV_G_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 21)
+#define M4U_PORT_L13_PDAO_A MTK_M4U_ID(SMI_L13_ID, 22)
+#define M4U_PORT_L13_PDAO_C MTK_M4U_ID(SMI_L13_ID, 23)
+
+/* LARB 14 -- CAM-1 */
+#define M4U_PORT_L14_GCAMSV_B_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 0)
+#define M4U_PORT_L14_GCAMSV_B_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 1)
+#define M4U_PORT_L14_SCAMSV_A_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 2)
+#define M4U_PORT_L14_SCAMSV_A_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 3)
+#define M4U_PORT_L14_SCAMSV_B_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 4)
+#define M4U_PORT_L14_SCAMSV_B_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 5)
+#define M4U_PORT_L14_PDAI_B_0 MTK_M4U_ID(SMI_L14_ID, 6)
+#define M4U_PORT_L14_PDAI_B_1 MTK_M4U_ID(SMI_L14_ID, 7)
+#define M4U_PORT_L14_GCAMSV_D_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 8)
+#define M4U_PORT_L14_GCAMSV_D_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 9)
+#define M4U_PORT_L14_GCAMSV_F_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 10)
+#define M4U_PORT_L14_GCAMSV_F_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 11)
+#define M4U_PORT_L14_GCAMSV_H_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 12)
+#define M4U_PORT_L14_GCAMSV_H_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 13)
+#define M4U_PORT_L14_GCAMSV_B_UFEO_1 MTK_M4U_ID(SMI_L14_ID, 14)
+#define M4U_PORT_L14_GCAMSV_B_UFEO_2 MTK_M4U_ID(SMI_L14_ID, 15)
+#define M4U_PORT_L14_GCAMSV_D_UFEO_1 MTK_M4U_ID(SMI_L14_ID, 16)
+#define M4U_PORT_L14_GCAMSV_D_UFEO_2 MTK_M4U_ID(SMI_L14_ID, 17)
+#define M4U_PORT_L14_PDAO_B MTK_M4U_ID(SMI_L14_ID, 18)
+#define M4U_PORT_L14_IPUI MTK_M4U_ID(SMI_L14_ID, 19)
+#define M4U_PORT_L14_IPUO MTK_M4U_ID(SMI_L14_ID, 20)
+#define M4U_PORT_L14_IPU3O MTK_M4U_ID(SMI_L14_ID, 21)
+#define M4U_PORT_L14_FAKE MTK_M4U_ID(SMI_L14_ID, 22)
+
+/* LARB 15 -- IMG-D */
+#define M4U_PORT_L15_VIPI_D1 MTK_M4U_ID(SMI_L15_ID, 0)
+#define M4U_PORT_L15_VIPBI_D1 MTK_M4U_ID(SMI_L15_ID, 1)
+#define M4U_PORT_L15_SMTI_D6 MTK_M4U_ID(SMI_L15_ID, 2)
+#define M4U_PORT_L15_TNCSTI_D1 MTK_M4U_ID(SMI_L15_ID, 3)
+#define M4U_PORT_L15_TNCSTI_D4 MTK_M4U_ID(SMI_L15_ID, 4)
+#define M4U_PORT_L15_SMTI_D4 MTK_M4U_ID(SMI_L15_ID, 5)
+#define M4U_PORT_L15_IMG3O_D1 MTK_M4U_ID(SMI_L15_ID, 6)
+#define M4U_PORT_L15_IMG3BO_D1 MTK_M4U_ID(SMI_L15_ID, 7)
+#define M4U_PORT_L15_IMG3CO_D1 MTK_M4U_ID(SMI_L15_ID, 8)
+#define M4U_PORT_L15_IMG2O_D1 MTK_M4U_ID(SMI_L15_ID, 9)
+#define M4U_PORT_L15_SMTI_D9 MTK_M4U_ID(SMI_L15_ID, 10)
+#define M4U_PORT_L15_SMTO_D4 MTK_M4U_ID(SMI_L15_ID, 11)
+#define M4U_PORT_L15_FEO_D1 MTK_M4U_ID(SMI_L15_ID, 12)
+#define M4U_PORT_L15_TNCSO_D1 MTK_M4U_ID(SMI_L15_ID, 13)
+#define M4U_PORT_L15_TNCSTO_D1 MTK_M4U_ID(SMI_L15_ID, 14)
+#define M4U_PORT_L15_SMTO_D6 MTK_M4U_ID(SMI_L15_ID, 15)
+#define M4U_PORT_L15_SMTO_D9 MTK_M4U_ID(SMI_L15_ID, 16)
+#define M4U_PORT_L15_TNCO_D1 MTK_M4U_ID(SMI_L15_ID, 17)
+#define M4U_PORT_L15_TNCO_D1_N MTK_M4U_ID(SMI_L15_ID, 18)
+
+/* LARB 16A -- CAM */
+#define M4U_PORT_L16A_IMGO_R1 MTK_M4U_ID(SMI_L16A_ID, 0)
+#define M4U_PORT_L16A_CQI_R1 MTK_M4U_ID(SMI_L16A_ID, 1)
+#define M4U_PORT_L16A_CQI_R2 MTK_M4U_ID(SMI_L16A_ID, 2)
+#define M4U_PORT_L16A_BPCI_R1 MTK_M4U_ID(SMI_L16A_ID, 3)
+#define M4U_PORT_L16A_LSCI_R1 MTK_M4U_ID(SMI_L16A_ID, 4)
+#define M4U_PORT_L16A_RAWI_R2 MTK_M4U_ID(SMI_L16A_ID, 5)
+#define M4U_PORT_L16A_RAWI_R3 MTK_M4U_ID(SMI_L16A_ID, 6)
+#define M4U_PORT_L16A_UFDI_R2 MTK_M4U_ID(SMI_L16A_ID, 7)
+#define M4U_PORT_L16A_UFDI_R3 MTK_M4U_ID(SMI_L16A_ID, 8)
+#define M4U_PORT_L16A_RAWI_R4 MTK_M4U_ID(SMI_L16A_ID, 9)
+#define M4U_PORT_L16A_RAWI_R5 MTK_M4U_ID(SMI_L16A_ID, 10)
+#define M4U_PORT_L16A_AAI_R1 MTK_M4U_ID(SMI_L16A_ID, 11)
+#define M4U_PORT_L16A_UFDI_R5 MTK_M4U_ID(SMI_L16A_ID, 12)
+#define M4U_PORT_L16A_FHO_R1 MTK_M4U_ID(SMI_L16A_ID, 13)
+#define M4U_PORT_L16A_AAO_R1 MTK_M4U_ID(SMI_L16A_ID, 14)
+#define M4U_PORT_L16A_TSFSO_R1 MTK_M4U_ID(SMI_L16A_ID, 15)
+#define M4U_PORT_L16A_FLKO_R1 MTK_M4U_ID(SMI_L16A_ID, 16)
+
+/* LARB 16B -- CAM */
+#define M4U_PORT_L16B_IMGO_R1 MTK_M4U_ID(SMI_L16B_ID, 0)
+#define M4U_PORT_L16B_CQI_R1 MTK_M4U_ID(SMI_L16B_ID, 1)
+#define M4U_PORT_L16B_CQI_R2 MTK_M4U_ID(SMI_L16B_ID, 2)
+#define M4U_PORT_L16B_BPCI_R1 MTK_M4U_ID(SMI_L16B_ID, 3)
+#define M4U_PORT_L16B_LSCI_R1 MTK_M4U_ID(SMI_L16B_ID, 4)
+#define M4U_PORT_L16B_RAWI_R2 MTK_M4U_ID(SMI_L16B_ID, 5)
+#define M4U_PORT_L16B_RAWI_R3 MTK_M4U_ID(SMI_L16B_ID, 6)
+#define M4U_PORT_L16B_UFDI_R2 MTK_M4U_ID(SMI_L16B_ID, 7)
+#define M4U_PORT_L16B_UFDI_R3 MTK_M4U_ID(SMI_L16B_ID, 8)
+#define M4U_PORT_L16B_RAWI_R4 MTK_M4U_ID(SMI_L16B_ID, 9)
+#define M4U_PORT_L16B_RAWI_R5 MTK_M4U_ID(SMI_L16B_ID, 10)
+#define M4U_PORT_L16B_AAI_R1 MTK_M4U_ID(SMI_L16B_ID, 11)
+#define M4U_PORT_L16B_UFDI_R5 MTK_M4U_ID(SMI_L16B_ID, 12)
+#define M4U_PORT_L16B_FHO_R1 MTK_M4U_ID(SMI_L16B_ID, 13)
+#define M4U_PORT_L16B_AAO_R1 MTK_M4U_ID(SMI_L16B_ID, 14)
+#define M4U_PORT_L16B_TSFSO_R1 MTK_M4U_ID(SMI_L16B_ID, 15)
+#define M4U_PORT_L16B_FLKO_R1 MTK_M4U_ID(SMI_L16B_ID, 16)
+
+/* LARB 17A -- CAM */
+#define M4U_PORT_L17A_YUVO_R1 MTK_M4U_ID(SMI_L17A_ID, 0)
+#define M4U_PORT_L17A_YUVO_R3 MTK_M4U_ID(SMI_L17A_ID, 1)
+#define M4U_PORT_L17A_YUVCO_R1 MTK_M4U_ID(SMI_L17A_ID, 2)
+#define M4U_PORT_L17A_YUVO_R2 MTK_M4U_ID(SMI_L17A_ID, 3)
+#define M4U_PORT_L17A_RZH1N2TO_R1 MTK_M4U_ID(SMI_L17A_ID, 4)
+#define M4U_PORT_L17A_DRZS4NO_R1 MTK_M4U_ID(SMI_L17A_ID, 5)
+#define M4U_PORT_L17A_TNCSO_R1 MTK_M4U_ID(SMI_L17A_ID, 6)
+
+/* LARB 17B -- CAM */
+#define M4U_PORT_L17B_YUVO_R1 MTK_M4U_ID(SMI_L17B_ID, 0)
+#define M4U_PORT_L17B_YUVO_R3 MTK_M4U_ID(SMI_L17B_ID, 1)
+#define M4U_PORT_L17B_YUVCO_R1 MTK_M4U_ID(SMI_L17B_ID, 2)
+#define M4U_PORT_L17B_YUVO_R2 MTK_M4U_ID(SMI_L17B_ID, 3)
+#define M4U_PORT_L17B_RZH1N2TO_R1 MTK_M4U_ID(SMI_L17B_ID, 4)
+#define M4U_PORT_L17B_DRZS4NO_R1 MTK_M4U_ID(SMI_L17B_ID, 5)
+#define M4U_PORT_L17B_TNCSO_R1 MTK_M4U_ID(SMI_L17B_ID, 6)
+
+/* LARB 19 -- VENC */
+#define M4U_PORT_L19_VENC_RCPU MTK_M4U_ID(SMI_L19_ID, 0)
+#define M4U_PORT_L19_VENC_REC MTK_M4U_ID(SMI_L19_ID, 1)
+#define M4U_PORT_L19_VENC_BSDMA MTK_M4U_ID(SMI_L19_ID, 2)
+#define M4U_PORT_L19_VENC_SV_COMV MTK_M4U_ID(SMI_L19_ID, 3)
+#define M4U_PORT_L19_VENC_RD_COMV MTK_M4U_ID(SMI_L19_ID, 4)
+#define M4U_PORT_L19_VENC_NBM_RDMA MTK_M4U_ID(SMI_L19_ID, 5)
+#define M4U_PORT_L19_VENC_NBM_RDMA_LITE MTK_M4U_ID(SMI_L19_ID, 6)
+#define M4U_PORT_L19_JPGENC_Y_RDMA MTK_M4U_ID(SMI_L19_ID, 7)
+#define M4U_PORT_L19_JPGENC_C_RDMA MTK_M4U_ID(SMI_L19_ID, 8)
+#define M4U_PORT_L19_JPGENC_Q_TABLE MTK_M4U_ID(SMI_L19_ID, 9)
+#define M4U_PORT_L19_VENC_SUB_W_LUMA MTK_M4U_ID(SMI_L19_ID, 10)
+#define M4U_PORT_L19_VENC_FCS_NBM_RDMA MTK_M4U_ID(SMI_L19_ID, 11)
+#define M4U_PORT_L19_JPGENC_BSDMA MTK_M4U_ID(SMI_L19_ID, 12)
+#define M4U_PORT_L19_JPGDEC_WDMA_0 MTK_M4U_ID(SMI_L19_ID, 13)
+#define M4U_PORT_L19_JPGDEC_BSDMA_0 MTK_M4U_ID(SMI_L19_ID, 14)
+#define M4U_PORT_L19_VENC_NBM_WDMA MTK_M4U_ID(SMI_L19_ID, 15)
+#define M4U_PORT_L19_VENC_NBM_WDMA_LITE MTK_M4U_ID(SMI_L19_ID, 16)
+#define M4U_PORT_L19_VENC_FCS_NBM_WDMA MTK_M4U_ID(SMI_L19_ID, 17)
+#define M4U_PORT_L19_JPGDEC_WDMA_1 MTK_M4U_ID(SMI_L19_ID, 18)
+#define M4U_PORT_L19_JPGDEC_BSDMA_1 MTK_M4U_ID(SMI_L19_ID, 19)
+#define M4U_PORT_L19_JPGDEC_HUFF_OFFSET_1 MTK_M4U_ID(SMI_L19_ID, 20)
+#define M4U_PORT_L19_JPGDEC_HUFF_OFFSET_0 MTK_M4U_ID(SMI_L19_ID, 21)
+#define M4U_PORT_L19_VENC_CUR_LUMA MTK_M4U_ID(SMI_L19_ID, 22)
+#define M4U_PORT_L19_VENC_CUR_CHROMA MTK_M4U_ID(SMI_L19_ID, 23)
+#define M4U_PORT_L19_VENC_REF_LUMA MTK_M4U_ID(SMI_L19_ID, 24)
+#define M4U_PORT_L19_VENC_REF_CHROMA MTK_M4U_ID(SMI_L19_ID, 25)
+#define M4U_PORT_L19_VENC_SUB_R_LUMA MTK_M4U_ID(SMI_L19_ID, 26)
+
+/* LARB 21 -- VDEC-CORE0 */
+#define M4U_PORT_L21_HW_VDEC_MC_EXT MTK_M4U_ID(SMI_L21_ID, 0)
+#define M4U_PORT_L21_HW_VDEC_UFO_EXT MTK_M4U_ID(SMI_L21_ID, 1)
+#define M4U_PORT_L21_HW_VDEC_PP_EXT MTK_M4U_ID(SMI_L21_ID, 2)
+#define M4U_PORT_L21_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(SMI_L21_ID, 3)
+#define M4U_PORT_L21_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(SMI_L21_ID, 4)
+#define M4U_PORT_L21_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(SMI_L21_ID, 5)
+#define M4U_PORT_L21_HW_VDEC_TILE_EXT MTK_M4U_ID(SMI_L21_ID, 6)
+#define M4U_PORT_L21_HW_VDEC_VLD_EXT MTK_M4U_ID(SMI_L21_ID, 7)
+#define M4U_PORT_L21_HW_VDEC_VLD2_EXT MTK_M4U_ID(SMI_L21_ID, 8)
+#define M4U_PORT_L21_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(SMI_L21_ID, 9)
+#define M4U_PORT_L21_HW_VDEC_UFO_EXT_C MTK_M4U_ID(SMI_L21_ID, 10)
+
+/* LARB 23 -- VDEC-SOC */
+#define M4U_PORT_L23_HW_VDEC_LAT0_VLD_EXT MTK_M4U_ID(SMI_L23_ID, 0)
+#define M4U_PORT_L23_HW_VDEC_LAT0_VLD2_EXT MTK_M4U_ID(SMI_L23_ID, 1)
+#define M4U_PORT_L23_HW_VDEC_LAT0_AVC_MV_EXT MTK_M4U_ID(SMI_L23_ID, 2)
+#define M4U_PORT_L23_HW_VDEC_LAT0_PRED_RD_EXT MTK_M4U_ID(SMI_L23_ID, 3)
+#define M4U_PORT_L23_HW_VDEC_LAT0_TILE_EXT MTK_M4U_ID(SMI_L23_ID, 4)
+#define M4U_PORT_L23_HW_VDEC_LAT0_WDMA_EXT MTK_M4U_ID(SMI_L23_ID, 5)
+#define M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(SMI_L23_ID, 6)
+#define M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT_C MTK_M4U_ID(SMI_L23_ID, 7)
+#define M4U_PORT_L23_HW_VDEC_MC_EXT_C MTK_M4U_ID(SMI_L23_ID, 8)
+
+/* LARB 27 -- CCU */
+#define M4U_PORT_L27_CCUI MTK_M4U_ID(SMI_L27_ID, 0)
+#define M4U_PORT_L27_CCUO MTK_M4U_ID(SMI_L27_ID, 1)
+#define M4U_PORT_L27_CCUI2 MTK_M4U_ID(SMI_L27_ID, 2)
+#define M4U_PORT_L27_CCUO2 MTK_M4U_ID(SMI_L27_ID, 3)
+
+/* LARB 28 -- AXI-CCU */
+#define M4U_PORT_L28_CCU_AXI_0 MTK_M4U_ID(SMI_L28_ID, 0)
+
+/* infra/peri */
+#define IFR_IOMMU_PORT_PCIE_0 MTK_IFAIOMMU_PERI_ID(0)
+
+#endif
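
Every port ID in the new mt8188 header is built with MTK_M4U_ID() from <dt-bindings/memory/mtk-memory-port.h>, which packs the software larb index and the per-larb port index into a single value (larb in the high bits, port in the low bits; a 5-bit port field is assumed here). A brief sketch of how an ID decomposes; the EXAMPLE_* names are hypothetical:

    #include <dt-bindings/memory/mediatek,mt8188-memory-port.h>

    /* e.g. M4U_PORT_L9_YUVO_T1_A == MTK_M4U_ID(SMI_L9_ID, 8),
     * i.e. software larb index 8 (hardware larb 9) and port 8. */
    #define EXAMPLE_ID_TO_LARB(id)  ((id) >> 5)      /* assumed 5-bit port field */
    #define EXAMPLE_ID_TO_PORT(id)  ((id) & 0x1f)
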
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 847da6fc2713..31029f4f7be8 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -12,7 +12,7 @@
#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
-#ifdef CONFIG_HW_PERF_EVENTS
+#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
u8 idx; /* index into the pmu->pmc array */
@@ -74,6 +74,7 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_resync_el0(void);
#define kvm_vcpu_has_pmu(vcpu) \
(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
@@ -171,6 +172,7 @@ static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
return 0;
}
+static inline void kvm_vcpu_pmu_resync_el0(void) {}
#endif
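
The arm_pmu.h hunk tightens the guard to IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM) and pairs the new kvm_vcpu_pmu_resync_el0() declaration with an empty inline stub, so callers never need their own #ifdefs. A minimal sketch of that declaration-or-stub pattern, with hypothetical names:

    #if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)
    void example_resync(void);                  /* real definition lives in a .c file */
    #else
    static inline void example_resync(void) {}  /* compiles away when disabled */
    #endif
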
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 953e6f12fa1c..99a5201d9e62 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -32,7 +32,6 @@ struct task_struct;
struct pci_dev;
extern int amd_iommu_detect(void);
-extern int amd_iommu_init_hardware(void);
/**
* amd_iommu_init_device() - Init device for use with IOMMUv2 driver
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 6a3a9e122bb5..51b1b7054a23 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -117,6 +117,8 @@ enum audit_nfcfgop {
AUDIT_NFT_OP_OBJ_RESET,
AUDIT_NFT_OP_FLOWTABLE_REGISTER,
AUDIT_NFT_OP_FLOWTABLE_UNREGISTER,
+ AUDIT_NFT_OP_SETELEM_RESET,
+ AUDIT_NFT_OP_RULE_RESET,
AUDIT_NFT_OP_INVALID,
};
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 12596af59c00..024e8b28c34b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -438,7 +438,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
size /= sizeof(long);
while (size--)
- *ldst++ = *lsrc++;
+ data_race(*ldst++ = *lsrc++);
}
/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
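
The bpf_long_memcpy() change wraps the word-sized stores in data_race() (from <linux/compiler.h>), which tells KCSAN that the unsynchronized access is intentional while leaving the generated code unchanged. A sketch of the same annotation outside the BPF context; the function name is made up:

    #include <linux/compiler.h>
    #include <linux/types.h>

    static void example_copy_words(long *dst, const long *src, u32 size)
    {
            size /= sizeof(long);
            while (size--)
                    data_race(*dst++ = *src++);  /* may race with concurrent updates */
    }
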
diff --git a/include/linux/btf.h b/include/linux/btf.h
index df64cc642074..928113a80a95 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -209,6 +209,7 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec);
bool btf_type_is_void(const struct btf_type *t);
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
+s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p);
const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
u32 id, u32 *res_id);
const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 49586ff26152..5f2301ee88bc 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -359,14 +359,19 @@ enum {
extern const char *ceph_mds_op_name(int op);
-
-#define CEPH_SETATTR_MODE 1
-#define CEPH_SETATTR_UID 2
-#define CEPH_SETATTR_GID 4
-#define CEPH_SETATTR_MTIME 8
-#define CEPH_SETATTR_ATIME 16
-#define CEPH_SETATTR_SIZE 32
-#define CEPH_SETATTR_CTIME 64
+#define CEPH_SETATTR_MODE (1 << 0)
+#define CEPH_SETATTR_UID (1 << 1)
+#define CEPH_SETATTR_GID (1 << 2)
+#define CEPH_SETATTR_MTIME (1 << 3)
+#define CEPH_SETATTR_ATIME (1 << 4)
+#define CEPH_SETATTR_SIZE (1 << 5)
+#define CEPH_SETATTR_CTIME (1 << 6)
+#define CEPH_SETATTR_MTIME_NOW (1 << 7)
+#define CEPH_SETATTR_ATIME_NOW (1 << 8)
+#define CEPH_SETATTR_BTIME (1 << 9)
+#define CEPH_SETATTR_KILL_SGUID (1 << 10)
+#define CEPH_SETATTR_FSCRYPT_AUTH (1 << 11)
+#define CEPH_SETATTR_FSCRYPT_FILE (1 << 12)
/*
* Ceph setxattr request flags.
@@ -462,24 +467,26 @@ union ceph_mds_request_args {
} __attribute__ ((packed));
union ceph_mds_request_args_ext {
- union ceph_mds_request_args old;
- struct {
- __le32 mode;
- __le32 uid;
- __le32 gid;
- struct ceph_timespec mtime;
- struct ceph_timespec atime;
- __le64 size, old_size; /* old_size needed by truncate */
- __le32 mask; /* CEPH_SETATTR_* */
- struct ceph_timespec btime;
- } __attribute__ ((packed)) setattr_ext;
+ union {
+ union ceph_mds_request_args old;
+ struct {
+ __le32 mode;
+ __le32 uid;
+ __le32 gid;
+ struct ceph_timespec mtime;
+ struct ceph_timespec atime;
+ __le64 size, old_size; /* old_size needed by truncate */
+ __le32 mask; /* CEPH_SETATTR_* */
+ struct ceph_timespec btime;
+ } __attribute__ ((packed)) setattr_ext;
+ };
};
#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */
#define CEPH_MDS_FLAG_ASYNC 4 /* request is asynchronous */
-struct ceph_mds_request_head_old {
+struct ceph_mds_request_head_legacy {
__le64 oldest_client_tid;
__le32 mdsmap_epoch; /* on client */
__le32 flags; /* CEPH_MDS_FLAG_* */
@@ -492,9 +499,9 @@ struct ceph_mds_request_head_old {
union ceph_mds_request_args args;
} __attribute__ ((packed));
-#define CEPH_MDS_REQUEST_HEAD_VERSION 1
+#define CEPH_MDS_REQUEST_HEAD_VERSION 2
-struct ceph_mds_request_head {
+struct ceph_mds_request_head_old {
__le16 version; /* struct version */
__le64 oldest_client_tid;
__le32 mdsmap_epoch; /* on client */
@@ -508,6 +515,23 @@ struct ceph_mds_request_head {
union ceph_mds_request_args_ext args;
} __attribute__ ((packed));
+struct ceph_mds_request_head {
+ __le16 version; /* struct version */
+ __le64 oldest_client_tid;
+ __le32 mdsmap_epoch; /* on client */
+ __le32 flags; /* CEPH_MDS_FLAG_* */
+ __u8 num_retry, num_fwd; /* legacy count retry and fwd attempts */
+ __le16 num_releases; /* # include cap/lease release records */
+ __le32 op; /* mds op code */
+ __le32 caller_uid, caller_gid;
+ __le64 ino; /* use this ino for openc, mkdir, mknod,
+ etc. (if replaying) */
+ union ceph_mds_request_args_ext args;
+
+ __le32 ext_num_retry; /* new count retry attempts */
+ __le32 ext_num_fwd; /* new count fwd attempts */
+} __attribute__ ((packed));
+
/* cap/lease release record */
struct ceph_mds_request_release {
__le64 ino, cap_id; /* ino and unique cap id */
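
Two things happen in the ceph_fs.h hunk: the CEPH_SETATTR_* flags become explicit (1 << n) values so the new fscrypt and timestamp bits slot in cleanly, and the MDS request head gains a third layout (legacy, the old v1 head, and a new v2 head with 32-bit ext_num_retry/ext_num_fwd counters) behind CEPH_MDS_REQUEST_HEAD_VERSION 2. A small hedged example of composing the new mask; the function is illustrative only:

    #include <linux/ceph/ceph_fs.h>
    #include <asm/byteorder.h>   /* assumption: for cpu_to_le32() */

    static __le32 example_setattr_mask(void)
    {
            /* (1 << n) flags OR together without overlapping bits */
            return cpu_to_le32(CEPH_SETATTR_MODE |
                               CEPH_SETATTR_MTIME_NOW |
                               CEPH_SETATTR_FSCRYPT_AUTH);
    }
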
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 99c1726be6ee..2eaaabbe98cb 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -17,6 +17,7 @@
struct ceph_msg;
struct ceph_connection;
+struct ceph_msg_data_cursor;
/*
* Ceph defines these callbacks for handling connection events.
@@ -70,6 +71,30 @@ struct ceph_connection_operations {
int used_proto, int result,
const int *allowed_protos, int proto_cnt,
const int *allowed_modes, int mode_cnt);
+
+ /**
+ * sparse_read: read sparse data
+ * @con: connection we're reading from
+ * @cursor: data cursor for reading extents
+ * @buf: optional buffer to read into
+ *
+ * This should be called more than once, each time setting up to
+ * receive an extent into the current cursor position, and zeroing
+ * the holes between them.
+ *
+ * Returns amount of data to be read (in bytes), 0 if reading is
+ * complete, or -errno if there was an error.
+ *
+ * If @buf is set on a >0 return, then the data should be read into
+ * the provided buffer. Otherwise, it should be read into the cursor.
+ *
+ * The sparse read operation is expected to initialize the cursor
+ * with a length covering up to the end of the last extent.
+ */
+ int (*sparse_read)(struct ceph_connection *con,
+ struct ceph_msg_data_cursor *cursor,
+ char **buf);
+
};
/* use format string %s%lld */
@@ -98,6 +123,7 @@ enum ceph_msg_data_type {
CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */
#endif /* CONFIG_BLOCK */
CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */
+ CEPH_MSG_DATA_ITER, /* data source/destination is an iov_iter */
};
#ifdef CONFIG_BLOCK
@@ -199,6 +225,7 @@ struct ceph_msg_data {
bool own_pages;
};
struct ceph_pagelist *pagelist;
+ struct iov_iter iter;
};
};
@@ -207,6 +234,7 @@ struct ceph_msg_data_cursor {
struct ceph_msg_data *data; /* current data item */
size_t resid; /* bytes not yet consumed */
+ int sr_resid; /* residual sparse_read len */
bool need_crc; /* crc update needed */
union {
#ifdef CONFIG_BLOCK
@@ -222,6 +250,10 @@ struct ceph_msg_data_cursor {
struct page *page; /* page from list */
size_t offset; /* bytes from list */
};
+ struct {
+ struct iov_iter iov_iter;
+ unsigned int lastlen;
+ };
};
};
@@ -251,6 +283,7 @@ struct ceph_msg {
struct kref kref;
bool more_to_follow;
bool needs_out_seq;
+ bool sparse_read;
int front_alloc_len;
struct ceph_msgpool *pool;
@@ -309,6 +342,10 @@ struct ceph_connection_v1_info {
int in_base_pos; /* bytes read */
+ /* sparse reads */
+ struct kvec in_sr_kvec; /* current location to receive into */
+ u64 in_sr_len; /* amount of data in this extent */
+
/* message in temps */
u8 in_tag; /* protocol control byte */
struct ceph_msg_header in_hdr;
@@ -395,6 +432,7 @@ struct ceph_connection_v2_info {
void *conn_bufs[16];
int conn_buf_cnt;
+ int data_len_remain;
struct kvec in_sign_kvecs[8];
struct kvec out_sign_kvecs[8];
@@ -573,6 +611,8 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
#endif /* CONFIG_BLOCK */
void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
struct ceph_bvec_iter *bvec_pos);
+void ceph_msg_data_add_iter(struct ceph_msg *msg,
+ struct iov_iter *iter);
struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
gfp_t flags, bool can_fail);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index fb6be72104df..bf9823956758 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -29,14 +29,62 @@ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *);
#define CEPH_HOMELESS_OSD -1
-/* a given osd we're communicating with */
+/*
+ * A single extent in a SPARSE_READ reply.
+ *
+ * Note that these come from the OSD as little-endian values. On BE arches,
+ * we convert them in-place after receipt.
+ */
+struct ceph_sparse_extent {
+ u64 off;
+ u64 len;
+} __packed;
+
+/* Sparse read state machine state values */
+enum ceph_sparse_read_state {
+ CEPH_SPARSE_READ_HDR = 0,
+ CEPH_SPARSE_READ_EXTENTS,
+ CEPH_SPARSE_READ_DATA_LEN,
+ CEPH_SPARSE_READ_DATA,
+};
+
+/*
+ * A SPARSE_READ reply is a 32-bit count of extents, followed by an array of
+ * 64-bit offset/length pairs, and then all of the actual file data
+ * concatenated after it (sans holes).
+ *
+ * Unfortunately, we don't know how long the extent array is until we've
+ * started reading the data section of the reply. The caller should send down
+ * a destination buffer for the array, but we'll alloc one if it's too small
+ * or if the caller doesn't.
+ */
+struct ceph_sparse_read {
+ enum ceph_sparse_read_state sr_state; /* state machine state */
+ u64 sr_req_off; /* orig request offset */
+ u64 sr_req_len; /* orig request length */
+ u64 sr_pos; /* current pos in buffer */
+ int sr_index; /* current extent index */
+ __le32 sr_datalen; /* length of actual data */
+ u32 sr_count; /* extent count in reply */
+ int sr_ext_len; /* length of extent array */
+ struct ceph_sparse_extent *sr_extent; /* extent array */
+};
+
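For illustration, a reply describing two 4 KiB extents at offsets 0 and 8192 would carry the following on the wire (values arbitrary):

	__le32 count = cpu_to_le32(2);               /* extent count */
	struct ceph_sparse_extent ext[2] = {
		{ .off = 0,    .len = 4096 },
		{ .off = 8192, .len = 4096 },
	};
	/* ...followed by 8192 bytes of file data: both extents back to back, hole omitted */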
+/*
+ * A given osd we're communicating with.
+ *
+ * Note that the o_requests tree can be searched while holding the "lock" mutex
+ * or the "o_requests_lock" spinlock. Insertion or removal requires both!
+ */
struct ceph_osd {
refcount_t o_ref;
+ int o_sparse_op_idx;
struct ceph_osd_client *o_osdc;
int o_osd;
int o_incarnation;
struct rb_node o_node;
struct ceph_connection o_con;
+ spinlock_t o_requests_lock;
struct rb_root o_requests;
struct rb_root o_linger_requests;
struct rb_root o_backoff_mappings;
@@ -46,6 +94,7 @@ struct ceph_osd {
unsigned long lru_ttl;
struct list_head o_keepalive_item;
struct mutex lock;
+ struct ceph_sparse_read o_sparse_read;
};
#define CEPH_OSD_SLAB_OPS 2
@@ -59,6 +108,7 @@ enum ceph_osd_data_type {
CEPH_OSD_DATA_TYPE_BIO,
#endif /* CONFIG_BLOCK */
CEPH_OSD_DATA_TYPE_BVECS,
+ CEPH_OSD_DATA_TYPE_ITER,
};
struct ceph_osd_data {
@@ -82,6 +132,7 @@ struct ceph_osd_data {
struct ceph_bvec_iter bvec_pos;
u32 num_bvecs;
};
+ struct iov_iter iter;
};
};
@@ -98,6 +149,8 @@ struct ceph_osd_req_op {
u64 offset, length;
u64 truncate_size;
u32 truncate_seq;
+ int sparse_ext_cnt;
+ struct ceph_sparse_extent *sparse_ext;
struct ceph_osd_data osd_data;
} extent;
struct {
@@ -145,6 +198,9 @@ struct ceph_osd_req_op {
u32 src_fadvise_flags;
struct ceph_osd_data osd_data;
} copy_from;
+ struct {
+ u64 ver;
+ } assert_ver;
};
};
@@ -199,6 +255,7 @@ struct ceph_osd_request {
struct ceph_osd_client *r_osdc;
struct kref r_kref;
bool r_mempool;
+ bool r_linger; /* don't resend on failure */
struct completion r_completion; /* private to osd_client.c */
ceph_osdc_callback_t r_callback;
@@ -211,9 +268,9 @@ struct ceph_osd_request {
struct ceph_snap_context *r_snapc; /* for writes */
struct timespec64 r_mtime; /* ditto */
u64 r_data_offset; /* ditto */
- bool r_linger; /* don't resend on failure */
/* internal */
+ u64 r_version; /* data version sent in reply */
unsigned long r_stamp; /* jiffies, send or check time */
unsigned long r_start_stamp; /* jiffies */
ktime_t r_start_latency; /* ktime_t */
@@ -450,6 +507,8 @@ void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
unsigned int which,
struct ceph_bvec_iter *bvec_pos);
+void osd_req_op_extent_osd_iter(struct ceph_osd_request *osd_req,
+ unsigned int which, struct iov_iter *iter);
extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *,
unsigned int which,
@@ -504,6 +563,20 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
u32 truncate_seq, u64 truncate_size,
bool use_mempool);
+int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt);
+
+/*
+ * How big an extent array should we preallocate for a sparse read? This is
+ * just a starting value. If we get more than this back from the OSD, the
+ * receiver will reallocate.
+ */
+#define CEPH_SPARSE_EXT_ARRAY_INITIAL 16
+
+static inline int ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op)
+{
+ return __ceph_alloc_sparse_ext_map(op, CEPH_SPARSE_EXT_ARRAY_INITIAL);
+}
+
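An illustrative call pattern when preparing a sparse-read op, assuming the sparse read is op 0 of the request and leaving wider error handling to the caller:

	struct ceph_osd_req_op *op = &req->r_ops[0];
	int ret;

	ret = ceph_alloc_sparse_ext_map(op);  /* preallocates CEPH_SPARSE_EXT_ARRAY_INITIAL extents */
	if (ret)
		return ret;                   /* presumably -ENOMEM */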
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
extern void ceph_osdc_put_request(struct ceph_osd_request *req);
@@ -558,5 +631,19 @@ int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
struct ceph_object_locator *oloc,
struct ceph_watch_item **watchers,
u32 *num_watchers);
-#endif
+/* Find offset into the buffer of the end of the extent map */
+static inline u64 ceph_sparse_ext_map_end(struct ceph_osd_req_op *op)
+{
+ struct ceph_sparse_extent *ext;
+
+ /* No extents? No data */
+ if (op->extent.sparse_ext_cnt == 0)
+ return 0;
+
+ ext = &op->extent.sparse_ext[op->extent.sparse_ext_cnt - 1];
+
+ return ext->off + ext->len - op->extent.offset;
+}
+
+#endif
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 43a7a1573b51..73c3efbec36c 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -524,6 +524,10 @@ struct ceph_osd_op {
__le64 cookie;
} __attribute__ ((packed)) notify;
struct {
+ __le64 unused;
+ __le64 ver;
+ } __attribute__ ((packed)) assert_ver;
+ struct {
__le64 offset, length;
__le64 src_offset;
} __attribute__ ((packed)) clonerange;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index ae20dbb885d6..f1b3151ac30b 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -342,6 +342,20 @@ struct cgroup_rstat_cpu {
struct cgroup_base_stat last_bstat;
/*
+ * This field is used to record the cumulative per-cpu time of
+ * the cgroup and its descendants. Currently it can be read via
+ * eBPF/drgn etc., and we are still trying to determine how to
+ * expose it in the cgroupfs interface.
+ */
+ struct cgroup_base_stat subtree_bstat;
+
+ /*
+ * Snapshots at the last reading. These are used to calculate the
+ * deltas to propagate to the per-cpu subtree_bstat.
+ */
+ struct cgroup_base_stat last_subtree_bstat;
+
+ /*
* Child cgroups with stat updates on this cpu since the last read
* are linked on the parent's ->updated_children through
* ->updated_next.
diff --git a/include/linux/console.h b/include/linux/console.h
index d3195664baa5..7de11c763eb3 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -154,6 +154,8 @@ static inline int con_debug_leave(void)
* receiving the printk spam for obvious reasons.
* @CON_EXTENDED: The console supports the extended output format of
* /dev/kmesg which requires a larger output buffer.
+ * @CON_SUSPENDED: Indicates if a console is suspended. If true, the
+ * printing callbacks must not be called.
*/
enum cons_flags {
CON_PRINTBUFFER = BIT(0),
@@ -163,6 +165,7 @@ enum cons_flags {
CON_ANYTIME = BIT(4),
CON_BRL = BIT(5),
CON_EXTENDED = BIT(6),
+ CON_SUSPENDED = BIT(7),
};
/**
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index bf70987240e4..a269fffaf991 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -6,6 +6,8 @@
#ifndef _LINUX_CORESIGHT_H
#define _LINUX_CORESIGHT_H
+#include <linux/amba/bus.h>
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/perf_event.h>
@@ -386,6 +388,63 @@ static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa,
return csa->read(offset, true, false);
}
+#define CORESIGHT_CIDRn(i) (0xFF0 + ((i) * 4))
+
+static inline u32 coresight_get_cid(void __iomem *base)
+{
+ u32 i, cid = 0;
+
+ for (i = 0; i < 4; i++)
+ cid |= readl(base + CORESIGHT_CIDRn(i)) << (i * 8);
+
+ return cid;
+}
+
+static inline bool is_coresight_device(void __iomem *base)
+{
+ u32 cid = coresight_get_cid(base);
+
+ return cid == CORESIGHT_CID;
+}
+
+/*
+ * Attempt to find and enable "APB clock" for the given device
+ *
+ * Returns:
+ *
+ * clk - clock is found and enabled
+ * NULL - clock is not found
+ * ERROR - clock is found but failed to enable
+ */
+static inline struct clk *coresight_get_enable_apb_pclk(struct device *dev)
+{
+ struct clk *pclk;
+ int ret;
+
+ pclk = clk_get(dev, "apb_pclk");
+ if (IS_ERR(pclk))
+ return NULL;
+
+ ret = clk_prepare_enable(pclk);
+ if (ret) {
+ clk_put(pclk);
+ return ERR_PTR(ret);
+ }
+ return pclk;
+}
+
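A probe-time usage sketch, assuming the driver keeps the clock in a hypothetical drvdata->pclk for a later clk_disable_unprepare()/clk_put():

	drvdata->pclk = coresight_get_enable_apb_pclk(dev);
	if (IS_ERR(drvdata->pclk))
		return -ENODEV;
	/* a NULL return simply means there is no "apb_pclk"; that is not an error */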
+#define CORESIGHT_PIDRn(i) (0xFE0 + ((i) * 4))
+
+static inline u32 coresight_get_pid(struct csdev_access *csa)
+{
+ u32 i, pid = 0;
+
+ for (i = 0; i < 4; i++)
+ pid |= csdev_access_relaxed_read32(csa, CORESIGHT_PIDRn(i)) << (i * 8);
+
+ return pid;
+}
+
static inline u64 csdev_access_relaxed_read_pair(struct csdev_access *csa,
u32 lo_offset, u32 hi_offset)
{
diff --git a/include/linux/counter.h b/include/linux/counter.h
index b63746637de2..702e9108bbb4 100644
--- a/include/linux/counter.h
+++ b/include/linux/counter.h
@@ -399,7 +399,7 @@ struct counter_device {
struct mutex ops_exist_lock;
};
-void *counter_priv(const struct counter_device *const counter);
+void *counter_priv(const struct counter_device *const counter) __attribute_const__;
struct counter_device *counter_alloc(size_t sizeof_priv);
void counter_put(struct counter_device *const counter);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 43b363a99215..71d186d6933a 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -141,6 +141,9 @@ struct cpufreq_policy {
*/
bool dvfs_possible_from_any_cpu;
+ /* Per policy boost enabled flag. */
+ bool boost_enabled;
+
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
unsigned int cached_resolved_idx;
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
index 8904063d4c9f..6bfe70decc9f 100644
--- a/include/linux/dev_printk.h
+++ b/include/linux/dev_printk.h
@@ -274,4 +274,6 @@ do { \
WARN_ONCE(condition, "%s %s: " format, \
dev_driver_string(dev), dev_name(dev), ## arg)
+__printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
+
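For reference, the usual call pattern in a probe path (the clock name and message are illustrative):

	clk = devm_clk_get(dev, "core");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "failed to get core clock\n");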
#endif /* _DEVICE_PRINTK_H_ */
diff --git a/include/linux/device.h b/include/linux/device.h
index 941c0de6151a..56d93a1ffb7b 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1249,8 +1249,6 @@ void device_link_remove(void *consumer, struct device *supplier);
void device_links_supplier_sync_state_pause(void);
void device_links_supplier_sync_state_resume(void);
-__printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
-
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 27dbd4c64860..e34b601b71fd 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -106,8 +106,6 @@ static inline bool dmar_rcu_check(void)
extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
extern void dmar_register_bus_notifier(void);
-extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
- struct dmar_dev_scope **devices, u16 segment);
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 061dd84d09f3..4fcbf4d4fd0a 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -37,10 +37,12 @@ struct _ddebug {
#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2)
#define _DPRINTK_FLAGS_INCL_LINENO (1<<3)
#define _DPRINTK_FLAGS_INCL_TID (1<<4)
+#define _DPRINTK_FLAGS_INCL_SOURCENAME (1<<5)
#define _DPRINTK_FLAGS_INCL_ANY \
(_DPRINTK_FLAGS_INCL_MODNAME | _DPRINTK_FLAGS_INCL_FUNCNAME |\
- _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID)
+ _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID |\
+ _DPRINTK_FLAGS_INCL_SOURCENAME)
#if defined DEBUG
#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT
diff --git a/include/linux/elf-fdpic.h b/include/linux/elf-fdpic.h
index 3bea95a1af53..e533f4513194 100644
--- a/include/linux/elf-fdpic.h
+++ b/include/linux/elf-fdpic.h
@@ -10,13 +10,25 @@
#include <uapi/linux/elf-fdpic.h>
+#if ELF_CLASS == ELFCLASS32
+#define Elf_Sword Elf32_Sword
+#define elf_fdpic_loadseg elf32_fdpic_loadseg
+#define elf_fdpic_loadmap elf32_fdpic_loadmap
+#define ELF_FDPIC_LOADMAP_VERSION ELF32_FDPIC_LOADMAP_VERSION
+#else
+#define Elf_Sword Elf64_Sxword
+#define elf_fdpic_loadmap elf64_fdpic_loadmap
+#define elf_fdpic_loadseg elf64_fdpic_loadseg
+#define ELF_FDPIC_LOADMAP_VERSION ELF64_FDPIC_LOADMAP_VERSION
+#endif
+
/*
* binfmt binary parameters structure
*/
struct elf_fdpic_params {
struct elfhdr hdr; /* ref copy of ELF header */
struct elf_phdr *phdrs; /* ref copy of PT_PHDR table */
- struct elf32_fdpic_loadmap *loadmap; /* loadmap to be passed to userspace */
+ struct elf_fdpic_loadmap *loadmap; /* loadmap to be passed to userspace */
unsigned long elfhdr_addr; /* mapped ELF header user address */
unsigned long ph_addr; /* mapped PT_PHDR user address */
unsigned long map_addr; /* mapped loadmap user address */
diff --git a/include/linux/export.h b/include/linux/export.h
index beed8387e0a4..9911508a9604 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -50,7 +50,7 @@ extern struct module __this_module;
__EXPORT_SYMBOL_REF(sym) ASM_NL \
.previous
-#if !defined(CONFIG_MODULES) || defined(__DISABLE_EXPORTS)
+#if defined(__DISABLE_EXPORTS)
/*
* Allow symbol exports to be disabled completely so that C code may
@@ -75,7 +75,7 @@ extern struct module __this_module;
__ADDRESSABLE(sym) \
asm(__stringify(___EXPORT_SYMBOL(sym, license, ns)))
-#endif /* CONFIG_MODULES */
+#endif
#ifdef DEFAULT_SYMBOL_NAMESPACE
#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, __stringify(DEFAULT_SYMBOL_NAMESPACE))
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 3c45c3846fe9..e596a0abcb27 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -328,16 +328,4 @@ struct extcon_specific_cable_nb {
struct extcon_dev *edev;
unsigned long previous_value;
};
-
-static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
- const char *extcon_name, const char *cable_name,
- struct notifier_block *nb)
-{
- return -EINVAL;
-}
-
-static inline int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
-{
- return -EINVAL;
-}
#endif /* __LINUX_EXTCON_H__ */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 16c3e6d6c55d..c14576458228 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -588,7 +588,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
.fb_copyarea = sys_copyarea, \
.fb_imageblit = sys_imageblit
-/* drivers/video/fbmem.c */
+/* fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
@@ -631,7 +631,7 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
}
}
-/* drivers/video/fb_defio.c */
+/* fb_defio.c */
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
extern int fb_deferred_io_init(struct fb_info *info);
extern void fb_deferred_io_open(struct fb_info *info,
@@ -734,7 +734,7 @@ extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
extern void framebuffer_release(struct fb_info *info);
extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max);
-/* drivers/video/fbmon.c */
+/* fbmon.c */
#define FB_MAXTIMINGS 0
#define FB_VSYNCTIMINGS 1
#define FB_HSYNCTIMINGS 2
@@ -768,7 +768,7 @@ extern int of_get_fb_videomode(struct device_node *np,
extern int fb_videomode_from_videomode(const struct videomode *vm,
struct fb_videomode *fbmode);
-/* drivers/video/modedb.c */
+/* modedb.c */
#define VESA_MODEDB_SIZE 43
#define DMT_SIZE 0x50
@@ -794,7 +794,7 @@ extern void fb_videomode_to_modelist(const struct fb_videomode *modedb, int num,
extern const struct fb_videomode *fb_find_best_display(const struct fb_monspecs *specs,
struct list_head *head);
-/* drivers/video/fbcmap.c */
+/* fbcmap.c */
extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp);
extern int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags);
extern void fb_dealloc_cmap(struct fb_cmap *cmap);
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
index a718f853d457..ee80ca4bb0d0 100644
--- a/include/linux/firmware/intel/stratix10-smc.h
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -467,6 +467,31 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION)
/**
+ * SMC call protocol for Mailbox, starting FUNCID from 60
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_MBOX_SEND_CMD
+ * a1 mailbox command code
+ * a2 physical address that contains mailbox command data (not including header)
+ * a3 mailbox command data size in words
+ * a4 set to 0 for CASUAL, set to 1 for URGENT
+ * a5 physical address for secure firmware to put response data
+ * (not including header)
+ * a6 maximum size in words of the response buffer at that physical address
+ * a7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_REJECTED or
+ * INTEL_SIP_SMC_STATUS_ERROR
+ * a1 mailbox error code
+ * a2 response data length in words
+ * a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_MBOX_SEND_CMD 60
+ #define INTEL_SIP_SMC_MBOX_SEND_CMD \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_MBOX_SEND_CMD)
+
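A sketch of issuing the call through the generic SMCCC helper from <linux/arm-smccc.h>; cmd, cmd_paddr, cmd_words, resp_paddr and resp_words are assumed to have been prepared by the caller per the register layout above:

	struct arm_smccc_res res;

	arm_smccc_smc(INTEL_SIP_SMC_MBOX_SEND_CMD, cmd, cmd_paddr, cmd_words,
		      0 /* CASUAL */, resp_paddr, resp_words, 0, &res);
	if (res.a0 != INTEL_SIP_SMC_STATUS_OK)
		return -EIO;   /* res.a1 carries the mailbox error code */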
+/**
* Request INTEL_SIP_SMC_SVC_VERSION
*
* Sync call used to query the SIP SMC API Version
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index 0c16037fd08d..60ed82112680 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -118,6 +118,9 @@ struct stratix10_svc_chan;
* @COMMAND_SMC_SVC_VERSION: Non-mailbox SMC SVC API Version,
* return status is SVC_STATUS_OK
*
+ * @COMMAND_MBOX_SEND_CMD: send generic mailbox command, return status is
+ * SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
* @COMMAND_RSU_DCMF_STATUS: query firmware for the DCMF status
* return status is SVC_STATUS_OK or SVC_STATUS_ERROR
*
@@ -164,6 +167,8 @@ enum stratix10_svc_command_code {
COMMAND_FCS_RANDOM_NUMBER_GEN,
/* for general status poll */
COMMAND_POLL_SERVICE_STATUS = 40,
+ /* for generic mailbox send command */
+ COMMAND_MBOX_SEND_CMD = 100,
/* Non-mailbox SMC Call */
COMMAND_SMC_SVC_VERSION = 200,
};
diff --git a/include/linux/fs_uart_pd.h b/include/linux/fs_uart_pd.h
deleted file mode 100644
index 36b61ff39277..000000000000
--- a/include/linux/fs_uart_pd.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Platform information definitions for the CPM Uart driver.
- *
- * 2006 (c) MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef FS_UART_PD_H
-#define FS_UART_PD_H
-
-#include <asm/types.h>
-
-enum fs_uart_id {
- fsid_smc1_uart,
- fsid_smc2_uart,
- fsid_scc1_uart,
- fsid_scc2_uart,
- fsid_scc3_uart,
- fsid_scc4_uart,
- fs_uart_nr,
-};
-
-static inline int fs_uart_id_scc2fsid(int id)
-{
- return fsid_scc1_uart + id - 1;
-}
-
-static inline int fs_uart_id_fsid2scc(int id)
-{
- return id - fsid_scc1_uart + 1;
-}
-
-static inline int fs_uart_id_smc2fsid(int id)
-{
- return fsid_smc1_uart + id - 1;
-}
-
-static inline int fs_uart_id_fsid2smc(int id)
-{
- return id - fsid_smc1_uart + 1;
-}
-
-struct fs_uart_platform_info {
- void(*init_ioports)(struct fs_uart_platform_info *);
- /* device specific information */
- int fs_no; /* controller index */
- char fs_type[4]; /* controller type */
- u32 uart_clk;
- u8 tx_num_fifo;
- u8 tx_buf_size;
- u8 rx_num_fifo;
- u8 rx_buf_size;
- u8 brg;
- u8 clk_rx;
- u8 clk_tx;
-};
-
-static inline int fs_uart_get_id(struct fs_uart_platform_info *fpi)
-{
- if(strstr(fpi->fs_type, "SMC"))
- return fs_uart_id_smc2fsid(fpi->fs_no);
- if(strstr(fpi->fs_type, "SCC"))
- return fs_uart_id_scc2fsid(fpi->fs_no);
- return fpi->fs_no;
-}
-
-#endif
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index aad9cf8876b5..e8921871ef9a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -862,13 +862,8 @@ extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);
-
-extern void ftrace_disable_daemon(void);
-extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
-static inline void ftrace_disable_daemon(void) { }
-static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 0a221e768ea4..07e370113b2b 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -63,7 +63,7 @@ struct gameport_driver {
int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode);
void gameport_close(struct gameport *gameport);
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
void __gameport_register_port(struct gameport *gameport, struct module *owner);
/* use a define to avoid include chaining to get THIS_MODULE */
diff --git a/include/linux/greybus/svc.h b/include/linux/greybus/svc.h
index 5afaf5f06856..da547fb9071b 100644
--- a/include/linux/greybus/svc.h
+++ b/include/linux/greybus/svc.h
@@ -100,7 +100,4 @@ bool gb_svc_watchdog_enabled(struct gb_svc *svc);
int gb_svc_watchdog_enable(struct gb_svc *svc);
int gb_svc_watchdog_disable(struct gb_svc *svc);
-int gb_svc_protocol_init(void);
-void gb_svc_protocol_exit(void);
-
#endif /* __SVC_H */
diff --git a/include/linux/hid-roccat.h b/include/linux/hid-roccat.h
index 3214fb0815fc..753654fff07f 100644
--- a/include/linux/hid-roccat.h
+++ b/include/linux/hid-roccat.h
@@ -16,7 +16,7 @@
#ifdef __KERNEL__
-int roccat_connect(struct class *klass, struct hid_device *hid,
+int roccat_connect(const struct class *klass, struct hid_device *hid,
int report_size);
void roccat_disconnect(int minor);
int roccat_report_event(int minor, u8 const *data);
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 39e21e3815ad..964ca1f15e3f 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -341,6 +341,29 @@ struct hid_item {
*/
#define MAX_USBHID_BOOT_QUIRKS 4
+/**
+ * DOC: HID quirks
+ * | @HID_QUIRK_NOTOUCH:
+ * | @HID_QUIRK_IGNORE: ignore this device
+ * | @HID_QUIRK_NOGET:
+ * | @HID_QUIRK_HIDDEV_FORCE:
+ * | @HID_QUIRK_BADPAD:
+ * | @HID_QUIRK_MULTI_INPUT:
+ * | @HID_QUIRK_HIDINPUT_FORCE:
+ * | @HID_QUIRK_ALWAYS_POLL:
+ * | @HID_QUIRK_INPUT_PER_APP:
+ * | @HID_QUIRK_X_INVERT:
+ * | @HID_QUIRK_Y_INVERT:
+ * | @HID_QUIRK_SKIP_OUTPUT_REPORTS:
+ * | @HID_QUIRK_SKIP_OUTPUT_REPORT_ID:
+ * | @HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP:
+ * | @HID_QUIRK_HAVE_SPECIAL_DRIVER:
+ * | @HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE:
+ * | @HID_QUIRK_FULLSPEED_INTERVAL:
+ * | @HID_QUIRK_NO_INIT_REPORTS:
+ * | @HID_QUIRK_NO_IGNORE:
+ * | @HID_QUIRK_NO_INPUT_SYNC:
+ */
/* BIT(0) reserved for backward compatibility, was HID_QUIRK_INVERT */
#define HID_QUIRK_NOTOUCH BIT(1)
#define HID_QUIRK_IGNORE BIT(2)
@@ -360,6 +383,7 @@ struct hid_item {
#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18)
#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19)
#define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20)
+#define HID_QUIRK_NOINVERT BIT(21)
#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28)
#define HID_QUIRK_NO_INIT_REPORTS BIT(29)
#define HID_QUIRK_NO_IGNORE BIT(30)
@@ -555,9 +579,9 @@ struct hid_input {
struct hid_report *report;
struct input_dev *input;
const char *name;
- bool registered;
struct list_head reports; /* the list of reports */
unsigned int application; /* application usage for this input */
+ bool registered;
};
enum hid_type {
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 3ac3974b3c78..2b00faf98017 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -348,7 +348,7 @@ struct vmtransfer_page_packet_header {
u8 sender_owns_set;
u8 reserved;
u32 range_cnt;
- struct vmtransfer_page_range ranges[1];
+ struct vmtransfer_page_range ranges[];
} __packed;
struct vmgpadl_packet_header {
@@ -665,8 +665,8 @@ struct vmbus_channel_initiate_contact {
u64 interrupt_page;
struct {
u8 msg_sint;
- u8 padding1[3];
- u32 padding2;
+ u8 msg_vtl;
+ u8 reserved[6];
};
};
u64 monitor_page1;
diff --git a/include/linux/i2c-atr.h b/include/linux/i2c-atr.h
new file mode 100644
index 000000000000..4d5da161c225
--- /dev/null
+++ b/include/linux/i2c-atr.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * I2C Address Translator
+ *
+ * Copyright (c) 2019,2022 Luca Ceresoli <luca@lucaceresoli.net>
+ * Copyright (c) 2022,2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ *
+ * Based on i2c-mux.h
+ */
+
+#ifndef _LINUX_I2C_ATR_H
+#define _LINUX_I2C_ATR_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+struct device;
+struct fwnode_handle;
+struct i2c_atr;
+
+/**
+ * struct i2c_atr_ops - Callbacks from ATR to the device driver.
+ * @attach_client: Notify the driver of a new device connected on a child
+ * bus, with the alias assigned to it. The driver must
+ * configure the hardware to use the alias.
+ * @detach_client: Notify the driver of a device getting disconnected. The
+ * driver must configure the hardware to stop using the
+ * alias.
+ *
+ * All these functions return 0 on success, a negative error code otherwise.
+ */
+struct i2c_atr_ops {
+ int (*attach_client)(struct i2c_atr *atr, u32 chan_id,
+ const struct i2c_client *client, u16 alias);
+ void (*detach_client)(struct i2c_atr *atr, u32 chan_id,
+ const struct i2c_client *client);
+};
+
+/**
+ * i2c_atr_new() - Allocate and initialize an I2C ATR helper.
+ * @parent: The parent (upstream) adapter
+ * @dev: The device acting as an ATR
+ * @ops: Driver-specific callbacks
+ * @max_adapters: Maximum number of child adapters
+ *
+ * The new ATR helper is connected to the parent adapter but has no child
+ * adapters. Call i2c_atr_add_adapter() to add some.
+ *
+ * Call i2c_atr_delete() to remove.
+ *
+ * Return: pointer to the new ATR helper object, or ERR_PTR
+ */
+struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
+ const struct i2c_atr_ops *ops, int max_adapters);
+
+/**
+ * i2c_atr_delete - Delete an I2C ATR helper.
+ * @atr: I2C ATR helper to be deleted.
+ *
+ * Precondition: all the adapters added with i2c_atr_add_adapter() must be
+ * removed by calling i2c_atr_del_adapter().
+ */
+void i2c_atr_delete(struct i2c_atr *atr);
+
+/**
+ * i2c_atr_add_adapter - Create a child ("downstream") I2C bus.
+ * @atr: The I2C ATR
+ * @chan_id: Index of the new adapter (0 .. max_adapters-1). This value is
+ * passed to the callbacks in `struct i2c_atr_ops`.
+ * @adapter_parent: The device used as the parent of the new i2c adapter, or NULL
+ * to use the i2c-atr device as the parent.
+ * @bus_handle: The fwnode handle that points to the adapter's i2c
+ * peripherals, or NULL.
+ *
+ * After calling this function a new i2c bus will appear. Adding and removing
+ * devices on the downstream bus will result in calls to the
+ * &i2c_atr_ops->attach_client and &i2c_atr_ops->detach_client callbacks for the
+ * driver to assign an alias to the device.
+ *
+ * The adapter's fwnode is set to @bus_handle, or if @bus_handle is NULL the
+ * function looks for a child node whose 'reg' property matches the chan_id
+ * under the i2c-atr device's 'i2c-atr' node.
+ *
+ * Call i2c_atr_del_adapter() to remove the adapter.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
+ struct device *adapter_parent,
+ struct fwnode_handle *bus_handle);
+
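A usage sketch from a hypothetical deserializer driver with two downstream ports, where my_atr_ops provides the attach_client/detach_client callbacks and parent_adap is the upstream adapter:

	atr = i2c_atr_new(parent_adap, dev, &my_atr_ops, 2);
	if (IS_ERR(atr))
		return PTR_ERR(atr);

	ret = i2c_atr_add_adapter(atr, 0, NULL, NULL);  /* child bus 0, fwnode looked up automatically */
	if (ret) {
		i2c_atr_delete(atr);
		return ret;
	}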
+/**
+ * i2c_atr_del_adapter - Remove a child ("downstream") I2C bus added by
+ * i2c_atr_add_adapter(). If no I2C bus has been added
+ * this function is a no-op.
+ * @atr: The I2C ATR
+ * @chan_id: Index of the adapter to be removed (0 .. max_adapters-1)
+ */
+void i2c_atr_del_adapter(struct i2c_atr *atr, u32 chan_id);
+
+/**
+ * i2c_atr_set_driver_data - Set private driver data to the i2c-atr instance.
+ * @atr: The I2C ATR
+ * @data: Pointer to the data to store
+ */
+void i2c_atr_set_driver_data(struct i2c_atr *atr, void *data);
+
+/**
+ * i2c_atr_get_driver_data - Get the stored driver data.
+ * @atr: The I2C ATR
+ *
+ * Return: Pointer to the stored data
+ */
+void *i2c_atr_get_driver_data(struct i2c_atr *atr);
+
+#endif /* _LINUX_I2C_ATR_H */
diff --git a/include/linux/iio/common/inv_sensors_timestamp.h b/include/linux/iio/common/inv_sensors_timestamp.h
new file mode 100644
index 000000000000..a47d304d1ba7
--- /dev/null
+++ b/include/linux/iio/common/inv_sensors_timestamp.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 Invensense, Inc.
+ */
+
+#ifndef INV_SENSORS_TIMESTAMP_H_
+#define INV_SENSORS_TIMESTAMP_H_
+
+/**
+ * struct inv_sensors_timestamp_chip - chip internal properties
+ * @clock_period: internal clock period in ns
+ * @jitter: acceptable jitter in per-mille
+ * @init_period: chip initial period at reset in ns
+ */
+struct inv_sensors_timestamp_chip {
+ uint32_t clock_period;
+ uint32_t jitter;
+ uint32_t init_period;
+};
+
+/**
+ * struct inv_sensors_timestamp_interval - timestamps interval
+ * @lo: interval lower bound
+ * @up: interval upper bound
+ */
+struct inv_sensors_timestamp_interval {
+ int64_t lo;
+ int64_t up;
+};
+
+/**
+ * struct inv_sensors_timestamp_acc - accumulator for computing an estimation
+ * @val: current estimation of the value, the mean of all values
+ * @idx: current index of the next free place in values table
+ * @values: table of all measured values, used for computing the mean
+ */
+struct inv_sensors_timestamp_acc {
+ uint32_t val;
+ size_t idx;
+ uint32_t values[32];
+};
+
+/**
+ * struct inv_sensors_timestamp - timestamp management states
+ * @chip: chip internal characteristics
+ * @min_period: minimal acceptable clock period
+ * @max_period: maximal acceptable clock period
+ * @it: interrupts interval timestamps
+ * @timestamp: store last timestamp for computing next data timestamp
+ * @mult: current internal period multiplier
+ * @new_mult: new set internal period multiplier (not yet effective)
+ * @period: measured current period of the sensor
+ * @chip_period: accumulator for computing internal chip period
+ */
+struct inv_sensors_timestamp {
+ struct inv_sensors_timestamp_chip chip;
+ uint32_t min_period;
+ uint32_t max_period;
+ struct inv_sensors_timestamp_interval it;
+ int64_t timestamp;
+ uint32_t mult;
+ uint32_t new_mult;
+ uint32_t period;
+ struct inv_sensors_timestamp_acc chip_period;
+};
+
+void inv_sensors_timestamp_init(struct inv_sensors_timestamp *ts,
+ const struct inv_sensors_timestamp_chip *chip);
+
+int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts,
+ uint32_t period, bool fifo);
+
+void inv_sensors_timestamp_interrupt(struct inv_sensors_timestamp *ts,
+ uint32_t fifo_period, size_t fifo_nb,
+ size_t sensor_nb, int64_t timestamp);
+
+static inline int64_t inv_sensors_timestamp_pop(struct inv_sensors_timestamp *ts)
+{
+ ts->timestamp += ts->period;
+ return ts->timestamp;
+}
+
+void inv_sensors_timestamp_apply_odr(struct inv_sensors_timestamp *ts,
+ uint32_t fifo_period, size_t fifo_nb,
+ unsigned int fifo_no);
+
+static inline void inv_sensors_timestamp_reset(struct inv_sensors_timestamp *ts)
+{
+ const struct inv_sensors_timestamp_interval interval_init = {0LL, 0LL};
+
+ ts->it = interval_init;
+ ts->timestamp = 0;
+}
+
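A minimal usage flow, assuming a sensor with a 1 ms internal clock period running at a 1 kHz ODR; the chip numbers and the fifo_period/fifo_nb/sensor_nb/it_ts values from the driver's FIFO handling are illustrative:

	static const struct inv_sensors_timestamp_chip chip = {
		.clock_period = 1000000,  /* 1 ms, in ns */
		.jitter = 20,             /* 2%, in per-mille */
		.init_period = 1000000,
	};
	struct inv_sensors_timestamp ts;
	int64_t sample_ts;

	inv_sensors_timestamp_init(&ts, &chip);
	inv_sensors_timestamp_update_odr(&ts, 1000000, true);
	/* in the FIFO interrupt handler: */
	inv_sensors_timestamp_interrupt(&ts, fifo_period, fifo_nb, sensor_nb, it_ts);
	/* then once per sample parsed out of the FIFO: */
	sample_ts = inv_sensors_timestamp_pop(&ts);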
+#endif
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 82faa98c719a..117bde7d6ad7 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -19,6 +19,8 @@ enum iio_event_info {
IIO_EV_INFO_TIMEOUT,
IIO_EV_INFO_RESET_TIMEOUT,
IIO_EV_INFO_TAP2_MIN_DELAY,
+ IIO_EV_INFO_RUNNING_PERIOD,
+ IIO_EV_INFO_RUNNING_COUNT,
};
#define IIO_VAL_INT 1
diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h
index f505788c05da..04d937ad4dc4 100644
--- a/include/linux/intel_tpmi.h
+++ b/include/linux/intel_tpmi.h
@@ -27,4 +27,6 @@ struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *aux
struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index);
int tpmi_get_resource_count(struct auxiliary_device *auxdev);
+int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, int *locked,
+ int *disabled);
#endif
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
index e6d8aca6886d..7ba183f221f1 100644
--- a/include/linux/interconnect-provider.h
+++ b/include/linux/interconnect-provider.h
@@ -33,7 +33,7 @@ struct icc_node_data {
*/
struct icc_onecell_data {
unsigned int num_nodes;
- struct icc_node *nodes[];
+ struct icc_node *nodes[] __counted_by(num_nodes);
};
struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index bd6a1110b294..c50a769d569a 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -196,6 +196,8 @@ enum iommu_dev_features {
IOMMU_DEV_FEAT_IOPF,
};
+#define IOMMU_NO_PASID (0U) /* Reserved for DMA w/o PASID */
+#define IOMMU_FIRST_GLOBAL_PASID (1U) /*starting range for allocation */
#define IOMMU_PASID_INVALID (-1U)
typedef unsigned int ioasid_t;
@@ -414,6 +416,8 @@ struct iommu_fault_param {
* @priv: IOMMU Driver private data
* @max_pasids: number of PASIDs this device can consume
* @attach_deferred: the dma domain attachment is deferred
+ * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
+ * @require_direct: device requires IOMMU_RESV_DIRECT regions
*
* TODO: migrate other per device data pointers under iommu_dev_data, e.g.
* struct iommu_group *iommu_group;
@@ -427,6 +431,8 @@ struct dev_iommu {
void *priv;
u32 max_pasids;
u32 attach_deferred:1;
+ u32 pci_32bit_workaround:1;
+ u32 require_direct:1;
};
int iommu_device_register(struct iommu_device *iommu,
@@ -721,6 +727,8 @@ void iommu_detach_device_pasid(struct iommu_domain *domain,
struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
unsigned int type);
+ioasid_t iommu_alloc_global_pasid(struct device *dev);
+void iommu_free_global_pasid(ioasid_t pasid);
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
@@ -1082,6 +1090,13 @@ iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
{
return NULL;
}
+
+static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
+{
+ return IOMMU_PASID_INVALID;
+}
+
+static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */
/**
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 5883551b1ee8..af8a771a053c 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -147,6 +147,7 @@ struct inet6_skb_parm {
#define IP6SKB_JUMBOGRAM 128
#define IP6SKB_SEG6 256
#define IP6SKB_FAKEJUMBO 512
+#define IP6SKB_MULTIPATH 1024
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 819b6bc8ac08..3df5499f7936 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -54,11 +54,13 @@ extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
+#ifndef __HAVE_ARCH_SHADOW_MAP
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ KASAN_SHADOW_OFFSET;
}
+#endif
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 73f5c120def8..2a36f3218b51 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -550,6 +550,10 @@ static inline int kernfs_setattr(struct kernfs_node *kn,
const struct iattr *iattr)
{ return -ENOSYS; }
+static inline __poll_t kernfs_generic_poll(struct kernfs_open_file *of,
+ struct poll_table_struct *pt)
+{ return -ENOSYS; }
+
static inline void kernfs_notify(struct kernfs_node *kn) { }
static inline int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index c392c811d9ad..c30affcc43b4 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -69,14 +69,16 @@ struct kobject {
const struct kobj_type *ktype;
struct kernfs_node *sd; /* sysfs directory entry */
struct kref kref;
-#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
- struct delayed_work release;
-#endif
+
unsigned int state_initialized:1;
unsigned int state_in_sysfs:1;
unsigned int state_add_uevent_sent:1;
unsigned int state_remove_uevent_sent:1;
unsigned int uevent_suppress:1;
+
+#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
+ struct delayed_work release;
+#endif
};
__printf(2, 3) int kobject_set_name(struct kobject *kobj, const char *name, ...);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9d3ac7720da9..fb6c6109fdca 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -190,8 +190,6 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
struct kvm_vcpu *except);
-bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
- unsigned long *vcpu_bitmap);
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -256,11 +254,15 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+union kvm_mmu_notifier_arg {
+ pte_t pte;
+};
+
struct kvm_gfn_range {
struct kvm_memory_slot *slot;
gfn_t start;
gfn_t end;
- pte_t pte;
+ union kvm_mmu_notifier_arg arg;
bool may_block;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
@@ -865,6 +867,25 @@ static inline void kvm_vm_bugged(struct kvm *kvm)
unlikely(__ret); \
})
+/*
+ * Note, "data corruption" refers to corruption of host kernel data structures,
+ * not guest data. Guest data corruption, suspected or confirmed, that is tied
+ * and contained to a single VM should *never* BUG() and potentially panic the
+ * host, i.e. use this variant of KVM_BUG() if and only if a KVM data structure
+ * is corrupted and that corruption can have a cascading effect to other parts
+ * of the hosts and/or to other VMs.
+ */
+#define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \
+({ \
+ bool __ret = !!(cond); \
+ \
+ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) \
+ BUG_ON(__ret); \
+ else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
+
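An illustrative call site, where sp is a hypothetical KVM-internal structure whose list linkage is being validated:

	if (KVM_BUG_ON_DATA_CORRUPTION(list_empty(&sp->link), kvm))
		return;   /* either BUG()s, or marks the VM bugged and bails out */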
static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PROVE_RCU
@@ -1359,6 +1380,9 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
void kvm_flush_remote_tlbs(struct kvm *kvm);
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
+void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot);
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
@@ -1387,10 +1411,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
-#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
- const struct kvm_memory_slot *memslot);
-#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
+#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
int *is_dirty, struct kvm_memory_slot **memslot);
@@ -1479,11 +1500,23 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
}
#endif
-#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
-static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
+static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
return -ENOTSUPP;
}
+#else
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
+#endif
+
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
+ gfn_t gfn, u64 nr_pages)
+{
+ return -EOPNOTSUPP;
+}
+#else
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
#endif
#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
@@ -2148,8 +2181,6 @@ struct kvm_device_ops {
int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};
-void kvm_device_get(struct kvm_device *dev);
-void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 7d428100b42b..aa16dc2a8230 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -100,6 +100,7 @@ struct led_classdev {
const char *name;
unsigned int brightness;
unsigned int max_brightness;
+ unsigned int color;
int flags;
/* Lower 16 bits reflect status */
@@ -313,6 +314,8 @@ extern struct led_classdev *of_led_get(struct device_node *np, int index);
extern void led_put(struct led_classdev *led_cdev);
struct led_classdev *__must_check devm_of_led_get(struct device *dev,
int index);
+struct led_classdev *__must_check devm_of_led_get_optional(struct device *dev,
+ int index);
/**
* led_blink_set - set blinking with software fallback
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 820f7a3a2749..52d58b13e5ee 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -344,7 +344,6 @@ enum {
ATA_LINK_RESUME_TRIES = 5,
/* how hard are we gonna try to probe/recover devices */
- ATA_PROBE_MAX_TRIES = 3,
ATA_EH_DEV_TRIES = 3,
ATA_EH_PMP_TRIES = 5,
ATA_EH_PMP_LINK_TRIES = 3,
@@ -977,12 +976,6 @@ struct ata_port_operations {
ssize_t size);
/*
- * Obsolete
- */
- void (*phy_reset)(struct ata_port *ap);
- void (*eng_timeout)(struct ata_port *ap);
-
- /*
* ->inherits must be the last field and all the preceding
* fields must be pointers.
*/
@@ -1116,7 +1109,7 @@ static inline void ata_sas_port_resume(struct ata_port *ap)
extern int ata_ratelimit(void);
extern void ata_msleep(struct ata_port *ap, unsigned int msecs);
extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask,
- u32 val, unsigned long interval, unsigned long timeout);
+ u32 val, unsigned int interval, unsigned int timeout);
extern int atapi_cmd_type(u8 opcode);
extern unsigned int ata_pack_xfermask(unsigned int pio_mask,
unsigned int mwdma_mask,
@@ -1166,11 +1159,11 @@ extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *
* SATA specific code - drivers/ata/libata-sata.c
*/
#ifdef CONFIG_SATA_HOST
-extern const unsigned long sata_deb_timing_normal[];
-extern const unsigned long sata_deb_timing_hotplug[];
-extern const unsigned long sata_deb_timing_long[];
+extern const unsigned int sata_deb_timing_normal[];
+extern const unsigned int sata_deb_timing_hotplug[];
+extern const unsigned int sata_deb_timing_long[];
-static inline const unsigned long *
+static inline const unsigned int *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
@@ -1185,14 +1178,14 @@ extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
extern int sata_set_spd(struct ata_link *link);
extern int sata_link_hardreset(struct ata_link *link,
- const unsigned long *timing, unsigned long deadline,
+ const unsigned int *timing, unsigned long deadline,
bool *online, int (*check_ready)(struct ata_link *));
-extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
+extern int sata_link_resume(struct ata_link *link, const unsigned int *params,
unsigned long deadline);
extern int ata_eh_read_sense_success_ncq_log(struct ata_link *link);
extern void ata_eh_analyze_ncq_error(struct ata_link *link);
#else
-static inline const unsigned long *
+static inline const unsigned int *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
return NULL;
@@ -1212,7 +1205,7 @@ static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
}
static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; }
static inline int sata_link_hardreset(struct ata_link *link,
- const unsigned long *timing,
+ const unsigned int *timing,
unsigned long deadline,
bool *online,
int (*check_ready)(struct ata_link *))
@@ -1222,7 +1215,7 @@ static inline int sata_link_hardreset(struct ata_link *link,
return -EOPNOTSUPP;
}
static inline int sata_link_resume(struct ata_link *link,
- const unsigned long *params,
+ const unsigned int *params,
unsigned long deadline)
{
return -EOPNOTSUPP;
@@ -1234,20 +1227,15 @@ static inline int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { }
#endif
extern int sata_link_debounce(struct ata_link *link,
- const unsigned long *params, unsigned long deadline);
+ const unsigned int *params, unsigned long deadline);
extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
bool spm_wakeup);
extern int ata_slave_link_init(struct ata_port *ap);
-extern void ata_sas_port_destroy(struct ata_port *);
extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
struct ata_port_info *, struct Scsi_Host *);
-extern void ata_sas_async_probe(struct ata_port *ap);
-extern int ata_sas_sync_probe(struct ata_port *ap);
-extern int ata_sas_port_init(struct ata_port *);
-extern int ata_sas_port_start(struct ata_port *ap);
+extern void ata_port_probe(struct ata_port *ap);
extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
extern void ata_sas_tport_delete(struct ata_port *ap);
-extern void ata_sas_port_stop(struct ata_port *ap);
extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
extern void ata_tf_to_fis(const struct ata_taskfile *tf,
@@ -1785,7 +1773,7 @@ static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
{
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
- if (unlikely(!qc) || !ap->ops->error_handler)
+ if (unlikely(!qc))
return qc;
if ((qc->flags & (ATA_QCFLAG_ACTIVE |
@@ -1876,7 +1864,7 @@ static inline int ata_check_ready(u8 status)
}
static inline unsigned long ata_deadline(unsigned long from_jiffies,
- unsigned long timeout_msecs)
+ unsigned int timeout_msecs)
{
return from_jiffies + msecs_to_jiffies(timeout_msecs);
}
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index f42594a9efe0..0f016d69c996 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -204,6 +204,8 @@ extern unsigned long nlmsvc_timeout;
extern bool nsm_use_hostnames;
extern u32 nsm_local_state;
+extern struct timer_list nlmsvc_retry;
+
/*
* Lockd client functions
*/
@@ -280,7 +282,7 @@ __be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *,
struct nlm_host *, struct nlm_lock *,
struct nlm_lock *, struct nlm_cookie *);
__be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *);
-unsigned long nlmsvc_retry_blocked(void);
+void nlmsvc_retry_blocked(void);
void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
nlm_host_match_fn_t match);
void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h
index 473545a2c425..6fa21791fc85 100644
--- a/include/linux/mfd/88pm860x.h
+++ b/include/linux/mfd/88pm860x.h
@@ -472,13 +472,7 @@ extern int pm860x_bulk_read(struct i2c_client *, int, int, unsigned char *);
extern int pm860x_bulk_write(struct i2c_client *, int, int, unsigned char *);
extern int pm860x_set_bits(struct i2c_client *, int, unsigned char,
unsigned char);
-extern int pm860x_page_reg_read(struct i2c_client *, int);
extern int pm860x_page_reg_write(struct i2c_client *, int, unsigned char);
extern int pm860x_page_bulk_read(struct i2c_client *, int, int,
unsigned char *);
-extern int pm860x_page_bulk_write(struct i2c_client *, int, int,
- unsigned char *);
-extern int pm860x_page_set_bits(struct i2c_client *, int, unsigned char,
- unsigned char);
-
#endif /* __LINUX_MFD_88PM860X_H */
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index 302a330c5c84..09fb3c56e7d7 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -382,10 +382,6 @@ struct ab8500_platform_data {
struct ab8500_sysctrl_platform_data *sysctrl;
};
-extern int ab8500_init(struct ab8500 *ab8500,
- enum ab8500_version version);
-extern int ab8500_exit(struct ab8500 *ab8500);
-
extern int ab8500_suspend(struct ab8500 *ab8500);
static inline int is_ab8500(struct ab8500 *ab)
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index e7a7e70fdb38..dd0fc891b228 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -556,16 +556,6 @@ static inline void prcmu_clear(unsigned int reg, u32 bits)
#define PRCMU_QOS_ARM_OPP 3
#define PRCMU_QOS_DEFAULT_VALUE -1
-static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void)
-{
- return 0;
-}
-
-static inline int prcmu_qos_requirement(int prcmu_qos_class)
-{
- return 0;
-}
-
static inline int prcmu_qos_add_requirement(int prcmu_qos_class,
char *name, s32 value)
{
@@ -582,15 +572,4 @@ static inline void prcmu_qos_remove_requirement(int prcmu_qos_class, char *name)
{
}
-static inline int prcmu_qos_add_notifier(int prcmu_qos_class,
- struct notifier_block *notifier)
-{
- return 0;
-}
-static inline int prcmu_qos_remove_notifier(int prcmu_qos_class,
- struct notifier_block *notifier)
-{
- return 0;
-}
-
#endif /* __MACH_PRCMU_H */
diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h
index 6a012784dd1b..194556851ccf 100644
--- a/include/linux/mfd/hi655x-pmic.h
+++ b/include/linux/mfd/hi655x-pmic.h
@@ -52,7 +52,6 @@
#define OTMP_D1R_INT_MASK BIT(OTMP_D1R_INT)
struct hi655x_pmic {
- struct resource *res;
struct device *dev;
struct regmap *regmap;
struct gpio_desc *gpio;
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index 3acceeedbaba..ea635d12a741 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -441,8 +441,4 @@ enum max77686_types {
TYPE_MAX77802,
};
-extern int max77686_irq_init(struct max77686_dev *max77686);
-extern void max77686_irq_exit(struct max77686_dev *max77686);
-extern int max77686_irq_resume(struct max77686_dev *max77686);
-
#endif /* __LINUX_MFD_MAX77686_PRIV_H */
diff --git a/include/linux/mfd/rz-mtu3.h b/include/linux/mfd/rz-mtu3.h
index c5173bc06270..8421d49500bf 100644
--- a/include/linux/mfd/rz-mtu3.h
+++ b/include/linux/mfd/rz-mtu3.h
@@ -151,7 +151,6 @@ struct rz_mtu3 {
void *priv_data;
};
-#if IS_ENABLED(CONFIG_RZ_MTU3)
static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch)
{
mutex_lock(&ch->lock);
@@ -188,70 +187,5 @@ void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val);
void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val);
void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch, u16 off,
u16 pos, u8 val);
-#else
-static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch)
-{
- return false;
-}
-
-static inline void rz_mtu3_release_channel(struct rz_mtu3_channel *ch)
-{
-}
-
-static inline bool rz_mtu3_is_enabled(struct rz_mtu3_channel *ch)
-{
- return false;
-}
-
-static inline void rz_mtu3_disable(struct rz_mtu3_channel *ch)
-{
-}
-
-static inline int rz_mtu3_enable(struct rz_mtu3_channel *ch)
-{
- return 0;
-}
-
-static inline u8 rz_mtu3_8bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
-{
- return 0;
-}
-
-static inline u16 rz_mtu3_16bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
-{
- return 0;
-}
-
-static inline u32 rz_mtu3_32bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
-{
- return 0;
-}
-
-static inline u16 rz_mtu3_shared_reg_read(struct rz_mtu3_channel *ch, u16 off)
-{
- return 0;
-}
-
-static inline void rz_mtu3_8bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u8 val)
-{
-}
-
-static inline void rz_mtu3_16bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u16 val)
-{
-}
-
-static inline void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val)
-{
-}
-
-static inline void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val)
-{
-}
-
-static inline void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch,
- u16 off, u16 pos, u8 val)
-{
-}
-#endif
#endif /* __MFD_RZ_MTU3_H__ */
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index f6de4b6ecfc7..039943ec4d4e 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -299,6 +299,10 @@ struct mhi_controller_config {
* @iova_start: IOMMU starting address for data (required)
* @iova_stop: IOMMU stop address for data (required)
* @fw_image: Firmware image name for normal booting (optional)
+ * @fw_data: Firmware image data content for normal booting, used only
+ * if fw_image is NULL and fbc_download is true (optional)
+ * @fw_sz: Firmware image data size for normal booting, used only if fw_image
+ * is NULL and fbc_download is true (optional)
* @edl_image: Firmware image name for emergency download mode (optional)
* @rddm_size: RAM dump size that host should allocate for debugging purpose
* @sbl_size: SBL image size downloaded through BHIe (optional)
@@ -384,6 +388,8 @@ struct mhi_controller {
dma_addr_t iova_start;
dma_addr_t iova_stop;
const char *fw_image;
+ const u8 *fw_data;
+ size_t fw_sz;
const char *edl_image;
size_t rddm_size;
size_t sbl_size;
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 8bef1ab62bba..4e27ca7c49de 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -41,9 +41,10 @@
#define PHY_ID_KSZ9477 0x00221631
/* struct phy_device dev_flags definitions */
-#define MICREL_PHY_50MHZ_CLK 0x00000001
-#define MICREL_PHY_FXEN 0x00000002
-#define MICREL_KSZ8_P1_ERRATA 0x00000003
+#define MICREL_PHY_50MHZ_CLK BIT(0)
+#define MICREL_PHY_FXEN BIT(1)
+#define MICREL_KSZ8_P1_ERRATA BIT(2)
+#define MICREL_NO_EEE BIT(3)
#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h
index c238207d1615..e799b1f8d05b 100644
--- a/include/linux/misc_cgroup.h
+++ b/include/linux/misc_cgroup.h
@@ -31,17 +31,18 @@ struct misc_cg;
* struct misc_res: Per cgroup per misc type resource
* @max: Maximum limit on the resource.
* @usage: Current usage of the resource.
- * @failed: True if charged failed for the resource in a cgroup.
+ * @events: Number of times the resource limit was exceeded.
*/
struct misc_res {
- unsigned long max;
- atomic_long_t usage;
- atomic_long_t events;
+ u64 max;
+ atomic64_t usage;
+ atomic64_t events;
};
/**
* struct misc_cg - Miscellaneous controller's cgroup structure.
* @css: cgroup subsys state object.
+ * @events_file: Handle for the misc resources events file.
* @res: Array of misc resources usage in the cgroup.
*/
struct misc_cg {
@@ -53,12 +54,10 @@ struct misc_cg {
struct misc_res res[MISC_CG_RES_TYPES];
};
-unsigned long misc_cg_res_total_usage(enum misc_res_type type);
-int misc_cg_set_capacity(enum misc_res_type type, unsigned long capacity);
-int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg,
- unsigned long amount);
-void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg,
- unsigned long amount);
+u64 misc_cg_res_total_usage(enum misc_res_type type);
+int misc_cg_set_capacity(enum misc_res_type type, u64 capacity);
+int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
+void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
/**
* css_misc() - Get misc cgroup from the css.
@@ -99,27 +98,26 @@ static inline void put_misc_cg(struct misc_cg *cg)
#else /* !CONFIG_CGROUP_MISC */
-static inline unsigned long misc_cg_res_total_usage(enum misc_res_type type)
+static inline u64 misc_cg_res_total_usage(enum misc_res_type type)
{
return 0;
}
-static inline int misc_cg_set_capacity(enum misc_res_type type,
- unsigned long capacity)
+static inline int misc_cg_set_capacity(enum misc_res_type type, u64 capacity)
{
return 0;
}
static inline int misc_cg_try_charge(enum misc_res_type type,
struct misc_cg *cg,
- unsigned long amount)
+ u64 amount)
{
return 0;
}
static inline void misc_cg_uncharge(enum misc_res_type type,
struct misc_cg *cg,
- unsigned long amount)
+ u64 amount)
{
}
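A minimal charging sketch against the u64-based interface above; the caller, the resource type argument and the elided "consume" step are hypothetical.

	static int example_charge(enum misc_res_type type, struct misc_cg *cg,
				  u64 amount)
	{
		int ret = misc_cg_try_charge(type, cg, amount);

		if (ret)
			return ret;	/* limit hit; the events counter is bumped */

		/* ... consume the resource ... */

		misc_cg_uncharge(type, cg, amount);
		return 0;
	}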
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 7c58c44662b8..914a9f974baa 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -379,7 +379,7 @@ struct mtd_info {
struct module *owner;
struct device dev;
- int usecount;
+ struct kref refcnt;
struct mtd_debug_info dbg;
struct nvmem_device *nvmem;
struct nvmem_device *otp_user_nvmem;
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 5159d692f9ce..90a141ba2a5a 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -1540,6 +1540,7 @@ int nand_reset_op(struct nand_chip *chip);
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
unsigned int len);
int nand_status_op(struct nand_chip *chip, u8 *status);
+int nand_exit_status_op(struct nand_chip *chip);
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf, unsigned int len);
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index fa030d93b768..4523e4e83319 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -43,6 +43,8 @@ enum {
NVMEM_REMOVE,
NVMEM_CELL_ADD,
NVMEM_CELL_REMOVE,
+ NVMEM_LAYOUT_ADD,
+ NVMEM_LAYOUT_REMOVE,
};
#if IS_ENABLED(CONFIG_NVMEM)
@@ -256,7 +258,7 @@ static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
static inline struct device_node *
of_nvmem_layout_get_container(struct nvmem_device *nvmem)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return NULL;
}
#endif /* CONFIG_NVMEM && CONFIG_OF */
diff --git a/include/linux/of.h b/include/linux/of.h
index ed679819c279..6a9ddf20e79a 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1676,8 +1676,8 @@ int of_overlay_notifier_unregister(struct notifier_block *nb);
#else
-static inline int of_overlay_fdt_apply(void *overlay_fdt, u32 overlay_fdt_size,
- int *ovcs_id)
+static inline int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
+ int *ovcs_id, struct device_node *target_base)
{
return -ENOTSUPP;
}
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index b9767f36d5b5..5fb3d4c393a9 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1762,6 +1762,10 @@
#define PCI_SUBDEVICE_ID_AT_2700FX 0x2701
#define PCI_SUBDEVICE_ID_AT_2701FX 0x2703
+#define PCI_VENDOR_ID_ASIX 0x125b
+#define PCI_DEVICE_ID_ASIX_AX99100 0x9100
+#define PCI_DEVICE_ID_ASIX_AX99100_LB 0x9110
+
#define PCI_VENDOR_ID_ESS 0x125d
#define PCI_DEVICE_ID_ESS_ESS1968 0x1968
#define PCI_DEVICE_ID_ESS_ESS1978 0x1978
diff --git a/include/linux/peci.h b/include/linux/peci.h
index 06e6ef935297..9b3d36aff431 100644
--- a/include/linux/peci.h
+++ b/include/linux/peci.h
@@ -42,13 +42,13 @@ struct peci_controller_ops {
*/
struct peci_controller {
struct device dev;
- struct peci_controller_ops *ops;
+ const struct peci_controller_ops *ops;
struct mutex bus_lock; /* held for the duration of xfer */
u8 id;
};
struct peci_controller *devm_peci_controller_add(struct device *parent,
- struct peci_controller_ops *ops);
+ const struct peci_controller_ops *ops);
static inline struct peci_controller *to_peci_controller(void *d)
{
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 75b73c83bc9d..d01351b1526f 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -30,17 +30,28 @@ struct percpu_counter {
extern int percpu_counter_batch;
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
- struct lock_class_key *key);
+int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
+ gfp_t gfp, u32 nr_counters,
+ struct lock_class_key *key);
-#define percpu_counter_init(fbc, value, gfp) \
+#define percpu_counter_init_many(fbc, value, gfp, nr_counters) \
({ \
static struct lock_class_key __key; \
\
- __percpu_counter_init(fbc, value, gfp, &__key); \
+ __percpu_counter_init_many(fbc, value, gfp, nr_counters,\
+ &__key); \
})
-void percpu_counter_destroy(struct percpu_counter *fbc);
+
+#define percpu_counter_init(fbc, value, gfp) \
+ percpu_counter_init_many(fbc, value, gfp, 1)
+
+void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters);
+static inline void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+ percpu_counter_destroy_many(fbc, 1);
+}
+
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
s32 batch);
@@ -116,11 +127,27 @@ struct percpu_counter {
s64 count;
};
+static inline int percpu_counter_init_many(struct percpu_counter *fbc,
+ s64 amount, gfp_t gfp,
+ u32 nr_counters)
+{
+ u32 i;
+
+ for (i = 0; i < nr_counters; i++)
+ fbc[i].count = amount;
+
+ return 0;
+}
+
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
gfp_t gfp)
{
- fbc->count = amount;
- return 0;
+ return percpu_counter_init_many(fbc, amount, gfp, 1);
+}
+
+static inline void percpu_counter_destroy_many(struct percpu_counter *fbc,
+ u32 nr_counters)
+{
}
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
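A sketch of the new *_many() helpers, which set up and tear down a contiguous array of counters in one call; the array and wrapper function below are hypothetical.

	static struct percpu_counter example_counters[3];

	static int example_counters_setup(void)
	{
		int err;

		err = percpu_counter_init_many(example_counters, 0, GFP_KERNEL,
					       ARRAY_SIZE(example_counters));
		if (err)
			return err;

		percpu_counter_add(&example_counters[0], 1);

		percpu_counter_destroy_many(example_counters,
					    ARRAY_SIZE(example_counters));
		return 0;
	}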
diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h
index 43fc892aa7d9..43282e22ebe1 100644
--- a/include/linux/perf/riscv_pmu.h
+++ b/include/linux/perf/riscv_pmu.h
@@ -6,8 +6,8 @@
*
*/
-#ifndef _ASM_RISCV_PERF_EVENT_H
-#define _ASM_RISCV_PERF_EVENT_H
+#ifndef _RISCV_PMU_H
+#define _RISCV_PMU_H
#include <linux/perf_event.h>
#include <linux/ptrace.h>
@@ -21,7 +21,7 @@
#define RISCV_MAX_COUNTERS 64
#define RISCV_OP_UNSUPP (-EOPNOTSUPP)
-#define RISCV_PMU_PDEV_NAME "riscv-pmu"
+#define RISCV_PMU_SBI_PDEV_NAME "riscv-pmu-sbi"
#define RISCV_PMU_LEGACY_PDEV_NAME "riscv-pmu-legacy"
#define RISCV_PMU_STOP_FLAG_RESET 1
@@ -55,6 +55,10 @@ struct riscv_pmu {
void (*ctr_start)(struct perf_event *event, u64 init_val);
void (*ctr_stop)(struct perf_event *event, unsigned long flag);
int (*event_map)(struct perf_event *event, u64 *config);
+ void (*event_init)(struct perf_event *event);
+ void (*event_mapped)(struct perf_event *event, struct mm_struct *mm);
+ void (*event_unmapped)(struct perf_event *event, struct mm_struct *mm);
+ uint8_t (*csr_index)(struct perf_event *event);
struct cpu_hw_events __percpu *hw_events;
struct hlist_node node;
@@ -81,4 +85,4 @@ int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr);
#endif /* CONFIG_RISCV_PMU */
-#endif /* _ASM_RISCV_PERF_EVENT_H */
+#endif /* _RISCV_PMU_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 05253af70ce9..e85cd1c0eaf3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -444,7 +444,8 @@ struct pmu {
/*
* Will return the value for perf_event_mmap_page::index for this event,
- * if no implementation is provided it will default to: event->hw.idx + 1.
+ * if no implementation is provided it will default to 0 (see
+ * perf_event_idx_default).
*/
int (*event_idx) (struct perf_event *event); /*optional */
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 7d07f8736431..2b886ea654bb 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -600,7 +600,7 @@ void pcs_get_state(struct phylink_pcs *pcs,
*
* The %neg_mode argument should be tested via the phylink_mode_*() family of
* functions, or for PCS that set pcs->neg_mode true, should be tested
- * against the %PHYLINK_PCS_NEG_* definitions.
+ * against the PHYLINK_PCS_NEG_* definitions.
*/
int pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, const unsigned long *advertising,
@@ -630,7 +630,7 @@ void pcs_an_restart(struct phylink_pcs *pcs);
*
* The %mode argument should be tested via the phylink_mode_*() family of
* functions, or for PCS that set pcs->neg_mode true, should be tested
- * against the %PHYLINK_PCS_NEG_* definitions.
+ * against the PHYLINK_PCS_NEG_* definitions.
*/
void pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex);
diff --git a/include/linux/platform_data/rtc-ds2404.h b/include/linux/platform_data/rtc-ds2404.h
deleted file mode 100644
index 22c53825528f..000000000000
--- a/include/linux/platform_data/rtc-ds2404.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * ds2404.h - platform data structure for the DS2404 RTC.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 Sven Schnelle <svens@stackframe.org>
- */
-
-#ifndef __LINUX_DS2404_H
-#define __LINUX_DS2404_H
-
-struct ds2404_platform_data {
-
- unsigned int gpio_rst;
- unsigned int gpio_clk;
- unsigned int gpio_dq;
-};
-#endif
diff --git a/include/linux/platform_data/video-mx3fb.h b/include/linux/platform_data/video-mx3fb.h
deleted file mode 100644
index d03dc322a616..000000000000
--- a/include/linux/platform_data/video-mx3fb.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- */
-
-#ifndef __ASM_ARCH_MX3FB_H__
-#define __ASM_ARCH_MX3FB_H__
-
-#include <linux/device.h>
-#include <linux/fb.h>
-
-/* Proprietary FB_SYNC_ flags */
-#define FB_SYNC_OE_ACT_HIGH 0x80000000
-#define FB_SYNC_CLK_INVERT 0x40000000
-#define FB_SYNC_DATA_INVERT 0x20000000
-#define FB_SYNC_CLK_IDLE_EN 0x10000000
-#define FB_SYNC_SHARP_MODE 0x08000000
-#define FB_SYNC_SWAP_RGB 0x04000000
-#define FB_SYNC_CLK_SEL_EN 0x02000000
-
-/*
- * Specify the way your display is connected. The IPU can arbitrarily
- * map the internal colors to the external data lines. We only support
- * the following mappings at the moment.
- */
-enum disp_data_mapping {
- /* blue -> d[0..5], green -> d[6..11], red -> d[12..17] */
- IPU_DISP_DATA_MAPPING_RGB666,
- /* blue -> d[0..4], green -> d[5..10], red -> d[11..15] */
- IPU_DISP_DATA_MAPPING_RGB565,
- /* blue -> d[0..7], green -> d[8..15], red -> d[16..23] */
- IPU_DISP_DATA_MAPPING_RGB888,
-};
-
-/**
- * struct mx3fb_platform_data - mx3fb platform data
- *
- * @dma_dev: pointer to the dma-device, used for dma-slave connection
- * @mode: pointer to a platform-provided per mxc_register_fb() videomode
- */
-struct mx3fb_platform_data {
- struct device *dma_dev;
- const char *name;
- const struct fb_videomode *mode;
- int num_modes;
- enum disp_data_mapping disp_data_fmt;
-};
-
-#endif
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 28234dc9fa6a..16e99a1c37fc 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -66,6 +66,7 @@
#define ASUS_WMI_DEVID_CAMERA 0x00060013
#define ASUS_WMI_DEVID_LID_FLIP 0x00060062
#define ASUS_WMI_DEVID_LID_FLIP_ROG 0x00060077
+#define ASUS_WMI_DEVID_MINI_LED_MODE 0x0005001E
/* Storage */
#define ASUS_WMI_DEVID_CARDREADER 0x00080013
@@ -80,8 +81,19 @@
#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 /* deprecated */
#define ASUS_WMI_DEVID_CPU_FAN_CTRL 0x00110013
#define ASUS_WMI_DEVID_GPU_FAN_CTRL 0x00110014
+#define ASUS_WMI_DEVID_MID_FAN_CTRL 0x00110031
#define ASUS_WMI_DEVID_CPU_FAN_CURVE 0x00110024
#define ASUS_WMI_DEVID_GPU_FAN_CURVE 0x00110025
+#define ASUS_WMI_DEVID_MID_FAN_CURVE 0x00110032
+
+/* Tunables for ASUS ROG laptops */
+#define ASUS_WMI_DEVID_PPT_PL2_SPPT 0x001200A0
+#define ASUS_WMI_DEVID_PPT_PL1_SPL 0x001200A3
+#define ASUS_WMI_DEVID_PPT_APU_SPPT 0x001200B0
+#define ASUS_WMI_DEVID_PPT_PLAT_SPPT 0x001200B1
+#define ASUS_WMI_DEVID_PPT_FPPT 0x001200C1
+#define ASUS_WMI_DEVID_NV_DYN_BOOST 0x001200C0
+#define ASUS_WMI_DEVID_NV_THERM_TARGET 0x001200C2
/* Power */
#define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012
@@ -95,7 +107,12 @@
/* Keyboard dock */
#define ASUS_WMI_DEVID_KBD_DOCK 0x00120063
-/* dgpu on/off */
+/* Charging mode - 1=Barrel, 2=USB */
+#define ASUS_WMI_DEVID_CHARGE_MODE 0x0012006C
+
+/* egpu is connected? 1 == true */
+#define ASUS_WMI_DEVID_EGPU_CONNECTED 0x00090018
+/* egpu on/off */
#define ASUS_WMI_DEVID_EGPU 0x00090019
/* dgpu on/off */
diff --git a/include/linux/platform_data/x86/simatic-ipc-base.h b/include/linux/platform_data/x86/simatic-ipc-base.h
index 57d6a10dfc9e..2d7f7120ba6b 100644
--- a/include/linux/platform_data/x86/simatic-ipc-base.h
+++ b/include/linux/platform_data/x86/simatic-ipc-base.h
@@ -2,7 +2,7 @@
/*
* Siemens SIMATIC IPC drivers
*
- * Copyright (c) Siemens AG, 2018-2021
+ * Copyright (c) Siemens AG, 2018-2023
*
* Authors:
* Henning Schild <henning.schild@siemens.com>
@@ -20,6 +20,9 @@
#define SIMATIC_IPC_DEVICE_127E 3
#define SIMATIC_IPC_DEVICE_227E 4
#define SIMATIC_IPC_DEVICE_227G 5
+#define SIMATIC_IPC_DEVICE_BX_21A 6
+#define SIMATIC_IPC_DEVICE_BX_39A 7
+#define SIMATIC_IPC_DEVICE_BX_59A 8
struct simatic_ipc_platform {
u8 devmode;
diff --git a/include/linux/platform_data/x86/simatic-ipc.h b/include/linux/platform_data/x86/simatic-ipc.h
index a48bb5240977..8d8b3b919674 100644
--- a/include/linux/platform_data/x86/simatic-ipc.h
+++ b/include/linux/platform_data/x86/simatic-ipc.h
@@ -2,7 +2,7 @@
/*
* Siemens SIMATIC IPC drivers
*
- * Copyright (c) Siemens AG, 2018-2021
+ * Copyright (c) Siemens AG, 2018-2023
*
* Authors:
* Henning Schild <henning.schild@siemens.com>
@@ -32,8 +32,12 @@ enum simatic_ipc_station_ids {
SIMATIC_IPC_IPC477E = 0x00000A02,
SIMATIC_IPC_IPC127E = 0x00000D01,
SIMATIC_IPC_IPC227G = 0x00000F01,
+ SIMATIC_IPC_IPC277G = 0x00000F02,
SIMATIC_IPC_IPCBX_39A = 0x00001001,
SIMATIC_IPC_IPCPX_39A = 0x00001002,
+ SIMATIC_IPC_IPCBX_21A = 0x00001101,
+ SIMATIC_IPC_IPCBX_56A = 0x00001201,
+ SIMATIC_IPC_IPCBX_59A = 0x00001202,
};
static inline u32 simatic_ipc_get_station_id(u8 *data, int max_len)
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 04ae1d9073a7..d2f9f690a9c1 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -298,7 +298,7 @@ struct pwm_chip {
int base;
unsigned int npwm;
- struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
+ struct pwm_device * (*of_xlate)(struct pwm_chip *chip,
const struct of_phandle_args *args);
unsigned int of_pwm_n_cells;
@@ -395,9 +395,9 @@ struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
const char *label);
-struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc,
+struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *chip,
const struct of_phandle_args *args);
-struct pwm_device *of_pwm_single_xlate(struct pwm_chip *pc,
+struct pwm_device *of_pwm_single_xlate(struct pwm_chip *chip,
const struct of_phandle_args *args);
struct pwm_device *pwm_get(struct device *dev, const char *con_id);
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index f29aaaf2eb21..006e18decfad 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -108,6 +108,8 @@ extern const struct raid6_calls raid6_vpermxor1;
extern const struct raid6_calls raid6_vpermxor2;
extern const struct raid6_calls raid6_vpermxor4;
extern const struct raid6_calls raid6_vpermxor8;
+extern const struct raid6_calls raid6_lsx;
+extern const struct raid6_calls raid6_lasx;
struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **);
@@ -123,6 +125,8 @@ extern const struct raid6_recov_calls raid6_recov_avx2;
extern const struct raid6_recov_calls raid6_recov_avx512;
extern const struct raid6_recov_calls raid6_recov_s390xc;
extern const struct raid6_recov_calls raid6_recov_neon;
+extern const struct raid6_recov_calls raid6_recov_lsx;
+extern const struct raid6_recov_calls raid6_recov_lasx;
extern const struct raid6_calls raid6_neonx1;
extern const struct raid6_calls raid6_neonx2;
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index fe8978eb69f1..b4795698d8c2 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -690,6 +690,10 @@ int rproc_detach(struct rproc *rproc);
int rproc_set_firmware(struct rproc *rproc, const char *fw_name);
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem);
+
+/* from remoteproc_coredump.c */
+void rproc_coredump_cleanup(struct rproc *rproc);
+void rproc_coredump(struct rproc *rproc);
void rproc_coredump_using_sections(struct rproc *rproc);
int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size);
int rproc_coredump_add_custom_segment(struct rproc *rproc,
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index a3825ce81102..51cc21ebb568 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -479,7 +479,6 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
#define anon_vma_init() do {} while (0)
#define anon_vma_prepare(vma) (0)
-#define anon_vma_link(vma) do {} while (0)
static inline int folio_referenced(struct folio *folio, int is_locked,
struct mem_cgroup *memcg,
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 523c98b96cb4..90d8e4475f80 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -64,12 +64,14 @@ struct rpmsg_device {
};
typedef int (*rpmsg_rx_cb_t)(struct rpmsg_device *, void *, int, void *, u32);
+typedef int (*rpmsg_flowcontrol_cb_t)(struct rpmsg_device *, void *, bool);
/**
* struct rpmsg_endpoint - binds a local rpmsg address to its user
* @rpdev: rpmsg channel device
* @refcount: when this drops to zero, the ept is deallocated
* @cb: rx callback handler
+ * @flow_cb: remote flow control callback handler
* @cb_lock: must be taken before accessing/changing @cb
* @addr: local rpmsg address
* @priv: private data for the driver's use
@@ -92,6 +94,7 @@ struct rpmsg_endpoint {
struct rpmsg_device *rpdev;
struct kref refcount;
rpmsg_rx_cb_t cb;
+ rpmsg_flowcontrol_cb_t flow_cb;
struct mutex cb_lock;
u32 addr;
void *priv;
@@ -106,6 +109,7 @@ struct rpmsg_endpoint {
* @probe: invoked when a matching rpmsg channel (i.e. device) is found
* @remove: invoked when the rpmsg channel is removed
* @callback: invoked when an inbound message is received on the channel
+ * @flowcontrol: invoked when a flow control request is received from the remote side
*/
struct rpmsg_driver {
struct device_driver drv;
@@ -113,6 +117,7 @@ struct rpmsg_driver {
int (*probe)(struct rpmsg_device *dev);
void (*remove)(struct rpmsg_device *dev);
int (*callback)(struct rpmsg_device *, void *, int, void *, u32);
+ int (*flowcontrol)(struct rpmsg_device *, void *, bool);
};
static inline u16 rpmsg16_to_cpu(struct rpmsg_device *rpdev, __rpmsg16 val)
@@ -192,6 +197,8 @@ __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept);
+int rpmsg_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst);
+
#else
static inline int rpmsg_register_device_override(struct rpmsg_device *rpdev,
@@ -316,6 +323,14 @@ static inline ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept)
return -ENXIO;
}
+static inline int rpmsg_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
#endif /* IS_ENABLED(CONFIG_RPMSG) */
/* use a macro to avoid include chaining to get THIS_MODULE */
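A driver-side sketch wiring up the new flow-control hook and asking the remote side to pause; example_cb/example_flowcontrol and the device name are hypothetical, and the bool argument is assumed to mean "remote ready".

	static int example_cb(struct rpmsg_device *rpdev, void *data, int len,
			      void *priv, u32 src)
	{
		return 0;
	}

	static int example_flowcontrol(struct rpmsg_device *rpdev, void *priv,
				       bool enable)
	{
		dev_info(&rpdev->dev, "remote %s\n", enable ? "ready" : "busy");
		return 0;
	}

	static struct rpmsg_driver example_driver = {
		.drv.name	= "example-rpmsg",
		.callback	= example_cb,
		.flowcontrol	= example_flowcontrol,
	};

	/* ask the remote to pause traffic towards this endpoint's address: */
	/* rpmsg_set_flow_control(ept, true, rpdev->dst); */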
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 1fd9c6a21ebe..4c0bcbeb1f00 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -146,6 +146,7 @@ struct rtc_device {
time64_t range_min;
timeu64_t range_max;
+ timeu64_t alarm_offset_max;
time64_t start_secs;
time64_t offset_secs;
bool set_start_time;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 177b3f3676ef..77f01ac385f7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1671,7 +1671,7 @@ static inline unsigned int __task_state_index(unsigned int tsk_state,
BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
- if (tsk_state == TASK_IDLE)
+ if ((tsk_state & TASK_IDLE) == TASK_IDLE)
state = TASK_REPORT_IDLE;
/*
@@ -1679,7 +1679,7 @@ static inline unsigned int __task_state_index(unsigned int tsk_state,
* to userspace, we can make this appear as if the task has gone through
* a regular rt_mutex_lock() call.
*/
- if (tsk_state == TASK_RTLOCK_WAIT)
+ if (tsk_state & TASK_RTLOCK_WAIT)
state = TASK_UNINTERRUPTIBLE;
return fls(state);
@@ -1858,7 +1858,17 @@ extern int task_can_attach(struct task_struct *p);
extern int dl_bw_alloc(int cpu, u64 dl_bw);
extern void dl_bw_free(int cpu, u64 dl_bw);
#ifdef CONFIG_SMP
+
+/* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
+
+/**
+ * set_cpus_allowed_ptr - set CPU affinity mask of a task
+ * @p: the task
+ * @new_mask: CPU affinity mask
+ *
+ * Return: zero if successful, or a negative error code
+ */
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
extern void release_user_cpus_ptr(struct task_struct *p);
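A one-line usage sketch for the newly documented set_cpus_allowed_ptr(); the wrapper and the choice of CPU 1 are hypothetical.

	static int example_pin_to_cpu1(struct task_struct *p)
	{
		return set_cpus_allowed_ptr(p, cpumask_of(1));
	}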
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index a156d2ed8d9e..bb6f073bc159 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -570,7 +570,7 @@ struct uart_port {
struct serial_port_device *port_dev; /* serial core port device */
unsigned long sysrq; /* sysrq timeout */
- unsigned int sysrq_ch; /* char for sysrq */
+ u8 sysrq_ch; /* char for sysrq */
unsigned char has_sysrq;
unsigned char sysrq_seq; /* index in sysrq_toggle_seq */
@@ -904,16 +904,16 @@ void uart_handle_dcd_change(struct uart_port *uport, bool active);
void uart_handle_cts_change(struct uart_port *uport, bool active);
void uart_insert_char(struct uart_port *port, unsigned int status,
- unsigned int overrun, unsigned int ch, unsigned int flag);
+ unsigned int overrun, u8 ch, u8 flag);
void uart_xchar_out(struct uart_port *uport, int offset);
#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
#define SYSRQ_TIMEOUT (HZ * 5)
-bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch);
+bool uart_try_toggle_sysrq(struct uart_port *port, u8 ch);
-static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_handle_sysrq_char(struct uart_port *port, u8 ch)
{
if (!port->sysrq)
return 0;
@@ -932,7 +932,7 @@ static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch
return 0;
}
-static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
{
if (!port->sysrq)
return 0;
@@ -953,7 +953,7 @@ static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int c
static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
{
- int sysrq_ch;
+ u8 sysrq_ch;
if (!port->has_sysrq) {
spin_unlock(&port->lock);
@@ -972,7 +972,7 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
unsigned long flags)
{
- int sysrq_ch;
+ u8 sysrq_ch;
if (!port->has_sysrq) {
spin_unlock_irqrestore(&port->lock, flags);
@@ -988,11 +988,11 @@ static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port
handle_sysrq(sysrq_ch);
}
#else /* CONFIG_MAGIC_SYSRQ_SERIAL */
-static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_handle_sysrq_char(struct uart_port *port, u8 ch)
{
return 0;
}
-static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
{
return 0;
}
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index a388160f4be0..4f3d14bb1538 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -858,6 +858,8 @@ struct sdw_defer {
* @post_bank_switch: Callback for post bank switch
* @read_ping_status: Read status from PING frames, reported with two bits per Device.
* Bits 31:24 are reserved.
+ * @get_device_num: Callback for vendor-specific device_number allocation
+ * @put_device_num: Callback for vendor-specific device_number release
* @new_peripheral_assigned: Callback to handle enumeration of new peripheral.
*/
struct sdw_master_ops {
@@ -873,7 +875,11 @@ struct sdw_master_ops {
int (*pre_bank_switch)(struct sdw_bus *bus);
int (*post_bank_switch)(struct sdw_bus *bus);
u32 (*read_ping_status)(struct sdw_bus *bus);
- void (*new_peripheral_assigned)(struct sdw_bus *bus, int dev_num);
+ int (*get_device_num)(struct sdw_bus *bus, struct sdw_slave *slave);
+ void (*put_device_num)(struct sdw_bus *bus, struct sdw_slave *slave);
+ void (*new_peripheral_assigned)(struct sdw_bus *bus,
+ struct sdw_slave *slave,
+ int dev_num);
};
/**
@@ -908,9 +914,6 @@ struct sdw_master_ops {
* meaningful if multi_link is set. If set to 1, hardware-based
* synchronization will be used even if a stream only uses a single
* SoundWire segment.
- * @dev_num_ida_min: if set, defines the minimum values for the IDA
- * used to allocate system-unique device numbers. This value needs to be
- * identical across all SoundWire bus in the system.
*/
struct sdw_bus {
struct device *dev;
@@ -939,7 +942,6 @@ struct sdw_bus {
u32 bank_switch_timeout;
bool multi_link;
int hw_sync_min_links;
- int dev_num_ida_min;
};
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index fa67fad4ef51..00bb22d96ae5 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -428,4 +428,11 @@ struct sdw_intel_hw_ops {
extern const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops;
extern const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops;
+/*
+ * IDA min selected to allow for 5 unconstrained devices per link,
+ * and 6 system-unique Device Numbers for wake-capable devices.
+ */
+
+#define SDW_INTEL_DEV_NUM_IDA_MIN 6
+
#endif
diff --git a/include/linux/string_choices.h b/include/linux/string_choices.h
index 48120222b9b2..3c1091941eb8 100644
--- a/include/linux/string_choices.h
+++ b/include/linux/string_choices.h
@@ -30,6 +30,7 @@ static inline const char *str_read_write(bool v)
{
return v ? "read" : "write";
}
+#define str_write_read(v) str_read_write(!(v))
static inline const char *str_on_off(bool v)
{
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 518bd28f5ab8..35766963dd14 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -56,10 +56,14 @@ struct cache_head {
struct kref ref;
unsigned long flags;
};
-#define CACHE_VALID 0 /* Entry contains valid data */
-#define CACHE_NEGATIVE 1 /* Negative entry - there is no match for the key */
-#define CACHE_PENDING 2 /* An upcall has been sent but no reply received yet*/
-#define CACHE_CLEANED 3 /* Entry has been cleaned from cache */
+
+/* cache_head.flags */
+enum {
+ CACHE_VALID, /* Entry contains valid data */
+ CACHE_NEGATIVE, /* Negative entry - there is no match for the key */
+	CACHE_PENDING,	/* An upcall has been sent but no reply received yet */
+ CACHE_CLEANED, /* Entry has been cleaned from cache */
+};
#define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 4f41d839face..af7358277f1c 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -148,6 +148,8 @@ struct rpc_create_args {
const struct cred *cred;
unsigned int max_connect;
struct xprtsec_parms xprtsec;
+ unsigned long connect_timeout;
+ unsigned long reconnect_timeout;
};
struct rpc_add_xprt_test {
diff --git a/include/linux/sunrpc/stats.h b/include/linux/sunrpc/stats.h
index d94d4f410507..3ce1550d1beb 100644
--- a/include/linux/sunrpc/stats.h
+++ b/include/linux/sunrpc/stats.h
@@ -43,22 +43,6 @@ struct net;
#ifdef CONFIG_PROC_FS
int rpc_proc_init(struct net *);
void rpc_proc_exit(struct net *);
-#else
-static inline int rpc_proc_init(struct net *net)
-{
- return 0;
-}
-
-static inline void rpc_proc_exit(struct net *net)
-{
-}
-#endif
-
-#ifdef MODULE
-void rpc_modcount(struct inode *, int);
-#endif
-
-#ifdef CONFIG_PROC_FS
struct proc_dir_entry * rpc_proc_register(struct net *,struct rpc_stat *);
void rpc_proc_unregister(struct net *,const char *);
void rpc_proc_zero(const struct rpc_program *);
@@ -69,7 +53,14 @@ void svc_proc_unregister(struct net *, const char *);
void svc_seq_show(struct seq_file *,
const struct svc_stat *);
#else
+static inline int rpc_proc_init(struct net *net)
+{
+ return 0;
+}
+static inline void rpc_proc_exit(struct net *net)
+{
+}
static inline struct proc_dir_entry *rpc_proc_register(struct net *net, struct rpc_stat *s) { return NULL; }
static inline void rpc_proc_unregister(struct net *net, const char *p) {}
static inline void rpc_proc_zero(const struct rpc_program *p) {}
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index f8751118c122..dbf5b21feafe 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -39,16 +39,20 @@ struct svc_pool {
struct list_head sp_all_threads; /* all server threads */
/* statistics on pool operation */
+ struct percpu_counter sp_messages_arrived;
struct percpu_counter sp_sockets_queued;
struct percpu_counter sp_threads_woken;
- struct percpu_counter sp_threads_timedout;
-#define SP_TASK_PENDING (0) /* still work to do even if no
- * xprt is queued. */
-#define SP_CONGESTED (1)
unsigned long sp_flags;
} ____cacheline_aligned_in_smp;
+/* bits for sp_flags */
+enum {
+ SP_TASK_PENDING, /* still work to do even if no xprt is queued */
+ SP_CONGESTED, /* all threads are busy, none idle */
+};
+
+
/*
* RPC service.
*
@@ -120,19 +124,6 @@ static inline void svc_put(struct svc_serv *serv)
kref_put(&serv->sv_refcnt, svc_destroy);
}
-/**
- * svc_put_not_last - decrement non-final reference count on SUNRPC serv
- * @serv: the svc_serv to have count decremented
- *
- * Returns: %true is refcount was decremented.
- *
- * If the refcount is 1, it is not decremented and instead failure is reported.
- */
-static inline bool svc_put_not_last(struct svc_serv *serv)
-{
- return refcount_dec_not_one(&serv->sv_refcnt.refcount);
-}
-
/*
* Maximum payload size supported by a kernel RPC server.
* This is use to determine the max number of pages nfsd is
@@ -232,16 +223,6 @@ struct svc_rqst {
u32 rq_proc; /* procedure number */
u32 rq_prot; /* IP protocol */
int rq_cachetype; /* catering to nfsd */
-#define RQ_SECURE (0) /* secure port */
-#define RQ_LOCAL (1) /* local request */
-#define RQ_USEDEFERRAL (2) /* use deferral */
-#define RQ_DROPME (3) /* drop current reply */
-#define RQ_SPLICE_OK (4) /* turned off in gss privacy
- * to prevent encrypting page
- * cache pages */
-#define RQ_VICTIM (5) /* about to be shut down */
-#define RQ_BUSY (6) /* request is busy */
-#define RQ_DATA (7) /* request has data */
unsigned long rq_flags; /* flags field */
ktime_t rq_qtime; /* enqueue time */
@@ -265,7 +246,6 @@ struct svc_rqst {
/* Catering to nfsd */
struct auth_domain * rq_client; /* RPC peer info */
struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
- struct svc_cacherep * rq_cacherep; /* cache info */
struct task_struct *rq_task; /* service thread */
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
@@ -273,6 +253,19 @@ struct svc_rqst {
void ** rq_lease_breaker; /* The v4 client breaking a lease */
};
+/* bits for rq_flags */
+enum {
+ RQ_SECURE, /* secure port */
+ RQ_LOCAL, /* local request */
+ RQ_USEDEFERRAL, /* use deferral */
+ RQ_DROPME, /* drop current reply */
+ RQ_SPLICE_OK, /* turned off in gss privacy to prevent
+ * encrypting page cache pages */
+ RQ_VICTIM, /* about to be shut down */
+ RQ_BUSY, /* request is busy */
+ RQ_DATA, /* request has data */
+};
+
#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
/*
@@ -344,7 +337,7 @@ struct svc_program {
char * pg_name; /* service name */
char * pg_class; /* class name: services sharing authentication */
struct svc_stat * pg_stats; /* rpc statistics */
- int (*pg_authenticate)(struct svc_rqst *);
+ enum svc_auth_status (*pg_authenticate)(struct svc_rqst *rqstp);
__be32 (*pg_init_request)(struct svc_rqst *,
const struct svc_program *,
struct svc_process_info *);
@@ -427,6 +420,7 @@ int svc_register(const struct svc_serv *, struct net *, const int,
void svc_wake_up(struct svc_serv *);
void svc_reserve(struct svc_rqst *rqstp, int space);
+void svc_pool_wake_idle_thread(struct svc_pool *pool);
struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv);
char * svc_print_addr(struct svc_rqst *, char *, size_t);
const char * svc_proc_name(const struct svc_rqst *rqstp);
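The rq_flags bits are now enum values rather than #defines, but callers keep using the bitops helpers unchanged; a hypothetical sketch:

	static bool example_rq_is_secure(const struct svc_rqst *rqstp)
	{
		return test_bit(RQ_SECURE, &rqstp->rq_flags);
	}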
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index a6b12631db21..fa55d12dc765 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -56,23 +56,6 @@ struct svc_xprt {
struct list_head xpt_list;
struct list_head xpt_ready;
unsigned long xpt_flags;
-#define XPT_BUSY 0 /* enqueued/receiving */
-#define XPT_CONN 1 /* conn pending */
-#define XPT_CLOSE 2 /* dead or dying */
-#define XPT_DATA 3 /* data pending */
-#define XPT_TEMP 4 /* connected transport */
-#define XPT_DEAD 6 /* transport closed */
-#define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */
-#define XPT_DEFERRED 8 /* deferred request pending */
-#define XPT_OLD 9 /* used for xprt aging mark+sweep */
-#define XPT_LISTENER 10 /* listening endpoint */
-#define XPT_CACHE_AUTH 11 /* cache auth info */
-#define XPT_LOCAL 12 /* connection from loopback interface */
-#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */
-#define XPT_CONG_CTRL 14 /* has congestion control */
-#define XPT_HANDSHAKE 15 /* xprt requests a handshake */
-#define XPT_TLS_SESSION 16 /* transport-layer security established */
-#define XPT_PEER_AUTH 17 /* peer has been authenticated */
struct svc_serv *xpt_server; /* service for transport */
atomic_t xpt_reserved; /* space on outq that is rsvd */
@@ -97,6 +80,27 @@ struct svc_xprt {
struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */
};
+/* flag bits for xpt_flags */
+enum {
+ XPT_BUSY, /* enqueued/receiving */
+ XPT_CONN, /* conn pending */
+ XPT_CLOSE, /* dead or dying */
+ XPT_DATA, /* data pending */
+ XPT_TEMP, /* connected transport */
+ XPT_DEAD, /* transport closed */
+ XPT_CHNGBUF, /* need to change snd/rcv buf sizes */
+ XPT_DEFERRED, /* deferred request pending */
+ XPT_OLD, /* used for xprt aging mark+sweep */
+ XPT_LISTENER, /* listening endpoint */
+ XPT_CACHE_AUTH, /* cache auth info */
+ XPT_LOCAL, /* connection from loopback interface */
+ XPT_KILL_TEMP, /* call xpo_kill_temp_xprt before closing */
+ XPT_CONG_CTRL, /* has congestion control */
+ XPT_HANDSHAKE, /* xprt requests a handshake */
+ XPT_TLS_SESSION, /* transport-layer security established */
+ XPT_PEER_AUTH, /* peer has been authenticated */
+};
+
static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
{
spin_lock(&xpt->xpt_lock);
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 6d9cc9080aca..6f90203edbf8 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -83,6 +83,19 @@ struct auth_domain {
struct rcu_head rcu_head;
};
+enum svc_auth_status {
+ SVC_GARBAGE = 1,
+ SVC_SYSERR,
+ SVC_VALID,
+ SVC_NEGATIVE,
+ SVC_OK,
+ SVC_DROP,
+ SVC_CLOSE,
+ SVC_DENIED,
+ SVC_PENDING,
+ SVC_COMPLETE,
+};
+
/*
* Each authentication flavour registers an auth_ops
* structure.
@@ -98,6 +111,8 @@ struct auth_domain {
* is (probably) already in place. Certainly space is
* reserved for it.
* DROP - simply drop the request. It may have been deferred
+ * CLOSE - like SVC_DROP, but request is definitely lost.
+ * If there is a tcp connection, it should be closed.
* GARBAGE - rpc garbage_args error
* SYSERR - rpc system_err error
* DENIED - authp holds reason for denial.
@@ -111,14 +126,10 @@ struct auth_domain {
*
* release() is given a request after the procedure has been run.
* It should sign/encrypt the results if needed
- * It should return:
- * OK - the resbuf is ready to be sent
- * DROP - the reply should be quitely dropped
- * DENIED - authp holds a reason for MSG_DENIED
- * SYSERR - rpc system_err
*
* domain_release()
* This call releases a domain.
+ *
* set_client()
* Given a pending request (struct svc_rqst), finds and assigns
* an appropriate 'auth_domain' as the client.
@@ -127,44 +138,28 @@ struct auth_ops {
char * name;
struct module *owner;
int flavour;
- int (*accept)(struct svc_rqst *rq);
- int (*release)(struct svc_rqst *rq);
- void (*domain_release)(struct auth_domain *);
- int (*set_client)(struct svc_rqst *rq);
-};
-#define SVC_GARBAGE 1
-#define SVC_SYSERR 2
-#define SVC_VALID 3
-#define SVC_NEGATIVE 4
-#define SVC_OK 5
-#define SVC_DROP 6
-#define SVC_CLOSE 7 /* Like SVC_DROP, but request is definitely
- * lost so if there is a tcp connection, it
- * should be closed
- */
-#define SVC_DENIED 8
-#define SVC_PENDING 9
-#define SVC_COMPLETE 10
+ enum svc_auth_status (*accept)(struct svc_rqst *rqstp);
+ int (*release)(struct svc_rqst *rqstp);
+ void (*domain_release)(struct auth_domain *dom);
+ enum svc_auth_status (*set_client)(struct svc_rqst *rqstp);
+};
struct svc_xprt;
-extern int svc_authenticate(struct svc_rqst *rqstp);
+extern enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp);
extern int svc_authorise(struct svc_rqst *rqstp);
-extern int svc_set_client(struct svc_rqst *rqstp);
+extern enum svc_auth_status svc_set_client(struct svc_rqst *rqstp);
extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops);
extern void svc_auth_unregister(rpc_authflavor_t flavor);
extern struct auth_domain *unix_domain_find(char *name);
extern void auth_domain_put(struct auth_domain *item);
-extern int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom);
extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new);
extern struct auth_domain *auth_domain_find(char *name);
-extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr);
-extern int auth_unix_forget_old(struct auth_domain *dom);
extern void svcauth_unix_purge(struct net *net);
extern void svcauth_unix_info_release(struct svc_xprt *xpt);
-extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
+extern enum svc_auth_status svcauth_unix_set_client(struct svc_rqst *rqstp);
extern int unix_gid_cache_create(struct net *net);
extern void unix_gid_cache_destroy(struct net *net);
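A stub auth flavour adapted to the typed return value introduced above; the flavour name, the accept body and the use of RPC_AUTH_NULL are illustrative only.

	static enum svc_auth_status example_accept(struct svc_rqst *rqstp)
	{
		return SVC_OK;	/* or SVC_DENIED / SVC_DROP / SVC_CLOSE ... */
	}

	static struct auth_ops example_auth_ops = {
		.name		= "example",
		.owner		= THIS_MODULE,
		.flavour	= RPC_AUTH_NULL,
		.accept		= example_accept,
		.set_client	= svcauth_unix_set_client,
	};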
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index a7116048a4d4..7c78ec6356b9 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -35,8 +35,8 @@ struct svc_sock {
/* Total length of the data (not including fragment headers)
* received so far in the fragments making up this rpc: */
u32 sk_datalen;
- /* Number of queued send requests */
- atomic_t sk_sendqlen;
+
+ struct page_frag_cache sk_frag_cache;
struct completion sk_handshake_done;
@@ -56,8 +56,7 @@ static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
/*
* Function prototypes.
*/
-void svc_close_net(struct svc_serv *, struct net *);
-int svc_recv(struct svc_rqst *, long);
+void svc_recv(struct svc_rqst *rqstp);
void svc_send(struct svc_rqst *rqstp);
void svc_drop(struct svc_rqst *);
void svc_sock_update_bufs(struct svc_serv *serv);
@@ -66,8 +65,6 @@ int svc_addsock(struct svc_serv *serv, struct net *net,
const struct cred *cred);
void svc_init_xprt_sock(void);
void svc_cleanup_xprt_sock(void);
-struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot);
-void svc_sock_destroy(struct svc_xprt *);
/*
* svc_makesock socket characteristics
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index f89ec4b5ea16..5b4fb3c791bc 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -139,6 +139,8 @@ void xdr_terminate_string(const struct xdr_buf *, const u32);
size_t xdr_buf_pagecount(const struct xdr_buf *buf);
int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp);
void xdr_free_bvec(struct xdr_buf *buf);
+unsigned int xdr_buf_to_bvec(struct bio_vec *bvec, unsigned int bvec_size,
+ const struct xdr_buf *xdr);
static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len)
{
@@ -224,6 +226,7 @@ struct xdr_stream {
struct kvec *iov; /* pointer to the current kvec */
struct kvec scratch; /* Scratch buffer */
struct page **page_ptr; /* pointer to the current page */
+ void *page_kaddr; /* kmapped address of the current page */
unsigned int nwords; /* Remaining decode buffer length */
struct rpc_rqst *rqst; /* For debugging */
@@ -255,6 +258,7 @@ extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf,
__be32 *p, struct rpc_rqst *rqst);
extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
struct page **pages, unsigned int len);
+extern void xdr_finish_decode(struct xdr_stream *xdr);
extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
@@ -775,9 +779,7 @@ xdr_stream_decode_uint32_array(struct xdr_stream *xdr,
if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
return -EBADMSG;
- if (len > SIZE_MAX / sizeof(*p))
- return -EBADMSG;
- p = xdr_inline_decode(xdr, len * sizeof(*p));
+ p = xdr_inline_decode(xdr, size_mul(len, sizeof(*p)));
if (unlikely(!p))
return -EBADMSG;
if (array == NULL)
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index b52411bcfe4e..4ecc89301eb7 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -351,6 +351,8 @@ struct xprt_create {
struct rpc_xprt_switch *bc_xps;
unsigned int flags;
struct xprtsec_parms xprtsec;
+ unsigned long connect_timeout;
+ unsigned long reconnect_timeout;
};
struct xprt_class {
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 3a582ec7a2f1..bdca467ebb77 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -30,7 +30,7 @@
#define SYSRQ_ENABLE_RTNICE 0x0100
struct sysrq_key_op {
- void (* const handler)(int);
+ void (* const handler)(u8);
const char * const help_msg;
const char * const action_msg;
const int enable_mask;
@@ -43,10 +43,10 @@ struct sysrq_key_op {
* are available -- else NULL's).
*/
-void handle_sysrq(int key);
-void __handle_sysrq(int key, bool check_mask);
-int register_sysrq_key(int key, const struct sysrq_key_op *op);
-int unregister_sysrq_key(int key, const struct sysrq_key_op *op);
+void handle_sysrq(u8 key);
+void __handle_sysrq(u8 key, bool check_mask);
+int register_sysrq_key(u8 key, const struct sysrq_key_op *op);
+int unregister_sysrq_key(u8 key, const struct sysrq_key_op *op);
extern const struct sysrq_key_op *__sysrq_reboot_op;
int sysrq_toggle_support(int enable_mask);
@@ -54,20 +54,20 @@ int sysrq_mask(void);
#else
-static inline void handle_sysrq(int key)
+static inline void handle_sysrq(u8 key)
{
}
-static inline void __handle_sysrq(int key, bool check_mask)
+static inline void __handle_sysrq(u8 key, bool check_mask)
{
}
-static inline int register_sysrq_key(int key, const struct sysrq_key_op *op)
+static inline int register_sysrq_key(u8 key, const struct sysrq_key_op *op)
{
return -EINVAL;
}
-static inline int unregister_sysrq_key(int key, const struct sysrq_key_op *op)
+static inline int unregister_sysrq_key(u8 key, const struct sysrq_key_op *op)
{
return -EINVAL;
}
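Registering a handler under the u8-keyed API is otherwise unchanged; a hypothetical sketch (key choice and messages are placeholders):

	static void example_sysrq_handler(u8 key)
	{
		pr_info("sysrq '%c'\n", key);
	}

	static const struct sysrq_key_op example_sysrq_op = {
		.handler	= example_sysrq_handler,
		.help_msg	= "example(y)",
		.action_msg	= "Example action",
		.enable_mask	= SYSRQ_ENABLE_RTNICE,
	};

	/* register_sysrq_key('y', &example_sysrq_op); */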
diff --git a/include/linux/tca6416_keypad.h b/include/linux/tca6416_keypad.h
index b0d36a9934cc..5cf6f6f82aa7 100644
--- a/include/linux/tca6416_keypad.h
+++ b/include/linux/tca6416_keypad.h
@@ -25,7 +25,6 @@ struct tca6416_keys_platform_data {
unsigned int rep:1; /* enable input subsystem auto repeat */
uint16_t pinmask;
uint16_t invert;
- int irq_is_gpio;
int use_polling; /* use polling if Interrupt is not connected*/
};
#endif
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index b449a46766f5..c99440aac1a1 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -53,6 +53,20 @@ enum thermal_notify_event {
THERMAL_EVENT_KEEP_ALIVE, /* Request for user space handler to respond */
};
+/**
+ * struct thermal_trip - representation of a point in temperature domain
+ * @temperature: temperature value in milliCelsius
+ * @hysteresis: relative hysteresis in milliCelsius
+ * @type: trip point type
+ * @priv: pointer to driver data associated with this trip
+ */
+struct thermal_trip {
+ int temperature;
+ int hysteresis;
+ enum thermal_trip_type type;
+ void *priv;
+};
+
struct thermal_zone_device_ops {
int (*bind) (struct thermal_zone_device *,
struct thermal_cooling_device *);
@@ -62,34 +76,16 @@ struct thermal_zone_device_ops {
int (*set_trips) (struct thermal_zone_device *, int, int);
int (*change_mode) (struct thermal_zone_device *,
enum thermal_device_mode);
- int (*get_trip_type) (struct thermal_zone_device *, int,
- enum thermal_trip_type *);
- int (*get_trip_temp) (struct thermal_zone_device *, int, int *);
int (*set_trip_temp) (struct thermal_zone_device *, int, int);
- int (*get_trip_hyst) (struct thermal_zone_device *, int, int *);
int (*set_trip_hyst) (struct thermal_zone_device *, int, int);
int (*get_crit_temp) (struct thermal_zone_device *, int *);
int (*set_emul_temp) (struct thermal_zone_device *, int);
- int (*get_trend) (struct thermal_zone_device *, int,
+ int (*get_trend) (struct thermal_zone_device *, struct thermal_trip *,
enum thermal_trend *);
void (*hot)(struct thermal_zone_device *);
void (*critical)(struct thermal_zone_device *);
};
-/**
- * struct thermal_trip - representation of a point in temperature domain
- * @temperature: temperature value in miliCelsius
- * @hysteresis: relative hysteresis in miliCelsius
- * @type: trip point type
- * @priv: pointer to driver data associated with this trip
- */
-struct thermal_trip {
- int temperature;
- int hysteresis;
- enum thermal_trip_type type;
- void *priv;
-};
-
struct thermal_cooling_device_ops {
int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
@@ -304,16 +300,22 @@ int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
#endif
#ifdef CONFIG_THERMAL
-struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
- void *, struct thermal_zone_device_ops *,
- const struct thermal_zone_params *, int, int);
-
-void thermal_zone_device_unregister(struct thermal_zone_device *);
-
-struct thermal_zone_device *
-thermal_zone_device_register_with_trips(const char *, struct thermal_trip *, int, int,
- void *, struct thermal_zone_device_ops *,
- const struct thermal_zone_params *, int, int);
+struct thermal_zone_device *thermal_zone_device_register_with_trips(
+ const char *type,
+ struct thermal_trip *trips,
+ int num_trips, int mask,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+ int passive_delay, int polling_delay);
+
+struct thermal_zone_device *thermal_tripless_zone_device_register(
+ const char *type,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp);
+
+void thermal_zone_device_unregister(struct thermal_zone_device *tz);
void *thermal_zone_device_priv(struct thermal_zone_device *tzd);
const char *thermal_zone_device_type(struct thermal_zone_device *tzd);
@@ -354,15 +356,26 @@ int thermal_zone_device_enable(struct thermal_zone_device *tz);
int thermal_zone_device_disable(struct thermal_zone_device *tz);
void thermal_zone_device_critical(struct thermal_zone_device *tz);
#else
-static inline struct thermal_zone_device *thermal_zone_device_register(
- const char *type, int trips, int mask, void *devdata,
- struct thermal_zone_device_ops *ops,
- const struct thermal_zone_params *tzp,
- int passive_delay, int polling_delay)
+static inline struct thermal_zone_device *thermal_zone_device_register_with_trips(
+ const char *type,
+ struct thermal_trip *trips,
+ int num_trips, int mask,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+ int passive_delay, int polling_delay)
{ return ERR_PTR(-ENODEV); }
-static inline void thermal_zone_device_unregister(
- struct thermal_zone_device *tz)
+
+static inline struct thermal_zone_device *thermal_tripless_zone_device_register(
+ const char *type,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp)
+{ return ERR_PTR(-ENODEV); }
+
+static inline void thermal_zone_device_unregister(struct thermal_zone_device *tz)
{ }
+
static inline struct thermal_cooling_device *
thermal_cooling_device_register(const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
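A sketch of registering a sensor with no trip points through the new thermal_tripless_zone_device_register(); the ops, zone name and fixed temperature are hypothetical.

	static int example_get_temp(struct thermal_zone_device *tz, int *temp)
	{
		*temp = 42000;	/* milliCelsius */
		return 0;
	}

	static struct thermal_zone_device_ops example_tz_ops = {
		.get_temp = example_get_temp,
	};

	/* tz = thermal_tripless_zone_device_register("example", NULL,
						       &example_tz_ops, NULL); */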
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index 44a7f9169ac6..10642d4844f0 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -271,7 +271,7 @@ long st_kim_stop(void *);
void st_kim_complete(void *);
void kim_st_list_protocols(struct st_data_s *, void *);
-void st_kim_recv(void *, const unsigned char *, long);
+void st_kim_recv(void *disc_data, const u8 *data, size_t count);
/*
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index c1a0a19d80fb..eb5c3add939b 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -649,6 +649,7 @@ struct trace_event_file {
struct list_head list;
struct trace_event_call *event_call;
struct event_filter __rcu *filter;
+ struct eventfs_file *ef;
struct dentry *dir;
struct trace_array *tr;
struct trace_subsystem_dir *system;
@@ -824,6 +825,7 @@ enum {
FILTER_RDYN_STRING,
FILTER_PTR_STRING,
FILTER_TRACE_FN,
+ FILTER_CPUMASK,
FILTER_COMM,
FILTER_CPU,
FILTER_STACKTRACE,
diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h
index 99912445974c..009072792fa3 100644
--- a/include/linux/tracefs.h
+++ b/include/linux/tracefs.h
@@ -21,6 +21,29 @@ struct file_operations;
#ifdef CONFIG_TRACING
+struct eventfs_file;
+
+struct dentry *eventfs_create_events_dir(const char *name,
+ struct dentry *parent);
+
+struct eventfs_file *eventfs_add_subsystem_dir(const char *name,
+ struct dentry *parent);
+
+struct eventfs_file *eventfs_add_dir(const char *name,
+ struct eventfs_file *ef_parent);
+
+int eventfs_add_file(const char *name, umode_t mode,
+ struct eventfs_file *ef_parent, void *data,
+ const struct file_operations *fops);
+
+int eventfs_add_events_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops);
+
+void eventfs_remove(struct eventfs_file *ef);
+
+void eventfs_remove_events_dir(struct dentry *dentry);
+
struct dentry *tracefs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
diff --git a/include/linux/tty.h b/include/linux/tty.h
index e8d5d9997aca..f002d0f25db7 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -192,13 +192,14 @@ struct tty_operations;
*/
struct tty_struct {
struct kref kref;
+ int index;
struct device *dev;
struct tty_driver *driver;
+ struct tty_port *port;
const struct tty_operations *ops;
- int index;
- struct ld_semaphore ldisc_sem;
struct tty_ldisc *ldisc;
+ struct ld_semaphore ldisc_sem;
struct mutex atomic_write_lock;
struct mutex legacy_mutex;
@@ -209,6 +210,7 @@ struct tty_struct {
char name[64];
unsigned long flags;
int count;
+ unsigned int receive_room;
struct winsize winsize;
struct {
@@ -219,16 +221,16 @@ struct tty_struct {
} __aligned(sizeof(unsigned long)) flow;
struct {
- spinlock_t lock;
struct pid *pgrp;
struct pid *session;
+ spinlock_t lock;
unsigned char pktstatus;
bool packet;
unsigned long unused[0];
} __aligned(sizeof(unsigned long)) ctrl;
bool hw_stopped;
- unsigned int receive_room;
+ bool closing;
int flow_change;
struct tty_struct *link;
@@ -239,15 +241,13 @@ struct tty_struct {
void *disc_data;
void *driver_data;
spinlock_t files_lock;
+ int write_cnt;
+ unsigned char *write_buf;
+
struct list_head tty_files;
#define N_TTY_BUF_SIZE 4096
-
- int closing;
- unsigned char *write_buf;
- int write_cnt;
struct work_struct SAK_work;
- struct tty_port *port;
} __randomize_layout;
/* Each of a tty's open files has private_data pointing to tty_file_private */
diff --git a/include/linux/tty_buffer.h b/include/linux/tty_buffer.h
index 6ceb2789e6c8..31125e3be3c5 100644
--- a/include/linux/tty_buffer.h
+++ b/include/linux/tty_buffer.h
@@ -12,24 +12,24 @@ struct tty_buffer {
struct tty_buffer *next;
struct llist_node free;
};
- int used;
- int size;
- int commit;
- int lookahead; /* Lazy update on recv, can become less than "read" */
- int read;
+ unsigned int used;
+ unsigned int size;
+ unsigned int commit;
+ unsigned int lookahead; /* Lazy update on recv, can become less than "read" */
+ unsigned int read;
bool flags;
/* Data points here */
- unsigned long data[];
+ u8 data[] __aligned(sizeof(unsigned long));
};
-static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs)
+static inline u8 *char_buf_ptr(struct tty_buffer *b, unsigned int ofs)
{
- return ((unsigned char *)b->data) + ofs;
+ return b->data + ofs;
}
-static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs)
+static inline u8 *flag_buf_ptr(struct tty_buffer *b, unsigned int ofs)
{
- return (char *)char_buf_ptr(b, ofs) + b->size;
+ return char_buf_ptr(b, ofs) + b->size;
}
struct tty_bufhead {
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index e00034118c7b..18beff0cec1a 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -72,8 +72,8 @@ struct serial_struct;
* is closed for the last time freeing up the resources. This is
* actually the second part of shutdown for routines that might sleep.
*
- * @write: ``int ()(struct tty_struct *tty, const unsigned char *buf,
- * int count)``
+ * @write: ``ssize_t ()(struct tty_struct *tty, const unsigned char *buf,
+ * size_t count)``
*
* This routine is called by the kernel to write a series (@count) of
* characters (@buf) to the @tty device. The characters may come from
@@ -356,9 +356,8 @@ struct tty_operations {
void (*close)(struct tty_struct * tty, struct file * filp);
void (*shutdown)(struct tty_struct *tty);
void (*cleanup)(struct tty_struct *tty);
- int (*write)(struct tty_struct * tty,
- const unsigned char *buf, int count);
- int (*put_char)(struct tty_struct *tty, unsigned char ch);
+ ssize_t (*write)(struct tty_struct *tty, const u8 *buf, size_t count);
+ int (*put_char)(struct tty_struct *tty, u8 ch);
void (*flush_chars)(struct tty_struct *tty);
unsigned int (*write_room)(struct tty_struct *tty);
unsigned int (*chars_in_buffer)(struct tty_struct *tty);
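With ->write() switched to ssize_t/size_t and u8, a driver hook now looks roughly like the sketch below; all foo_* names and helpers are invented:

static ssize_t foo_tty_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
	struct foo_port *fp = tty->driver_data;			/* assumed driver state */

	count = min_t(size_t, count, foo_fifo_space(fp));	/* assumed helper */
	foo_fifo_write(fp, buf, count);				/* assumed helper */

	return count;
}

static const struct tty_operations foo_tty_ops = {
	.write = foo_tty_write,
};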
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index bfaaeee61a05..af4fce98f64e 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -10,17 +10,59 @@ struct tty_ldisc;
int tty_buffer_set_limit(struct tty_port *port, int limit);
unsigned int tty_buffer_space_avail(struct tty_port *port);
int tty_buffer_request_room(struct tty_port *port, size_t size);
-int tty_insert_flip_string_flags(struct tty_port *port,
- const unsigned char *chars, const char *flags, size_t size);
-int tty_insert_flip_string_fixed_flag(struct tty_port *port,
- const unsigned char *chars, char flag, size_t size);
-int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
- size_t size);
+size_t __tty_insert_flip_string_flags(struct tty_port *port, const u8 *chars,
+ const u8 *flags, bool mutable_flags,
+ size_t size);
+size_t tty_prepare_flip_string(struct tty_port *port, u8 **chars, size_t size);
void tty_flip_buffer_push(struct tty_port *port);
-int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
-static inline int tty_insert_flip_char(struct tty_port *port,
- unsigned char ch, char flag)
+/**
+ * tty_insert_flip_string_fixed_flag - add characters to the tty buffer
+ * @port: tty port
+ * @chars: characters
+ * @flag: flag value for each character
+ * @size: size
+ *
+ * Queue a series of bytes to the tty buffering. All the characters passed are
+ * marked with the supplied flag.
+ *
+ * Returns: the number added.
+ */
+static inline size_t tty_insert_flip_string_fixed_flag(struct tty_port *port,
+ const u8 *chars, u8 flag,
+ size_t size)
+{
+ return __tty_insert_flip_string_flags(port, chars, &flag, false, size);
+}
+
+/**
+ * tty_insert_flip_string_flags - add characters to the tty buffer
+ * @port: tty port
+ * @chars: characters
+ * @flags: flag bytes
+ * @size: size
+ *
+ * Queue a series of bytes to the tty buffering. For each character the flags
+ * array indicates the status of the character.
+ *
+ * Returns: the number added.
+ */
+static inline size_t tty_insert_flip_string_flags(struct tty_port *port,
+ const u8 *chars,
+ const u8 *flags, size_t size)
+{
+ return __tty_insert_flip_string_flags(port, chars, flags, true, size);
+}
+
+/**
+ * tty_insert_flip_char - add one character to the tty buffer
+ * @port: tty port
+ * @ch: character
+ * @flag: flag byte
+ *
+ * Queue a single byte @ch to the tty buffering, with an optional flag.
+ */
+static inline size_t tty_insert_flip_char(struct tty_port *port, u8 ch, u8 flag)
{
struct tty_buffer *tb = port->buf.tail;
int change;
@@ -32,17 +74,17 @@ static inline int tty_insert_flip_char(struct tty_port *port,
*char_buf_ptr(tb, tb->used++) = ch;
return 1;
}
- return __tty_insert_flip_char(port, ch, flag);
+ return __tty_insert_flip_string_flags(port, &ch, &flag, false, 1);
}
-static inline int tty_insert_flip_string(struct tty_port *port,
- const unsigned char *chars, size_t size)
+static inline size_t tty_insert_flip_string(struct tty_port *port,
+ const u8 *chars, size_t size)
{
return tty_insert_flip_string_fixed_flag(port, chars, TTY_NORMAL, size);
}
-int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
- const char *f, int count);
+size_t tty_ldisc_receive_buf(struct tty_ldisc *ld, const u8 *p, const u8 *f,
+ size_t count);
void tty_buffer_lock_exclusive(struct tty_port *port);
void tty_buffer_unlock_exclusive(struct tty_port *port);
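A typical receive path built on the size_t-based helpers above; a sketch only, with foo_rx() and the message text invented:

static void foo_rx(struct tty_port *port, const u8 *data, size_t len)
{
	size_t copied = tty_insert_flip_string(port, data, len);

	if (copied != len)
		pr_warn_ratelimited("foo: flip buffer full, dropped %zu bytes\n",
				    len - copied);

	tty_flip_buffer_push(port);
}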
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 49dc172dedc7..af01e89074b2 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -71,7 +71,7 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* call to @receive_buf(). Returning an error will prevent the ldisc from
* being attached.
*
- * Can sleep.
+ * Optional. Can sleep.
*
* @close: [TTY] ``void ()(struct tty_struct *tty)``
*
@@ -80,7 +80,7 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* changed to use a new line discipline. At the point of execution no
* further users will enter the ldisc code for this tty.
*
- * Can sleep.
+ * Optional. Can sleep.
*
* @flush_buffer: [TTY] ``void ()(struct tty_struct *tty)``
*
@@ -88,8 +88,10 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* input characters it may have queued to be delivered to the user mode
* process. It may be called at any point between open and close.
*
- * @read: [TTY] ``ssize_t ()(struct tty_struct *tty, struct file *file,
- * unsigned char *buf, size_t nr)``
+ * Optional.
+ *
+ * @read: [TTY] ``ssize_t ()(struct tty_struct *tty, struct file *file, u8 *buf,
+ * size_t nr)``
*
* This function is called when the user requests to read from the @tty.
* The line discipline will return whatever characters it has buffered up
@@ -97,10 +99,10 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* an %EIO error. Multiple read calls may occur in parallel and the ldisc
* must deal with serialization issues.
*
- * Can sleep.
+ * Optional: %EIO unless provided. Can sleep.
*
* @write: [TTY] ``ssize_t ()(struct tty_struct *tty, struct file *file,
- * const unsigned char *buf, size_t nr)``
+ * const u8 *buf, size_t nr)``
*
* This function is called when the user requests to write to the @tty.
* The line discipline will deliver the characters to the low-level tty
@@ -108,7 +110,7 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* characters first. If this function is not defined, the user will
* receive an %EIO error.
*
- * Can sleep.
+ * Optional: %EIO unless provided. Can sleep.
*
* @ioctl: [TTY] ``int ()(struct tty_struct *tty, unsigned int cmd,
* unsigned long arg)``
@@ -120,6 +122,8 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* discipline. So a low-level driver can "grab" an ioctl request before
* the line discipline has a chance to see it.
*
+ * Optional.
+ *
* @compat_ioctl: [TTY] ``int ()(struct tty_struct *tty, unsigned int cmd,
* unsigned long arg)``
*
@@ -130,11 +134,15 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* a pointer to wordsize-sensitive structure belongs here, but most of
* ldiscs will happily leave it %NULL.
*
+ * Optional.
+ *
* @set_termios: [TTY] ``void ()(struct tty_struct *tty, const struct ktermios *old)``
*
* This function notifies the line discipline that a change has been made
* to the termios structure.
*
+ * Optional.
+ *
* @poll: [TTY] ``int ()(struct tty_struct *tty, struct file *file,
* struct poll_table_struct *wait)``
*
@@ -142,6 +150,8 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* device. It is solely the responsibility of the line discipline to
* handle poll requests.
*
+ * Optional.
+ *
* @hangup: [TTY] ``void ()(struct tty_struct *tty)``
*
* Called on a hangup. Tells the discipline that it should cease I/O to
@@ -149,10 +159,10 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* but should wait until any pending driver I/O is completed. No further
* calls into the ldisc code will occur.
*
- * Can sleep.
+ * Optional. Can sleep.
*
- * @receive_buf: [DRV] ``void ()(struct tty_struct *tty,
- * const unsigned char *cp, const char *fp, int count)``
+ * @receive_buf: [DRV] ``void ()(struct tty_struct *tty, const u8 *cp,
+ * const u8 *fp, size_t count)``
*
* This function is called by the low-level tty driver to send characters
* received by the hardware to the line discipline for processing. @cp is
@@ -161,6 +171,8 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* character was received with a parity error, etc. @fp may be %NULL to
* indicate all data received is %TTY_NORMAL.
*
+ * Optional.
+ *
* @write_wakeup: [DRV] ``void ()(struct tty_struct *tty)``
*
* This function is called by the low-level tty driver to signal that line
@@ -170,13 +182,17 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* send, please schedule a tasklet or queue work to do the real data transfer.
* Do not send data in this hook, it may lead to a deadlock.
*
+ * Optional.
+ *
* @dcd_change: [DRV] ``void ()(struct tty_struct *tty, bool active)``
*
* Tells the discipline that the DCD pin has changed its status. Used
* exclusively by the %N_PPS (Pulse-Per-Second) line discipline.
*
- * @receive_buf2: [DRV] ``int ()(struct tty_struct *tty,
- * const unsigned char *cp, const char *fp, int count)``
+ * Optional.
+ *
+ * @receive_buf2: [DRV] ``ssize_t ()(struct tty_struct *tty, const u8 *cp,
+ * const u8 *fp, size_t count)``
*
* This function is called by the low-level tty driver to send characters
* received by the hardware to the line discipline for processing. @cp is a
@@ -186,8 +202,10 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* indicate all data received is %TTY_NORMAL. If assigned, prefer this
* function for automatic flow control.
*
- * @lookahead_buf: [DRV] ``void ()(struct tty_struct *tty,
- * const unsigned char *cp, const char *fp, int count)``
+ * Optional.
+ *
+ * @lookahead_buf: [DRV] ``void ()(struct tty_struct *tty, const u8 *cp,
+ * const u8 *fp, size_t count)``
*
* This function is called by the low-level tty driver for characters
* not eaten by ->receive_buf() or ->receive_buf2(). It is useful for
@@ -198,6 +216,8 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* same characters (e.g. by skipping the actions for high-priority
* characters already handled by ->lookahead_buf()).
*
+ * Optional.
+ *
* @owner: module containting this ldisc (for reference counting)
*
* This structure defines the interface between the tty line discipline
@@ -218,11 +238,10 @@ struct tty_ldisc_ops {
int (*open)(struct tty_struct *tty);
void (*close)(struct tty_struct *tty);
void (*flush_buffer)(struct tty_struct *tty);
- ssize_t (*read)(struct tty_struct *tty, struct file *file,
- unsigned char *buf, size_t nr,
- void **cookie, unsigned long offset);
+ ssize_t (*read)(struct tty_struct *tty, struct file *file, u8 *buf,
+ size_t nr, void **cookie, unsigned long offset);
ssize_t (*write)(struct tty_struct *tty, struct file *file,
- const unsigned char *buf, size_t nr);
+ const u8 *buf, size_t nr);
int (*ioctl)(struct tty_struct *tty, unsigned int cmd,
unsigned long arg);
int (*compat_ioctl)(struct tty_struct *tty, unsigned int cmd,
@@ -235,14 +254,14 @@ struct tty_ldisc_ops {
/*
* The following routines are called from below.
*/
- void (*receive_buf)(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count);
+ void (*receive_buf)(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count);
void (*write_wakeup)(struct tty_struct *tty);
void (*dcd_change)(struct tty_struct *tty, bool active);
- int (*receive_buf2)(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count);
- void (*lookahead_buf)(struct tty_struct *tty, const unsigned char *cp,
- const unsigned char *fp, unsigned int count);
+ size_t (*receive_buf2)(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count);
+ void (*lookahead_buf)(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count);
struct module *owner;
};
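A skeleton line discipline against the reworked hooks; only the ops fields named below are real, the rest is a placeholder:

static size_t foo_ldisc_receive_buf2(struct tty_struct *tty, const u8 *cp,
				     const u8 *fp, size_t count)
{
	/* Accept everything; a real ldisc would queue the data for readers. */
	return count;
}

static struct tty_ldisc_ops foo_ldisc_ops = {
	.owner		= THIS_MODULE,
	.name		= "foo",
	.receive_buf2	= foo_ldisc_receive_buf2,
};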
diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h
index edf685a24f7c..6b367eb17979 100644
--- a/include/linux/tty_port.h
+++ b/include/linux/tty_port.h
@@ -39,9 +39,10 @@ struct tty_port_operations {
};
struct tty_port_client_operations {
- int (*receive_buf)(struct tty_port *port, const unsigned char *, const unsigned char *, size_t);
- void (*lookahead_buf)(struct tty_port *port, const unsigned char *cp,
- const unsigned char *fp, unsigned int count);
+ size_t (*receive_buf)(struct tty_port *port, const u8 *cp, const u8 *fp,
+ size_t count);
+ void (*lookahead_buf)(struct tty_port *port, const u8 *cp,
+ const u8 *fp, size_t count);
void (*write_wakeup)(struct tty_port *port);
};
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 25f8e62a30ec..a21074861f91 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -25,7 +25,6 @@
struct usb_device;
struct usb_driver;
-struct wusb_dev;
/*-------------------------------------------------------------------------*/
@@ -425,7 +424,6 @@ struct usb_host_config {
struct usb_host_bos {
struct usb_bos_descriptor *desc;
- /* wireless cap descriptor is handled by wusb */
struct usb_ext_cap_descriptor *ext_cap;
struct usb_ss_cap_descriptor *ss_cap;
struct usb_ssp_cap_descriptor *ssp_cap;
@@ -612,7 +610,6 @@ struct usb3_lpm_parameters {
* WUSB devices are not, until we authorize them from user space.
* FIXME -- complete doc
* @authenticated: Crypto authentication passed
- * @wusb: device is Wireless USB
* @lpm_capable: device supports LPM
* @lpm_devinit_allow: Allow USB3 device initiated LPM, exit latency is in range
* @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
@@ -634,8 +631,6 @@ struct usb3_lpm_parameters {
* @do_remote_wakeup: remote wakeup should be enabled
* @reset_resume: needs reset instead of resume
* @port_is_suspended: the upstream port is suspended (L2 or U3)
- * @wusb_dev: if this is a Wireless USB device, link to the WUSB
- * specific data for the device.
* @slot_id: Slot ID assigned by xHCI
* @removable: Device can be physically removed from this port
* @l1_params: best effor service latency for USB2 L1 LPM state, and L1 timeout.
@@ -696,7 +691,6 @@ struct usb_device {
unsigned have_langid:1;
unsigned authorized:1;
unsigned authenticated:1;
- unsigned wusb:1;
unsigned lpm_capable:1;
unsigned lpm_devinit_allow:1;
unsigned usb2_hw_lpm_capable:1;
@@ -727,7 +721,6 @@ struct usb_device {
unsigned reset_resume:1;
unsigned port_is_suspended:1;
- struct wusb_dev *wusb_dev;
int slot_id;
struct usb2_lpm_parameters l1_params;
struct usb3_lpm_parameters u1_params;
@@ -1742,11 +1735,6 @@ static inline void usb_fill_bulk_urb(struct urb *urb,
* encoding of the endpoint interval, and express polling intervals in
* microframes (eight per millisecond) rather than in frames (one per
* millisecond).
- *
- * Wireless USB also uses the logarithmic encoding, but specifies it in units of
- * 128us instead of 125us. For Wireless USB devices, the interval is passed
- * through to the host controller, rather than being translated into microframe
- * units.
*/
static inline void usb_fill_int_urb(struct urb *urb,
struct usb_device *dev,
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 969e7dba6358..c93b410b314a 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -3,7 +3,7 @@
* This file holds USB constants and structures that are needed for
* USB device APIs. These are used by the USB device model, which is
* defined in chapter 9 of the USB 2.0 specification and in the
- * Wireless USB 1.0 (spread around). Linux has several APIs in C that
+ * Wireless USB 1.0 spec (now defunct). Linux has several APIs in C that
* need these:
*
* - the host side Linux-USB kernel driver API;
@@ -14,9 +14,6 @@
* act either as a USB host or as a USB device. That means the host and
* device side APIs benefit from working well together.
*
- * There's also "Wireless USB", using low power short range radios for
- * peripheral interconnection but otherwise building on the USB framework.
- *
* Note all descriptors are declared '__attribute__((packed))' so that:
*
* [a] they never get padded, either internally (USB spec writers
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index ee38835ed77c..0b4f2d5faa08 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -63,6 +63,7 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_IMX_IS_HSIC BIT(14)
#define CI_HDRC_PMQOS BIT(15)
#define CI_HDRC_PHY_VBUS_CONTROL BIT(16)
+#define CI_HDRC_HAS_PORTSC_PEC_MISSED BIT(17)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 07531c4f4350..6014340ba980 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -450,29 +450,6 @@ static inline struct usb_composite_driver *to_cdriver(
*
* One of these devices is allocated and initialized before the
* associated device driver's bind() is called.
- *
- * OPEN ISSUE: it appears that some WUSB devices will need to be
- * built by combining a normal (wired) gadget with a wireless one.
- * This revision of the gadget framework should probably try to make
- * sure doing that won't hurt too much.
- *
- * One notion for how to handle Wireless USB devices involves:
- *
- * (a) a second gadget here, discovery mechanism TBD, but likely
- * needing separate "register/unregister WUSB gadget" calls;
- * (b) updates to usb_gadget to include flags "is it wireless",
- * "is it wired", plus (presumably in a wrapper structure)
- * bandgroup and PHY info;
- * (c) presumably a wireless_ep wrapping a usb_ep, and reporting
- * wireless-specific parameters like maxburst and maxsequence;
- * (d) configurations that are specific to wireless links;
- * (e) function drivers that understand wireless configs and will
- * support wireless for (additional) function instances;
- * (f) a function to support association setup (like CBAF), not
- * necessarily requiring a wireless adapter;
- * (g) composite device setup that can create one or more wireless
- * configs, including appropriate association setup support;
- * (h) more, TBD.
*/
struct usb_composite_dev {
struct usb_gadget *gadget;
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 4e9623e8492b..61d4f0b793dc 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -154,7 +154,6 @@ struct usb_hcd {
/* The next flag is a stopgap, to be removed when all the HCDs
* support the new root-hub polling mechanism. */
unsigned uses_new_polling:1;
- unsigned wireless:1; /* Wireless USB HCD */
unsigned has_tt:1; /* Integrated TT in root hub */
unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
unsigned can_do_streams:1; /* HC supports streams */
@@ -249,7 +248,6 @@ struct hc_driver {
#define HCD_SHARED 0x0004 /* Two (or more) usb_hcds share HW */
#define HCD_USB11 0x0010 /* USB 1.1 */
#define HCD_USB2 0x0020 /* USB 2.0 */
-#define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/
#define HCD_USB3 0x0040 /* USB 3.0 */
#define HCD_USB31 0x0050 /* USB 3.1 */
#define HCD_USB32 0x0060 /* USB 3.2 */
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index e4de6bc1f69b..b513749582d7 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -144,6 +144,10 @@ struct usb_phy {
*/
int (*set_wakeup)(struct usb_phy *x, bool enabled);
+ /* notify phy port status change */
+ int (*notify_port_status)(struct usb_phy *x, int port,
+ u16 portstatus, u16 portchange);
+
/* notify phy connect status change */
int (*notify_connect)(struct usb_phy *x,
enum usb_device_speed speed);
@@ -317,6 +321,15 @@ usb_phy_set_wakeup(struct usb_phy *x, bool enabled)
}
static inline int
+usb_phy_notify_port_status(struct usb_phy *x, int port, u16 portstatus, u16 portchange)
+{
+ if (x && x->notify_port_status)
+ return x->notify_port_status(x, port, portstatus, portchange);
+ else
+ return 0;
+}
+
+static inline int
usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed)
{
if (x && x->notify_connect)
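The new notify_port_status hook is wrapped like the other notifiers; a hub- or HCD-side caller might forward port events roughly as below (sketch only, foo_* invented):

static void foo_report_port_change(struct usb_phy *phy, int port,
				   u16 portstatus, u16 portchange)
{
	int ret = usb_phy_notify_port_status(phy, port, portstatus, portchange);

	if (ret)
		dev_dbg(phy->dev, "port %d status notification failed: %d\n",
			port, ret);
}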
diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h
index 85e95a3251d3..83376473ac76 100644
--- a/include/linux/usb/tcpci.h
+++ b/include/linux/usb/tcpci.h
@@ -103,6 +103,7 @@
#define TCPC_POWER_STATUS_SINKING_VBUS BIT(0)
#define TCPC_FAULT_STATUS 0x1f
+#define TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT BIT(7)
#define TCPC_ALERT_EXTENDED 0x21
diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
index 350d49012659..28aeef8f9e7b 100644
--- a/include/linux/usb/typec_altmode.h
+++ b/include/linux/usb/typec_altmode.h
@@ -67,7 +67,7 @@ struct typec_altmode_ops {
int typec_altmode_enter(struct typec_altmode *altmode, u32 *vdo);
int typec_altmode_exit(struct typec_altmode *altmode);
-void typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
+int typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
int typec_altmode_vdm(struct typec_altmode *altmode,
const u32 header, const u32 *vdo, int count);
int typec_altmode_notify(struct typec_altmode *altmode, unsigned long conf,
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index db1b0eaef4eb..0e652026b776 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -208,6 +208,9 @@ struct vdpa_map_file {
* @vdev: vdpa device
* Returns the virtio features support by the
* device
+ * @get_backend_features: Get parent-specific backend features (optional)
+ * Returns the vdpa features supported by the
+ * device.
* @set_driver_features: Set virtio features supported by the driver
* @vdev: vdpa device
* @features: feature support by the driver
@@ -358,6 +361,7 @@ struct vdpa_config_ops {
u32 (*get_vq_align)(struct vdpa_device *vdev);
u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
u64 (*get_device_features)(struct vdpa_device *vdev);
+ u64 (*get_backend_features)(const struct vdpa_device *vdev);
int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
u64 (*get_driver_features)(struct vdpa_device *vdev);
void (*set_config_cb)(struct vdpa_device *vdev,
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index de6041deee37..4cc614a38376 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
/**
* struct virtqueue - a queue to register buffers for sending or receiving.
@@ -61,6 +62,8 @@ int virtqueue_add_sgs(struct virtqueue *vq,
void *data,
gfp_t gfp);
+struct device *virtqueue_dma_dev(struct virtqueue *vq);
+
bool virtqueue_kick(struct virtqueue *vq);
bool virtqueue_kick_prepare(struct virtqueue *vq);
@@ -78,6 +81,8 @@ bool virtqueue_enable_cb(struct virtqueue *vq);
unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
+int virtqueue_set_dma_premapped(struct virtqueue *_vq);
+
bool virtqueue_poll(struct virtqueue *vq, unsigned);
bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
@@ -95,6 +100,8 @@ dma_addr_t virtqueue_get_used_addr(const struct virtqueue *vq);
int virtqueue_resize(struct virtqueue *vq, u32 num,
void (*recycle)(struct virtqueue *vq, void *buf));
+int virtqueue_reset(struct virtqueue *vq,
+ void (*recycle)(struct virtqueue *vq, void *buf));
/**
* struct virtio_device - representation of a device using virtio
@@ -184,10 +191,8 @@ struct virtio_driver {
void (*scan)(struct virtio_device *dev);
void (*remove)(struct virtio_device *dev);
void (*config_changed)(struct virtio_device *dev);
-#ifdef CONFIG_PM
int (*freeze)(struct virtio_device *dev);
int (*restore)(struct virtio_device *dev);
-#endif
};
static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
@@ -206,4 +211,19 @@ void unregister_virtio_driver(struct virtio_driver *drv);
#define module_virtio_driver(__virtio_driver) \
module_driver(__virtio_driver, register_virtio_driver, \
unregister_virtio_driver)
+
+dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);
+
+bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr);
+void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir);
+void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir);
#endif /* _LINUX_VIRTIO_H */
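One plausible use of the premapped-DMA additions, assuming the queue was already switched with virtqueue_set_dma_premapped() and that in that mode the scatterlist carries the DMA address directly; the names and the exact scatterlist convention are assumptions, not a definitive recipe:

static int foo_add_outbuf_premapped(struct virtqueue *vq, void *buf, size_t len)
{
	struct scatterlist sg;
	dma_addr_t addr;

	addr = virtqueue_dma_map_single_attrs(vq, buf, len, DMA_TO_DEVICE, 0);
	if (virtqueue_dma_mapping_error(vq, addr))
		return -ENOMEM;

	sg_init_table(&sg, 1);
	sg.dma_address = addr;	/* assumed: consumed as-is in premapped mode */
	sg.length = len;

	return virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
}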
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 683efe29fa69..1c1d06804d45 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -125,6 +125,17 @@ struct rcu_work {
struct workqueue_struct *wq;
};
+enum wq_affn_scope {
+ WQ_AFFN_DFL, /* use system default */
+ WQ_AFFN_CPU, /* one pod per CPU */
+ WQ_AFFN_SMT, /* one pod per SMT */
+ WQ_AFFN_CACHE, /* one pod per LLC */
+ WQ_AFFN_NUMA, /* one pod per NUMA node */
+ WQ_AFFN_SYSTEM, /* one pod across the whole system */
+
+ WQ_AFFN_NR_TYPES,
+};
+
/**
* struct workqueue_attrs - A struct for workqueue attributes.
*
@@ -138,17 +149,58 @@ struct workqueue_attrs {
/**
* @cpumask: allowed CPUs
+ *
+ * Work items in this workqueue are affine to these CPUs and not allowed
+ * to execute on other CPUs. A pool serving a workqueue must have the
+ * same @cpumask.
*/
cpumask_var_t cpumask;
/**
- * @no_numa: disable NUMA affinity
+ * @__pod_cpumask: internal attribute used to create per-pod pools
+ *
+ * Internal use only.
+ *
+ * Per-pod unbound worker pools are used to improve locality. Always a
+ * subset of ->cpumask. A workqueue can be associated with multiple
+ * worker pools with disjoint @__pod_cpumask's. Whether the enforcement
+ * of a pool's @__pod_cpumask is strict depends on @affn_strict.
+ */
+ cpumask_var_t __pod_cpumask;
+
+ /**
+ * @affn_strict: affinity scope is strict
+ *
+ * If clear, workqueue will make a best-effort attempt at starting the
+ * worker inside @__pod_cpumask but the scheduler is free to migrate it
+ * outside.
*
- * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
- * only modifies how :c:func:`apply_workqueue_attrs` select pools and thus
- * doesn't participate in pool hash calculations or equality comparisons.
+ * If set, workers are only allowed to run inside @__pod_cpumask.
+ */
+ bool affn_strict;
+
+ /*
+ * The fields below aren't properties of a worker_pool. They only modify
+ * how :c:func:`apply_workqueue_attrs` selects pools and thus don't
+ * participate in pool hash calculations or equality comparisons.
+ */
+
+ /**
+ * @affn_scope: unbound CPU affinity scope
+ *
+ * CPU pods are used to improve execution locality of unbound work
+ * items. There are multiple pod types, one for each wq_affn_scope, and
+ * every CPU in the system belongs to one pod in every pod type. CPUs
+ * that belong to the same pod share the worker pool. For example,
+ * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
+ * pool for each NUMA node.
+ */
+ enum wq_affn_scope affn_scope;
+
+ /**
+ * @ordered: work items must be executed one by one in queueing order
*/
- bool no_numa;
+ bool ordered;
};
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
@@ -343,14 +395,10 @@ enum {
__WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
- WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
+ WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE,
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
};
-/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
-#define WQ_UNBOUND_MAX_ACTIVE \
- max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
-
/*
* System-wide workqueues which are always present.
*
@@ -391,7 +439,7 @@ extern struct workqueue_struct *system_freezable_power_efficient_wq;
* alloc_workqueue - allocate a workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags
- * @max_active: max in-flight work items, 0 for default
+ * @max_active: max in-flight work items per CPU, 0 for default
* remaining args: args for @fmt
*
* Allocate a workqueue with the specified parameters. For detailed
@@ -569,6 +617,7 @@ static inline bool schedule_work(struct work_struct *work)
/*
* Detect attempt to flush system-wide workqueues at compile time when possible.
+ * Warn on attempts to flush system-wide workqueues at runtime.
*
* See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
* for reasons and steps for converting system-wide workqueues into local workqueues.
@@ -576,52 +625,13 @@ static inline bool schedule_work(struct work_struct *work)
extern void __warn_flushing_systemwide_wq(void)
__compiletime_warning("Please avoid flushing system-wide workqueues.");
-/**
- * flush_scheduled_work - ensure that any scheduled work has run to completion.
- *
- * Forces execution of the kernel-global workqueue and blocks until its
- * completion.
- *
- * It's very easy to get into trouble if you don't take great care.
- * Either of the following situations will lead to deadlock:
- *
- * One of the work items currently on the workqueue needs to acquire
- * a lock held by your code or its caller.
- *
- * Your code is running in the context of a work routine.
- *
- * They will be detected by lockdep when they occur, but the first might not
- * occur very often. It depends on what work items are on the workqueue and
- * what locks they need, which you have no control over.
- *
- * In most situations flushing the entire workqueue is overkill; you merely
- * need to know that a particular work item isn't queued and isn't running.
- * In such cases you should use cancel_delayed_work_sync() or
- * cancel_work_sync() instead.
- *
- * Please stop calling this function! A conversion to stop flushing system-wide
- * workqueues is in progress. This function will be removed after all in-tree
- * users stopped calling this function.
- */
-/*
- * The background of commit 771c035372a036f8 ("deprecate the
- * '__deprecated' attribute warnings entirely and for good") is that,
- * since Linus builds all modules between every single pull he does,
- * the standard kernel build needs to be _clean_ in order to be able to
- * notice when new problems happen. Therefore, don't emit warning while
- * there are in-tree users.
- */
+/* Please stop using this function; it will be removed in the near future. */
#define flush_scheduled_work() \
({ \
- if (0) \
- __warn_flushing_systemwide_wq(); \
+ __warn_flushing_systemwide_wq(); \
__flush_workqueue(system_wq); \
})
-/*
- * Although there is no longer in-tree caller, for now just emit warning
- * in order to give out-of-tree callers time to update.
- */
#define flush_workqueue(wq) \
({ \
struct workqueue_struct *_wq = (wq); \
@@ -714,5 +724,6 @@ int workqueue_offline_cpu(unsigned int cpu);
void __init workqueue_init_early(void);
void __init workqueue_init(void);
+void __init workqueue_init_topology(void);
#endif
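For most callers nothing changes: an unbound workqueue now simply follows the default affinity scope, and WQ_UNBOUND_MAX_ACTIVE equals WQ_MAX_ACTIVE. A minimal sketch (name invented):

static struct workqueue_struct *foo_wq;

static int __init foo_init(void)
{
	foo_wq = alloc_workqueue("foo", WQ_UNBOUND, 0);
	return foo_wq ? 0 : -ENOMEM;
}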
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 741703b45f61..cb571dfcf4b1 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -856,6 +856,9 @@ static inline int __must_check xa_insert_irq(struct xarray *xa,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -886,6 +889,9 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -916,6 +922,9 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -949,6 +958,9 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -983,6 +995,9 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -1017,6 +1032,9 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
+ * in xa_init_flags().
+ *
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
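The added notes all state the same requirement: the xa_alloc*() family only works on an allocation-aware array. A short sketch under that constraint (struct foo and foo_ids are invented):

static DEFINE_XARRAY_ALLOC(foo_ids);	/* sets XA_FLAGS_ALLOC */

static int foo_assign_id(struct foo *f, u32 *id)
{
	return xa_alloc(&foo_ids, id, f, xa_limit_32b, GFP_KERNEL);
}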
diff --git a/include/media/cec.h b/include/media/cec.h
index abee41ae02d0..9c007f83569a 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -113,22 +113,25 @@ struct cec_fh {
#define CEC_FREE_TIME_TO_USEC(ft) ((ft) * 2400)
struct cec_adap_ops {
- /* Low-level callbacks */
+ /* Low-level callbacks, called with adap->lock held */
int (*adap_enable)(struct cec_adapter *adap, bool enable);
int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable);
int (*adap_monitor_pin_enable)(struct cec_adapter *adap, bool enable);
int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr);
- void (*adap_configured)(struct cec_adapter *adap, bool configured);
+ void (*adap_unconfigured)(struct cec_adapter *adap);
int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg);
+ void (*adap_nb_transmit_canceled)(struct cec_adapter *adap,
+ const struct cec_msg *msg);
void (*adap_status)(struct cec_adapter *adap, struct seq_file *file);
void (*adap_free)(struct cec_adapter *adap);
- /* Error injection callbacks */
+ /* Error injection callbacks, called without adap->lock held */
int (*error_inj_show)(struct cec_adapter *adap, struct seq_file *sf);
bool (*error_inj_parse_line)(struct cec_adapter *adap, char *line);
- /* High-level CEC message callback */
+ /* High-level CEC message callback, called without adap->lock held */
+ void (*configured)(struct cec_adapter *adap);
int (*received)(struct cec_adapter *adap, struct cec_msg *msg);
};
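With adap_configured split into a low-level adap_unconfigured and a high-level configured callback, an adapter's ops table now looks roughly like this (the foo_* handlers are placeholders):

static const struct cec_adap_ops foo_cec_ops = {
	/* low level, called with adap->lock held */
	.adap_enable		= foo_cec_enable,
	.adap_log_addr		= foo_cec_log_addr,
	.adap_transmit		= foo_cec_transmit,
	.adap_unconfigured	= foo_cec_unconfigured,
	/* high level, called without adap->lock held */
	.configured		= foo_cec_configured,
	.received		= foo_cec_received,
};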
diff --git a/include/media/davinci/vpif_types.h b/include/media/davinci/vpif_types.h
index d03e5c54347a..6cce1f09c721 100644
--- a/include/media/davinci/vpif_types.h
+++ b/include/media/davinci/vpif_types.h
@@ -72,7 +72,7 @@ struct vpif_capture_config {
int i2c_adapter_id;
const char *card_name;
- struct v4l2_async_subdev *asd[VPIF_CAPTURE_MAX_CHANNELS];
+ struct v4l2_async_connection *asd[VPIF_CAPTURE_MAX_CHANNELS];
int asd_sizes[VPIF_CAPTURE_MAX_CHANNELS];
};
#endif /* _VPIF_TYPES_H */
diff --git a/include/media/i2c/ds90ub9xx.h b/include/media/i2c/ds90ub9xx.h
new file mode 100644
index 000000000000..0245198469ec
--- /dev/null
+++ b/include/media/i2c/ds90ub9xx.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __MEDIA_I2C_DS90UB9XX_H__
+#define __MEDIA_I2C_DS90UB9XX_H__
+
+#include <linux/types.h>
+
+struct i2c_atr;
+
+/**
+ * struct ds90ub9xx_platform_data - platform data for FPD-Link Serializers.
+ * @port: Deserializer RX port for this Serializer
+ * @atr: I2C ATR
+ * @bc_rate: back-channel clock rate
+ */
+struct ds90ub9xx_platform_data {
+ u32 port;
+ struct i2c_atr *atr;
+ unsigned long bc_rate;
+};
+
+#endif /* __MEDIA_I2C_DS90UB9XX_H__ */
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.h b/include/media/ipu-bridge.h
index b76ed8a641e2..bdc654a45521 100644
--- a/drivers/media/pci/intel/ipu3/cio2-bridge.h
+++ b/include/media/ipu-bridge.h
@@ -1,25 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Author: Dan Scally <djrscally@gmail.com> */
-#ifndef __CIO2_BRIDGE_H
-#define __CIO2_BRIDGE_H
+#ifndef __IPU_BRIDGE_H
+#define __IPU_BRIDGE_H
#include <linux/property.h>
#include <linux/types.h>
+#include <media/v4l2-fwnode.h>
-#include "ipu3-cio2.h"
-
-struct i2c_client;
-
-#define CIO2_HID "INT343E"
-#define CIO2_MAX_LANES 4
+#define IPU_HID "INT343E"
+#define IPU_MAX_LANES 4
+#define IPU_MAX_PORTS 4
#define MAX_NUM_LINK_FREQS 3
/* Values are educated guesses as we don't have a spec */
-#define CIO2_SENSOR_ROTATION_NORMAL 0
-#define CIO2_SENSOR_ROTATION_INVERTED 1
+#define IPU_SENSOR_ROTATION_NORMAL 0
+#define IPU_SENSOR_ROTATION_INVERTED 1
-#define CIO2_SENSOR_CONFIG(_HID, _NR, ...) \
- (const struct cio2_sensor_config) { \
+#define IPU_SENSOR_CONFIG(_HID, _NR, ...) \
+ (const struct ipu_sensor_config) { \
.hid = _HID, \
.nr_link_freqs = _NR, \
.link_freqs = { __VA_ARGS__ } \
@@ -49,19 +47,24 @@ struct i2c_client;
.name = _TYPE, \
}
-enum cio2_sensor_swnodes {
+enum ipu_sensor_swnodes {
SWNODE_SENSOR_HID,
SWNODE_SENSOR_PORT,
SWNODE_SENSOR_ENDPOINT,
- SWNODE_CIO2_PORT,
- SWNODE_CIO2_ENDPOINT,
- /* Must be last because it is optional / maybe empty */
+ SWNODE_IPU_PORT,
+ SWNODE_IPU_ENDPOINT,
+ /* below are optional / maybe empty */
+ SWNODE_IVSC_HID,
+ SWNODE_IVSC_SENSOR_PORT,
+ SWNODE_IVSC_SENSOR_ENDPOINT,
+ SWNODE_IVSC_IPU_PORT,
+ SWNODE_IVSC_IPU_ENDPOINT,
SWNODE_VCM,
SWNODE_COUNT
};
/* Data representation as it is in ACPI SSDB buffer */
-struct cio2_sensor_ssdb {
+struct ipu_sensor_ssdb {
u8 version;
u8 sku;
u8 guid_csi2[16];
@@ -90,7 +93,7 @@ struct cio2_sensor_ssdb {
u8 reserved2[13];
} __packed;
-struct cio2_property_names {
+struct ipu_property_names {
char clock_frequency[16];
char rotation[9];
char orientation[12];
@@ -100,47 +103,79 @@ struct cio2_property_names {
char link_frequencies[17];
};
-struct cio2_node_names {
+struct ipu_node_names {
char port[7];
+ char ivsc_sensor_port[7];
+ char ivsc_ipu_port[7];
char endpoint[11];
char remote_port[7];
+ char vcm[16];
};
-struct cio2_sensor_config {
+struct ipu_sensor_config {
const char *hid;
const u8 nr_link_freqs;
const u64 link_freqs[MAX_NUM_LINK_FREQS];
};
-struct cio2_sensor {
+struct ipu_sensor {
/* append ssdb.link(u8) in "-%u" format as suffix of HID */
char name[ACPI_ID_LEN + 4];
struct acpi_device *adev;
- struct i2c_client *vcm_i2c_client;
+
+ struct device *csi_dev;
+ struct acpi_device *ivsc_adev;
+ char ivsc_name[ACPI_ID_LEN + 4];
/* SWNODE_COUNT + 1 for terminating NULL */
const struct software_node *group[SWNODE_COUNT + 1];
struct software_node swnodes[SWNODE_COUNT];
- struct cio2_node_names node_names;
+ struct ipu_node_names node_names;
- struct cio2_sensor_ssdb ssdb;
- struct acpi_pld_info *pld;
+ u8 link;
+ u8 lanes;
+ u32 mclkspeed;
+ u32 rotation;
+ enum v4l2_fwnode_orientation orientation;
+ const char *vcm_type;
- struct cio2_property_names prop_names;
+ struct ipu_property_names prop_names;
struct property_entry ep_properties[5];
struct property_entry dev_properties[5];
- struct property_entry cio2_properties[3];
+ struct property_entry ipu_properties[3];
+ struct property_entry ivsc_properties[1];
+ struct property_entry ivsc_sensor_ep_properties[4];
+ struct property_entry ivsc_ipu_ep_properties[4];
+
struct software_node_ref_args local_ref[1];
struct software_node_ref_args remote_ref[1];
struct software_node_ref_args vcm_ref[1];
+ struct software_node_ref_args ivsc_sensor_ref[1];
+ struct software_node_ref_args ivsc_ipu_ref[1];
};
-struct cio2_bridge {
- char cio2_node_name[ACPI_ID_LEN];
- struct software_node cio2_hid_node;
+typedef int (*ipu_parse_sensor_fwnode_t)(struct acpi_device *adev,
+ struct ipu_sensor *sensor);
+
+struct ipu_bridge {
+ struct device *dev;
+ ipu_parse_sensor_fwnode_t parse_sensor_fwnode;
+ char ipu_node_name[ACPI_ID_LEN];
+ struct software_node ipu_hid_node;
u32 data_lanes[4];
unsigned int n_sensors;
- struct cio2_sensor sensors[CIO2_NUM_PORTS];
+ struct ipu_sensor sensors[IPU_MAX_PORTS];
};
+#if IS_ENABLED(CONFIG_IPU_BRIDGE)
+int ipu_bridge_init(struct device *dev,
+ ipu_parse_sensor_fwnode_t parse_sensor_fwnode);
+int ipu_bridge_parse_ssdb(struct acpi_device *adev, struct ipu_sensor *sensor);
+int ipu_bridge_instantiate_vcm(struct device *sensor);
+#else
+/* Use a define to avoid the @parse_sensor_fwnode argument getting evaluated */
+#define ipu_bridge_init(dev, parse_sensor_fwnode) (0)
+static inline int ipu_bridge_instantiate_vcm(struct device *s) { return 0; }
+#endif
+
#endif
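A CSI-2 receiver hands its fwnode parser to the shared bridge code during probe; with CONFIG_IPU_BRIDGE disabled the stub above turns the call into a no-op. Sketch only, with everything except ipu_bridge_init() and ipu_bridge_parse_ssdb() assumed:

static int foo_csi2_probe(struct device *dev)
{
	int ret;

	ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
	if (ret)
		return ret;

	/* ...continue with normal receiver setup... */
	return 0;
}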
diff --git a/include/media/ov_16bit_addr_reg_helpers.h b/include/media/ov_16bit_addr_reg_helpers.h
deleted file mode 100644
index 1c60a50bd795..000000000000
--- a/include/media/ov_16bit_addr_reg_helpers.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * I2C register access helpers for Omnivision OVxxxx image sensors which expect
- * a 16 bit register address in big-endian format and which have 1-3 byte
- * wide registers, in big-endian format (for the higher width registers).
- *
- * Based on the register helpers from drivers/media/i2c/ov2680.c which is:
- * Copyright (C) 2018 Linaro Ltd
- */
-#ifndef __OV_16BIT_ADDR_REG_HELPERS_H
-#define __OV_16BIT_ADDR_REG_HELPERS_H
-
-#include <asm/unaligned.h>
-#include <linux/dev_printk.h>
-#include <linux/i2c.h>
-
-static inline int ov_read_reg(struct i2c_client *client, u16 reg,
- unsigned int len, u32 *val)
-{
- u8 addr_buf[2], data_buf[4] = { };
- struct i2c_msg msgs[2];
- int ret;
-
- if (len > 4)
- return -EINVAL;
-
- put_unaligned_be16(reg, addr_buf);
-
- msgs[0].addr = client->addr;
- msgs[0].flags = 0;
- msgs[0].len = ARRAY_SIZE(addr_buf);
- msgs[0].buf = addr_buf;
-
- msgs[1].addr = client->addr;
- msgs[1].flags = I2C_M_RD;
- msgs[1].len = len;
- msgs[1].buf = &data_buf[4 - len];
-
- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret != ARRAY_SIZE(msgs)) {
- dev_err(&client->dev, "read error: reg=0x%4x: %d\n", reg, ret);
- return -EIO;
- }
-
- *val = get_unaligned_be32(data_buf);
-
- return 0;
-}
-
-#define ov_read_reg8(s, r, v) ov_read_reg(s, r, 1, v)
-#define ov_read_reg16(s, r, v) ov_read_reg(s, r, 2, v)
-#define ov_read_reg24(s, r, v) ov_read_reg(s, r, 3, v)
-
-static inline int ov_write_reg(struct i2c_client *client, u16 reg,
- unsigned int len, u32 val)
-{
- u8 buf[6];
- int ret;
-
- if (len > 4)
- return -EINVAL;
-
- put_unaligned_be16(reg, buf);
- put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
- ret = i2c_master_send(client, buf, len + 2);
- if (ret != len + 2) {
- dev_err(&client->dev, "write error: reg=0x%4x: %d\n", reg, ret);
- return -EIO;
- }
-
- return 0;
-}
-
-#define ov_write_reg8(s, r, v) ov_write_reg(s, r, 1, v)
-#define ov_write_reg16(s, r, v) ov_write_reg(s, r, 2, v)
-#define ov_write_reg24(s, r, v) ov_write_reg(s, r, 3, v)
-
-static inline int ov_update_reg(struct i2c_client *client, u16 reg, u8 mask, u8 val)
-{
- u32 readval;
- int ret;
-
- ret = ov_read_reg8(client, reg, &readval);
- if (ret < 0)
- return ret;
-
- val = (readval & ~mask) | (val & mask);
-
- return ov_write_reg8(client, reg, val);
-}
-
-#endif
diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
index 25eb1d138c06..9bd326d31181 100644
--- a/include/media/v4l2-async.h
+++ b/include/media/v4l2-async.h
@@ -22,76 +22,85 @@ struct v4l2_async_notifier;
* enum v4l2_async_match_type - type of asynchronous subdevice logic to be used
* in order to identify a match
*
- * @V4L2_ASYNC_MATCH_I2C: Match will check for I2C adapter ID and address
- * @V4L2_ASYNC_MATCH_FWNODE: Match will use firmware node
+ * @V4L2_ASYNC_MATCH_TYPE_I2C: Match will check for I2C adapter ID and address
+ * @V4L2_ASYNC_MATCH_TYPE_FWNODE: Match will use firmware node
*
- * This enum is used by the asynchronous sub-device logic to define the
+ * This enum is used by the asynchronous connection logic to define the
* algorithm that will be used to match an asynchronous device.
*/
enum v4l2_async_match_type {
- V4L2_ASYNC_MATCH_I2C,
- V4L2_ASYNC_MATCH_FWNODE,
+ V4L2_ASYNC_MATCH_TYPE_I2C,
+ V4L2_ASYNC_MATCH_TYPE_FWNODE,
};
/**
- * struct v4l2_async_subdev - sub-device descriptor, as known to a bridge
- *
- * @match_type: type of match that will be used
- * @match: union of per-bus type matching data sets
- * @match.fwnode:
- * pointer to &struct fwnode_handle to be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_FWNODE.
- * @match.i2c: embedded struct with I2C parameters to be matched.
+ * struct v4l2_async_match_desc - async connection match information
+ *
+ * @type: type of match that will be used
+ * @fwnode: pointer to &struct fwnode_handle to be matched.
+ * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_FWNODE.
+ * @i2c: embedded struct with I2C parameters to be matched.
* Both @match.i2c.adapter_id and @match.i2c.address
* should be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
- * @match.i2c.adapter_id:
+ * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C.
+ * @i2c.adapter_id:
* I2C adapter ID to be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
- * @match.i2c.address:
+ * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C.
+ * @i2c.address:
* I2C address to be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
- * @asd_list: used to add struct v4l2_async_subdev objects to the
- * master notifier @asd_list
- * @list: used to link struct v4l2_async_subdev objects, waiting to be
- * probed, to a notifier->waiting list
- *
- * When this struct is used as a member in a driver specific struct,
- * the driver specific struct shall contain the &struct
- * v4l2_async_subdev as its first member.
+ * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C.
*/
-struct v4l2_async_subdev {
- enum v4l2_async_match_type match_type;
+struct v4l2_async_match_desc {
+ enum v4l2_async_match_type type;
union {
struct fwnode_handle *fwnode;
struct {
int adapter_id;
unsigned short address;
} i2c;
- } match;
+ };
+};
- /* v4l2-async core private: not to be used by drivers */
- struct list_head list;
- struct list_head asd_list;
+/**
+ * struct v4l2_async_connection - sub-device connection descriptor, as known to
+ * a bridge
+ *
+ * @match: struct of match type and per-bus type matching data sets
+ * @notifier: the async notifier the connection is related to
+ * @asc_entry: used to add struct v4l2_async_connection objects to the
+ * notifier @waiting_list or @done_list
+ * @asc_subdev_entry: entry in struct v4l2_async_subdev.asc_list list
+ * @sd: the related sub-device
+ *
+ * When this struct is used as a member in a driver specific struct, the driver
+ * specific struct shall contain the &struct v4l2_async_connection as its first
+ * member.
+ */
+struct v4l2_async_connection {
+ struct v4l2_async_match_desc match;
+ struct v4l2_async_notifier *notifier;
+ struct list_head asc_entry;
+ struct list_head asc_subdev_entry;
+ struct v4l2_subdev *sd;
};
/**
* struct v4l2_async_notifier_operations - Asynchronous V4L2 notifier operations
- * @bound: a subdevice driver has successfully probed one of the subdevices
- * @complete: All subdevices have been probed successfully. The complete
+ * @bound: a sub-device has been bound by the given connection
+ * @complete: All connections have been bound successfully. The complete
* callback is only executed for the root notifier.
* @unbind: a subdevice is leaving
- * @destroy: the asd is about to be freed
+ * @destroy: the asc is about to be freed
*/
struct v4l2_async_notifier_operations {
int (*bound)(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd);
+ struct v4l2_async_connection *asc);
int (*complete)(struct v4l2_async_notifier *notifier);
void (*unbind)(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd);
- void (*destroy)(struct v4l2_async_subdev *asd);
+ struct v4l2_async_connection *asc);
+ void (*destroy)(struct v4l2_async_connection *asc);
};
/**
@@ -101,20 +110,31 @@ struct v4l2_async_notifier_operations {
* @v4l2_dev: v4l2_device of the root notifier, NULL otherwise
* @sd: sub-device that registered the notifier, NULL otherwise
* @parent: parent notifier
- * @asd_list: master list of struct v4l2_async_subdev
- * @waiting: list of struct v4l2_async_subdev, waiting for their drivers
- * @done: list of struct v4l2_subdev, already probed
- * @list: member in a global list of notifiers
+ * @waiting_list: list of struct v4l2_async_connection, waiting for their
+ * drivers
+ * @done_list: list of struct v4l2_subdev, already probed
+ * @notifier_entry: member in a global list of notifiers
*/
struct v4l2_async_notifier {
const struct v4l2_async_notifier_operations *ops;
struct v4l2_device *v4l2_dev;
struct v4l2_subdev *sd;
struct v4l2_async_notifier *parent;
- struct list_head asd_list;
- struct list_head waiting;
- struct list_head done;
- struct list_head list;
+ struct list_head waiting_list;
+ struct list_head done_list;
+ struct list_head notifier_entry;
+};
+
+/**
+ * struct v4l2_async_subdev_endpoint - Entry in sub-device's fwnode list
+ *
+ * @async_subdev_endpoint_entry: An entry in async_subdev_endpoint_list of
+ * &struct v4l2_subdev
+ * @endpoint: Endpoint fwnode against which to match the sub-device
+ */
+struct v4l2_async_subdev_endpoint {
+ struct list_head async_subdev_endpoint_entry;
+ struct fwnode_handle *endpoint;
};
/**
@@ -128,76 +148,71 @@ void v4l2_async_debug_init(struct dentry *debugfs_dir);
* v4l2_async_nf_init - Initialize a notifier.
*
* @notifier: pointer to &struct v4l2_async_notifier
+ * @v4l2_dev: pointer to &struct v4l2_device
*
- * This function initializes the notifier @asd_list. It must be called
+ * This function initializes the notifier @asc_entry. It must be called
* before adding a subdevice to a notifier, using one of:
* v4l2_async_nf_add_fwnode_remote(),
- * v4l2_async_nf_add_fwnode(),
- * v4l2_async_nf_add_i2c(),
- * __v4l2_async_nf_add_subdev() or
- * v4l2_async_nf_parse_fwnode_endpoints().
+ * v4l2_async_nf_add_fwnode() or
+ * v4l2_async_nf_add_i2c().
*/
-void v4l2_async_nf_init(struct v4l2_async_notifier *notifier);
+void v4l2_async_nf_init(struct v4l2_async_notifier *notifier,
+ struct v4l2_device *v4l2_dev);
/**
- * __v4l2_async_nf_add_subdev - Add an async subdev to the
- * notifier's master asd list.
+ * v4l2_async_subdev_nf_init - Initialize a sub-device notifier.
*
* @notifier: pointer to &struct v4l2_async_notifier
- * @asd: pointer to &struct v4l2_async_subdev
+ * @sd: pointer to &struct v4l2_subdev
*
- * \warning: Drivers should avoid using this function and instead use one of:
- * v4l2_async_nf_add_fwnode(),
- * v4l2_async_nf_add_fwnode_remote() or
+ * This function initializes the notifier @asc_list. It must be called
+ * before adding a subdevice to a notifier, using one of:
+ * v4l2_async_nf_add_fwnode_remote(), v4l2_async_nf_add_fwnode() or
* v4l2_async_nf_add_i2c().
- *
- * Call this function before registering a notifier to link the provided @asd to
- * the notifiers master @asd_list. The @asd must be allocated with k*alloc() as
- * it will be freed by the framework when the notifier is destroyed.
*/
-int __v4l2_async_nf_add_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd);
+void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd);
-struct v4l2_async_subdev *
+struct v4l2_async_connection *
__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
struct fwnode_handle *fwnode,
- unsigned int asd_struct_size);
+ unsigned int asc_struct_size);
/**
* v4l2_async_nf_add_fwnode - Allocate and add a fwnode async
- * subdev to the notifier's master asd_list.
+ * subdev to the notifier's master asc_list.
*
* @notifier: pointer to &struct v4l2_async_notifier
* @fwnode: fwnode handle of the sub-device to be matched, pointer to
* &struct fwnode_handle
- * @type: Type of the driver's async sub-device struct. The &struct
- * v4l2_async_subdev shall be the first member of the driver's async
- * sub-device struct, i.e. both begin at the same memory address.
+ * @type: Type of the driver's async sub-device or connection struct. The
+ * &struct v4l2_async_connection shall be the first member of the
+ * driver's async struct, i.e. both begin at the same memory address.
*
- * Allocate a fwnode-matched asd of size asd_struct_size, and add it to the
- * notifiers @asd_list. The function also gets a reference of the fwnode which
+ * Allocate a fwnode-matched asc of size asc_struct_size, and add it to the
+ * notifiers @asc_list. The function also gets a reference of the fwnode which
* is released later at notifier cleanup time.
*/
#define v4l2_async_nf_add_fwnode(notifier, fwnode, type) \
((type *)__v4l2_async_nf_add_fwnode(notifier, fwnode, sizeof(type)))
-struct v4l2_async_subdev *
+struct v4l2_async_connection *
__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
struct fwnode_handle *endpoint,
- unsigned int asd_struct_size);
+ unsigned int asc_struct_size);
/**
* v4l2_async_nf_add_fwnode_remote - Allocate and add a fwnode
* remote async subdev to the
- * notifier's master asd_list.
+ * notifier's master asc_list.
*
* @notifier: pointer to &struct v4l2_async_notifier
- * @ep: local endpoint pointing to the remote sub-device to be matched,
+ * @ep: local endpoint pointing to the remote connection to be matched,
* pointer to &struct fwnode_handle
- * @type: Type of the driver's async sub-device struct. The &struct
- * v4l2_async_subdev shall be the first member of the driver's async
- * sub-device struct, i.e. both begin at the same memory address.
+ * @type: Type of the driver's async connection struct. The &struct
+ * v4l2_async_connection shall be the first member of the driver's async
+ * connection struct, i.e. both begin at the same memory address.
*
* Gets the remote endpoint of a given local endpoint, set it up for fwnode
- * matching and adds the async sub-device to the notifier's @asd_list. The
+ * matching and adds the async connection to the notifier's @asc_list. The
* function also gets a reference of the fwnode which is released later at
* notifier cleanup time.
*
@@ -207,46 +222,63 @@ __v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
#define v4l2_async_nf_add_fwnode_remote(notifier, ep, type) \
((type *)__v4l2_async_nf_add_fwnode_remote(notifier, ep, sizeof(type)))
-struct v4l2_async_subdev *
+struct v4l2_async_connection *
__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier,
int adapter_id, unsigned short address,
- unsigned int asd_struct_size);
+ unsigned int asc_struct_size);
/**
* v4l2_async_nf_add_i2c - Allocate and add an i2c async
- * subdev to the notifier's master asd_list.
+ * subdev to the notifier's master asc_list.
*
* @notifier: pointer to &struct v4l2_async_notifier
* @adapter: I2C adapter ID to be matched
- * @address: I2C address of sub-device to be matched
- * @type: Type of the driver's async sub-device struct. The &struct
- * v4l2_async_subdev shall be the first member of the driver's async
- * sub-device struct, i.e. both begin at the same memory address.
+ * @address: I2C address of connection to be matched
+ * @type: Type of the driver's async connection struct. The &struct
+ * v4l2_async_connection shall be the first member of the driver's async
+ * connection struct, i.e. both begin at the same memory address.
*
* Same as v4l2_async_nf_add_fwnode() but for I2C matched
- * sub-devices.
+ * connections.
*/
#define v4l2_async_nf_add_i2c(notifier, adapter, address, type) \
((type *)__v4l2_async_nf_add_i2c(notifier, adapter, address, \
sizeof(type)))
/**
- * v4l2_async_nf_register - registers a subdevice asynchronous notifier
+ * v4l2_async_subdev_endpoint_add - Add an endpoint fwnode to async sub-device
+ * matching list
*
- * @v4l2_dev: pointer to &struct v4l2_device
- * @notifier: pointer to &struct v4l2_async_notifier
+ * @sd: the sub-device
+ * @fwnode: the endpoint fwnode to match
+ *
+ * Add a fwnode to the async sub-device's matching list. This allows registering
+ * multiple async sub-devices from a single device.
+ *
+ * Note that v4l2_subdev_cleanup() must be called as part of the sub-device's
+ * cleanup if endpoints have been added to the sub-device's fwnode matching
+ * list.
+ *
+ * Returns an error on failure, 0 on success.
*/
-int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
- struct v4l2_async_notifier *notifier);
+int v4l2_async_subdev_endpoint_add(struct v4l2_subdev *sd,
+ struct fwnode_handle *fwnode);
/**
- * v4l2_async_subdev_nf_register - registers a subdevice asynchronous
- * notifier for a sub-device
+ * v4l2_async_connection_unique - return a unique &struct v4l2_async_connection
+ * for a sub-device
+ * @sd: the sub-device
+ *
+ * Return the async connection for a sub-device, provided the sub-device has
+ * exactly one.
+ */
+struct v4l2_async_connection *
+v4l2_async_connection_unique(struct v4l2_subdev *sd);
+
+/**
+ * v4l2_async_nf_register - registers a subdevice asynchronous notifier
*
- * @sd: pointer to &struct v4l2_subdev
* @notifier: pointer to &struct v4l2_async_notifier
*/
-int v4l2_async_subdev_nf_register(struct v4l2_subdev *sd,
- struct v4l2_async_notifier *notifier);
+int v4l2_async_nf_register(struct v4l2_async_notifier *notifier);
/**
* v4l2_async_nf_unregister - unregisters a subdevice
@@ -261,14 +293,10 @@ void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier);
* @notifier: the notifier the resources of which are to be cleaned up
*
* Release memory resources related to a notifier, including the async
- * sub-devices allocated for the purposes of the notifier but not the notifier
+ * connections allocated for the purposes of the notifier but not the notifier
* itself. The user is responsible for calling this function to clean up the
- * notifier after calling
- * v4l2_async_nf_add_fwnode_remote(),
- * v4l2_async_nf_add_fwnode(),
- * v4l2_async_nf_add_i2c(),
- * __v4l2_async_nf_add_subdev() or
- * v4l2_async_nf_parse_fwnode_endpoints().
+ * notifier after calling v4l2_async_nf_add_fwnode_remote(),
+ * v4l2_async_nf_add_fwnode() or v4l2_async_nf_add_i2c().
*
* There is no harm from calling v4l2_async_nf_cleanup() in other
* cases as long as its memory has been zeroed after it has been
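For orientation, a minimal driver-side sketch of the reworked async connection API above. This is not part of the patch; the hypo_* names are hypothetical, the notifier is assumed to have been initialised elsewhere, and the usual ERR_PTR() return convention is assumed for the helper.

struct hypo_async_conn {
	struct v4l2_async_connection asc;	/* must be the first member */
	/* driver-private, per-connection state follows */
};

static int hypo_add_remote_and_register(struct v4l2_async_notifier *notifier,
					struct fwnode_handle *ep)
{
	struct hypo_async_conn *conn;

	/* Allocate the connection and set it up for fwnode matching. */
	conn = v4l2_async_nf_add_fwnode_remote(notifier, ep,
					       struct hypo_async_conn);
	if (IS_ERR(conn))
		return PTR_ERR(conn);

	/* Note the new single-argument signature. */
	return v4l2_async_nf_register(notifier);
}

On teardown the notifier must still be unregistered and then released with v4l2_async_nf_cleanup(), as described above.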
diff --git a/include/media/v4l2-cci.h b/include/media/v4l2-cci.h
new file mode 100644
index 000000000000..0f6803e4b17e
--- /dev/null
+++ b/include/media/v4l2-cci.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MIPI Camera Control Interface (CCI) register access helpers.
+ *
+ * Copyright (C) 2023 Hans de Goede <hansg@kernel.org>
+ */
+#ifndef _V4L2_CCI_H
+#define _V4L2_CCI_H
+
+#include <linux/types.h>
+
+struct i2c_client;
+struct regmap;
+
+/**
+ * struct cci_reg_sequence - An individual write from a sequence of CCI writes
+ *
+ * @reg: Register address, use CCI_REG#() macros to encode reg width
+ * @val: Register value
+ *
+ * Register/value pairs for sequences of writes.
+ */
+struct cci_reg_sequence {
+ u32 reg;
+ u64 val;
+};
+
+/*
+ * Macros to define register address with the register width encoded
+ * into the higher bits.
+ */
+#define CCI_REG_ADDR_MASK GENMASK(15, 0)
+#define CCI_REG_WIDTH_SHIFT 16
+#define CCI_REG_WIDTH_MASK GENMASK(19, 16)
+
+#define CCI_REG8(x) ((1 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG16(x) ((2 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG24(x) ((3 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG32(x) ((4 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG64(x) ((8 << CCI_REG_WIDTH_SHIFT) | (x))
+
+/**
+ * cci_read() - Read a value from a single CCI register
+ *
+ * @map: Register map to read from
+ * @reg: Register address to read, use CCI_REG#() macros to encode reg width
+ * @val: Pointer to store read value
+ * @err: Optional pointer to store errors; if a previous error is set,
+ * the read will be skipped
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+int cci_read(struct regmap *map, u32 reg, u64 *val, int *err);
+
+/**
+ * cci_write() - Write a value to a single CCI register
+ *
+ * @map: Register map to write to
+ * @reg: Register address to write, use CCI_REG#() macros to encode reg width
+ * @val: Value to be written
+ * @err: Optional pointer to store errors; if a previous error is set,
+ * the write will be skipped
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+int cci_write(struct regmap *map, u32 reg, u64 val, int *err);
+
+/**
+ * cci_update_bits() - Perform a read/modify/write cycle on
+ * a single CCI register
+ *
+ * @map: Register map to update
+ * @reg: Register address to update, use CCI_REG#() macros to encode reg width
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @err: Optional pointer to store errors; if a previous error is set,
+ * the update will be skipped
+ *
+ * Note that this uses read-modify-write to update the bits; atomicity with
+ * regard to other cci_*() register access functions is NOT guaranteed.
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+int cci_update_bits(struct regmap *map, u32 reg, u64 mask, u64 val, int *err);
+
+/**
+ * cci_multi_reg_write() - Write multiple registers to the device
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register-address, -value pairs to be
+ * written; register addresses use CCI_REG#() macros to encode reg width
+ * @num_regs: Number of registers to write
+ * @err: Optional pointer to store errors; if a previous error is set,
+ * the write will be skipped
+ *
+ * Write multiple registers to the device where the set of register, value
+ * pairs are supplied in any order, possibly not all in a single range.
+ *
+ * Use of the CCI_REG#() macros to encode reg width is mandatory.
+ *
+ * For raw lists of register-address, -value pairs with only 8-bit wide
+ * writes, regmap_multi_reg_write() can be used instead.
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+int cci_multi_reg_write(struct regmap *map, const struct cci_reg_sequence *regs,
+ unsigned int num_regs, int *err);
+
+#if IS_ENABLED(CONFIG_V4L2_CCI_I2C)
+/**
+ * devm_cci_regmap_init_i2c() - Create regmap to use with cci_*() register
+ * access functions
+ *
+ * @client: i2c_client to create the regmap for
+ * @reg_addr_bits: register address width to use (8 or 16)
+ *
+ * Note that the memory for the created regmap is devm() managed and tied to
+ * the client.
+ *
+ * Return: pointer to the new regmap on success, ERR_PTR() on failure.
+ */
+struct regmap *devm_cci_regmap_init_i2c(struct i2c_client *client,
+ int reg_addr_bits);
+#endif
+
+#endif
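A short usage sketch for the new CCI helpers above. This is not part of the patch; the hypo_* identifiers and register addresses are made up for illustration.

#include <linux/i2c.h>
#include <media/v4l2-cci.h>

#define HYPO_REG_CHIP_ID	CCI_REG16(0x0016)	/* 16-bit register */
#define HYPO_REG_MODE_SELECT	CCI_REG8(0x0100)	/* 8-bit register */

static int hypo_sensor_start(struct i2c_client *client)
{
	struct regmap *cci;
	u64 chip_id;
	int ret = 0;

	cci = devm_cci_regmap_init_i2c(client, 16);
	if (IS_ERR(cci))
		return PTR_ERR(cci);

	/* Errors accumulate in ret; later accesses are skipped on failure. */
	cci_read(cci, HYPO_REG_CHIP_ID, &chip_id, &ret);
	cci_write(cci, HYPO_REG_MODE_SELECT, 0x01, &ret);

	return ret;
}

The optional err pointer is what allows the chained calls above to be written without checking each return value individually.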
diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
index 394d798f3dfa..f7c57c776589 100644
--- a/include/media/v4l2-fwnode.h
+++ b/include/media/v4l2-fwnode.h
@@ -21,10 +21,6 @@
#include <media/v4l2-mediabus.h>
-struct fwnode_handle;
-struct v4l2_async_notifier;
-struct v4l2_async_subdev;
-
/**
* struct v4l2_fwnode_endpoint - the endpoint data structure
* @base: fwnode endpoint of the v4l2_fwnode
@@ -393,72 +389,6 @@ int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode,
int v4l2_fwnode_device_parse(struct device *dev,
struct v4l2_fwnode_device_properties *props);
-/**
- * typedef parse_endpoint_func - Driver's callback function to be called on
- * each V4L2 fwnode endpoint.
- *
- * @dev: pointer to &struct device
- * @vep: pointer to &struct v4l2_fwnode_endpoint
- * @asd: pointer to &struct v4l2_async_subdev
- *
- * Return:
- * * %0 on success
- * * %-ENOTCONN if the endpoint is to be skipped but this
- * should not be considered as an error
- * * %-EINVAL if the endpoint configuration is invalid
- */
-typedef int (*parse_endpoint_func)(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd);
-
-/**
- * v4l2_async_nf_parse_fwnode_endpoints - Parse V4L2 fwnode endpoints in a
- * device node
- * @dev: the device the endpoints of which are to be parsed
- * @notifier: notifier for @dev
- * @asd_struct_size: size of the driver's async sub-device struct, including
- * sizeof(struct v4l2_async_subdev). The &struct
- * v4l2_async_subdev shall be the first member of
- * the driver's async sub-device struct, i.e. both
- * begin at the same memory address.
- * @parse_endpoint: Driver's callback function called on each V4L2 fwnode
- * endpoint. Optional.
- *
- * DEPRECATED! This function is deprecated. Don't use it in new drivers.
- * Instead see an example in cio2_parse_firmware() function in
- * drivers/media/pci/intel/ipu3/ipu3-cio2.c .
- *
- * Parse the fwnode endpoints of the @dev device and populate the async sub-
- * devices list in the notifier. The @parse_endpoint callback function is
- * called for each endpoint with the corresponding async sub-device pointer to
- * let the caller initialize the driver-specific part of the async sub-device
- * structure.
- *
- * The notifier memory shall be zeroed before this function is called on the
- * notifier.
- *
- * This function may not be called on a registered notifier and may be called on
- * a notifier only once.
- *
- * The &struct v4l2_fwnode_endpoint passed to the callback function
- * @parse_endpoint is released once the function is finished. If there is a need
- * to retain that configuration, the user needs to allocate memory for it.
- *
- * Any notifier populated using this function must be released with a call to
- * v4l2_async_nf_cleanup() after it has been unregistered and the async
- * sub-devices are no longer in use, even if the function returned an error.
- *
- * Return: %0 on success, including when no async sub-devices are found
- * %-ENOMEM if memory allocation failed
- * %-EINVAL if graph or endpoint parsing failed
- * Other error codes as returned by @parse_endpoint
- */
-int
-v4l2_async_nf_parse_fwnode_endpoints(struct device *dev,
- struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- parse_endpoint_func parse_endpoint);
-
/* Helper macros to access the connector links. */
/** v4l2_connector_last_link - Helper macro to get the first
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index b325df0d54d6..d9fca929c10b 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -1020,12 +1020,14 @@ struct v4l2_subdev_platform_data {
* @dev: pointer to the physical device, if any
* @fwnode: The fwnode_handle of the subdev, usually the same as
* either dev->of_node->fwnode or dev->fwnode (whichever is non-NULL).
- * @async_list: Links this subdev to a global subdev_list or @notifier->done
- * list.
- * @asd: Pointer to respective &struct v4l2_async_subdev.
- * @notifier: Pointer to the managing notifier.
+ * @async_list: Links this subdev to a global subdev_list or
+ * @notifier->done_list list.
+ * @async_subdev_endpoint_list: List entry in async_subdev_endpoint_entry of
+ * &struct v4l2_async_subdev_endpoint.
* @subdev_notifier: A sub-device notifier implicitly registered for the sub-
* device using v4l2_async_register_subdev_sensor().
+ * @asc_list: Async connection list, of &struct
+ * v4l2_async_connection.subdev_entry.
* @pdata: common part of subdevice platform data
* @state_lock: A pointer to a lock used for all the subdev's states, set by the
* driver. This is optional. If NULL, each state instance will get
@@ -1065,9 +1067,9 @@ struct v4l2_subdev {
struct device *dev;
struct fwnode_handle *fwnode;
struct list_head async_list;
- struct v4l2_async_subdev *asd;
- struct v4l2_async_notifier *notifier;
+ struct list_head async_subdev_endpoint_list;
struct v4l2_async_notifier *subdev_notifier;
+ struct list_head asc_list;
struct v4l2_subdev_platform_data *pdata;
struct mutex *state_lock;
@@ -1383,8 +1385,9 @@ int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
* v4l2_subdev_cleanup() - Releases the resources allocated by the subdevice
* @sd: The subdevice
*
- * This function will release the resources allocated in
- * v4l2_subdev_init_finalize.
+ * Clean up a V4L2 async sub-device. Must be called for a sub-device as part of
+ * its release if resources have been associated with it using
+ * v4l2_async_subdev_endpoint_add() or v4l2_subdev_init_finalize().
*/
void v4l2_subdev_cleanup(struct v4l2_subdev *sd);
@@ -1532,7 +1535,7 @@ __v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
*/
int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
- struct v4l2_subdev_krouting *routing,
+ const struct v4l2_subdev_krouting *routing,
const struct v4l2_mbus_framefmt *fmt);
/**
diff --git a/include/net/ip.h b/include/net/ip.h
index 19adacd5ece0..3489a1cca5e7 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -57,6 +57,7 @@ struct inet_skb_parm {
#define IPSKB_FRAG_PMTU BIT(6)
#define IPSKB_L3SLAVE BIT(7)
#define IPSKB_NOPOLICY BIT(8)
+#define IPSKB_MULTIPATH BIT(9)
u16 frag_max_size;
};
@@ -94,7 +95,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
ipcm_init(ipcm);
ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark);
- ipcm->sockc.tsflags = inet->sk.sk_tsflags;
+ ipcm->sockc.tsflags = READ_ONCE(inet->sk.sk_tsflags);
ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
ipcm->addr = inet->inet_saddr;
ipcm->protocol = inet->inet_num;
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index c9ff23cf313e..1ba9f4ddf2f6 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -642,7 +642,10 @@ static inline bool fib6_rules_early_flow_dissect(struct net *net,
if (!net->ipv6.fib6_rules_require_fldissect)
return false;
- skb_flow_dissect_flow_keys(skb, flkeys, flag);
+ memset(flkeys, 0, sizeof(*flkeys));
+ __skb_flow_dissect(net, skb, &flow_keys_dissector,
+ flkeys, NULL, 0, 0, 0, flag);
+
fl6->fl6_sport = flkeys->ports.src;
fl6->fl6_dport = flkeys->ports.dst;
fl6->flowi6_proto = flkeys->basic.ip_proto;
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index a378eff827c7..f0c13864180e 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -418,7 +418,10 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net,
if (!net->ipv4.fib_rules_require_fldissect)
return false;
- skb_flow_dissect_flow_keys(skb, flkeys, flag);
+ memset(flkeys, 0, sizeof(*flkeys));
+ __skb_flow_dissect(net, skb, &flow_keys_dissector,
+ flkeys, NULL, 0, 0, 0, flag);
+
fl4->fl4_sport = flkeys->ports.src;
fl4->fl4_dport = flkeys->ports.dst;
fl4->flowi4_proto = flkeys->basic.ip_proto;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index e8750b4ef7e1..f346b4efbc30 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -483,15 +483,14 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
u64_stats_inc(&tstats->tx_packets);
u64_stats_update_end(&tstats->syncp);
put_cpu_ptr(tstats);
+ return;
+ }
+
+ if (pkt_len < 0) {
+ DEV_STATS_INC(dev, tx_errors);
+ DEV_STATS_INC(dev, tx_aborted_errors);
} else {
- struct net_device_stats *err_stats = &dev->stats;
-
- if (pkt_len < 0) {
- err_stats->tx_errors++;
- err_stats->tx_aborted_errors++;
- } else {
- err_stats->tx_dropped++;
- }
+ DEV_STATS_INC(dev, tx_dropped);
}
}
diff --git a/include/net/scm.h b/include/net/scm.h
index c5bcdf65f55c..e8c76b4be2fe 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -9,6 +9,7 @@
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/sched/signal.h>
+#include <net/compat.h>
/* Well, we should have at least one descriptor open
* to accept passed FDs 8)
@@ -123,14 +124,17 @@ static inline bool scm_has_secdata(struct socket *sock)
static __inline__ void scm_pidfd_recv(struct msghdr *msg, struct scm_cookie *scm)
{
struct file *pidfd_file = NULL;
- int pidfd;
+ int len, pidfd;
- /*
- * put_cmsg() doesn't return an error if CMSG is truncated,
+ /* put_cmsg() doesn't return an error if CMSG is truncated,
* that's why we need to opencode these checks here.
*/
- if ((msg->msg_controllen <= sizeof(struct cmsghdr)) ||
- (msg->msg_controllen - sizeof(struct cmsghdr)) < sizeof(int)) {
+ if (msg->msg_flags & MSG_CMSG_COMPAT)
+ len = sizeof(struct compat_cmsghdr) + sizeof(int);
+ else
+ len = sizeof(struct cmsghdr) + sizeof(int);
+
+ if (msg->msg_controllen < len) {
msg->msg_flags |= MSG_CTRUNC;
return;
}
diff --git a/include/net/sock.h b/include/net/sock.h
index 11d503417591..b770261fbdaf 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1053,6 +1053,12 @@ static inline void sk_wmem_queued_add(struct sock *sk, int val)
WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
}
+static inline void sk_forward_alloc_add(struct sock *sk, int val)
+{
+ /* Paired with lockless reads of sk->sk_forward_alloc */
+ WRITE_ONCE(sk->sk_forward_alloc, sk->sk_forward_alloc + val);
+}
+
void sk_stream_write_space(struct sock *sk);
/* OOB backlog add */
@@ -1377,7 +1383,7 @@ static inline int sk_forward_alloc_get(const struct sock *sk)
if (sk->sk_prot->forward_alloc_get)
return sk->sk_prot->forward_alloc_get(sk);
#endif
- return sk->sk_forward_alloc;
+ return READ_ONCE(sk->sk_forward_alloc);
}
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
@@ -1673,14 +1679,14 @@ static inline void sk_mem_charge(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return;
- sk->sk_forward_alloc -= size;
+ sk_forward_alloc_add(sk, -size);
}
static inline void sk_mem_uncharge(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return;
- sk->sk_forward_alloc += size;
+ sk_forward_alloc_add(sk, size);
sk_mem_reclaim(sk);
}
@@ -1900,7 +1906,9 @@ struct sockcm_cookie {
static inline void sockcm_init(struct sockcm_cookie *sockc,
const struct sock *sk)
{
- *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
+ *sockc = (struct sockcm_cookie) {
+ .tsflags = READ_ONCE(sk->sk_tsflags)
+ };
}
int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
@@ -2695,9 +2703,9 @@ void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
- ktime_t kt = skb->tstamp;
struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
-
+ u32 tsflags = READ_ONCE(sk->sk_tsflags);
+ ktime_t kt = skb->tstamp;
/*
* generate control messages if
* - receive time stamping in software requested
@@ -2705,10 +2713,10 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
* - hardware time stamps available and wanted
*/
if (sock_flag(sk, SOCK_RCVTSTAMP) ||
- (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
- (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
+ (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
+ (kt && tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
(hwtstamps->hwtstamp &&
- (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
+ (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
else
sock_write_timestamp(sk, kt);
@@ -2730,7 +2738,8 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
#define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \
SOF_TIMESTAMPING_RAW_HARDWARE)
- if (sk->sk_flags & FLAGS_RECV_CMSGS || sk->sk_tsflags & TSFLAGS_ANY)
+ if (sk->sk_flags & FLAGS_RECV_CMSGS ||
+ READ_ONCE(sk->sk_tsflags) & TSFLAGS_ANY)
__sock_recv_cmsgs(msg, sk, skb);
else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
sock_write_timestamp(sk, skb->tstamp);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 1e7774ac808f..533ab92684d8 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4440,8 +4440,6 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
const struct sockaddr *addr);
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
unsigned int port);
-struct net_device *ib_device_netdev(struct ib_device *dev, u32 port);
-
struct ib_wq *ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr);
int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 03abd30e6c8c..2b22f153ef63 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -115,27 +115,6 @@ struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
void iw_destroy_cm_id(struct iw_cm_id *cm_id);
/**
- * iw_cm_bind_qp - Unbind the specified IW CM identifier and QP
- *
- * @cm_id: The IW CM idenfier to unbind from the QP.
- * @qp: The QP
- *
- * This is called by the provider when destroying the QP to ensure
- * that any references held by the IWCM are released. It may also
- * be called by the IWCM when destroying a CM_ID to that any
- * references held by the provider are released.
- */
-void iw_cm_unbind_qp(struct iw_cm_id *cm_id, struct ib_qp *qp);
-
-/**
- * iw_cm_get_qp - Return the ib_qp associated with a QPN
- *
- * @ib_device: The IB device
- * @qpn: The queue pair number
- */
-struct ib_qp *iw_cm_get_qp(struct ib_device *device, int qpn);
-
-/**
* iw_cm_listen - Listen for incoming connection requests on the
* specified IW CM id.
*
diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
index 9eb75683e012..9705b2a98e49 100644
--- a/include/rv/da_monitor.h
+++ b/include/rv/da_monitor.h
@@ -262,7 +262,7 @@ static inline void da_monitor_destroy_##name(void) \
/* \
* per-cpu monitor variables \
*/ \
-DEFINE_PER_CPU(struct da_monitor, da_mon_##name); \
+static DEFINE_PER_CPU(struct da_monitor, da_mon_##name); \
\
/* \
* da_get_monitor_##name - return current CPU monitor address \
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 159823e0afbf..8a43534eea5c 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -23,22 +23,12 @@
struct block_device;
-enum sas_class {
- SAS,
- EXPANDER
-};
-
enum sas_phy_role {
PHY_ROLE_NONE = 0,
PHY_ROLE_TARGET = 0x40,
PHY_ROLE_INITIATOR = 0x80,
};
-enum sas_phy_type {
- PHY_TYPE_PHYSICAL,
- PHY_TYPE_VIRTUAL
-};
-
/* The events are mnemonically described in sas_dump.c
* so when updating/adding events here, please also
* update the other file too.
@@ -258,7 +248,6 @@ struct asd_sas_port {
/* public: */
int id;
- enum sas_class class;
u8 sas_addr[SAS_ADDR_SIZE];
u8 attached_sas_addr[SAS_ADDR_SIZE];
enum sas_protocol iproto;
@@ -319,11 +308,9 @@ struct asd_sas_phy {
int enabled; /* must be set */
int id; /* must be set */
- enum sas_class class;
enum sas_protocol iproto;
enum sas_protocol tproto;
- enum sas_phy_type type;
enum sas_phy_role role;
enum sas_oob_mode oob_mode;
enum sas_linkrate linkrate;
@@ -346,11 +333,6 @@ struct asd_sas_phy {
void *lldd_phy; /* not touched by the sas_class_code */
};
-struct scsi_core {
- struct Scsi_Host *shost;
-
-};
-
enum sas_ha_state {
SAS_HA_REGISTERED,
SAS_HA_DRAINING,
@@ -371,12 +353,11 @@ struct sas_ha_struct {
struct mutex disco_mutex;
- struct scsi_core core;
+ struct Scsi_Host *shost;
/* public: */
char *sas_ha_name;
struct device *dev; /* should be set */
- struct module *lldd_module; /* should be set */
struct workqueue_struct *event_q;
struct workqueue_struct *disco_q;
@@ -544,12 +525,9 @@ struct sas_ata_task {
struct host_to_dev_fis fis;
u8 atapi_packet[16]; /* 0 if not ATAPI task */
- u8 retry_count; /* hardware retry, should be > 0 */
-
u8 dma_xfer:1; /* PIO:0 or DMA:1 */
u8 use_ncq:1;
- u8 set_affil_pol:1;
- u8 stp_affil_pol:1;
+ u8 return_fis_on_success:1;
u8 device_control_reg_update:1;
@@ -582,12 +560,8 @@ enum task_attribute {
};
struct sas_ssp_task {
- u8 retry_count; /* hardware retry, should be > 0 */
-
u8 LUN[8];
- u8 enable_first_burst:1;
enum task_attribute task_attr;
- u8 task_prio;
struct scsi_cmnd *cmd;
};
@@ -727,8 +701,6 @@ extern struct device_attribute dev_attr_phy_event_threshold;
int sas_discover_root_expander(struct domain_device *);
-void sas_init_ex_attr(void);
-
int sas_ex_revalidate_domain(struct domain_device *);
void sas_unregister_domain_devices(struct asd_sas_port *port, int gone);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 70b7475dcf56..a2b8d30c4c80 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -769,7 +769,7 @@ extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
-extern struct Scsi_Host *scsi_host_lookup(unsigned short);
+extern struct Scsi_Host *scsi_host_lookup(unsigned int hostnum);
extern const char *scsi_host_state_name(enum scsi_host_state);
extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
enum scsi_host_status status);
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 34c03707fb6e..fb3399e4cd29 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -472,7 +472,6 @@ extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost,
uint32_t iface_type,
uint32_t iface_num, int dd_size);
extern void iscsi_destroy_iface(struct iscsi_iface *iface);
-extern struct iscsi_iface *iscsi_lookup_iface(int handle);
extern char *iscsi_get_port_speed_name(struct Scsi_Host *shost);
extern char *iscsi_get_port_state_name(struct Scsi_Host *shost);
extern int iscsi_is_session_dev(const struct device *dev);
diff --git a/include/soc/mediatek/smi.h b/include/soc/mediatek/smi.h
index dfd8efca5e60..000eb1cf68b7 100644
--- a/include/soc/mediatek/smi.h
+++ b/include/soc/mediatek/smi.h
@@ -13,6 +13,7 @@
enum iommu_atf_cmd {
IOMMU_ATF_CMD_CONFIG_SMI_LARB, /* For mm master to en/disable iommu */
+ IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU, /* For infra master to enable iommu */
IOMMU_ATF_CMD_MAX,
};
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index c9a8bce9a785..d70c55f17df7 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -142,7 +142,7 @@ struct snd_dmaengine_pcm_config {
struct snd_pcm_substream *substream);
int (*process)(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
- struct iov_iter *buf, unsigned long bytes);
+ unsigned long bytes);
dma_filter_fn compat_filter_fn;
struct device *dma_dev;
const char *chan_names[SNDRV_PCM_STREAM_LAST + 1];
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index 17bea3144551..ceca69b46a82 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -139,7 +139,7 @@ struct snd_soc_component_driver {
struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
int (*copy)(struct snd_soc_component *component,
struct snd_pcm_substream *substream, int channel,
- unsigned long pos, struct iov_iter *buf,
+ unsigned long pos, struct iov_iter *iter,
unsigned long bytes);
struct page *(*page)(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
@@ -511,7 +511,7 @@ int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream,
int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream);
int snd_soc_pcm_component_copy(struct snd_pcm_substream *substream,
int channel, unsigned long pos,
- struct iov_iter *buf, unsigned long bytes);
+ struct iov_iter *iter, unsigned long bytes);
struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream,
unsigned long offset);
int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream,
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 4c15420e8965..60af7c63b34e 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -50,9 +50,6 @@ struct sock;
#define TA_LOGIN_TIMEOUT 15
#define TA_LOGIN_TIMEOUT_MAX 30
#define TA_LOGIN_TIMEOUT_MIN 5
-#define TA_NETIF_TIMEOUT 2
-#define TA_NETIF_TIMEOUT_MAX 15
-#define TA_NETIF_TIMEOUT_MIN 2
#define TA_GENERATE_NODE_ACLS 0
#define TA_DEFAULT_CMDSN_DEPTH 64
#define TA_DEFAULT_CMDSN_DEPTH_MAX 512
@@ -773,7 +770,6 @@ to_iscsi_nacl(struct se_node_acl *se_nacl)
struct iscsi_tpg_attrib {
u32 authentication;
u32 login_timeout;
- u32 netif_timeout;
u32 generate_node_acls;
u32 cache_dynamic_acls;
u32 default_cmdsn_depth;
diff --git a/include/trace/events/fsi.h b/include/trace/events/fsi.h
index c9a72e8432b8..5ff15126ad9d 100644
--- a/include/trace/events/fsi.h
+++ b/include/trace/events/fsi.h
@@ -122,6 +122,37 @@ TRACE_EVENT(fsi_master_break,
)
);
+TRACE_EVENT(fsi_master_scan,
+ TP_PROTO(const struct fsi_master *master, bool scan),
+ TP_ARGS(master, scan),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, n_links)
+ __field(bool, scan)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->n_links = master->n_links;
+ __entry->scan = scan;
+ ),
+ TP_printk("fsi%d (%d links) %s", __entry->master_idx, __entry->n_links,
+ __entry->scan ? "scan" : "unscan")
+);
+
+TRACE_EVENT(fsi_master_unregister,
+ TP_PROTO(const struct fsi_master *master),
+ TP_ARGS(master),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, n_links)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->n_links = master->n_links;
+ ),
+ TP_printk("fsi%d (%d links)", __entry->master_idx, __entry->n_links)
+);
+
TRACE_EVENT(fsi_slave_init,
TP_PROTO(const struct fsi_slave *slave),
TP_ARGS(slave),
diff --git a/include/trace/events/fsi_master_i2cr.h b/include/trace/events/fsi_master_i2cr.h
new file mode 100644
index 000000000000..c33eba130049
--- /dev/null
+++ b/include/trace/events/fsi_master_i2cr.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fsi_master_i2cr
+
+#if !defined(_TRACE_FSI_MASTER_I2CR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FSI_MASTER_I2CR_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(i2cr_i2c_error,
+ TP_PROTO(const struct i2c_client *client, uint32_t command, int rc),
+ TP_ARGS(client, command, rc),
+ TP_STRUCT__entry(
+ __field(int, bus)
+ __field(int, rc)
+ __array(unsigned char, command, sizeof(uint32_t))
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->bus = client->adapter->nr;
+ __entry->rc = rc;
+ memcpy(__entry->command, &command, sizeof(uint32_t));
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x command:{ %*ph } rc:%d", __entry->bus, __entry->addr,
+ (int)sizeof(uint32_t), __entry->command, __entry->rc)
+);
+
+TRACE_EVENT(i2cr_read,
+ TP_PROTO(const struct i2c_client *client, uint32_t command, uint64_t *data),
+ TP_ARGS(client, command, data),
+ TP_STRUCT__entry(
+ __field(int, bus)
+ __array(unsigned char, data, sizeof(uint64_t))
+ __array(unsigned char, command, sizeof(uint32_t))
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->bus = client->adapter->nr;
+ memcpy(__entry->data, data, sizeof(uint64_t));
+ memcpy(__entry->command, &command, sizeof(uint32_t));
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x command:{ %*ph } { %*ph }", __entry->bus, __entry->addr,
+ (int)sizeof(uint32_t), __entry->command, (int)sizeof(uint64_t), __entry->data)
+);
+
+TRACE_EVENT(i2cr_status,
+ TP_PROTO(const struct i2c_client *client, uint64_t status),
+ TP_ARGS(client, status),
+ TP_STRUCT__entry(
+ __field(uint64_t, status)
+ __field(int, bus)
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->bus = client->adapter->nr;
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x %016llx", __entry->bus, __entry->addr, __entry->status)
+);
+
+TRACE_EVENT(i2cr_status_error,
+ TP_PROTO(const struct i2c_client *client, uint64_t status, uint64_t error, uint64_t log),
+ TP_ARGS(client, status, error, log),
+ TP_STRUCT__entry(
+ __field(uint64_t, error)
+ __field(uint64_t, log)
+ __field(uint64_t, status)
+ __field(int, bus)
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->error = error;
+ __entry->log = log;
+ __entry->status = status;
+ __entry->bus = client->adapter->nr;
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x status:%016llx error:%016llx log:%016llx", __entry->bus, __entry->addr,
+ __entry->status, __entry->error, __entry->log)
+);
+
+TRACE_EVENT(i2cr_write,
+ TP_PROTO(const struct i2c_client *client, uint32_t command, uint64_t data),
+ TP_ARGS(client, command, data),
+ TP_STRUCT__entry(
+ __field(int, bus)
+ __array(unsigned char, data, sizeof(uint64_t))
+ __array(unsigned char, command, sizeof(uint32_t))
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->bus = client->adapter->nr;
+ memcpy(__entry->data, &data, sizeof(uint64_t));
+ memcpy(__entry->command, &command, sizeof(uint32_t));
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x command:{ %*ph } { %*ph }", __entry->bus, __entry->addr,
+ (int)sizeof(uint32_t), __entry->command, (int)sizeof(uint64_t), __entry->data)
+);
+
+#endif
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 43711753616a..6beb38c1dcb5 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -1706,7 +1706,7 @@ TRACE_DEFINE_ENUM(SVC_DENIED);
TRACE_DEFINE_ENUM(SVC_PENDING);
TRACE_DEFINE_ENUM(SVC_COMPLETE);
-#define svc_show_status(status) \
+#define show_svc_auth_status(status) \
__print_symbolic(status, \
{ SVC_GARBAGE, "SVC_GARBAGE" }, \
{ SVC_SYSERR, "SVC_SYSERR" }, \
@@ -1743,7 +1743,10 @@ TRACE_DEFINE_ENUM(SVC_COMPLETE);
__entry->xid, __get_sockaddr(server), __get_sockaddr(client)
TRACE_EVENT_CONDITION(svc_authenticate,
- TP_PROTO(const struct svc_rqst *rqst, int auth_res),
+ TP_PROTO(
+ const struct svc_rqst *rqst,
+ enum svc_auth_status auth_res
+ ),
TP_ARGS(rqst, auth_res),
@@ -1766,7 +1769,7 @@ TRACE_EVENT_CONDITION(svc_authenticate,
TP_printk(SVC_RQST_ENDPOINT_FORMAT
" auth_res=%s auth_stat=%s",
SVC_RQST_ENDPOINT_VARARGS,
- svc_show_status(__entry->svc_status),
+ show_svc_auth_status(__entry->svc_status),
rpc_show_auth_stat(__entry->auth_stat))
);
@@ -1918,25 +1921,42 @@ TRACE_EVENT(svc_stats_latency,
__get_str(procedure), __entry->execute)
);
+/*
+ * from include/linux/sunrpc/svc_xprt.h
+ */
+#define SVC_XPRT_FLAG_LIST \
+ svc_xprt_flag(BUSY) \
+ svc_xprt_flag(CONN) \
+ svc_xprt_flag(CLOSE) \
+ svc_xprt_flag(DATA) \
+ svc_xprt_flag(TEMP) \
+ svc_xprt_flag(DEAD) \
+ svc_xprt_flag(CHNGBUF) \
+ svc_xprt_flag(DEFERRED) \
+ svc_xprt_flag(OLD) \
+ svc_xprt_flag(LISTENER) \
+ svc_xprt_flag(CACHE_AUTH) \
+ svc_xprt_flag(LOCAL) \
+ svc_xprt_flag(KILL_TEMP) \
+ svc_xprt_flag(CONG_CTRL) \
+ svc_xprt_flag(HANDSHAKE) \
+ svc_xprt_flag(TLS_SESSION) \
+ svc_xprt_flag_end(PEER_AUTH)
+
+#undef svc_xprt_flag
+#undef svc_xprt_flag_end
+#define svc_xprt_flag(x) TRACE_DEFINE_ENUM(XPT_##x);
+#define svc_xprt_flag_end(x) TRACE_DEFINE_ENUM(XPT_##x);
+
+SVC_XPRT_FLAG_LIST
+
+#undef svc_xprt_flag
+#undef svc_xprt_flag_end
+#define svc_xprt_flag(x) { BIT(XPT_##x), #x },
+#define svc_xprt_flag_end(x) { BIT(XPT_##x), #x }
+
#define show_svc_xprt_flags(flags) \
- __print_flags(flags, "|", \
- { BIT(XPT_BUSY), "BUSY" }, \
- { BIT(XPT_CONN), "CONN" }, \
- { BIT(XPT_CLOSE), "CLOSE" }, \
- { BIT(XPT_DATA), "DATA" }, \
- { BIT(XPT_TEMP), "TEMP" }, \
- { BIT(XPT_DEAD), "DEAD" }, \
- { BIT(XPT_CHNGBUF), "CHNGBUF" }, \
- { BIT(XPT_DEFERRED), "DEFERRED" }, \
- { BIT(XPT_OLD), "OLD" }, \
- { BIT(XPT_LISTENER), "LISTENER" }, \
- { BIT(XPT_CACHE_AUTH), "CACHE_AUTH" }, \
- { BIT(XPT_LOCAL), "LOCAL" }, \
- { BIT(XPT_KILL_TEMP), "KILL_TEMP" }, \
- { BIT(XPT_CONG_CTRL), "CONG_CTRL" }, \
- { BIT(XPT_HANDSHAKE), "HANDSHAKE" }, \
- { BIT(XPT_TLS_SESSION), "TLS_SESSION" }, \
- { BIT(XPT_PEER_AUTH), "PEER_AUTH" })
+ __print_flags(flags, "|", SVC_XPRT_FLAG_LIST)
TRACE_EVENT(svc_xprt_create_err,
TP_PROTO(
@@ -1994,25 +2014,25 @@ TRACE_EVENT(svc_xprt_create_err,
TRACE_EVENT(svc_xprt_enqueue,
TP_PROTO(
const struct svc_xprt *xprt,
- const struct svc_rqst *rqst
+ unsigned long flags
),
- TP_ARGS(xprt, rqst),
+ TP_ARGS(xprt, flags),
TP_STRUCT__entry(
SVC_XPRT_ENDPOINT_FIELDS(xprt)
-
- __field(int, pid)
),
TP_fast_assign(
- SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt);
-
- __entry->pid = rqst? rqst->rq_task->pid : 0;
+ __assign_sockaddr(server, &xprt->xpt_local,
+ xprt->xpt_locallen);
+ __assign_sockaddr(client, &xprt->xpt_remote,
+ xprt->xpt_remotelen);
+ __entry->flags = flags;
+ __entry->netns_ino = xprt->xpt_net->ns.inum;
),
- TP_printk(SVC_XPRT_ENDPOINT_FORMAT " pid=%d",
- SVC_XPRT_ENDPOINT_VARARGS, __entry->pid)
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT, SVC_XPRT_ENDPOINT_VARARGS)
);
TRACE_EVENT(svc_xprt_dequeue,
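For readers unfamiliar with the list-macro technique introduced above for SVC_XPRT_FLAG_LIST, here is a small standalone sketch of the same pattern; the names are illustrative only and this is not kernel code.

#include <stdio.h>

#define HYPO_FLAG_LIST \
	hypo_flag(BUSY) \
	hypo_flag(CONN) \
	hypo_flag_end(CLOSE)

/* First expansion: one enumerator per list entry. */
#define hypo_flag(x)		HYPO_##x,
#define hypo_flag_end(x)	HYPO_##x
enum hypo_flags { HYPO_FLAG_LIST };

/* Second expansion: a printable name table built from the same list. */
#undef hypo_flag
#undef hypo_flag_end
#define hypo_flag(x)		[HYPO_##x] = #x,
#define hypo_flag_end(x)	[HYPO_##x] = #x
static const char *const hypo_flag_names[] = { HYPO_FLAG_LIST };

int main(void)
{
	printf("%s\n", hypo_flag_names[HYPO_CONN]);	/* prints CONN */
	return 0;
}

Expanding the single flag list twice keeps the enum definitions and the pretty-printing table from drifting apart, which is the point of the sunrpc rework above.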
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
index 64d160930b0d..47b527464d1a 100644
--- a/include/trace/events/task.h
+++ b/include/trace/events/task.h
@@ -47,7 +47,7 @@ TRACE_EVENT(task_rename,
TP_fast_assign(
__entry->pid = task->pid;
memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
- strlcpy(entry->newcomm, comm, TASK_COMM_LEN);
+ strscpy(entry->newcomm, comm, TASK_COMM_LEN);
__entry->oom_score_adj = task->signal->oom_score_adj;
),
diff --git a/include/uapi/linux/cgroupstats.h b/include/uapi/linux/cgroupstats.h
index aa306e4cd6c1..80b2c8594480 100644
--- a/include/uapi/linux/cgroupstats.h
+++ b/include/uapi/linux/cgroupstats.h
@@ -24,8 +24,6 @@
* basis. This data is shared using taskstats.
*
* Most of these states are derived by looking at the task->state value
- * For the nr_io_wait state, a flag in the delay accounting structure
- * indicates that the task is waiting on IO
*
* Each member is aligned to a 8 byte boundary.
*/
diff --git a/include/uapi/linux/elf-fdpic.h b/include/uapi/linux/elf-fdpic.h
index 4fcc6cfebe18..ec23f0871129 100644
--- a/include/uapi/linux/elf-fdpic.h
+++ b/include/uapi/linux/elf-fdpic.h
@@ -32,4 +32,19 @@ struct elf32_fdpic_loadmap {
#define ELF32_FDPIC_LOADMAP_VERSION 0x0000
+/* segment mappings for ELF FDPIC libraries/executables/interpreters */
+struct elf64_fdpic_loadseg {
+ Elf64_Addr addr; /* core address to which mapped */
+ Elf64_Addr p_vaddr; /* VMA recorded in file */
+ Elf64_Word p_memsz; /* allocation size recorded in file */
+};
+
+struct elf64_fdpic_loadmap {
+ Elf64_Half version; /* version of these structures, just in case... */
+ Elf64_Half nsegs; /* number of segments */
+ struct elf64_fdpic_loadseg segs[];
+};
+
+#define ELF64_FDPIC_LOADMAP_VERSION 0x0000
+
#endif /* _UAPI_LINUX_ELF_FDPIC_H */
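A small sketch of how a loader might size the new 64-bit load map; this is not part of the patch, and hypo_alloc_loadmap() and nsegs are assumed names.

#include <stdlib.h>
#include <linux/elf-fdpic.h>

static struct elf64_fdpic_loadmap *hypo_alloc_loadmap(unsigned int nsegs)
{
	struct elf64_fdpic_loadmap *map;

	/* segs[] is a flexible array, so it is sized at allocation time. */
	map = calloc(1, sizeof(*map) + nsegs * sizeof(map->segs[0]));
	if (map) {
		map->version = ELF64_FDPIC_LOADMAP_VERSION;
		map->nsegs = nsegs;
	}
	return map;
}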
diff --git a/include/uapi/linux/fsi.h b/include/uapi/linux/fsi.h
index b2f1977378c7..a2e730fc6309 100644
--- a/include/uapi/linux/fsi.h
+++ b/include/uapi/linux/fsi.h
@@ -60,6 +60,16 @@ struct scom_access {
*/
/**
+ * FSI_SBEFIFO_CMD_TIMEOUT_SECONDS sets the timeout for writing data to the
+ * SBEFIFO.
+ *
+ * The command timeout is specified in seconds. The minimum value of the
+ * command timeout is 1 second (the default) and the maximum value is
+ * 120 seconds. A command timeout of 0 resets the value to the default of
+ * 1 second.
+ */
+#define FSI_SBEFIFO_CMD_TIMEOUT_SECONDS _IOW('s', 0x01, __u32)
+
+/**
* FSI_SBEFIFO_READ_TIMEOUT sets the read timeout for response from SBE.
*
* The read timeout is specified in seconds. The minimum value of read
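A hypothetical user-space sketch of the new command-timeout ioctl; this is not part of the patch, it assumes the usual pointer-argument convention for an _IOW ioctl, and sbefifo_fd is an assumed, already opened SBEFIFO device fd.

#include <sys/ioctl.h>
#include <linux/fsi.h>

static int hypo_set_cmd_timeout(int sbefifo_fd)
{
	__u32 timeout = 30;	/* seconds; 0 restores the 1 second default */

	return ioctl(sbefifo_fd, FSI_SBEFIFO_CMD_TIMEOUT_SECONDS, &timeout);
}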
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index b3fcab13fcd3..db92a7202b34 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -207,6 +207,10 @@
* - add FUSE_EXT_GROUPS
* - add FUSE_CREATE_SUPP_GROUP
* - add FUSE_HAS_EXPIRE_ONLY
+ *
+ * 7.39
+ * - add FUSE_DIRECT_IO_RELAX
+ * - add FUSE_STATX and related structures
*/
#ifndef _LINUX_FUSE_H
@@ -242,7 +246,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 38
+#define FUSE_KERNEL_MINOR_VERSION 39
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -269,6 +273,40 @@ struct fuse_attr {
uint32_t flags;
};
+/*
+ * The following structures are bit-for-bit compatible with the statx(2) ABI in
+ * Linux.
+ */
+struct fuse_sx_time {
+ int64_t tv_sec;
+ uint32_t tv_nsec;
+ int32_t __reserved;
+};
+
+struct fuse_statx {
+ uint32_t mask;
+ uint32_t blksize;
+ uint64_t attributes;
+ uint32_t nlink;
+ uint32_t uid;
+ uint32_t gid;
+ uint16_t mode;
+ uint16_t __spare0[1];
+ uint64_t ino;
+ uint64_t size;
+ uint64_t blocks;
+ uint64_t attributes_mask;
+ struct fuse_sx_time atime;
+ struct fuse_sx_time btime;
+ struct fuse_sx_time ctime;
+ struct fuse_sx_time mtime;
+ uint32_t rdev_major;
+ uint32_t rdev_minor;
+ uint32_t dev_major;
+ uint32_t dev_minor;
+ uint64_t __spare2[14];
+};
+
struct fuse_kstatfs {
uint64_t blocks;
uint64_t bfree;
@@ -371,6 +409,8 @@ struct fuse_file_lock {
* FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir,
* symlink and mknod (single group that matches parent)
* FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation
+ * FUSE_DIRECT_IO_RELAX: relax restrictions in FOPEN_DIRECT_IO mode; for now,
+ * allow shared mmap
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -409,6 +449,7 @@ struct fuse_file_lock {
#define FUSE_HAS_INODE_DAX (1ULL << 33)
#define FUSE_CREATE_SUPP_GROUP (1ULL << 34)
#define FUSE_HAS_EXPIRE_ONLY (1ULL << 35)
+#define FUSE_DIRECT_IO_RELAX (1ULL << 36)
/**
* CUSE INIT request/reply flags
@@ -575,6 +616,7 @@ enum fuse_opcode {
FUSE_REMOVEMAPPING = 49,
FUSE_SYNCFS = 50,
FUSE_TMPFILE = 51,
+ FUSE_STATX = 52,
/* CUSE specific operations */
CUSE_INIT = 4096,
@@ -639,6 +681,22 @@ struct fuse_attr_out {
struct fuse_attr attr;
};
+struct fuse_statx_in {
+ uint32_t getattr_flags;
+ uint32_t reserved;
+ uint64_t fh;
+ uint32_t sx_flags;
+ uint32_t sx_mask;
+};
+
+struct fuse_statx_out {
+ uint64_t attr_valid; /* Cache timeout for the attributes */
+ uint32_t attr_valid_nsec;
+ uint32_t flags;
+ uint64_t spare[2];
+ struct fuse_statx stat;
+};
+
#define FUSE_COMPAT_MKNOD_IN_SIZE 8
struct fuse_mknod_in {
diff --git a/include/uapi/linux/gsmmux.h b/include/uapi/linux/gsmmux.h
index eb67884e5f38..4c878d84dbda 100644
--- a/include/uapi/linux/gsmmux.h
+++ b/include/uapi/linux/gsmmux.h
@@ -2,10 +2,45 @@
#ifndef _LINUX_GSMMUX_H
#define _LINUX_GSMMUX_H
+#include <linux/const.h>
#include <linux/if.h>
#include <linux/ioctl.h>
#include <linux/types.h>
+/*
+ * flags definition for n_gsm
+ *
+ * Used by:
+ * struct gsm_config_ext.flags
+ * struct gsm_dlci_config.flags
+ */
+/* Forces a DLCI reset if set. Otherwise, a DLCI reset is only done if
+ * incompatible settings were provided. Always cleared on retrieval.
+ */
+#define GSM_FL_RESTART _BITUL(0)
+
+/**
+ * struct gsm_config - n_gsm basic configuration parameters
+ *
+ * This structure is used in combination with GSMIOC_GETCONF and GSMIOC_SETCONF
+ * to retrieve and set the basic parameters of an n_gsm ldisc.
+ * struct gsm_config_ext can be used to configure extended ldisc parameters.
+ *
+ * All timers are in units of 1/100th of a second.
+ *
+ * @adaption: Convergence layer type
+ * @encapsulation: Framing (0 = basic option, 1 = advanced option)
+ * @initiator: Initiator or responder
+ * @t1: Acknowledgment timer
+ * @t2: Response timer for multiplexer control channel
+ * @t3: Response timer for wake-up procedure
+ * @n2: Maximum number of retransmissions
+ * @mru: Maximum incoming frame payload size
+ * @mtu: Maximum outgoing frame payload size
+ * @k: Window size
+ * @i: Frame type (1 = UIH, 2 = UI)
+ * @unused: Can not be used
+ */
struct gsm_config
{
unsigned int adaption;
@@ -19,18 +54,32 @@ struct gsm_config
unsigned int mtu;
unsigned int k;
unsigned int i;
- unsigned int unused[8]; /* Can not be used */
+ unsigned int unused[8];
};
#define GSMIOC_GETCONF _IOR('G', 0, struct gsm_config)
#define GSMIOC_SETCONF _IOW('G', 1, struct gsm_config)
+/**
+ * struct gsm_netconfig - n_gsm network configuration parameters
+ *
+ * This structure is used in combination with GSMIOC_ENABLE_NET and
+ * GSMIOC_DISABLE_NET to enable or disable a network data connection
+ * over a mux virtual tty channel. This is for modems that support
+ * data connections with raw IP frames instead of PPP.
+ *
+ * @adaption: Adaption to use in network mode.
+ * @protocol: Protocol to use - only ETH_P_IP supported.
+ * @unused2: Can not be used.
+ * @if_name: Interface name format string.
+ * @unused: Can not be used.
+ */
struct gsm_netconfig {
- unsigned int adaption; /* Adaption to use in network mode */
- unsigned short protocol;/* Protocol to use - only ETH_P_IP supported */
- unsigned short unused2; /* Can not be used */
- char if_name[IFNAMSIZ]; /* interface name format string */
- __u8 unused[28]; /* Can not be used */
+ unsigned int adaption;
+ unsigned short protocol;
+ unsigned short unused2;
+ char if_name[IFNAMSIZ];
+ __u8 unused[28];
};
#define GSMIOC_ENABLE_NET _IOW('G', 2, struct gsm_netconfig)
@@ -39,26 +88,57 @@ struct gsm_netconfig {
/* get the base tty number for a configured gsmmux tty */
#define GSMIOC_GETFIRST _IOR('G', 4, __u32)
+/**
+ * struct gsm_config_ext - n_gsm extended configuration parameters
+ *
+ * This structure is used in combination with GSMIOC_GETCONF_EXT and
+ * GSMIOC_SETCONF_EXT to retrieve and set the extended parameters of an
+ * n_gsm ldisc.
+ *
+ * All timers are in units of 1/100th of a second.
+ *
+ * @keep_alive: Control channel keep-alive in 1/100th of a second (0 to disable).
+ * @wait_config: Wait for DLCI config before opening virtual link?
+ * @flags: Mux specific flags.
+ * @reserved: For future use, must be initialized to zero.
+ */
struct gsm_config_ext {
- __u32 keep_alive; /* Control channel keep-alive in 1/100th of a
- * second (0 to disable)
- */
- __u32 wait_config; /* Wait for DLCI config before opening virtual link? */
- __u32 reserved[6]; /* For future use, must be initialized to zero */
+ __u32 keep_alive;
+ __u32 wait_config;
+ __u32 flags;
+ __u32 reserved[5];
};
#define GSMIOC_GETCONF_EXT _IOR('G', 5, struct gsm_config_ext)
#define GSMIOC_SETCONF_EXT _IOW('G', 6, struct gsm_config_ext)
-/* Set channel accordingly before calling GSMIOC_GETCONF_DLCI. */
+/**
+ * struct gsm_dlci_config - n_gsm channel configuration parameters
+ *
+ * This structure is used in combination with GSMIOC_GETCONF_DLCI and
+ * GSMIOC_SETCONF_DLCI to retrieve and set the channel specific parameters
+ * of an n_gsm ldisc.
+ *
+ * Set the channel accordingly before calling GSMIOC_GETCONF_DLCI.
+ *
+ * @channel: DLCI (0 for the associated DLCI).
+ * @adaption: Convergence layer type.
+ * @mtu: Maximum transfer unit.
+ * @priority: Priority (0 for default value).
+ * @i: Frame type (1 = UIH, 2 = UI).
+ * @k: Window size (0 for default value).
+ * @flags: DLCI specific flags.
+ * @reserved: For future use, must be initialized to zero.
+ */
struct gsm_dlci_config {
- __u32 channel; /* DLCI (0 for the associated DLCI) */
- __u32 adaption; /* Convergence layer type */
- __u32 mtu; /* Maximum transfer unit */
- __u32 priority; /* Priority (0 for default value) */
- __u32 i; /* Frame type (1 = UIH, 2 = UI) */
- __u32 k; /* Window size (0 for default value) */
- __u32 reserved[8]; /* For future use, must be initialized to zero */
+ __u32 channel;
+ __u32 adaption;
+ __u32 mtu;
+ __u32 priority;
+ __u32 i;
+ __u32 k;
+ __u32 flags;
+ __u32 reserved[7];
};
#define GSMIOC_GETCONF_DLCI _IOWR('G', 7, struct gsm_dlci_config)
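A hypothetical user-space sketch combining the documented ioctls with the new flags field; this is not part of the patch, and gsm_fd is assumed to be a tty with the n_gsm line discipline already attached.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/gsmmux.h>

static int hypo_enable_keep_alive(int gsm_fd)
{
	struct gsm_config_ext ce;

	memset(&ce, 0, sizeof(ce));		/* reserved[] must stay zero */
	if (ioctl(gsm_fd, GSMIOC_GETCONF_EXT, &ce) < 0)
		return -1;

	ce.keep_alive = 500;			/* 5 s, in 1/100th s units */
	ce.flags = GSM_FL_RESTART;		/* force a reset on apply */

	return ioctl(gsm_fd, GSMIOC_SETCONF_EXT, &ce);
}

Reading the current configuration first, as above, avoids unintentionally resetting parameters that GSMIOC_SETCONF_EXT also covers.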
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 8466c2a9938f..ca30232b7bc8 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -263,6 +263,7 @@ enum nft_chain_attributes {
* @NFTA_RULE_USERDATA: user data (NLA_BINARY, NFT_USERDATA_MAXLEN)
* @NFTA_RULE_ID: uniquely identifies a rule in a transaction (NLA_U32)
* @NFTA_RULE_POSITION_ID: transaction unique identifier of the previous rule (NLA_U32)
+ * @NFTA_RULE_CHAIN_ID: add the rule to chain by ID, alternative to @NFTA_RULE_CHAIN (NLA_U32)
*/
enum nft_rule_attributes {
NFTA_RULE_UNSPEC,
diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h
index 1637e68177d9..f0c8da2b185b 100644
--- a/include/uapi/linux/rpmsg.h
+++ b/include/uapi/linux/rpmsg.h
@@ -43,4 +43,14 @@ struct rpmsg_endpoint_info {
*/
#define RPMSG_RELEASE_DEV_IOCTL _IOW(0xb5, 0x4, struct rpmsg_endpoint_info)
+/**
+ * Get the flow control state of the remote rpmsg char device.
+ */
+#define RPMSG_GET_OUTGOING_FLOWCONTROL _IOR(0xb5, 0x5, int)
+
+/**
+ * Set the flow control state of the local rpmsg char device.
+ */
+#define RPMSG_SET_INCOMING_FLOWCONTROL _IOR(0xb5, 0x6, int)
+
#endif
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 281fa286555c..add349889d0a 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -25,6 +25,8 @@
/*
* The type definitions. These are from Ted Ts'o's serial.h
+ * For historical reasons the values from 0 to 13 are defined
+ * in include/uapi/linux/serial.h; do not define them here.
*/
#define PORT_NS16550A 14
#define PORT_XSCALE 15
@@ -94,15 +96,9 @@
#define PORT_SCIF 53
#define PORT_IRDA 54
-/* Samsung S3C2410 SoC and derivatives thereof */
-#define PORT_S3C2410 55
-
/* SGI IP22 aka Indy / Challenge S / Indigo 2 */
#define PORT_IP22ZILOG 56
-/* Sharp LH7a40x -- an ARM9 SoC series */
-#define PORT_LH7A40X 57
-
/* PPC CPM type number */
#define PORT_CPM 58
@@ -112,37 +108,23 @@
/* IBM icom */
#define PORT_ICOM 60
-/* Samsung S3C2440 SoC */
-#define PORT_S3C2440 61
-
/* Motorola i.MX SoC */
#define PORT_IMX 62
-/* Marvell MPSC (obsolete unused) */
-#define PORT_MPSC 63
-
/* TXX9 type number */
#define PORT_TXX9 64
-/* Samsung S3C2400 SoC */
-#define PORT_S3C2400 67
-
-/* M32R SIO */
-#define PORT_M32R_SIO 68
-
/*Digi jsm */
#define PORT_JSM 69
/* SUN4V Hypervisor Console */
#define PORT_SUNHV 72
-#define PORT_S3C2412 73
-
/* Xilinx uartlite */
#define PORT_UARTLITE 74
-/* Blackfin bf5xx */
-#define PORT_BFIN 75
+/* Broadcom BCM7271 UART */
+#define PORT_BCM7271 76
/* Broadcom SB1250, etc. SOC */
#define PORT_SB1250_DUART 77
@@ -150,13 +132,6 @@
/* Freescale ColdFire */
#define PORT_MCF 78
-/* Blackfin SPORT */
-#define PORT_BFIN_SPORT 79
-
-/* MN10300 on-chip UART numbers */
-#define PORT_MN10300 80
-#define PORT_MN10300_CTS 81
-
#define PORT_SC26XX 82
/* SH-SCI */
@@ -164,9 +139,6 @@
#define PORT_S3C6400 84
-/* NWPSERIAL, now removed */
-#define PORT_NWPSERIAL 85
-
/* MAX3100 */
#define PORT_MAX3100 86
@@ -225,13 +197,10 @@
/* ST ASC type numbers */
#define PORT_ASC 105
-/* Tilera TILE-Gx UART */
-#define PORT_TILEGX 106
-
/* MEN 16z135 UART */
#define PORT_MEN_Z135 107
-/* SC16IS74xx */
+/* SC16IS7xx */
#define PORT_SC16IS7XX 108
/* MESON */
@@ -243,9 +212,6 @@
/* SPRD SERIAL */
#define PORT_SPRD 111
-/* Cris v10 / v32 SoC */
-#define PORT_CRIS 112
-
/* STM32 USART */
#define PORT_STM32 113
diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h
index 7e42a5b7558b..ff0a931833e2 100644
--- a/include/uapi/linux/sync_file.h
+++ b/include/uapi/linux/sync_file.h
@@ -56,7 +56,7 @@ struct sync_fence_info {
* @name: name of fence
* @status: status of fence. 1: signaled 0:active <0:error
* @flags: sync_file_info flags
- * @num_fences number of fences in the sync_file
+ * @num_fences: number of fences in the sync_file
* @pad: padding for 64-bit alignment, should always be zero
* @sync_fence_info: pointer to array of struct &sync_fence_info with all
* fences in the sync_file
diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h
index fb0cd24c392c..ce4c83f2e66a 100644
--- a/include/uapi/linux/usb/ch11.h
+++ b/include/uapi/linux/usb/ch11.h
@@ -15,10 +15,8 @@
/* This is arbitrary.
* From USB 2.0 spec Table 11-13, offset 7, a hub can
* have up to 255 ports. The most yet reported is 10.
- *
- * Current Wireless USB host hardware (Intel i1480 for example) allows
- * up to 22 devices to connect. Upcoming hardware might raise that
- * limit. Because the arrays need to add a bit for hub status data, we
+ * Upcoming hardware might raise that limit.
+ * Because the arrays need to add a bit for hub status data, we
* use 31, so plus one evens out to four bytes.
*/
#define USB_MAXCHILDREN 31
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 62d318377379..8a147abfc680 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -3,7 +3,7 @@
* This file holds USB constants and structures that are needed for
* USB device APIs. These are used by the USB device model, which is
* defined in chapter 9 of the USB 2.0 specification and in the
- * Wireless USB 1.0 (spread around). Linux has several APIs in C that
+ * Wireless USB 1.0 spec (now defunct). Linux has several APIs in C that
* need these:
*
* - the master/host side Linux-USB kernel driver API;
@@ -14,9 +14,6 @@
* act either as a USB master/host or as a USB slave/device. That means
* the master and slave side APIs benefit from working well together.
*
- * There's also "Wireless USB", using low power short range radios for
- * peripheral interconnection but otherwise building on the USB framework.
- *
* Note all descriptors are declared '__attribute__((packed))' so that:
*
* [a] they never get padded, either internally (USB spec writers
diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
index d3aad12ad1fa..2d827d22cd99 100644
--- a/include/uapi/linux/vhost_types.h
+++ b/include/uapi/linux/vhost_types.h
@@ -181,5 +181,9 @@ struct vhost_vdpa_iova_range {
#define VHOST_BACKEND_F_SUSPEND 0x4
/* Device can be resumed */
#define VHOST_BACKEND_F_RESUME 0x5
+/* Device supports the driver enabling virtqueues both before and after
+ * DRIVER_OK
+ */
+#define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK 0x6
#endif
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 3af6a82d0cad..78260e5d9985 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -796,6 +796,8 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
#define V4L2_PIX_FMT_MM21 v4l2_fourcc('M', 'M', '2', '1') /* Mediatek 8-bit block mode, two non-contiguous planes */
+#define V4L2_PIX_FMT_MT2110T v4l2_fourcc('M', 'T', '2', 'T') /* Mediatek 10-bit block tile mode */
+#define V4L2_PIX_FMT_MT2110R v4l2_fourcc('M', 'T', '2', 'R') /* Mediatek 10-bit block raster mode */
#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
#define V4L2_PIX_FMT_CNF4 v4l2_fourcc('C', 'N', 'F', '4') /* Intel 4-bit packed depth confidence information */
#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* BTTV 8-bit dithered RGB */
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index 8a2a1d4f6b29..6e7c67a0cca3 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -53,6 +53,7 @@ enum {
BNXT_RE_UCNTX_CMASK_HAVE_CCTX = 0x1ULL,
BNXT_RE_UCNTX_CMASK_HAVE_MODE = 0x02ULL,
BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED = 0x04ULL,
+ BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED = 0x08ULL,
};
enum bnxt_re_wqe_mode {
@@ -131,10 +132,13 @@ enum bnxt_re_shpg_offt {
enum bnxt_re_objects {
BNXT_RE_OBJECT_ALLOC_PAGE = (1U << UVERBS_ID_NS_SHIFT),
+ BNXT_RE_OBJECT_NOTIFY_DRV,
};
enum bnxt_re_alloc_page_type {
BNXT_RE_ALLOC_WC_PAGE = 0,
+ BNXT_RE_ALLOC_DBR_BAR_PAGE,
+ BNXT_RE_ALLOC_DBR_PAGE,
};
enum bnxt_re_var_alloc_page_attrs {
@@ -154,4 +158,7 @@ enum bnxt_re_alloc_page_methods {
BNXT_RE_METHOD_DESTROY_PAGE,
};
+enum bnxt_re_notify_drv_methods {
+ BNXT_RE_METHOD_NOTIFY_DRV = (1U << UVERBS_ID_NS_SHIFT),
+};
#endif /* __BNXT_RE_UVERBS_ABI_H__*/
diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h
index a7085e092d34..bb18f15489e3 100644
--- a/include/uapi/rdma/irdma-abi.h
+++ b/include/uapi/rdma/irdma-abi.h
@@ -22,10 +22,16 @@ enum irdma_memreg_type {
IRDMA_MEMREG_TYPE_CQ = 2,
};
+enum {
+ IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
+ IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 1,
+};
+
struct irdma_alloc_ucontext_req {
__u32 rsvd32;
__u8 userspace_ver;
__u8 rsvd8[3];
+ __aligned_u64 comp_mask;
};
struct irdma_alloc_ucontext_resp {
@@ -46,6 +52,9 @@ struct irdma_alloc_ucontext_resp {
__u16 max_hw_sq_chunk;
__u8 hw_rev;
__u8 rsvd2;
+ __aligned_u64 comp_mask;
+ __u16 min_hw_wq_size;
+ __u8 rsvd3[6];
};
struct irdma_alloc_pd_resp {
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
index fd3f9e5ee241..7c7975f9905e 100644
--- a/include/uapi/scsi/scsi_bsg_ufs.h
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -8,6 +8,7 @@
#ifndef SCSI_BSG_UFS_H
#define SCSI_BSG_UFS_H
+#include <asm/byteorder.h>
#include <linux/types.h>
/*
* This file intended to be included by both kernel and user space
@@ -40,11 +41,56 @@ enum ufs_rpmb_op_type {
* @dword_0: UPIU header DW-0
* @dword_1: UPIU header DW-1
* @dword_2: UPIU header DW-2
+ *
+ * @transaction_code: Type of request or response. See also enum
+ * upiu_request_transaction and enum upiu_response_transaction.
+ * @flags: UPIU flags. The meaning of individual flags depends on the
+ * transaction code.
+ * @lun: Logical unit number.
+ * @task_tag: Task tag.
+ * @iid: Initiator ID.
+ * @command_set_type: 0 for SCSI command set; 1 for UFS specific.
+ * @tm_function: Task management function in case of a task management request
+ * UPIU.
+ * @query_function: Query function in case of a query request UPIU.
+ * @response: 0 for success; 1 for failure.
+ * @status: SCSI status if this is the header of a response to a SCSI command.
+ * @ehs_length: EHS length in units of 32 bytes.
+ * @device_information: Device information.
+ * @data_segment_length: Data segment length.
*/
struct utp_upiu_header {
- __be32 dword_0;
- __be32 dword_1;
- __be32 dword_2;
+ union {
+ struct {
+ __be32 dword_0;
+ __be32 dword_1;
+ __be32 dword_2;
+ };
+ struct {
+ __u8 transaction_code;
+ __u8 flags;
+ __u8 lun;
+ __u8 task_tag;
+#if defined(__BIG_ENDIAN)
+ __u8 iid: 4;
+ __u8 command_set_type: 4;
+#elif defined(__LITTLE_ENDIAN)
+ __u8 command_set_type: 4;
+ __u8 iid: 4;
+#else
+#error
+#endif
+ union {
+ __u8 tm_function;
+ __u8 query_function;
+ };
+ __u8 response;
+ __u8 status;
+ __u8 ehs_length;
+ __u8 device_information;
+ __be16 data_segment_length;
+ };
+ };
};
/**
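To see what the union above buys its users, here is a minimal standalone sketch, assuming a little-endian host and using an invented struct name that mirrors (but does not include) the UAPI layout: the same 12 bytes can be addressed either as the legacy big-endian DWORD view or through the named fields, so callers no longer need shift-and-or macros to build the header.

/* Illustrative only: mirrors the new struct utp_upiu_header layout. */
#include <arpa/inet.h>   /* ntohl() */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct upiu_hdr_sketch {
	union {
		struct {
			uint32_t dword_0;	/* stored big-endian on the wire */
			uint32_t dword_1;
			uint32_t dword_2;
		};
		struct {
			uint8_t transaction_code;
			uint8_t flags;
			uint8_t lun;
			uint8_t task_tag;
			uint8_t command_set_type:4;	/* little-endian bit order */
			uint8_t iid:4;
			uint8_t tm_function;
			uint8_t response;
			uint8_t status;
			uint8_t ehs_length;
			uint8_t device_information;
			uint16_t data_segment_length;	/* big-endian on the wire */
		};
	};
};

int main(void)
{
	struct upiu_hdr_sketch h;

	/* Same invariant as the static_assert() added in include/ufs/ufs.h. */
	assert(sizeof(h) == 12);
	memset(&h, 0, sizeof(h));
	h.transaction_code = 0x01;	/* e.g. UPIU_TRANSACTION_COMMAND */
	h.task_tag = 0x2a;
	/* Byte 0 of the header is the most significant byte of DWORD 0. */
	printf("dword_0 = 0x%08x\n", ntohl(h.dword_0));	/* prints 0x0100002a */
	return 0;
}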
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index 198cb391f9db..0cced88f4531 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -11,10 +11,16 @@
#ifndef _UFS_H
#define _UFS_H
-#include <linux/mutex.h>
+#include <linux/bitops.h>
#include <linux/types.h>
#include <uapi/scsi/scsi_bsg_ufs.h>
+/*
+ * static_assert() is not allowed in UAPI header files, hence the size of
+ * struct utp_upiu_header is checked in this header file instead.
+ */
+static_assert(sizeof(struct utp_upiu_header) == 12);
+
#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
#define QUERY_DESC_MAX_SIZE 255
#define QUERY_DESC_MIN_SIZE 2
@@ -23,9 +29,6 @@
(sizeof(struct utp_upiu_header)))
#define UFS_SENSE_SIZE 18
-#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
- cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
- (byte1 << 8) | (byte0))
/*
* UFS device may have standard LUs and LUN id could be from 0x00 to
* 0x7F. Standard LUs use "Peripheral Device Addressing Format".
@@ -76,7 +79,7 @@ enum {
};
/* UTP UPIU Transaction Codes Initiator to Target */
-enum {
+enum upiu_request_transaction {
UPIU_TRANSACTION_NOP_OUT = 0x00,
UPIU_TRANSACTION_COMMAND = 0x01,
UPIU_TRANSACTION_DATA_OUT = 0x02,
@@ -85,7 +88,7 @@ enum {
};
/* UTP UPIU Transaction Codes Target to Initiator */
-enum {
+enum upiu_response_transaction {
UPIU_TRANSACTION_NOP_IN = 0x20,
UPIU_TRANSACTION_RESPONSE = 0x21,
UPIU_TRANSACTION_DATA_IN = 0x22,
@@ -102,6 +105,12 @@ enum {
UPIU_CMD_FLAGS_READ = 0x40,
};
+/* UPIU response flags */
+enum {
+ UPIU_RSP_FLAG_UNDERFLOW = 0x20,
+ UPIU_RSP_FLAG_OVERFLOW = 0x40,
+};
+
/* UPIU Task Attributes */
enum {
UPIU_TASK_ATTR_SIMPLE = 0x00,
@@ -466,21 +475,11 @@ enum {
UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
};
-/* UTP Transfer Request Command Offset */
-#define UPIU_COMMAND_TYPE_OFFSET 28
-
/* Offset of the response code in the UPIU header */
#define UPIU_RSP_CODE_OFFSET 8
enum {
- MASK_SCSI_STATUS = 0xFF,
- MASK_TASK_RESPONSE = 0xFF00,
- MASK_RSP_UPIU_RESULT = 0xFFFF,
- MASK_QUERY_DATA_SEG_LEN = 0xFFFF,
- MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
- MASK_RSP_EXCEPTION_EVENT = 0x10000,
MASK_TM_SERVICE_RESP = 0xFF,
- MASK_TM_FUNC = 0xFF,
};
/* Task management service response */
@@ -516,41 +515,6 @@ struct utp_cmd_rsp {
u8 sense_data[UFS_SENSE_SIZE];
};
-struct ufshpb_active_field {
- __be16 active_rgn;
- __be16 active_srgn;
-};
-#define HPB_ACT_FIELD_SIZE 4
-
-/**
- * struct utp_hpb_rsp - Response UPIU structure
- * @residual_transfer_count: Residual transfer count DW-3
- * @reserved1: Reserved double words DW-4 to DW-7
- * @sense_data_len: Sense data length DW-8 U16
- * @desc_type: Descriptor type of sense data
- * @additional_len: Additional length of sense data
- * @hpb_op: HPB operation type
- * @lun: LUN of response UPIU
- * @active_rgn_cnt: Active region count
- * @inactive_rgn_cnt: Inactive region count
- * @hpb_active_field: Recommended to read HPB region and subregion
- * @hpb_inactive_field: To be inactivated HPB region and subregion
- */
-struct utp_hpb_rsp {
- __be32 residual_transfer_count;
- __be32 reserved1[4];
- __be16 sense_data_len;
- u8 desc_type;
- u8 additional_len;
- u8 hpb_op;
- u8 lun;
- u8 active_rgn_cnt;
- u8 inactive_rgn_cnt;
- struct ufshpb_active_field hpb_active_field[2];
- __be16 hpb_inactive_field[2];
-};
-#define UTP_HPB_RSP_SIZE 40
-
/**
* struct utp_upiu_rsp - general upiu response structure
* @header: UPIU header structure DW-0 to DW-2
@@ -561,31 +525,10 @@ struct utp_upiu_rsp {
struct utp_upiu_header header;
union {
struct utp_cmd_rsp sr;
- struct utp_hpb_rsp hr;
struct utp_upiu_query qr;
};
};
-/**
- * struct ufs_query_req - parameters for building a query request
- * @query_func: UPIU header query function
- * @upiu_req: the query request data
- */
-struct ufs_query_req {
- u8 query_func;
- struct utp_upiu_query upiu_req;
-};
-
-/**
- * struct ufs_query_resp - UPIU QUERY
- * @response: device response code
- * @upiu_res: query response data
- */
-struct ufs_query_res {
- u8 response;
- struct utp_upiu_query upiu_res;
-};
-
/*
* VCCQ & VCCQ2 current requirement when UFS device is in sleep state
* and link is in Hibern8 state.
@@ -621,9 +564,6 @@ struct ufs_dev_info {
/* Stores the depth of queue in UFS device */
u8 bqueuedepth;
- /* UFS HPB related flag */
- bool hpb_enabled;
-
/* UFS WB related flags */
bool wb_enabled;
bool wb_buf_flush_enabled;
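As a side note on the comment above about static_assert() not being allowed in UAPI headers, the pattern of checking a UAPI structure's size from the first kernel-internal header that includes it can be sketched generically; the struct and size below are invented for illustration.

/* Sketch: compile-time ABI size check kept out of the UAPI header itself. */
#include <stdint.h>

struct wire_msg {		/* stand-in for a UAPI struct */
	uint32_t a;
	uint16_t b;
	uint8_t  c;
	uint8_t  d;
};

/* C11 spelling of the kernel's static_assert() wrapper. */
_Static_assert(sizeof(struct wire_msg) == 8,
	       "wire_msg must stay 8 bytes for ABI compatibility");

int main(void) { return 0; }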
diff --git a/include/ufs/ufs_quirks.h b/include/ufs/ufs_quirks.h
index bcb4f004bed5..41ff44dfa1db 100644
--- a/include/ufs/ufs_quirks.h
+++ b/include/ufs/ufs_quirks.h
@@ -107,10 +107,4 @@ struct ufs_dev_quirk {
*/
#define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11)
-/*
- * Some UFS devices require L2P entry should be swapped before being sent to the
- * UFS device for HPB READ command.
- */
-#define UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ (1 << 12)
-
#endif /* UFS_QUIRKS_H_ */
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 6dc11fa0ebb1..7d07b256e906 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -20,6 +20,7 @@
#include <linux/pm_runtime.h>
#include <linux/dma-direction.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
#include <ufs/unipro.h>
#include <ufs/ufs.h>
#include <ufs/ufs_quirks.h>
@@ -202,6 +203,25 @@ struct ufshcd_lrb {
};
/**
+ * struct ufs_query_req - parameters for building a query request
+ * @query_func: UPIU header query function
+ * @upiu_req: the query request data
+ */
+struct ufs_query_req {
+ u8 query_func;
+ struct utp_upiu_query upiu_req;
+};
+
+/**
+ * struct ufs_query_resp - UPIU QUERY
+ * @response: device response code
+ * @upiu_res: query response data
+ */
+struct ufs_query_res {
+ struct utp_upiu_query upiu_res;
+};
+
+/**
* struct ufs_query - holds relevant data structures for query request
* @request: request upiu and function
* @descriptor: buffer for sending/receiving descriptor
@@ -709,31 +729,6 @@ struct ufs_hba_variant_params {
u32 wb_flush_threshold;
};
-#ifdef CONFIG_SCSI_UFS_HPB
-/**
- * struct ufshpb_dev_info - UFSHPB device related info
- * @num_lu: the number of user logical unit to check whether all lu finished
- * initialization
- * @rgn_size: device reported HPB region size
- * @srgn_size: device reported HPB sub-region size
- * @slave_conf_cnt: counter to check all lu finished initialization
- * @hpb_disabled: flag to check if HPB is disabled
- * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value
- * @is_legacy: flag to check HPB 1.0
- * @control_mode: either host or device
- */
-struct ufshpb_dev_info {
- int num_lu;
- int rgn_size;
- int srgn_size;
- atomic_t slave_conf_cnt;
- bool hpb_disabled;
- u8 max_hpb_single_cmd;
- bool is_legacy;
- u8 control_mode;
-};
-#endif
-
struct ufs_hba_monitor {
unsigned long chunk_size;
@@ -894,7 +889,6 @@ enum ufshcd_mcq_opr {
* @rpm_dev_flush_recheck_work: used to suspend from RPM (runtime power
* management) after the UFS device has finished a WriteBooster buffer
* flush or auto BKOP.
- * @ufshpb_dev: information related to HPB (Host Performance Booster).
* @monitor: statistics about UFS commands
* @crypto_capabilities: Content of crypto capabilities register (0x100)
* @crypto_cap_array: Array of crypto capabilities
@@ -1050,10 +1044,6 @@ struct ufs_hba {
struct request_queue *bsg_queue;
struct delayed_work rpm_dev_flush_recheck_work;
-#ifdef CONFIG_SCSI_UFS_HPB
- struct ufshpb_dev_info ufshpb_dev;
-#endif
-
struct ufs_hba_monitor monitor;
#ifdef CONFIG_SCSI_UFS_CRYPTO
@@ -1254,9 +1244,12 @@ void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
void ufshcd_schedule_eh_work(struct ufs_hba *hba);
+void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq);
+void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);
@@ -1383,12 +1376,6 @@ int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg);
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
-int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
- struct utp_upiu_req *req_upiu,
- struct utp_upiu_req *rsp_upiu,
- int msgcode,
- u8 *desc_buff, int *buff_len,
- enum query_opcode desc_op);
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu, struct ufs_ehs *ehs_req,
struct ufs_ehs *ehs_rsp, int sg_cnt,
@@ -1398,6 +1385,7 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable);
int ufshcd_suspend_prepare(struct device *dev);
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
void ufshcd_resume_complete(struct device *dev);
+bool ufshcd_is_hba_active(struct ufs_hba *hba);
/* Wrapper functions for safely calling variant operations */
static inline int ufshcd_vops_init(struct ufs_hba *hba)
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index 146fbea76d98..d5accacae6bc 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -11,7 +11,8 @@
#ifndef _UFSHCI_H
#define _UFSHCI_H
-#include <scsi/scsi_host.h>
+#include <linux/types.h>
+#include <ufs/ufs.h>
enum {
TASK_REQ_UPIU_SIZE_DWORDS = 8,
@@ -126,7 +127,6 @@ enum {
};
#define SQ_ICU_ERR_CODE_MASK GENMASK(7, 4)
-#define UPIU_COMMAND_TYPE_MASK GENMASK(31, 28)
#define UFS_MASK(mask, offset) ((mask) << (offset))
/* UFS Version 08h */
@@ -438,15 +438,13 @@ enum {
UTP_SCSI_COMMAND = 0x00000000,
UTP_NATIVE_UFS_COMMAND = 0x10000000,
UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
- UTP_REQ_DESC_INT_CMD = 0x01000000,
- UTP_REQ_DESC_CRYPTO_ENABLE_CMD = 0x00800000,
};
/* UTP Transfer Request Data Direction (DD) */
-enum {
- UTP_NO_DATA_TRANSFER = 0x00000000,
- UTP_HOST_TO_DEVICE = 0x02000000,
- UTP_DEVICE_TO_HOST = 0x04000000,
+enum utp_data_direction {
+ UTP_NO_DATA_TRANSFER = 0,
+ UTP_HOST_TO_DEVICE = 1,
+ UTP_DEVICE_TO_HOST = 2,
};
/* Overall command status values */
@@ -505,17 +503,38 @@ struct utp_transfer_cmd_desc {
/**
* struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
- * @dword0: Descriptor Header DW0
- * @dword1: Descriptor Header DW1
- * @dword2: Descriptor Header DW2
- * @dword3: Descriptor Header DW3
*/
struct request_desc_header {
- __le32 dword_0;
- __le32 dword_1;
- __le32 dword_2;
- __le32 dword_3;
-};
+ u8 cci;
+ u8 ehs_length;
+#if defined(__BIG_ENDIAN)
+ u8 enable_crypto:1;
+ u8 reserved2:7;
+
+ u8 command_type:4;
+ u8 reserved1:1;
+ u8 data_direction:2;
+ u8 interrupt:1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved2:7;
+ u8 enable_crypto:1;
+
+ u8 interrupt:1;
+ u8 data_direction:2;
+ u8 reserved1:1;
+ u8 command_type:4;
+#else
+#error
+#endif
+
+ __le32 dunl;
+ u8 ocs;
+ u8 cds;
+ __le16 ldbc;
+ __le32 dunu;
+};
+
+static_assert(sizeof(struct request_desc_header) == 16);
/**
* struct utp_transfer_req_desc - UTP Transfer Request Descriptor (UTRD)
diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h
index dc9dd1d23f0f..256eb3a43f54 100644
--- a/include/ufs/unipro.h
+++ b/include/ufs/unipro.h
@@ -230,6 +230,12 @@ enum ufs_hs_gear_tag {
UFS_HS_G5 /* HS Gear 5 */
};
+enum ufs_lanes {
+ UFS_LANE_DONT_CHANGE, /* Don't change Lane */
+ UFS_LANE_1, /* Lane 1 (default for reset) */
+ UFS_LANE_2, /* Lane 2 */
+};
+
enum ufs_unipro_ver {
UFS_UNIPRO_VER_RESERVED = 0,
UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
diff --git a/init/main.c b/init/main.c
index ad920fac325c..436d73261810 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1540,6 +1540,7 @@ static noinline void __init kernel_init_freeable(void)
smp_init();
sched_init_smp();
+ workqueue_init_topology();
padata_init();
page_alloc_init_late();
diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c
index 300455b4bc12..c53678875416 100644
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -93,6 +93,8 @@ __cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
struct io_uring_sqe *sqe;
unsigned int sq_idx;
+ if (ctx->flags & IORING_SETUP_NO_SQARRAY)
+ break;
sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
if (sq_idx > sq_mask)
continue;
diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c
index 62f345587df5..1ecc8c748768 100644
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -174,6 +174,16 @@ static void io_worker_ref_put(struct io_wq *wq)
complete(&wq->worker_done);
}
+bool io_wq_worker_stopped(void)
+{
+ struct io_worker *worker = current->worker_private;
+
+ if (WARN_ON_ONCE(!io_wq_current_is_worker()))
+ return true;
+
+ return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state);
+}
+
static void io_worker_cancel_cb(struct io_worker *worker)
{
struct io_wq_acct *acct = io_wq_get_acct(worker);
diff --git a/io_uring/io-wq.h b/io_uring/io-wq.h
index 06d9ca90c577..2b2a6406dd8e 100644
--- a/io_uring/io-wq.h
+++ b/io_uring/io-wq.h
@@ -52,6 +52,7 @@ void io_wq_hash_work(struct io_wq_work *work, void *val);
int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);
+bool io_wq_worker_stopped(void);
static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index e7675355048d..783ed0fff71b 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -150,6 +150,31 @@ static void io_queue_sqe(struct io_kiocb *req);
struct kmem_cache *req_cachep;
+static int __read_mostly sysctl_io_uring_disabled;
+static int __read_mostly sysctl_io_uring_group = -1;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table kernel_io_uring_disabled_table[] = {
+ {
+ .procname = "io_uring_disabled",
+ .data = &sysctl_io_uring_disabled,
+ .maxlen = sizeof(sysctl_io_uring_disabled),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
+ {
+ .procname = "io_uring_group",
+ .data = &sysctl_io_uring_group,
+ .maxlen = sizeof(gid_t),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {},
+};
+#endif
+
struct sock *io_uring_get_socket(struct file *file)
{
#if defined(CONFIG_UNIX)
@@ -883,7 +908,7 @@ static void __io_flush_post_cqes(struct io_ring_ctx *ctx)
struct io_uring_cqe *cqe = &ctx->completion_cqes[i];
if (!io_fill_cqe_aux(ctx, cqe->user_data, cqe->res, cqe->flags)) {
- if (ctx->task_complete) {
+ if (ctx->lockless_cq) {
spin_lock(&ctx->completion_lock);
io_cqring_event_overflow(ctx, cqe->user_data,
cqe->res, cqe->flags, 0, 0);
@@ -1541,7 +1566,7 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx)
if (!(req->flags & REQ_F_CQE_SKIP) &&
unlikely(!io_fill_cqe_req(ctx, req))) {
- if (ctx->task_complete) {
+ if (ctx->lockless_cq) {
spin_lock(&ctx->completion_lock);
io_req_cqe_overflow(req);
spin_unlock(&ctx->completion_lock);
@@ -1950,6 +1975,8 @@ fail:
if (!needs_poll) {
if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
break;
+ if (io_wq_worker_stopped())
+ break;
cond_resched();
continue;
}
@@ -4038,9 +4065,30 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
return io_uring_create(entries, &p, params);
}
+static inline bool io_uring_allowed(void)
+{
+ int disabled = READ_ONCE(sysctl_io_uring_disabled);
+ kgid_t io_uring_group;
+
+ if (disabled == 2)
+ return false;
+
+ if (disabled == 0 || capable(CAP_SYS_ADMIN))
+ return true;
+
+ io_uring_group = make_kgid(&init_user_ns, sysctl_io_uring_group);
+ if (!gid_valid(io_uring_group))
+ return false;
+
+ return in_group_p(io_uring_group);
+}
+
SYSCALL_DEFINE2(io_uring_setup, u32, entries,
struct io_uring_params __user *, params)
{
+ if (!io_uring_allowed())
+ return -EPERM;
+
return io_uring_setup(entries, params);
}
@@ -4634,6 +4682,10 @@ static int __init io_uring_init(void)
offsetof(struct io_kiocb, cmd.data),
sizeof_field(struct io_kiocb, cmd.data), NULL);
+#ifdef CONFIG_SYSCTL
+ register_sysctl_init("kernel", kernel_io_uring_disabled_table);
+#endif
+
return 0;
};
__initcall(io_uring_init);
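From an application's point of view the new sysctls only change the failure mode of io_uring_setup(2): with kernel.io_uring_disabled=2, or =1 when the caller lacks CAP_SYS_ADMIN and is not in kernel.io_uring_group, the syscall returns EPERM. A small userspace probe, assuming the io_uring UAPI headers are installed, might look like this (illustrative, not part of the patch):

#include <errno.h>
#include <linux/io_uring.h>	/* struct io_uring_params */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	int fd = syscall(__NR_io_uring_setup, 4, &p);
	if (fd < 0) {
		if (errno == EPERM)
			printf("io_uring creation denied by sysctl policy\n");
		else
			printf("io_uring_setup failed: %s\n", strerror(errno));
		return 1;
	}
	printf("io_uring available (ring fd %d)\n", fd);
	close(fd);
	return 0;
}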
diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c
index ee2d2c687fda..bd6c2c7959a5 100644
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -430,7 +430,9 @@ __cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
if (sqd) {
io_sq_thread_park(sqd);
- ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
+ /* Don't set affinity for a dying thread */
+ if (sqd->thread)
+ ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
io_sq_thread_unpark(sqd);
}
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index b0cb7631e48b..21d2fa815e78 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -143,6 +143,8 @@ static const struct audit_nfcfgop_tab audit_nfcfgs[] = {
{ AUDIT_NFT_OP_OBJ_RESET, "nft_reset_obj" },
{ AUDIT_NFT_OP_FLOWTABLE_REGISTER, "nft_register_flowtable" },
{ AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, "nft_unregister_flowtable" },
+ { AUDIT_NFT_OP_SETELEM_RESET, "nft_reset_setelem" },
+ { AUDIT_NFT_OP_RULE_RESET, "nft_reset_rule" },
{ AUDIT_NFT_OP_INVALID, "nft_invalid" },
};
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
index b5149cfce7d4..146824cc9689 100644
--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -553,7 +553,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
void *value, u64 map_flags, gfp_t gfp_flags)
{
struct bpf_local_storage_data *old_sdata = NULL;
- struct bpf_local_storage_elem *selem = NULL;
+ struct bpf_local_storage_elem *alloc_selem, *selem = NULL;
struct bpf_local_storage *local_storage;
unsigned long flags;
int err;
@@ -607,11 +607,12 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
}
}
- if (gfp_flags == GFP_KERNEL) {
- selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
- if (!selem)
- return ERR_PTR(-ENOMEM);
- }
+ /* A lookup has just been done before and concluded a new selem is
+ * needed. An unnecessary alloc is therefore unlikely.
+ */
+ alloc_selem = selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
+ if (!alloc_selem)
+ return ERR_PTR(-ENOMEM);
raw_spin_lock_irqsave(&local_storage->lock, flags);
@@ -623,13 +624,13 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
* simple.
*/
err = -EAGAIN;
- goto unlock_err;
+ goto unlock;
}
old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
err = check_flags(old_sdata, map_flags);
if (err)
- goto unlock_err;
+ goto unlock;
if (old_sdata && (map_flags & BPF_F_LOCK)) {
copy_map_value_locked(&smap->map, old_sdata->data, value,
@@ -638,23 +639,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
goto unlock;
}
- if (gfp_flags != GFP_KERNEL) {
- /* local_storage->lock is held. Hence, we are sure
- * we can unlink and uncharge the old_sdata successfully
- * later. Hence, instead of charging the new selem now
- * and then uncharge the old selem later (which may cause
- * a potential but unnecessary charge failure), avoid taking
- * a charge at all here (the "!old_sdata" check) and the
- * old_sdata will not be uncharged later during
- * bpf_selem_unlink_storage_nolock().
- */
- selem = bpf_selem_alloc(smap, owner, value, !old_sdata, gfp_flags);
- if (!selem) {
- err = -ENOMEM;
- goto unlock_err;
- }
- }
-
+ alloc_selem = NULL;
/* First, link the new selem to the map */
bpf_selem_link_map(smap, selem);
@@ -665,20 +650,16 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
if (old_sdata) {
bpf_selem_unlink_map(SELEM(old_sdata));
bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
- false, false);
+ true, false);
}
unlock:
raw_spin_unlock_irqrestore(&local_storage->lock, flags);
- return SDATA(selem);
-
-unlock_err:
- raw_spin_unlock_irqrestore(&local_storage->lock, flags);
- if (selem) {
+ if (alloc_selem) {
mem_uncharge(smap, owner, smap->elem_size);
- bpf_selem_free(selem, smap, true);
+ bpf_selem_free(alloc_selem, smap, true);
}
- return ERR_PTR(err);
+ return err ? ERR_PTR(err) : SDATA(selem);
}
static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
@@ -779,7 +760,7 @@ void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
* of the loop will set the free_cgroup_storage to true.
*/
free_storage = bpf_selem_unlink_storage_nolock(
- local_storage, selem, false, true);
+ local_storage, selem, true, true);
}
raw_spin_unlock_irqrestore(&local_storage->lock, flags);
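The rework above is an instance of a common pattern: allocate before taking the lock, track the allocation in a separate pointer, clear that pointer once the element is actually linked in, and free whatever is still tracked after unlocking. A generic standalone sketch of the pattern (invented names, pthread mutex instead of the raw spinlock):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct elem { int value; struct elem *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct elem *head;

/* Insert value unless it is already present. The allocation happens
 * outside the lock; alloc_e tracks whether it was actually consumed. */
static int insert_unique(int value)
{
	struct elem *alloc_e, *e;
	int err = 0;

	alloc_e = e = malloc(sizeof(*e));
	if (!alloc_e)
		return -ENOMEM;
	e->value = value;

	pthread_mutex_lock(&lock);
	for (struct elem *it = head; it; it = it->next) {
		if (it->value == value) {
			err = -EEXIST;
			goto unlock;
		}
	}
	e->next = head;
	head = e;
	alloc_e = NULL;		/* consumed: do not free below */
unlock:
	pthread_mutex_unlock(&lock);
	free(alloc_e);		/* free(NULL) is a no-op */
	return err;
}

int main(void)
{
	insert_unique(1);
	insert_unique(1);	/* returns -EEXIST, unused element is freed */
	return 0;
}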
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 249657c466dd..1095bbe29859 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -553,7 +553,7 @@ s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind)
return -ENOENT;
}
-static s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
+s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p)
{
struct btf *btf;
s32 ret;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index ebeb0695305a..eb01c31ed591 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -5502,9 +5502,9 @@ int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
}
run_ctx.bpf_cookie = 0;
- run_ctx.saved_run_ctx = NULL;
if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
/* recursion detected */
+ __bpf_prog_exit_sleepable_recur(prog, 0, &run_ctx);
bpf_prog_put(prog);
return -EBUSY;
}
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 78acf28d4873..53ff50cac61e 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -926,13 +926,12 @@ u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
migrate_disable();
might_fault();
+ run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
+
if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
bpf_prog_inc_misses_counter(prog);
return 0;
}
-
- run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
-
return bpf_prog_start_time();
}
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 83044312bc41..c487ffef6652 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -431,7 +431,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
if (l->list[mid] == pid) {
index = mid;
break;
- } else if (l->list[mid] <= pid)
+ } else if (l->list[mid] < pid)
index = mid + 1;
else
end = mid;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 5fa95f86cb4d..1fb7f562289d 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -493,28 +493,6 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
}
/**
- * cgroup_tryget_css - try to get a cgroup's css for the specified subsystem
- * @cgrp: the cgroup of interest
- * @ss: the subsystem of interest
- *
- * Find and get @cgrp's css associated with @ss. If the css doesn't exist
- * or is offline, %NULL is returned.
- */
-static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
- struct cgroup_subsys *ss)
-{
- struct cgroup_subsys_state *css;
-
- rcu_read_lock();
- css = cgroup_css(cgrp, ss);
- if (css && !css_tryget_online(css))
- css = NULL;
- rcu_read_unlock();
-
- return css;
-}
-
-/**
* cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest (%NULL returns @cgrp->self)
@@ -679,7 +657,7 @@ EXPORT_SYMBOL_GPL(of_css);
* @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
* @cgrp: the target cgroup to iterate css's of
*
- * Should be called under cgroup_[tree_]mutex.
+ * Should be called under cgroup_mutex.
*/
#define for_each_css(css, ssid, cgrp) \
for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
@@ -929,7 +907,7 @@ static void css_set_move_task(struct task_struct *task,
#define CSS_SET_HASH_BITS 7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);
-static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
+static unsigned long css_set_hash(struct cgroup_subsys_state **css)
{
unsigned long key = 0UL;
struct cgroup_subsys *ss;
@@ -1070,7 +1048,7 @@ static bool compare_css_sets(struct css_set *cset,
*/
static struct css_set *find_existing_css_set(struct css_set *old_cset,
struct cgroup *cgrp,
- struct cgroup_subsys_state *template[])
+ struct cgroup_subsys_state **template)
{
struct cgroup_root *root = cgrp->root;
struct cgroup_subsys *ss;
@@ -1736,7 +1714,7 @@ static int css_populate_dir(struct cgroup_subsys_state *css)
struct cftype *cfts, *failed_cfts;
int ret;
- if ((css->flags & CSS_VISIBLE) || !cgrp->kn)
+ if (css->flags & CSS_VISIBLE)
return 0;
if (!css->ss) {
@@ -2499,7 +2477,7 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
/*
* This function may be called both before and
- * after cgroup_taskset_migrate(). The two cases
+ * after cgroup_migrate_execute(). The two cases
* can be distinguished by looking at whether @cset
* has its ->mg_dst_cset set.
*/
@@ -3654,9 +3632,32 @@ static int cgroup_stat_show(struct seq_file *seq, void *v)
return 0;
}
-static int __maybe_unused cgroup_extra_stat_show(struct seq_file *seq,
- struct cgroup *cgrp, int ssid)
+#ifdef CONFIG_CGROUP_SCHED
+/**
+ * cgroup_tryget_css - try to get a cgroup's css for the specified subsystem
+ * @cgrp: the cgroup of interest
+ * @ss: the subsystem of interest
+ *
+ * Find and get @cgrp's css associated with @ss. If the css doesn't exist
+ * or is offline, %NULL is returned.
+ */
+static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
+ struct cgroup_subsys *ss)
+{
+ struct cgroup_subsys_state *css;
+
+ rcu_read_lock();
+ css = cgroup_css(cgrp, ss);
+ if (css && !css_tryget_online(css))
+ css = NULL;
+ rcu_read_unlock();
+
+ return css;
+}
+
+static int cgroup_extra_stat_show(struct seq_file *seq, int ssid)
{
+ struct cgroup *cgrp = seq_css(seq)->cgroup;
struct cgroup_subsys *ss = cgroup_subsys[ssid];
struct cgroup_subsys_state *css;
int ret;
@@ -3673,20 +3674,8 @@ static int __maybe_unused cgroup_extra_stat_show(struct seq_file *seq,
return ret;
}
-static int cpu_stat_show(struct seq_file *seq, void *v)
-{
- struct cgroup __maybe_unused *cgrp = seq_css(seq)->cgroup;
- int ret = 0;
-
- cgroup_base_stat_cputime_show(seq);
-#ifdef CONFIG_CGROUP_SCHED
- ret = cgroup_extra_stat_show(seq, cgrp, cpu_cgrp_id);
-#endif
- return ret;
-}
-
-static int __maybe_unused cgroup_local_stat_show(struct seq_file *seq,
- struct cgroup *cgrp, int ssid)
+static int cgroup_local_stat_show(struct seq_file *seq,
+ struct cgroup *cgrp, int ssid)
{
struct cgroup_subsys *ss = cgroup_subsys[ssid];
struct cgroup_subsys_state *css;
@@ -3703,6 +3692,18 @@ static int __maybe_unused cgroup_local_stat_show(struct seq_file *seq,
css_put(css);
return ret;
}
+#endif
+
+static int cpu_stat_show(struct seq_file *seq, void *v)
+{
+ int ret = 0;
+
+ cgroup_base_stat_cputime_show(seq);
+#ifdef CONFIG_CGROUP_SCHED
+ ret = cgroup_extra_stat_show(seq, cpu_cgrp_id);
+#endif
+ return ret;
+}
static int cpu_local_stat_show(struct seq_file *seq, void *v)
{
@@ -4350,14 +4351,13 @@ static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
return ret;
}
-static int cgroup_rm_cftypes_locked(struct cftype *cfts)
+static void cgroup_rm_cftypes_locked(struct cftype *cfts)
{
lockdep_assert_held(&cgroup_mutex);
list_del(&cfts->node);
cgroup_apply_cftypes(cfts, false);
cgroup_exit_cftypes(cfts);
- return 0;
}
/**
@@ -4373,8 +4373,6 @@ static int cgroup_rm_cftypes_locked(struct cftype *cfts)
*/
int cgroup_rm_cftypes(struct cftype *cfts)
{
- int ret;
-
if (!cfts || cfts[0].name[0] == '\0')
return 0;
@@ -4382,9 +4380,9 @@ int cgroup_rm_cftypes(struct cftype *cfts)
return -ENOENT;
cgroup_lock();
- ret = cgroup_rm_cftypes_locked(cfts);
+ cgroup_rm_cftypes_locked(cfts);
cgroup_unlock();
- return ret;
+ return 0;
}
/**
@@ -5337,7 +5335,7 @@ static struct cftype cgroup_psi_files[] = {
* RCU callback.
*
* 4. After the grace period, the css can be freed. Implemented in
- * css_free_work_fn().
+ * css_free_rwork_fn().
*
* It is actually hairier because both step 2 and 4 require process context
* and thus involve punting to css->destroy_work adding two additional
@@ -5581,8 +5579,7 @@ err_free_css:
/*
* The returned cgroup is fully initialized including its control mask, but
- * it isn't associated with its kernfs_node and doesn't have the control
- * mask applied.
+ * it doesn't have the control mask applied.
*/
static struct cgroup *cgroup_create(struct cgroup *parent, const char *name,
umode_t mode)
@@ -5908,7 +5905,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
/*
* Mark @cgrp and the associated csets dead. The former prevents
* further task migration and child creation by disabling
- * cgroup_lock_live_group(). The latter makes the csets ignored by
+ * cgroup_kn_lock_live(). The latter makes the csets ignored by
* the migration path.
*/
cgrp->self.flags &= ~CSS_ONLINE;
@@ -5930,7 +5927,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
parent->nr_threaded_children--;
spin_lock_irq(&css_set_lock);
- for (tcgrp = cgroup_parent(cgrp); tcgrp; tcgrp = cgroup_parent(tcgrp)) {
+ for (tcgrp = parent; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
tcgrp->nr_descendants--;
tcgrp->nr_dying_descendants++;
/*
@@ -6123,8 +6120,8 @@ int __init cgroup_init(void)
continue;
if (cgroup1_ssid_disabled(ssid))
- printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n",
- ss->name);
+ pr_info("Disabling %s control group subsystem in v1 mounts\n",
+ ss->name);
cgrp_dfl_root.subsys_mask |= 1 << ss->id;
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 58e6f18f01c1..58ec88efa4f8 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1230,7 +1230,7 @@ static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
/*
* Percpu kthreads in top_cpuset are ignored
*/
- if ((task->flags & PF_KTHREAD) && kthread_is_per_cpu(task))
+ if (kthread_is_per_cpu(task))
continue;
cpumask_andnot(new_cpus, possible_mask, cs->subparts_cpus);
} else {
@@ -1255,7 +1255,7 @@ static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus)
static void compute_effective_cpumask(struct cpumask *new_cpus,
struct cpuset *cs, struct cpuset *parent)
{
- if (parent->nr_subparts_cpus) {
+ if (parent->nr_subparts_cpus && is_partition_valid(cs)) {
cpumask_or(new_cpus, parent->effective_cpus,
parent->subparts_cpus);
cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
@@ -1277,6 +1277,52 @@ enum subparts_cmd {
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
int turning_on);
+static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
+ struct tmpmasks *tmp);
+
+/*
+ * Update partition exclusive flag
+ *
+ * Return: 0 if successful, an error code otherwise
+ */
+static int update_partition_exclusive(struct cpuset *cs, int new_prs)
+{
+ bool exclusive = (new_prs > 0);
+
+ if (exclusive && !is_cpu_exclusive(cs)) {
+ if (update_flag(CS_CPU_EXCLUSIVE, cs, 1))
+ return PERR_NOTEXCL;
+ } else if (!exclusive && is_cpu_exclusive(cs)) {
+ /* Turning off CS_CPU_EXCLUSIVE will not return error */
+ update_flag(CS_CPU_EXCLUSIVE, cs, 0);
+ }
+ return 0;
+}
+
+/*
+ * Update partition load balance flag and/or rebuild sched domain
+ *
+ * Changing load balance flag will automatically call
+ * rebuild_sched_domains_locked().
+ */
+static void update_partition_sd_lb(struct cpuset *cs, int old_prs)
+{
+ int new_prs = cs->partition_root_state;
+ bool new_lb = (new_prs != PRS_ISOLATED);
+ bool rebuild_domains = (new_prs > 0) || (old_prs > 0);
+
+ if (new_lb != !!is_sched_load_balance(cs)) {
+ rebuild_domains = true;
+ if (new_lb)
+ set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
+ else
+ clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
+ }
+
+ if (rebuild_domains)
+ rebuild_sched_domains_locked();
+}
+
/**
* update_parent_subparts_cpumask - update subparts_cpus mask of parent cpuset
* @cs: The cpuset that requests change in partition root state
@@ -1336,8 +1382,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
return is_partition_invalid(parent)
? PERR_INVPARENT : PERR_NOTPART;
}
- if ((newmask && cpumask_empty(newmask)) ||
- (!newmask && cpumask_empty(cs->cpus_allowed)))
+ if (!newmask && cpumask_empty(cs->cpus_allowed))
return PERR_CPUSEMPTY;
/*
@@ -1404,10 +1449,15 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
adding = cpumask_andnot(tmp->addmask, tmp->addmask,
parent->subparts_cpus);
/*
+ * Empty cpumask is not allowed
+ */
+ if (cpumask_empty(newmask)) {
+ part_error = PERR_CPUSEMPTY;
+ /*
* Make partition invalid if parent's effective_cpus could
* become empty and there are tasks in the parent.
*/
- if (adding &&
+ } else if (adding &&
cpumask_subset(parent->effective_cpus, tmp->addmask) &&
!cpumask_intersects(tmp->delmask, cpu_active_mask) &&
partition_is_populated(parent, cs)) {
@@ -1480,14 +1530,13 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
/*
* Transitioning between invalid to valid or vice versa may require
- * changing CS_CPU_EXCLUSIVE and CS_SCHED_LOAD_BALANCE.
+ * changing CS_CPU_EXCLUSIVE.
*/
if (old_prs != new_prs) {
- if (is_prs_invalid(old_prs) && !is_cpu_exclusive(cs) &&
- (update_flag(CS_CPU_EXCLUSIVE, cs, 1) < 0))
- return PERR_NOTEXCL;
- if (is_prs_invalid(new_prs) && is_cpu_exclusive(cs))
- update_flag(CS_CPU_EXCLUSIVE, cs, 0);
+ int err = update_partition_exclusive(cs, new_prs);
+
+ if (err)
+ return err;
}
/*
@@ -1520,24 +1569,34 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
spin_unlock_irq(&callback_lock);
- if (adding || deleting)
+ if (adding || deleting) {
update_tasks_cpumask(parent, tmp->addmask);
+ if (parent->child_ecpus_count)
+ update_sibling_cpumasks(parent, cs, tmp);
+ }
/*
- * Set or clear CS_SCHED_LOAD_BALANCE when partcmd_update, if necessary.
- * rebuild_sched_domains_locked() may be called.
+ * For partcmd_update without newmask, it is being called from
+ * cpuset_hotplug_workfn() where cpus_read_lock() wasn't taken.
+ * Update the load balance flag and scheduling domain if
+ * cpus_read_trylock() is successful.
*/
- if (old_prs != new_prs) {
- if (old_prs == PRS_ISOLATED)
- update_flag(CS_SCHED_LOAD_BALANCE, cs, 1);
- else if (new_prs == PRS_ISOLATED)
- update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
+ if ((cmd == partcmd_update) && !newmask && cpus_read_trylock()) {
+ update_partition_sd_lb(cs, old_prs);
+ cpus_read_unlock();
}
+
notify_partition_change(cs, old_prs);
return 0;
}
/*
+ * update_cpumasks_hier() flags
+ */
+#define HIER_CHECKALL 0x01 /* Check all cpusets with no skipping */
+#define HIER_NO_SD_REBUILD 0x02 /* Don't rebuild sched domains */
+
+/*
* update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
* @cs: the cpuset to consider
* @tmp: temp variables for calculating effective_cpus & partition setup
@@ -1551,7 +1610,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd,
* Called with cpuset_mutex held
*/
static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
- bool force)
+ int flags)
{
struct cpuset *cp;
struct cgroup_subsys_state *pos_css;
@@ -1588,11 +1647,16 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
}
/*
- * Skip the whole subtree if the cpumask remains the same
- * and has no partition root state and force flag not set.
+ * Skip the whole subtree if
+ * 1) the cpumask remains the same,
+ * 2) has no partition root state,
+ * 3) HIER_CHECKALL flag not set, and
+ * 4) for v2 load balance state same as its parent.
*/
- if (!cp->partition_root_state && !force &&
- cpumask_equal(tmp->new_cpus, cp->effective_cpus)) {
+ if (!cp->partition_root_state && !(flags & HIER_CHECKALL) &&
+ cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
+ (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
+ (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
pos_css = css_rightmost_descendant(pos_css);
continue;
}
@@ -1676,6 +1740,20 @@ update_parent_subparts:
update_tasks_cpumask(cp, tmp->new_cpus);
/*
+ * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE
+ * from parent if current cpuset isn't a valid partition root
+ * and their load balance states differ.
+ */
+ if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+ !is_partition_valid(cp) &&
+ (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
+ if (is_sched_load_balance(parent))
+ set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
+ else
+ clear_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
+ }
+
+ /*
* On legacy hierarchy, if the effective cpumask of any non-
* empty cpuset is changed, we need to rebuild sched domains.
* On default hierarchy, the cpuset needs to be a partition
@@ -1692,7 +1770,7 @@ update_parent_subparts:
}
rcu_read_unlock();
- if (need_rebuild_sched_domains)
+ if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD))
rebuild_sched_domains_locked();
}
@@ -1716,7 +1794,9 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
* to use the right effective_cpus value.
*
* The update_cpumasks_hier() function may sleep. So we have to
- * release the RCU read lock before calling it.
+ * release the RCU read lock before calling it. HIER_NO_SD_REBUILD
+ * flag is used to suppress rebuild of sched domains as the callers
+ * will take care of that.
*/
rcu_read_lock();
cpuset_for_each_child(sibling, pos_css, parent) {
@@ -1728,7 +1808,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs,
continue;
rcu_read_unlock();
- update_cpumasks_hier(sibling, tmp, false);
+ update_cpumasks_hier(sibling, tmp, HIER_NO_SD_REBUILD);
rcu_read_lock();
css_put(&sibling->css);
}
@@ -1747,6 +1827,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
int retval;
struct tmpmasks tmp;
bool invalidate = false;
+ int old_prs = cs->partition_root_state;
/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
if (cs == &top_cpuset)
@@ -1774,18 +1855,8 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
return 0;
-#ifdef CONFIG_CPUMASK_OFFSTACK
- /*
- * Use the cpumasks in trialcs for tmpmasks when they are pointers
- * to allocated cpumasks.
- *
- * Note that update_parent_subparts_cpumask() uses only addmask &
- * delmask, but not new_cpus.
- */
- tmp.addmask = trialcs->subparts_cpus;
- tmp.delmask = trialcs->effective_cpus;
- tmp.new_cpus = NULL;
-#endif
+ if (alloc_cpumasks(NULL, &tmp))
+ return -ENOMEM;
retval = validate_change(cs, trialcs);
@@ -1814,7 +1885,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
retval = 0;
}
if (retval < 0)
- return retval;
+ goto out_free;
if (cs->partition_root_state) {
if (invalidate)
@@ -1849,13 +1920,8 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
}
spin_unlock_irq(&callback_lock);
-#ifdef CONFIG_CPUMASK_OFFSTACK
- /* Now trialcs->cpus_allowed is available */
- tmp.new_cpus = trialcs->cpus_allowed;
-#endif
-
/* effective_cpus will be updated here */
- update_cpumasks_hier(cs, &tmp, false);
+ update_cpumasks_hier(cs, &tmp, 0);
if (cs->partition_root_state) {
struct cpuset *parent = parent_cs(cs);
@@ -1866,7 +1932,12 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
*/
if (parent->child_ecpus_count)
update_sibling_cpumasks(parent, cs, &tmp);
+
+ /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains */
+ update_partition_sd_lb(cs, old_prs);
}
+out_free:
+ free_cpumasks(NULL, &tmp);
return 0;
}
@@ -2242,7 +2313,6 @@ out:
static int update_prstate(struct cpuset *cs, int new_prs)
{
int err = PERR_NONE, old_prs = cs->partition_root_state;
- bool sched_domain_rebuilt = false;
struct cpuset *parent = parent_cs(cs);
struct tmpmasks tmpmask;
@@ -2261,45 +2331,26 @@ static int update_prstate(struct cpuset *cs, int new_prs)
if (alloc_cpumasks(NULL, &tmpmask))
return -ENOMEM;
+ err = update_partition_exclusive(cs, new_prs);
+ if (err)
+ goto out;
+
if (!old_prs) {
/*
- * Turning on partition root requires setting the
- * CS_CPU_EXCLUSIVE bit implicitly as well and cpus_allowed
- * cannot be empty.
+ * cpus_allowed cannot be empty.
*/
if (cpumask_empty(cs->cpus_allowed)) {
err = PERR_CPUSEMPTY;
goto out;
}
- err = update_flag(CS_CPU_EXCLUSIVE, cs, 1);
- if (err) {
- err = PERR_NOTEXCL;
- goto out;
- }
-
err = update_parent_subparts_cpumask(cs, partcmd_enable,
NULL, &tmpmask);
- if (err) {
- update_flag(CS_CPU_EXCLUSIVE, cs, 0);
- goto out;
- }
-
- if (new_prs == PRS_ISOLATED) {
- /*
- * Disable the load balance flag should not return an
- * error unless the system is running out of memory.
- */
- update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
- sched_domain_rebuilt = true;
- }
} else if (old_prs && new_prs) {
/*
* A change in load balance state only, no change in cpumasks.
*/
- update_flag(CS_SCHED_LOAD_BALANCE, cs, (new_prs != PRS_ISOLATED));
- sched_domain_rebuilt = true;
- goto out; /* Sched domain is rebuilt in update_flag() */
+ ;
} else {
/*
* Switching back to member is always allowed even if it
@@ -2318,40 +2369,31 @@ static int update_prstate(struct cpuset *cs, int new_prs)
compute_effective_cpumask(cs->effective_cpus, cs, parent);
spin_unlock_irq(&callback_lock);
}
-
- /* Turning off CS_CPU_EXCLUSIVE will not return error */
- update_flag(CS_CPU_EXCLUSIVE, cs, 0);
-
- if (!is_sched_load_balance(cs)) {
- /* Make sure load balance is on */
- update_flag(CS_SCHED_LOAD_BALANCE, cs, 1);
- sched_domain_rebuilt = true;
- }
}
-
- update_tasks_cpumask(parent, tmpmask.new_cpus);
-
- if (parent->child_ecpus_count)
- update_sibling_cpumasks(parent, cs, &tmpmask);
-
- if (!sched_domain_rebuilt)
- rebuild_sched_domains_locked();
out:
/*
- * Make partition invalid if an error happen
+ * Make partition invalid & disable CS_CPU_EXCLUSIVE if an error
+ * happens.
*/
- if (err)
+ if (err) {
new_prs = -new_prs;
+ update_partition_exclusive(cs, new_prs);
+ }
+
spin_lock_irq(&callback_lock);
cs->partition_root_state = new_prs;
WRITE_ONCE(cs->prs_err, err);
spin_unlock_irq(&callback_lock);
+
/*
* Update child cpusets, if present.
* Force update if switching back to member.
*/
if (!list_empty(&cs->css.children))
- update_cpumasks_hier(cs, &tmpmask, !new_prs);
+ update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0);
+
+ /* Update sched domains and load balance flag */
+ update_partition_sd_lb(cs, old_prs);
notify_partition_change(cs, old_prs);
free_cpumasks(NULL, &tmpmask);
@@ -2487,6 +2529,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
struct cgroup_subsys_state *css;
struct cpuset *cs, *oldcs;
struct task_struct *task;
+ bool cpus_updated, mems_updated;
int ret;
/* used later by cpuset_attach() */
@@ -2501,13 +2544,25 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
if (ret)
goto out_unlock;
+ cpus_updated = !cpumask_equal(cs->effective_cpus, oldcs->effective_cpus);
+ mems_updated = !nodes_equal(cs->effective_mems, oldcs->effective_mems);
+
cgroup_taskset_for_each(task, css, tset) {
ret = task_can_attach(task);
if (ret)
goto out_unlock;
- ret = security_task_setscheduler(task);
- if (ret)
- goto out_unlock;
+
+ /*
+ * Skip rights over task check in v2 when nothing changes;
+ * migration permission derives from hierarchy ownership in
+ * cgroup_procs_write_permission().
+ */
+ if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
+ (cpus_updated || mems_updated)) {
+ ret = security_task_setscheduler(task);
+ if (ret)
+ goto out_unlock;
+ }
if (dl_task(task)) {
cs->nr_migrate_dl_tasks++;
@@ -3222,6 +3277,14 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
cs->use_parent_ecpus = true;
parent->child_ecpus_count++;
}
+
+ /*
+ * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
+ */
+ if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+ !is_sched_load_balance(parent))
+ clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
+
spin_unlock_irq(&callback_lock);
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
@@ -3521,17 +3584,16 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
is_empty = cpumask_empty(cs->cpus_allowed) ||
nodes_empty(cs->mems_allowed);
- mutex_unlock(&cpuset_mutex);
-
/*
* Move tasks to the nearest ancestor with execution resources,
* This is full cgroup operation which will also call back into
* cpuset. Should be done outside any lock.
*/
- if (is_empty)
+ if (is_empty) {
+ mutex_unlock(&cpuset_mutex);
remove_tasks_in_empty_cpuset(cs);
-
- mutex_lock(&cpuset_mutex);
+ mutex_lock(&cpuset_mutex);
+ }
}
static void
@@ -3691,6 +3753,7 @@ unlock:
/**
* cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
+ * @work: unused
*
* This function is called after either CPU or memory configuration has
* changed and updates cpuset accordingly. The top_cpuset is always
@@ -4073,6 +4136,7 @@ bool cpuset_node_allowed(int node, gfp_t gfp_mask)
/**
* cpuset_spread_node() - On which node to begin search for a page
+ * @rotor: round robin rotor
*
* If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
* tasks in a cpuset with is_spread_page or is_spread_slab set),
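The decision made by the new update_partition_sd_lb() helper can be stated compactly: load balancing is wanted for every state except isolated, and sched domains are rebuilt when that flag flips or when either the old or the new state is a partition root. A standalone sketch of that decision, with invented constants that only illustrate the logic:

#include <stdbool.h>
#include <stdio.h>

enum prs { PRS_MEMBER = 0, PRS_ROOT = 1, PRS_ISOLATED = 2 };

static bool want_load_balance(enum prs prs)
{
	return prs != PRS_ISOLATED;
}

static bool need_domain_rebuild(enum prs old_prs, enum prs new_prs,
				bool old_lb)
{
	return want_load_balance(new_prs) != old_lb ||
	       old_prs > 0 || new_prs > 0;
}

int main(void)
{
	printf("member->isolated: rebuild=%d\n",
	       need_domain_rebuild(PRS_MEMBER, PRS_ISOLATED, true));	/* 1 */
	printf("member->member:   rebuild=%d\n",
	       need_domain_rebuild(PRS_MEMBER, PRS_MEMBER, true));	/* 0 */
	return 0;
}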
diff --git a/kernel/cgroup/misc.c b/kernel/cgroup/misc.c
index ae2f4dd47508..79a3717a5803 100644
--- a/kernel/cgroup/misc.c
+++ b/kernel/cgroup/misc.c
@@ -14,7 +14,7 @@
#include <linux/misc_cgroup.h>
#define MAX_STR "max"
-#define MAX_NUM ULONG_MAX
+#define MAX_NUM U64_MAX
/* Miscellaneous res name, keep it in sync with enum misc_res_type */
static const char *const misc_res_name[] = {
@@ -37,7 +37,7 @@ static struct misc_cg root_cg;
* more than the actual capacity. We are using Limits resource distribution
* model of cgroup for miscellaneous controller.
*/
-static unsigned long misc_res_capacity[MISC_CG_RES_TYPES];
+static u64 misc_res_capacity[MISC_CG_RES_TYPES];
/**
* parent_misc() - Get the parent of the passed misc cgroup.
@@ -74,10 +74,10 @@ static inline bool valid_type(enum misc_res_type type)
* Context: Any context.
* Return: Current total usage of the resource.
*/
-unsigned long misc_cg_res_total_usage(enum misc_res_type type)
+u64 misc_cg_res_total_usage(enum misc_res_type type)
{
if (valid_type(type))
- return atomic_long_read(&root_cg.res[type].usage);
+ return atomic64_read(&root_cg.res[type].usage);
return 0;
}
@@ -95,7 +95,7 @@ EXPORT_SYMBOL_GPL(misc_cg_res_total_usage);
* * %0 - Successfully registered the capacity.
* * %-EINVAL - If @type is invalid.
*/
-int misc_cg_set_capacity(enum misc_res_type type, unsigned long capacity)
+int misc_cg_set_capacity(enum misc_res_type type, u64 capacity)
{
if (!valid_type(type))
return -EINVAL;
@@ -114,9 +114,9 @@ EXPORT_SYMBOL_GPL(misc_cg_set_capacity);
* Context: Any context.
*/
static void misc_cg_cancel_charge(enum misc_res_type type, struct misc_cg *cg,
- unsigned long amount)
+ u64 amount)
{
- WARN_ONCE(atomic_long_add_negative(-amount, &cg->res[type].usage),
+ WARN_ONCE(atomic64_add_negative(-amount, &cg->res[type].usage),
"misc cgroup resource %s became less than 0",
misc_res_name[type]);
}
@@ -137,13 +137,12 @@ static void misc_cg_cancel_charge(enum misc_res_type type, struct misc_cg *cg,
* * -EBUSY - If max limit will be crossed or total usage will be more than the
* capacity.
*/
-int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg,
- unsigned long amount)
+int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg, u64 amount)
{
struct misc_cg *i, *j;
int ret;
struct misc_res *res;
- int new_usage;
+ u64 new_usage;
if (!(valid_type(type) && cg && READ_ONCE(misc_res_capacity[type])))
return -EINVAL;
@@ -154,7 +153,7 @@ int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg,
for (i = cg; i; i = parent_misc(i)) {
res = &i->res[type];
- new_usage = atomic_long_add_return(amount, &res->usage);
+ new_usage = atomic64_add_return(amount, &res->usage);
if (new_usage > READ_ONCE(res->max) ||
new_usage > READ_ONCE(misc_res_capacity[type])) {
ret = -EBUSY;
@@ -165,7 +164,7 @@ int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg,
err_charge:
for (j = i; j; j = parent_misc(j)) {
- atomic_long_inc(&j->res[type].events);
+ atomic64_inc(&j->res[type].events);
cgroup_file_notify(&j->events_file);
}
@@ -184,8 +183,7 @@ EXPORT_SYMBOL_GPL(misc_cg_try_charge);
*
* Context: Any context.
*/
-void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg,
- unsigned long amount)
+void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg, u64 amount)
{
struct misc_cg *i;
@@ -209,7 +207,7 @@ static int misc_cg_max_show(struct seq_file *sf, void *v)
{
int i;
struct misc_cg *cg = css_misc(seq_css(sf));
- unsigned long max;
+ u64 max;
for (i = 0; i < MISC_CG_RES_TYPES; i++) {
if (READ_ONCE(misc_res_capacity[i])) {
@@ -217,7 +215,7 @@ static int misc_cg_max_show(struct seq_file *sf, void *v)
if (max == MAX_NUM)
seq_printf(sf, "%s max\n", misc_res_name[i]);
else
- seq_printf(sf, "%s %lu\n", misc_res_name[i],
+ seq_printf(sf, "%s %llu\n", misc_res_name[i],
max);
}
}
@@ -241,13 +239,13 @@ static int misc_cg_max_show(struct seq_file *sf, void *v)
* Return:
* * >= 0 - Number of bytes processed in the input.
* * -EINVAL - If buf is not valid.
- * * -ERANGE - If number is bigger than the unsigned long capacity.
+ * * -ERANGE - If number is bigger than the u64 capacity.
*/
static ssize_t misc_cg_max_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off)
{
struct misc_cg *cg;
- unsigned long max;
+ u64 max;
int ret = 0, i;
enum misc_res_type type = MISC_CG_RES_TYPES;
char *token;
@@ -271,7 +269,7 @@ static ssize_t misc_cg_max_write(struct kernfs_open_file *of, char *buf,
if (!strcmp(MAX_STR, buf)) {
max = MAX_NUM;
} else {
- ret = kstrtoul(buf, 0, &max);
+ ret = kstrtou64(buf, 0, &max);
if (ret)
return ret;
}
@@ -297,13 +295,13 @@ static ssize_t misc_cg_max_write(struct kernfs_open_file *of, char *buf,
static int misc_cg_current_show(struct seq_file *sf, void *v)
{
int i;
- unsigned long usage;
+ u64 usage;
struct misc_cg *cg = css_misc(seq_css(sf));
for (i = 0; i < MISC_CG_RES_TYPES; i++) {
- usage = atomic_long_read(&cg->res[i].usage);
+ usage = atomic64_read(&cg->res[i].usage);
if (READ_ONCE(misc_res_capacity[i]) || usage)
- seq_printf(sf, "%s %lu\n", misc_res_name[i], usage);
+ seq_printf(sf, "%s %llu\n", misc_res_name[i], usage);
}
return 0;
@@ -322,12 +320,12 @@ static int misc_cg_current_show(struct seq_file *sf, void *v)
static int misc_cg_capacity_show(struct seq_file *sf, void *v)
{
int i;
- unsigned long cap;
+ u64 cap;
for (i = 0; i < MISC_CG_RES_TYPES; i++) {
cap = READ_ONCE(misc_res_capacity[i]);
if (cap)
- seq_printf(sf, "%s %lu\n", misc_res_name[i], cap);
+ seq_printf(sf, "%s %llu\n", misc_res_name[i], cap);
}
return 0;
@@ -336,12 +334,13 @@ static int misc_cg_capacity_show(struct seq_file *sf, void *v)
static int misc_events_show(struct seq_file *sf, void *v)
{
struct misc_cg *cg = css_misc(seq_css(sf));
- unsigned long events, i;
+ u64 events;
+ int i;
for (i = 0; i < MISC_CG_RES_TYPES; i++) {
- events = atomic_long_read(&cg->res[i].events);
+ events = atomic64_read(&cg->res[i].events);
if (READ_ONCE(misc_res_capacity[i]) || events)
- seq_printf(sf, "%s.max %lu\n", misc_res_name[i], events);
+ seq_printf(sf, "%s.max %llu\n", misc_res_name[i], events);
}
return 0;
}
@@ -397,7 +396,7 @@ misc_cg_alloc(struct cgroup_subsys_state *parent_css)
for (i = 0; i < MISC_CG_RES_TYPES; i++) {
WRITE_ONCE(cg->res[i].max, MAX_NUM);
- atomic_long_set(&cg->res[i].usage, 0);
+ atomic64_set(&cg->res[i].usage, 0);
}
return &cg->css;
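The switch to 64-bit counters leaves the charging algorithm untouched: charge each level while walking up the hierarchy, and roll back the already-charged levels if any level would exceed its limit. A self-contained sketch of that walk-up/roll-back pattern using C11 atomics (types, limits and names are invented):

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct node {
	struct node *parent;
	_Atomic uint64_t usage;
	uint64_t max;
};

/* Charge 'amount' on 'cg' and all ancestors; undo everything on failure. */
static int try_charge(struct node *cg, uint64_t amount)
{
	struct node *i, *j;

	for (i = cg; i; i = i->parent) {
		uint64_t new_usage =
			atomic_fetch_add(&i->usage, amount) + amount;
		if (new_usage > i->max)
			goto undo;
	}
	return 0;
undo:
	/* 'i' itself was charged too, so include it in the rollback. */
	for (j = cg; j != i->parent; j = j->parent)
		atomic_fetch_sub(&j->usage, amount);
	return -EBUSY;
}

int main(void)
{
	struct node root  = { .parent = NULL,  .max = 100 };
	struct node child = { .parent = &root, .max = 10 };

	printf("charge 8: %d\n", try_charge(&child, 8));	/* 0 */
	printf("charge 8: %d\n", try_charge(&child, 8));	/* -EBUSY, rolled back */
	printf("child usage: %llu\n",
	       (unsigned long long)atomic_load(&child.usage));	/* 8 */
	return 0;
}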
diff --git a/kernel/cgroup/namespace.c b/kernel/cgroup/namespace.c
index 0d5c29879a50..144a464e45c6 100644
--- a/kernel/cgroup/namespace.c
+++ b/kernel/cgroup/namespace.c
@@ -149,9 +149,3 @@ const struct proc_ns_operations cgroupns_operations = {
.install = cgroupns_install,
.owner = cgroupns_owner,
};
-
-static __init int cgroup_namespaces_init(void)
-{
- return 0;
-}
-subsys_initcall(cgroup_namespaces_init);
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 2542c21b6b6d..d80d7a608141 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -344,6 +344,7 @@ static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
{
struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
struct cgroup *parent = cgroup_parent(cgrp);
+ struct cgroup_rstat_cpu *prstatc;
struct cgroup_base_stat delta;
unsigned seq;
@@ -357,17 +358,24 @@ static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
delta = rstatc->bstat;
} while (__u64_stats_fetch_retry(&rstatc->bsync, seq));
- /* propagate percpu delta to global */
+ /* propagate per-cpu delta to cgroup and per-cpu global statistics */
cgroup_base_stat_sub(&delta, &rstatc->last_bstat);
cgroup_base_stat_add(&cgrp->bstat, &delta);
cgroup_base_stat_add(&rstatc->last_bstat, &delta);
+ cgroup_base_stat_add(&rstatc->subtree_bstat, &delta);
- /* propagate global delta to parent (unless that's root) */
+ /* propagate cgroup and per-cpu global delta to parent (unless that's root) */
if (cgroup_parent(parent)) {
delta = cgrp->bstat;
cgroup_base_stat_sub(&delta, &cgrp->last_bstat);
cgroup_base_stat_add(&parent->bstat, &delta);
cgroup_base_stat_add(&cgrp->last_bstat, &delta);
+
+ delta = rstatc->subtree_bstat;
+ prstatc = cgroup_rstat_cpu(parent, cpu);
+ cgroup_base_stat_sub(&delta, &rstatc->last_subtree_bstat);
+ cgroup_base_stat_add(&prstatc->subtree_bstat, &delta);
+ cgroup_base_stat_add(&rstatc->last_subtree_bstat, &delta);
}
}
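The new subtree_bstat/last_subtree_bstat pair relies on the same "delta since the last flush" bookkeeping as the existing per-cgroup fields: compute the delta against the last propagated value, add it to the aggregation target, then advance the last value by the same delta. A single-counter sketch of that bookkeeping (names invented):

#include <stdint.h>
#include <stdio.h>

struct stat_pair {
	uint64_t cur;	/* running counter, updated by producers */
	uint64_t last;	/* value already propagated to the parent */
};

/* Propagate whatever accumulated in 'child' since the previous flush. */
static void flush_delta(struct stat_pair *child, uint64_t *parent_total)
{
	uint64_t delta = child->cur - child->last;

	*parent_total += delta;
	child->last += delta;
}

int main(void)
{
	struct stat_pair child = { 0, 0 };
	uint64_t parent = 0;

	child.cur = 100;
	flush_delta(&child, &parent);	/* parent = 100 */
	child.cur = 130;
	flush_delta(&child, &parent);	/* parent = 130, only +30 added */
	printf("parent = %llu\n", (unsigned long long)parent);
	return 0;
}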
diff --git a/kernel/configs/debug.config b/kernel/configs/debug.config
index e8db8d938661..4722b998a324 100644
--- a/kernel/configs/debug.config
+++ b/kernel/configs/debug.config
@@ -1,3 +1,5 @@
+# Help: Debugging for CI systems and finding regressions
+#
# The config is based on running daily CI for enterprise Linux distros to
# seek regressions on linux-next builds on different bare-metal and virtual
# platforms. It can be used for example,
diff --git a/kernel/configs/kvm_guest.config b/kernel/configs/kvm_guest.config
index 208481d91090..d0877063d925 100644
--- a/kernel/configs/kvm_guest.config
+++ b/kernel/configs/kvm_guest.config
@@ -1,3 +1,4 @@
+# Help: Bootable as a KVM guest
CONFIG_NET=y
CONFIG_NET_CORE=y
CONFIG_NETDEVICES=y
diff --git a/kernel/configs/nopm.config b/kernel/configs/nopm.config
index 81ff07863576..ebfdc3d8aa9a 100644
--- a/kernel/configs/nopm.config
+++ b/kernel/configs/nopm.config
@@ -1,3 +1,5 @@
+# Help: Disable Power Management
+
CONFIG_PM=n
CONFIG_SUSPEND=n
CONFIG_HIBERNATION=n
diff --git a/kernel/configs/rust.config b/kernel/configs/rust.config
index 38a7c5362c9c..2c6e001a7284 100644
--- a/kernel/configs/rust.config
+++ b/kernel/configs/rust.config
@@ -1 +1,2 @@
+# Help: Enable Rust
CONFIG_RUST=y
diff --git a/kernel/configs/x86_debug.config b/kernel/configs/x86_debug.config
index 6fac5b405334..35f48671b8d5 100644
--- a/kernel/configs/x86_debug.config
+++ b/kernel/configs/x86_debug.config
@@ -1,3 +1,4 @@
+# Help: Debugging options for tip tree testing
CONFIG_X86_DEBUG_FPU=y
CONFIG_LOCK_STAT=y
CONFIG_DEBUG_VM=y
diff --git a/kernel/configs/xen.config b/kernel/configs/xen.config
index 436f806aa1ed..6878b9a49be8 100644
--- a/kernel/configs/xen.config
+++ b/kernel/configs/xen.config
@@ -1,3 +1,5 @@
+# Help: Bootable as a Xen guest
+#
# global stuff - these enable us to allow some
# of the not so generic stuff below for xen
CONFIG_PARAVIRT=y
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f6811c857102..6de7c6bb74ee 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1487,8 +1487,22 @@ out:
return ret;
}
+struct cpu_down_work {
+ unsigned int cpu;
+ enum cpuhp_state target;
+};
+
+static long __cpu_down_maps_locked(void *arg)
+{
+ struct cpu_down_work *work = arg;
+
+ return _cpu_down(work->cpu, 0, work->target);
+}
+
static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
+ struct cpu_down_work work = { .cpu = cpu, .target = target, };
+
/*
* If the platform does not support hotplug, report it explicitly to
* differentiate it from a transient offlining failure.
@@ -1497,7 +1511,15 @@ static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
return -EOPNOTSUPP;
if (cpu_hotplug_disabled)
return -EBUSY;
- return _cpu_down(cpu, 0, target);
+
+ /*
+ * Ensure that the control task does not run on the to be offlined
+ * CPU to prevent a deadlock against cfs_b->period_timer.
+ */
+ cpu = cpumask_any_but(cpu_online_mask, cpu);
+ if (cpu >= nr_cpu_ids)
+ return -EBUSY;
+ return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
}
static int cpu_down(unsigned int cpu, enum cpuhp_state target)
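The fix punts the actual offlining to a different online CPU via work_on_cpu(), so the controlling task can never be running on the CPU it is about to take down. The CPU selection itself is just "any online CPU other than the target"; a standalone sketch with a plain 64-bit mask instead of the kernel's cpumask API (and returning the lowest candidate rather than an arbitrary one):

#include <stdint.h>
#include <stdio.h>

/* Return any set bit in 'online' other than 'cpu', or -1 if none remain. */
static int any_online_but(uint64_t online, int cpu)
{
	uint64_t candidates = online & ~(UINT64_C(1) << cpu);

	if (!candidates)
		return -1;
	return __builtin_ctzll(candidates);	/* lowest remaining CPU */
}

int main(void)
{
	uint64_t online = 0x0f;		/* CPUs 0-3 online */

	printf("offline 2 from CPU %d\n", any_online_but(online, 2));	/* 0 */
	printf("offline 0 from CPU %d\n", any_online_but(online, 0));	/* 1 */
	printf("last CPU: %d\n", any_online_but(0x1, 0));		/* -1 */
	return 0;
}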
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index d5e9ccde3ab8..621037a0aa87 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -968,7 +968,7 @@ static int __init opt_kgdb_con(char *str)
early_param("kgdbcon", opt_kgdb_con);
#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_dbg(int key)
+static void sysrq_handle_dbg(u8 key)
{
if (!dbg_io_ops) {
pr_crit("ERROR: No KGDB I/O module available\n");
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 813cb6cf72d6..9443bc63c5a2 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -590,6 +590,8 @@ static void kdb_msg_write(const char *msg, int msg_len)
continue;
if (c == dbg_io_ops->cons)
continue;
+ if (!c->write)
+ continue;
/*
* Set oops_in_progress to encourage the console drivers to
* disregard their internal spin locks: in the current calling
diff --git a/kernel/fork.c b/kernel/fork.c
index a9c18d480dc5..3b6d20dfb9a8 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -909,8 +909,6 @@ static void cleanup_lazy_tlbs(struct mm_struct *mm)
*/
void __mmdrop(struct mm_struct *mm)
{
- int i;
-
BUG_ON(mm == &init_mm);
WARN_ON_ONCE(mm == current->mm);
@@ -925,9 +923,8 @@ void __mmdrop(struct mm_struct *mm)
put_user_ns(mm->user_ns);
mm_pasid_drop(mm);
mm_destroy_cid(mm);
+ percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS);
- for (i = 0; i < NR_MM_COUNTERS; i++)
- percpu_counter_destroy(&mm->rss_stat[i]);
free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
@@ -1260,8 +1257,6 @@ static void mm_init_uprobes_state(struct mm_struct *mm)
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
struct user_namespace *user_ns)
{
- int i;
-
mt_init_flags(&mm->mm_mt, MM_MT_FLAGS);
mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
atomic_set(&mm->mm_users, 1);
@@ -1309,17 +1304,15 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
if (mm_alloc_cid(mm))
goto fail_cid;
- for (i = 0; i < NR_MM_COUNTERS; i++)
- if (percpu_counter_init(&mm->rss_stat[i], 0, GFP_KERNEL_ACCOUNT))
- goto fail_pcpu;
+ if (percpu_counter_init_many(mm->rss_stat, 0, GFP_KERNEL_ACCOUNT,
+ NR_MM_COUNTERS))
+ goto fail_pcpu;
mm->user_ns = get_user_ns(user_ns);
lru_gen_init_mm(mm);
return mm;
fail_pcpu:
- while (i > 0)
- percpu_counter_destroy(&mm->rss_stat[--i]);
mm_destroy_cid(mm);
fail_cid:
destroy_context(mm);
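
A minimal sketch of the batched per-CPU counter API used above; struct demo_stats and the helpers are hypothetical, while the percpu_counter_init_many()/percpu_counter_destroy_many() calls mirror the fork.c usage. The batch variant either initialises all counters or unwinds the ones it already set up, which is why the old fail_pcpu loop can go away.

#include <linux/percpu_counter.h>

struct demo_stats {
	struct percpu_counter cnt[4];
};

static int demo_stats_init(struct demo_stats *ds)
{
	/* One call instead of a per-counter loop with manual unwinding. */
	return percpu_counter_init_many(ds->cnt, 0, GFP_KERNEL, ARRAY_SIZE(ds->cnt));
}

static void demo_stats_destroy(struct demo_stats *ds)
{
	percpu_counter_destroy_many(ds->cnt, ARRAY_SIZE(ds->cnt));
}
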
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ca385b61d546..0c6185aefaef 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2232,8 +2232,7 @@ int register_kretprobe(struct kretprobe *rp)
return -ENOMEM;
for (i = 0; i < rp->maxactive; i++) {
- inst = kzalloc(sizeof(struct kretprobe_instance) +
- rp->data_size, GFP_KERNEL);
+ inst = kzalloc(struct_size(inst, data, rp->data_size), GFP_KERNEL);
if (inst == NULL) {
rethook_free(rp->rh);
rp->rh = NULL;
@@ -2256,8 +2255,7 @@ int register_kretprobe(struct kretprobe *rp)
rp->rph->rp = rp;
for (i = 0; i < rp->maxactive; i++) {
- inst = kzalloc(sizeof(struct kretprobe_instance) +
- rp->data_size, GFP_KERNEL);
+ inst = kzalloc(struct_size(inst, data, rp->data_size), GFP_KERNEL);
if (inst == NULL) {
refcount_set(&rp->rph->ref, i);
free_rp_inst(rp);
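
A minimal sketch of the struct_size() idiom adopted above, assuming a hypothetical struct demo_inst with a trailing flexible array; it replaces the open-coded sizeof(struct ...) + len arithmetic and saturates on overflow instead of wrapping.

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_inst {
	size_t	data_size;
	char	data[];			/* data_size bytes follow the header */
};

static struct demo_inst *demo_inst_alloc(size_t data_size)
{
	/* struct_size() == sizeof(*inst) + data_size, with overflow checking. */
	struct demo_inst *inst = kzalloc(struct_size(inst, data, data_size), GFP_KERNEL);

	if (inst)
		inst->data_size = data_size;
	return inst;
}
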
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index 562aa0e450ed..1f306f158696 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -23,7 +23,7 @@ static void do_poweroff(struct work_struct *dummy)
static DECLARE_WORK(poweroff_work, do_poweroff);
-static void handle_poweroff(int key)
+static void handle_poweroff(u8 key)
{
/* run sysrq poweroff on boot cpu */
schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 2a17704136f1..7d4979d5c3ce 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -103,3 +103,5 @@ struct printk_message {
u64 seq;
unsigned long dropped;
};
+
+bool other_cpu_in_panic(void);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 357a4d18f638..7e0b4dd02398 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -88,7 +88,7 @@ EXPORT_SYMBOL(oops_in_progress);
static DEFINE_MUTEX(console_mutex);
/*
- * console_sem protects updates to console->seq and console_suspended,
+ * console_sem protects updates to console->seq
* and also provides serialization for console printing.
*/
static DEFINE_SEMAPHORE(console_sem, 1);
@@ -361,7 +361,7 @@ static bool panic_in_progress(void)
* paths in the console code where we end up in places I want
* locked without the console semaphore held).
*/
-static int console_locked, console_suspended;
+static int console_locked;
/*
* Array of consoles built from command line options (console=)
@@ -2308,7 +2308,11 @@ asmlinkage int vprintk_emit(int facility, int level,
preempt_enable();
}
- wake_up_klogd();
+ if (in_sched)
+ defer_console_output();
+ else
+ wake_up_klogd();
+
return printed_len;
}
EXPORT_SYMBOL(vprintk_emit);
@@ -2547,22 +2551,46 @@ MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to hig
*/
void suspend_console(void)
{
+ struct console *con;
+
if (!console_suspend_enabled)
return;
pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
pr_flush(1000, true);
- console_lock();
- console_suspended = 1;
- up_console_sem();
+
+ console_list_lock();
+ for_each_console(con)
+ console_srcu_write_flags(con, con->flags | CON_SUSPENDED);
+ console_list_unlock();
+
+ /*
+ * Ensure that all SRCU list walks have completed. All printing
+ * contexts must be able to see that they are suspended so that it
+ * is guaranteed that all printing has stopped when this function
+ * completes.
+ */
+ synchronize_srcu(&console_srcu);
}
void resume_console(void)
{
+ struct console *con;
+
if (!console_suspend_enabled)
return;
- down_console_sem();
- console_suspended = 0;
- console_unlock();
+
+ console_list_lock();
+ for_each_console(con)
+ console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
+ console_list_unlock();
+
+ /*
+ * Ensure that all SRCU list walks have completed. All printing
+ * contexts must be able to see they are no longer suspended so
+ * that they are guaranteed to wake up and resume printing.
+ */
+ synchronize_srcu(&console_srcu);
+
pr_flush(1000, true);
}
@@ -2585,6 +2613,26 @@ static int console_cpu_notify(unsigned int cpu)
return 0;
}
+/*
+ * Return true if a panic is in progress on a remote CPU.
+ *
+ * On true, the local CPU should immediately release any printing resources
+ * that may be needed by the panic CPU.
+ */
+bool other_cpu_in_panic(void)
+{
+ if (!panic_in_progress())
+ return false;
+
+ /*
+ * We can use raw_smp_processor_id() here because it is impossible for
+ * the task to be migrated to the panic_cpu, or away from it. If
+ * panic_cpu has already been set, and we're not currently executing on
+ * that CPU, then we never will be.
+ */
+ return atomic_read(&panic_cpu) != raw_smp_processor_id();
+}
+
/**
* console_lock - block the console subsystem from printing
*
@@ -2597,9 +2645,11 @@ void console_lock(void)
{
might_sleep();
+ /* On panic, the console_lock must be left to the panic cpu. */
+ while (other_cpu_in_panic())
+ msleep(1000);
+
down_console_sem();
- if (console_suspended)
- return;
console_locked = 1;
console_may_schedule = 1;
}
@@ -2615,12 +2665,11 @@ EXPORT_SYMBOL(console_lock);
*/
int console_trylock(void)
{
- if (down_trylock_console_sem())
+ /* On panic, the console_lock must be left to the panic cpu. */
+ if (other_cpu_in_panic())
return 0;
- if (console_suspended) {
- up_console_sem();
+ if (down_trylock_console_sem())
return 0;
- }
console_locked = 1;
console_may_schedule = 0;
return 1;
@@ -2634,25 +2683,6 @@ int is_console_locked(void)
EXPORT_SYMBOL(is_console_locked);
/*
- * Return true when this CPU should unlock console_sem without pushing all
- * messages to the console. This reduces the chance that the console is
- * locked when the panic CPU tries to use it.
- */
-static bool abandon_console_lock_in_panic(void)
-{
- if (!panic_in_progress())
- return false;
-
- /*
- * We can use raw_smp_processor_id() here because it is impossible for
- * the task to be migrated to the panic_cpu, or away from it. If
- * panic_cpu has already been set, and we're not currently executing on
- * that CPU, then we never will be.
- */
- return atomic_read(&panic_cpu) != raw_smp_processor_id();
-}
-
-/*
* Check if the given console is currently capable and allowed to print
* records.
*
@@ -2665,6 +2695,9 @@ static inline bool console_is_usable(struct console *con)
if (!(flags & CON_ENABLED))
return false;
+ if ((flags & CON_SUSPENDED))
+ return false;
+
if (!con->write)
return false;
@@ -2948,7 +2981,7 @@ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handove
any_progress = true;
/* Allow panic_cpu to take over the consoles safely. */
- if (abandon_console_lock_in_panic())
+ if (other_cpu_in_panic())
goto abandon;
if (do_cond_resched)
@@ -2983,11 +3016,6 @@ void console_unlock(void)
bool flushed;
u64 next_seq;
- if (console_suspended) {
- up_console_sem();
- return;
- }
-
/*
* Console drivers are called with interrupts disabled, so
* @console_may_schedule should be cleared before; however, we may
@@ -3045,10 +3073,28 @@ EXPORT_SYMBOL(console_conditional_schedule);
void console_unblank(void)
{
+ bool found_unblank = false;
struct console *c;
int cookie;
/*
+ * First check if there are any consoles implementing the unblank()
+ * callback. If not, there is no reason to continue and take the
+ * console lock, which in particular can be dangerous if
+ * @oops_in_progress is set.
+ */
+ cookie = console_srcu_read_lock();
+ for_each_console_srcu(c) {
+ if ((console_srcu_read_flags(c) & CON_ENABLED) && c->unblank) {
+ found_unblank = true;
+ break;
+ }
+ }
+ console_srcu_read_unlock(cookie);
+ if (!found_unblank)
+ return;
+
+ /*
* Stop console printing because the unblank() callback may
* assume the console is not within its write() callback.
*
@@ -3056,6 +3102,16 @@ void console_unblank(void)
* In that case, attempt a trylock as best-effort.
*/
if (oops_in_progress) {
+ /* Semaphores are not NMI-safe. */
+ if (in_nmi())
+ return;
+
+ /*
+ * Attempting to trylock the console lock can deadlock
+ * if another CPU was stopped while modifying the
+ * semaphore. "Hope and pray" that this is not the
+ * current situation.
+ */
if (down_trylock_console_sem() != 0)
return;
} else
@@ -3085,14 +3141,24 @@ void console_unblank(void)
*/
void console_flush_on_panic(enum con_flush_mode mode)
{
+ bool handover;
+ u64 next_seq;
+
/*
- * If someone else is holding the console lock, trylock will fail
- * and may_schedule may be set. Ignore and proceed to unlock so
- * that messages are flushed out. As this can be called from any
- * context and we don't want to get preempted while flushing,
- * ensure may_schedule is cleared.
+ * Ignore the console lock and flush out the messages. Attempting a
+ * trylock would not be useful because:
+ *
+ * - if it is contended, it must be ignored anyway
+ * - console_lock() and console_trylock() block and fail
+ * respectively in panic for non-panic CPUs
+ * - semaphores are not NMI-safe
+ */
+
+ /*
+ * If another context is holding the console lock,
+ * @console_may_schedule might be set. Clear it so that
+ * this context does not call cond_resched() while flushing.
*/
- console_trylock();
console_may_schedule = 0;
if (mode == CONSOLE_REPLAY_ALL) {
@@ -3105,15 +3171,15 @@ void console_flush_on_panic(enum con_flush_mode mode)
cookie = console_srcu_read_lock();
for_each_console_srcu(c) {
/*
- * If the above console_trylock() failed, this is an
- * unsynchronized assignment. But in that case, the
+ * This is an unsynchronized assignment, but the
* kernel is in "hope and pray" mode anyway.
*/
c->seq = seq;
}
console_srcu_read_unlock(cookie);
}
- console_unlock();
+
+ console_flush_all(false, &next_seq, &handover);
}
/*
@@ -3679,8 +3745,7 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
/*
* Hold the console_lock to guarantee safe access to
- * console->seq and to prevent changes to @console_suspended
- * until all consoles have been processed.
+ * console->seq.
*/
console_lock();
@@ -3688,6 +3753,11 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
for_each_console_srcu(c) {
if (con && con != c)
continue;
+ /*
+ * If consoles are not usable, it cannot be expected
+ * that they make forward progress, so only increment
+ * @diff for usable consoles.
+ */
if (!console_is_usable(c))
continue;
printk_seq = c->seq;
@@ -3696,18 +3766,12 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
}
console_srcu_read_unlock(cookie);
- /*
- * If consoles are suspended, it cannot be expected that they
- * make forward progress, so timeout immediately. @diff is
- * still used to return a valid flush status.
- */
- if (console_suspended)
- remaining = 0;
- else if (diff != last_diff && reset_on_progress)
+ if (diff != last_diff && reset_on_progress)
remaining = timeout_ms;
console_unlock();
+ /* Note: @diff is 0 if there are no usable consoles. */
if (diff == 0 || remaining == 0)
break;
@@ -3741,7 +3805,7 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
* printer has been seen to make some forward progress.
*
* Context: Process context. May sleep while acquiring console lock.
- * Return: true if all enabled printers are caught up.
+ * Return: true if all usable printers are caught up.
*/
static bool pr_flush(int timeout_ms, bool reset_on_progress)
{
@@ -3798,11 +3862,33 @@ static void __wake_up_klogd(int val)
preempt_enable();
}
+/**
+ * wake_up_klogd - Wake kernel logging daemon
+ *
+ * Use this function when new records have been added to the ringbuffer
+ * and the console printing of those records has already occurred or is
+ * known to be handled by some other context. This function will only
+ * wake the logging daemon.
+ *
+ * Context: Any context.
+ */
void wake_up_klogd(void)
{
__wake_up_klogd(PRINTK_PENDING_WAKEUP);
}
+/**
+ * defer_console_output - Wake kernel logging daemon and trigger
+ * console printing in a deferred context
+ *
+ * Use this function when new records have been added to the ringbuffer
+ * and this context is responsible for console printing those records,
+ * but the current context is not allowed to perform the console printing.
+ * Trigger an irq_work context to perform the console printing. This
+ * function also wakes the logging daemon.
+ *
+ * Context: Any context.
+ */
void defer_console_output(void)
{
/*
@@ -3819,12 +3905,7 @@ void printk_trigger_flush(void)
int vprintk_deferred(const char *fmt, va_list args)
{
- int r;
-
- r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
- defer_console_output();
-
- return r;
+ return vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
}
int _printk_deferred(const char *fmt, ...)
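
A condensed sketch of the suspend protocol introduced above. The symbols are internal to kernel/printk/printk.c and the wrapper is hypothetical: the writer flips a per-console flag under console_list_lock() and then waits out all SRCU readers, so every printing context is guaranteed to observe the new state before the writer returns.

/* Illustrative wrapper, assuming the printk.c-local console_srcu. */
static void demo_set_console_flag(short flag, bool set)
{
	struct console *con;

	console_list_lock();
	for_each_console(con) {
		short flags = set ? (con->flags | flag) : (con->flags & ~flag);

		console_srcu_write_flags(con, flags);
	}
	console_list_unlock();

	/* After this, no SRCU read section that saw the old flags is still running. */
	synchronize_srcu(&console_srcu);
}
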
diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c
index 2dc4d5a1f1ff..fde338606ce8 100644
--- a/kernel/printk/printk_ringbuffer.c
+++ b/kernel/printk/printk_ringbuffer.c
@@ -1735,7 +1735,7 @@ static bool copy_data(struct prb_data_ring *data_ring,
if (!buf || !buf_size)
return true;
- data_size = min_t(u16, buf_size, len);
+ data_size = min_t(unsigned int, buf_size, len);
memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */
return true;
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index ef0f9a2044da..6d10927a07d8 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -38,13 +38,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
* Use the main logbuf even in NMI. But avoid calling console
* drivers that might have their own locks.
*/
- if (this_cpu_read(printk_context) || in_nmi()) {
- int len;
-
- len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
- defer_console_output();
- return len;
- }
+ if (this_cpu_read(printk_context) || in_nmi())
+ return vprintk_deferred(fmt, args);
/* No obstacles. */
return vprintk_default(fmt, args);
diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
index b10b8349bb2a..6f06dc12904a 100644
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
@@ -1035,7 +1035,7 @@ static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);
/* Dump grace-period-request information due to commandeered sysrq. */
-static void sysrq_show_rcu(int key)
+static void sysrq_show_rcu(u8 key)
{
show_rcu_gp_kthreads();
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 911d0063763c..8dbff6e7ad4f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -699,7 +699,7 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
*
* XXX could add max_slice to the augmented data to track this.
*/
-void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
s64 lag, limit;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 4df14db4da49..87015e9deacc 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1045,7 +1045,7 @@ static bool report_idle_softirq(void)
return false;
/* On RT, softirqs handling may be waiting on some lock */
- if (!local_bh_blocked())
+ if (local_bh_blocked())
return false;
pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n",
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 64b61f67a403..057cd975d014 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
endif
obj-$(CONFIG_DYNAMIC_EVENTS) += trace_dynevent.o
obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
+obj-$(CONFIG_PROBE_EVENTS_BTF_ARGS) += trace_btf.o
obj-$(CONFIG_UPROBE_EVENTS) += trace_uprobe.o
obj-$(CONFIG_BOOTTIME_TRACING) += trace_boot.o
obj-$(CONFIG_FTRACE_RECORD_RECURSION) += trace_recursion_record.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 05c0024815bf..8de8bec5f366 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -6779,8 +6779,7 @@ void ftrace_release_mod(struct module *mod)
last_pg = &ftrace_pages_start;
for (pg = ftrace_pages_start; pg; pg = *last_pg) {
rec = &pg->records[0];
- if (within_module_core(rec->ip, mod) ||
- within_module_init(rec->ip, mod)) {
+ if (within_module(rec->ip, mod)) {
/*
* As core pages are first, the first
* page should never be a module page.
@@ -6852,8 +6851,7 @@ void ftrace_module_enable(struct module *mod)
* not part of this module, then skip this pg,
* which the "break" will do.
*/
- if (!within_module_core(rec->ip, mod) &&
- !within_module_init(rec->ip, mod))
+ if (!within_module(rec->ip, mod))
break;
/* Weak functions should still be ignored */
@@ -7142,9 +7140,7 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
struct dyn_ftrace key;
struct ftrace_mod_map *mod_map = NULL;
struct ftrace_init_func *func, *func_next;
- struct list_head clear_hash;
-
- INIT_LIST_HEAD(&clear_hash);
+ LIST_HEAD(clear_hash);
key.ip = start;
key.flags = end; /* overload flags, as it is unsigned long */
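
A minimal sketch of the on-stack list idiom adopted above: LIST_HEAD() declares and initialises the head in a single statement, replacing the separate INIT_LIST_HEAD() call. struct demo_item and drain_items() are hypothetical; within_module() in the same hunk is the existing helper that combines the core and init range checks.

#include <linux/list.h>
#include <linux/slab.h>

struct demo_item {
	struct list_head node;
};

static void drain_items(struct list_head *src)
{
	LIST_HEAD(tmp);			/* declared and initialised in one step */
	struct demo_item *item, *next;

	list_splice_init(src, &tmp);

	list_for_each_entry_safe(item, next, &tmp, node) {
		list_del(&item->node);
		kfree(item);
	}
}
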
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 52dea5dd5362..78502d4c7214 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -692,10 +692,7 @@ static void rb_time_set(rb_time_t *t, u64 val)
static inline bool
rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
{
- unsigned long ret;
-
- ret = local_cmpxchg(l, expect, set);
- return ret == expect;
+ return local_try_cmpxchg(l, &expect, set);
}
static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
@@ -752,9 +749,7 @@ static void rb_time_set(rb_time_t *t, u64 val)
static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
- u64 val;
- val = local64_cmpxchg(&t->time, expect, set);
- return val == expect;
+ return local64_try_cmpxchg(&t->time, &expect, set);
}
#endif
@@ -1494,14 +1489,11 @@ static bool rb_head_page_replace(struct buffer_page *old,
{
unsigned long *ptr = (unsigned long *)&old->list.prev->next;
unsigned long val;
- unsigned long ret;
val = *ptr & ~RB_FLAG_MASK;
val |= RB_PAGE_HEAD;
- ret = cmpxchg(ptr, val, (unsigned long)&new->list);
-
- return ret == val;
+ return try_cmpxchg(ptr, &val, (unsigned long)&new->list);
}
/*
@@ -3003,7 +2995,6 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
{
unsigned long new_index, old_index;
struct buffer_page *bpage;
- unsigned long index;
unsigned long addr;
u64 write_stamp;
u64 delta;
@@ -3060,8 +3051,9 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
*/
old_index += write_mask;
new_index += write_mask;
- index = local_cmpxchg(&bpage->write, old_index, new_index);
- if (index == old_index) {
+
+ /* caution: old_index gets updated on cmpxchg failure */
+ if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) {
/* update counters */
local_sub(event_length, &cpu_buffer->entries_bytes);
return true;
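
A minimal sketch contrasting the two cmpxchg styles; the helper is hypothetical. local_try_cmpxchg() returns a bool and, on failure, writes the value it actually found back through the expected pointer, which is why the rb_try_to_discard() comment above warns that old_index gets updated.

#include <asm/local.h>

static bool demo_bump_if_unchanged(local_t *counter, long expected, long new_val)
{
	/* Old style: compare the returned previous value by hand:
	 *	return local_cmpxchg(counter, expected, new_val) == expected;
	 * New style: on failure, "expected" is updated to the observed value,
	 * which is convenient for retry loops. */
	return local_try_cmpxchg(counter, &expected, new_val);
}
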
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8e64aaad5361..2b4ded753367 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3119,7 +3119,6 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
struct ftrace_stack *fstack;
struct stack_entry *entry;
int stackidx;
- void *ptr;
/*
* Add one, for this function and the call to save_stack_trace()
@@ -3157,32 +3156,16 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
nr_entries = stack_trace_save(fstack->calls, size, skip);
}
- size = nr_entries * sizeof(unsigned long);
event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
- (sizeof(*entry) - sizeof(entry->caller)) + size,
+ struct_size(entry, caller, nr_entries),
trace_ctx);
if (!event)
goto out;
- ptr = ring_buffer_event_data(event);
- entry = ptr;
-
- /*
- * For backward compatibility reasons, the entry->caller is an
- * array of 8 slots to store the stack. This is also exported
- * to user space. The amount allocated on the ring buffer actually
- * holds enough for the stack specified by nr_entries. This will
- * go into the location of entry->caller. Due to string fortifiers
- * checking the size of the destination of memcpy() it triggers
- * when it detects that size is greater than 8. To hide this from
- * the fortifiers, we use "ptr" and pointer arithmetic to assign caller.
- *
- * The below is really just:
- * memcpy(&entry->caller, fstack->calls, size);
- */
- ptr += offsetof(typeof(*entry), caller);
- memcpy(ptr, fstack->calls, size);
+ entry = ring_buffer_event_data(event);
entry->size = nr_entries;
+ memcpy(&entry->caller, fstack->calls,
+ flex_array_size(entry, caller, nr_entries));
if (!call_filter_check_discard(call, entry, buffer, event))
__buffer_unlock_commit(buffer, event);
@@ -4206,18 +4189,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
loff_t l = 0;
int cpu;
- /*
- * copy the tracer to avoid using a global lock all around.
- * iter->trace is a copy of current_trace, the pointer to the
- * name may be used instead of a strcmp(), as iter->trace->name
- * will point to the same string as current_trace->name.
- */
mutex_lock(&trace_types_lock);
- if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) {
+ if (unlikely(tr->current_trace != iter->trace)) {
/* Close iter->trace before switching to the new current tracer */
if (iter->trace->close)
iter->trace->close(iter);
- *iter->trace = *tr->current_trace;
+ iter->trace = tr->current_trace;
/* Reopen the new current tracer */
if (iter->trace->open)
iter->trace->open(iter);
@@ -4829,6 +4806,25 @@ static const struct seq_operations tracer_seq_ops = {
.show = s_show,
};
+/*
+ * Note, as iter itself can be allocated and freed in different
+ * ways, this function is only used to free its content, and not
+ * the iterator itself. The only requirement on all the allocations
+ * is that they zero all fields (kzalloc), as freeing works with
+ * either allocated content or NULL.
+ */
+static void free_trace_iter_content(struct trace_iterator *iter)
+{
+ /* The fmt is either NULL, allocated or points to static_fmt_buf */
+ if (iter->fmt != static_fmt_buf)
+ kfree(iter->fmt);
+
+ kfree(iter->temp);
+ kfree(iter->buffer_iter);
+ mutex_destroy(&iter->mutex);
+ free_cpumask_var(iter->started);
+}
+
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
@@ -4870,16 +4866,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
iter->fmt = NULL;
iter->fmt_size = 0;
- /*
- * We make a copy of the current tracer to avoid concurrent
- * changes on it while we are reading.
- */
mutex_lock(&trace_types_lock);
- iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
- if (!iter->trace)
- goto fail;
-
- *iter->trace = *tr->current_trace;
+ iter->trace = tr->current_trace;
if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
goto fail;
@@ -4944,9 +4932,7 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
fail:
mutex_unlock(&trace_types_lock);
- kfree(iter->trace);
- kfree(iter->temp);
- kfree(iter->buffer_iter);
+ free_trace_iter_content(iter);
release:
seq_release_private(inode, file);
return ERR_PTR(-ENOMEM);
@@ -5025,12 +5011,7 @@ static int tracing_release(struct inode *inode, struct file *file)
mutex_unlock(&trace_types_lock);
- mutex_destroy(&iter->mutex);
- free_cpumask_var(iter->started);
- kfree(iter->fmt);
- kfree(iter->temp);
- kfree(iter->trace);
- kfree(iter->buffer_iter);
+ free_trace_iter_content(iter);
seq_release_private(inode, file);
return 0;
@@ -5730,7 +5711,8 @@ static const char readme_msg[] =
"\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n"
#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
#ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
- "\t $stack<index>, $stack, $retval, $comm, $arg<N>, <argname>\n"
+ "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
+ "\t <argname>[->field[->field|.field...]],\n"
#else
"\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n"
#endif
@@ -6318,6 +6300,15 @@ static void set_buffer_entries(struct array_buffer *buf, unsigned long val)
per_cpu_ptr(buf->data, cpu)->entries = val;
}
+static void update_buffer_entries(struct array_buffer *buf, int cpu)
+{
+ if (cpu == RING_BUFFER_ALL_CPUS) {
+ set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0));
+ } else {
+ per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu);
+ }
+}
+
#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @tr's buffer to the size of @size_tr's entries */
static int resize_buffer_duplicate_size(struct array_buffer *trace_buf,
@@ -6396,18 +6387,12 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
return ret;
}
- if (cpu == RING_BUFFER_ALL_CPUS)
- set_buffer_entries(&tr->max_buffer, size);
- else
- per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
+ update_buffer_entries(&tr->max_buffer, cpu);
out:
#endif /* CONFIG_TRACER_MAX_TRACE */
- if (cpu == RING_BUFFER_ALL_CPUS)
- set_buffer_entries(&tr->array_buffer, size);
- else
- per_cpu_ptr(tr->array_buffer.data, cpu)->entries = size;
+ update_buffer_entries(&tr->array_buffer, cpu);
return ret;
}
@@ -6825,10 +6810,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
close_pipe_on_cpu(tr, iter->cpu_file);
mutex_unlock(&trace_types_lock);
- free_cpumask_var(iter->started);
- kfree(iter->fmt);
- kfree(iter->temp);
- mutex_destroy(&iter->mutex);
+ free_trace_iter_content(iter);
kfree(iter);
trace_array_put(tr);
@@ -7618,6 +7600,11 @@ out:
return ret;
}
+static void tracing_swap_cpu_buffer(void *tr)
+{
+ update_max_tr_single((struct trace_array *)tr, current, smp_processor_id());
+}
+
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
@@ -7676,13 +7663,15 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
ret = tracing_alloc_snapshot_instance(tr);
if (ret < 0)
break;
- local_irq_disable();
/* Now, we're going to swap */
- if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
+ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
+ local_irq_disable();
update_max_tr(tr, current, smp_processor_id(), NULL);
- else
- update_max_tr_single(tr, current, iter->cpu_file);
- local_irq_enable();
+ local_irq_enable();
+ } else {
+ smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer,
+ (void *)tr, 1);
+ }
break;
default:
if (tr->allocated_snapshot) {
@@ -9486,7 +9475,7 @@ static struct trace_array *trace_array_create(const char *name)
if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
goto out_free_tr;
- if (!alloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL))
goto out_free_tr;
tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
@@ -10431,7 +10420,7 @@ __init static int tracer_alloc_buffers(void)
if (trace_create_savedcmd() < 0)
goto out_free_temp_buffer;
- if (!alloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL))
goto out_free_savedcmd;
/* TODO: make the number of buffers hot pluggable with CPUS */
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 73eaec158473..5669dd1f90d9 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -77,6 +77,16 @@ enum trace_type {
#undef __array
#define __array(type, item, size) type item[size];
+/*
+ * For backward compatibility, older user space expects to see the
+ * kernel_stack event with a fixed-size caller field. But today the fixed
+ * size is ignored by the kernel, and the real structure is dynamic.
+ * Expose "unsigned long caller[8];" to user space, but the real structure
+ * will be "unsigned long caller[] __counted_by(size)".
+ */
+#undef __stack_array
+#define __stack_array(type, item, size, field) type item[] __counted_by(field);
+
#undef __array_desc
#define __array_desc(type, container, item, size)
@@ -596,7 +606,6 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset_online_cpus(struct array_buffer *buf);
-void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
void tracing_reset_all_online_cpus_unlocked(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
@@ -697,7 +706,6 @@ void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
-void trace_free_pid_list(struct trace_pid_list *pid_list);
int trace_pid_write(struct trace_pid_list *filtered_pids,
struct trace_pid_list **new_pid_list,
const char __user *ubuf, size_t cnt);
@@ -1334,7 +1342,7 @@ struct trace_subsystem_dir {
struct list_head list;
struct event_subsystem *subsystem;
struct trace_array *tr;
- struct dentry *entry;
+ struct eventfs_file *ef;
int ref_count;
int nr_events;
};
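
A minimal sketch of the __counted_by() pattern behind the __stack_array() macro above; struct demo_stack and the helper are hypothetical. The count field has to be assigned before the flexible array is written so that fortified memcpy() can check the bound, matching the order used in __ftrace_trace_stack().

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_stack {
	int		size;
	unsigned long	caller[] __counted_by(size);
};

static struct demo_stack *demo_stack_copy(const unsigned long *ips, int nr)
{
	struct demo_stack *s = kmalloc(struct_size(s, caller, nr), GFP_KERNEL);

	if (!s)
		return NULL;

	s->size = nr;	/* set the bound before touching the array */
	/* flex_array_size() == nr * sizeof(s->caller[0]), with overflow checking. */
	memcpy(s->caller, ips, flex_array_size(s, caller, nr));
	return s;
}
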
diff --git a/kernel/trace/trace_btf.c b/kernel/trace/trace_btf.c
new file mode 100644
index 000000000000..ca224d53bfdc
--- /dev/null
+++ b/kernel/trace/trace_btf.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/btf.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "trace_btf.h"
+
+/*
+ * Find a function proto type by name, and return the btf_type with its btf
+ * in *@btf_p. Return NULL if not found.
+ * Note that caller has to call btf_put(*@btf_p) after using the btf_type.
+ */
+const struct btf_type *btf_find_func_proto(const char *func_name, struct btf **btf_p)
+{
+ const struct btf_type *t;
+ s32 id;
+
+ id = bpf_find_btf_id(func_name, BTF_KIND_FUNC, btf_p);
+ if (id < 0)
+ return NULL;
+
+ /* Get BTF_KIND_FUNC type */
+ t = btf_type_by_id(*btf_p, id);
+ if (!t || !btf_type_is_func(t))
+ goto err;
+
+ /* The type of BTF_KIND_FUNC is BTF_KIND_FUNC_PROTO */
+ t = btf_type_by_id(*btf_p, t->type);
+ if (!t || !btf_type_is_func_proto(t))
+ goto err;
+
+ return t;
+err:
+ btf_put(*btf_p);
+ return NULL;
+}
+
+/*
+ * Return a pointer to the function's parameters and store the number of
+ * parameters in *@nr. This returns NULL if the function has no parameters,
+ * or ERR_PTR(-EINVAL) if @func_proto is not a function proto type.
+ */
+const struct btf_param *btf_get_func_param(const struct btf_type *func_proto, s32 *nr)
+{
+ if (!btf_type_is_func_proto(func_proto))
+ return ERR_PTR(-EINVAL);
+
+ *nr = btf_type_vlen(func_proto);
+ if (*nr > 0)
+ return (const struct btf_param *)(func_proto + 1);
+ else
+ return NULL;
+}
+
+#define BTF_ANON_STACK_MAX 16
+
+struct btf_anon_stack {
+ u32 tid;
+ u32 offset;
+};
+
+/*
+ * Find a member of a data structure/union by name and return it.
+ * Return NULL if not found, or an ERR_PTR if a parameter is invalid or
+ * memory allocation fails.
+ * If the member belongs to an anonymous union/structure, the offset
+ * of that anonymous union/structure is stored into @anon_offset. The
+ * caller can calculate the correct offset from the root data structure
+ * by adding anon_offset to the member's offset.
+ */
+const struct btf_member *btf_find_struct_member(struct btf *btf,
+ const struct btf_type *type,
+ const char *member_name,
+ u32 *anon_offset)
+{
+ struct btf_anon_stack *anon_stack;
+ const struct btf_member *member;
+ u32 tid, cur_offset = 0;
+ const char *name;
+ int i, top = 0;
+
+ anon_stack = kcalloc(BTF_ANON_STACK_MAX, sizeof(*anon_stack), GFP_KERNEL);
+ if (!anon_stack)
+ return ERR_PTR(-ENOMEM);
+
+retry:
+ if (!btf_type_is_struct(type)) {
+ member = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ for_each_member(i, type, member) {
+ if (!member->name_off) {
+ /* Anonymous union/struct: push it for later use */
+ type = btf_type_skip_modifiers(btf, member->type, &tid);
+ if (type && top < BTF_ANON_STACK_MAX) {
+ anon_stack[top].tid = tid;
+ anon_stack[top++].offset =
+ cur_offset + member->offset;
+ }
+ } else {
+ name = btf_name_by_offset(btf, member->name_off);
+ if (name && !strcmp(member_name, name)) {
+ if (anon_offset)
+ *anon_offset = cur_offset;
+ goto out;
+ }
+ }
+ }
+ if (top > 0) {
+ /* Pop from the anonymous stack and retry */
+ tid = anon_stack[--top].tid;
+ cur_offset = anon_stack[top].offset;
+ type = btf_type_by_id(btf, tid);
+ goto retry;
+ }
+ member = NULL;
+
+out:
+ kfree(anon_stack);
+ return member;
+}
+
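
A minimal usage sketch of the new helpers, with a hypothetical caller; only btf_find_func_proto(), btf_get_func_param() and btf_put() are taken from this file. btf_find_struct_member() is used the same way once a struct type has been resolved from one of the parameters.

#include <linux/btf.h>
#include <linux/err.h>

/* Illustrative only: return the number of parameters of @func, or -ENOENT. */
static int demo_count_params(const char *func)
{
	const struct btf_type *proto;
	const struct btf_param *params;
	struct btf *btf;
	s32 nr = 0;

	proto = btf_find_func_proto(func, &btf);
	if (!proto)
		return -ENOENT;

	params = btf_get_func_param(proto, &nr);
	if (IS_ERR(params))
		nr = PTR_ERR(params);

	/* btf_find_func_proto() took a reference on the btf; drop it. */
	btf_put(btf);
	return nr;
}
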
diff --git a/kernel/trace/trace_btf.h b/kernel/trace/trace_btf.h
new file mode 100644
index 000000000000..4bc44bc261e6
--- /dev/null
+++ b/kernel/trace/trace_btf.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/btf.h>
+
+const struct btf_type *btf_find_func_proto(const char *func_name,
+ struct btf **btf_p);
+const struct btf_param *btf_get_func_param(const struct btf_type *func_proto,
+ s32 *nr);
+const struct btf_member *btf_find_struct_member(struct btf *btf,
+ const struct btf_type *type,
+ const char *member_name,
+ u32 *anon_offset);
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 340b2fa98218..c47422b20908 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -190,7 +190,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
F_STRUCT(
__field( int, size )
- __array( unsigned long, caller, FTRACE_STACK_ENTRIES )
+ __stack_array( unsigned long, caller, FTRACE_STACK_ENTRIES, size)
),
F_printk("\t=> %ps\n\t=> %ps\n\t=> %ps\n"
diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
index a0a704ba27db..72714cbf475c 100644
--- a/kernel/trace/trace_eprobe.c
+++ b/kernel/trace/trace_eprobe.c
@@ -41,6 +41,10 @@ struct eprobe_data {
struct trace_eprobe *ep;
};
+
+#define for_each_trace_eprobe_tp(ep, _tp) \
+ list_for_each_entry(ep, trace_probe_probe_list(_tp), tp.list)
+
static int __trace_eprobe_create(int argc, const char *argv[]);
static void trace_event_probe_cleanup(struct trace_eprobe *ep)
@@ -640,7 +644,7 @@ static int disable_eprobe(struct trace_eprobe *ep,
static int enable_trace_eprobe(struct trace_event_call *call,
struct trace_event_file *file)
{
- struct trace_probe *pos, *tp;
+ struct trace_probe *tp;
struct trace_eprobe *ep;
bool enabled;
int ret = 0;
@@ -662,8 +666,7 @@ static int enable_trace_eprobe(struct trace_event_call *call,
if (enabled)
return 0;
- list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
- ep = container_of(pos, struct trace_eprobe, tp);
+ for_each_trace_eprobe_tp(ep, tp) {
ret = enable_eprobe(ep, file);
if (ret)
break;
@@ -680,8 +683,7 @@ static int enable_trace_eprobe(struct trace_event_call *call,
*/
WARN_ON_ONCE(ret != -ENOMEM);
- list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
- ep = container_of(pos, struct trace_eprobe, tp);
+ for_each_trace_eprobe_tp(ep, tp) {
disable_eprobe(ep, file->tr);
if (!--cnt)
break;
@@ -699,7 +701,7 @@ static int enable_trace_eprobe(struct trace_event_call *call,
static int disable_trace_eprobe(struct trace_event_call *call,
struct trace_event_file *file)
{
- struct trace_probe *pos, *tp;
+ struct trace_probe *tp;
struct trace_eprobe *ep;
tp = trace_probe_primary_from_call(call);
@@ -716,10 +718,8 @@ static int disable_trace_eprobe(struct trace_event_call *call,
trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
if (!trace_probe_is_enabled(tp)) {
- list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
- ep = container_of(pos, struct trace_eprobe, tp);
+ for_each_trace_eprobe_tp(ep, tp)
disable_eprobe(ep, file->tr);
- }
}
out:
@@ -807,13 +807,11 @@ static int trace_eprobe_tp_update_arg(struct trace_eprobe *ep, const char *argv[
int ret;
ret = traceprobe_parse_probe_arg(&ep->tp, i, argv[i], &ctx);
- if (ret)
- return ret;
-
/* Handle symbols "@" */
if (!ret)
ret = traceprobe_update_arg(&ep->tp.args[i]);
+ traceprobe_finish_parse(&ctx);
return ret;
}
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 578f1f7d49a6..ed367d713be0 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -984,7 +984,7 @@ static void remove_subsystem(struct trace_subsystem_dir *dir)
return;
if (!--dir->nr_events) {
- tracefs_remove(dir->entry);
+ eventfs_remove(dir->ef);
list_del(&dir->list);
__put_system_dir(dir);
}
@@ -1005,7 +1005,7 @@ static void remove_event_file_dir(struct trace_event_file *file)
tracefs_remove(dir);
}
-
+ eventfs_remove(file->ef);
list_del(&file->list);
remove_subsystem(file->system);
free_event_filter(file->filter);
@@ -2291,13 +2291,13 @@ create_new_subsystem(const char *name)
return NULL;
}
-static struct dentry *
+static struct eventfs_file *
event_subsystem_dir(struct trace_array *tr, const char *name,
struct trace_event_file *file, struct dentry *parent)
{
struct event_subsystem *system, *iter;
struct trace_subsystem_dir *dir;
- struct dentry *entry;
+ int res;
/* First see if we did not already create this dir */
list_for_each_entry(dir, &tr->systems, list) {
@@ -2305,7 +2305,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
if (strcmp(system->name, name) == 0) {
dir->nr_events++;
file->system = dir;
- return dir->entry;
+ return dir->ef;
}
}
@@ -2329,8 +2329,8 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
} else
__get_system(system);
- dir->entry = tracefs_create_dir(name, parent);
- if (!dir->entry) {
+ dir->ef = eventfs_add_subsystem_dir(name, parent);
+ if (IS_ERR(dir->ef)) {
pr_warn("Failed to create system directory %s\n", name);
__put_system(system);
goto out_free;
@@ -2345,22 +2345,22 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
/* the ftrace system is special, do not create enable or filter files */
if (strcmp(name, "ftrace") != 0) {
- entry = tracefs_create_file("filter", TRACE_MODE_WRITE,
- dir->entry, dir,
+ res = eventfs_add_file("filter", TRACE_MODE_WRITE,
+ dir->ef, dir,
&ftrace_subsystem_filter_fops);
- if (!entry) {
+ if (res) {
kfree(system->filter);
system->filter = NULL;
pr_warn("Could not create tracefs '%s/filter' entry\n", name);
}
- trace_create_file("enable", TRACE_MODE_WRITE, dir->entry, dir,
+ eventfs_add_file("enable", TRACE_MODE_WRITE, dir->ef, dir,
&ftrace_system_enable_fops);
}
list_add(&dir->list, &tr->systems);
- return dir->entry;
+ return dir->ef;
out_free:
kfree(dir);
@@ -2413,36 +2413,37 @@ static int
event_create_dir(struct dentry *parent, struct trace_event_file *file)
{
struct trace_event_call *call = file->event_call;
+ struct eventfs_file *ef_subsystem = NULL;
struct trace_array *tr = file->tr;
- struct dentry *d_events;
const char *name;
int ret;
/*
* If the trace point header did not define TRACE_SYSTEM
- * then the system would be called "TRACE_SYSTEM".
+ * then the system would be called "TRACE_SYSTEM". This should
+ * never happen.
*/
- if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
- d_events = event_subsystem_dir(tr, call->class->system, file, parent);
- if (!d_events)
- return -ENOMEM;
- } else
- d_events = parent;
+ if (WARN_ON_ONCE(strcmp(call->class->system, TRACE_SYSTEM) == 0))
+ return -ENODEV;
+
+ ef_subsystem = event_subsystem_dir(tr, call->class->system, file, parent);
+ if (!ef_subsystem)
+ return -ENOMEM;
name = trace_event_name(call);
- file->dir = tracefs_create_dir(name, d_events);
- if (!file->dir) {
+ file->ef = eventfs_add_dir(name, ef_subsystem);
+ if (IS_ERR(file->ef)) {
pr_warn("Could not create tracefs '%s' directory\n", name);
return -1;
}
if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
- trace_create_file("enable", TRACE_MODE_WRITE, file->dir, file,
+ eventfs_add_file("enable", TRACE_MODE_WRITE, file->ef, file,
&ftrace_enable_fops);
#ifdef CONFIG_PERF_EVENTS
if (call->event.type && call->class->reg)
- trace_create_file("id", TRACE_MODE_READ, file->dir,
+ eventfs_add_file("id", TRACE_MODE_READ, file->ef,
(void *)(long)call->event.type,
&ftrace_event_id_fops);
#endif
@@ -2458,27 +2459,27 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
* triggers or filters.
*/
if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
- trace_create_file("filter", TRACE_MODE_WRITE, file->dir,
+ eventfs_add_file("filter", TRACE_MODE_WRITE, file->ef,
file, &ftrace_event_filter_fops);
- trace_create_file("trigger", TRACE_MODE_WRITE, file->dir,
+ eventfs_add_file("trigger", TRACE_MODE_WRITE, file->ef,
file, &event_trigger_fops);
}
#ifdef CONFIG_HIST_TRIGGERS
- trace_create_file("hist", TRACE_MODE_READ, file->dir, file,
+ eventfs_add_file("hist", TRACE_MODE_READ, file->ef, file,
&event_hist_fops);
#endif
#ifdef CONFIG_HIST_TRIGGERS_DEBUG
- trace_create_file("hist_debug", TRACE_MODE_READ, file->dir, file,
+ eventfs_add_file("hist_debug", TRACE_MODE_READ, file->ef, file,
&event_hist_debug_fops);
#endif
- trace_create_file("format", TRACE_MODE_READ, file->dir, call,
+ eventfs_add_file("format", TRACE_MODE_READ, file->ef, call,
&ftrace_event_format_fops);
#ifdef CONFIG_TRACE_EVENT_INJECT
if (call->event.type && call->class->reg)
- trace_create_file("inject", 0200, file->dir, file,
+ eventfs_add_file("inject", 0200, file->ef, file,
&event_inject_fops);
#endif
@@ -3631,21 +3632,22 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
{
struct dentry *d_events;
struct dentry *entry;
+ int error = 0;
entry = trace_create_file("set_event", TRACE_MODE_WRITE, parent,
tr, &ftrace_set_event_fops);
if (!entry)
return -ENOMEM;
- d_events = tracefs_create_dir("events", parent);
- if (!d_events) {
+ d_events = eventfs_create_events_dir("events", parent);
+ if (IS_ERR(d_events)) {
pr_warn("Could not create tracefs 'events' directory\n");
return -ENOMEM;
}
- entry = trace_create_file("enable", TRACE_MODE_WRITE, d_events,
+ error = eventfs_add_events_file("enable", TRACE_MODE_WRITE, d_events,
tr, &ftrace_tr_enable_fops);
- if (!entry)
+ if (error)
return -ENOMEM;
/* These are not as crucial, just warn if they are not created */
@@ -3658,11 +3660,11 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
&ftrace_set_event_notrace_pid_fops);
/* ring buffer internal formats */
- trace_create_file("header_page", TRACE_MODE_READ, d_events,
+ eventfs_add_events_file("header_page", TRACE_MODE_READ, d_events,
ring_buffer_print_page_header,
&ftrace_show_header_fops);
- trace_create_file("header_event", TRACE_MODE_READ, d_events,
+ eventfs_add_events_file("header_event", TRACE_MODE_READ, d_events,
ring_buffer_print_entry_header,
&ftrace_show_header_fops);
@@ -3750,7 +3752,7 @@ int event_trace_del_tracer(struct trace_array *tr)
down_write(&trace_event_sem);
__trace_remove_event_dirs(tr);
- tracefs_remove(tr->event_dir);
+ eventfs_remove_events_dir(tr->event_dir);
up_write(&trace_event_sem);
tr->event_dir = NULL;
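
A minimal sketch of the eventfs calls this patch switches to, assuming the declarations used elsewhere in the patch; the wrapper is hypothetical and error handling is trimmed. Directories are now represented by struct eventfs_file handles instead of dentries, files attach to those handles with eventfs_add_file(), and teardown goes through eventfs_remove().

/* Illustrative only: create a "demo" subsystem dir with one control file. */
static struct eventfs_file *demo_add_event_dir(struct dentry *parent, void *data,
					       const struct file_operations *fops)
{
	struct eventfs_file *ef = eventfs_add_subsystem_dir("demo", parent);

	if (IS_ERR(ef))
		return ef;

	if (eventfs_add_file("enable", TRACE_MODE_WRITE, ef, data, fops))
		pr_warn("Could not create demo 'enable' entry\n");

	return ef;		/* later removed with eventfs_remove(ef) */
}
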
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 1dad64267878..33264e510d16 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -46,15 +46,19 @@ static const char * ops[] = { OPS };
enum filter_pred_fn {
FILTER_PRED_FN_NOP,
FILTER_PRED_FN_64,
+ FILTER_PRED_FN_64_CPUMASK,
FILTER_PRED_FN_S64,
FILTER_PRED_FN_U64,
FILTER_PRED_FN_32,
+ FILTER_PRED_FN_32_CPUMASK,
FILTER_PRED_FN_S32,
FILTER_PRED_FN_U32,
FILTER_PRED_FN_16,
+ FILTER_PRED_FN_16_CPUMASK,
FILTER_PRED_FN_S16,
FILTER_PRED_FN_U16,
FILTER_PRED_FN_8,
+ FILTER_PRED_FN_8_CPUMASK,
FILTER_PRED_FN_S8,
FILTER_PRED_FN_U8,
FILTER_PRED_FN_COMM,
@@ -64,21 +68,25 @@ enum filter_pred_fn {
FILTER_PRED_FN_PCHAR_USER,
FILTER_PRED_FN_PCHAR,
FILTER_PRED_FN_CPU,
+ FILTER_PRED_FN_CPU_CPUMASK,
+ FILTER_PRED_FN_CPUMASK,
+ FILTER_PRED_FN_CPUMASK_CPU,
FILTER_PRED_FN_FUNCTION,
FILTER_PRED_FN_,
FILTER_PRED_TEST_VISITED,
};
struct filter_pred {
- enum filter_pred_fn fn_num;
- u64 val;
- u64 val2;
- struct regex regex;
+ struct regex *regex;
+ struct cpumask *mask;
unsigned short *ops;
struct ftrace_event_field *field;
- int offset;
+ u64 val;
+ u64 val2;
+ enum filter_pred_fn fn_num;
+ int offset;
int not;
- int op;
+ int op;
};
/*
@@ -94,6 +102,8 @@ struct filter_pred {
C(TOO_MANY_OPEN, "Too many '('"), \
C(TOO_MANY_CLOSE, "Too few '('"), \
C(MISSING_QUOTE, "Missing matching quote"), \
+ C(MISSING_BRACE_OPEN, "Missing '{'"), \
+ C(MISSING_BRACE_CLOSE, "Missing '}'"), \
C(OPERAND_TOO_LONG, "Operand too long"), \
C(EXPECT_STRING, "Expecting string field"), \
C(EXPECT_DIGIT, "Expecting numeric field"), \
@@ -103,6 +113,7 @@ struct filter_pred {
C(BAD_SUBSYS_FILTER, "Couldn't find or set field in one of a subsystem's events"), \
C(TOO_MANY_PREDS, "Too many terms in predicate expression"), \
C(INVALID_FILTER, "Meaningless filter expression"), \
+ C(INVALID_CPULIST, "Invalid cpulist"), \
C(IP_FIELD_ONLY, "Only 'ip' field is supported for function trace"), \
C(INVALID_VALUE, "Invalid value (did you forget quotes)?"), \
C(NO_FUNCTION, "Function not found"), \
@@ -186,6 +197,15 @@ enum {
PROCESS_OR = 4,
};
+static void free_predicate(struct filter_pred *pred)
+{
+ if (pred) {
+ kfree(pred->regex);
+ kfree(pred->mask);
+ kfree(pred);
+ }
+}
+
/*
* Without going into a formal proof, this explains the method that is used in
* parsing the logical expressions.
@@ -623,12 +643,64 @@ out_free:
kfree(inverts);
if (prog_stack) {
for (i = 0; prog_stack[i].pred; i++)
- kfree(prog_stack[i].pred);
+ free_predicate(prog_stack[i].pred);
kfree(prog_stack);
}
return ERR_PTR(ret);
}
+static inline int
+do_filter_cpumask(int op, const struct cpumask *mask, const struct cpumask *cmp)
+{
+ switch (op) {
+ case OP_EQ:
+ return cpumask_equal(mask, cmp);
+ case OP_NE:
+ return !cpumask_equal(mask, cmp);
+ case OP_BAND:
+ return cpumask_intersects(mask, cmp);
+ default:
+ return 0;
+ }
+}
+
+/* Optimisation of do_filter_cpumask() for scalar fields */
+static inline int
+do_filter_scalar_cpumask(int op, unsigned int cpu, const struct cpumask *mask)
+{
+ /*
+ * Per the weight-of-one cpumask optimisations, the mask passed in this
+ * function has a weight >= 2, so it is never equal to a single scalar.
+ */
+ switch (op) {
+ case OP_EQ:
+ return false;
+ case OP_NE:
+ return true;
+ case OP_BAND:
+ return cpumask_test_cpu(cpu, mask);
+ default:
+ return 0;
+ }
+}
+
+static inline int
+do_filter_cpumask_scalar(int op, const struct cpumask *mask, unsigned int cpu)
+{
+ switch (op) {
+ case OP_EQ:
+ return cpumask_test_cpu(cpu, mask) &&
+ cpumask_nth(1, mask) >= nr_cpu_ids;
+ case OP_NE:
+ return !cpumask_test_cpu(cpu, mask) ||
+ cpumask_nth(1, mask) < nr_cpu_ids;
+ case OP_BAND:
+ return cpumask_test_cpu(cpu, mask);
+ default:
+ return 0;
+ }
+}
+
enum pred_cmp_types {
PRED_CMP_TYPE_NOP,
PRED_CMP_TYPE_LT,
@@ -672,6 +744,18 @@ static int filter_pred_##type(struct filter_pred *pred, void *event) \
} \
}
+#define DEFINE_CPUMASK_COMPARISON_PRED(size) \
+static int filter_pred_##size##_cpumask(struct filter_pred *pred, void *event) \
+{ \
+ u##size *addr = (u##size *)(event + pred->offset); \
+ unsigned int cpu = *addr; \
+ \
+ if (cpu >= nr_cpu_ids) \
+ return 0; \
+ \
+ return do_filter_scalar_cpumask(pred->op, cpu, pred->mask); \
+}
+
#define DEFINE_EQUALITY_PRED(size) \
static int filter_pred_##size(struct filter_pred *pred, void *event) \
{ \
@@ -693,6 +777,11 @@ DEFINE_COMPARISON_PRED(u16);
DEFINE_COMPARISON_PRED(s8);
DEFINE_COMPARISON_PRED(u8);
+DEFINE_CPUMASK_COMPARISON_PRED(64);
+DEFINE_CPUMASK_COMPARISON_PRED(32);
+DEFINE_CPUMASK_COMPARISON_PRED(16);
+DEFINE_CPUMASK_COMPARISON_PRED(8);
+
DEFINE_EQUALITY_PRED(64);
DEFINE_EQUALITY_PRED(32);
DEFINE_EQUALITY_PRED(16);
@@ -750,7 +839,7 @@ static int filter_pred_string(struct filter_pred *pred, void *event)
char *addr = (char *)(event + pred->offset);
int cmp, match;
- cmp = pred->regex.match(addr, &pred->regex, pred->regex.field_len);
+ cmp = pred->regex->match(addr, pred->regex, pred->regex->field_len);
match = cmp ^ pred->not;
@@ -763,7 +852,7 @@ static __always_inline int filter_pchar(struct filter_pred *pred, char *str)
int len;
len = strlen(str) + 1; /* including tailing '\0' */
- cmp = pred->regex.match(str, &pred->regex, len);
+ cmp = pred->regex->match(str, pred->regex, len);
match = cmp ^ pred->not;
@@ -813,7 +902,7 @@ static int filter_pred_strloc(struct filter_pred *pred, void *event)
char *addr = (char *)(event + str_loc);
int cmp, match;
- cmp = pred->regex.match(addr, &pred->regex, str_len);
+ cmp = pred->regex->match(addr, pred->regex, str_len);
match = cmp ^ pred->not;
@@ -836,7 +925,7 @@ static int filter_pred_strrelloc(struct filter_pred *pred, void *event)
char *addr = (char *)(&item[1]) + str_loc;
int cmp, match;
- cmp = pred->regex.match(addr, &pred->regex, str_len);
+ cmp = pred->regex->match(addr, pred->regex, str_len);
match = cmp ^ pred->not;
@@ -869,12 +958,42 @@ static int filter_pred_cpu(struct filter_pred *pred, void *event)
}
}
+/* Filter predicate for current CPU vs user-provided cpumask */
+static int filter_pred_cpu_cpumask(struct filter_pred *pred, void *event)
+{
+ int cpu = raw_smp_processor_id();
+
+ return do_filter_scalar_cpumask(pred->op, cpu, pred->mask);
+}
+
+/* Filter predicate for cpumask field vs user-provided cpumask */
+static int filter_pred_cpumask(struct filter_pred *pred, void *event)
+{
+ u32 item = *(u32 *)(event + pred->offset);
+ int loc = item & 0xffff;
+ const struct cpumask *mask = (event + loc);
+ const struct cpumask *cmp = pred->mask;
+
+ return do_filter_cpumask(pred->op, mask, cmp);
+}
+
+/* Filter predicate for cpumask field vs user-provided scalar */
+static int filter_pred_cpumask_cpu(struct filter_pred *pred, void *event)
+{
+ u32 item = *(u32 *)(event + pred->offset);
+ int loc = item & 0xffff;
+ const struct cpumask *mask = (event + loc);
+ unsigned int cpu = pred->val;
+
+ return do_filter_cpumask_scalar(pred->op, mask, cpu);
+}
+
/* Filter predicate for COMM. */
static int filter_pred_comm(struct filter_pred *pred, void *event)
{
int cmp;
- cmp = pred->regex.match(current->comm, &pred->regex,
+ cmp = pred->regex->match(current->comm, pred->regex,
TASK_COMM_LEN);
return cmp ^ pred->not;
}
@@ -1004,7 +1123,7 @@ enum regex_type filter_parse_regex(char *buff, int len, char **search, int *not)
static void filter_build_regex(struct filter_pred *pred)
{
- struct regex *r = &pred->regex;
+ struct regex *r = pred->regex;
char *search;
enum regex_type type = MATCH_FULL;
@@ -1169,7 +1288,7 @@ static void free_prog(struct event_filter *filter)
return;
for (i = 0; prog[i].pred; i++)
- kfree(prog[i].pred);
+ free_predicate(prog[i].pred);
kfree(prog);
}
@@ -1236,8 +1355,12 @@ static void filter_free_subsystem_filters(struct trace_subsystem_dir *dir,
int filter_assign_type(const char *type)
{
- if (strstr(type, "__data_loc") && strstr(type, "char"))
- return FILTER_DYN_STRING;
+ if (strstr(type, "__data_loc")) {
+ if (strstr(type, "char"))
+ return FILTER_DYN_STRING;
+ if (strstr(type, "cpumask_t"))
+ return FILTER_CPUMASK;
+ }
if (strstr(type, "__rel_loc") && strstr(type, "char"))
return FILTER_RDYN_STRING;
@@ -1313,24 +1436,32 @@ static int filter_pred_fn_call(struct filter_pred *pred, void *event)
switch (pred->fn_num) {
case FILTER_PRED_FN_64:
return filter_pred_64(pred, event);
+ case FILTER_PRED_FN_64_CPUMASK:
+ return filter_pred_64_cpumask(pred, event);
case FILTER_PRED_FN_S64:
return filter_pred_s64(pred, event);
case FILTER_PRED_FN_U64:
return filter_pred_u64(pred, event);
case FILTER_PRED_FN_32:
return filter_pred_32(pred, event);
+ case FILTER_PRED_FN_32_CPUMASK:
+ return filter_pred_32_cpumask(pred, event);
case FILTER_PRED_FN_S32:
return filter_pred_s32(pred, event);
case FILTER_PRED_FN_U32:
return filter_pred_u32(pred, event);
case FILTER_PRED_FN_16:
return filter_pred_16(pred, event);
+ case FILTER_PRED_FN_16_CPUMASK:
+ return filter_pred_16_cpumask(pred, event);
case FILTER_PRED_FN_S16:
return filter_pred_s16(pred, event);
case FILTER_PRED_FN_U16:
return filter_pred_u16(pred, event);
case FILTER_PRED_FN_8:
return filter_pred_8(pred, event);
+ case FILTER_PRED_FN_8_CPUMASK:
+ return filter_pred_8_cpumask(pred, event);
case FILTER_PRED_FN_S8:
return filter_pred_s8(pred, event);
case FILTER_PRED_FN_U8:
@@ -1349,6 +1480,12 @@ static int filter_pred_fn_call(struct filter_pred *pred, void *event)
return filter_pred_pchar(pred, event);
case FILTER_PRED_FN_CPU:
return filter_pred_cpu(pred, event);
+ case FILTER_PRED_FN_CPU_CPUMASK:
+ return filter_pred_cpu_cpumask(pred, event);
+ case FILTER_PRED_FN_CPUMASK:
+ return filter_pred_cpumask(pred, event);
+ case FILTER_PRED_FN_CPUMASK_CPU:
+ return filter_pred_cpumask_cpu(pred, event);
case FILTER_PRED_FN_FUNCTION:
return filter_pred_function(pred, event);
case FILTER_PRED_TEST_VISITED:
@@ -1553,9 +1690,130 @@ static int parse_pred(const char *str, void *data,
goto err_free;
}
- pred->regex.len = len;
- strncpy(pred->regex.pattern, str + s, len);
- pred->regex.pattern[len] = 0;
+ pred->regex = kzalloc(sizeof(*pred->regex), GFP_KERNEL);
+ if (!pred->regex)
+ goto err_mem;
+ pred->regex->len = len;
+ strncpy(pred->regex->pattern, str + s, len);
+ pred->regex->pattern[len] = 0;
+
+ } else if (!strncmp(str + i, "CPUS", 4)) {
+ unsigned int maskstart;
+ bool single;
+ char *tmp;
+
+ switch (field->filter_type) {
+ case FILTER_CPUMASK:
+ case FILTER_CPU:
+ case FILTER_OTHER:
+ break;
+ default:
+ parse_error(pe, FILT_ERR_ILLEGAL_FIELD_OP, pos + i);
+ goto err_free;
+ }
+
+ switch (op) {
+ case OP_EQ:
+ case OP_NE:
+ case OP_BAND:
+ break;
+ default:
+ parse_error(pe, FILT_ERR_ILLEGAL_FIELD_OP, pos + i);
+ goto err_free;
+ }
+
+ /* Skip CPUS */
+ i += 4;
+ if (str[i++] != '{') {
+ parse_error(pe, FILT_ERR_MISSING_BRACE_OPEN, pos + i);
+ goto err_free;
+ }
+ maskstart = i;
+
+ /* Walk the cpulist until closing } */
+ for (; str[i] && str[i] != '}'; i++)
+ ;
+
+ if (str[i] != '}') {
+ parse_error(pe, FILT_ERR_MISSING_BRACE_CLOSE, pos + i);
+ goto err_free;
+ }
+
+ if (maskstart == i) {
+ parse_error(pe, FILT_ERR_INVALID_CPULIST, pos + i);
+ goto err_free;
+ }
+
+ /* Copy the cpulist between { and } */
+ tmp = kmalloc((i - maskstart) + 1, GFP_KERNEL);
+ if (!tmp)
+ goto err_mem;
+
+ strscpy(tmp, str + maskstart, (i - maskstart) + 1);
+ pred->mask = kzalloc(cpumask_size(), GFP_KERNEL);
+ if (!pred->mask) {
+ kfree(tmp);
+ goto err_mem;
+ }
+
+ /* Now parse it */
+ if (cpulist_parse(tmp, pred->mask)) {
+ kfree(tmp);
+ parse_error(pe, FILT_ERR_INVALID_CPULIST, pos + i);
+ goto err_free;
+ }
+ kfree(tmp);
+
+ /* Move along */
+ i++;
+
+ /*
+ * Optimisation: if the user-provided mask has a weight of one
+ * then we can treat it as a scalar input.
+ */
+ single = cpumask_weight(pred->mask) == 1;
+ if (single) {
+ pred->val = cpumask_first(pred->mask);
+ kfree(pred->mask);
+ pred->mask = NULL;
+ }
+
+ if (field->filter_type == FILTER_CPUMASK) {
+ pred->fn_num = single ?
+ FILTER_PRED_FN_CPUMASK_CPU :
+ FILTER_PRED_FN_CPUMASK;
+ } else if (field->filter_type == FILTER_CPU) {
+ if (single) {
+ if (pred->op == OP_BAND)
+ pred->op = OP_EQ;
+
+ pred->fn_num = FILTER_PRED_FN_CPU;
+ } else {
+ pred->fn_num = FILTER_PRED_FN_CPU_CPUMASK;
+ }
+ } else if (single) {
+ if (pred->op == OP_BAND)
+ pred->op = OP_EQ;
+
+ pred->fn_num = select_comparison_fn(pred->op, field->size, false);
+ if (pred->op == OP_NE)
+ pred->not = 1;
+ } else {
+ switch (field->size) {
+ case 8:
+ pred->fn_num = FILTER_PRED_FN_64_CPUMASK;
+ break;
+ case 4:
+ pred->fn_num = FILTER_PRED_FN_32_CPUMASK;
+ break;
+ case 2:
+ pred->fn_num = FILTER_PRED_FN_16_CPUMASK;
+ break;
+ case 1:
+ pred->fn_num = FILTER_PRED_FN_8_CPUMASK;
+ break;
+ }
+ }
/* This is either a string, or an integer */
} else if (str[i] == '\'' || str[i] == '"') {
@@ -1597,9 +1855,12 @@ static int parse_pred(const char *str, void *data,
goto err_free;
}
- pred->regex.len = len;
- strncpy(pred->regex.pattern, str + s, len);
- pred->regex.pattern[len] = 0;
+ pred->regex = kzalloc(sizeof(*pred->regex), GFP_KERNEL);
+ if (!pred->regex)
+ goto err_mem;
+ pred->regex->len = len;
+ strncpy(pred->regex->pattern, str + s, len);
+ pred->regex->pattern[len] = 0;
filter_build_regex(pred);
@@ -1608,7 +1869,7 @@ static int parse_pred(const char *str, void *data,
} else if (field->filter_type == FILTER_STATIC_STRING) {
pred->fn_num = FILTER_PRED_FN_STRING;
- pred->regex.field_len = field->size;
+ pred->regex->field_len = field->size;
} else if (field->filter_type == FILTER_DYN_STRING) {
pred->fn_num = FILTER_PRED_FN_STRLOC;
@@ -1691,10 +1952,10 @@ static int parse_pred(const char *str, void *data,
return i;
err_free:
- kfree(pred);
+ free_predicate(pred);
return -EINVAL;
err_mem:
- kfree(pred);
+ free_predicate(pred);
return -ENOMEM;
}
@@ -2287,8 +2548,8 @@ static int ftrace_function_set_filter_pred(struct filter_pred *pred,
return ret;
return __ftrace_function_set_filter(pred->op == OP_EQ,
- pred->regex.pattern,
- pred->regex.len,
+ pred->regex->pattern,
+ pred->regex->len,
data);
}
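/*
 * [Editorial illustration, not part of the patch] The CPUS{...} syntax parsed
 * above lets a trace event filter match a field against a cpulist using the
 * ==, != and & operators. A hypothetical usage sketch against tracefs
 * (event and field names are examples only):
 *
 *   # mask compare against a scalar CPU field
 *   echo 'target_cpu & CPUS{2,4-6}' > events/sched/sched_wakeup/filter
 *
 *   # a single-CPU list takes the optimised scalar path (cpumask_weight == 1)
 *   echo 'target_cpu == CPUS{3}' > events/sched/sched_wakeup/filter
 */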
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index 33cb6af31f39..6f046650e527 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -1328,14 +1328,14 @@ static int user_field_set_string(struct ftrace_event_field *field,
static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
{
- struct ftrace_event_field *field, *next;
+ struct ftrace_event_field *field;
struct list_head *head = &user->fields;
int pos = 0, depth = 0;
const char *str_func;
pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
- list_for_each_entry_safe_reverse(field, next, head, link) {
+ list_for_each_entry_reverse(field, head, link) {
if (depth != 0)
pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
@@ -1347,7 +1347,7 @@ static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
- list_for_each_entry_safe_reverse(field, next, head, link) {
+ list_for_each_entry_reverse(field, head, link) {
if (user_field_is_dyn_string(field->type, &str_func))
pos += snprintf(buf + pos, LEN_OR_ZERO,
", %s(%s)", str_func, field->name);
@@ -1732,7 +1732,7 @@ static int user_event_create(const char *raw_command)
static int user_event_show(struct seq_file *m, struct dyn_event *ev)
{
struct user_event *user = container_of(ev, struct user_event, devent);
- struct ftrace_event_field *field, *next;
+ struct ftrace_event_field *field;
struct list_head *head;
int depth = 0;
@@ -1740,7 +1740,7 @@ static int user_event_show(struct seq_file *m, struct dyn_event *ev)
head = trace_get_fields(&user->call);
- list_for_each_entry_safe_reverse(field, next, head, link) {
+ list_for_each_entry_reverse(field, head, link) {
if (depth == 0)
seq_puts(m, " ");
else
@@ -1816,13 +1816,14 @@ out:
static bool user_fields_match(struct user_event *user, int argc,
const char **argv)
{
- struct ftrace_event_field *field, *next;
+ struct ftrace_event_field *field;
struct list_head *head = &user->fields;
int i = 0;
- list_for_each_entry_safe_reverse(field, next, head, link)
+ list_for_each_entry_reverse(field, head, link) {
if (!user_field_match(field, argc, argv, &i))
return false;
+ }
if (i != argc)
return false;
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 58f3946081e2..1698fc22afa0 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -51,6 +51,9 @@ static int ftrace_event_register(struct trace_event_call *call,
#undef __array
#define __array(type, item, size) type item[size];
+#undef __stack_array
+#define __stack_array(type, item, size, field) __array(type, item, size)
+
#undef __array_desc
#define __array_desc(type, container, item, size) type item[size];
@@ -114,6 +117,9 @@ static void __always_unused ____ftrace_check_##name(void) \
is_signed_type(_type), .filter_type = FILTER_OTHER, \
.len = _len },
+#undef __stack_array
+#define __stack_array(_type, _item, _len, _field) __array(_type, _item, _len)
+
#undef __array_desc
#define __array_desc(_type, _container, _item, _len) __array(_type, _item, _len)
@@ -149,6 +155,9 @@ static struct trace_event_fields ftrace_event_fields_##name[] = { \
#undef __array
#define __array(type, item, len)
+#undef __stack_array
+#define __stack_array(type, item, len, field)
+
#undef __array_desc
#define __array_desc(type, container, item, len)
diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
index dfe2e546acdc..8bfe23af9c73 100644
--- a/kernel/trace/trace_fprobe.c
+++ b/kernel/trace/trace_fprobe.c
@@ -898,6 +898,46 @@ static struct tracepoint *find_tracepoint(const char *tp_name)
return data.tpoint;
}
+static int parse_symbol_and_return(int argc, const char *argv[],
+ char **symbol, bool *is_return,
+ bool is_tracepoint)
+{
+ char *tmp = strchr(argv[1], '%');
+ int i;
+
+ if (tmp) {
+ int len = tmp - argv[1];
+
+ if (!is_tracepoint && !strcmp(tmp, "%return")) {
+ *is_return = true;
+ } else {
+ trace_probe_log_err(len, BAD_ADDR_SUFFIX);
+ return -EINVAL;
+ }
+ *symbol = kmemdup_nul(argv[1], len, GFP_KERNEL);
+ } else
+ *symbol = kstrdup(argv[1], GFP_KERNEL);
+ if (!*symbol)
+ return -ENOMEM;
+
+ if (*is_return)
+ return 0;
+
+ /* If there is $retval, this should be a return fprobe. */
+ for (i = 2; i < argc; i++) {
+ tmp = strstr(argv[i], "$retval");
+ if (tmp && !isalnum(tmp[7]) && tmp[7] != '_') {
+ *is_return = true;
+ /*
+ * NOTE: Don't check is_tracepoint here, because it will
+ * be checked when the argument is parsed.
+ */
+ break;
+ }
+ }
+ return 0;
+}
+
static int __trace_fprobe_create(int argc, const char *argv[])
{
/*
@@ -927,7 +967,7 @@ static int __trace_fprobe_create(int argc, const char *argv[])
struct trace_fprobe *tf = NULL;
int i, len, new_argc = 0, ret = 0;
bool is_return = false;
- char *symbol = NULL, *tmp = NULL;
+ char *symbol = NULL;
const char *event = NULL, *group = FPROBE_EVENT_SYSTEM;
const char **new_argv = NULL;
int maxactive = 0;
@@ -983,20 +1023,10 @@ static int __trace_fprobe_create(int argc, const char *argv[])
trace_probe_log_set_index(1);
/* a symbol(or tracepoint) must be specified */
- symbol = kstrdup(argv[1], GFP_KERNEL);
- if (!symbol)
- return -ENOMEM;
+ ret = parse_symbol_and_return(argc, argv, &symbol, &is_return, is_tracepoint);
+ if (ret < 0)
+ goto parse_error;
- tmp = strchr(symbol, '%');
- if (tmp) {
- if (!is_tracepoint && !strcmp(tmp, "%return")) {
- *tmp = '\0';
- is_return = true;
- } else {
- trace_probe_log_err(tmp - symbol, BAD_ADDR_SUFFIX);
- goto parse_error;
- }
- }
if (!is_return && maxactive) {
trace_probe_log_set_index(0);
trace_probe_log_err(1, BAD_MAXACT_TYPE);
@@ -1096,6 +1126,7 @@ static int __trace_fprobe_create(int argc, const char *argv[])
}
out:
+ traceprobe_finish_parse(&ctx);
trace_probe_log_clear();
kfree(new_argv);
kfree(symbol);
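/*
 * [Editorial illustration, not part of the patch] With parse_symbol_and_return()
 * above, an fprobe event becomes a return probe either via the %return suffix
 * on the symbol or implicitly when an argument references $retval. A hedged
 * sketch of the dynamic_events syntax (symbol chosen as an example):
 *
 *   echo 'f vfs_read%return $retval' >> dynamic_events
 *   echo 'f vfs_read $retval'        >> dynamic_events   # implies %return
 */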
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 2f37a6e68aa9..b791524a6536 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -635,7 +635,7 @@ static int s_mode_show(struct seq_file *s, void *v)
else
seq_printf(s, "%s", thread_mode_str[mode]);
- if (mode != MODE_MAX)
+ if (mode < MODE_MAX - 1) /* if mode is any but last */
seq_puts(s, " ");
return 0;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 17c21c0b2dd1..3d7a180a8427 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -907,6 +907,7 @@ static int __trace_kprobe_create(int argc, const char *argv[])
}
out:
+ traceprobe_finish_parse(&ctx);
trace_probe_log_clear();
kfree(new_argv);
kfree(symbol);
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index c68a72707852..4dc74d73fc1d 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -12,6 +12,7 @@
#define pr_fmt(fmt) "trace_probe: " fmt
#include <linux/bpf.h>
+#include "trace_btf.h"
#include "trace_probe.h"
@@ -304,31 +305,90 @@ static int parse_trace_event_arg(char *arg, struct fetch_insn *code,
#ifdef CONFIG_PROBE_EVENTS_BTF_ARGS
-static struct btf *traceprobe_get_btf(void)
+static u32 btf_type_int(const struct btf_type *t)
{
- struct btf *btf = bpf_get_btf_vmlinux();
+ return *(u32 *)(t + 1);
+}
- if (IS_ERR_OR_NULL(btf))
- return NULL;
+static bool btf_type_is_char_ptr(struct btf *btf, const struct btf_type *type)
+{
+ const struct btf_type *real_type;
+ u32 intdata;
+ s32 tid;
+
+ real_type = btf_type_skip_modifiers(btf, type->type, &tid);
+ if (!real_type)
+ return false;
+
+ if (BTF_INFO_KIND(real_type->info) != BTF_KIND_INT)
+ return false;
- return btf;
+ intdata = btf_type_int(real_type);
+ return !(BTF_INT_ENCODING(intdata) & BTF_INT_SIGNED)
+ && BTF_INT_BITS(intdata) == 8;
}
-static u32 btf_type_int(const struct btf_type *t)
+static bool btf_type_is_char_array(struct btf *btf, const struct btf_type *type)
{
- return *(u32 *)(t + 1);
+ const struct btf_type *real_type;
+ const struct btf_array *array;
+ u32 intdata;
+ s32 tid;
+
+ if (BTF_INFO_KIND(type->info) != BTF_KIND_ARRAY)
+ return false;
+
+ array = (const struct btf_array *)(type + 1);
+
+ real_type = btf_type_skip_modifiers(btf, array->type, &tid);
+
+ intdata = btf_type_int(real_type);
+ return !(BTF_INT_ENCODING(intdata) & BTF_INT_SIGNED)
+ && BTF_INT_BITS(intdata) == 8;
}
-static const char *type_from_btf_id(struct btf *btf, s32 id)
+static int check_prepare_btf_string_fetch(char *typename,
+ struct fetch_insn **pcode,
+ struct traceprobe_parse_context *ctx)
+{
+ struct btf *btf = ctx->btf;
+
+ if (!btf || !ctx->last_type)
+ return 0;
+
+ /* char [] does not need any change. */
+ if (btf_type_is_char_array(btf, ctx->last_type))
+ return 0;
+
+ /* char * requires dereferencing the pointer. */
+ if (btf_type_is_char_ptr(btf, ctx->last_type)) {
+ struct fetch_insn *code = *pcode + 1;
+
+ if (code->op == FETCH_OP_END) {
+ trace_probe_log_err(ctx->offset, TOO_MANY_OPS);
+ return -E2BIG;
+ }
+ if (typename[0] == 'u')
+ code->op = FETCH_OP_UDEREF;
+ else
+ code->op = FETCH_OP_DEREF;
+ code->offset = 0;
+ *pcode = code;
+ return 0;
+ }
+ /* Other types are not available for string */
+ trace_probe_log_err(ctx->offset, BAD_TYPE4STR);
+ return -EINVAL;
+}
+
+static const char *fetch_type_from_btf_type(struct btf *btf,
+ const struct btf_type *type,
+ struct traceprobe_parse_context *ctx)
{
- const struct btf_type *t;
u32 intdata;
- s32 tid;
/* TODO: const char * could be converted as a string */
- t = btf_type_skip_modifiers(btf, id, &tid);
-
- switch (BTF_INFO_KIND(t->info)) {
+ switch (BTF_INFO_KIND(type->info)) {
case BTF_KIND_ENUM:
/* enum is "int", so convert to "s32" */
return "s32";
@@ -341,7 +401,7 @@ static const char *type_from_btf_id(struct btf *btf, s32 id)
else
return "x32";
case BTF_KIND_INT:
- intdata = btf_type_int(t);
+ intdata = btf_type_int(type);
if (BTF_INT_ENCODING(intdata) & BTF_INT_SIGNED) {
switch (BTF_INT_BITS(intdata)) {
case 8:
@@ -364,6 +424,10 @@ static const char *type_from_btf_id(struct btf *btf, s32 id)
case 64:
return "u64";
}
+ /* bitfield, size is encoded in the type */
+ ctx->last_bitsize = BTF_INT_BITS(intdata);
+ ctx->last_bitoffs += BTF_INT_OFFSET(intdata);
+ return "u64";
}
}
/* TODO: support other types */
@@ -371,88 +435,223 @@ static const char *type_from_btf_id(struct btf *btf, s32 id)
return NULL;
}
-static const struct btf_type *find_btf_func_proto(const char *funcname)
+static int query_btf_context(struct traceprobe_parse_context *ctx)
{
- struct btf *btf = traceprobe_get_btf();
- const struct btf_type *t;
- s32 id;
+ const struct btf_param *param;
+ const struct btf_type *type;
+ struct btf *btf;
+ s32 nr;
- if (!btf || !funcname)
- return ERR_PTR(-EINVAL);
+ if (ctx->btf)
+ return 0;
+
+ if (!ctx->funcname)
+ return -EINVAL;
+
+ type = btf_find_func_proto(ctx->funcname, &btf);
+ if (!type)
+ return -ENOENT;
- id = btf_find_by_name_kind(btf, funcname, BTF_KIND_FUNC);
- if (id <= 0)
- return ERR_PTR(-ENOENT);
+ ctx->btf = btf;
+ ctx->proto = type;
+
+ /* ctx->params is optional, since func(void) will not have params. */
+ nr = 0;
+ param = btf_get_func_param(type, &nr);
+ if (!IS_ERR_OR_NULL(param)) {
+ /* Hide the first 'data' argument of tracepoint */
+ if (ctx->flags & TPARG_FL_TPOINT) {
+ nr--;
+ param++;
+ }
+ }
- /* Get BTF_KIND_FUNC type */
- t = btf_type_by_id(btf, id);
- if (!t || !btf_type_is_func(t))
- return ERR_PTR(-ENOENT);
+ if (nr > 0) {
+ ctx->nr_params = nr;
+ ctx->params = param;
+ } else {
+ ctx->nr_params = 0;
+ ctx->params = NULL;
+ }
- /* The type of BTF_KIND_FUNC is BTF_KIND_FUNC_PROTO */
- t = btf_type_by_id(btf, t->type);
- if (!t || !btf_type_is_func_proto(t))
- return ERR_PTR(-ENOENT);
+ return 0;
+}
- return t;
+static void clear_btf_context(struct traceprobe_parse_context *ctx)
+{
+ if (ctx->btf) {
+ btf_put(ctx->btf);
+ ctx->btf = NULL;
+ ctx->proto = NULL;
+ ctx->params = NULL;
+ ctx->nr_params = 0;
+ }
}
-static const struct btf_param *find_btf_func_param(const char *funcname, s32 *nr,
- bool tracepoint)
+/* Return 1 if the field separator is the arrow operator ('->') */
+static int split_next_field(char *varname, char **next_field,
+ struct traceprobe_parse_context *ctx)
{
- const struct btf_param *param;
- const struct btf_type *t;
+ char *field;
+ int ret = 0;
+
+ field = strpbrk(varname, ".-");
+ if (field) {
+ if (field[0] == '-' && field[1] == '>') {
+ field[0] = '\0';
+ field += 2;
+ ret = 1;
+ } else if (field[0] == '.') {
+ field[0] = '\0';
+ field += 1;
+ } else {
+ trace_probe_log_err(ctx->offset + field - varname, BAD_HYPHEN);
+ return -EINVAL;
+ }
+ *next_field = field;
+ }
- if (!funcname || !nr)
- return ERR_PTR(-EINVAL);
+ return ret;
+}
- t = find_btf_func_proto(funcname);
- if (IS_ERR(t))
- return (const struct btf_param *)t;
+/*
+ * Parse the fields of a data structure. @type must be a pointer type
+ * pointing to the target data structure type.
+ */
+static int parse_btf_field(char *fieldname, const struct btf_type *type,
+ struct fetch_insn **pcode, struct fetch_insn *end,
+ struct traceprobe_parse_context *ctx)
+{
+ struct fetch_insn *code = *pcode;
+ const struct btf_member *field;
+ u32 bitoffs, anon_offs;
+ char *next;
+ int is_ptr;
+ s32 tid;
- *nr = btf_type_vlen(t);
- param = (const struct btf_param *)(t + 1);
+ do {
+ /* Outer loop for resolving the arrow operator ('->') */
+ if (BTF_INFO_KIND(type->info) != BTF_KIND_PTR) {
+ trace_probe_log_err(ctx->offset, NO_PTR_STRCT);
+ return -EINVAL;
+ }
+ /* Convert a struct pointer type to a struct type */
+ type = btf_type_skip_modifiers(ctx->btf, type->type, &tid);
+ if (!type) {
+ trace_probe_log_err(ctx->offset, BAD_BTF_TID);
+ return -EINVAL;
+ }
- /* Hide the first 'data' argument of tracepoint */
- if (tracepoint) {
- (*nr)--;
- param++;
- }
+ bitoffs = 0;
+ do {
+ /* Inner loop for resolving the dot operator ('.') */
+ next = NULL;
+ is_ptr = split_next_field(fieldname, &next, ctx);
+ if (is_ptr < 0)
+ return is_ptr;
+
+ anon_offs = 0;
+ field = btf_find_struct_member(ctx->btf, type, fieldname,
+ &anon_offs);
+ if (!field) {
+ trace_probe_log_err(ctx->offset, NO_BTF_FIELD);
+ return -ENOENT;
+ }
+ /* Add anonymous structure/union offset */
+ bitoffs += anon_offs;
+
+ /* Accumulate the bit-offsets of the dot-connected fields */
+ if (btf_type_kflag(type)) {
+ bitoffs += BTF_MEMBER_BIT_OFFSET(field->offset);
+ ctx->last_bitsize = BTF_MEMBER_BITFIELD_SIZE(field->offset);
+ } else {
+ bitoffs += field->offset;
+ ctx->last_bitsize = 0;
+ }
- if (*nr > 0)
- return param;
- else
- return NULL;
+ type = btf_type_skip_modifiers(ctx->btf, field->type, &tid);
+ if (!type) {
+ trace_probe_log_err(ctx->offset, BAD_BTF_TID);
+ return -EINVAL;
+ }
+
+ ctx->offset += next - fieldname;
+ fieldname = next;
+ } while (!is_ptr && fieldname);
+
+ if (++code == end) {
+ trace_probe_log_err(ctx->offset, TOO_MANY_OPS);
+ return -EINVAL;
+ }
+ code->op = FETCH_OP_DEREF; /* TODO: user deref support */
+ code->offset = bitoffs / 8;
+ *pcode = code;
+
+ ctx->last_bitoffs = bitoffs % 8;
+ ctx->last_type = type;
+ } while (fieldname);
+
+ return 0;
}
-static int parse_btf_arg(const char *varname, struct fetch_insn *code,
+static int parse_btf_arg(char *varname,
+ struct fetch_insn **pcode, struct fetch_insn *end,
struct traceprobe_parse_context *ctx)
{
- struct btf *btf = traceprobe_get_btf();
+ struct fetch_insn *code = *pcode;
const struct btf_param *params;
- int i;
+ const struct btf_type *type;
+ char *field = NULL;
+ int i, is_ptr, ret;
+ u32 tid;
+
+ if (WARN_ON_ONCE(!ctx->funcname))
+ return -EINVAL;
- if (!btf) {
- trace_probe_log_err(ctx->offset, NOSUP_BTFARG);
+ is_ptr = split_next_field(varname, &field, ctx);
+ if (is_ptr < 0)
+ return is_ptr;
+ if (!is_ptr && field) {
+ /* dot-connected field on an argument is not supported. */
+ trace_probe_log_err(ctx->offset + field - varname,
+ NOSUP_DAT_ARG);
return -EOPNOTSUPP;
}
- if (WARN_ON_ONCE(!ctx->funcname))
- return -EINVAL;
+ if (ctx->flags & TPARG_FL_RETURN) {
+ if (strcmp(varname, "$retval") != 0) {
+ trace_probe_log_err(ctx->offset, NO_BTFARG);
+ return -ENOENT;
+ }
+ code->op = FETCH_OP_RETVAL;
+ /* Check whether the function return type is not void */
+ if (query_btf_context(ctx) == 0) {
+ if (ctx->proto->type == 0) {
+ trace_probe_log_err(ctx->offset, NO_RETVAL);
+ return -ENOENT;
+ }
+ tid = ctx->proto->type;
+ goto found;
+ }
+ if (field) {
+ trace_probe_log_err(ctx->offset + field - varname,
+ NO_BTF_ENTRY);
+ return -ENOENT;
+ }
+ return 0;
+ }
- if (!ctx->params) {
- params = find_btf_func_param(ctx->funcname, &ctx->nr_params,
- ctx->flags & TPARG_FL_TPOINT);
- if (IS_ERR_OR_NULL(params)) {
+ if (!ctx->btf) {
+ ret = query_btf_context(ctx);
+ if (ret < 0 || ctx->nr_params == 0) {
trace_probe_log_err(ctx->offset, NO_BTF_ENTRY);
return PTR_ERR(params);
}
- ctx->params = params;
- } else
- params = ctx->params;
+ }
+ params = ctx->params;
for (i = 0; i < ctx->nr_params; i++) {
- const char *name = btf_name_by_offset(btf, params[i].name_off);
+ const char *name = btf_name_by_offset(ctx->btf, params[i].name_off);
if (name && !strcmp(name, varname)) {
code->op = FETCH_OP_ARG;
@@ -460,91 +659,114 @@ static int parse_btf_arg(const char *varname, struct fetch_insn *code,
code->param = i + 1;
else
code->param = i;
- return 0;
+ tid = params[i].type;
+ goto found;
}
}
trace_probe_log_err(ctx->offset, NO_BTFARG);
return -ENOENT;
-}
-
-static const struct fetch_type *parse_btf_arg_type(int arg_idx,
- struct traceprobe_parse_context *ctx)
-{
- struct btf *btf = traceprobe_get_btf();
- const char *typestr = NULL;
- if (btf && ctx->params) {
- if (ctx->flags & TPARG_FL_TPOINT)
- arg_idx--;
- typestr = type_from_btf_id(btf, ctx->params[arg_idx].type);
+found:
+ type = btf_type_skip_modifiers(ctx->btf, tid, &tid);
+ if (!type) {
+ trace_probe_log_err(ctx->offset, BAD_BTF_TID);
+ return -EINVAL;
}
-
- return find_fetch_type(typestr, ctx->flags);
+ /* Initialize the last type information */
+ ctx->last_type = type;
+ ctx->last_bitoffs = 0;
+ ctx->last_bitsize = 0;
+ if (field) {
+ ctx->offset += field - varname;
+ return parse_btf_field(field, type, pcode, end, ctx);
+ }
+ return 0;
}
-static const struct fetch_type *parse_btf_retval_type(
+static const struct fetch_type *find_fetch_type_from_btf_type(
struct traceprobe_parse_context *ctx)
{
- struct btf *btf = traceprobe_get_btf();
+ struct btf *btf = ctx->btf;
const char *typestr = NULL;
- const struct btf_type *t;
- if (btf && ctx->funcname) {
- t = find_btf_func_proto(ctx->funcname);
- if (!IS_ERR(t))
- typestr = type_from_btf_id(btf, t->type);
- }
+ if (btf && ctx->last_type)
+ typestr = fetch_type_from_btf_type(btf, ctx->last_type, ctx);
return find_fetch_type(typestr, ctx->flags);
}
-static bool is_btf_retval_void(const char *funcname)
+static int parse_btf_bitfield(struct fetch_insn **pcode,
+ struct traceprobe_parse_context *ctx)
{
- const struct btf_type *t;
+ struct fetch_insn *code = *pcode;
- t = find_btf_func_proto(funcname);
- if (IS_ERR(t))
- return false;
+ if ((ctx->last_bitsize % 8 == 0) && ctx->last_bitoffs == 0)
+ return 0;
+
+ code++;
+ if (code->op != FETCH_OP_NOP) {
+ trace_probe_log_err(ctx->offset, TOO_MANY_OPS);
+ return -EINVAL;
+ }
+ *pcode = code;
- return t->type == 0;
+ code->op = FETCH_OP_MOD_BF;
+ code->lshift = 64 - (ctx->last_bitsize + ctx->last_bitoffs);
+ code->rshift = 64 - ctx->last_bitsize;
+ code->basesize = 64 / 8;
+ return 0;
}
+
#else
-static struct btf *traceprobe_get_btf(void)
+static void clear_btf_context(struct traceprobe_parse_context *ctx)
{
- return NULL;
+ ctx->btf = NULL;
}
-static const struct btf_param *find_btf_func_param(const char *funcname, s32 *nr,
- bool tracepoint)
+static int query_btf_context(struct traceprobe_parse_context *ctx)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
-static int parse_btf_arg(const char *varname, struct fetch_insn *code,
+static int parse_btf_arg(char *varname,
+ struct fetch_insn **pcode, struct fetch_insn *end,
struct traceprobe_parse_context *ctx)
{
trace_probe_log_err(ctx->offset, NOSUP_BTFARG);
return -EOPNOTSUPP;
}
-#define parse_btf_arg_type(idx, ctx) \
- find_fetch_type(NULL, ctx->flags)
+static int parse_btf_bitfield(struct fetch_insn **pcode,
+ struct traceprobe_parse_context *ctx)
+{
+ trace_probe_log_err(ctx->offset, NOSUP_BTFARG);
+ return -EOPNOTSUPP;
+}
-#define parse_btf_retval_type(ctx) \
+#define find_fetch_type_from_btf_type(ctx) \
find_fetch_type(NULL, ctx->flags)
-#define is_btf_retval_void(funcname) (false)
+static int check_prepare_btf_string_fetch(char *typename,
+ struct fetch_insn **pcode,
+ struct traceprobe_parse_context *ctx)
+{
+ return 0;
+}
#endif
#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
-static int parse_probe_vars(char *arg, const struct fetch_type *t,
- struct fetch_insn *code,
+/* Parse $vars. @orig_arg points at '$', whose position matches @ctx->offset */
+static int parse_probe_vars(char *orig_arg, const struct fetch_type *t,
+ struct fetch_insn **pcode,
+ struct fetch_insn *end,
struct traceprobe_parse_context *ctx)
{
- unsigned long param;
+ struct fetch_insn *code = *pcode;
int err = TP_ERR_BAD_VAR;
+ char *arg = orig_arg + 1;
+ unsigned long param;
int ret = 0;
int len;
@@ -563,18 +785,17 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t,
goto inval;
}
- if (strcmp(arg, "retval") == 0) {
- if (ctx->flags & TPARG_FL_RETURN) {
- if ((ctx->flags & TPARG_FL_KERNEL) &&
- is_btf_retval_void(ctx->funcname)) {
- err = TP_ERR_NO_RETVAL;
- goto inval;
- }
+ if (str_has_prefix(arg, "retval")) {
+ if (!(ctx->flags & TPARG_FL_RETURN)) {
+ err = TP_ERR_RETVAL_ON_PROBE;
+ goto inval;
+ }
+ if (!(ctx->flags & TPARG_FL_KERNEL) ||
+ !IS_ENABLED(CONFIG_PROBE_EVENTS_BTF_ARGS)) {
code->op = FETCH_OP_RETVAL;
return 0;
}
- err = TP_ERR_RETVAL_ON_PROBE;
- goto inval;
+ return parse_btf_arg(orig_arg, pcode, end, ctx);
}
len = str_has_prefix(arg, "stack");
@@ -676,7 +897,7 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
switch (arg[0]) {
case '$':
- ret = parse_probe_vars(arg + 1, type, code, ctx);
+ ret = parse_probe_vars(arg, type, pcode, end, ctx);
break;
case '%': /* named register */
@@ -795,6 +1016,8 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
code->op = deref;
code->offset = offset;
+ /* Reset the last type if used */
+ ctx->last_type = NULL;
}
break;
case '\\': /* Immediate value */
@@ -818,7 +1041,7 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
trace_probe_log_err(ctx->offset, NOSUP_BTFARG);
return -EINVAL;
}
- ret = parse_btf_arg(arg, code, ctx);
+ ret = parse_btf_arg(arg, pcode, end, ctx);
break;
}
}
@@ -964,17 +1187,22 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
goto out;
code[FETCH_INSN_MAX - 1].op = FETCH_OP_END;
+ ctx->last_type = NULL;
ret = parse_probe_arg(arg, parg->type, &code, &code[FETCH_INSN_MAX - 1],
ctx);
if (ret)
goto fail;
/* Update storing type if BTF is available */
- if (IS_ENABLED(CONFIG_PROBE_EVENTS_BTF_ARGS) && !t) {
- if (code->op == FETCH_OP_ARG)
- parg->type = parse_btf_arg_type(code->param, ctx);
- else if (code->op == FETCH_OP_RETVAL)
- parg->type = parse_btf_retval_type(ctx);
+ if (IS_ENABLED(CONFIG_PROBE_EVENTS_BTF_ARGS) &&
+ ctx->last_type) {
+ if (!t) {
+ parg->type = find_fetch_type_from_btf_type(ctx);
+ } else if (strstr(t, "string")) {
+ ret = check_prepare_btf_string_fetch(t, &code, ctx);
+ if (ret)
+ goto fail;
+ }
}
ret = -EINVAL;
@@ -1048,6 +1276,11 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
trace_probe_log_err(ctx->offset + t - arg, BAD_BITFIELD);
goto fail;
}
+ } else if (IS_ENABLED(CONFIG_PROBE_EVENTS_BTF_ARGS) &&
+ ctx->last_type) {
+ ret = parse_btf_bitfield(&code, ctx);
+ if (ret)
+ goto fail;
}
ret = -EINVAL;
/* Loop(Array) operation */
@@ -1231,7 +1464,6 @@ static int sprint_nth_btf_arg(int idx, const char *type,
char *buf, int bufsize,
struct traceprobe_parse_context *ctx)
{
- struct btf *btf = traceprobe_get_btf();
const char *name;
int ret;
@@ -1239,7 +1471,7 @@ static int sprint_nth_btf_arg(int idx, const char *type,
trace_probe_log_err(0, NO_BTFARG);
return -ENOENT;
}
- name = btf_name_by_offset(btf, ctx->params[idx].name_off);
+ name = btf_name_by_offset(ctx->btf, ctx->params[idx].name_off);
if (!name) {
trace_probe_log_err(0, NO_BTF_ENTRY);
return -ENOENT;
@@ -1260,7 +1492,6 @@ const char **traceprobe_expand_meta_args(int argc, const char *argv[],
const struct btf_param *params = NULL;
int i, j, n, used, ret, args_idx = -1;
const char **new_argv = NULL;
- int nr_params;
ret = argv_has_var_arg(argc, argv, &args_idx, ctx);
if (ret < 0)
@@ -1271,9 +1502,8 @@ const char **traceprobe_expand_meta_args(int argc, const char *argv[],
return NULL;
}
- params = find_btf_func_param(ctx->funcname, &nr_params,
- ctx->flags & TPARG_FL_TPOINT);
- if (IS_ERR_OR_NULL(params)) {
+ ret = query_btf_context(ctx);
+ if (ret < 0 || ctx->nr_params == 0) {
if (args_idx != -1) {
/* $arg* requires BTF info */
trace_probe_log_err(0, NOSUP_BTFARG);
@@ -1282,8 +1512,6 @@ const char **traceprobe_expand_meta_args(int argc, const char *argv[],
*new_argc = argc;
return NULL;
}
- ctx->params = params;
- ctx->nr_params = nr_params;
if (args_idx >= 0)
*new_argc = argc + ctx->nr_params - 1;
@@ -1298,7 +1526,7 @@ const char **traceprobe_expand_meta_args(int argc, const char *argv[],
for (i = 0, j = 0; i < argc; i++) {
trace_probe_log_set_index(i + 2);
if (i == args_idx) {
- for (n = 0; n < nr_params; n++) {
+ for (n = 0; n < ctx->nr_params; n++) {
ret = sprint_nth_btf_arg(n, "", buf + used,
bufsize - used, ctx);
if (ret < 0)
@@ -1337,6 +1565,11 @@ error:
return ERR_PTR(ret);
}
+void traceprobe_finish_parse(struct traceprobe_parse_context *ctx)
+{
+ clear_btf_context(ctx);
+}
+
int traceprobe_update_arg(struct probe_arg *arg)
{
struct fetch_insn *code = arg->code;
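/*
 * [Editorial illustration, not part of the patch] The BTF-based parsing added
 * above (query_btf_context(), parse_btf_arg(), parse_btf_field()) resolves
 * function parameters by name and walks '->' / '.' chains into structures.
 * A hypothetical probe definition (symbol and fields are examples only):
 *
 *   echo 'f vfs_read $arg1->f_pos $arg1->f_path.dentry' >> dynamic_events
 *
 * A plain '.' on an argument itself is rejected with NOSUP_DAT_ARG, since
 * non-pointer struct/union arguments are not supported.
 */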
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 7dde806be91e..02b432ae7513 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -383,9 +383,15 @@ static inline bool tparg_is_function_entry(unsigned int flags)
struct traceprobe_parse_context {
struct trace_event_call *event;
- const struct btf_param *params;
- s32 nr_params;
- const char *funcname;
+ /* BTF related parameters */
+ const char *funcname; /* Function name in BTF */
+ const struct btf_type *proto; /* Prototype of the function */
+ const struct btf_param *params; /* Parameter of the function */
+ s32 nr_params; /* The number of the parameters */
+ struct btf *btf; /* The BTF to be used */
+ const struct btf_type *last_type; /* Saved type */
+ u32 last_bitoffs; /* Saved bitoffs */
+ u32 last_bitsize; /* Saved bitsize */
unsigned int flags;
int offset;
};
@@ -400,6 +406,12 @@ const char **traceprobe_expand_meta_args(int argc, const char *argv[],
extern int traceprobe_update_arg(struct probe_arg *arg);
extern void traceprobe_free_probe_arg(struct probe_arg *arg);
+/*
+ * If either traceprobe_parse_probe_arg() or traceprobe_expand_meta_args() is called,
+ * this MUST be called to clean up the context and release its resources.
+ */
+void traceprobe_finish_parse(struct traceprobe_parse_context *ctx);
+
extern int traceprobe_split_symbol_offset(char *symbol, long *offset);
int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
char *buf, int offset);
@@ -495,7 +507,14 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
C(BAD_VAR_ARGS, "$arg* must be an independent parameter without name etc."),\
C(NOFENTRY_ARGS, "$arg* can be used only on function entry"), \
C(DOUBLE_ARGS, "$arg* can be used only once in the parameters"), \
- C(ARGS_2LONG, "$arg* failed because the argument list is too long"),
+ C(ARGS_2LONG, "$arg* failed because the argument list is too long"), \
+ C(ARGIDX_2BIG, "$argN index is too big"), \
+ C(NO_PTR_STRCT, "This is not a pointer to union/structure."), \
+ C(NOSUP_DAT_ARG, "Non pointer structure/union argument is not supported."),\
+ C(BAD_HYPHEN, "Failed to parse single hyphen. Forgot '>'?"), \
+ C(NO_BTF_FIELD, "This field is not found."), \
+ C(BAD_BTF_TID, "Failed to get BTF type info."),\
+ C(BAD_TYPE4STR, "This type does not fit for string."),
#undef C
#define C(a, b) TP_ERR_##a
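/*
 * [Editorial sketch, not part of the patch] Callers now pair argument parsing
 * with traceprobe_finish_parse() so the BTF reference held in the context is
 * dropped, roughly (flags value is an example):
 *
 *   struct traceprobe_parse_context ctx = { .flags = TPARG_FL_KERNEL };
 *
 *   ret = traceprobe_parse_probe_arg(&tp, i, argv[i], &ctx);
 *   traceprobe_finish_parse(&ctx);   // releases ctx.btf via clear_btf_context()
 */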
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 576b3bcb8ebd..99c051de412a 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -688,6 +688,7 @@ static int __trace_uprobe_create(int argc, const char **argv)
trace_probe_log_set_index(i + 2);
ret = traceprobe_parse_probe_arg(&tu->tp, i, argv[i], &ctx);
+ traceprobe_finish_parse(&ctx);
if (ret)
goto error;
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 800b4208dba9..c85825e17df8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -122,11 +122,6 @@ enum {
*
* L: pool->lock protected. Access with pool->lock held.
*
- * X: During normal operation, modification requires pool->lock and should
- * be done only from local cpu. Either disabling preemption on local
- * cpu or grabbing pool->lock is enough for read access. If
- * POOL_DISASSOCIATED is set, it's identical to L.
- *
* K: Only modified by worker while holding pool->lock. Can be safely read by
* self, while holding pool->lock or from IRQ context if %current is the
* kworker.
@@ -160,7 +155,7 @@ struct worker_pool {
int cpu; /* I: the associated cpu */
int node; /* I: the associated node ID */
int id; /* I: pool ID */
- unsigned int flags; /* X: flags */
+ unsigned int flags; /* L: flags */
unsigned long watchdog_ts; /* L: watchdog timestamp */
bool cpu_stall; /* WD: stalled cpu bound pool */
@@ -216,6 +211,7 @@ enum pool_workqueue_stats {
PWQ_STAT_CPU_TIME, /* total CPU time consumed */
PWQ_STAT_CPU_INTENSIVE, /* wq_cpu_intensive_thresh_us violations */
PWQ_STAT_CM_WAKEUP, /* concurrency-management worker wakeups */
+ PWQ_STAT_REPATRIATED, /* unbound workers brought back into scope */
PWQ_STAT_MAYDAY, /* maydays to rescuer */
PWQ_STAT_RESCUED, /* linked work items executed by rescuer */
@@ -262,12 +258,12 @@ struct pool_workqueue {
u64 stats[PWQ_NR_STATS];
/*
- * Release of unbound pwq is punted to system_wq. See put_pwq()
- * and pwq_unbound_release_workfn() for details. pool_workqueue
- * itself is also RCU protected so that the first pwq can be
- * determined without grabbing wq->mutex.
+ * Release of unbound pwq is punted to a kthread_worker. See put_pwq()
+ * and pwq_release_workfn() for details. pool_workqueue itself is also
+ * RCU protected so that the first pwq can be determined without
+ * grabbing wq->mutex.
*/
- struct work_struct unbound_release_work;
+ struct kthread_work release_work;
struct rcu_head rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);
@@ -326,14 +322,33 @@ struct workqueue_struct {
/* hot fields used during command issue, aligned to cacheline */
unsigned int flags ____cacheline_aligned; /* WQ: WQ_* flags */
- struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
- struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
+ struct pool_workqueue __percpu __rcu **cpu_pwq; /* I: per-cpu pwqs */
};
static struct kmem_cache *pwq_cache;
-static cpumask_var_t *wq_numa_possible_cpumask;
- /* possible CPUs of each node */
+/*
+ * Each pod type describes how CPUs should be grouped for unbound workqueues.
+ * See the comment above workqueue_attrs->affn_scope.
+ */
+struct wq_pod_type {
+ int nr_pods; /* number of pods */
+ cpumask_var_t *pod_cpus; /* pod -> cpus */
+ int *pod_node; /* pod -> node */
+ int *cpu_pod; /* cpu -> pod */
+};
+
+static struct wq_pod_type wq_pod_types[WQ_AFFN_NR_TYPES];
+static enum wq_affn_scope wq_affn_dfl = WQ_AFFN_CACHE;
+
+static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
+ [WQ_AFFN_DFL] = "default",
+ [WQ_AFFN_CPU] = "cpu",
+ [WQ_AFFN_SMT] = "smt",
+ [WQ_AFFN_CACHE] = "cache",
+ [WQ_AFFN_NUMA] = "numa",
+ [WQ_AFFN_SYSTEM] = "system",
+};
/*
* Per-cpu work items which run for longer than the following threshold are
@@ -345,19 +360,14 @@ static cpumask_var_t *wq_numa_possible_cpumask;
static unsigned long wq_cpu_intensive_thresh_us = ULONG_MAX;
module_param_named(cpu_intensive_thresh_us, wq_cpu_intensive_thresh_us, ulong, 0644);
-static bool wq_disable_numa;
-module_param_named(disable_numa, wq_disable_numa, bool, 0444);
-
/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);
static bool wq_online; /* can kworkers be created yet? */
-static bool wq_numa_enabled; /* unbound NUMA affinity enabled */
-
-/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
-static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
+/* buf for wq_update_unbound_pod_attrs(), protected by CPU hotplug exclusion */
+static struct workqueue_attrs *wq_update_pod_attrs_buf;
static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
@@ -371,6 +381,9 @@ static bool workqueue_freezing; /* PL: have wqs started freezing? */
/* PL&A: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;
+/* to further constrain wq_unbound_cpumask by a cmdline parameter */
+static struct cpumask wq_cmdline_cpumask __initdata;
+
/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);
@@ -400,6 +413,13 @@ static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
+/*
+ * I: kthread_worker to release pwq's. pwq release is initiated while holding a
+ * pool lock, so it must be bounced to process context. Use a dedicated kthread
+ * worker to avoid A-A deadlocks.
+ */
+static struct kthread_worker *pwq_release_worker;
+
struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
@@ -606,35 +626,6 @@ static int worker_pool_assign_id(struct worker_pool *pool)
return ret;
}
-/**
- * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
- * @wq: the target workqueue
- * @node: the node ID
- *
- * This must be called with any of wq_pool_mutex, wq->mutex or RCU
- * read locked.
- * If the pwq needs to be used beyond the locking in effect, the caller is
- * responsible for guaranteeing that the pwq stays online.
- *
- * Return: The unbound pool_workqueue for @node.
- */
-static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
- int node)
-{
- assert_rcu_or_wq_mutex_or_pool_mutex(wq);
-
- /*
- * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
- * delayed item is pending. The plan is to keep CPU -> NODE
- * mapping valid and stable across CPU on/offlines. Once that
- * happens, this workaround can be removed.
- */
- if (unlikely(node == NUMA_NO_NODE))
- return wq->dfl_pwq;
-
- return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
-}
-
static unsigned int work_color_to_flags(int color)
{
return color << WORK_STRUCT_COLOR_SHIFT;
@@ -825,11 +816,6 @@ static bool work_is_canceling(struct work_struct *work)
* they're being called with pool->lock held.
*/
-static bool __need_more_worker(struct worker_pool *pool)
-{
- return !pool->nr_running;
-}
-
/*
* Need to wake up a worker? Called from anything but currently
* running workers.
@@ -840,7 +826,7 @@ static bool __need_more_worker(struct worker_pool *pool)
*/
static bool need_more_worker(struct worker_pool *pool)
{
- return !list_empty(&pool->worklist) && __need_more_worker(pool);
+ return !list_empty(&pool->worklist) && !pool->nr_running;
}
/* Can I start working? Called from busy but !running workers. */
@@ -871,51 +857,18 @@ static bool too_many_workers(struct worker_pool *pool)
return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
-/*
- * Wake up functions.
- */
-
-/* Return the first idle worker. Called with pool->lock held. */
-static struct worker *first_idle_worker(struct worker_pool *pool)
-{
- if (unlikely(list_empty(&pool->idle_list)))
- return NULL;
-
- return list_first_entry(&pool->idle_list, struct worker, entry);
-}
-
-/**
- * wake_up_worker - wake up an idle worker
- * @pool: worker pool to wake worker from
- *
- * Wake up the first idle worker of @pool.
- *
- * CONTEXT:
- * raw_spin_lock_irq(pool->lock).
- */
-static void wake_up_worker(struct worker_pool *pool)
-{
- struct worker *worker = first_idle_worker(pool);
-
- if (likely(worker))
- wake_up_process(worker->task);
-}
-
/**
* worker_set_flags - set worker flags and adjust nr_running accordingly
* @worker: self
* @flags: flags to set
*
* Set @flags in @worker->flags and adjust nr_running accordingly.
- *
- * CONTEXT:
- * raw_spin_lock_irq(pool->lock)
*/
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
struct worker_pool *pool = worker->pool;
- WARN_ON_ONCE(worker->task != current);
+ lockdep_assert_held(&pool->lock);
/* If transitioning into NOT_RUNNING, adjust nr_running. */
if ((flags & WORKER_NOT_RUNNING) &&
@@ -932,16 +885,13 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags)
* @flags: flags to clear
*
* Clear @flags in @worker->flags and adjust nr_running accordingly.
- *
- * CONTEXT:
- * raw_spin_lock_irq(pool->lock)
*/
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
struct worker_pool *pool = worker->pool;
unsigned int oflags = worker->flags;
- WARN_ON_ONCE(worker->task != current);
+ lockdep_assert_held(&pool->lock);
worker->flags &= ~flags;
@@ -955,6 +905,244 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
pool->nr_running++;
}
+/* Return the first idle worker. Called with pool->lock held. */
+static struct worker *first_idle_worker(struct worker_pool *pool)
+{
+ if (unlikely(list_empty(&pool->idle_list)))
+ return NULL;
+
+ return list_first_entry(&pool->idle_list, struct worker, entry);
+}
+
+/**
+ * worker_enter_idle - enter idle state
+ * @worker: worker which is entering idle state
+ *
+ * @worker is entering idle state. Update stats and idle timer if
+ * necessary.
+ *
+ * LOCKING:
+ * raw_spin_lock_irq(pool->lock).
+ */
+static void worker_enter_idle(struct worker *worker)
+{
+ struct worker_pool *pool = worker->pool;
+
+ if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
+ WARN_ON_ONCE(!list_empty(&worker->entry) &&
+ (worker->hentry.next || worker->hentry.pprev)))
+ return;
+
+ /* can't use worker_set_flags(), also called from create_worker() */
+ worker->flags |= WORKER_IDLE;
+ pool->nr_idle++;
+ worker->last_active = jiffies;
+
+ /* idle_list is LIFO */
+ list_add(&worker->entry, &pool->idle_list);
+
+ if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
+ mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
+
+ /* Sanity check nr_running. */
+ WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
+}
+
+/**
+ * worker_leave_idle - leave idle state
+ * @worker: worker which is leaving idle state
+ *
+ * @worker is leaving idle state. Update stats.
+ *
+ * LOCKING:
+ * raw_spin_lock_irq(pool->lock).
+ */
+static void worker_leave_idle(struct worker *worker)
+{
+ struct worker_pool *pool = worker->pool;
+
+ if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
+ return;
+ worker_clr_flags(worker, WORKER_IDLE);
+ pool->nr_idle--;
+ list_del_init(&worker->entry);
+}
+
+/**
+ * find_worker_executing_work - find worker which is executing a work
+ * @pool: pool of interest
+ * @work: work to find worker for
+ *
+ * Find a worker which is executing @work on @pool by searching
+ * @pool->busy_hash which is keyed by the address of @work. For a worker
+ * to match, its current execution should match the address of @work and
+ * its work function. This is to avoid unwanted dependency between
+ * unrelated work executions through a work item being recycled while still
+ * being executed.
+ *
+ * This is a bit tricky. A work item may be freed once its execution
+ * starts and nothing prevents the freed area from being recycled for
+ * another work item. If the same work item address ends up being reused
+ * before the original execution finishes, workqueue will identify the
+ * recycled work item as currently executing and make it wait until the
+ * current execution finishes, introducing an unwanted dependency.
+ *
+ * This function checks the work item address and work function to avoid
+ * false positives. Note that this isn't complete as one may construct a
+ * work function which can introduce dependency onto itself through a
+ * recycled work item. Well, if somebody wants to shoot oneself in the
+ * foot that badly, there's only so much we can do, and if such deadlock
+ * actually occurs, it should be easy to locate the culprit work function.
+ *
+ * CONTEXT:
+ * raw_spin_lock_irq(pool->lock).
+ *
+ * Return:
+ * Pointer to worker which is executing @work if found, %NULL
+ * otherwise.
+ */
+static struct worker *find_worker_executing_work(struct worker_pool *pool,
+ struct work_struct *work)
+{
+ struct worker *worker;
+
+ hash_for_each_possible(pool->busy_hash, worker, hentry,
+ (unsigned long)work)
+ if (worker->current_work == work &&
+ worker->current_func == work->func)
+ return worker;
+
+ return NULL;
+}
+
+/**
+ * move_linked_works - move linked works to a list
+ * @work: start of series of works to be scheduled
+ * @head: target list to append @work to
+ * @nextp: out parameter for nested worklist walking
+ *
+ * Schedule linked works starting from @work to @head. Work series to be
+ * scheduled starts at @work and includes any consecutive work with
+ * WORK_STRUCT_LINKED set in its predecessor. See assign_work() for details on
+ * @nextp.
+ *
+ * CONTEXT:
+ * raw_spin_lock_irq(pool->lock).
+ */
+static void move_linked_works(struct work_struct *work, struct list_head *head,
+ struct work_struct **nextp)
+{
+ struct work_struct *n;
+
+ /*
+ * Linked worklist will always end before the end of the list,
+ * use NULL for list head.
+ */
+ list_for_each_entry_safe_from(work, n, NULL, entry) {
+ list_move_tail(&work->entry, head);
+ if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
+ break;
+ }
+
+ /*
+ * If we're already inside safe list traversal and have moved
+ * multiple works to the scheduled queue, the next position
+ * needs to be updated.
+ */
+ if (nextp)
+ *nextp = n;
+}
+
+/**
+ * assign_work - assign a work item and its linked work items to a worker
+ * @work: work to assign
+ * @worker: worker to assign to
+ * @nextp: out parameter for nested worklist walking
+ *
+ * Assign @work and its linked work items to @worker. If @work is already being
+ * executed by another worker in the same pool, it'll be punted there.
+ *
+ * If @nextp is not NULL, it's updated to point to the next work of the last
+ * scheduled work. This allows assign_work() to be nested inside
+ * list_for_each_entry_safe().
+ *
+ * Returns %true if @work was successfully assigned to @worker. %false if @work
+ * was punted to another worker already executing it.
+ */
+static bool assign_work(struct work_struct *work, struct worker *worker,
+ struct work_struct **nextp)
+{
+ struct worker_pool *pool = worker->pool;
+ struct worker *collision;
+
+ lockdep_assert_held(&pool->lock);
+
+ /*
+ * A single work shouldn't be executed concurrently by multiple workers.
+ * __queue_work() ensures that @work doesn't jump to a different pool
+ * while still running in the previous pool. Here, we should ensure that
+ * @work is not executed concurrently by multiple workers from the same
+ * pool. Check whether anyone is already processing the work. If so,
+ * defer the work to the currently executing one.
+ */
+ collision = find_worker_executing_work(pool, work);
+ if (unlikely(collision)) {
+ move_linked_works(work, &collision->scheduled, nextp);
+ return false;
+ }
+
+ move_linked_works(work, &worker->scheduled, nextp);
+ return true;
+}
+
+/**
+ * kick_pool - wake up an idle worker if necessary
+ * @pool: pool to kick
+ *
+ * @pool may have pending work items. Wake up worker if necessary. Returns
+ * whether a worker was woken up.
+ */
+static bool kick_pool(struct worker_pool *pool)
+{
+ struct worker *worker = first_idle_worker(pool);
+ struct task_struct *p;
+
+ lockdep_assert_held(&pool->lock);
+
+ if (!need_more_worker(pool) || !worker)
+ return false;
+
+ p = worker->task;
+
+#ifdef CONFIG_SMP
+ /*
+ * Idle @worker is about to execute @work and waking up provides an
+ * opportunity to migrate @worker at a lower cost by setting the task's
+ * wake_cpu field. Let's see if we want to move @worker to improve
+ * execution locality.
+ *
+ * We're waking the worker that went idle the latest and there's some
+ * chance that @worker is marked idle but hasn't gone off CPU yet. If
+ * so, setting the wake_cpu won't do anything. As this is a best-effort
+ * optimization and the race window is narrow, let's leave as-is for
+ * now. If this becomes pronounced, we can skip over workers which are
+ * still on cpu when picking an idle worker.
+ *
+ * If @pool has non-strict affinity, @worker might have ended up outside
+ * its affinity scope. Repatriate.
+ */
+ if (!pool->attrs->affn_strict &&
+ !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
+ struct work_struct *work = list_first_entry(&pool->worklist,
+ struct work_struct, entry);
+ p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask);
+ get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
+ }
+#endif
+ wake_up_process(p);
+ return true;
+}
+
#ifdef CONFIG_WQ_CPU_INTENSIVE_REPORT
/*
@@ -1120,10 +1308,9 @@ void wq_worker_sleeping(struct task_struct *task)
}
pool->nr_running--;
- if (need_more_worker(pool)) {
+ if (kick_pool(pool))
worker->current_pwq->stats[PWQ_STAT_CM_WAKEUP]++;
- wake_up_worker(pool);
- }
+
raw_spin_unlock_irq(&pool->lock);
}
@@ -1171,10 +1358,8 @@ void wq_worker_tick(struct task_struct *task)
wq_cpu_intensive_report(worker->current_func);
pwq->stats[PWQ_STAT_CPU_INTENSIVE]++;
- if (need_more_worker(pool)) {
+ if (kick_pool(pool))
pwq->stats[PWQ_STAT_CM_WAKEUP]++;
- wake_up_worker(pool);
- }
raw_spin_unlock(&pool->lock);
}
@@ -1211,94 +1396,6 @@ work_func_t wq_worker_last_func(struct task_struct *task)
}
/**
- * find_worker_executing_work - find worker which is executing a work
- * @pool: pool of interest
- * @work: work to find worker for
- *
- * Find a worker which is executing @work on @pool by searching
- * @pool->busy_hash which is keyed by the address of @work. For a worker
- * to match, its current execution should match the address of @work and
- * its work function. This is to avoid unwanted dependency between
- * unrelated work executions through a work item being recycled while still
- * being executed.
- *
- * This is a bit tricky. A work item may be freed once its execution
- * starts and nothing prevents the freed area from being recycled for
- * another work item. If the same work item address ends up being reused
- * before the original execution finishes, workqueue will identify the
- * recycled work item as currently executing and make it wait until the
- * current execution finishes, introducing an unwanted dependency.
- *
- * This function checks the work item address and work function to avoid
- * false positives. Note that this isn't complete as one may construct a
- * work function which can introduce dependency onto itself through a
- * recycled work item. Well, if somebody wants to shoot oneself in the
- * foot that badly, there's only so much we can do, and if such deadlock
- * actually occurs, it should be easy to locate the culprit work function.
- *
- * CONTEXT:
- * raw_spin_lock_irq(pool->lock).
- *
- * Return:
- * Pointer to worker which is executing @work if found, %NULL
- * otherwise.
- */
-static struct worker *find_worker_executing_work(struct worker_pool *pool,
- struct work_struct *work)
-{
- struct worker *worker;
-
- hash_for_each_possible(pool->busy_hash, worker, hentry,
- (unsigned long)work)
- if (worker->current_work == work &&
- worker->current_func == work->func)
- return worker;
-
- return NULL;
-}
-
-/**
- * move_linked_works - move linked works to a list
- * @work: start of series of works to be scheduled
- * @head: target list to append @work to
- * @nextp: out parameter for nested worklist walking
- *
- * Schedule linked works starting from @work to @head. Work series to
- * be scheduled starts at @work and includes any consecutive work with
- * WORK_STRUCT_LINKED set in its predecessor.
- *
- * If @nextp is not NULL, it's updated to point to the next work of
- * the last scheduled work. This allows move_linked_works() to be
- * nested inside outer list_for_each_entry_safe().
- *
- * CONTEXT:
- * raw_spin_lock_irq(pool->lock).
- */
-static void move_linked_works(struct work_struct *work, struct list_head *head,
- struct work_struct **nextp)
-{
- struct work_struct *n;
-
- /*
- * Linked worklist will always end before the end of the list,
- * use NULL for list head.
- */
- list_for_each_entry_safe_from(work, n, NULL, entry) {
- list_move_tail(&work->entry, head);
- if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
- break;
- }
-
- /*
- * If we're already inside safe list traversal and have moved
- * multiple works to the scheduled queue, the next position
- * needs to be updated.
- */
- if (nextp)
- *nextp = n;
-}
-
-/**
* get_pwq - get an extra reference on the specified pool_workqueue
* @pwq: pool_workqueue to get
*
@@ -1324,17 +1421,11 @@ static void put_pwq(struct pool_workqueue *pwq)
lockdep_assert_held(&pwq->pool->lock);
if (likely(--pwq->refcnt))
return;
- if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
- return;
/*
- * @pwq can't be released under pool->lock, bounce to
- * pwq_unbound_release_workfn(). This never recurses on the same
- * pool->lock as this path is taken only for unbound workqueues and
- * the release work item is scheduled on a per-cpu workqueue. To
- * avoid lockdep warning, unbound pool->locks are given lockdep
- * subclass of 1 in get_unbound_pool().
+ * @pwq can't be released under pool->lock, bounce to a dedicated
+ * kthread_worker to avoid A-A deadlocks.
*/
- schedule_work(&pwq->unbound_release_work);
+ kthread_queue_work(pwq_release_worker, &pwq->release_work);
}
/**
@@ -1550,7 +1641,7 @@ fail:
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
struct list_head *head, unsigned int extra_flags)
{
- struct worker_pool *pool = pwq->pool;
+ debug_work_activate(work);
/* record the work call stack in order to print it in KASAN reports */
kasan_record_aux_stack_noalloc(work);
@@ -1559,9 +1650,6 @@ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
set_work_pwq(work, pwq, extra_flags);
list_add_tail(&work->entry, head);
get_pwq(pwq);
-
- if (__need_more_worker(pool))
- wake_up_worker(pool);
}
/*
@@ -1615,8 +1703,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
struct work_struct *work)
{
struct pool_workqueue *pwq;
- struct worker_pool *last_pool;
- struct list_head *worklist;
+ struct worker_pool *last_pool, *pool;
unsigned int work_flags;
unsigned int req_cpu = cpu;
@@ -1640,23 +1727,23 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
rcu_read_lock();
retry:
/* pwq which will be used unless @work is executing elsewhere */
- if (wq->flags & WQ_UNBOUND) {
- if (req_cpu == WORK_CPU_UNBOUND)
+ if (req_cpu == WORK_CPU_UNBOUND) {
+ if (wq->flags & WQ_UNBOUND)
cpu = wq_select_unbound_cpu(raw_smp_processor_id());
- pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
- } else {
- if (req_cpu == WORK_CPU_UNBOUND)
+ else
cpu = raw_smp_processor_id();
- pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
}
+ pwq = rcu_dereference(*per_cpu_ptr(wq->cpu_pwq, cpu));
+ pool = pwq->pool;
+
/*
* If @work was previously on a different pool, it might still be
* running there, in which case the work needs to be queued on that
* pool to guarantee non-reentrancy.
*/
last_pool = get_work_pool(work);
- if (last_pool && last_pool != pwq->pool) {
+ if (last_pool && last_pool != pool) {
struct worker *worker;
raw_spin_lock(&last_pool->lock);
@@ -1665,26 +1752,27 @@ retry:
if (worker && worker->current_pwq->wq == wq) {
pwq = worker->current_pwq;
+ pool = pwq->pool;
+ WARN_ON_ONCE(pool != last_pool);
} else {
/* meh... not running there, queue here */
raw_spin_unlock(&last_pool->lock);
- raw_spin_lock(&pwq->pool->lock);
+ raw_spin_lock(&pool->lock);
}
} else {
- raw_spin_lock(&pwq->pool->lock);
+ raw_spin_lock(&pool->lock);
}
/*
- * pwq is determined and locked. For unbound pools, we could have
- * raced with pwq release and it could already be dead. If its
- * refcnt is zero, repeat pwq selection. Note that pwqs never die
- * without another pwq replacing it in the numa_pwq_tbl or while
- * work items are executing on it, so the retrying is guaranteed to
- * make forward-progress.
+ * pwq is determined and locked. For unbound pools, we could have raced
+ * with pwq release and it could already be dead. If its refcnt is zero,
+ * repeat pwq selection. Note that unbound pwqs never die without
+ * another pwq replacing it in cpu_pwq or while work items are executing
+ * on it, so the retrying is guaranteed to make forward-progress.
*/
if (unlikely(!pwq->refcnt)) {
if (wq->flags & WQ_UNBOUND) {
- raw_spin_unlock(&pwq->pool->lock);
+ raw_spin_unlock(&pool->lock);
cpu_relax();
goto retry;
}
@@ -1703,21 +1791,20 @@ retry:
work_flags = work_color_to_flags(pwq->work_color);
if (likely(pwq->nr_active < pwq->max_active)) {
+ if (list_empty(&pool->worklist))
+ pool->watchdog_ts = jiffies;
+
trace_workqueue_activate_work(work);
pwq->nr_active++;
- worklist = &pwq->pool->worklist;
- if (list_empty(worklist))
- pwq->pool->watchdog_ts = jiffies;
+ insert_work(pwq, work, &pool->worklist, work_flags);
+ kick_pool(pool);
} else {
work_flags |= WORK_STRUCT_INACTIVE;
- worklist = &pwq->inactive_works;
+ insert_work(pwq, work, &pwq->inactive_works, work_flags);
}
- debug_work_activate(work);
- insert_work(pwq, work, worklist, work_flags);
-
out:
- raw_spin_unlock(&pwq->pool->lock);
+ raw_spin_unlock(&pool->lock);
rcu_read_unlock();
}
@@ -1754,7 +1841,7 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
EXPORT_SYMBOL(queue_work_on);
/**
- * workqueue_select_cpu_near - Select a CPU based on NUMA node
+ * select_numa_node_cpu - Select a CPU based on NUMA node
* @node: NUMA node ID that we want to select a CPU from
*
* This function will attempt to find a "random" cpu available on a given
@@ -1762,14 +1849,10 @@ EXPORT_SYMBOL(queue_work_on);
* WORK_CPU_UNBOUND indicating that we should just schedule to any
* available CPU if we need to schedule this work.
*/
-static int workqueue_select_cpu_near(int node)
+static int select_numa_node_cpu(int node)
{
int cpu;
- /* No point in doing this if NUMA isn't enabled for workqueues */
- if (!wq_numa_enabled)
- return WORK_CPU_UNBOUND;
-
/* Delay binding to CPU if node is not valid or online */
if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
return WORK_CPU_UNBOUND;
@@ -1826,7 +1909,7 @@ bool queue_work_node(int node, struct workqueue_struct *wq,
local_irq_save(flags);
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
- int cpu = workqueue_select_cpu_near(node);
+ int cpu = select_numa_node_cpu(node);
__queue_work(cpu, wq, work);
ret = true;
@@ -1981,60 +2064,6 @@ bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
}
EXPORT_SYMBOL(queue_rcu_work);
-/**
- * worker_enter_idle - enter idle state
- * @worker: worker which is entering idle state
- *
- * @worker is entering idle state. Update stats and idle timer if
- * necessary.
- *
- * LOCKING:
- * raw_spin_lock_irq(pool->lock).
- */
-static void worker_enter_idle(struct worker *worker)
-{
- struct worker_pool *pool = worker->pool;
-
- if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
- WARN_ON_ONCE(!list_empty(&worker->entry) &&
- (worker->hentry.next || worker->hentry.pprev)))
- return;
-
- /* can't use worker_set_flags(), also called from create_worker() */
- worker->flags |= WORKER_IDLE;
- pool->nr_idle++;
- worker->last_active = jiffies;
-
- /* idle_list is LIFO */
- list_add(&worker->entry, &pool->idle_list);
-
- if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
- mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-
- /* Sanity check nr_running. */
- WARN_ON_ONCE(pool->nr_workers == pool->nr_idle && pool->nr_running);
-}
-
-/**
- * worker_leave_idle - leave idle state
- * @worker: worker which is leaving idle state
- *
- * @worker is leaving idle state. Update stats.
- *
- * LOCKING:
- * raw_spin_lock_irq(pool->lock).
- */
-static void worker_leave_idle(struct worker *worker)
-{
- struct worker_pool *pool = worker->pool;
-
- if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
- return;
- worker_clr_flags(worker, WORKER_IDLE);
- pool->nr_idle--;
- list_del_init(&worker->entry);
-}
-
static struct worker *alloc_worker(int node)
{
struct worker *worker;
@@ -2050,6 +2079,14 @@ static struct worker *alloc_worker(int node)
return worker;
}
+static cpumask_t *pool_allowed_cpus(struct worker_pool *pool)
+{
+ if (pool->cpu < 0 && pool->attrs->affn_strict)
+ return pool->attrs->__pod_cpumask;
+ else
+ return pool->attrs->cpumask;
+}
+
/**
* worker_attach_to_pool() - attach a worker to a pool
* @worker: worker to be attached
@@ -2075,7 +2112,7 @@ static void worker_attach_to_pool(struct worker *worker,
kthread_set_per_cpu(worker->task, pool->cpu);
if (worker->rescue_wq)
- set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
+ set_cpus_allowed_ptr(worker->task, pool_allowed_cpus(pool));
list_add_tail(&worker->node, &pool->workers);
worker->pool = pool;
@@ -2167,16 +2204,25 @@ static struct worker *create_worker(struct worker_pool *pool)
}
set_user_nice(worker->task, pool->attrs->nice);
- kthread_bind_mask(worker->task, pool->attrs->cpumask);
+ kthread_bind_mask(worker->task, pool_allowed_cpus(pool));
/* successful, attach the worker to the pool */
worker_attach_to_pool(worker, pool);
/* start the newly created worker */
raw_spin_lock_irq(&pool->lock);
+
worker->pool->nr_workers++;
worker_enter_idle(worker);
+ kick_pool(pool);
+
+ /*
+ * @worker is waiting on a completion in kthread() and will trigger hung
+ * check if not woken up soon. As kick_pool() might not have woken it
+ * up, wake it up explicitly once more.
+ */
wake_up_process(worker->task);
+
raw_spin_unlock_irq(&pool->lock);
return worker;
@@ -2304,9 +2350,8 @@ static void idle_worker_timeout(struct timer_list *t)
static void idle_cull_fn(struct work_struct *work)
{
struct worker_pool *pool = container_of(work, struct worker_pool, idle_cull_work);
- struct list_head cull_list;
+ LIST_HEAD(cull_list);
- INIT_LIST_HEAD(&cull_list);
/*
* Grabbing wq_pool_attach_mutex here ensures an already-running worker
* cannot proceed beyond worker_detach_from_pool() in its self-destruct
@@ -2495,7 +2540,6 @@ __acquires(&pool->lock)
struct pool_workqueue *pwq = get_work_pwq(work);
struct worker_pool *pool = worker->pool;
unsigned long work_data;
- struct worker *collision;
#ifdef CONFIG_LOCKDEP
/*
* It is permissible to free the struct work_struct from
@@ -2512,18 +2556,6 @@ __acquires(&pool->lock)
WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
raw_smp_processor_id() != pool->cpu);
- /*
- * A single work shouldn't be executed concurrently by
- * multiple workers on a single cpu. Check whether anyone is
- * already processing the work. If so, defer the work to the
- * currently executing one.
- */
- collision = find_worker_executing_work(pool, work);
- if (unlikely(collision)) {
- move_linked_works(work, &collision->scheduled, NULL);
- return;
- }
-
/* claim and dequeue */
debug_work_deactivate(work);
hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
@@ -2552,14 +2584,12 @@ __acquires(&pool->lock)
worker_set_flags(worker, WORKER_CPU_INTENSIVE);
/*
- * Wake up another worker if necessary. The condition is always
- * false for normal per-cpu workers since nr_running would always
- * be >= 1 at this point. This is used to chain execution of the
- * pending work items for WORKER_NOT_RUNNING workers such as the
- * UNBOUND and CPU_INTENSIVE ones.
+ * Kick @pool if necessary. It's always noop for per-cpu worker pools
+ * since nr_running would always be >= 1 at this point. This is used to
+ * chain execution of the pending work items for WORKER_NOT_RUNNING
+ * workers such as the UNBOUND and CPU_INTENSIVE ones.
*/
- if (need_more_worker(pool))
- wake_up_worker(pool);
+ kick_pool(pool);
/*
* Record the last pool and clear PENDING which should be the last
@@ -2569,6 +2599,7 @@ __acquires(&pool->lock)
*/
set_work_pool_and_clear_pending(work, pool->id);
+ pwq->stats[PWQ_STAT_STARTED]++;
raw_spin_unlock_irq(&pool->lock);
lock_map_acquire(&pwq->wq->lockdep_map);
@@ -2595,7 +2626,6 @@ __acquires(&pool->lock)
* workqueues), so hiding them isn't a problem.
*/
lockdep_invariant_state(true);
- pwq->stats[PWQ_STAT_STARTED]++;
trace_workqueue_execute_start(work);
worker->current_func(work);
/*
@@ -2661,9 +2691,15 @@ __acquires(&pool->lock)
*/
static void process_scheduled_works(struct worker *worker)
{
- while (!list_empty(&worker->scheduled)) {
- struct work_struct *work = list_first_entry(&worker->scheduled,
- struct work_struct, entry);
+ struct work_struct *work;
+ bool first = true;
+
+ while ((work = list_first_entry_or_null(&worker->scheduled,
+ struct work_struct, entry))) {
+ if (first) {
+ worker->pool->watchdog_ts = jiffies;
+ first = false;
+ }
process_one_work(worker, work);
}
}
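/*
 * The collision check removed from process_one_work() above appears to have
 * moved into a new assign_work() helper, which worker_thread() and
 * rescuer_thread() call in the hunks below. A hedged sketch of its likely
 * shape, reconstructed from those call sites (only the helper's name appears
 * in this diff; the body here is an assumption):
 */
static bool assign_work_sketch(struct work_struct *work, struct worker *worker,
			       struct work_struct **nextp)
{
	struct worker_pool *pool = worker->pool;
	struct worker *collision;

	lockdep_assert_held(&pool->lock);

	/* someone else is already executing @work; defer to that worker */
	collision = find_worker_executing_work(pool, work);
	if (unlikely(collision)) {
		move_linked_works(work, &collision->scheduled, nextp);
		return false;
	}

	/* otherwise claim @work (and any linked works) onto @worker */
	move_linked_works(work, &worker->scheduled, nextp);
	return true;
}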
@@ -2744,17 +2780,8 @@ recheck:
list_first_entry(&pool->worklist,
struct work_struct, entry);
- pool->watchdog_ts = jiffies;
-
- if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
- /* optimization path, not strictly necessary */
- process_one_work(worker, work);
- if (unlikely(!list_empty(&worker->scheduled)))
- process_scheduled_works(worker);
- } else {
- move_linked_works(work, &worker->scheduled, NULL);
+ if (assign_work(work, worker, NULL))
process_scheduled_works(worker);
- }
} while (keep_working(pool));
worker_set_flags(worker, WORKER_PREP);
@@ -2798,7 +2825,6 @@ static int rescuer_thread(void *__rescuer)
{
struct worker *rescuer = __rescuer;
struct workqueue_struct *wq = rescuer->rescue_wq;
- struct list_head *scheduled = &rescuer->scheduled;
bool should_stop;
set_user_nice(current, RESCUER_NICE_LEVEL);
@@ -2829,7 +2855,6 @@ repeat:
struct pool_workqueue, mayday_node);
struct worker_pool *pool = pwq->pool;
struct work_struct *work, *n;
- bool first = true;
__set_current_state(TASK_RUNNING);
list_del_init(&pwq->mayday_node);
@@ -2844,18 +2869,14 @@ repeat:
* Slurp in all works issued via this workqueue and
* process'em.
*/
- WARN_ON_ONCE(!list_empty(scheduled));
+ WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
list_for_each_entry_safe(work, n, &pool->worklist, entry) {
- if (get_work_pwq(work) == pwq) {
- if (first)
- pool->watchdog_ts = jiffies;
- move_linked_works(work, scheduled, &n);
+ if (get_work_pwq(work) == pwq &&
+ assign_work(work, rescuer, &n))
pwq->stats[PWQ_STAT_RESCUED]++;
- }
- first = false;
}
- if (!list_empty(scheduled)) {
+ if (!list_empty(&rescuer->scheduled)) {
process_scheduled_works(rescuer);
/*
@@ -2888,12 +2909,10 @@ repeat:
put_pwq(pwq);
/*
- * Leave this pool. If need_more_worker() is %true, notify a
- * regular worker; otherwise, we end up with 0 concurrency
- * and stalling the execution.
+ * Leave this pool. Notify regular workers; otherwise, we end up
+ * with 0 concurrency and stalling the execution.
*/
- if (need_more_worker(pool))
- wake_up_worker(pool);
+ kick_pool(pool);
raw_spin_unlock_irq(&pool->lock);
@@ -3028,7 +3047,6 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
pwq->nr_in_flight[work_color]++;
work_flags |= work_color_to_flags(work_color);
- debug_work_activate(&barr->work);
insert_work(pwq, &barr->work, head, work_flags);
}
@@ -3691,6 +3709,7 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)
{
if (attrs) {
free_cpumask_var(attrs->cpumask);
+ free_cpumask_var(attrs->__pod_cpumask);
kfree(attrs);
}
}
@@ -3712,8 +3731,11 @@ struct workqueue_attrs *alloc_workqueue_attrs(void)
goto fail;
if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
goto fail;
+ if (!alloc_cpumask_var(&attrs->__pod_cpumask, GFP_KERNEL))
+ goto fail;
cpumask_copy(attrs->cpumask, cpu_possible_mask);
+ attrs->affn_scope = WQ_AFFN_DFL;
return attrs;
fail:
free_workqueue_attrs(attrs);
@@ -3725,12 +3747,26 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
{
to->nice = from->nice;
cpumask_copy(to->cpumask, from->cpumask);
+ cpumask_copy(to->__pod_cpumask, from->__pod_cpumask);
+ to->affn_strict = from->affn_strict;
+
/*
- * Unlike hash and equality test, this function doesn't ignore
- * ->no_numa as it is used for both pool and wq attrs. Instead,
- * get_unbound_pool() explicitly clears ->no_numa after copying.
+ * Unlike hash and equality test, copying shouldn't ignore wq-only
+ * fields as copying is used for both pool and wq attrs. Instead,
+ * get_unbound_pool() explicitly clears the fields.
*/
- to->no_numa = from->no_numa;
+ to->affn_scope = from->affn_scope;
+ to->ordered = from->ordered;
+}
+
+/*
+ * Some attrs fields are workqueue-only. Clear them for worker_pool's. See the
+ * comments in 'struct workqueue_attrs' definition.
+ */
+static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs)
+{
+ attrs->affn_scope = WQ_AFFN_NR_TYPES;
+ attrs->ordered = false;
}
/* hash value of the content of @attr */
@@ -3741,6 +3777,9 @@ static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
hash = jhash_1word(attrs->nice, hash);
hash = jhash(cpumask_bits(attrs->cpumask),
BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
+ hash = jhash(cpumask_bits(attrs->__pod_cpumask),
+ BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
+ hash = jhash_1word(attrs->affn_strict, hash);
return hash;
}
@@ -3752,9 +3791,57 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
return false;
if (!cpumask_equal(a->cpumask, b->cpumask))
return false;
+ if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask))
+ return false;
+ if (a->affn_strict != b->affn_strict)
+ return false;
return true;
}
+/* Update @attrs with actually available CPUs */
+static void wqattrs_actualize_cpumask(struct workqueue_attrs *attrs,
+ const cpumask_t *unbound_cpumask)
+{
+ /*
+ * Calculate the effective CPU mask of @attrs given @unbound_cpumask. If
+ * @attrs->cpumask doesn't overlap with @unbound_cpumask, we fallback to
+ * @unbound_cpumask.
+ */
+ cpumask_and(attrs->cpumask, attrs->cpumask, unbound_cpumask);
+ if (unlikely(cpumask_empty(attrs->cpumask)))
+ cpumask_copy(attrs->cpumask, unbound_cpumask);
+}
+
+/* find wq_pod_type to use for @attrs */
+static const struct wq_pod_type *
+wqattrs_pod_type(const struct workqueue_attrs *attrs)
+{
+ enum wq_affn_scope scope;
+ struct wq_pod_type *pt;
+
+ /* to synchronize access to wq_affn_dfl */
+ lockdep_assert_held(&wq_pool_mutex);
+
+ if (attrs->affn_scope == WQ_AFFN_DFL)
+ scope = wq_affn_dfl;
+ else
+ scope = attrs->affn_scope;
+
+ pt = &wq_pod_types[scope];
+
+ if (!WARN_ON_ONCE(attrs->affn_scope == WQ_AFFN_NR_TYPES) &&
+ likely(pt->nr_pods))
+ return pt;
+
+ /*
+ * Before workqueue_init_topology(), only SYSTEM is available which is
+ * initialized in workqueue_init_early().
+ */
+ pt = &wq_pod_types[WQ_AFFN_SYSTEM];
+ BUG_ON(!pt->nr_pods);
+ return pt;
+}
+
/**
* init_worker_pool - initialize a newly zalloc'd worker_pool
* @pool: worker_pool to initialize
@@ -3793,6 +3880,9 @@ static int init_worker_pool(struct worker_pool *pool)
pool->attrs = alloc_workqueue_attrs();
if (!pool->attrs)
return -ENOMEM;
+
+ wqattrs_clear_for_pool(pool->attrs);
+
return 0;
}
@@ -3840,12 +3930,8 @@ static void rcu_free_wq(struct rcu_head *rcu)
container_of(rcu, struct workqueue_struct, rcu);
wq_free_lockdep(wq);
-
- if (!(wq->flags & WQ_UNBOUND))
- free_percpu(wq->cpu_pwqs);
- else
- free_workqueue_attrs(wq->unbound_attrs);
-
+ free_percpu(wq->cpu_pwq);
+ free_workqueue_attrs(wq->unbound_attrs);
kfree(wq);
}
@@ -3872,10 +3958,8 @@ static void rcu_free_pool(struct rcu_head *rcu)
static void put_unbound_pool(struct worker_pool *pool)
{
DECLARE_COMPLETION_ONSTACK(detach_completion);
- struct list_head cull_list;
struct worker *worker;
-
- INIT_LIST_HEAD(&cull_list);
+ LIST_HEAD(cull_list);
lockdep_assert_held(&wq_pool_mutex);
@@ -3959,10 +4043,10 @@ static void put_unbound_pool(struct worker_pool *pool)
*/
static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
{
+ struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_NUMA];
u32 hash = wqattrs_hash(attrs);
struct worker_pool *pool;
- int node;
- int target_node = NUMA_NO_NODE;
+ int pod, node = NUMA_NO_NODE;
lockdep_assert_held(&wq_pool_mutex);
@@ -3974,31 +4058,22 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
}
}
- /* if cpumask is contained inside a NUMA node, we belong to that node */
- if (wq_numa_enabled) {
- for_each_node(node) {
- if (cpumask_subset(attrs->cpumask,
- wq_numa_possible_cpumask[node])) {
- target_node = node;
- break;
- }
+ /* If __pod_cpumask is contained inside a NUMA pod, that's our node */
+ for (pod = 0; pod < pt->nr_pods; pod++) {
+ if (cpumask_subset(attrs->__pod_cpumask, pt->pod_cpus[pod])) {
+ node = pt->pod_node[pod];
+ break;
}
}
/* nope, create a new one */
- pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
+ pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, node);
if (!pool || init_worker_pool(pool) < 0)
goto fail;
- lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
+ pool->node = node;
copy_workqueue_attrs(pool->attrs, attrs);
- pool->node = target_node;
-
- /*
- * no_numa isn't a worker_pool attribute, always clear it. See
- * 'struct workqueue_attrs' comments for detail.
- */
- pool->attrs->no_numa = false;
+ wqattrs_clear_for_pool(pool->attrs);
if (worker_pool_assign_id(pool) < 0)
goto fail;
@@ -4024,34 +4099,33 @@ static void rcu_free_pwq(struct rcu_head *rcu)
}
/*
- * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
- * and needs to be destroyed.
+ * Scheduled on pwq_release_worker by put_pwq() when an unbound pwq hits zero
+ * refcnt and needs to be destroyed.
*/
-static void pwq_unbound_release_workfn(struct work_struct *work)
+static void pwq_release_workfn(struct kthread_work *work)
{
struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
- unbound_release_work);
+ release_work);
struct workqueue_struct *wq = pwq->wq;
struct worker_pool *pool = pwq->pool;
bool is_last = false;
/*
- * when @pwq is not linked, it doesn't hold any reference to the
+ * When @pwq is not linked, it doesn't hold any reference to the
* @wq, and @wq is invalid to access.
*/
if (!list_empty(&pwq->pwqs_node)) {
- if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
- return;
-
mutex_lock(&wq->mutex);
list_del_rcu(&pwq->pwqs_node);
is_last = list_empty(&wq->pwqs);
mutex_unlock(&wq->mutex);
}
- mutex_lock(&wq_pool_mutex);
- put_unbound_pool(pool);
- mutex_unlock(&wq_pool_mutex);
+ if (wq->flags & WQ_UNBOUND) {
+ mutex_lock(&wq_pool_mutex);
+ put_unbound_pool(pool);
+ mutex_unlock(&wq_pool_mutex);
+ }
call_rcu(&pwq->rcu, rcu_free_pwq);
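/*
 * Correspondingly, put_pwq() (not shown in this excerpt) presumably now hands
 * the final release off to the dedicated kthread_worker instead of bouncing it
 * through system_wq, along the lines of:
 *
 *	kthread_queue_work(pwq_release_worker, &pwq->release_work);
 *
 * The exact call site is an assumption; only the release_work machinery itself
 * is visible in this hunk.
 */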
@@ -4095,24 +4169,13 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
* is updated and visible.
*/
if (!freezable || !workqueue_freezing) {
- bool kick = false;
-
pwq->max_active = wq->saved_max_active;
while (!list_empty(&pwq->inactive_works) &&
- pwq->nr_active < pwq->max_active) {
+ pwq->nr_active < pwq->max_active)
pwq_activate_first_inactive(pwq);
- kick = true;
- }
- /*
- * Need to kick a worker after thawed or an unbound wq's
- * max_active is bumped. In realtime scenarios, always kicking a
- * worker will cause interference on the isolated cpu cores, so
- * let's kick iff work items were activated.
- */
- if (kick)
- wake_up_worker(pwq->pool);
+ kick_pool(pwq->pool);
} else {
pwq->max_active = 0;
}
@@ -4135,7 +4198,7 @@ static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
INIT_LIST_HEAD(&pwq->inactive_works);
INIT_LIST_HEAD(&pwq->pwqs_node);
INIT_LIST_HEAD(&pwq->mayday_node);
- INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
+ kthread_init_work(&pwq->release_work, pwq_release_workfn);
}
/* sync @pwq with the current state of its associated wq and link it */
@@ -4183,61 +4246,49 @@ static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
}
/**
- * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
+ * wq_calc_pod_cpumask - calculate a wq_attrs' cpumask for a pod
* @attrs: the wq_attrs of the default pwq of the target workqueue
- * @node: the target NUMA node
+ * @cpu: the target CPU
* @cpu_going_down: if >= 0, the CPU to consider as offline
- * @cpumask: outarg, the resulting cpumask
- *
- * Calculate the cpumask a workqueue with @attrs should use on @node. If
- * @cpu_going_down is >= 0, that cpu is considered offline during
- * calculation. The result is stored in @cpumask.
*
- * If NUMA affinity is not enabled, @attrs->cpumask is always used. If
- * enabled and @node has online CPUs requested by @attrs, the returned
- * cpumask is the intersection of the possible CPUs of @node and
- * @attrs->cpumask.
+ * Calculate the cpumask a workqueue with @attrs should use on @pod. If
+ * @cpu_going_down is >= 0, that cpu is considered offline during calculation.
+ * The result is stored in @attrs->__pod_cpumask.
*
- * The caller is responsible for ensuring that the cpumask of @node stays
- * stable.
+ * If pod affinity is not enabled, @attrs->cpumask is always used. If enabled
+ * and @pod has online CPUs requested by @attrs, the returned cpumask is the
+ * intersection of the possible CPUs of @pod and @attrs->cpumask.
*
- * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
- * %false if equal.
+ * The caller is responsible for ensuring that the cpumask of @pod stays stable.
*/
-static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
- int cpu_going_down, cpumask_t *cpumask)
+static void wq_calc_pod_cpumask(struct workqueue_attrs *attrs, int cpu,
+ int cpu_going_down)
{
- if (!wq_numa_enabled || attrs->no_numa)
- goto use_dfl;
+ const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
+ int pod = pt->cpu_pod[cpu];
- /* does @node have any online CPUs @attrs wants? */
- cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
+ /* does @pod have any online CPUs @attrs wants? */
+ cpumask_and(attrs->__pod_cpumask, pt->pod_cpus[pod], attrs->cpumask);
+ cpumask_and(attrs->__pod_cpumask, attrs->__pod_cpumask, cpu_online_mask);
if (cpu_going_down >= 0)
- cpumask_clear_cpu(cpu_going_down, cpumask);
+ cpumask_clear_cpu(cpu_going_down, attrs->__pod_cpumask);
- if (cpumask_empty(cpumask))
- goto use_dfl;
+ if (cpumask_empty(attrs->__pod_cpumask)) {
+ cpumask_copy(attrs->__pod_cpumask, attrs->cpumask);
+ return;
+ }
- /* yeap, return possible CPUs in @node that @attrs wants */
- cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
+ /* yeap, return possible CPUs in @pod that @attrs wants */
+ cpumask_and(attrs->__pod_cpumask, attrs->cpumask, pt->pod_cpus[pod]);
- if (cpumask_empty(cpumask)) {
+ if (cpumask_empty(attrs->__pod_cpumask))
pr_warn_once("WARNING: workqueue cpumask: online intersect > "
"possible intersect\n");
- return false;
- }
-
- return !cpumask_equal(cpumask, attrs->cpumask);
-
-use_dfl:
- cpumask_copy(cpumask, attrs->cpumask);
- return false;
}
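/*
 * Worked example for the rewritten helper above, with hypothetical numbers:
 * pt->pod_cpus[pod] = 0-3, attrs->cpumask = 2-5, cpu_online_mask = 0-4 and
 * cpu_going_down = 3.
 *
 *	pod & wanted:			{2,3}
 *	& online, minus CPU 3:		{2}	-> not empty, no fallback
 *	final __pod_cpumask:		attrs->cpumask & pod_cpus[pod] = {2,3}
 *
 * Only if the online intersection had come out empty would __pod_cpumask have
 * been set to the full attrs->cpumask.
 */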
-/* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
-static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
- int node,
- struct pool_workqueue *pwq)
+/* install @pwq into @wq's cpu_pwq and return the old pwq */
+static struct pool_workqueue *install_unbound_pwq(struct workqueue_struct *wq,
+ int cpu, struct pool_workqueue *pwq)
{
struct pool_workqueue *old_pwq;
@@ -4247,8 +4298,8 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
/* link_pwq() can handle duplicate calls */
link_pwq(pwq);
- old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
- rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
+ old_pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
+ rcu_assign_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu), pwq);
return old_pwq;
}
@@ -4265,10 +4316,10 @@ struct apply_wqattrs_ctx {
static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
{
if (ctx) {
- int node;
+ int cpu;
- for_each_node(node)
- put_pwq_unlocked(ctx->pwq_tbl[node]);
+ for_each_possible_cpu(cpu)
+ put_pwq_unlocked(ctx->pwq_tbl[cpu]);
put_pwq_unlocked(ctx->dfl_pwq);
free_workqueue_attrs(ctx->attrs);
@@ -4284,76 +4335,64 @@ apply_wqattrs_prepare(struct workqueue_struct *wq,
const cpumask_var_t unbound_cpumask)
{
struct apply_wqattrs_ctx *ctx;
- struct workqueue_attrs *new_attrs, *tmp_attrs;
- int node;
+ struct workqueue_attrs *new_attrs;
+ int cpu;
lockdep_assert_held(&wq_pool_mutex);
- ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
+ if (WARN_ON(attrs->affn_scope < 0 ||
+ attrs->affn_scope >= WQ_AFFN_NR_TYPES))
+ return ERR_PTR(-EINVAL);
+
+ ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_cpu_ids), GFP_KERNEL);
new_attrs = alloc_workqueue_attrs();
- tmp_attrs = alloc_workqueue_attrs();
- if (!ctx || !new_attrs || !tmp_attrs)
+ if (!ctx || !new_attrs)
goto out_free;
/*
- * Calculate the attrs of the default pwq with unbound_cpumask
- * which is wq_unbound_cpumask or to set to wq_unbound_cpumask.
- * If the user configured cpumask doesn't overlap with the
- * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask.
- */
- copy_workqueue_attrs(new_attrs, attrs);
- cpumask_and(new_attrs->cpumask, new_attrs->cpumask, unbound_cpumask);
- if (unlikely(cpumask_empty(new_attrs->cpumask)))
- cpumask_copy(new_attrs->cpumask, unbound_cpumask);
-
- /*
- * We may create multiple pwqs with differing cpumasks. Make a
- * copy of @new_attrs which will be modified and used to obtain
- * pools.
- */
- copy_workqueue_attrs(tmp_attrs, new_attrs);
-
- /*
* If something goes wrong during CPU up/down, we'll fall back to
* the default pwq covering whole @attrs->cpumask. Always create
* it even if we don't use it immediately.
*/
+ copy_workqueue_attrs(new_attrs, attrs);
+ wqattrs_actualize_cpumask(new_attrs, unbound_cpumask);
+ cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
if (!ctx->dfl_pwq)
goto out_free;
- for_each_node(node) {
- if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
- ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
- if (!ctx->pwq_tbl[node])
- goto out_free;
- } else {
+ for_each_possible_cpu(cpu) {
+ if (new_attrs->ordered) {
ctx->dfl_pwq->refcnt++;
- ctx->pwq_tbl[node] = ctx->dfl_pwq;
+ ctx->pwq_tbl[cpu] = ctx->dfl_pwq;
+ } else {
+ wq_calc_pod_cpumask(new_attrs, cpu, -1);
+ ctx->pwq_tbl[cpu] = alloc_unbound_pwq(wq, new_attrs);
+ if (!ctx->pwq_tbl[cpu])
+ goto out_free;
}
}
/* save the user configured attrs and sanitize it. */
copy_workqueue_attrs(new_attrs, attrs);
cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
+ cpumask_copy(new_attrs->__pod_cpumask, new_attrs->cpumask);
ctx->attrs = new_attrs;
ctx->wq = wq;
- free_workqueue_attrs(tmp_attrs);
return ctx;
out_free:
- free_workqueue_attrs(tmp_attrs);
free_workqueue_attrs(new_attrs);
apply_wqattrs_cleanup(ctx);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
/* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
{
- int node;
+ int cpu;
/* all pwqs have been created successfully, let's install'em */
mutex_lock(&ctx->wq->mutex);
@@ -4361,9 +4400,9 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
/* save the previous pwq and install the new one */
- for_each_node(node)
- ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
- ctx->pwq_tbl[node]);
+ for_each_possible_cpu(cpu)
+ ctx->pwq_tbl[cpu] = install_unbound_pwq(ctx->wq, cpu,
+ ctx->pwq_tbl[cpu]);
/* @dfl_pwq might not have been used, ensure it's linked */
link_pwq(ctx->dfl_pwq);
@@ -4403,8 +4442,8 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
}
ctx = apply_wqattrs_prepare(wq, attrs, wq_unbound_cpumask);
- if (!ctx)
- return -ENOMEM;
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
/* the ctx has been prepared successfully, let's commit it */
apply_wqattrs_commit(ctx);
@@ -4418,12 +4457,11 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
* @wq: the target workqueue
* @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
*
- * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
- * machines, this function maps a separate pwq to each NUMA node with
- * possibles CPUs in @attrs->cpumask so that work items are affine to the
- * NUMA node it was issued on. Older pwqs are released as in-flight work
- * items finish. Note that a work item which repeatedly requeues itself
- * back-to-back will stay on its current pwq.
+ * Apply @attrs to an unbound workqueue @wq. Unless disabled, this function maps
+ * a separate pwq to each CPU pod with possible CPUs in @attrs->cpumask so that
+ * work items are affine to the pod they were issued on. Older pwqs are released as
+ * in-flight work items finish. Note that a work item which repeatedly requeues
+ * itself back-to-back will stay on its current pwq.
*
* Performs GFP_KERNEL allocations.
*
@@ -4446,40 +4484,37 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
}
/**
- * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
+ * wq_update_pod - update pod affinity of a wq for CPU hot[un]plug
* @wq: the target workqueue
- * @cpu: the CPU coming up or going down
+ * @cpu: the CPU to update pool association for
+ * @hotplug_cpu: the CPU coming up or going down
* @online: whether @cpu is coming up or going down
*
* This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
- * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update NUMA affinity of
+ * %CPU_DOWN_FAILED. @cpu is being hot[un]plugged, update pod affinity of
* @wq accordingly.
*
- * If NUMA affinity can't be adjusted due to memory allocation failure, it
- * falls back to @wq->dfl_pwq which may not be optimal but is always
- * correct.
- *
- * Note that when the last allowed CPU of a NUMA node goes offline for a
- * workqueue with a cpumask spanning multiple nodes, the workers which were
- * already executing the work items for the workqueue will lose their CPU
- * affinity and may execute on any CPU. This is similar to how per-cpu
- * workqueues behave on CPU_DOWN. If a workqueue user wants strict
- * affinity, it's the user's responsibility to flush the work item from
- * CPU_DOWN_PREPARE.
+ *
+ * If pod affinity can't be adjusted due to memory allocation failure, it falls
+ * back to @wq->dfl_pwq which may not be optimal but is always correct.
+ *
+ * Note that when the last allowed CPU of a pod goes offline for a workqueue
+ * with a cpumask spanning multiple pods, the workers which were already
+ * executing the work items for the workqueue will lose their CPU affinity and
+ * may execute on any CPU. This is similar to how per-cpu workqueues behave on
+ * CPU_DOWN. If a workqueue user wants strict affinity, it's the user's
+ * responsibility to flush the work item from CPU_DOWN_PREPARE.
*/
-static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
- bool online)
+static void wq_update_pod(struct workqueue_struct *wq, int cpu,
+ int hotplug_cpu, bool online)
{
- int node = cpu_to_node(cpu);
- int cpu_off = online ? -1 : cpu;
+ int off_cpu = online ? -1 : hotplug_cpu;
struct pool_workqueue *old_pwq = NULL, *pwq;
struct workqueue_attrs *target_attrs;
- cpumask_t *cpumask;
lockdep_assert_held(&wq_pool_mutex);
- if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
- wq->unbound_attrs->no_numa)
+ if (!(wq->flags & WQ_UNBOUND) || wq->unbound_attrs->ordered)
return;
/*
@@ -4487,36 +4522,29 @@ static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
* Let's use a preallocated one. The following buf is protected by
* CPU hotplug exclusion.
*/
- target_attrs = wq_update_unbound_numa_attrs_buf;
- cpumask = target_attrs->cpumask;
+ target_attrs = wq_update_pod_attrs_buf;
copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
- pwq = unbound_pwq_by_node(wq, node);
+ wqattrs_actualize_cpumask(target_attrs, wq_unbound_cpumask);
- /*
- * Let's determine what needs to be done. If the target cpumask is
- * different from the default pwq's, we need to compare it to @pwq's
- * and create a new one if they don't match. If the target cpumask
- * equals the default pwq's, the default pwq should be used.
- */
- if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
- if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
- return;
- } else {
- goto use_dfl_pwq;
- }
+ /* nothing to do if the target cpumask matches the current pwq */
+ wq_calc_pod_cpumask(target_attrs, cpu, off_cpu);
+ pwq = rcu_dereference_protected(*per_cpu_ptr(wq->cpu_pwq, cpu),
+ lockdep_is_held(&wq_pool_mutex));
+ if (wqattrs_equal(target_attrs, pwq->pool->attrs))
+ return;
/* create a new pwq */
pwq = alloc_unbound_pwq(wq, target_attrs);
if (!pwq) {
- pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
+ pr_warn("workqueue: allocation failed while updating CPU pod affinity of \"%s\"\n",
wq->name);
goto use_dfl_pwq;
}
/* Install the new pwq. */
mutex_lock(&wq->mutex);
- old_pwq = numa_pwq_tbl_install(wq, node, pwq);
+ old_pwq = install_unbound_pwq(wq, cpu, pwq);
goto out_unlock;
use_dfl_pwq:
@@ -4524,7 +4552,7 @@ use_dfl_pwq:
raw_spin_lock_irq(&wq->dfl_pwq->pool->lock);
get_pwq(wq->dfl_pwq);
raw_spin_unlock_irq(&wq->dfl_pwq->pool->lock);
- old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
+ old_pwq = install_unbound_pwq(wq, cpu, wq->dfl_pwq);
out_unlock:
mutex_unlock(&wq->mutex);
put_pwq_unlocked(old_pwq);
@@ -4535,21 +4563,26 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
bool highpri = wq->flags & WQ_HIGHPRI;
int cpu, ret;
- if (!(wq->flags & WQ_UNBOUND)) {
- wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
- if (!wq->cpu_pwqs)
- return -ENOMEM;
+ wq->cpu_pwq = alloc_percpu(struct pool_workqueue *);
+ if (!wq->cpu_pwq)
+ goto enomem;
+ if (!(wq->flags & WQ_UNBOUND)) {
for_each_possible_cpu(cpu) {
- struct pool_workqueue *pwq =
- per_cpu_ptr(wq->cpu_pwqs, cpu);
- struct worker_pool *cpu_pools =
- per_cpu(cpu_worker_pools, cpu);
+ struct pool_workqueue **pwq_p =
+ per_cpu_ptr(wq->cpu_pwq, cpu);
+ struct worker_pool *pool =
+ &(per_cpu_ptr(cpu_worker_pools, cpu)[highpri]);
- init_pwq(pwq, wq, &cpu_pools[highpri]);
+ *pwq_p = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL,
+ pool->node);
+ if (!*pwq_p)
+ goto enomem;
+
+ init_pwq(*pwq_p, wq, pool);
mutex_lock(&wq->mutex);
- link_pwq(pwq);
+ link_pwq(*pwq_p);
mutex_unlock(&wq->mutex);
}
return 0;
@@ -4568,18 +4601,25 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
cpus_read_unlock();
return ret;
+
+enomem:
+ if (wq->cpu_pwq) {
+ for_each_possible_cpu(cpu)
+ kfree(*per_cpu_ptr(wq->cpu_pwq, cpu));
+ free_percpu(wq->cpu_pwq);
+ wq->cpu_pwq = NULL;
+ }
+ return -ENOMEM;
}
static int wq_clamp_max_active(int max_active, unsigned int flags,
const char *name)
{
- int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
-
- if (max_active < 1 || max_active > lim)
+ if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
- max_active, name, 1, lim);
+ max_active, name, 1, WQ_MAX_ACTIVE);
- return clamp_val(max_active, 1, lim);
+ return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
}
/*
@@ -4602,7 +4642,7 @@ static int init_rescuer(struct workqueue_struct *wq)
}
rescuer->rescue_wq = wq;
- rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
+ rescuer->task = kthread_create(rescuer_thread, rescuer, "kworker/R-%s", wq->name);
if (IS_ERR(rescuer->task)) {
ret = PTR_ERR(rescuer->task);
pr_err("workqueue: Failed to create a rescuer kthread for wq \"%s\": %pe",
@@ -4623,17 +4663,15 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
unsigned int flags,
int max_active, ...)
{
- size_t tbl_size = 0;
va_list args;
struct workqueue_struct *wq;
struct pool_workqueue *pwq;
/*
- * Unbound && max_active == 1 used to imply ordered, which is no
- * longer the case on NUMA machines due to per-node pools. While
+ * Unbound && max_active == 1 used to imply ordered, which is no longer
+ * the case on many machines due to per-pod pools. While
* alloc_ordered_workqueue() is the right way to create an ordered
- * workqueue, keep the previous behavior to avoid subtle breakages
- * on NUMA.
+ * workqueue, keep the previous behavior to avoid subtle breakages.
*/
if ((flags & WQ_UNBOUND) && max_active == 1)
flags |= __WQ_ORDERED;
@@ -4643,10 +4681,7 @@ struct workqueue_struct *alloc_workqueue(const char *fmt,
flags |= WQ_UNBOUND;
/* allocate wq and format name */
- if (flags & WQ_UNBOUND)
- tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
-
- wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
+ wq = kzalloc(sizeof(*wq), GFP_KERNEL);
if (!wq)
return NULL;
@@ -4741,7 +4776,7 @@ static bool pwq_busy(struct pool_workqueue *pwq)
void destroy_workqueue(struct workqueue_struct *wq)
{
struct pool_workqueue *pwq;
- int node;
+ int cpu;
/*
* Remove it from sysfs first so that sanity check failure doesn't
@@ -4800,33 +4835,23 @@ void destroy_workqueue(struct workqueue_struct *wq)
list_del_rcu(&wq->list);
mutex_unlock(&wq_pool_mutex);
- if (!(wq->flags & WQ_UNBOUND)) {
- wq_unregister_lockdep(wq);
- /*
- * The base ref is never dropped on per-cpu pwqs. Directly
- * schedule RCU free.
- */
- call_rcu(&wq->rcu, rcu_free_wq);
- } else {
- /*
- * We're the sole accessor of @wq at this point. Directly
- * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
- * @wq will be freed when the last pwq is released.
- */
- for_each_node(node) {
- pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
- RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
- put_pwq_unlocked(pwq);
- }
+ /*
+ * We're the sole accessor of @wq. Directly access cpu_pwq and dfl_pwq
+ * to put the base refs. @wq will be auto-destroyed from the last
+ * pwq_put. RCU read lock prevents @wq from going away from under us.
+ */
+ rcu_read_lock();
- /*
- * Put dfl_pwq. @wq may be freed any time after dfl_pwq is
- * put. Don't access it afterwards.
- */
- pwq = wq->dfl_pwq;
- wq->dfl_pwq = NULL;
+ for_each_possible_cpu(cpu) {
+ pwq = rcu_access_pointer(*per_cpu_ptr(wq->cpu_pwq, cpu));
+ RCU_INIT_POINTER(*per_cpu_ptr(wq->cpu_pwq, cpu), NULL);
put_pwq_unlocked(pwq);
}
+
+ put_pwq_unlocked(wq->dfl_pwq);
+ wq->dfl_pwq = NULL;
+
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
@@ -4903,10 +4928,11 @@ bool current_is_workqueue_rescuer(void)
* unreliable and only useful as advisory hints or for debugging.
*
* If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
- * Note that both per-cpu and unbound workqueues may be associated with
- * multiple pool_workqueues which have separate congested states. A
- * workqueue being congested on one CPU doesn't mean the workqueue is also
- * contested on other CPUs / NUMA nodes.
+ *
+ * With the exception of ordered workqueues, all workqueues have per-cpu
+ * pool_workqueues, each with its own congested state. A workqueue being
+ * congested on one CPU doesn't mean that the workqueue is contested on any
+ * other CPUs.
*
* Return:
* %true if congested, %false otherwise.
@@ -4922,12 +4948,9 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
- if (!(wq->flags & WQ_UNBOUND))
- pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
- else
- pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
-
+ pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
ret = !list_empty(&pwq->inactive_works);
+
preempt_enable();
rcu_read_unlock();
@@ -5402,7 +5425,7 @@ static void unbind_workers(int cpu)
* worker blocking could lead to lengthy stalls. Kick off
* unbound chain execution of currently pending work items.
*/
- wake_up_worker(pool);
+ kick_pool(pool);
raw_spin_unlock_irq(&pool->lock);
@@ -5435,7 +5458,7 @@ static void rebind_workers(struct worker_pool *pool)
for_each_pool_worker(worker, pool) {
kthread_set_per_cpu(worker->task, pool->cpu);
WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
- pool->attrs->cpumask) < 0);
+ pool_allowed_cpus(pool)) < 0);
}
raw_spin_lock_irq(&pool->lock);
@@ -5529,9 +5552,18 @@ int workqueue_online_cpu(unsigned int cpu)
mutex_unlock(&wq_pool_attach_mutex);
}
- /* update NUMA affinity of unbound workqueues */
- list_for_each_entry(wq, &workqueues, list)
- wq_update_unbound_numa(wq, cpu, true);
+ /* update pod affinity of unbound workqueues */
+ list_for_each_entry(wq, &workqueues, list) {
+ struct workqueue_attrs *attrs = wq->unbound_attrs;
+
+ if (attrs) {
+ const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
+ int tcpu;
+
+ for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
+ wq_update_pod(wq, tcpu, cpu, true);
+ }
+ }
mutex_unlock(&wq_pool_mutex);
return 0;
@@ -5547,10 +5579,19 @@ int workqueue_offline_cpu(unsigned int cpu)
unbind_workers(cpu);
- /* update NUMA affinity of unbound workqueues */
+ /* update pod affinity of unbound workqueues */
mutex_lock(&wq_pool_mutex);
- list_for_each_entry(wq, &workqueues, list)
- wq_update_unbound_numa(wq, cpu, false);
+ list_for_each_entry(wq, &workqueues, list) {
+ struct workqueue_attrs *attrs = wq->unbound_attrs;
+
+ if (attrs) {
+ const struct wq_pod_type *pt = wqattrs_pod_type(attrs);
+ int tcpu;
+
+ for_each_cpu(tcpu, pt->pod_cpus[pt->cpu_pod[cpu]])
+ wq_update_pod(wq, tcpu, cpu, false);
+ }
+ }
mutex_unlock(&wq_pool_mutex);
return 0;
@@ -5746,8 +5787,8 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
continue;
ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
- if (!ctx) {
- ret = -ENOMEM;
+ if (IS_ERR(ctx)) {
+ ret = PTR_ERR(ctx);
break;
}
@@ -5805,21 +5846,72 @@ out_unlock:
return ret;
}
+static int parse_affn_scope(const char *val)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wq_affn_names); i++) {
+ if (!strncasecmp(val, wq_affn_names[i], strlen(wq_affn_names[i])))
+ return i;
+ }
+ return -EINVAL;
+}
+
+static int wq_affn_dfl_set(const char *val, const struct kernel_param *kp)
+{
+ struct workqueue_struct *wq;
+ int affn, cpu;
+
+ affn = parse_affn_scope(val);
+ if (affn < 0)
+ return affn;
+ if (affn == WQ_AFFN_DFL)
+ return -EINVAL;
+
+ cpus_read_lock();
+ mutex_lock(&wq_pool_mutex);
+
+ wq_affn_dfl = affn;
+
+ list_for_each_entry(wq, &workqueues, list) {
+ for_each_online_cpu(cpu) {
+ wq_update_pod(wq, cpu, cpu, true);
+ }
+ }
+
+ mutex_unlock(&wq_pool_mutex);
+ cpus_read_unlock();
+
+ return 0;
+}
+
+static int wq_affn_dfl_get(char *buffer, const struct kernel_param *kp)
+{
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", wq_affn_names[wq_affn_dfl]);
+}
+
+static const struct kernel_param_ops wq_affn_dfl_ops = {
+ .set = wq_affn_dfl_set,
+ .get = wq_affn_dfl_get,
+};
+
+module_param_cb(default_affinity_scope, &wq_affn_dfl_ops, NULL, 0644);
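/*
 * Usage sketch for the knob registered above (the sysfs path follows from
 * standard module_param behavior and is not shown in this diff):
 *
 *	# read the current default scope
 *	cat /sys/module/workqueue/parameters/default_affinity_scope
 *	# switch the default for all unbound workqueues using WQ_AFFN_DFL
 *	echo cache > /sys/module/workqueue/parameters/default_affinity_scope
 *
 * wq_affn_dfl_set() then walks every workqueue and calls wq_update_pod() for
 * each online CPU so that existing pwqs are re-fitted to the new scope.
 */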
+
#ifdef CONFIG_SYSFS
/*
* Workqueues with WQ_SYSFS flag set is visible to userland via
* /sys/bus/workqueue/devices/WQ_NAME. All visible workqueues have the
* following attributes.
*
- * per_cpu RO bool : whether the workqueue is per-cpu or unbound
- * max_active RW int : maximum number of in-flight work items
+ * per_cpu RO bool : whether the workqueue is per-cpu or unbound
+ * max_active RW int : maximum number of in-flight work items
*
* Unbound workqueues have the following extra attributes.
*
- * pool_ids RO int : the associated pool IDs for each node
- * nice RW int : nice value of the workers
- * cpumask RW mask : bitmask of allowed CPUs for the workers
- * numa RW bool : whether enable NUMA affinity
+ * nice RW int : nice value of the workers
+ * cpumask RW mask : bitmask of allowed CPUs for the workers
+ * affinity_scope RW str : worker CPU affinity scope (cache, numa, none)
+ * affinity_strict RW bool : worker CPU affinity is strict
*/
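/*
 * For a WQ_SYSFS workqueue these would be exercised roughly as follows (the
 * device name "writeback" is only an example):
 *
 *	echo numa > /sys/bus/workqueue/devices/writeback/affinity_scope
 *	echo 1 > /sys/bus/workqueue/devices/writeback/affinity_strict
 *
 * replacing the per-node pool_ids and numa attributes removed below.
 */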
struct wq_device {
struct workqueue_struct *wq;
@@ -5872,28 +5964,6 @@ static struct attribute *wq_sysfs_attrs[] = {
};
ATTRIBUTE_GROUPS(wq_sysfs);
-static ssize_t wq_pool_ids_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct workqueue_struct *wq = dev_to_wq(dev);
- const char *delim = "";
- int node, written = 0;
-
- cpus_read_lock();
- rcu_read_lock();
- for_each_node(node) {
- written += scnprintf(buf + written, PAGE_SIZE - written,
- "%s%d:%d", delim, node,
- unbound_pwq_by_node(wq, node)->pool->id);
- delim = " ";
- }
- written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
- rcu_read_unlock();
- cpus_read_unlock();
-
- return written;
-}
-
static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -5984,50 +6054,84 @@ out_unlock:
return ret ?: count;
}
-static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t wq_affn_scope_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct workqueue_struct *wq = dev_to_wq(dev);
int written;
mutex_lock(&wq->mutex);
- written = scnprintf(buf, PAGE_SIZE, "%d\n",
- !wq->unbound_attrs->no_numa);
+ if (wq->unbound_attrs->affn_scope == WQ_AFFN_DFL)
+ written = scnprintf(buf, PAGE_SIZE, "%s (%s)\n",
+ wq_affn_names[WQ_AFFN_DFL],
+ wq_affn_names[wq_affn_dfl]);
+ else
+ written = scnprintf(buf, PAGE_SIZE, "%s\n",
+ wq_affn_names[wq->unbound_attrs->affn_scope]);
mutex_unlock(&wq->mutex);
return written;
}
-static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t wq_affn_scope_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct workqueue_struct *wq = dev_to_wq(dev);
struct workqueue_attrs *attrs;
- int v, ret = -ENOMEM;
+ int affn, ret = -ENOMEM;
- apply_wqattrs_lock();
+ affn = parse_affn_scope(buf);
+ if (affn < 0)
+ return affn;
+ apply_wqattrs_lock();
attrs = wq_sysfs_prep_attrs(wq);
- if (!attrs)
- goto out_unlock;
-
- ret = -EINVAL;
- if (sscanf(buf, "%d", &v) == 1) {
- attrs->no_numa = !v;
+ if (attrs) {
+ attrs->affn_scope = affn;
ret = apply_workqueue_attrs_locked(wq, attrs);
}
+ apply_wqattrs_unlock();
+ free_workqueue_attrs(attrs);
+ return ret ?: count;
+}
-out_unlock:
+static ssize_t wq_affinity_strict_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct workqueue_struct *wq = dev_to_wq(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ wq->unbound_attrs->affn_strict);
+}
+
+static ssize_t wq_affinity_strict_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct workqueue_struct *wq = dev_to_wq(dev);
+ struct workqueue_attrs *attrs;
+ int v, ret = -ENOMEM;
+
+ if (sscanf(buf, "%d", &v) != 1)
+ return -EINVAL;
+
+ apply_wqattrs_lock();
+ attrs = wq_sysfs_prep_attrs(wq);
+ if (attrs) {
+ attrs->affn_strict = (bool)v;
+ ret = apply_workqueue_attrs_locked(wq, attrs);
+ }
apply_wqattrs_unlock();
free_workqueue_attrs(attrs);
return ret ?: count;
}
static struct device_attribute wq_sysfs_unbound_attrs[] = {
- __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
__ATTR(nice, 0644, wq_nice_show, wq_nice_store),
__ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
- __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
+ __ATTR(affinity_scope, 0644, wq_affn_scope_show, wq_affn_scope_store),
+ __ATTR(affinity_strict, 0644, wq_affinity_strict_show, wq_affinity_strict_store),
__ATTR_NULL,
};
@@ -6393,62 +6497,19 @@ static inline void wq_watchdog_init(void) { }
#endif /* CONFIG_WQ_WATCHDOG */
-static void __init wq_numa_init(void)
-{
- cpumask_var_t *tbl;
- int node, cpu;
-
- if (num_possible_nodes() <= 1)
- return;
-
- if (wq_disable_numa) {
- pr_info("workqueue: NUMA affinity support disabled\n");
- return;
- }
-
- for_each_possible_cpu(cpu) {
- if (WARN_ON(cpu_to_node(cpu) == NUMA_NO_NODE)) {
- pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
- return;
- }
- }
-
- wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
- BUG_ON(!wq_update_unbound_numa_attrs_buf);
-
- /*
- * We want masks of possible CPUs of each node which isn't readily
- * available. Build one from cpu_to_node() which should have been
- * fully initialized by now.
- */
- tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
- BUG_ON(!tbl);
-
- for_each_node(node)
- BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
- node_online(node) ? node : NUMA_NO_NODE));
-
- for_each_possible_cpu(cpu) {
- node = cpu_to_node(cpu);
- cpumask_set_cpu(cpu, tbl[node]);
- }
-
- wq_numa_possible_cpumask = tbl;
- wq_numa_enabled = true;
-}
-
/**
* workqueue_init_early - early init for workqueue subsystem
*
- * This is the first half of two-staged workqueue subsystem initialization
- * and invoked as soon as the bare basics - memory allocation, cpumasks and
- * idr are up. It sets up all the data structures and system workqueues
- * and allows early boot code to create workqueues and queue/cancel work
- * items. Actual work item execution starts only after kthreads can be
- * created and scheduled right before early initcalls.
+ * This is the first step of three-staged workqueue subsystem initialization and
+ * invoked as soon as the bare basics - memory allocation, cpumasks and idr are
+ * up. It sets up all the data structures and system workqueues and allows early
+ * boot code to create workqueues and queue/cancel work items. Actual work item
+ * execution starts only after kthreads can be created and scheduled right
+ * before early initcalls.
*/
void __init workqueue_init_early(void)
{
+ struct wq_pod_type *pt = &wq_pod_types[WQ_AFFN_SYSTEM];
int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
int i, cpu;
@@ -6458,8 +6519,30 @@ void __init workqueue_init_early(void)
cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_WQ));
cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, housekeeping_cpumask(HK_TYPE_DOMAIN));
+ if (!cpumask_empty(&wq_cmdline_cpumask))
+ cpumask_and(wq_unbound_cpumask, wq_unbound_cpumask, &wq_cmdline_cpumask);
+
pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
+ wq_update_pod_attrs_buf = alloc_workqueue_attrs();
+ BUG_ON(!wq_update_pod_attrs_buf);
+
+ /* initialize WQ_AFFN_SYSTEM pods */
+ pt->pod_cpus = kcalloc(1, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
+ pt->pod_node = kcalloc(1, sizeof(pt->pod_node[0]), GFP_KERNEL);
+ pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
+ BUG_ON(!pt->pod_cpus || !pt->pod_node || !pt->cpu_pod);
+
+ BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE));
+
+ pt->nr_pods = 1;
+ cpumask_copy(pt->pod_cpus[0], cpu_possible_mask);
+ pt->pod_node[0] = NUMA_NO_NODE;
+ pt->cpu_pod[0] = 0;
+
/* initialize CPU pools */
for_each_possible_cpu(cpu) {
struct worker_pool *pool;
@@ -6469,7 +6552,9 @@ void __init workqueue_init_early(void)
BUG_ON(init_worker_pool(pool));
pool->cpu = cpu;
cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
+ cpumask_copy(pool->attrs->__pod_cpumask, cpumask_of(cpu));
pool->attrs->nice = std_nice[i++];
+ pool->attrs->affn_strict = true;
pool->node = cpu_to_node(cpu);
/* alloc pool ID */
@@ -6490,11 +6575,10 @@ void __init workqueue_init_early(void)
/*
* An ordered wq should have only one pwq as ordering is
* guaranteed by max_active which is enforced by pwqs.
- * Turn off NUMA so that dfl_pwq is used for all nodes.
*/
BUG_ON(!(attrs = alloc_workqueue_attrs()));
attrs->nice = std_nice[i];
- attrs->no_numa = true;
+ attrs->ordered = true;
ordered_wq_attrs[i] = attrs;
}
@@ -6502,7 +6586,7 @@ void __init workqueue_init_early(void)
system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
system_long_wq = alloc_workqueue("events_long", 0, 0);
system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
- WQ_UNBOUND_MAX_ACTIVE);
+ WQ_MAX_ACTIVE);
system_freezable_wq = alloc_workqueue("events_freezable",
WQ_FREEZABLE, 0);
system_power_efficient_wq = alloc_workqueue("events_power_efficient",
@@ -6525,6 +6609,9 @@ static void __init wq_cpu_intensive_thresh_init(void)
+ pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
+ BUG_ON(IS_ERR(pwq_release_worker));
+
/* if the user set it to a specific value, keep it */
if (wq_cpu_intensive_thresh_us != ULONG_MAX)
return;
/*
* The default of 10ms is derived from the fact that most modern (as of
* 2023) processors can do a lot in 10ms and that it's just below what
@@ -6555,11 +6642,11 @@ static void __init wq_cpu_intensive_thresh_init(void)
/**
* workqueue_init - bring workqueue subsystem fully online
*
- * This is the latter half of two-staged workqueue subsystem initialization
- * and invoked as soon as kthreads can be created and scheduled.
- * Workqueues have been created and work items queued on them, but there
- * are no kworkers executing the work items yet. Populate the worker pools
- * with the initial workers and enable future kworker creations.
+ * This is the second step of three-staged workqueue subsystem initialization
+ * and invoked as soon as kthreads can be created and scheduled. Workqueues have
+ * been created and work items queued on them, but there are no kworkers
+ * executing the work items yet. Populate the worker pools with the initial
+ * workers and enable future kworker creations.
*/
void __init workqueue_init(void)
{
@@ -6569,19 +6656,12 @@ void __init workqueue_init(void)
wq_cpu_intensive_thresh_init();
- /*
- * It'd be simpler to initialize NUMA in workqueue_init_early() but
- * CPU to node mapping may not be available that early on some
- * archs such as power and arm64. As per-cpu pools created
- * previously could be missing node hint and unbound pools NUMA
- * affinity, fix them up.
- *
- * Also, while iterating workqueues, create rescuers if requested.
- */
- wq_numa_init();
-
mutex_lock(&wq_pool_mutex);
+ /*
+ * Per-cpu pools created earlier could be missing node hint. Fix them
+ * up. Also, create a rescuer for workqueues that requested it.
+ */
for_each_possible_cpu(cpu) {
for_each_cpu_worker_pool(pool, cpu) {
pool->node = cpu_to_node(cpu);
@@ -6589,7 +6669,6 @@ void __init workqueue_init(void)
}
list_for_each_entry(wq, &workqueues, list) {
- wq_update_unbound_numa(wq, smp_processor_id(), true);
WARN(init_rescuer(wq),
"workqueue: failed to create early rescuer for %s",
wq->name);
@@ -6613,9 +6692,114 @@ void __init workqueue_init(void)
}
/*
- * Despite the naming, this is a no-op function which is here only for avoiding
- * link error. Since compile-time warning may fail to catch, we will need to
- * emit run-time warning from __flush_workqueue().
+ * Initialize @pt by first initializing @pt->cpu_pod[] with pod IDs according to
+ * @cpus_share_pod(). Each subset of CPUs that share a pod is assigned a unique
+ * and consecutive pod ID. The rest of @pt is initialized accordingly.
+ */
+static void __init init_pod_type(struct wq_pod_type *pt,
+ bool (*cpus_share_pod)(int, int))
+{
+ int cur, pre, cpu, pod;
+
+ pt->nr_pods = 0;
+
+ /* init @pt->cpu_pod[] according to @cpus_share_pod() */
+ pt->cpu_pod = kcalloc(nr_cpu_ids, sizeof(pt->cpu_pod[0]), GFP_KERNEL);
+ BUG_ON(!pt->cpu_pod);
+
+ for_each_possible_cpu(cur) {
+ for_each_possible_cpu(pre) {
+ if (pre >= cur) {
+ pt->cpu_pod[cur] = pt->nr_pods++;
+ break;
+ }
+ if (cpus_share_pod(cur, pre)) {
+ pt->cpu_pod[cur] = pt->cpu_pod[pre];
+ break;
+ }
+ }
+ }
+
+ /* init the rest to match @pt->cpu_pod[] */
+ pt->pod_cpus = kcalloc(pt->nr_pods, sizeof(pt->pod_cpus[0]), GFP_KERNEL);
+ pt->pod_node = kcalloc(pt->nr_pods, sizeof(pt->pod_node[0]), GFP_KERNEL);
+ BUG_ON(!pt->pod_cpus || !pt->pod_node);
+
+ for (pod = 0; pod < pt->nr_pods; pod++)
+ BUG_ON(!zalloc_cpumask_var(&pt->pod_cpus[pod], GFP_KERNEL));
+
+ for_each_possible_cpu(cpu) {
+ cpumask_set_cpu(cpu, pt->pod_cpus[pt->cpu_pod[cpu]]);
+ pt->pod_node[pt->cpu_pod[cpu]] = cpu_to_node(cpu);
+ }
+}
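/*
 * Worked example for init_pod_type() with a hypothetical 4-CPU, 2-node machine
 * (CPUs 0-1 on node 0, CPUs 2-3 on node 1) and cpus_share_numa() as the
 * @cpus_share_pod callback:
 *
 *	cur=0: no earlier CPU			-> cpu_pod[0] = 0 (new pod)
 *	cur=1: shares node with CPU 0		-> cpu_pod[1] = 0
 *	cur=2: shares with neither		-> cpu_pod[2] = 1 (new pod)
 *	cur=3: shares node with CPU 2		-> cpu_pod[3] = 1
 *
 * yielding nr_pods = 2, pod_cpus = { {0,1}, {2,3} } and pod_node = { 0, 1 }.
 */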
+
+static bool __init cpus_dont_share(int cpu0, int cpu1)
+{
+ return false;
+}
+
+static bool __init cpus_share_smt(int cpu0, int cpu1)
+{
+#ifdef CONFIG_SCHED_SMT
+ return cpumask_test_cpu(cpu0, cpu_smt_mask(cpu1));
+#else
+ return false;
+#endif
+}
+
+static bool __init cpus_share_numa(int cpu0, int cpu1)
+{
+ return cpu_to_node(cpu0) == cpu_to_node(cpu1);
+}
+
+/**
+ * workqueue_init_topology - initialize CPU pods for unbound workqueues
+ *
+ * This is the third step of three-staged workqueue subsystem initialization and
+ * invoked after SMP and topology information are fully initialized. It
+ * initializes the unbound CPU pods accordingly.
*/
-void __warn_flushing_systemwide_wq(void) { }
+void __init workqueue_init_topology(void)
+{
+ struct workqueue_struct *wq;
+ int cpu;
+
+ init_pod_type(&wq_pod_types[WQ_AFFN_CPU], cpus_dont_share);
+ init_pod_type(&wq_pod_types[WQ_AFFN_SMT], cpus_share_smt);
+ init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
+ init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
+
+ mutex_lock(&wq_pool_mutex);
+
+ /*
+ * Workqueues allocated earlier would have all CPUs sharing the default
+ * worker pool. Explicitly call wq_update_pod() on all workqueue and CPU
+ * combinations to apply per-pod sharing.
+ */
+ list_for_each_entry(wq, &workqueues, list) {
+ for_each_online_cpu(cpu) {
+ wq_update_pod(wq, cpu, cpu, true);
+ }
+ }
+
+ mutex_unlock(&wq_pool_mutex);
+}
+
+void __warn_flushing_systemwide_wq(void)
+{
+ pr_warn("WARNING: Flushing system-wide workqueues will be prohibited in near future.\n");
+ dump_stack();
+}
EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
+
+static int __init workqueue_unbound_cpus_setup(char *str)
+{
+ if (cpulist_parse(str, &wq_cmdline_cpumask) < 0) {
+ cpumask_clear(&wq_cmdline_cpumask);
+ pr_warn("workqueue.unbound_cpus: incorrect CPU range, using default\n");
+ }
+
+ return 1;
+}
+__setup("workqueue.unbound_cpus=", workqueue_unbound_cpus_setup);
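/*
 * Example use of the boot parameter parsed above (the CPU list is
 * hypothetical):
 *
 *	workqueue.unbound_cpus=0-3,8
 *
 * restricts unbound workqueue workers to CPUs 0-3 and 8; the parsed mask is
 * ANDed into wq_unbound_cpumask in workqueue_init_early() above. An unparsable
 * list is ignored with a warning rather than rejected.
 */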
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 6b1d66e28269..f6275944ada7 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -48,7 +48,7 @@ struct worker {
/* A: runs through worker->node */
unsigned long last_active; /* K: last active timestamp */
- unsigned int flags; /* X: flags */
+ unsigned int flags; /* L: flags */
int id; /* I: worker id */
/*
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a8a1b0ac8b22..319cfbeb0738 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -355,6 +355,11 @@ endchoice # "Compressed Debug information"
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
depends on $(cc-option,-gsplit-dwarf)
+ # RISC-V linker relaxation + -gsplit-dwarf has issues with LLVM and GCC
+ # prior to 12.x:
+ # https://github.com/llvm/llvm-project/issues/56642
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99090
+ depends on !RISCV || GCC_VERSION >= 120000
help
Generate debug info into separate .dwo files. This significantly
reduces the build directory size for builds with DEBUG_INFO,
@@ -502,7 +507,7 @@ config SECTION_MISMATCH_WARN_ONLY
config DEBUG_FORCE_FUNCTION_ALIGN_64B
bool "Force all function address 64B aligned"
- depends on EXPERT && (X86_64 || ARM64 || PPC32 || PPC64 || ARC || S390)
+ depends on EXPERT && (X86_64 || ARM64 || PPC32 || PPC64 || ARC || RISCV || S390)
select FUNCTION_ALIGNMENT_64B
help
There are cases that a commit from one domain changes the function
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index fdd6d9800a70..6fba6423cc10 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -88,10 +88,11 @@ static inline const char *trim_prefix(const char *path)
return path + skip;
}
-static struct { unsigned flag:8; char opt_char; } opt_array[] = {
+static const struct { unsigned flag:8; char opt_char; } opt_array[] = {
{ _DPRINTK_FLAGS_PRINT, 'p' },
{ _DPRINTK_FLAGS_INCL_MODNAME, 'm' },
{ _DPRINTK_FLAGS_INCL_FUNCNAME, 'f' },
+ { _DPRINTK_FLAGS_INCL_SOURCENAME, 's' },
{ _DPRINTK_FLAGS_INCL_LINENO, 'l' },
{ _DPRINTK_FLAGS_INCL_TID, 't' },
{ _DPRINTK_FLAGS_NONE, '_' },
@@ -808,7 +809,7 @@ const struct kernel_param_ops param_ops_dyndbg_classes = {
};
EXPORT_SYMBOL(param_ops_dyndbg_classes);
-#define PREFIX_SIZE 64
+#define PREFIX_SIZE 128
static int remaining(int wrote)
{
@@ -836,6 +837,9 @@ static char *__dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
if (desc->flags & _DPRINTK_FLAGS_INCL_FUNCNAME)
pos += snprintf(buf + pos, remaining(pos), "%s:",
desc->function);
+ if (desc->flags & _DPRINTK_FLAGS_INCL_SOURCENAME)
+ pos += snprintf(buf + pos, remaining(pos), "%s:",
+ trim_prefix(desc->filename));
if (desc->flags & _DPRINTK_FLAGS_INCL_LINENO)
pos += snprintf(buf + pos, remaining(pos), "%d:",
desc->lineno);
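
For illustration, a minimal user-space sketch of how the prefix is assembled once the new 's' (source name) flag is set. The FL_* constants, emit_prefix() and the usbcore example values below are made-up placeholders, not kernel identifiers; only the field ordering (module:function:source:line) and the PREFIX_SIZE bump to 128 come from the hunk above.

#include <stdio.h>

#define FL_MOD  0x1
#define FL_FUNC 0x2
#define FL_SRC  0x4
#define FL_LINE 0x8

#define PREFIX_SIZE 128

/* bytes left in the prefix buffer, mirroring the kernel helper */
static int remaining(int wrote)
{
	return wrote < PREFIX_SIZE ? PREFIX_SIZE - wrote : 0;
}

static void emit_prefix(char *buf, unsigned int flags, const char *mod,
			const char *func, const char *src, int line)
{
	int pos = 0;

	if (flags & FL_MOD)
		pos += snprintf(buf + pos, remaining(pos), "%s:", mod);
	if (flags & FL_FUNC)
		pos += snprintf(buf + pos, remaining(pos), "%s:", func);
	if (flags & FL_SRC)	/* the new 's' flag slots in here */
		pos += snprintf(buf + pos, remaining(pos), "%s:", src);
	if (flags & FL_LINE)
		pos += snprintf(buf + pos, remaining(pos), "%d:", line);
}

int main(void)
{
	char buf[PREFIX_SIZE] = "";

	emit_prefix(buf, FL_MOD | FL_FUNC | FL_SRC | FL_LINE,
		    "usbcore", "usb_submit_urb",
		    "drivers/usb/core/urb.c", 123);
	/* prints: usbcore:usb_submit_urb:drivers/usb/core/urb.c:123: hello */
	printf("%s hello\n", buf);
	return 0;
}
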
diff --git a/lib/idr.c b/lib/idr.c
index 7ecdfdb5309e..13f2758c2377 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL_GPL(idr_alloc);
* @end: The maximum ID (exclusive).
* @gfp: Memory allocation flags.
*
- * Allocates an unused ID in the range specified by @nextid and @end. If
+ * Allocates an unused ID in the range specified by @start and @end. If
* @end is <= 0, it is treated as one larger than %INT_MAX. This allows
* callers to use @start + N as @end as long as N is within integer range.
* The search for an unused ID will start at the last ID allocated and will
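
The documented behaviour above is that of idr_alloc_cyclic(): IDs keep increasing from the last one handed out and wrap within [@start, @end). A minimal kernel-style sketch, with example_idr and publish() as made-up names:

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDR(example_idr);

static int publish(void *obj)
{
	/* end <= 0 is treated as one larger than INT_MAX */
	int id = idr_alloc_cyclic(&example_idr, obj, 1, 0, GFP_KERNEL);

	return id < 0 ? id : 0;
}
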
diff --git a/lib/kobject.c b/lib/kobject.c
index 16d530f9c174..59dbcbdb1c91 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -56,6 +56,14 @@ void kobject_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid)
kobj->ktype->get_ownership(kobj, uid, gid);
}
+static bool kobj_ns_type_is_valid(enum kobj_ns_type type)
+{
+ if ((type <= KOBJ_NS_TYPE_NONE) || (type >= KOBJ_NS_TYPES))
+ return false;
+
+ return true;
+}
+
static int create_dir(struct kobject *kobj)
{
const struct kobj_type *ktype = get_ktype(kobj);
@@ -66,12 +74,10 @@ static int create_dir(struct kobject *kobj)
if (error)
return error;
- if (ktype) {
- error = sysfs_create_groups(kobj, ktype->default_groups);
- if (error) {
- sysfs_remove_dir(kobj);
- return error;
- }
+ error = sysfs_create_groups(kobj, ktype->default_groups);
+ if (error) {
+ sysfs_remove_dir(kobj);
+ return error;
}
/*
@@ -86,8 +92,7 @@ static int create_dir(struct kobject *kobj)
*/
ops = kobj_child_ns_ops(kobj);
if (ops) {
- BUG_ON(ops->type <= KOBJ_NS_TYPE_NONE);
- BUG_ON(ops->type >= KOBJ_NS_TYPES);
+ BUG_ON(!kobj_ns_type_is_valid(ops->type));
BUG_ON(!kobj_ns_type_registered(ops->type));
sysfs_enable_ns(kobj->sd);
@@ -584,8 +589,7 @@ static void __kobject_del(struct kobject *kobj)
sd = kobj->sd;
ktype = get_ktype(kobj);
- if (ktype)
- sysfs_remove_groups(kobj, ktype->default_groups);
+ sysfs_remove_groups(kobj, ktype->default_groups);
/* send "remove" if the caller did not do it but sent "add" */
if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
@@ -662,10 +666,6 @@ static void kobject_cleanup(struct kobject *kobj)
pr_debug("'%s' (%p): %s, parent %p\n",
kobject_name(kobj), kobj, __func__, kobj->parent);
- if (t && !t->release)
- pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
- kobject_name(kobj), kobj);
-
/* remove from sysfs if the caller did not do it */
if (kobj->state_in_sysfs) {
pr_debug("'%s' (%p): auto cleanup kobject_del\n",
@@ -676,10 +676,13 @@ static void kobject_cleanup(struct kobject *kobj)
parent = NULL;
}
- if (t && t->release) {
+ if (t->release) {
pr_debug("'%s' (%p): calling ktype release\n",
kobject_name(kobj), kobj);
t->release(kobj);
+ } else {
+ pr_debug("'%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/core-api/kobject.rst.\n",
+ kobject_name(kobj), kobj);
}
/* free name if we allocated it */
@@ -854,6 +857,11 @@ int kset_register(struct kset *k)
if (!k)
return -EINVAL;
+ if (!k->kobj.ktype) {
+ pr_err("must have a ktype to be initialized properly!\n");
+ return -EINVAL;
+ }
+
kset_init(k);
err = kobject_add_internal(&k->kobj);
if (err) {
@@ -1017,11 +1025,7 @@ int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
spin_lock(&kobj_ns_type_lock);
error = -EINVAL;
- if (type >= KOBJ_NS_TYPES)
- goto out;
-
- error = -EINVAL;
- if (type <= KOBJ_NS_TYPE_NONE)
+ if (!kobj_ns_type_is_valid(type))
goto out;
error = -EBUSY;
@@ -1041,7 +1045,7 @@ int kobj_ns_type_registered(enum kobj_ns_type type)
int registered = 0;
spin_lock(&kobj_ns_type_lock);
- if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES))
+ if (kobj_ns_type_is_valid(type))
registered = kobj_ns_ops_tbl[type] != NULL;
spin_unlock(&kobj_ns_type_lock);
@@ -1052,7 +1056,7 @@ const struct kobj_ns_type_operations *kobj_child_ns_ops(const struct kobject *pa
{
const struct kobj_ns_type_operations *ops = NULL;
- if (parent && parent->ktype && parent->ktype->child_ns_type)
+ if (parent && parent->ktype->child_ns_type)
ops = parent->ktype->child_ns_type(parent);
return ops;
@@ -1068,8 +1072,7 @@ bool kobj_ns_current_may_mount(enum kobj_ns_type type)
bool may_mount = true;
spin_lock(&kobj_ns_type_lock);
- if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
- kobj_ns_ops_tbl[type])
+ if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
may_mount = kobj_ns_ops_tbl[type]->current_may_mount();
spin_unlock(&kobj_ns_type_lock);
@@ -1081,8 +1084,7 @@ void *kobj_ns_grab_current(enum kobj_ns_type type)
void *ns = NULL;
spin_lock(&kobj_ns_type_lock);
- if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
- kobj_ns_ops_tbl[type])
+ if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
ns = kobj_ns_ops_tbl[type]->grab_current_ns();
spin_unlock(&kobj_ns_type_lock);
@@ -1095,8 +1097,7 @@ const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
const void *ns = NULL;
spin_lock(&kobj_ns_type_lock);
- if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
- kobj_ns_ops_tbl[type])
+ if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
ns = kobj_ns_ops_tbl[type]->netlink_ns(sk);
spin_unlock(&kobj_ns_type_lock);
@@ -1108,8 +1109,7 @@ const void *kobj_ns_initial(enum kobj_ns_type type)
const void *ns = NULL;
spin_lock(&kobj_ns_type_lock);
- if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
- kobj_ns_ops_tbl[type])
+ if (kobj_ns_type_is_valid(type) && kobj_ns_ops_tbl[type])
ns = kobj_ns_ops_tbl[type]->initial_ns();
spin_unlock(&kobj_ns_type_lock);
@@ -1119,7 +1119,7 @@ const void *kobj_ns_initial(enum kobj_ns_type type)
void kobj_ns_drop(enum kobj_ns_type type, void *ns)
{
spin_lock(&kobj_ns_type_lock);
- if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
+ if (kobj_ns_type_is_valid(type) &&
kobj_ns_ops_tbl[type] && kobj_ns_ops_tbl[type]->drop_ns)
kobj_ns_ops_tbl[type]->drop_ns(ns);
spin_unlock(&kobj_ns_type_lock);
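
With the NULL-ktype tolerance removed above, kset_register() now refuses a kset whose embedded kobject has no ktype. A hedged sketch of the pattern callers are expected to follow; example_ktype, example_release and example_kset are illustrative names, not existing kernel symbols:

#include <linux/kobject.h>
#include <linux/module.h>

static void example_release(struct kobject *kobj)
{
	/* nothing dynamic to free in this static sketch */
}

static const struct kobj_type example_ktype = {
	.release   = example_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* the embedded kobject must carry a ktype before kset_register() */
static struct kset example_kset = {
	.kobj = { .ktype = &example_ktype },
};

static int __init example_init(void)
{
	kobject_set_name(&example_kset.kobj, "example_kset");
	return kset_register(&example_kset);	/* -EINVAL if .ktype were NULL */
}

static void __exit example_exit(void)
{
	kset_unregister(&example_kset);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
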
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 5004463c4f9f..9073430dc865 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -151,48 +151,72 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
}
EXPORT_SYMBOL(__percpu_counter_sum);
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
- struct lock_class_key *key)
+int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
+ gfp_t gfp, u32 nr_counters,
+ struct lock_class_key *key)
{
unsigned long flags __maybe_unused;
-
- raw_spin_lock_init(&fbc->lock);
- lockdep_set_class(&fbc->lock, key);
- fbc->count = amount;
- fbc->counters = alloc_percpu_gfp(s32, gfp);
- if (!fbc->counters)
+ size_t counter_size;
+ s32 __percpu *counters;
+ u32 i;
+
+ counter_size = ALIGN(sizeof(*counters), __alignof__(*counters));
+ counters = __alloc_percpu_gfp(nr_counters * counter_size,
+ __alignof__(*counters), gfp);
+ if (!counters) {
+ fbc[0].counters = NULL;
return -ENOMEM;
+ }
- debug_percpu_counter_activate(fbc);
+ for (i = 0; i < nr_counters; i++) {
+ raw_spin_lock_init(&fbc[i].lock);
+ lockdep_set_class(&fbc[i].lock, key);
+#ifdef CONFIG_HOTPLUG_CPU
+ INIT_LIST_HEAD(&fbc[i].list);
+#endif
+ fbc[i].count = amount;
+ fbc[i].counters = (void *)counters + (i * counter_size);
+
+ debug_percpu_counter_activate(&fbc[i]);
+ }
#ifdef CONFIG_HOTPLUG_CPU
- INIT_LIST_HEAD(&fbc->list);
spin_lock_irqsave(&percpu_counters_lock, flags);
- list_add(&fbc->list, &percpu_counters);
+ for (i = 0; i < nr_counters; i++)
+ list_add(&fbc[i].list, &percpu_counters);
spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
return 0;
}
-EXPORT_SYMBOL(__percpu_counter_init);
+EXPORT_SYMBOL(__percpu_counter_init_many);
-void percpu_counter_destroy(struct percpu_counter *fbc)
+void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters)
{
unsigned long flags __maybe_unused;
+ u32 i;
+
+ if (WARN_ON_ONCE(!fbc))
+ return;
- if (!fbc->counters)
+ if (!fbc[0].counters)
return;
- debug_percpu_counter_deactivate(fbc);
+ for (i = 0; i < nr_counters; i++)
+ debug_percpu_counter_deactivate(&fbc[i]);
#ifdef CONFIG_HOTPLUG_CPU
spin_lock_irqsave(&percpu_counters_lock, flags);
- list_del(&fbc->list);
+ for (i = 0; i < nr_counters; i++)
+ list_del(&fbc[i].list);
spin_unlock_irqrestore(&percpu_counters_lock, flags);
#endif
- free_percpu(fbc->counters);
- fbc->counters = NULL;
+
+ free_percpu(fbc[0].counters);
+
+ for (i = 0; i < nr_counters; i++)
+ fbc[i].counters = NULL;
}
-EXPORT_SYMBOL(percpu_counter_destroy);
+EXPORT_SYMBOL(percpu_counter_destroy_many);
int percpu_counter_batch __read_mostly = 32;
EXPORT_SYMBOL(percpu_counter_batch);
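
A short sketch of how the new *_many interfaces are meant to be consumed, assuming the usual percpu_counter_init_many()/percpu_counter_destroy_many() wrappers in <linux/percpu_counter.h> that supply the lock class key; stats and NR_STATS are placeholder names. All NR_STATS counters are carved out of a single percpu allocation and torn down together:

#include <linux/percpu_counter.h>

#define NR_STATS 4

static struct percpu_counter stats[NR_STATS];

static int stats_setup(void)
{
	int err;

	/* one backing percpu area, sliced between all NR_STATS counters */
	err = percpu_counter_init_many(stats, 0, GFP_KERNEL, NR_STATS);
	if (err)
		return err;

	percpu_counter_add(&stats[0], 1);
	return 0;
}

static void stats_teardown(void)
{
	/* frees the shared percpu area once, clears every ->counters */
	percpu_counter_destroy_many(stats, NR_STATS);
}
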
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 45e17619422b..035b0a4db476 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -9,6 +9,7 @@ raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o \
vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
+raid6_pq-$(CONFIG_LOONGARCH) += loongarch_simd.o recov_loongarch_simd.o
hostprogs += mktables
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index a22a05c9af8a..0ec534faf019 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -73,6 +73,14 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_neonx2,
&raid6_neonx1,
#endif
+#ifdef CONFIG_LOONGARCH
+#ifdef CONFIG_CPU_HAS_LASX
+ &raid6_lasx,
+#endif
+#ifdef CONFIG_CPU_HAS_LSX
+ &raid6_lsx,
+#endif
+#endif
#if defined(__ia64__)
&raid6_intx32,
&raid6_intx16,
@@ -104,6 +112,14 @@ const struct raid6_recov_calls *const raid6_recov_algos[] = {
#if defined(CONFIG_KERNEL_MODE_NEON)
&raid6_recov_neon,
#endif
+#ifdef CONFIG_LOONGARCH
+#ifdef CONFIG_CPU_HAS_LASX
+ &raid6_recov_lasx,
+#endif
+#ifdef CONFIG_CPU_HAS_LSX
+ &raid6_recov_lsx,
+#endif
+#endif
&raid6_recov_intx1,
NULL
};
diff --git a/lib/raid6/loongarch.h b/lib/raid6/loongarch.h
new file mode 100644
index 000000000000..acfc33ce7056
--- /dev/null
+++ b/lib/raid6/loongarch.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ *
+ * raid6/loongarch.h
+ *
+ * Definitions common to LoongArch RAID-6 code only
+ */
+
+#ifndef _LIB_RAID6_LOONGARCH_H
+#define _LIB_RAID6_LOONGARCH_H
+
+#ifdef __KERNEL__
+
+#include <asm/cpu-features.h>
+#include <asm/fpu.h>
+
+#else /* for user-space testing */
+
+#include <sys/auxv.h>
+
+/* have to supply these defines for glibc 2.37- and musl */
+#ifndef HWCAP_LOONGARCH_LSX
+#define HWCAP_LOONGARCH_LSX (1 << 4)
+#endif
+#ifndef HWCAP_LOONGARCH_LASX
+#define HWCAP_LOONGARCH_LASX (1 << 5)
+#endif
+
+#define kernel_fpu_begin()
+#define kernel_fpu_end()
+
+#define cpu_has_lsx (getauxval(AT_HWCAP) & HWCAP_LOONGARCH_LSX)
+#define cpu_has_lasx (getauxval(AT_HWCAP) & HWCAP_LOONGARCH_LASX)
+
+#endif /* __KERNEL__ */
+
+#endif /* _LIB_RAID6_LOONGARCH_H */
diff --git a/lib/raid6/loongarch_simd.c b/lib/raid6/loongarch_simd.c
new file mode 100644
index 000000000000..aa5d9f924ca3
--- /dev/null
+++ b/lib/raid6/loongarch_simd.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * RAID6 syndrome calculations in LoongArch SIMD (LSX & LASX)
+ *
+ * Copyright 2023 WANG Xuerui <git@xen0n.name>
+ *
+ * Based on the generic RAID-6 code (int.uc):
+ *
+ * Copyright 2002-2004 H. Peter Anvin
+ */
+
+#include <linux/raid/pq.h>
+#include "loongarch.h"
+
+/*
+ * The vector algorithms are currently priority 0, which means the generic
+ * scalar algorithms are not disabled when vector support is present.
+ * This mirrors the LoongArch RAID5 XOR code, for the same reason: it
+ * cannot be ruled out at this point that some future (maybe reduced)
+ * models could run the vector algorithms slower than the scalar ones,
+ * for example due to errata or micro-op reasons. It may be appropriate
+ * to revisit this after one or two more uarch generations.
+ */
+
+#ifdef CONFIG_CPU_HAS_LSX
+#define NSIZE 16
+
+static int raid6_has_lsx(void)
+{
+ return cpu_has_lsx;
+}
+
+static void raid6_lsx_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ /*
+ * $vr0, $vr1, $vr2, $vr3: wp
+ * $vr4, $vr5, $vr6, $vr7: wq
+ * $vr8, $vr9, $vr10, $vr11: wd
+ * $vr12, $vr13, $vr14, $vr15: w2
+ * $vr16, $vr17, $vr18, $vr19: w1
+ */
+ for (d = 0; d < bytes; d += NSIZE*4) {
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile("vld $vr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
+ asm volatile("vld $vr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("vld $vr2, %0" : : "m"(dptr[z0][d+2*NSIZE]));
+ asm volatile("vld $vr3, %0" : : "m"(dptr[z0][d+3*NSIZE]));
+ asm volatile("vori.b $vr4, $vr0, 0");
+ asm volatile("vori.b $vr5, $vr1, 0");
+ asm volatile("vori.b $vr6, $vr2, 0");
+ asm volatile("vori.b $vr7, $vr3, 0");
+ for (z = z0-1; z >= 0; z--) {
+ /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
+ asm volatile("vld $vr8, %0" : : "m"(dptr[z][d+0*NSIZE]));
+ asm volatile("vld $vr9, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ asm volatile("vld $vr10, %0" : : "m"(dptr[z][d+2*NSIZE]));
+ asm volatile("vld $vr11, %0" : : "m"(dptr[z][d+3*NSIZE]));
+ /* wp$$ ^= wd$$; */
+ asm volatile("vxor.v $vr0, $vr0, $vr8");
+ asm volatile("vxor.v $vr1, $vr1, $vr9");
+ asm volatile("vxor.v $vr2, $vr2, $vr10");
+ asm volatile("vxor.v $vr3, $vr3, $vr11");
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("vslti.b $vr12, $vr4, 0");
+ asm volatile("vslti.b $vr13, $vr5, 0");
+ asm volatile("vslti.b $vr14, $vr6, 0");
+ asm volatile("vslti.b $vr15, $vr7, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("vslli.b $vr16, $vr4, 1");
+ asm volatile("vslli.b $vr17, $vr5, 1");
+ asm volatile("vslli.b $vr18, $vr6, 1");
+ asm volatile("vslli.b $vr19, $vr7, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("vandi.b $vr12, $vr12, 0x1d");
+ asm volatile("vandi.b $vr13, $vr13, 0x1d");
+ asm volatile("vandi.b $vr14, $vr14, 0x1d");
+ asm volatile("vandi.b $vr15, $vr15, 0x1d");
+ /* w1$$ ^= w2$$; */
+ asm volatile("vxor.v $vr16, $vr16, $vr12");
+ asm volatile("vxor.v $vr17, $vr17, $vr13");
+ asm volatile("vxor.v $vr18, $vr18, $vr14");
+ asm volatile("vxor.v $vr19, $vr19, $vr15");
+ /* wq$$ = w1$$ ^ wd$$; */
+ asm volatile("vxor.v $vr4, $vr16, $vr8");
+ asm volatile("vxor.v $vr5, $vr17, $vr9");
+ asm volatile("vxor.v $vr6, $vr18, $vr10");
+ asm volatile("vxor.v $vr7, $vr19, $vr11");
+ }
+ /* *(unative_t *)&p[d+NSIZE*$$] = wp$$; */
+ asm volatile("vst $vr0, %0" : "=m"(p[d+NSIZE*0]));
+ asm volatile("vst $vr1, %0" : "=m"(p[d+NSIZE*1]));
+ asm volatile("vst $vr2, %0" : "=m"(p[d+NSIZE*2]));
+ asm volatile("vst $vr3, %0" : "=m"(p[d+NSIZE*3]));
+ /* *(unative_t *)&q[d+NSIZE*$$] = wq$$; */
+ asm volatile("vst $vr4, %0" : "=m"(q[d+NSIZE*0]));
+ asm volatile("vst $vr5, %0" : "=m"(q[d+NSIZE*1]));
+ asm volatile("vst $vr6, %0" : "=m"(q[d+NSIZE*2]));
+ asm volatile("vst $vr7, %0" : "=m"(q[d+NSIZE*3]));
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_lsx_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ /*
+ * $vr0, $vr1, $vr2, $vr3: wp
+ * $vr4, $vr5, $vr6, $vr7: wq
+ * $vr8, $vr9, $vr10, $vr11: wd
+ * $vr12, $vr13, $vr14, $vr15: w2
+ * $vr16, $vr17, $vr18, $vr19: w1
+ */
+ for (d = 0; d < bytes; d += NSIZE*4) {
+ /* P/Q data pages */
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile("vld $vr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
+ asm volatile("vld $vr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("vld $vr2, %0" : : "m"(dptr[z0][d+2*NSIZE]));
+ asm volatile("vld $vr3, %0" : : "m"(dptr[z0][d+3*NSIZE]));
+ asm volatile("vori.b $vr4, $vr0, 0");
+ asm volatile("vori.b $vr5, $vr1, 0");
+ asm volatile("vori.b $vr6, $vr2, 0");
+ asm volatile("vori.b $vr7, $vr3, 0");
+ for (z = z0-1; z >= start; z--) {
+ /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
+ asm volatile("vld $vr8, %0" : : "m"(dptr[z][d+0*NSIZE]));
+ asm volatile("vld $vr9, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ asm volatile("vld $vr10, %0" : : "m"(dptr[z][d+2*NSIZE]));
+ asm volatile("vld $vr11, %0" : : "m"(dptr[z][d+3*NSIZE]));
+ /* wp$$ ^= wd$$; */
+ asm volatile("vxor.v $vr0, $vr0, $vr8");
+ asm volatile("vxor.v $vr1, $vr1, $vr9");
+ asm volatile("vxor.v $vr2, $vr2, $vr10");
+ asm volatile("vxor.v $vr3, $vr3, $vr11");
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("vslti.b $vr12, $vr4, 0");
+ asm volatile("vslti.b $vr13, $vr5, 0");
+ asm volatile("vslti.b $vr14, $vr6, 0");
+ asm volatile("vslti.b $vr15, $vr7, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("vslli.b $vr16, $vr4, 1");
+ asm volatile("vslli.b $vr17, $vr5, 1");
+ asm volatile("vslli.b $vr18, $vr6, 1");
+ asm volatile("vslli.b $vr19, $vr7, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("vandi.b $vr12, $vr12, 0x1d");
+ asm volatile("vandi.b $vr13, $vr13, 0x1d");
+ asm volatile("vandi.b $vr14, $vr14, 0x1d");
+ asm volatile("vandi.b $vr15, $vr15, 0x1d");
+ /* w1$$ ^= w2$$; */
+ asm volatile("vxor.v $vr16, $vr16, $vr12");
+ asm volatile("vxor.v $vr17, $vr17, $vr13");
+ asm volatile("vxor.v $vr18, $vr18, $vr14");
+ asm volatile("vxor.v $vr19, $vr19, $vr15");
+ /* wq$$ = w1$$ ^ wd$$; */
+ asm volatile("vxor.v $vr4, $vr16, $vr8");
+ asm volatile("vxor.v $vr5, $vr17, $vr9");
+ asm volatile("vxor.v $vr6, $vr18, $vr10");
+ asm volatile("vxor.v $vr7, $vr19, $vr11");
+ }
+
+ /* P/Q left side optimization */
+ for (z = start-1; z >= 0; z--) {
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("vslti.b $vr12, $vr4, 0");
+ asm volatile("vslti.b $vr13, $vr5, 0");
+ asm volatile("vslti.b $vr14, $vr6, 0");
+ asm volatile("vslti.b $vr15, $vr7, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("vslli.b $vr16, $vr4, 1");
+ asm volatile("vslli.b $vr17, $vr5, 1");
+ asm volatile("vslli.b $vr18, $vr6, 1");
+ asm volatile("vslli.b $vr19, $vr7, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("vandi.b $vr12, $vr12, 0x1d");
+ asm volatile("vandi.b $vr13, $vr13, 0x1d");
+ asm volatile("vandi.b $vr14, $vr14, 0x1d");
+ asm volatile("vandi.b $vr15, $vr15, 0x1d");
+ /* wq$$ = w1$$ ^ w2$$; */
+ asm volatile("vxor.v $vr4, $vr16, $vr12");
+ asm volatile("vxor.v $vr5, $vr17, $vr13");
+ asm volatile("vxor.v $vr6, $vr18, $vr14");
+ asm volatile("vxor.v $vr7, $vr19, $vr15");
+ }
+ /*
+ * *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
+ * *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
+ */
+ asm volatile(
+ "vld $vr20, %0\n\t"
+ "vld $vr21, %1\n\t"
+ "vld $vr22, %2\n\t"
+ "vld $vr23, %3\n\t"
+ "vld $vr24, %4\n\t"
+ "vld $vr25, %5\n\t"
+ "vld $vr26, %6\n\t"
+ "vld $vr27, %7\n\t"
+ "vxor.v $vr20, $vr20, $vr0\n\t"
+ "vxor.v $vr21, $vr21, $vr1\n\t"
+ "vxor.v $vr22, $vr22, $vr2\n\t"
+ "vxor.v $vr23, $vr23, $vr3\n\t"
+ "vxor.v $vr24, $vr24, $vr4\n\t"
+ "vxor.v $vr25, $vr25, $vr5\n\t"
+ "vxor.v $vr26, $vr26, $vr6\n\t"
+ "vxor.v $vr27, $vr27, $vr7\n\t"
+ "vst $vr20, %0\n\t"
+ "vst $vr21, %1\n\t"
+ "vst $vr22, %2\n\t"
+ "vst $vr23, %3\n\t"
+ "vst $vr24, %4\n\t"
+ "vst $vr25, %5\n\t"
+ "vst $vr26, %6\n\t"
+ "vst $vr27, %7\n\t"
+ : "+m"(p[d+NSIZE*0]), "+m"(p[d+NSIZE*1]),
+ "+m"(p[d+NSIZE*2]), "+m"(p[d+NSIZE*3]),
+ "+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1]),
+ "+m"(q[d+NSIZE*2]), "+m"(q[d+NSIZE*3])
+ );
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_lsx = {
+ raid6_lsx_gen_syndrome,
+ raid6_lsx_xor_syndrome,
+ raid6_has_lsx,
+ "lsx",
+ .priority = 0 /* see the comment near the top of the file for reason */
+};
+
+#undef NSIZE
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LASX
+#define NSIZE 32
+
+static int raid6_has_lasx(void)
+{
+ return cpu_has_lasx;
+}
+
+static void raid6_lasx_gen_syndrome(int disks, size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ /*
+ * $xr0, $xr1: wp
+ * $xr2, $xr3: wq
+ * $xr4, $xr5: wd
+ * $xr6, $xr7: w2
+ * $xr8, $xr9: w1
+ */
+ for (d = 0; d < bytes; d += NSIZE*2) {
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile("xvld $xr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
+ asm volatile("xvld $xr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("xvori.b $xr2, $xr0, 0");
+ asm volatile("xvori.b $xr3, $xr1, 0");
+ for (z = z0-1; z >= 0; z--) {
+ /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
+ asm volatile("xvld $xr4, %0" : : "m"(dptr[z][d+0*NSIZE]));
+ asm volatile("xvld $xr5, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ /* wp$$ ^= wd$$; */
+ asm volatile("xvxor.v $xr0, $xr0, $xr4");
+ asm volatile("xvxor.v $xr1, $xr1, $xr5");
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("xvslti.b $xr6, $xr2, 0");
+ asm volatile("xvslti.b $xr7, $xr3, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("xvslli.b $xr8, $xr2, 1");
+ asm volatile("xvslli.b $xr9, $xr3, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("xvandi.b $xr6, $xr6, 0x1d");
+ asm volatile("xvandi.b $xr7, $xr7, 0x1d");
+ /* w1$$ ^= w2$$; */
+ asm volatile("xvxor.v $xr8, $xr8, $xr6");
+ asm volatile("xvxor.v $xr9, $xr9, $xr7");
+ /* wq$$ = w1$$ ^ wd$$; */
+ asm volatile("xvxor.v $xr2, $xr8, $xr4");
+ asm volatile("xvxor.v $xr3, $xr9, $xr5");
+ }
+ /* *(unative_t *)&p[d+NSIZE*$$] = wp$$; */
+ asm volatile("xvst $xr0, %0" : "=m"(p[d+NSIZE*0]));
+ asm volatile("xvst $xr1, %0" : "=m"(p[d+NSIZE*1]));
+ /* *(unative_t *)&q[d+NSIZE*$$] = wq$$; */
+ asm volatile("xvst $xr2, %0" : "=m"(q[d+NSIZE*0]));
+ asm volatile("xvst $xr3, %0" : "=m"(q[d+NSIZE*1]));
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_lasx_xor_syndrome(int disks, int start, int stop,
+ size_t bytes, void **ptrs)
+{
+ u8 **dptr = (u8 **)ptrs;
+ u8 *p, *q;
+ int d, z, z0;
+
+ z0 = stop; /* P/Q right side optimization */
+ p = dptr[disks-2]; /* XOR parity */
+ q = dptr[disks-1]; /* RS syndrome */
+
+ kernel_fpu_begin();
+
+ /*
+ * $xr0, $xr1: wp
+ * $xr2, $xr3: wq
+ * $xr4, $xr5: wd
+ * $xr6, $xr7: w2
+ * $xr8, $xr9: w1
+ */
+ for (d = 0; d < bytes; d += NSIZE*2) {
+ /* P/Q data pages */
+ /* wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; */
+ asm volatile("xvld $xr0, %0" : : "m"(dptr[z0][d+0*NSIZE]));
+ asm volatile("xvld $xr1, %0" : : "m"(dptr[z0][d+1*NSIZE]));
+ asm volatile("xvori.b $xr2, $xr0, 0");
+ asm volatile("xvori.b $xr3, $xr1, 0");
+ for (z = z0-1; z >= start; z--) {
+ /* wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE]; */
+ asm volatile("xvld $xr4, %0" : : "m"(dptr[z][d+0*NSIZE]));
+ asm volatile("xvld $xr5, %0" : : "m"(dptr[z][d+1*NSIZE]));
+ /* wp$$ ^= wd$$; */
+ asm volatile("xvxor.v $xr0, $xr0, $xr4");
+ asm volatile("xvxor.v $xr1, $xr1, $xr5");
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("xvslti.b $xr6, $xr2, 0");
+ asm volatile("xvslti.b $xr7, $xr3, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("xvslli.b $xr8, $xr2, 1");
+ asm volatile("xvslli.b $xr9, $xr3, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("xvandi.b $xr6, $xr6, 0x1d");
+ asm volatile("xvandi.b $xr7, $xr7, 0x1d");
+ /* w1$$ ^= w2$$; */
+ asm volatile("xvxor.v $xr8, $xr8, $xr6");
+ asm volatile("xvxor.v $xr9, $xr9, $xr7");
+ /* wq$$ = w1$$ ^ wd$$; */
+ asm volatile("xvxor.v $xr2, $xr8, $xr4");
+ asm volatile("xvxor.v $xr3, $xr9, $xr5");
+ }
+
+ /* P/Q left side optimization */
+ for (z = start-1; z >= 0; z--) {
+ /* w2$$ = MASK(wq$$); */
+ asm volatile("xvslti.b $xr6, $xr2, 0");
+ asm volatile("xvslti.b $xr7, $xr3, 0");
+ /* w1$$ = SHLBYTE(wq$$); */
+ asm volatile("xvslli.b $xr8, $xr2, 1");
+ asm volatile("xvslli.b $xr9, $xr3, 1");
+ /* w2$$ &= NBYTES(0x1d); */
+ asm volatile("xvandi.b $xr6, $xr6, 0x1d");
+ asm volatile("xvandi.b $xr7, $xr7, 0x1d");
+ /* wq$$ = w1$$ ^ w2$$; */
+ asm volatile("xvxor.v $xr2, $xr8, $xr6");
+ asm volatile("xvxor.v $xr3, $xr9, $xr7");
+ }
+ /*
+ * *(unative_t *)&p[d+NSIZE*$$] ^= wp$$;
+ * *(unative_t *)&q[d+NSIZE*$$] ^= wq$$;
+ */
+ asm volatile(
+ "xvld $xr10, %0\n\t"
+ "xvld $xr11, %1\n\t"
+ "xvld $xr12, %2\n\t"
+ "xvld $xr13, %3\n\t"
+ "xvxor.v $xr10, $xr10, $xr0\n\t"
+ "xvxor.v $xr11, $xr11, $xr1\n\t"
+ "xvxor.v $xr12, $xr12, $xr2\n\t"
+ "xvxor.v $xr13, $xr13, $xr3\n\t"
+ "xvst $xr10, %0\n\t"
+ "xvst $xr11, %1\n\t"
+ "xvst $xr12, %2\n\t"
+ "xvst $xr13, %3\n\t"
+ : "+m"(p[d+NSIZE*0]), "+m"(p[d+NSIZE*1]),
+ "+m"(q[d+NSIZE*0]), "+m"(q[d+NSIZE*1])
+ );
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_calls raid6_lasx = {
+ raid6_lasx_gen_syndrome,
+ raid6_lasx_xor_syndrome,
+ raid6_has_lasx,
+ "lasx",
+ .priority = 0 /* see the comment near the top of the file for reason */
+};
+#undef NSIZE
+#endif /* CONFIG_CPU_HAS_LASX */
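
For readers not fluent in LSX/LASX mnemonics, the per-byte operation the vector loops above implement is the standard RAID-6 multiply-by-2 in GF(2^8) with the 0x11d polynomial, exactly as in the generic int.uc code. A plain-C sketch of one byte column (gf2_mul2() and gen_syndrome_byte() are illustrative names):

#include <stdint.h>

/* w2 = MASK(wq) & 0x1d; w1 = SHLBYTE(wq); result = w1 ^ w2 */
static inline uint8_t gf2_mul2(uint8_t wq)
{
	uint8_t w2 = (wq & 0x80) ? 0x1d : 0x00;
	uint8_t w1 = (uint8_t)(wq << 1);

	return w1 ^ w2;
}

/* One byte column of gen_syndrome: P is plain XOR, Q multiplies by 2
 * per data disk, highest disk first, then folds in the next byte. */
static void gen_syndrome_byte(int ndata, const uint8_t *data,
			      uint8_t *p, uint8_t *q)
{
	uint8_t wp = data[ndata - 1], wq = wp;
	int z;

	for (z = ndata - 2; z >= 0; z--) {
		wp ^= data[z];
		wq = gf2_mul2(wq) ^ data[z];
	}
	*p = wp;
	*q = wq;
}
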
diff --git a/lib/raid6/recov_loongarch_simd.c b/lib/raid6/recov_loongarch_simd.c
new file mode 100644
index 000000000000..94aeac85e6f7
--- /dev/null
+++ b/lib/raid6/recov_loongarch_simd.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RAID6 recovery algorithms in LoongArch SIMD (LSX & LASX)
+ *
+ * Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
+ *
+ * Originally based on recov_avx2.c and recov_ssse3.c:
+ *
+ * Copyright (C) 2012 Intel Corporation
+ * Author: Jim Kukunas <james.t.kukunas@linux.intel.com>
+ */
+
+#include <linux/raid/pq.h>
+#include "loongarch.h"
+
+/*
+ * Unlike the syndrome calculation algorithms, recovery algorithms are not
+ * selected by boot-time benchmarking, so we have to specify the priorities
+ * and hope that future cores will all have decent vector support (i.e.
+ * LASX never slower than LSX, let alone scalar code).
+ */
+
+#ifdef CONFIG_CPU_HAS_LSX
+static int raid6_has_lsx(void)
+{
+ return cpu_has_lsx;
+}
+
+static void raid6_2data_recov_lsx(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks - 2] = p;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_vgfmul[raid6_gfexi[failb - faila]];
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^ raid6_gfexp[failb]]];
+
+ kernel_fpu_begin();
+
+ /*
+ * vr20, vr21: qmul
+ * vr22, vr23: pbmul
+ */
+ asm volatile("vld $vr20, %0" : : "m" (qmul[0]));
+ asm volatile("vld $vr21, %0" : : "m" (qmul[16]));
+ asm volatile("vld $vr22, %0" : : "m" (pbmul[0]));
+ asm volatile("vld $vr23, %0" : : "m" (pbmul[16]));
+
+ while (bytes) {
+ /* vr4 - vr7: Q */
+ asm volatile("vld $vr4, %0" : : "m" (q[0]));
+ asm volatile("vld $vr5, %0" : : "m" (q[16]));
+ asm volatile("vld $vr6, %0" : : "m" (q[32]));
+ asm volatile("vld $vr7, %0" : : "m" (q[48]));
+ /* vr4 - vr7: Q + Qxy */
+ asm volatile("vld $vr8, %0" : : "m" (dq[0]));
+ asm volatile("vld $vr9, %0" : : "m" (dq[16]));
+ asm volatile("vld $vr10, %0" : : "m" (dq[32]));
+ asm volatile("vld $vr11, %0" : : "m" (dq[48]));
+ asm volatile("vxor.v $vr4, $vr4, $vr8");
+ asm volatile("vxor.v $vr5, $vr5, $vr9");
+ asm volatile("vxor.v $vr6, $vr6, $vr10");
+ asm volatile("vxor.v $vr7, $vr7, $vr11");
+ /* vr0 - vr3: P */
+ asm volatile("vld $vr0, %0" : : "m" (p[0]));
+ asm volatile("vld $vr1, %0" : : "m" (p[16]));
+ asm volatile("vld $vr2, %0" : : "m" (p[32]));
+ asm volatile("vld $vr3, %0" : : "m" (p[48]));
+ /* vr0 - vr3: P + Pxy */
+ asm volatile("vld $vr8, %0" : : "m" (dp[0]));
+ asm volatile("vld $vr9, %0" : : "m" (dp[16]));
+ asm volatile("vld $vr10, %0" : : "m" (dp[32]));
+ asm volatile("vld $vr11, %0" : : "m" (dp[48]));
+ asm volatile("vxor.v $vr0, $vr0, $vr8");
+ asm volatile("vxor.v $vr1, $vr1, $vr9");
+ asm volatile("vxor.v $vr2, $vr2, $vr10");
+ asm volatile("vxor.v $vr3, $vr3, $vr11");
+
+ /* vr8 - vr11: higher 4 bits of each byte of (Q + Qxy) */
+ asm volatile("vsrli.b $vr8, $vr4, 4");
+ asm volatile("vsrli.b $vr9, $vr5, 4");
+ asm volatile("vsrli.b $vr10, $vr6, 4");
+ asm volatile("vsrli.b $vr11, $vr7, 4");
+ /* vr4 - vr7: lower 4 bits of each byte of (Q + Qxy) */
+ asm volatile("vandi.b $vr4, $vr4, 0x0f");
+ asm volatile("vandi.b $vr5, $vr5, 0x0f");
+ asm volatile("vandi.b $vr6, $vr6, 0x0f");
+ asm volatile("vandi.b $vr7, $vr7, 0x0f");
+ /* lookup from qmul[0] */
+ asm volatile("vshuf.b $vr4, $vr20, $vr20, $vr4");
+ asm volatile("vshuf.b $vr5, $vr20, $vr20, $vr5");
+ asm volatile("vshuf.b $vr6, $vr20, $vr20, $vr6");
+ asm volatile("vshuf.b $vr7, $vr20, $vr20, $vr7");
+ /* lookup from qmul[16] */
+ asm volatile("vshuf.b $vr8, $vr21, $vr21, $vr8");
+ asm volatile("vshuf.b $vr9, $vr21, $vr21, $vr9");
+ asm volatile("vshuf.b $vr10, $vr21, $vr21, $vr10");
+ asm volatile("vshuf.b $vr11, $vr21, $vr21, $vr11");
+ /* vr16 - vr19: B(Q + Qxy) */
+ asm volatile("vxor.v $vr16, $vr8, $vr4");
+ asm volatile("vxor.v $vr17, $vr9, $vr5");
+ asm volatile("vxor.v $vr18, $vr10, $vr6");
+ asm volatile("vxor.v $vr19, $vr11, $vr7");
+
+ /* vr4 - vr7: higher 4 bits of each byte of (P + Pxy) */
+ asm volatile("vsrli.b $vr4, $vr0, 4");
+ asm volatile("vsrli.b $vr5, $vr1, 4");
+ asm volatile("vsrli.b $vr6, $vr2, 4");
+ asm volatile("vsrli.b $vr7, $vr3, 4");
+ /* vr12 - vr15: lower 4 bits of each byte of (P + Pxy) */
+ asm volatile("vandi.b $vr12, $vr0, 0x0f");
+ asm volatile("vandi.b $vr13, $vr1, 0x0f");
+ asm volatile("vandi.b $vr14, $vr2, 0x0f");
+ asm volatile("vandi.b $vr15, $vr3, 0x0f");
+ /* lookup from pbmul[0] */
+ asm volatile("vshuf.b $vr12, $vr22, $vr22, $vr12");
+ asm volatile("vshuf.b $vr13, $vr22, $vr22, $vr13");
+ asm volatile("vshuf.b $vr14, $vr22, $vr22, $vr14");
+ asm volatile("vshuf.b $vr15, $vr22, $vr22, $vr15");
+ /* lookup from pbmul[16] */
+ asm volatile("vshuf.b $vr4, $vr23, $vr23, $vr4");
+ asm volatile("vshuf.b $vr5, $vr23, $vr23, $vr5");
+ asm volatile("vshuf.b $vr6, $vr23, $vr23, $vr6");
+ asm volatile("vshuf.b $vr7, $vr23, $vr23, $vr7");
+ /* vr4 - vr7: A(P + Pxy) */
+ asm volatile("vxor.v $vr4, $vr4, $vr12");
+ asm volatile("vxor.v $vr5, $vr5, $vr13");
+ asm volatile("vxor.v $vr6, $vr6, $vr14");
+ asm volatile("vxor.v $vr7, $vr7, $vr15");
+
+ /* vr4 - vr7: A(P + Pxy) + B(Q + Qxy) = Dx */
+ asm volatile("vxor.v $vr4, $vr4, $vr16");
+ asm volatile("vxor.v $vr5, $vr5, $vr17");
+ asm volatile("vxor.v $vr6, $vr6, $vr18");
+ asm volatile("vxor.v $vr7, $vr7, $vr19");
+ asm volatile("vst $vr4, %0" : "=m" (dq[0]));
+ asm volatile("vst $vr5, %0" : "=m" (dq[16]));
+ asm volatile("vst $vr6, %0" : "=m" (dq[32]));
+ asm volatile("vst $vr7, %0" : "=m" (dq[48]));
+
+ /* vr0 - vr3: P + Pxy + Dx = Dy */
+ asm volatile("vxor.v $vr0, $vr0, $vr4");
+ asm volatile("vxor.v $vr1, $vr1, $vr5");
+ asm volatile("vxor.v $vr2, $vr2, $vr6");
+ asm volatile("vxor.v $vr3, $vr3, $vr7");
+ asm volatile("vst $vr0, %0" : "=m" (dp[0]));
+ asm volatile("vst $vr1, %0" : "=m" (dp[16]));
+ asm volatile("vst $vr2, %0" : "=m" (dp[32]));
+ asm volatile("vst $vr3, %0" : "=m" (dp[48]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dp += 64;
+ dq += 64;
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_datap_recov_lsx(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ kernel_fpu_begin();
+
+ /* vr22, vr23: qmul */
+ asm volatile("vld $vr22, %0" : : "m" (qmul[0]));
+ asm volatile("vld $vr23, %0" : : "m" (qmul[16]));
+
+ while (bytes) {
+ /* vr0 - vr3: P + Dx */
+ asm volatile("vld $vr0, %0" : : "m" (p[0]));
+ asm volatile("vld $vr1, %0" : : "m" (p[16]));
+ asm volatile("vld $vr2, %0" : : "m" (p[32]));
+ asm volatile("vld $vr3, %0" : : "m" (p[48]));
+ /* vr4 - vr7: Qx */
+ asm volatile("vld $vr4, %0" : : "m" (dq[0]));
+ asm volatile("vld $vr5, %0" : : "m" (dq[16]));
+ asm volatile("vld $vr6, %0" : : "m" (dq[32]));
+ asm volatile("vld $vr7, %0" : : "m" (dq[48]));
+ /* vr4 - vr7: Q + Qx */
+ asm volatile("vld $vr8, %0" : : "m" (q[0]));
+ asm volatile("vld $vr9, %0" : : "m" (q[16]));
+ asm volatile("vld $vr10, %0" : : "m" (q[32]));
+ asm volatile("vld $vr11, %0" : : "m" (q[48]));
+ asm volatile("vxor.v $vr4, $vr4, $vr8");
+ asm volatile("vxor.v $vr5, $vr5, $vr9");
+ asm volatile("vxor.v $vr6, $vr6, $vr10");
+ asm volatile("vxor.v $vr7, $vr7, $vr11");
+
+ /* vr8 - vr11: higher 4 bits of each byte of (Q + Qx) */
+ asm volatile("vsrli.b $vr8, $vr4, 4");
+ asm volatile("vsrli.b $vr9, $vr5, 4");
+ asm volatile("vsrli.b $vr10, $vr6, 4");
+ asm volatile("vsrli.b $vr11, $vr7, 4");
+ /* vr4 - vr7: lower 4 bits of each byte of (Q + Qx) */
+ asm volatile("vandi.b $vr4, $vr4, 0x0f");
+ asm volatile("vandi.b $vr5, $vr5, 0x0f");
+ asm volatile("vandi.b $vr6, $vr6, 0x0f");
+ asm volatile("vandi.b $vr7, $vr7, 0x0f");
+ /* lookup from qmul[0] */
+ asm volatile("vshuf.b $vr4, $vr22, $vr22, $vr4");
+ asm volatile("vshuf.b $vr5, $vr22, $vr22, $vr5");
+ asm volatile("vshuf.b $vr6, $vr22, $vr22, $vr6");
+ asm volatile("vshuf.b $vr7, $vr22, $vr22, $vr7");
+ /* lookup from qmul[16] */
+ asm volatile("vshuf.b $vr8, $vr23, $vr23, $vr8");
+ asm volatile("vshuf.b $vr9, $vr23, $vr23, $vr9");
+ asm volatile("vshuf.b $vr10, $vr23, $vr23, $vr10");
+ asm volatile("vshuf.b $vr11, $vr23, $vr23, $vr11");
+ /* vr4 - vr7: qmul(Q + Qx) = Dx */
+ asm volatile("vxor.v $vr4, $vr4, $vr8");
+ asm volatile("vxor.v $vr5, $vr5, $vr9");
+ asm volatile("vxor.v $vr6, $vr6, $vr10");
+ asm volatile("vxor.v $vr7, $vr7, $vr11");
+ asm volatile("vst $vr4, %0" : "=m" (dq[0]));
+ asm volatile("vst $vr5, %0" : "=m" (dq[16]));
+ asm volatile("vst $vr6, %0" : "=m" (dq[32]));
+ asm volatile("vst $vr7, %0" : "=m" (dq[48]));
+
+ /* vr0 - vr3: P + Dx + Dx = P */
+ asm volatile("vxor.v $vr0, $vr0, $vr4");
+ asm volatile("vxor.v $vr1, $vr1, $vr5");
+ asm volatile("vxor.v $vr2, $vr2, $vr6");
+ asm volatile("vxor.v $vr3, $vr3, $vr7");
+ asm volatile("vst $vr0, %0" : "=m" (p[0]));
+ asm volatile("vst $vr1, %0" : "=m" (p[16]));
+ asm volatile("vst $vr2, %0" : "=m" (p[32]));
+ asm volatile("vst $vr3, %0" : "=m" (p[48]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dq += 64;
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_recov_calls raid6_recov_lsx = {
+ .data2 = raid6_2data_recov_lsx,
+ .datap = raid6_datap_recov_lsx,
+ .valid = raid6_has_lsx,
+ .name = "lsx",
+ .priority = 1,
+};
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LASX
+static int raid6_has_lasx(void)
+{
+ return cpu_has_lasx;
+}
+
+static void raid6_2data_recov_lasx(int disks, size_t bytes, int faila,
+ int failb, void **ptrs)
+{
+ u8 *p, *q, *dp, *dq;
+ const u8 *pbmul; /* P multiplier table for B data */
+ const u8 *qmul; /* Q multiplier table (for both) */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data pages
+ * Use the dead data pages as temporary storage for
+ * delta p and delta q
+ */
+ dp = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 2] = dp;
+ dq = (u8 *)ptrs[failb];
+ ptrs[failb] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dp;
+ ptrs[failb] = dq;
+ ptrs[disks - 2] = p;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ pbmul = raid6_vgfmul[raid6_gfexi[failb - faila]];
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila] ^ raid6_gfexp[failb]]];
+
+ kernel_fpu_begin();
+
+ /*
+ * xr20, xr21: qmul
+ * xr22, xr23: pbmul
+ */
+ asm volatile("vld $vr20, %0" : : "m" (qmul[0]));
+ asm volatile("vld $vr21, %0" : : "m" (qmul[16]));
+ asm volatile("vld $vr22, %0" : : "m" (pbmul[0]));
+ asm volatile("vld $vr23, %0" : : "m" (pbmul[16]));
+ asm volatile("xvreplve0.q $xr20, $xr20");
+ asm volatile("xvreplve0.q $xr21, $xr21");
+ asm volatile("xvreplve0.q $xr22, $xr22");
+ asm volatile("xvreplve0.q $xr23, $xr23");
+
+ while (bytes) {
+ /* xr0, xr1: Q */
+ asm volatile("xvld $xr0, %0" : : "m" (q[0]));
+ asm volatile("xvld $xr1, %0" : : "m" (q[32]));
+ /* xr0, xr1: Q + Qxy */
+ asm volatile("xvld $xr4, %0" : : "m" (dq[0]));
+ asm volatile("xvld $xr5, %0" : : "m" (dq[32]));
+ asm volatile("xvxor.v $xr0, $xr0, $xr4");
+ asm volatile("xvxor.v $xr1, $xr1, $xr5");
+ /* xr2, xr3: P */
+ asm volatile("xvld $xr2, %0" : : "m" (p[0]));
+ asm volatile("xvld $xr3, %0" : : "m" (p[32]));
+ /* xr2, xr3: P + Pxy */
+ asm volatile("xvld $xr4, %0" : : "m" (dp[0]));
+ asm volatile("xvld $xr5, %0" : : "m" (dp[32]));
+ asm volatile("xvxor.v $xr2, $xr2, $xr4");
+ asm volatile("xvxor.v $xr3, $xr3, $xr5");
+
+ /* xr4, xr5: higher 4 bits of each byte of (Q + Qxy) */
+ asm volatile("xvsrli.b $xr4, $xr0, 4");
+ asm volatile("xvsrli.b $xr5, $xr1, 4");
+ /* xr0, xr1: lower 4 bits of each byte of (Q + Qxy) */
+ asm volatile("xvandi.b $xr0, $xr0, 0x0f");
+ asm volatile("xvandi.b $xr1, $xr1, 0x0f");
+ /* lookup from qmul[0] */
+ asm volatile("xvshuf.b $xr0, $xr20, $xr20, $xr0");
+ asm volatile("xvshuf.b $xr1, $xr20, $xr20, $xr1");
+ /* lookup from qmul[16] */
+ asm volatile("xvshuf.b $xr4, $xr21, $xr21, $xr4");
+ asm volatile("xvshuf.b $xr5, $xr21, $xr21, $xr5");
+ /* xr6, xr7: B(Q + Qxy) */
+ asm volatile("xvxor.v $xr6, $xr4, $xr0");
+ asm volatile("xvxor.v $xr7, $xr5, $xr1");
+
+ /* xr4, xr5: higher 4 bits of each byte of (P + Pxy) */
+ asm volatile("xvsrli.b $xr4, $xr2, 4");
+ asm volatile("xvsrli.b $xr5, $xr3, 4");
+ /* xr0, xr1: lower 4 bits of each byte of (P + Pxy) */
+ asm volatile("xvandi.b $xr0, $xr2, 0x0f");
+ asm volatile("xvandi.b $xr1, $xr3, 0x0f");
+ /* lookup from pbmul[0] */
+ asm volatile("xvshuf.b $xr0, $xr22, $xr22, $xr0");
+ asm volatile("xvshuf.b $xr1, $xr22, $xr22, $xr1");
+ /* lookup from pbmul[16] */
+ asm volatile("xvshuf.b $xr4, $xr23, $xr23, $xr4");
+ asm volatile("xvshuf.b $xr5, $xr23, $xr23, $xr5");
+ /* xr0, xr1: A(P + Pxy) */
+ asm volatile("xvxor.v $xr0, $xr0, $xr4");
+ asm volatile("xvxor.v $xr1, $xr1, $xr5");
+
+ /* xr0, xr1: A(P + Pxy) + B(Q + Qxy) = Dx */
+ asm volatile("xvxor.v $xr0, $xr0, $xr6");
+ asm volatile("xvxor.v $xr1, $xr1, $xr7");
+
+ /* xr2, xr3: P + Pxy + Dx = Dy */
+ asm volatile("xvxor.v $xr2, $xr2, $xr0");
+ asm volatile("xvxor.v $xr3, $xr3, $xr1");
+
+ asm volatile("xvst $xr0, %0" : "=m" (dq[0]));
+ asm volatile("xvst $xr1, %0" : "=m" (dq[32]));
+ asm volatile("xvst $xr2, %0" : "=m" (dp[0]));
+ asm volatile("xvst $xr3, %0" : "=m" (dp[32]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dp += 64;
+ dq += 64;
+ }
+
+ kernel_fpu_end();
+}
+
+static void raid6_datap_recov_lasx(int disks, size_t bytes, int faila,
+ void **ptrs)
+{
+ u8 *p, *q, *dq;
+ const u8 *qmul; /* Q multiplier table */
+
+ p = (u8 *)ptrs[disks - 2];
+ q = (u8 *)ptrs[disks - 1];
+
+ /*
+ * Compute syndrome with zero for the missing data page
+ * Use the dead data page as temporary storage for delta q
+ */
+ dq = (u8 *)ptrs[faila];
+ ptrs[faila] = (void *)raid6_empty_zero_page;
+ ptrs[disks - 1] = dq;
+
+ raid6_call.gen_syndrome(disks, bytes, ptrs);
+
+ /* Restore pointer table */
+ ptrs[faila] = dq;
+ ptrs[disks - 1] = q;
+
+ /* Now, pick the proper data tables */
+ qmul = raid6_vgfmul[raid6_gfinv[raid6_gfexp[faila]]];
+
+ kernel_fpu_begin();
+
+ /* xr22, xr23: qmul */
+ asm volatile("vld $vr22, %0" : : "m" (qmul[0]));
+ asm volatile("xvreplve0.q $xr22, $xr22");
+ asm volatile("vld $vr23, %0" : : "m" (qmul[16]));
+ asm volatile("xvreplve0.q $xr23, $xr23");
+
+ while (bytes) {
+ /* xr0, xr1: P + Dx */
+ asm volatile("xvld $xr0, %0" : : "m" (p[0]));
+ asm volatile("xvld $xr1, %0" : : "m" (p[32]));
+ /* xr2, xr3: Qx */
+ asm volatile("xvld $xr2, %0" : : "m" (dq[0]));
+ asm volatile("xvld $xr3, %0" : : "m" (dq[32]));
+ /* xr2, xr3: Q + Qx */
+ asm volatile("xvld $xr4, %0" : : "m" (q[0]));
+ asm volatile("xvld $xr5, %0" : : "m" (q[32]));
+ asm volatile("xvxor.v $xr2, $xr2, $xr4");
+ asm volatile("xvxor.v $xr3, $xr3, $xr5");
+
+ /* xr4, xr5: higher 4 bits of each byte of (Q + Qx) */
+ asm volatile("xvsrli.b $xr4, $xr2, 4");
+ asm volatile("xvsrli.b $xr5, $xr3, 4");
+ /* xr2, xr3: lower 4 bits of each byte of (Q + Qx) */
+ asm volatile("xvandi.b $xr2, $xr2, 0x0f");
+ asm volatile("xvandi.b $xr3, $xr3, 0x0f");
+ /* lookup from qmul[0] */
+ asm volatile("xvshuf.b $xr2, $xr22, $xr22, $xr2");
+ asm volatile("xvshuf.b $xr3, $xr22, $xr22, $xr3");
+ /* lookup from qmul[16] */
+ asm volatile("xvshuf.b $xr4, $xr23, $xr23, $xr4");
+ asm volatile("xvshuf.b $xr5, $xr23, $xr23, $xr5");
+ /* xr2, xr3: qmul(Q + Qx) = Dx */
+ asm volatile("xvxor.v $xr2, $xr2, $xr4");
+ asm volatile("xvxor.v $xr3, $xr3, $xr5");
+
+ /* xr0, xr1: P + Dx + Dx = P */
+ asm volatile("xvxor.v $xr0, $xr0, $xr2");
+ asm volatile("xvxor.v $xr1, $xr1, $xr3");
+
+ asm volatile("xvst $xr2, %0" : "=m" (dq[0]));
+ asm volatile("xvst $xr3, %0" : "=m" (dq[32]));
+ asm volatile("xvst $xr0, %0" : "=m" (p[0]));
+ asm volatile("xvst $xr1, %0" : "=m" (p[32]));
+
+ bytes -= 64;
+ p += 64;
+ q += 64;
+ dq += 64;
+ }
+
+ kernel_fpu_end();
+}
+
+const struct raid6_recov_calls raid6_recov_lasx = {
+ .data2 = raid6_2data_recov_lasx,
+ .datap = raid6_datap_recov_lasx,
+ .valid = raid6_has_lasx,
+ .name = "lasx",
+ .priority = 2,
+};
+#endif /* CONFIG_CPU_HAS_LASX */
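
The recovery loops above are a vectorised form of the usual two-table GF(2^8) multiply: each multiplier table holds 16 entries for the low nibble and 16 for the high nibble, and the lost data is rebuilt as Dx = pbmul(P + Pxy) ^ qmul(Q + Qxy), Dy = (P + Pxy) ^ Dx. A one-byte scalar sketch with illustrative names:

#include <stdint.h>

/* tbl[0..15] indexed by the low nibble, tbl[16..31] by the high nibble */
static inline uint8_t gf_mul_nibbles(const uint8_t tbl[32], uint8_t x)
{
	return tbl[x & 0x0f] ^ tbl[16 + (x >> 4)];
}

static void recov_2data_byte(uint8_t p, uint8_t q, uint8_t pxy, uint8_t qxy,
			     const uint8_t pbmul[32], const uint8_t qmul[32],
			     uint8_t *dx, uint8_t *dy)
{
	uint8_t px = p ^ pxy;	/* P + Pxy */
	uint8_t qx = q ^ qxy;	/* Q + Qxy */

	*dx = gf_mul_nibbles(pbmul, px) ^ gf_mul_nibbles(qmul, qx);
	*dy = px ^ *dx;		/* P + Pxy + Dx */
}
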
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 1f693ea3b980..2abe0076a636 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -41,6 +41,16 @@ ifeq ($(findstring ppc,$(ARCH)),ppc)
gcc -c -x c - >/dev/null && rm ./-.o && echo yes)
endif
+ifeq ($(ARCH),loongarch64)
+ CFLAGS += -I../../../arch/loongarch/include -DCONFIG_LOONGARCH=1
+ CFLAGS += $(shell echo 'vld $$vr0, $$zero, 0' | \
+ gcc -c -x assembler - >/dev/null 2>&1 && \
+ rm ./-.o && echo -DCONFIG_CPU_HAS_LSX=1)
+ CFLAGS += $(shell echo 'xvld $$xr0, $$zero, 0' | \
+ gcc -c -x assembler - >/dev/null 2>&1 && \
+ rm ./-.o && echo -DCONFIG_CPU_HAS_LASX=1)
+endif
+
ifeq ($(IS_X86),yes)
OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
CFLAGS += -DCONFIG_X86
@@ -54,6 +64,8 @@ else ifeq ($(HAS_ALTIVEC),yes)
CFLAGS += -DCONFIG_ALTIVEC
OBJS += altivec1.o altivec2.o altivec4.o altivec8.o \
vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
+else ifeq ($(ARCH),loongarch64)
+ OBJS += loongarch_simd.o recov_loongarch_simd.o
endif
.c.o:
diff --git a/lib/test_scanf.c b/lib/test_scanf.c
index b620cf7de503..a2707af2951a 100644
--- a/lib/test_scanf.c
+++ b/lib/test_scanf.c
@@ -606,7 +606,7 @@ static void __init numbers_slice(void)
#define test_number_prefix(T, str, scan_fmt, expect0, expect1, n_args, fn) \
do { \
const T expect[2] = { expect0, expect1 }; \
- T result[2] = {~expect[0], ~expect[1]}; \
+ T result[2] = { (T)~expect[0], (T)~expect[1] }; \
\
_test(fn, &expect, str, scan_fmt, n_args, &result[0], &result[1]); \
} while (0)
diff --git a/lib/xarray.c b/lib/xarray.c
index 2071a3718f4e..39f07bfc4dcc 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -206,7 +206,7 @@ static void *xas_descend(struct xa_state *xas, struct xa_node *node)
void *entry = xa_entry(xas->xa, node, offset);
xas->xa_node = node;
- if (xa_is_sibling(entry)) {
+ while (xa_is_sibling(entry)) {
offset = xa_to_sibling(entry);
entry = xa_entry(xas->xa, node, offset);
if (node->shift && xa_is_node(entry))
@@ -1802,6 +1802,9 @@ EXPORT_SYMBOL(xa_get_order);
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be called on an XArray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -1850,6 +1853,9 @@ EXPORT_SYMBOL(__xa_alloc);
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be called on an XArray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
* Context: Any context. Expects xa_lock to be held on entry. May
* release and reacquire xa_lock if @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
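
A minimal sketch of the requirement the added kernel-doc states: the allocating entry points only work on an XArray created with XA_FLAGS_ALLOC. example_ids and store_entry() are placeholder names; DEFINE_XARRAY_ALLOC() sets the flag at definition time (equivalently, xa_init_flags(&xa, XA_FLAGS_ALLOC)):

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_ids);	/* XA_FLAGS_ALLOC set at init */

static int store_entry(void *entry)
{
	u32 id;
	int err;

	/* picks an unused index in [0, 1023] and stores entry there */
	err = xa_alloc(&example_ids, &id, entry, XA_LIMIT(0, 1023),
		       GFP_KERNEL);
	if (err)
		return err;

	pr_debug("stored entry at id %u\n", id);
	return 0;
}
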
diff --git a/mm/filemap.c b/mm/filemap.c
index bf6219d9aaac..582f5317ff71 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -121,9 +121,6 @@
* bdi.wb->list_lock (zap_pte_range->set_page_dirty)
* ->inode->i_lock (zap_pte_range->set_page_dirty)
* ->private_lock (zap_pte_range->block_dirty_folio)
- *
- * ->i_mmap_rwsem
- * ->tasklist_lock (memory_failure, collect_procs_ao)
*/
static void page_cache_delete(struct address_space *mapping,
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index dcfec277e839..89895f38f722 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -139,6 +139,10 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
return 0;
}
+void __weak __meminit pmd_init(void *addr)
+{
+}
+
static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
unsigned long end)
{
@@ -166,8 +170,9 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
if (!p)
return -ENOMEM;
} else {
- pud_populate(&init_mm, pud,
- early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+ pmd_init(p);
+ pud_populate(&init_mm, pud, p);
}
}
zero_pmd_populate(pud, addr, next);
@@ -176,6 +181,10 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
return 0;
}
+void __weak __meminit pud_init(void *addr)
+{
+}
+
static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
unsigned long end)
{
@@ -207,8 +216,9 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
if (!p)
return -ENOMEM;
} else {
- p4d_populate(&init_mm, p4d,
- early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+ pud_init(p);
+ p4d_populate(&init_mm, p4d, p);
}
}
zero_pud_populate(p4d, addr, next);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 2e973b36fe07..f70e3d7a602e 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -291,16 +291,22 @@ struct kasan_stack_ring {
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+#ifndef __HAVE_ARCH_SHADOW_MAP
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
<< KASAN_SHADOW_SCALE_SHIFT);
}
+#endif
static __always_inline bool addr_has_metadata(const void *addr)
{
+#ifdef __HAVE_ARCH_SHADOW_MAP
+ return (kasan_mem_to_shadow((void *)addr) != NULL);
+#else
return (kasan_reset_tag(addr) >=
kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
+#endif
}
/**
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 96fd0411f5c5..3872528d0963 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -574,13 +574,14 @@ static void rcu_guarded_free(struct rcu_head *h)
*/
static unsigned long kfence_init_pool(void)
{
- unsigned long addr = (unsigned long)__kfence_pool;
+ unsigned long addr;
struct page *pages;
int i;
if (!arch_kfence_init_pool())
- return addr;
+ return (unsigned long)__kfence_pool;
+ addr = (unsigned long)__kfence_pool;
pages = virt_to_page(__kfence_pool);
/*
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 2918150e31bd..54c2c90d3abc 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1584,6 +1584,9 @@ static void kmemleak_scan(void)
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
struct page *page = pfn_to_online_page(pfn);
+ if (!(pfn & 63))
+ cond_resched();
+
if (!page)
continue;
@@ -1594,8 +1597,6 @@ static void kmemleak_scan(void)
if (page_count(page) == 0)
continue;
scan_block(page, page + 1, NULL);
- if (!(pfn & 63))
- cond_resched();
}
}
put_online_mems();
diff --git a/mm/ksm.c b/mm/ksm.c
index 8d6aee05421d..981af9c72e7a 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2925,7 +2925,7 @@ void collect_procs_ksm(struct page *page, struct list_head *to_kill,
struct anon_vma *av = rmap_item->anon_vma;
anon_vma_lock_read(av);
- read_lock(&tasklist_lock);
+ rcu_read_lock();
for_each_process(tsk) {
struct anon_vma_chain *vmac;
unsigned long addr;
@@ -2944,7 +2944,7 @@ void collect_procs_ksm(struct page *page, struct list_head *to_kill,
}
}
}
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
anon_vma_unlock_read(av);
}
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b29b850cf399..a4d3282493b6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5326,7 +5326,6 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
memcg->deferred_split_queue.split_queue_len = 0;
#endif
- idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
lru_gen_init_memcg(memcg);
return memcg;
fail:
@@ -5398,14 +5397,27 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
if (alloc_shrinker_info(memcg))
goto offline_kmem;
- /* Online state pins memcg ID, memcg ID pins CSS */
- refcount_set(&memcg->id.ref, 1);
- css_get(css);
-
if (unlikely(mem_cgroup_is_root(memcg)))
queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
FLUSH_TIME);
lru_gen_online_memcg(memcg);
+
+ /* Online state pins memcg ID, memcg ID pins CSS */
+ refcount_set(&memcg->id.ref, 1);
+ css_get(css);
+
+ /*
+ * Ensure mem_cgroup_from_id() works once we're fully online.
+ *
+ * We could do this earlier and require callers to filter with
+ * css_tryget_online(). But right now there are no users that
+ * need earlier access, and the workingset code relies on the
+ * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
+ * publish it here at the end of onlining. This matches the
+ * regular ID destruction during offlining.
+ */
+ idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
+
return 0;
offline_kmem:
memcg_offline_kmem(memcg);
diff --git a/mm/memfd.c b/mm/memfd.c
index 1cad1904fc26..2dba2cb6f0d0 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -316,7 +316,7 @@ SYSCALL_DEFINE2(memfd_create,
return -EINVAL;
if (!(flags & (MFD_EXEC | MFD_NOEXEC_SEAL))) {
- pr_info_ratelimited(
+ pr_warn_once(
"%s[%d]: memfd_create() called without MFD_EXEC or MFD_NOEXEC_SEAL set\n",
current->comm, task_pid_nr(current));
}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 881c35ef1daa..4d6e43c88489 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -547,8 +547,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
* on behalf of the thread group. Return task_struct of the (first found)
* dedicated thread if found, and return NULL otherwise.
*
- * We already hold read_lock(&tasklist_lock) in the caller, so we don't
- * have to call rcu_read_lock/unlock() in this function.
+ * We already hold the RCU read lock in the caller, so we don't have to
+ * call rcu_read_lock/unlock() in this function.
*/
static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
{
@@ -609,7 +609,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
return;
pgoff = page_to_pgoff(page);
- read_lock(&tasklist_lock);
+ rcu_read_lock();
for_each_process(tsk) {
struct anon_vma_chain *vmac;
struct task_struct *t = task_early_kill(tsk, force_early);
@@ -626,7 +626,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
add_to_kill_anon_file(t, page, vma, to_kill);
}
}
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
anon_vma_unlock_read(av);
}
@@ -642,7 +642,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
pgoff_t pgoff;
i_mmap_lock_read(mapping);
- read_lock(&tasklist_lock);
+ rcu_read_lock();
pgoff = page_to_pgoff(page);
for_each_process(tsk) {
struct task_struct *t = task_early_kill(tsk, force_early);
@@ -662,7 +662,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
add_to_kill_anon_file(t, page, vma, to_kill);
}
}
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
i_mmap_unlock_read(mapping);
}
@@ -685,7 +685,7 @@ static void collect_procs_fsdax(struct page *page,
struct task_struct *tsk;
i_mmap_lock_read(mapping);
- read_lock(&tasklist_lock);
+ rcu_read_lock();
for_each_process(tsk) {
struct task_struct *t = task_early_kill(tsk, true);
@@ -696,7 +696,7 @@ static void collect_procs_fsdax(struct page *page,
add_to_kill_fsdax(t, page, vma, to_kill, pgoff);
}
}
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
i_mmap_unlock_read(mapping);
}
#endif /* CONFIG_FS_DAX */
@@ -717,7 +717,7 @@ static void collect_procs(struct page *page, struct list_head *tokill,
collect_procs_file(page, tokill, force_early);
}
-struct hwp_walk {
+struct hwpoison_walk {
struct to_kill tk;
unsigned long pfn;
int flags;
@@ -752,7 +752,7 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
- struct hwp_walk *hwp)
+ struct hwpoison_walk *hwp)
{
pmd_t pmd = *pmdp;
unsigned long pfn;
@@ -770,7 +770,7 @@ static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
}
#else
static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
- struct hwp_walk *hwp)
+ struct hwpoison_walk *hwp)
{
return 0;
}
@@ -779,7 +779,7 @@ static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
- struct hwp_walk *hwp = walk->private;
+ struct hwpoison_walk *hwp = walk->private;
int ret = 0;
pte_t *ptep, *mapped_pte;
spinlock_t *ptl;
@@ -813,7 +813,7 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
- struct hwp_walk *hwp = walk->private;
+ struct hwpoison_walk *hwp = walk->private;
pte_t pte = huge_ptep_get(ptep);
struct hstate *h = hstate_vma(walk->vma);
@@ -824,7 +824,7 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
#define hwpoison_hugetlb_range NULL
#endif
-static const struct mm_walk_ops hwp_walk_ops = {
+static const struct mm_walk_ops hwpoison_walk_ops = {
.pmd_entry = hwpoison_pte_range,
.hugetlb_entry = hwpoison_hugetlb_range,
.walk_lock = PGWALK_RDLOCK,
@@ -847,7 +847,7 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
int flags)
{
int ret;
- struct hwp_walk priv = {
+ struct hwpoison_walk priv = {
.pfn = pfn,
};
priv.tk.tsk = p;
@@ -856,7 +856,7 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
return -EFAULT;
mmap_read_lock(p->mm);
- ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops,
+ ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops,
(void *)&priv);
if (ret == 1 && priv.tk.addr)
kill_proc(&priv.tk, pfn, flags);
@@ -1562,7 +1562,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
* Here we are interested only in user-mapped pages, so skip any
* other types of pages.
*/
- if (PageReserved(p) || PageSlab(p) || PageTable(p))
+ if (PageReserved(p) || PageSlab(p) || PageTable(p) || PageOffline(p))
return true;
if (!(PageLRU(hpage) || PageHuge(p)))
return true;
@@ -2533,7 +2533,8 @@ int unpoison_memory(unsigned long pfn)
goto unlock_mutex;
}
- if (folio_test_slab(folio) || PageTable(&folio->page) || folio_test_reserved(folio))
+ if (folio_test_slab(folio) || PageTable(&folio->page) ||
+ folio_test_reserved(folio) || PageOffline(&folio->page))
goto unlock_mutex;
/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 452459836b71..0c5be12f9336 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2641,12 +2641,6 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
do {
page = NULL;
spin_lock_irqsave(&zone->lock, flags);
- /*
- * order-0 request can reach here when the pcplist is skipped
- * due to non-CMA allocation context. HIGHATOMIC area is
- * reserved for high-order atomic allocation, so order-0
- * request should skip it.
- */
if (alloc_flags & ALLOC_HIGHATOMIC)
page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
if (!page) {
@@ -2780,17 +2774,10 @@ struct page *rmqueue(struct zone *preferred_zone,
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
if (likely(pcp_allowed_order(order))) {
- /*
- * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
- * we need to skip it when CMA area isn't allowed.
- */
- if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
- migratetype != MIGRATE_MOVABLE) {
- page = rmqueue_pcplist(preferred_zone, zone, order,
- migratetype, alloc_flags);
- if (likely(page))
- goto out;
- }
+ page = rmqueue_pcplist(preferred_zone, zone, order,
+ migratetype, alloc_flags);
+ if (likely(page))
+ goto out;
}
page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 9b2d23fbf4d3..b7d7e4fcfad7 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -58,7 +58,7 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
pte = pte_offset_map(pmd, addr);
if (pte) {
err = walk_pte_range_inner(pte, addr, end, walk);
- if (walk->mm != &init_mm)
+ if (walk->mm != &init_mm && addr < TASK_SIZE)
pte_unmap(pte);
}
} else {
diff --git a/mm/percpu.c b/mm/percpu.c
index 28e07ede46f6..a7665de8485f 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1890,13 +1890,15 @@ fail_unlock:
fail:
trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
- if (!is_atomic && do_warn && warn_limit) {
+ if (do_warn && warn_limit) {
pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
size, align, is_atomic, err);
- dump_stack();
+ if (!is_atomic)
+ dump_stack();
if (!--warn_limit)
pr_info("limit reached, disable warning\n");
}
+
if (is_atomic) {
/* see the flag handling in pcpu_balance_workfn() */
pcpu_atomic_alloc_failed = true;
@@ -2581,14 +2583,12 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
{
size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
size_t static_size, dyn_size;
- struct pcpu_chunk *chunk;
unsigned long *group_offsets;
size_t *group_sizes;
unsigned long *unit_off;
unsigned int cpu;
int *unit_map;
int group, unit, i;
- int map_size;
unsigned long tmp_addr;
size_t alloc_size;
@@ -2615,7 +2615,6 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
- PCPU_SETUP_BUG_ON(!ai->dyn_size);
PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
@@ -2698,7 +2697,7 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
pcpu_atom_size = ai->atom_size;
- pcpu_chunk_struct_size = struct_size(chunk, populated,
+ pcpu_chunk_struct_size = struct_size((struct pcpu_chunk *)0, populated,
BITS_TO_LONGS(pcpu_unit_pages));
pcpu_stats_save_ai(ai);
@@ -2735,29 +2734,23 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
dyn_size = ai->dyn_size - (static_size - ai->static_size);
/*
- * Initialize first chunk.
- * If the reserved_size is non-zero, this initializes the reserved
- * chunk. If the reserved_size is zero, the reserved chunk is NULL
- * and the dynamic region is initialized here. The first chunk,
- * pcpu_first_chunk, will always point to the chunk that serves
- * the dynamic region.
+ * Initialize first chunk:
+ * This chunk is broken up into 3 parts:
+ * < static | [reserved] | dynamic >
+ * - static - there is no backing chunk because these allocations can
+ * never be freed.
+ * - reserved (pcpu_reserved_chunk) - exists primarily to serve
+ * allocations from module load.
+ * - dynamic (pcpu_first_chunk) - serves the dynamic part of the first
+ * chunk.
*/
tmp_addr = (unsigned long)base_addr + static_size;
- map_size = ai->reserved_size ?: dyn_size;
- chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
-
- /* init dynamic chunk if necessary */
- if (ai->reserved_size) {
- pcpu_reserved_chunk = chunk;
+ if (ai->reserved_size)
+ pcpu_reserved_chunk = pcpu_alloc_first_chunk(tmp_addr,
+ ai->reserved_size);
+ tmp_addr = (unsigned long)base_addr + static_size + ai->reserved_size;
+ pcpu_first_chunk = pcpu_alloc_first_chunk(tmp_addr, dyn_size);
- tmp_addr = (unsigned long)base_addr + static_size +
- ai->reserved_size;
- map_size = dyn_size;
- chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
- }
-
- /* link the first chunk in */
- pcpu_first_chunk = chunk;
pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
pcpu_chunk_relocate(pcpu_first_chunk, -1);
@@ -3189,32 +3182,26 @@ void __init __weak pcpu_populate_pte(unsigned long addr)
pmd_t *pmd;
if (pgd_none(*pgd)) {
- p4d_t *new;
-
- new = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
- if (!new)
+ p4d = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
+ if (!p4d)
goto err_alloc;
- pgd_populate(&init_mm, pgd, new);
+ pgd_populate(&init_mm, pgd, p4d);
}
p4d = p4d_offset(pgd, addr);
if (p4d_none(*p4d)) {
- pud_t *new;
-
- new = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
- if (!new)
+ pud = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
+ if (!pud)
goto err_alloc;
- p4d_populate(&init_mm, p4d, new);
+ p4d_populate(&init_mm, p4d, pud);
}
pud = pud_offset(p4d, addr);
if (pud_none(*pud)) {
- pmd_t *new;
-
- new = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
- if (!new)
+ pmd = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
+ if (!pmd)
goto err_alloc;
- pud_populate(&init_mm, pud, new);
+ pud_populate(&init_mm, pud, pmd);
}
pmd = pmd_offset(pud, addr);
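The pcpu_setup_first_chunk() rework above splits the first chunk into the < static | [reserved] | dynamic > layout spelled out in the new comment and computes each region's base address directly instead of routing both through a shared 'chunk' local. A small standalone sketch of that address arithmetic (illustrative values only, no kernel types):

#include <stdio.h>

/* Illustrative layout parameters; in the kernel these come from
 * struct pcpu_alloc_info (ai->static_size, ai->reserved_size, dyn_size). */
struct first_chunk_layout {
        unsigned long base_addr;
        unsigned long static_size;
        unsigned long reserved_size;
        unsigned long dyn_size;
};

int main(void)
{
        struct first_chunk_layout l = {
                .base_addr     = 0x100000,
                .static_size   = 0x4000,
                .reserved_size = 0x2000,        /* 0 means no reserved chunk */
                .dyn_size      = 0x8000,
        };

        /* Reserved region (pcpu_reserved_chunk) starts right after the
         * static area and exists only if a reserved size was requested. */
        if (l.reserved_size)
                printf("reserved chunk at 0x%lx, size 0x%lx\n",
                       l.base_addr + l.static_size, l.reserved_size);

        /* Dynamic region (pcpu_first_chunk) always follows static plus
         * reserved, whether or not reserved_size is zero. */
        printf("dynamic chunk at 0x%lx, size 0x%lx\n",
               l.base_addr + l.static_size + l.reserved_size, l.dyn_size);
        return 0;
}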
diff --git a/mm/util.c b/mm/util.c
index f08b655da917..8cbbfd3a3d59 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -1068,7 +1068,9 @@ void mem_dump_obj(void *object)
if (vmalloc_dump_obj(object))
return;
- if (virt_addr_valid(object))
+ if (is_vmalloc_addr(object))
+ type = "vmalloc memory";
+ else if (virt_addr_valid(object))
type = "non-slab/vmalloc memory";
else if (object == NULL)
type = "NULL pointer";
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 228a4a5312f2..ef8599d394fd 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -4278,14 +4278,32 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
#ifdef CONFIG_PRINTK
bool vmalloc_dump_obj(void *object)
{
- struct vm_struct *vm;
void *objp = (void *)PAGE_ALIGN((unsigned long)object);
+ const void *caller;
+ struct vm_struct *vm;
+ struct vmap_area *va;
+ unsigned long addr;
+ unsigned int nr_pages;
- vm = find_vm_area(objp);
- if (!vm)
+ if (!spin_trylock(&vmap_area_lock))
+ return false;
+ va = __find_vmap_area((unsigned long)objp, &vmap_area_root);
+ if (!va) {
+ spin_unlock(&vmap_area_lock);
return false;
+ }
+
+ vm = va->vm;
+ if (!vm) {
+ spin_unlock(&vmap_area_lock);
+ return false;
+ }
+ addr = (unsigned long)vm->addr;
+ caller = vm->caller;
+ nr_pages = vm->nr_pages;
+ spin_unlock(&vmap_area_lock);
pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
- vm->nr_pages, (unsigned long)vm->addr, vm->caller);
+ nr_pages, addr, caller);
return true;
}
#endif
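vmalloc_dump_obj() above now looks the area up under vmap_area_lock with a trylock, copies the three fields it reports into locals, and prints only after dropping the lock, so the print path cannot race with the area being freed and does not block if the lock is already contended. A rough userspace analogue of that snapshot-under-trylock pattern (hypothetical names, a pthread mutex in place of the kernel spinlock):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct region {
        unsigned long addr;
        unsigned int nr_pages;
        const char *caller;
};

static pthread_mutex_t region_lock = PTHREAD_MUTEX_INITIALIZER;
static struct region the_region = { 0x10001000UL, 4, "demo_alloc" };
static bool region_valid = true;

static bool dump_region(void)
{
        unsigned long addr;
        unsigned int nr_pages;
        const char *caller;

        /* Don't block (the kernel version can be reached from error
         * paths); just give up if the lock is contended. */
        if (pthread_mutex_trylock(&region_lock) != 0)
                return false;
        if (!region_valid) {
                pthread_mutex_unlock(&region_lock);
                return false;
        }
        /* Snapshot everything we want to report ... */
        addr = the_region.addr;
        nr_pages = the_region.nr_pages;
        caller = the_region.caller;
        pthread_mutex_unlock(&region_lock);

        /* ... and only touch the (possibly slow) output path unlocked. */
        printf("%u-page region starting at %#lx allocated at %s\n",
               nr_pages, addr, caller);
        return true;
}

int main(void)
{
        return dump_region() ? 0 : 1;
}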
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 5697df9d4394..94ec913dfb76 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -771,7 +771,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
{
- struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
+ struct rfcomm_dev *dev = tty->driver_data;
BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
dev->port.count);
@@ -779,17 +779,18 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
tty_port_close(&dev->port, tty, filp);
}
-static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+static ssize_t rfcomm_tty_write(struct tty_struct *tty, const u8 *buf,
+ size_t count)
{
- struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
+ struct rfcomm_dev *dev = tty->driver_data;
struct rfcomm_dlc *dlc = dev->dlc;
struct sk_buff *skb;
- int sent = 0, size;
+ size_t sent = 0, size;
- BT_DBG("tty %p count %d", tty, count);
+ BT_DBG("tty %p count %zu", tty, count);
while (count) {
- size = min_t(uint, count, dlc->mtu);
+ size = min_t(size_t, count, dlc->mtu);
skb = rfcomm_wmalloc(dev, size + RFCOMM_SKB_RESERVE, GFP_ATOMIC);
if (!skb)
@@ -810,7 +811,7 @@ static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, in
static unsigned int rfcomm_tty_write_room(struct tty_struct *tty)
{
- struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
+ struct rfcomm_dev *dev = tty->driver_data;
int room = 0;
if (dev && dev->dlc)
@@ -864,7 +865,7 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty,
u8 baud, data_bits, stop_bits, parity, x_on, x_off;
u16 changes = 0;
- struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
+ struct rfcomm_dev *dev = tty->driver_data;
BT_DBG("tty %p termios %p", tty, old);
@@ -996,7 +997,7 @@ static void rfcomm_tty_set_termios(struct tty_struct *tty,
static void rfcomm_tty_throttle(struct tty_struct *tty)
{
- struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
+ struct rfcomm_dev *dev = tty->driver_data;
BT_DBG("tty %p dev %p", tty, dev);
@@ -1005,7 +1006,7 @@ static void rfcomm_tty_throttle(struct tty_struct *tty)
static void rfcomm_tty_unthrottle(struct tty_struct *tty)
{
- struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
+ struct rfcomm_dev *dev = tty->driver_data;
BT_DBG("tty %p dev %p", tty, dev);
@@ -1014,7 +1015,7 @@ static void rfcomm_tty_unthrottle(struct tty_struct *tty)
static unsigned int rfcomm_tty_chars_in_buffer(struct tty_struct *tty)
{
- struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
+ struct rfcomm_dev *dev = tty->driver_data;
BT_DBG("tty %p dev %p", tty, dev);
@@ -1029,7 +1030,7 @@ static unsigned int rfcomm_tty_chars_in_buffer(struct tty_struct *tty)
static void rfcomm_tty_flush_buffer(struct tty_struct *tty)
{
- struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
+ struct rfcomm_dev *dev = tty->driver_data;
BT_DBG("tty %p dev %p", tty, dev);
@@ -1052,7 +1053,7 @@ static void rfcomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
static void rfcomm_tty_hangup(struct tty_struct *tty)
{
- struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
+ struct rfcomm_dev *dev = tty->driver_data;
BT_DBG("tty %p dev %p", tty, dev);
@@ -1061,7 +1062,7 @@ static void rfcomm_tty_hangup(struct tty_struct *tty)
static int rfcomm_tty_tiocmget(struct tty_struct *tty)
{
- struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
+ struct rfcomm_dev *dev = tty->driver_data;
BT_DBG("tty %p dev %p", tty, dev);
@@ -1070,7 +1071,7 @@ static int rfcomm_tty_tiocmget(struct tty_struct *tty)
static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
{
- struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
+ struct rfcomm_dev *dev = tty->driver_data;
struct rfcomm_dlc *dlc = dev->dlc;
u8 v24_sig;
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 57a7a64b84ed..0841f8d82419 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -543,6 +543,7 @@ struct bpf_fentry_test_t {
int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
+ asm volatile ("");
return (long)arg;
}
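The one-line change to bpf_fentry_test7() above adds an empty asm statement, which the compiler must treat as having unknown side effects; presumably the intent is to keep the otherwise-trivial body from being folded away or merged with an identical function, so the test function keeps a distinct, attachable body. A minimal standalone illustration of the idiom (not kernel code):

#include <stdio.h>

/* Without a barrier, an optimizing compiler may treat this as a pure
 * identity function: inline it, fold calls to a constant, or merge it
 * with any other function that has an identical body. */
static long identity_plain(long x)
{
        return x;
}

/* The empty volatile asm is opaque to the optimizer, so the compiler
 * cannot assume the body is a side-effect-free pass-through; this is
 * the same idiom used in bpf_fentry_test7() above (which is also
 * marked noinline in the kernel). */
static long identity_with_barrier(long x)
{
        asm volatile ("");
        return x;
}

int main(void)
{
        printf("%ld %ld\n", identity_plain(7), identity_with_barrier(7));
        return 0;
}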
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index feaec4ad6d16..b28c976f52a0 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -974,6 +974,7 @@ static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
struct sock_exterr_skb *serr;
struct sk_buff *skb;
char *state = "UNK";
+ u32 tsflags;
int err;
jsk = j1939_sk(sk);
@@ -981,13 +982,14 @@ static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
if (!(jsk->state & J1939_SOCK_ERRQUEUE))
return;
+ tsflags = READ_ONCE(sk->sk_tsflags);
switch (type) {
case J1939_ERRQUEUE_TX_ACK:
- if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))
+ if (!(tsflags & SOF_TIMESTAMPING_TX_ACK))
return;
break;
case J1939_ERRQUEUE_TX_SCHED:
- if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED))
+ if (!(tsflags & SOF_TIMESTAMPING_TX_SCHED))
return;
break;
case J1939_ERRQUEUE_TX_ABORT:
@@ -997,7 +999,7 @@ static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
case J1939_ERRQUEUE_RX_DPO:
fallthrough;
case J1939_ERRQUEUE_RX_ABORT:
- if (!(sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE))
+ if (!(tsflags & SOF_TIMESTAMPING_RX_SOFTWARE))
return;
break;
default:
@@ -1054,7 +1056,7 @@ static void __j1939_sk_errqueue(struct j1939_session *session, struct sock *sk,
}
serr->opt_stats = true;
- if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
+ if (tsflags & SOF_TIMESTAMPING_OPT_ID)
serr->ee.ee_data = session->tskey;
netdev_dbg(session->priv->ndev, "%s: 0x%p tskey: %i, state: %s\n",
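The j1939 change above reads sk->sk_tsflags once with READ_ONCE() into a local and then tests only that local, so a concurrent setsockopt(SO_TIMESTAMPING) cannot give the function an inconsistent view halfway through. A rough userspace analogue of the read-once-then-test pattern, using a C11 relaxed atomic load in place of READ_ONCE() (flag names invented for the example):

#include <stdatomic.h>
#include <stdio.h>

#define TS_TX_ACK   0x1u
#define TS_RX_SOFT  0x2u
#define TS_OPT_ID   0x4u

/* Shared flag word that another thread may rewrite at any time,
 * playing the role of sk->sk_tsflags. */
static _Atomic unsigned int sock_tsflags = TS_TX_ACK | TS_OPT_ID;

static void report(void)
{
        /* Load the flags exactly once (the kernel uses READ_ONCE()),
         * then test only the local copy so every check in this
         * function sees one consistent snapshot. */
        unsigned int tsflags =
                atomic_load_explicit(&sock_tsflags, memory_order_relaxed);

        if (tsflags & TS_TX_ACK)
                puts("would queue a TX-ack timestamp");
        if (tsflags & TS_RX_SOFT)
                puts("would queue an RX software timestamp");
        if (tsflags & TS_OPT_ID)
                puts("would attach the transfer key in ee_data");
}

int main(void)
{
        report();
        return 0;
}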
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 5eb4898cccd4..10a41cd9c523 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -969,6 +969,62 @@ static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
return true;
}
+static void ceph_msg_data_iter_cursor_init(struct ceph_msg_data_cursor *cursor,
+ size_t length)
+{
+ struct ceph_msg_data *data = cursor->data;
+
+ cursor->iov_iter = data->iter;
+ cursor->lastlen = 0;
+ iov_iter_truncate(&cursor->iov_iter, length);
+ cursor->resid = iov_iter_count(&cursor->iov_iter);
+}
+
+static struct page *ceph_msg_data_iter_next(struct ceph_msg_data_cursor *cursor,
+ size_t *page_offset, size_t *length)
+{
+ struct page *page;
+ ssize_t len;
+
+ if (cursor->lastlen)
+ iov_iter_revert(&cursor->iov_iter, cursor->lastlen);
+
+ len = iov_iter_get_pages2(&cursor->iov_iter, &page, PAGE_SIZE,
+ 1, page_offset);
+ BUG_ON(len < 0);
+
+ cursor->lastlen = len;
+
+ /*
+ * FIXME: The assumption is that the pages represented by the iov_iter
+ * are pinned, with the references held by the upper-level
+ * callers, or by virtue of being under writeback. Eventually,
+ * we'll get an iov_iter_get_pages2 variant that doesn't take
+ * page refs. Until then, just put the page ref.
+ */
+ VM_BUG_ON_PAGE(!PageWriteback(page) && page_count(page) < 2, page);
+ put_page(page);
+
+ *length = min_t(size_t, len, cursor->resid);
+ return page;
+}
+
+static bool ceph_msg_data_iter_advance(struct ceph_msg_data_cursor *cursor,
+ size_t bytes)
+{
+ BUG_ON(bytes > cursor->resid);
+ cursor->resid -= bytes;
+
+ if (bytes < cursor->lastlen) {
+ cursor->lastlen -= bytes;
+ } else {
+ iov_iter_advance(&cursor->iov_iter, bytes - cursor->lastlen);
+ cursor->lastlen = 0;
+ }
+
+ return cursor->resid;
+}
+
/*
* Message data is handled (sent or received) in pieces, where each
* piece resides on a single page. The network layer might not
@@ -996,6 +1052,9 @@ static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
case CEPH_MSG_DATA_BVECS:
ceph_msg_data_bvecs_cursor_init(cursor, length);
break;
+ case CEPH_MSG_DATA_ITER:
+ ceph_msg_data_iter_cursor_init(cursor, length);
+ break;
case CEPH_MSG_DATA_NONE:
default:
/* BUG(); */
@@ -1013,6 +1072,7 @@ void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor,
cursor->total_resid = length;
cursor->data = msg->data;
+ cursor->sr_resid = 0;
__ceph_msg_data_cursor_init(cursor);
}
@@ -1042,6 +1102,9 @@ struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
case CEPH_MSG_DATA_BVECS:
page = ceph_msg_data_bvecs_next(cursor, page_offset, length);
break;
+ case CEPH_MSG_DATA_ITER:
+ page = ceph_msg_data_iter_next(cursor, page_offset, length);
+ break;
case CEPH_MSG_DATA_NONE:
default:
page = NULL;
@@ -1080,6 +1143,9 @@ void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes)
case CEPH_MSG_DATA_BVECS:
new_piece = ceph_msg_data_bvecs_advance(cursor, bytes);
break;
+ case CEPH_MSG_DATA_ITER:
+ new_piece = ceph_msg_data_iter_advance(cursor, bytes);
+ break;
case CEPH_MSG_DATA_NONE:
default:
BUG();
@@ -1879,6 +1945,18 @@ void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
}
EXPORT_SYMBOL(ceph_msg_data_add_bvecs);
+void ceph_msg_data_add_iter(struct ceph_msg *msg,
+ struct iov_iter *iter)
+{
+ struct ceph_msg_data *data;
+
+ data = ceph_msg_data_add(msg);
+ data->type = CEPH_MSG_DATA_ITER;
+ data->iter = *iter;
+
+ msg->data_length += iov_iter_count(&data->iter);
+}
+
/*
* construct a new message with given type, size
* the new msg has a ref count of 1.
diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c
index 3d57bb48a2b4..f9a50d7f0d20 100644
--- a/net/ceph/messenger_v1.c
+++ b/net/ceph/messenger_v1.c
@@ -159,9 +159,9 @@ static size_t sizeof_footer(struct ceph_connection *con)
static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
{
- /* Initialize data cursor */
-
- ceph_msg_data_cursor_init(&msg->cursor, msg, data_len);
+ /* Initialize data cursor if it's not a sparse read */
+ if (!msg->sparse_read)
+ ceph_msg_data_cursor_init(&msg->cursor, msg, data_len);
}
/*
@@ -960,9 +960,9 @@ static void process_ack(struct ceph_connection *con)
prepare_read_tag(con);
}
-static int read_partial_message_section(struct ceph_connection *con,
- struct kvec *section,
- unsigned int sec_len, u32 *crc)
+static int read_partial_message_chunk(struct ceph_connection *con,
+ struct kvec *section,
+ unsigned int sec_len, u32 *crc)
{
int ret, left;
@@ -978,11 +978,91 @@ static int read_partial_message_section(struct ceph_connection *con,
section->iov_len += ret;
}
if (section->iov_len == sec_len)
- *crc = crc32c(0, section->iov_base, section->iov_len);
+ *crc = crc32c(*crc, section->iov_base, section->iov_len);
return 1;
}
+static inline int read_partial_message_section(struct ceph_connection *con,
+ struct kvec *section,
+ unsigned int sec_len, u32 *crc)
+{
+ *crc = 0;
+ return read_partial_message_chunk(con, section, sec_len, crc);
+}
+
+static int read_sparse_msg_extent(struct ceph_connection *con, u32 *crc)
+{
+ struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
+ bool do_bounce = ceph_test_opt(from_msgr(con->msgr), RXBOUNCE);
+
+ if (do_bounce && unlikely(!con->bounce_page)) {
+ con->bounce_page = alloc_page(GFP_NOIO);
+ if (!con->bounce_page) {
+ pr_err("failed to allocate bounce page\n");
+ return -ENOMEM;
+ }
+ }
+
+ while (cursor->sr_resid > 0) {
+ struct page *page, *rpage;
+ size_t off, len;
+ int ret;
+
+ page = ceph_msg_data_next(cursor, &off, &len);
+ rpage = do_bounce ? con->bounce_page : page;
+
+ /* clamp to what remains in extent */
+ len = min_t(int, len, cursor->sr_resid);
+ ret = ceph_tcp_recvpage(con->sock, rpage, (int)off, len);
+ if (ret <= 0)
+ return ret;
+ *crc = ceph_crc32c_page(*crc, rpage, off, ret);
+ ceph_msg_data_advance(cursor, (size_t)ret);
+ cursor->sr_resid -= ret;
+ if (do_bounce)
+ memcpy_page(page, off, rpage, off, ret);
+ }
+ return 1;
+}
+
+static int read_sparse_msg_data(struct ceph_connection *con)
+{
+ struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
+ bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
+ u32 crc = 0;
+ int ret = 1;
+
+ if (do_datacrc)
+ crc = con->in_data_crc;
+
+ do {
+ if (con->v1.in_sr_kvec.iov_base)
+ ret = read_partial_message_chunk(con,
+ &con->v1.in_sr_kvec,
+ con->v1.in_sr_len,
+ &crc);
+ else if (cursor->sr_resid > 0)
+ ret = read_sparse_msg_extent(con, &crc);
+
+ if (ret <= 0) {
+ if (do_datacrc)
+ con->in_data_crc = crc;
+ return ret;
+ }
+
+ memset(&con->v1.in_sr_kvec, 0, sizeof(con->v1.in_sr_kvec));
+ ret = con->ops->sparse_read(con, cursor,
+ (char **)&con->v1.in_sr_kvec.iov_base);
+ con->v1.in_sr_len = ret;
+ } while (ret > 0);
+
+ if (do_datacrc)
+ con->in_data_crc = crc;
+
+ return ret < 0 ? ret : 1; /* must return > 0 to indicate success */
+}
+
static int read_partial_msg_data(struct ceph_connection *con)
{
struct ceph_msg_data_cursor *cursor = &con->in_msg->cursor;
@@ -1173,7 +1253,9 @@ static int read_partial_message(struct ceph_connection *con)
if (!m->num_data_items)
return -EIO;
- if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE))
+ if (m->sparse_read)
+ ret = read_sparse_msg_data(con);
+ else if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE))
ret = read_partial_msg_data_bounce(con);
else
ret = read_partial_msg_data(con);
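read_sparse_msg_data() above is a driver loop around the new con->ops->sparse_read() hook: each call either hands back a small buffer to fill (the extent bookkeeping, received via the kvec path) or announces a run of extent bytes to stream into the data cursor, and a return of zero or less ends the loop. A self-contained sketch of that callback contract with a made-up two-extent message (all names hypothetical):

#include <stdio.h>
#include <string.h>

struct demo_state {
        int step;
        unsigned int hdr;               /* header receive area */
};

/* Plays the role of con->ops->sparse_read(): returns >0 bytes to
 * receive, with *pbuf set for header reads or left NULL for payload
 * reads; 0 when the message is complete; <0 on error. */
static int demo_sparse_read(struct demo_state *st, char **pbuf)
{
        switch (st->step++) {
        case 0:                         /* extent count / length header */
                *pbuf = (char *)&st->hdr;
                return sizeof(st->hdr);
        case 1:                         /* first extent: 8 payload bytes */
                return 8;
        case 2:                         /* second extent: 4 payload bytes */
                return 4;
        default:
                return 0;               /* message complete */
        }
}

int main(void)
{
        struct demo_state st = { 0, 0 };
        char payload[12];
        int filled = 0;

        for (;;) {
                char *buf = NULL;
                int ret = demo_sparse_read(&st, &buf);

                if (ret < 0)
                        return 1;
                if (ret == 0)
                        break;
                if (buf) {
                        memset(buf, 0xab, ret);         /* "receive" header */
                } else {
                        memset(payload + filled, 'x', ret); /* "receive" data */
                        filled += ret;
                }
        }
        printf("received %d payload bytes\n", filled);
        return 0;
}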
diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
index 1df1d29dee92..d09a39ff2cf0 100644
--- a/net/ceph/messenger_v2.c
+++ b/net/ceph/messenger_v2.c
@@ -8,9 +8,9 @@
#include <linux/ceph/ceph_debug.h>
#include <crypto/aead.h>
-#include <crypto/algapi.h> /* for crypto_memneq() */
#include <crypto/hash.h>
#include <crypto/sha2.h>
+#include <crypto/utils.h>
#include <linux/bvec.h>
#include <linux/crc32c.h>
#include <linux/net.h>
@@ -52,14 +52,16 @@
#define FRAME_LATE_STATUS_COMPLETE 0xe
#define FRAME_LATE_STATUS_ABORTED_MASK 0xf
-#define IN_S_HANDLE_PREAMBLE 1
-#define IN_S_HANDLE_CONTROL 2
-#define IN_S_HANDLE_CONTROL_REMAINDER 3
-#define IN_S_PREPARE_READ_DATA 4
-#define IN_S_PREPARE_READ_DATA_CONT 5
-#define IN_S_PREPARE_READ_ENC_PAGE 6
-#define IN_S_HANDLE_EPILOGUE 7
-#define IN_S_FINISH_SKIP 8
+#define IN_S_HANDLE_PREAMBLE 1
+#define IN_S_HANDLE_CONTROL 2
+#define IN_S_HANDLE_CONTROL_REMAINDER 3
+#define IN_S_PREPARE_READ_DATA 4
+#define IN_S_PREPARE_READ_DATA_CONT 5
+#define IN_S_PREPARE_READ_ENC_PAGE 6
+#define IN_S_PREPARE_SPARSE_DATA 7
+#define IN_S_PREPARE_SPARSE_DATA_CONT 8
+#define IN_S_HANDLE_EPILOGUE 9
+#define IN_S_FINISH_SKIP 10
#define OUT_S_QUEUE_DATA 1
#define OUT_S_QUEUE_DATA_CONT 2
@@ -967,12 +969,48 @@ static void init_sgs_cursor(struct scatterlist **sg,
}
}
+/**
+ * init_sgs_pages: set up scatterlist on an array of page pointers
+ * @sg: scatterlist to populate
+ * @pages: pointer to page array
+ * @dpos: position in the array to start (bytes)
+ * @dlen: len to add to sg (bytes)
+ * @pad: pointer to pad destination (if any)
+ *
+ * Populate the scatterlist from the page array, starting at an arbitrary
+ * byte in the array and running for a specified length.
+ */
+static void init_sgs_pages(struct scatterlist **sg, struct page **pages,
+ int dpos, int dlen, u8 *pad)
+{
+ int idx = dpos >> PAGE_SHIFT;
+ int off = offset_in_page(dpos);
+ int resid = dlen;
+
+ do {
+ int len = min(resid, (int)PAGE_SIZE - off);
+
+ sg_set_page(*sg, pages[idx], len, off);
+ *sg = sg_next(*sg);
+ off = 0;
+ ++idx;
+ resid -= len;
+ } while (resid);
+
+ if (need_padding(dlen)) {
+ sg_set_buf(*sg, pad, padding_len(dlen));
+ *sg = sg_next(*sg);
+ }
+}
+
static int setup_message_sgs(struct sg_table *sgt, struct ceph_msg *msg,
u8 *front_pad, u8 *middle_pad, u8 *data_pad,
- void *epilogue, bool add_tag)
+ void *epilogue, struct page **pages, int dpos,
+ bool add_tag)
{
struct ceph_msg_data_cursor cursor;
struct scatterlist *cur_sg;
+ int dlen = data_len(msg);
int sg_cnt;
int ret;
@@ -986,9 +1024,15 @@ static int setup_message_sgs(struct sg_table *sgt, struct ceph_msg *msg,
if (middle_len(msg))
sg_cnt += calc_sg_cnt(msg->middle->vec.iov_base,
middle_len(msg));
- if (data_len(msg)) {
- ceph_msg_data_cursor_init(&cursor, msg, data_len(msg));
- sg_cnt += calc_sg_cnt_cursor(&cursor);
+ if (dlen) {
+ if (pages) {
+ sg_cnt += calc_pages_for(dpos, dlen);
+ if (need_padding(dlen))
+ sg_cnt++;
+ } else {
+ ceph_msg_data_cursor_init(&cursor, msg, dlen);
+ sg_cnt += calc_sg_cnt_cursor(&cursor);
+ }
}
ret = sg_alloc_table(sgt, sg_cnt, GFP_NOIO);
@@ -1002,9 +1046,13 @@ static int setup_message_sgs(struct sg_table *sgt, struct ceph_msg *msg,
if (middle_len(msg))
init_sgs(&cur_sg, msg->middle->vec.iov_base, middle_len(msg),
middle_pad);
- if (data_len(msg)) {
- ceph_msg_data_cursor_init(&cursor, msg, data_len(msg));
- init_sgs_cursor(&cur_sg, &cursor, data_pad);
+ if (dlen) {
+ if (pages) {
+ init_sgs_pages(&cur_sg, pages, dpos, dlen, data_pad);
+ } else {
+ ceph_msg_data_cursor_init(&cursor, msg, dlen);
+ init_sgs_cursor(&cur_sg, &cursor, data_pad);
+ }
}
WARN_ON(!sg_is_last(cur_sg));
@@ -1039,10 +1087,53 @@ static int decrypt_control_remainder(struct ceph_connection *con)
padded_len(rem_len) + CEPH_GCM_TAG_LEN);
}
+/* Process sparse read data that lives in a buffer */
+static int process_v2_sparse_read(struct ceph_connection *con,
+ struct page **pages, int spos)
+{
+ struct ceph_msg_data_cursor *cursor = &con->v2.in_cursor;
+ int ret;
+
+ for (;;) {
+ char *buf = NULL;
+
+ ret = con->ops->sparse_read(con, cursor, &buf);
+ if (ret <= 0)
+ return ret;
+
+ dout("%s: sparse_read return %x buf %p\n", __func__, ret, buf);
+
+ do {
+ int idx = spos >> PAGE_SHIFT;
+ int soff = offset_in_page(spos);
+ struct page *spage = con->v2.in_enc_pages[idx];
+ int len = min_t(int, ret, PAGE_SIZE - soff);
+
+ if (buf) {
+ memcpy_from_page(buf, spage, soff, len);
+ buf += len;
+ } else {
+ struct bio_vec bv;
+
+ get_bvec_at(cursor, &bv);
+ len = min_t(int, len, bv.bv_len);
+ memcpy_page(bv.bv_page, bv.bv_offset,
+ spage, soff, len);
+ ceph_msg_data_advance(cursor, len);
+ }
+ spos += len;
+ ret -= len;
+ } while (ret);
+ }
+}
+
static int decrypt_tail(struct ceph_connection *con)
{
struct sg_table enc_sgt = {};
struct sg_table sgt = {};
+ struct page **pages = NULL;
+ bool sparse = con->in_msg->sparse_read;
+ int dpos = 0;
int tail_len;
int ret;
@@ -1053,9 +1144,14 @@ static int decrypt_tail(struct ceph_connection *con)
if (ret)
goto out;
+ if (sparse) {
+ dpos = padded_len(front_len(con->in_msg) + padded_len(middle_len(con->in_msg)));
+ pages = con->v2.in_enc_pages;
+ }
+
ret = setup_message_sgs(&sgt, con->in_msg, FRONT_PAD(con->v2.in_buf),
- MIDDLE_PAD(con->v2.in_buf), DATA_PAD(con->v2.in_buf),
- con->v2.in_buf, true);
+ MIDDLE_PAD(con->v2.in_buf), DATA_PAD(con->v2.in_buf),
+ con->v2.in_buf, pages, dpos, true);
if (ret)
goto out;
@@ -1065,6 +1161,12 @@ static int decrypt_tail(struct ceph_connection *con)
if (ret)
goto out;
+ if (sparse && data_len(con->in_msg)) {
+ ret = process_v2_sparse_read(con, con->v2.in_enc_pages, dpos);
+ if (ret)
+ goto out;
+ }
+
WARN_ON(!con->v2.in_enc_page_cnt);
ceph_release_page_vector(con->v2.in_enc_pages,
con->v2.in_enc_page_cnt);
@@ -1588,7 +1690,7 @@ static int prepare_message_secure(struct ceph_connection *con)
encode_epilogue_secure(con, false);
ret = setup_message_sgs(&sgt, con->out_msg, zerop, zerop, zerop,
- &con->v2.out_epil, false);
+ &con->v2.out_epil, NULL, 0, false);
if (ret)
goto out;
@@ -1825,6 +1927,123 @@ static void prepare_read_data_cont(struct ceph_connection *con)
con->v2.in_state = IN_S_HANDLE_EPILOGUE;
}
+static int prepare_sparse_read_cont(struct ceph_connection *con)
+{
+ int ret;
+ struct bio_vec bv;
+ char *buf = NULL;
+ struct ceph_msg_data_cursor *cursor = &con->v2.in_cursor;
+
+ WARN_ON(con->v2.in_state != IN_S_PREPARE_SPARSE_DATA_CONT);
+
+ if (iov_iter_is_bvec(&con->v2.in_iter)) {
+ if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
+ con->in_data_crc = crc32c(con->in_data_crc,
+ page_address(con->bounce_page),
+ con->v2.in_bvec.bv_len);
+ get_bvec_at(cursor, &bv);
+ memcpy_to_page(bv.bv_page, bv.bv_offset,
+ page_address(con->bounce_page),
+ con->v2.in_bvec.bv_len);
+ } else {
+ con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
+ con->v2.in_bvec.bv_page,
+ con->v2.in_bvec.bv_offset,
+ con->v2.in_bvec.bv_len);
+ }
+
+ ceph_msg_data_advance(cursor, con->v2.in_bvec.bv_len);
+ cursor->sr_resid -= con->v2.in_bvec.bv_len;
+ dout("%s: advance by 0x%x sr_resid 0x%x\n", __func__,
+ con->v2.in_bvec.bv_len, cursor->sr_resid);
+ WARN_ON_ONCE(cursor->sr_resid > cursor->total_resid);
+ if (cursor->sr_resid) {
+ get_bvec_at(cursor, &bv);
+ if (bv.bv_len > cursor->sr_resid)
+ bv.bv_len = cursor->sr_resid;
+ if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
+ bv.bv_page = con->bounce_page;
+ bv.bv_offset = 0;
+ }
+ set_in_bvec(con, &bv);
+ con->v2.data_len_remain -= bv.bv_len;
+ return 0;
+ }
+ } else if (iov_iter_is_kvec(&con->v2.in_iter)) {
+ /* On first call, we have no kvec so don't compute crc */
+ if (con->v2.in_kvec_cnt) {
+ WARN_ON_ONCE(con->v2.in_kvec_cnt > 1);
+ con->in_data_crc = crc32c(con->in_data_crc,
+ con->v2.in_kvecs[0].iov_base,
+ con->v2.in_kvecs[0].iov_len);
+ }
+ } else {
+ return -EIO;
+ }
+
+ /* get next extent */
+ ret = con->ops->sparse_read(con, cursor, &buf);
+ if (ret <= 0) {
+ if (ret < 0)
+ return ret;
+
+ reset_in_kvecs(con);
+ add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
+ con->v2.in_state = IN_S_HANDLE_EPILOGUE;
+ return 0;
+ }
+
+ if (buf) {
+ /* receive into buffer */
+ reset_in_kvecs(con);
+ add_in_kvec(con, buf, ret);
+ con->v2.data_len_remain -= ret;
+ return 0;
+ }
+
+ if (ret > cursor->total_resid) {
+ pr_warn("%s: ret 0x%x total_resid 0x%zx resid 0x%zx\n",
+ __func__, ret, cursor->total_resid, cursor->resid);
+ return -EIO;
+ }
+ get_bvec_at(cursor, &bv);
+ if (bv.bv_len > cursor->sr_resid)
+ bv.bv_len = cursor->sr_resid;
+ if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
+ if (unlikely(!con->bounce_page)) {
+ con->bounce_page = alloc_page(GFP_NOIO);
+ if (!con->bounce_page) {
+ pr_err("failed to allocate bounce page\n");
+ return -ENOMEM;
+ }
+ }
+
+ bv.bv_page = con->bounce_page;
+ bv.bv_offset = 0;
+ }
+ set_in_bvec(con, &bv);
+ con->v2.data_len_remain -= ret;
+ return ret;
+}
+
+static int prepare_sparse_read_data(struct ceph_connection *con)
+{
+ struct ceph_msg *msg = con->in_msg;
+
+ dout("%s: starting sparse read\n", __func__);
+
+ if (WARN_ON_ONCE(!con->ops->sparse_read))
+ return -EOPNOTSUPP;
+
+ if (!con_secure(con))
+ con->in_data_crc = -1;
+
+ reset_in_kvecs(con);
+ con->v2.in_state = IN_S_PREPARE_SPARSE_DATA_CONT;
+ con->v2.data_len_remain = data_len(msg);
+ return prepare_sparse_read_cont(con);
+}
+
static int prepare_read_tail_plain(struct ceph_connection *con)
{
struct ceph_msg *msg = con->in_msg;
@@ -1845,7 +2064,10 @@ static int prepare_read_tail_plain(struct ceph_connection *con)
}
if (data_len(msg)) {
- con->v2.in_state = IN_S_PREPARE_READ_DATA;
+ if (msg->sparse_read)
+ con->v2.in_state = IN_S_PREPARE_SPARSE_DATA;
+ else
+ con->v2.in_state = IN_S_PREPARE_READ_DATA;
} else {
add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
con->v2.in_state = IN_S_HANDLE_EPILOGUE;
@@ -2898,6 +3120,12 @@ static int populate_in_iter(struct ceph_connection *con)
prepare_read_enc_page(con);
ret = 0;
break;
+ case IN_S_PREPARE_SPARSE_DATA:
+ ret = prepare_sparse_read_data(con);
+ break;
+ case IN_S_PREPARE_SPARSE_DATA_CONT:
+ ret = prepare_sparse_read_cont(con);
+ break;
case IN_S_HANDLE_EPILOGUE:
ret = handle_epilogue(con);
break;
@@ -3489,6 +3717,23 @@ static void revoke_at_prepare_read_enc_page(struct ceph_connection *con)
con->v2.in_state = IN_S_FINISH_SKIP;
}
+static void revoke_at_prepare_sparse_data(struct ceph_connection *con)
+{
+ int resid; /* current piece of data */
+ int remaining;
+
+ WARN_ON(con_secure(con));
+ WARN_ON(!data_len(con->in_msg));
+ WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
+ resid = iov_iter_count(&con->v2.in_iter);
+ dout("%s con %p resid %d\n", __func__, con, resid);
+
+ remaining = CEPH_EPILOGUE_PLAIN_LEN + con->v2.data_len_remain;
+ con->v2.in_iter.count -= resid;
+ set_in_skip(con, resid + remaining);
+ con->v2.in_state = IN_S_FINISH_SKIP;
+}
+
static void revoke_at_handle_epilogue(struct ceph_connection *con)
{
int resid;
@@ -3505,6 +3750,7 @@ static void revoke_at_handle_epilogue(struct ceph_connection *con)
void ceph_con_v2_revoke_incoming(struct ceph_connection *con)
{
switch (con->v2.in_state) {
+ case IN_S_PREPARE_SPARSE_DATA:
case IN_S_PREPARE_READ_DATA:
revoke_at_prepare_read_data(con);
break;
@@ -3514,6 +3760,9 @@ void ceph_con_v2_revoke_incoming(struct ceph_connection *con)
case IN_S_PREPARE_READ_ENC_PAGE:
revoke_at_prepare_read_enc_page(con);
break;
+ case IN_S_PREPARE_SPARSE_DATA_CONT:
+ revoke_at_prepare_sparse_data(con);
+ break;
case IN_S_HANDLE_EPILOGUE:
revoke_at_handle_epilogue(con);
break;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 658a6f2320cf..d3a759e052c8 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -171,6 +171,13 @@ static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
osd_data->num_bvecs = num_bvecs;
}
+static void ceph_osd_iter_init(struct ceph_osd_data *osd_data,
+ struct iov_iter *iter)
+{
+ osd_data->type = CEPH_OSD_DATA_TYPE_ITER;
+ osd_data->iter = *iter;
+}
+
static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
@@ -264,6 +271,22 @@ void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);
+/**
+ * osd_req_op_extent_osd_iter - Set up an operation with an iterator buffer
+ * @osd_req: The request to set up
+ * @which: Index of the operation in which to set the iter
+ * @iter: The buffer iterator
+ */
+void osd_req_op_extent_osd_iter(struct ceph_osd_request *osd_req,
+ unsigned int which, struct iov_iter *iter)
+{
+ struct ceph_osd_data *osd_data;
+
+ osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
+ ceph_osd_iter_init(osd_data, iter);
+}
+EXPORT_SYMBOL(osd_req_op_extent_osd_iter);
+
static void osd_req_op_cls_request_info_pagelist(
struct ceph_osd_request *osd_req,
unsigned int which, struct ceph_pagelist *pagelist)
@@ -346,6 +369,8 @@ static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
#endif /* CONFIG_BLOCK */
case CEPH_OSD_DATA_TYPE_BVECS:
return osd_data->bvec_pos.iter.bi_size;
+ case CEPH_OSD_DATA_TYPE_ITER:
+ return iov_iter_count(&osd_data->iter);
default:
WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
return 0;
@@ -376,8 +401,10 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
switch (op->op) {
case CEPH_OSD_OP_READ:
+ case CEPH_OSD_OP_SPARSE_READ:
case CEPH_OSD_OP_WRITE:
case CEPH_OSD_OP_WRITEFULL:
+ kfree(op->extent.sparse_ext);
ceph_osd_data_release(&op->extent.osd_data);
break;
case CEPH_OSD_OP_CALL:
@@ -669,6 +696,7 @@ static void get_num_data_items(struct ceph_osd_request *req,
/* reply */
case CEPH_OSD_OP_STAT:
case CEPH_OSD_OP_READ:
+ case CEPH_OSD_OP_SPARSE_READ:
case CEPH_OSD_OP_LIST_WATCHERS:
*num_reply_data_items += 1;
break;
@@ -738,7 +766,7 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
- opcode != CEPH_OSD_OP_TRUNCATE);
+ opcode != CEPH_OSD_OP_TRUNCATE && opcode != CEPH_OSD_OP_SPARSE_READ);
op->extent.offset = offset;
op->extent.length = length;
@@ -951,6 +979,8 @@ static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
#endif
} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
+ } else if (osd_data->type == CEPH_OSD_DATA_TYPE_ITER) {
+ ceph_msg_data_add_iter(msg, &osd_data->iter);
} else {
BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
}
@@ -963,6 +993,7 @@ static u32 osd_req_encode_op(struct ceph_osd_op *dst,
case CEPH_OSD_OP_STAT:
break;
case CEPH_OSD_OP_READ:
+ case CEPH_OSD_OP_SPARSE_READ:
case CEPH_OSD_OP_WRITE:
case CEPH_OSD_OP_WRITEFULL:
case CEPH_OSD_OP_ZERO:
@@ -1017,6 +1048,10 @@ static u32 osd_req_encode_op(struct ceph_osd_op *dst,
dst->copy_from.src_fadvise_flags =
cpu_to_le32(src->copy_from.src_fadvise_flags);
break;
+ case CEPH_OSD_OP_ASSERT_VER:
+ dst->assert_ver.unused = cpu_to_le64(0);
+ dst->assert_ver.ver = cpu_to_le64(src->assert_ver.ver);
+ break;
default:
pr_err("unsupported osd opcode %s\n",
ceph_osd_op_name(src->op));
@@ -1059,7 +1094,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
- opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);
+ opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE &&
+ opcode != CEPH_OSD_OP_SPARSE_READ);
req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
GFP_NOFS);
@@ -1100,15 +1136,30 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
if (flags & CEPH_OSD_FLAG_WRITE)
req->r_data_offset = off;
- if (num_ops > 1)
+ if (num_ops > 1) {
+ int num_req_ops, num_rep_ops;
+
/*
- * This is a special case for ceph_writepages_start(), but it
- * also covers ceph_uninline_data(). If more multi-op request
- * use cases emerge, we will need a separate helper.
+ * If this is a multi-op write request, assume that we'll need
+ * request ops. If it's a multi-op read then assume we'll need
+ * reply ops. Anything else and call it -EINVAL.
*/
- r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
- else
+ if (flags & CEPH_OSD_FLAG_WRITE) {
+ num_req_ops = num_ops;
+ num_rep_ops = 0;
+ } else if (flags & CEPH_OSD_FLAG_READ) {
+ num_req_ops = 0;
+ num_rep_ops = num_ops;
+ } else {
+ r = -EINVAL;
+ goto fail;
+ }
+
+ r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_req_ops,
+ num_rep_ops);
+ } else {
r = ceph_osdc_alloc_messages(req, GFP_NOFS);
+ }
if (r)
goto fail;
@@ -1120,6 +1171,18 @@ fail:
}
EXPORT_SYMBOL(ceph_osdc_new_request);
+int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt)
+{
+ op->extent.sparse_ext_cnt = cnt;
+ op->extent.sparse_ext = kmalloc_array(cnt,
+ sizeof(*op->extent.sparse_ext),
+ GFP_NOFS);
+ if (!op->extent.sparse_ext)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL(__ceph_alloc_sparse_ext_map);
+
/*
* We keep osd requests in an rbtree, sorted by ->r_tid.
*/
@@ -1177,6 +1240,7 @@ static void osd_init(struct ceph_osd *osd)
{
refcount_set(&osd->o_ref, 1);
RB_CLEAR_NODE(&osd->o_node);
+ spin_lock_init(&osd->o_requests_lock);
osd->o_requests = RB_ROOT;
osd->o_linger_requests = RB_ROOT;
osd->o_backoff_mappings = RB_ROOT;
@@ -1187,6 +1251,13 @@ static void osd_init(struct ceph_osd *osd)
mutex_init(&osd->lock);
}
+static void ceph_init_sparse_read(struct ceph_sparse_read *sr)
+{
+ kfree(sr->sr_extent);
+ memset(sr, '\0', sizeof(*sr));
+ sr->sr_state = CEPH_SPARSE_READ_HDR;
+}
+
static void osd_cleanup(struct ceph_osd *osd)
{
WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
@@ -1197,6 +1268,8 @@ static void osd_cleanup(struct ceph_osd *osd)
WARN_ON(!list_empty(&osd->o_osd_lru));
WARN_ON(!list_empty(&osd->o_keepalive_item));
+ ceph_init_sparse_read(&osd->o_sparse_read);
+
if (osd->o_auth.authorizer) {
WARN_ON(osd_homeless(osd));
ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
@@ -1216,6 +1289,9 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
osd_init(osd);
osd->o_osdc = osdc;
osd->o_osd = onum;
+ osd->o_sparse_op_idx = -1;
+
+ ceph_init_sparse_read(&osd->o_sparse_read);
ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
@@ -1406,7 +1482,9 @@ static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
atomic_inc(&osd->o_osdc->num_homeless);
get_osd(osd);
+ spin_lock(&osd->o_requests_lock);
insert_request(&osd->o_requests, req);
+ spin_unlock(&osd->o_requests_lock);
req->r_osd = osd;
}
@@ -1418,7 +1496,9 @@ static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
req, req->r_tid);
req->r_osd = NULL;
+ spin_lock(&osd->o_requests_lock);
erase_request(&osd->o_requests, req);
+ spin_unlock(&osd->o_requests_lock);
put_osd(osd);
if (!osd_homeless(osd))
@@ -2016,6 +2096,7 @@ static void setup_request_data(struct ceph_osd_request *req)
&op->raw_data_in);
break;
case CEPH_OSD_OP_READ:
+ case CEPH_OSD_OP_SPARSE_READ:
ceph_osdc_msg_data_add(reply_msg,
&op->extent.osd_data);
break;
@@ -2435,8 +2516,10 @@ static void finish_request(struct ceph_osd_request *req)
req->r_end_latency = ktime_get();
- if (req->r_osd)
+ if (req->r_osd) {
+ ceph_init_sparse_read(&req->r_osd->o_sparse_read);
unlink_request(req->r_osd, req);
+ }
atomic_dec(&osdc->num_requests);
/*
@@ -3795,6 +3878,7 @@ static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
* one (type of) reply back.
*/
WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
+ req->r_version = m.user_version;
req->r_result = m.result ?: data_len;
finish_request(req);
mutex_unlock(&osd->lock);
@@ -5348,6 +5432,24 @@ static void osd_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
ceph_msg_put(msg);
}
+/* How much sparse data was requested? */
+static u64 sparse_data_requested(struct ceph_osd_request *req)
+{
+ u64 len = 0;
+
+ if (req->r_flags & CEPH_OSD_FLAG_READ) {
+ int i;
+
+ for (i = 0; i < req->r_num_ops; ++i) {
+ struct ceph_osd_req_op *op = &req->r_ops[i];
+
+ if (op->op == CEPH_OSD_OP_SPARSE_READ)
+ len += op->extent.length;
+ }
+ }
+ return len;
+}
+
/*
* Lookup and return message for incoming reply. Don't try to do
* anything about a larger than preallocated data portion of the
@@ -5364,6 +5466,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
int front_len = le32_to_cpu(hdr->front_len);
int data_len = le32_to_cpu(hdr->data_len);
u64 tid = le64_to_cpu(hdr->tid);
+ u64 srlen;
down_read(&osdc->lock);
if (!osd_registered(osd)) {
@@ -5396,7 +5499,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
req->r_reply = m;
}
- if (data_len > req->r_reply->data_length) {
+ srlen = sparse_data_requested(req);
+ if (!srlen && data_len > req->r_reply->data_length) {
pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
__func__, osd->o_osd, req->r_tid, data_len,
req->r_reply->data_length);
@@ -5406,6 +5510,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
}
m = ceph_msg_get(req->r_reply);
+ m->sparse_read = (bool)srlen;
+
dout("get_reply tid %lld %p\n", tid, m);
out_unlock_session:
@@ -5638,9 +5744,217 @@ static int osd_check_message_signature(struct ceph_msg *msg)
return ceph_auth_check_message_signature(auth, msg);
}
+static void advance_cursor(struct ceph_msg_data_cursor *cursor, size_t len,
+ bool zero)
+{
+ while (len) {
+ struct page *page;
+ size_t poff, plen;
+
+ page = ceph_msg_data_next(cursor, &poff, &plen);
+ if (plen > len)
+ plen = len;
+ if (zero)
+ zero_user_segment(page, poff, poff + plen);
+ len -= plen;
+ ceph_msg_data_advance(cursor, plen);
+ }
+}
+
+static int prep_next_sparse_read(struct ceph_connection *con,
+ struct ceph_msg_data_cursor *cursor)
+{
+ struct ceph_osd *o = con->private;
+ struct ceph_sparse_read *sr = &o->o_sparse_read;
+ struct ceph_osd_request *req;
+ struct ceph_osd_req_op *op;
+
+ spin_lock(&o->o_requests_lock);
+ req = lookup_request(&o->o_requests, le64_to_cpu(con->in_msg->hdr.tid));
+ if (!req) {
+ spin_unlock(&o->o_requests_lock);
+ return -EBADR;
+ }
+
+ if (o->o_sparse_op_idx < 0) {
+ u64 srlen = sparse_data_requested(req);
+
+ dout("%s: [%d] starting new sparse read req. srlen=0x%llx\n",
+ __func__, o->o_osd, srlen);
+ ceph_msg_data_cursor_init(cursor, con->in_msg, srlen);
+ } else {
+ u64 end;
+
+ op = &req->r_ops[o->o_sparse_op_idx];
+
+ WARN_ON_ONCE(op->extent.sparse_ext);
+
+ /* hand back buffer we took earlier */
+ op->extent.sparse_ext = sr->sr_extent;
+ sr->sr_extent = NULL;
+ op->extent.sparse_ext_cnt = sr->sr_count;
+ sr->sr_ext_len = 0;
+ dout("%s: [%d] completed extent array len %d cursor->resid %zd\n",
+ __func__, o->o_osd, op->extent.sparse_ext_cnt, cursor->resid);
+ /* Advance to end of data for this operation */
+ end = ceph_sparse_ext_map_end(op);
+ if (end < sr->sr_req_len)
+ advance_cursor(cursor, sr->sr_req_len - end, false);
+ }
+
+ ceph_init_sparse_read(sr);
+
+ /* find next op in this request (if any) */
+ while (++o->o_sparse_op_idx < req->r_num_ops) {
+ op = &req->r_ops[o->o_sparse_op_idx];
+ if (op->op == CEPH_OSD_OP_SPARSE_READ)
+ goto found;
+ }
+
+ /* reset for next sparse read request */
+ spin_unlock(&o->o_requests_lock);
+ o->o_sparse_op_idx = -1;
+ return 0;
+found:
+ sr->sr_req_off = op->extent.offset;
+ sr->sr_req_len = op->extent.length;
+ sr->sr_pos = sr->sr_req_off;
+ dout("%s: [%d] new sparse read op at idx %d 0x%llx~0x%llx\n", __func__,
+ o->o_osd, o->o_sparse_op_idx, sr->sr_req_off, sr->sr_req_len);
+
+ /* hand off request's sparse extent map buffer */
+ sr->sr_ext_len = op->extent.sparse_ext_cnt;
+ op->extent.sparse_ext_cnt = 0;
+ sr->sr_extent = op->extent.sparse_ext;
+ op->extent.sparse_ext = NULL;
+
+ spin_unlock(&o->o_requests_lock);
+ return 1;
+}
+
+#ifdef __BIG_ENDIAN
+static inline void convert_extent_map(struct ceph_sparse_read *sr)
+{
+ int i;
+
+ for (i = 0; i < sr->sr_count; i++) {
+ struct ceph_sparse_extent *ext = &sr->sr_extent[i];
+
+ ext->off = le64_to_cpu((__force __le64)ext->off);
+ ext->len = le64_to_cpu((__force __le64)ext->len);
+ }
+}
+#else
+static inline void convert_extent_map(struct ceph_sparse_read *sr)
+{
+}
+#endif
+
+#define MAX_EXTENTS 4096
+
+static int osd_sparse_read(struct ceph_connection *con,
+ struct ceph_msg_data_cursor *cursor,
+ char **pbuf)
+{
+ struct ceph_osd *o = con->private;
+ struct ceph_sparse_read *sr = &o->o_sparse_read;
+ u32 count = sr->sr_count;
+ u64 eoff, elen;
+ int ret;
+
+ switch (sr->sr_state) {
+ case CEPH_SPARSE_READ_HDR:
+next_op:
+ ret = prep_next_sparse_read(con, cursor);
+ if (ret <= 0)
+ return ret;
+
+ /* number of extents */
+ ret = sizeof(sr->sr_count);
+ *pbuf = (char *)&sr->sr_count;
+ sr->sr_state = CEPH_SPARSE_READ_EXTENTS;
+ break;
+ case CEPH_SPARSE_READ_EXTENTS:
+ /* Convert sr_count to host-endian */
+ count = le32_to_cpu((__force __le32)sr->sr_count);
+ sr->sr_count = count;
+ dout("[%d] got %u extents\n", o->o_osd, count);
+
+ if (count > 0) {
+ if (!sr->sr_extent || count > sr->sr_ext_len) {
+ /*
+ * Apply a hard cap to the number of extents.
+ * If we have more, assume something is wrong.
+ */
+ if (count > MAX_EXTENTS) {
+ dout("%s: OSD returned 0x%x extents in a single reply!\n",
+ __func__, count);
+ return -EREMOTEIO;
+ }
+
+ /* no extent array provided, or too short */
+ kfree(sr->sr_extent);
+ sr->sr_extent = kmalloc_array(count,
+ sizeof(*sr->sr_extent),
+ GFP_NOIO);
+ if (!sr->sr_extent)
+ return -ENOMEM;
+ sr->sr_ext_len = count;
+ }
+ ret = count * sizeof(*sr->sr_extent);
+ *pbuf = (char *)sr->sr_extent;
+ sr->sr_state = CEPH_SPARSE_READ_DATA_LEN;
+ break;
+ }
+ /* No extents? Read data len */
+ fallthrough;
+ case CEPH_SPARSE_READ_DATA_LEN:
+ convert_extent_map(sr);
+ ret = sizeof(sr->sr_datalen);
+ *pbuf = (char *)&sr->sr_datalen;
+ sr->sr_state = CEPH_SPARSE_READ_DATA;
+ break;
+ case CEPH_SPARSE_READ_DATA:
+ if (sr->sr_index >= count) {
+ sr->sr_state = CEPH_SPARSE_READ_HDR;
+ goto next_op;
+ }
+
+ eoff = sr->sr_extent[sr->sr_index].off;
+ elen = sr->sr_extent[sr->sr_index].len;
+
+ dout("[%d] ext %d off 0x%llx len 0x%llx\n",
+ o->o_osd, sr->sr_index, eoff, elen);
+
+ if (elen > INT_MAX) {
+ dout("Sparse read extent length too long (0x%llx)\n",
+ elen);
+ return -EREMOTEIO;
+ }
+
+ /* zero out anything from sr_pos to start of extent */
+ if (sr->sr_pos < eoff)
+ advance_cursor(cursor, eoff - sr->sr_pos, true);
+
+ /* Set position to end of extent */
+ sr->sr_pos = eoff + elen;
+
+ /* send back the new length and nullify the ptr */
+ cursor->sr_resid = elen;
+ ret = elen;
+ *pbuf = NULL;
+
+ /* Bump the array index */
+ ++sr->sr_index;
+ break;
+ }
+ return ret;
+}
+
static const struct ceph_connection_operations osd_con_ops = {
.get = osd_get_con,
.put = osd_put_con,
+ .sparse_read = osd_sparse_read,
.alloc_msg = osd_alloc_msg,
.dispatch = osd_dispatch,
.fault = osd_fault,
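osd_sparse_read() above walks a reply that, as the state machine reads it, carries a 32-bit extent count, an array of {offset, length} extent descriptors, a 32-bit data length and then the extent payloads in order; holes between extents are zero-filled on the client via advance_cursor(..., true). A small standalone sketch of reassembling such a reply into a flat buffer (my reading of the layout, for illustration only):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One extent of a sparse read reply: where the data sits inside the
 * requested range and how long it is (cf. struct ceph_sparse_extent). */
struct extent {
        uint64_t off;
        uint64_t len;
};

/* Reassemble a sparse read into a flat buffer covering
 * [req_off, req_off + req_len): copy each extent into place and
 * zero-fill the holes, the way the receive path zero-fills via
 * advance_cursor(). 'data' holds the extents' bytes back to back. */
static void reassemble(char *dst, uint64_t req_off, uint64_t req_len,
                       const struct extent *ext, uint32_t count,
                       const char *data)
{
        uint64_t pos = req_off;
        const char *src = data;

        for (uint32_t i = 0; i < count; i++) {
                /* hole between the current position and this extent */
                if (ext[i].off > pos)
                        memset(dst + (pos - req_off), 0, ext[i].off - pos);
                memcpy(dst + (ext[i].off - req_off), src, ext[i].len);
                src += ext[i].len;
                pos = ext[i].off + ext[i].len;
        }
        /* trailing hole up to the end of the requested range */
        if (pos < req_off + req_len)
                memset(dst + (pos - req_off), 0, req_off + req_len - pos);
}

int main(void)
{
        struct extent ext[] = { { 4, 3 }, { 10, 2 } };
        char dst[16];

        reassemble(dst, 0, sizeof(dst), ext, 2, "ABCDE");
        for (size_t i = 0; i < sizeof(dst); i++)
                putchar(dst[i] ? dst[i] : '.');
        putchar('\n');          /* prints ....ABC...DE.... */
        return 0;
}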
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 89d15ceaf9af..b3b3af0e7844 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -1831,8 +1831,7 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
memset(&keys, 0, sizeof(keys));
__skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric,
- &keys, NULL, 0, 0, 0,
- FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+ &keys, NULL, 0, 0, 0, 0);
return __flow_hash_from_keys(&keys, &hashrnd);
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 45707059082f..4eaf7ed0d1f4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -550,7 +550,7 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
bool *pfmemalloc)
{
bool ret_pfmemalloc = false;
- unsigned int obj_size;
+ size_t obj_size;
void *obj;
obj_size = SKB_HEAD_ALIGN(*size);
@@ -567,7 +567,13 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
goto out;
}
- *size = obj_size = kmalloc_size_roundup(obj_size);
+
+ obj_size = kmalloc_size_roundup(obj_size);
+ /* The following cast might truncate high-order bits of obj_size, this
+ * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
+ */
+ *size = (unsigned int)obj_size;
+
/*
* Try a regular allocation, when that fails and we're not entitled
* to the reserves, fail.
@@ -4423,21 +4429,20 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
struct sk_buff *segs = NULL;
struct sk_buff *tail = NULL;
struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
- skb_frag_t *frag = skb_shinfo(head_skb)->frags;
unsigned int mss = skb_shinfo(head_skb)->gso_size;
unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
- struct sk_buff *frag_skb = head_skb;
unsigned int offset = doffset;
unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
unsigned int partial_segs = 0;
unsigned int headroom;
unsigned int len = head_skb->len;
+ struct sk_buff *frag_skb;
+ skb_frag_t *frag;
__be16 proto;
bool csum, sg;
- int nfrags = skb_shinfo(head_skb)->nr_frags;
int err = -ENOMEM;
int i = 0;
- int pos;
+ int nfrags, pos;
if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
@@ -4514,6 +4519,13 @@ normal:
headroom = skb_headroom(head_skb);
pos = skb_headlen(head_skb);
+ if (skb_orphan_frags(head_skb, GFP_ATOMIC))
+ return ERR_PTR(-ENOMEM);
+
+ nfrags = skb_shinfo(head_skb)->nr_frags;
+ frag = skb_shinfo(head_skb)->frags;
+ frag_skb = head_skb;
+
do {
struct sk_buff *nskb;
skb_frag_t *nskb_frag;
@@ -4534,6 +4546,10 @@ normal:
(skb_headlen(list_skb) == len || sg)) {
BUG_ON(skb_headlen(list_skb) > len);
+ nskb = skb_clone(list_skb, GFP_ATOMIC);
+ if (unlikely(!nskb))
+ goto err;
+
i = 0;
nfrags = skb_shinfo(list_skb)->nr_frags;
frag = skb_shinfo(list_skb)->frags;
@@ -4552,12 +4568,8 @@ normal:
frag++;
}
- nskb = skb_clone(list_skb, GFP_ATOMIC);
list_skb = list_skb->next;
- if (unlikely(!nskb))
- goto err;
-
if (unlikely(pskb_trim(nskb, len))) {
kfree_skb(nskb);
goto err;
@@ -4633,12 +4645,16 @@ normal:
skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags &
SKBFL_SHARED_FRAG;
- if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
- skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
+ if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
goto err;
while (pos < offset + len) {
if (i >= nfrags) {
+ if (skb_orphan_frags(list_skb, GFP_ATOMIC) ||
+ skb_zerocopy_clone(nskb, list_skb,
+ GFP_ATOMIC))
+ goto err;
+
i = 0;
nfrags = skb_shinfo(list_skb)->nr_frags;
frag = skb_shinfo(list_skb)->frags;
@@ -4652,10 +4668,6 @@ normal:
i--;
frag--;
}
- if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
- skb_zerocopy_clone(nskb, frag_skb,
- GFP_ATOMIC))
- goto err;
list_skb = list_skb->next;
}
@@ -5207,7 +5219,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
serr->ee.ee_info = tstype;
serr->opt_stats = opt_stats;
serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
- if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
+ if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) {
serr->ee.ee_data = skb_shinfo(skb)->tskey;
if (sk_is_tcp(sk))
serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
@@ -5263,21 +5275,23 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
{
struct sk_buff *skb;
bool tsonly, opt_stats = false;
+ u32 tsflags;
if (!sk)
return;
- if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
+ tsflags = READ_ONCE(sk->sk_tsflags);
+ if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
return;
- tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
+ tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
if (!skb_may_tx_timestamp(sk, tsonly))
return;
if (tsonly) {
#ifdef CONFIG_INET
- if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
+ if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
sk_is_tcp(sk)) {
skb = tcp_get_timestamping_opt_stats(sk, orig_skb,
ack_skb);
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index a0659fc29bcc..6c31eefbd777 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -612,12 +612,18 @@ static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb
static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
u32 off, u32 len, bool ingress)
{
+ int err = 0;
+
if (!ingress) {
if (!sock_writeable(psock->sk))
return -EAGAIN;
return skb_send_sock(psock->sk, skb, off, len);
}
- return sk_psock_skb_ingress(psock, skb, off, len);
+ skb_get(skb);
+ err = sk_psock_skb_ingress(psock, skb, off, len);
+ if (err < 0)
+ kfree_skb(skb);
+ return err;
}
static void sk_psock_skb_state(struct sk_psock *psock,
@@ -685,9 +691,7 @@ static void sk_psock_backlog(struct work_struct *work)
} while (len);
skb = skb_dequeue(&psock->ingress_skb);
- if (!ingress) {
- kfree_skb(skb);
- }
+ kfree_skb(skb);
}
end:
mutex_unlock(&psock->work_mutex);
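The skmsg.c fix above takes an extra reference with skb_get() before handing the skb to sk_psock_skb_ingress(), drops that reference with kfree_skb() if the ingress path fails, and lets the backlog loop free its own reference unconditionally after dequeue, which removes the earlier ownership ambiguity. A toy sketch of that take-a-reference-before-handoff pattern (hypothetical names, a plain malloc-backed refcount):

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted buffer standing in for an sk_buff. */
struct buf {
        int refs;
};

static void buf_get(struct buf *b)
{
        b->refs++;
}

static void buf_put(struct buf *b)
{
        if (--b->refs == 0) {
                printf("buffer freed\n");
                free(b);
        }
}

/* Consumer that may fail before taking ownership (plays the role of
 * sk_psock_skb_ingress()): on success it keeps the reference it was
 * given, on failure it has taken nothing. */
static int ingress(struct buf *b, int simulate_error)
{
        (void)b;
        return simulate_error ? -1 : 0;
}

static int handle(struct buf *b, int simulate_error)
{
        int err;

        /* Take a reference before the handoff so the caller's own
         * reference stays valid either way ... */
        buf_get(b);
        err = ingress(b, simulate_error);
        /* ... and drop the extra reference ourselves if the consumer
         * never took ownership. */
        if (err < 0)
                buf_put(b);
        return err;
}

int main(void)
{
        struct buf *b = calloc(1, sizeof(*b));

        b->refs = 1;            /* the "queue's" original reference */
        handle(b, 1);           /* failed handoff: no leak, no double free */
        buf_put(b);             /* original owner drops its reference */
        return 0;
}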
diff --git a/net/core/sock.c b/net/core/sock.c
index 666a17cab4f5..16584e2dd648 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -765,7 +765,8 @@ bool sk_mc_loop(struct sock *sk)
return false;
if (!sk)
return true;
- switch (sk->sk_family) {
+ /* IPV6_ADDRFORM can change sk->sk_family under us. */
+ switch (READ_ONCE(sk->sk_family)) {
case AF_INET:
return inet_test_bit(MC_LOOP, sk);
#if IS_ENABLED(CONFIG_IPV6)
@@ -893,7 +894,7 @@ static int sock_timestamping_bind_phc(struct sock *sk, int phc_index)
if (!match)
return -EINVAL;
- sk->sk_bind_phc = phc_index;
+ WRITE_ONCE(sk->sk_bind_phc, phc_index);
return 0;
}
@@ -936,7 +937,7 @@ int sock_set_timestamping(struct sock *sk, int optname,
return ret;
}
- sk->sk_tsflags = val;
+ WRITE_ONCE(sk->sk_tsflags, val);
sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
@@ -1044,7 +1045,7 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
return -ENOMEM;
}
- sk->sk_forward_alloc += pages << PAGE_SHIFT;
+ sk_forward_alloc_add(sk, pages << PAGE_SHIFT);
WRITE_ONCE(sk->sk_reserved_mem,
sk->sk_reserved_mem + (pages << PAGE_SHIFT));
@@ -1718,8 +1719,8 @@ int sk_getsockopt(struct sock *sk, int level, int optname,
case SO_TIMESTAMPING_OLD:
lv = sizeof(v.timestamping);
- v.timestamping.flags = sk->sk_tsflags;
- v.timestamping.bind_phc = sk->sk_bind_phc;
+ v.timestamping.flags = READ_ONCE(sk->sk_tsflags);
+ v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc);
break;
case SO_RCVTIMEO_OLD:
@@ -2746,9 +2747,9 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
break;
- if (sk->sk_shutdown & SEND_SHUTDOWN)
+ if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
break;
- if (sk->sk_err)
+ if (READ_ONCE(sk->sk_err))
break;
timeo = schedule_timeout(timeo);
}
@@ -2776,7 +2777,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
goto failure;
err = -EPIPE;
- if (sk->sk_shutdown & SEND_SHUTDOWN)
+ if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
goto failure;
if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
@@ -3138,10 +3139,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
int ret, amt = sk_mem_pages(size);
- sk->sk_forward_alloc += amt << PAGE_SHIFT;
+ sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
ret = __sk_mem_raise_allocated(sk, size, amt, kind);
if (!ret)
- sk->sk_forward_alloc -= amt << PAGE_SHIFT;
+ sk_forward_alloc_add(sk, -(amt << PAGE_SHIFT));
return ret;
}
EXPORT_SYMBOL(__sk_mem_schedule);
@@ -3173,7 +3174,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount)
void __sk_mem_reclaim(struct sock *sk, int amount)
{
amount >>= PAGE_SHIFT;
- sk->sk_forward_alloc -= amount << PAGE_SHIFT;
+ sk_forward_alloc_add(sk, -(amount << PAGE_SHIFT));
__sk_mem_reduce_allocated(sk, amount);
}
EXPORT_SYMBOL(__sk_mem_reclaim);
@@ -3742,7 +3743,7 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem)
mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
- mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
+ mem[SK_MEMINFO_FWD_ALLOC] = sk_forward_alloc_get(sk);
mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
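The sock.c hunks above convert bare loads and stores of fields such as sk->sk_tsflags, sk->sk_bind_phc and sk->sk_forward_alloc to READ_ONCE()/WRITE_ONCE() accessors and to the sk_forward_alloc_add()/sk_forward_alloc_get() helpers, so lockless readers like sk_get_meminfo() see each value as a single untorn load while the updater stays serialized by the socket lock. A rough C11 analogue of that accessor pattern (names invented; relaxed atomics standing in for the ONCE macros):

#include <stdatomic.h>
#include <stdio.h>

/* A counter that one path updates under its own lock while other
 * paths read it locklessly, similar to sk_forward_alloc. */
static _Atomic long fwd_alloc;

/* The updater is assumed to be serialized by a lock (the socket lock
 * in the kernel), so a plain read-modify-write is fine here; only the
 * store itself needs to be a single untorn write. */
static void fwd_alloc_add(long delta)
{
        long cur = atomic_load_explicit(&fwd_alloc, memory_order_relaxed);

        atomic_store_explicit(&fwd_alloc, cur + delta, memory_order_relaxed);
}

/* Single untorn read for lockless observers. */
static long fwd_alloc_get(void)
{
        return atomic_load_explicit(&fwd_alloc, memory_order_relaxed);
}

int main(void)
{
        fwd_alloc_add(4096);
        fwd_alloc_add(-1024);
        printf("forward alloc: %ld\n", fwd_alloc_get());
        return 0;
}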
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 8f07fea39d9e..cb11750b1df5 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -18,7 +18,7 @@ struct bpf_stab {
struct bpf_map map;
struct sock **sks;
struct sk_psock_progs progs;
- raw_spinlock_t lock;
+ spinlock_t lock;
};
#define SOCK_CREATE_FLAG_MASK \
@@ -44,7 +44,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
return ERR_PTR(-ENOMEM);
bpf_map_init_from_attr(&stab->map, attr);
- raw_spin_lock_init(&stab->lock);
+ spin_lock_init(&stab->lock);
stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
sizeof(struct sock *),
@@ -411,7 +411,7 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
struct sock *sk;
int err = 0;
- raw_spin_lock_bh(&stab->lock);
+ spin_lock_bh(&stab->lock);
sk = *psk;
if (!sk_test || sk_test == sk)
sk = xchg(psk, NULL);
@@ -421,7 +421,7 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
else
err = -EINVAL;
- raw_spin_unlock_bh(&stab->lock);
+ spin_unlock_bh(&stab->lock);
return err;
}
@@ -487,7 +487,7 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
psock = sk_psock(sk);
WARN_ON_ONCE(!psock);
- raw_spin_lock_bh(&stab->lock);
+ spin_lock_bh(&stab->lock);
osk = stab->sks[idx];
if (osk && flags == BPF_NOEXIST) {
ret = -EEXIST;
@@ -501,10 +501,10 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
stab->sks[idx] = sk;
if (osk)
sock_map_unref(osk, &stab->sks[idx]);
- raw_spin_unlock_bh(&stab->lock);
+ spin_unlock_bh(&stab->lock);
return 0;
out_unlock:
- raw_spin_unlock_bh(&stab->lock);
+ spin_unlock_bh(&stab->lock);
if (psock)
sk_psock_put(sk, psock);
out_free:
@@ -835,7 +835,7 @@ struct bpf_shtab_elem {
struct bpf_shtab_bucket {
struct hlist_head head;
- raw_spinlock_t lock;
+ spinlock_t lock;
};
struct bpf_shtab {
@@ -910,7 +910,7 @@ static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
* is okay since it's going away only after RCU grace period.
* However, we need to check whether it's still present.
*/
- raw_spin_lock_bh(&bucket->lock);
+ spin_lock_bh(&bucket->lock);
elem_probe = sock_hash_lookup_elem_raw(&bucket->head, elem->hash,
elem->key, map->key_size);
if (elem_probe && elem_probe == elem) {
@@ -918,7 +918,7 @@ static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
sock_map_unref(elem->sk, elem);
sock_hash_free_elem(htab, elem);
}
- raw_spin_unlock_bh(&bucket->lock);
+ spin_unlock_bh(&bucket->lock);
}
static long sock_hash_delete_elem(struct bpf_map *map, void *key)
@@ -932,7 +932,7 @@ static long sock_hash_delete_elem(struct bpf_map *map, void *key)
hash = sock_hash_bucket_hash(key, key_size);
bucket = sock_hash_select_bucket(htab, hash);
- raw_spin_lock_bh(&bucket->lock);
+ spin_lock_bh(&bucket->lock);
elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
if (elem) {
hlist_del_rcu(&elem->node);
@@ -940,7 +940,7 @@ static long sock_hash_delete_elem(struct bpf_map *map, void *key)
sock_hash_free_elem(htab, elem);
ret = 0;
}
- raw_spin_unlock_bh(&bucket->lock);
+ spin_unlock_bh(&bucket->lock);
return ret;
}
@@ -1000,7 +1000,7 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
hash = sock_hash_bucket_hash(key, key_size);
bucket = sock_hash_select_bucket(htab, hash);
- raw_spin_lock_bh(&bucket->lock);
+ spin_lock_bh(&bucket->lock);
elem = sock_hash_lookup_elem_raw(&bucket->head, hash, key, key_size);
if (elem && flags == BPF_NOEXIST) {
ret = -EEXIST;
@@ -1026,10 +1026,10 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
sock_map_unref(elem->sk, elem);
sock_hash_free_elem(htab, elem);
}
- raw_spin_unlock_bh(&bucket->lock);
+ spin_unlock_bh(&bucket->lock);
return 0;
out_unlock:
- raw_spin_unlock_bh(&bucket->lock);
+ spin_unlock_bh(&bucket->lock);
sk_psock_put(sk, psock);
out_free:
sk_psock_free_link(link);
@@ -1115,7 +1115,7 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
for (i = 0; i < htab->buckets_num; i++) {
INIT_HLIST_HEAD(&htab->buckets[i].head);
- raw_spin_lock_init(&htab->buckets[i].lock);
+ spin_lock_init(&htab->buckets[i].lock);
}
return &htab->map;
@@ -1147,11 +1147,11 @@ static void sock_hash_free(struct bpf_map *map)
* exists, psock exists and holds a ref to socket. That
* lets us to grab a socket ref too.
*/
- raw_spin_lock_bh(&bucket->lock);
+ spin_lock_bh(&bucket->lock);
hlist_for_each_entry(elem, &bucket->head, node)
sock_hold(elem->sk);
hlist_move_list(&bucket->head, &unlink_list);
- raw_spin_unlock_bh(&bucket->lock);
+ spin_unlock_bh(&bucket->lock);
/* Process removed entries out of atomic context to
* block for socket lock before deleting the psock's
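net/core/sock_map.c converts the sockmap stab lock and the sockhash bucket locks from raw_spinlock_t to spinlock_t. These sections are only ever entered with bottom halves disabled from process or softirq context, so the raw variant is not required, presumably keeping the sections preemptible on PREEMPT_RT, where spinlock_t is a sleeping lock. A sketch of the locking pattern after the conversion (the helper name here is illustrative, not from the diff):

	struct bpf_shtab_bucket {
		struct hlist_head head;
		spinlock_t lock;		/* was raw_spinlock_t */
	};

	static void shtab_bucket_update(struct bpf_shtab_bucket *bucket)
	{
		spin_lock_bh(&bucket->lock);	/* process or softirq context */
		/* ... add or remove elements on bucket->head ... */
		spin_unlock_bh(&bucket->lock);
	}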
diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c
index 1086653e1fad..d0bc1dd8e65a 100644
--- a/net/handshake/netlink.c
+++ b/net/handshake/netlink.c
@@ -157,26 +157,24 @@ out_status:
int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = sock_net(skb->sk);
- struct handshake_req *req = NULL;
- struct socket *sock = NULL;
+ struct handshake_req *req;
+ struct socket *sock;
int fd, status, err;
if (GENL_REQ_ATTR_CHECK(info, HANDSHAKE_A_DONE_SOCKFD))
return -EINVAL;
fd = nla_get_u32(info->attrs[HANDSHAKE_A_DONE_SOCKFD]);
- err = 0;
sock = sockfd_lookup(fd, &err);
- if (err) {
- err = -EBADF;
- goto out_status;
- }
+ if (!sock)
+ return err;
req = handshake_req_hash_lookup(sock->sk);
if (!req) {
err = -EBUSY;
+ trace_handshake_cmd_done_err(net, req, sock->sk, err);
fput(sock->file);
- goto out_status;
+ return err;
}
trace_handshake_cmd_done(net, req, sock->sk, fd);
@@ -188,10 +186,6 @@ int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info)
handshake_complete(req, status, info);
fput(sock->file);
return 0;
-
-out_status:
- trace_handshake_cmd_done_err(net, req, sock->sk, err);
- return err;
}
static unsigned int handshake_net_id;
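In net/handshake/netlink.c, handshake_nl_done_doit() now relies on sockfd_lookup() returning NULL on failure and storing the errno through its second argument, so the handler propagates that value directly instead of unconditionally rewriting it to -EBADF; the remaining early error (-EBUSY when no request is found) emits its tracepoint inline, which lets the shared out_status label go away. The lookup-and-bail idiom, in isolation:

	int err;
	struct socket *sock;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return err;	/* errno filled in by sockfd_lookup() */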
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 65ba18a91865..eafa4a033515 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -278,7 +278,8 @@ void fib_release_info(struct fib_info *fi)
hlist_del(&nexthop_nh->nh_hash);
} endfor_nexthops(fi)
}
- fi->fib_dead = 1;
+ /* Paired with READ_ONCE() from fib_table_lookup() */
+ WRITE_ONCE(fi->fib_dead, 1);
fib_info_put(fi);
}
spin_unlock_bh(&fib_info_lock);
@@ -1581,6 +1582,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
link_it:
ofi = fib_find_info(fi);
if (ofi) {
+ /* fib_table_lookup() should not see @fi yet. */
fi->fib_dead = 1;
free_fib_info(fi);
refcount_inc(&ofi->fib_treeref);
@@ -1619,6 +1621,7 @@ err_inval:
failure:
if (fi) {
+ /* fib_table_lookup() should not see @fi yet. */
fi->fib_dead = 1;
free_fib_info(fi);
}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 74d403dbd2b4..d13fb9e76b97 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1582,7 +1582,8 @@ found:
if (fa->fa_dscp &&
inet_dscp_to_dsfield(fa->fa_dscp) != flp->flowi4_tos)
continue;
- if (fi->fib_dead)
+ /* Paired with WRITE_ONCE() in fib_release_info() */
+ if (READ_ONCE(fi->fib_dead))
continue;
if (fa->fa_info->fib_scope < flp->flowi4_scope)
continue;
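The fib_semantics.c/fib_trie.c pair annotates the fib_dead flag: fib_release_info() publishes it with WRITE_ONCE() and the lockless fast path in fib_table_lookup() reads it with READ_ONCE(), so the compiler cannot tear or fuse those accesses. The error paths in fib_create_info() keep plain stores because, as the added comments say, the fib_info is not yet visible to lookups there. The pairing, in isolation:

	/* Writer, under fib_info_lock: mark the fib_info as going away. */
	WRITE_ONCE(fi->fib_dead, 1);

	/* Lockless reader in the lookup fast path. */
	if (READ_ONCE(fi->fib_dead))
		continue;	/* skip entries being released */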
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 0c9e768e5628..418e5fb58fd3 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -353,8 +353,9 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
struct flowi4 fl4;
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
- unsigned int size = mtu;
+ unsigned int size;
+ size = min(mtu, IP_MAX_MTU);
while (1) {
skb = alloc_skb(size + hlen + tlen,
GFP_ATOMIC | __GFP_NOWARN);
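igmpv3_newpack() now clamps the allocation size to IP_MAX_MTU, so a device advertising an oversized MTU cannot push the skb allocation past the largest size an IPv4 datagram can carry. In isolation:

	unsigned int size = min(mtu, IP_MAX_MTU);

	skb = alloc_skb(size + hlen + tlen, GFP_ATOMIC | __GFP_NOWARN);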
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index e18931a6d153..66fac1216d46 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -67,7 +67,6 @@ static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *s
struct ip_options *opt = &(IPCB(skb)->opt);
__IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
- __IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);
#ifdef CONFIG_NET_SWITCHDEV
if (skb->offload_l3_fwd_mark) {
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index fe9ead9ee863..5e9c8156656a 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -584,7 +584,8 @@ static void ip_sublist_rcv_finish(struct list_head *head)
static struct sk_buff *ip_extract_route_hint(const struct net *net,
struct sk_buff *skb, int rt_type)
{
- if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST)
+ if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST ||
+ IPCB(skb)->flags & IPSKB_MULTIPATH)
return NULL;
return skb;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 43ba4b77b248..4ab877cf6d35 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -207,6 +207,9 @@ static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *s
} else if (rt->rt_type == RTN_BROADCAST)
IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);
+ /* OUTOCTETS should be counted after fragment */
+ IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
+
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
skb = skb_expand_head(skb, hh_len);
if (!skb)
@@ -366,8 +369,6 @@ int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
/*
* If the indicated interface is up and running, send the packet.
*/
- IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
-
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
@@ -424,8 +425,6 @@ int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
- IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
-
skb->dev = dev;
skb->protocol = htons(ETH_P_IP);
@@ -982,7 +981,7 @@ static int __ip_append_data(struct sock *sk,
paged = !!cork->gso_size;
if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
- sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
+ READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID)
tskey = atomic_inc_return(&sk->sk_tskey) - 1;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index d1c73660b844..cce9cb25f3b3 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -511,7 +511,7 @@ static bool ipv4_datagram_support_cmsg(const struct sock *sk,
* or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
*/
info = PKTINFO_SKB_CB(skb);
- if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) ||
+ if (!(READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_CMSG) ||
!info->ipi_ifindex)
return false;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 3f0c6d602fb7..9e222a57bc2b 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1804,7 +1804,6 @@ static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
struct ip_options *opt = &(IPCB(skb)->opt);
IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
- IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);
if (unlikely(opt->optlen))
ip_forward_options(skb);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d8c99bdc6170..66f419e7f9a7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2144,6 +2144,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
fib_select_multipath(res, h);
+ IPCB(skb)->flags |= IPSKB_MULTIPATH;
}
#endif
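ip_mkroute_input() now tags skbs whose route was chosen by multipath hashing with IPSKB_MULTIPATH (the IPv6 side gets the matching IP6SKB_MULTIPATH treatment further down), and ip_extract_route_hint()/ip6_extract_route_hint() decline to use such a packet as a list-receive route hint, since packets hashed across multipath next hops in the same batch may legitimately resolve to different routes. The hint-side check, in isolation:

	if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST ||
	    IPCB(skb)->flags & IPSKB_MULTIPATH)
		return NULL;	/* no hint: each skb does its own lookup */
	return skb;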
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b1559481898d..0c3040a63ebd 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2256,14 +2256,14 @@ void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
}
}
- if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE)
+ if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_SOFTWARE)
has_timestamping = true;
else
tss->ts[0] = (struct timespec64) {0};
}
if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
- if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)
+ if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_RAW_HARDWARE)
has_timestamping = true;
else
tss->ts[2] = (struct timespec64) {0};
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e6b4fbd642f7..ccfc8bbf7455 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3474,7 +3474,7 @@ void sk_forced_mem_schedule(struct sock *sk, int size)
if (delta <= 0)
return;
amt = sk_mem_pages(delta);
- sk->sk_forward_alloc += amt << PAGE_SHIFT;
+ sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
sk_memory_allocated_add(sk, amt);
if (mem_cgroup_sockets_enabled && sk->sk_memcg)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 0794a2c46a56..f39b9c844580 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1414,9 +1414,9 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
spin_lock(&sk_queue->lock);
- sk->sk_forward_alloc += size;
+ sk_forward_alloc_add(sk, size);
amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
- sk->sk_forward_alloc -= amt;
+ sk_forward_alloc_add(sk, -amt);
if (amt)
__sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);
@@ -1527,7 +1527,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
goto uncharge_drop;
}
- sk->sk_forward_alloc -= size;
+ sk_forward_alloc_add(sk, -size);
/* no need to setup a destructor, we will explicitly release the
* forward allocated memory on dequeue
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 967913ad65e5..0b6ee962c84e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1378,7 +1378,7 @@ retry:
* idev->desync_factor if it's larger
*/
cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
- max_desync_factor = min_t(__u32,
+ max_desync_factor = min_t(long,
idev->cnf.max_desync_factor,
cnf_temp_preferred_lft - regen_advance);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index d94041bb4287..b8378814532c 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -99,7 +99,8 @@ static bool ip6_can_use_hint(const struct sk_buff *skb,
static struct sk_buff *ip6_extract_route_hint(const struct net *net,
struct sk_buff *skb)
{
- if (fib6_routes_require_src(net) || fib6_has_custom_rules(net))
+ if (fib6_routes_require_src(net) || fib6_has_custom_rules(net) ||
+ IP6CB(skb)->flags & IP6SKB_MULTIPATH)
return NULL;
return skb;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 0665e8b09968..54fc4c711f2c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -451,7 +451,6 @@ static inline int ip6_forward_finish(struct net *net, struct sock *sk,
struct dst_entry *dst = skb_dst(skb);
__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
- __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
#ifdef CONFIG_NET_SWITCHDEV
if (skb->offload_l3_fwd_mark) {
@@ -1502,7 +1501,7 @@ static int __ip6_append_data(struct sock *sk,
orig_mtu = mtu;
if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
- sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
+ READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID)
tskey = atomic_inc_return(&sk->sk_tskey) - 1;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 67a3b8f6e72b..30ca064b76ef 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2010,8 +2010,6 @@ static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct
{
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTFORWDATAGRAMS);
- IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
- IPSTATS_MIB_OUTOCTETS, skb->len);
return dst_output(net, sk, skb);
}
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 1b2772834972..5831aaa53d75 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -119,7 +119,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
return -EINVAL;
ipcm6_init_sk(&ipc6, np);
- ipc6.sockc.tsflags = sk->sk_tsflags;
+ ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
fl6.flowi6_oif = oif;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 0eae7661a85c..42fcec3ecf5e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -772,7 +772,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.flowi6_uid = sk->sk_uid;
ipcm6_init(&ipc6);
- ipc6.sockc.tsflags = sk->sk_tsflags;
+ ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
ipc6.sockc.mark = fl6.flowi6_mark;
if (sin6) {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d15a9e3aa24a..9c687b357e6a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -423,6 +423,9 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
if (match->nh && have_oif_match && res->nh)
return;
+ if (skb)
+ IP6CB(skb)->flags |= IP6SKB_MULTIPATH;
+
/* We might have already computed the hash for ICMPv6 errors. In such
* case it will always be non-zero. Otherwise now is the time to do it.
*/
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index ebc6ae47cfea..86b5d509a468 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1339,7 +1339,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipcm6_init(&ipc6);
ipc6.gso_size = READ_ONCE(up->gso_size);
- ipc6.sockc.tsflags = sk->sk_tsflags;
+ ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags);
ipc6.sockc.mark = READ_ONCE(sk->sk_mark);
/* destination address check */
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 393f01b2a7e6..4580f61426bb 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1859,6 +1859,8 @@ static __net_exit void kcm_exit_net(struct net *net)
* that all multiplexors and psocks have been destroyed.
*/
WARN_ON(!list_empty(&knet->mux_list));
+
+ mutex_destroy(&knet->mutex);
}
static struct pernet_operations kcm_net_ops = {
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 933b257eee02..a7fc16f5175d 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -134,9 +134,15 @@ static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
__kfree_skb(skb);
}
+static void mptcp_rmem_fwd_alloc_add(struct sock *sk, int size)
+{
+ WRITE_ONCE(mptcp_sk(sk)->rmem_fwd_alloc,
+ mptcp_sk(sk)->rmem_fwd_alloc + size);
+}
+
static void mptcp_rmem_charge(struct sock *sk, int size)
{
- mptcp_sk(sk)->rmem_fwd_alloc -= size;
+ mptcp_rmem_fwd_alloc_add(sk, -size);
}
static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
@@ -177,7 +183,7 @@ static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
static void __mptcp_rmem_reclaim(struct sock *sk, int amount)
{
amount >>= PAGE_SHIFT;
- mptcp_sk(sk)->rmem_fwd_alloc -= amount << PAGE_SHIFT;
+ mptcp_rmem_charge(sk, amount << PAGE_SHIFT);
__sk_mem_reduce_allocated(sk, amount);
}
@@ -186,7 +192,7 @@ static void mptcp_rmem_uncharge(struct sock *sk, int size)
struct mptcp_sock *msk = mptcp_sk(sk);
int reclaimable;
- msk->rmem_fwd_alloc += size;
+ mptcp_rmem_fwd_alloc_add(sk, size);
reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);
/* see sk_mem_uncharge() for the rationale behind the following schema */
@@ -341,7 +347,7 @@ static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV))
return false;
- msk->rmem_fwd_alloc += amount;
+ mptcp_rmem_fwd_alloc_add(sk, amount);
return true;
}
@@ -1800,7 +1806,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
/* data successfully copied into the write queue */
- sk->sk_forward_alloc -= total_ts;
+ sk_forward_alloc_add(sk, -total_ts);
copied += psize;
dfrag->data_len += psize;
frag_truesize += psize;
@@ -3257,8 +3263,8 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
/* move all the rx fwd alloc into the sk_mem_reclaim_final in
* inet_sock_destruct() will dispose it
*/
- sk->sk_forward_alloc += msk->rmem_fwd_alloc;
- msk->rmem_fwd_alloc = 0;
+ sk_forward_alloc_add(sk, msk->rmem_fwd_alloc);
+ WRITE_ONCE(msk->rmem_fwd_alloc, 0);
mptcp_token_destroy(msk);
mptcp_pm_free_anno_list(msk);
mptcp_free_local_addr_list(msk);
@@ -3522,7 +3528,8 @@ static void mptcp_shutdown(struct sock *sk, int how)
static int mptcp_forward_alloc_get(const struct sock *sk)
{
- return sk->sk_forward_alloc + mptcp_sk(sk)->rmem_fwd_alloc;
+ return READ_ONCE(sk->sk_forward_alloc) +
+ READ_ONCE(mptcp_sk(sk)->rmem_fwd_alloc);
}
static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
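net/mptcp/protocol.c mirrors the sk_forward_alloc_add() change: every adjustment of msk->rmem_fwd_alloc now goes through mptcp_rmem_fwd_alloc_add(), which wraps the update in WRITE_ONCE(), so mptcp_forward_alloc_get() can sum both forward-alloc counters locklessly. The write/read pairing, in isolation:

	/* Writer, under the msk socket lock. */
	mptcp_rmem_fwd_alloc_add(sk, -size);	/* WRITE_ONCE() inside */

	/* Lockless reader. */
	fwd = READ_ONCE(sk->sk_forward_alloc) +
	      READ_ONCE(mptcp_sk(sk)->rmem_fwd_alloc);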
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index 005a7ce87217..bf4f91b78e1d 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -36,6 +36,7 @@ MODULE_ALIAS("ip_set_hash:net,port,net");
#define IP_SET_HASH_WITH_PROTO
#define IP_SET_HASH_WITH_NETS
#define IPSET_NET_COUNT 2
+#define IP_SET_HASH_WITH_NET0
/* IPv4 variant */
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 41b826dff6f5..e429ebba74b3 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -102,6 +102,7 @@ static const u8 nft2audit_op[NFT_MSG_MAX] = { // enum nf_tables_msg_types
[NFT_MSG_NEWFLOWTABLE] = AUDIT_NFT_OP_FLOWTABLE_REGISTER,
[NFT_MSG_GETFLOWTABLE] = AUDIT_NFT_OP_INVALID,
[NFT_MSG_DELFLOWTABLE] = AUDIT_NFT_OP_FLOWTABLE_UNREGISTER,
+ [NFT_MSG_GETSETELEM_RESET] = AUDIT_NFT_OP_SETELEM_RESET,
};
static void nft_validate_state_update(struct nft_table *table, u8 new_validate_state)
@@ -3421,6 +3422,18 @@ err:
nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
+static void audit_log_rule_reset(const struct nft_table *table,
+ unsigned int base_seq,
+ unsigned int nentries)
+{
+ char *buf = kasprintf(GFP_ATOMIC, "%s:%u",
+ table->name, base_seq);
+
+ audit_log_nfcfg(buf, table->family, nentries,
+ AUDIT_NFT_OP_RULE_RESET, GFP_ATOMIC);
+ kfree(buf);
+}
+
struct nft_rule_dump_ctx {
char *table;
char *chain;
@@ -3467,6 +3480,10 @@ cont:
cont_skip:
(*idx)++;
}
+
+ if (reset && *idx)
+ audit_log_rule_reset(table, cb->seq, *idx);
+
return 0;
}
@@ -3634,6 +3651,9 @@ static int nf_tables_getrule(struct sk_buff *skb, const struct nfnl_info *info,
if (err < 0)
goto err_fill_rule_info;
+ if (reset)
+ audit_log_rule_reset(table, nft_pernet(net)->base_seq, 1);
+
return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
err_fill_rule_info:
@@ -5624,13 +5644,25 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
return nf_tables_fill_setelem(args->skb, set, elem, args->reset);
}
+static void audit_log_nft_set_reset(const struct nft_table *table,
+ unsigned int base_seq,
+ unsigned int nentries)
+{
+ char *buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, base_seq);
+
+ audit_log_nfcfg(buf, table->family, nentries,
+ AUDIT_NFT_OP_SETELEM_RESET, GFP_ATOMIC);
+ kfree(buf);
+}
+
struct nft_set_dump_ctx {
const struct nft_set *set;
struct nft_ctx ctx;
};
static int nft_set_catchall_dump(struct net *net, struct sk_buff *skb,
- const struct nft_set *set, bool reset)
+ const struct nft_set *set, bool reset,
+ unsigned int base_seq)
{
struct nft_set_elem_catchall *catchall;
u8 genmask = nft_genmask_cur(net);
@@ -5646,6 +5678,8 @@ static int nft_set_catchall_dump(struct net *net, struct sk_buff *skb,
elem.priv = catchall->elem;
ret = nf_tables_fill_setelem(skb, set, &elem, reset);
+ if (reset && !ret)
+ audit_log_nft_set_reset(set->table, base_seq, 1);
break;
}
@@ -5725,12 +5759,17 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
set->ops->walk(&dump_ctx->ctx, set, &args.iter);
if (!args.iter.err && args.iter.count == cb->args[0])
- args.iter.err = nft_set_catchall_dump(net, skb, set, reset);
- rcu_read_unlock();
-
+ args.iter.err = nft_set_catchall_dump(net, skb, set,
+ reset, cb->seq);
nla_nest_end(skb, nest);
nlmsg_end(skb, nlh);
+ if (reset && args.iter.count > args.iter.skip)
+ audit_log_nft_set_reset(table, cb->seq,
+ args.iter.count - args.iter.skip);
+
+ rcu_read_unlock();
+
if (args.iter.err && args.iter.err != -EMSGSIZE)
return args.iter.err;
if (args.iter.count == cb->args[0])
@@ -5955,13 +5994,13 @@ static int nf_tables_getsetelem(struct sk_buff *skb,
struct netlink_ext_ack *extack = info->extack;
u8 genmask = nft_genmask_cur(info->net);
u8 family = info->nfmsg->nfgen_family;
+ int rem, err = 0, nelems = 0;
struct net *net = info->net;
struct nft_table *table;
struct nft_set *set;
struct nlattr *attr;
struct nft_ctx ctx;
bool reset = false;
- int rem, err = 0;
table = nft_table_lookup(net, nla[NFTA_SET_ELEM_LIST_TABLE], family,
genmask, 0);
@@ -6004,8 +6043,13 @@ static int nf_tables_getsetelem(struct sk_buff *skb,
NL_SET_BAD_ATTR(extack, attr);
break;
}
+ nelems++;
}
+ if (reset)
+ audit_log_nft_set_reset(table, nft_pernet(net)->base_seq,
+ nelems);
+
return err;
}
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
index 8f1bfa6ccc2d..50723ba08289 100644
--- a/net/netfilter/nfnetlink_osf.c
+++ b/net/netfilter/nfnetlink_osf.c
@@ -315,6 +315,14 @@ static int nfnl_osf_add_callback(struct sk_buff *skb,
f = nla_data(osf_attrs[OSF_ATTR_FINGER]);
+ if (f->opt_num > ARRAY_SIZE(f->opt))
+ return -EINVAL;
+
+ if (!memchr(f->genre, 0, MAXGENRELEN) ||
+ !memchr(f->subtype, 0, MAXGENRELEN) ||
+ !memchr(f->version, 0, MAXGENRELEN))
+ return -EINVAL;
+
kf = kmalloc(sizeof(struct nf_osf_finger), GFP_KERNEL);
if (!kf)
return -ENOMEM;
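nfnl_osf_add_callback() copies a user-supplied fingerprint structure, so it now validates the blob before use: opt_num must not exceed the opt[] array, and the genre, subtype and version fields must each contain a terminating NUL within MAXGENRELEN. Malformed fingerprints are rejected with -EINVAL instead of being copied verbatim. The bounds-check idiom, in isolation:

	if (f->opt_num > ARRAY_SIZE(f->opt))
		return -EINVAL;			/* would walk past opt[] */

	if (!memchr(f->genre, 0, MAXGENRELEN))	/* must be NUL-terminated */
		return -EINVAL;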
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index 7f856ceb3a66..3fbaa7bf41f9 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -35,6 +35,14 @@ static unsigned int optlen(const u8 *opt, unsigned int offset)
return opt[offset + 1];
}
+static int nft_skb_copy_to_reg(const struct sk_buff *skb, int offset, u32 *dest, unsigned int len)
+{
+ if (len % NFT_REG32_SIZE)
+ dest[len / NFT_REG32_SIZE] = 0;
+
+ return skb_copy_bits(skb, offset, dest, len);
+}
+
static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
@@ -56,8 +64,7 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
}
offset += priv->offset;
- dest[priv->len / NFT_REG32_SIZE] = 0;
- if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
+ if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0)
goto err;
return;
err:
@@ -153,8 +160,7 @@ static void nft_exthdr_ipv4_eval(const struct nft_expr *expr,
}
offset += priv->offset;
- dest[priv->len / NFT_REG32_SIZE] = 0;
- if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
+ if (nft_skb_copy_to_reg(pkt->skb, offset, dest, priv->len) < 0)
goto err;
return;
err:
@@ -210,7 +216,8 @@ static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
if (priv->flags & NFT_EXTHDR_F_PRESENT) {
*dest = 1;
} else {
- dest[priv->len / NFT_REG32_SIZE] = 0;
+ if (priv->len % NFT_REG32_SIZE)
+ dest[priv->len / NFT_REG32_SIZE] = 0;
memcpy(dest, opt + offset, priv->len);
}
@@ -238,7 +245,12 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
if (!tcph)
goto err;
+ if (skb_ensure_writable(pkt->skb, nft_thoff(pkt) + tcphdr_len))
+ goto err;
+
+ tcph = (struct tcphdr *)(pkt->skb->data + nft_thoff(pkt));
opt = (u8 *)tcph;
+
for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
union {
__be16 v16;
@@ -253,15 +265,6 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
goto err;
- if (skb_ensure_writable(pkt->skb,
- nft_thoff(pkt) + i + priv->len))
- goto err;
-
- tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff,
- &tcphdr_len);
- if (!tcph)
- goto err;
-
offset = i + priv->offset;
switch (priv->len) {
@@ -325,9 +328,9 @@ static void nft_exthdr_tcp_strip_eval(const struct nft_expr *expr,
if (skb_ensure_writable(pkt->skb, nft_thoff(pkt) + tcphdr_len))
goto drop;
- opt = (u8 *)nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
- if (!opt)
- goto err;
+ tcph = (struct tcphdr *)(pkt->skb->data + nft_thoff(pkt));
+ opt = (u8 *)tcph;
+
for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
unsigned int j;
@@ -392,9 +395,8 @@ static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
offset + ntohs(sch->length) > pkt->skb->len)
break;
- dest[priv->len / NFT_REG32_SIZE] = 0;
- if (skb_copy_bits(pkt->skb, offset + priv->offset,
- dest, priv->len) < 0)
+ if (nft_skb_copy_to_reg(pkt->skb, offset + priv->offset,
+ dest, priv->len) < 0)
break;
return;
}
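nft_exthdr.c factors the repeated copy-into-registers sequence into nft_skb_copy_to_reg(), and the trailing register word is now zeroed only when the copy length is not a multiple of NFT_REG32_SIZE: for exact multiples, the old unconditional store at dest[len / NFT_REG32_SIZE] landed one word past the data about to be copied and could write past the destination register array. The tcp_set and tcp_strip paths additionally call skb_ensure_writable() up front for the whole TCP header and re-derive the header pointer from skb->data, instead of making the skb writable piecemeal inside the option-walking loop. The helper, as added:

	static int nft_skb_copy_to_reg(const struct sk_buff *skb, int offset,
				       u32 *dest, unsigned int len)
	{
		/* Zero the partial trailing word only when the copy will
		 * not fill it; for exact multiples of NFT_REG32_SIZE the
		 * store would fall outside the copied data.
		 */
		if (len % NFT_REG32_SIZE)
			dest[len / NFT_REG32_SIZE] = 0;

		return skb_copy_bits(skb, offset, dest, len);
	}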
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index c6435e709231..f250b5399344 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -312,6 +312,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
struct rb_node *node, *next, *parent, **p, *first = NULL;
struct nft_rbtree *priv = nft_set_priv(set);
+ u8 cur_genmask = nft_genmask_cur(net);
u8 genmask = nft_genmask_next(net);
int d, err;
@@ -357,8 +358,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
if (!nft_set_elem_active(&rbe->ext, genmask))
continue;
- /* perform garbage collection to avoid bogus overlap reports. */
- if (nft_set_elem_expired(&rbe->ext)) {
+ /* perform garbage collection to avoid bogus overlap reports
+ * but skip new elements in this transaction.
+ */
+ if (nft_set_elem_expired(&rbe->ext) &&
+ nft_set_elem_active(&rbe->ext, cur_genmask)) {
err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
if (err < 0)
return err;
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index e8961094a282..b46a6a512058 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -149,6 +149,8 @@ static int sctp_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_sctp_info *info = par->matchinfo;
+ if (info->flag_count > ARRAY_SIZE(info->flag_info))
+ return -EINVAL;
if (info->flags & ~XT_SCTP_VALID_FLAGS)
return -EINVAL;
if (info->invflags & ~XT_SCTP_VALID_FLAGS)
diff --git a/net/netfilter/xt_u32.c b/net/netfilter/xt_u32.c
index 177b40d08098..117d4615d668 100644
--- a/net/netfilter/xt_u32.c
+++ b/net/netfilter/xt_u32.c
@@ -96,11 +96,32 @@ static bool u32_mt(const struct sk_buff *skb, struct xt_action_param *par)
return ret ^ data->invert;
}
+static int u32_mt_checkentry(const struct xt_mtchk_param *par)
+{
+ const struct xt_u32 *data = par->matchinfo;
+ const struct xt_u32_test *ct;
+ unsigned int i;
+
+ if (data->ntests > ARRAY_SIZE(data->tests))
+ return -EINVAL;
+
+ for (i = 0; i < data->ntests; ++i) {
+ ct = &data->tests[i];
+
+ if (ct->nnums > ARRAY_SIZE(ct->location) ||
+ ct->nvalues > ARRAY_SIZE(ct->value))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static struct xt_match xt_u32_mt_reg __read_mostly = {
.name = "u32",
.revision = 0,
.family = NFPROTO_UNSPEC,
.match = u32_mt,
+ .checkentry = u32_mt_checkentry,
.matchsize = sizeof(struct xt_u32),
.me = THIS_MODULE,
};
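xt_u32 previously trusted the user-supplied counters in struct xt_u32; the new u32_mt_checkentry() validates ntests, and each test's nnums and nvalues, against the corresponding fixed-size arrays when the rule is inserted, so u32_mt() can no longer be driven past those arrays at match time. The checks use '>' rather than '>=': a count equal to the array size is still valid because the match loops run with 'i < count'. Abbreviated registration excerpt (fields unrelated to the fix omitted):

	static struct xt_match xt_u32_mt_reg __read_mostly = {
		.name		= "u32",
		.match		= u32_mt,
		.checkentry	= u32_mt_checkentry,	/* runs once at rule insert */
		.matchsize	= sizeof(struct xt_u32),
		.me		= THIS_MODULE,
	};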
diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c
index cc8fa9e36159..ed1508a9e093 100644
--- a/net/nfc/nci/uart.c
+++ b/net/nfc/nci/uart.c
@@ -172,7 +172,7 @@ static int nci_uart_tty_open(struct tty_struct *tty)
*/
static void nci_uart_tty_close(struct tty_struct *tty)
{
- struct nci_uart *nu = (void *)tty->disc_data;
+ struct nci_uart *nu = tty->disc_data;
/* Detach from the tty */
tty->disc_data = NULL;
@@ -204,7 +204,7 @@ static void nci_uart_tty_close(struct tty_struct *tty)
*/
static void nci_uart_tty_wakeup(struct tty_struct *tty)
{
- struct nci_uart *nu = (void *)tty->disc_data;
+ struct nci_uart *nu = tty->disc_data;
if (!nu)
return;
@@ -296,9 +296,9 @@ static int nci_uart_default_recv_buf(struct nci_uart *nu, const u8 *data,
* Return Value: None
*/
static void nci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
- const char *flags, int count)
+ const u8 *flags, size_t count)
{
- struct nci_uart *nu = (void *)tty->disc_data;
+ struct nci_uart *nu = tty->disc_data;
if (!nu || tty != nu->tty)
return;
@@ -325,7 +325,7 @@ static void nci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
static int nci_uart_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
- struct nci_uart *nu = (void *)tty->disc_data;
+ struct nci_uart *nu = tty->disc_data;
int err = 0;
switch (cmd) {
@@ -345,20 +345,14 @@ static int nci_uart_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/* We don't provide read/write/poll interface for user space. */
static ssize_t nci_uart_tty_read(struct tty_struct *tty, struct file *file,
- unsigned char *buf, size_t nr,
- void **cookie, unsigned long offset)
+ u8 *buf, size_t nr, void **cookie,
+ unsigned long offset)
{
return 0;
}
static ssize_t nci_uart_tty_write(struct tty_struct *tty, struct file *file,
- const unsigned char *data, size_t count)
-{
- return 0;
-}
-
-static __poll_t nci_uart_tty_poll(struct tty_struct *tty,
- struct file *filp, poll_table *wait)
+ const u8 *data, size_t count)
{
return 0;
}
@@ -435,7 +429,6 @@ static struct tty_ldisc_ops nci_uart_ldisc = {
.close = nci_uart_tty_close,
.read = nci_uart_tty_read,
.write = nci_uart_tty_write,
- .poll = nci_uart_tty_poll,
.receive_buf = nci_uart_tty_receive,
.write_wakeup = nci_uart_tty_wakeup,
.ioctl = nci_uart_tty_ioctl,
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index 591d87d5e5c0..68e6acd0f130 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -61,6 +61,7 @@ struct fq_pie_sched_data {
struct pie_params p_params;
u32 ecn_prob;
u32 flows_cnt;
+ u32 flows_cursor;
u32 quantum;
u32 memory_limit;
u32 new_flow_count;
@@ -375,22 +376,32 @@ flow_error:
static void fq_pie_timer(struct timer_list *t)
{
struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
+ unsigned long next, tupdate;
struct Qdisc *sch = q->sch;
spinlock_t *root_lock; /* to lock qdisc for probability calculations */
- u32 idx;
+ int max_cnt, i;
rcu_read_lock();
root_lock = qdisc_lock(qdisc_root_sleeping(sch));
spin_lock(root_lock);
- for (idx = 0; idx < q->flows_cnt; idx++)
- pie_calculate_probability(&q->p_params, &q->flows[idx].vars,
- q->flows[idx].backlog);
-
- /* reset the timer to fire after 'tupdate' jiffies. */
- if (q->p_params.tupdate)
- mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate);
+ /* Limit this expensive loop to 2048 flows per round. */
+ max_cnt = min_t(int, q->flows_cnt - q->flows_cursor, 2048);
+ for (i = 0; i < max_cnt; i++) {
+ pie_calculate_probability(&q->p_params,
+ &q->flows[q->flows_cursor].vars,
+ q->flows[q->flows_cursor].backlog);
+ q->flows_cursor++;
+ }
+ tupdate = q->p_params.tupdate;
+ next = 0;
+ if (q->flows_cursor >= q->flows_cnt) {
+ q->flows_cursor = 0;
+ next = tupdate;
+ }
+ if (tupdate)
+ mod_timer(&q->adapt_timer, jiffies + next);
spin_unlock(root_lock);
rcu_read_unlock();
}
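sch_fq_pie bounds the adaptation timer's work: instead of recomputing the drop probability for every flow on each tick, fq_pie_timer() now handles at most 2048 flows per invocation, remembers its position in flows_cursor, and only waits a full tupdate interval again once the cursor has wrapped past the last flow; partial rounds re-arm the timer immediately. The re-arm logic, in isolation:

	tupdate = q->p_params.tupdate;
	next = 0;
	if (q->flows_cursor >= q->flows_cnt) {	/* finished a full pass */
		q->flows_cursor = 0;
		next = tupdate;			/* wait the normal interval */
	}
	if (tupdate)
		mod_timer(&q->adapt_timer, jiffies + next);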
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index ea8c4a7174bb..35f49edf63db 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -207,7 +207,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
.priv_size = sizeof(struct plug_sched_data),
.enqueue = plug_enqueue,
.dequeue = plug_dequeue,
- .peek = qdisc_peek_head,
+ .peek = qdisc_peek_dequeued,
.init = plug_init,
.change = plug_change,
.reset = qdisc_reset_queue,
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 1a25752f1a9a..546c10adcacd 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -974,10 +974,13 @@ static void qfq_update_eligible(struct qfq_sched *q)
}
/* Dequeue head packet of the head class in the DRR queue of the aggregate. */
-static void agg_dequeue(struct qfq_aggregate *agg,
- struct qfq_class *cl, unsigned int len)
+static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
+ struct qfq_class *cl, unsigned int len)
{
- qdisc_dequeue_peeked(cl->qdisc);
+ struct sk_buff *skb = qdisc_dequeue_peeked(cl->qdisc);
+
+ if (!skb)
+ return NULL;
cl->deficit -= (int) len;
@@ -987,6 +990,8 @@ static void agg_dequeue(struct qfq_aggregate *agg,
cl->deficit += agg->lmax;
list_move_tail(&cl->alist, &agg->active);
}
+
+ return skb;
}
static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
@@ -1132,11 +1137,18 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
if (!skb)
return NULL;
- qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
+
+ skb = agg_dequeue(in_serv_agg, cl, len);
+
+ if (!skb) {
+ sch->q.qlen++;
+ return NULL;
+ }
+
+ qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb);
- agg_dequeue(in_serv_agg, cl, len);
/* If lmax is lowered, through qfq_change_class, for a class
* owning pending packets with larger size than the new value
* of lmax, then the following condition may hold.
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index f13d6a34f32f..ec00ee75d59a 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -282,7 +282,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
assoc->init_retries, assoc->shutdown_retries,
assoc->rtx_data_chunks,
refcount_read(&sk->sk_wmem_alloc),
- sk->sk_wmem_queued,
+ READ_ONCE(sk->sk_wmem_queued),
sk->sk_sndbuf,
sk->sk_rcvbuf);
seq_printf(seq, "\n");
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index fd0631e70d46..ab943e8fb1db 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -69,7 +69,7 @@
#include <net/sctp/stream_sched.h>
/* Forward declarations for internal helper functions. */
-static bool sctp_writeable(struct sock *sk);
+static bool sctp_writeable(const struct sock *sk);
static void sctp_wfree(struct sk_buff *skb);
static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
size_t msg_len);
@@ -140,7 +140,7 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
refcount_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
asoc->sndbuf_used += chunk->skb->truesize + sizeof(struct sctp_chunk);
- sk->sk_wmem_queued += chunk->skb->truesize + sizeof(struct sctp_chunk);
+ sk_wmem_queued_add(sk, chunk->skb->truesize + sizeof(struct sctp_chunk));
sk_mem_charge(sk, chunk->skb->truesize);
}
@@ -9144,7 +9144,7 @@ static void sctp_wfree(struct sk_buff *skb)
struct sock *sk = asoc->base.sk;
sk_mem_uncharge(sk, skb->truesize);
- sk->sk_wmem_queued -= skb->truesize + sizeof(struct sctp_chunk);
+ sk_wmem_queued_add(sk, -(skb->truesize + sizeof(struct sctp_chunk)));
asoc->sndbuf_used -= skb->truesize + sizeof(struct sctp_chunk);
WARN_ON(refcount_sub_and_test(sizeof(struct sctp_chunk),
&sk->sk_wmem_alloc));
@@ -9299,9 +9299,9 @@ void sctp_write_space(struct sock *sk)
* UDP-style sockets or TCP-style sockets, this code should work.
* - Daisy
*/
-static bool sctp_writeable(struct sock *sk)
+static bool sctp_writeable(const struct sock *sk)
{
- return sk->sk_sndbuf > sk->sk_wmem_queued;
+ return READ_ONCE(sk->sk_sndbuf) > READ_ONCE(sk->sk_wmem_queued);
}
/* Wait for an association to go into ESTABLISHED state. If timeout is 0,
diff --git a/net/socket.c b/net/socket.c
index 77f28328e387..c8b08b32f097 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -827,7 +827,7 @@ static bool skb_is_swtx_tstamp(const struct sk_buff *skb, int false_tstamp)
static ktime_t get_timestamp(struct sock *sk, struct sk_buff *skb, int *if_index)
{
- bool cycles = sk->sk_tsflags & SOF_TIMESTAMPING_BIND_PHC;
+ bool cycles = READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_BIND_PHC;
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
struct net_device *orig_dev;
ktime_t hwtstamp;
@@ -879,12 +879,12 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP);
int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
struct scm_timestamping_internal tss;
-
int empty = 1, false_tstamp = 0;
struct skb_shared_hwtstamps *shhwtstamps =
skb_hwtstamps(skb);
int if_index;
ktime_t hwtstamp;
+ u32 tsflags;
/* Race occurred between timestamp enabling and packet
receiving. Fill in the current time for now. */
@@ -926,11 +926,12 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
}
memset(&tss, 0, sizeof(tss));
- if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
+ tsflags = READ_ONCE(sk->sk_tsflags);
+ if ((tsflags & SOF_TIMESTAMPING_SOFTWARE) &&
ktime_to_timespec64_cond(skb->tstamp, tss.ts + 0))
empty = 0;
if (shhwtstamps &&
- (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
+ (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
!skb_is_swtx_tstamp(skb, false_tstamp)) {
if_index = 0;
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_NETDEV)
@@ -938,14 +939,14 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
else
hwtstamp = shhwtstamps->hwtstamp;
- if (sk->sk_tsflags & SOF_TIMESTAMPING_BIND_PHC)
+ if (tsflags & SOF_TIMESTAMPING_BIND_PHC)
hwtstamp = ptp_convert_timestamp(&hwtstamp,
- sk->sk_bind_phc);
+ READ_ONCE(sk->sk_bind_phc));
if (ktime_to_timespec64_cond(hwtstamp, tss.ts + 2)) {
empty = 0;
- if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
+ if ((tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
!skb_is_err_queue(skb))
put_ts_pktinfo(msg, skb, if_index);
}
diff --git a/net/sunrpc/.kunitconfig b/net/sunrpc/.kunitconfig
index a55a00fa649b..eb02b906c295 100644
--- a/net/sunrpc/.kunitconfig
+++ b/net/sunrpc/.kunitconfig
@@ -23,7 +23,6 @@ CONFIG_NFS_FS=y
CONFIG_SUNRPC=y
CONFIG_SUNRPC_GSS=y
CONFIG_RPCSEC_GSS_KRB5=y
-CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_DES=y
CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y
CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA=y
CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2=y
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 4afc5fd71d44..2d8b67dac7b5 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -34,38 +34,6 @@ config RPCSEC_GSS_KRB5
If unsure, say Y.
-config RPCSEC_GSS_KRB5_SIMPLIFIED
- bool
- depends on RPCSEC_GSS_KRB5
-
-config RPCSEC_GSS_KRB5_CRYPTOSYSTEM
- bool
- depends on RPCSEC_GSS_KRB5
-
-config RPCSEC_GSS_KRB5_ENCTYPES_DES
- bool "Enable Kerberos enctypes based on DES (deprecated)"
- depends on RPCSEC_GSS_KRB5
- depends on CRYPTO_CBC && CRYPTO_CTS && CRYPTO_ECB
- depends on CRYPTO_HMAC && CRYPTO_MD5 && CRYPTO_SHA1
- depends on CRYPTO_DES
- default n
- select RPCSEC_GSS_KRB5_SIMPLIFIED
- help
- Choose Y to enable the use of deprecated Kerberos 5
- encryption types that utilize Data Encryption Standard
- (DES) based ciphers. These include des-cbc-md5,
- des-cbc-crc, and des-cbc-md4, which were deprecated by
- RFC 6649, and des3-cbc-sha1, which was deprecated by RFC
- 8429.
-
- These encryption types are known to be insecure, therefore
- the default setting of this option is N. Support for these
- encryption types is available only for compatibility with
- legacy NFS client and server implementations.
-
- Removal of support is planned for a subsequent kernel
- release.
-
config RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1
bool "Enable Kerberos enctypes based on AES and SHA-1"
depends on RPCSEC_GSS_KRB5
@@ -73,7 +41,6 @@ config RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1
depends on CRYPTO_HMAC && CRYPTO_SHA1
depends on CRYPTO_AES
default y
- select RPCSEC_GSS_KRB5_CRYPTOSYSTEM
help
Choose Y to enable the use of Kerberos 5 encryption types
that utilize Advanced Encryption Standard (AES) ciphers and
@@ -86,7 +53,6 @@ config RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA
depends on CRYPTO_CBC && CRYPTO_CTS && CRYPTO_CAMELLIA
depends on CRYPTO_CMAC
default n
- select RPCSEC_GSS_KRB5_CRYPTOSYSTEM
help
Choose Y to enable the use of Kerberos 5 encryption types
that utilize Camellia ciphers (RFC 3713) and CMAC digests
@@ -100,7 +66,6 @@ config RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2
depends on CRYPTO_HMAC && CRYPTO_SHA256 && CRYPTO_SHA512
depends on CRYPTO_AES
default n
- select RPCSEC_GSS_KRB5_CRYPTOSYSTEM
help
Choose Y to enable the use of Kerberos 5 encryption types
that utilize Advanced Encryption Standard (AES) ciphers and
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile
index 012ae1720689..ad1736d93b76 100644
--- a/net/sunrpc/auth_gss/Makefile
+++ b/net/sunrpc/auth_gss/Makefile
@@ -12,6 +12,6 @@ auth_rpcgss-y := auth_gss.o gss_generic_token.o \
obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
rpcsec_gss_krb5-y := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
- gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o
+ gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o
obj-$(CONFIG_RPCSEC_GSS_KRB5_KUNIT_TEST) += gss_krb5_test.o
diff --git a/net/sunrpc/auth_gss/gss_krb5_internal.h b/net/sunrpc/auth_gss/gss_krb5_internal.h
index b673e2626acb..3afd4065bf3d 100644
--- a/net/sunrpc/auth_gss/gss_krb5_internal.h
+++ b/net/sunrpc/auth_gss/gss_krb5_internal.h
@@ -33,7 +33,6 @@ struct gss_krb5_enctype {
const u32 Ke_length; /* encryption subkey length, in octets */
const u32 Ki_length; /* integrity subkey length, in octets */
- int (*import_ctx)(struct krb5_ctx *ctx, gfp_t gfp_mask);
int (*derive_key)(const struct gss_krb5_enctype *gk5e,
const struct xdr_netobj *in,
struct xdr_netobj *out,
@@ -85,24 +84,15 @@ struct krb5_ctx {
* GSS Kerberos 5 mechanism Per-Message calls.
*/
-u32 gss_krb5_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
- struct xdr_netobj *token);
u32 gss_krb5_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
struct xdr_netobj *token);
-u32 gss_krb5_verify_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *message_buffer,
- struct xdr_netobj *read_token);
u32 gss_krb5_verify_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *message_buffer,
struct xdr_netobj *read_token);
-u32 gss_krb5_wrap_v1(struct krb5_ctx *kctx, int offset,
- struct xdr_buf *buf, struct page **pages);
u32 gss_krb5_wrap_v2(struct krb5_ctx *kctx, int offset,
struct xdr_buf *buf, struct page **pages);
-u32 gss_krb5_unwrap_v1(struct krb5_ctx *kctx, int offset, int len,
- struct xdr_buf *buf, unsigned int *slack,
- unsigned int *align);
u32 gss_krb5_unwrap_v2(struct krb5_ctx *kctx, int offset, int len,
struct xdr_buf *buf, unsigned int *slack,
unsigned int *align);
@@ -113,12 +103,6 @@ u32 gss_krb5_unwrap_v2(struct krb5_ctx *kctx, int offset, int len,
/* Key Derivation Functions */
-int krb5_derive_key_v1(const struct gss_krb5_enctype *gk5e,
- const struct xdr_netobj *inkey,
- struct xdr_netobj *outkey,
- const struct xdr_netobj *label,
- gfp_t gfp_mask);
-
int krb5_derive_key_v2(const struct gss_krb5_enctype *gk5e,
const struct xdr_netobj *inkey,
struct xdr_netobj *outkey,
@@ -169,13 +153,6 @@ static inline int krb5_derive_key(struct krb5_ctx *kctx,
return gk5e->derive_key(gk5e, inkey, outkey, &label, gfp_mask);
}
-s32 krb5_make_seq_num(struct krb5_ctx *kctx, struct crypto_sync_skcipher *key,
- int direction, u32 seqnum, unsigned char *cksum,
- unsigned char *buf);
-
-s32 krb5_get_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
- unsigned char *buf, int *direction, u32 *seqnum);
-
void krb5_make_confounder(u8 *p, int conflen);
u32 make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c
index 5347fe1cc93f..06d8ee0db000 100644
--- a/net/sunrpc/auth_gss/gss_krb5_keys.c
+++ b/net/sunrpc/auth_gss/gss_krb5_keys.c
@@ -222,90 +222,6 @@ err_return:
return ret;
}
-#define smask(step) ((1<<step)-1)
-#define pstep(x, step) (((x)&smask(step))^(((x)>>step)&smask(step)))
-#define parity_char(x) pstep(pstep(pstep((x), 4), 2), 1)
-
-static void mit_des_fixup_key_parity(u8 key[8])
-{
- int i;
- for (i = 0; i < 8; i++) {
- key[i] &= 0xfe;
- key[i] |= 1^parity_char(key[i]);
- }
-}
-
-static int krb5_random_to_key_v1(const struct gss_krb5_enctype *gk5e,
- struct xdr_netobj *randombits,
- struct xdr_netobj *key)
-{
- int i, ret = -EINVAL;
-
- if (key->len != 24) {
- dprintk("%s: key->len is %d\n", __func__, key->len);
- goto err_out;
- }
- if (randombits->len != 21) {
- dprintk("%s: randombits->len is %d\n",
- __func__, randombits->len);
- goto err_out;
- }
-
- /* take the seven bytes, move them around into the top 7 bits of the
- 8 key bytes, then compute the parity bits. Do this three times. */
-
- for (i = 0; i < 3; i++) {
- memcpy(key->data + i*8, randombits->data + i*7, 7);
- key->data[i*8+7] = (((key->data[i*8]&1)<<1) |
- ((key->data[i*8+1]&1)<<2) |
- ((key->data[i*8+2]&1)<<3) |
- ((key->data[i*8+3]&1)<<4) |
- ((key->data[i*8+4]&1)<<5) |
- ((key->data[i*8+5]&1)<<6) |
- ((key->data[i*8+6]&1)<<7));
-
- mit_des_fixup_key_parity(key->data + i*8);
- }
- ret = 0;
-err_out:
- return ret;
-}
-
-/**
- * krb5_derive_key_v1 - Derive a subkey for an RFC 3961 enctype
- * @gk5e: Kerberos 5 enctype profile
- * @inkey: base protocol key
- * @outkey: OUT: derived key
- * @label: subkey usage label
- * @gfp_mask: memory allocation control flags
- *
- * Caller sets @outkey->len to the desired length of the derived key.
- *
- * On success, returns 0 and fills in @outkey. A negative errno value
- * is returned on failure.
- */
-int krb5_derive_key_v1(const struct gss_krb5_enctype *gk5e,
- const struct xdr_netobj *inkey,
- struct xdr_netobj *outkey,
- const struct xdr_netobj *label,
- gfp_t gfp_mask)
-{
- struct xdr_netobj inblock;
- int ret;
-
- inblock.len = gk5e->keybytes;
- inblock.data = kmalloc(inblock.len, gfp_mask);
- if (!inblock.data)
- return -ENOMEM;
-
- ret = krb5_DK(gk5e, inkey, inblock.data, label, gfp_mask);
- if (!ret)
- ret = krb5_random_to_key_v1(gk5e, &inblock, outkey);
-
- kfree_sensitive(inblock.data);
- return ret;
-}
-
/*
* This is the identity function, with some sanity checking.
*/
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 20e21d08badb..e31cfdf7eadc 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -30,61 +30,7 @@
static struct gss_api_mech gss_kerberos_mech;
-#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED)
-static int gss_krb5_import_ctx_des(struct krb5_ctx *ctx, gfp_t gfp_mask);
-static int gss_krb5_import_ctx_v1(struct krb5_ctx *ctx, gfp_t gfp_mask);
-#endif
-#if defined(CONFIG_RPCSEC_GSS_KRB5_CRYPTOSYSTEM)
-static int gss_krb5_import_ctx_v2(struct krb5_ctx *ctx, gfp_t gfp_mask);
-#endif
-
static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
-#if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_DES)
- /*
- * DES (All DES enctypes are mapped to the same gss functionality)
- */
- {
- .etype = ENCTYPE_DES_CBC_RAW,
- .ctype = CKSUMTYPE_RSA_MD5,
- .name = "des-cbc-crc",
- .encrypt_name = "cbc(des)",
- .cksum_name = "md5",
- .import_ctx = gss_krb5_import_ctx_des,
- .get_mic = gss_krb5_get_mic_v1,
- .verify_mic = gss_krb5_verify_mic_v1,
- .wrap = gss_krb5_wrap_v1,
- .unwrap = gss_krb5_unwrap_v1,
- .signalg = SGN_ALG_DES_MAC_MD5,
- .sealalg = SEAL_ALG_DES,
- .keybytes = 7,
- .keylength = 8,
- .cksumlength = 8,
- .keyed_cksum = 0,
- },
- /*
- * 3DES
- */
- {
- .etype = ENCTYPE_DES3_CBC_RAW,
- .ctype = CKSUMTYPE_HMAC_SHA1_DES3,
- .name = "des3-hmac-sha1",
- .encrypt_name = "cbc(des3_ede)",
- .cksum_name = "hmac(sha1)",
- .import_ctx = gss_krb5_import_ctx_v1,
- .derive_key = krb5_derive_key_v1,
- .get_mic = gss_krb5_get_mic_v1,
- .verify_mic = gss_krb5_verify_mic_v1,
- .wrap = gss_krb5_wrap_v1,
- .unwrap = gss_krb5_unwrap_v1,
- .signalg = SGN_ALG_HMAC_SHA1_DES3_KD,
- .sealalg = SEAL_ALG_DES3KD,
- .keybytes = 21,
- .keylength = 24,
- .cksumlength = 20,
- .keyed_cksum = 1,
- },
-#endif
-
#if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1)
/*
* AES-128 with SHA-1 (RFC 3962)
@@ -96,7 +42,6 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
.encrypt_name = "cts(cbc(aes))",
.aux_cipher = "cbc(aes)",
.cksum_name = "hmac(sha1)",
- .import_ctx = gss_krb5_import_ctx_v2,
.derive_key = krb5_derive_key_v2,
.encrypt = gss_krb5_aes_encrypt,
.decrypt = gss_krb5_aes_decrypt,
@@ -126,7 +71,6 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
.encrypt_name = "cts(cbc(aes))",
.aux_cipher = "cbc(aes)",
.cksum_name = "hmac(sha1)",
- .import_ctx = gss_krb5_import_ctx_v2,
.derive_key = krb5_derive_key_v2,
.encrypt = gss_krb5_aes_encrypt,
.decrypt = gss_krb5_aes_decrypt,
@@ -166,7 +110,6 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
.Ke_length = BITS2OCTETS(128),
.Ki_length = BITS2OCTETS(128),
- .import_ctx = gss_krb5_import_ctx_v2,
.derive_key = krb5_kdf_feedback_cmac,
.encrypt = gss_krb5_aes_encrypt,
.decrypt = gss_krb5_aes_decrypt,
@@ -193,7 +136,6 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
.Ke_length = BITS2OCTETS(256),
.Ki_length = BITS2OCTETS(256),
- .import_ctx = gss_krb5_import_ctx_v2,
.derive_key = krb5_kdf_feedback_cmac,
.encrypt = gss_krb5_aes_encrypt,
.decrypt = gss_krb5_aes_decrypt,
@@ -223,7 +165,6 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
.Ke_length = BITS2OCTETS(128),
.Ki_length = BITS2OCTETS(128),
- .import_ctx = gss_krb5_import_ctx_v2,
.derive_key = krb5_kdf_hmac_sha2,
.encrypt = krb5_etm_encrypt,
.decrypt = krb5_etm_decrypt,
@@ -250,7 +191,6 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
.Ke_length = BITS2OCTETS(256),
.Ki_length = BITS2OCTETS(192),
- .import_ctx = gss_krb5_import_ctx_v2,
.derive_key = krb5_kdf_hmac_sha2,
.encrypt = krb5_etm_encrypt,
.decrypt = krb5_etm_decrypt,
@@ -284,12 +224,6 @@ static void gss_krb5_prepare_enctype_priority_list(void)
ENCTYPE_AES256_CTS_HMAC_SHA1_96,
ENCTYPE_AES128_CTS_HMAC_SHA1_96,
#endif
-#if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_DES)
- ENCTYPE_DES3_CBC_SHA1,
- ENCTYPE_DES_CBC_MD5,
- ENCTYPE_DES_CBC_CRC,
- ENCTYPE_DES_CBC_MD4,
-#endif
};
size_t total, i;
char buf[16];
@@ -330,185 +264,6 @@ const struct gss_krb5_enctype *gss_krb5_lookup_enctype(u32 etype)
EXPORT_SYMBOL_IF_KUNIT(gss_krb5_lookup_enctype);
static struct crypto_sync_skcipher *
-gss_krb5_alloc_cipher_v1(struct krb5_ctx *ctx, struct xdr_netobj *key)
-{
- struct crypto_sync_skcipher *tfm;
-
- tfm = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0);
- if (IS_ERR(tfm))
- return NULL;
- if (crypto_sync_skcipher_setkey(tfm, key->data, key->len)) {
- crypto_free_sync_skcipher(tfm);
- return NULL;
- }
- return tfm;
-}
-
-static inline const void *
-get_key(const void *p, const void *end,
- struct krb5_ctx *ctx, struct crypto_sync_skcipher **res)
-{
- struct crypto_sync_skcipher *tfm;
- struct xdr_netobj key;
- int alg;
-
- p = simple_get_bytes(p, end, &alg, sizeof(alg));
- if (IS_ERR(p))
- goto out_err;
- switch (alg) {
- case ENCTYPE_DES_CBC_CRC:
- case ENCTYPE_DES_CBC_MD4:
- case ENCTYPE_DES_CBC_MD5:
- /* Map all these key types to ENCTYPE_DES_CBC_RAW */
- alg = ENCTYPE_DES_CBC_RAW;
- break;
- }
- if (!gss_krb5_lookup_enctype(alg)) {
- pr_warn("gss_krb5: unsupported enctype: %d\n", alg);
- goto out_err_inval;
- }
-
- p = simple_get_netobj(p, end, &key);
- if (IS_ERR(p))
- goto out_err;
- tfm = gss_krb5_alloc_cipher_v1(ctx, &key);
- kfree(key.data);
- if (!tfm) {
- pr_warn("gss_krb5: failed to initialize cipher '%s'\n",
- ctx->gk5e->encrypt_name);
- goto out_err_inval;
- }
- *res = tfm;
-
- return p;
-
-out_err_inval:
- p = ERR_PTR(-EINVAL);
-out_err:
- return p;
-}
-
-static int
-gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
-{
- u32 seq_send;
- int tmp;
- u32 time32;
-
- p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
- if (IS_ERR(p))
- goto out_err;
-
- /* Old format supports only DES! Any other enctype uses new format */
- ctx->enctype = ENCTYPE_DES_CBC_RAW;
-
- ctx->gk5e = gss_krb5_lookup_enctype(ctx->enctype);
- if (ctx->gk5e == NULL) {
- p = ERR_PTR(-EINVAL);
- goto out_err;
- }
-
- /* The downcall format was designed before we completely understood
- * the uses of the context fields; so it includes some stuff we
- * just give some minimal sanity-checking, and some we ignore
- * completely (like the next twenty bytes): */
- if (unlikely(p + 20 > end || p + 20 < p)) {
- p = ERR_PTR(-EFAULT);
- goto out_err;
- }
- p += 20;
- p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
- if (IS_ERR(p))
- goto out_err;
- if (tmp != SGN_ALG_DES_MAC_MD5) {
- p = ERR_PTR(-ENOSYS);
- goto out_err;
- }
- p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
- if (IS_ERR(p))
- goto out_err;
- if (tmp != SEAL_ALG_DES) {
- p = ERR_PTR(-ENOSYS);
- goto out_err;
- }
- p = simple_get_bytes(p, end, &time32, sizeof(time32));
- if (IS_ERR(p))
- goto out_err;
- /* unsigned 32-bit time overflows in year 2106 */
- ctx->endtime = (time64_t)time32;
- p = simple_get_bytes(p, end, &seq_send, sizeof(seq_send));
- if (IS_ERR(p))
- goto out_err;
- atomic_set(&ctx->seq_send, seq_send);
- p = simple_get_netobj(p, end, &ctx->mech_used);
- if (IS_ERR(p))
- goto out_err;
- p = get_key(p, end, ctx, &ctx->enc);
- if (IS_ERR(p))
- goto out_err_free_mech;
- p = get_key(p, end, ctx, &ctx->seq);
- if (IS_ERR(p))
- goto out_err_free_key1;
- if (p != end) {
- p = ERR_PTR(-EFAULT);
- goto out_err_free_key2;
- }
-
- return 0;
-
-out_err_free_key2:
- crypto_free_sync_skcipher(ctx->seq);
-out_err_free_key1:
- crypto_free_sync_skcipher(ctx->enc);
-out_err_free_mech:
- kfree(ctx->mech_used.data);
-out_err:
- return PTR_ERR(p);
-}
-
-#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED)
-static int
-gss_krb5_import_ctx_des(struct krb5_ctx *ctx, gfp_t gfp_mask)
-{
- return -EINVAL;
-}
-
-static int
-gss_krb5_import_ctx_v1(struct krb5_ctx *ctx, gfp_t gfp_mask)
-{
- struct xdr_netobj keyin, keyout;
-
- keyin.data = ctx->Ksess;
- keyin.len = ctx->gk5e->keylength;
-
- ctx->seq = gss_krb5_alloc_cipher_v1(ctx, &keyin);
- if (ctx->seq == NULL)
- goto out_err;
- ctx->enc = gss_krb5_alloc_cipher_v1(ctx, &keyin);
- if (ctx->enc == NULL)
- goto out_free_seq;
-
- /* derive cksum */
- keyout.data = ctx->cksum;
- keyout.len = ctx->gk5e->keylength;
- if (krb5_derive_key(ctx, &keyin, &keyout, KG_USAGE_SIGN,
- KEY_USAGE_SEED_CHECKSUM, gfp_mask))
- goto out_free_enc;
-
- return 0;
-
-out_free_enc:
- crypto_free_sync_skcipher(ctx->enc);
-out_free_seq:
- crypto_free_sync_skcipher(ctx->seq);
-out_err:
- return -EINVAL;
-}
-#endif
-
-#if defined(CONFIG_RPCSEC_GSS_KRB5_CRYPTOSYSTEM)
-
-static struct crypto_sync_skcipher *
gss_krb5_alloc_cipher_v2(const char *cname, const struct xdr_netobj *key)
{
struct crypto_sync_skcipher *tfm;
@@ -636,8 +391,6 @@ out_free:
goto out;
}
-#endif
-
static int
gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
gfp_t gfp_mask)
@@ -671,9 +424,6 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype));
if (IS_ERR(p))
goto out_err;
- /* Map ENCTYPE_DES3_CBC_SHA1 to ENCTYPE_DES3_CBC_RAW */
- if (ctx->enctype == ENCTYPE_DES3_CBC_SHA1)
- ctx->enctype = ENCTYPE_DES3_CBC_RAW;
ctx->gk5e = gss_krb5_lookup_enctype(ctx->enctype);
if (ctx->gk5e == NULL) {
dprintk("gss_kerberos_mech: unsupported krb5 enctype %u\n",
@@ -700,7 +450,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
}
ctx->mech_used.len = gss_kerberos_mech.gm_oid.len;
- return ctx->gk5e->import_ctx(ctx, gfp_mask);
+ return gss_krb5_import_ctx_v2(ctx, gfp_mask);
out_err:
return PTR_ERR(p);
@@ -718,10 +468,7 @@ gss_krb5_import_sec_context(const void *p, size_t len, struct gss_ctx *ctx_id,
if (ctx == NULL)
return -ENOMEM;
- if (len == 85)
- ret = gss_import_v1_context(p, end, ctx);
- else
- ret = gss_import_v2_context(p, end, ctx, gfp_mask);
+ ret = gss_import_v2_context(p, end, ctx, gfp_mask);
memzero_explicit(&ctx->Ksess, sizeof(ctx->Ksess));
if (ret) {
kfree(ctx);
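The v2 import path above parses the downcall blob by walking a cursor with simple_get_bytes() and simple_get_netobj(), each call returning the advanced pointer or an ERR_PTR() when the copy would run past @end. A minimal sketch of that bounds-checked cursor pattern, reconstructed for illustration (the in-tree helper may differ in detail):

	static const void *simple_get_bytes(const void *p, const void *end,
					    void *res, size_t len)
	{
		const void *q = (const void *)((const char *)p + len);

		/* Reject reads that run past @end or wrap the pointer */
		if (unlikely(q > end || q < p))
			return ERR_PTR(-EFAULT);
		memcpy(res, p, len);
		return q;
	}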
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index 146aa755f07d..ce540df9bce4 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -71,75 +71,6 @@
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
-#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED)
-
-static void *
-setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token)
-{
- u16 *ptr;
- void *krb5_hdr;
- int body_size = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength;
-
- token->len = g_token_size(&ctx->mech_used, body_size);
-
- ptr = (u16 *)token->data;
- g_make_token_header(&ctx->mech_used, body_size, (unsigned char **)&ptr);
-
- /* ptr now at start of header described in rfc 1964, section 1.2.1: */
- krb5_hdr = ptr;
- *ptr++ = KG_TOK_MIC_MSG;
- /*
- * signalg is stored as if it were converted from LE to host endian, even
- * though it's an opaque pair of bytes according to the RFC.
- */
- *ptr++ = (__force u16)cpu_to_le16(ctx->gk5e->signalg);
- *ptr++ = SEAL_ALG_NONE;
- *ptr = 0xffff;
-
- return krb5_hdr;
-}
-
-u32
-gss_krb5_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
- struct xdr_netobj *token)
-{
- char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
- struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
- .data = cksumdata};
- void *ptr;
- time64_t now;
- u32 seq_send;
- u8 *cksumkey;
-
- dprintk("RPC: %s\n", __func__);
- BUG_ON(ctx == NULL);
-
- now = ktime_get_real_seconds();
-
- ptr = setup_token(ctx, token);
-
- if (ctx->gk5e->keyed_cksum)
- cksumkey = ctx->cksum;
- else
- cksumkey = NULL;
-
- if (make_checksum(ctx, ptr, 8, text, 0, cksumkey,
- KG_USAGE_SIGN, &md5cksum))
- return GSS_S_FAILURE;
-
- memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
-
- seq_send = atomic_fetch_inc(&ctx->seq_send);
-
- if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff,
- seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))
- return GSS_S_FAILURE;
-
- return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
-}
-
-#endif
-
static void *
setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token)
{
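For reference, the MIC token layout that the deleted setup_token()/gss_krb5_get_mic_v1() pair produced, reconstructed from the removed code (offsets relative to the start of the RFC 1964 header; GSS_KRB5_TOK_HDR_LEN is 16):

	/*
	 *  [0..1]   TOK_ID     KG_TOK_MIC_MSG
	 *  [2..3]   SGN_ALG    ctx->gk5e->signalg, stored little-endian
	 *  [4..5]   SEAL_ALG   SEAL_ALG_NONE (0xffff)
	 *  [6..7]   Filler     0xffff
	 *  [8..15]  SND_SEQ    sequence number encrypted by krb5_make_seq_num()
	 *  [16..]   SGN_CKSUM  ctx->gk5e->cksumlength bytes of checksum
	 */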
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
deleted file mode 100644
index 1babc3474e10..000000000000
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * linux/net/sunrpc/gss_krb5_seqnum.c
- *
- * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/util_seqnum.c
- *
- * Copyright (c) 2000 The Regents of the University of Michigan.
- * All rights reserved.
- *
- * Andy Adamson <andros@umich.edu>
- */
-
-/*
- * Copyright 1993 by OpenVision Technologies, Inc.
- *
- * Permission to use, copy, modify, distribute, and sell this software
- * and its documentation for any purpose is hereby granted without fee,
- * provided that the above copyright notice appears in all copies and
- * that both that copyright notice and this permission notice appear in
- * supporting documentation, and that the name of OpenVision not be used
- * in advertising or publicity pertaining to distribution of the software
- * without specific, written prior permission. OpenVision makes no
- * representations about the suitability of this software for any
- * purpose. It is provided "as is" without express or implied warranty.
- *
- * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
- * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
- * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
- * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <crypto/skcipher.h>
-#include <linux/types.h>
-#include <linux/sunrpc/gss_krb5.h>
-
-#include "gss_krb5_internal.h"
-
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-# define RPCDBG_FACILITY RPCDBG_AUTH
-#endif
-
-s32
-krb5_make_seq_num(struct krb5_ctx *kctx,
- struct crypto_sync_skcipher *key,
- int direction,
- u32 seqnum,
- unsigned char *cksum, unsigned char *buf)
-{
- unsigned char *plain;
- s32 code;
-
- plain = kmalloc(8, GFP_KERNEL);
- if (!plain)
- return -ENOMEM;
-
- plain[0] = (unsigned char) (seqnum & 0xff);
- plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
- plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
- plain[3] = (unsigned char) ((seqnum >> 24) & 0xff);
-
- plain[4] = direction;
- plain[5] = direction;
- plain[6] = direction;
- plain[7] = direction;
-
- code = krb5_encrypt(key, cksum, plain, buf, 8);
- kfree(plain);
- return code;
-}
-
-s32
-krb5_get_seq_num(struct krb5_ctx *kctx,
- unsigned char *cksum,
- unsigned char *buf,
- int *direction, u32 *seqnum)
-{
- s32 code;
- unsigned char *plain;
- struct crypto_sync_skcipher *key = kctx->seq;
-
- dprintk("RPC: krb5_get_seq_num:\n");
-
- plain = kmalloc(8, GFP_KERNEL);
- if (!plain)
- return -ENOMEM;
-
- if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
- goto out;
-
- if ((plain[4] != plain[5]) || (plain[4] != plain[6]) ||
- (plain[4] != plain[7])) {
- code = (s32)KG_BAD_SEQ;
- goto out;
- }
-
- *direction = plain[4];
-
- *seqnum = ((plain[0]) |
- (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24));
-
-out:
- kfree(plain);
- return code;
-}
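The deleted file built an 8-byte plaintext (the 32-bit sequence number in little-endian order followed by four copies of the direction byte) and ran it through krb5_encrypt()/krb5_decrypt(), using the checksum bytes as the IV. A sketch of just the packing step, with a hypothetical helper name:

	static void krb5_pack_seq_plain(u32 seqnum, u8 direction, u8 plain[8])
	{
		plain[0] = seqnum & 0xff;
		plain[1] = (seqnum >> 8) & 0xff;
		plain[2] = (seqnum >> 16) & 0xff;
		plain[3] = (seqnum >> 24) & 0xff;
		memset(&plain[4], direction, 4);	/* four copies of direction */
	}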
diff --git a/net/sunrpc/auth_gss/gss_krb5_test.c b/net/sunrpc/auth_gss/gss_krb5_test.c
index 95ca783795c5..85625e3f3814 100644
--- a/net/sunrpc/auth_gss/gss_krb5_test.c
+++ b/net/sunrpc/auth_gss/gss_krb5_test.c
@@ -320,208 +320,12 @@ static void rfc3961_nfold_case(struct kunit *test)
"result mismatch");
}
-/*
- * RFC 3961 Appendix A.3. DES3 DR and DK
- *
- * These tests show the derived-random and derived-key values for the
- * des3-hmac-sha1-kd encryption scheme, using the DR and DK functions
- * defined in section 6.3.1. The input keys were randomly generated;
- * the usage values are from this specification.
- *
- * This test material is copyright (C) The Internet Society (2005).
- */
-
-DEFINE_HEX_XDR_NETOBJ(des3_dk_usage_155,
- 0x00, 0x00, 0x00, 0x01, 0x55
-);
-
-DEFINE_HEX_XDR_NETOBJ(des3_dk_usage_1aa,
- 0x00, 0x00, 0x00, 0x01, 0xaa
-);
-
-DEFINE_HEX_XDR_NETOBJ(des3_dk_usage_kerberos,
- 0x6b, 0x65, 0x72, 0x62, 0x65, 0x72, 0x6f, 0x73
-);
-
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test1_base_key,
- 0xdc, 0xe0, 0x6b, 0x1f, 0x64, 0xc8, 0x57, 0xa1,
- 0x1c, 0x3d, 0xb5, 0x7c, 0x51, 0x89, 0x9b, 0x2c,
- 0xc1, 0x79, 0x10, 0x08, 0xce, 0x97, 0x3b, 0x92
-);
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test1_derived_key,
- 0x92, 0x51, 0x79, 0xd0, 0x45, 0x91, 0xa7, 0x9b,
- 0x5d, 0x31, 0x92, 0xc4, 0xa7, 0xe9, 0xc2, 0x89,
- 0xb0, 0x49, 0xc7, 0x1f, 0x6e, 0xe6, 0x04, 0xcd
-);
-
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test2_base_key,
- 0x5e, 0x13, 0xd3, 0x1c, 0x70, 0xef, 0x76, 0x57,
- 0x46, 0x57, 0x85, 0x31, 0xcb, 0x51, 0xc1, 0x5b,
- 0xf1, 0x1c, 0xa8, 0x2c, 0x97, 0xce, 0xe9, 0xf2
-);
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test2_derived_key,
- 0x9e, 0x58, 0xe5, 0xa1, 0x46, 0xd9, 0x94, 0x2a,
- 0x10, 0x1c, 0x46, 0x98, 0x45, 0xd6, 0x7a, 0x20,
- 0xe3, 0xc4, 0x25, 0x9e, 0xd9, 0x13, 0xf2, 0x07
-);
-
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test3_base_key,
- 0x98, 0xe6, 0xfd, 0x8a, 0x04, 0xa4, 0xb6, 0x85,
- 0x9b, 0x75, 0xa1, 0x76, 0x54, 0x0b, 0x97, 0x52,
- 0xba, 0xd3, 0xec, 0xd6, 0x10, 0xa2, 0x52, 0xbc
-);
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test3_derived_key,
- 0x13, 0xfe, 0xf8, 0x0d, 0x76, 0x3e, 0x94, 0xec,
- 0x6d, 0x13, 0xfd, 0x2c, 0xa1, 0xd0, 0x85, 0x07,
- 0x02, 0x49, 0xda, 0xd3, 0x98, 0x08, 0xea, 0xbf
-);
-
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test4_base_key,
- 0x62, 0x2a, 0xec, 0x25, 0xa2, 0xfe, 0x2c, 0xad,
- 0x70, 0x94, 0x68, 0x0b, 0x7c, 0x64, 0x94, 0x02,
- 0x80, 0x08, 0x4c, 0x1a, 0x7c, 0xec, 0x92, 0xb5
-);
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test4_derived_key,
- 0xf8, 0xdf, 0xbf, 0x04, 0xb0, 0x97, 0xe6, 0xd9,
- 0xdc, 0x07, 0x02, 0x68, 0x6b, 0xcb, 0x34, 0x89,
- 0xd9, 0x1f, 0xd9, 0xa4, 0x51, 0x6b, 0x70, 0x3e
-);
-
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test5_base_key,
- 0xd3, 0xf8, 0x29, 0x8c, 0xcb, 0x16, 0x64, 0x38,
- 0xdc, 0xb9, 0xb9, 0x3e, 0xe5, 0xa7, 0x62, 0x92,
- 0x86, 0xa4, 0x91, 0xf8, 0x38, 0xf8, 0x02, 0xfb
-);
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test5_derived_key,
- 0x23, 0x70, 0xda, 0x57, 0x5d, 0x2a, 0x3d, 0xa8,
- 0x64, 0xce, 0xbf, 0xdc, 0x52, 0x04, 0xd5, 0x6d,
- 0xf7, 0x79, 0xa7, 0xdf, 0x43, 0xd9, 0xda, 0x43
-);
-
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test6_base_key,
- 0xc1, 0x08, 0x16, 0x49, 0xad, 0xa7, 0x43, 0x62,
- 0xe6, 0xa1, 0x45, 0x9d, 0x01, 0xdf, 0xd3, 0x0d,
- 0x67, 0xc2, 0x23, 0x4c, 0x94, 0x07, 0x04, 0xda
-);
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test6_derived_key,
- 0x34, 0x80, 0x57, 0xec, 0x98, 0xfd, 0xc4, 0x80,
- 0x16, 0x16, 0x1c, 0x2a, 0x4c, 0x7a, 0x94, 0x3e,
- 0x92, 0xae, 0x49, 0x2c, 0x98, 0x91, 0x75, 0xf7
-);
-
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test7_base_key,
- 0x5d, 0x15, 0x4a, 0xf2, 0x38, 0xf4, 0x67, 0x13,
- 0x15, 0x57, 0x19, 0xd5, 0x5e, 0x2f, 0x1f, 0x79,
- 0x0d, 0xd6, 0x61, 0xf2, 0x79, 0xa7, 0x91, 0x7c
-);
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test7_derived_key,
- 0xa8, 0x80, 0x8a, 0xc2, 0x67, 0xda, 0xda, 0x3d,
- 0xcb, 0xe9, 0xa7, 0xc8, 0x46, 0x26, 0xfb, 0xc7,
- 0x61, 0xc2, 0x94, 0xb0, 0x13, 0x15, 0xe5, 0xc1
-);
-
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test8_base_key,
- 0x79, 0x85, 0x62, 0xe0, 0x49, 0x85, 0x2f, 0x57,
- 0xdc, 0x8c, 0x34, 0x3b, 0xa1, 0x7f, 0x2c, 0xa1,
- 0xd9, 0x73, 0x94, 0xef, 0xc8, 0xad, 0xc4, 0x43
-);
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test8_derived_key,
- 0xc8, 0x13, 0xf8, 0x8a, 0x3b, 0xe3, 0xb3, 0x34,
- 0xf7, 0x54, 0x25, 0xce, 0x91, 0x75, 0xfb, 0xe3,
- 0xc8, 0x49, 0x3b, 0x89, 0xc8, 0x70, 0x3b, 0x49
-);
-
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test9_base_key,
- 0x26, 0xdc, 0xe3, 0x34, 0xb5, 0x45, 0x29, 0x2f,
- 0x2f, 0xea, 0xb9, 0xa8, 0x70, 0x1a, 0x89, 0xa4,
- 0xb9, 0x9e, 0xb9, 0x94, 0x2c, 0xec, 0xd0, 0x16
-);
-DEFINE_HEX_XDR_NETOBJ(des3_dk_test9_derived_key,
- 0xf4, 0x8f, 0xfd, 0x6e, 0x83, 0xf8, 0x3e, 0x73,
- 0x54, 0xe6, 0x94, 0xfd, 0x25, 0x2c, 0xf8, 0x3b,
- 0xfe, 0x58, 0xf7, 0xd5, 0xba, 0x37, 0xec, 0x5d
-);
-
-static const struct gss_krb5_test_param rfc3961_kdf_test_params[] = {
- {
- .desc = "des3-hmac-sha1 key derivation case 1",
- .enctype = ENCTYPE_DES3_CBC_RAW,
- .base_key = &des3_dk_test1_base_key,
- .usage = &des3_dk_usage_155,
- .expected_result = &des3_dk_test1_derived_key,
- },
- {
- .desc = "des3-hmac-sha1 key derivation case 2",
- .enctype = ENCTYPE_DES3_CBC_RAW,
- .base_key = &des3_dk_test2_base_key,
- .usage = &des3_dk_usage_1aa,
- .expected_result = &des3_dk_test2_derived_key,
- },
- {
- .desc = "des3-hmac-sha1 key derivation case 3",
- .enctype = ENCTYPE_DES3_CBC_RAW,
- .base_key = &des3_dk_test3_base_key,
- .usage = &des3_dk_usage_155,
- .expected_result = &des3_dk_test3_derived_key,
- },
- {
- .desc = "des3-hmac-sha1 key derivation case 4",
- .enctype = ENCTYPE_DES3_CBC_RAW,
- .base_key = &des3_dk_test4_base_key,
- .usage = &des3_dk_usage_1aa,
- .expected_result = &des3_dk_test4_derived_key,
- },
- {
- .desc = "des3-hmac-sha1 key derivation case 5",
- .enctype = ENCTYPE_DES3_CBC_RAW,
- .base_key = &des3_dk_test5_base_key,
- .usage = &des3_dk_usage_kerberos,
- .expected_result = &des3_dk_test5_derived_key,
- },
- {
- .desc = "des3-hmac-sha1 key derivation case 6",
- .enctype = ENCTYPE_DES3_CBC_RAW,
- .base_key = &des3_dk_test6_base_key,
- .usage = &des3_dk_usage_155,
- .expected_result = &des3_dk_test6_derived_key,
- },
- {
- .desc = "des3-hmac-sha1 key derivation case 7",
- .enctype = ENCTYPE_DES3_CBC_RAW,
- .base_key = &des3_dk_test7_base_key,
- .usage = &des3_dk_usage_1aa,
- .expected_result = &des3_dk_test7_derived_key,
- },
- {
- .desc = "des3-hmac-sha1 key derivation case 8",
- .enctype = ENCTYPE_DES3_CBC_RAW,
- .base_key = &des3_dk_test8_base_key,
- .usage = &des3_dk_usage_155,
- .expected_result = &des3_dk_test8_derived_key,
- },
- {
- .desc = "des3-hmac-sha1 key derivation case 9",
- .enctype = ENCTYPE_DES3_CBC_RAW,
- .base_key = &des3_dk_test9_base_key,
- .usage = &des3_dk_usage_1aa,
- .expected_result = &des3_dk_test9_derived_key,
- },
-};
-
-/* Creates the function rfc3961_kdf_gen_params */
-KUNIT_ARRAY_PARAM(rfc3961_kdf, rfc3961_kdf_test_params, gss_krb5_get_desc);
-
static struct kunit_case rfc3961_test_cases[] = {
{
.name = "RFC 3961 n-fold",
.run_case = rfc3961_nfold_case,
.generate_params = rfc3961_nfold_gen_params,
},
- {
- .name = "RFC 3961 key derivation",
- .run_case = kdf_case,
- .generate_params = rfc3961_kdf_gen_params,
- },
{}
};
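For orientation, this is how a parameter table plugs into a KUnit case in this file; the surviving n-fold case above follows the same shape. Names prefixed my_ are hypothetical:

	static const struct gss_krb5_test_param my_params[] = {
		{
			.desc		= "example case",
			.enctype	= ENCTYPE_AES128_CTS_HMAC_SHA1_96,
			/* base_key, usage, expected_result elided */
		},
	};

	/* Creates the function my_gen_params() */
	KUNIT_ARRAY_PARAM(my, my_params, gss_krb5_get_desc);

	static struct kunit_case my_test_cases[] = {
		{
			.name			= "example",
			.run_case		= kdf_case,
			.generate_params	= my_gen_params,
		},
		{}
	};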
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index 7d6d4ae4a3c9..4fbc50a0a2c4 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -69,83 +69,6 @@
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
-
-#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED)
-/* read_token is a mic token, and message_buffer is the data that the mic was
- * supposedly taken over. */
-u32
-gss_krb5_verify_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *message_buffer,
- struct xdr_netobj *read_token)
-{
- int signalg;
- int sealalg;
- char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
- struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
- .data = cksumdata};
- s32 now;
- int direction;
- u32 seqnum;
- unsigned char *ptr = (unsigned char *)read_token->data;
- int bodysize;
- u8 *cksumkey;
-
- dprintk("RPC: krb5_read_token\n");
-
- if (g_verify_token_header(&ctx->mech_used, &bodysize, &ptr,
- read_token->len))
- return GSS_S_DEFECTIVE_TOKEN;
-
- if ((ptr[0] != ((KG_TOK_MIC_MSG >> 8) & 0xff)) ||
- (ptr[1] != (KG_TOK_MIC_MSG & 0xff)))
- return GSS_S_DEFECTIVE_TOKEN;
-
- /* XXX sanity-check bodysize?? */
-
- signalg = ptr[2] + (ptr[3] << 8);
- if (signalg != ctx->gk5e->signalg)
- return GSS_S_DEFECTIVE_TOKEN;
-
- sealalg = ptr[4] + (ptr[5] << 8);
- if (sealalg != SEAL_ALG_NONE)
- return GSS_S_DEFECTIVE_TOKEN;
-
- if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
- return GSS_S_DEFECTIVE_TOKEN;
-
- if (ctx->gk5e->keyed_cksum)
- cksumkey = ctx->cksum;
- else
- cksumkey = NULL;
-
- if (make_checksum(ctx, ptr, 8, message_buffer, 0,
- cksumkey, KG_USAGE_SIGN, &md5cksum))
- return GSS_S_FAILURE;
-
- if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
- ctx->gk5e->cksumlength))
- return GSS_S_BAD_SIG;
-
- /* it got through unscathed. Make sure the context is unexpired */
-
- now = ktime_get_real_seconds();
-
- if (now > ctx->endtime)
- return GSS_S_CONTEXT_EXPIRED;
-
- /* do sequencing checks */
-
- if (krb5_get_seq_num(ctx, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
- &direction, &seqnum))
- return GSS_S_FAILURE;
-
- if ((ctx->initiate && direction != 0xff) ||
- (!ctx->initiate && direction != 0))
- return GSS_S_BAD_SIG;
-
- return GSS_S_COMPLETE;
-}
-#endif
-
u32
gss_krb5_verify_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *message_buffer,
struct xdr_netobj *read_token)
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 6d6b082380b2..b3e1738ff6bf 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -40,293 +40,6 @@
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
-#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED)
-
-static inline int
-gss_krb5_padding(int blocksize, int length)
-{
- return blocksize - (length % blocksize);
-}
-
-static inline void
-gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
-{
- int padding = gss_krb5_padding(blocksize, buf->len - offset);
- char *p;
- struct kvec *iov;
-
- if (buf->page_len || buf->tail[0].iov_len)
- iov = &buf->tail[0];
- else
- iov = &buf->head[0];
- p = iov->iov_base + iov->iov_len;
- iov->iov_len += padding;
- buf->len += padding;
- memset(p, padding, padding);
-}
-
-static inline int
-gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
-{
- u8 *ptr;
- u8 pad;
- size_t len = buf->len;
-
- if (len <= buf->head[0].iov_len) {
- pad = *(u8 *)(buf->head[0].iov_base + len - 1);
- if (pad > buf->head[0].iov_len)
- return -EINVAL;
- buf->head[0].iov_len -= pad;
- goto out;
- } else
- len -= buf->head[0].iov_len;
- if (len <= buf->page_len) {
- unsigned int last = (buf->page_base + len - 1)
- >>PAGE_SHIFT;
- unsigned int offset = (buf->page_base + len - 1)
- & (PAGE_SIZE - 1);
- ptr = kmap_atomic(buf->pages[last]);
- pad = *(ptr + offset);
- kunmap_atomic(ptr);
- goto out;
- } else
- len -= buf->page_len;
- BUG_ON(len > buf->tail[0].iov_len);
- pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
-out:
- /* XXX: NOTE: we do not adjust the page lengths--they represent
- * a range of data in the real filesystem page cache, and we need
- * to know that range so the xdr code can properly place read data.
- * However adjusting the head length, as we do above, is harmless.
- * In the case of a request that fits into a single page, the server
- * also uses length and head length together to determine the original
- * start of the request to copy the request for deferal; so it's
- * easier on the server if we adjust head and tail length in tandem.
- * It's not really a problem that we don't fool with the page and
- * tail lengths, though--at worst badly formed xdr might lead the
- * server to attempt to parse the padding.
- * XXX: Document all these weird requirements for gss mechanism
- * wrap/unwrap functions. */
- if (pad > blocksize)
- return -EINVAL;
- if (buf->len > pad)
- buf->len -= pad;
- else
- return -EINVAL;
- return 0;
-}
-
-/* Assumptions: the head and tail of inbuf are ours to play with.
- * The pages, however, may be real pages in the page cache and we replace
- * them with scratch pages from **pages before writing to them. */
-/* XXX: obviously the above should be documentation of wrap interface,
- * and shouldn't be in this kerberos-specific file. */
-
-/* XXX factor out common code with seal/unseal. */
-
-u32
-gss_krb5_wrap_v1(struct krb5_ctx *kctx, int offset,
- struct xdr_buf *buf, struct page **pages)
-{
- char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
- struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
- .data = cksumdata};
- int blocksize = 0, plainlen;
- unsigned char *ptr, *msg_start;
- time64_t now;
- int headlen;
- struct page **tmp_pages;
- u32 seq_send;
- u8 *cksumkey;
- u32 conflen = crypto_sync_skcipher_blocksize(kctx->enc);
-
- dprintk("RPC: %s\n", __func__);
-
- now = ktime_get_real_seconds();
-
- blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
- gss_krb5_add_padding(buf, offset, blocksize);
- BUG_ON((buf->len - offset) % blocksize);
- plainlen = conflen + buf->len - offset;
-
- headlen = g_token_size(&kctx->mech_used,
- GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
- (buf->len - offset);
-
- ptr = buf->head[0].iov_base + offset;
- /* shift data to make room for header. */
- xdr_extend_head(buf, offset, headlen);
-
- /* XXX Would be cleverer to encrypt while copying. */
- BUG_ON((buf->len - offset - headlen) % blocksize);
-
- g_make_token_header(&kctx->mech_used,
- GSS_KRB5_TOK_HDR_LEN +
- kctx->gk5e->cksumlength + plainlen, &ptr);
-
-
- /* ptr now at header described in rfc 1964, section 1.2.1: */
- ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
- ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);
-
- msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;
-
- /*
- * signalg and sealalg are stored as if they were converted from LE
- * to host endian, even though they're opaque pairs of bytes according
- * to the RFC.
- */
- *(__le16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
- *(__le16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
- ptr[6] = 0xff;
- ptr[7] = 0xff;
-
- krb5_make_confounder(msg_start, conflen);
-
- if (kctx->gk5e->keyed_cksum)
- cksumkey = kctx->cksum;
- else
- cksumkey = NULL;
-
- /* XXXJBF: UGH!: */
- tmp_pages = buf->pages;
- buf->pages = pages;
- if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
- cksumkey, KG_USAGE_SEAL, &md5cksum))
- return GSS_S_FAILURE;
- buf->pages = tmp_pages;
-
- memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
-
- seq_send = atomic_fetch_inc(&kctx->seq_send);
-
- /* XXX would probably be more efficient to compute checksum
- * and encrypt at the same time: */
- if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
- seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
- return GSS_S_FAILURE;
-
- if (gss_encrypt_xdr_buf(kctx->enc, buf,
- offset + headlen - conflen, pages))
- return GSS_S_FAILURE;
-
- return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
-}
-
-u32
-gss_krb5_unwrap_v1(struct krb5_ctx *kctx, int offset, int len,
- struct xdr_buf *buf, unsigned int *slack,
- unsigned int *align)
-{
- int signalg;
- int sealalg;
- char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
- struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
- .data = cksumdata};
- time64_t now;
- int direction;
- s32 seqnum;
- unsigned char *ptr;
- int bodysize;
- void *data_start, *orig_start;
- int data_len;
- int blocksize;
- u32 conflen = crypto_sync_skcipher_blocksize(kctx->enc);
- int crypt_offset;
- u8 *cksumkey;
- unsigned int saved_len = buf->len;
-
- dprintk("RPC: gss_unwrap_kerberos\n");
-
- ptr = (u8 *)buf->head[0].iov_base + offset;
- if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
- len - offset))
- return GSS_S_DEFECTIVE_TOKEN;
-
- if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
- (ptr[1] != (KG_TOK_WRAP_MSG & 0xff)))
- return GSS_S_DEFECTIVE_TOKEN;
-
- /* XXX sanity-check bodysize?? */
-
- /* get the sign and seal algorithms */
-
- signalg = ptr[2] + (ptr[3] << 8);
- if (signalg != kctx->gk5e->signalg)
- return GSS_S_DEFECTIVE_TOKEN;
-
- sealalg = ptr[4] + (ptr[5] << 8);
- if (sealalg != kctx->gk5e->sealalg)
- return GSS_S_DEFECTIVE_TOKEN;
-
- if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
- return GSS_S_DEFECTIVE_TOKEN;
-
- /*
- * Data starts after token header and checksum. ptr points
- * to the beginning of the token header
- */
- crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
- (unsigned char *)buf->head[0].iov_base;
-
- buf->len = len;
- if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
- return GSS_S_DEFECTIVE_TOKEN;
-
- if (kctx->gk5e->keyed_cksum)
- cksumkey = kctx->cksum;
- else
- cksumkey = NULL;
-
- if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
- cksumkey, KG_USAGE_SEAL, &md5cksum))
- return GSS_S_FAILURE;
-
- if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
- kctx->gk5e->cksumlength))
- return GSS_S_BAD_SIG;
-
- /* it got through unscathed. Make sure the context is unexpired */
-
- now = ktime_get_real_seconds();
-
- if (now > kctx->endtime)
- return GSS_S_CONTEXT_EXPIRED;
-
- /* do sequencing checks */
-
- if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
- ptr + 8, &direction, &seqnum))
- return GSS_S_BAD_SIG;
-
- if ((kctx->initiate && direction != 0xff) ||
- (!kctx->initiate && direction != 0))
- return GSS_S_BAD_SIG;
-
- /* Copy the data back to the right position. XXX: Would probably be
- * better to copy and encrypt at the same time. */
-
- blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
- data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
- conflen;
- orig_start = buf->head[0].iov_base + offset;
- data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
- memmove(orig_start, data_start, data_len);
- buf->head[0].iov_len -= (data_start - orig_start);
- buf->len = len - (data_start - orig_start);
-
- if (gss_krb5_remove_padding(buf, blocksize))
- return GSS_S_DEFECTIVE_TOKEN;
-
- /* slack must include room for krb5 padding */
- *slack = XDR_QUADLEN(saved_len - buf->len);
- /* The GSS blob always precedes the RPC message payload */
- *align = *slack;
- return GSS_S_COMPLETE;
-}
-
-#endif
-
/*
* We can shift data by up to LOCAL_BUF_LEN bytes in a pass. If we need
* to do more than that, we shift repeatedly. Kevin Coffman reports
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index c4a566737085..18734e70c5dd 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -986,7 +986,7 @@ bad_unwrap:
return -EINVAL;
}
-static int
+static enum svc_auth_status
svcauth_gss_set_client(struct svc_rqst *rqstp)
{
struct gss_svc_data *svcdata = rqstp->rq_auth_data;
@@ -1634,7 +1634,7 @@ svcauth_gss_decode_credbody(struct xdr_stream *xdr,
*
* The rqstp->rq_auth_stat field is also set (see RFCs 2203 and 5531).
*/
-static int
+static enum svc_auth_status
svcauth_gss_accept(struct svc_rqst *rqstp)
{
struct gss_svc_data *svcdata = rqstp->rq_auth_data;
@@ -1945,9 +1945,6 @@ bad_wrap:
* %0: the Reply is ready to be sent
* %-ENOMEM: failed to allocate memory
* %-EINVAL: encoding error
- *
- * XXX: These return values do not match the return values documented
- * for the auth_ops ->release method in linux/sunrpc/svcauth.h.
*/
static int
svcauth_gss_release(struct svc_rqst *rqstp)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d7c697af3762..8d75290f1a31 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -534,6 +534,8 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
.servername = args->servername,
.bc_xprt = args->bc_xprt,
.xprtsec = args->xprtsec,
+ .connect_timeout = args->connect_timeout,
+ .reconnect_timeout = args->reconnect_timeout,
};
char servername[48];
struct rpc_clnt *clnt;
@@ -2602,6 +2604,7 @@ out:
case 0:
task->tk_action = rpc_exit_task;
task->tk_status = rpcauth_unwrap_resp(task, &xdr);
+ xdr_finish_decode(&xdr);
return;
case -EAGAIN:
task->tk_status = 0;
@@ -3069,6 +3072,11 @@ int rpc_clnt_add_xprt(struct rpc_clnt *clnt,
}
xprt->resvport = resvport;
xprt->reuseport = reuseport;
+
+ if (xprtargs->connect_timeout)
+ connect_timeout = xprtargs->connect_timeout;
+ if (xprtargs->reconnect_timeout)
+ reconnect_timeout = xprtargs->reconnect_timeout;
if (xprt->ops->set_connect_timeout != NULL)
xprt->ops->set_connect_timeout(xprt,
connect_timeout,
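With the two new fields wired through above, a client creator can request per-transport connect and reconnect timeouts directly at rpc_create() time. A hedged sketch (values are in jiffies; fields other than the two new ones reflect typical rpc_create_args usage, and server_addr/my_rpc_program are placeholders):

	struct rpc_create_args args = {
		.net			= net,
		.protocol		= XPRT_TRANSPORT_TCP,
		.address		= (struct sockaddr *)&server_addr,
		.addrsize		= sizeof(server_addr),
		.servername		= "server.example.net",
		.program		= &my_rpc_program,
		.version		= 1,
		.authflavor		= RPC_AUTH_UNIX,
		.connect_timeout	= 30 * HZ,	/* new field */
		.reconnect_timeout	= 60 * HZ,	/* new field */
	};
	struct rpc_clnt *clnt = rpc_create(&args);

	if (IS_ERR(clnt))
		return PTR_ERR(clnt);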
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 587811a002c9..812fda9d45dd 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -513,9 +513,9 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
INIT_LIST_HEAD(&pool->sp_all_threads);
spin_lock_init(&pool->sp_lock);
+ percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL);
percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL);
percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL);
- percpu_counter_init(&pool->sp_threads_timedout, 0, GFP_KERNEL);
}
return serv;
@@ -588,9 +588,9 @@ svc_destroy(struct kref *ref)
for (i = 0; i < serv->sv_nrpools; i++) {
struct svc_pool *pool = &serv->sv_pools[i];
+ percpu_counter_destroy(&pool->sp_messages_arrived);
percpu_counter_destroy(&pool->sp_sockets_queued);
percpu_counter_destroy(&pool->sp_threads_woken);
- percpu_counter_destroy(&pool->sp_threads_timedout);
}
kfree(serv->sv_pools);
kfree(serv);
@@ -689,23 +689,44 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
return rqstp;
}
-/*
- * Choose a pool in which to create a new thread, for svc_set_num_threads
+/**
+ * svc_pool_wake_idle_thread - Awaken an idle thread in @pool
+ * @pool: service thread pool
+ *
+ * Can be called from soft IRQ or process context. Finding an idle
+ * service thread and marking it BUSY is atomic with respect to
+ * other calls to svc_pool_wake_idle_thread().
+ *
*/
-static inline struct svc_pool *
-choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
+void svc_pool_wake_idle_thread(struct svc_pool *pool)
{
- if (pool != NULL)
- return pool;
+ struct svc_rqst *rqstp;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
+ if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
+ continue;
+
+ WRITE_ONCE(rqstp->rq_qtime, ktime_get());
+ wake_up_process(rqstp->rq_task);
+ rcu_read_unlock();
+ percpu_counter_inc(&pool->sp_threads_woken);
+ trace_svc_wake_up(rqstp->rq_task->pid);
+ return;
+ }
+ rcu_read_unlock();
- return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
+ set_bit(SP_CONGESTED, &pool->sp_flags);
}
-/*
- * Choose a thread to kill, for svc_set_num_threads
- */
-static inline struct task_struct *
-choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
+static struct svc_pool *
+svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
+{
+ return pool ? pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools];
+}
+
+static struct task_struct *
+svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
unsigned int i;
struct task_struct *task = NULL;
@@ -713,7 +734,6 @@ choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
if (pool != NULL) {
spin_lock_bh(&pool->sp_lock);
} else {
- /* choose a pool in round-robin fashion */
for (i = 0; i < serv->sv_nrpools; i++) {
pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
spin_lock_bh(&pool->sp_lock);
@@ -728,21 +748,15 @@ found_pool:
if (!list_empty(&pool->sp_all_threads)) {
struct svc_rqst *rqstp;
- /*
- * Remove from the pool->sp_all_threads list
- * so we don't try to kill it again.
- */
rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
set_bit(RQ_VICTIM, &rqstp->rq_flags);
list_del_rcu(&rqstp->rq_all);
task = rqstp->rq_task;
}
spin_unlock_bh(&pool->sp_lock);
-
return task;
}
-/* create new threads */
static int
svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
@@ -754,13 +768,12 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
do {
nrservs--;
- chosen_pool = choose_pool(serv, pool, &state);
-
+ chosen_pool = svc_pool_next(serv, pool, &state);
node = svc_pool_map_get_node(chosen_pool->sp_id);
+
rqstp = svc_prepare_thread(serv, chosen_pool, node);
if (IS_ERR(rqstp))
return PTR_ERR(rqstp);
-
task = kthread_create_on_node(serv->sv_threadfn, rqstp,
node, "%s", serv->sv_name);
if (IS_ERR(task)) {
@@ -779,15 +792,6 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
return 0;
}
-/*
- * Create or destroy enough new threads to make the number
- * of threads the given number. If `pool' is non-NULL, applies
- * only to threads in that pool, otherwise round-robins between
- * all pools. Caller must ensure that mutual exclusion between this and
- * server startup or shutdown.
- */
-
-/* destroy old threads */
static int
svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
@@ -795,9 +799,8 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
struct task_struct *task;
unsigned int state = serv->sv_nrthreads-1;
- /* destroy old threads */
do {
- task = choose_victim(serv, pool, &state);
+ task = svc_pool_victim(serv, pool, &state);
if (task == NULL)
break;
rqstp = kthread_data(task);
@@ -809,6 +812,23 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
return 0;
}
+/**
+ * svc_set_num_threads - adjust number of threads per RPC service
+ * @serv: RPC service to adjust
+ * @pool: Specific pool from which to choose threads, or NULL
+ * @nrservs: New number of threads for @serv (0 or less means kill all threads)
+ *
+ * Create or destroy threads to make the number of threads for @serv the
+ * given number. If @pool is non-NULL, change only threads in that pool;
+ * otherwise, round-robin between all pools for @serv. @serv's
+ * sv_nrthreads is adjusted for each thread created or destroyed.
+ *
+ * Caller must ensure mutual exclusion between this and server startup or
+ * shutdown.
+ *
+ * Returns zero on success or a negative errno if an error occurred while
+ * starting a thread.
+ */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
@@ -1277,8 +1297,9 @@ svc_process_common(struct svc_rqst *rqstp)
const struct svc_procedure *procp = NULL;
struct svc_serv *serv = rqstp->rq_server;
struct svc_process_info process;
- int auth_res, rc;
+ enum svc_auth_status auth_res;
unsigned int aoffset;
+ int rc;
__be32 *p;
/* Will be turned off by GSS integrity and privacy services */
@@ -1333,6 +1354,9 @@ svc_process_common(struct svc_rqst *rqstp)
goto dropit;
case SVC_COMPLETE:
goto sendit;
+ default:
+ pr_warn_once("Unexpected svc_auth_status (%d)\n", auth_res);
+ goto err_system_err;
}
if (progp == NULL)
@@ -1370,6 +1394,8 @@ svc_process_common(struct svc_rqst *rqstp)
rc = process.dispatch(rqstp);
if (procp->pc_release)
procp->pc_release(rqstp);
+ xdr_finish_decode(xdr);
+
if (!rc)
goto dropit;
if (rqstp->rq_auth_stat != rpc_auth_ok)
@@ -1516,7 +1542,6 @@ out_baddir:
out_drop:
svc_drop(rqstp);
}
-EXPORT_SYMBOL_GPL(svc_process);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
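A short usage sketch of the interface documented above; serv is an existing svc_serv and the caller is assumed to hold whatever mutex guards thread setup (nfsd, for example, holds nfsd_mutex here):

	int error;

	error = svc_set_num_threads(serv, NULL, 16);	/* grow or shrink to 16 */
	if (error)
		return error;

	/* ... later, at shutdown: */
	svc_set_num_threads(serv, NULL, 0);		/* kill all threads */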
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 62c7919ea610..4cfe9640df48 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -434,6 +434,7 @@ static bool svc_xprt_ready(struct svc_xprt *xprt)
smp_rmb();
xpt_flags = READ_ONCE(xprt->xpt_flags);
+ trace_svc_xprt_enqueue(xprt, xpt_flags);
if (xpt_flags & BIT(XPT_BUSY))
return false;
if (xpt_flags & (BIT(XPT_CONN) | BIT(XPT_CLOSE) | BIT(XPT_HANDSHAKE)))
@@ -456,7 +457,6 @@ static bool svc_xprt_ready(struct svc_xprt *xprt)
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
struct svc_pool *pool;
- struct svc_rqst *rqstp = NULL;
if (!svc_xprt_ready(xprt))
return;
@@ -476,21 +476,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
spin_unlock_bh(&pool->sp_lock);
- /* find a thread for this xprt */
- rcu_read_lock();
- list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
- if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
- continue;
- percpu_counter_inc(&pool->sp_threads_woken);
- rqstp->rq_qtime = ktime_get();
- wake_up_process(rqstp->rq_task);
- goto out_unlock;
- }
- set_bit(SP_CONGESTED, &pool->sp_flags);
- rqstp = NULL;
-out_unlock:
- rcu_read_unlock();
- trace_svc_xprt_enqueue(xprt, rqstp);
+ svc_pool_wake_idle_thread(pool);
}
EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
@@ -581,7 +567,10 @@ static void svc_xprt_release(struct svc_rqst *rqstp)
svc_xprt_put(xprt);
}
-/*
+/**
+ * svc_wake_up - Wake up a service thread for non-transport work
+ * @serv: RPC service
+ *
* Some svc_serv's will have occasional work to do, even when a xprt is not
* waiting to be serviced. This function is there to "kick" a task in one of
* those services so that it can wake up and do that work. Note that we only
@@ -590,27 +579,10 @@ static void svc_xprt_release(struct svc_rqst *rqstp)
*/
void svc_wake_up(struct svc_serv *serv)
{
- struct svc_rqst *rqstp;
- struct svc_pool *pool;
-
- pool = &serv->sv_pools[0];
+ struct svc_pool *pool = &serv->sv_pools[0];
- rcu_read_lock();
- list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
- /* skip any that aren't queued */
- if (test_bit(RQ_BUSY, &rqstp->rq_flags))
- continue;
- rcu_read_unlock();
- wake_up_process(rqstp->rq_task);
- trace_svc_wake_up(rqstp->rq_task->pid);
- return;
- }
- rcu_read_unlock();
-
- /* No free entries available */
set_bit(SP_TASK_PENDING, &pool->sp_flags);
- smp_wmb();
- trace_svc_wake_up(0);
+ svc_pool_wake_idle_thread(pool);
}
EXPORT_SYMBOL_GPL(svc_wake_up);
@@ -679,7 +651,7 @@ static void svc_check_conn_limits(struct svc_serv *serv)
}
}
-static int svc_alloc_arg(struct svc_rqst *rqstp)
+static bool svc_alloc_arg(struct svc_rqst *rqstp)
{
struct svc_serv *serv = rqstp->rq_server;
struct xdr_buf *arg = &rqstp->rq_arg;
@@ -701,10 +673,10 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
/* Made progress, don't sleep yet */
continue;
- set_current_state(TASK_INTERRUPTIBLE);
- if (signalled() || kthread_should_stop()) {
+ set_current_state(TASK_IDLE);
+ if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
- return -EINTR;
+ return false;
}
trace_svc_alloc_arg_err(pages, ret);
memalloc_retry_wait(GFP_KERNEL);
@@ -723,7 +695,7 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
arg->tail[0].iov_len = 0;
rqstp->rq_xid = xdr_zero;
- return 0;
+ return true;
}
static bool
@@ -732,7 +704,7 @@ rqst_should_sleep(struct svc_rqst *rqstp)
struct svc_pool *pool = rqstp->rq_pool;
/* did someone call svc_wake_up? */
- if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
+ if (test_bit(SP_TASK_PENDING, &pool->sp_flags))
return false;
/* was a socket queued? */
@@ -740,7 +712,7 @@ rqst_should_sleep(struct svc_rqst *rqstp)
return false;
/* are we shutting down? */
- if (signalled() || kthread_should_stop())
+ if (kthread_should_stop())
return false;
/* are we freezing? */
@@ -750,10 +722,9 @@ rqst_should_sleep(struct svc_rqst *rqstp)
return true;
}
-static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
+static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp)
{
struct svc_pool *pool = rqstp->rq_pool;
- long time_left = 0;
/* rq_xprt should be clear on entry */
WARN_ON_ONCE(rqstp->rq_xprt);
@@ -762,18 +733,14 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
if (rqstp->rq_xprt)
goto out_found;
- /*
- * We have to be able to interrupt this wait
- * to bring down the daemons ...
- */
- set_current_state(TASK_INTERRUPTIBLE);
+ set_current_state(TASK_IDLE);
smp_mb__before_atomic();
clear_bit(SP_CONGESTED, &pool->sp_flags);
clear_bit(RQ_BUSY, &rqstp->rq_flags);
smp_mb__after_atomic();
if (likely(rqst_should_sleep(rqstp)))
- time_left = schedule_timeout(timeout);
+ schedule();
else
__set_current_state(TASK_RUNNING);
@@ -781,17 +748,16 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
set_bit(RQ_BUSY, &rqstp->rq_flags);
smp_mb__after_atomic();
+ clear_bit(SP_TASK_PENDING, &pool->sp_flags);
rqstp->rq_xprt = svc_xprt_dequeue(pool);
if (rqstp->rq_xprt)
goto out_found;
- if (!time_left)
- percpu_counter_inc(&pool->sp_threads_timedout);
-
- if (signalled() || kthread_should_stop())
- return ERR_PTR(-EINTR);
- return ERR_PTR(-EAGAIN);
+ if (kthread_should_stop())
+ return NULL;
+ return NULL;
out_found:
+ clear_bit(SP_TASK_PENDING, &pool->sp_flags);
/* Normally we will wait up to 5 seconds for any required
* cache information to be provided.
*/
@@ -867,37 +833,35 @@ out:
return len;
}
-/*
- * Receive the next request on any transport. This code is carefully
- * organised not to touch any cachelines in the shared svc_serv
- * structure, only cachelines in the local svc_pool.
+/**
+ * svc_recv - Receive and process the next request on any transport
+ * @rqstp: an idle RPC service thread
+ *
+ * This code is carefully organised not to touch any cachelines in
+ * the shared svc_serv structure, only cachelines in the local
+ * svc_pool.
*/
-int svc_recv(struct svc_rqst *rqstp, long timeout)
+void svc_recv(struct svc_rqst *rqstp)
{
struct svc_xprt *xprt = NULL;
struct svc_serv *serv = rqstp->rq_server;
- int len, err;
+ int len;
- err = svc_alloc_arg(rqstp);
- if (err)
+ if (!svc_alloc_arg(rqstp))
goto out;
try_to_freeze();
cond_resched();
- err = -EINTR;
- if (signalled() || kthread_should_stop())
+ if (kthread_should_stop())
goto out;
- xprt = svc_get_next_xprt(rqstp, timeout);
- if (IS_ERR(xprt)) {
- err = PTR_ERR(xprt);
+ xprt = svc_get_next_xprt(rqstp);
+ if (!xprt)
goto out;
- }
len = svc_handle_xprt(rqstp, xprt);
/* No data, incomplete (TCP) read, or accept() */
- err = -EAGAIN;
if (len <= 0)
goto out_release;
@@ -909,13 +873,14 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
if (serv->sv_stats)
serv->sv_stats->netcnt++;
+ percpu_counter_inc(&rqstp->rq_pool->sp_messages_arrived);
rqstp->rq_stime = ktime_get();
- return len;
+ svc_process(rqstp);
+out:
+ return;
out_release:
rqstp->rq_res.len = 0;
svc_xprt_release(rqstp);
-out:
- return err;
}
EXPORT_SYMBOL_GPL(svc_recv);
@@ -1456,12 +1421,11 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
return 0;
}
- seq_printf(m, "%u %llu %llu %llu %llu\n",
- pool->sp_id,
- percpu_counter_sum_positive(&pool->sp_sockets_queued),
- percpu_counter_sum_positive(&pool->sp_sockets_queued),
- percpu_counter_sum_positive(&pool->sp_threads_woken),
- percpu_counter_sum_positive(&pool->sp_threads_timedout));
+ seq_printf(m, "%u %llu %llu %llu 0\n",
+ pool->sp_id,
+ percpu_counter_sum_positive(&pool->sp_messages_arrived),
+ percpu_counter_sum_positive(&pool->sp_sockets_queued),
+ percpu_counter_sum_positive(&pool->sp_threads_woken));
return 0;
}
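To show what callers look like after the svc_recv() signature change, here is a minimal service thread body in the style of nfsd()/lockd(); my_svc_thread_fn is a placeholder:

	static int my_svc_thread_fn(void *data)
	{
		struct svc_rqst *rqstp = data;

		/* svc_recv() now sleeps in TASK_IDLE, handles the transport,
		 * and calls svc_process() itself; there is no timeout or
		 * error to check, only the kthread shutdown signal. */
		while (!kthread_should_stop())
			svc_recv(rqstp);

		svc_exit_thread(rqstp);
		return 0;
	}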
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 67d8245a08af..aa4429d0b810 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -60,8 +60,19 @@ svc_put_auth_ops(struct auth_ops *aops)
module_put(aops->owner);
}
-int
-svc_authenticate(struct svc_rqst *rqstp)
+/**
+ * svc_authenticate - Initialize an outgoing credential
+ * @rqstp: RPC execution context
+ *
+ * Return values:
+ * %SVC_OK: XDR encoding of the result can begin
+ * %SVC_DENIED: Credential or verifier is not valid
+ * %SVC_GARBAGE: Failed to decode credential or verifier
+ * %SVC_COMPLETE: GSS context lifetime event; no further action
+ * %SVC_DROP: Drop this request; no further action
+ * %SVC_CLOSE: Like drop, but also close transport connection
+ */
+enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp)
{
struct auth_ops *aops;
u32 flavor;
@@ -89,16 +100,28 @@ svc_authenticate(struct svc_rqst *rqstp)
}
EXPORT_SYMBOL_GPL(svc_authenticate);
-int svc_set_client(struct svc_rqst *rqstp)
+/**
+ * svc_set_client - Assign an appropriate 'auth_domain' as the client
+ * @rqstp: RPC execution context
+ *
+ * Return values:
+ * %SVC_OK: Client was found and assigned
+ * %SVC_DENIED: Client was explicitly denied
+ * %SVC_DROP: Ignore this request
+ * %SVC_CLOSE: Ignore this request and close the connection
+ */
+enum svc_auth_status svc_set_client(struct svc_rqst *rqstp)
{
rqstp->rq_client = NULL;
return rqstp->rq_authop->set_client(rqstp);
}
EXPORT_SYMBOL_GPL(svc_set_client);
-/* A request, which was authenticated, has now executed.
- * Time to finalise the credentials and verifier
- * and release and resources
+/**
+ * svc_authorise - Finalize credentials/verifier and release resources
+ * @rqstp: RPC execution context
+ *
+ * Returns zero on success, or a negative errno.
*/
int svc_authorise(struct svc_rqst *rqstp)
{
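Since svc_authenticate() and the ->accept/->set_client methods now return enum svc_auth_status, callers consume the result with an exhaustive switch, mirroring the one added to svc_process_common(). A minimal sketch:

	enum svc_auth_status auth_res = svc_authenticate(rqstp);

	switch (auth_res) {
	case SVC_OK:
		break;			/* proceed to program dispatch */
	case SVC_GARBAGE:
	case SVC_SYSERR:
	case SVC_DENIED:
	case SVC_CLOSE:
	case SVC_DROP:
	case SVC_COMPLETE:
		/* handled exactly as before the type change */
		break;
	default:
		pr_warn_once("Unexpected svc_auth_status (%d)\n", auth_res);
		break;
	}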
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 174783f804fa..04b45588ae6f 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -665,7 +665,7 @@ static struct group_info *unix_gid_find(kuid_t uid, struct svc_rqst *rqstp)
}
}
-int
+enum svc_auth_status
svcauth_unix_set_client(struct svc_rqst *rqstp)
{
struct sockaddr_in *sin;
@@ -736,7 +736,6 @@ out:
rqstp->rq_auth_stat = rpc_auth_ok;
return SVC_OK;
}
-
EXPORT_SYMBOL_GPL(svcauth_unix_set_client);
/**
@@ -751,7 +750,7 @@ EXPORT_SYMBOL_GPL(svcauth_unix_set_client);
*
* rqstp->rq_auth_stat is set as mandated by RFC 5531.
*/
-static int
+static enum svc_auth_status
svcauth_null_accept(struct svc_rqst *rqstp)
{
struct xdr_stream *xdr = &rqstp->rq_arg_stream;
@@ -828,7 +827,7 @@ struct auth_ops svcauth_null = {
*
* rqstp->rq_auth_stat is set as mandated by RFC 5531.
*/
-static int
+static enum svc_auth_status
svcauth_tls_accept(struct svc_rqst *rqstp)
{
struct xdr_stream *xdr = &rqstp->rq_arg_stream;
@@ -913,7 +912,7 @@ struct auth_ops svcauth_tls = {
*
* rqstp->rq_auth_stat is set as mandated by RFC 5531.
*/
-static int
+static enum svc_auth_status
svcauth_unix_accept(struct svc_rqst *rqstp)
{
struct xdr_stream *xdr = &rqstp->rq_arg_stream;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 8c9a8ee76aa0..998687421fa6 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -36,6 +36,8 @@
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
+#include <linux/bvec.h>
+
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
@@ -695,9 +697,10 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
.msg_name = &rqstp->rq_addr,
.msg_namelen = rqstp->rq_addrlen,
.msg_control = cmh,
+ .msg_flags = MSG_SPLICE_PAGES,
.msg_controllen = sizeof(buffer),
};
- unsigned int sent;
+ unsigned int count;
int err;
svc_udp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
@@ -710,22 +713,23 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
if (svc_xprt_is_dead(xprt))
goto out_notconn;
- err = xdr_alloc_bvec(xdr, GFP_KERNEL);
- if (err < 0)
- goto out_unlock;
+ count = xdr_buf_to_bvec(rqstp->rq_bvec,
+ ARRAY_SIZE(rqstp->rq_bvec), xdr);
- err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+ count, 0);
+ err = sock_sendmsg(svsk->sk_sock, &msg);
if (err == -ECONNREFUSED) {
/* ICMP error on earlier request. */
- err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+ count, 0);
+ err = sock_sendmsg(svsk->sk_sock, &msg);
}
- xdr_free_bvec(xdr);
+
trace_svcsock_udp_send(xprt, err);
-out_unlock:
+
mutex_unlock(&xprt->xpt_mutex);
- if (err < 0)
- return err;
- return sent;
+ return err;
out_notconn:
mutex_unlock(&xprt->xpt_mutex);
@@ -1089,6 +1093,9 @@ static void svc_tcp_fragment_received(struct svc_sock *svsk)
/* If we have more data, signal svc_xprt_enqueue() to try again */
svsk->sk_tcplen = 0;
svsk->sk_marker = xdr_zero;
+
+ smp_wmb();
+ tcp_set_rcvlowat(svsk->sk_sk, 1);
}
/**
@@ -1178,10 +1185,17 @@ err_incomplete:
goto err_delete;
if (len == want)
svc_tcp_fragment_received(svsk);
- else
+ else {
+ /* Avoid more ->sk_data_ready() calls until the rest
+ * of the message has arrived. This reduces service
+ * thread wake-ups on large incoming messages. */
+ tcp_set_rcvlowat(svsk->sk_sk,
+ svc_sock_reclen(svsk) - svsk->sk_tcplen);
+
trace_svcsock_tcp_recv_short(&svsk->sk_xprt,
svc_sock_reclen(svsk),
svsk->sk_tcplen - sizeof(rpc_fraghdr));
+ }
goto err_noclose;
error:
if (len != -EAGAIN)
@@ -1198,75 +1212,51 @@ err_noclose:
return 0; /* record not complete */
}
-static int svc_tcp_send_kvec(struct socket *sock, const struct kvec *vec,
- int flags)
-{
- struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES | flags, };
-
- iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 1, vec->iov_len);
- return sock_sendmsg(sock, &msg);
-}
-
/*
* MSG_SPLICE_PAGES is used exclusively to reduce the number of
* copy operations in this path. Therefore the caller must ensure
* that the pages backing @xdr are unchanging.
*
- * In addition, the logic assumes that * .bv_len is never larger
- * than PAGE_SIZE.
+ * Note that the send is non-blocking. The caller has incremented
+ * the reference count on each page backing the RPC message, and
+ * the network layer will "put" these pages when transmission is
+ * complete.
+ *
+ * This is safe for our RPC services because the memory backing
+ * the head and tail components is never kmalloc'd. These always
+ * come from pages in the svc_rqst::rq_pages array.
*/
-static int svc_tcp_sendmsg(struct socket *sock, struct xdr_buf *xdr,
+static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
rpc_fraghdr marker, unsigned int *sentp)
{
- const struct kvec *head = xdr->head;
- const struct kvec *tail = xdr->tail;
- struct kvec rm = {
- .iov_base = &marker,
- .iov_len = sizeof(marker),
- };
struct msghdr msg = {
- .msg_flags = 0,
+ .msg_flags = MSG_SPLICE_PAGES,
};
+ unsigned int count;
+ void *buf;
int ret;
*sentp = 0;
- ret = xdr_alloc_bvec(xdr, GFP_KERNEL);
- if (ret < 0)
- return ret;
-
- ret = kernel_sendmsg(sock, &msg, &rm, 1, rm.iov_len);
- if (ret < 0)
- return ret;
- *sentp += ret;
- if (ret != rm.iov_len)
- return -EAGAIN;
-
- ret = svc_tcp_send_kvec(sock, head, 0);
- if (ret < 0)
- return ret;
- *sentp += ret;
- if (ret != head->iov_len)
- goto out;
- if (xdr_buf_pagecount(xdr))
- xdr->bvec[0].bv_offset = offset_in_page(xdr->page_base);
-
- msg.msg_flags = MSG_SPLICE_PAGES;
- iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, xdr->bvec,
- xdr_buf_pagecount(xdr), xdr->page_len);
- ret = sock_sendmsg(sock, &msg);
+ /* The stream record marker is copied into a temporary page
+ * fragment buffer so that it can be included in rq_bvec.
+ */
+ buf = page_frag_alloc(&svsk->sk_frag_cache, sizeof(marker),
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ memcpy(buf, &marker, sizeof(marker));
+ bvec_set_virt(rqstp->rq_bvec, buf, sizeof(marker));
+
+ count = xdr_buf_to_bvec(rqstp->rq_bvec + 1,
+ ARRAY_SIZE(rqstp->rq_bvec) - 1, &rqstp->rq_res);
+
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+ 1 + count, sizeof(marker) + rqstp->rq_res.len);
+ ret = sock_sendmsg(svsk->sk_sock, &msg);
if (ret < 0)
return ret;
*sentp += ret;
-
- if (tail->iov_len) {
- ret = svc_tcp_send_kvec(sock, tail, 0);
- if (ret < 0)
- return ret;
- *sentp += ret;
- }
-
-out:
return 0;
}
@@ -1292,23 +1282,17 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
svc_tcp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
rqstp->rq_xprt_ctxt = NULL;
- atomic_inc(&svsk->sk_sendqlen);
mutex_lock(&xprt->xpt_mutex);
if (svc_xprt_is_dead(xprt))
goto out_notconn;
- tcp_sock_set_cork(svsk->sk_sk, true);
- err = svc_tcp_sendmsg(svsk->sk_sock, xdr, marker, &sent);
- xdr_free_bvec(xdr);
+ err = svc_tcp_sendmsg(svsk, rqstp, marker, &sent);
trace_svcsock_tcp_send(xprt, err < 0 ? (long)err : sent);
if (err < 0 || sent != (xdr->len + sizeof(marker)))
goto out_close;
- if (atomic_dec_and_test(&svsk->sk_sendqlen))
- tcp_sock_set_cork(svsk->sk_sk, false);
mutex_unlock(&xprt->xpt_mutex);
return sent;
out_notconn:
- atomic_dec(&svsk->sk_sendqlen);
mutex_unlock(&xprt->xpt_mutex);
return -ENOTCONN;
out_close:
@@ -1317,7 +1301,6 @@ out_close:
(err < 0) ? "got error" : "sent",
(err < 0) ? err : sent, xdr->len);
svc_xprt_deferred_close(xprt);
- atomic_dec(&svsk->sk_sendqlen);
mutex_unlock(&xprt->xpt_mutex);
return -EAGAIN;
}
@@ -1644,6 +1627,7 @@ static void svc_tcp_sock_detach(struct svc_xprt *xprt)
static void svc_sock_free(struct svc_xprt *xprt)
{
struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
+ struct page_frag_cache *pfc = &svsk->sk_frag_cache;
struct socket *sock = svsk->sk_sock;
trace_svcsock_free(svsk, sock);
@@ -1653,5 +1637,8 @@ static void svc_sock_free(struct svc_xprt *xprt)
sockfd_put(sock);
else
sock_release(sock);
+ if (pfc->va)
+ __page_frag_cache_drain(virt_to_head_page(pfc->va),
+ pfc->pagecnt_bias);
kfree(svsk);
}
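The common shape of the new send path shared by svc_udp_sendto() and svc_tcp_sendmsg(): convert rq_res into a bio_vec array once, then hand it to sock_sendmsg() with MSG_SPLICE_PAGES so the pages are spliced rather than copied. A condensed sketch:

	struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES };
	unsigned int count;
	int err;

	count = xdr_buf_to_bvec(rqstp->rq_bvec, ARRAY_SIZE(rqstp->rq_bvec),
				&rqstp->rq_res);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
		      count, rqstp->rq_res.len);
	err = sock_sendmsg(sock, &msg);		/* pages must stay unchanged
						 * until the skbs are freed */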
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 2a22e78af116..62e07c330a66 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -165,6 +165,56 @@ xdr_free_bvec(struct xdr_buf *buf)
}
/**
+ * xdr_buf_to_bvec - Copy components of an xdr_buf into a bio_vec array
+ * @bvec: bio_vec array to populate
+ * @bvec_size: element count of @bvec
+ * @xdr: xdr_buf to be copied
+ *
+ * Returns the number of entries consumed in @bvec.
+ */
+unsigned int xdr_buf_to_bvec(struct bio_vec *bvec, unsigned int bvec_size,
+ const struct xdr_buf *xdr)
+{
+ const struct kvec *head = xdr->head;
+ const struct kvec *tail = xdr->tail;
+ unsigned int count = 0;
+
+ if (head->iov_len) {
+ bvec_set_virt(bvec++, head->iov_base, head->iov_len);
+ ++count;
+ }
+
+ if (xdr->page_len) {
+ unsigned int offset, len, remaining;
+ struct page **pages = xdr->pages;
+
+ offset = offset_in_page(xdr->page_base);
+ remaining = xdr->page_len;
+ while (remaining > 0) {
+ len = min_t(unsigned int, remaining,
+ PAGE_SIZE - offset);
+ bvec_set_page(bvec++, *pages++, len, offset);
+ remaining -= len;
+ offset = 0;
+ if (unlikely(++count > bvec_size))
+ goto bvec_overflow;
+ }
+ }
+
+ if (tail->iov_len) {
+ bvec_set_virt(bvec, tail->iov_base, tail->iov_len);
+ if (unlikely(++count > bvec_size))
+ goto bvec_overflow;
+ }
+
+ return count;
+
+bvec_overflow:
+ pr_warn_once("%s: bio_vec array overflow\n", __func__);
+ return count - 1;
+}
+
+/**
* xdr_inline_pages - Prepare receive buffer for a large reply
* @xdr: xdr_buf into which reply will be placed
* @offset: expected offset where data payload will start, in bytes
@@ -1288,6 +1338,14 @@ static unsigned int xdr_set_tail_base(struct xdr_stream *xdr,
return xdr_set_iov(xdr, buf->tail, base, len);
}
+static void xdr_stream_unmap_current_page(struct xdr_stream *xdr)
+{
+ if (xdr->page_kaddr) {
+ kunmap_local(xdr->page_kaddr);
+ xdr->page_kaddr = NULL;
+ }
+}
+
static unsigned int xdr_set_page_base(struct xdr_stream *xdr,
unsigned int base, unsigned int len)
{
@@ -1305,12 +1363,18 @@ static unsigned int xdr_set_page_base(struct xdr_stream *xdr,
if (len > maxlen)
len = maxlen;
+ xdr_stream_unmap_current_page(xdr);
xdr_stream_page_set_pos(xdr, base);
base += xdr->buf->page_base;
pgnr = base >> PAGE_SHIFT;
xdr->page_ptr = &xdr->buf->pages[pgnr];
- kaddr = page_address(*xdr->page_ptr);
+
+ if (PageHighMem(*xdr->page_ptr)) {
+ xdr->page_kaddr = kmap_local_page(*xdr->page_ptr);
+ kaddr = xdr->page_kaddr;
+ } else
+ kaddr = page_address(*xdr->page_ptr);
pgoff = base & ~PAGE_MASK;
xdr->p = (__be32*)(kaddr + pgoff);
@@ -1364,6 +1428,7 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p,
struct rpc_rqst *rqst)
{
xdr->buf = buf;
+ xdr->page_kaddr = NULL;
xdr_reset_scratch_buffer(xdr);
xdr->nwords = XDR_QUADLEN(buf->len);
if (xdr_set_iov(xdr, buf->head, 0, buf->len) == 0 &&
@@ -1396,6 +1461,16 @@ void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
}
EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
+/**
+ * xdr_finish_decode - Clean up the xdr_stream after decoding data.
+ * @xdr: pointer to xdr_stream struct
+ */
+void xdr_finish_decode(struct xdr_stream *xdr)
+{
+ xdr_stream_unmap_current_page(xdr);
+}
+EXPORT_SYMBOL(xdr_finish_decode);
+
static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
unsigned int nwords = XDR_QUADLEN(nbytes);
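To make the pairing explicit: every xdr_init_decode() now needs a matching xdr_finish_decode() once decoding is done, because xdr_set_page_base() may have taken a kmap_local mapping on a highmem page. A minimal sketch (handle_word() is a placeholder consumer):

	struct xdr_stream xdr;
	__be32 *p;

	xdr_init_decode(&xdr, &rqstp->rq_arg,
			rqstp->rq_arg.head[0].iov_base, NULL);
	p = xdr_inline_decode(&xdr, XDR_UNIT);
	if (p)
		handle_word(be32_to_cpup(p));
	xdr_finish_decode(&xdr);	/* drops any kmap_local mapping */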
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 5e5ff6784ef5..da409450dfc0 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -593,7 +593,6 @@ void xprt_rdma_cleanup(void);
int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
unsigned int xprt_rdma_bc_max_slots(struct rpc_xprt *);
-int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst);
void xprt_rdma_bc_free_rqst(struct rpc_rqst *);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 268a2cc61acd..71cd916e384f 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2237,9 +2237,13 @@ static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
struct socket *sock)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+ struct net *net = sock_net(sock->sk);
+ unsigned long connect_timeout;
+ unsigned long syn_retries;
unsigned int keepidle;
unsigned int keepcnt;
unsigned int timeo;
+ unsigned long t;
spin_lock(&xprt->transport_lock);
keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ);
@@ -2257,6 +2261,35 @@ static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
/* TCP user timeout (see RFC5482) */
tcp_sock_set_user_timeout(sock->sk, timeo);
+
+ /* Connect timeout */
+ connect_timeout = max_t(unsigned long,
+ DIV_ROUND_UP(xprt->connect_timeout, HZ), 1);
+ syn_retries = max_t(unsigned long,
+ READ_ONCE(net->ipv4.sysctl_tcp_syn_retries), 1);
+ for (t = 0; t <= syn_retries && (1UL << t) < connect_timeout; t++)
+ ;
+ if (t <= syn_retries)
+ tcp_sock_set_syncnt(sock->sk, t - 1);
+}
+
+static void xs_tcp_do_set_connect_timeout(struct rpc_xprt *xprt,
+ unsigned long connect_timeout)
+{
+ struct sock_xprt *transport =
+ container_of(xprt, struct sock_xprt, xprt);
+ struct rpc_timeout to;
+ unsigned long initval;
+
+ memcpy(&to, xprt->timeout, sizeof(to));
+ /* Arbitrary lower limit */
+ initval = max_t(unsigned long, connect_timeout, XS_TCP_INIT_REEST_TO);
+ to.to_initval = initval;
+ to.to_maxval = initval;
+ to.to_retries = 0;
+ memcpy(&transport->tcp_timeout, &to, sizeof(transport->tcp_timeout));
+ xprt->timeout = &transport->tcp_timeout;
+ xprt->connect_timeout = connect_timeout;
}
static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt,
@@ -2264,25 +2297,12 @@ static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt,
unsigned long reconnect_timeout)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
- struct rpc_timeout to;
- unsigned long initval;
spin_lock(&xprt->transport_lock);
if (reconnect_timeout < xprt->max_reconnect_timeout)
xprt->max_reconnect_timeout = reconnect_timeout;
- if (connect_timeout < xprt->connect_timeout) {
- memcpy(&to, xprt->timeout, sizeof(to));
- initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1);
- /* Arbitrary lower limit */
- if (initval < XS_TCP_INIT_REEST_TO << 1)
- initval = XS_TCP_INIT_REEST_TO << 1;
- to.to_initval = initval;
- to.to_maxval = initval;
- memcpy(&transport->tcp_timeout, &to,
- sizeof(transport->tcp_timeout));
- xprt->timeout = &transport->tcp_timeout;
- xprt->connect_timeout = connect_timeout;
- }
+ if (connect_timeout < xprt->connect_timeout)
+ xs_tcp_do_set_connect_timeout(xprt, connect_timeout);
set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
spin_unlock(&xprt->transport_lock);
}
@@ -3335,8 +3355,13 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
xprt->timeout = &xs_tcp_default_timeout;
xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
+ if (args->reconnect_timeout)
+ xprt->max_reconnect_timeout = args->reconnect_timeout;
+
xprt->connect_timeout = xprt->timeout->to_initval *
(xprt->timeout->to_retries + 1);
+ if (args->connect_timeout)
+ xs_tcp_do_set_connect_timeout(xprt, args->connect_timeout);
INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
INIT_WORK(&transport->error_worker, xs_error_handle);
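The syncnt computation above leans on the fact that TCP retransmits SYNs with an exponentially growing timeout that starts at roughly one second, so t retransmissions cover about 2^t seconds. A standalone rendering of the same arithmetic; the one-second base and the example figures are assumptions for illustration only:

    #include <stdio.h>

    /* Smallest retry budget whose ~2^t seconds of SYN backoff covers the
     * requested timeout; -1 means "leave the sysctl default alone". */
    static int syncnt_for_timeout(unsigned long timeout_secs,
                                  unsigned long max_syn_retries)
    {
            unsigned long t;

            for (t = 0; t <= max_syn_retries && (1UL << t) < timeout_secs; t++)
                    ;
            return t <= max_syn_retries ? (int)t - 1 : -1;
    }

    int main(void)
    {
            printf("60s, 6 retries -> syncnt %d\n", syncnt_for_timeout(60, 6)); /* 5  */
            printf("15s, 6 retries -> syncnt %d\n", syncnt_for_timeout(15, 6)); /* 3  */
            printf("90s, 6 retries -> syncnt %d\n", syncnt_for_timeout(90, 6)); /* -1 */
            return 0;
    }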
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 86930a8ed012..3e8a04a13668 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -680,7 +680,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
* What the above comment does talk about? --ANK(980817)
*/
- if (unix_tot_inflight)
+ if (READ_ONCE(unix_tot_inflight))
unix_gc(); /* Garbage collect fds */
}
diff --git a/net/unix/scm.c b/net/unix/scm.c
index e9dde7176c8a..6ff628f2349f 100644
--- a/net/unix/scm.c
+++ b/net/unix/scm.c
@@ -64,7 +64,7 @@ void unix_inflight(struct user_struct *user, struct file *fp)
/* Paired with READ_ONCE() in wait_for_unix_gc() */
WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
}
- user->unix_inflight++;
+ WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);
spin_unlock(&unix_gc_lock);
}
@@ -85,7 +85,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
/* Paired with READ_ONCE() in wait_for_unix_gc() */
WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
}
- user->unix_inflight--;
+ WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);
spin_unlock(&unix_gc_lock);
}
@@ -99,7 +99,7 @@ static inline bool too_many_unix_fds(struct task_struct *p)
{
struct user_struct *user = current_user();
- if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
+ if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE)))
return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
return false;
}
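All three hunks apply the same pattern: the counters are still only modified with unix_gc_lock held, but too_many_unix_fds() and wait_for_unix_gc() read them without taking that lock, so both sides get ONCE annotations to keep each access a single untorn load/store and to document the data race for KCSAN. A simplified userspace rendering of the pattern; the macro definitions here are reductions of the kernel ones, not the real implementation:

    #include <pthread.h>

    #define WRITE_ONCE(x, val)  (*(volatile __typeof__(x) *)&(x) = (val))
    #define READ_ONCE(x)        (*(volatile __typeof__(x) *)&(x))

    static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long inflight;

    static void inc_inflight(void)              /* writer: lock held */
    {
            pthread_mutex_lock(&gc_lock);
            WRITE_ONCE(inflight, inflight + 1);
            pthread_mutex_unlock(&gc_lock);
    }

    static int over_limit(unsigned long limit)  /* reader: no lock taken */
    {
            return READ_ONCE(inflight) > limit;
    }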
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index fcfc8472f73d..55f8b9b0e06d 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -602,7 +602,7 @@ static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
if (unlikely(i >= MAX_SKB_FRAGS))
- return ERR_PTR(-EFAULT);
+ return ERR_PTR(-EOVERFLOW);
page = pool->umem->pgs[addr >> PAGE_SHIFT];
get_page(page);
@@ -655,15 +655,17 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
skb_put(skb, len);
err = skb_store_bits(skb, 0, buffer, len);
- if (unlikely(err))
+ if (unlikely(err)) {
+ kfree_skb(skb);
goto free_err;
+ }
} else {
int nr_frags = skb_shinfo(skb)->nr_frags;
struct page *page;
u8 *vaddr;
if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
- err = -EFAULT;
+ err = -EOVERFLOW;
goto free_err;
}
@@ -690,12 +692,14 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
return skb;
free_err:
- if (err == -EAGAIN) {
- xsk_cq_cancel_locked(xs, 1);
- } else {
- xsk_set_destructor_arg(skb);
- xsk_drop_skb(skb);
+ if (err == -EOVERFLOW) {
+ /* Drop the packet */
+ xsk_set_destructor_arg(xs->skb);
+ xsk_drop_skb(xs->skb);
xskq_cons_release(xs->tx);
+ } else {
+ /* Let application retry */
+ xsk_cq_cancel_locked(xs, 1);
}
return ERR_PTR(err);
@@ -738,7 +742,7 @@ static int __xsk_generic_xmit(struct sock *sk)
skb = xsk_build_skb(xs, &desc);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
- if (err == -EAGAIN)
+ if (err != -EOVERFLOW)
goto out;
err = 0;
continue;
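After this change the caller treats -EOVERFLOW as "this descriptor can never fit, drop it and move on", while any other error leaves the descriptor in the TX ring so it can be retried later. From userspace that split is visible only through the completion ring; a copy-mode transmit kick in the usual style just keeps re-kicking on the transient errnos. The snippet below is illustrative only and is not taken from this patch:

    #include <errno.h>
    #include <sys/socket.h>

    /* Kick the kernel to service the AF_XDP TX ring; returns 0 on success,
     * 1 if the caller should poll and kick again, -errno otherwise. */
    static int kick_tx(int xsk_fd)
    {
            if (sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0) >= 0)
                    return 0;
            if (errno == EAGAIN || errno == EBUSY ||
                errno == ENOBUFS || errno == EINTR)
                    return 1;
            return -errno;
    }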
diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
index c014217f5fa7..22b36c8143cf 100644
--- a/net/xdp/xsk_diag.c
+++ b/net/xdp/xsk_diag.c
@@ -111,6 +111,9 @@ static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
sock_diag_save_cookie(sk, msg->xdiag_cookie);
mutex_lock(&xs->mutex);
+ if (READ_ONCE(xs->state) == XSK_UNBOUND)
+ goto out_nlmsg_trim;
+
if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb))
goto out_nlmsg_trim;
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index 40cd13eca82e..2fe6f2828d37 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -6,7 +6,79 @@
# They are independent, and can be combined like W=12 or W=123e.
# ==========================================================================
-KBUILD_CFLAGS += $(call cc-disable-warning, packed-not-aligned)
+# Default set of warnings, always enabled
+KBUILD_CFLAGS += -Wall
+KBUILD_CFLAGS += -Wundef
+KBUILD_CFLAGS += -Werror=implicit-function-declaration
+KBUILD_CFLAGS += -Werror=implicit-int
+KBUILD_CFLAGS += -Werror=return-type
+KBUILD_CFLAGS += -Werror=strict-prototypes
+KBUILD_CFLAGS += -Wno-format-security
+KBUILD_CFLAGS += -Wno-trigraphs
+KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+
+ifneq ($(CONFIG_FRAME_WARN),0)
+KBUILD_CFLAGS += -Wframe-larger-than=$(CONFIG_FRAME_WARN)
+endif
+
+KBUILD_CPPFLAGS-$(CONFIG_WERROR) += -Werror
+KBUILD_CPPFLAGS += $(KBUILD_CPPFLAGS-y)
+KBUILD_CFLAGS-$(CONFIG_CC_NO_ARRAY_BOUNDS) += -Wno-array-bounds
+
+ifdef CONFIG_CC_IS_CLANG
+# The kernel builds with '-std=gnu11' so use of GNU extensions is acceptable.
+KBUILD_CFLAGS += -Wno-gnu
+else
+
+# gcc inanely warns about local variables called 'main'
+KBUILD_CFLAGS += -Wno-main
+endif
+
+# These warnings generated too much noise in a regular build.
+# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+
+# These result in bogus false positives
+KBUILD_CFLAGS += $(call cc-disable-warning, dangling-pointer)
+
+# Variable Length Arrays (VLAs) should not be used anywhere in the kernel
+KBUILD_CFLAGS += -Wvla
+
+# disable pointer signed / unsigned warnings in gcc 4.0
+KBUILD_CFLAGS += -Wno-pointer-sign
+
+# In order to make sure new function cast mismatches are not introduced
+# in the kernel (to avoid tripping CFI checking), the kernel should be
+# globally built with -Wcast-function-type.
+KBUILD_CFLAGS += $(call cc-option, -Wcast-function-type)
+
+# The allocators already balk at large sizes, so silence the compiler
+# warnings for bounds checks involving those possible values. While
+# -Wno-alloc-size-larger-than would normally be used here, earlier versions
+# of gcc (<9.1) weirdly don't handle the option correctly when _other_
+# warnings are produced (?!). Using -Walloc-size-larger-than=SIZE_MAX
+# doesn't work (as it is documented to), silently resolving to "0" prior to
+# version 9.1 (and producing an error more recently). Numeric values larger
+# than PTRDIFF_MAX also don't work prior to version 9.1, which are silently
+# ignored, continuing to default to PTRDIFF_MAX. So, left with no other
+# choice, we must perform a versioned check to disable this warning.
+# https://lore.kernel.org/lkml/20210824115859.187f272f@canb.auug.org.au
+KBUILD_CFLAGS-$(call gcc-min-version, 90100) += -Wno-alloc-size-larger-than
+KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
+
+# Prohibit date/time macros, which would make the build non-deterministic
+KBUILD_CFLAGS += -Werror=date-time
+
+# enforce correct pointer usage
+KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)
+
+# Require designated initializers for all marked structures
+KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init)
+
+# Warn if there is an enum types mismatch
+KBUILD_CFLAGS += $(call cc-option,-Wenum-conversion)
# backward compatibility
KBUILD_EXTRA_WARN ?= $(KBUILD_ENABLE_EXTRA_GCC_CHECKS)
@@ -24,6 +96,7 @@ ifneq ($(findstring 1, $(KBUILD_EXTRA_WARN)),)
KBUILD_CFLAGS += -Wextra -Wunused -Wno-unused-parameter
KBUILD_CFLAGS += -Wmissing-declarations
+KBUILD_CFLAGS += $(call cc-option, -Wrestrict)
KBUILD_CFLAGS += -Wmissing-format-attribute
KBUILD_CFLAGS += -Wmissing-prototypes
KBUILD_CFLAGS += -Wold-style-definition
@@ -31,12 +104,10 @@ KBUILD_CFLAGS += -Wmissing-include-dirs
KBUILD_CFLAGS += $(call cc-option, -Wunused-but-set-variable)
KBUILD_CFLAGS += $(call cc-option, -Wunused-const-variable)
KBUILD_CFLAGS += $(call cc-option, -Wpacked-not-aligned)
+KBUILD_CFLAGS += $(call cc-option, -Wformat-overflow)
+KBUILD_CFLAGS += $(call cc-option, -Wformat-truncation)
+KBUILD_CFLAGS += $(call cc-option, -Wstringop-overflow)
KBUILD_CFLAGS += $(call cc-option, -Wstringop-truncation)
-# The following turn off the warnings enabled by -Wextra
-KBUILD_CFLAGS += -Wno-missing-field-initializers
-KBUILD_CFLAGS += -Wno-sign-compare
-KBUILD_CFLAGS += -Wno-type-limits
-KBUILD_CFLAGS += -Wno-shift-negative-value
KBUILD_CPPFLAGS += -Wundef
KBUILD_CPPFLAGS += -DKBUILD_EXTRA_WARN1
@@ -45,9 +116,16 @@ else
# Some diagnostics enabled by default are noisy.
# Suppress them by using -Wno... except for W=1.
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, restrict)
+KBUILD_CFLAGS += $(call cc-disable-warning, packed-not-aligned)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
+KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow)
+KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
ifdef CONFIG_CC_IS_CLANG
-KBUILD_CFLAGS += -Wno-initializer-overrides
# Clang before clang-16 would warn on default argument promotions.
ifneq ($(call clang-min-version, 160000),y)
# Disable -Wformat
@@ -61,7 +139,6 @@ ifeq ($(call clang-min-version, 120000),y)
KBUILD_CFLAGS += -Wformat-insufficient-args
endif
endif
-KBUILD_CFLAGS += -Wno-sign-compare
KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare
KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access)
@@ -83,8 +160,25 @@ KBUILD_CFLAGS += -Wtype-limits
KBUILD_CFLAGS += $(call cc-option, -Wmaybe-uninitialized)
KBUILD_CFLAGS += $(call cc-option, -Wunused-macros)
+ifdef CONFIG_CC_IS_CLANG
+KBUILD_CFLAGS += -Winitializer-overrides
+endif
+
KBUILD_CPPFLAGS += -DKBUILD_EXTRA_WARN2
+else
+
+# The following turn off the warnings enabled by -Wextra
+KBUILD_CFLAGS += -Wno-missing-field-initializers
+KBUILD_CFLAGS += -Wno-type-limits
+KBUILD_CFLAGS += -Wno-shift-negative-value
+
+ifdef CONFIG_CC_IS_CLANG
+KBUILD_CFLAGS += -Wno-initializer-overrides
+else
+KBUILD_CFLAGS += -Wno-maybe-uninitialized
+endif
+
endif
#
@@ -106,6 +200,11 @@ KBUILD_CFLAGS += $(call cc-option, -Wpacked-bitfield-compat)
KBUILD_CPPFLAGS += -DKBUILD_EXTRA_WARN3
+else
+
+# The following turn off the warnings enabled by -Wextra
+KBUILD_CFLAGS += -Wno-sign-compare
+
endif
#
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index ab0c5bd1a60f..c59cc57286ba 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -9,6 +9,35 @@ __modinst:
include include/config/auto.conf
include $(srctree)/scripts/Kbuild.include
+install-y :=
+
+ifeq ($(KBUILD_EXTMOD)$(sign-only),)
+
+# remove the old directory and symlink
+$(shell rm -fr $(MODLIB)/kernel $(MODLIB)/build)
+
+install-$(CONFIG_MODULES) += $(addprefix $(MODLIB)/, build modules.order)
+
+$(MODLIB)/build: FORCE
+ $(call cmd,symlink)
+
+quiet_cmd_symlink = SYMLINK $@
+ cmd_symlink = ln -s $(CURDIR) $@
+
+$(MODLIB)/modules.order: modules.order FORCE
+ $(call cmd,install_modorder)
+
+quiet_cmd_install_modorder = INSTALL $@
+ cmd_install_modorder = sed 's:^\(.*\)\.o$$:kernel/\1.ko:' $< > $@
+
+# Install modules.builtin(.modinfo) even when CONFIG_MODULES is disabled.
+install-y += $(addprefix $(MODLIB)/, modules.builtin modules.builtin.modinfo)
+
+$(addprefix $(MODLIB)/, modules.builtin modules.builtin.modinfo): $(MODLIB)/%: % FORCE
+ $(call cmd,install)
+
+endif
+
modules := $(call read-file, $(MODORDER))
ifeq ($(KBUILD_EXTMOD),)
@@ -27,15 +56,16 @@ suffix-$(CONFIG_MODULE_COMPRESS_XZ) := .xz
suffix-$(CONFIG_MODULE_COMPRESS_ZSTD) := .zst
modules := $(patsubst $(extmod_prefix)%.o, $(dst)/%.ko$(suffix-y), $(modules))
+install-$(CONFIG_MODULES) += $(modules)
-__modinst: $(modules)
+__modinst: $(install-y)
@:
#
# Installation
#
quiet_cmd_install = INSTALL $@
- cmd_install = mkdir -p $(dir $@); cp $< $@
+ cmd_install = cp $< $@
# Strip
#
@@ -65,7 +95,6 @@ endif
# Signing
# Don't stop modules_install even if we can't sign external modules.
#
-ifeq ($(CONFIG_MODULE_SIG_ALL),y)
ifeq ($(filter pkcs11:%, $(CONFIG_MODULE_SIG_KEY)),)
sig-key := $(if $(wildcard $(CONFIG_MODULE_SIG_KEY)),,$(srctree)/)$(CONFIG_MODULE_SIG_KEY)
else
@@ -74,18 +103,34 @@ endif
quiet_cmd_sign = SIGN $@
cmd_sign = scripts/sign-file $(CONFIG_MODULE_SIG_HASH) "$(sig-key)" certs/signing_key.x509 $@ \
$(if $(KBUILD_EXTMOD),|| true)
-else
+
+ifeq ($(sign-only),)
+
+# During modules_install, modules are signed only when CONFIG_MODULE_SIG_ALL=y.
+ifndef CONFIG_MODULE_SIG_ALL
quiet_cmd_sign :=
cmd_sign := :
endif
-ifeq ($(modules_sign_only),)
+# Create necessary directories
+$(shell mkdir -p $(sort $(dir $(install-y))))
$(dst)/%.ko: $(extmod_prefix)%.ko FORCE
$(call cmd,install)
$(call cmd,strip)
$(call cmd,sign)
+ifdef CONFIG_MODULES
+__modinst: depmod
+
+PHONY += depmod
+depmod: $(install-y)
+ $(call cmd,depmod)
+
+quiet_cmd_depmod = DEPMOD $(MODLIB)
+ cmd_depmod = $(srctree)/scripts/depmod.sh $(KERNELRELEASE)
+endif
+
else
$(dst)/%.ko: FORCE
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 39472e834b63..739402f45509 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -41,6 +41,7 @@ include $(srctree)/scripts/Kbuild.include
MODPOST = scripts/mod/modpost
modpost-args = \
+ $(if $(CONFIG_MODULES),-M) \
$(if $(CONFIG_MODVERSIONS),-m) \
$(if $(CONFIG_MODULE_SRCVERSION_ALL),-a) \
$(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \
diff --git a/scripts/Makefile.package b/scripts/Makefile.package
index 92dbc889bd7c..2bcab02da965 100644
--- a/scripts/Makefile.package
+++ b/scripts/Makefile.package
@@ -11,7 +11,6 @@ TAR_CONTENT := Documentation LICENSES arch block certs crypto drivers fs \
samples scripts security sound tools usr virt \
.config Makefile \
Kbuild Kconfig COPYING $(wildcard localversion*)
-MKSPEC := $(srctree)/scripts/package/mkspec
quiet_cmd_src_tar = TAR $(2).tar.gz
cmd_src_tar = \
@@ -66,30 +65,38 @@ $(linux-tarballs): archive-args = --prefix=linux/ $$(cat $<)
$(linux-tarballs): .tmp_HEAD FORCE
$(call if_changed,archive)
-# rpm-pkg
+# rpm-pkg srcrpm-pkg binrpm-pkg
# ---------------------------------------------------------------------------
-PHONY += rpm-pkg
-rpm-pkg: srpm = $(shell rpmspec --srpm --query --queryformat='%{name}-%{VERSION}-%{RELEASE}.src.rpm' kernel.spec)
-rpm-pkg: srcrpm-pkg
- +rpmbuild $(RPMOPTS) --target $(UTS_MACHINE)-linux -rb $(srpm) \
- --define='_smp_mflags %{nil}'
-# srcrpm-pkg
-# ---------------------------------------------------------------------------
-PHONY += srcrpm-pkg
-srcrpm-pkg: linux.tar.gz
- $(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec
- +rpmbuild $(RPMOPTS) --target $(UTS_MACHINE)-linux -bs kernel.spec \
- --define='_smp_mflags %{nil}' --define='_sourcedir rpmbuild/SOURCES' --define='_srcrpmdir .'
+quiet_cmd_mkspec = GEN $@
+ cmd_mkspec = $(srctree)/scripts/package/mkspec > $@
-# binrpm-pkg
-# ---------------------------------------------------------------------------
-PHONY += binrpm-pkg
-binrpm-pkg:
- $(MAKE) -f $(srctree)/Makefile
- $(CONFIG_SHELL) $(MKSPEC) prebuilt > $(objtree)/binkernel.spec
- +rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \
- $(UTS_MACHINE)-linux -bb $(objtree)/binkernel.spec
+kernel.spec: FORCE
+ $(call cmd,mkspec)
+
+PHONY += rpm-sources
+rpm-sources: linux.tar.gz
+ $(Q)mkdir -p rpmbuild/SOURCES
+ $(Q)ln -f linux.tar.gz rpmbuild/SOURCES/linux.tar.gz
+ $(Q)cp $(KCONFIG_CONFIG) rpmbuild/SOURCES/config
+ $(Q)$(srctree)/scripts/package/gen-diff-patch rpmbuild/SOURCES/diff.patch
+
+PHONY += rpm-pkg srcrpm-pkg binrpm-pkg
+
+rpm-pkg: private build-type := a
+srcrpm-pkg: private build-type := s
+binrpm-pkg: private build-type := b
+
+rpm-pkg srcrpm-pkg: rpm-sources
+rpm-pkg srcrpm-pkg binrpm-pkg: kernel.spec
+ +$(strip rpmbuild -b$(build-type) kernel.spec \
+ --define='_topdir $(abspath rpmbuild)' \
+ $(if $(filter a b, $(build-type)), \
+ --target $(UTS_MACHINE)-linux --build-in-place --noprep --define='_smp_mflags %{nil}' \
+ $$(rpm -q rpm >/dev/null 2>&1 || echo --nodeps)) \
+ $(if $(filter b, $(build-type)), \
+ --without devel) \
+ $(RPMOPTS))
# deb-pkg srcdeb-pkg bindeb-pkg
# ---------------------------------------------------------------------------
@@ -141,14 +148,10 @@ deb-pkg srcdeb-pkg bindeb-pkg:
$(if $(findstring source, $(build-type)), \
--unsigned-source --compression=$(KDEB_SOURCE_COMPRESS)) \
$(if $(findstring binary, $(build-type)), \
- -r$(KBUILD_PKG_ROOTCMD) -a$$(cat debian/arch), \
+ --rules-file='$(MAKE) -f debian/rules' --jobs=1 -r$(KBUILD_PKG_ROOTCMD) -a$$(cat debian/arch), \
--no-check-builddeps) \
$(DPKG_FLAGS))
-PHONY += intdeb-pkg
-intdeb-pkg:
- +$(CONFIG_SHELL) $(srctree)/scripts/package/builddeb
-
# snap-pkg
# ---------------------------------------------------------------------------
PHONY += snap-pkg
diff --git a/scripts/bpf_doc.py b/scripts/bpf_doc.py
index eaae2ce78381..61b7dddedc46 100755
--- a/scripts/bpf_doc.py
+++ b/scripts/bpf_doc.py
@@ -59,9 +59,9 @@ class Helper(APIElement):
Break down helper function protocol into smaller chunks: return type,
name, distinct arguments.

"""
- arg_re = re.compile('((\w+ )*?(\w+|...))( (\**)(\w+))?$')
+ arg_re = re.compile(r'((\w+ )*?(\w+|...))( (\**)(\w+))?$')
res = {}
- proto_re = re.compile('(.+) (\**)(\w+)\(((([^,]+)(, )?){1,5})\)$')
+ proto_re = re.compile(r'(.+) (\**)(\w+)\(((([^,]+)(, )?){1,5})\)$')
capture = proto_re.match(self.proto)
res['ret_type'] = capture.group(1)
@@ -114,11 +114,11 @@ class HeaderParser(object):
return Helper(proto=proto, desc=desc, ret=ret)
def parse_symbol(self):
- p = re.compile(' \* ?(BPF\w+)$')
+ p = re.compile(r' \* ?(BPF\w+)$')
capture = p.match(self.line)
if not capture:
raise NoSyscallCommandFound
- end_re = re.compile(' \* ?NOTES$')
+ end_re = re.compile(r' \* ?NOTES$')
end = end_re.match(self.line)
if end:
raise NoSyscallCommandFound
@@ -133,7 +133,7 @@ class HeaderParser(object):
# - Same as above, with "const" and/or "struct" in front of type
# - "..." (undefined number of arguments, for bpf_trace_printk())
# There is at least one term ("void"), and at most five arguments.
- p = re.compile(' \* ?((.+) \**\w+\((((const )?(struct )?(\w+|\.\.\.)( \**\w+)?)(, )?){1,5}\))$')
+ p = re.compile(r' \* ?((.+) \**\w+\((((const )?(struct )?(\w+|\.\.\.)( \**\w+)?)(, )?){1,5}\))$')
capture = p.match(self.line)
if not capture:
raise NoHelperFound
@@ -141,7 +141,7 @@ class HeaderParser(object):
return capture.group(1)
def parse_desc(self, proto):
- p = re.compile(' \* ?(?:\t| {5,8})Description$')
+ p = re.compile(r' \* ?(?:\t| {5,8})Description$')
capture = p.match(self.line)
if not capture:
raise Exception("No description section found for " + proto)
@@ -154,7 +154,7 @@ class HeaderParser(object):
if self.line == ' *\n':
desc += '\n'
else:
- p = re.compile(' \* ?(?:\t| {5,8})(?:\t| {8})(.*)')
+ p = re.compile(r' \* ?(?:\t| {5,8})(?:\t| {8})(.*)')
capture = p.match(self.line)
if capture:
desc_present = True
@@ -167,7 +167,7 @@ class HeaderParser(object):
return desc
def parse_ret(self, proto):
- p = re.compile(' \* ?(?:\t| {5,8})Return$')
+ p = re.compile(r' \* ?(?:\t| {5,8})Return$')
capture = p.match(self.line)
if not capture:
raise Exception("No return section found for " + proto)
@@ -180,7 +180,7 @@ class HeaderParser(object):
if self.line == ' *\n':
ret += '\n'
else:
- p = re.compile(' \* ?(?:\t| {5,8})(?:\t| {8})(.*)')
+ p = re.compile(r' \* ?(?:\t| {5,8})(?:\t| {8})(.*)')
capture = p.match(self.line)
if capture:
ret_present = True
@@ -219,12 +219,12 @@ class HeaderParser(object):
self.seek_to('enum bpf_cmd {',
'Could not find start of bpf_cmd enum', 0)
# Searches for either one or more BPF\w+ enums
- bpf_p = re.compile('\s*(BPF\w+)+')
+ bpf_p = re.compile(r'\s*(BPF\w+)+')
# Searches for an enum entry assigned to another entry,
# for e.g. BPF_PROG_RUN = BPF_PROG_TEST_RUN, which is
# not documented hence should be skipped in check to
# determine if the right number of syscalls are documented
- assign_p = re.compile('\s*(BPF\w+)\s*=\s*(BPF\w+)')
+ assign_p = re.compile(r'\s*(BPF\w+)\s*=\s*(BPF\w+)')
bpf_cmd_str = ''
while True:
capture = assign_p.match(self.line)
@@ -239,7 +239,7 @@ class HeaderParser(object):
break
self.line = self.reader.readline()
# Find the number of occurrences of BPF\w+
- self.enum_syscalls = re.findall('(BPF\w+)+', bpf_cmd_str)
+ self.enum_syscalls = re.findall(r'(BPF\w+)+', bpf_cmd_str)
def parse_desc_helpers(self):
self.seek_to(helpersDocStart,
@@ -263,7 +263,7 @@ class HeaderParser(object):
self.seek_to('#define ___BPF_FUNC_MAPPER(FN, ctx...)',
'Could not find start of eBPF helper definition list')
# Searches for one FN(\w+) define or a backslash for newline
- p = re.compile('\s*FN\((\w+), (\d+), ##ctx\)|\\\\')
+ p = re.compile(r'\s*FN\((\w+), (\d+), ##ctx\)|\\\\')
fn_defines_str = ''
i = 0
while True:
@@ -278,7 +278,7 @@ class HeaderParser(object):
break
self.line = self.reader.readline()
# Find the number of occurrences of FN(\w+)
- self.define_unique_helpers = re.findall('FN\(\w+, \d+, ##ctx\)', fn_defines_str)
+ self.define_unique_helpers = re.findall(r'FN\(\w+, \d+, ##ctx\)', fn_defines_str)
def validate_helpers(self):
last_helper = ''
@@ -425,7 +425,7 @@ class PrinterRST(Printer):
try:
cmd = ['git', 'log', '-1', '--pretty=format:%cs', '--no-patch',
'-L',
- '/{}/,/\*\//:include/uapi/linux/bpf.h'.format(delimiter)]
+ '/{}/,/\\*\\//:include/uapi/linux/bpf.h'.format(delimiter)]
date = subprocess.run(cmd, cwd=linuxRoot,
capture_output=True, check=True)
return date.stdout.decode().rstrip()
@@ -516,7 +516,7 @@ as "Dual BSD/GPL", may be used). Some helper functions are only accessible to
programs that are compatible with the GNU Privacy License (GPL).
In order to use such helpers, the eBPF program must be loaded with the correct
-license string passed (via **attr**) to the **bpf**\ () system call, and this
+license string passed (via **attr**) to the **bpf**\\ () system call, and this
generally translates into the C source code of the program containing a line
similar to the following:
@@ -550,7 +550,7 @@ may be interested in:
* The bpftool utility can be used to probe the availability of helper functions
on the system (as well as supported program and map types, and a number of
other parameters). To do so, run **bpftool feature probe** (see
- **bpftool-feature**\ (8) for details). Add the **unprivileged** keyword to
+ **bpftool-feature**\\ (8) for details). Add the **unprivileged** keyword to
list features available to unprivileged users.
Compatibility between helper functions and program types can generally be found
@@ -562,23 +562,23 @@ other functions, themselves allowing access to additional helpers. The
requirement for GPL license is also in those **struct bpf_func_proto**.
Compatibility between helper functions and map types can be found in the
-**check_map_func_compatibility**\ () function in file *kernel/bpf/verifier.c*.
+**check_map_func_compatibility**\\ () function in file *kernel/bpf/verifier.c*.
Helper functions that invalidate the checks on **data** and **data_end**
pointers for network processing are listed in function
-**bpf_helper_changes_pkt_data**\ () in file *net/core/filter.c*.
+**bpf_helper_changes_pkt_data**\\ () in file *net/core/filter.c*.
SEE ALSO
========
-**bpf**\ (2),
-**bpftool**\ (8),
-**cgroups**\ (7),
-**ip**\ (8),
-**perf_event_open**\ (2),
-**sendmsg**\ (2),
-**socket**\ (7),
-**tc-bpf**\ (8)'''
+**bpf**\\ (2),
+**bpftool**\\ (8),
+**cgroups**\\ (7),
+**ip**\\ (8),
+**perf_event_open**\\ (2),
+**sendmsg**\\ (2),
+**socket**\\ (7),
+**tc-bpf**\\ (8)'''
print(footer)
def print_proto(self, helper):
@@ -598,7 +598,7 @@ SEE ALSO
one_arg = '{}{}'.format(comma, a['type'])
if a['name']:
if a['star']:
- one_arg += ' {}**\ '.format(a['star'].replace('*', '\\*'))
+ one_arg += ' {}**\\ '.format(a['star'].replace('*', '\\*'))
else:
one_arg += '** '
one_arg += '*{}*\\ **'.format(a['name'])
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
index 3643b4f896ed..e22da27fe13e 100755
--- a/scripts/depmod.sh
+++ b/scripts/depmod.sh
@@ -1,14 +1,16 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
#
-# A depmod wrapper used by the toplevel Makefile
+# A depmod wrapper
-if test $# -ne 2; then
- echo "Usage: $0 /sbin/depmod <kernelrelease>" >&2
+if test $# -ne 1; then
+ echo "Usage: $0 <kernelrelease>" >&2
exit 1
fi
-DEPMOD=$1
-KERNELRELEASE=$2
+
+KERNELRELEASE=$1
+
+: ${DEPMOD:=depmod}
if ! test -r System.map ; then
echo "Warning: modules_install: missing 'System.map' file. Skipping depmod." >&2
@@ -23,33 +25,8 @@ if [ -z $(command -v $DEPMOD) ]; then
exit 0
fi
-# older versions of depmod require the version string to start with three
-# numbers, so we cheat with a symlink here
-depmod_hack_needed=true
-tmp_dir=$(mktemp -d ${TMPDIR:-/tmp}/depmod.XXXXXX)
-mkdir -p "$tmp_dir/lib/modules/$KERNELRELEASE"
-if "$DEPMOD" -b "$tmp_dir" $KERNELRELEASE 2>/dev/null; then
- if test -e "$tmp_dir/lib/modules/$KERNELRELEASE/modules.dep" -o \
- -e "$tmp_dir/lib/modules/$KERNELRELEASE/modules.dep.bin"; then
- depmod_hack_needed=false
- fi
-fi
-rm -rf "$tmp_dir"
-if $depmod_hack_needed; then
- symlink="$INSTALL_MOD_PATH/lib/modules/99.98.$KERNELRELEASE"
- ln -s "$KERNELRELEASE" "$symlink"
- KERNELRELEASE=99.98.$KERNELRELEASE
-fi
-
set -- -ae -F System.map
if test -n "$INSTALL_MOD_PATH"; then
set -- "$@" -b "$INSTALL_MOD_PATH"
fi
-"$DEPMOD" "$@" "$KERNELRELEASE"
-ret=$?
-
-if $depmod_hack_needed; then
- rm -f "$symlink"
-fi
-
-exit $ret
+exec "$DEPMOD" "$@" "$KERNELRELEASE"
diff --git a/scripts/dummy-tools/gcc b/scripts/dummy-tools/gcc
index 1db1889f6d81..07f6dc4c5cf6 100755
--- a/scripts/dummy-tools/gcc
+++ b/scripts/dummy-tools/gcc
@@ -85,8 +85,7 @@ if arg_contain -S "$@"; then
fi
# For arch/powerpc/tools/gcc-check-mprofile-kernel.sh
- if arg_contain -m64 "$@" && arg_contain -mlittle-endian "$@" &&
- arg_contain -mprofile-kernel "$@"; then
+ if arg_contain -m64 "$@" && arg_contain -mprofile-kernel "$@"; then
if ! test -t 0 && ! grep -q notrace; then
echo "_mcount"
fi
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index af1c96198f49..4eee155121a8 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -93,11 +93,13 @@ endif
%_defconfig: $(obj)/conf
$(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$@ $(Kconfig)
-configfiles=$(wildcard $(srctree)/kernel/configs/$@ $(srctree)/arch/$(SRCARCH)/configs/$@)
+configfiles = $(wildcard $(srctree)/kernel/configs/$(1) $(srctree)/arch/$(SRCARCH)/configs/$(1))
+all-config-fragments = $(call configfiles,*.config)
+config-fragments = $(call configfiles,$@)
%.config: $(obj)/conf
- $(if $(call configfiles),, $(error No configuration exists for this target on this architecture))
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m .config $(configfiles)
+ $(if $(config-fragments),, $(error $@ fragment does not exist on this architecture))
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m .config $(config-fragments)
$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
PHONY += tinyconfig
@@ -115,6 +117,7 @@ clean-files += tests/.cache
# Help text used by make help
help:
+ @echo 'Configuration targets:'
@echo ' config - Update current config utilising a line-oriented program'
@echo ' nconfig - Update current config utilising a ncurses menu based program'
@echo ' menuconfig - Update current config utilising a menu based program'
@@ -141,6 +144,12 @@ help:
@echo ' default value without prompting'
@echo ' tinyconfig - Configure the tiniest possible kernel'
@echo ' testconfig - Run Kconfig unit tests (requires python3 and pytest)'
+ @echo ''
+ @echo 'Configuration topic targets:'
+ @$(foreach f, $(all-config-fragments), \
+ if help=$$(grep -m1 '^# Help: ' $(f)); then \
+ printf ' %-25s - %s\n' '$(notdir $(f))' "$${help#*: }"; \
+ fi;)
# ===========================================================================
# object files used by all kconfig flavours
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 992575f1e976..4a6811d77d18 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -349,7 +349,11 @@ int conf_read_simple(const char *name, int def)
char *p, *p2;
struct symbol *sym;
int i, def_flags;
+ const char *warn_unknown;
+ const char *werror;
+ warn_unknown = getenv("KCONFIG_WARN_UNKNOWN_SYMBOLS");
+ werror = getenv("KCONFIG_WERROR");
if (name) {
in = zconf_fopen(name);
} else {
@@ -437,6 +441,10 @@ load:
if (def == S_DEF_USER) {
sym = sym_find(line + 2 + strlen(CONFIG_));
if (!sym) {
+ if (warn_unknown)
+ conf_warning("unknown symbol: %s",
+ line + 2 + strlen(CONFIG_));
+
conf_set_changed(true);
continue;
}
@@ -471,7 +479,7 @@ load:
sym = sym_find(line + strlen(CONFIG_));
if (!sym) {
- if (def == S_DEF_AUTO)
+ if (def == S_DEF_AUTO) {
/*
* Reading from include/config/auto.conf
* If CONFIG_FOO previously existed in
@@ -479,8 +487,13 @@ load:
* include/config/FOO must be touched.
*/
conf_touch_dep(line + strlen(CONFIG_));
- else
+ } else {
+ if (warn_unknown)
+ conf_warning("unknown symbol: %s",
+ line + strlen(CONFIG_));
+
conf_set_changed(true);
+ }
continue;
}
@@ -519,6 +532,10 @@ load:
}
free(line);
fclose(in);
+
+ if (conf_warnings && werror)
+ exit(1);
+
return 0;
}
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 9c9caca5bd5f..4a9a23b1b7e1 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -275,7 +275,6 @@ struct jump_key {
struct list_head entries;
size_t offset;
struct menu *target;
- int index;
};
extern struct file *file_list;
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index e7118d62a45f..471a59acecec 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -101,6 +101,7 @@ const char *menu_get_prompt(struct menu *menu);
struct menu *menu_get_parent_menu(struct menu *menu);
bool menu_has_help(struct menu *menu);
const char *menu_get_help(struct menu *menu);
+int get_jump_key_char(void);
struct gstr get_relations_str(struct symbol **sym_arr, struct list_head *head);
void menu_get_ext_help(struct menu *menu, struct gstr *help);
diff --git a/scripts/kconfig/lxdialog/dialog.h b/scripts/kconfig/lxdialog/dialog.h
index 347daf25fdc8..a501abf9fa31 100644
--- a/scripts/kconfig/lxdialog/dialog.h
+++ b/scripts/kconfig/lxdialog/dialog.h
@@ -196,13 +196,9 @@ int first_alpha(const char *string, const char *exempt);
int dialog_yesno(const char *title, const char *prompt, int height, int width);
int dialog_msgbox(const char *title, const char *prompt, int height,
int width, int pause);
-
-
-typedef void (*update_text_fn)(char *buf, size_t start, size_t end, void
- *_data);
-int dialog_textbox(const char *title, char *tbuf, int initial_height,
- int initial_width, int *keys, int *_vscroll, int *_hscroll,
- update_text_fn update_text, void *data);
+int dialog_textbox(const char *title, const char *tbuf, int initial_height,
+ int initial_width, int *_vscroll, int *_hscroll,
+ int (*extra_key_cb)(int, size_t, size_t, void *), void *data);
int dialog_menu(const char *title, const char *prompt,
const void *selected, int *s_scroll);
int dialog_checklist(const char *title, const char *prompt, int height,
diff --git a/scripts/kconfig/lxdialog/textbox.c b/scripts/kconfig/lxdialog/textbox.c
index bc4d4fb1dc75..058ed0e5bbd5 100644
--- a/scripts/kconfig/lxdialog/textbox.c
+++ b/scripts/kconfig/lxdialog/textbox.c
@@ -10,8 +10,8 @@
static int hscroll;
static int begin_reached, end_reached, page_length;
-static char *buf;
-static char *page;
+static const char *buf, *page;
+static size_t start, end;
/*
* Go back 'n' lines in text. Called by dialog_textbox().
@@ -98,21 +98,10 @@ static void print_line(WINDOW *win, int row, int width)
/*
* Print a new page of text.
*/
-static void print_page(WINDOW *win, int height, int width, update_text_fn
- update_text, void *data)
+static void print_page(WINDOW *win, int height, int width)
{
int i, passed_end = 0;
- if (update_text) {
- char *end;
-
- for (i = 0; i < height; i++)
- get_line();
- end = page;
- back_lines(height);
- update_text(buf, page - buf, end - buf, data);
- }
-
page_length = 0;
for (i = 0; i < height; i++) {
print_line(win, i, width);
@@ -142,24 +131,26 @@ static void print_position(WINDOW *win)
* refresh window content
*/
static void refresh_text_box(WINDOW *dialog, WINDOW *box, int boxh, int boxw,
- int cur_y, int cur_x, update_text_fn update_text,
- void *data)
+ int cur_y, int cur_x)
{
- print_page(box, boxh, boxw, update_text, data);
+ start = page - buf;
+
+ print_page(box, boxh, boxw);
print_position(dialog);
wmove(dialog, cur_y, cur_x); /* Restore cursor position */
wrefresh(dialog);
+
+ end = page - buf;
}
/*
* Display text from a file in a dialog box.
*
* keys is a null-terminated array
- * update_text() may not add or remove any '\n' or '\0' in tbuf
*/
-int dialog_textbox(const char *title, char *tbuf, int initial_height,
- int initial_width, int *keys, int *_vscroll, int *_hscroll,
- update_text_fn update_text, void *data)
+int dialog_textbox(const char *title, const char *tbuf, int initial_height,
+ int initial_width, int *_vscroll, int *_hscroll,
+ int (*extra_key_cb)(int, size_t, size_t, void *), void *data)
{
int i, x, y, cur_x, cur_y, key = 0;
int height, width, boxh, boxw;
@@ -239,8 +230,7 @@ do_resize:
/* Print first page of text */
attr_clear(box, boxh, boxw, dlg.dialog.atr);
- refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text,
- data);
+ refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x);
while (!done) {
key = wgetch(dialog);
@@ -259,8 +249,7 @@ do_resize:
begin_reached = 1;
page = buf;
refresh_text_box(dialog, box, boxh, boxw,
- cur_y, cur_x, update_text,
- data);
+ cur_y, cur_x);
}
break;
case 'G': /* Last page */
@@ -270,8 +259,7 @@ do_resize:
/* point to last char in buf */
page = buf + strlen(buf);
back_lines(boxh);
- refresh_text_box(dialog, box, boxh, boxw, cur_y,
- cur_x, update_text, data);
+ refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x);
break;
case 'K': /* Previous line */
case 'k':
@@ -280,8 +268,7 @@ do_resize:
break;
back_lines(page_length + 1);
- refresh_text_box(dialog, box, boxh, boxw, cur_y,
- cur_x, update_text, data);
+ refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x);
break;
case 'B': /* Previous page */
case 'b':
@@ -290,8 +277,7 @@ do_resize:
if (begin_reached)
break;
back_lines(page_length + boxh);
- refresh_text_box(dialog, box, boxh, boxw, cur_y,
- cur_x, update_text, data);
+ refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x);
break;
case 'J': /* Next line */
case 'j':
@@ -300,8 +286,7 @@ do_resize:
break;
back_lines(page_length - 1);
- refresh_text_box(dialog, box, boxh, boxw, cur_y,
- cur_x, update_text, data);
+ refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x);
break;
case KEY_NPAGE: /* Next page */
case ' ':
@@ -310,8 +295,7 @@ do_resize:
break;
begin_reached = 0;
- refresh_text_box(dialog, box, boxh, boxw, cur_y,
- cur_x, update_text, data);
+ refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x);
break;
case '0': /* Beginning of line */
case 'H': /* Scroll left */
@@ -326,8 +310,7 @@ do_resize:
hscroll--;
/* Reprint current page to scroll horizontally */
back_lines(page_length);
- refresh_text_box(dialog, box, boxh, boxw, cur_y,
- cur_x, update_text, data);
+ refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x);
break;
case 'L': /* Scroll right */
case 'l':
@@ -337,8 +320,7 @@ do_resize:
hscroll++;
/* Reprint current page to scroll horizontally */
back_lines(page_length);
- refresh_text_box(dialog, box, boxh, boxw, cur_y,
- cur_x, update_text, data);
+ refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x);
break;
case KEY_ESC:
if (on_key_esc(dialog) == KEY_ESC)
@@ -351,11 +333,9 @@ do_resize:
on_key_resize();
goto do_resize;
default:
- for (i = 0; keys[i]; i++) {
- if (key == keys[i]) {
- done = true;
- break;
- }
+ if (extra_key_cb && extra_key_cb(key, start, end, data)) {
+ done = true;
+ break;
}
}
}
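The callback now receives, besides the pressed key, the byte offsets of the first and one-past-last character of the page being displayed, so it can restrict itself to targets that are actually visible. Reduced to plain C, the visibility test the frontends build on this is just an interval check; the offsets below are made up for illustration:

    #include <stdio.h>
    #include <stddef.h>

    /* A recorded jump target is reachable from the current page when its
     * offset into the displayed text falls inside [start, end). */
    static int on_screen(size_t offset, size_t start, size_t end)
    {
            return offset >= start && offset < end;
    }

    int main(void)
    {
            size_t targets[] = { 12, 240, 805 };   /* offsets of "(N)" entries */
            size_t start = 200, end = 600;         /* page currently shown */

            for (size_t i = 0; i < sizeof(targets) / sizeof(targets[0]); i++)
                    printf("target at %zu: %s\n", targets[i],
                           on_screen(targets[i], start, end) ? "visible" : "off-screen");
            return 0;
    }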
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 53d8834d12fe..eccc87a441e7 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -22,8 +22,6 @@
#include "lkc.h"
#include "lxdialog/dialog.h"
-#define JUMP_NB 9
-
static const char mconf_readme[] =
"Overview\n"
"--------\n"
@@ -288,6 +286,7 @@ static int single_menu_mode;
static int show_all_options;
static int save_and_exit;
static int silent;
+static int jump_key_char;
static void conf(struct menu *menu, struct menu *active_menu);
@@ -348,19 +347,19 @@ static void reset_subtitle(void)
set_dialog_subtitles(subtitles);
}
-static int show_textbox_ext(const char *title, char *text, int r, int c, int
- *keys, int *vscroll, int *hscroll, update_text_fn
- update_text, void *data)
+static int show_textbox_ext(const char *title, const char *text, int r, int c,
+ int *vscroll, int *hscroll,
+ int (*extra_key_cb)(int, size_t, size_t, void *),
+ void *data)
{
dialog_clear();
- return dialog_textbox(title, text, r, c, keys, vscroll, hscroll,
- update_text, data);
+ return dialog_textbox(title, text, r, c, vscroll, hscroll,
+ extra_key_cb, data);
}
static void show_textbox(const char *title, const char *text, int r, int c)
{
- show_textbox_ext(title, (char *) text, r, c, (int []) {0}, NULL, NULL,
- NULL, NULL);
+ show_textbox_ext(title, text, r, c, NULL, NULL, NULL, NULL);
}
static void show_helptext(const char *title, const char *text)
@@ -381,35 +380,54 @@ static void show_help(struct menu *menu)
struct search_data {
struct list_head *head;
- struct menu **targets;
- int *keys;
+ struct menu *target;
};
-static void update_text(char *buf, size_t start, size_t end, void *_data)
+static int next_jump_key(int key)
+{
+ if (key < '1' || key > '9')
+ return '1';
+
+ key++;
+
+ if (key > '9')
+ key = '1';
+
+ return key;
+}
+
+static int handle_search_keys(int key, size_t start, size_t end, void *_data)
{
struct search_data *data = _data;
struct jump_key *pos;
- int k = 0;
+ int index = 0;
+
+ if (key < '1' || key > '9')
+ return 0;
list_for_each_entry(pos, data->head, entries) {
- if (pos->offset >= start && pos->offset < end) {
- char header[4];
+ index = next_jump_key(index);
- if (k < JUMP_NB) {
- int key = '0' + (pos->index % JUMP_NB) + 1;
+ if (pos->offset < start)
+ continue;
- sprintf(header, "(%c)", key);
- data->keys[k] = key;
- data->targets[k] = pos->target;
- k++;
- } else {
- sprintf(header, " ");
- }
+ if (pos->offset >= end)
+ break;
- memcpy(buf + pos->offset, header, sizeof(header) - 1);
+ if (key == index) {
+ data->target = pos->target;
+ return 1;
}
}
- data->keys[k] = 0;
+
+ return 0;
+}
+
+int get_jump_key_char(void)
+{
+ jump_key_char = next_jump_key(jump_key_char);
+
+ return jump_key_char;
}
static void search_conf(void)
@@ -456,26 +474,23 @@ again:
sym_arr = sym_re_search(dialog_input);
do {
LIST_HEAD(head);
- struct menu *targets[JUMP_NB];
- int keys[JUMP_NB + 1], i;
struct search_data data = {
.head = &head,
- .targets = targets,
- .keys = keys,
};
struct jump_key *pos, *tmp;
+ jump_key_char = 0;
res = get_relations_str(sym_arr, &head);
set_subtitle();
dres = show_textbox_ext("Search Results", str_get(&res), 0, 0,
- keys, &vscroll, &hscroll, &update_text,
- &data);
+ &vscroll, &hscroll,
+ handle_search_keys, &data);
again = false;
- for (i = 0; i < JUMP_NB && keys[i]; i++)
- if (dres == keys[i]) {
- conf(targets[i]->parent, targets[i]);
- again = true;
- }
+ if (dres >= '1' && dres <= '9') {
+ assert(data.target != NULL);
+ conf(data.target->parent, data.target);
+ again = true;
+ }
str_free(&res);
list_for_each_entry_safe(pos, tmp, &head, entries)
free(pos);
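Instead of capping the search results at JUMP_NB reachable targets, the shortcut characters now simply cycle through '1'..'9' as the list is rendered, and handle_search_keys() re-runs the same cycle against the visible page to resolve a keypress. The cycling is easy to see in isolation; this is the helper from the hunk above wrapped in a tiny driver:

    #include <stdio.h>

    static int next_jump_key(int key)
    {
            if (key < '1' || key > '9')
                    return '1';
            key++;
            if (key > '9')
                    key = '1';
            return key;
    }

    int main(void)
    {
            int key = 0;

            for (int i = 0; i < 12; i++) {
                    key = next_jump_key(key);
                    putchar(key);
            }
            putchar('\n');   /* prints 123456789123 */
            return 0;
    }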
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index b90fff833588..61c442d84aef 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -701,6 +701,11 @@ static void get_dep_str(struct gstr *r, struct expr *expr, const char *prefix)
}
}
+int __attribute__((weak)) get_jump_key_char(void)
+{
+ return -1;
+}
+
static void get_prompt_str(struct gstr *r, struct property *prop,
struct list_head *head)
{
@@ -730,24 +735,27 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
}
if (head && location) {
jump = xmalloc(sizeof(struct jump_key));
-
jump->target = location;
-
- if (list_empty(head))
- jump->index = 0;
- else
- jump->index = list_entry(head->prev, struct jump_key,
- entries)->index + 1;
-
list_add_tail(&jump->entries, head);
}
str_printf(r, " Location:\n");
- for (j = 4; --i >= 0; j += 2) {
+ for (j = 0; --i >= 0; j++) {
+ int jk = -1;
+ int indent = 2 * j + 4;
+
menu = submenu[i];
- if (jump && menu == location)
+ if (jump && menu == location) {
jump->offset = strlen(r->s);
- str_printf(r, "%*c-> %s", j, ' ', menu_get_prompt(menu));
+ jk = get_jump_key_char();
+ }
+
+ if (jk >= 0) {
+ str_printf(r, "(%c)", jk);
+ indent -= 3;
+ }
+
+ str_printf(r, "%*c-> %s", indent, ' ', menu_get_prompt(menu));
if (menu->sym) {
str_printf(r, " (%s [=%s])", menu->sym->name ?
menu->sym->name : "<choice>",
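Combined with the frontend override of get_jump_key_char(), the "Location:" trail is now indented two further columns per level starting from column four, and a "(N)" shortcut prefix gives back three of those columns so the arrows keep lining up, which is the layout the updated nconf help text shows. A throwaway rendering of that layout; the menu names are taken from that help text and nothing else here is from the patch:

    #include <stdio.h>

    int main(void)
    {
            const char *trail[] = {
                    "Bus options (PCI, PCMCIA, EISA, ISA)",
                    "PCI support (PCI [=y])",
                    "PCI access mode (<choice> [=y])",
            };
            int target = 2;               /* innermost entry gets the jump key */

            puts("  Location:");
            for (int j = 0; j < 3; j++) {
                    int indent = 2 * j + 4;

                    if (j == target) {
                            printf("(1)");
                            indent -= 3;
                    }
                    printf("%*c-> %s\n", indent, ' ', trail[j]);
            }
            return 0;
    }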
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index 3ba8b1af390f..143a2c351d57 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -220,7 +220,7 @@ search_help[] =
"Location:\n"
" -> Bus options (PCI, PCMCIA, EISA, ISA)\n"
" -> PCI support (PCI [ = y])\n"
-" -> PCI access mode (<choice> [ = y])\n"
+"(1) -> PCI access mode (<choice> [ = y])\n"
"Selects: LIBCRC32\n"
"Selected by: BAR\n"
"-----------------------------------------------------------------\n"
@@ -231,9 +231,13 @@ search_help[] =
"o The 'Depends on:' line lists symbols that need to be defined for\n"
" this symbol to be visible and selectable in the menu.\n"
"o The 'Location:' lines tell, where in the menu structure this symbol\n"
-" is located. A location followed by a [ = y] indicates that this is\n"
-" a selectable menu item, and the current value is displayed inside\n"
-" brackets.\n"
+" is located.\n"
+" A location followed by a [ = y] indicates that this is\n"
+" a selectable menu item, and the current value is displayed inside\n"
+" brackets.\n"
+" Press the key in the (#) prefix to jump directly to that\n"
+" location. You will be returned to the current search results\n"
+" after exiting this new menu.\n"
"o The 'Selects:' line tells, what symbol will be automatically selected\n"
" if this symbol is selected (y or m).\n"
"o The 'Selected by' line tells what symbol has selected this symbol.\n"
@@ -275,7 +279,9 @@ static const char *current_instructions = menu_instructions;
static char *dialog_input_result;
static int dialog_input_result_len;
+static int jump_key_char;
+static void selected_conf(struct menu *menu, struct menu *active_menu);
static void conf(struct menu *menu);
static void conf_choice(struct menu *menu);
static void conf_string(struct menu *menu);
@@ -685,6 +691,57 @@ static int do_exit(void)
return 0;
}
+struct search_data {
+ struct list_head *head;
+ struct menu *target;
+};
+
+static int next_jump_key(int key)
+{
+ if (key < '1' || key > '9')
+ return '1';
+
+ key++;
+
+ if (key > '9')
+ key = '1';
+
+ return key;
+}
+
+static int handle_search_keys(int key, size_t start, size_t end, void *_data)
+{
+ struct search_data *data = _data;
+ struct jump_key *pos;
+ int index = 0;
+
+ if (key < '1' || key > '9')
+ return 0;
+
+ list_for_each_entry(pos, data->head, entries) {
+ index = next_jump_key(index);
+
+ if (pos->offset < start)
+ continue;
+
+ if (pos->offset >= end)
+ break;
+
+ if (key == index) {
+ data->target = pos->target;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+int get_jump_key_char(void)
+{
+ jump_key_char = next_jump_key(jump_key_char);
+
+ return jump_key_char;
+}
static void search_conf(void)
{
@@ -692,7 +749,8 @@ static void search_conf(void)
struct gstr res;
struct gstr title;
char *dialog_input;
- int dres;
+ int dres, vscroll = 0, hscroll = 0;
+ bool again;
title = str_new();
str_printf( &title, "Enter (sub)string or regexp to search for "
@@ -721,11 +779,28 @@ again:
dialog_input += strlen(CONFIG_);
sym_arr = sym_re_search(dialog_input);
- res = get_relations_str(sym_arr, NULL);
+
+ do {
+ LIST_HEAD(head);
+ struct search_data data = {
+ .head = &head,
+ .target = NULL,
+ };
+ jump_key_char = 0;
+ res = get_relations_str(sym_arr, &head);
+ dres = show_scroll_win_ext(main_window,
+ "Search Results", str_get(&res),
+ &vscroll, &hscroll,
+ handle_search_keys, &data);
+ again = false;
+ if (dres >= '1' && dres <= '9') {
+ assert(data.target != NULL);
+ selected_conf(data.target->parent, data.target);
+ again = true;
+ }
+ str_free(&res);
+ } while (again);
free(sym_arr);
- show_scroll_win(main_window,
- "Search Results", str_get(&res));
- str_free(&res);
str_free(&title);
}
@@ -1063,9 +1138,14 @@ static int do_match(int key, struct match_state *state, int *ans)
static void conf(struct menu *menu)
{
+ selected_conf(menu, NULL);
+}
+
+static void selected_conf(struct menu *menu, struct menu *active_menu)
+{
struct menu *submenu = NULL;
struct symbol *sym;
- int res;
+ int i, res;
int current_index = 0;
int last_top_row = 0;
struct match_state match_state = {
@@ -1081,6 +1161,19 @@ static void conf(struct menu *menu)
if (!child_count)
break;
+ if (active_menu != NULL) {
+ for (i = 0; i < items_num; i++) {
+ struct mitem *mcur;
+
+ mcur = (struct mitem *) item_userptr(curses_menu_items[i]);
+ if ((struct menu *) mcur->usrptr == active_menu) {
+ current_index = i;
+ break;
+ }
+ }
+ active_menu = NULL;
+ }
+
show_menu(menu_get_prompt(menu), menu_instructions,
current_index, &last_top_row);
keypad((menu_win(curses_menu)), TRUE);
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
index 9aedf40f1dc0..25a7263ef3c8 100644
--- a/scripts/kconfig/nconf.gui.c
+++ b/scripts/kconfig/nconf.gui.c
@@ -497,11 +497,18 @@ void refresh_all_windows(WINDOW *main_window)
refresh();
}
-/* layman's scrollable window... */
void show_scroll_win(WINDOW *main_window,
const char *title,
const char *text)
{
+ (void)show_scroll_win_ext(main_window, title, (char *)text, NULL, NULL, NULL, NULL);
+}
+
+/* layman's scrollable window... */
+int show_scroll_win_ext(WINDOW *main_window, const char *title, char *text,
+ int *vscroll, int *hscroll,
+ extra_key_cb_fn extra_key_cb, void *data)
+{
int res;
int total_lines = get_line_no(text);
int x, y, lines, columns;
@@ -514,6 +521,12 @@ void show_scroll_win(WINDOW *main_window,
WINDOW *win;
WINDOW *pad;
PANEL *panel;
+ bool done = false;
+
+ if (hscroll)
+ start_x = *hscroll;
+ if (vscroll)
+ start_y = *vscroll;
getmaxyx(stdscr, lines, columns);
@@ -549,8 +562,7 @@ void show_scroll_win(WINDOW *main_window,
panel = new_panel(win);
/* handle scrolling */
- do {
-
+ while (!done) {
copywin(pad, win, start_y, start_x, 2, 2, text_lines,
text_cols, 0);
print_in_middle(win,
@@ -593,8 +605,18 @@ void show_scroll_win(WINDOW *main_window,
case 'l':
start_x++;
break;
+ default:
+ if (extra_key_cb) {
+ size_t start = (get_line(text, start_y) - text);
+ size_t end = (get_line(text, start_y + text_lines) - text);
+
+ if (extra_key_cb(res, start, end, data)) {
+ done = true;
+ break;
+ }
+ }
}
- if (res == 10 || res == 27 || res == 'q' ||
+ if (res == 0 || res == 10 || res == 27 || res == 'q' ||
res == KEY_F(F_HELP) || res == KEY_F(F_BACK) ||
res == KEY_F(F_EXIT))
break;
@@ -606,9 +628,14 @@ void show_scroll_win(WINDOW *main_window,
start_x = 0;
if (start_x >= total_cols-text_cols)
start_x = total_cols-text_cols;
- } while (res);
+ }
+ if (hscroll)
+ *hscroll = start_x;
+ if (vscroll)
+ *vscroll = start_y;
del_panel(panel);
delwin(win);
refresh_all_windows(main_window);
+ return res;
}
diff --git a/scripts/kconfig/nconf.h b/scripts/kconfig/nconf.h
index 6f925bc74eb3..ab836d582664 100644
--- a/scripts/kconfig/nconf.h
+++ b/scripts/kconfig/nconf.h
@@ -67,6 +67,8 @@ typedef enum {
void set_colors(void);
+typedef int (*extra_key_cb_fn)(int, size_t, size_t, void *);
+
/* this changes the windows attributes !!! */
void print_in_middle(WINDOW *win, int y, int width, const char *str, int attrs);
int get_line_length(const char *line);
@@ -78,6 +80,9 @@ int dialog_inputbox(WINDOW *main_window,
const char *title, const char *prompt,
const char *init, char **resultp, int *result_len);
void refresh_all_windows(WINDOW *main_window);
+int show_scroll_win_ext(WINDOW *main_window, const char *title, char *text,
+ int *vscroll, int *hscroll,
+ extra_key_cb_fn extra_key_cb, void *data);
void show_scroll_win(WINDOW *main_window,
const char *title,
const char *text);
diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
index 748da578b418..d1f5bcff4b62 100644
--- a/scripts/kconfig/preprocess.c
+++ b/scripts/kconfig/preprocess.c
@@ -396,6 +396,9 @@ static char *eval_clause(const char *str, size_t len, int argc, char *argv[])
p++;
}
+
+ if (new_argc >= FUNCTION_MAX_ARGS)
+ pperror("too many function arguments");
new_argv[new_argc++] = prev;
/*
diff --git a/scripts/kconfig/qconf-cfg.sh b/scripts/kconfig/qconf-cfg.sh
index 117f36e568fc..0e113b0f2455 100755
--- a/scripts/kconfig/qconf-cfg.sh
+++ b/scripts/kconfig/qconf-cfg.sh
@@ -5,7 +5,8 @@ cflags=$1
libs=$2
bin=$3
-PKG="Qt5Core Qt5Gui Qt5Widgets"
+PKG5="Qt5Core Qt5Gui Qt5Widgets"
+PKG6="Qt6Core Qt6Gui Qt6Widgets"
if [ -z "$(command -v ${HOSTPKG_CONFIG})" ]; then
echo >&2 "*"
@@ -14,16 +15,26 @@ if [ -z "$(command -v ${HOSTPKG_CONFIG})" ]; then
exit 1
fi
-if ${HOSTPKG_CONFIG} --exists $PKG; then
- ${HOSTPKG_CONFIG} --cflags ${PKG} > ${cflags}
- ${HOSTPKG_CONFIG} --libs ${PKG} > ${libs}
+if ${HOSTPKG_CONFIG} --exists $PKG6; then
+ ${HOSTPKG_CONFIG} --cflags ${PKG6} > ${cflags}
+ # Qt6 requires C++17.
+ echo -std=c++17 >> ${cflags}
+ ${HOSTPKG_CONFIG} --libs ${PKG6} > ${libs}
+ ${HOSTPKG_CONFIG} --variable=libexecdir Qt6Core > ${bin}
+ exit 0
+fi
+
+if ${HOSTPKG_CONFIG} --exists $PKG5; then
+ ${HOSTPKG_CONFIG} --cflags ${PKG5} > ${cflags}
+ ${HOSTPKG_CONFIG} --libs ${PKG5} > ${libs}
${HOSTPKG_CONFIG} --variable=host_bins Qt5Core > ${bin}
exit 0
fi
echo >&2 "*"
-echo >&2 "* Could not find Qt5 via ${HOSTPKG_CONFIG}."
-echo >&2 "* Please install Qt5 and make sure it's in PKG_CONFIG_PATH"
-echo >&2 "* You need $PKG"
+echo >&2 "* Could not find Qt6 or Qt5 via ${HOSTPKG_CONFIG}."
+echo >&2 "* Please install Qt6 or Qt5 and make sure it's in PKG_CONFIG_PATH"
+echo >&2 "* You need $PKG6 for Qt6"
+echo >&2 "* You need $PKG5 for Qt5"
echo >&2 "*"
exit 1
diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
index 78087b2d9ac6..620a3527c767 100644
--- a/scripts/kconfig/qconf.cc
+++ b/scripts/kconfig/qconf.cc
@@ -5,10 +5,10 @@
*/
#include <QAction>
+#include <QActionGroup>
#include <QApplication>
#include <QCloseEvent>
#include <QDebug>
-#include <QDesktopWidget>
#include <QFileDialog>
#include <QLabel>
#include <QLayout>
@@ -16,6 +16,8 @@
#include <QMenu>
#include <QMenuBar>
#include <QMessageBox>
+#include <QRegularExpression>
+#include <QScreen>
#include <QToolBar>
#include <stdlib.h>
@@ -1126,7 +1128,7 @@ QString ConfigInfoView::debug_info(struct symbol *sym)
QString ConfigInfoView::print_filter(const QString &str)
{
- QRegExp re("[<>&\"\\n]");
+ QRegularExpression re("[<>&\"\\n]");
QString res = str;
for (int i = 0; (i = res.indexOf(re, i)) >= 0;) {
switch (res[i].toLatin1()) {
@@ -1322,15 +1324,15 @@ ConfigMainWindow::ConfigMainWindow(void)
int width, height;
char title[256];
- QDesktopWidget *d = configApp->desktop();
snprintf(title, sizeof(title), "%s%s",
rootmenu.prompt->text,
""
);
setWindowTitle(title);
- width = configSettings->value("/window width", d->width() - 64).toInt();
- height = configSettings->value("/window height", d->height() - 64).toInt();
+ QRect g = configApp->primaryScreen()->geometry();
+ width = configSettings->value("/window width", g.width() - 64).toInt();
+ height = configSettings->value("/window height", g.height() - 64).toInt();
resize(width, height);
x = configSettings->value("/window x");
y = configSettings->value("/window y");
@@ -1379,17 +1381,17 @@ ConfigMainWindow::ConfigMainWindow(void)
this, &ConfigMainWindow::goBack);
QAction *quitAction = new QAction("&Quit", this);
- quitAction->setShortcut(Qt::CTRL + Qt::Key_Q);
+ quitAction->setShortcut(Qt::CTRL | Qt::Key_Q);
connect(quitAction, &QAction::triggered,
this, &ConfigMainWindow::close);
QAction *loadAction = new QAction(QPixmap(xpm_load), "&Load", this);
- loadAction->setShortcut(Qt::CTRL + Qt::Key_L);
+ loadAction->setShortcut(Qt::CTRL | Qt::Key_L);
connect(loadAction, &QAction::triggered,
this, &ConfigMainWindow::loadConfig);
saveAction = new QAction(QPixmap(xpm_save), "&Save", this);
- saveAction->setShortcut(Qt::CTRL + Qt::Key_S);
+ saveAction->setShortcut(Qt::CTRL | Qt::Key_S);
connect(saveAction, &QAction::triggered,
this, &ConfigMainWindow::saveConfig);
@@ -1403,7 +1405,7 @@ ConfigMainWindow::ConfigMainWindow(void)
connect(saveAsAction, &QAction::triggered,
this, &ConfigMainWindow::saveConfigAs);
QAction *searchAction = new QAction("&Find", this);
- searchAction->setShortcut(Qt::CTRL + Qt::Key_F);
+ searchAction->setShortcut(Qt::CTRL | Qt::Key_F);
connect(searchAction, &QAction::triggered,
this, &ConfigMainWindow::searchConfig);
singleViewAction = new QAction(QPixmap(xpm_single_view), "Single View", this);
@@ -1750,11 +1752,21 @@ void ConfigMainWindow::closeEvent(QCloseEvent* e)
e->accept();
return;
}
- QMessageBox mb("qconf", "Save configuration?", QMessageBox::Warning,
- QMessageBox::Yes | QMessageBox::Default, QMessageBox::No, QMessageBox::Cancel | QMessageBox::Escape);
- mb.setButtonText(QMessageBox::Yes, "&Save Changes");
- mb.setButtonText(QMessageBox::No, "&Discard Changes");
- mb.setButtonText(QMessageBox::Cancel, "Cancel Exit");
+
+ QMessageBox mb(QMessageBox::Icon::Warning, "qconf",
+ "Save configuration?");
+
+ QPushButton *yb = mb.addButton(QMessageBox::Yes);
+ QPushButton *db = mb.addButton(QMessageBox::No);
+ QPushButton *cb = mb.addButton(QMessageBox::Cancel);
+
+ yb->setText("&Save Changes");
+ db->setText("&Discard Changes");
+ cb->setText("Cancel Exit");
+
+ mb.setDefaultButton(yb);
+ mb.setEscapeButton(cb);
+
switch (mb.exec()) {
case QMessageBox::Yes:
if (saveConfig())
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index b29b29707f10..34a5386d444a 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -24,6 +24,7 @@
#include "../../include/linux/license.h"
#include "../../include/linux/module_symbol.h"
+static bool module_enabled;
/* Are we using CONFIG_MODVERSIONS? */
static bool modversions;
/* Is CONFIG_MODULE_SRCVERSION_ALL set? */
@@ -761,6 +762,7 @@ static const char *const section_white_list[] =
".fmt_slot*", /* EZchip */
".gnu.lto*",
".discard.*",
+ ".llvm.call-graph-profile", /* call graph */
NULL
};
@@ -1242,7 +1244,7 @@ static void check_section_mismatch(struct module *mod, struct elf_info *elf,
const char *tosec = sec_name(elf, get_secindex(elf, sym));
const struct sectioncheck *mismatch;
- if (elf->export_symbol_secndx == fsecndx) {
+ if (module_enabled && elf->export_symbol_secndx == fsecndx) {
check_export_symbol(mod, elf, faddr, tosec, sym);
return;
}
@@ -1256,21 +1258,16 @@ static void check_section_mismatch(struct module *mod, struct elf_info *elf,
tosec, taddr);
}
-static int addend_386_rel(uint32_t *location, Elf_Rela *r)
+static Elf_Addr addend_386_rel(uint32_t *location, unsigned int r_type)
{
- unsigned int r_typ = ELF_R_TYPE(r->r_info);
-
- switch (r_typ) {
+ switch (r_type) {
case R_386_32:
- r->r_addend = TO_NATIVE(*location);
- break;
+ return TO_NATIVE(*location);
case R_386_PC32:
- r->r_addend = TO_NATIVE(*location) + 4;
- break;
- default:
- r->r_addend = (Elf_Addr)(-1);
+ return TO_NATIVE(*location) + 4;
}
- return 0;
+
+ return (Elf_Addr)(-1);
}
#ifndef R_ARM_CALL
@@ -1314,32 +1311,28 @@ static int32_t sign_extend32(int32_t value, int index)
return (int32_t)(value << shift) >> shift;
}
-static int addend_arm_rel(void *loc, Elf_Sym *sym, Elf_Rela *r)
+static Elf_Addr addend_arm_rel(void *loc, Elf_Sym *sym, unsigned int r_type)
{
- unsigned int r_typ = ELF_R_TYPE(r->r_info);
uint32_t inst, upper, lower, sign, j1, j2;
int32_t offset;
- switch (r_typ) {
+ switch (r_type) {
case R_ARM_ABS32:
case R_ARM_REL32:
inst = TO_NATIVE(*(uint32_t *)loc);
- r->r_addend = inst + sym->st_value;
- break;
+ return inst + sym->st_value;
case R_ARM_MOVW_ABS_NC:
case R_ARM_MOVT_ABS:
inst = TO_NATIVE(*(uint32_t *)loc);
offset = sign_extend32(((inst & 0xf0000) >> 4) | (inst & 0xfff),
15);
- r->r_addend = offset + sym->st_value;
- break;
+ return offset + sym->st_value;
case R_ARM_PC24:
case R_ARM_CALL:
case R_ARM_JUMP24:
inst = TO_NATIVE(*(uint32_t *)loc);
offset = sign_extend32((inst & 0x00ffffff) << 2, 25);
- r->r_addend = offset + sym->st_value + 8;
- break;
+ return offset + sym->st_value + 8;
case R_ARM_THM_MOVW_ABS_NC:
case R_ARM_THM_MOVT_ABS:
upper = TO_NATIVE(*(uint16_t *)loc);
@@ -1349,8 +1342,7 @@ static int addend_arm_rel(void *loc, Elf_Sym *sym, Elf_Rela *r)
((lower & 0x7000) >> 4) |
(lower & 0x00ff),
15);
- r->r_addend = offset + sym->st_value;
- break;
+ return offset + sym->st_value;
case R_ARM_THM_JUMP19:
/*
* Encoding T3:
@@ -1371,8 +1363,7 @@ static int addend_arm_rel(void *loc, Elf_Sym *sym, Elf_Rela *r)
((upper & 0x03f) << 12) |
((lower & 0x07ff) << 1),
20);
- r->r_addend = offset + sym->st_value + 4;
- break;
+ return offset + sym->st_value + 4;
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
/*
@@ -1398,34 +1389,26 @@ static int addend_arm_rel(void *loc, Elf_Sym *sym, Elf_Rela *r)
((upper & 0x03ff) << 12) |
((lower & 0x07ff) << 1),
24);
- r->r_addend = offset + sym->st_value + 4;
- break;
- default:
- r->r_addend = (Elf_Addr)(-1);
+ return offset + sym->st_value + 4;
}
- return 0;
+
+ return (Elf_Addr)(-1);
}
-static int addend_mips_rel(uint32_t *location, Elf_Rela *r)
+static Elf_Addr addend_mips_rel(uint32_t *location, unsigned int r_type)
{
- unsigned int r_typ = ELF_R_TYPE(r->r_info);
uint32_t inst;
inst = TO_NATIVE(*location);
- switch (r_typ) {
+ switch (r_type) {
case R_MIPS_LO16:
- r->r_addend = inst & 0xffff;
- break;
+ return inst & 0xffff;
case R_MIPS_26:
- r->r_addend = (inst & 0x03ffffff) << 2;
- break;
+ return (inst & 0x03ffffff) << 2;
case R_MIPS_32:
- r->r_addend = inst;
- break;
- default:
- r->r_addend = (Elf_Addr)(-1);
+ return inst;
}
- return 0;
+ return (Elf_Addr)(-1);
}
#ifndef EM_RISCV
@@ -1444,12 +1427,45 @@ static int addend_mips_rel(uint32_t *location, Elf_Rela *r)
#define R_LARCH_SUB32 55
#endif
+static void get_rel_type_and_sym(struct elf_info *elf, uint64_t r_info,
+ unsigned int *r_type, unsigned int *r_sym)
+{
+ typedef struct {
+ Elf64_Word r_sym; /* Symbol index */
+ unsigned char r_ssym; /* Special symbol for 2nd relocation */
+ unsigned char r_type3; /* 3rd relocation type */
+ unsigned char r_type2; /* 2nd relocation type */
+ unsigned char r_type; /* 1st relocation type */
+ } Elf64_Mips_R_Info;
+
+ bool is_64bit = (elf->hdr->e_ident[EI_CLASS] == ELFCLASS64);
+
+ if (elf->hdr->e_machine == EM_MIPS && is_64bit) {
+ Elf64_Mips_R_Info *mips64_r_info = (void *)&r_info;
+
+ *r_type = mips64_r_info->r_type;
+ *r_sym = TO_NATIVE(mips64_r_info->r_sym);
+ return;
+ }
+
+ if (is_64bit) {
+ Elf64_Xword r_info64 = r_info;
+
+ r_info = TO_NATIVE(r_info64);
+ } else {
+ Elf32_Word r_info32 = r_info;
+
+ r_info = TO_NATIVE(r_info32);
+ }
+
+ *r_type = ELF_R_TYPE(r_info);
+ *r_sym = ELF_R_SYM(r_info);
+}
+
static void section_rela(struct module *mod, struct elf_info *elf,
Elf_Shdr *sechdr)
{
Elf_Rela *rela;
- Elf_Rela r;
- unsigned int r_sym;
unsigned int fsecndx = sechdr->sh_info;
const char *fromsec = sec_name(elf, fsecndx);
Elf_Rela *start = (void *)elf->hdr + sechdr->sh_offset;
@@ -1460,38 +1476,29 @@ static void section_rela(struct module *mod, struct elf_info *elf,
return;
for (rela = start; rela < stop; rela++) {
- r.r_offset = TO_NATIVE(rela->r_offset);
-#if KERNEL_ELFCLASS == ELFCLASS64
- if (elf->hdr->e_machine == EM_MIPS) {
- unsigned int r_typ;
- r_sym = ELF64_MIPS_R_SYM(rela->r_info);
- r_sym = TO_NATIVE(r_sym);
- r_typ = ELF64_MIPS_R_TYPE(rela->r_info);
- r.r_info = ELF64_R_INFO(r_sym, r_typ);
- } else {
- r.r_info = TO_NATIVE(rela->r_info);
- r_sym = ELF_R_SYM(r.r_info);
- }
-#else
- r.r_info = TO_NATIVE(rela->r_info);
- r_sym = ELF_R_SYM(r.r_info);
-#endif
- r.r_addend = TO_NATIVE(rela->r_addend);
+ Elf_Addr taddr, r_offset;
+ unsigned int r_type, r_sym;
+
+ r_offset = TO_NATIVE(rela->r_offset);
+ get_rel_type_and_sym(elf, rela->r_info, &r_type, &r_sym);
+
+ taddr = TO_NATIVE(rela->r_addend);
+
switch (elf->hdr->e_machine) {
case EM_RISCV:
if (!strcmp("__ex_table", fromsec) &&
- ELF_R_TYPE(r.r_info) == R_RISCV_SUB32)
+ r_type == R_RISCV_SUB32)
continue;
break;
case EM_LOONGARCH:
if (!strcmp("__ex_table", fromsec) &&
- ELF_R_TYPE(r.r_info) == R_LARCH_SUB32)
+ r_type == R_LARCH_SUB32)
continue;
break;
}
check_section_mismatch(mod, elf, elf->symtab_start + r_sym,
- fsecndx, fromsec, r.r_offset, r.r_addend);
+ fsecndx, fromsec, r_offset, taddr);
}
}
@@ -1499,8 +1506,6 @@ static void section_rel(struct module *mod, struct elf_info *elf,
Elf_Shdr *sechdr)
{
Elf_Rel *rel;
- Elf_Rela r;
- unsigned int r_sym;
unsigned int fsecndx = sechdr->sh_info;
const char *fromsec = sec_name(elf, fsecndx);
Elf_Rel *start = (void *)elf->hdr + sechdr->sh_offset;
@@ -1512,45 +1517,32 @@ static void section_rel(struct module *mod, struct elf_info *elf,
for (rel = start; rel < stop; rel++) {
Elf_Sym *tsym;
+ Elf_Addr taddr = 0, r_offset;
+ unsigned int r_type, r_sym;
void *loc;
- r.r_offset = TO_NATIVE(rel->r_offset);
-#if KERNEL_ELFCLASS == ELFCLASS64
- if (elf->hdr->e_machine == EM_MIPS) {
- unsigned int r_typ;
- r_sym = ELF64_MIPS_R_SYM(rel->r_info);
- r_sym = TO_NATIVE(r_sym);
- r_typ = ELF64_MIPS_R_TYPE(rel->r_info);
- r.r_info = ELF64_R_INFO(r_sym, r_typ);
- } else {
- r.r_info = TO_NATIVE(rel->r_info);
- r_sym = ELF_R_SYM(r.r_info);
- }
-#else
- r.r_info = TO_NATIVE(rel->r_info);
- r_sym = ELF_R_SYM(r.r_info);
-#endif
- r.r_addend = 0;
+ r_offset = TO_NATIVE(rel->r_offset);
+ get_rel_type_and_sym(elf, rel->r_info, &r_type, &r_sym);
- loc = sym_get_data_by_offset(elf, fsecndx, r.r_offset);
+ loc = sym_get_data_by_offset(elf, fsecndx, r_offset);
tsym = elf->symtab_start + r_sym;
switch (elf->hdr->e_machine) {
case EM_386:
- addend_386_rel(loc, &r);
+ taddr = addend_386_rel(loc, r_type);
break;
case EM_ARM:
- addend_arm_rel(loc, tsym, &r);
+ taddr = addend_arm_rel(loc, tsym, r_type);
break;
case EM_MIPS:
- addend_mips_rel(loc, &r);
+ taddr = addend_mips_rel(loc, r_type);
break;
default:
fatal("Please add code to calculate addend for this architecture\n");
}
check_section_mismatch(mod, elf, tsym,
- fsecndx, fromsec, r.r_offset, r.r_addend);
+ fsecndx, fromsec, r_offset, taddr);
}
}
@@ -2272,7 +2264,7 @@ int main(int argc, char **argv)
LIST_HEAD(dump_lists);
struct dump_list *dl, *dl2;
- while ((opt = getopt(argc, argv, "ei:mnT:to:au:WwENd:")) != -1) {
+ while ((opt = getopt(argc, argv, "ei:MmnT:to:au:WwENd:")) != -1) {
switch (opt) {
case 'e':
external_module = true;
@@ -2282,6 +2274,9 @@ int main(int argc, char **argv)
dl->file = optarg;
list_add_tail(&dl->list, &dump_lists);
break;
+ case 'M':
+ module_enabled = true;
+ break;
case 'm':
modversions = true;
break;
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index dfdb9484e325..5f94c2c9f2d9 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -50,28 +50,6 @@
#define ELF_R_TYPE ELF64_R_TYPE
#endif
-/* The 64-bit MIPS ELF ABI uses an unusual reloc format. */
-typedef struct
-{
- Elf32_Word r_sym; /* Symbol index */
- unsigned char r_ssym; /* Special symbol for 2nd relocation */
- unsigned char r_type3; /* 3rd relocation type */
- unsigned char r_type2; /* 2nd relocation type */
- unsigned char r_type1; /* 1st relocation type */
-} _Elf64_Mips_R_Info;
-
-typedef union
-{
- Elf64_Xword r_info_number;
- _Elf64_Mips_R_Info r_info_fields;
-} _Elf64_Mips_R_Info_union;
-
-#define ELF64_MIPS_R_SYM(i) \
- ((__extension__ (_Elf64_Mips_R_Info_union)(i)).r_info_fields.r_sym)
-
-#define ELF64_MIPS_R_TYPE(i) \
- ((__extension__ (_Elf64_Mips_R_Info_union)(i)).r_info_fields.r_type1)
-
#if KERNEL_ELFDATA != HOST_ELFDATA
static inline void __endian(const void *src, void *dest, unsigned int size)
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 032774eb061e..bf3f8561aa68 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -162,34 +162,7 @@ install_kernel_headers () {
rm -rf $pdir
- (
- cd $srctree
- find . arch/$SRCARCH -maxdepth 1 -name Makefile\*
- find include scripts -type f -o -type l
- find arch/$SRCARCH -name Kbuild.platforms -o -name Platform
- find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f
- ) > debian/hdrsrcfiles
-
- {
- if is_enabled CONFIG_OBJTOOL; then
- echo tools/objtool/objtool
- fi
-
- find arch/$SRCARCH/include Module.symvers include scripts -type f
-
- if is_enabled CONFIG_GCC_PLUGINS; then
- find scripts/gcc-plugins -name \*.so
- fi
- } > debian/hdrobjfiles
-
- destdir=$pdir/usr/src/linux-headers-$version
- mkdir -p $destdir
- tar -c -f - -C $srctree -T debian/hdrsrcfiles | tar -xf - -C $destdir
- tar -c -f - -T debian/hdrobjfiles | tar -xf - -C $destdir
- rm -f debian/hdrsrcfiles debian/hdrobjfiles
-
- # copy .config manually to be where it's expected to be
- cp $KCONFIG_CONFIG $destdir/.config
+ "${srctree}/scripts/package/install-extmod-build" "${pdir}/usr/src/linux-headers-${version}"
mkdir -p $pdir/lib/modules/$version/
ln -s /usr/src/linux-headers-$version $pdir/lib/modules/$version/build
diff --git a/scripts/package/debian/rules b/scripts/package/debian/rules
new file mode 100755
index 000000000000..3dafa9496c63
--- /dev/null
+++ b/scripts/package/debian/rules
@@ -0,0 +1,33 @@
+#!/usr/bin/make -f
+# SPDX-License-Identifier: GPL-2.0-only
+
+include debian/rules.vars
+
+srctree ?= .
+
+ifneq (,$(filter-out parallel=1,$(filter parallel=%,$(DEB_BUILD_OPTIONS))))
+ NUMJOBS = $(patsubst parallel=%,%,$(filter parallel=%,$(DEB_BUILD_OPTIONS)))
+ MAKEFLAGS += -j$(NUMJOBS)
+endif
+
+.PHONY: binary binary-indep binary-arch
+binary: binary-arch binary-indep
+binary-indep: build-indep
+binary-arch: build-arch
+ $(MAKE) -f $(srctree)/Makefile ARCH=$(ARCH) \
+ KERNELRELEASE=$(KERNELRELEASE) \
+ run-command KBUILD_RUN_COMMAND=+$(srctree)/scripts/package/builddeb
+
+.PHONY: build build-indep build-arch
+build: build-arch build-indep
+build-indep:
+build-arch:
+ $(MAKE) -f $(srctree)/Makefile ARCH=$(ARCH) \
+ KERNELRELEASE=$(KERNELRELEASE) \
+ $(shell $(srctree)/scripts/package/deb-build-option) \
+ olddefconfig all
+
+.PHONY: clean
+clean:
+ rm -rf debian/files debian/linux-*
+ $(MAKE) -f $(srctree)/Makefile ARCH=$(ARCH) clean
diff --git a/scripts/package/install-extmod-build b/scripts/package/install-extmod-build
new file mode 100755
index 000000000000..af7fe9f5b1e4
--- /dev/null
+++ b/scripts/package/install-extmod-build
@@ -0,0 +1,39 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+
+set -e
+
+destdir=${1}
+
+test -n "${srctree}"
+test -n "${SRCARCH}"
+
+is_enabled() {
+ grep -q "^$1=y" include/config/auto.conf
+}
+
+mkdir -p "${destdir}"
+
+(
+ cd "${srctree}"
+ echo Makefile
+ find "arch/${SRCARCH}" -maxdepth 1 -name 'Makefile*'
+ find include scripts -type f -o -type l
+ find "arch/${SRCARCH}" -name Kbuild.platforms -o -name Platform
+ find "$(find "arch/${SRCARCH}" -name include -o -name scripts -type d)" -type f
+) | tar -c -f - -C "${srctree}" -T - | tar -xf - -C "${destdir}"
+
+{
+ if is_enabled CONFIG_OBJTOOL; then
+ echo tools/objtool/objtool
+ fi
+
+ find "arch/${SRCARCH}/include" Module.symvers include scripts -type f
+
+ if is_enabled CONFIG_GCC_PLUGINS; then
+ find scripts/gcc-plugins -name '*.so'
+ fi
+} | tar -c -f - -T - | tar -xf - -C "${destdir}"
+
+# copy .config manually to be where it's expected to be
+cp "${KCONFIG_CONFIG}" "${destdir}/.config"
diff --git a/scripts/package/kernel.spec b/scripts/package/kernel.spec
new file mode 100644
index 000000000000..ac3f2ee6d7a0
--- /dev/null
+++ b/scripts/package/kernel.spec
@@ -0,0 +1,117 @@
+# _arch is undefined if /usr/lib/rpm/platform/*/macros was not included.
+%{!?_arch: %define _arch dummy}
+%{!?make: %define make make}
+%define makeflags %{?_smp_mflags} ARCH=%{ARCH}
+%define __spec_install_post /usr/lib/rpm/brp-compress || :
+%define debug_package %{nil}
+
+Name: kernel
+Summary: The Linux Kernel
+Version: %(echo %{KERNELRELEASE} | sed -e 's/-/_/g')
+Release: %{pkg_release}
+License: GPL
+Group: System Environment/Kernel
+Vendor: The Linux Community
+URL: https://www.kernel.org
+Source0: linux.tar.gz
+Source1: config
+Source2: diff.patch
+Provides: kernel-%{KERNELRELEASE}
+BuildRequires: bc binutils bison dwarves
+BuildRequires: (elfutils-libelf-devel or libelf-devel) flex
+BuildRequires: gcc make openssl openssl-devel perl python3 rsync
+
+%description
+The Linux Kernel, the operating system core itself
+
+%package headers
+Summary: Header files for the Linux kernel for use by glibc
+Group: Development/System
+Obsoletes: kernel-headers
+Provides: kernel-headers = %{version}
+%description headers
+Kernel-headers includes the C header files that specify the interface
+between the Linux kernel and userspace libraries and programs. The
+header files define structures and constants that are needed for
+building most standard programs and are also needed for rebuilding the
+glibc package.
+
+%if %{with_devel}
+%package devel
+Summary: Development package for building kernel modules to match the %{version} kernel
+Group: System Environment/Kernel
+AutoReqProv: no
+%description -n kernel-devel
+This package provides kernel headers and makefiles sufficient to build modules
+against the %{version} kernel package.
+%endif
+
+%prep
+%setup -q -n linux
+cp %{SOURCE1} .config
+patch -p1 < %{SOURCE2}
+
+%build
+%{make} %{makeflags} KERNELRELEASE=%{KERNELRELEASE} KBUILD_BUILD_VERSION=%{release}
+
+%install
+mkdir -p %{buildroot}/boot
+%ifarch ia64
+mkdir -p %{buildroot}/boot/efi
+cp $(%{make} %{makeflags} -s image_name) %{buildroot}/boot/efi/vmlinuz-%{KERNELRELEASE}
+ln -s efi/vmlinuz-%{KERNELRELEASE} %{buildroot}/boot/
+%else
+cp $(%{make} %{makeflags} -s image_name) %{buildroot}/boot/vmlinuz-%{KERNELRELEASE}
+%endif
+%{make} %{makeflags} INSTALL_MOD_PATH=%{buildroot} modules_install
+%{make} %{makeflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install
+cp System.map %{buildroot}/boot/System.map-%{KERNELRELEASE}
+cp .config %{buildroot}/boot/config-%{KERNELRELEASE}
+ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEASE}/build
+ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEASE}/source
+%if %{with_devel}
+%{make} %{makeflags} run-command KBUILD_RUN_COMMAND='${srctree}/scripts/package/install-extmod-build %{buildroot}/usr/src/kernels/%{KERNELRELEASE}'
+%endif
+
+%clean
+rm -rf %{buildroot}
+
+%post
+if [ -x /sbin/installkernel -a -r /boot/vmlinuz-%{KERNELRELEASE} -a -r /boot/System.map-%{KERNELRELEASE} ]; then
+cp /boot/vmlinuz-%{KERNELRELEASE} /boot/.vmlinuz-%{KERNELRELEASE}-rpm
+cp /boot/System.map-%{KERNELRELEASE} /boot/.System.map-%{KERNELRELEASE}-rpm
+rm -f /boot/vmlinuz-%{KERNELRELEASE} /boot/System.map-%{KERNELRELEASE}
+/sbin/installkernel %{KERNELRELEASE} /boot/.vmlinuz-%{KERNELRELEASE}-rpm /boot/.System.map-%{KERNELRELEASE}-rpm
+rm -f /boot/.vmlinuz-%{KERNELRELEASE}-rpm /boot/.System.map-%{KERNELRELEASE}-rpm
+fi
+
+%preun
+if [ -x /sbin/new-kernel-pkg ]; then
+new-kernel-pkg --remove %{KERNELRELEASE} --rminitrd --initrdfile=/boot/initramfs-%{KERNELRELEASE}.img
+elif [ -x /usr/bin/kernel-install ]; then
+kernel-install remove %{KERNELRELEASE}
+fi
+
+%postun
+if [ -x /sbin/update-bootloader ]; then
+/sbin/update-bootloader --remove %{KERNELRELEASE}
+fi
+
+%files
+%defattr (-, root, root)
+/lib/modules/%{KERNELRELEASE}
+%exclude /lib/modules/%{KERNELRELEASE}/build
+%exclude /lib/modules/%{KERNELRELEASE}/source
+/boot/*
+
+%files headers
+%defattr (-, root, root)
+/usr/include
+
+%if %{with_devel}
+%files devel
+%defattr (-, root, root)
+/usr/src/kernels/%{KERNELRELEASE}
+/lib/modules/%{KERNELRELEASE}/build
+/lib/modules/%{KERNELRELEASE}/source
+%endif
diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
index ba2453e08d40..5044224cf671 100755
--- a/scripts/package/mkdebian
+++ b/scripts/package/mkdebian
@@ -263,34 +263,11 @@ Description: Linux kernel debugging symbols for $version
EOF
fi
-cat <<EOF > debian/rules
-#!$(command -v $MAKE) -f
-
-srctree ?= .
-KERNELRELEASE = ${KERNELRELEASE}
-
-.PHONY: clean build build-arch build-indep binary binary-arch binary-indep
-
-build-indep:
-build-arch:
- \$(MAKE) -f \$(srctree)/Makefile ARCH=${ARCH} \
- KERNELRELEASE=\$(KERNELRELEASE) \
- \$(shell \$(srctree)/scripts/package/deb-build-option) \
- olddefconfig all
-
-build: build-arch
-
-binary-indep:
-binary-arch: build-arch
- \$(MAKE) -f \$(srctree)/Makefile ARCH=${ARCH} \
- KERNELRELEASE=\$(KERNELRELEASE) intdeb-pkg
-
-clean:
- rm -rf debian/files debian/linux-*
- \$(MAKE) -f \$(srctree)/Makefile ARCH=${ARCH} clean
-
-binary: binary-arch
+cat <<EOF > debian/rules.vars
+ARCH := ${ARCH}
+KERNELRELEASE := ${KERNELRELEASE}
EOF
-chmod +x debian/rules
+
+cp "${srctree}/scripts/package/debian/rules" debian/
exit 0
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index 8049f0e2c110..d41608efb747 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -9,148 +9,16 @@
# Patched for non-x86 by Opencon (L) 2002 <opencon@rio.skydome.net>
#
-# how we were called determines which rpms we build and how we build them
-if [ "$1" = prebuilt ]; then
- S=DEL
- MAKE="$MAKE -f $srctree/Makefile"
-else
- S=
-
- mkdir -p rpmbuild/SOURCES
- cp linux.tar.gz rpmbuild/SOURCES
- cp "${KCONFIG_CONFIG}" rpmbuild/SOURCES/config
- "${srctree}/scripts/package/gen-diff-patch" rpmbuild/SOURCES/diff.patch
-fi
-
if grep -q CONFIG_MODULES=y include/config/auto.conf; then
- M=
+echo '%define with_devel %{?_without_devel: 0} %{?!_without_devel: 1}'
else
- M=DEL
+echo '%define with_devel 0'
fi
-__KERNELRELEASE=$(echo $KERNELRELEASE | sed -e "s/-/_/g")
-EXCLUDES="$RCS_TAR_IGNORE --exclude=*vmlinux* --exclude=*.mod \
---exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation \
---exclude=.config.old --exclude=.missing-syscalls.d --exclude=*.s"
-
-# We can label the here-doc lines for conditional output to the spec file
-#
-# Labels:
-# $S: this line is enabled only when building source package
-# $M: this line is enabled only when CONFIG_MODULES is enabled
-sed -e '/^DEL/d' -e 's/^\t*//' <<EOF
- Name: kernel
- Summary: The Linux Kernel
- Version: $__KERNELRELEASE
- Release: $(cat .version 2>/dev/null || echo 1)
- License: GPL
- Group: System Environment/Kernel
- Vendor: The Linux Community
- URL: https://www.kernel.org
-$S Source0: linux.tar.gz
-$S Source1: config
-$S Source2: diff.patch
- Provides: kernel-$KERNELRELEASE
-$S BuildRequires: bc binutils bison dwarves
-$S BuildRequires: (elfutils-libelf-devel or libelf-devel) flex
-$S BuildRequires: gcc make openssl openssl-devel perl python3 rsync
-
- # $UTS_MACHINE as a fallback of _arch in case
- # /usr/lib/rpm/platform/*/macros was not included.
- %define _arch %{?_arch:$UTS_MACHINE}
- %define __spec_install_post /usr/lib/rpm/brp-compress || :
- %define debug_package %{nil}
-
- %description
- The Linux Kernel, the operating system core itself
-
- %package headers
- Summary: Header files for the Linux kernel for use by glibc
- Group: Development/System
- Obsoletes: kernel-headers
- Provides: kernel-headers = %{version}
- %description headers
- Kernel-headers includes the C header files that specify the interface
- between the Linux kernel and userspace libraries and programs. The
- header files define structures and constants that are needed for
- building most standard programs and are also needed for rebuilding the
- glibc package.
-
-$S$M %package devel
-$S$M Summary: Development package for building kernel modules to match the $__KERNELRELEASE kernel
-$S$M Group: System Environment/Kernel
-$S$M AutoReqProv: no
-$S$M %description -n kernel-devel
-$S$M This package provides kernel headers and makefiles sufficient to build modules
-$S$M against the $__KERNELRELEASE kernel package.
-$S$M
-$S %prep
-$S %setup -q -n linux
-$S cp %{SOURCE1} .config
-$S patch -p1 < %{SOURCE2}
-$S
-$S %build
-$S $MAKE %{?_smp_mflags} KERNELRELEASE=$KERNELRELEASE KBUILD_BUILD_VERSION=%{release}
-$S
- %install
- mkdir -p %{buildroot}/boot
- %ifarch ia64
- mkdir -p %{buildroot}/boot/efi
- cp \$($MAKE -s image_name) %{buildroot}/boot/efi/vmlinuz-$KERNELRELEASE
- ln -s efi/vmlinuz-$KERNELRELEASE %{buildroot}/boot/
- %else
- cp \$($MAKE -s image_name) %{buildroot}/boot/vmlinuz-$KERNELRELEASE
- %endif
-$M $MAKE %{?_smp_mflags} INSTALL_MOD_PATH=%{buildroot} modules_install
- $MAKE %{?_smp_mflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install
- cp System.map %{buildroot}/boot/System.map-$KERNELRELEASE
- cp .config %{buildroot}/boot/config-$KERNELRELEASE
-$S$M rm -f %{buildroot}/lib/modules/$KERNELRELEASE/build
-$S$M rm -f %{buildroot}/lib/modules/$KERNELRELEASE/source
-$S$M mkdir -p %{buildroot}/usr/src/kernels/$KERNELRELEASE
-$S$M tar cf - $EXCLUDES . | tar xf - -C %{buildroot}/usr/src/kernels/$KERNELRELEASE
-$S$M cd %{buildroot}/lib/modules/$KERNELRELEASE
-$S$M ln -sf /usr/src/kernels/$KERNELRELEASE build
-$S$M ln -sf /usr/src/kernels/$KERNELRELEASE source
-
- %clean
- rm -rf %{buildroot}
-
- %post
- if [ -x /sbin/installkernel -a -r /boot/vmlinuz-$KERNELRELEASE -a -r /boot/System.map-$KERNELRELEASE ]; then
- cp /boot/vmlinuz-$KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm
- cp /boot/System.map-$KERNELRELEASE /boot/.System.map-$KERNELRELEASE-rpm
- rm -f /boot/vmlinuz-$KERNELRELEASE /boot/System.map-$KERNELRELEASE
- /sbin/installkernel $KERNELRELEASE /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm
- rm -f /boot/.vmlinuz-$KERNELRELEASE-rpm /boot/.System.map-$KERNELRELEASE-rpm
- fi
-
- %preun
- if [ -x /sbin/new-kernel-pkg ]; then
- new-kernel-pkg --remove $KERNELRELEASE --rminitrd --initrdfile=/boot/initramfs-$KERNELRELEASE.img
- elif [ -x /usr/bin/kernel-install ]; then
- kernel-install remove $KERNELRELEASE
- fi
-
- %postun
- if [ -x /sbin/update-bootloader ]; then
- /sbin/update-bootloader --remove $KERNELRELEASE
- fi
-
- %files
- %defattr (-, root, root)
-$M /lib/modules/$KERNELRELEASE
-$M %exclude /lib/modules/$KERNELRELEASE/build
-$M %exclude /lib/modules/$KERNELRELEASE/source
- /boot/*
-
- %files headers
- %defattr (-, root, root)
- /usr/include
-$S$M
-$S$M %files devel
-$S$M %defattr (-, root, root)
-$S$M /usr/src/kernels/$KERNELRELEASE
-$S$M /lib/modules/$KERNELRELEASE/build
-$S$M /lib/modules/$KERNELRELEASE/source
+cat<<EOF
+%define ARCH ${ARCH}
+%define KERNELRELEASE ${KERNELRELEASE}
+%define pkg_release $("${srctree}/init/build-version")
EOF
+
+cat "${srctree}/scripts/package/kernel.spec"
diff --git a/scripts/remove-stale-files b/scripts/remove-stale-files
index f3659ea0335b..8b1a636f8543 100755
--- a/scripts/remove-stale-files
+++ b/scripts/remove-stale-files
@@ -37,3 +37,5 @@ rm -f .scmversion
rm -rf include/ksym
find . -name '*.usyms' | xargs rm -f
+
+rm -f binkernel.spec
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 3d3babac8298..38b96c6797f4 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -2,7 +2,7 @@
# SPDX-License-Identifier: GPL-2.0
#
# This scripts adds local version information from the version
-# control systems git, mercurial (hg) and subversion (svn).
+# control system git.
#
# If something goes wrong, send a mail the kernel build mailinglist
# (see MAINTAINERS) and CC Nico Schottelius
@@ -57,21 +57,37 @@ scm_version()
return
fi
- # If a localversion*' file and the corresponding annotated tag exist,
- # use it. This is the case in linux-next.
+ # mainline kernel: 6.2.0-rc5 -> v6.2-rc5
+ # stable kernel: 6.1.7 -> v6.1.7
+ version_tag=v$(echo "${KERNELVERSION}" | sed -E 's/^([0-9]+\.[0-9]+)\.0(.*)$/\1\2/')
+
+ # If a localversion* file exists, and the corresponding
+ # annotated tag exists and is an ancestor of HEAD, use
+ # it. This is the case in linux-next.
tag=${file_localversion#-}
- tag=$(git describe --exact-match --match=$tag $tag 2>/dev/null)
+ desc=
+ if [ -n "${tag}" ]; then
+ desc=$(git describe --match=$tag 2>/dev/null)
+ fi
+
+ # Otherwise, if a localversion* file exists, and the tag
+ # obtained by appending it to the tag derived from
+ # KERNELVERSION exists and is an ancestor of HEAD, use
+ # it. This is e.g. the case in linux-rt.
+ if [ -z "${desc}" ] && [ -n "${file_localversion}" ]; then
+ tag="${version_tag}${file_localversion}"
+ desc=$(git describe --match=$tag 2>/dev/null)
+ fi
# Otherwise, default to the annotated tag derived from KERNELVERSION.
- # mainline kernel: 6.2.0-rc5 -> v6.2-rc5
- # stable kernel: 6.1.7 -> v6.1.7
- if [ -z "${tag}" ]; then
- tag=v$(echo "${KERNELVERSION}" | sed -E 's/^([0-9]+\.[0-9]+)\.0(.*)$/\1\2/')
+ if [ -z "${desc}" ]; then
+ tag="${version_tag}"
+ desc=$(git describe --match=$tag 2>/dev/null)
fi
# If we are at the tagged commit, we ignore it because the version is
# well-defined.
- if [ -z "$(git describe --exact-match --match=$tag 2>/dev/null)" ]; then
+ if [ "${tag}" != "${desc}" ]; then
# If only the short version is requested, don't bother
# running further git commands
@@ -81,8 +97,8 @@ scm_version()
fi
# If we are past the tagged commit, we pretty print it.
# (like 6.1.0-14595-g292a089d78d3)
- if atag="$(git describe --match=$tag 2>/dev/null)"; then
- echo "$atag" | awk -F- '{printf("-%05d", $(NF-1))}'
+ if [ -n "${desc}" ]; then
+ echo "${desc}" | awk -F- '{printf("-%05d", $(NF-1))}'
fi
# Add -g and exactly 12 hex chars.
diff --git a/security/landlock/ruleset.h b/security/landlock/ruleset.h
index d43231b783e4..55b1df8f66a8 100644
--- a/security/landlock/ruleset.h
+++ b/security/landlock/ruleset.h
@@ -67,7 +67,7 @@ struct landlock_rule {
* @layers: Stack of layers, from the latest to the newest, implemented
* as a flexible array member (FAM).
*/
- struct landlock_layer layers[];
+ struct landlock_layer layers[] __counted_by(num_layers);
};
/**
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index 969d4aa6fd55..57ee70ae50f2 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -184,6 +184,7 @@ static bool tomoyo_manage_by_non_root;
*
* Returns nothing.
*/
+__printf(3, 4)
static void tomoyo_addprintf(char *buffer, int len, const char *fmt, ...)
{
va_list args;
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index a539b2cbb5c4..0e8e2e959aef 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -954,7 +954,7 @@ bool tomoyo_str_starts(char **src, const char *find);
char *tomoyo_encode(const char *str);
char *tomoyo_encode2(const char *str, int str_len);
char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
- va_list args);
+ va_list args) __printf(3, 0);
char *tomoyo_read_token(struct tomoyo_acl_param *param);
char *tomoyo_realpath_from_path(const struct path *path);
char *tomoyo_realpath_nofollow(const char *pathname);
@@ -1037,8 +1037,6 @@ struct tomoyo_policy_namespace *tomoyo_assign_namespace
(const char *domainname);
struct tomoyo_profile *tomoyo_profile(const struct tomoyo_policy_namespace *ns,
const u8 profile);
-unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain,
- const u8 index);
u8 tomoyo_parse_ulong(unsigned long *result, char **str);
void *tomoyo_commit_ok(void *data, const unsigned int size);
void __init tomoyo_load_builtin_policy(void);
@@ -1067,7 +1065,7 @@ void tomoyo_warn_oom(const char *function);
void tomoyo_write_log(struct tomoyo_request_info *r, const char *fmt, ...)
__printf(2, 3);
void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt,
- va_list args);
+ va_list args) __printf(3, 0);
/********** External variable definitions. **********/
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index ac20c0bdff9d..90b53500a236 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -784,13 +784,12 @@ retry:
if (!strcmp(domainname, "parent")) {
char *cp;
- strncpy(ee->tmp, old_domain->domainname->name,
- TOMOYO_EXEC_TMPSIZE - 1);
+ strscpy(ee->tmp, old_domain->domainname->name, TOMOYO_EXEC_TMPSIZE);
cp = strrchr(ee->tmp, ' ');
if (cp)
*cp = '\0';
} else if (*domainname == '<')
- strncpy(ee->tmp, domainname, TOMOYO_EXEC_TMPSIZE - 1);
+ strscpy(ee->tmp, domainname, TOMOYO_EXEC_TMPSIZE);
else
snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
old_domain->domainname->name, domainname);
diff --git a/sound/Kconfig b/sound/Kconfig
index f0e15822e858..4c036a9a420a 100644
--- a/sound/Kconfig
+++ b/sound/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig SOUND
tristate "Sound card support"
- depends on HAS_IOMEM
+ depends on HAS_IOMEM || UML
help
If you have a sound card in your computer, i.e. if it can say more
than an occasional beep, say Y.
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 4859fb1caec9..a11cd7d6295f 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1992,8 +1992,8 @@ static int default_write_copy(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
struct iov_iter *iter, unsigned long bytes)
{
- if (!copy_from_iter(get_dma_ptr(substream->runtime, channel, hwoff),
- bytes, iter))
+ if (copy_from_iter(get_dma_ptr(substream->runtime, channel, hwoff),
+ bytes, iter) != bytes)
return -EFAULT;
return 0;
}
@@ -2025,8 +2025,8 @@ static int default_read_copy(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
struct iov_iter *iter, unsigned long bytes)
{
- if (!copy_to_iter(get_dma_ptr(substream->runtime, channel, hwoff),
- bytes, iter))
+ if (copy_to_iter(get_dma_ptr(substream->runtime, channel, hwoff),
+ bytes, iter) != bytes)
return -EFAULT;
return 0;
}
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 174585bf59d2..b603bb93f896 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -187,8 +187,13 @@ int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char
err = expand_var_event(event, 0, len, buf, in_kernel);
if (err < 0)
return err;
- if (len != newlen)
- memset(buf + len, 0, newlen - len);
+ if (len != newlen) {
+ if (in_kernel)
+ memset(buf + len, 0, newlen - len);
+ else if (clear_user((__force void __user *)buf + len,
+ newlen - len))
+ return -EFAULT;
+ }
return newlen;
}
EXPORT_SYMBOL(snd_seq_expand_var_event);
diff --git a/sound/isa/sb/emu8000_pcm.c b/sound/isa/sb/emu8000_pcm.c
index c05935c2edc4..9234d4fe8ada 100644
--- a/sound/isa/sb/emu8000_pcm.c
+++ b/sound/isa/sb/emu8000_pcm.c
@@ -456,7 +456,7 @@ static int emu8k_pcm_silence(struct snd_pcm_substream *subs,
/* convert to word unit */
pos = (pos << 1) + rec->loop_start[voice];
count <<= 1;
- LOOP_WRITE(rec, pos, USER_SOCKPTR(NULL), count);
+ LOOP_WRITE(rec, pos, NULL, count);
return 0;
}
diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
index 0ba1fbcbb21e..627899959ffe 100644
--- a/sound/pci/hda/patch_cs8409.c
+++ b/sound/pci/hda/patch_cs8409.c
@@ -888,7 +888,7 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
/* Initialize CS42L42 companion codec */
cs8409_i2c_bulk_write(cs42l42, cs42l42->init_seq, cs42l42->init_seq_num);
- usleep_range(30000, 35000);
+ msleep(CS42L42_INIT_TIMEOUT_MS);
/* Clear interrupts, by reading interrupt status registers */
cs8409_i2c_bulk_read(cs42l42, irq_regs, ARRAY_SIZE(irq_regs));
diff --git a/sound/pci/hda/patch_cs8409.h b/sound/pci/hda/patch_cs8409.h
index 2a8dfb4ff046..937e9387abdc 100644
--- a/sound/pci/hda/patch_cs8409.h
+++ b/sound/pci/hda/patch_cs8409.h
@@ -229,6 +229,7 @@ enum cs8409_coefficient_index_registers {
#define CS42L42_I2C_SLEEP_US (2000)
#define CS42L42_PDN_TIMEOUT_US (250000)
#define CS42L42_PDN_SLEEP_US (2000)
+#define CS42L42_INIT_TIMEOUT_MS (45)
#define CS42L42_FULL_SCALE_VOL_MASK (2)
#define CS42L42_FULL_SCALE_VOL_0DB (1)
#define CS42L42_FULL_SCALE_VOL_MINUS6DB (0)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a07df6f92960..b7e78bfcffd8 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7057,6 +7057,27 @@ static void alc295_fixup_dell_inspiron_top_speakers(struct hda_codec *codec,
}
}
+/* Forcibly assign NID 0x03 to HP while NID 0x02 to SPK */
+static void alc287_fixup_bind_dacs(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+ static const hda_nid_t conn[] = { 0x02, 0x03 }; /* exclude 0x06 */
+ static const hda_nid_t preferred_pairs[] = {
+ 0x17, 0x02, 0x21, 0x03, 0
+ };
+
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ return;
+
+ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
+ spec->gen.preferred_dacs = preferred_pairs;
+ spec->gen.auto_mute_via_amp = 1;
+ snd_hda_codec_write_cache(codec, 0x14, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+ 0x0); /* Make sure 0x14 was disabled */
+}
+
+
enum {
ALC269_FIXUP_GPIO2,
ALC269_FIXUP_SONY_VAIO,
@@ -7319,6 +7340,7 @@ enum {
ALC287_FIXUP_TAS2781_I2C,
ALC245_FIXUP_HP_MUTE_LED_COEFBIT,
ALC245_FIXUP_HP_X360_MUTE_LEDS,
+ ALC287_FIXUP_THINKPAD_I2S_SPK,
};
/* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -9413,6 +9435,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC245_FIXUP_HP_GPIO_LED
},
+ [ALC287_FIXUP_THINKPAD_I2S_SPK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc287_fixup_bind_dacs,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -10544,6 +10570,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x17, 0x90170111},
{0x19, 0x03a11030},
{0x21, 0x03211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0287, 0x17aa, "Lenovo", ALC287_FIXUP_THINKPAD_I2S_SPK,
+ {0x17, 0x90170110},
+ {0x19, 0x03a11030},
+ {0x21, 0x03211020}),
SND_HDA_PIN_QUIRK(0x10ec0286, 0x1025, "Acer", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
{0x12, 0x90a60130},
{0x17, 0x90170110},
diff --git a/sound/pci/hda/tas2781_hda_i2c.c b/sound/pci/hda/tas2781_hda_i2c.c
index 37114fd61a38..fb802802939e 100644
--- a/sound/pci/hda/tas2781_hda_i2c.c
+++ b/sound/pci/hda/tas2781_hda_i2c.c
@@ -173,16 +173,6 @@ static int tasdevice_get_profile_id(struct snd_kcontrol *kcontrol,
return 0;
}
-static int tasdevice_hda_clamp(int val, int max)
-{
- if (val > max)
- val = max;
-
- if (val < 0)
- val = 0;
- return val;
-}
-
static int tasdevice_set_profile_id(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -191,7 +181,7 @@ static int tasdevice_set_profile_id(struct snd_kcontrol *kcontrol,
int max = tas_priv->rcabin.ncfgs - 1;
int val, ret = 0;
- val = tasdevice_hda_clamp(nr_profile, max);
+ val = clamp(nr_profile, 0, max);
if (tas_priv->rcabin.profile_cfg_id != val) {
tas_priv->rcabin.profile_cfg_id = val;
@@ -248,7 +238,7 @@ static int tasdevice_program_put(struct snd_kcontrol *kcontrol,
int max = tas_fw->nr_programs - 1;
int val, ret = 0;
- val = tasdevice_hda_clamp(nr_program, max);
+ val = clamp(nr_program, 0, max);
if (tas_priv->cur_prog != val) {
tas_priv->cur_prog = val;
@@ -277,7 +267,7 @@ static int tasdevice_config_put(struct snd_kcontrol *kcontrol,
int max = tas_fw->nr_configurations - 1;
int val, ret = 0;
- val = tasdevice_hda_clamp(nr_config, max);
+ val = clamp(nr_config, 0, max);
if (tas_priv->cur_conf != val) {
tas_priv->cur_conf = val;
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
index b304b3562c82..3ec15b46fa35 100644
--- a/sound/soc/amd/yc/acp6x-mach.c
+++ b/sound/soc/amd/yc/acp6x-mach.c
@@ -217,6 +217,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82TL"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82V2"),
}
},
@@ -328,6 +335,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
{
.driver_data = &acp6x_card,
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+ DMI_MATCH(DMI_BOARD_NAME, "8A3E"),
+ }
+ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "MECHREVO"),
DMI_MATCH(DMI_BOARD_NAME, "MRID6"),
}
diff --git a/sound/soc/atmel/mchp-pdmc.c b/sound/soc/atmel/mchp-pdmc.c
index afe213a71212..dcc4e14b3dde 100644
--- a/sound/soc/atmel/mchp-pdmc.c
+++ b/sound/soc/atmel/mchp-pdmc.c
@@ -954,7 +954,7 @@ static int mchp_pdmc_dt_init(struct mchp_pdmc *dd)
/* used to clean the channel index found on RHR's MSB */
static int mchp_pdmc_process(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
- struct iov_iter *buf, unsigned long bytes)
+ unsigned long bytes)
{
struct snd_pcm_runtime *runtime = substream->runtime;
u8 *dma_ptr = runtime->dma_area + hwoff +
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 95b5bd883215..f1e1dbc509f6 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -1968,11 +1968,15 @@ config SND_SOC_UDA1380
tristate
depends on I2C
+config SND_SOC_WCD_CLASSH
+ tristate
+
config SND_SOC_WCD9335
tristate "WCD9335 Codec"
depends on SLIMBUS
select REGMAP_SLIMBUS
select REGMAP_IRQ
+ select SND_SOC_WCD_CLASSH
help
The WCD9335 is a standalone Hi-Fi audio CODEC IC, supports
Qualcomm Technologies, Inc. (QTI) multimedia solutions,
@@ -1987,6 +1991,7 @@ config SND_SOC_WCD934X
depends on SLIMBUS
select REGMAP_IRQ
select REGMAP_SLIMBUS
+ select SND_SOC_WCD_CLASSH
select SND_SOC_WCD_MBHC
depends on MFD_WCD934X || COMPILE_TEST
help
@@ -1997,6 +2002,7 @@ config SND_SOC_WCD938X
depends on SND_SOC_WCD938X_SDW
tristate
depends on SOUNDWIRE || !SOUNDWIRE
+ select SND_SOC_WCD_CLASSH
config SND_SOC_WCD938X_SDW
tristate "WCD9380/WCD9385 Codec - SDW"
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index c8502a49b40a..a87e56938ce5 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -303,10 +303,11 @@ snd-soc-twl4030-objs := twl4030.o
snd-soc-twl6040-objs := twl6040.o
snd-soc-uda1334-objs := uda1334.o
snd-soc-uda1380-objs := uda1380.o
+snd-soc-wcd-classh-objs := wcd-clsh-v2.o
snd-soc-wcd-mbhc-objs := wcd-mbhc-v2.o
-snd-soc-wcd9335-objs := wcd-clsh-v2.o wcd9335.o
-snd-soc-wcd934x-objs := wcd-clsh-v2.o wcd934x.o
-snd-soc-wcd938x-objs := wcd938x.o wcd-clsh-v2.o
+snd-soc-wcd9335-objs := wcd9335.o
+snd-soc-wcd934x-objs := wcd934x.o
+snd-soc-wcd938x-objs := wcd938x.o
snd-soc-wcd938x-sdw-objs := wcd938x-sdw.o
snd-soc-wl1273-objs := wl1273.o
snd-soc-wm-adsp-objs := wm_adsp.o
@@ -685,6 +686,7 @@ obj-$(CONFIG_SND_SOC_TWL4030) += snd-soc-twl4030.o
obj-$(CONFIG_SND_SOC_TWL6040) += snd-soc-twl6040.o
obj-$(CONFIG_SND_SOC_UDA1334) += snd-soc-uda1334.o
obj-$(CONFIG_SND_SOC_UDA1380) += snd-soc-uda1380.o
+obj-$(CONFIG_SND_SOC_WCD_CLASSH) += snd-soc-wcd-classh.o
obj-$(CONFIG_SND_SOC_WCD_MBHC) += snd-soc-wcd-mbhc.o
obj-$(CONFIG_SND_SOC_WCD9335) += snd-soc-wcd9335.o
obj-$(CONFIG_SND_SOC_WCD934X) += snd-soc-wcd934x.o
diff --git a/sound/soc/codecs/cs35l45.c b/sound/soc/codecs/cs35l45.c
index d1edb9876c10..be4f4229576c 100644
--- a/sound/soc/codecs/cs35l45.c
+++ b/sound/soc/codecs/cs35l45.c
@@ -279,7 +279,7 @@ static const struct snd_kcontrol_new cs35l45_dsp_muxes[] = {
};
static const struct snd_kcontrol_new cs35l45_dac_muxes[] = {
- SOC_DAPM_ENUM("DACPCM1 Source", cs35l45_dacpcm_enums[0]),
+ SOC_DAPM_ENUM("DACPCM Source", cs35l45_dacpcm_enums[0]),
};
static const struct snd_soc_dapm_widget cs35l45_dapm_widgets[] = {
@@ -333,7 +333,7 @@ static const struct snd_soc_dapm_widget cs35l45_dapm_widgets[] = {
SND_SOC_DAPM_MUX("DSP_RX7 Source", SND_SOC_NOPM, 0, 0, &cs35l45_dsp_muxes[6]),
SND_SOC_DAPM_MUX("DSP_RX8 Source", SND_SOC_NOPM, 0, 0, &cs35l45_dsp_muxes[7]),
- SND_SOC_DAPM_MUX("DACPCM1 Source", SND_SOC_NOPM, 0, 0, &cs35l45_dac_muxes[0]),
+ SND_SOC_DAPM_MUX("DACPCM Source", SND_SOC_NOPM, 0, 0, &cs35l45_dac_muxes[0]),
SND_SOC_DAPM_OUT_DRV("AMP", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -403,7 +403,7 @@ static const struct snd_soc_dapm_route cs35l45_dapm_routes[] = {
{ "ASP_RX1", NULL, "ASP_EN" },
{ "ASP_RX2", NULL, "ASP_EN" },
- { "AMP", NULL, "DACPCM1 Source"},
+ { "AMP", NULL, "DACPCM Source"},
{ "AMP", NULL, "GLOBAL_EN"},
CS35L45_DSP_MUX_ROUTE("DSP_RX1"),
@@ -427,7 +427,7 @@ static const struct snd_soc_dapm_route cs35l45_dapm_routes[] = {
{"DSP1 Preload", NULL, "DSP1 Preloader"},
{"DSP1", NULL, "DSP1 Preloader"},
- CS35L45_DAC_MUX_ROUTE("DACPCM1"),
+ CS35L45_DAC_MUX_ROUTE("DACPCM"),
{ "SPK", NULL, "AMP"},
};
@@ -969,7 +969,7 @@ static irqreturn_t cs35l45_dsp_virt2_mbox_cb(int irq, void *data)
ret = regmap_read(cs35l45->regmap, CS35L45_DSP_VIRT2_MBOX_3, &mbox_val);
if (!ret && mbox_val)
- ret = cs35l45_dsp_virt2_mbox3_irq_handle(cs35l45, mbox_val & CS35L45_MBOX3_CMD_MASK,
+ cs35l45_dsp_virt2_mbox3_irq_handle(cs35l45, mbox_val & CS35L45_MBOX3_CMD_MASK,
(mbox_val & CS35L45_MBOX3_DATA_MASK) >> CS35L45_MBOX3_DATA_SHIFT);
/* Handle DSP trace log IRQ */
@@ -1078,6 +1078,7 @@ static int cs35l45_initialize(struct cs35l45_private *cs35l45)
switch (dev_id[0]) {
case 0x35A450:
+ case 0x35A460:
break;
default:
dev_err(cs35l45->dev, "Bad DEVID 0x%x\n", dev_id[0]);
diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c
index ae373f335ea8..98b1e63360ae 100644
--- a/sound/soc/codecs/cs35l56-shared.c
+++ b/sound/soc/codecs/cs35l56-shared.c
@@ -243,26 +243,27 @@ int cs35l56_wait_for_firmware_boot(struct cs35l56_base *cs35l56_base)
{
unsigned int reg;
unsigned int val;
- int ret;
+ int read_ret, poll_ret;
if (cs35l56_base->rev < CS35L56_REVID_B0)
reg = CS35L56_DSP1_HALO_STATE_A1;
else
reg = CS35L56_DSP1_HALO_STATE;
- ret = regmap_read_poll_timeout(cs35l56_base->regmap, reg,
- val,
- (val < 0xFFFF) && (val >= CS35L56_HALO_STATE_BOOT_DONE),
- CS35L56_HALO_STATE_POLL_US,
- CS35L56_HALO_STATE_TIMEOUT_US);
-
- if ((ret < 0) && (ret != -ETIMEDOUT)) {
- dev_err(cs35l56_base->dev, "Failed to read HALO_STATE: %d\n", ret);
- return ret;
- }
-
- if ((ret == -ETIMEDOUT) || (val != CS35L56_HALO_STATE_BOOT_DONE)) {
- dev_err(cs35l56_base->dev, "Firmware boot fail: HALO_STATE=%#x\n", val);
+ /*
+ * This can't be a regmap_read_poll_timeout() because cs35l56 will NAK
+ * I2C until it has booted which would terminate the poll
+ */
+ poll_ret = read_poll_timeout(regmap_read, read_ret,
+ (val < 0xFFFF) && (val >= CS35L56_HALO_STATE_BOOT_DONE),
+ CS35L56_HALO_STATE_POLL_US,
+ CS35L56_HALO_STATE_TIMEOUT_US,
+ false,
+ cs35l56_base->regmap, reg, &val);
+
+ if (poll_ret) {
+ dev_err(cs35l56_base->dev, "Firmware boot timed out(%d): HALO_STATE=%#x\n",
+ read_ret, val);
return -EIO;
}
diff --git a/sound/soc/codecs/cs42l43.c b/sound/soc/codecs/cs42l43.c
index 24e718e51174..1a95c370fc4c 100644
--- a/sound/soc/codecs/cs42l43.c
+++ b/sound/soc/codecs/cs42l43.c
@@ -2205,7 +2205,8 @@ static int cs42l43_codec_probe(struct platform_device *pdev)
// Don't use devm as we need to get against the MFD device
priv->mclk = clk_get_optional(cs42l43->dev, "mclk");
if (IS_ERR(priv->mclk)) {
- dev_err_probe(priv->dev, PTR_ERR(priv->mclk), "Failed to get mclk\n");
+ ret = PTR_ERR(priv->mclk);
+ dev_err_probe(priv->dev, ret, "Failed to get mclk\n");
goto err_pm;
}
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index 43c0cac0ec9e..9d54141a0cd1 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -258,8 +258,8 @@ static void v253_hangup(struct tty_struct *tty)
}
/* Line discipline .receive_buf() */
-static void v253_receive(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count)
+static void v253_receive(struct tty_struct *tty, const u8 *cp, const u8 *fp,
+ size_t count)
{
struct snd_soc_component *component = tty->disc_data;
struct cx20442_priv *cx20442;
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 038d93e20883..1a137ca3f496 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3269,13 +3269,17 @@ static int rt5645_component_set_jack(struct snd_soc_component *component,
{
struct snd_soc_jack *mic_jack = NULL;
struct snd_soc_jack *btn_jack = NULL;
- int *type = (int *)data;
+ int type;
- if (*type & SND_JACK_MICROPHONE)
- mic_jack = hs_jack;
- if (*type & (SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3))
- btn_jack = hs_jack;
+ if (hs_jack) {
+ type = *(int *)data;
+
+ if (type & SND_JACK_MICROPHONE)
+ mic_jack = hs_jack;
+ if (type & (SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3))
+ btn_jack = hs_jack;
+ }
return rt5645_set_jack_detect(component, hs_jack, mic_jack, btn_jack);
}
diff --git a/sound/soc/codecs/wcd-clsh-v2.c b/sound/soc/codecs/wcd-clsh-v2.c
index a75db27e5205..d96e23ec43d4 100644
--- a/sound/soc/codecs/wcd-clsh-v2.c
+++ b/sound/soc/codecs/wcd-clsh-v2.c
@@ -355,6 +355,7 @@ void wcd_clsh_set_hph_mode(struct wcd_clsh_ctrl *ctrl, int mode)
wcd_clsh_v2_set_hph_mode(comp, mode);
}
+EXPORT_SYMBOL_GPL(wcd_clsh_set_hph_mode);
static void wcd_clsh_set_flyback_current(struct snd_soc_component *comp,
int mode)
@@ -869,11 +870,13 @@ int wcd_clsh_ctrl_set_state(struct wcd_clsh_ctrl *ctrl,
return 0;
}
+EXPORT_SYMBOL_GPL(wcd_clsh_ctrl_set_state);
int wcd_clsh_ctrl_get_state(struct wcd_clsh_ctrl *ctrl)
{
return ctrl->state;
}
+EXPORT_SYMBOL_GPL(wcd_clsh_ctrl_get_state);
struct wcd_clsh_ctrl *wcd_clsh_ctrl_alloc(struct snd_soc_component *comp,
int version)
@@ -890,8 +893,13 @@ struct wcd_clsh_ctrl *wcd_clsh_ctrl_alloc(struct snd_soc_component *comp,
return ctrl;
}
+EXPORT_SYMBOL_GPL(wcd_clsh_ctrl_alloc);
void wcd_clsh_ctrl_free(struct wcd_clsh_ctrl *ctrl)
{
kfree(ctrl);
}
+EXPORT_SYMBOL_GPL(wcd_clsh_ctrl_free);
+
+MODULE_DESCRIPTION("WCD93XX Class-H driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c
index 1fbb2c2fadb5..8565a530706d 100644
--- a/sound/soc/intel/avs/pcm.c
+++ b/sound/soc/intel/avs/pcm.c
@@ -796,6 +796,28 @@ static int avs_component_probe(struct snd_soc_component *component)
ret = avs_load_topology(component, filename);
kfree(filename);
+ if (ret == -ENOENT && !strncmp(mach->tplg_filename, "hda-", 4)) {
+ unsigned int vendor_id;
+
+ if (sscanf(mach->tplg_filename, "hda-%08x-tplg.bin", &vendor_id) != 1)
+ return ret;
+
+ if (((vendor_id >> 16) & 0xFFFF) == 0x8086)
+ mach->tplg_filename = devm_kasprintf(adev->dev, GFP_KERNEL,
+ "hda-8086-generic-tplg.bin");
+ else
+ mach->tplg_filename = devm_kasprintf(adev->dev, GFP_KERNEL,
+ "hda-generic-tplg.bin");
+
+ filename = kasprintf(GFP_KERNEL, "%s/%s", component->driver->topology_name_prefix,
+ mach->tplg_filename);
+ if (!filename)
+ return -ENOMEM;
+
+ dev_info(card->dev, "trying to load fallback topology %s\n", mach->tplg_filename);
+ ret = avs_load_topology(component, filename);
+ kfree(filename);
+ }
if (ret < 0)
return ret;
diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c
index f18406dfa1e4..ba7c0ae82e00 100644
--- a/sound/soc/soc-component.c
+++ b/sound/soc/soc-component.c
@@ -1054,7 +1054,7 @@ int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream)
int snd_soc_pcm_component_copy(struct snd_pcm_substream *substream,
int channel, unsigned long pos,
- struct iov_iter *buf, unsigned long bytes)
+ struct iov_iter *iter, unsigned long bytes)
{
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_soc_component *component;
@@ -1065,7 +1065,7 @@ int snd_soc_pcm_component_copy(struct snd_pcm_substream *substream,
if (component->driver->copy)
return soc_component_ret(component,
component->driver->copy(component, substream,
- channel, pos, buf, bytes));
+ channel, pos, iter, bytes));
return -EINVAL;
}
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index ff2166525dbc..d0653d775c87 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -290,29 +290,29 @@ static snd_pcm_uframes_t dmaengine_pcm_pointer(
static int dmaengine_copy(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
- struct iov_iter *buf, unsigned long bytes)
+ struct iov_iter *iter, unsigned long bytes)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
int (*process)(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
- struct iov_iter *buf, unsigned long bytes) = pcm->config->process;
+ unsigned long bytes) = pcm->config->process;
bool is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
void *dma_ptr = runtime->dma_area + hwoff +
channel * (runtime->dma_bytes / runtime->channels);
if (is_playback)
- if (copy_from_iter(dma_ptr, bytes, buf) != bytes)
+ if (copy_from_iter(dma_ptr, bytes, iter) != bytes)
return -EFAULT;
if (process) {
- int ret = process(substream, channel, hwoff, buf, bytes);
+ int ret = process(substream, channel, hwoff, bytes);
if (ret < 0)
return ret;
}
if (!is_playback)
- if (copy_to_iter(dma_ptr, bytes, buf) != bytes)
+ if (copy_to_iter(dma_ptr, bytes, iter) != bytes)
return -EFAULT;
return 0;
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index f9b5d5969155..0acc848c1f00 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -1246,7 +1246,7 @@ static const struct snd_soc_dai_ops stm32_sai_pcm_dai_ops2 = {
static int stm32_sai_pcm_process_spdif(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
- struct iov_iter *buf, unsigned long bytes)
+ unsigned long bytes)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
diff --git a/sound/soc/ti/ams-delta.c b/sound/soc/ti/ams-delta.c
index 1028b5efcfff..371943350fdf 100644
--- a/sound/soc/ti/ams-delta.c
+++ b/sound/soc/ti/ams-delta.c
@@ -336,7 +336,7 @@ static void cx81801_hangup(struct tty_struct *tty)
}
/* Line discipline .receive_buf() */
-static void cx81801_receive(struct tty_struct *tty, const unsigned char *cp,
+static void cx81801_receive(struct tty_struct *tty, const u8 *cp,
const char *fp, int count)
{
struct snd_soc_component *component = tty->disc_data;
diff --git a/sound/usb/midi2.c b/sound/usb/midi2.c
index a27e244650c8..1ec177fe284e 100644
--- a/sound/usb/midi2.c
+++ b/sound/usb/midi2.c
@@ -265,7 +265,7 @@ static void free_midi_urbs(struct snd_usb_midi2_endpoint *ep)
if (!ep)
return;
- for (i = 0; i < ep->num_urbs; ++i) {
+ for (i = 0; i < NUM_URBS; ++i) {
ctx = &ep->urbs[i];
if (!ctx->urb)
break;
@@ -279,6 +279,7 @@ static void free_midi_urbs(struct snd_usb_midi2_endpoint *ep)
}
/* allocate URBs for an EP */
+/* the callers should handle allocation errors via free_midi_urbs() */
static int alloc_midi_urbs(struct snd_usb_midi2_endpoint *ep)
{
struct snd_usb_midi2_urb *ctx;
@@ -351,8 +352,10 @@ static int snd_usb_midi_v2_open(struct snd_ump_endpoint *ump, int dir)
return -EIO;
if (ep->direction == STR_OUT) {
err = alloc_midi_urbs(ep);
- if (err)
+ if (err) {
+ free_midi_urbs(ep);
return err;
+ }
}
return 0;
}
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 1f6d904c6481..798e60b5454b 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -434,6 +434,7 @@
#define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */
#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
+#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* AMD SEV-ES full debug state swap support */
/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index 0b214f6ab5c8..2e5c231e08ac 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -83,7 +83,7 @@ const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
#define perf_event_name(array, id) ({ \
const char *event_str = NULL; \
\
- if ((id) >= 0 && (id) < ARRAY_SIZE(array)) \
+ if ((id) < ARRAY_SIZE(array)) \
event_str = array[id]; \
event_str; \
})
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index f8deae4e26a1..44bbf80f0cfd 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -51,9 +51,9 @@ enum autochan {
* Has the side effect of filling the channels[i].location values used
* in processing the buffer output.
**/
-static int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
+static unsigned int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
{
- int bytes = 0;
+ unsigned int bytes = 0;
int i = 0;
while (i < num_channels) {
@@ -348,7 +348,7 @@ int main(int argc, char **argv)
ssize_t read_size;
int dev_num = -1, trig_num = -1;
char *buffer_access = NULL;
- int scan_size;
+ unsigned int scan_size;
int noevents = 0;
int notrigger = 0;
char *dummy;
@@ -674,7 +674,16 @@ int main(int argc, char **argv)
}
scan_size = size_from_channelarray(channels, num_channels);
- data = malloc(scan_size * buf_len);
+
+ size_t total_buf_len = scan_size * buf_len;
+
+ if (scan_size > 0 && total_buf_len / scan_size != buf_len) {
+ ret = -EFAULT;
+ perror("Integer overflow happened when calculating scan_size * buf_len");
+ goto error;
+ }
+
+ data = malloc(total_buf_len);
if (!data) {
ret = -ENOMEM;
goto error;
diff --git a/tools/lib/perf/mmap.c b/tools/lib/perf/mmap.c
index 0d1634cedf44..2184814b37dd 100644
--- a/tools/lib/perf/mmap.c
+++ b/tools/lib/perf/mmap.c
@@ -392,6 +392,72 @@ static u64 read_perf_counter(unsigned int counter)
static u64 read_timestamp(void) { return read_sysreg(cntvct_el0); }
+/* __riscv_xlen contains the width of the native base integer, here 64-bit */
+#elif defined(__riscv) && __riscv_xlen == 64
+
+/* TODO: implement rv32 support */
+
+#define CSR_CYCLE 0xc00
+#define CSR_TIME 0xc01
+
+#define csr_read(csr) \
+({ \
+ register unsigned long __v; \
+ __asm__ __volatile__ ("csrr %0, %1" \
+ : "=r" (__v) \
+ : "i" (csr) : ); \
+ __v; \
+})
+
+static unsigned long csr_read_num(int csr_num)
+{
+#define switchcase_csr_read(__csr_num, __val) {\
+ case __csr_num: \
+ __val = csr_read(__csr_num); \
+ break; }
+#define switchcase_csr_read_2(__csr_num, __val) {\
+ switchcase_csr_read(__csr_num + 0, __val) \
+ switchcase_csr_read(__csr_num + 1, __val)}
+#define switchcase_csr_read_4(__csr_num, __val) {\
+ switchcase_csr_read_2(__csr_num + 0, __val) \
+ switchcase_csr_read_2(__csr_num + 2, __val)}
+#define switchcase_csr_read_8(__csr_num, __val) {\
+ switchcase_csr_read_4(__csr_num + 0, __val) \
+ switchcase_csr_read_4(__csr_num + 4, __val)}
+#define switchcase_csr_read_16(__csr_num, __val) {\
+ switchcase_csr_read_8(__csr_num + 0, __val) \
+ switchcase_csr_read_8(__csr_num + 8, __val)}
+#define switchcase_csr_read_32(__csr_num, __val) {\
+ switchcase_csr_read_16(__csr_num + 0, __val) \
+ switchcase_csr_read_16(__csr_num + 16, __val)}
+
+ unsigned long ret = 0;
+
+ switch (csr_num) {
+ switchcase_csr_read_32(CSR_CYCLE, ret)
+ default:
+ break;
+ }
+
+ return ret;
+#undef switchcase_csr_read_32
+#undef switchcase_csr_read_16
+#undef switchcase_csr_read_8
+#undef switchcase_csr_read_4
+#undef switchcase_csr_read_2
+#undef switchcase_csr_read
+}
+
+static u64 read_perf_counter(unsigned int counter)
+{
+ return csr_read_num(CSR_CYCLE + counter);
+}
+
+static u64 read_timestamp(void)
+{
+ return csr_read_num(CSR_TIME);
+}
+
#else
static u64 read_perf_counter(unsigned int counter __maybe_unused) { return 0; }
static u64 read_timestamp(void) { return 0; }
diff --git a/tools/mm/Makefile b/tools/mm/Makefile
index 6c1da51f4177..1c5606cc3334 100644
--- a/tools/mm/Makefile
+++ b/tools/mm/Makefile
@@ -8,8 +8,8 @@ TARGETS=page-types slabinfo page_owner_sort
LIB_DIR = ../lib/api
LIBS = $(LIB_DIR)/libapi.a
-CFLAGS += -Wall -Wextra -I../lib/
-LDFLAGS += $(LIBS)
+CFLAGS += -Wall -Wextra -I../lib/ -pthread
+LDFLAGS += $(LIBS) -pthread
all: $(TARGETS)
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index e68ca6229756..886a13a77a16 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -284,7 +284,8 @@ static struct test_case tests__basic_mmap[] = {
"permissions"),
TEST_CASE_REASON("User space counter reading of instructions",
mmap_user_read_instr,
-#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+ (defined(__riscv) && __riscv_xlen == 64)
"permissions"
#else
"unsupported"
@@ -292,7 +293,8 @@ static struct test_case tests__basic_mmap[] = {
),
TEST_CASE_REASON("User space counter reading of cycles",
mmap_user_read_cycles,
-#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+ (defined(__riscv) && __riscv_xlen == 64)
"permissions"
#else
"unsupported"
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index dc531805a570..b53753dee02f 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -57,7 +57,7 @@ LIB_MIN= 1
PACKAGE = cpupower
PACKAGE_BUGREPORT = linux-pm@vger.kernel.org
-LANGUAGES = de fr it cs pt
+LANGUAGES = de fr it cs pt ka
# Directory definitions. These are default and most probably
diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c
index a73346e854b8..5fcc2a92957e 100644
--- a/tools/power/x86/intel-speed-select/isst-config.c
+++ b/tools/power/x86/intel-speed-select/isst-config.c
@@ -5,6 +5,7 @@
*/
#include <linux/isst_if.h>
+#include <sys/utsname.h>
#include "isst.h"
@@ -15,7 +16,7 @@ struct process_cmd_struct {
int arg;
};
-static const char *version_str = "v1.16";
+static const char *version_str = "v1.17";
static const int supported_api_ver = 2;
static struct isst_if_platform_info isst_platform_info;
@@ -473,11 +474,44 @@ static unsigned int is_cpu_online(int cpu)
return online;
}
+static int get_kernel_version(int *major, int *minor)
+{
+ struct utsname buf;
+ int ret;
+
+ ret = uname(&buf);
+ if (ret)
+ return ret;
+
+ ret = sscanf(buf.release, "%d.%d", major, minor);
+ if (ret != 2)
+ return ret;
+
+ return 0;
+}
+
+#define CPU0_HOTPLUG_DEPRECATE_MAJOR_VER 6
+#define CPU0_HOTPLUG_DEPRECATE_MINOR_VER 5
+
void set_cpu_online_offline(int cpu, int state)
{
char buffer[128];
int fd, ret;
+ if (!cpu) {
+ int major, minor;
+
+ ret = get_kernel_version(&major, &minor);
+ if (!ret) {
+ if (major > CPU0_HOTPLUG_DEPRECATE_MAJOR_VER || (major == CPU0_HOTPLUG_DEPRECATE_MAJOR_VER &&
+ minor >= CPU0_HOTPLUG_DEPRECATE_MINOR_VER)) {
+ debug_printf("Ignore CPU 0 offline/online for kernel version >= %d.%d\n", major, minor);
+ debug_printf("Use cgroups to isolate CPU 0\n");
+ return;
+ }
+ }
+ }
+
snprintf(buffer, sizeof(buffer),
"/sys/devices/system/cpu/cpu%d/online", cpu);
@@ -778,6 +812,7 @@ static void create_cpu_map(void)
map.cpu_map[0].logical_cpu);
} else {
update_punit_cpu_info(map.cpu_map[0].physical_cpu, &cpu_map[i]);
+ punit_id = cpu_map[i].punit_id;
}
}
cpu_map[i].initialized = 1;
@@ -2621,10 +2656,11 @@ static struct process_cmd_struct isst_cmds[] = {
*/
void parse_cpu_command(char *optarg)
{
- unsigned int start, end;
+ unsigned int start, end, invalid_count;
char *next;
next = optarg;
+ invalid_count = 0;
while (next && *next) {
if (*next == '-') /* no negative cpu numbers */
@@ -2634,6 +2670,8 @@ void parse_cpu_command(char *optarg)
if (max_target_cpus < MAX_CPUS_IN_ONE_REQ)
target_cpus[max_target_cpus++] = start;
+ else
+ invalid_count = 1;
if (*next == '\0')
break;
@@ -2660,6 +2698,8 @@ void parse_cpu_command(char *optarg)
while (++start <= end) {
if (max_target_cpus < MAX_CPUS_IN_ONE_REQ)
target_cpus[max_target_cpus++] = start;
+ else
+ invalid_count = 1;
}
if (*next == ',')
@@ -2668,6 +2708,13 @@ void parse_cpu_command(char *optarg)
goto error;
}
+ if (invalid_count) {
+ isst_ctdp_display_information_start(outf);
+ isst_display_error_info_message(1, "Too many CPUs in one request: max is", 1, MAX_CPUS_IN_ONE_REQ - 1);
+ isst_ctdp_display_information_end(outf);
+ exit(-1);
+ }
+
#ifdef DEBUG
{
int i;
diff --git a/tools/power/x86/intel-speed-select/isst-display.c b/tools/power/x86/intel-speed-select/isst-display.c
index 0403d42ab1ba..14c9b037859a 100644
--- a/tools/power/x86/intel-speed-select/isst-display.c
+++ b/tools/power/x86/intel-speed-select/isst-display.c
@@ -442,7 +442,7 @@ void isst_ctdp_display_information(struct isst_id *id, FILE *outf, int tdp_level
}
if (ctdp_level->mem_freq) {
- snprintf(header, sizeof(header), "mem-frequency(MHz)");
+ snprintf(header, sizeof(header), "max-mem-frequency(MHz)");
snprintf(value, sizeof(value), "%d",
ctdp_level->mem_freq);
format_and_print(outf, level + 2, header, value);
diff --git a/tools/power/x86/intel-speed-select/isst.h b/tools/power/x86/intel-speed-select/isst.h
index 54fc21575d56..8def22dec4a2 100644
--- a/tools/power/x86/intel-speed-select/isst.h
+++ b/tools/power/x86/intel-speed-select/isst.h
@@ -79,7 +79,7 @@
#define DISP_FREQ_MULTIPLIER 100
-#define MAX_PACKAGE_COUNT 8
+#define MAX_PACKAGE_COUNT 32
#define MAX_DIE_PER_PACKAGE 2
#define MAX_PUNIT_PER_DIE 8
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index e00520cc6349..cffaf2245d4f 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -159,7 +159,7 @@ void multiorder_tagged_iteration(struct xarray *xa)
item_kill_tree(xa);
}
-bool stop_iteration = false;
+bool stop_iteration;
static void *creator_func(void *ptr)
{
@@ -201,6 +201,7 @@ static void multiorder_iteration_race(struct xarray *xa)
pthread_t worker_thread[num_threads];
int i;
+ stop_iteration = false;
pthread_create(&worker_thread[0], NULL, &creator_func, xa);
for (i = 1; i < num_threads; i++)
pthread_create(&worker_thread[i], NULL, &iterator_func, xa);
@@ -211,6 +212,61 @@ static void multiorder_iteration_race(struct xarray *xa)
item_kill_tree(xa);
}
+static void *load_creator(void *ptr)
+{
+ /* 'order' is set up to ensure we have sibling entries */
+ unsigned int order;
+ struct radix_tree_root *tree = ptr;
+ int i;
+
+ rcu_register_thread();
+ item_insert_order(tree, 3 << RADIX_TREE_MAP_SHIFT, 0);
+ item_insert_order(tree, 2 << RADIX_TREE_MAP_SHIFT, 0);
+ for (i = 0; i < 10000; i++) {
+ for (order = 1; order < RADIX_TREE_MAP_SHIFT; order++) {
+ unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) -
+ (1 << order);
+ item_insert_order(tree, index, order);
+ item_delete_rcu(tree, index);
+ }
+ }
+ rcu_unregister_thread();
+
+ stop_iteration = true;
+ return NULL;
+}
+
+static void *load_worker(void *ptr)
+{
+ unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) - 1;
+
+ rcu_register_thread();
+ while (!stop_iteration) {
+ struct item *item = xa_load(ptr, index);
+ assert(!xa_is_internal(item));
+ }
+ rcu_unregister_thread();
+
+ return NULL;
+}
+
+static void load_race(struct xarray *xa)
+{
+ const int num_threads = sysconf(_SC_NPROCESSORS_ONLN) * 4;
+ pthread_t worker_thread[num_threads];
+ int i;
+
+ stop_iteration = false;
+ pthread_create(&worker_thread[0], NULL, &load_creator, xa);
+ for (i = 1; i < num_threads; i++)
+ pthread_create(&worker_thread[i], NULL, &load_worker, xa);
+
+ for (i = 0; i < num_threads; i++)
+ pthread_join(worker_thread[i], NULL);
+
+ item_kill_tree(xa);
+}
+
static DEFINE_XARRAY(array);
void multiorder_checks(void)
@@ -218,12 +274,20 @@ void multiorder_checks(void)
multiorder_iteration(&array);
multiorder_tagged_iteration(&array);
multiorder_iteration_race(&array);
+ load_race(&array);
radix_tree_cpu_dead(0);
}
-int __weak main(void)
+int __weak main(int argc, char **argv)
{
+ int opt;
+
+ while ((opt = getopt(argc, argv, "ls:v")) != -1) {
+ if (opt == 'v')
+ test_verbose++;
+ }
+
rcu_register_thread();
radix_tree_init();
multiorder_checks();
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index edef49fcd23e..caede9b574cb 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -50,14 +50,17 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
test_cgroup_storage \
test_tcpnotify_user test_sysctl \
test_progs-no_alu32
+TEST_INST_SUBDIRS := no_alu32
# Also test bpf-gcc, if present
ifneq ($(BPF_GCC),)
TEST_GEN_PROGS += test_progs-bpf_gcc
+TEST_INST_SUBDIRS += bpf_gcc
endif
ifneq ($(CLANG_CPUV4),)
TEST_GEN_PROGS += test_progs-cpuv4
+TEST_INST_SUBDIRS += cpuv4
endif
TEST_GEN_FILES = test_lwt_ip_encap.bpf.o test_tc_edt.bpf.o
@@ -714,3 +717,12 @@ EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
# Delete partially updated (corrupted) files on error
.DELETE_ON_ERROR:
+
+DEFAULT_INSTALL_RULE := $(INSTALL_RULE)
+override define INSTALL_RULE
+ $(DEFAULT_INSTALL_RULE)
+ @for DIR in $(TEST_INST_SUBDIRS); do \
+ mkdir -p $(INSTALL_PATH)/$$DIR; \
+ rsync -a $(OUTPUT)/$$DIR/*.bpf.o $(INSTALL_PATH)/$$DIR;\
+ done
+endef
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c
index 31f1e815f671..ee0458a5ce78 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c
@@ -8,6 +8,7 @@
#include <linux/unistd.h>
#include <linux/mount.h>
#include <sys/syscall.h>
+#include "bpf/libbpf_internal.h"
static inline int sys_fsopen(const char *fsname, unsigned flags)
{
@@ -155,7 +156,7 @@ static void validate_pin(int map_fd, const char *map_name, int src_value,
ASSERT_OK(err, "obj_pin");
/* cleanup */
- if (pin_opts.path_fd >= 0)
+ if (path_kind == PATH_FD_REL && pin_opts.path_fd >= 0)
close(pin_opts.path_fd);
if (old_cwd[0])
ASSERT_OK(chdir(old_cwd), "restore_cwd");
@@ -220,7 +221,7 @@ static void validate_get(int map_fd, const char *map_name, int src_value,
goto cleanup;
/* cleanup */
- if (get_opts.path_fd >= 0)
+ if (path_kind == PATH_FD_REL && get_opts.path_fd >= 0)
close(get_opts.path_fd);
if (old_cwd[0])
ASSERT_OK(chdir(old_cwd), "restore_cwd");
diff --git a/tools/testing/selftests/bpf/prog_tests/d_path.c b/tools/testing/selftests/bpf/prog_tests/d_path.c
index 911345c526e6..ccc768592e66 100644
--- a/tools/testing/selftests/bpf/prog_tests/d_path.c
+++ b/tools/testing/selftests/bpf/prog_tests/d_path.c
@@ -12,6 +12,17 @@
#include "test_d_path_check_rdonly_mem.skel.h"
#include "test_d_path_check_types.skel.h"
+/* sys_close_range has not been around for a long time, so let's
+ * make sure we can call it on systems with older glibc
+ */
+#ifndef __NR_close_range
+#ifdef __alpha__
+#define __NR_close_range 546
+#else
+#define __NR_close_range 436
+#endif
+#endif
+
static int duration;
static struct {
@@ -90,7 +101,11 @@ static int trigger_fstat_events(pid_t pid)
fstat(indicatorfd, &fileStat);
out_close:
- /* triggers filp_close */
+ /* sys_close no longer triggers filp_close, but we can
+ * call sys_close_range instead, which still does
+ */
+#define close(fd) syscall(__NR_close_range, fd, fd, 0)
+
close(pipefd[0]);
close(pipefd[1]);
close(sockfd);
@@ -98,6 +113,8 @@ out_close:
close(devfd);
close(localfd);
close(indicatorfd);
+
+#undef close
return ret;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_storage_omem_uncharge.c b/tools/testing/selftests/bpf/prog_tests/sk_storage_omem_uncharge.c
new file mode 100644
index 000000000000..f35852d245e3
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sk_storage_omem_uncharge.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Facebook */
+#include <test_progs.h>
+#include <bpf/libbpf.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include "sk_storage_omem_uncharge.skel.h"
+
+void test_sk_storage_omem_uncharge(void)
+{
+ struct sk_storage_omem_uncharge *skel;
+ int sk_fd = -1, map_fd, err, value;
+ socklen_t optlen;
+
+ skel = sk_storage_omem_uncharge__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel open_and_load"))
+ return;
+ map_fd = bpf_map__fd(skel->maps.sk_storage);
+
+ /* A standalone socket, not bound to addr:port,
+ * so a netns is not needed.
+ */
+ sk_fd = socket(AF_INET6, SOCK_STREAM, 0);
+ if (!ASSERT_GE(sk_fd, 0, "socket"))
+ goto done;
+
+ optlen = sizeof(skel->bss->cookie);
+ err = getsockopt(sk_fd, SOL_SOCKET, SO_COOKIE, &skel->bss->cookie, &optlen);
+ if (!ASSERT_OK(err, "getsockopt(SO_COOKIE)"))
+ goto done;
+
+ value = 0;
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(value=0)"))
+ goto done;
+
+ value = 0xdeadbeef;
+ err = bpf_map_update_elem(map_fd, &sk_fd, &value, 0);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(value=0xdeadbeef)"))
+ goto done;
+
+ err = sk_storage_omem_uncharge__attach(skel);
+ if (!ASSERT_OK(err, "attach"))
+ goto done;
+
+ close(sk_fd);
+ sk_fd = -1;
+
+ ASSERT_EQ(skel->bss->cookie_found, 2, "cookie_found");
+ ASSERT_EQ(skel->bss->omem, 0, "omem");
+
+done:
+ sk_storage_omem_uncharge__destroy(skel);
+ if (sk_fd != -1)
+ close(sk_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
index d12665490a90..36d829a65aa4 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
@@ -179,6 +179,32 @@
__ret; \
})
+static inline int poll_connect(int fd, unsigned int timeout_sec)
+{
+ struct timeval timeout = { .tv_sec = timeout_sec };
+ fd_set wfds;
+ int r, eval;
+ socklen_t esize = sizeof(eval);
+
+ FD_ZERO(&wfds);
+ FD_SET(fd, &wfds);
+
+ r = select(fd + 1, NULL, &wfds, NULL, &timeout);
+ if (r == 0)
+ errno = ETIME;
+ if (r != 1)
+ return -1;
+
+ if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &eval, &esize) < 0)
+ return -1;
+ if (eval != 0) {
+ errno = eval;
+ return -1;
+ }
+
+ return 0;
+}
+
static inline int poll_read(int fd, unsigned int timeout_sec)
{
struct timeval timeout = { .tv_sec = timeout_sec };
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
index 5674a9d0cacf..8df8cbb447f1 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
@@ -1452,11 +1452,18 @@ static int vsock_socketpair_connectible(int sotype, int *v0, int *v1)
if (p < 0)
goto close_cli;
+ if (poll_connect(c, IO_TIMEOUT_SEC) < 0) {
+ FAIL_ERRNO("poll_connect");
+ goto close_acc;
+ }
+
*v0 = p;
*v1 = c;
return 0;
+close_acc:
+ close(p);
close_cli:
close(c);
close_srv:
diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
index cfed4df490f3..0b793a102791 100644
--- a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
+++ b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
@@ -88,6 +88,7 @@
#define sk_v6_rcv_saddr __sk_common.skc_v6_rcv_saddr
#define sk_flags __sk_common.skc_flags
#define sk_reuse __sk_common.skc_reuse
+#define sk_cookie __sk_common.skc_cookie
#define s6_addr32 in6_u.u6_addr32
diff --git a/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c b/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c
new file mode 100644
index 000000000000..3e745793b27a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/sk_storage_omem_uncharge.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Facebook */
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+void *local_storage_ptr = NULL;
+void *sk_ptr = NULL;
+int cookie_found = 0;
+__u64 cookie = 0;
+__u32 omem = 0;
+
+void *bpf_rdonly_cast(void *, __u32) __ksym;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, int);
+} sk_storage SEC(".maps");
+
+SEC("fexit/bpf_local_storage_destroy")
+int BPF_PROG(bpf_local_storage_destroy, struct bpf_local_storage *local_storage)
+{
+ struct sock *sk;
+
+ if (local_storage_ptr != local_storage)
+ return 0;
+
+ sk = bpf_rdonly_cast(sk_ptr, bpf_core_type_id_kernel(struct sock));
+ if (sk->sk_cookie.counter != cookie)
+ return 0;
+
+ cookie_found++;
+ omem = sk->sk_omem_alloc.counter;
+ local_storage_ptr = NULL;
+
+ return 0;
+}
+
+SEC("fentry/inet6_sock_destruct")
+int BPF_PROG(inet6_sock_destruct, struct sock *sk)
+{
+ int *value;
+
+ if (!cookie || sk->sk_cookie.counter != cookie)
+ return 0;
+
+ value = bpf_sk_storage_get(&sk_storage, sk, 0, 0);
+ if (value && *value == 0xdeadbeef) {
+ cookie_found++;
+ sk_ptr = sk;
+ local_storage_ptr = sk->sk_bpf_storage;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/cgroup/.gitignore b/tools/testing/selftests/cgroup/.gitignore
index 4d556df4f77b..af8c3f30b9c1 100644
--- a/tools/testing/selftests/cgroup/.gitignore
+++ b/tools/testing/selftests/cgroup/.gitignore
@@ -5,5 +5,6 @@ test_freezer
test_kmem
test_kill
test_cpu
+test_cpuset
test_zswap
wait_inotify
diff --git a/tools/testing/selftests/cgroup/Makefile b/tools/testing/selftests/cgroup/Makefile
index 27dbdd7bb4bb..c27f05f6ce9b 100644
--- a/tools/testing/selftests/cgroup/Makefile
+++ b/tools/testing/selftests/cgroup/Makefile
@@ -12,6 +12,7 @@ TEST_GEN_PROGS += test_core
TEST_GEN_PROGS += test_freezer
TEST_GEN_PROGS += test_kill
TEST_GEN_PROGS += test_cpu
+TEST_GEN_PROGS += test_cpuset
TEST_GEN_PROGS += test_zswap
LOCAL_HDRS += $(selfdir)/clone3/clone3_selftests.h $(selfdir)/pidfd/pidfd.h
@@ -24,4 +25,5 @@ $(OUTPUT)/test_core: cgroup_util.c
$(OUTPUT)/test_freezer: cgroup_util.c
$(OUTPUT)/test_kill: cgroup_util.c
$(OUTPUT)/test_cpu: cgroup_util.c
+$(OUTPUT)/test_cpuset: cgroup_util.c
$(OUTPUT)/test_zswap: cgroup_util.c
diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
index e8bbbdb77e0d..0340d4ca8f51 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/cgroup_util.c
@@ -286,6 +286,8 @@ int cg_destroy(const char *cgroup)
{
int ret;
+ if (!cgroup)
+ return 0;
retry:
ret = rmdir(cgroup);
if (ret && errno == EBUSY) {
diff --git a/tools/testing/selftests/cgroup/cgroup_util.h b/tools/testing/selftests/cgroup/cgroup_util.h
index c92df4e5d395..1df7f202214a 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.h
+++ b/tools/testing/selftests/cgroup/cgroup_util.h
@@ -11,6 +11,8 @@
#define USEC_PER_SEC 1000000L
#define NSEC_PER_SEC 1000000000L
+#define TEST_UID 65534 /* usually nobody, any !root is fine */
+
/*
* Checks if two given values differ by less than err% of their sum.
*/
diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c
index 600123503063..80aa6b2373b9 100644
--- a/tools/testing/selftests/cgroup/test_core.c
+++ b/tools/testing/selftests/cgroup/test_core.c
@@ -683,7 +683,7 @@ cleanup:
*/
static int test_cgcore_lesser_euid_open(const char *root)
{
- const uid_t test_euid = 65534; /* usually nobody, any !root is fine */
+ const uid_t test_euid = TEST_UID;
int ret = KSFT_FAIL;
char *cg_test_a = NULL, *cg_test_b = NULL;
char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
diff --git a/tools/testing/selftests/cgroup/test_cpuset.c b/tools/testing/selftests/cgroup/test_cpuset.c
new file mode 100644
index 000000000000..b061ed1e05b4
--- /dev/null
+++ b/tools/testing/selftests/cgroup/test_cpuset.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/limits.h>
+#include <signal.h>
+
+#include "../kselftest.h"
+#include "cgroup_util.h"
+
+static int idle_process_fn(const char *cgroup, void *arg)
+{
+ (void)pause();
+ return 0;
+}
+
+static int do_migration_fn(const char *cgroup, void *arg)
+{
+ int object_pid = (int)(size_t)arg;
+
+ if (setuid(TEST_UID))
+ return EXIT_FAILURE;
+
+ // XXX checking /proc/$pid/cgroup would be quicker than waiting
+ if (cg_enter(cgroup, object_pid) ||
+ cg_wait_for_proc_count(cgroup, 1))
+ return EXIT_FAILURE;
+
+ return EXIT_SUCCESS;
+}
+
+static int do_controller_fn(const char *cgroup, void *arg)
+{
+ const char *child = cgroup;
+ const char *parent = arg;
+
+ if (setuid(TEST_UID))
+ return EXIT_FAILURE;
+
+ if (!cg_read_strstr(child, "cgroup.controllers", "cpuset"))
+ return EXIT_FAILURE;
+
+ if (cg_write(parent, "cgroup.subtree_control", "+cpuset"))
+ return EXIT_FAILURE;
+
+ if (cg_read_strstr(child, "cgroup.controllers", "cpuset"))
+ return EXIT_FAILURE;
+
+ if (cg_write(parent, "cgroup.subtree_control", "-cpuset"))
+ return EXIT_FAILURE;
+
+ if (!cg_read_strstr(child, "cgroup.controllers", "cpuset"))
+ return EXIT_FAILURE;
+
+ return EXIT_SUCCESS;
+}
+
+/*
+ * Migrate a process between two sibling cgroups.
+ * Success should depend only on the parent cgroup permissions, not on the
+ * migrated process itself (the cpuset controller is involved because it uses
+ * security_task_setscheduler() in cgroup v1).
+ *
+ * Deliberately don't set cpuset.cpus in children to avoid defining migration
+ * permissions between two different cpusets.
+ */
+static int test_cpuset_perms_object(const char *root, bool allow)
+{
+ char *parent = NULL, *child_src = NULL, *child_dst = NULL;
+ char *parent_procs = NULL, *child_src_procs = NULL, *child_dst_procs = NULL;
+ const uid_t test_euid = TEST_UID;
+ int object_pid = 0;
+ int ret = KSFT_FAIL;
+
+ parent = cg_name(root, "cpuset_test_0");
+ if (!parent)
+ goto cleanup;
+ parent_procs = cg_name(parent, "cgroup.procs");
+ if (!parent_procs)
+ goto cleanup;
+ if (cg_create(parent))
+ goto cleanup;
+
+ child_src = cg_name(parent, "cpuset_test_1");
+ if (!child_src)
+ goto cleanup;
+ child_src_procs = cg_name(child_src, "cgroup.procs");
+ if (!child_src_procs)
+ goto cleanup;
+ if (cg_create(child_src))
+ goto cleanup;
+
+ child_dst = cg_name(parent, "cpuset_test_2");
+ if (!child_dst)
+ goto cleanup;
+ child_dst_procs = cg_name(child_dst, "cgroup.procs");
+ if (!child_dst_procs)
+ goto cleanup;
+ if (cg_create(child_dst))
+ goto cleanup;
+
+ if (cg_write(parent, "cgroup.subtree_control", "+cpuset"))
+ goto cleanup;
+
+ if (cg_read_strstr(child_src, "cgroup.controllers", "cpuset") ||
+ cg_read_strstr(child_dst, "cgroup.controllers", "cpuset"))
+ goto cleanup;
+
+ /* Enable permissions along src->dst tree path */
+ if (chown(child_src_procs, test_euid, -1) ||
+ chown(child_dst_procs, test_euid, -1))
+ goto cleanup;
+
+ if (allow && chown(parent_procs, test_euid, -1))
+ goto cleanup;
+
+ /* Fork a privileged child as a test object */
+ object_pid = cg_run_nowait(child_src, idle_process_fn, NULL);
+ if (object_pid < 0)
+ goto cleanup;
+
+ /* Carry out migration in a child process that can drop all privileges
+ * (including capabilities); the main process must remain privileged for
+ * cleanup.
+ * The child process's cgroup is irrelevant, but we place it into child_dst
+ * as a hacky way to pass the migration target to the child.
+ */
+ if (allow ^ (cg_run(child_dst, do_migration_fn, (void *)(size_t)object_pid) == EXIT_SUCCESS))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (object_pid > 0) {
+ (void)kill(object_pid, SIGTERM);
+ (void)clone_reap(object_pid, WEXITED);
+ }
+
+ cg_destroy(child_dst);
+ free(child_dst_procs);
+ free(child_dst);
+
+ cg_destroy(child_src);
+ free(child_src_procs);
+ free(child_src);
+
+ cg_destroy(parent);
+ free(parent_procs);
+ free(parent);
+
+ return ret;
+}
+
+static int test_cpuset_perms_object_allow(const char *root)
+{
+ return test_cpuset_perms_object(root, true);
+}
+
+static int test_cpuset_perms_object_deny(const char *root)
+{
+ return test_cpuset_perms_object(root, false);
+}
+
+/*
+ * Migrate a process between parent and child implicitly.
+ * Implicit migration happens when a controller is enabled/disabled.
+ *
+ */
+static int test_cpuset_perms_subtree(const char *root)
+{
+ char *parent = NULL, *child = NULL;
+ char *parent_procs = NULL, *parent_subctl = NULL, *child_procs = NULL;
+ const uid_t test_euid = TEST_UID;
+ int object_pid = 0;
+ int ret = KSFT_FAIL;
+
+ parent = cg_name(root, "cpuset_test_0");
+ if (!parent)
+ goto cleanup;
+ parent_procs = cg_name(parent, "cgroup.procs");
+ if (!parent_procs)
+ goto cleanup;
+ parent_subctl = cg_name(parent, "cgroup.subtree_control");
+ if (!parent_subctl)
+ goto cleanup;
+ if (cg_create(parent))
+ goto cleanup;
+
+ child = cg_name(parent, "cpuset_test_1");
+ if (!child)
+ goto cleanup;
+ child_procs = cg_name(child, "cgroup.procs");
+ if (!child_procs)
+ goto cleanup;
+ if (cg_create(child))
+ goto cleanup;
+
+ /* Enable permissions as in a delegated subtree */
+ if (chown(parent_procs, test_euid, -1) ||
+ chown(parent_subctl, test_euid, -1) ||
+ chown(child_procs, test_euid, -1))
+ goto cleanup;
+
+ /* Put a privileged child in the subtree and modify controller state
+ * from an unprivileged process; the main process remains privileged
+ * for cleanup.
+ * The unprivileged child runs in the subtree too to avoid a parent and
+ * internal-node constraint violation.
+ */
+ object_pid = cg_run_nowait(child, idle_process_fn, NULL);
+ if (object_pid < 0)
+ goto cleanup;
+
+ if (cg_run(child, do_controller_fn, parent) != EXIT_SUCCESS)
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (object_pid > 0) {
+ (void)kill(object_pid, SIGTERM);
+ (void)clone_reap(object_pid, WEXITED);
+ }
+
+ cg_destroy(child);
+ free(child_procs);
+ free(child);
+
+ cg_destroy(parent);
+ free(parent_subctl);
+ free(parent_procs);
+ free(parent);
+
+ return ret;
+}
+
+
+#define T(x) { x, #x }
+struct cpuset_test {
+ int (*fn)(const char *root);
+ const char *name;
+} tests[] = {
+ T(test_cpuset_perms_object_allow),
+ T(test_cpuset_perms_object_deny),
+ T(test_cpuset_perms_subtree),
+};
+#undef T
+
+int main(int argc, char *argv[])
+{
+ char root[PATH_MAX];
+ int i, ret = EXIT_SUCCESS;
+
+ if (cg_find_unified_root(root, sizeof(root)))
+ ksft_exit_skip("cgroup v2 isn't mounted\n");
+
+ if (cg_read_strstr(root, "cgroup.subtree_control", "cpuset"))
+ if (cg_write(root, "cgroup.subtree_control", "+cpuset"))
+ ksft_exit_skip("Failed to set cpuset controller\n");
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ switch (tests[i].fn(root)) {
+ case KSFT_PASS:
+ ksft_test_result_pass("%s\n", tests[i].name);
+ break;
+ case KSFT_SKIP:
+ ksft_test_result_skip("%s\n", tests[i].name);
+ break;
+ default:
+ ret = EXIT_FAILURE;
+ ksft_test_result_fail("%s\n", tests[i].name);
+ break;
+ }
+ }
+
+ return ret;
+}
diff --git a/tools/testing/selftests/cgroup/test_cpuset_prs.sh b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
index 2b5215cc599f..4afb132e4e4f 100755
--- a/tools/testing/selftests/cgroup/test_cpuset_prs.sh
+++ b/tools/testing/selftests/cgroup/test_cpuset_prs.sh
@@ -10,7 +10,7 @@
skip_test() {
echo "$1"
echo "Test SKIPPED"
- exit 0
+ exit 4 # ksft_skip
}
[[ $(id -u) -eq 0 ]] || skip_test "Test must be run as root!"
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc
index f34b14ef9781..b9c21a81d248 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_btfarg.tc
@@ -5,6 +5,7 @@
KPROBES=
FPROBES=
+FIELDS=
if grep -qF "p[:[<group>/][<event>]] <place> [<args>]" README ; then
KPROBES=yes
@@ -12,6 +13,9 @@ fi
if grep -qF "f[:[<group>/][<event>]] <func-name>[%return] [<args>]" README ; then
FPROBES=yes
fi
+if grep -qF "<argname>[->field[->field|.field...]]" README ; then
+ FIELDS=yes
+fi
if [ -z "$KPROBES" -a -z "$FPROBES" ] ; then
exit_unsupported
@@ -21,6 +25,9 @@ echo 0 > events/enable
echo > dynamic_events
TP=kfree
+TP2=kmem_cache_alloc
+TP3=getname_flags
+TP4=sched_wakeup
if [ "$FPROBES" ] ; then
echo "f:fpevent $TP object" >> dynamic_events
@@ -33,6 +40,7 @@ echo > dynamic_events
echo "f:fpevent $TP "'$arg1' >> dynamic_events
grep -q "fpevent.*object=object" dynamic_events
+
echo > dynamic_events
echo "f:fpevent $TP "'$arg*' >> dynamic_events
@@ -45,6 +53,18 @@ fi
echo > dynamic_events
+if [ "$FIELDS" ] ; then
+echo "t:tpevent ${TP2} obj_size=s->object_size" >> dynamic_events
+echo "f:fpevent ${TP3}%return path=\$retval->name:string" >> dynamic_events
+echo "t:tpevent2 ${TP4} p->se.group_node.next->prev" >> dynamic_events
+
+grep -q "tpevent .*obj_size=s->object_size" dynamic_events
+grep -q "fpevent.*path=\$retval->name:string" dynamic_events
+grep -q 'tpevent2 .*p->se.group_node.next->prev' dynamic_events
+
+echo > dynamic_events
+fi
+
if [ "$KPROBES" ] ; then
echo "p:kpevent $TP object" >> dynamic_events
grep -q "kpevent.*object=object" dynamic_events
diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_syntax_errors.tc
index 812f5b3f6055..20e42c030095 100644
--- a/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_syntax_errors.tc
+++ b/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_syntax_errors.tc
@@ -30,11 +30,11 @@ check_error 'f:^ vfs_read' # NO_EVENT_NAME
check_error 'f:foo/^12345678901234567890123456789012345678901234567890123456789012345 vfs_read' # EVENT_TOO_LONG
check_error 'f:foo/^bar.1 vfs_read' # BAD_EVENT_NAME
-check_error 'f vfs_read ^$retval' # RETVAL_ON_PROBE
check_error 'f vfs_read ^$stack10000' # BAD_STACK_NUM
check_error 'f vfs_read ^$arg10000' # BAD_ARG_NUM
+check_error 'f vfs_read $retval ^$arg1' # BAD_VAR
check_error 'f vfs_read ^$none_var' # BAD_VAR
check_error 'f vfs_read ^'$REG # BAD_VAR
@@ -103,6 +103,14 @@ check_error 'f vfs_read%return ^$arg*' # NOFENTRY_ARGS
check_error 'f vfs_read ^hoge' # NO_BTFARG
check_error 'f kfree ^$arg10' # NO_BTFARG (exceed the number of parameters)
check_error 'f kfree%return ^$retval' # NO_RETVAL
+
+if grep -qF "<argname>[->field[->field|.field...]]" README ; then
+check_error 'f vfs_read%return $retval->^foo' # NO_PTR_STRCT
+check_error 'f vfs_read file->^foo' # NO_BTF_FIELD
+check_error 'f vfs_read file^-.foo' # BAD_HYPHEN
+check_error 'f vfs_read ^file:string' # BAD_TYPE4STR
+fi
+
else
check_error 'f vfs_read ^$arg*' # NOSUP_BTFARG
check_error 't kfree ^$arg*' # NOSUP_BTFARG
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc
index 285b4770efad..ff7499eb98d6 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc
@@ -34,14 +34,19 @@ mips*)
esac
: "Test get argument (1)"
-echo "p:testprobe tracefs_create_dir arg1=+0(${ARG1}):char" > kprobe_events
+if grep -q eventfs_add_dir available_filter_functions; then
+ DIR_NAME="eventfs_add_dir"
+else
+ DIR_NAME="tracefs_create_dir"
+fi
+echo "p:testprobe ${DIR_NAME} arg1=+0(${ARG1}):char" > kprobe_events
echo 1 > events/kprobes/testprobe/enable
echo "p:test $FUNCTION_FORK" >> kprobe_events
grep -qe "testprobe.* arg1='t'" trace
echo 0 > events/kprobes/testprobe/enable
: "Test get argument (2)"
-echo "p:testprobe tracefs_create_dir arg1=+0(${ARG1}):char arg2=+0(${ARG1}):char[4]" > kprobe_events
+echo "p:testprobe ${DIR_NAME} arg1=+0(${ARG1}):char arg2=+0(${ARG1}):char[4]" > kprobe_events
echo 1 > events/kprobes/testprobe/enable
echo "p:test $FUNCTION_FORK" >> kprobe_events
grep -qe "testprobe.* arg1='t' arg2={'t','e','s','t'}" trace
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
index a4f8e7c53c1f..a202b2ea4baf 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
@@ -37,14 +37,19 @@ loongarch*)
esac
: "Test get argument (1)"
-echo "p:testprobe tracefs_create_dir arg1=+0(${ARG1}):string" > kprobe_events
+if grep -q eventfs_add_dir available_filter_functions; then
+ DIR_NAME="eventfs_add_dir"
+else
+ DIR_NAME="tracefs_create_dir"
+fi
+echo "p:testprobe ${DIR_NAME} arg1=+0(${ARG1}):string" > kprobe_events
echo 1 > events/kprobes/testprobe/enable
echo "p:test $FUNCTION_FORK" >> kprobe_events
grep -qe "testprobe.* arg1=\"test\"" trace
echo 0 > events/kprobes/testprobe/enable
: "Test get argument (2)"
-echo "p:testprobe tracefs_create_dir arg1=+0(${ARG1}):string arg2=+0(${ARG1}):string" > kprobe_events
+echo "p:testprobe ${DIR_NAME} arg1=+0(${ARG1}):string arg2=+0(${ARG1}):string" > kprobe_events
echo 1 > events/kprobes/testprobe/enable
echo "p:test $FUNCTION_FORK" >> kprobe_events
grep -qe "testprobe.* arg1=\"test\" arg2=\"test\"" trace
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index c692cc86e7da..a3bb36fb3cfc 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -23,6 +23,7 @@ LIBKVM += lib/guest_modes.c
LIBKVM += lib/io.c
LIBKVM += lib/kvm_util.c
LIBKVM += lib/memstress.c
+LIBKVM += lib/guest_sprintf.c
LIBKVM += lib/rbtree.c
LIBKVM += lib/sparsebit.c
LIBKVM += lib/test_util.c
@@ -122,6 +123,7 @@ TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
TEST_GEN_PROGS_x86_64 += demand_paging_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_perf_test
+TEST_GEN_PROGS_x86_64 += guest_print_test
TEST_GEN_PROGS_x86_64 += hardware_disable_test
TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
TEST_GEN_PROGS_x86_64 += kvm_page_table_test
@@ -140,7 +142,6 @@ TEST_GEN_PROGS_EXTENDED_x86_64 += x86_64/nx_huge_pages_test
TEST_GEN_PROGS_aarch64 += aarch64/aarch32_id_regs
TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
-TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
TEST_GEN_PROGS_aarch64 += aarch64/psci_test
@@ -152,6 +153,8 @@ TEST_GEN_PROGS_aarch64 += access_tracking_perf_test
TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
+TEST_GEN_PROGS_aarch64 += guest_print_test
+TEST_GEN_PROGS_aarch64 += get-reg-list
TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
TEST_GEN_PROGS_aarch64 += kvm_page_table_test
TEST_GEN_PROGS_aarch64 += memslot_modification_stress_test
@@ -166,8 +169,10 @@ TEST_GEN_PROGS_s390x += s390x/resets
TEST_GEN_PROGS_s390x += s390x/sync_regs_test
TEST_GEN_PROGS_s390x += s390x/tprot
TEST_GEN_PROGS_s390x += s390x/cmma_test
+TEST_GEN_PROGS_s390x += s390x/debug_test
TEST_GEN_PROGS_s390x += demand_paging_test
TEST_GEN_PROGS_s390x += dirty_log_test
+TEST_GEN_PROGS_s390x += guest_print_test
TEST_GEN_PROGS_s390x += kvm_create_max_vcpus
TEST_GEN_PROGS_s390x += kvm_page_table_test
TEST_GEN_PROGS_s390x += rseq_test
@@ -176,11 +181,15 @@ TEST_GEN_PROGS_s390x += kvm_binary_stats_test
TEST_GEN_PROGS_riscv += demand_paging_test
TEST_GEN_PROGS_riscv += dirty_log_test
+TEST_GEN_PROGS_riscv += guest_print_test
+TEST_GEN_PROGS_riscv += get-reg-list
TEST_GEN_PROGS_riscv += kvm_create_max_vcpus
TEST_GEN_PROGS_riscv += kvm_page_table_test
TEST_GEN_PROGS_riscv += set_memory_region_test
TEST_GEN_PROGS_riscv += kvm_binary_stats_test
+SPLIT_TESTS += get-reg-list
+
TEST_PROGS += $(TEST_PROGS_$(ARCH_DIR))
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(ARCH_DIR))
TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(ARCH_DIR))
@@ -204,6 +213,7 @@ endif
CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
-Wno-gnu-variable-sized-type-not-at-end -MD\
-fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \
+ -fno-builtin-strnlen \
-fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
-I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
-I$(<D) -Iinclude/$(ARCH_DIR) -I ../rseq -I.. $(EXTRA_CFLAGS) \
@@ -228,11 +238,14 @@ LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING))
LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ)
+SPLIT_TESTS_TARGETS := $(patsubst %, $(OUTPUT)/%, $(SPLIT_TESTS))
+SPLIT_TESTS_OBJS := $(patsubst %, $(ARCH_DIR)/%.o, $(SPLIT_TESTS))
TEST_GEN_OBJ = $(patsubst %, %.o, $(TEST_GEN_PROGS))
TEST_GEN_OBJ += $(patsubst %, %.o, $(TEST_GEN_PROGS_EXTENDED))
TEST_DEP_FILES = $(patsubst %.o, %.d, $(TEST_GEN_OBJ))
TEST_DEP_FILES += $(patsubst %.o, %.d, $(LIBKVM_OBJS))
+TEST_DEP_FILES += $(patsubst %.o, %.d, $(SPLIT_TESTS_OBJS))
-include $(TEST_DEP_FILES)
$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): %: %.o
@@ -240,7 +253,10 @@ $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): %: %.o
$(TEST_GEN_OBJ): $(OUTPUT)/%.o: %.c
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
-EXTRA_CLEAN += $(LIBKVM_OBJS) $(TEST_DEP_FILES) $(TEST_GEN_OBJ) cscope.*
+$(SPLIT_TESTS_TARGETS): %: %.o $(SPLIT_TESTS_OBJS)
+ $(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $^ $(LDLIBS) -o $@
+
+EXTRA_CLEAN += $(LIBKVM_OBJS) $(TEST_DEP_FILES) $(TEST_GEN_OBJ) $(SPLIT_TESTS_OBJS) cscope.*
x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ))))
$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c
diff --git a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
index 4951ac53d1f8..b90580840b22 100644
--- a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
+++ b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c
@@ -98,7 +98,7 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu)
uint64_t val;
vcpu_get_reg(vcpu, reg_id, &val);
- ASSERT_EQ(val, 0);
+ TEST_ASSERT_EQ(val, 0);
/*
* Expect the ioctl to succeed with no effect on the register
@@ -107,7 +107,7 @@ static void test_user_raz_wi(struct kvm_vcpu *vcpu)
vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
vcpu_get_reg(vcpu, reg_id, &val);
- ASSERT_EQ(val, 0);
+ TEST_ASSERT_EQ(val, 0);
}
}
@@ -127,14 +127,14 @@ static void test_user_raz_invariant(struct kvm_vcpu *vcpu)
uint64_t val;
vcpu_get_reg(vcpu, reg_id, &val);
- ASSERT_EQ(val, 0);
+ TEST_ASSERT_EQ(val, 0);
r = __vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
TEST_ASSERT(r < 0 && errno == EINVAL,
"unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
vcpu_get_reg(vcpu, reg_id, &val);
- ASSERT_EQ(val, 0);
+ TEST_ASSERT_EQ(val, 0);
}
}
diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c
index 8ef370924a02..274b8465b42a 100644
--- a/tools/testing/selftests/kvm/aarch64/arch_timer.c
+++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c
@@ -19,7 +19,6 @@
*
* Copyright (c) 2021, Google LLC.
*/
-
#define _GNU_SOURCE
#include <stdlib.h>
@@ -155,11 +154,13 @@ static void guest_validate_irq(unsigned int intid,
xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt);
/* Make sure we are dealing with the correct timer IRQ */
- GUEST_ASSERT_2(intid == timer_irq, intid, timer_irq);
+ GUEST_ASSERT_EQ(intid, timer_irq);
/* Basic 'timer condition met' check */
- GUEST_ASSERT_3(xcnt >= cval, xcnt, cval, xcnt_diff_us);
- GUEST_ASSERT_1(xctl & CTL_ISTATUS, xctl);
+ __GUEST_ASSERT(xcnt >= cval,
+ "xcnt = 0x%llx, cval = 0x%llx, xcnt_diff_us = 0x%llx",
+ xcnt, cval, xcnt_diff_us);
+ __GUEST_ASSERT(xctl & CTL_ISTATUS, "xcnt = 0x%llx", xcnt);
WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);
}
@@ -192,8 +193,7 @@ static void guest_run_stage(struct test_vcpu_shared_data *shared_data,
TIMER_TEST_ERR_MARGIN_US);
irq_iter = READ_ONCE(shared_data->nr_iter);
- GUEST_ASSERT_2(config_iter + 1 == irq_iter,
- config_iter + 1, irq_iter);
+ GUEST_ASSERT_EQ(config_iter + 1, irq_iter);
}
}
@@ -243,13 +243,9 @@ static void *test_vcpu_run(void *arg)
break;
case UCALL_ABORT:
sync_global_from_guest(vm, *shared_data);
- REPORT_GUEST_ASSERT_N(uc, "values: %lu, %lu; %lu, vcpu %u; stage; %u; iter: %u",
- GUEST_ASSERT_ARG(uc, 0),
- GUEST_ASSERT_ARG(uc, 1),
- GUEST_ASSERT_ARG(uc, 2),
- vcpu_idx,
- shared_data->guest_stage,
- shared_data->nr_iter);
+ fprintf(stderr, "Guest assert failed, vcpu %u; stage; %u; iter: %u\n",
+ vcpu_idx, shared_data->guest_stage, shared_data->nr_iter);
+ REPORT_GUEST_ASSERT(uc);
break;
default:
TEST_FAIL("Unexpected guest exit\n");
diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
index 637be796086f..f5b6cb3a0019 100644
--- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
+++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
@@ -365,7 +365,7 @@ static void guest_wp_handler(struct ex_regs *regs)
static void guest_ss_handler(struct ex_regs *regs)
{
- GUEST_ASSERT_1(ss_idx < 4, ss_idx);
+ __GUEST_ASSERT(ss_idx < 4, "Expected index < 4, got '%u'", ss_idx);
ss_addr[ss_idx++] = regs->pc;
regs->pstate |= SPSR_SS;
}
@@ -410,8 +410,8 @@ static void guest_code_ss(int test_cnt)
/* Userspace disables Single Step when the end is nigh. */
asm volatile("iter_ss_end:\n");
- GUEST_ASSERT(bvr == w_bvr);
- GUEST_ASSERT(wvr == w_wvr);
+ GUEST_ASSERT_EQ(bvr, w_bvr);
+ GUEST_ASSERT_EQ(wvr, w_wvr);
}
GUEST_DONE();
}
@@ -450,7 +450,7 @@ static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bp
vcpu_run(vcpu);
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
+ REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
goto done;
diff --git a/tools/testing/selftests/kvm/aarch64/get-reg-list.c b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
index 4f10055af2aa..709d7d721760 100644
--- a/tools/testing/selftests/kvm/aarch64/get-reg-list.c
+++ b/tools/testing/selftests/kvm/aarch64/get-reg-list.c
@@ -4,50 +4,17 @@
*
* Copyright (C) 2020, Red Hat, Inc.
*
- * When attempting to migrate from a host with an older kernel to a host
- * with a newer kernel we allow the newer kernel on the destination to
- * list new registers with get-reg-list. We assume they'll be unused, at
- * least until the guest reboots, and so they're relatively harmless.
- * However, if the destination host with the newer kernel is missing
- * registers which the source host with the older kernel has, then that's
- * a regression in get-reg-list. This test checks for that regression by
- * checking the current list against a blessed list. We should never have
- * missing registers, but if new ones appear then they can probably be
- * added to the blessed list. A completely new blessed list can be created
- * by running the test with the --list command line argument.
- *
- * Note, the blessed list should be created from the oldest possible
- * kernel. We can't go older than v4.15, though, because that's the first
- * release to expose the ID system registers in KVM_GET_REG_LIST, see
- * commit 93390c0a1b20 ("arm64: KVM: Hide unsupported AArch64 CPU features
- * from guests"). Also, one must use the --core-reg-fixup command line
- * option when running on an older kernel that doesn't include df205b5c6328
- * ("KVM: arm64: Filter out invalid core register IDs in KVM_GET_REG_LIST")
+ * While the blessed list should be created from the oldest possible
+ * kernel, we can't go older than v5.2, because that's the first
+ * release which includes df205b5c6328 ("KVM: arm64: Filter out invalid
+ * core register IDs in KVM_GET_REG_LIST"). Without that commit the core
+ * registers won't match expectations.
*/
#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/wait.h>
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"
-static struct kvm_reg_list *reg_list;
-static __u64 *blessed_reg, blessed_n;
-
-struct reg_sublist {
- const char *name;
- long capability;
- int feature;
- bool finalize;
- __u64 *regs;
- __u64 regs_n;
- __u64 *rejects_set;
- __u64 rejects_set_n;
-};
-
struct feature_id_reg {
__u64 reg;
__u64 id_reg;
@@ -76,70 +43,7 @@ static struct feature_id_reg feat_id_regs[] = {
}
};
-struct vcpu_config {
- char *name;
- struct reg_sublist sublists[];
-};
-
-static struct vcpu_config *vcpu_configs[];
-static int vcpu_configs_n;
-
-#define for_each_sublist(c, s) \
- for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))
-
-#define for_each_reg(i) \
- for ((i) = 0; (i) < reg_list->n; ++(i))
-
-#define for_each_reg_filtered(i) \
- for_each_reg(i) \
- if (!filter_reg(reg_list->reg[i]))
-
-#define for_each_missing_reg(i) \
- for ((i) = 0; (i) < blessed_n; ++(i)) \
- if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i])) \
- if (check_supported_feat_reg(vcpu, blessed_reg[i]))
-
-#define for_each_new_reg(i) \
- for_each_reg_filtered(i) \
- if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
-
-static const char *config_name(struct vcpu_config *c)
-{
- struct reg_sublist *s;
- int len = 0;
-
- if (c->name)
- return c->name;
-
- for_each_sublist(c, s)
- len += strlen(s->name) + 1;
-
- c->name = malloc(len);
-
- len = 0;
- for_each_sublist(c, s) {
- if (!strcmp(s->name, "base"))
- continue;
- strcat(c->name + len, s->name);
- len += strlen(s->name) + 1;
- c->name[len - 1] = '+';
- }
- c->name[len - 1] = '\0';
-
- return c->name;
-}
-
-static bool has_cap(struct vcpu_config *c, long capability)
-{
- struct reg_sublist *s;
-
- for_each_sublist(c, s)
- if (s->capability == capability)
- return true;
- return false;
-}
-
-static bool filter_reg(__u64 reg)
+bool filter_reg(__u64 reg)
{
/*
* DEMUX register presence depends on the host's CLIDR_EL1.
@@ -151,16 +55,6 @@ static bool filter_reg(__u64 reg)
return false;
}
-static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
-{
- int i;
-
- for (i = 0; i < nr_regs; ++i)
- if (reg == regs[i])
- return true;
- return false;
-}
-
static bool check_supported_feat_reg(struct kvm_vcpu *vcpu, __u64 reg)
{
int i, ret;
@@ -180,17 +74,27 @@ static bool check_supported_feat_reg(struct kvm_vcpu *vcpu, __u64 reg)
return true;
}
-static const char *str_with_index(const char *template, __u64 index)
+bool check_supported_reg(struct kvm_vcpu *vcpu, __u64 reg)
{
- char *str, *p;
- int n;
+ return check_supported_feat_reg(vcpu, reg);
+}
- str = strdup(template);
- p = strstr(str, "##");
- n = sprintf(p, "%lld", index);
- strcat(p + n, strstr(template, "##") + 2);
+bool check_reject_set(int err)
+{
+ return err == EPERM;
+}
- return (const char *)str;
+void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
+{
+ struct vcpu_reg_sublist *s;
+ int feature;
+
+ for_each_sublist(c, s) {
+ if (s->finalize) {
+ feature = s->feature;
+ vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
+ }
+ }
}
#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK)
@@ -199,7 +103,7 @@ static const char *str_with_index(const char *template, __u64 index)
#define CORE_SPSR_XX_NR_WORDS 2
#define CORE_FPREGS_XX_NR_WORDS 4
-static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
+static const char *core_id_to_str(const char *prefix, __u64 id)
{
__u64 core_off = id & ~REG_MASK, idx;
@@ -210,8 +114,8 @@ static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
KVM_REG_ARM_CORE_REG(regs.regs[30]):
idx = (core_off - KVM_REG_ARM_CORE_REG(regs.regs[0])) / CORE_REGS_XX_NR_WORDS;
- TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", config_name(c), idx);
- return str_with_index("KVM_REG_ARM_CORE_REG(regs.regs[##])", idx);
+ TEST_ASSERT(idx < 31, "%s: Unexpected regs.regs index: %lld", prefix, idx);
+ return strdup_printf("KVM_REG_ARM_CORE_REG(regs.regs[%lld])", idx);
case KVM_REG_ARM_CORE_REG(regs.sp):
return "KVM_REG_ARM_CORE_REG(regs.sp)";
case KVM_REG_ARM_CORE_REG(regs.pc):
@@ -225,24 +129,24 @@ static const char *core_id_to_str(struct vcpu_config *c, __u64 id)
case KVM_REG_ARM_CORE_REG(spsr[0]) ...
KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
idx = (core_off - KVM_REG_ARM_CORE_REG(spsr[0])) / CORE_SPSR_XX_NR_WORDS;
- TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", config_name(c), idx);
- return str_with_index("KVM_REG_ARM_CORE_REG(spsr[##])", idx);
+ TEST_ASSERT(idx < KVM_NR_SPSR, "%s: Unexpected spsr index: %lld", prefix, idx);
+ return strdup_printf("KVM_REG_ARM_CORE_REG(spsr[%lld])", idx);
case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
idx = (core_off - KVM_REG_ARM_CORE_REG(fp_regs.vregs[0])) / CORE_FPREGS_XX_NR_WORDS;
- TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", config_name(c), idx);
- return str_with_index("KVM_REG_ARM_CORE_REG(fp_regs.vregs[##])", idx);
+ TEST_ASSERT(idx < 32, "%s: Unexpected fp_regs.vregs index: %lld", prefix, idx);
+ return strdup_printf("KVM_REG_ARM_CORE_REG(fp_regs.vregs[%lld])", idx);
case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
return "KVM_REG_ARM_CORE_REG(fp_regs.fpsr)";
case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
return "KVM_REG_ARM_CORE_REG(fp_regs.fpcr)";
}
- TEST_FAIL("%s: Unknown core reg id: 0x%llx", config_name(c), id);
+ TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
return NULL;
}
-static const char *sve_id_to_str(struct vcpu_config *c, __u64 id)
+static const char *sve_id_to_str(const char *prefix, __u64 id)
{
__u64 sve_off, n, i;
@@ -252,37 +156,37 @@ static const char *sve_id_to_str(struct vcpu_config *c, __u64 id)
sve_off = id & ~(REG_MASK | ((1ULL << 5) - 1));
i = id & (KVM_ARM64_SVE_MAX_SLICES - 1);
- TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", config_name(c), id);
+ TEST_ASSERT(i == 0, "%s: Currently we don't expect slice > 0, reg id 0x%llx", prefix, id);
switch (sve_off) {
case KVM_REG_ARM64_SVE_ZREG_BASE ...
KVM_REG_ARM64_SVE_ZREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_ZREGS - 1:
n = (id >> 5) & (KVM_ARM64_SVE_NUM_ZREGS - 1);
TEST_ASSERT(id == KVM_REG_ARM64_SVE_ZREG(n, 0),
- "%s: Unexpected bits set in SVE ZREG id: 0x%llx", config_name(c), id);
- return str_with_index("KVM_REG_ARM64_SVE_ZREG(##, 0)", n);
+ "%s: Unexpected bits set in SVE ZREG id: 0x%llx", prefix, id);
+ return strdup_printf("KVM_REG_ARM64_SVE_ZREG(%lld, 0)", n);
case KVM_REG_ARM64_SVE_PREG_BASE ...
KVM_REG_ARM64_SVE_PREG_BASE + (1ULL << 5) * KVM_ARM64_SVE_NUM_PREGS - 1:
n = (id >> 5) & (KVM_ARM64_SVE_NUM_PREGS - 1);
TEST_ASSERT(id == KVM_REG_ARM64_SVE_PREG(n, 0),
- "%s: Unexpected bits set in SVE PREG id: 0x%llx", config_name(c), id);
- return str_with_index("KVM_REG_ARM64_SVE_PREG(##, 0)", n);
+ "%s: Unexpected bits set in SVE PREG id: 0x%llx", prefix, id);
+ return strdup_printf("KVM_REG_ARM64_SVE_PREG(%lld, 0)", n);
case KVM_REG_ARM64_SVE_FFR_BASE:
TEST_ASSERT(id == KVM_REG_ARM64_SVE_FFR(0),
- "%s: Unexpected bits set in SVE FFR id: 0x%llx", config_name(c), id);
+ "%s: Unexpected bits set in SVE FFR id: 0x%llx", prefix, id);
return "KVM_REG_ARM64_SVE_FFR(0)";
}
return NULL;
}
-static void print_reg(struct vcpu_config *c, __u64 id)
+void print_reg(const char *prefix, __u64 id)
{
unsigned op0, op1, crn, crm, op2;
const char *reg_size = NULL;
TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_ARM64,
- "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", config_name(c), id);
+ "%s: KVM_REG_ARM64 missing in reg id: 0x%llx", prefix, id);
switch (id & KVM_REG_SIZE_MASK) {
case KVM_REG_SIZE_U8:
@@ -314,16 +218,16 @@ static void print_reg(struct vcpu_config *c, __u64 id)
break;
default:
TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
- config_name(c), (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
+ prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
}
switch (id & KVM_REG_ARM_COPROC_MASK) {
case KVM_REG_ARM_CORE:
- printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(c, id));
+ printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_CORE | %s,\n", reg_size, core_id_to_str(prefix, id));
break;
case KVM_REG_ARM_DEMUX:
TEST_ASSERT(!(id & ~(REG_MASK | KVM_REG_ARM_DEMUX_ID_MASK | KVM_REG_ARM_DEMUX_VAL_MASK)),
- "%s: Unexpected bits set in DEMUX reg id: 0x%llx", config_name(c), id);
+ "%s: Unexpected bits set in DEMUX reg id: 0x%llx", prefix, id);
printf("\tKVM_REG_ARM64 | %s | KVM_REG_ARM_DEMUX | KVM_REG_ARM_DEMUX_ID_CCSIDR | %lld,\n",
reg_size, id & KVM_REG_ARM_DEMUX_VAL_MASK);
break;
@@ -334,370 +238,34 @@ static void print_reg(struct vcpu_config *c, __u64 id)
crm = (id & KVM_REG_ARM64_SYSREG_CRM_MASK) >> KVM_REG_ARM64_SYSREG_CRM_SHIFT;
op2 = (id & KVM_REG_ARM64_SYSREG_OP2_MASK) >> KVM_REG_ARM64_SYSREG_OP2_SHIFT;
TEST_ASSERT(id == ARM64_SYS_REG(op0, op1, crn, crm, op2),
- "%s: Unexpected bits set in SYSREG reg id: 0x%llx", config_name(c), id);
+ "%s: Unexpected bits set in SYSREG reg id: 0x%llx", prefix, id);
printf("\tARM64_SYS_REG(%d, %d, %d, %d, %d),\n", op0, op1, crn, crm, op2);
break;
case KVM_REG_ARM_FW:
TEST_ASSERT(id == KVM_REG_ARM_FW_REG(id & 0xffff),
- "%s: Unexpected bits set in FW reg id: 0x%llx", config_name(c), id);
+ "%s: Unexpected bits set in FW reg id: 0x%llx", prefix, id);
printf("\tKVM_REG_ARM_FW_REG(%lld),\n", id & 0xffff);
break;
case KVM_REG_ARM_FW_FEAT_BMAP:
TEST_ASSERT(id == KVM_REG_ARM_FW_FEAT_BMAP_REG(id & 0xffff),
- "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", config_name(c), id);
+ "%s: Unexpected bits set in the bitmap feature FW reg id: 0x%llx", prefix, id);
printf("\tKVM_REG_ARM_FW_FEAT_BMAP_REG(%lld),\n", id & 0xffff);
break;
case KVM_REG_ARM64_SVE:
- if (has_cap(c, KVM_CAP_ARM_SVE))
- printf("\t%s,\n", sve_id_to_str(c, id));
- else
- TEST_FAIL("%s: KVM_REG_ARM64_SVE is an unexpected coproc type in reg id: 0x%llx", config_name(c), id);
+ printf("\t%s,\n", sve_id_to_str(prefix, id));
break;
default:
TEST_FAIL("%s: Unexpected coproc type: 0x%llx in reg id: 0x%llx",
- config_name(c), (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
- }
-}
-
-/*
- * Older kernels listed each 32-bit word of CORE registers separately.
- * For 64 and 128-bit registers we need to ignore the extra words. We
- * also need to fixup the sizes, because the older kernels stated all
- * registers were 64-bit, even when they weren't.
- */
-static void core_reg_fixup(void)
-{
- struct kvm_reg_list *tmp;
- __u64 id, core_off;
- int i;
-
- tmp = calloc(1, sizeof(*tmp) + reg_list->n * sizeof(__u64));
-
- for (i = 0; i < reg_list->n; ++i) {
- id = reg_list->reg[i];
-
- if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM_CORE) {
- tmp->reg[tmp->n++] = id;
- continue;
- }
-
- core_off = id & ~REG_MASK;
-
- switch (core_off) {
- case 0x52: case 0xd2: case 0xd6:
- /*
- * These offsets are pointing at padding.
- * We need to ignore them too.
- */
- continue;
- case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
- KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
- if (core_off & 3)
- continue;
- id &= ~KVM_REG_SIZE_MASK;
- id |= KVM_REG_SIZE_U128;
- tmp->reg[tmp->n++] = id;
- continue;
- case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
- case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
- id &= ~KVM_REG_SIZE_MASK;
- id |= KVM_REG_SIZE_U32;
- tmp->reg[tmp->n++] = id;
- continue;
- default:
- if (core_off & 1)
- continue;
- tmp->reg[tmp->n++] = id;
- break;
- }
+ prefix, (id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT, id);
}
-
- free(reg_list);
- reg_list = tmp;
-}
-
-static void prepare_vcpu_init(struct vcpu_config *c, struct kvm_vcpu_init *init)
-{
- struct reg_sublist *s;
-
- for_each_sublist(c, s)
- if (s->capability)
- init->features[s->feature / 32] |= 1 << (s->feature % 32);
-}
-
-static void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_config *c)
-{
- struct reg_sublist *s;
- int feature;
-
- for_each_sublist(c, s) {
- if (s->finalize) {
- feature = s->feature;
- vcpu_ioctl(vcpu, KVM_ARM_VCPU_FINALIZE, &feature);
- }
- }
-}
-
-static void check_supported(struct vcpu_config *c)
-{
- struct reg_sublist *s;
-
- for_each_sublist(c, s) {
- if (!s->capability)
- continue;
-
- __TEST_REQUIRE(kvm_has_cap(s->capability),
- "%s: %s not available, skipping tests\n",
- config_name(c), s->name);
- }
-}
-
-static bool print_list;
-static bool print_filtered;
-static bool fixup_core_regs;
-
-static void run_test(struct vcpu_config *c)
-{
- struct kvm_vcpu_init init = { .target = -1, };
- int new_regs = 0, missing_regs = 0, i, n;
- int failed_get = 0, failed_set = 0, failed_reject = 0;
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm;
- struct reg_sublist *s;
-
- check_supported(c);
-
- vm = vm_create_barebones();
- prepare_vcpu_init(c, &init);
- vcpu = __vm_vcpu_add(vm, 0);
- aarch64_vcpu_setup(vcpu, &init);
- finalize_vcpu(vcpu, c);
-
- reg_list = vcpu_get_reg_list(vcpu);
-
- if (fixup_core_regs)
- core_reg_fixup();
-
- if (print_list || print_filtered) {
- putchar('\n');
- for_each_reg(i) {
- __u64 id = reg_list->reg[i];
- if ((print_list && !filter_reg(id)) ||
- (print_filtered && filter_reg(id)))
- print_reg(c, id);
- }
- putchar('\n');
- return;
- }
-
- /*
- * We only test that we can get the register and then write back the
- * same value. Some registers may allow other values to be written
- * back, but others only allow some bits to be changed, and at least
- * for ID registers set will fail if the value does not exactly match
- * what was returned by get. If registers that allow other values to
- * be written need to have the other values tested, then we should
- * create a new set of tests for those in a new independent test
- * executable.
- */
- for_each_reg(i) {
- uint8_t addr[2048 / 8];
- struct kvm_one_reg reg = {
- .id = reg_list->reg[i],
- .addr = (__u64)&addr,
- };
- bool reject_reg = false;
- int ret;
-
- ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr);
- if (ret) {
- printf("%s: Failed to get ", config_name(c));
- print_reg(c, reg.id);
- putchar('\n');
- ++failed_get;
- }
-
- /* rejects_set registers are rejected after KVM_ARM_VCPU_FINALIZE */
- for_each_sublist(c, s) {
- if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
- reject_reg = true;
- ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
- if (ret != -1 || errno != EPERM) {
- printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
- print_reg(c, reg.id);
- putchar('\n');
- ++failed_reject;
- }
- break;
- }
- }
-
- if (!reject_reg) {
- ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
- if (ret) {
- printf("%s: Failed to set ", config_name(c));
- print_reg(c, reg.id);
- putchar('\n');
- ++failed_set;
- }
- }
- }
-
- for_each_sublist(c, s)
- blessed_n += s->regs_n;
- blessed_reg = calloc(blessed_n, sizeof(__u64));
-
- n = 0;
- for_each_sublist(c, s) {
- for (i = 0; i < s->regs_n; ++i)
- blessed_reg[n++] = s->regs[i];
- }
-
- for_each_new_reg(i)
- ++new_regs;
-
- for_each_missing_reg(i)
- ++missing_regs;
-
- if (new_regs || missing_regs) {
- n = 0;
- for_each_reg_filtered(i)
- ++n;
-
- printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
- printf("%s: Number registers: %5lld (includes %lld filtered registers)\n",
- config_name(c), reg_list->n, reg_list->n - n);
- }
-
- if (new_regs) {
- printf("\n%s: There are %d new registers.\n"
- "Consider adding them to the blessed reg "
- "list with the following lines:\n\n", config_name(c), new_regs);
- for_each_new_reg(i)
- print_reg(c, reg_list->reg[i]);
- putchar('\n');
- }
-
- if (missing_regs) {
- printf("\n%s: There are %d missing registers.\n"
- "The following lines are missing registers:\n\n", config_name(c), missing_regs);
- for_each_missing_reg(i)
- print_reg(c, blessed_reg[i]);
- putchar('\n');
- }
-
- TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject,
- "%s: There are %d missing registers; "
- "%d registers failed get; %d registers failed set; %d registers failed reject",
- config_name(c), missing_regs, failed_get, failed_set, failed_reject);
-
- pr_info("%s: PASS\n", config_name(c));
- blessed_n = 0;
- free(blessed_reg);
- free(reg_list);
- kvm_vm_free(vm);
-}
-
-static void help(void)
-{
- struct vcpu_config *c;
- int i;
-
- printf(
- "\n"
- "usage: get-reg-list [--config=<selection>] [--list] [--list-filtered] [--core-reg-fixup]\n\n"
- " --config=<selection> Used to select a specific vcpu configuration for the test/listing\n"
- " '<selection>' may be\n");
-
- for (i = 0; i < vcpu_configs_n; ++i) {
- c = vcpu_configs[i];
- printf(
- " '%s'\n", config_name(c));
- }
-
- printf(
- "\n"
- " --list Print the register list rather than test it (requires --config)\n"
- " --list-filtered Print registers that would normally be filtered out (requires --config)\n"
- " --core-reg-fixup Needed when running on old kernels with broken core reg listings\n"
- "\n"
- );
-}
-
-static struct vcpu_config *parse_config(const char *config)
-{
- struct vcpu_config *c;
- int i;
-
- if (config[8] != '=')
- help(), exit(1);
-
- for (i = 0; i < vcpu_configs_n; ++i) {
- c = vcpu_configs[i];
- if (strcmp(config_name(c), &config[9]) == 0)
- break;
- }
-
- if (i == vcpu_configs_n)
- help(), exit(1);
-
- return c;
-}
-
-int main(int ac, char **av)
-{
- struct vcpu_config *c, *sel = NULL;
- int i, ret = 0;
- pid_t pid;
-
- for (i = 1; i < ac; ++i) {
- if (strcmp(av[i], "--core-reg-fixup") == 0)
- fixup_core_regs = true;
- else if (strncmp(av[i], "--config", 8) == 0)
- sel = parse_config(av[i]);
- else if (strcmp(av[i], "--list") == 0)
- print_list = true;
- else if (strcmp(av[i], "--list-filtered") == 0)
- print_filtered = true;
- else if (strcmp(av[i], "--help") == 0 || strcmp(av[1], "-h") == 0)
- help(), exit(0);
- else
- help(), exit(1);
- }
-
- if (print_list || print_filtered) {
- /*
- * We only want to print the register list of a single config.
- */
- if (!sel)
- help(), exit(1);
- }
-
- for (i = 0; i < vcpu_configs_n; ++i) {
- c = vcpu_configs[i];
- if (sel && c != sel)
- continue;
-
- pid = fork();
-
- if (!pid) {
- run_test(c);
- exit(0);
- } else {
- int wstatus;
- pid_t wpid = wait(&wstatus);
- TEST_ASSERT(wpid == pid && WIFEXITED(wstatus), "wait: Unexpected return");
- if (WEXITSTATUS(wstatus) && WEXITSTATUS(wstatus) != KSFT_SKIP)
- ret = KSFT_FAIL;
- }
- }
-
- return ret;
}
/*
- * The current blessed list was primed with the output of kernel version
+ * The original blessed list was primed with the output of kernel version
* v4.15 with --core-reg-fixup and then later updated with new registers.
+ * (The --core-reg-fixup option and its fixup function have been removed
+ * from the test, as it is unlikely this test would be run on a kernel
+ * older than v5.2.)
*
* The blessed list is up to date with kernel version v6.4 (or so we hope)
*/
@@ -1130,14 +698,14 @@ static __u64 pauth_generic_regs[] = {
.regs_n = ARRAY_SIZE(pauth_generic_regs), \
}
-static struct vcpu_config vregs_config = {
+static struct vcpu_reg_list vregs_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
{0},
},
};
-static struct vcpu_config vregs_pmu_config = {
+static struct vcpu_reg_list vregs_pmu_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
@@ -1145,14 +713,14 @@ static struct vcpu_config vregs_pmu_config = {
{0},
},
};
-static struct vcpu_config sve_config = {
+static struct vcpu_reg_list sve_config = {
.sublists = {
BASE_SUBLIST,
SVE_SUBLIST,
{0},
},
};
-static struct vcpu_config sve_pmu_config = {
+static struct vcpu_reg_list sve_pmu_config = {
.sublists = {
BASE_SUBLIST,
SVE_SUBLIST,
@@ -1160,7 +728,7 @@ static struct vcpu_config sve_pmu_config = {
{0},
},
};
-static struct vcpu_config pauth_config = {
+static struct vcpu_reg_list pauth_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
@@ -1168,7 +736,7 @@ static struct vcpu_config pauth_config = {
{0},
},
};
-static struct vcpu_config pauth_pmu_config = {
+static struct vcpu_reg_list pauth_pmu_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
@@ -1178,7 +746,7 @@ static struct vcpu_config pauth_pmu_config = {
},
};
-static struct vcpu_config *vcpu_configs[] = {
+struct vcpu_reg_list *vcpu_configs[] = {
&vregs_config,
&vregs_pmu_config,
&sve_config,
@@ -1186,4 +754,4 @@ static struct vcpu_config *vcpu_configs[] = {
&pauth_config,
&pauth_pmu_config,
};
-static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
+int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
diff --git a/tools/testing/selftests/kvm/aarch64/hypercalls.c b/tools/testing/selftests/kvm/aarch64/hypercalls.c
index bef1499fb465..31f66ba97228 100644
--- a/tools/testing/selftests/kvm/aarch64/hypercalls.c
+++ b/tools/testing/selftests/kvm/aarch64/hypercalls.c
@@ -8,7 +8,6 @@
* hypercalls are properly masked or unmasked to the guest when disabled or
* enabled from the KVM userspace, respectively.
*/
-
#include <errno.h>
#include <linux/arm-smccc.h>
#include <asm/kvm.h>
@@ -105,15 +104,17 @@ static void guest_test_hvc(const struct test_hvc_info *hc_info)
switch (stage) {
case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
case TEST_STAGE_HVC_IFACE_FALSE_INFO:
- GUEST_ASSERT_3(res.a0 == SMCCC_RET_NOT_SUPPORTED,
- res.a0, hc_info->func_id, hc_info->arg1);
+ __GUEST_ASSERT(res.a0 == SMCCC_RET_NOT_SUPPORTED,
+ "a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%llx, stage = %u",
+ res.a0, hc_info->func_id, hc_info->arg1, stage);
break;
case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
- GUEST_ASSERT_3(res.a0 != SMCCC_RET_NOT_SUPPORTED,
- res.a0, hc_info->func_id, hc_info->arg1);
+ __GUEST_ASSERT(res.a0 != SMCCC_RET_NOT_SUPPORTED,
+ "a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%llx, stage = %u",
+ res.a0, hc_info->func_id, hc_info->arg1, stage);
break;
default:
- GUEST_ASSERT_1(0, stage);
+ GUEST_FAIL("Unexpected stage = %u", stage);
}
}
}
@@ -132,7 +133,7 @@ static void guest_code(void)
guest_test_hvc(false_hvc_info);
break;
default:
- GUEST_ASSERT_1(0, stage);
+ GUEST_FAIL("Unexpected stage = %u", stage);
}
GUEST_SYNC(stage);
@@ -290,10 +291,7 @@ static void test_run(void)
guest_done = true;
break;
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_N(uc, "values: 0x%lx, 0x%lx; 0x%lx, stage: %u",
- GUEST_ASSERT_ARG(uc, 0),
- GUEST_ASSERT_ARG(uc, 1),
- GUEST_ASSERT_ARG(uc, 2), stage);
+ REPORT_GUEST_ASSERT(uc);
break;
default:
TEST_FAIL("Unexpected guest exit\n");
diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c
index df10f1ffa20d..47bb914ab2fa 100644
--- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c
+++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c
@@ -7,7 +7,6 @@
* hugetlbfs with a hole). It checks that the expected handling method is
* called (e.g., uffd faults with the right address and write/read flag).
*/
-
#define _GNU_SOURCE
#include <linux/bitmap.h>
#include <fcntl.h>
@@ -293,12 +292,12 @@ static void guest_code(struct test_desc *test)
static void no_dabt_handler(struct ex_regs *regs)
{
- GUEST_ASSERT_1(false, read_sysreg(far_el1));
+ GUEST_FAIL("Unexpected dabt, far_el1 = 0x%llx", read_sysreg(far_el1));
}
static void no_iabt_handler(struct ex_regs *regs)
{
- GUEST_ASSERT_1(false, regs->pc);
+ GUEST_FAIL("Unexpected iabt, pc = 0x%lx", regs->pc);
}
static struct uffd_args {
@@ -318,7 +317,7 @@ static int uffd_generic_handler(int uffd_mode, int uffd, struct uffd_msg *msg,
TEST_ASSERT(uffd_mode == UFFDIO_REGISTER_MODE_MISSING,
"The only expected UFFD mode is MISSING");
- ASSERT_EQ(addr, (uint64_t)args->hva);
+ TEST_ASSERT_EQ(addr, (uint64_t)args->hva);
pr_debug("uffd fault: addr=%p write=%d\n",
(void *)addr, !!(flags & UFFD_PAGEFAULT_FLAG_WRITE));
@@ -432,7 +431,7 @@ static void mmio_on_test_gpa_handler(struct kvm_vm *vm, struct kvm_run *run)
region = vm_get_mem_region(vm, MEM_REGION_TEST_DATA);
hva = (void *)region->region.userspace_addr;
- ASSERT_EQ(run->mmio.phys_addr, region->region.guest_phys_addr);
+ TEST_ASSERT_EQ(run->mmio.phys_addr, region->region.guest_phys_addr);
memcpy(hva, run->mmio.data, run->mmio.len);
events.mmio_exits += 1;
@@ -631,9 +630,9 @@ static void setup_default_handlers(struct test_desc *test)
static void check_event_counts(struct test_desc *test)
{
- ASSERT_EQ(test->expected_events.uffd_faults, events.uffd_faults);
- ASSERT_EQ(test->expected_events.mmio_exits, events.mmio_exits);
- ASSERT_EQ(test->expected_events.fail_vcpu_runs, events.fail_vcpu_runs);
+ TEST_ASSERT_EQ(test->expected_events.uffd_faults, events.uffd_faults);
+ TEST_ASSERT_EQ(test->expected_events.mmio_exits, events.mmio_exits);
+ TEST_ASSERT_EQ(test->expected_events.fail_vcpu_runs, events.fail_vcpu_runs);
}
static void print_test_banner(enum vm_guest_mode mode, struct test_params *p)
@@ -679,7 +678,7 @@ static void vcpu_run_loop(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
}
break;
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
+ REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
goto done;
diff --git a/tools/testing/selftests/kvm/aarch64/vgic_irq.c b/tools/testing/selftests/kvm/aarch64/vgic_irq.c
index 90d854e0fcff..2e64b4856e38 100644
--- a/tools/testing/selftests/kvm/aarch64/vgic_irq.c
+++ b/tools/testing/selftests/kvm/aarch64/vgic_irq.c
@@ -7,7 +7,6 @@
* host to inject a specific intid via a GUEST_SYNC call, and then checks that
* it received it.
*/
-
#include <asm/kvm.h>
#include <asm/kvm_para.h>
#include <sys/eventfd.h>
@@ -781,7 +780,7 @@ static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
run_guest_cmd(vcpu, gic_fd, &inject_args, &args);
break;
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
+ REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
goto done;
diff --git a/tools/testing/selftests/kvm/get-reg-list.c b/tools/testing/selftests/kvm/get-reg-list.c
new file mode 100644
index 000000000000..be7bf5224434
--- /dev/null
+++ b/tools/testing/selftests/kvm/get-reg-list.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Check for KVM_GET_REG_LIST regressions.
+ *
+ * Copyright (C) 2020, Red Hat, Inc.
+ *
+ * When attempting to migrate from a host with an older kernel to a host
+ * with a newer kernel we allow the newer kernel on the destination to
+ * list new registers with get-reg-list. We assume they'll be unused, at
+ * least until the guest reboots, and so they're relatively harmless.
+ * However, if the destination host with the newer kernel is missing
+ * registers which the source host with the older kernel has, then that's
+ * a regression in get-reg-list. This test checks for that regression by
+ * checking the current list against a blessed list. We should never have
+ * missing registers, but if new ones appear then they can probably be
+ * added to the blessed list. A completely new blessed list can be created
+ * by running the test with the --list command line argument.
+ *
+ * The blessed list should be created from the oldest possible kernel.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include "kvm_util.h"
+#include "test_util.h"
+#include "processor.h"
+
+static struct kvm_reg_list *reg_list;
+static __u64 *blessed_reg, blessed_n;
+
+extern struct vcpu_reg_list *vcpu_configs[];
+extern int vcpu_configs_n;
+
+#define for_each_reg(i) \
+ for ((i) = 0; (i) < reg_list->n; ++(i))
+
+#define for_each_reg_filtered(i) \
+ for_each_reg(i) \
+ if (!filter_reg(reg_list->reg[i]))
+
+#define for_each_missing_reg(i) \
+ for ((i) = 0; (i) < blessed_n; ++(i)) \
+ if (!find_reg(reg_list->reg, reg_list->n, blessed_reg[i])) \
+ if (check_supported_reg(vcpu, blessed_reg[i]))
+
+#define for_each_new_reg(i) \
+ for_each_reg_filtered(i) \
+ if (!find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
+
+#define for_each_present_blessed_reg(i) \
+ for_each_reg(i) \
+ if (find_reg(blessed_reg, blessed_n, reg_list->reg[i]))
+
+static const char *config_name(struct vcpu_reg_list *c)
+{
+ struct vcpu_reg_sublist *s;
+ int len = 0;
+
+ if (c->name)
+ return c->name;
+
+ for_each_sublist(c, s)
+ len += strlen(s->name) + 1;
+
+ c->name = malloc(len);
+
+ len = 0;
+ for_each_sublist(c, s) {
+ if (!strcmp(s->name, "base"))
+ continue;
+ strcat(c->name + len, s->name);
+ len += strlen(s->name) + 1;
+ c->name[len - 1] = '+';
+ }
+ c->name[len - 1] = '\0';
+
+ return c->name;
+}
+
+bool __weak check_supported_reg(struct kvm_vcpu *vcpu, __u64 reg)
+{
+ return true;
+}
+
+bool __weak filter_reg(__u64 reg)
+{
+ return false;
+}
+
+static bool find_reg(__u64 regs[], __u64 nr_regs, __u64 reg)
+{
+ int i;
+
+ for (i = 0; i < nr_regs; ++i)
+ if (reg == regs[i])
+ return true;
+ return false;
+}
+
+void __weak print_reg(const char *prefix, __u64 id)
+{
+ printf("\t0x%llx,\n", id);
+}
+
+bool __weak check_reject_set(int err)
+{
+ return true;
+}
+
+void __weak finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
+{
+}
+
+#ifdef __aarch64__
+static void prepare_vcpu_init(struct vcpu_reg_list *c, struct kvm_vcpu_init *init)
+{
+ struct vcpu_reg_sublist *s;
+
+ for_each_sublist(c, s)
+ if (s->capability)
+ init->features[s->feature / 32] |= 1 << (s->feature % 32);
+}
+
+static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
+{
+ struct kvm_vcpu_init init = { .target = -1, };
+ struct kvm_vcpu *vcpu;
+
+ prepare_vcpu_init(c, &init);
+ vcpu = __vm_vcpu_add(vm, 0);
+ aarch64_vcpu_setup(vcpu, &init);
+
+ return vcpu;
+}
+#else
+static struct kvm_vcpu *vcpu_config_get_vcpu(struct vcpu_reg_list *c, struct kvm_vm *vm)
+{
+ return __vm_vcpu_add(vm, 0);
+}
+#endif
+
+static void check_supported(struct vcpu_reg_list *c)
+{
+ struct vcpu_reg_sublist *s;
+
+ for_each_sublist(c, s) {
+ if (!s->capability)
+ continue;
+
+ __TEST_REQUIRE(kvm_has_cap(s->capability),
+ "%s: %s not available, skipping tests\n",
+ config_name(c), s->name);
+ }
+}
+
+static bool print_list;
+static bool print_filtered;
+
+static void run_test(struct vcpu_reg_list *c)
+{
+ int new_regs = 0, missing_regs = 0, i, n;
+ int failed_get = 0, failed_set = 0, failed_reject = 0;
+ int skipped_set = 0;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ struct vcpu_reg_sublist *s;
+
+ check_supported(c);
+
+ vm = vm_create_barebones();
+ vcpu = vcpu_config_get_vcpu(c, vm);
+ finalize_vcpu(vcpu, c);
+
+ reg_list = vcpu_get_reg_list(vcpu);
+
+ if (print_list || print_filtered) {
+ putchar('\n');
+ for_each_reg(i) {
+ __u64 id = reg_list->reg[i];
+ if ((print_list && !filter_reg(id)) ||
+ (print_filtered && filter_reg(id)))
+ print_reg(config_name(c), id);
+ }
+ putchar('\n');
+ return;
+ }
+
+ for_each_sublist(c, s)
+ blessed_n += s->regs_n;
+ blessed_reg = calloc(blessed_n, sizeof(__u64));
+
+ n = 0;
+ for_each_sublist(c, s) {
+ for (i = 0; i < s->regs_n; ++i)
+ blessed_reg[n++] = s->regs[i];
+ }
+
+ /*
+ * We only test that we can get the register and then write back the
+ * same value. Some registers may allow other values to be written
+ * back, but others only allow some bits to be changed, and at least
+ * for ID registers set will fail if the value does not exactly match
+ * what was returned by get. If registers that allow other values to
+ * be written need to have the other values tested, then we should
+ * create a new set of tests for those in a new independent test
+ * executable.
+ *
+ * Only do the get/set tests on present, blessed list registers,
+ * since we don't know the capabilities of any new registers.
+ */
+ for_each_present_blessed_reg(i) {
+ uint8_t addr[2048 / 8];
+ struct kvm_one_reg reg = {
+ .id = reg_list->reg[i],
+ .addr = (__u64)&addr,
+ };
+ bool reject_reg = false, skip_reg = false;
+ int ret;
+
+ ret = __vcpu_get_reg(vcpu, reg_list->reg[i], &addr);
+ if (ret) {
+ printf("%s: Failed to get ", config_name(c));
+ print_reg(config_name(c), reg.id);
+ putchar('\n');
+ ++failed_get;
+ }
+
+ for_each_sublist(c, s) {
+ /* rejects_set registers are rejected for set operation */
+ if (s->rejects_set && find_reg(s->rejects_set, s->rejects_set_n, reg.id)) {
+ reject_reg = true;
+ ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
+ if (ret != -1 || !check_reject_set(errno)) {
+ printf("%s: Failed to reject (ret=%d, errno=%d) ", config_name(c), ret, errno);
+ print_reg(config_name(c), reg.id);
+ putchar('\n');
+ ++failed_reject;
+ }
+ break;
+ }
+
+ /* skips_set registers are skipped for set operation */
+ if (s->skips_set && find_reg(s->skips_set, s->skips_set_n, reg.id)) {
+ skip_reg = true;
+ ++skipped_set;
+ break;
+ }
+ }
+
+ if (!reject_reg && !skip_reg) {
+ ret = __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ printf("%s: Failed to set ", config_name(c));
+ print_reg(config_name(c), reg.id);
+ putchar('\n');
+ ++failed_set;
+ }
+ }
+ }
+
+ for_each_new_reg(i)
+ ++new_regs;
+
+ for_each_missing_reg(i)
+ ++missing_regs;
+
+ if (new_regs || missing_regs) {
+ n = 0;
+ for_each_reg_filtered(i)
+ ++n;
+
+ printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
+ printf("%s: Number registers: %5lld (includes %lld filtered registers)\n",
+ config_name(c), reg_list->n, reg_list->n - n);
+ }
+
+ if (new_regs) {
+ printf("\n%s: There are %d new registers.\n"
+ "Consider adding them to the blessed reg "
+ "list with the following lines:\n\n", config_name(c), new_regs);
+ for_each_new_reg(i)
+ print_reg(config_name(c), reg_list->reg[i]);
+ putchar('\n');
+ }
+
+ if (missing_regs) {
+ printf("\n%s: There are %d missing registers.\n"
+ "The following lines are missing registers:\n\n", config_name(c), missing_regs);
+ for_each_missing_reg(i)
+ print_reg(config_name(c), blessed_reg[i]);
+ putchar('\n');
+ }
+
+ TEST_ASSERT(!missing_regs && !failed_get && !failed_set && !failed_reject,
+ "%s: There are %d missing registers; %d registers failed get; "
+ "%d registers failed set; %d registers failed reject; %d registers skipped set",
+ config_name(c), missing_regs, failed_get, failed_set, failed_reject, skipped_set);
+
+ pr_info("%s: PASS\n", config_name(c));
+ blessed_n = 0;
+ free(blessed_reg);
+ free(reg_list);
+ kvm_vm_free(vm);
+}
+
+static void help(void)
+{
+ struct vcpu_reg_list *c;
+ int i;
+
+ printf(
+ "\n"
+ "usage: get-reg-list [--config=<selection>] [--list] [--list-filtered]\n\n"
+ " --config=<selection> Used to select a specific vcpu configuration for the test/listing\n"
+ " '<selection>' may be\n");
+
+ for (i = 0; i < vcpu_configs_n; ++i) {
+ c = vcpu_configs[i];
+ printf(
+ " '%s'\n", config_name(c));
+ }
+
+ printf(
+ "\n"
+ " --list Print the register list rather than test it (requires --config)\n"
+ " --list-filtered Print registers that would normally be filtered out (requires --config)\n"
+ "\n"
+ );
+}
+
+static struct vcpu_reg_list *parse_config(const char *config)
+{
+ struct vcpu_reg_list *c = NULL;
+ int i;
+
+ if (config[8] != '=')
+ help(), exit(1);
+
+ for (i = 0; i < vcpu_configs_n; ++i) {
+ c = vcpu_configs[i];
+ if (strcmp(config_name(c), &config[9]) == 0)
+ break;
+ }
+
+ if (i == vcpu_configs_n)
+ help(), exit(1);
+
+ return c;
+}
+
+int main(int ac, char **av)
+{
+ struct vcpu_reg_list *c, *sel = NULL;
+ int i, ret = 0;
+ pid_t pid;
+
+ for (i = 1; i < ac; ++i) {
+ if (strncmp(av[i], "--config", 8) == 0)
+ sel = parse_config(av[i]);
+ else if (strcmp(av[i], "--list") == 0)
+ print_list = true;
+ else if (strcmp(av[i], "--list-filtered") == 0)
+ print_filtered = true;
+ else if (strcmp(av[i], "--help") == 0 || strcmp(av[1], "-h") == 0)
+ help(), exit(0);
+ else
+ help(), exit(1);
+ }
+
+ if (print_list || print_filtered) {
+ /*
+ * We only want to print the register list of a single config.
+ */
+ if (!sel)
+ help(), exit(1);
+ }
+
+ for (i = 0; i < vcpu_configs_n; ++i) {
+ c = vcpu_configs[i];
+ if (sel && c != sel)
+ continue;
+
+ pid = fork();
+
+ if (!pid) {
+ run_test(c);
+ exit(0);
+ } else {
+ int wstatus;
+ pid_t wpid = wait(&wstatus);
+ TEST_ASSERT(wpid == pid && WIFEXITED(wstatus), "wait: Unexpected return");
+ if (WEXITSTATUS(wstatus) && WEXITSTATUS(wstatus) != KSFT_SKIP)
+ ret = KSFT_FAIL;
+ }
+ }
+
+ return ret;
+}
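The file above is deliberately architecture-neutral: it binds to each architecture only through the weak hooks (filter_reg, print_reg, check_supported_reg, check_reject_set, finalize_vcpu) and the extern vcpu_configs[]/vcpu_configs_n symbols. A minimal sketch of a per-architecture companion file is shown below; the register IDs, the config name and the filtered ID are placeholders, not values taken from this patch:

/* arch-specific get-reg-list companion (illustrative sketch only) */
#include "kvm_util.h"
#include "test_util.h"
#include "processor.h"

static __u64 example_base_regs[] = {
	0x0011223344556677ULL,	/* placeholder register IDs */
	0x0011223344556678ULL,
};

bool filter_reg(__u64 reg)
{
	/* Example: hide one placeholder ID from the blessed-list comparison. */
	return reg == 0x00112233445566aaULL;
}

void print_reg(const char *prefix, __u64 id)
{
	printf("\t0x%llx, /* %s */\n", id, prefix);
}

static struct vcpu_reg_list example_config = {
	.name = "example",
	.sublists = {
	{
		.name = "base",
		.regs = example_base_regs,
		.regs_n = ARRAY_SIZE(example_base_regs),
	},
	{0},
	},
};

struct vcpu_reg_list *vcpu_configs[] = {
	&example_config,
};
int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);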
diff --git a/tools/testing/selftests/kvm/guest_print_test.c b/tools/testing/selftests/kvm/guest_print_test.c
new file mode 100644
index 000000000000..41230b746190
--- /dev/null
+++ b/tools/testing/selftests/kvm/guest_print_test.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A test for GUEST_PRINTF
+ *
+ * Copyright 2022, Google, Inc. and/or its affiliates.
+ */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+
+struct guest_vals {
+ uint64_t a;
+ uint64_t b;
+ uint64_t type;
+};
+
+static struct guest_vals vals;
+
+/* GUEST_PRINTF()/GUEST_ASSERT_FMT() does not support float or double. */
+#define TYPE_LIST \
+TYPE(test_type_i64, I64, "%ld", int64_t) \
+TYPE(test_type_u64, U64u, "%lu", uint64_t) \
+TYPE(test_type_x64, U64x, "0x%lx", uint64_t) \
+TYPE(test_type_X64, U64X, "0x%lX", uint64_t) \
+TYPE(test_type_u32, U32u, "%u", uint32_t) \
+TYPE(test_type_x32, U32x, "0x%x", uint32_t) \
+TYPE(test_type_X32, U32X, "0x%X", uint32_t) \
+TYPE(test_type_int, INT, "%d", int) \
+TYPE(test_type_char, CHAR, "%c", char) \
+TYPE(test_type_str, STR, "'%s'", const char *) \
+TYPE(test_type_ptr, PTR, "%p", uintptr_t)
+
+enum args_type {
+#define TYPE(fn, ext, fmt_t, T) TYPE_##ext,
+ TYPE_LIST
+#undef TYPE
+};
+
+static void run_test(struct kvm_vcpu *vcpu, const char *expected_printf,
+ const char *expected_assert);
+
+#define BUILD_TYPE_STRINGS_AND_HELPER(fn, ext, fmt_t, T) \
+const char *PRINTF_FMT_##ext = "Got params a = " fmt_t " and b = " fmt_t; \
+const char *ASSERT_FMT_##ext = "Expected " fmt_t ", got " fmt_t " instead"; \
+static void fn(struct kvm_vcpu *vcpu, T a, T b) \
+{ \
+ char expected_printf[UCALL_BUFFER_LEN]; \
+ char expected_assert[UCALL_BUFFER_LEN]; \
+ \
+ snprintf(expected_printf, UCALL_BUFFER_LEN, PRINTF_FMT_##ext, a, b); \
+ snprintf(expected_assert, UCALL_BUFFER_LEN, ASSERT_FMT_##ext, a, b); \
+ vals = (struct guest_vals){ (uint64_t)a, (uint64_t)b, TYPE_##ext }; \
+ sync_global_to_guest(vcpu->vm, vals); \
+ run_test(vcpu, expected_printf, expected_assert); \
+}
+
+#define TYPE(fn, ext, fmt_t, T) \
+ BUILD_TYPE_STRINGS_AND_HELPER(fn, ext, fmt_t, T)
+ TYPE_LIST
+#undef TYPE
+
+static void guest_code(void)
+{
+ while (1) {
+ switch (vals.type) {
+#define TYPE(fn, ext, fmt_t, T) \
+ case TYPE_##ext: \
+ GUEST_PRINTF(PRINTF_FMT_##ext, vals.a, vals.b); \
+ __GUEST_ASSERT(vals.a == vals.b, \
+ ASSERT_FMT_##ext, vals.a, vals.b); \
+ break;
+ TYPE_LIST
+#undef TYPE
+ default:
+ GUEST_SYNC(vals.type);
+ }
+
+ GUEST_DONE();
+ }
+}
+
+/*
+ * Unfortunately this gets a little messy because 'assert_msg' doesn't
+ * just contains the matching string, it also contains additional assert
+ * info. Fortunately the part that matches should be at the very end of
+ * 'assert_msg'.
+ */
+static void ucall_abort(const char *assert_msg, const char *expected_assert_msg)
+{
+ int len_str = strlen(assert_msg);
+ int len_substr = strlen(expected_assert_msg);
+ int offset = len_str - len_substr;
+
+ TEST_ASSERT(len_substr <= len_str,
+ "Expected '%s' to be a substring of '%s'\n",
+ expected_assert_msg, assert_msg);
+
+ TEST_ASSERT(strcmp(&assert_msg[offset], expected_assert_msg) == 0,
+ "Unexpected mismatch. Expected: '%s', got: '%s'",
+ expected_assert_msg, &assert_msg[offset]);
+}
+
+static void run_test(struct kvm_vcpu *vcpu, const char *expected_printf,
+ const char *expected_assert)
+{
+ struct kvm_run *run = vcpu->run;
+ struct ucall uc;
+
+ while (1) {
+ vcpu_run(vcpu);
+
+ TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
+ "Unexpected exit reason: %u (%s),\n",
+ run->exit_reason, exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_SYNC:
+ TEST_FAIL("Unknown 'args_type' = %lu", uc.args[1]);
+ break;
+ case UCALL_PRINTF:
+ TEST_ASSERT(strcmp(uc.buffer, expected_printf) == 0,
+ "Unexpected mismatch. Expected: '%s', got: '%s'",
+ expected_printf, uc.buffer);
+ break;
+ case UCALL_ABORT:
+ ucall_abort(uc.buffer, expected_assert);
+ break;
+ case UCALL_DONE:
+ return;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
+ }
+}
+
+static void guest_code_limits(void)
+{
+ char test_str[UCALL_BUFFER_LEN + 10];
+
+ memset(test_str, 'a', sizeof(test_str));
+ test_str[sizeof(test_str) - 1] = 0;
+
+ GUEST_PRINTF("%s", test_str);
+}
+
+static void test_limits(void)
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_run *run;
+ struct kvm_vm *vm;
+ struct ucall uc;
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code_limits);
+ run = vcpu->run;
+ vcpu_run(vcpu);
+
+ TEST_ASSERT(run->exit_reason == UCALL_EXIT_REASON,
+ "Unexpected exit reason: %u (%s),\n",
+ run->exit_reason, exit_reason_str(run->exit_reason));
+
+ TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_ABORT,
+ "Unexpected ucall command: %lu, Expected: %u (UCALL_ABORT)\n",
+ uc.cmd, UCALL_ABORT);
+
+ kvm_vm_free(vm);
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+ test_type_i64(vcpu, -1, -1);
+ test_type_i64(vcpu, -1, 1);
+ test_type_i64(vcpu, 0x1234567890abcdef, 0x1234567890abcdef);
+ test_type_i64(vcpu, 0x1234567890abcdef, 0x1234567890abcdee);
+
+ test_type_u64(vcpu, 0x1234567890abcdef, 0x1234567890abcdef);
+ test_type_u64(vcpu, 0x1234567890abcdef, 0x1234567890abcdee);
+ test_type_x64(vcpu, 0x1234567890abcdef, 0x1234567890abcdef);
+ test_type_x64(vcpu, 0x1234567890abcdef, 0x1234567890abcdee);
+ test_type_X64(vcpu, 0x1234567890abcdef, 0x1234567890abcdef);
+ test_type_X64(vcpu, 0x1234567890abcdef, 0x1234567890abcdee);
+
+ test_type_u32(vcpu, 0x90abcdef, 0x90abcdef);
+ test_type_u32(vcpu, 0x90abcdef, 0x90abcdee);
+ test_type_x32(vcpu, 0x90abcdef, 0x90abcdef);
+ test_type_x32(vcpu, 0x90abcdef, 0x90abcdee);
+ test_type_X32(vcpu, 0x90abcdef, 0x90abcdef);
+ test_type_X32(vcpu, 0x90abcdef, 0x90abcdee);
+
+ test_type_int(vcpu, -1, -1);
+ test_type_int(vcpu, -1, 1);
+ test_type_int(vcpu, 1, 1);
+
+ test_type_char(vcpu, 'a', 'a');
+ test_type_char(vcpu, 'a', 'A');
+ test_type_char(vcpu, 'a', 'b');
+
+ test_type_str(vcpu, "foo", "foo");
+ test_type_str(vcpu, "foo", "bar");
+
+ test_type_ptr(vcpu, 0x1234567890abcdef, 0x1234567890abcdef);
+ test_type_ptr(vcpu, 0x1234567890abcdef, 0x1234567890abcdee);
+
+ kvm_vm_free(vm);
+
+ test_limits();
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/include/aarch64/arch_timer.h b/tools/testing/selftests/kvm/include/aarch64/arch_timer.h
index cb7c03de3a21..b3e97525cb55 100644
--- a/tools/testing/selftests/kvm/include/aarch64/arch_timer.h
+++ b/tools/testing/selftests/kvm/include/aarch64/arch_timer.h
@@ -41,7 +41,7 @@ static inline uint64_t timer_get_cntct(enum arch_timer timer)
case PHYSICAL:
return read_sysreg(cntpct_el0);
default:
- GUEST_ASSERT_1(0, timer);
+ GUEST_FAIL("Unexpected timer type = %u", timer);
}
/* We should not reach here */
@@ -58,7 +58,7 @@ static inline void timer_set_cval(enum arch_timer timer, uint64_t cval)
write_sysreg(cval, cntp_cval_el0);
break;
default:
- GUEST_ASSERT_1(0, timer);
+ GUEST_FAIL("Unexpected timer type = %u", timer);
}
isb();
@@ -72,7 +72,7 @@ static inline uint64_t timer_get_cval(enum arch_timer timer)
case PHYSICAL:
return read_sysreg(cntp_cval_el0);
default:
- GUEST_ASSERT_1(0, timer);
+ GUEST_FAIL("Unexpected timer type = %u", timer);
}
/* We should not reach here */
@@ -89,7 +89,7 @@ static inline void timer_set_tval(enum arch_timer timer, uint32_t tval)
write_sysreg(tval, cntp_tval_el0);
break;
default:
- GUEST_ASSERT_1(0, timer);
+ GUEST_FAIL("Unexpected timer type = %u", timer);
}
isb();
@@ -105,7 +105,7 @@ static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl)
write_sysreg(ctl, cntp_ctl_el0);
break;
default:
- GUEST_ASSERT_1(0, timer);
+ GUEST_FAIL("Unexpected timer type = %u", timer);
}
isb();
@@ -119,7 +119,7 @@ static inline uint32_t timer_get_ctl(enum arch_timer timer)
case PHYSICAL:
return read_sysreg(cntp_ctl_el0);
default:
- GUEST_ASSERT_1(0, timer);
+ GUEST_FAIL("Unexpected timer type = %u", timer);
}
/* We should not reach here */
diff --git a/tools/testing/selftests/kvm/include/aarch64/ucall.h b/tools/testing/selftests/kvm/include/aarch64/ucall.h
new file mode 100644
index 000000000000..4b68f37efd36
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/aarch64/ucall.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UCALL_H
+#define SELFTEST_KVM_UCALL_H
+
+#include "kvm_util_base.h"
+
+#define UCALL_EXIT_REASON KVM_EXIT_MMIO
+
+/*
+ * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
+ * VM), it must not be accessed from host code.
+ */
+extern vm_vaddr_t *ucall_exit_mmio_addr;
+
+static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+{
+ WRITE_ONCE(*ucall_exit_mmio_addr, uc);
+}
+
+#endif
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index eb1ff597bcca..a18db6a7b3cf 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"
+#include <linux/types.h>
#include <asm/atomic.h>
@@ -124,6 +125,26 @@ struct kvm_vm {
uint32_t memslots[NR_MEM_REGIONS];
};
+struct vcpu_reg_sublist {
+ const char *name;
+ long capability;
+ int feature;
+ bool finalize;
+ __u64 *regs;
+ __u64 regs_n;
+ __u64 *rejects_set;
+ __u64 rejects_set_n;
+ __u64 *skips_set;
+ __u64 skips_set_n;
+};
+
+struct vcpu_reg_list {
+ char *name;
+ struct vcpu_reg_sublist sublists[];
+};
+
+#define for_each_sublist(c, s) \
+ for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))
#define kvm_for_each_vcpu(vm, i, vcpu) \
for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \
diff --git a/tools/testing/selftests/kvm/include/riscv/processor.h b/tools/testing/selftests/kvm/include/riscv/processor.h
index d00d213c3805..5b62a3d2aa9b 100644
--- a/tools/testing/selftests/kvm/include/riscv/processor.h
+++ b/tools/testing/selftests/kvm/include/riscv/processor.h
@@ -38,6 +38,9 @@ static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t idx,
KVM_REG_RISCV_TIMER_REG(name), \
KVM_REG_SIZE_U64)
+#define RISCV_ISA_EXT_REG(idx) __kvm_reg_id(KVM_REG_RISCV_ISA_EXT, \
+ idx, KVM_REG_SIZE_ULONG)
+
/* L3 index Bit[47:39] */
#define PGTBL_L3_INDEX_MASK 0x0000FF8000000000ULL
#define PGTBL_L3_INDEX_SHIFT 39
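RISCV_ISA_EXT_REG() composes the KVM_REG_RISCV_ISA_EXT one-reg ID for a given extension index, which lets a test probe whether an extension register is exposed. A sketch under the assumption that __vcpu_get_reg() and the KVM_RISCV_ISA_EXT_* indices from the uapi headers are available; the helper name is illustrative:

/* Illustrative only: query whether an ISA extension register reads as enabled. */
static bool vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext_idx)
{
	unsigned long value = 0;
	int ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext_idx), &value);

	return ret == 0 && value;
}

/* e.g. vcpu_has_ext(vcpu, KVM_RISCV_ISA_EXT_F) */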
diff --git a/tools/testing/selftests/kvm/include/riscv/ucall.h b/tools/testing/selftests/kvm/include/riscv/ucall.h
new file mode 100644
index 000000000000..be46eb32ec27
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/riscv/ucall.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UCALL_H
+#define SELFTEST_KVM_UCALL_H
+
+#include "processor.h"
+
+#define UCALL_EXIT_REASON KVM_EXIT_RISCV_SBI
+
+static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+{
+}
+
+static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+{
+ sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT,
+ KVM_RISCV_SELFTESTS_SBI_UCALL,
+ uc, 0, 0, 0, 0, 0);
+}
+
+#endif
diff --git a/tools/testing/selftests/kvm/include/s390x/ucall.h b/tools/testing/selftests/kvm/include/s390x/ucall.h
new file mode 100644
index 000000000000..b231bf2e49d6
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/s390x/ucall.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UCALL_H
+#define SELFTEST_KVM_UCALL_H
+
+#include "kvm_util_base.h"
+
+#define UCALL_EXIT_REASON KVM_EXIT_S390_SIEIC
+
+static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+{
+}
+
+static inline void ucall_arch_do_ucall(vm_vaddr_t uc)
+{
+ /* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */
+ asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory");
+}
+
+#endif
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index a6e9f215ce70..7e614adc6cf4 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -53,14 +53,13 @@ void test_assert(bool exp, const char *exp_str,
#define TEST_ASSERT(e, fmt, ...) \
test_assert((e), #e, __FILE__, __LINE__, fmt, ##__VA_ARGS__)
-#define ASSERT_EQ(a, b) do { \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- TEST_ASSERT(__a == __b, \
- "ASSERT_EQ(%s, %s) failed.\n" \
- "\t%s is %#lx\n" \
- "\t%s is %#lx", \
- #a, #b, #a, (unsigned long) __a, #b, (unsigned long) __b); \
+#define TEST_ASSERT_EQ(a, b) \
+do { \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ test_assert(__a == __b, #a " == " #b, __FILE__, __LINE__, \
+ "%#lx != %#lx (%s != %s)", \
+ (unsigned long)(__a), (unsigned long)(__b), #a, #b);\
} while (0)
#define TEST_ASSERT_KVM_EXIT_REASON(vcpu, expected) do { \
@@ -186,4 +185,9 @@ static inline uint32_t atoi_non_negative(const char *name, const char *num_str)
return num;
}
+int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args);
+int guest_snprintf(char *buf, int n, const char *fmt, ...);
+
+char *strdup_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2), nonnull(1)));
+
#endif /* SELFTEST_KVM_TEST_UTIL_H */
diff --git a/tools/testing/selftests/kvm/include/ucall_common.h b/tools/testing/selftests/kvm/include/ucall_common.h
index 1a6aaef5ccae..112bc1da732a 100644
--- a/tools/testing/selftests/kvm/include/ucall_common.h
+++ b/tools/testing/selftests/kvm/include/ucall_common.h
@@ -7,21 +7,25 @@
#ifndef SELFTEST_KVM_UCALL_COMMON_H
#define SELFTEST_KVM_UCALL_COMMON_H
#include "test_util.h"
+#include "ucall.h"
/* Common ucalls */
enum {
UCALL_NONE,
UCALL_SYNC,
UCALL_ABORT,
+ UCALL_PRINTF,
UCALL_DONE,
UCALL_UNHANDLED,
};
#define UCALL_MAX_ARGS 7
+#define UCALL_BUFFER_LEN 1024
struct ucall {
uint64_t cmd;
uint64_t args[UCALL_MAX_ARGS];
+ char buffer[UCALL_BUFFER_LEN];
/* Host virtual address of this struct. */
struct ucall *hva;
@@ -32,8 +36,12 @@ void ucall_arch_do_ucall(vm_vaddr_t uc);
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu);
void ucall(uint64_t cmd, int nargs, ...);
+void ucall_fmt(uint64_t cmd, const char *fmt, ...);
+void ucall_assert(uint64_t cmd, const char *exp, const char *file,
+ unsigned int line, const char *fmt, ...);
uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc);
void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
+int ucall_nr_pages_required(uint64_t page_size);
/*
* Perform userspace call without any associated data. This bare call avoids
@@ -46,8 +54,11 @@ void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa);
#define GUEST_SYNC_ARGS(stage, arg1, arg2, arg3, arg4) \
ucall(UCALL_SYNC, 6, "hello", stage, arg1, arg2, arg3, arg4)
#define GUEST_SYNC(stage) ucall(UCALL_SYNC, 2, "hello", stage)
+#define GUEST_PRINTF(_fmt, _args...) ucall_fmt(UCALL_PRINTF, _fmt, ##_args)
#define GUEST_DONE() ucall(UCALL_DONE, 0)
+#define REPORT_GUEST_PRINTF(ucall) pr_info("%s", (ucall).buffer)
+
enum guest_assert_builtin_args {
GUEST_ERROR_STRING,
GUEST_FILE,
@@ -55,70 +66,41 @@ enum guest_assert_builtin_args {
GUEST_ASSERT_BUILTIN_NARGS
};
-#define __GUEST_ASSERT(_condition, _condstr, _nargs, _args...) \
-do { \
- if (!(_condition)) \
- ucall(UCALL_ABORT, GUEST_ASSERT_BUILTIN_NARGS + _nargs, \
- "Failed guest assert: " _condstr, \
- __FILE__, __LINE__, ##_args); \
+#define ____GUEST_ASSERT(_condition, _exp, _fmt, _args...) \
+do { \
+ if (!(_condition)) \
+ ucall_assert(UCALL_ABORT, _exp, __FILE__, __LINE__, _fmt, ##_args); \
} while (0)
-#define GUEST_ASSERT(_condition) \
- __GUEST_ASSERT(_condition, #_condition, 0, 0)
-
-#define GUEST_ASSERT_1(_condition, arg1) \
- __GUEST_ASSERT(_condition, #_condition, 1, (arg1))
-
-#define GUEST_ASSERT_2(_condition, arg1, arg2) \
- __GUEST_ASSERT(_condition, #_condition, 2, (arg1), (arg2))
-
-#define GUEST_ASSERT_3(_condition, arg1, arg2, arg3) \
- __GUEST_ASSERT(_condition, #_condition, 3, (arg1), (arg2), (arg3))
-
-#define GUEST_ASSERT_4(_condition, arg1, arg2, arg3, arg4) \
- __GUEST_ASSERT(_condition, #_condition, 4, (arg1), (arg2), (arg3), (arg4))
-
-#define GUEST_ASSERT_EQ(a, b) __GUEST_ASSERT((a) == (b), #a " == " #b, 2, a, b)
+#define __GUEST_ASSERT(_condition, _fmt, _args...) \
+ ____GUEST_ASSERT(_condition, #_condition, _fmt, ##_args)
-#define __REPORT_GUEST_ASSERT(_ucall, fmt, _args...) \
- TEST_FAIL("%s at %s:%ld\n" fmt, \
- (const char *)(_ucall).args[GUEST_ERROR_STRING], \
- (const char *)(_ucall).args[GUEST_FILE], \
- (_ucall).args[GUEST_LINE], \
- ##_args)
+#define GUEST_ASSERT(_condition) \
+ __GUEST_ASSERT(_condition, #_condition)
-#define GUEST_ASSERT_ARG(ucall, i) ((ucall).args[GUEST_ASSERT_BUILTIN_NARGS + i])
+#define GUEST_FAIL(_fmt, _args...) \
+ ucall_assert(UCALL_ABORT, "Unconditional guest failure", \
+ __FILE__, __LINE__, _fmt, ##_args)
-#define REPORT_GUEST_ASSERT(ucall) \
- __REPORT_GUEST_ASSERT((ucall), "")
-
-#define REPORT_GUEST_ASSERT_1(ucall, fmt) \
- __REPORT_GUEST_ASSERT((ucall), \
- fmt, \
- GUEST_ASSERT_ARG((ucall), 0))
-
-#define REPORT_GUEST_ASSERT_2(ucall, fmt) \
- __REPORT_GUEST_ASSERT((ucall), \
- fmt, \
- GUEST_ASSERT_ARG((ucall), 0), \
- GUEST_ASSERT_ARG((ucall), 1))
-
-#define REPORT_GUEST_ASSERT_3(ucall, fmt) \
- __REPORT_GUEST_ASSERT((ucall), \
- fmt, \
- GUEST_ASSERT_ARG((ucall), 0), \
- GUEST_ASSERT_ARG((ucall), 1), \
- GUEST_ASSERT_ARG((ucall), 2))
+#define GUEST_ASSERT_EQ(a, b) \
+do { \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ ____GUEST_ASSERT(__a == __b, #a " == " #b, "%#lx != %#lx (%s != %s)", \
+ (unsigned long)(__a), (unsigned long)(__b), #a, #b); \
+} while (0)
-#define REPORT_GUEST_ASSERT_4(ucall, fmt) \
- __REPORT_GUEST_ASSERT((ucall), \
- fmt, \
- GUEST_ASSERT_ARG((ucall), 0), \
- GUEST_ASSERT_ARG((ucall), 1), \
- GUEST_ASSERT_ARG((ucall), 2), \
- GUEST_ASSERT_ARG((ucall), 3))
+#define GUEST_ASSERT_NE(a, b) \
+do { \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ ____GUEST_ASSERT(__a != __b, #a " != " #b, "%#lx == %#lx (%s == %s)", \
+ (unsigned long)(__a), (unsigned long)(__b), #a, #b); \
+} while (0)
-#define REPORT_GUEST_ASSERT_N(ucall, fmt, args...) \
- __REPORT_GUEST_ASSERT((ucall), fmt, ##args)
+#define REPORT_GUEST_ASSERT(ucall) \
+ test_assert(false, (const char *)(ucall).args[GUEST_ERROR_STRING], \
+ (const char *)(ucall).args[GUEST_FILE], \
+ (ucall).args[GUEST_LINE], "%s", (ucall).buffer)
#endif /* SELFTEST_KVM_UCALL_COMMON_H */
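With the printf-based ucall plumbing, a guest assertion carries one formatted message in uc.buffer instead of a fixed set of raw arguments, and the host simply forwards that buffer. An illustrative sketch of both sides; the function and variable names here are examples, not part of this patch:

/* Guest side (illustrative) */
static void example_guest_code(uint64_t expected, uint64_t actual)
{
	GUEST_PRINTF("checking 0x%lx against 0x%lx", actual, expected);
	__GUEST_ASSERT(actual == expected,
		       "expected 0x%lx, got 0x%lx", expected, actual);
	GUEST_ASSERT_EQ(actual, expected);
	if (actual > expected + 1)
		GUEST_FAIL("actual 0x%lx is out of range", actual);
	GUEST_DONE();
}

/* Host side (illustrative) */
static void example_host_loop(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vcpu);
		switch (get_ucall(vcpu, &uc)) {
		case UCALL_PRINTF:
			REPORT_GUEST_PRINTF(uc);	/* prints uc.buffer */
			continue;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);	/* fails the test with uc.buffer */
			break;
		case UCALL_DONE:
			return;
		default:
			TEST_FAIL("Unexpected ucall %lu", uc.cmd);
		}
	}
}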
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index aa434c8f19c5..4fd042112526 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -239,7 +239,12 @@ struct kvm_x86_cpu_property {
#define X86_PROPERTY_MAX_BASIC_LEAF KVM_X86_CPU_PROPERTY(0, 0, EAX, 0, 31)
#define X86_PROPERTY_PMU_VERSION KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 0, 7)
#define X86_PROPERTY_PMU_NR_GP_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 8, 15)
+#define X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 16, 23)
#define X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH KVM_X86_CPU_PROPERTY(0xa, 0, EAX, 24, 31)
+#define X86_PROPERTY_PMU_EVENTS_MASK KVM_X86_CPU_PROPERTY(0xa, 0, EBX, 0, 7)
+#define X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK KVM_X86_CPU_PROPERTY(0xa, 0, ECX, 0, 31)
+#define X86_PROPERTY_PMU_NR_FIXED_COUNTERS KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 0, 4)
+#define X86_PROPERTY_PMU_FIXED_COUNTERS_BIT_WIDTH KVM_X86_CPU_PROPERTY(0xa, 0, EDX, 5, 12)
#define X86_PROPERTY_SUPPORTED_XCR0_LO KVM_X86_CPU_PROPERTY(0xd, 0, EAX, 0, 31)
#define X86_PROPERTY_XSTATE_MAX_SIZE_XCR0 KVM_X86_CPU_PROPERTY(0xd, 0, EBX, 0, 31)
diff --git a/tools/testing/selftests/kvm/include/x86_64/ucall.h b/tools/testing/selftests/kvm/include/x86_64/ucall.h
new file mode 100644
index 000000000000..06b244bd06ee
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/x86_64/ucall.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_UCALL_H
+#define SELFTEST_KVM_UCALL_H
+
+#include "kvm_util_base.h"
+
+#define UCALL_EXIT_REASON KVM_EXIT_IO
+
+static inline void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
+{
+}
+
+#endif
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
index b3b00be1ef82..69f26d80c821 100644
--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -200,7 +200,7 @@ static void *vcpu_worker(void *data)
if (READ_ONCE(host_quit))
return NULL;
- clock_gettime(CLOCK_MONOTONIC_RAW, &start);
+ clock_gettime(CLOCK_MONOTONIC, &start);
ret = _vcpu_run(vcpu);
ts_diff = timespec_elapsed(start);
@@ -367,7 +367,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
/* Test the stage of KVM creating mappings */
*current_stage = KVM_CREATE_MAPPINGS;
- clock_gettime(CLOCK_MONOTONIC_RAW, &start);
+ clock_gettime(CLOCK_MONOTONIC, &start);
vcpus_complete_new_stage(*current_stage);
ts_diff = timespec_elapsed(start);
@@ -380,7 +380,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
*current_stage = KVM_UPDATE_MAPPINGS;
- clock_gettime(CLOCK_MONOTONIC_RAW, &start);
+ clock_gettime(CLOCK_MONOTONIC, &start);
vcpus_complete_new_stage(*current_stage);
ts_diff = timespec_elapsed(start);
@@ -392,7 +392,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
*current_stage = KVM_ADJUST_MAPPINGS;
- clock_gettime(CLOCK_MONOTONIC_RAW, &start);
+ clock_gettime(CLOCK_MONOTONIC, &start);
vcpus_complete_new_stage(*current_stage);
ts_diff = timespec_elapsed(start);
diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
index f212bd8ab93d..ddab0ce89d4d 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
@@ -6,11 +6,7 @@
*/
#include "kvm_util.h"
-/*
- * ucall_exit_mmio_addr holds per-VM values (global data is duplicated by each
- * VM), it must not be accessed from host code.
- */
-static vm_vaddr_t *ucall_exit_mmio_addr;
+vm_vaddr_t *ucall_exit_mmio_addr;
void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
@@ -23,11 +19,6 @@ void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva);
}
-void ucall_arch_do_ucall(vm_vaddr_t uc)
-{
- WRITE_ONCE(*ucall_exit_mmio_addr, uc);
-}
-
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
diff --git a/tools/testing/selftests/kvm/lib/guest_sprintf.c b/tools/testing/selftests/kvm/lib/guest_sprintf.c
new file mode 100644
index 000000000000..c4a69d8aeb68
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/guest_sprintf.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "test_util.h"
+#include "kvm_util.h"
+#include "ucall_common.h"
+
+#define APPEND_BUFFER_SAFE(str, end, v) \
+do { \
+ GUEST_ASSERT(str < end); \
+ *str++ = (v); \
+} while (0)
+
+static int isdigit(int ch)
+{
+ return (ch >= '0') && (ch <= '9');
+}
+
+static int skip_atoi(const char **s)
+{
+ int i = 0;
+
+ while (isdigit(**s))
+ i = i * 10 + *((*s)++) - '0';
+ return i;
+}
+
+#define ZEROPAD 1 /* pad with zero */
+#define SIGN 2 /* unsigned/signed long */
+#define PLUS 4 /* show plus */
+#define SPACE 8 /* space if plus */
+#define LEFT 16 /* left justified */
+#define SMALL 32 /* Must be 32 == 0x20 */
+#define SPECIAL 64 /* 0x */
+
+#define __do_div(n, base) \
+({ \
+ int __res; \
+ \
+ __res = ((uint64_t) n) % (uint32_t) base; \
+ n = ((uint64_t) n) / (uint32_t) base; \
+ __res; \
+})
+
+static char *number(char *str, const char *end, long num, int base, int size,
+ int precision, int type)
+{
+ /* we are called with base 8, 10 or 16, only, thus don't need "G..." */
+ static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */
+
+ char tmp[66];
+ char c, sign, locase;
+ int i;
+
+ /*
+ * locase = 0 or 0x20. ORing digits or letters with 'locase'
+ * produces same digits or (maybe lowercased) letters
+ */
+ locase = (type & SMALL);
+ if (type & LEFT)
+ type &= ~ZEROPAD;
+ if (base < 2 || base > 16)
+ return NULL;
+ c = (type & ZEROPAD) ? '0' : ' ';
+ sign = 0;
+ if (type & SIGN) {
+ if (num < 0) {
+ sign = '-';
+ num = -num;
+ size--;
+ } else if (type & PLUS) {
+ sign = '+';
+ size--;
+ } else if (type & SPACE) {
+ sign = ' ';
+ size--;
+ }
+ }
+ if (type & SPECIAL) {
+ if (base == 16)
+ size -= 2;
+ else if (base == 8)
+ size--;
+ }
+ i = 0;
+ if (num == 0)
+ tmp[i++] = '0';
+ else
+ while (num != 0)
+ tmp[i++] = (digits[__do_div(num, base)] | locase);
+ if (i > precision)
+ precision = i;
+ size -= precision;
+ if (!(type & (ZEROPAD + LEFT)))
+ while (size-- > 0)
+ APPEND_BUFFER_SAFE(str, end, ' ');
+ if (sign)
+ APPEND_BUFFER_SAFE(str, end, sign);
+ if (type & SPECIAL) {
+ if (base == 8)
+ APPEND_BUFFER_SAFE(str, end, '0');
+ else if (base == 16) {
+ APPEND_BUFFER_SAFE(str, end, '0');
+ APPEND_BUFFER_SAFE(str, end, 'x');
+ }
+ }
+ if (!(type & LEFT))
+ while (size-- > 0)
+ APPEND_BUFFER_SAFE(str, end, c);
+ while (i < precision--)
+ APPEND_BUFFER_SAFE(str, end, '0');
+ while (i-- > 0)
+ APPEND_BUFFER_SAFE(str, end, tmp[i]);
+ while (size-- > 0)
+ APPEND_BUFFER_SAFE(str, end, ' ');
+
+ return str;
+}
+
+int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args)
+{
+ char *str, *end;
+ const char *s;
+ uint64_t num;
+ int i, base;
+ int len;
+
+ int flags; /* flags to number() */
+
+ int field_width; /* width of output field */
+ int precision; /*
+ * min. # of digits for integers; max
+ * number of chars from string
+ */
+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
+
+ end = buf + n;
+ GUEST_ASSERT(buf < end);
+ GUEST_ASSERT(n > 0);
+
+ for (str = buf; *fmt; ++fmt) {
+ if (*fmt != '%') {
+ APPEND_BUFFER_SAFE(str, end, *fmt);
+ continue;
+ }
+
+ /* process flags */
+ flags = 0;
+repeat:
+ ++fmt; /* this also skips first '%' */
+ switch (*fmt) {
+ case '-':
+ flags |= LEFT;
+ goto repeat;
+ case '+':
+ flags |= PLUS;
+ goto repeat;
+ case ' ':
+ flags |= SPACE;
+ goto repeat;
+ case '#':
+ flags |= SPECIAL;
+ goto repeat;
+ case '0':
+ flags |= ZEROPAD;
+ goto repeat;
+ }
+
+ /* get field width */
+ field_width = -1;
+ if (isdigit(*fmt))
+ field_width = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ field_width = va_arg(args, int);
+ if (field_width < 0) {
+ field_width = -field_width;
+ flags |= LEFT;
+ }
+ }
+
+ /* get the precision */
+ precision = -1;
+ if (*fmt == '.') {
+ ++fmt;
+ if (isdigit(*fmt))
+ precision = skip_atoi(&fmt);
+ else if (*fmt == '*') {
+ ++fmt;
+ /* it's the next argument */
+ precision = va_arg(args, int);
+ }
+ if (precision < 0)
+ precision = 0;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L') {
+ qualifier = *fmt;
+ ++fmt;
+ }
+
+ /* default base */
+ base = 10;
+
+ switch (*fmt) {
+ case 'c':
+ if (!(flags & LEFT))
+ while (--field_width > 0)
+ APPEND_BUFFER_SAFE(str, end, ' ');
+ APPEND_BUFFER_SAFE(str, end,
+ (uint8_t)va_arg(args, int));
+ while (--field_width > 0)
+ APPEND_BUFFER_SAFE(str, end, ' ');
+ continue;
+
+ case 's':
+ s = va_arg(args, char *);
+ len = strnlen(s, precision);
+
+ if (!(flags & LEFT))
+ while (len < field_width--)
+ APPEND_BUFFER_SAFE(str, end, ' ');
+ for (i = 0; i < len; ++i)
+ APPEND_BUFFER_SAFE(str, end, *s++);
+ while (len < field_width--)
+ APPEND_BUFFER_SAFE(str, end, ' ');
+ continue;
+
+ case 'p':
+ if (field_width == -1) {
+ field_width = 2 * sizeof(void *);
+ flags |= SPECIAL | SMALL | ZEROPAD;
+ }
+ str = number(str, end,
+ (uint64_t)va_arg(args, void *), 16,
+ field_width, precision, flags);
+ continue;
+
+ case 'n':
+ if (qualifier == 'l') {
+ long *ip = va_arg(args, long *);
+ *ip = (str - buf);
+ } else {
+ int *ip = va_arg(args, int *);
+ *ip = (str - buf);
+ }
+ continue;
+
+ case '%':
+ APPEND_BUFFER_SAFE(str, end, '%');
+ continue;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ base = 8;
+ break;
+
+ case 'x':
+ flags |= SMALL;
+ case 'X':
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= SIGN;
+ case 'u':
+ break;
+
+ default:
+ APPEND_BUFFER_SAFE(str, end, '%');
+ if (*fmt)
+ APPEND_BUFFER_SAFE(str, end, *fmt);
+ else
+ --fmt;
+ continue;
+ }
+ if (qualifier == 'l')
+ num = va_arg(args, uint64_t);
+ else if (qualifier == 'h') {
+ num = (uint16_t)va_arg(args, int);
+ if (flags & SIGN)
+ num = (int16_t)num;
+ } else if (flags & SIGN)
+ num = va_arg(args, int);
+ else
+ num = va_arg(args, uint32_t);
+ str = number(str, end, num, base, field_width, precision, flags);
+ }
+
+ GUEST_ASSERT(str < end);
+ *str = '\0';
+ return str - buf;
+}
+
+int guest_snprintf(char *buf, int n, const char *fmt, ...)
+{
+ va_list va;
+ int len;
+
+ va_start(va, fmt);
+ len = guest_vsnprintf(buf, n, fmt, va);
+ va_end(va);
+
+ return len;
+}
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 9741a7ff6380..7a8af1821f5d 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -312,6 +312,7 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
uint32_t nr_runnable_vcpus,
uint64_t extra_mem_pages)
{
+ uint64_t page_size = vm_guest_mode_params[mode].page_size;
uint64_t nr_pages;
TEST_ASSERT(nr_runnable_vcpus,
@@ -340,6 +341,9 @@ static uint64_t vm_nr_pages_required(enum vm_guest_mode mode,
*/
nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2;
+ /* Account for the number of pages needed by ucall. */
+ nr_pages += ucall_nr_pages_required(page_size);
+
return vm_adjust_num_guest_pages(mode, nr_pages);
}
@@ -994,7 +998,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
alignment = max(backing_src_pagesz, alignment);
- ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
+ TEST_ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
/* Add enough memory to align up if necessary */
if (alignment > 1)
diff --git a/tools/testing/selftests/kvm/lib/riscv/ucall.c b/tools/testing/selftests/kvm/lib/riscv/ucall.c
index 9a3476a2dfca..fe6d1004f018 100644
--- a/tools/testing/selftests/kvm/lib/riscv/ucall.c
+++ b/tools/testing/selftests/kvm/lib/riscv/ucall.c
@@ -10,10 +10,6 @@
#include "kvm_util.h"
#include "processor.h"
-void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
-{
-}
-
struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4,
@@ -40,13 +36,6 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
return ret;
}
-void ucall_arch_do_ucall(vm_vaddr_t uc)
-{
- sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT,
- KVM_RISCV_SELFTESTS_SBI_UCALL,
- uc, 0, 0, 0, 0, 0);
-}
-
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
diff --git a/tools/testing/selftests/kvm/lib/s390x/ucall.c b/tools/testing/selftests/kvm/lib/s390x/ucall.c
index a7f02dc372cf..cca98734653d 100644
--- a/tools/testing/selftests/kvm/lib/s390x/ucall.c
+++ b/tools/testing/selftests/kvm/lib/s390x/ucall.c
@@ -6,16 +6,6 @@
*/
#include "kvm_util.h"
-void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
-{
-}
-
-void ucall_arch_do_ucall(vm_vaddr_t uc)
-{
- /* Exit via DIAGNOSE 0x501 (normally used for breakpoints) */
- asm volatile ("diag 0,%0,0x501" : : "a"(uc) : "memory");
-}
-
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c
index 50e0cf41a7dd..88cb6b84e6f3 100644
--- a/tools/testing/selftests/kvm/lib/sparsebit.c
+++ b/tools/testing/selftests/kvm/lib/sparsebit.c
@@ -634,7 +634,6 @@ static void node_reduce(struct sparsebit *s, struct node *nodep)
tmp = node_prev(s, nodep);
node_rm(s, nodep);
- nodep = NULL;
nodep = tmp;
reduction_performed = true;
diff --git a/tools/testing/selftests/kvm/lib/string_override.c b/tools/testing/selftests/kvm/lib/string_override.c
index 632398adc229..5d1c87277c49 100644
--- a/tools/testing/selftests/kvm/lib/string_override.c
+++ b/tools/testing/selftests/kvm/lib/string_override.c
@@ -37,3 +37,12 @@ void *memset(void *s, int c, size_t count)
*xs++ = c;
return s;
}
+
+size_t strnlen(const char *s, size_t count)
+{
+ const char *sc;
+
+ for (sc = s; count-- && *sc != '\0'; ++sc)
+ /* nothing */;
+ return sc - s;
+}
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index b772193f6c18..3e36019eeb4a 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -5,6 +5,9 @@
* Copyright (C) 2020, Google LLC.
*/
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdarg.h>
#include <assert.h>
#include <ctype.h>
#include <limits.h>
@@ -377,3 +380,15 @@ int atoi_paranoid(const char *num_str)
return num;
}
+
+char *strdup_printf(const char *fmt, ...)
+{
+ va_list ap;
+ char *str;
+
+ va_start(ap, fmt);
+ vasprintf(&str, fmt, ap);
+ va_end(ap);
+
+ return str;
+}
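strdup_printf() hands back a vasprintf()-allocated string, so the caller is responsible for freeing it. A minimal usage sketch with made-up arguments:

/* Illustrative only: build a label on the fly and release it afterwards. */
static void example_use_strdup_printf(int slot, uint64_t npages)
{
	char *name = strdup_printf("slot-%d-pages-%lu", slot, npages);

	pr_info("%s\n", name);
	free(name);	/* caller owns the vasprintf() allocation */
}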
diff --git a/tools/testing/selftests/kvm/lib/ucall_common.c b/tools/testing/selftests/kvm/lib/ucall_common.c
index 2f0e2ea941cc..816a3fa109bf 100644
--- a/tools/testing/selftests/kvm/lib/ucall_common.c
+++ b/tools/testing/selftests/kvm/lib/ucall_common.c
@@ -11,6 +11,11 @@ struct ucall_header {
struct ucall ucalls[KVM_MAX_VCPUS];
};
+int ucall_nr_pages_required(uint64_t page_size)
+{
+ return align_up(sizeof(struct ucall_header), page_size) / page_size;
+}
+
/*
* ucall_pool holds per-VM values (global data is duplicated by each VM), it
* must not be accessed from host code.
@@ -70,6 +75,45 @@ static void ucall_free(struct ucall *uc)
clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use);
}
+void ucall_assert(uint64_t cmd, const char *exp, const char *file,
+ unsigned int line, const char *fmt, ...)
+{
+ struct ucall *uc;
+ va_list va;
+
+ uc = ucall_alloc();
+ uc->cmd = cmd;
+
+ WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (uint64_t)(exp));
+ WRITE_ONCE(uc->args[GUEST_FILE], (uint64_t)(file));
+ WRITE_ONCE(uc->args[GUEST_LINE], line);
+
+ va_start(va, fmt);
+ guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);
+ va_end(va);
+
+ ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
+
+ ucall_free(uc);
+}
+
+void ucall_fmt(uint64_t cmd, const char *fmt, ...)
+{
+ struct ucall *uc;
+ va_list va;
+
+ uc = ucall_alloc();
+ uc->cmd = cmd;
+
+ va_start(va, fmt);
+ guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);
+ va_end(va);
+
+ ucall_arch_do_ucall((vm_vaddr_t)uc->hva);
+
+ ucall_free(uc);
+}
+
void ucall(uint64_t cmd, int nargs, ...)
{
struct ucall *uc;
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index d4a0b504b1e0..d8288374078e 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1074,11 +1074,6 @@ static bool kvm_fixup_exception(struct ex_regs *regs)
return true;
}
-void kvm_exit_unexpected_vector(uint32_t value)
-{
- ucall(UCALL_UNHANDLED, 1, value);
-}
-
void route_exception(struct ex_regs *regs)
{
typedef void(*handler)(struct ex_regs *);
@@ -1092,7 +1087,10 @@ void route_exception(struct ex_regs *regs)
if (kvm_fixup_exception(regs))
return;
- kvm_exit_unexpected_vector(regs->vector);
+ ucall_assert(UCALL_UNHANDLED,
+ "Unhandled exception in guest", __FILE__, __LINE__,
+ "Unhandled exception '0x%lx' at guest RIP '0x%lx'",
+ regs->vector, regs->rip);
}
void vm_init_descriptor_tables(struct kvm_vm *vm)
@@ -1135,12 +1133,8 @@ void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
struct ucall uc;
- if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) {
- uint64_t vector = uc.args[0];
-
- TEST_FAIL("Unexpected vectored event in guest (vector:0x%lx)",
- vector);
- }
+ if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED)
+ REPORT_GUEST_ASSERT(uc);
}
const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
diff --git a/tools/testing/selftests/kvm/lib/x86_64/ucall.c b/tools/testing/selftests/kvm/lib/x86_64/ucall.c
index 4d41dc63cc9e..1265cecc7dd1 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/ucall.c
@@ -8,14 +8,38 @@
#define UCALL_PIO_PORT ((uint16_t)0x1000)
-void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
-{
-}
-
void ucall_arch_do_ucall(vm_vaddr_t uc)
{
- asm volatile("in %[port], %%al"
- : : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax", "memory");
+ /*
+ * FIXME: Revert this hack (the entire commit that added it) once nVMX
+ * preserves L2 GPRs across a nested VM-Exit. If a ucall from L2, e.g.
+ * to do a GUEST_SYNC(), lands the vCPU in L1, any and all GPRs can be
+ * clobbered by L1. Save and restore non-volatile GPRs (clobbering RBP
+ * in particular is problematic) along with RDX and RDI (which are
+ * inputs), and clobber volatile GPRs. *sigh*
+ */
+#define HORRIFIC_L2_UCALL_CLOBBER_HACK \
+ "rcx", "rsi", "r8", "r9", "r10", "r11"
+
+ asm volatile("push %%rbp\n\t"
+ "push %%r15\n\t"
+ "push %%r14\n\t"
+ "push %%r13\n\t"
+ "push %%r12\n\t"
+ "push %%rbx\n\t"
+ "push %%rdx\n\t"
+ "push %%rdi\n\t"
+ "in %[port], %%al\n\t"
+ "pop %%rdi\n\t"
+ "pop %%rdx\n\t"
+ "pop %%rbx\n\t"
+ "pop %%r12\n\t"
+ "pop %%r13\n\t"
+ "pop %%r14\n\t"
+ "pop %%r15\n\t"
+ "pop %%rbp\n\t"
+ : : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax", "memory",
+ HORRIFIC_L2_UCALL_CLOBBER_HACK);
}
void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu)
diff --git a/tools/testing/selftests/kvm/max_guest_memory_test.c b/tools/testing/selftests/kvm/max_guest_memory_test.c
index feaf2be20ff2..6628dc4dda89 100644
--- a/tools/testing/selftests/kvm/max_guest_memory_test.c
+++ b/tools/testing/selftests/kvm/max_guest_memory_test.c
@@ -55,7 +55,7 @@ static void rendezvous_with_boss(void)
static void run_vcpu(struct kvm_vcpu *vcpu)
{
vcpu_run(vcpu);
- ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
+ TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
}
static void *vcpu_worker(void *data)
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
index 4210cd21d159..20eb2e730800 100644
--- a/tools/testing/selftests/kvm/memslot_perf_test.c
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -157,7 +157,7 @@ static void *vcpu_worker(void *__data)
goto done;
break;
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_1(uc, "val = %lu");
+ REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
goto done;
@@ -560,7 +560,7 @@ static void guest_code_test_memslot_rw(void)
ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += page_size) {
uint64_t val = *(uint64_t *)ptr;
- GUEST_ASSERT_1(val == MEM_TEST_VAL_2, val);
+ GUEST_ASSERT_EQ(val, MEM_TEST_VAL_2);
*(uint64_t *)ptr = 0;
}
diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
new file mode 100644
index 000000000000..d8ecacd03ecf
--- /dev/null
+++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c
@@ -0,0 +1,872 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Check for KVM_GET_REG_LIST regressions.
+ *
+ * Copyright (c) 2023 Intel Corporation
+ *
+ */
+#include <stdio.h>
+#include "kvm_util.h"
+#include "test_util.h"
+#include "processor.h"
+
+#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)
+
+bool filter_reg(__u64 reg)
+{
+ /*
+	 * Some ISA extensions are optional and not present on all hosts, but
+	 * they can't be disabled through the ISA_EXT registers when they are
+	 * present. So, to make life easy, just filter out those registers.
+ */
+ switch (reg & ~REG_MASK) {
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+bool check_reject_set(int err)
+{
+ return err == EINVAL;
+}
+
+static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext)
+{
+ int ret;
+ unsigned long value;
+
+ ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value);
+ if (ret) {
+ printf("Failed to get ext %d", ext);
+ return false;
+ }
+
+ return !!value;
+}
+
+void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
+{
+ struct vcpu_reg_sublist *s;
+
+ /*
+	 * Disable all extensions that are enabled by default if they are
+	 * available on the RISC-V host.
+ */
+ for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
+ __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0);
+
+ for_each_sublist(c, s) {
+ if (!s->feature)
+ continue;
+
+ /* Try to enable the desired extension */
+ __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(s->feature), 1);
+
+ /* Double check whether the desired extension was enabled */
+ __TEST_REQUIRE(vcpu_has_ext(vcpu, s->feature),
+ "%s not available, skipping tests\n", s->name);
+ }
+}
+
+static const char *config_id_to_str(__u64 id)
+{
+ /* reg_off is the offset into struct kvm_riscv_config */
+ __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CONFIG);
+
+ switch (reg_off) {
+ case KVM_REG_RISCV_CONFIG_REG(isa):
+ return "KVM_REG_RISCV_CONFIG_REG(isa)";
+ case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
+ return "KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)";
+ case KVM_REG_RISCV_CONFIG_REG(zicboz_block_size):
+ return "KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)";
+ case KVM_REG_RISCV_CONFIG_REG(mvendorid):
+ return "KVM_REG_RISCV_CONFIG_REG(mvendorid)";
+ case KVM_REG_RISCV_CONFIG_REG(marchid):
+ return "KVM_REG_RISCV_CONFIG_REG(marchid)";
+ case KVM_REG_RISCV_CONFIG_REG(mimpid):
+ return "KVM_REG_RISCV_CONFIG_REG(mimpid)";
+ case KVM_REG_RISCV_CONFIG_REG(satp_mode):
+ return "KVM_REG_RISCV_CONFIG_REG(satp_mode)";
+ }
+
+ /*
+	 * The config reg list grows as new pseudo regs are added, so just
+	 * show the raw id to indicate a new pseudo config reg.
+ */
+ return strdup_printf("KVM_REG_RISCV_CONFIG_REG(%lld) /* UNKNOWN */", reg_off);
+}
+
+static const char *core_id_to_str(const char *prefix, __u64 id)
+{
+ /* reg_off is the offset into struct kvm_riscv_core */
+ __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CORE);
+
+ switch (reg_off) {
+ case KVM_REG_RISCV_CORE_REG(regs.pc):
+ return "KVM_REG_RISCV_CORE_REG(regs.pc)";
+ case KVM_REG_RISCV_CORE_REG(regs.ra):
+ return "KVM_REG_RISCV_CORE_REG(regs.ra)";
+ case KVM_REG_RISCV_CORE_REG(regs.sp):
+ return "KVM_REG_RISCV_CORE_REG(regs.sp)";
+ case KVM_REG_RISCV_CORE_REG(regs.gp):
+ return "KVM_REG_RISCV_CORE_REG(regs.gp)";
+ case KVM_REG_RISCV_CORE_REG(regs.tp):
+ return "KVM_REG_RISCV_CORE_REG(regs.tp)";
+ case KVM_REG_RISCV_CORE_REG(regs.t0) ... KVM_REG_RISCV_CORE_REG(regs.t2):
+ return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
+ reg_off - KVM_REG_RISCV_CORE_REG(regs.t0));
+ case KVM_REG_RISCV_CORE_REG(regs.s0) ... KVM_REG_RISCV_CORE_REG(regs.s1):
+ return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
+ reg_off - KVM_REG_RISCV_CORE_REG(regs.s0));
+ case KVM_REG_RISCV_CORE_REG(regs.a0) ... KVM_REG_RISCV_CORE_REG(regs.a7):
+ return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.a%lld)",
+ reg_off - KVM_REG_RISCV_CORE_REG(regs.a0));
+ case KVM_REG_RISCV_CORE_REG(regs.s2) ... KVM_REG_RISCV_CORE_REG(regs.s11):
+ return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.s%lld)",
+ reg_off - KVM_REG_RISCV_CORE_REG(regs.s2) + 2);
+ case KVM_REG_RISCV_CORE_REG(regs.t3) ... KVM_REG_RISCV_CORE_REG(regs.t6):
+ return strdup_printf("KVM_REG_RISCV_CORE_REG(regs.t%lld)",
+ reg_off - KVM_REG_RISCV_CORE_REG(regs.t3) + 3);
+ case KVM_REG_RISCV_CORE_REG(mode):
+ return "KVM_REG_RISCV_CORE_REG(mode)";
+ }
+
+ TEST_FAIL("%s: Unknown core reg id: 0x%llx", prefix, id);
+ return NULL;
+}
+
+#define RISCV_CSR_GENERAL(csr) \
+ "KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(" #csr ")"
+#define RISCV_CSR_AIA(csr) \
+ "KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_REG(" #csr ")"
+
+static const char *general_csr_id_to_str(__u64 reg_off)
+{
+ /* reg_off is the offset into struct kvm_riscv_csr */
+ switch (reg_off) {
+ case KVM_REG_RISCV_CSR_REG(sstatus):
+ return RISCV_CSR_GENERAL(sstatus);
+ case KVM_REG_RISCV_CSR_REG(sie):
+ return RISCV_CSR_GENERAL(sie);
+ case KVM_REG_RISCV_CSR_REG(stvec):
+ return RISCV_CSR_GENERAL(stvec);
+ case KVM_REG_RISCV_CSR_REG(sscratch):
+ return RISCV_CSR_GENERAL(sscratch);
+ case KVM_REG_RISCV_CSR_REG(sepc):
+ return RISCV_CSR_GENERAL(sepc);
+ case KVM_REG_RISCV_CSR_REG(scause):
+ return RISCV_CSR_GENERAL(scause);
+ case KVM_REG_RISCV_CSR_REG(stval):
+ return RISCV_CSR_GENERAL(stval);
+ case KVM_REG_RISCV_CSR_REG(sip):
+ return RISCV_CSR_GENERAL(sip);
+ case KVM_REG_RISCV_CSR_REG(satp):
+ return RISCV_CSR_GENERAL(satp);
+ case KVM_REG_RISCV_CSR_REG(scounteren):
+ return RISCV_CSR_GENERAL(scounteren);
+ }
+
+ TEST_FAIL("Unknown general csr reg: 0x%llx", reg_off);
+ return NULL;
+}
+
+static const char *aia_csr_id_to_str(__u64 reg_off)
+{
+ /* reg_off is the offset into struct kvm_riscv_aia_csr */
+ switch (reg_off) {
+ case KVM_REG_RISCV_CSR_AIA_REG(siselect):
+ return RISCV_CSR_AIA(siselect);
+ case KVM_REG_RISCV_CSR_AIA_REG(iprio1):
+ return RISCV_CSR_AIA(iprio1);
+ case KVM_REG_RISCV_CSR_AIA_REG(iprio2):
+ return RISCV_CSR_AIA(iprio2);
+ case KVM_REG_RISCV_CSR_AIA_REG(sieh):
+ return RISCV_CSR_AIA(sieh);
+ case KVM_REG_RISCV_CSR_AIA_REG(siph):
+ return RISCV_CSR_AIA(siph);
+ case KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
+ return RISCV_CSR_AIA(iprio1h);
+ case KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
+ return RISCV_CSR_AIA(iprio2h);
+ }
+
+ TEST_FAIL("Unknown aia csr reg: 0x%llx", reg_off);
+ return NULL;
+}
+
+static const char *csr_id_to_str(const char *prefix, __u64 id)
+{
+ __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_CSR);
+ __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
+
+ reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+ switch (reg_subtype) {
+ case KVM_REG_RISCV_CSR_GENERAL:
+ return general_csr_id_to_str(reg_off);
+ case KVM_REG_RISCV_CSR_AIA:
+ return aia_csr_id_to_str(reg_off);
+ }
+
+ TEST_FAIL("%s: Unknown csr subtype: 0x%llx", prefix, reg_subtype);
+ return NULL;
+}
+
+static const char *timer_id_to_str(const char *prefix, __u64 id)
+{
+ /* reg_off is the offset into struct kvm_riscv_timer */
+ __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_TIMER);
+
+ switch (reg_off) {
+ case KVM_REG_RISCV_TIMER_REG(frequency):
+ return "KVM_REG_RISCV_TIMER_REG(frequency)";
+ case KVM_REG_RISCV_TIMER_REG(time):
+ return "KVM_REG_RISCV_TIMER_REG(time)";
+ case KVM_REG_RISCV_TIMER_REG(compare):
+ return "KVM_REG_RISCV_TIMER_REG(compare)";
+ case KVM_REG_RISCV_TIMER_REG(state):
+ return "KVM_REG_RISCV_TIMER_REG(state)";
+ }
+
+ TEST_FAIL("%s: Unknown timer reg id: 0x%llx", prefix, id);
+ return NULL;
+}
+
+static const char *fp_f_id_to_str(const char *prefix, __u64 id)
+{
+ /* reg_off is the offset into struct __riscv_f_ext_state */
+ __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_F);
+
+ switch (reg_off) {
+ case KVM_REG_RISCV_FP_F_REG(f[0]) ...
+ KVM_REG_RISCV_FP_F_REG(f[31]):
+ return strdup_printf("KVM_REG_RISCV_FP_F_REG(f[%lld])", reg_off);
+ case KVM_REG_RISCV_FP_F_REG(fcsr):
+ return "KVM_REG_RISCV_FP_F_REG(fcsr)";
+ }
+
+ TEST_FAIL("%s: Unknown fp_f reg id: 0x%llx", prefix, id);
+ return NULL;
+}
+
+static const char *fp_d_id_to_str(const char *prefix, __u64 id)
+{
+ /* reg_off is the offset into struct __riscv_d_ext_state */
+ __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_FP_D);
+
+ switch (reg_off) {
+ case KVM_REG_RISCV_FP_D_REG(f[0]) ...
+ KVM_REG_RISCV_FP_D_REG(f[31]):
+ return strdup_printf("KVM_REG_RISCV_FP_D_REG(f[%lld])", reg_off);
+ case KVM_REG_RISCV_FP_D_REG(fcsr):
+ return "KVM_REG_RISCV_FP_D_REG(fcsr)";
+ }
+
+ TEST_FAIL("%s: Unknown fp_d reg id: 0x%llx", prefix, id);
+ return NULL;
+}
+
+static const char *isa_ext_id_to_str(__u64 id)
+{
+ /* reg_off is the offset into unsigned long kvm_isa_ext_arr[] */
+ __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_ISA_EXT);
+
+ static const char * const kvm_isa_ext_reg_name[] = {
+ "KVM_RISCV_ISA_EXT_A",
+ "KVM_RISCV_ISA_EXT_C",
+ "KVM_RISCV_ISA_EXT_D",
+ "KVM_RISCV_ISA_EXT_F",
+ "KVM_RISCV_ISA_EXT_H",
+ "KVM_RISCV_ISA_EXT_I",
+ "KVM_RISCV_ISA_EXT_M",
+ "KVM_RISCV_ISA_EXT_SVPBMT",
+ "KVM_RISCV_ISA_EXT_SSTC",
+ "KVM_RISCV_ISA_EXT_SVINVAL",
+ "KVM_RISCV_ISA_EXT_ZIHINTPAUSE",
+ "KVM_RISCV_ISA_EXT_ZICBOM",
+ "KVM_RISCV_ISA_EXT_ZICBOZ",
+ "KVM_RISCV_ISA_EXT_ZBB",
+ "KVM_RISCV_ISA_EXT_SSAIA",
+ "KVM_RISCV_ISA_EXT_V",
+ "KVM_RISCV_ISA_EXT_SVNAPOT",
+ "KVM_RISCV_ISA_EXT_ZBA",
+ "KVM_RISCV_ISA_EXT_ZBS",
+ "KVM_RISCV_ISA_EXT_ZICNTR",
+ "KVM_RISCV_ISA_EXT_ZICSR",
+ "KVM_RISCV_ISA_EXT_ZIFENCEI",
+ "KVM_RISCV_ISA_EXT_ZIHPM",
+ };
+
+ if (reg_off >= ARRAY_SIZE(kvm_isa_ext_reg_name)) {
+ /*
+		 * The isa_ext reg list grows as new ISA extensions are added, so
+		 * just show the raw id to indicate a new extension.
+ */
+ return strdup_printf("%lld /* UNKNOWN */", reg_off);
+ }
+
+ return kvm_isa_ext_reg_name[reg_off];
+}
+
+static const char *sbi_ext_single_id_to_str(__u64 reg_off)
+{
+ /* reg_off is KVM_RISCV_SBI_EXT_ID */
+ static const char * const kvm_sbi_ext_reg_name[] = {
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01",
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME",
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI",
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE",
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST",
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM",
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU",
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL",
+ "KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR",
+ };
+
+ if (reg_off >= ARRAY_SIZE(kvm_sbi_ext_reg_name)) {
+ /*
+		 * The sbi_ext reg list grows as new SBI extensions are added, so
+		 * just show the raw id to indicate a new extension.
+ */
+ return strdup_printf("KVM_REG_RISCV_SBI_SINGLE | %lld /* UNKNOWN */", reg_off);
+ }
+
+ return kvm_sbi_ext_reg_name[reg_off];
+}
+
+static const char *sbi_ext_multi_id_to_str(__u64 reg_subtype, __u64 reg_off)
+{
+ if (reg_off > KVM_REG_RISCV_SBI_MULTI_REG_LAST) {
+ /*
+		 * The sbi_ext reg list grows as new SBI extensions are added, so
+		 * just show the raw id to indicate a new extension.
+ */
+ return strdup_printf("%lld /* UNKNOWN */", reg_off);
+ }
+
+ switch (reg_subtype) {
+ case KVM_REG_RISCV_SBI_MULTI_EN:
+ return strdup_printf("KVM_REG_RISCV_SBI_MULTI_EN | %lld", reg_off);
+ case KVM_REG_RISCV_SBI_MULTI_DIS:
+ return strdup_printf("KVM_REG_RISCV_SBI_MULTI_DIS | %lld", reg_off);
+ }
+
+ return NULL;
+}
+
+static const char *sbi_ext_id_to_str(const char *prefix, __u64 id)
+{
+ __u64 reg_off = id & ~(REG_MASK | KVM_REG_RISCV_SBI_EXT);
+ __u64 reg_subtype = reg_off & KVM_REG_RISCV_SUBTYPE_MASK;
+
+ reg_off &= ~KVM_REG_RISCV_SUBTYPE_MASK;
+
+ switch (reg_subtype) {
+ case KVM_REG_RISCV_SBI_SINGLE:
+ return sbi_ext_single_id_to_str(reg_off);
+ case KVM_REG_RISCV_SBI_MULTI_EN:
+ case KVM_REG_RISCV_SBI_MULTI_DIS:
+ return sbi_ext_multi_id_to_str(reg_subtype, reg_off);
+ }
+
+ TEST_FAIL("%s: Unknown sbi ext subtype: 0x%llx", prefix, reg_subtype);
+ return NULL;
+}
+
+void print_reg(const char *prefix, __u64 id)
+{
+ const char *reg_size = NULL;
+
+ TEST_ASSERT((id & KVM_REG_ARCH_MASK) == KVM_REG_RISCV,
+ "%s: KVM_REG_RISCV missing in reg id: 0x%llx", prefix, id);
+
+ switch (id & KVM_REG_SIZE_MASK) {
+ case KVM_REG_SIZE_U32:
+ reg_size = "KVM_REG_SIZE_U32";
+ break;
+ case KVM_REG_SIZE_U64:
+ reg_size = "KVM_REG_SIZE_U64";
+ break;
+ case KVM_REG_SIZE_U128:
+ reg_size = "KVM_REG_SIZE_U128";
+ break;
+ default:
+ TEST_FAIL("%s: Unexpected reg size: 0x%llx in reg id: 0x%llx",
+ prefix, (id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT, id);
+ }
+
+ switch (id & KVM_REG_RISCV_TYPE_MASK) {
+ case KVM_REG_RISCV_CONFIG:
+ printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CONFIG | %s,\n",
+ reg_size, config_id_to_str(id));
+ break;
+ case KVM_REG_RISCV_CORE:
+ printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CORE | %s,\n",
+ reg_size, core_id_to_str(prefix, id));
+ break;
+ case KVM_REG_RISCV_CSR:
+ printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_CSR | %s,\n",
+ reg_size, csr_id_to_str(prefix, id));
+ break;
+ case KVM_REG_RISCV_TIMER:
+ printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_TIMER | %s,\n",
+ reg_size, timer_id_to_str(prefix, id));
+ break;
+ case KVM_REG_RISCV_FP_F:
+ printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_F | %s,\n",
+ reg_size, fp_f_id_to_str(prefix, id));
+ break;
+ case KVM_REG_RISCV_FP_D:
+ printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_FP_D | %s,\n",
+ reg_size, fp_d_id_to_str(prefix, id));
+ break;
+ case KVM_REG_RISCV_ISA_EXT:
+ printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_ISA_EXT | %s,\n",
+ reg_size, isa_ext_id_to_str(id));
+ break;
+ case KVM_REG_RISCV_SBI_EXT:
+ printf("\tKVM_REG_RISCV | %s | KVM_REG_RISCV_SBI_EXT | %s,\n",
+ reg_size, sbi_ext_id_to_str(prefix, id));
+ break;
+ default:
+ TEST_FAIL("%s: Unexpected reg type: 0x%llx in reg id: 0x%llx", prefix,
+ (id & KVM_REG_RISCV_TYPE_MASK) >> KVM_REG_RISCV_TYPE_SHIFT, id);
+ }
+}
+
+/*
+ * The current blessed list was primed with the output of kernel version
+ * v6.5-rc3 and then later updated with new registers.
+ */
+static __u64 base_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(isa),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mvendorid),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(marchid),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(mimpid),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(satp_mode),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.ra),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.sp),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.gp),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.tp),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t0),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t1),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t2),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s0),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s1),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a0),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a1),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a2),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a3),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a4),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a5),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a6),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.a7),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s2),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s3),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s4),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s5),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s6),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s7),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s8),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s9),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s10),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.s11),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t3),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t4),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t5),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.t6),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(mode),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sstatus),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sie),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stvec),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sscratch),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sepc),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scause),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(stval),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(sip),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(satp),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_GENERAL | KVM_REG_RISCV_CSR_REG(scounteren),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(frequency),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_RFENCE,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_SRST,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_HSM,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_PMU,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_EXPERIMENTAL,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_VENDOR,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_EN | 0,
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_MULTI_DIS | 0,
+};
+
+/*
+ * The skips_set list contains registers that should skip the set test.
+ * - KVM_REG_RISCV_TIMER_REG(state): setting it would fail if the timer was not initialized properly.
+ */
+static __u64 base_skips_set[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
+};
+
+static __u64 h_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H,
+};
+
+static __u64 zicbom_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicbom_block_size),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM,
+};
+
+static __u64 zicboz_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CONFIG | KVM_REG_RISCV_CONFIG_REG(zicboz_block_size),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ,
+};
+
+static __u64 svpbmt_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT,
+};
+
+static __u64 sstc_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC,
+};
+
+static __u64 svinval_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL,
+};
+
+static __u64 zihintpause_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE,
+};
+
+static __u64 zba_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA,
+};
+
+static __u64 zbb_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB,
+};
+
+static __u64 zbs_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS,
+};
+
+static __u64 zicntr_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR,
+};
+
+static __u64 zicsr_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICSR,
+};
+
+static __u64 zifencei_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI,
+};
+
+static __u64 zihpm_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM,
+};
+
+static __u64 aia_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA,
+};
+
+static __u64 fp_f_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[0]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[1]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[2]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[3]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[4]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[5]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[6]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[7]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[8]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[9]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[10]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[11]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[12]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[13]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[14]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[15]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[16]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[17]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[18]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[19]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[20]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[21]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[22]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[23]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[24]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[25]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[26]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[27]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[28]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[29]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[30]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(f[31]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F | KVM_REG_RISCV_FP_F_REG(fcsr),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F,
+};
+
+static __u64 fp_d_regs[] = {
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[0]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[1]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[2]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[3]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[4]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[5]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[6]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[7]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[8]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[9]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[10]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[11]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[12]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[13]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[14]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[15]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[16]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[17]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[18]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[19]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[20]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[21]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[22]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[23]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[24]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[25]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[26]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[27]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[28]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[29]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[30]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(f[31]),
+ KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_D | KVM_REG_RISCV_FP_D_REG(fcsr),
+ KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D,
+};
+
+#define BASE_SUBLIST \
+ {"base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), \
+ .skips_set = base_skips_set, .skips_set_n = ARRAY_SIZE(base_skips_set),}
+#define H_REGS_SUBLIST \
+ {"h", .feature = KVM_RISCV_ISA_EXT_H, .regs = h_regs, .regs_n = ARRAY_SIZE(h_regs),}
+#define ZICBOM_REGS_SUBLIST \
+ {"zicbom", .feature = KVM_RISCV_ISA_EXT_ZICBOM, .regs = zicbom_regs, .regs_n = ARRAY_SIZE(zicbom_regs),}
+#define ZICBOZ_REGS_SUBLIST \
+ {"zicboz", .feature = KVM_RISCV_ISA_EXT_ZICBOZ, .regs = zicboz_regs, .regs_n = ARRAY_SIZE(zicboz_regs),}
+#define SVPBMT_REGS_SUBLIST \
+ {"svpbmt", .feature = KVM_RISCV_ISA_EXT_SVPBMT, .regs = svpbmt_regs, .regs_n = ARRAY_SIZE(svpbmt_regs),}
+#define SSTC_REGS_SUBLIST \
+ {"sstc", .feature = KVM_RISCV_ISA_EXT_SSTC, .regs = sstc_regs, .regs_n = ARRAY_SIZE(sstc_regs),}
+#define SVINVAL_REGS_SUBLIST \
+ {"svinval", .feature = KVM_RISCV_ISA_EXT_SVINVAL, .regs = svinval_regs, .regs_n = ARRAY_SIZE(svinval_regs),}
+#define ZIHINTPAUSE_REGS_SUBLIST \
+ {"zihintpause", .feature = KVM_RISCV_ISA_EXT_ZIHINTPAUSE, .regs = zihintpause_regs, .regs_n = ARRAY_SIZE(zihintpause_regs),}
+#define ZBA_REGS_SUBLIST \
+ {"zba", .feature = KVM_RISCV_ISA_EXT_ZBA, .regs = zba_regs, .regs_n = ARRAY_SIZE(zba_regs),}
+#define ZBB_REGS_SUBLIST \
+ {"zbb", .feature = KVM_RISCV_ISA_EXT_ZBB, .regs = zbb_regs, .regs_n = ARRAY_SIZE(zbb_regs),}
+#define ZBS_REGS_SUBLIST \
+ {"zbs", .feature = KVM_RISCV_ISA_EXT_ZBS, .regs = zbs_regs, .regs_n = ARRAY_SIZE(zbs_regs),}
+#define ZICNTR_REGS_SUBLIST \
+ {"zicntr", .feature = KVM_RISCV_ISA_EXT_ZICNTR, .regs = zicntr_regs, .regs_n = ARRAY_SIZE(zicntr_regs),}
+#define ZICSR_REGS_SUBLIST \
+ {"zicsr", .feature = KVM_RISCV_ISA_EXT_ZICSR, .regs = zicsr_regs, .regs_n = ARRAY_SIZE(zicsr_regs),}
+#define ZIFENCEI_REGS_SUBLIST \
+ {"zifencei", .feature = KVM_RISCV_ISA_EXT_ZIFENCEI, .regs = zifencei_regs, .regs_n = ARRAY_SIZE(zifencei_regs),}
+#define ZIHPM_REGS_SUBLIST \
+ {"zihpm", .feature = KVM_RISCV_ISA_EXT_ZIHPM, .regs = zihpm_regs, .regs_n = ARRAY_SIZE(zihpm_regs),}
+#define AIA_REGS_SUBLIST \
+ {"aia", .feature = KVM_RISCV_ISA_EXT_SSAIA, .regs = aia_regs, .regs_n = ARRAY_SIZE(aia_regs),}
+#define FP_F_REGS_SUBLIST \
+ {"fp_f", .feature = KVM_RISCV_ISA_EXT_F, .regs = fp_f_regs, \
+ .regs_n = ARRAY_SIZE(fp_f_regs),}
+#define FP_D_REGS_SUBLIST \
+ {"fp_d", .feature = KVM_RISCV_ISA_EXT_D, .regs = fp_d_regs, \
+ .regs_n = ARRAY_SIZE(fp_d_regs),}
+
+static struct vcpu_reg_list h_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ H_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list zicbom_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ ZICBOM_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list zicboz_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ ZICBOZ_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list svpbmt_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ SVPBMT_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list sstc_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ SSTC_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list svinval_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ SVINVAL_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list zihintpause_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ ZIHINTPAUSE_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list zba_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ ZBA_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list zbb_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ ZBB_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list zbs_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ ZBS_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list zicntr_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ ZICNTR_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list zicsr_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ ZICSR_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list zifencei_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ ZIFENCEI_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list zihpm_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ ZIHPM_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list aia_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ AIA_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list fp_f_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ FP_F_REGS_SUBLIST,
+ {0},
+ },
+};
+
+static struct vcpu_reg_list fp_d_config = {
+ .sublists = {
+ BASE_SUBLIST,
+ FP_D_REGS_SUBLIST,
+ {0},
+ },
+};
+
+struct vcpu_reg_list *vcpu_configs[] = {
+ &h_config,
+ &zicbom_config,
+ &zicboz_config,
+ &svpbmt_config,
+ &sstc_config,
+ &svinval_config,
+ &zihintpause_config,
+ &zba_config,
+ &zbb_config,
+ &zbs_config,
+ &zicntr_config,
+ &zicsr_config,
+ &zifencei_config,
+ &zihpm_config,
+ &aia_config,
+ &fp_f_config,
+ &fp_d_config,
+};
+int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
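
The per-extension plumbing above is deliberately mechanical: a regs array, a *_REGS_SUBLIST macro, a vcpu_reg_list config, and an entry in vcpu_configs[]. As a hedged sketch, coverage for a hypothetical ISA extension "Zfoo" (every name below is made up) would follow the same pattern:

static __u64 zfoo_regs[] = {
	KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZFOO,
};

#define ZFOO_REGS_SUBLIST \
	{"zfoo", .feature = KVM_RISCV_ISA_EXT_ZFOO, .regs = zfoo_regs, .regs_n = ARRAY_SIZE(zfoo_regs),}

static struct vcpu_reg_list zfoo_config = {
	.sublists = {
	BASE_SUBLIST,
	ZFOO_REGS_SUBLIST,
	{0},
	},
};

/* ...plus a &zfoo_config entry appended to vcpu_configs[] above. */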
diff --git a/tools/testing/selftests/kvm/s390x/cmma_test.c b/tools/testing/selftests/kvm/s390x/cmma_test.c
index 1d73e78e8fa7..c8e0a6495a63 100644
--- a/tools/testing/selftests/kvm/s390x/cmma_test.c
+++ b/tools/testing/selftests/kvm/s390x/cmma_test.c
@@ -237,8 +237,8 @@ static void test_get_cmma_basic(void)
/* GET_CMMA_BITS without CMMA enabled should fail */
rc = vm_get_cmma_bits(vm, 0, &errno_out);
- ASSERT_EQ(rc, -1);
- ASSERT_EQ(errno_out, ENXIO);
+ TEST_ASSERT_EQ(rc, -1);
+ TEST_ASSERT_EQ(errno_out, ENXIO);
enable_cmma(vm);
vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);
@@ -247,31 +247,31 @@ static void test_get_cmma_basic(void)
/* GET_CMMA_BITS without migration mode and without peeking should fail */
rc = vm_get_cmma_bits(vm, 0, &errno_out);
- ASSERT_EQ(rc, -1);
- ASSERT_EQ(errno_out, EINVAL);
+ TEST_ASSERT_EQ(rc, -1);
+ TEST_ASSERT_EQ(errno_out, EINVAL);
/* GET_CMMA_BITS without migration mode and with peeking should work */
rc = vm_get_cmma_bits(vm, KVM_S390_CMMA_PEEK, &errno_out);
- ASSERT_EQ(rc, 0);
- ASSERT_EQ(errno_out, 0);
+ TEST_ASSERT_EQ(rc, 0);
+ TEST_ASSERT_EQ(errno_out, 0);
enable_dirty_tracking(vm);
enable_migration_mode(vm);
/* GET_CMMA_BITS with invalid flags */
rc = vm_get_cmma_bits(vm, 0xfeedc0fe, &errno_out);
- ASSERT_EQ(rc, -1);
- ASSERT_EQ(errno_out, EINVAL);
+ TEST_ASSERT_EQ(rc, -1);
+ TEST_ASSERT_EQ(errno_out, EINVAL);
kvm_vm_free(vm);
}
static void assert_exit_was_hypercall(struct kvm_vcpu *vcpu)
{
- ASSERT_EQ(vcpu->run->exit_reason, 13);
- ASSERT_EQ(vcpu->run->s390_sieic.icptcode, 4);
- ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x8300);
- ASSERT_EQ(vcpu->run->s390_sieic.ipb, 0x5010000);
+ TEST_ASSERT_EQ(vcpu->run->exit_reason, 13);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, 4);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x8300);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipb, 0x5010000);
}
static void test_migration_mode(void)
@@ -283,8 +283,8 @@ static void test_migration_mode(void)
/* enabling migration mode on a VM without memory should fail */
rc = __enable_migration_mode(vm);
- ASSERT_EQ(rc, -1);
- ASSERT_EQ(errno, EINVAL);
+ TEST_ASSERT_EQ(rc, -1);
+ TEST_ASSERT_EQ(errno, EINVAL);
TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
errno = 0;
@@ -304,8 +304,8 @@ static void test_migration_mode(void)
/* migration mode when memslots have dirty tracking off should fail */
rc = __enable_migration_mode(vm);
- ASSERT_EQ(rc, -1);
- ASSERT_EQ(errno, EINVAL);
+ TEST_ASSERT_EQ(rc, -1);
+ TEST_ASSERT_EQ(errno, EINVAL);
TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
errno = 0;
@@ -314,7 +314,7 @@ static void test_migration_mode(void)
/* enabling migration mode should work now */
rc = __enable_migration_mode(vm);
- ASSERT_EQ(rc, 0);
+ TEST_ASSERT_EQ(rc, 0);
TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
errno = 0;
@@ -350,7 +350,7 @@ static void test_migration_mode(void)
*/
vm_mem_region_set_flags(vm, TEST_DATA_TWO_MEMSLOT, KVM_MEM_LOG_DIRTY_PAGES);
rc = __enable_migration_mode(vm);
- ASSERT_EQ(rc, 0);
+ TEST_ASSERT_EQ(rc, 0);
TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
errno = 0;
@@ -394,9 +394,9 @@ static void assert_all_slots_cmma_dirty(struct kvm_vm *vm)
};
memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
- ASSERT_EQ(args.count, MAIN_PAGE_COUNT);
- ASSERT_EQ(args.remaining, TEST_DATA_PAGE_COUNT);
- ASSERT_EQ(args.start_gfn, 0);
+ TEST_ASSERT_EQ(args.count, MAIN_PAGE_COUNT);
+ TEST_ASSERT_EQ(args.remaining, TEST_DATA_PAGE_COUNT);
+ TEST_ASSERT_EQ(args.start_gfn, 0);
/* ...and then - after a hole - the TEST_DATA memslot should follow */
args = (struct kvm_s390_cmma_log){
@@ -407,9 +407,9 @@ static void assert_all_slots_cmma_dirty(struct kvm_vm *vm)
};
memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
- ASSERT_EQ(args.count, TEST_DATA_PAGE_COUNT);
- ASSERT_EQ(args.start_gfn, TEST_DATA_START_GFN);
- ASSERT_EQ(args.remaining, 0);
+ TEST_ASSERT_EQ(args.count, TEST_DATA_PAGE_COUNT);
+ TEST_ASSERT_EQ(args.start_gfn, TEST_DATA_START_GFN);
+ TEST_ASSERT_EQ(args.remaining, 0);
/* ...and nothing else should be there */
args = (struct kvm_s390_cmma_log){
@@ -420,9 +420,9 @@ static void assert_all_slots_cmma_dirty(struct kvm_vm *vm)
};
memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
- ASSERT_EQ(args.count, 0);
- ASSERT_EQ(args.start_gfn, 0);
- ASSERT_EQ(args.remaining, 0);
+ TEST_ASSERT_EQ(args.count, 0);
+ TEST_ASSERT_EQ(args.start_gfn, 0);
+ TEST_ASSERT_EQ(args.remaining, 0);
}
/**
@@ -498,11 +498,11 @@ static void assert_cmma_dirty(u64 first_dirty_gfn,
u64 dirty_gfn_count,
const struct kvm_s390_cmma_log *res)
{
- ASSERT_EQ(res->start_gfn, first_dirty_gfn);
- ASSERT_EQ(res->count, dirty_gfn_count);
+ TEST_ASSERT_EQ(res->start_gfn, first_dirty_gfn);
+ TEST_ASSERT_EQ(res->count, dirty_gfn_count);
for (size_t i = 0; i < dirty_gfn_count; i++)
- ASSERT_EQ(cmma_value_buf[0], 0x0); /* stable state */
- ASSERT_EQ(cmma_value_buf[dirty_gfn_count], 0xff); /* not touched */
+ TEST_ASSERT_EQ(cmma_value_buf[0], 0x0); /* stable state */
+ TEST_ASSERT_EQ(cmma_value_buf[dirty_gfn_count], 0xff); /* not touched */
}
static void test_get_skip_holes(void)
diff --git a/tools/testing/selftests/kvm/s390x/debug_test.c b/tools/testing/selftests/kvm/s390x/debug_test.c
new file mode 100644
index 000000000000..84313fb27529
--- /dev/null
+++ b/tools/testing/selftests/kvm/s390x/debug_test.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Test KVM debugging features. */
+#include "kvm_util.h"
+#include "test_util.h"
+
+#include <linux/kvm.h>
+
+#define __LC_SVC_NEW_PSW 0x1c0
+#define __LC_PGM_NEW_PSW 0x1d0
+#define ICPT_INSTRUCTION 0x04
+#define IPA0_DIAG 0x8300
+#define PGM_SPECIFICATION 0x06
+
+/* Common code for testing single-stepping interruptions. */
+extern char int_handler[];
+asm("int_handler:\n"
+ "j .\n");
+
+static struct kvm_vm *test_step_int_1(struct kvm_vcpu **vcpu, void *guest_code,
+ size_t new_psw_off, uint64_t *new_psw)
+{
+ struct kvm_guest_debug debug = {};
+ struct kvm_regs regs;
+ struct kvm_vm *vm;
+ char *lowcore;
+
+ vm = vm_create_with_one_vcpu(vcpu, guest_code);
+ lowcore = addr_gpa2hva(vm, 0);
+ new_psw[0] = (*vcpu)->run->psw_mask;
+ new_psw[1] = (uint64_t)int_handler;
+ memcpy(lowcore + new_psw_off, new_psw, 16);
+ vcpu_regs_get(*vcpu, &regs);
+ regs.gprs[2] = -1;
+ vcpu_regs_set(*vcpu, &regs);
+ debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
+ vcpu_guest_debug_set(*vcpu, &debug);
+ vcpu_run(*vcpu);
+
+ return vm;
+}
+
+static void test_step_int(void *guest_code, size_t new_psw_off)
+{
+ struct kvm_vcpu *vcpu;
+ uint64_t new_psw[2];
+ struct kvm_vm *vm;
+
+ vm = test_step_int_1(&vcpu, guest_code, new_psw_off, new_psw);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_DEBUG);
+ TEST_ASSERT_EQ(vcpu->run->psw_mask, new_psw[0]);
+ TEST_ASSERT_EQ(vcpu->run->psw_addr, new_psw[1]);
+ kvm_vm_free(vm);
+}
+
+/* Test single-stepping "boring" program interruptions. */
+extern char test_step_pgm_guest_code[];
+asm("test_step_pgm_guest_code:\n"
+ ".insn rr,0x1d00,%r1,%r0 /* dr %r1,%r0 */\n"
+ "j .\n");
+
+static void test_step_pgm(void)
+{
+ test_step_int(test_step_pgm_guest_code, __LC_PGM_NEW_PSW);
+}
+
+/*
+ * Test single-stepping program interruptions caused by DIAG.
+ * Userspace emulation must not interfere with single-stepping.
+ */
+extern char test_step_pgm_diag_guest_code[];
+asm("test_step_pgm_diag_guest_code:\n"
+ "diag %r0,%r0,0\n"
+ "j .\n");
+
+static void test_step_pgm_diag(void)
+{
+ struct kvm_s390_irq irq = {
+ .type = KVM_S390_PROGRAM_INT,
+ .u.pgm.code = PGM_SPECIFICATION,
+ };
+ struct kvm_vcpu *vcpu;
+ uint64_t new_psw[2];
+ struct kvm_vm *vm;
+
+ vm = test_step_int_1(&vcpu, test_step_pgm_diag_guest_code,
+ __LC_PGM_NEW_PSW, new_psw);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, ICPT_INSTRUCTION);
+ TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa & 0xff00, IPA0_DIAG);
+ vcpu_ioctl(vcpu, KVM_S390_IRQ, &irq);
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_DEBUG);
+ TEST_ASSERT_EQ(vcpu->run->psw_mask, new_psw[0]);
+ TEST_ASSERT_EQ(vcpu->run->psw_addr, new_psw[1]);
+ kvm_vm_free(vm);
+}
+
+/*
+ * Test single-stepping program interruptions caused by ISKE.
+ * CPUSTAT_KSS handling must not interfere with single-stepping.
+ */
+extern char test_step_pgm_iske_guest_code[];
+asm("test_step_pgm_iske_guest_code:\n"
+ "iske %r2,%r2\n"
+ "j .\n");
+
+static void test_step_pgm_iske(void)
+{
+ test_step_int(test_step_pgm_iske_guest_code, __LC_PGM_NEW_PSW);
+}
+
+/*
+ * Test single-stepping program interruptions caused by LCTL.
+ * KVM emulation must not interfere with single-stepping.
+ */
+extern char test_step_pgm_lctl_guest_code[];
+asm("test_step_pgm_lctl_guest_code:\n"
+ "lctl %c0,%c0,1\n"
+ "j .\n");
+
+static void test_step_pgm_lctl(void)
+{
+ test_step_int(test_step_pgm_lctl_guest_code, __LC_PGM_NEW_PSW);
+}
+
+/* Test single-stepping supervisor-call interruptions. */
+extern char test_step_svc_guest_code[];
+asm("test_step_svc_guest_code:\n"
+ "svc 0\n"
+ "j .\n");
+
+static void test_step_svc(void)
+{
+ test_step_int(test_step_svc_guest_code, __LC_SVC_NEW_PSW);
+}
+
+/* Run all tests above. */
+static struct testdef {
+ const char *name;
+ void (*test)(void);
+} testlist[] = {
+ { "single-step pgm", test_step_pgm },
+ { "single-step pgm caused by diag", test_step_pgm_diag },
+ { "single-step pgm caused by iske", test_step_pgm_iske },
+ { "single-step pgm caused by lctl", test_step_pgm_lctl },
+ { "single-step svc", test_step_svc },
+};
+
+int main(int argc, char *argv[])
+{
+ int idx;
+
+ ksft_print_header();
+ ksft_set_plan(ARRAY_SIZE(testlist));
+ for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
+ testlist[idx].test();
+ ksft_test_result_pass("%s\n", testlist[idx].name);
+ }
+ ksft_finished();
+}
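
Because the tests are table-driven, adding another single-step case touches only the guest stub, its wrapper, and testlist[]; main() stays unchanged. A hedged sketch of a hypothetical extra case (names are illustrative; it reuses the same divide instruction as test_step_pgm):

extern char test_step_pgm_foo_guest_code[];
asm("test_step_pgm_foo_guest_code:\n"
    ".insn rr,0x1d00,%r1,%r0 /* dr %r1,%r0 */\n"
    "j .\n");

static void test_step_pgm_foo(void)
{
	test_step_int(test_step_pgm_foo_guest_code, __LC_PGM_NEW_PSW);
}

/* ...plus a { "single-step pgm foo", test_step_pgm_foo } entry in testlist[]. */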
diff --git a/tools/testing/selftests/kvm/s390x/memop.c b/tools/testing/selftests/kvm/s390x/memop.c
index 8e4b94d7b8dd..bb3ca9a5d731 100644
--- a/tools/testing/selftests/kvm/s390x/memop.c
+++ b/tools/testing/selftests/kvm/s390x/memop.c
@@ -4,7 +4,6 @@
*
* Copyright (C) 2019, Red Hat, Inc.
*/
-
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -279,10 +278,10 @@ enum stage {
vcpu_run(__vcpu); \
get_ucall(__vcpu, &uc); \
if (uc.cmd == UCALL_ABORT) { \
- REPORT_GUEST_ASSERT_2(uc, "hints: %lu, %lu"); \
+ REPORT_GUEST_ASSERT(uc); \
} \
- ASSERT_EQ(uc.cmd, UCALL_SYNC); \
- ASSERT_EQ(uc.args[1], __stage); \
+ TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC); \
+ TEST_ASSERT_EQ(uc.args[1], __stage); \
}) \
static void prepare_mem12(void)
@@ -469,7 +468,7 @@ static __uint128_t cut_to_size(int size, __uint128_t val)
case 16:
return val;
}
- GUEST_ASSERT_1(false, "Invalid size");
+ GUEST_FAIL("Invalid size = %u", size);
return 0;
}
@@ -598,7 +597,7 @@ static bool _cmpxchg(int size, void *target, __uint128_t *old_addr, __uint128_t
return ret;
}
}
- GUEST_ASSERT_1(false, "Invalid size");
+ GUEST_FAIL("Invalid size = %u", size);
return 0;
}
@@ -808,7 +807,7 @@ static void test_termination(void)
HOST_SYNC(t.vcpu, STAGE_IDLED);
MOP(t.vm, ABSOLUTE, READ, &teid, sizeof(teid), GADDR(prefix + 168));
/* Bits 56, 60, 61 form a code, 0 being the only one allowing for termination */
- ASSERT_EQ(teid & teid_mask, 0);
+ TEST_ASSERT_EQ(teid & teid_mask, 0);
kvm_vm_free(t.kvm_vm);
}
diff --git a/tools/testing/selftests/kvm/s390x/tprot.c b/tools/testing/selftests/kvm/s390x/tprot.c
index a9a0b76e5fa4..c73f948c9b63 100644
--- a/tools/testing/selftests/kvm/s390x/tprot.c
+++ b/tools/testing/selftests/kvm/s390x/tprot.c
@@ -4,7 +4,6 @@
*
* Copyright IBM Corp. 2021
*/
-
#include <sys/mman.h>
#include "test_util.h"
#include "kvm_util.h"
@@ -156,7 +155,9 @@ static enum stage perform_next_stage(int *i, bool mapped_0)
!mapped_0;
if (!skip) {
result = test_protection(tests[*i].addr, tests[*i].key);
- GUEST_ASSERT_2(result == tests[*i].expected, *i, result);
+ __GUEST_ASSERT(result == tests[*i].expected,
+ "Wanted %u, got %u, for i = %u",
+ tests[*i].expected, result, *i);
}
}
return stage;
@@ -190,9 +191,9 @@ static void guest_code(void)
vcpu_run(__vcpu); \
get_ucall(__vcpu, &uc); \
if (uc.cmd == UCALL_ABORT) \
- REPORT_GUEST_ASSERT_2(uc, "hints: %lu, %lu"); \
- ASSERT_EQ(uc.cmd, UCALL_SYNC); \
- ASSERT_EQ(uc.args[1], __stage); \
+ REPORT_GUEST_ASSERT(uc); \
+ TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC); \
+ TEST_ASSERT_EQ(uc.args[1], __stage); \
})
#define HOST_SYNC(vcpu, stage) \
diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c
index a849ce23ca97..b32960189f5f 100644
--- a/tools/testing/selftests/kvm/set_memory_region_test.c
+++ b/tools/testing/selftests/kvm/set_memory_region_test.c
@@ -88,7 +88,7 @@ static void *vcpu_worker(void *data)
}
if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
- REPORT_GUEST_ASSERT_1(uc, "val = %lu");
+ REPORT_GUEST_ASSERT(uc);
return NULL;
}
@@ -156,19 +156,22 @@ static void guest_code_move_memory_region(void)
* window where the memslot is invalid is usually quite small.
*/
val = guest_spin_on_val(0);
- GUEST_ASSERT_1(val == 1 || val == MMIO_VAL, val);
+ __GUEST_ASSERT(val == 1 || val == MMIO_VAL,
+ "Expected '1' or MMIO ('%llx'), got '%llx'", MMIO_VAL, val);
/* Spin until the misaligning memory region move completes. */
val = guest_spin_on_val(MMIO_VAL);
- GUEST_ASSERT_1(val == 1 || val == 0, val);
+ __GUEST_ASSERT(val == 1 || val == 0,
+ "Expected '0' or '1' (no MMIO), got '%llx'", val);
/* Spin until the memory region starts to get re-aligned. */
val = guest_spin_on_val(0);
- GUEST_ASSERT_1(val == 1 || val == MMIO_VAL, val);
+ __GUEST_ASSERT(val == 1 || val == MMIO_VAL,
+ "Expected '1' or MMIO ('%llx'), got '%llx'", MMIO_VAL, val);
/* Spin until the re-aligning memory region move completes. */
val = guest_spin_on_val(MMIO_VAL);
- GUEST_ASSERT_1(val == 1, val);
+ GUEST_ASSERT_EQ(val, 1);
GUEST_DONE();
}
@@ -224,15 +227,15 @@ static void guest_code_delete_memory_region(void)
/* Spin until the memory region is deleted. */
val = guest_spin_on_val(0);
- GUEST_ASSERT_1(val == MMIO_VAL, val);
+ GUEST_ASSERT_EQ(val, MMIO_VAL);
/* Spin until the memory region is recreated. */
val = guest_spin_on_val(MMIO_VAL);
- GUEST_ASSERT_1(val == 0, val);
+ GUEST_ASSERT_EQ(val, 0);
/* Spin until the memory region is deleted. */
val = guest_spin_on_val(0);
- GUEST_ASSERT_1(val == MMIO_VAL, val);
+ GUEST_ASSERT_EQ(val, MMIO_VAL);
asm("1:\n\t"
".pushsection .rodata\n\t"
@@ -249,7 +252,7 @@ static void guest_code_delete_memory_region(void)
"final_rip_end: .quad 1b\n\t"
".popsection");
- GUEST_ASSERT_1(0, 0);
+ GUEST_ASSERT(0);
}
static void test_delete_memory_region(void)
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index c87f38712073..171adfb2a6cb 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -31,8 +31,8 @@ static uint64_t guest_stolen_time[NR_VCPUS];
static void check_status(struct kvm_steal_time *st)
{
GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
- GUEST_ASSERT(READ_ONCE(st->flags) == 0);
- GUEST_ASSERT(READ_ONCE(st->preempted) == 0);
+ GUEST_ASSERT_EQ(READ_ONCE(st->flags), 0);
+ GUEST_ASSERT_EQ(READ_ONCE(st->preempted), 0);
}
static void guest_code(int cpu)
@@ -40,7 +40,7 @@ static void guest_code(int cpu)
struct kvm_steal_time *st = st_gva[cpu];
uint32_t version;
- GUEST_ASSERT(rdmsr(MSR_KVM_STEAL_TIME) == ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED));
+ GUEST_ASSERT_EQ(rdmsr(MSR_KVM_STEAL_TIME), ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED));
memset(st, 0, sizeof(*st));
GUEST_SYNC(0);
@@ -122,8 +122,8 @@ static int64_t smccc(uint32_t func, uint64_t arg)
static void check_status(struct st_time *st)
{
- GUEST_ASSERT(READ_ONCE(st->rev) == 0);
- GUEST_ASSERT(READ_ONCE(st->attr) == 0);
+ GUEST_ASSERT_EQ(READ_ONCE(st->rev), 0);
+ GUEST_ASSERT_EQ(READ_ONCE(st->attr), 0);
}
static void guest_code(int cpu)
@@ -132,15 +132,15 @@ static void guest_code(int cpu)
int64_t status;
status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES);
- GUEST_ASSERT(status == 0);
+ GUEST_ASSERT_EQ(status, 0);
status = smccc(PV_TIME_FEATURES, PV_TIME_FEATURES);
- GUEST_ASSERT(status == 0);
+ GUEST_ASSERT_EQ(status, 0);
status = smccc(PV_TIME_FEATURES, PV_TIME_ST);
- GUEST_ASSERT(status == 0);
+ GUEST_ASSERT_EQ(status, 0);
status = smccc(PV_TIME_ST, 0);
- GUEST_ASSERT(status != -1);
- GUEST_ASSERT(status == (ulong)st_gva[cpu]);
+ GUEST_ASSERT_NE(status, -1);
+ GUEST_ASSERT_EQ(status, (ulong)st_gva[cpu]);
st = (struct st_time *)status;
GUEST_SYNC(0);
diff --git a/tools/testing/selftests/kvm/x86_64/cpuid_test.c b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
index d3c3aa93f090..3b34d8156d1c 100644
--- a/tools/testing/selftests/kvm/x86_64/cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cpuid_test.c
@@ -35,10 +35,10 @@ static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid)
guest_cpuid->entries[i].index,
&eax, &ebx, &ecx, &edx);
- GUEST_ASSERT(eax == guest_cpuid->entries[i].eax &&
- ebx == guest_cpuid->entries[i].ebx &&
- ecx == guest_cpuid->entries[i].ecx &&
- edx == guest_cpuid->entries[i].edx);
+ GUEST_ASSERT_EQ(eax, guest_cpuid->entries[i].eax);
+ GUEST_ASSERT_EQ(ebx, guest_cpuid->entries[i].ebx);
+ GUEST_ASSERT_EQ(ecx, guest_cpuid->entries[i].ecx);
+ GUEST_ASSERT_EQ(edx, guest_cpuid->entries[i].edx);
}
}
@@ -51,7 +51,7 @@ static void guest_main(struct kvm_cpuid2 *guest_cpuid)
GUEST_SYNC(2);
- GUEST_ASSERT(this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF) == 0x40000001);
+ GUEST_ASSERT_EQ(this_cpu_property(X86_PROPERTY_MAX_KVM_LEAF), 0x40000001);
GUEST_DONE();
}
@@ -116,7 +116,7 @@ static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
case UCALL_DONE:
return;
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
+ REPORT_GUEST_ASSERT(uc);
default:
TEST_ASSERT(false, "Unexpected exit: %s",
exit_reason_str(vcpu->run->exit_reason));
diff --git a/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c b/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
index beb7e2c10211..634c6bfcd572 100644
--- a/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
+++ b/tools/testing/selftests/kvm/x86_64/dirty_log_page_splitting_test.c
@@ -72,7 +72,7 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
vcpu_run(vcpu);
- ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
+ TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_SYNC);
vcpu_last_completed_iteration[vcpu_idx] = current_iteration;
@@ -179,12 +179,12 @@ static void run_test(enum vm_guest_mode mode, void *unused)
* with that capability.
*/
if (dirty_log_manual_caps) {
- ASSERT_EQ(stats_clear_pass[0].hugepages, 0);
- ASSERT_EQ(stats_clear_pass[0].pages_4k, total_4k_pages);
- ASSERT_EQ(stats_dirty_logging_enabled.hugepages, stats_populated.hugepages);
+ TEST_ASSERT_EQ(stats_clear_pass[0].hugepages, 0);
+ TEST_ASSERT_EQ(stats_clear_pass[0].pages_4k, total_4k_pages);
+ TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, stats_populated.hugepages);
} else {
- ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0);
- ASSERT_EQ(stats_dirty_logging_enabled.pages_4k, total_4k_pages);
+ TEST_ASSERT_EQ(stats_dirty_logging_enabled.hugepages, 0);
+ TEST_ASSERT_EQ(stats_dirty_logging_enabled.pages_4k, total_4k_pages);
}
/*
@@ -192,9 +192,9 @@ static void run_test(enum vm_guest_mode mode, void *unused)
* memory again, the page counts should be the same as they were
* right after initial population of memory.
*/
- ASSERT_EQ(stats_populated.pages_4k, stats_repopulated.pages_4k);
- ASSERT_EQ(stats_populated.pages_2m, stats_repopulated.pages_2m);
- ASSERT_EQ(stats_populated.pages_1g, stats_repopulated.pages_1g);
+ TEST_ASSERT_EQ(stats_populated.pages_4k, stats_repopulated.pages_4k);
+ TEST_ASSERT_EQ(stats_populated.pages_2m, stats_repopulated.pages_2m);
+ TEST_ASSERT_EQ(stats_populated.pages_1g, stats_repopulated.pages_1g);
}
static void help(char *name)
diff --git a/tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c b/tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c
index e334844d6e1d..6c2e5e0ceb1f 100644
--- a/tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c
+++ b/tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c
@@ -35,7 +35,7 @@ int main(int argc, char *argv[])
vcpu_run(vcpu);
handle_flds_emulation_failure_exit(vcpu);
vcpu_run(vcpu);
- ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
+ TEST_ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE);
kvm_vm_free(vm);
return 0;
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c b/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c
index 73af44d2167f..e036db1f32b9 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c
@@ -8,7 +8,6 @@
* Copyright 2022 Google LLC
* Author: Vipin Sharma <vipinsh@google.com>
*/
-
#include "kvm_util.h"
#include "processor.h"
#include "hyperv.h"
@@ -84,7 +83,7 @@ int main(void)
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_2(uc, "arg1 = %ld, arg2 = %ld");
+ REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
break;
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
index 78606de9385d..9f28aa276c4e 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
@@ -53,16 +53,21 @@ static void guest_msr(struct msr_data *msr)
vector = rdmsr_safe(msr->idx, &msr_val);
if (msr->fault_expected)
- GUEST_ASSERT_3(vector == GP_VECTOR, msr->idx, vector, GP_VECTOR);
+ __GUEST_ASSERT(vector == GP_VECTOR,
+ "Expected #GP on %sMSR(0x%x), got vector '0x%x'",
+ msr->write ? "WR" : "RD", msr->idx, vector);
else
- GUEST_ASSERT_3(!vector, msr->idx, vector, 0);
+ __GUEST_ASSERT(!vector,
+ "Expected success on %sMSR(0x%x), got vector '0x%x'",
+ msr->write ? "WR" : "RD", msr->idx, vector);
if (vector || is_write_only_msr(msr->idx))
goto done;
if (msr->write)
- GUEST_ASSERT_3(msr_val == msr->write_val, msr->idx,
- msr_val, msr->write_val);
+ __GUEST_ASSERT(msr_val == msr->write_val,
+ "WRMSR(0x%x) to '0x%llx', RDMSR read '0x%llx'",
+ msr->idx, msr->write_val, msr_val);
/* Invariant TSC bit appears when TSC invariant control MSR is written to */
if (msr->idx == HV_X64_MSR_TSC_INVARIANT_CONTROL) {
@@ -82,7 +87,7 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
u64 res, input, output;
uint8_t vector;
- GUEST_ASSERT(hcall->control);
+ GUEST_ASSERT_NE(hcall->control, 0);
wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
@@ -96,10 +101,14 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
vector = __hyperv_hypercall(hcall->control, input, output, &res);
if (hcall->ud_expected) {
- GUEST_ASSERT_2(vector == UD_VECTOR, hcall->control, vector);
+ __GUEST_ASSERT(vector == UD_VECTOR,
+ "Expected #UD for control '%u', got vector '0x%x'",
+ hcall->control, vector);
} else {
- GUEST_ASSERT_2(!vector, hcall->control, vector);
- GUEST_ASSERT_2(res == hcall->expect, hcall->expect, res);
+ __GUEST_ASSERT(!vector,
+ "Expected no exception for control '%u', got vector '0x%x'",
+ hcall->control, vector);
+ GUEST_ASSERT_EQ(res, hcall->expect);
}
GUEST_DONE();
@@ -495,7 +504,7 @@ static void guest_test_msrs_access(void)
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_3(uc, "MSR = %lx, arg1 = %lx, arg2 = %lx");
+ REPORT_GUEST_ASSERT(uc);
return;
case UCALL_DONE:
break;
@@ -665,7 +674,7 @@ static void guest_test_hcalls_access(void)
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_2(uc, "arg1 = %lx, arg2 = %lx");
+ REPORT_GUEST_ASSERT(uc);
return;
case UCALL_DONE:
break;
diff --git a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
index f774a9e62858..9e2879af7c20 100644
--- a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
+++ b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c
@@ -46,10 +46,10 @@ static void test_msr(struct msr_data *msr)
PR_MSR(msr);
vector = rdmsr_safe(msr->idx, &ignored);
- GUEST_ASSERT_1(vector == GP_VECTOR, vector);
+ GUEST_ASSERT_EQ(vector, GP_VECTOR);
vector = wrmsr_safe(msr->idx, 0);
- GUEST_ASSERT_1(vector == GP_VECTOR, vector);
+ GUEST_ASSERT_EQ(vector, GP_VECTOR);
}
struct hcall_data {
@@ -77,7 +77,7 @@ static void test_hcall(struct hcall_data *hc)
PR_HCALL(hc);
r = kvm_hypercall(hc->nr, 0, 0, 0, 0);
- GUEST_ASSERT(r == -KVM_ENOSYS);
+ GUEST_ASSERT_EQ(r, -KVM_ENOSYS);
}
static void guest_main(void)
@@ -125,7 +125,7 @@ static void enter_guest(struct kvm_vcpu *vcpu)
pr_hcall(&uc);
break;
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_1(uc, "vector = %lu");
+ REPORT_GUEST_ASSERT(uc);
return;
case UCALL_DONE:
return;
diff --git a/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c b/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c
index 72812644d7f5..80aa3d8b18f8 100644
--- a/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c
+++ b/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c
@@ -16,14 +16,25 @@ enum monitor_mwait_testcases {
MWAIT_DISABLED = BIT(2),
};
+/*
+ * If both MWAIT and its quirk are disabled, MONITOR/MWAIT should #UD, in all
+ * other scenarios KVM should emulate them as nops.
+ */
+#define GUEST_ASSERT_MONITOR_MWAIT(insn, testcase, vector) \
+do { \
+ bool fault_wanted = ((testcase) & MWAIT_QUIRK_DISABLED) && \
+ ((testcase) & MWAIT_DISABLED); \
+ \
+ if (fault_wanted) \
+ __GUEST_ASSERT((vector) == UD_VECTOR, \
+ "Expected #UD on " insn " for testcase '0x%x', got '0x%x'", vector); \
+ else \
+ __GUEST_ASSERT(!(vector), \
+ "Expected success on " insn " for testcase '0x%x', got '0x%x'", vector); \
+} while (0)
+
static void guest_monitor_wait(int testcase)
{
- /*
- * If both MWAIT and its quirk are disabled, MONITOR/MWAIT should #UD,
- * in all other scenarios KVM should emulate them as nops.
- */
- bool fault_wanted = (testcase & MWAIT_QUIRK_DISABLED) &&
- (testcase & MWAIT_DISABLED);
u8 vector;
GUEST_SYNC(testcase);
@@ -33,16 +44,10 @@ static void guest_monitor_wait(int testcase)
* intercept checks, so the inputs for MONITOR and MWAIT must be valid.
*/
vector = kvm_asm_safe("monitor", "a"(guest_monitor_wait), "c"(0), "d"(0));
- if (fault_wanted)
- GUEST_ASSERT_2(vector == UD_VECTOR, testcase, vector);
- else
- GUEST_ASSERT_2(!vector, testcase, vector);
+ GUEST_ASSERT_MONITOR_MWAIT("MONITOR", testcase, vector);
vector = kvm_asm_safe("mwait", "a"(guest_monitor_wait), "c"(0), "d"(0));
- if (fault_wanted)
- GUEST_ASSERT_2(vector == UD_VECTOR, testcase, vector);
- else
- GUEST_ASSERT_2(!vector, testcase, vector);
+ GUEST_ASSERT_MONITOR_MWAIT("MWAIT", testcase, vector);
}
static void guest_code(void)
@@ -85,7 +90,7 @@ int main(int argc, char *argv[])
testcase = uc.args[1];
break;
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_2(uc, "testcase = %lx, vector = %ld");
+ REPORT_GUEST_ASSERT(uc);
goto done;
case UCALL_DONE:
goto done;
diff --git a/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c b/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c
index 6502aa23c2f8..3670331adf21 100644
--- a/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c
+++ b/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c
@@ -180,9 +180,7 @@ static void assert_ucall_vector(struct kvm_vcpu *vcpu, int vector)
"Expected L2 to ask for %d, L2 says it's done", vector);
break;
case UCALL_ABORT:
- TEST_FAIL("%s at %s:%ld (0x%lx != 0x%lx)",
- (const char *)uc.args[0], __FILE__, uc.args[1],
- uc.args[2], uc.args[3]);
+ REPORT_GUEST_ASSERT(uc);
break;
default:
TEST_FAIL("Expected L2 to ask for %d, got unexpected ucall %lu", vector, uc.cmd);
@@ -247,12 +245,12 @@ int main(int argc, char *argv[])
/* Verify the pending events comes back out the same as it went in. */
vcpu_events_get(vcpu, &events);
- ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD,
- KVM_VCPUEVENT_VALID_PAYLOAD);
- ASSERT_EQ(events.exception.pending, true);
- ASSERT_EQ(events.exception.nr, SS_VECTOR);
- ASSERT_EQ(events.exception.has_error_code, true);
- ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE);
+ TEST_ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD,
+ KVM_VCPUEVENT_VALID_PAYLOAD);
+ TEST_ASSERT_EQ(events.exception.pending, true);
+ TEST_ASSERT_EQ(events.exception.nr, SS_VECTOR);
+ TEST_ASSERT_EQ(events.exception.has_error_code, true);
+ TEST_ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE);
/*
* Run for real with the pending #SS, L1 should get a VM-Exit due to
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
index 40507ed9fe8a..283cc55597a4 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c
@@ -27,6 +27,15 @@
#define ARCH_PERFMON_BRANCHES_RETIRED 5
#define NUM_BRANCHES 42
+#define INTEL_PMC_IDX_FIXED 32
+
+/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
+#define MAX_FILTER_EVENTS 300
+#define MAX_TEST_EVENTS 10
+
+#define PMU_EVENT_FILTER_INVALID_ACTION (KVM_PMU_EVENT_DENY + 1)
+#define PMU_EVENT_FILTER_INVALID_FLAGS (KVM_PMU_EVENT_FLAGS_VALID_MASK << 1)
+#define PMU_EVENT_FILTER_INVALID_NEVENTS (MAX_FILTER_EVENTS + 1)
/*
* This is how the event selector and unit mask are stored in an AMD
@@ -69,21 +78,33 @@
#define INST_RETIRED EVENT(0xc0, 0)
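+/*
+ * Stack-friendly mirror of the UAPI struct kvm_pmu_event_filter, which ends
+ * in a flexible array.  The fixed-size events[] array lets tests build
+ * filters as ordinary locals and copy base_event_filter by value;
+ * test_with_filter() and set_pmu_event_filter() cast it back to the UAPI
+ * struct before issuing the ioctl.
+ */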
+struct __kvm_pmu_event_filter {
+ __u32 action;
+ __u32 nevents;
+ __u32 fixed_counter_bitmap;
+ __u32 flags;
+ __u32 pad[4];
+ __u64 events[MAX_FILTER_EVENTS];
+};
+
/*
* This event list comprises Intel's eight architectural events plus
* AMD's "retired branch instructions" for Zen[123] (and possibly
* other AMD CPUs).
*/
-static const uint64_t event_list[] = {
- EVENT(0x3c, 0),
- INST_RETIRED,
- EVENT(0x3c, 1),
- EVENT(0x2e, 0x4f),
- EVENT(0x2e, 0x41),
- EVENT(0xc4, 0),
- EVENT(0xc5, 0),
- EVENT(0xa4, 1),
- AMD_ZEN_BR_RETIRED,
+static const struct __kvm_pmu_event_filter base_event_filter = {
+ .nevents = ARRAY_SIZE(base_event_filter.events),
+ .events = {
+ EVENT(0x3c, 0),
+ INST_RETIRED,
+ EVENT(0x3c, 1),
+ EVENT(0x2e, 0x4f),
+ EVENT(0x2e, 0x41),
+ EVENT(0xc4, 0),
+ EVENT(0xc5, 0),
+ EVENT(0xa4, 1),
+ AMD_ZEN_BR_RETIRED,
+ },
};
struct {
@@ -225,48 +246,11 @@ static bool sanity_check_pmu(struct kvm_vcpu *vcpu)
return !r;
}
-static struct kvm_pmu_event_filter *alloc_pmu_event_filter(uint32_t nevents)
-{
- struct kvm_pmu_event_filter *f;
- int size = sizeof(*f) + nevents * sizeof(f->events[0]);
-
- f = malloc(size);
- TEST_ASSERT(f, "Out of memory");
- memset(f, 0, size);
- f->nevents = nevents;
- return f;
-}
-
-
-static struct kvm_pmu_event_filter *
-create_pmu_event_filter(const uint64_t event_list[], int nevents,
- uint32_t action, uint32_t flags)
-{
- struct kvm_pmu_event_filter *f;
- int i;
-
- f = alloc_pmu_event_filter(nevents);
- f->action = action;
- f->flags = flags;
- for (i = 0; i < nevents; i++)
- f->events[i] = event_list[i];
-
- return f;
-}
-
-static struct kvm_pmu_event_filter *event_filter(uint32_t action)
-{
- return create_pmu_event_filter(event_list,
- ARRAY_SIZE(event_list),
- action, 0);
-}
-
/*
* Remove the first occurrence of 'event' (if any) from the filter's
* event list.
*/
-static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
- uint64_t event)
+static void remove_event(struct __kvm_pmu_event_filter *f, uint64_t event)
{
bool found = false;
int i;
@@ -279,7 +263,6 @@ static struct kvm_pmu_event_filter *remove_event(struct kvm_pmu_event_filter *f,
}
if (found)
f->nevents--;
- return f;
}
#define ASSERT_PMC_COUNTING_INSTRUCTIONS() \
@@ -315,66 +298,73 @@ static void test_without_filter(struct kvm_vcpu *vcpu)
}
static void test_with_filter(struct kvm_vcpu *vcpu,
- struct kvm_pmu_event_filter *f)
+ struct __kvm_pmu_event_filter *__f)
{
+ struct kvm_pmu_event_filter *f = (void *)__f;
+
vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
run_vcpu_and_sync_pmc_results(vcpu);
}
static void test_amd_deny_list(struct kvm_vcpu *vcpu)
{
- uint64_t event = EVENT(0x1C2, 0);
- struct kvm_pmu_event_filter *f;
+ struct __kvm_pmu_event_filter f = {
+ .action = KVM_PMU_EVENT_DENY,
+ .nevents = 1,
+ .events = {
+ EVENT(0x1C2, 0),
+ },
+ };
- f = create_pmu_event_filter(&event, 1, KVM_PMU_EVENT_DENY, 0);
- test_with_filter(vcpu, f);
- free(f);
+ test_with_filter(vcpu, &f);
ASSERT_PMC_COUNTING_INSTRUCTIONS();
}
static void test_member_deny_list(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
+ struct __kvm_pmu_event_filter f = base_event_filter;
- test_with_filter(vcpu, f);
- free(f);
+ f.action = KVM_PMU_EVENT_DENY;
+ test_with_filter(vcpu, &f);
ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
}
static void test_member_allow_list(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
+ struct __kvm_pmu_event_filter f = base_event_filter;
- test_with_filter(vcpu, f);
- free(f);
+ f.action = KVM_PMU_EVENT_ALLOW;
+ test_with_filter(vcpu, &f);
ASSERT_PMC_COUNTING_INSTRUCTIONS();
}
static void test_not_member_deny_list(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_DENY);
+ struct __kvm_pmu_event_filter f = base_event_filter;
- remove_event(f, INST_RETIRED);
- remove_event(f, INTEL_BR_RETIRED);
- remove_event(f, AMD_ZEN_BR_RETIRED);
- test_with_filter(vcpu, f);
- free(f);
+ f.action = KVM_PMU_EVENT_DENY;
+
+ remove_event(&f, INST_RETIRED);
+ remove_event(&f, INTEL_BR_RETIRED);
+ remove_event(&f, AMD_ZEN_BR_RETIRED);
+ test_with_filter(vcpu, &f);
ASSERT_PMC_COUNTING_INSTRUCTIONS();
}
static void test_not_member_allow_list(struct kvm_vcpu *vcpu)
{
- struct kvm_pmu_event_filter *f = event_filter(KVM_PMU_EVENT_ALLOW);
+ struct __kvm_pmu_event_filter f = base_event_filter;
+
+ f.action = KVM_PMU_EVENT_ALLOW;
- remove_event(f, INST_RETIRED);
- remove_event(f, INTEL_BR_RETIRED);
- remove_event(f, AMD_ZEN_BR_RETIRED);
- test_with_filter(vcpu, f);
- free(f);
+ remove_event(&f, INST_RETIRED);
+ remove_event(&f, INTEL_BR_RETIRED);
+ remove_event(&f, AMD_ZEN_BR_RETIRED);
+ test_with_filter(vcpu, &f);
ASSERT_PMC_NOT_COUNTING_INSTRUCTIONS();
}
@@ -569,19 +559,16 @@ static void run_masked_events_test(struct kvm_vcpu *vcpu,
const uint64_t masked_events[],
const int nmasked_events)
{
- struct kvm_pmu_event_filter *f;
+ struct __kvm_pmu_event_filter f = {
+ .nevents = nmasked_events,
+ .action = KVM_PMU_EVENT_ALLOW,
+ .flags = KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+ };
- f = create_pmu_event_filter(masked_events, nmasked_events,
- KVM_PMU_EVENT_ALLOW,
- KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
- test_with_filter(vcpu, f);
- free(f);
+ memcpy(f.events, masked_events, sizeof(uint64_t) * nmasked_events);
+ test_with_filter(vcpu, &f);
}
-/* Matches KVM_PMU_EVENT_FILTER_MAX_EVENTS in pmu.c */
-#define MAX_FILTER_EVENTS 300
-#define MAX_TEST_EVENTS 10
-
#define ALLOW_LOADS BIT(0)
#define ALLOW_STORES BIT(1)
#define ALLOW_LOADS_STORES BIT(2)
@@ -753,21 +740,33 @@ static void test_masked_events(struct kvm_vcpu *vcpu)
run_masked_events_tests(vcpu, events, nevents);
}
-static int run_filter_test(struct kvm_vcpu *vcpu, const uint64_t *events,
- int nevents, uint32_t flags)
+static int set_pmu_event_filter(struct kvm_vcpu *vcpu,
+ struct __kvm_pmu_event_filter *__f)
{
- struct kvm_pmu_event_filter *f;
- int r;
+ struct kvm_pmu_event_filter *f = (void *)__f;
- f = create_pmu_event_filter(events, nevents, KVM_PMU_EVENT_ALLOW, flags);
- r = __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
- free(f);
+ return __vm_ioctl(vcpu->vm, KVM_SET_PMU_EVENT_FILTER, f);
+}
- return r;
+static int set_pmu_single_event_filter(struct kvm_vcpu *vcpu, uint64_t event,
+ uint32_t flags, uint32_t action)
+{
+ struct __kvm_pmu_event_filter f = {
+ .nevents = 1,
+ .flags = flags,
+ .action = action,
+ .events = {
+ event,
+ },
+ };
+
+ return set_pmu_event_filter(vcpu, &f);
}
static void test_filter_ioctl(struct kvm_vcpu *vcpu)
{
+ uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+ struct __kvm_pmu_event_filter f;
uint64_t e = ~0ul;
int r;
@@ -775,15 +774,144 @@ static void test_filter_ioctl(struct kvm_vcpu *vcpu)
* Unfortunately having invalid bits set in event data is expected to
* pass when flags == 0 (bits other than eventsel+umask).
*/
- r = run_filter_test(vcpu, &e, 1, 0);
+ r = set_pmu_single_event_filter(vcpu, e, 0, KVM_PMU_EVENT_ALLOW);
TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
- r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+ r = set_pmu_single_event_filter(vcpu, e,
+ KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+ KVM_PMU_EVENT_ALLOW);
TEST_ASSERT(r != 0, "Invalid PMU Event Filter is expected to fail");
e = KVM_PMU_ENCODE_MASKED_ENTRY(0xff, 0xff, 0xff, 0xf);
- r = run_filter_test(vcpu, &e, 1, KVM_PMU_EVENT_FLAG_MASKED_EVENTS);
+ r = set_pmu_single_event_filter(vcpu, e,
+ KVM_PMU_EVENT_FLAG_MASKED_EVENTS,
+ KVM_PMU_EVENT_ALLOW);
TEST_ASSERT(r == 0, "Valid PMU Event Filter is failing");
+
+ f = base_event_filter;
+ f.action = PMU_EVENT_FILTER_INVALID_ACTION;
+ r = set_pmu_event_filter(vcpu, &f);
+ TEST_ASSERT(r, "Set invalid action is expected to fail");
+
+ f = base_event_filter;
+ f.flags = PMU_EVENT_FILTER_INVALID_FLAGS;
+ r = set_pmu_event_filter(vcpu, &f);
+ TEST_ASSERT(r, "Set invalid flags is expected to fail");
+
+ f = base_event_filter;
+ f.nevents = PMU_EVENT_FILTER_INVALID_NEVENTS;
+ r = set_pmu_event_filter(vcpu, &f);
+ TEST_ASSERT(r, "Exceeding the max number of filter events should fail");
+
+ f = base_event_filter;
+ f.fixed_counter_bitmap = ~GENMASK_ULL(nr_fixed_counters, 0);
+ r = set_pmu_event_filter(vcpu, &f);
+ TEST_ASSERT(!r, "Masking non-existent fixed counters should be allowed");
+}
+
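+/*
+ * Guest loop for the fixed-counter tests: enable only the OS bit for fixed
+ * counter @fixed_ctr_idx, retire a burst of branches, then report the
+ * counter value to the host via GUEST_SYNC so the host can tell whether the
+ * event filter let the counter count.
+ */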
+static void intel_run_fixed_counter_guest_code(uint8_t fixed_ctr_idx)
+{
+ for (;;) {
+ wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+ wrmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx, 0);
+
+ /* Only OS_EN bit is enabled for fixed counter[idx]. */
+ wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, BIT_ULL(4 * fixed_ctr_idx));
+ wrmsr(MSR_CORE_PERF_GLOBAL_CTRL,
+ BIT_ULL(INTEL_PMC_IDX_FIXED + fixed_ctr_idx));
+ __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+ wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
+ GUEST_SYNC(rdmsr(MSR_CORE_PERF_FIXED_CTR0 + fixed_ctr_idx));
+ }
+}
+
+static uint64_t test_with_fixed_counter_filter(struct kvm_vcpu *vcpu,
+ uint32_t action, uint32_t bitmap)
+{
+ struct __kvm_pmu_event_filter f = {
+ .action = action,
+ .fixed_counter_bitmap = bitmap,
+ };
+ set_pmu_event_filter(vcpu, &f);
+
+ return run_vcpu_to_sync(vcpu);
+}
+
+static uint64_t test_set_gp_and_fixed_event_filter(struct kvm_vcpu *vcpu,
+ uint32_t action,
+ uint32_t bitmap)
+{
+ struct __kvm_pmu_event_filter f = base_event_filter;
+
+ f.action = action;
+ f.fixed_counter_bitmap = bitmap;
+ set_pmu_event_filter(vcpu, &f);
+
+ return run_vcpu_to_sync(vcpu);
+}
+
+static void __test_fixed_counter_bitmap(struct kvm_vcpu *vcpu, uint8_t idx,
+ uint8_t nr_fixed_counters)
+{
+ unsigned int i;
+ uint32_t bitmap;
+ uint64_t count;
+
+ TEST_ASSERT(nr_fixed_counters < sizeof(bitmap) * 8,
+ "Invalid nr_fixed_counters");
+
+ /*
+ * Check that the fixed performance counter can count normally when
+ * KVM userspace doesn't set any PMU event filter.
+ */
+ count = run_vcpu_to_sync(vcpu);
+ TEST_ASSERT(count, "Unexpected count value: %ld\n", count);
+
+ for (i = 0; i < BIT(nr_fixed_counters); i++) {
+ bitmap = BIT(i);
+ count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_ALLOW,
+ bitmap);
+ TEST_ASSERT_EQ(!!count, !!(bitmap & BIT(idx)));
+
+ count = test_with_fixed_counter_filter(vcpu, KVM_PMU_EVENT_DENY,
+ bitmap);
+ TEST_ASSERT_EQ(!!count, !(bitmap & BIT(idx)));
+
+ /*
+ * Check that fixed_counter_bitmap has higher priority than
+ * events[] when both are set.
+ */
+ count = test_set_gp_and_fixed_event_filter(vcpu,
+ KVM_PMU_EVENT_ALLOW,
+ bitmap);
+ TEST_ASSERT_EQ(!!count, !!(bitmap & BIT(idx)));
+
+ count = test_set_gp_and_fixed_event_filter(vcpu,
+ KVM_PMU_EVENT_DENY,
+ bitmap);
+ TEST_ASSERT_EQ(!!count, !(bitmap & BIT(idx)));
+ }
+}
+
+static void test_fixed_counter_bitmap(void)
+{
+ uint8_t nr_fixed_counters = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+ struct kvm_vm *vm;
+ struct kvm_vcpu *vcpu;
+ uint8_t idx;
+
+ /*
+ * Check that pmu_event_filter works as expected when it's applied to
+ * fixed performance counters.
+ */
+ for (idx = 0; idx < nr_fixed_counters; idx++) {
+ vm = vm_create_with_one_vcpu(&vcpu,
+ intel_run_fixed_counter_guest_code);
+ vcpu_args_set(vcpu, 1, idx);
+ __test_fixed_counter_bitmap(vcpu, idx, nr_fixed_counters);
+ kvm_vm_free(vm);
+ }
}
int main(int argc, char *argv[])
@@ -829,6 +957,7 @@ int main(int argc, char *argv[])
kvm_vm_free(vm);
test_pmu_config_disable(guest_code);
+ test_fixed_counter_bitmap();
return 0;
}
diff --git a/tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c b/tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c
index 4c416ebe7d66..cbc92a862ea9 100644
--- a/tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c
+++ b/tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c
@@ -57,7 +57,7 @@ int main(void)
for (i = 0; i < KVM_MAX_VCPUS; i++)
vcpu_set_msr(vcpus[i], MSR_IA32_APICBASE, LAPIC_X2APIC);
- ASSERT_EQ(pthread_create(&thread, NULL, race, vcpus[0]), 0);
+ TEST_ASSERT_EQ(pthread_create(&thread, NULL, race, vcpus[0]), 0);
vcpuN = vcpus[KVM_MAX_VCPUS - 1];
for (t = time(NULL) + TIMEOUT; time(NULL) < t;) {
@@ -65,8 +65,8 @@ int main(void)
vcpu_set_msr(vcpuN, MSR_IA32_APICBASE, LAPIC_DISABLED);
}
- ASSERT_EQ(pthread_cancel(thread), 0);
- ASSERT_EQ(pthread_join(thread, NULL), 0);
+ TEST_ASSERT_EQ(pthread_cancel(thread), 0);
+ TEST_ASSERT_EQ(pthread_join(thread, NULL), 0);
kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
index b25d7556b638..366cf18600bc 100644
--- a/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
+++ b/tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c
@@ -20,7 +20,7 @@ static void guest_bsp_vcpu(void *arg)
{
GUEST_SYNC(1);
- GUEST_ASSERT(get_bsp_flag() != 0);
+ GUEST_ASSERT_NE(get_bsp_flag(), 0);
GUEST_DONE();
}
@@ -29,7 +29,7 @@ static void guest_not_bsp_vcpu(void *arg)
{
GUEST_SYNC(1);
- GUEST_ASSERT(get_bsp_flag() == 0);
+ GUEST_ASSERT_EQ(get_bsp_flag(), 0);
GUEST_DONE();
}
@@ -65,7 +65,7 @@ static void run_vcpu(struct kvm_vcpu *vcpu)
stage);
break;
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
+ REPORT_GUEST_ASSERT(uc);
default:
TEST_ASSERT(false, "Unexpected exit: %s",
exit_reason_str(vcpu->run->exit_reason));
diff --git a/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c b/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c
index 4e2479716da6..7ee44496cf97 100644
--- a/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c
+++ b/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c
@@ -8,7 +8,6 @@
* Copyright (C) 2021, Red Hat, Inc.
*
*/
-
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>
@@ -34,13 +33,12 @@ static void l2_guest_code_int(void);
static void guest_int_handler(struct ex_regs *regs)
{
int_fired++;
- GUEST_ASSERT_2(regs->rip == (unsigned long)l2_guest_code_int,
- regs->rip, (unsigned long)l2_guest_code_int);
+ GUEST_ASSERT_EQ(regs->rip, (unsigned long)l2_guest_code_int);
}
static void l2_guest_code_int(void)
{
- GUEST_ASSERT_1(int_fired == 1, int_fired);
+ GUEST_ASSERT_EQ(int_fired, 1);
/*
* Same as the vmmcall() function, but with a ud2 sneaked after the
@@ -53,7 +51,7 @@ static void l2_guest_code_int(void)
: "rbx", "rdx", "rsi", "rdi", "r8", "r9",
"r10", "r11", "r12", "r13", "r14", "r15");
- GUEST_ASSERT_1(bp_fired == 1, bp_fired);
+ GUEST_ASSERT_EQ(bp_fired, 1);
hlt();
}
@@ -66,9 +64,9 @@ static void guest_nmi_handler(struct ex_regs *regs)
if (nmi_stage_get() == 1) {
vmmcall();
- GUEST_ASSERT(false);
+ GUEST_FAIL("Unexpected resume after VMMCALL");
} else {
- GUEST_ASSERT_1(nmi_stage_get() == 3, nmi_stage_get());
+ GUEST_ASSERT_EQ(nmi_stage_get(), 3);
GUEST_DONE();
}
}
@@ -104,7 +102,8 @@ static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t i
}
run_guest(vmcb, svm->vmcb_gpa);
- GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_VMMCALL,
+ __GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL,
+ "Expected VMMCAL #VMEXIT, got '0x%x', info1 = '0x%llx, info2 = '0x%llx'",
vmcb->control.exit_code,
vmcb->control.exit_info_1, vmcb->control.exit_info_2);
@@ -112,7 +111,7 @@ static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t i
clgi();
x2apic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_NMI);
- GUEST_ASSERT_1(nmi_stage_get() == 1, nmi_stage_get());
+ GUEST_ASSERT_EQ(nmi_stage_get(), 1);
nmi_stage_inc();
stgi();
@@ -133,7 +132,8 @@ static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t i
vmcb->control.next_rip = vmcb->save.rip + 2;
run_guest(vmcb, svm->vmcb_gpa);
- GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_HLT,
+ __GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_HLT,
+ "Expected HLT #VMEXIT, got '0x%x', info1 = '0x%llx, info2 = '0x%llx'",
vmcb->control.exit_code,
vmcb->control.exit_info_1, vmcb->control.exit_info_2);
@@ -185,7 +185,7 @@ static void run_test(bool is_nmi)
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_3(uc, "vals = 0x%lx 0x%lx 0x%lx");
+ REPORT_GUEST_ASSERT(uc);
break;
/* NOT REACHED */
case UCALL_DONE:
diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
index 2da89fdc2471..00965ba33f73 100644
--- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
@@ -15,6 +15,7 @@
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
+#include <pthread.h>
#include "test_util.h"
#include "kvm_util.h"
@@ -80,6 +81,133 @@ static void compare_vcpu_events(struct kvm_vcpu_events *left,
#define TEST_SYNC_FIELDS (KVM_SYNC_X86_REGS|KVM_SYNC_X86_SREGS|KVM_SYNC_X86_EVENTS)
#define INVALID_SYNC_FIELD 0x80000000
+/*
+ * Set an exception as pending *and* injected while KVM is processing events.
+ * KVM is supposed to ignore/drop pending exceptions if userspace is also
+ * requesting that an exception be injected.
+ */
+static void *race_events_inj_pen(void *arg)
+{
+ struct kvm_run *run = (struct kvm_run *)arg;
+ struct kvm_vcpu_events *events = &run->s.regs.events;
+
+ WRITE_ONCE(events->exception.nr, UD_VECTOR);
+
+ for (;;) {
+ WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS);
+ WRITE_ONCE(events->flags, 0);
+ WRITE_ONCE(events->exception.injected, 1);
+ WRITE_ONCE(events->exception.pending, 1);
+
+ pthread_testcancel();
+ }
+
+ return NULL;
+}
+
+/*
+ * Set an invalid exception vector while KVM is processing events. KVM is
+ * supposed to reject any vector >= 32, as well as NMIs (vector 2).
+ */
+static void *race_events_exc(void *arg)
+{
+ struct kvm_run *run = (struct kvm_run *)arg;
+ struct kvm_vcpu_events *events = &run->s.regs.events;
+
+ for (;;) {
+ WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_EVENTS);
+ WRITE_ONCE(events->flags, 0);
+ WRITE_ONCE(events->exception.nr, UD_VECTOR);
+ WRITE_ONCE(events->exception.pending, 1);
+ WRITE_ONCE(events->exception.nr, 255);
+
+ pthread_testcancel();
+ }
+
+ return NULL;
+}
+
+/*
+ * Toggle CR4.PAE while KVM is processing SREGS; EFER.LME=1 with CR4.PAE=0 is
+ * illegal, and KVM's MMU heavily relies on vCPU state being valid.
+ */
+static noinline void *race_sregs_cr4(void *arg)
+{
+ struct kvm_run *run = (struct kvm_run *)arg;
+ __u64 *cr4 = &run->s.regs.sregs.cr4;
+ __u64 pae_enabled = *cr4;
+ __u64 pae_disabled = *cr4 & ~X86_CR4_PAE;
+
+ for (;;) {
+ WRITE_ONCE(run->kvm_dirty_regs, KVM_SYNC_X86_SREGS);
+ WRITE_ONCE(*cr4, pae_enabled);
+ asm volatile(".rept 512\n\t"
+ "nop\n\t"
+ ".endr");
+ WRITE_ONCE(*cr4, pae_disabled);
+
+ pthread_testcancel();
+ }
+
+ return NULL;
+}
+
+static void race_sync_regs(void *racer)
+{
+ const time_t TIMEOUT = 2; /* seconds, roughly */
+ struct kvm_x86_state *state;
+ struct kvm_translation tr;
+ struct kvm_vcpu *vcpu;
+ struct kvm_run *run;
+ struct kvm_vm *vm;
+ pthread_t thread;
+ time_t t;
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+ run = vcpu->run;
+
+ run->kvm_valid_regs = KVM_SYNC_X86_SREGS;
+ vcpu_run(vcpu);
+ run->kvm_valid_regs = 0;
+
+ /* Save state *before* spawning the thread that mucks with vCPU state. */
+ state = vcpu_save_state(vcpu);
+
+ /*
+ * Selftests run 64-bit guests by default, so both EFER.LME and CR4.PAE
+ * should already be set in guest state.
+ */
+ TEST_ASSERT((run->s.regs.sregs.cr4 & X86_CR4_PAE) &&
+ (run->s.regs.sregs.efer & EFER_LME),
+ "vCPU should be in long mode, CR4.PAE=%d, EFER.LME=%d",
+ !!(run->s.regs.sregs.cr4 & X86_CR4_PAE),
+ !!(run->s.regs.sregs.efer & EFER_LME));
+
+ TEST_ASSERT_EQ(pthread_create(&thread, NULL, racer, (void *)run), 0);
+
+ for (t = time(NULL) + TIMEOUT; time(NULL) < t;) {
+ /*
+ * Reload known good state if the vCPU triple faults, e.g. due
+ * to the unhandled #GPs being injected. VMX preserves state
+ * on shutdown, but SVM synthesizes an INIT as the VMCB state
+ * is architecturally undefined on triple fault.
+ */
+ if (!__vcpu_run(vcpu) && run->exit_reason == KVM_EXIT_SHUTDOWN)
+ vcpu_load_state(vcpu, state);
+
+ if (racer == race_sregs_cr4) {
+ tr = (struct kvm_translation) { .linear_address = 0 };
+ __vcpu_ioctl(vcpu, KVM_TRANSLATE, &tr);
+ }
+ }
+
+ TEST_ASSERT_EQ(pthread_cancel(thread), 0);
+ TEST_ASSERT_EQ(pthread_join(thread, NULL), 0);
+
+ kvm_x86_state_cleanup(state);
+ kvm_vm_free(vm);
+}
+
int main(int argc, char *argv[])
{
struct kvm_vcpu *vcpu;
@@ -218,5 +346,9 @@ int main(int argc, char *argv[])
kvm_vm_free(vm);
+ race_sync_regs(race_sregs_cr4);
+ race_sync_regs(race_events_exc);
+ race_sync_regs(race_events_inj_pen);
+
return 0;
}
diff --git a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
index c9f67702f657..12b0964f4f13 100644
--- a/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c
@@ -84,7 +84,7 @@ static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
ksft_test_result_pass("stage %d passed\n", stage + 1);
return;
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_2(uc, "values: %#lx, %#lx");
+ REPORT_GUEST_ASSERT(uc);
default:
TEST_ASSERT(false, "Unexpected exit: %s",
exit_reason_str(vcpu->run->exit_reason));
@@ -103,39 +103,39 @@ int main(void)
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
val = 0;
- ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
- ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
+ TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
+ TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
/* Guest: writes to MSR_IA32_TSC affect both MSRs. */
run_vcpu(vcpu, 1);
val = 1ull * GUEST_STEP;
- ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
- ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
+ TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
+ TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
/* Guest: writes to MSR_IA32_TSC_ADJUST affect both MSRs. */
run_vcpu(vcpu, 2);
val = 2ull * GUEST_STEP;
- ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
- ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
+ TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
+ TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
/*
* Host: writes to MSR_IA32_TSC set the host-side offset
* and therefore do not change MSR_IA32_TSC_ADJUST.
*/
vcpu_set_msr(vcpu, MSR_IA32_TSC, HOST_ADJUST + val);
- ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
- ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
+ TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
+ TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
run_vcpu(vcpu, 3);
/* Host: writes to MSR_IA32_TSC_ADJUST do not modify the TSC. */
vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, UNITY * 123456);
- ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
- ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456);
+ TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
+ TEST_ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_TSC_ADJUST), UNITY * 123456);
/* Restore previous value. */
vcpu_set_msr(vcpu, MSR_IA32_TSC_ADJUST, val);
- ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
- ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
+ TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
+ TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
/*
* Guest: writes to MSR_IA32_TSC_ADJUST do not destroy the
@@ -143,8 +143,8 @@ int main(void)
*/
run_vcpu(vcpu, 4);
val = 3ull * GUEST_STEP;
- ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
- ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
+ TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), HOST_ADJUST + val);
+ TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val);
/*
* Guest: writes to MSR_IA32_TSC affect both MSRs, so the host-side
@@ -152,8 +152,8 @@ int main(void)
*/
run_vcpu(vcpu, 5);
val = 4ull * GUEST_STEP;
- ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
- ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);
+ TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC), val);
+ TEST_ASSERT_EQ(rounded_host_rdmsr(MSR_IA32_TSC_ADJUST), val - HOST_ADJUST);
kvm_vm_free(vm);
diff --git a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
index 0cb51fa42773..255c50b0dc32 100644
--- a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
+++ b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c
@@ -20,8 +20,8 @@ static void guest_ins_port80(uint8_t *buffer, unsigned int count)
end = (unsigned long)buffer + 8192;
asm volatile("cld; rep; insb" : "+D"(buffer), "+c"(count) : "d"(0x80) : "memory");
- GUEST_ASSERT_1(count == 0, count);
- GUEST_ASSERT_2((unsigned long)buffer == end, buffer, end);
+ GUEST_ASSERT_EQ(count, 0);
+ GUEST_ASSERT_EQ((unsigned long)buffer, end);
}
static void guest_code(void)
@@ -43,7 +43,9 @@ static void guest_code(void)
memset(buffer, 0, sizeof(buffer));
guest_ins_port80(buffer, 8192);
for (i = 0; i < 8192; i++)
- GUEST_ASSERT_2(buffer[i] == 0xaa, i, buffer[i]);
+ __GUEST_ASSERT(buffer[i] == 0xaa,
+ "Expected '0xaa', got '0x%x' at buffer[%u]",
+ buffer[i], i);
GUEST_DONE();
}
@@ -91,7 +93,7 @@ int main(int argc, char *argv[])
case UCALL_DONE:
break;
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_2(uc, "argN+1 = 0x%lx, argN+2 = 0x%lx");
+ REPORT_GUEST_ASSERT(uc);
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c b/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c
index be0bdb8c6f78..a9b827c69f32 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c
@@ -50,7 +50,7 @@ static void set_timer(void)
timer.it_value.tv_sec = 0;
timer.it_value.tv_usec = 200;
timer.it_interval = timer.it_value;
- ASSERT_EQ(setitimer(ITIMER_REAL, &timer, NULL), 0);
+ TEST_ASSERT_EQ(setitimer(ITIMER_REAL, &timer, NULL), 0);
}
static void set_or_clear_invalid_guest_state(struct kvm_vcpu *vcpu, bool set)
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
index 4c90f76930f9..ebbcb0a3f743 100644
--- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c
@@ -10,7 +10,6 @@
* and check it can be retrieved with KVM_GET_MSR, also test
* the invalid LBR formats are rejected.
*/
-
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <sys/ioctl.h>
@@ -52,23 +51,24 @@ static const union perf_capabilities format_caps = {
.pebs_format = -1,
};
+static void guest_test_perf_capabilities_gp(uint64_t val)
+{
+ uint8_t vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, val);
+
+ __GUEST_ASSERT(vector == GP_VECTOR,
+ "Expected #GP for value '0x%llx', got vector '0x%x'",
+ val, vector);
+}
+
static void guest_code(uint64_t current_val)
{
- uint8_t vector;
int i;
- vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, current_val);
- GUEST_ASSERT_2(vector == GP_VECTOR, current_val, vector);
-
- vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES, 0);
- GUEST_ASSERT_2(vector == GP_VECTOR, 0, vector);
+ guest_test_perf_capabilities_gp(current_val);
+ guest_test_perf_capabilities_gp(0);
- for (i = 0; i < 64; i++) {
- vector = wrmsr_safe(MSR_IA32_PERF_CAPABILITIES,
- current_val ^ BIT_ULL(i));
- GUEST_ASSERT_2(vector == GP_VECTOR,
- current_val ^ BIT_ULL(i), vector);
- }
+ for (i = 0; i < 64; i++)
+ guest_test_perf_capabilities_gp(current_val ^ BIT_ULL(i));
GUEST_DONE();
}
@@ -95,7 +95,7 @@ static void test_guest_wrmsr_perf_capabilities(union perf_capabilities host_cap)
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_2(uc, "val = 0x%lx, vector = %lu");
+ REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
break;
@@ -103,7 +103,8 @@ static void test_guest_wrmsr_perf_capabilities(union perf_capabilities host_cap)
TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
}
- ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES), host_cap.capabilities);
+ TEST_ASSERT_EQ(vcpu_get_msr(vcpu, MSR_IA32_PERF_CAPABILITIES),
+ host_cap.capabilities);
vcpu_set_msr(vcpu, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities);
diff --git a/tools/testing/selftests/kvm/x86_64/xapic_state_test.c b/tools/testing/selftests/kvm/x86_64/xapic_state_test.c
index 396c13f42457..ab75b873a4ad 100644
--- a/tools/testing/selftests/kvm/x86_64/xapic_state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xapic_state_test.c
@@ -65,17 +65,17 @@ static void ____test_icr(struct xapic_vcpu *x, uint64_t val)
vcpu_ioctl(vcpu, KVM_SET_LAPIC, &xapic);
vcpu_run(vcpu);
- ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
- ASSERT_EQ(uc.args[1], val);
+ TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
+ TEST_ASSERT_EQ(uc.args[1], val);
vcpu_ioctl(vcpu, KVM_GET_LAPIC, &xapic);
icr = (u64)(*((u32 *)&xapic.regs[APIC_ICR])) |
(u64)(*((u32 *)&xapic.regs[APIC_ICR2])) << 32;
if (!x->is_x2apic) {
val &= (-1u | (0xffull << (32 + 24)));
- ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
+ TEST_ASSERT_EQ(icr, val & ~APIC_ICR_BUSY);
} else {
- ASSERT_EQ(icr & ~APIC_ICR_BUSY, val & ~APIC_ICR_BUSY);
+ TEST_ASSERT_EQ(icr & ~APIC_ICR_BUSY, val & ~APIC_ICR_BUSY);
}
}
diff --git a/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
index 905bd5ae4431..77d04a7bdadd 100644
--- a/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c
@@ -4,7 +4,6 @@
*
* Copyright (C) 2022, Google LLC.
*/
-
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
@@ -20,13 +19,14 @@
* Assert that architectural dependency rules are satisfied, e.g. that AVX is
* supported if and only if SSE is supported.
*/
-#define ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0, xfeatures, dependencies) \
-do { \
- uint64_t __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \
- \
- GUEST_ASSERT_3((__supported & (xfeatures)) != (xfeatures) || \
- __supported == ((xfeatures) | (dependencies)), \
- __supported, (xfeatures), (dependencies)); \
+#define ASSERT_XFEATURE_DEPENDENCIES(supported_xcr0, xfeatures, dependencies) \
+do { \
+ uint64_t __supported = (supported_xcr0) & ((xfeatures) | (dependencies)); \
+ \
+ __GUEST_ASSERT((__supported & (xfeatures)) != (xfeatures) || \
+ __supported == ((xfeatures) | (dependencies)), \
+ "supported = 0x%llx, xfeatures = 0x%llx, dependencies = 0x%llx", \
+ __supported, (xfeatures), (dependencies)); \
} while (0)
/*
@@ -41,7 +41,8 @@ do { \
do { \
uint64_t __supported = (supported_xcr0) & (xfeatures); \
\
- GUEST_ASSERT_2(!__supported || __supported == (xfeatures), \
+ __GUEST_ASSERT(!__supported || __supported == (xfeatures), \
+ "supported = 0x%llx, xfeatures = 0x%llx", \
__supported, (xfeatures)); \
} while (0)
@@ -79,14 +80,18 @@ static void guest_code(void)
XFEATURE_MASK_XTILE);
vector = xsetbv_safe(0, supported_xcr0);
- GUEST_ASSERT_2(!vector, supported_xcr0, vector);
+ __GUEST_ASSERT(!vector,
+ "Expected success on XSETBV(0x%llx), got vector '0x%x'",
+ supported_xcr0, vector);
for (i = 0; i < 64; i++) {
if (supported_xcr0 & BIT_ULL(i))
continue;
vector = xsetbv_safe(0, supported_xcr0 | BIT_ULL(i));
- GUEST_ASSERT_3(vector == GP_VECTOR, supported_xcr0, vector, BIT_ULL(i));
+ __GUEST_ASSERT(vector == GP_VECTOR,
+ "Expected #GP on XSETBV(0x%llx), supported XCR0 = %llx, got vector '0x%x'",
+ BIT_ULL(i), supported_xcr0, vector);
}
GUEST_DONE();
@@ -117,7 +122,7 @@ int main(int argc, char *argv[])
switch (get_ucall(vcpu, &uc)) {
case UCALL_ABORT:
- REPORT_GUEST_ASSERT_3(uc, "0x%lx 0x%lx 0x%lx");
+ REPORT_GUEST_ASSERT(uc);
break;
case UCALL_DONE:
goto done;
diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
index c94cde3b523f..e149d0574961 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
@@ -108,16 +108,16 @@ int main(int argc, char *argv[])
vcpu_run(vcpu);
if (run->exit_reason == KVM_EXIT_XEN) {
- ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL);
- ASSERT_EQ(run->xen.u.hcall.cpl, 0);
- ASSERT_EQ(run->xen.u.hcall.longmode, 1);
- ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE);
- ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1));
- ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2));
- ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3));
- ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4));
- ASSERT_EQ(run->xen.u.hcall.params[4], ARGVALUE(5));
- ASSERT_EQ(run->xen.u.hcall.params[5], ARGVALUE(6));
+ TEST_ASSERT_EQ(run->xen.type, KVM_EXIT_XEN_HCALL);
+ TEST_ASSERT_EQ(run->xen.u.hcall.cpl, 0);
+ TEST_ASSERT_EQ(run->xen.u.hcall.longmode, 1);
+ TEST_ASSERT_EQ(run->xen.u.hcall.input, INPUTVALUE);
+ TEST_ASSERT_EQ(run->xen.u.hcall.params[0], ARGVALUE(1));
+ TEST_ASSERT_EQ(run->xen.u.hcall.params[1], ARGVALUE(2));
+ TEST_ASSERT_EQ(run->xen.u.hcall.params[2], ARGVALUE(3));
+ TEST_ASSERT_EQ(run->xen.u.hcall.params[3], ARGVALUE(4));
+ TEST_ASSERT_EQ(run->xen.u.hcall.params[4], ARGVALUE(5));
+ TEST_ASSERT_EQ(run->xen.u.hcall.params[5], ARGVALUE(6));
run->xen.u.hcall.result = RETVALUE;
continue;
}
diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
index 83d565569512..251594306d40 100644
--- a/tools/testing/selftests/landlock/fs_test.c
+++ b/tools/testing/selftests/landlock/fs_test.c
@@ -113,7 +113,7 @@ static bool supports_filesystem(const char *const filesystem)
{
char str[32];
int len;
- bool res;
+ bool res = true;
FILE *const inf = fopen("/proc/filesystems", "r");
/*
@@ -125,14 +125,16 @@ static bool supports_filesystem(const char *const filesystem)
/* filesystem can be null for bind mounts. */
if (!filesystem)
- return true;
+ goto out;
len = snprintf(str, sizeof(str), "nodev\t%s\n", filesystem);
if (len >= sizeof(str))
/* Ignores too-long filesystem names. */
- return true;
+ goto out;
res = fgrep(inf, str);
+
+out:
fclose(inf);
return res;
}
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index d328af4a149c..e7d2a530618a 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -12,7 +12,8 @@ ksft_skip=4
TESTS="unregister down carrier nexthop suppress ipv6_notify ipv4_notify \
ipv6_rt ipv4_rt ipv6_addr_metric ipv4_addr_metric ipv6_route_metrics \
ipv4_route_metrics ipv4_route_v6_gw rp_filter ipv4_del_addr \
- ipv6_del_addr ipv4_mangle ipv6_mangle ipv4_bcast_neigh fib6_gc_test"
+ ipv6_del_addr ipv4_mangle ipv6_mangle ipv4_bcast_neigh fib6_gc_test \
+ ipv4_mpath_list ipv6_mpath_list"
VERBOSE=0
PAUSE_ON_FAIL=no
@@ -2352,6 +2353,156 @@ ipv4_bcast_neigh_test()
cleanup
}
+mpath_dep_check()
+{
+ if [ ! -x "$(command -v mausezahn)" ]; then
+ echo "mausezahn command not found. Skipping test"
+ return 1
+ fi
+
+ if [ ! -x "$(command -v jq)" ]; then
+ echo "jq command not found. Skipping test"
+ return 1
+ fi
+
+ if [ ! -x "$(command -v bc)" ]; then
+ echo "bc command not found. Skipping test"
+ return 1
+ fi
+
+ if [ ! -x "$(command -v perf)" ]; then
+ echo "perf command not found. Skipping test"
+ return 1
+ fi
+
+ perf list fib:* | grep -q fib_table_lookup
+ if [ $? -ne 0 ]; then
+ echo "IPv4 FIB tracepoint not found. Skipping test"
+ return 1
+ fi
+
+ perf list fib6:* | grep -q fib6_table_lookup
+ if [ $? -ne 0 ]; then
+ echo "IPv6 FIB tracepoint not found. Skipping test"
+ return 1
+ fi
+
+ return 0
+}
+
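+# Print a single stats64 counter for a device, e.g.
+# "link_stats_get ns2 veth2 rx packets", as a bare number usable in bc.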
+link_stats_get()
+{
+ local ns=$1; shift
+ local dev=$1; shift
+ local dir=$1; shift
+ local stat=$1; shift
+
+ ip -n $ns -j -s link show dev $dev \
+ | jq '.[]["stats64"]["'$dir'"]["'$stat'"]'
+}
+
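+# Compare the FIB lookup tracepoint count recorded by perf (JSON output in
+# $file) against the number of packets received; the test passes if at least
+# 95% of the packets triggered a lookup.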
+list_rcv_eval()
+{
+ local file=$1; shift
+ local expected=$1; shift
+
+ local count=$(tail -n 1 $file | jq '.["counter-value"] | tonumber | floor')
+ local ratio=$(echo "scale=2; $count / $expected" | bc -l)
+ local res=$(echo "$ratio >= 0.95" | bc)
+ [[ $res -eq 1 ]]
+ log_test $? 0 "Multipath route hit ratio ($ratio)"
+}
+
+ipv4_mpath_list_test()
+{
+ echo
+ echo "IPv4 multipath list receive tests"
+
+ mpath_dep_check || return 1
+
+ route_setup
+
+ set -e
+ run_cmd "ip netns exec ns1 ethtool -K veth1 tcp-segmentation-offload off"
+
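+ # Software GRO with a flush timeout and deferred hard IRQs makes veth2
+ # deliver packets to the stack in batches (lists), which is the receive
+ # path this test exercises.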
+ run_cmd "ip netns exec ns2 bash -c \"echo 20000 > /sys/class/net/veth2/gro_flush_timeout\""
+ run_cmd "ip netns exec ns2 bash -c \"echo 1 > /sys/class/net/veth2/napi_defer_hard_irqs\""
+ run_cmd "ip netns exec ns2 ethtool -K veth2 generic-receive-offload on"
+ run_cmd "ip -n ns2 link add name nh1 up type dummy"
+ run_cmd "ip -n ns2 link add name nh2 up type dummy"
+ run_cmd "ip -n ns2 address add 172.16.201.1/24 dev nh1"
+ run_cmd "ip -n ns2 address add 172.16.202.1/24 dev nh2"
+ run_cmd "ip -n ns2 neigh add 172.16.201.2 lladdr 00:11:22:33:44:55 nud perm dev nh1"
+ run_cmd "ip -n ns2 neigh add 172.16.202.2 lladdr 00:aa:bb:cc:dd:ee nud perm dev nh2"
+ run_cmd "ip -n ns2 route add 203.0.113.0/24
+ nexthop via 172.16.201.2 nexthop via 172.16.202.2"
+ run_cmd "ip netns exec ns2 sysctl -qw net.ipv4.fib_multipath_hash_policy=1"
+ set +e
+
+ local dmac=$(ip -n ns2 -j link show dev veth2 | jq -r '.[]["address"]')
+ local tmp_file=$(mktemp)
+ local cmd="ip netns exec ns1 mausezahn veth1 -a own -b $dmac
+ -A 172.16.101.1 -B 203.0.113.1 -t udp 'sp=12345,dp=0-65535' -q"
+
+ # Packets forwarded in a list using a multipath route must not reuse a
+ # cached result so that a flow always hits the same nexthop. In other
+ # words, the FIB lookup tracepoint needs to be triggered for every
+ # packet.
+ local t0_rx_pkts=$(link_stats_get ns2 veth2 rx packets)
+ run_cmd "perf stat -e fib:fib_table_lookup --filter 'err == 0' -j -o $tmp_file -- $cmd"
+ local t1_rx_pkts=$(link_stats_get ns2 veth2 rx packets)
+ local diff=$(echo $t1_rx_pkts - $t0_rx_pkts | bc -l)
+ list_rcv_eval $tmp_file $diff
+
+ rm $tmp_file
+ route_cleanup
+}
+
+ipv6_mpath_list_test()
+{
+ echo
+ echo "IPv6 multipath list receive tests"
+
+ mpath_dep_check || return 1
+
+ route_setup
+
+ set -e
+ run_cmd "ip netns exec ns1 ethtool -K veth1 tcp-segmentation-offload off"
+
+ run_cmd "ip netns exec ns2 bash -c \"echo 20000 > /sys/class/net/veth2/gro_flush_timeout\""
+ run_cmd "ip netns exec ns2 bash -c \"echo 1 > /sys/class/net/veth2/napi_defer_hard_irqs\""
+ run_cmd "ip netns exec ns2 ethtool -K veth2 generic-receive-offload on"
+ run_cmd "ip -n ns2 link add name nh1 up type dummy"
+ run_cmd "ip -n ns2 link add name nh2 up type dummy"
+ run_cmd "ip -n ns2 -6 address add 2001:db8:201::1/64 dev nh1"
+ run_cmd "ip -n ns2 -6 address add 2001:db8:202::1/64 dev nh2"
+ run_cmd "ip -n ns2 -6 neigh add 2001:db8:201::2 lladdr 00:11:22:33:44:55 nud perm dev nh1"
+ run_cmd "ip -n ns2 -6 neigh add 2001:db8:202::2 lladdr 00:aa:bb:cc:dd:ee nud perm dev nh2"
+ run_cmd "ip -n ns2 -6 route add 2001:db8:301::/64
+ nexthop via 2001:db8:201::2 nexthop via 2001:db8:202::2"
+ run_cmd "ip netns exec ns2 sysctl -qw net.ipv6.fib_multipath_hash_policy=1"
+ set +e
+
+ local dmac=$(ip -n ns2 -j link show dev veth2 | jq -r '.[]["address"]')
+ local tmp_file=$(mktemp)
+ local cmd="ip netns exec ns1 mausezahn -6 veth1 -a own -b $dmac
+ -A 2001:db8:101::1 -B 2001:db8:301::1 -t udp 'sp=12345,dp=0-65535' -q"
+
+ # Packets forwarded in a list using a multipath route must not reuse a
+ # cached result so that a flow always hits the same nexthop. In other
+ # words, the FIB lookup tracepoint needs to be triggered for every
+ # packet.
+ local t0_rx_pkts=$(link_stats_get ns2 veth2 rx packets)
+ run_cmd "perf stat -e fib6:fib6_table_lookup --filter 'err == 0' -j -o $tmp_file -- $cmd"
+ local t1_rx_pkts=$(link_stats_get ns2 veth2 rx packets)
+ local diff=$(echo $t1_rx_pkts - $t0_rx_pkts | bc -l)
+ list_rcv_eval $tmp_file $diff
+
+ rm $tmp_file
+ route_cleanup
+}
+
################################################################################
# usage
@@ -2433,6 +2584,8 @@ do
ipv6_mangle) ipv6_mangle_test;;
ipv4_bcast_neigh) ipv4_bcast_neigh_test;;
fib6_gc_test|ipv6_gc) fib6_gc_test;;
+ ipv4_mpath_list) ipv4_mpath_list_test;;
+ ipv6_mpath_list) ipv6_mpath_list_test;;
help) echo "Test names: $TESTS"; exit 0;;
esac
diff --git a/tools/testing/selftests/riscv/Makefile b/tools/testing/selftests/riscv/Makefile
index f4b3d5c9af5b..4a9ff515a3a0 100644
--- a/tools/testing/selftests/riscv/Makefile
+++ b/tools/testing/selftests/riscv/Makefile
@@ -5,7 +5,7 @@
ARCH ?= $(shell uname -m 2>/dev/null || echo not)
ifneq (,$(filter $(ARCH),riscv))
-RISCV_SUBTARGETS ?= hwprobe vector
+RISCV_SUBTARGETS ?= hwprobe vector mm
else
RISCV_SUBTARGETS :=
endif
diff --git a/tools/testing/selftests/riscv/mm/.gitignore b/tools/testing/selftests/riscv/mm/.gitignore
new file mode 100644
index 000000000000..5c2c57cb950c
--- /dev/null
+++ b/tools/testing/selftests/riscv/mm/.gitignore
@@ -0,0 +1,2 @@
+mmap_bottomup
+mmap_default
diff --git a/tools/testing/selftests/riscv/mm/Makefile b/tools/testing/selftests/riscv/mm/Makefile
new file mode 100644
index 000000000000..11e0f0568923
--- /dev/null
+++ b/tools/testing/selftests/riscv/mm/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2021 ARM Limited
+# Originally tools/testing/arm64/abi/Makefile
+
+# Additional include paths needed by kselftest.h and local headers
+CFLAGS += -D_GNU_SOURCE -std=gnu99 -I.
+
+TEST_GEN_FILES := testcases/mmap_default testcases/mmap_bottomup
+
+TEST_PROGS := testcases/run_mmap.sh
+
+include ../../lib.mk
+
+$(OUTPUT)/mm: testcases/mmap_default.c testcases/mmap_bottomup.c testcases/mmap_test.h
+ $(CC) -o$@ $(CFLAGS) $(LDFLAGS) $^
diff --git a/tools/testing/selftests/riscv/mm/testcases/mmap_bottomup.c b/tools/testing/selftests/riscv/mm/testcases/mmap_bottomup.c
new file mode 100644
index 000000000000..b29379f7e478
--- /dev/null
+++ b/tools/testing/selftests/riscv/mm/testcases/mmap_bottomup.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <sys/mman.h>
+#include <testcases/mmap_test.h>
+
+#include "../../kselftest_harness.h"
+
+TEST(infinite_rlimit)
+{
+// Only works on 64 bit
+#if __riscv_xlen == 64
+ struct addresses mmap_addresses;
+
+ EXPECT_EQ(BOTTOM_UP, memory_layout());
+
+ do_mmaps(&mmap_addresses);
+
+ EXPECT_NE(MAP_FAILED, mmap_addresses.no_hint);
+ EXPECT_NE(MAP_FAILED, mmap_addresses.on_37_addr);
+ EXPECT_NE(MAP_FAILED, mmap_addresses.on_38_addr);
+ EXPECT_NE(MAP_FAILED, mmap_addresses.on_46_addr);
+ EXPECT_NE(MAP_FAILED, mmap_addresses.on_47_addr);
+ EXPECT_NE(MAP_FAILED, mmap_addresses.on_55_addr);
+ EXPECT_NE(MAP_FAILED, mmap_addresses.on_56_addr);
+
+ EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.no_hint);
+ EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_37_addr);
+ EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_38_addr);
+ EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_46_addr);
+ EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_47_addr);
+ EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_55_addr);
+ EXPECT_GT(1UL << 56, (unsigned long)mmap_addresses.on_56_addr);
+#endif
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/riscv/mm/testcases/mmap_default.c b/tools/testing/selftests/riscv/mm/testcases/mmap_default.c
new file mode 100644
index 000000000000..d1accb91b726
--- /dev/null
+++ b/tools/testing/selftests/riscv/mm/testcases/mmap_default.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <sys/mman.h>
+#include <testcases/mmap_test.h>
+
+#include "../../kselftest_harness.h"
+
+TEST(default_rlimit)
+{
+// Only works on 64 bit
+#if __riscv_xlen == 64
+ struct addresses mmap_addresses;
+
+ EXPECT_EQ(TOP_DOWN, memory_layout());
+
+ do_mmaps(&mmap_addresses);
+
+ EXPECT_NE(MAP_FAILED, mmap_addresses.no_hint);
+ EXPECT_NE(MAP_FAILED, mmap_addresses.on_37_addr);
+ EXPECT_NE(MAP_FAILED, mmap_addresses.on_38_addr);
+ EXPECT_NE(MAP_FAILED, mmap_addresses.on_46_addr);
+ EXPECT_NE(MAP_FAILED, mmap_addresses.on_47_addr);
+ EXPECT_NE(MAP_FAILED, mmap_addresses.on_55_addr);
+ EXPECT_NE(MAP_FAILED, mmap_addresses.on_56_addr);
+
+ EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.no_hint);
+ EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_37_addr);
+ EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_38_addr);
+ EXPECT_GT(1UL << 38, (unsigned long)mmap_addresses.on_46_addr);
+ EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_47_addr);
+ EXPECT_GT(1UL << 47, (unsigned long)mmap_addresses.on_55_addr);
+ EXPECT_GT(1UL << 56, (unsigned long)mmap_addresses.on_56_addr);
+#endif
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/riscv/mm/testcases/mmap_test.h b/tools/testing/selftests/riscv/mm/testcases/mmap_test.h
new file mode 100644
index 000000000000..9b8434f62f57
--- /dev/null
+++ b/tools/testing/selftests/riscv/mm/testcases/mmap_test.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _TESTCASES_MMAP_TEST_H
+#define _TESTCASES_MMAP_TEST_H
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <stddef.h>
+
+#define TOP_DOWN 0
+#define BOTTOM_UP 1
+
+struct addresses {
+ int *no_hint;
+ int *on_37_addr;
+ int *on_38_addr;
+ int *on_46_addr;
+ int *on_47_addr;
+ int *on_55_addr;
+ int *on_56_addr;
+};
+
+static inline void do_mmaps(struct addresses *mmap_addresses)
+{
+ /*
+ * Place all of the hint addresses on the boundaries of mmap
+ * sv39, sv48, sv57
+ * User addresses end at 1<<38, 1<<47, 1<<56 respectively
+ */
+ void *on_37_bits = (void *)(1UL << 37);
+ void *on_38_bits = (void *)(1UL << 38);
+ void *on_46_bits = (void *)(1UL << 46);
+ void *on_47_bits = (void *)(1UL << 47);
+ void *on_55_bits = (void *)(1UL << 55);
+ void *on_56_bits = (void *)(1UL << 56);
+
+ int prot = PROT_READ | PROT_WRITE;
+ int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+
+ mmap_addresses->no_hint =
+ mmap(NULL, 5 * sizeof(int), prot, flags, 0, 0);
+ mmap_addresses->on_37_addr =
+ mmap(on_37_bits, 5 * sizeof(int), prot, flags, 0, 0);
+ mmap_addresses->on_38_addr =
+ mmap(on_38_bits, 5 * sizeof(int), prot, flags, 0, 0);
+ mmap_addresses->on_46_addr =
+ mmap(on_46_bits, 5 * sizeof(int), prot, flags, 0, 0);
+ mmap_addresses->on_47_addr =
+ mmap(on_47_bits, 5 * sizeof(int), prot, flags, 0, 0);
+ mmap_addresses->on_55_addr =
+ mmap(on_55_bits, 5 * sizeof(int), prot, flags, 0, 0);
+ mmap_addresses->on_56_addr =
+ mmap(on_56_bits, 5 * sizeof(int), prot, flags, 0, 0);
+}
+
+static inline int memory_layout(void)
+{
+ int prot = PROT_READ | PROT_WRITE;
+ int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+
+ void *value1 = mmap(NULL, sizeof(int), prot, flags, 0, 0);
+ void *value2 = mmap(NULL, sizeof(int), prot, flags, 0, 0);
+
+ return value2 > value1;
+}
+#endif /* _TESTCASES_MMAP_TEST_H */
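The header above checks how RISC-V places hinted mmaps relative to the sv39/sv48/sv57 user address limits. As a rough illustration of the behaviour the expectations in mmap_bottomup.c and mmap_default.c rely on, the following standalone sketch (hypothetical example, not part of the patch) maps one page with a hint inside the sv39 range and reports where it landed:

/*
 * Hypothetical illustration of the hint behaviour the selftests check:
 * a hint below a page-table ceiling is expected to keep the mapping
 * below that ceiling. Not part of the patch.
 */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *hint = (void *)(1UL << 37);	/* inside the sv39 user range */
	void *addr = mmap(hint, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (addr == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* The selftests expect such a mapping to stay below 1UL << 38. */
	printf("mapped at %p (below 2^38: %s)\n", addr,
	       (unsigned long)addr < (1UL << 38) ? "yes" : "no");
	munmap(addr, 4096);
	return 0;
}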
diff --git a/tools/testing/selftests/riscv/mm/testcases/run_mmap.sh b/tools/testing/selftests/riscv/mm/testcases/run_mmap.sh
new file mode 100755
index 000000000000..ca5ad7c48bad
--- /dev/null
+++ b/tools/testing/selftests/riscv/mm/testcases/run_mmap.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+original_stack_limit=$(ulimit -s)
+
+./mmap_default
+
+# Force mmap_bottomup to be run with a bottom-up memory layout
+# due to the unlimited stack
+ulimit -s unlimited
+./mmap_bottomup
+ulimit -s $original_stack_limit
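run_mmap.sh relies on "ulimit -s unlimited" to force the kernel into a bottom-up mmap layout before running mmap_bottomup. The same effect can be requested from C with setrlimit(); a hedged sketch of that alternative (illustrative only, the selftest itself uses the shell wrapper, and raising the hard limit may require privilege):

/*
 * Sketch: raise RLIMIT_STACK to RLIM_INFINITY, which is what
 * "ulimit -s unlimited" does, then exec the bottom-up test so the
 * child sees the bottom-up mmap layout. Illustration only.
 */
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

int main(void)
{
	struct rlimit rl = {
		.rlim_cur = RLIM_INFINITY,
		.rlim_max = RLIM_INFINITY,
	};

	if (setrlimit(RLIMIT_STACK, &rl)) {
		perror("setrlimit");
		return 1;
	}

	execl("./mmap_bottomup", "mmap_bottomup", (char *)NULL);
	perror("execl");
	return 1;
}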
diff --git a/tools/testing/selftests/x86/test_shadow_stack.c b/tools/testing/selftests/x86/test_shadow_stack.c
index 2188968674cb..757e6527f67e 100644
--- a/tools/testing/selftests/x86/test_shadow_stack.c
+++ b/tools/testing/selftests/x86/test_shadow_stack.c
@@ -40,7 +40,7 @@
* without building the headers.
*/
#ifndef __NR_map_shadow_stack
-#define __NR_map_shadow_stack 452
+#define __NR_map_shadow_stack 453
#define SHADOW_STACK_SET_TOKEN (1ULL << 0)
diff --git a/tools/workqueue/wq_dump.py b/tools/workqueue/wq_dump.py
new file mode 100644
index 000000000000..d0df5833f2c1
--- /dev/null
+++ b/tools/workqueue/wq_dump.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env drgn
+#
+# Copyright (C) 2023 Tejun Heo <tj@kernel.org>
+# Copyright (C) 2023 Meta Platforms, Inc. and affiliates.
+
+desc = """
+This is a drgn script to show the current workqueue configuration. For more
+info on drgn, visit https://github.com/osandov/drgn.
+
+Affinity Scopes
+===============
+
+Shows the CPUs that can be used for unbound workqueues and how they will be
+grouped by each available affinity type. For each type:
+
+ nr_pods number of CPU pods in the affinity type
+ pod_cpus CPUs in each pod
+ pod_node NUMA node for memory allocation for each pod
+ cpu_pod pod that each CPU is associated with
+
+Worker Pools
+============
+
+Lists all worker pools indexed by their ID. For each pool:
+
+ ref number of pool_workqueue's associated with this pool
+ nice nice value of the worker threads in the pool
+ idle number of idle workers
+ workers number of all workers
+ cpu CPU the pool is associated with (per-cpu pool)
+ cpus CPUs the workers in the pool can run on (unbound pool)
+
+Workqueue CPU -> pool
+=====================
+
+Lists all workqueues along with their type and worker pool association. For
+each workqueue:
+
+ NAME TYPE[,FLAGS] POOL_ID...
+
+ NAME name of the workqueue
+ TYPE percpu, unbound or ordered
+ FLAGS S: strict affinity scope
+ POOL_ID worker pool ID associated with each possible CPU
+"""
+
+import sys
+
+import drgn
+from drgn.helpers.linux.list import list_for_each_entry,list_empty
+from drgn.helpers.linux.percpu import per_cpu_ptr
+from drgn.helpers.linux.cpumask import for_each_cpu,for_each_possible_cpu
+from drgn.helpers.linux.idr import idr_for_each
+
+import argparse
+parser = argparse.ArgumentParser(description=desc,
+ formatter_class=argparse.RawTextHelpFormatter)
+args = parser.parse_args()
+
+def err(s):
+ print(s, file=sys.stderr, flush=True)
+ sys.exit(1)
+
+def cpumask_str(cpumask):
+ output = ""
+ base = 0
+ v = 0
+ for cpu in for_each_cpu(cpumask[0]):
+ while cpu - base >= 32:
+ output += f'{hex(v)} '
+ base += 32
+ v = 0
+ v |= 1 << (cpu - base)
+ if v > 0:
+ output += f'{v:08x}'
+ return output.strip()
+
+worker_pool_idr = prog['worker_pool_idr']
+workqueues = prog['workqueues']
+wq_unbound_cpumask = prog['wq_unbound_cpumask']
+wq_pod_types = prog['wq_pod_types']
+wq_affn_dfl = prog['wq_affn_dfl']
+wq_affn_names = prog['wq_affn_names']
+
+WQ_UNBOUND = prog['WQ_UNBOUND']
+WQ_ORDERED = prog['__WQ_ORDERED']
+WQ_MEM_RECLAIM = prog['WQ_MEM_RECLAIM']
+
+WQ_AFFN_CPU = prog['WQ_AFFN_CPU']
+WQ_AFFN_SMT = prog['WQ_AFFN_SMT']
+WQ_AFFN_CACHE = prog['WQ_AFFN_CACHE']
+WQ_AFFN_NUMA = prog['WQ_AFFN_NUMA']
+WQ_AFFN_SYSTEM = prog['WQ_AFFN_SYSTEM']
+
+print('Affinity Scopes')
+print('===============')
+
+print(f'wq_unbound_cpumask={cpumask_str(wq_unbound_cpumask)}')
+
+def print_pod_type(pt):
+ print(f' nr_pods {pt.nr_pods.value_()}')
+
+ print(' pod_cpus', end='')
+ for pod in range(pt.nr_pods):
+ print(f' [{pod}]={cpumask_str(pt.pod_cpus[pod])}', end='')
+ print('')
+
+ print(' pod_node', end='')
+ for pod in range(pt.nr_pods):
+ print(f' [{pod}]={pt.pod_node[pod].value_()}', end='')
+ print('')
+
+ print(f' cpu_pod ', end='')
+ for cpu in for_each_possible_cpu(prog):
+ print(f' [{cpu}]={pt.cpu_pod[cpu].value_()}', end='')
+ print('')
+
+for affn in [WQ_AFFN_CPU, WQ_AFFN_SMT, WQ_AFFN_CACHE, WQ_AFFN_NUMA, WQ_AFFN_SYSTEM]:
+ print('')
+ print(f'{wq_affn_names[affn].string_().decode().upper()}{" (default)" if affn == wq_affn_dfl else ""}')
+ print_pod_type(wq_pod_types[affn])
+
+print('')
+print('Worker Pools')
+print('============')
+
+max_pool_id_len = 0
+max_ref_len = 0
+for pi, pool in idr_for_each(worker_pool_idr):
+ pool = drgn.Object(prog, 'struct worker_pool', address=pool)
+ max_pool_id_len = max(max_pool_id_len, len(f'{pi}'))
+ max_ref_len = max(max_ref_len, len(f'{pool.refcnt.value_()}'))
+
+for pi, pool in idr_for_each(worker_pool_idr):
+ pool = drgn.Object(prog, 'struct worker_pool', address=pool)
+ print(f'pool[{pi:0{max_pool_id_len}}] ref={pool.refcnt.value_():{max_ref_len}} nice={pool.attrs.nice.value_():3} ', end='')
+ print(f'idle/workers={pool.nr_idle.value_():3}/{pool.nr_workers.value_():3} ', end='')
+ if pool.cpu >= 0:
+ print(f'cpu={pool.cpu.value_():3}', end='')
+ else:
+ print(f'cpus={cpumask_str(pool.attrs.cpumask)}', end='')
+ print(f' pod_cpus={cpumask_str(pool.attrs.__pod_cpumask)}', end='')
+ if pool.attrs.affn_strict:
+ print(' strict', end='')
+ print('')
+
+print('')
+print('Workqueue CPU -> pool')
+print('=====================')
+
+print('[ workqueue \\ type CPU', end='')
+for cpu in for_each_possible_cpu(prog):
+ print(f' {cpu:{max_pool_id_len}}', end='')
+print(' dfl]')
+
+for wq in list_for_each_entry('struct workqueue_struct', workqueues.address_of_(), 'list'):
+ print(f'{wq.name.string_().decode()[-24:]:24}', end='')
+ if wq.flags & WQ_UNBOUND:
+ if wq.flags & WQ_ORDERED:
+ print(' ordered ', end='')
+ else:
+ print(' unbound', end='')
+ if wq.unbound_attrs.affn_strict:
+ print(',S ', end='')
+ else:
+ print(' ', end='')
+ else:
+ print(' percpu ', end='')
+
+ for cpu in for_each_possible_cpu(prog):
+ pool_id = per_cpu_ptr(wq.cpu_pwq, cpu)[0].pool.id.value_()
+ field_len = max(len(str(cpu)), max_pool_id_len)
+ print(f' {pool_id:{field_len}}', end='')
+
+ if wq.flags & WQ_UNBOUND:
+ print(f' {wq.dfl_pwq.pool.id.value_():{max_pool_id_len}}', end='')
+ print('')
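The "Affinity Scopes" output above mirrors the kernel's per-scope pod bookkeeping; print_pod_type() simply walks nr_pods, pod_cpus, pod_node and cpu_pod. For reference, the structure the script reads looks roughly like this (sketched from the fields accessed above; the authoritative definition is struct wq_pod_type in kernel/workqueue.c):

/*
 * Rough shape of the per-affinity-scope pod structure that
 * wq_dump.py walks; sketched from the fields the script accesses,
 * not copied verbatim from kernel/workqueue.c.
 */
struct wq_pod_type {
	int		nr_pods;	/* number of pods */
	cpumask_var_t	*pod_cpus;	/* pod -> cpus */
	int		*pod_node;	/* pod -> NUMA node */
	int		*cpu_pod;	/* cpu -> pod */
};

Like wq_monitor.py, the script runs under drgn (e.g. "drgn tools/workqueue/wq_dump.py" against the live kernel).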
diff --git a/tools/workqueue/wq_monitor.py b/tools/workqueue/wq_monitor.py
index 6e258d123e8c..a8856a9c45dc 100644
--- a/tools/workqueue/wq_monitor.py
+++ b/tools/workqueue/wq_monitor.py
@@ -20,8 +20,11 @@ https://github.com/osandov/drgn.
and got excluded from concurrency management to avoid stalling
other work items.
- CMwake The number of concurrency-management wake-ups while executing a
- work item of the workqueue.
+ CMW/RPR For per-cpu workqueues, the number of concurrency-management
+ wake-ups while executing a work item of the workqueue. For
+ unbound workqueues, the number of times a worker was repatriated
+ to its affinity scope after being migrated to an off-scope CPU by
+ the scheduler.
mayday The number of times the rescuer was requested while waiting for
new worker creation.
@@ -65,6 +68,7 @@ PWQ_STAT_COMPLETED = prog['PWQ_STAT_COMPLETED'] # work items completed exec
PWQ_STAT_CPU_TIME = prog['PWQ_STAT_CPU_TIME'] # total CPU time consumed
PWQ_STAT_CPU_INTENSIVE = prog['PWQ_STAT_CPU_INTENSIVE'] # wq_cpu_intensive_thresh_us violations
PWQ_STAT_CM_WAKEUP = prog['PWQ_STAT_CM_WAKEUP'] # concurrency-management worker wakeups
+PWQ_STAT_REPATRIATED = prog['PWQ_STAT_REPATRIATED'] # unbound workers brought back into scope
PWQ_STAT_MAYDAY = prog['PWQ_STAT_MAYDAY'] # maydays to rescuer
PWQ_STAT_RESCUED = prog['PWQ_STAT_RESCUED'] # linked work items executed by rescuer
PWQ_NR_STATS = prog['PWQ_NR_STATS']
@@ -89,22 +93,25 @@ class WqStats:
'cpu_time' : self.stats[PWQ_STAT_CPU_TIME],
'cpu_intensive' : self.stats[PWQ_STAT_CPU_INTENSIVE],
'cm_wakeup' : self.stats[PWQ_STAT_CM_WAKEUP],
+ 'repatriated' : self.stats[PWQ_STAT_REPATRIATED],
'mayday' : self.stats[PWQ_STAT_MAYDAY],
'rescued' : self.stats[PWQ_STAT_RESCUED], }
def table_header_str():
return f'{"":>24} {"total":>8} {"infl":>5} {"CPUtime":>8} '\
- f'{"CPUitsv":>7} {"CMwake":>7} {"mayday":>7} {"rescued":>7}'
+ f'{"CPUitsv":>7} {"CMW/RPR":>7} {"mayday":>7} {"rescued":>7}'
def table_row_str(self):
cpu_intensive = '-'
- cm_wakeup = '-'
+ cmw_rpr = '-'
mayday = '-'
rescued = '-'
- if not self.unbound:
+ if self.unbound:
+ cmw_rpr = str(self.stats[PWQ_STAT_REPATRIATED])
+ else:
cpu_intensive = str(self.stats[PWQ_STAT_CPU_INTENSIVE])
- cm_wakeup = str(self.stats[PWQ_STAT_CM_WAKEUP])
+ cmw_rpr = str(self.stats[PWQ_STAT_CM_WAKEUP])
if self.mem_reclaim:
mayday = str(self.stats[PWQ_STAT_MAYDAY])
@@ -115,7 +122,7 @@ class WqStats:
f'{max(self.stats[PWQ_STAT_STARTED] - self.stats[PWQ_STAT_COMPLETED], 0):5} ' \
f'{self.stats[PWQ_STAT_CPU_TIME] / 1000000:8.1f} ' \
f'{cpu_intensive:>7} ' \
- f'{cm_wakeup:>7} ' \
+ f'{cmw_rpr:>7} ' \
f'{mayday:>7} ' \
f'{rescued:>7} '
return out.rstrip(':')
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index b74916de5183..484d0873061c 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -62,9 +62,6 @@ config HAVE_KVM_CPU_RELAX_INTERCEPT
config KVM_VFIO
bool
-config HAVE_KVM_ARCH_TLB_FLUSH_ALL
- bool
-
config HAVE_KVM_INVALID_WAKEUPS
bool
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2500178cf444..486800a7024b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -345,7 +345,6 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
}
EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
-#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
++kvm->stat.generic.remote_tlb_flush_requests;
@@ -361,12 +360,38 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
* kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
* barrier here.
*/
- if (!kvm_arch_flush_remote_tlb(kvm)
+ if (!kvm_arch_flush_remote_tlbs(kvm)
|| kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
++kvm->stat.generic.remote_tlb_flush;
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
-#endif
+
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
+{
+ if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages))
+ return;
+
+ /*
+ * Fall back to flushing all TLBs if the architecture's range-based
+ * TLB invalidation is unsupported or can't be performed for whatever
+ * reason.
+ */
+ kvm_flush_remote_tlbs(kvm);
+}
+
+void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot)
+{
+ /*
+ * All current use cases for flushing the TLBs for a specific memslot
+ * are related to dirty logging, and many do the TLB flush out of
+ * mmu_lock. The interaction between the various operations on memslot
+ * must be serialized by slots_locks to ensure the TLB flush from one
+ * operation is observed by any other operation on the same memslot.
+ */
+ lockdep_assert_held(&kvm->slots_lock);
+ kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);
+}
static void kvm_flush_shadow_all(struct kvm *kvm)
{
@@ -526,7 +551,7 @@ typedef void (*on_unlock_fn_t)(struct kvm *kvm);
struct kvm_hva_range {
unsigned long start;
unsigned long end;
- pte_t pte;
+ union kvm_mmu_notifier_arg arg;
hva_handler_t handler;
on_lock_fn_t on_lock;
on_unlock_fn_t on_unlock;
@@ -547,6 +572,8 @@ static void kvm_null_fn(void)
}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)
+static const union kvm_mmu_notifier_arg KVM_MMU_NOTIFIER_NO_ARG;
+
/* Iterate over each memslot intersecting [start, last] (inclusive) range */
#define kvm_for_each_memslot_in_hva_range(node, slots, start, last) \
for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
@@ -591,7 +618,7 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
* bother making these conditional (to avoid writes on
* the second or later invocation of the handler).
*/
- gfn_range.pte = range->pte;
+ gfn_range.arg = range->arg;
gfn_range.may_block = range->may_block;
/*
@@ -632,14 +659,14 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
unsigned long start,
unsigned long end,
- pte_t pte,
+ union kvm_mmu_notifier_arg arg,
hva_handler_t handler)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
const struct kvm_hva_range range = {
.start = start,
.end = end,
- .pte = pte,
+ .arg = arg,
.handler = handler,
.on_lock = (void *)kvm_null_fn,
.on_unlock = (void *)kvm_null_fn,
@@ -659,7 +686,6 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
const struct kvm_hva_range range = {
.start = start,
.end = end,
- .pte = __pte(0),
.handler = handler,
.on_lock = (void *)kvm_null_fn,
.on_unlock = (void *)kvm_null_fn,
@@ -693,6 +719,7 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
pte_t pte)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ const union kvm_mmu_notifier_arg arg = { .pte = pte };
trace_kvm_set_spte_hva(address);
@@ -708,7 +735,7 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
return;
- kvm_handle_hva_range(mn, address, address + 1, pte, kvm_change_spte_gfn);
+ kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn);
}
void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
@@ -747,7 +774,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
const struct kvm_hva_range hva_range = {
.start = range->start,
.end = range->end,
- .pte = __pte(0),
.handler = kvm_unmap_gfn_range,
.on_lock = kvm_mmu_invalidate_begin,
.on_unlock = kvm_arch_guest_memory_reclaimed,
@@ -812,7 +838,6 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
const struct kvm_hva_range hva_range = {
.start = range->start,
.end = range->end,
- .pte = __pte(0),
.handler = (void *)kvm_null_fn,
.on_lock = kvm_mmu_invalidate_end,
.on_unlock = (void *)kvm_null_fn,
@@ -845,7 +870,8 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
{
trace_kvm_age_hva(start, end);
- return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
+ return kvm_handle_hva_range(mn, start, end, KVM_MMU_NOTIFIER_NO_ARG,
+ kvm_age_gfn);
}
static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
@@ -2180,7 +2206,7 @@ static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
}
if (flush)
- kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
+ kvm_flush_remote_tlbs_memslot(kvm, memslot);
if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
return -EFAULT;
@@ -2297,7 +2323,7 @@ static int kvm_clear_dirty_log_protect(struct kvm *kvm,
KVM_MMU_UNLOCK(kvm);
if (flush)
- kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
+ kvm_flush_remote_tlbs_memslot(kvm, memslot);
return 0;
}
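The mmu-notifier hunks above replace the raw pte_t member of struct kvm_hva_range (and the matching field of struct kvm_gfn_range) with union kvm_mmu_notifier_arg, so only the change_pte path needs to populate it while the other notifiers pass KVM_MMU_NOTIFIER_NO_ARG. As a sketch of the union implied by that usage (the authoritative definition lives in include/linux/kvm_host.h):

/*
 * Sketch of the notifier argument implied by the hunks above: today it
 * only carries the pte for change_pte notifications. See
 * include/linux/kvm_host.h for the authoritative definition.
 */
union kvm_mmu_notifier_arg {
	pte_t pte;
};

Handlers such as kvm_change_spte_gfn then read the value as range->arg.pte instead of range->pte.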